Columns (name: type, observed range; one record per row below):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 2-616
- content_id: string, length 40
- detected_licenses: list, length 0-69
- license_type: string, 2 classes
- repo_name: string, length 5-118
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, length 4-63
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64, 2.91k-686M, nullable
- star_events_count: int64, 0-209k
- fork_events_count: int64, 0-110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string, 213 classes
- src_encoding: string, 30 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 2-10.3M
- extension: string, 246 classes
- content: string, length 2-10.3M
- authors: list, length 1
- author_id: string, length 0-212
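Each record below carries these fields in the order listed, separated by `|` markers. As a minimal consumption sketch, assuming the records are exported as one JSON object per line (the file name and loading code are illustrative, not part of this dataset's tooling):

import json

def iter_permissive_python(jsonl_path):
    """Yield (repo_name, path, content) for small, permissive, non-generated files."""
    with open(jsonl_path, encoding="utf-8") as f:
        for line in f:
            rec = json.loads(line)
            if rec["license_type"] != "permissive":      # the other class is "no_license"
                continue
            if rec["is_vendor"] or rec["is_generated"]:  # keep hand-written code only
                continue
            if rec["length_bytes"] > 100_000:            # skip very large blobs
                continue
            yield rec["repo_name"], rec["path"], rec["content"]

The raw records follow.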
473268ca477e93353c4880008855c1dca2b2e4d7
|
fe5ae06133f41eddc00157df6ab85f8eabb50afd
|
/ex8.py
|
861e8bffc510f5339b04c59f08da86af531d1f9d
|
[] |
no_license
|
haokong0703/PythonStartUp
|
32b39f8d2752a69a9cce83761554660264ea4461
|
c6edac07ee0cae0fc21560de364ea89cfed33fd0
|
refs/heads/master
| 2021-01-02T09:36:01.760941
| 2017-08-03T17:18:20
| 2017-08-03T17:18:20
| 99,257,442
| 0
| 0
| null | 2017-08-03T17:14:54
| 2017-08-03T17:14:54
| null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
formatter = "%r %r %r %r"

print(formatter % (1, 2, 3, 4))
print(formatter % ("one", "two", "three", "four"))
print(formatter % (True, False, False, True))
print(formatter % (formatter, formatter, formatter, formatter))

print(formatter % (
    "I had this thing.",
    "That you could type up right.",
    "But it didn't sing.",
    "So I said goodnight."
))
|
["noreply@github.com"] |
haokong0703.noreply@github.com
|
6aec8a5f6568461bf58c30842b564a52615b417d
|
1a2ef374bca08dd879f9798bccfa8005cb7e08be
|
/apps/Configuration/migrations/0001_initial.py
|
ef159af27810426ac954b8ed169f716e9017eb50
|
[] |
no_license
|
littleyunyun16/Auto_test_platform
|
ee9abe9d17dc60a28a46b29e019e4dbc6cc05cb4
|
fc10379b832e5d52b14c5f03c67f76d8dc985191
|
refs/heads/master
| 2023-02-25T19:39:48.890230
| 2021-01-28T15:09:10
| 2021-01-28T15:09:10
| 333,119,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
# Generated by Django 3.1.3 on 2021-01-28 14:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserPower',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('power', models.CharField(max_length=255)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': '用户权限',
'verbose_name_plural': '用户权限',
},
),
]
|
["1563115157@qq.com"] |
1563115157@qq.com
|
ccf3b93947b537abb68556df332c82c96f72d2bb
|
d9fbf494fc537b933debd9bad073fa42d546c770
|
/oops/oop_concept.py
|
5611c55a64bb1037544beadf5a41e32ea5b47539
|
[] |
no_license
|
aks789/python-code-basics-to-expert
|
c125e9f37bdcf0928b4366b66df29f2eff5e881a
|
ff5e15f3fd7338bc8105e94a8b2d4877797d6156
|
refs/heads/main
| 2023-05-07T11:11:29.632261
| 2021-05-30T06:13:59
| 2021-05-30T06:13:59
| 371,897,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
class Dog:
    species = 'mammal'  # class attribute, shared by all instances

    def __init__(self, breed, name):
        self.my_attr = breed
        self.name = name

    def bark(self, number):
        print('Woof!! My name is {} and number is {}'.format(self.name, number))

my_dog = Dog('Lab', 'akshay')
print(type(my_dog))
print(my_dog.my_attr)
print(my_dog.name)
print(my_dog.species)
my_dog.bark(12)

class Circle:
    pi = 3.14

    def __init__(self, radius=1):
        self.radius = radius

    def circumference(self):
        return Circle.pi * self.radius * 2

my_circle = Circle()
print(my_circle.circumference())
|
["aks789@gmail.com"] |
aks789@gmail.com
|
8647d26df7f988b7991592e5749aad87ff561a08
|
0f02f1e13993149833d09d0f85fc1e49435ddc13
|
/Lab9/hh_back/api/migrations/0002_auto_20210412_1409.py
|
2b2b5d9637fb50ad82142555625df6ace6b9415a
|
[] |
no_license
|
beksultanrrr/Web-development
|
f821b345eb9637f2d81a5b772c7f2269a80b71e1
|
a4192adae583b7c6a27d453ee1d325e01cb501df
|
refs/heads/master
| 2023-04-01T22:24:53.965225
| 2021-04-19T17:02:08
| 2021-04-19T17:02:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
# Generated by Django 2.0 on 2021-04-12 14:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='company',
options={'verbose_name_plural': 'Companies'},
),
migrations.AlterModelOptions(
name='vacancy',
options={'verbose_name_plural': 'Vacancies'},
),
migrations.AddField(
model_name='company',
name='address',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='company',
name='description',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='vacancy',
name='description',
field=models.TextField(default=''),
),
migrations.AddField(
model_name='vacancy',
name='salary',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='company',
name='city',
field=models.CharField(max_length=300),
),
migrations.AlterField(
model_name='vacancy',
name='company',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='api.Company'),
),
]
|
["elvinashm@gmail.com"] |
elvinashm@gmail.com
|
72ee7a4ce27029e51b027616e8398695f52f6fe0
|
778bc60655bc3e7d2b474cc9280eceabe700bfea
|
/models/__init__.py
|
363c1cdef1efe920dcfe8cc18ff8015178f9f8c6
|
[] |
no_license
|
12334zq/speculosity
|
166512beb14551b3e31a27d759d4e523f44ad2b2
|
01144dfaed2e380c6da16dd132fbe9081a3fab29
|
refs/heads/master
| 2021-09-15T23:02:49.075668
| 2018-06-12T10:14:37
| 2018-06-12T10:14:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
from .vgg16 import VGG16
from .vgg19 import VGG19
from .vgg11 import VGG11
|
["thibault.wildi@epfl.ch"] |
thibault.wildi@epfl.ch
|
c97b9d3e3131f7d9ab9e546f82002586e0c0d257
|
bd2900b3a3e77d3cf1f123ae2b5845391660b159
|
/tests/test_basic_elements.py
|
59559ec035ec12929a655847d089354b17424daf
|
[] |
no_license
|
YakunAlex/MIPT-testing-e2e
|
7c87245cf3ff48e9abd31fbc31842e135fe83a9e
|
64ee524cfa6bc5dc542a9f4319d75997589d5294
|
refs/heads/master
| 2022-07-31T00:19:43.570947
| 2020-05-21T20:08:56
| 2020-05-21T20:08:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from pages.main_page import MainPage
def test_basic_elements(browser_is_opened):
main_page = MainPage(*browser_is_opened)
assert main_page.check_news_banner_exit()
assert main_page.check_sponsors_notes_exist()
|
["ihatecapslook@gmail.com"] |
ihatecapslook@gmail.com
|
5d97d95d16c772bc0539baded389858d75f40403
|
7597fe68b0d7b750c3ddfeaeee19e8dedf8c3186
|
/library/sources/core/kernels/binop/binop_common.py
|
bbf7ca60afc1c3426fa8137f7a922756e4f4791f
|
[] |
no_license
|
nasingfaund/Yeppp-Mirror
|
33de08f1e5a50b7e75ee905ac4f026a178786752
|
23cc725a7489d376558bef3e92e31fda014b6c47
|
refs/heads/master
| 2023-03-20T13:14:23.502813
| 2016-08-06T06:34:54
| 2016-08-06T06:34:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,832
|
py
|
from peachpy.x86_64 import *
from peachpy import *
from instruction_maps.avx_instruction_maps import *
from instruction_maps.sse_instruction_maps import *
def avx2_scalar_instruction_select(input_type, output_type, op):
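    # Pick the (SCALAR_LOAD, SCALAR_OP, SCALAR_STORE) instruction emitters for
    # one element, based on the input/output element types and the op name.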
if input_type == Yep32u and output_type == Yep64u:
SCALAR_LOAD = lambda x, y: MOV(x.as_dword, y)
elif input_type.size == output_type.size:
SCALAR_LOAD = avx_scalar_mov_map[input_type]
else:
SCALAR_LOAD = avx_scalar_movsx_map[(input_type, output_type)]
op_map = { "add" : avx_scalar_add_map,
"subtract" : avx_scalar_sub_map,
"max" : avx_scalar_max_map,
"min" : avx_scalar_min_map }[op]
if output_type in [Yep8s, Yep8u, Yep16s, Yep16u, Yep32s, Yep32u,
Yep64s, Yep64u]:
SCALAR_OP = lambda x, y, z: op_map[output_type](x, z) \
if x == y else op_map[output_type](x, y)
else:
SCALAR_OP = lambda x, y, z: op_map[output_type](x, y, z)
SCALAR_STORE = avx_scalar_mov_map[output_type]
return SCALAR_LOAD, SCALAR_OP, SCALAR_STORE
def avx2_vector_instruction_select(input_type, output_type, op):
if input_type.size == output_type.size:
SIMD_LOAD = avx_vector_unaligned_mov_map[input_type]
else:
SIMD_LOAD = avx_vector_movsx_map[(input_type, output_type)]
op_map = { "add" : avx_vector_add_map,
"subtract" : avx_vector_sub_map,
"max" : avx_vector_max_map,
"min" : avx_vector_min_map }[op]
SIMD_OP = op_map[output_type]
SIMD_STORE = avx_vector_aligned_mov_map[output_type]
return SIMD_LOAD, SIMD_OP, SIMD_STORE
def sse_scalar_instruction_select(input_type, output_type, op):
if input_type == Yep32u and output_type == Yep64u:
SCALAR_LOAD = lambda x, y: MOV(x.as_dword, y)
elif input_type.size == output_type.size:
SCALAR_LOAD = sse_scalar_mov_map[input_type]
else:
SCALAR_LOAD = sse_scalar_movsx_map[(input_type, output_type)]
op_map = { "add" : sse_scalar_add_map,
"subtract" : sse_scalar_sub_map,
"max" : sse_scalar_max_map,
"min" : sse_scalar_min_map }[op]
SCALAR_OP = lambda x, y, z: op_map[output_type](x, z) \
if x == y else op_map[output_type](x, y)
SCALAR_STORE = sse_scalar_mov_map[output_type]
return SCALAR_LOAD, SCALAR_OP, SCALAR_STORE
def sse_vector_instruction_select(input_type, output_type, op):
if input_type.size == output_type.size:
SIMD_LOAD = sse_vector_unaligned_mov_map[input_type]
else:
SIMD_LOAD = sse_vector_movsx_map[(input_type, output_type)]
op_map = { "add" : sse_vector_add_map,
"subtract" : sse_vector_sub_map,
"max" : sse_vector_max_map,
"min" : sse_vector_min_map }[op]
SIMD_OP = lambda x, y, z: op_map[output_type](x, z) \
if x == y else op_map[output_type](x, y)
SIMD_STORE = sse_vector_aligned_mov_map[output_type]
return SIMD_LOAD, SIMD_OP, SIMD_STORE
def scalar_instruction_select(input_type, output_type, op, isa_ext):
if isa_ext == "AVX2":
return avx2_scalar_instruction_select(input_type, output_type, op)
elif isa_ext == "SSE":
return sse_scalar_instruction_select(input_type, output_type, op)
def vector_instruction_select(input_type, output_type, op, isa_ext):
if isa_ext == "AVX2":
return avx2_vector_instruction_select(input_type, output_type, op)
elif isa_ext == "SSE":
return sse_vector_instruction_select(input_type, output_type, op)
def scalar_reg_select(OUTPUT_TYPE, isa_ext):
if isa_ext == "AVX2":
reg_type = avx_scalar_register_map[OUTPUT_TYPE]
elif isa_ext == "SSE":
reg_type = sse_scalar_register_map[OUTPUT_TYPE]
return reg_type(), reg_type()
def vector_reg_select(isa_ext, UNROLL_FACTOR, scalar=False):
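    # Allocate UNROLL_FACTOR accumulator registers (YMM for AVX2, XMM for SSE),
    # plus either one shared operand register (scalar=True) or one per accumulator.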
if isa_ext == "AVX2":
reg = YMMRegister
elif isa_ext == "SSE":
reg = XMMRegister
simd_accs = [reg() for _ in range(UNROLL_FACTOR)]
if scalar:
simd_ops = reg()
else:
simd_ops = [reg() for _ in range(UNROLL_FACTOR)]
return simd_accs, simd_ops
def MOV_GPR_TO_VECTOR(vector_reg, gpr, input_type, output_type, isa_ext):
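    # Broadcast a scalar operand across all lanes of a vector register,
    # dispatching on the ISA extension (for the float types the source is
    # expected to already sit in an XMM register).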
if isa_ext == "AVX2":
avx2_mov_gpr_to_vector(vector_reg, gpr, input_type, output_type)
elif isa_ext == "SSE":
sse_mov_gpr_to_vector(vector_reg, gpr, input_type, output_type)
def avx2_mov_gpr_to_vector(vector_reg, gpr, input_type, output_type):
if input_type == Yep32f:
VMOVSS(vector_reg.as_xmm, gpr, gpr)
elif input_type == Yep64f:
VMOVSD(vector_reg.as_xmm, gpr, gpr)
else:
GPR_TO_VECTOR_MOV = avx_scalar_reg_to_vector_reg_mov_map[output_type]
if input_type.size < 4:
GPR_TO_VECTOR_MOV(vector_reg.as_xmm, gpr.as_dword)
else:
GPR_TO_VECTOR_MOV(vector_reg.as_xmm, gpr)
BROADCAST = avx_broadcast_map[output_type]
BROADCAST(vector_reg, vector_reg.as_xmm)
def sse_mov_gpr_to_vector(vector_reg, gpr, input_type, output_type):
if input_type == Yep32f:
PSHUFD(vector_reg, gpr, 0x0)
elif input_type == Yep64f:
assert vector_reg == gpr
PUNPCKLQDQ(vector_reg, gpr) # In this case, vector_reg == gpr
elif output_type.size <= 4:
if output_type.size < 4:
MOVZX(gpr.as_dword, gpr)
if output_type.size == 1:
IMUL(gpr.as_dword, gpr.as_dword, 0x01010101)
elif output_type.size == 2:
IMUL(gpr.as_dword, gpr.as_dword, 0x00010001)
MOVD(vector_reg, gpr.as_dword)
PSHUFD(vector_reg, vector_reg, 0x0)
elif output_type.size == 8:
MOVQ(vector_reg, gpr)
PUNPCKLQDQ(vector_reg, vector_reg)
|
["rguthrie6595@gmail.com"] |
rguthrie6595@gmail.com
|
0accec4ed702daf5f14fea3c2b52ce1ee500ff57
|
8f1738c453a38b4fb2cb75a471d7938f3ea6d49d
|
/167.py
|
e1a5e047e25c2bdc83845d4ab52ea543edd1f47e
|
["MIT"] |
permissive
|
geethakamath18/Leetcode
|
5f7492ad5909f84f1ee8f13b6484baa2af235ecc
|
8e55e0a47ee35ed100b30dda6682c7ce1033d4b2
|
refs/heads/master
| 2023-05-13T09:52:13.577146
| 2021-06-11T23:20:34
| 2021-06-11T23:20:34
| 291,392,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
# LeetCode problem 167: Two Sum II - Input array is sorted
from typing import List

class Solution:
    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        # Map each value to its index, then scan for a complement at a
        # different index.
        d = {}
        for i in range(len(numbers)):
            d[numbers[i]] = i
        for j in range(len(numbers)):
            if (target - numbers[j]) in d and d[target - numbers[j]] != j:
                return [j + 1, d[target - numbers[j]] + 1]  # 1-indexed positions
|
["37272001+geethakamath18@users.noreply.github.com"] |
37272001+geethakamath18@users.noreply.github.com
|
043c662c8aea819fade56d6bb682f6c44f7d36f3
|
162dec075b6e622c9495d115fbe8b7a83a992e3d
|
/apps/exam/admin.py
|
41011d4e67e2ef6196307e5ee93ecd27937ca81b
|
[] |
no_license
|
shubhamdevgan/flamboyant
|
b4fcca86b3c85b82fcdf42bc1af8279de465536a
|
461e5a16320cdea5c1945628633932e75b5e207a
|
refs/heads/master
| 2022-02-10T09:34:03.398111
| 2022-01-30T06:43:18
| 2022-01-30T06:43:18
| 230,481,030
| 0
| 0
| null | 2020-05-10T03:39:47
| 2019-12-27T16:58:50
|
CSS
|
UTF-8
|
Python
| false
| false
| 109
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Exam)
|
["devganshubham2@gmail.com"] |
devganshubham2@gmail.com
|
72c869f6621fd60a33ce24785b818e1ae15e5e87
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/gluinoGMSB/gluinoGMSB_M2450_ctau10000p0_TuneCUETP8M1_13TeV_pythia8_cff.py
|
f7d05477a5be68d6fcfb3e3c2425fddbdc7d028d
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 50,207
|
py
|
FLAVOR='gluino'
COM_ENERGY = 13000.
CROSS_SECTION = 0.000104886
CTAU = 10000.0
MASS_POINT = 2450
SLHA_TABLE="""
## Important note!
## This file has been modified by hand to give the gluino and the
## stop_1 a very narrow width, such that it can be used to try out
## the R-hadron machinery. It is not a realistic SUSY scenario.
##
##******************************************************************
## MadGraph/MadEvent *
##******************************************************************
## *
## param_card corresponding the SPS point 1a (by SoftSusy 2.0.5) *
## *
##******************************************************************
## Les Houches friendly file for the (MS)SM parameters of MadGraph *
## SM parameter set and decay widths produced by MSSMCalc *
##******************************************************************
##*Please note the following IMPORTANT issues: *
## *
##0. REFRAIN from editing this file by hand! Some of the parame- *
## ters are not independent. Always use a calculator. *
## *
##1. alpha_S(MZ) has been used in the calculation of the parameters*
## This value is KEPT by madgraph when no pdf are used lpp(i)=0, *
## but, for consistency, it will be reset by madgraph to the *
## value expected IF the pdfs for collisions with hadrons are *
## used. *
## *
##2. Values of the charm and bottom kinematic (pole) masses are *
## those used in the matrix elements and phase space UNLESS they *
## are set to ZERO from the start in the model (particles.dat) *
## This happens, for example, when using 5-flavor QCD where *
## charm and bottom are treated as partons in the initial state *
## and a zero mass might be hardwired in the model definition. *
## *
## The SUSY decays have calculated using SDECAY 1.1a *
## *
##******************************************************************
#
BLOCK DCINFO # Decay Program information
1 SDECAY # decay calculator
2 1.1a # version number
#
BLOCK SPINFO # Spectrum calculator information
1 SOFTSUSY # spectrum calculator
2 2.0.5 # version number
#
BLOCK MODSEL # Model selection
1 1 sugra
#
BLOCK SMINPUTS # Standard Model inputs
1 1.27934000E+02 # alpha_em^-1(M_Z)^MSbar
2 1.16637000E-05 # G_F [GeV^-2]
3 1.18000000E-01 # alpha_S(M_Z)^MSbar
4 9.11876000E+01 # M_Z pole mass
5 4.25000000E+00 # mb(mb)^MSbar
6 1.75000000E+02 # mt pole mass
7 1.77700000E+00 # mtau pole mass
#
BLOCK MINPAR # Input parameters - minimal models
1 1.00000000E+02 # m0
2 2.50000000E+02 # m12
3 1.00000000E+01 # tanb
4 1.00000000E+00 # sign(mu)
5 -1.00000000E+02 # A0
#
BLOCK MASS # Mass Spectrum
# PDG code mass particle
5 4.88991651E+00 # b-quark pole mass calculated from mb(mb)_Msbar
6 1.75000000E+02 # mt pole mass (not read by ME)
24 7.98290131E+01 # W+
25 1.25899057E+02 # h
35 3.99960116E+05 # H
36 3.99583917E+05 # A
37 4.07879012E+05 # H+
1000001 5.68441109E+05 # ~d_L
2000001 5.45228462E+05 # ~d_R
1000002 5.61119014E+05 # ~u_L
2000002 5.49259265E+05 # ~u_R
1000003 5.68441109E+05 # ~s_L
2000003 5.45228462E+05 # ~s_R
1000004 5.61119014E+05 # ~c_L
2000004 5.49259265E+05 # ~c_R
1000005 10000000 # ~b_1
2000005 5.12315123E+05 # ~b_2
1000006 1000000.00 # ~t_1
2000006 5.85785818E+05 # ~t_2
1000011 2.02915690E+05 # ~e_L
2000011 1.44102799E+05 # ~e_R
1000012 1.85258326E+05 # ~nu_eL
1000013 2.02915690E+05 # ~mu_L
2000013 1.44102799E+05 # ~mu_R
1000014 1.85258326E+05 # ~nu_muL
1000015 1.34490864E+05 # ~tau_1
2000015 2.06867805E+05 # ~tau_2
1000016 1.84708464E+05 # ~nu_tauL
1000021 2450 # ~g
1000022 10.0 # ~chi_10
1000023 1.81088157E+05 # ~chi_20
1000025 -3.63756027E+05 # ~chi_30
1000035 3.81729382E+05 # ~chi_40
1000024 1.81696474E+05 # ~chi_1+
1000037 3.79939320E+05 # ~chi_2+
#
BLOCK NMIX # Neutralino Mixing Matrix
1 1 9.86364430E-01 # N_11
1 2 -5.31103553E-02 # N_12
1 3 1.46433995E-01 # N_13
1 4 -5.31186117E-02 # N_14
2 1 9.93505358E-02 # N_21
2 2 9.44949299E-01 # N_22
2 3 -2.69846720E-01 # N_23
2 4 1.56150698E-01 # N_24
3 1 -6.03388002E-02 # N_31
3 2 8.77004854E-02 # N_32
3 3 6.95877493E-01 # N_33
3 4 7.10226984E-01 # N_34
4 1 -1.16507132E-01 # N_41
4 2 3.10739017E-01 # N_42
4 3 6.49225960E-01 # N_43
4 4 -6.84377823E-01 # N_44
#
BLOCK UMIX # Chargino Mixing Matrix U
1 1 9.16834859E-01 # U_11
1 2 -3.99266629E-01 # U_12
2 1 3.99266629E-01 # U_21
2 2 9.16834859E-01 # U_22
#
BLOCK VMIX # Chargino Mixing Matrix V
1 1 9.72557835E-01 # V_11
1 2 -2.32661249E-01 # V_12
2 1 2.32661249E-01 # V_21
2 2 9.72557835E-01 # V_22
#
BLOCK STOPMIX # Stop Mixing Matrix
1 1 5.53644960E-01 # O_{11}
1 2 8.32752820E-01 # O_{12}
2 1 8.32752820E-01 # O_{21}
2 2 -5.53644960E-01 # O_{22}
#
BLOCK SBOTMIX # Sbottom Mixing Matrix
1 1 9.38737896E-01 # O_{11}
1 2 3.44631925E-01 # O_{12}
2 1 -3.44631925E-01 # O_{21}
2 2 9.38737896E-01 # O_{22}
#
BLOCK STAUMIX # Stau Mixing Matrix
1 1 2.82487190E-01 # O_{11}
1 2 9.59271071E-01 # O_{12}
2 1 9.59271071E-01 # O_{21}
2 2 -2.82487190E-01 # O_{22}
#
BLOCK ALPHA # Higgs mixing
-1.13825210E-01 # Mixing angle in the neutral Higgs boson sector
#
BLOCK HMIX Q= 4.67034192E+02 # DRbar Higgs Parameters
1 3.57680977E+02 # mu(Q)MSSM DRbar
2 9.74862403E+00 # tan beta(Q)MSSM DRba
3 2.44894549E+02 # higgs vev(Q)MSSM DRb
4 1.66439065E+05 # mA^2(Q)MSSM DRbar
#
BLOCK GAUGE Q= 4.67034192E+02 # The gauge couplings
3 1.10178679E+00 # g3(Q) MSbar
#
BLOCK AU Q= 4.67034192E+02 # The trilinear couplings
1 1 0.00000000E+00 # A_u(Q) DRbar
2 2 0.00000000E+00 # A_c(Q) DRbar
3 3 -4.98129778E+02 # A_t(Q) DRbar
#
BLOCK AD Q= 4.67034192E+02 # The trilinear couplings
1 1 0.00000000E+00 # A_d(Q) DRbar
2 2 0.00000000E+00 # A_s(Q) DRbar
3 3 -7.97274397E+02 # A_b(Q) DRbar
#
BLOCK AE Q= 4.67034192E+02 # The trilinear couplings
1 1 0.00000000E+00 # A_e(Q) DRbar
2 2 0.00000000E+00 # A_mu(Q) DRbar
3 3 -2.51776873E+02 # A_tau(Q) DRbar
#
BLOCK YU Q= 4.67034192E+02 # The Yukawa couplings
3 3 8.92844550E-01 # y_t(Q) DRbar
#
BLOCK YD Q= 4.67034192E+02 # The Yukawa couplings
3 3 1.38840206E-01 # y_b(Q) DRbar
#
BLOCK YE Q= 4.67034192E+02 # The Yukawa couplings
3 3 1.00890810E-01 # y_tau(Q) DRbar
#
BLOCK MSOFT Q= 4.67034192E+02 # The soft SUSY breaking masses at the scale Q
1 1.01396534E+02 # M_1(Q)
2 1.91504241E+02 # M_2(Q)
3 5.88263031E+02 # M_3(Q)
21 3.23374943E+04 # mH1^2(Q)
22 -1.28800134E+05 # mH2^2(Q)
31 1.95334764E+02 # meL(Q)
32 1.95334764E+02 # mmuL(Q)
33 1.94495956E+02 # mtauL(Q)
34 1.36494061E+02 # meR(Q)
35 1.36494061E+02 # mmuR(Q)
36 1.34043428E+02 # mtauR(Q)
41 5.47573466E+02 # mqL1(Q)
42 5.47573466E+02 # mqL2(Q)
43 4.98763839E+02 # mqL3(Q)
44 5.29511195E+02 # muR(Q)
45 5.29511195E+02 # mcR(Q)
46 4.23245877E+02 # mtR(Q)
47 5.23148807E+02 # mdR(Q)
48 5.23148807E+02 # msR(Q)
49 5.19867261E+02 # mbR(Q)
#
#
#
# =================
# |The decay table|
# =================
#
# - The multi-body decays for the inos, stops and sbottoms are included.
#
# PDG Width
DECAY 25 1.98610799E-03 # h decays
# BR NDA ID1 ID2
1.45642955E-01 2 15 -15 # BR(H1 -> tau- tau+)
8.19070713E-01 2 5 -5 # BR(H1 -> b bb)
3.36338173E-02 2 24 -24 # BR(H1 -> W+ W-)
1.65251528E-03 2 23 23 # BR(H1 -> Z Z)
#
# PDG Width
DECAY 35 5.74801389E-01 # H decays
# BR NDA ID1 ID2
1.39072676E-01 2 15 -15 # BR(H -> tau- tau+)
4.84110879E-02 2 6 -6 # BR(H -> t tb)
7.89500067E-01 2 5 -5 # BR(H -> b bb)
3.87681171E-03 2 24 -24 # BR(H -> W+ W-)
1.80454752E-03 2 23 23 # BR(H -> Z Z)
0.00000000E+00 2 24 -37 # BR(H -> W+ H-)
0.00000000E+00 2 -24 37 # BR(H -> W- H+)
0.00000000E+00 2 37 -37 # BR(H -> H+ H-)
1.73348101E-02 2 25 25 # BR(H -> h h)
0.00000000E+00 2 36 36 # BR(H -> A A)
#
# PDG Width
DECAY 36 6.32178488E-01 # A decays
# BR NDA ID1 ID2
1.26659725E-01 2 15 -15 # BR(A -> tau- tau+)
1.51081526E-01 2 6 -6 # BR(A -> t tb)
7.19406137E-01 2 5 -5 # BR(A -> b bb)
2.85261228E-03 2 23 25 # BR(A -> Z h)
0.00000000E+00 2 23 35 # BR(A -> Z H)
0.00000000E+00 2 24 -37 # BR(A -> W+ H-)
0.00000000E+00 2 -24 37 # BR(A -> W- H+)
#
# PDG Width
DECAY 37 5.46962813E-01 # H+ decays
# BR NDA ID1 ID2
1.49435135E-01 2 -15 16 # BR(H+ -> tau+ nu_tau)
8.46811711E-01 2 6 -5 # BR(H+ -> t bb)
3.75315387E-03 2 24 25 # BR(H+ -> W+ h)
0.00000000E+00 2 24 35 # BR(H+ -> W+ H)
0.00000000E+00 2 24 36 # BR(H+ -> W+ A)
#
# PDG Width
DECAY 1000021 1.9732e-17 #1.9732E-14 # gluino decays
# BR NDA ID1 ID2
1.0 2 1000039 21
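# hand-edited (see the note at the top of this table): the gluino is forced to
# decay to a gravitino (PDG code 1000039) plus a gluon (21)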
# 2.08454202E-02 2 1000001 -1 # BR(~g -> ~d_L db)
# 2.08454202E-02 2 -1000001 1 # BR(~g -> ~d_L* d )
# 5.07075274E-02 2 2000001 -1 # BR(~g -> ~d_R db)
# 5.07075274E-02 2 -2000001 1 # BR(~g -> ~d_R* d )
# 2.89787767E-02 2 1000002 -2 # BR(~g -> ~u_L ub)
# 2.89787767E-02 2 -1000002 2 # BR(~g -> ~u_L* u )
# 4.46872773E-02 2 2000002 -2 # BR(~g -> ~u_R ub)
# 4.46872773E-02 2 -2000002 2 # BR(~g -> ~u_R* u )
# 2.08454202E-02 2 1000003 -3 # BR(~g -> ~s_L sb)
# 2.08454202E-02 2 -1000003 3 # BR(~g -> ~s_L* s )
# 5.07075274E-02 2 2000003 -3 # BR(~g -> ~s_R sb)
# 5.07075274E-02 2 -2000003 3 # BR(~g -> ~s_R* s )
# 2.89787767E-02 2 1000004 -4 # BR(~g -> ~c_L cb)
# 2.89787767E-02 2 -1000004 4 # BR(~g -> ~c_L* c )
# 4.46872773E-02 2 2000004 -4 # BR(~g -> ~c_R cb)
# 4.46872773E-02 2 -2000004 4 # BR(~g -> ~c_R* c )
# 1.05840237E-01 2 1000005 -5 # BR(~g -> ~b_1 bb)
# 1.05840237E-01 2 -1000005 5 # BR(~g -> ~b_1* b )
# 5.56574805E-02 2 2000005 -5 # BR(~g -> ~b_2 bb)
# 5.56574805E-02 2 -2000005 5 # BR(~g -> ~b_2* b )
# 4.80642793E-02 2 1000006 -6 # BR(~g -> ~t_1 tb)
# 4.80642793E-02 2 -1000006 6 # BR(~g -> ~t_1* t )
# 0.00000000E+00 2 2000006 -6 # BR(~g -> ~t_2 tb)
# 0.00000000E+00 2 -2000006 6 # BR(~g -> ~t_2* t )
#
# PDG Width
DECAY 1000006 1.97326971684839e-14 # stop1 decays
# BR NDA ID1 ID2
5.0E-01 2 -5 -1
5.0E-01 2 -5 -3
# 3.33333333E-01 2 5 -11
# 3.33333333E-01 2 5 -13
# 3.33333333E-01 2 5 -15
# 1.92947616E-01 2 1000022 6 # BR(~t_1 -> ~chi_10 t )
# 1.17469211E-01 2 1000023 6 # BR(~t_1 -> ~chi_20 t )
# 0.00000000E+00 2 1000025 6 # BR(~t_1 -> ~chi_30 t )
# 0.00000000E+00 2 1000035 6 # BR(~t_1 -> ~chi_40 t )
# 6.75747693E-01 2 1000024 5 # BR(~t_1 -> ~chi_1+ b )
# 1.38354802E-02 2 1000037 5 # BR(~t_1 -> ~chi_2+ b )
# 0.00000000E+00 2 1000021 6 # BR(~t_1 -> ~g t )
# 0.00000000E+00 2 1000005 37 # BR(~t_1 -> ~b_1 H+)
# 0.00000000E+00 2 2000005 37 # BR(~t_1 -> ~b_2 H+)
# 0.00000000E+00 2 1000005 24 # BR(~t_1 -> ~b_1 W+)
# 0.00000000E+00 2 2000005 24 # BR(~t_1 -> ~b_2 W+)
#
# PDG Width
DECAY 2000006 7.37313275E+00 # stop2 decays
# BR NDA ID1 ID2
2.96825635E-02 2 1000022 6 # BR(~t_2 -> ~chi_10 t )
8.68035358E-02 2 1000023 6 # BR(~t_2 -> ~chi_20 t )
4.18408351E-02 2 1000025 6 # BR(~t_2 -> ~chi_30 t )
1.93281647E-01 2 1000035 6 # BR(~t_2 -> ~chi_40 t )
2.19632356E-01 2 1000024 5 # BR(~t_2 -> ~chi_1+ b )
2.02206148E-01 2 1000037 5 # BR(~t_2 -> ~chi_2+ b )
0.00000000E+00 2 1000021 6 # BR(~t_2 -> ~g t )
3.66397706E-02 2 1000006 25 # BR(~t_2 -> ~t_1 h )
0.00000000E+00 2 1000006 35 # BR(~t_2 -> ~t_1 H )
0.00000000E+00 2 1000006 36 # BR(~t_2 -> ~t_1 A )
0.00000000E+00 2 1000005 37 # BR(~t_2 -> ~b_1 H+)
0.00000000E+00 2 2000005 37 # BR(~t_2 -> ~b_2 H+)
1.89913144E-01 2 1000006 23 # BR(~t_2 -> ~t_1 Z )
0.00000000E+00 2 1000005 24 # BR(~t_2 -> ~b_1 W+)
0.00000000E+00 2 2000005 24 # BR(~t_2 -> ~b_2 W+)
#
# PDG Width
DECAY 1000005 1.97326971684839e-14 # sbottom1 decays
# BR NDA ID1 ID2
0.33333333 2 1 12
0.33333333 2 1 14
0.33333333 2 1 16
# 4.43307074E-02 2 1000022 5 # BR(~b_1 -> ~chi_10 b )
# 3.56319904E-01 2 1000023 5 # BR(~b_1 -> ~chi_20 b )
# 5.16083795E-03 2 1000025 5 # BR(~b_1 -> ~chi_30 b )
# 1.04105080E-02 2 1000035 5 # BR(~b_1 -> ~chi_40 b )
# 4.45830064E-01 2 -1000024 6 # BR(~b_1 -> ~chi_1- t )
# 0.00000000E+00 2 -1000037 6 # BR(~b_1 -> ~chi_2- t )
# 0.00000000E+00 2 1000021 5 # BR(~b_1 -> ~g b )
# 0.00000000E+00 2 1000006 -37 # BR(~b_1 -> ~t_1 H-)
# 0.00000000E+00 2 2000006 -37 # BR(~b_1 -> ~t_2 H-)
# 1.37947979E-01 2 1000006 -24 # BR(~b_1 -> ~t_1 W-)
# 0.00000000E+00 2 2000006 -24 # BR(~b_1 -> ~t_2 W-)
#
# PDG Width
DECAY 2000005 8.01566294E-01 # sbottom2 decays
# BR NDA ID1 ID2
2.86200590E-01 2 1000022 5 # BR(~b_2 -> ~chi_10 b )
1.40315912E-01 2 1000023 5 # BR(~b_2 -> ~chi_20 b )
5.32635592E-02 2 1000025 5 # BR(~b_2 -> ~chi_30 b )
7.48748121E-02 2 1000035 5 # BR(~b_2 -> ~chi_40 b )
1.79734294E-01 2 -1000024 6 # BR(~b_2 -> ~chi_1- t )
0.00000000E+00 2 -1000037 6 # BR(~b_2 -> ~chi_2- t )
0.00000000E+00 2 1000021 5 # BR(~b_2 -> ~g b )
0.00000000E+00 2 1000005 25 # BR(~b_2 -> ~b_1 h )
0.00000000E+00 2 1000005 35 # BR(~b_2 -> ~b_1 H )
0.00000000E+00 2 1000005 36 # BR(~b_2 -> ~b_1 A )
0.00000000E+00 2 1000006 -37 # BR(~b_2 -> ~t_1 H-)
0.00000000E+00 2 2000006 -37 # BR(~b_2 -> ~t_2 H-)
0.00000000E+00 2 1000005 23 # BR(~b_2 -> ~b_1 Z )
2.65610832E-01 2 1000006 -24 # BR(~b_2 -> ~t_1 W-)
0.00000000E+00 2 2000006 -24 # BR(~b_2 -> ~t_2 W-)
#
# PDG Width
DECAY 1000002 5.47719539E+00 # sup_L decays
# BR NDA ID1 ID2
6.65240987E-03 2 1000022 2 # BR(~u_L -> ~chi_10 u)
3.19051458E-01 2 1000023 2 # BR(~u_L -> ~chi_20 u)
8.44929059E-04 2 1000025 2 # BR(~u_L -> ~chi_30 u)
1.03485173E-02 2 1000035 2 # BR(~u_L -> ~chi_40 u)
6.49499518E-01 2 1000024 1 # BR(~u_L -> ~chi_1+ d)
1.36031676E-02 2 1000037 1 # BR(~u_L -> ~chi_2+ d)
0.00000000E+00 2 1000021 2 # BR(~u_L -> ~g u)
#
# PDG Width
DECAY 2000002 1.15297292E+00 # sup_R decays
# BR NDA ID1 ID2
9.86377420E-01 2 1000022 2 # BR(~u_R -> ~chi_10 u)
8.46640647E-03 2 1000023 2 # BR(~u_R -> ~chi_20 u)
1.23894695E-03 2 1000025 2 # BR(~u_R -> ~chi_30 u)
3.91722611E-03 2 1000035 2 # BR(~u_R -> ~chi_40 u)
0.00000000E+00 2 1000024 1 # BR(~u_R -> ~chi_1+ d)
0.00000000E+00 2 1000037 1 # BR(~u_R -> ~chi_2+ d)
0.00000000E+00 2 1000021 2 # BR(~u_R -> ~g u)
#
# PDG Width
DECAY 1000001 5.31278772E+00 # sdown_L decays
# BR NDA ID1 ID2
2.32317969E-02 2 1000022 1 # BR(~d_L -> ~chi_10 d)
3.10235077E-01 2 1000023 1 # BR(~d_L -> ~chi_20 d)
1.52334771E-03 2 1000025 1 # BR(~d_L -> ~chi_30 d)
1.48849798E-02 2 1000035 1 # BR(~d_L -> ~chi_40 d)
6.06452481E-01 2 -1000024 2 # BR(~d_L -> ~chi_1- u)
4.36723179E-02 2 -1000037 2 # BR(~d_L -> ~chi_2- u)
0.00000000E+00 2 1000021 1 # BR(~d_L -> ~g d)
#
# PDG Width
DECAY 2000001 2.85812308E-01 # sdown_R decays
# BR NDA ID1 ID2
9.86529614E-01 2 1000022 1 # BR(~d_R -> ~chi_10 d)
8.44510350E-03 2 1000023 1 # BR(~d_R -> ~chi_20 d)
1.21172119E-03 2 1000025 1 # BR(~d_R -> ~chi_30 d)
3.81356102E-03 2 1000035 1 # BR(~d_R -> ~chi_40 d)
0.00000000E+00 2 -1000024 2 # BR(~d_R -> ~chi_1- u)
0.00000000E+00 2 -1000037 2 # BR(~d_R -> ~chi_2- u)
0.00000000E+00 2 1000021 1 # BR(~d_R -> ~g d)
#
# PDG Width
DECAY 1000004 5.47719539E+00 # scharm_L decays
# BR NDA ID1 ID2
6.65240987E-03 2 1000022 4 # BR(~c_L -> ~chi_10 c)
3.19051458E-01 2 1000023 4 # BR(~c_L -> ~chi_20 c)
8.44929059E-04 2 1000025 4 # BR(~c_L -> ~chi_30 c)
1.03485173E-02 2 1000035 4 # BR(~c_L -> ~chi_40 c)
6.49499518E-01 2 1000024 3 # BR(~c_L -> ~chi_1+ s)
1.36031676E-02 2 1000037 3 # BR(~c_L -> ~chi_2+ s)
0.00000000E+00 2 1000021 4 # BR(~c_L -> ~g c)
#
# PDG Width
DECAY 2000004 1.15297292E+00 # scharm_R decays
# BR NDA ID1 ID2
9.86377420E-01 2 1000022 4 # BR(~c_R -> ~chi_10 c)
8.46640647E-03 2 1000023 4 # BR(~c_R -> ~chi_20 c)
1.23894695E-03 2 1000025 4 # BR(~c_R -> ~chi_30 c)
3.91722611E-03 2 1000035 4 # BR(~c_R -> ~chi_40 c)
0.00000000E+00 2 1000024 3 # BR(~c_R -> ~chi_1+ s)
0.00000000E+00 2 1000037 3 # BR(~c_R -> ~chi_2+ s)
0.00000000E+00 2 1000021 4 # BR(~c_R -> ~g c)
#
# PDG Width
DECAY 1000003 5.31278772E+00 # sstrange_L decays
# BR NDA ID1 ID2
2.32317969E-02 2 1000022 3 # BR(~s_L -> ~chi_10 s)
3.10235077E-01 2 1000023 3 # BR(~s_L -> ~chi_20 s)
1.52334771E-03 2 1000025 3 # BR(~s_L -> ~chi_30 s)
1.48849798E-02 2 1000035 3 # BR(~s_L -> ~chi_40 s)
6.06452481E-01 2 -1000024 4 # BR(~s_L -> ~chi_1- c)
4.36723179E-02 2 -1000037 4 # BR(~s_L -> ~chi_2- c)
0.00000000E+00 2 1000021 3 # BR(~s_L -> ~g s)
#
# PDG Width
DECAY 2000003 2.85812308E-01 # sstrange_R decays
# BR NDA ID1 ID2
9.86529614E-01 2 1000022 3 # BR(~s_R -> ~chi_10 s)
8.44510350E-03 2 1000023 3 # BR(~s_R -> ~chi_20 s)
1.21172119E-03 2 1000025 3 # BR(~s_R -> ~chi_30 s)
3.81356102E-03 2 1000035 3 # BR(~s_R -> ~chi_40 s)
0.00000000E+00 2 -1000024 4 # BR(~s_R -> ~chi_1- c)
0.00000000E+00 2 -1000037 4 # BR(~s_R -> ~chi_2- c)
0.00000000E+00 2 1000021 3 # BR(~s_R -> ~g s)
#
# PDG Width
DECAY 1000011 2.13682161E-01 # selectron_L decays
# BR NDA ID1 ID2
5.73155386E-01 2 1000022 11 # BR(~e_L -> ~chi_10 e-)
1.64522579E-01 2 1000023 11 # BR(~e_L -> ~chi_20 e-)
0.00000000E+00 2 1000025 11 # BR(~e_L -> ~chi_30 e-)
0.00000000E+00 2 1000035 11 # BR(~e_L -> ~chi_40 e-)
2.62322035E-01 2 -1000024 12 # BR(~e_L -> ~chi_1- nu_e)
0.00000000E+00 2 -1000037 12 # BR(~e_L -> ~chi_2- nu_e)
#
# PDG Width
DECAY 2000011 2.16121626E-01 # selectron_R decays
# BR NDA ID1 ID2
1.00000000E+00 2 1000022 11 # BR(~e_R -> ~chi_10 e-)
0.00000000E+00 2 1000023 11 # BR(~e_R -> ~chi_20 e-)
0.00000000E+00 2 1000025 11 # BR(~e_R -> ~chi_30 e-)
0.00000000E+00 2 1000035 11 # BR(~e_R -> ~chi_40 e-)
0.00000000E+00 2 -1000024 12 # BR(~e_R -> ~chi_1- nu_e)
0.00000000E+00 2 -1000037 12 # BR(~e_R -> ~chi_2- nu_e)
#
# PDG Width
DECAY 1000013 2.13682161E-01 # smuon_L decays
# BR NDA ID1 ID2
5.73155386E-01 2 1000022 13 # BR(~mu_L -> ~chi_10 mu-)
1.64522579E-01 2 1000023 13 # BR(~mu_L -> ~chi_20 mu-)
0.00000000E+00 2 1000025 13 # BR(~mu_L -> ~chi_30 mu-)
0.00000000E+00 2 1000035 13 # BR(~mu_L -> ~chi_40 mu-)
2.62322035E-01 2 -1000024 14 # BR(~mu_L -> ~chi_1- nu_mu)
0.00000000E+00 2 -1000037 14 # BR(~mu_L -> ~chi_2- nu_mu)
#
# PDG Width
DECAY 2000013 2.16121626E-01 # smuon_R decays
# BR NDA ID1 ID2
1.00000000E+00 2 1000022 13 # BR(~mu_R -> ~chi_10 mu-)
0.00000000E+00 2 1000023 13 # BR(~mu_R -> ~chi_20 mu-)
0.00000000E+00 2 1000025 13 # BR(~mu_R -> ~chi_30 mu-)
0.00000000E+00 2 1000035 13 # BR(~mu_R -> ~chi_40 mu-)
0.00000000E+00 2 -1000024 14 # BR(~mu_R -> ~chi_1- nu_mu)
0.00000000E+00 2 -1000037 14 # BR(~mu_R -> ~chi_2- nu_mu)
#
# PDG Width
DECAY 1000015 1.48327268E-01 # stau_1 decays
# BR NDA ID1 ID2
1.00000000E+00 2 1000022 15 # BR(~tau_1 -> ~chi_10 tau-)
0.00000000E+00 2 1000023 15 # BR(~tau_1 -> ~chi_20 tau-)
0.00000000E+00 2 1000025 15 # BR(~tau_1 -> ~chi_30 tau-)
0.00000000E+00 2 1000035 15 # BR(~tau_1 -> ~chi_40 tau-)
0.00000000E+00 2 -1000024 16 # BR(~tau_1 -> ~chi_1- nu_tau)
0.00000000E+00 2 -1000037 16 # BR(~tau_1 -> ~chi_2- nu_tau)
0.00000000E+00 2 1000016 -37 # BR(~tau_1 -> ~nu_tauL H-)
0.00000000E+00 2 1000016 -24 # BR(~tau_1 -> ~nu_tauL W-)
#
# PDG Width
DECAY 2000015 2.69906096E-01 # stau_2 decays
# BR NDA ID1 ID2
5.96653046E-01 2 1000022 15 # BR(~tau_2 -> ~chi_10 tau-)
1.54536760E-01 2 1000023 15 # BR(~tau_2 -> ~chi_20 tau-)
0.00000000E+00 2 1000025 15 # BR(~tau_2 -> ~chi_30 tau-)
0.00000000E+00 2 1000035 15 # BR(~tau_2 -> ~chi_40 tau-)
2.48810195E-01 2 -1000024 16 # BR(~tau_2 -> ~chi_1- nu_tau)
0.00000000E+00 2 -1000037 16 # BR(~tau_2 -> ~chi_2- nu_tau)
0.00000000E+00 2 1000016 -37 # BR(~tau_2 -> ~nu_tauL H-)
0.00000000E+00 2 1000016 -24 # BR(~tau_2 -> ~nu_tauL W-)
0.00000000E+00 2 1000015 25 # BR(~tau_2 -> ~tau_1 h)
0.00000000E+00 2 1000015 35 # BR(~tau_2 -> ~tau_1 H)
0.00000000E+00 2 1000015 36 # BR(~tau_2 -> ~tau_1 A)
0.00000000E+00 2 1000015 23 # BR(~tau_2 -> ~tau_1 Z)
#
# PDG Width
DECAY 1000012 1.49881634E-01 # snu_eL decays
# BR NDA ID1 ID2
9.77700764E-01 2 1000022 12 # BR(~nu_eL -> ~chi_10 nu_e)
8.11554922E-03 2 1000023 12 # BR(~nu_eL -> ~chi_20 nu_e)
0.00000000E+00 2 1000025 12 # BR(~nu_eL -> ~chi_30 nu_e)
0.00000000E+00 2 1000035 12 # BR(~nu_eL -> ~chi_40 nu_e)
1.41836867E-02 2 1000024 11 # BR(~nu_eL -> ~chi_1+ e-)
0.00000000E+00 2 1000037 11 # BR(~nu_eL -> ~chi_2+ e-)
#
# PDG Width
DECAY 1000014 1.49881634E-01 # snu_muL decays
# BR NDA ID1 ID2
9.77700764E-01 2 1000022 14 # BR(~nu_muL -> ~chi_10 nu_mu)
8.11554922E-03 2 1000023 14 # BR(~nu_muL -> ~chi_20 nu_mu)
0.00000000E+00 2 1000025 14 # BR(~nu_muL -> ~chi_30 nu_mu)
0.00000000E+00 2 1000035 14 # BR(~nu_muL -> ~chi_40 nu_mu)
1.41836867E-02 2 1000024 13 # BR(~nu_muL -> ~chi_1+ mu-)
0.00000000E+00 2 1000037 13 # BR(~nu_muL -> ~chi_2+ mu-)
#
# PDG Width
DECAY 1000016 1.47518977E-01 # snu_tauL decays
# BR NDA ID1 ID2
9.85994529E-01 2 1000022 16 # BR(~nu_tauL -> ~chi_10 nu_tau)
6.25129612E-03 2 1000023 16 # BR(~nu_tauL -> ~chi_20 nu_tau)
0.00000000E+00 2 1000025 16 # BR(~nu_tauL -> ~chi_30 nu_tau)
0.00000000E+00 2 1000035 16 # BR(~nu_tauL -> ~chi_40 nu_tau)
7.75417479E-03 2 1000024 15 # BR(~nu_tauL -> ~chi_1+ tau-)
0.00000000E+00 2 1000037 15 # BR(~nu_tauL -> ~chi_2+ tau-)
0.00000000E+00 2 -1000015 -37 # BR(~nu_tauL -> ~tau_1+ H-)
0.00000000E+00 2 -2000015 -37 # BR(~nu_tauL -> ~tau_2+ H-)
0.00000000E+00 2 -1000015 -24 # BR(~nu_tauL -> ~tau_1+ W-)
0.00000000E+00 2 -2000015 -24 # BR(~nu_tauL -> ~tau_2+ W-)
#
# PDG Width
DECAY 1000024 1.70414503E-02 # chargino1+ decays
# BR NDA ID1 ID2
0.00000000E+00 2 1000002 -1 # BR(~chi_1+ -> ~u_L db)
0.00000000E+00 2 2000002 -1 # BR(~chi_1+ -> ~u_R db)
0.00000000E+00 2 -1000001 2 # BR(~chi_1+ -> ~d_L* u )
0.00000000E+00 2 -2000001 2 # BR(~chi_1+ -> ~d_R* u )
0.00000000E+00 2 1000004 -3 # BR(~chi_1+ -> ~c_L sb)
0.00000000E+00 2 2000004 -3 # BR(~chi_1+ -> ~c_R sb)
0.00000000E+00 2 -1000003 4 # BR(~chi_1+ -> ~s_L* c )
0.00000000E+00 2 -2000003 4 # BR(~chi_1+ -> ~s_R* c )
0.00000000E+00 2 1000006 -5 # BR(~chi_1+ -> ~t_1 bb)
0.00000000E+00 2 2000006 -5 # BR(~chi_1+ -> ~t_2 bb)
0.00000000E+00 2 -1000005 6 # BR(~chi_1+ -> ~b_1* t )
0.00000000E+00 2 -2000005 6 # BR(~chi_1+ -> ~b_2* t )
0.00000000E+00 2 1000012 -11 # BR(~chi_1+ -> ~nu_eL e+ )
0.00000000E+00 2 1000014 -13 # BR(~chi_1+ -> ~nu_muL mu+ )
0.00000000E+00 2 1000016 -15 # BR(~chi_1+ -> ~nu_tau1 tau+)
0.00000000E+00 2 -1000011 12 # BR(~chi_1+ -> ~e_L+ nu_e)
0.00000000E+00 2 -2000011 12 # BR(~chi_1+ -> ~e_R+ nu_e)
0.00000000E+00 2 -1000013 14 # BR(~chi_1+ -> ~mu_L+ nu_mu)
0.00000000E+00 2 -2000013 14 # BR(~chi_1+ -> ~mu_R+ nu_mu)
9.25161117E-01 2 -1000015 16 # BR(~chi_1+ -> ~tau_1+ nu_tau)
0.00000000E+00 2 -2000015 16 # BR(~chi_1+ -> ~tau_2+ nu_tau)
7.48388828E-02 2 1000022 24 # BR(~chi_1+ -> ~chi_10 W+)
0.00000000E+00 2 1000023 24 # BR(~chi_1+ -> ~chi_20 W+)
0.00000000E+00 2 1000025 24 # BR(~chi_1+ -> ~chi_30 W+)
0.00000000E+00 2 1000035 24 # BR(~chi_1+ -> ~chi_40 W+)
0.00000000E+00 2 1000022 37 # BR(~chi_1+ -> ~chi_10 H+)
0.00000000E+00 2 1000023 37 # BR(~chi_1+ -> ~chi_20 H+)
0.00000000E+00 2 1000025 37 # BR(~chi_1+ -> ~chi_30 H+)
0.00000000E+00 2 1000035 37 # BR(~chi_1+ -> ~chi_40 H+)
#
# PDG Width
DECAY 1000037 2.48689510E+00 # chargino2+ decays
# BR NDA ID1 ID2
0.00000000E+00 2 1000002 -1 # BR(~chi_2+ -> ~u_L db)
0.00000000E+00 2 2000002 -1 # BR(~chi_2+ -> ~u_R db)
0.00000000E+00 2 -1000001 2 # BR(~chi_2+ -> ~d_L* u )
0.00000000E+00 2 -2000001 2 # BR(~chi_2+ -> ~d_R* u )
0.00000000E+00 2 1000004 -3 # BR(~chi_2+ -> ~c_L sb)
0.00000000E+00 2 2000004 -3 # BR(~chi_2+ -> ~c_R sb)
0.00000000E+00 2 -1000003 4 # BR(~chi_2+ -> ~s_L* c )
0.00000000E+00 2 -2000003 4 # BR(~chi_2+ -> ~s_R* c )
0.00000000E+00 2 1000006 -5 # BR(~chi_2+ -> ~t_1 bb)
0.00000000E+00 2 2000006 -5 # BR(~chi_2+ -> ~t_2 bb)
0.00000000E+00 2 -1000005 6 # BR(~chi_2+ -> ~b_1* t )
0.00000000E+00 2 -2000005 6 # BR(~chi_2+ -> ~b_2* t )
2.00968837E-02 2 1000012 -11 # BR(~chi_2+ -> ~nu_eL e+ )
2.00968837E-02 2 1000014 -13 # BR(~chi_2+ -> ~nu_muL mu+ )
2.74507395E-02 2 1000016 -15 # BR(~chi_2+ -> ~nu_tau1 tau+)
5.20406111E-02 2 -1000011 12 # BR(~chi_2+ -> ~e_L+ nu_e)
0.00000000E+00 2 -2000011 12 # BR(~chi_2+ -> ~e_R+ nu_e)
5.20406111E-02 2 -1000013 14 # BR(~chi_2+ -> ~mu_L+ nu_mu)
0.00000000E+00 2 -2000013 14 # BR(~chi_2+ -> ~mu_R+ nu_mu)
2.82859898E-04 2 -1000015 16 # BR(~chi_2+ -> ~tau_1+ nu_tau)
5.66729336E-02 2 -2000015 16 # BR(~chi_2+ -> ~tau_2+ nu_tau)
2.31513269E-01 2 1000024 23 # BR(~chi_2+ -> ~chi_1+ Z )
6.76715120E-02 2 1000022 24 # BR(~chi_2+ -> ~chi_10 W+)
2.93654849E-01 2 1000023 24 # BR(~chi_2+ -> ~chi_20 W+)
0.00000000E+00 2 1000025 24 # BR(~chi_2+ -> ~chi_30 W+)
0.00000000E+00 2 1000035 24 # BR(~chi_2+ -> ~chi_40 W+)
1.78478848E-01 2 1000024 25 # BR(~chi_2+ -> ~chi_1+ h )
0.00000000E+00 2 1000024 35 # BR(~chi_2+ -> ~chi_1+ H )
0.00000000E+00 2 1000024 36 # BR(~chi_2+ -> ~chi_1+ A )
0.00000000E+00 2 1000022 37 # BR(~chi_2+ -> ~chi_10 H+)
0.00000000E+00 2 1000023 37 # BR(~chi_2+ -> ~chi_20 H+)
0.00000000E+00 2 1000025 37 # BR(~chi_2+ -> ~chi_30 H+)
0.00000000E+00 2 1000035 37 # BR(~chi_2+ -> ~chi_40 H+)
#
# PDG Width
DECAY 1000022 0.00000000E+00 # neutralino1 decays
#
# PDG Width
DECAY 1000023 2.07770048E-02 # neutralino2 decays
# BR NDA ID1 ID2
0.00000000E+00 2 1000022 23 # BR(~chi_20 -> ~chi_10 Z )
0.00000000E+00 2 1000024 -24 # BR(~chi_20 -> ~chi_1+ W-)
0.00000000E+00 2 -1000024 24 # BR(~chi_20 -> ~chi_1- W+)
0.00000000E+00 2 1000037 -24 # BR(~chi_20 -> ~chi_2+ W-)
0.00000000E+00 2 -1000037 24 # BR(~chi_20 -> ~chi_2- W+)
0.00000000E+00 2 1000022 25 # BR(~chi_20 -> ~chi_10 h )
0.00000000E+00 2 1000022 35 # BR(~chi_20 -> ~chi_10 H )
0.00000000E+00 2 1000022 36 # BR(~chi_20 -> ~chi_10 A )
0.00000000E+00 2 1000024 -37 # BR(~chi_20 -> ~chi_1+ H-)
0.00000000E+00 2 -1000024 37 # BR(~chi_20 -> ~chi_1- H+)
0.00000000E+00 2 1000037 -37 # BR(~chi_20 -> ~chi_2+ H-)
0.00000000E+00 2 -1000037 37 # BR(~chi_20 -> ~chi_2- H+)
0.00000000E+00 2 1000002 -2 # BR(~chi_20 -> ~u_L ub)
0.00000000E+00 2 -1000002 2 # BR(~chi_20 -> ~u_L* u )
0.00000000E+00 2 2000002 -2 # BR(~chi_20 -> ~u_R ub)
0.00000000E+00 2 -2000002 2 # BR(~chi_20 -> ~u_R* u )
0.00000000E+00 2 1000001 -1 # BR(~chi_20 -> ~d_L db)
0.00000000E+00 2 -1000001 1 # BR(~chi_20 -> ~d_L* d )
0.00000000E+00 2 2000001 -1 # BR(~chi_20 -> ~d_R db)
0.00000000E+00 2 -2000001 1 # BR(~chi_20 -> ~d_R* d )
0.00000000E+00 2 1000004 -4 # BR(~chi_20 -> ~c_L cb)
0.00000000E+00 2 -1000004 4 # BR(~chi_20 -> ~c_L* c )
0.00000000E+00 2 2000004 -4 # BR(~chi_20 -> ~c_R cb)
0.00000000E+00 2 -2000004 4 # BR(~chi_20 -> ~c_R* c )
0.00000000E+00 2 1000003 -3 # BR(~chi_20 -> ~s_L sb)
0.00000000E+00 2 -1000003 3 # BR(~chi_20 -> ~s_L* s )
0.00000000E+00 2 2000003 -3 # BR(~chi_20 -> ~s_R sb)
0.00000000E+00 2 -2000003 3 # BR(~chi_20 -> ~s_R* s )
0.00000000E+00 2 1000006 -6 # BR(~chi_20 -> ~t_1 tb)
0.00000000E+00 2 -1000006 6 # BR(~chi_20 -> ~t_1* t )
0.00000000E+00 2 2000006 -6 # BR(~chi_20 -> ~t_2 tb)
0.00000000E+00 2 -2000006 6 # BR(~chi_20 -> ~t_2* t )
0.00000000E+00 2 1000005 -5 # BR(~chi_20 -> ~b_1 bb)
0.00000000E+00 2 -1000005 5 # BR(~chi_20 -> ~b_1* b )
0.00000000E+00 2 2000005 -5 # BR(~chi_20 -> ~b_2 bb)
0.00000000E+00 2 -2000005 5 # BR(~chi_20 -> ~b_2* b )
0.00000000E+00 2 1000011 -11 # BR(~chi_20 -> ~e_L- e+)
0.00000000E+00 2 -1000011 11 # BR(~chi_20 -> ~e_L+ e-)
2.95071995E-02 2 2000011 -11 # BR(~chi_20 -> ~e_R- e+)
2.95071995E-02 2 -2000011 11 # BR(~chi_20 -> ~e_R+ e-)
0.00000000E+00 2 1000013 -13 # BR(~chi_20 -> ~mu_L- mu+)
0.00000000E+00 2 -1000013 13 # BR(~chi_20 -> ~mu_L+ mu-)
2.95071995E-02 2 2000013 -13 # BR(~chi_20 -> ~mu_R- mu+)
2.95071995E-02 2 -2000013 13 # BR(~chi_20 -> ~mu_R+ mu-)
4.40985601E-01 2 1000015 -15 # BR(~chi_20 -> ~tau_1- tau+)
4.40985601E-01 2 -1000015 15 # BR(~chi_20 -> ~tau_1+ tau-)
0.00000000E+00 2 2000015 -15 # BR(~chi_20 -> ~tau_2- tau+)
0.00000000E+00 2 -2000015 15 # BR(~chi_20 -> ~tau_2+ tau-)
0.00000000E+00 2 1000012 -12 # BR(~chi_20 -> ~nu_eL nu_eb)
0.00000000E+00 2 -1000012 12 # BR(~chi_20 -> ~nu_eL* nu_e )
0.00000000E+00 2 1000014 -14 # BR(~chi_20 -> ~nu_muL nu_mub)
0.00000000E+00 2 -1000014 14 # BR(~chi_20 -> ~nu_muL* nu_mu )
0.00000000E+00 2 1000016 -16 # BR(~chi_20 -> ~nu_tau1 nu_taub)
0.00000000E+00 2 -1000016 16 # BR(~chi_20 -> ~nu_tau1* nu_tau )
#
# PDG Width
DECAY 1000025 1.91598495E+00 # neutralino3 decays
# BR NDA ID1 ID2
1.13226601E-01 2 1000022 23 # BR(~chi_30 -> ~chi_10 Z )
2.11969194E-01 2 1000023 23 # BR(~chi_30 -> ~chi_20 Z )
2.95329778E-01 2 1000024 -24 # BR(~chi_30 -> ~chi_1+ W-)
2.95329778E-01 2 -1000024 24 # BR(~chi_30 -> ~chi_1- W+)
0.00000000E+00 2 1000037 -24 # BR(~chi_30 -> ~chi_2+ W-)
0.00000000E+00 2 -1000037 24 # BR(~chi_30 -> ~chi_2- W+)
2.13076490E-02 2 1000022 25 # BR(~chi_30 -> ~chi_10 h )
0.00000000E+00 2 1000022 35 # BR(~chi_30 -> ~chi_10 H )
0.00000000E+00 2 1000022 36 # BR(~chi_30 -> ~chi_10 A )
1.24538329E-02 2 1000023 25 # BR(~chi_30 -> ~chi_20 h )
0.00000000E+00 2 1000023 35 # BR(~chi_30 -> ~chi_20 H )
0.00000000E+00 2 1000023 36 # BR(~chi_30 -> ~chi_20 A )
0.00000000E+00 2 1000024 -37 # BR(~chi_30 -> ~chi_1+ H-)
0.00000000E+00 2 -1000024 37 # BR(~chi_30 -> ~chi_1- H+)
0.00000000E+00 2 1000037 -37 # BR(~chi_30 -> ~chi_2+ H-)
0.00000000E+00 2 -1000037 37 # BR(~chi_30 -> ~chi_2- H+)
0.00000000E+00 2 1000002 -2 # BR(~chi_30 -> ~u_L ub)
0.00000000E+00 2 -1000002 2 # BR(~chi_30 -> ~u_L* u )
0.00000000E+00 2 2000002 -2 # BR(~chi_30 -> ~u_R ub)
0.00000000E+00 2 -2000002 2 # BR(~chi_30 -> ~u_R* u )
0.00000000E+00 2 1000001 -1 # BR(~chi_30 -> ~d_L db)
0.00000000E+00 2 -1000001 1 # BR(~chi_30 -> ~d_L* d )
0.00000000E+00 2 2000001 -1 # BR(~chi_30 -> ~d_R db)
0.00000000E+00 2 -2000001 1 # BR(~chi_30 -> ~d_R* d )
0.00000000E+00 2 1000004 -4 # BR(~chi_30 -> ~c_L cb)
0.00000000E+00 2 -1000004 4 # BR(~chi_30 -> ~c_L* c )
0.00000000E+00 2 2000004 -4 # BR(~chi_30 -> ~c_R cb)
0.00000000E+00 2 -2000004 4 # BR(~chi_30 -> ~c_R* c )
0.00000000E+00 2 1000003 -3 # BR(~chi_30 -> ~s_L sb)
0.00000000E+00 2 -1000003 3 # BR(~chi_30 -> ~s_L* s )
0.00000000E+00 2 2000003 -3 # BR(~chi_30 -> ~s_R sb)
0.00000000E+00 2 -2000003 3 # BR(~chi_30 -> ~s_R* s )
0.00000000E+00 2 1000006 -6 # BR(~chi_30 -> ~t_1 tb)
0.00000000E+00 2 -1000006 6 # BR(~chi_30 -> ~t_1* t )
0.00000000E+00 2 2000006 -6 # BR(~chi_30 -> ~t_2 tb)
0.00000000E+00 2 -2000006 6 # BR(~chi_30 -> ~t_2* t )
0.00000000E+00 2 1000005 -5 # BR(~chi_30 -> ~b_1 bb)
0.00000000E+00 2 -1000005 5 # BR(~chi_30 -> ~b_1* b )
0.00000000E+00 2 2000005 -5 # BR(~chi_30 -> ~b_2 bb)
0.00000000E+00 2 -2000005 5 # BR(~chi_30 -> ~b_2* b )
5.57220455E-04 2 1000011 -11 # BR(~chi_30 -> ~e_L- e+)
5.57220455E-04 2 -1000011 11 # BR(~chi_30 -> ~e_L+ e-)
1.25266782E-03 2 2000011 -11 # BR(~chi_30 -> ~e_R- e+)
1.25266782E-03 2 -2000011 11 # BR(~chi_30 -> ~e_R+ e-)
5.57220455E-04 2 1000013 -13 # BR(~chi_30 -> ~mu_L- mu+)
5.57220455E-04 2 -1000013 13 # BR(~chi_30 -> ~mu_L+ mu-)
1.25266782E-03 2 2000013 -13 # BR(~chi_30 -> ~mu_R- mu+)
1.25266782E-03 2 -2000013 13 # BR(~chi_30 -> ~mu_R+ mu-)
5.26279239E-03 2 1000015 -15 # BR(~chi_30 -> ~tau_1- tau+)
5.26279239E-03 2 -1000015 15 # BR(~chi_30 -> ~tau_1+ tau-)
6.72814564E-03 2 2000015 -15 # BR(~chi_30 -> ~tau_2- tau+)
6.72814564E-03 2 -2000015 15 # BR(~chi_30 -> ~tau_2+ tau-)
3.18920485E-03 2 1000012 -12 # BR(~chi_30 -> ~nu_eL nu_eb)
3.18920485E-03 2 -1000012 12 # BR(~chi_30 -> ~nu_eL* nu_e )
3.18920485E-03 2 1000014 -14 # BR(~chi_30 -> ~nu_muL nu_mub)
3.18920485E-03 2 -1000014 14 # BR(~chi_30 -> ~nu_muL* nu_mu )
3.20245934E-03 2 1000016 -16 # BR(~chi_30 -> ~nu_tau1 nu_taub)
3.20245934E-03 2 -1000016 16 # BR(~chi_30 -> ~nu_tau1* nu_tau )
#
# PDG Width
DECAY 1000035 2.58585079E+00 # neutralino4 decays
# BR NDA ID1 ID2
2.15369294E-02 2 1000022 23 # BR(~chi_40 -> ~chi_10 Z )
1.85499971E-02 2 1000023 23 # BR(~chi_40 -> ~chi_20 Z )
0.00000000E+00 2 1000025 23 # BR(~chi_40 -> ~chi_30 Z )
2.49541430E-01 2 1000024 -24 # BR(~chi_40 -> ~chi_1+ W-)
2.49541430E-01 2 -1000024 24 # BR(~chi_40 -> ~chi_1- W+)
0.00000000E+00 2 1000037 -24 # BR(~chi_40 -> ~chi_2+ W-)
0.00000000E+00 2 -1000037 24 # BR(~chi_40 -> ~chi_2- W+)
6.93213268E-02 2 1000022 25 # BR(~chi_40 -> ~chi_10 h )
0.00000000E+00 2 1000022 35 # BR(~chi_40 -> ~chi_10 H )
0.00000000E+00 2 1000022 36 # BR(~chi_40 -> ~chi_10 A )
1.47602336E-01 2 1000023 25 # BR(~chi_40 -> ~chi_20 h )
0.00000000E+00 2 1000023 35 # BR(~chi_40 -> ~chi_20 H )
0.00000000E+00 2 1000023 36 # BR(~chi_40 -> ~chi_20 A )
0.00000000E+00 2 1000025 25 # BR(~chi_40 -> ~chi_30 h )
0.00000000E+00 2 1000025 35 # BR(~chi_40 -> ~chi_30 H )
0.00000000E+00 2 1000025 36 # BR(~chi_40 -> ~chi_30 A )
0.00000000E+00 2 1000024 -37 # BR(~chi_40 -> ~chi_1+ H-)
0.00000000E+00 2 -1000024 37 # BR(~chi_40 -> ~chi_1- H+)
0.00000000E+00 2 1000037 -37 # BR(~chi_40 -> ~chi_2+ H-)
0.00000000E+00 2 -1000037 37 # BR(~chi_40 -> ~chi_2- H+)
0.00000000E+00 2 1000002 -2 # BR(~chi_40 -> ~u_L ub)
0.00000000E+00 2 -1000002 2 # BR(~chi_40 -> ~u_L* u )
0.00000000E+00 2 2000002 -2 # BR(~chi_40 -> ~u_R ub)
0.00000000E+00 2 -2000002 2 # BR(~chi_40 -> ~u_R* u )
0.00000000E+00 2 1000001 -1 # BR(~chi_40 -> ~d_L db)
0.00000000E+00 2 -1000001 1 # BR(~chi_40 -> ~d_L* d )
0.00000000E+00 2 2000001 -1 # BR(~chi_40 -> ~d_R db)
0.00000000E+00 2 -2000001 1 # BR(~chi_40 -> ~d_R* d )
0.00000000E+00 2 1000004 -4 # BR(~chi_40 -> ~c_L cb)
0.00000000E+00 2 -1000004 4 # BR(~chi_40 -> ~c_L* c )
0.00000000E+00 2 2000004 -4 # BR(~chi_40 -> ~c_R cb)
0.00000000E+00 2 -2000004 4 # BR(~chi_40 -> ~c_R* c )
0.00000000E+00 2 1000003 -3 # BR(~chi_40 -> ~s_L sb)
0.00000000E+00 2 -1000003 3 # BR(~chi_40 -> ~s_L* s )
0.00000000E+00 2 2000003 -3 # BR(~chi_40 -> ~s_R sb)
0.00000000E+00 2 -2000003 3 # BR(~chi_40 -> ~s_R* s )
0.00000000E+00 2 1000006 -6 # BR(~chi_40 -> ~t_1 tb)
0.00000000E+00 2 -1000006 6 # BR(~chi_40 -> ~t_1* t )
0.00000000E+00 2 2000006 -6 # BR(~chi_40 -> ~t_2 tb)
0.00000000E+00 2 -2000006 6 # BR(~chi_40 -> ~t_2* t )
0.00000000E+00 2 1000005 -5 # BR(~chi_40 -> ~b_1 bb)
0.00000000E+00 2 -1000005 5 # BR(~chi_40 -> ~b_1* b )
0.00000000E+00 2 2000005 -5 # BR(~chi_40 -> ~b_2 bb)
0.00000000E+00 2 -2000005 5 # BR(~chi_40 -> ~b_2* b )
9.64835418E-03 2 1000011 -11 # BR(~chi_40 -> ~e_L- e+)
9.64835418E-03 2 -1000011 11 # BR(~chi_40 -> ~e_L+ e-)
3.75684470E-03 2 2000011 -11 # BR(~chi_40 -> ~e_R- e+)
3.75684470E-03 2 -2000011 11 # BR(~chi_40 -> ~e_R+ e-)
9.64835418E-03 2 1000013 -13 # BR(~chi_40 -> ~mu_L- mu+)
9.64835418E-03 2 -1000013 13 # BR(~chi_40 -> ~mu_L+ mu-)
3.75684470E-03 2 2000013 -13 # BR(~chi_40 -> ~mu_R- mu+)
3.75684470E-03 2 -2000013 13 # BR(~chi_40 -> ~mu_R+ mu-)
2.68215241E-03 2 1000015 -15 # BR(~chi_40 -> ~tau_1- tau+)
2.68215241E-03 2 -1000015 15 # BR(~chi_40 -> ~tau_1+ tau-)
1.62289809E-02 2 2000015 -15 # BR(~chi_40 -> ~tau_2- tau+)
1.62289809E-02 2 -2000015 15 # BR(~chi_40 -> ~tau_2+ tau-)
2.53796547E-02 2 1000012 -12 # BR(~chi_40 -> ~nu_eL nu_eb)
2.53796547E-02 2 -1000012 12 # BR(~chi_40 -> ~nu_eL* nu_e )
2.53796547E-02 2 1000014 -14 # BR(~chi_40 -> ~nu_muL nu_mub)
2.53796547E-02 2 -1000014 14 # BR(~chi_40 -> ~nu_muL* nu_mu )
2.54724352E-02 2 1000016 -16 # BR(~chi_40 -> ~nu_tau1 nu_taub)
2.54724352E-02 2 -1000016 16 # BR(~chi_40 -> ~nu_tau1* nu_tau )
"""
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(-1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring('Tune:pp = 5',
'SUSY:all = off',
#'SUSY:gg2squarkantisquark = on',
#'SUSY:qqbar2squarkantisquark= on',
'SUSY:gg2gluinogluino = on',
'SUSY:qqbar2gluinogluino = on',
'RHadrons:allow = on',
'RHadrons:allowDecay = on',
'RHadrons:setMasses = on',
'RHadrons:probGluinoball = 0.1',
'1000021:tau0 = %.1f' % CTAU),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
),
)
ProductionFilterSequence = cms.Sequence(generator)
|
["ksung@cern.ch"] |
ksung@cern.ch
|
ee3747640d2d81beb67e38eb7bf9195041503fd6
|
51bdac517ec342a7a38a67e2b3c521f8bd53c5f2
|
/numba/tests/pointers/test_null.py
|
fa46e26b67aa41253b5f4b2b6e874e710d7a3aaf
|
["BSD-2-Clause"] |
permissive
|
cu6yu4/numba
|
66bc7ee751fdfaabab92b6f571dbff00cb4d7652
|
f64aced5a7c94a434fd2d8c678d93ff8ac3ae1fb
|
refs/heads/master
| 2020-12-25T13:45:44.629782
| 2013-01-25T20:28:12
| 2013-01-25T20:28:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
import ctypes
import numba
from numba import *
#intp = ctypes.POINTER(ctypes.c_int)
#voidp = ctypes.c_void_p
intp = int_.pointer()
voidp = void.pointer()
@autojit
def test_compare_null():
"""
>>> test_compare_null()
True
"""
return intp(Py_uintptr_t(0)) == NULL
@autojit
def test_compare_null_attribute():
"""
>>> test_compare_null_attribute()
True
"""
return voidp(Py_uintptr_t(0)) == numba.NULL
if __name__ == '__main__':
# test_compare_null()
# test_compare_null_attribute()
numba.testmod()
|
["markflorisson88@gmail.com"] |
markflorisson88@gmail.com
|
3dd23060467fe4b63a7c25228f5126fa8d571160
|
cc3c46bf2c6bceeb3fbfea28bec7786e7f691df3
|
/FORRIT/python_verkefni/byrjendanamskeid/verkefni_day_1/verkefni_07aftur.py
|
739b073f40a0ab079206964f46103cf5fc6aebec
|
[] |
no_license
|
magnuskonrad98/max_int
|
8d7608978eb49c759377f4826bcdfcd59e7f1551
|
ba3a8bf38e90674546ca6f5d118ab37babfe0178
|
refs/heads/master
| 2020-07-20T07:42:04.678497
| 2019-09-05T15:48:04
| 2019-09-05T15:48:04
| 206,600,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 103
|
py
|
price = int(input("Enter the product price: "))
price_with_tax = price * 1.255
print(price_with_tax)
|
["magnuskonrad@gmail.com"] |
magnuskonrad@gmail.com
|
ae0a2f0b5b3f0590e6e1b9e931af7d3b64441bfe
|
635049987366fa45d03e8839f482b76414e59ee0
|
/web/migrations/0005_auto_20180407_2211.py
|
cfbbb385aaf89550c9a5db46c837ecbf8d2303df
|
[] |
no_license
|
313799043/-
|
5ecdef19feb47d36b2f73660012184a88ede830c
|
353feee2ccadd8742cbdf061084224e5c41742b5
|
refs/heads/master
| 2020-03-08T11:25:23.825488
| 2018-04-08T07:13:30
| 2018-04-08T07:13:30
| 128,097,125
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
# Generated by Django 2.0.3 on 2018-04-07 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0004_auto_20180407_2156'),
]
operations = [
migrations.AlterField(
model_name='order',
name='status',
            field=models.IntegerField(choices=[(1, '公户'), (2, '私户')], default=1),  # 1: corporate account, 2: personal account
),
]
|
["m313799043@163.com"] |
m313799043@163.com
|
3ad172fe5ecd7b6deb6d78171e29b47f0aac4bf3
|
d013f83ac25db3cbdb87e37bda7249feaa88da1c
|
/code/models/user.py
|
946efcc86698044ec1d9633620dc4c88e91c2e07
|
[] |
no_license
|
Jitesh-Khuttan/BookMyTicket
|
97e5685bd9f80c8edba6aab87e7f3e023d0c1d32
|
a6608b7d43147ba7a22e498bfb4b3385b6f4b18c
|
refs/heads/main
| 2023-08-16T02:46:43.089198
| 2021-10-19T07:07:30
| 2021-10-19T07:07:30
| 415,529,560
| 0
| 0
| null | 2021-10-19T07:01:18
| 2021-10-10T08:37:37
|
Python
|
UTF-8
|
Python
| false
| false
| 888
|
py
|
from code.db.alchemy_db import db
class UserModel(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(60))
password = db.Column(db.String(100))
def to_json(self):
return {'id': self.id, 'username': self.username}
def register(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
@classmethod
def find_by_username(cls, username):
return cls.query.filter_by(username=username).first()
@classmethod
def find_by_id(cls, userid):
return cls.query.filter_by(id=userid).first()
@classmethod
def find_all(cls):
return cls.query.all()
if __name__ == "__main__":
user = UserModel(username="jkhuttan", password="asdf")
print(user.to_json())
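# Illustrative usage sketch (assumes an application context with an initialized
# Flask-SQLAlchemy `db`; not part of the original file):
#     user = UserModel(username="jane", password="secret")
#     user.register()
#     assert UserModel.find_by_username("jane") is not None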
|
["jiteshkhuttan15@gmail.com"] |
jiteshkhuttan15@gmail.com
|
1a6f7da3348496cfdcfd72dc5c1d6c662ac408c9
|
4d7e6eaf9c2a4749edd025d5b204289a01e469a2
|
/FlaskTest/day_01/carts/__init__.py
|
d9788c7c7678ef467346e24f97c22e07a933c098
|
["MIT"] |
permissive
|
tjhlp/FlaskProject
|
f5db4a020a5523516624117583aa70183dc0d520
|
2213060ec3ee2720d79a7a3f71fbcaf23a85d64d
|
refs/heads/master
| 2020-06-24T09:33:18.592170
| 2019-08-02T07:57:02
| 2019-08-02T07:57:02
| 198,929,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
from flask import Blueprint
cart_bp = Blueprint('cart', __name__, url_prefix='/cart')
from .views import *
|
["374586186@qq.com"] |
374586186@qq.com
|
1b340ebd2248f63c39e2921394126e7da83f5247
|
8a46f370477ea9fabd36249a4f6d70226917c24b
|
/blogdown/plugin.py
|
8ad745eddeda8d95b2c65f40734315b3b18705c3
|
["BSD-3-Clause"] |
permissive
|
blogdown/blogdown
|
af551991013d03e3b7b033cf45687f952eb41def
|
4a463d341a1fe7547a3de33f03d356e74a89569e
|
refs/heads/master
| 2022-06-09T11:53:03.728491
| 2022-05-17T19:26:54
| 2022-05-17T19:28:16
| 5,064,814
| 5
| 4
| null | 2016-03-14T02:44:58
| 2012-07-16T08:30:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
# -*- coding: utf-8 -*-
"""
blogdown.plugin
~~~~~~~~~~~~~~~
Utilities for a simple plugin system.
:copyright: (c) 2015 by Thomas Gläßle
:license: BSD, see LICENSE for more details.
"""
import os
from importlib import import_module
from pkg_resources import iter_entry_points
from runpy import run_path
__all__ = [
"EntryPointLoader",
"PathLoader",
"PackageLoader",
"ChainLoader",
]
class EntryPointLoader:
"""Load plugins from specified entrypoint group."""
def __init__(self, ep_group):
self.ep_group = ep_group
def __call__(self, name):
for ep in iter_entry_points(self.ep_group, name):
yield ep.load()
class PathLoader:
"""Load plugins from specified folder."""
def __init__(self, search_path):
self.search_path = os.path.abspath(search_path)
def __call__(self, name):
module_path = os.path.join(self.search_path, name + ".py")
if not os.path.isfile(module_path):
return
module = run_path(module_path)
try:
yield module["setup"]
except KeyError:
raise AttributeError(
"Module at {0!r} can't be used as a plugin, "
"since it has no 'setup' function.".format(module_path)
)
class PackageLoader:
"""Load plugins from specified package."""
def __init__(self, package_name):
self.package_name = package_name
def __call__(self, module_name):
try:
module = import_module(self.package_name + "." + module_name)
except ImportError:
return
try:
yield module.setup
except AttributeError:
raise AttributeError(
"{0!r} can't be used as a plugin, "
"since it has no 'setup' function.".format(module)
)
class ChainLoader:
"""Load plugins from all of the sub-loaders."""
def __init__(self, loaders):
self.loaders = loaders
def __call__(self, name):
for loader in self.loaders:
for plugin in loader(name):
yield plugin
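# Illustrative usage sketch (the group and plugin names here are examples, not
# part of the original module):
#     loader = ChainLoader([EntryPointLoader("blogdown.modules"), PathLoader("./plugins")])
#     for setup in loader("youtube"):
#         setup(app)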
|
["t_glaessle@gmx.de"] |
t_glaessle@gmx.de
|
c2b459c2282096b0821f5cafcca9b1d79861dd95
|
9619daf132259c31b31c9e23a15baa675ebc50c3
|
/memphis.users/memphis/users/registration.py
|
42d0886c2b83d4155d69ef9eca86b59d3b64b673
|
[] |
no_license
|
fafhrd91/memphis-dev
|
ade93c427c1efc374e0e1266382faed2f8e7cd89
|
c82aac1ad3a180ff93370b429498dbb1c2e655b8
|
refs/heads/master
| 2016-09-05T19:32:35.109441
| 2011-08-22T06:30:43
| 2011-08-22T06:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
from zope import interface
from memphis import controlpanel
from interfaces import _, ISiteRegistration
class SiteRegistration(object):
interface.implements(ISiteRegistration)
controlpanel.registerConfiglet(
'principals.registration', ISiteRegistration, SiteRegistration,
_("Site registration"), _("Site registration configuration."))
|
[
"fafhrd91@gmail.com"
] |
fafhrd91@gmail.com
|
550323588bb7c91d6f193aa3a636c51d6a3b730e
|
07f7e1296e528e83d570ee7f5c75ff83e331d949
|
/cufacesearch/cufacesearch/api/api.py
|
0a7465d9ef4781c7538c68ae161aa902f996b4e7
|
[
"Apache-2.0"
] |
permissive
|
wuqixiaobai/ColumbiaImageSearch
|
e0ab1ed8ab9724b70838085a37c3cd06638e93b2
|
a4c4816174c522c844b08feb1c9ddcad5ca2f6db
|
refs/heads/master
| 2020-03-08T10:38:13.669538
| 2018-03-22T19:56:34
| 2018-03-22T19:56:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,861
|
py
|
import sys
import time
import json
from datetime import datetime
from flask import Markup, flash, request, render_template, make_response
from flask_restful import Resource
from ..imgio.imgio import ImageMIMETypes, get_SHA1_img_type_from_B64, get_SHA1_img_info_from_buffer, buffer_to_B64
from ..detector.utils import build_bbox_str_list
from socket import *
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
global_searcher = None
global_start_time = None
input_type = "image"
class APIResponder(Resource):
def __init__(self):
self.searcher = global_searcher
self.start_time = global_start_time
self.input_type = input_type
# This could be loaded from config?
self.default_no_blur = True
self.default_max_height = 120
# how to blur canvas images but keep the face clean?
self.valid_options = ["near_dup", "near_dup_th", "no_blur", "detect_only", "max_height", "max_returned"]
def get(self, mode):
query = request.args.get('data')
#query = unicode(request.args.get('data'), "utf8")
options = request.args.get('options')
if query:
print "[get] received parameters: {}".format(request.args.keys())
print "[get] received data: "+query.encode('ascii','ignore')
print "[get] received options: {}".format(options)
return self.process_query(mode, query, options)
else:
return self.process_mode(mode)
def put(self, mode):
return self.put_post(mode)
def post(self, mode):
return self.put_post(mode)
def put_post(self, mode):
print("[put/post] received parameters: {}".format(request.form.keys()))
print("[put/post] received request: {}".format(request))
query = request.form['data']
        try:
            options = request.form['options']
        except KeyError:
            options = None
print("[put/post] received data of length: {}".format(len(query)))
print("[put/post] received options: {}".format(options))
if not query:
return {'error': 'no data received'}
else:
return self.process_query(mode, query, options)
def process_mode(self, mode):
if mode == "status":
return self.status()
elif mode == "refresh":
return self.refresh()
else:
return {'error': 'unknown_mode: '+str(mode)+'. Did you forget to give \'data\' parameter?'}
def process_query(self, mode, query, options=None):
start = time.time()
if mode == "byURL":
resp = self.search_byURL(query, options)
elif mode == "bySHA1":
resp = self.search_bySHA1(query, options)
elif mode == "byPATH":
resp = self.search_byPATH(query, options)
elif mode == "byB64":
resp = self.search_byB64(query, options)
elif mode == "view_image_sha1":
return self.view_image_sha1(query, options)
elif mode == "view_similar_byURL":
            query_response = self.search_byURL(query, options)
            return self.view_similar_query_response('URL', query, query_response, options)
        elif mode == "view_similar_byB64":
            query_response = self.search_byB64(query, options)
            return self.view_similar_query_response('B64', query, query_response, options)
        elif mode == "view_similar_byPATH":
            query_response = self.search_byPATH(query, options)
            return self.view_similar_query_response('PATH', query, query_response, options)
        elif mode == "view_similar_bySHA1":
            query_response = self.search_bySHA1(query, options)
            return self.view_similar_query_response('SHA1', query, query_response, options)
# elif mode == "byURL_nocache":
# resp = self.search_byURL_nocache(query, options)
# elif mode == "bySHA1_nocache":
# resp = self.search_bySHA1_nocache(query, options)
# elif mode == "byB64_nocache":
# resp = self.search_byB64_nocache(query, options)
else:
return {'error': 'unknown_mode: '+str(mode)}
resp['Timing'] = time.time()-start
return resp
def get_options_dict(self, options):
errors = []
options_dict = dict()
if options:
try:
options_dict = json.loads(options)
except Exception as inst:
err_msg = "[get_options: error] Could not load options from: {}. {}".format(options, inst)
print(err_msg)
errors.append(err_msg)
for k in options_dict:
if k not in self.valid_options:
err_msg = "[get_options: error] Unknown option {}".format(k)
print(err_msg)
errors.append(err_msg)
return options_dict, errors
    def append_errors(self, outp, errors=None):  # None default avoids the shared mutable-default pitfall
if errors:
e_d = dict()
if 'errors' in outp:
e_d = outp['errors']
for i,e in enumerate(errors):
e_d['error_{}'.format(i)] = e
outp['errors'] = e_d
return outp
def search_byURL(self, query, options=None):
query_urls = self.get_clean_urls_from_query(query)
options_dict, errors = self.get_options_dict(options)
#outp = self.searcher.search_image_list(query_urls, options_dict)
outp = self.searcher.search_imageURL_list(query_urls, options_dict)
outp_we = self.append_errors(outp, errors)
sys.stdout.flush()
return outp_we
def search_byPATH(self, query, options=None):
query_paths = query.split(',')
options_dict, errors = self.get_options_dict(options)
outp = self.searcher.search_image_path_list(query_paths, options_dict)
outp_we = self.append_errors(outp, errors)
sys.stdout.flush()
return outp_we
def search_bySHA1(self, query, options=None):
query_sha1s = query.split(',')
options_dict, errors = self.get_options_dict(options)
# get the image URLs/paths from HBase and search
# TODO: should we actually try to get features?
rows_imgs = self.searcher.indexer.get_columns_from_sha1_rows(query_sha1s, columns=[self.searcher.img_column])
        # TODO: what should we do if we get fewer rows_imgs than query_sha1s?
query_imgs = [row[1][self.searcher.img_column] for row in rows_imgs]
if self.searcher.file_input:
outp = self.searcher.search_image_path_list(query_imgs, options_dict)
else:
outp = self.searcher.search_imageURL_list(query_imgs, options_dict)
outp_we = self.append_errors(outp, errors)
sys.stdout.flush()
return outp_we
def search_byB64(self, query, options=None):
query_b64s = [str(x) for x in query.split(',') if not x.startswith('data:')]
options_dict, errors = self.get_options_dict(options)
outp = self.searcher.search_imageB64_list(query_b64s, options_dict)
outp_we = self.append_errors(outp, errors)
sys.stdout.flush()
return outp_we
def refresh(self):
# Force check if new images are available in HBase
# Could be called if data needs to be as up-to-date as it can be...
if self.searcher:
self.searcher.load_codes(full_refresh=True)
return {'refresh': 'just run a full refresh'}
def status(self):
# prepare output
status_dict = {'status': 'OK'}
status_dict['API_start_time'] = self.start_time.isoformat(' ')
status_dict['API_uptime'] = str(datetime.now()-self.start_time)
# Try to refresh on status call but at most every 4 hours
if self.searcher.last_refresh:
last_refresh_time = self.searcher.last_refresh
else:
last_refresh_time = self.searcher.indexer.last_refresh
diff_time = datetime.now()-last_refresh_time
if self.searcher and diff_time.total_seconds() > 3600*4:
self.searcher.load_codes()
last_refresh_time = self.searcher.last_refresh
status_dict['last_refresh_time'] = last_refresh_time.isoformat(' ')
status_dict['nb_indexed'] = str(self.searcher.searcher.get_nb_indexed())
return status_dict
    # TODO: Deal with multiple query images with an array parameter request.form.getlist(key)
@staticmethod
def get_clean_urls_from_query(query):
""" To deal with comma in URLs.
"""
# tmp_query_urls = ['http'+str(x) for x in query.split('http') if x]
# fix issue with unicode in URL
from ..common.dl import fixurl
tmp_query_urls = [fixurl('http' + x) for x in query.split('http') if x]
query_urls = []
for x in tmp_query_urls:
if x[-1] == ',':
query_urls.append(x[:-1])
else:
query_urls.append(x)
print "[get_clean_urls_from_query: info] {}".format(query_urls)
return query_urls
def get_image_str(self, row):
return "<img src=\"{}\" title=\"{}\" class=\"img_blur\">".format(row[1]["info:s3_url"],row[0])
def view_image_sha1(self, query, options=None):
# Not really used anymore...
query_sha1s = [str(x) for x in query.split(',')]
rows = self.searcher.indexer.get_columns_from_sha1_rows(query_sha1s, ["info:s3_url"])
images_str = ""
# TODO: change this to actually just produce a list of images to fill a new template
for row in rows:
images_str += self.get_image_str(row)
images = Markup(images_str)
flash(images)
headers = {'Content-Type': 'text/html'}
return make_response(render_template('view_images.html'),200,headers)
def view_similar_query_response(self, query_type, query, query_response, options=None):
if query_type == 'B64':
# get :
# - sha1 to be able to map to query response
# - image type to make sure the image is displayed properly
# - embedded format for each b64 query
# TODO: use array parameter
query_list = query.split(',')
query_b64_infos = [get_SHA1_img_type_from_B64(q) for q in query_list if not q.startswith('data:')]
query_urls_map = dict()
for img_id, img_info in enumerate(query_b64_infos):
query_urls_map[img_info[0]] = "data:"+ImageMIMETypes[img_info[1]]+";base64,"+str(query_list[img_id])
elif query_type == "PATH" or (query_type == "SHA1" and self.searcher.file_input):
# Encode query in B64
query_infos = []
query_list = query.split(',')
# Get images paths from sha1s
if query_type == 'SHA1' and self.searcher.file_input:
rows_imgs = self.searcher.indexer.get_columns_from_sha1_rows(query_list, columns=[self.searcher.img_column])
query_list = [row[1][self.searcher.img_column] for row in rows_imgs]
query_list_B64 = []
for q in query_list:
with open(q,'rb') as img_buffer:
query_infos.append(get_SHA1_img_info_from_buffer(img_buffer))
query_list_B64.append(buffer_to_B64(img_buffer))
query_urls_map = dict()
for img_id, img_info in enumerate(query_infos):
query_urls_map[img_info[0]] = "data:" + ImageMIMETypes[img_info[1]] + ";base64," + str(query_list_B64[img_id])
elif query_type == "URL" or (query_type == "SHA1" and not self.searcher.file_input):
# URLs should already be in query response
pass
else:
print "[view_similar_query_response: error] Unknown query_type: {}".format(query_type)
return None
# Get errors
options_dict, errors_options = self.get_options_dict(options)
# Parse similar faces response
all_sim_faces = query_response[self.searcher.do.map['all_similar_'+self.input_type+'s']]
search_results = []
print "[view_similar_query_response: log] len(sim_images): {}".format(len(all_sim_faces))
for i in range(len(all_sim_faces)):
# Parse query face, and build face tuple (sha1, url/b64 img, face bounding box)
query_face = all_sim_faces[i]
#print "query_face [{}]: {}".format(query_face.keys(), query_face)
sys.stdout.flush()
query_sha1 = query_face[self.searcher.do.map['query_sha1']]
if query_type == "B64" or query_type == "PATH" or (query_type == "SHA1" and self.searcher.file_input):
query_face_img = query_urls_map[query_sha1]
else:
query_face_img = query_face[self.searcher.do.map['query_url']].decode("utf8")
#query_face_img = query_face[self.searcher.do.map['query_url']]
if self.searcher.do.map['query_'+self.input_type] in query_face:
query_face_bbox = query_face[self.searcher.do.map['query_'+self.input_type]]
query_face_bbox_compstr = build_bbox_str_list(query_face_bbox)
else:
query_face_bbox_compstr = []
img_size = None
if self.searcher.do.map['img_info'] in query_face:
img_size = query_face[self.searcher.do.map['img_info']][1:]
out_query_face = (query_sha1, query_face_img, query_face_bbox_compstr, img_size)
# Parse similar faces
similar_faces = query_face[self.searcher.do.map['similar_'+self.input_type+'s']]
#print similar_faces[self.searcher.do.map['number_faces']]
out_similar_faces = []
for j in range(similar_faces[self.searcher.do.map['number_'+self.input_type+'s']]):
# build face tuple (sha1, url/b64 img, face bounding box, distance) for one similar face
osface_sha1 = similar_faces[self.searcher.do.map['image_sha1s']][j]
#if query_type == "PATH":
if self.searcher.file_input:
with open(similar_faces[self.searcher.do.map['cached_image_urls']][j], 'rb') as img_buffer:
img_info = get_SHA1_img_info_from_buffer(img_buffer)
img_B64 = buffer_to_B64(img_buffer)
osface_url = "data:" + ImageMIMETypes[img_info[1]] + ";base64," + str(img_B64)
else:
osface_url = similar_faces[self.searcher.do.map['cached_image_urls']][j]
osface_bbox_compstr = None
if self.input_type != "image":
osface_bbox = similar_faces[self.searcher.do.map[self.input_type+'s']][j]
osface_bbox_compstr = build_bbox_str_list(osface_bbox)
osface_img_size = None
if self.searcher.do.map['img_info'] in similar_faces:
osface_img_size = similar_faces[self.searcher.do.map['img_info']][j][1:]
osface_dist = similar_faces[self.searcher.do.map['distances']][j]
out_similar_faces.append((osface_sha1, osface_url, osface_bbox_compstr, osface_dist, osface_img_size))
# build output
search_results.append((out_query_face, [out_similar_faces]))
# Prepare settings
settings = dict()
settings["no_blur"] = self.default_no_blur
settings["max_height"] = self.default_max_height
if "no_blur" in options_dict:
settings["no_blur"] = options_dict["no_blur"]
if "max_height" in options_dict:
settings["max_height"] = options_dict["max_height"]
headers = {'Content-Type': 'text/html'}
#print search_results
sys.stdout.flush()
if self.input_type != "image":
return make_response(render_template('view_similar_faces_wbbox.html',
settings=settings,
search_results=search_results),
200, headers)
else:
return make_response(render_template('view_similar_images.html',
settings=settings,
search_results=search_results),
200, headers)
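# Hypothetical client sketch (not from this repo; the host, port, and URL
# prefix under which APIResponder is mounted are assumptions):
#   import requests
#   r = requests.get("http://localhost:5000/cu_image_search/byURL",
#                    params={"data": "http://example.com/img.jpg",
#                            "options": '{"near_dup": 1, "max_returned": 10}'})
#   print(r.json())   # search results, including a 'Timing' entry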
|
[
"svebor.karaman@gmail.com"
] |
svebor.karaman@gmail.com
|
b670f36db9b9c4d1136a27548f323e31a2c032fb
|
864720a85259b6c38614d715b901f0a1f166ff2b
|
/MainProject/newfor.py
|
03da98b279241608912485393a240a8bf4ab2606
|
[] |
no_license
|
rhlsin/Weather-Forecast-Cum-Schedular
|
c0218335b5b58a14310dd89726bd9228f7c1bb5a
|
971d51e783757a00b2c735e69fb291acef63f26a
|
refs/heads/master
| 2021-09-18T05:39:06.565853
| 2018-07-10T10:03:05
| 2018-07-10T10:03:05
| 104,628,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,160
|
py
|
import datetime
import forecastio
from forecastiopy import *
from geopy.geocoders import Nominatim
Lisbon=[]
api_key = 'a94a682750fdb4970407428c7795efaa'
def namcty(name):
    geolocator = Nominatim()
    location = geolocator.geocode(name)
    del Lisbon[:]  # reset so repeated calls don't keep growing the list
    Lisbon.append(location.latitude)
    Lisbon.append(location.longitude)
def curtem():
fio = ForecastIO.ForecastIO(api_key, latitude=Lisbon[0], longitude=Lisbon[1])
current = FIOCurrently.FIOCurrently(fio)
return current.temperature
def curpre():
fio = ForecastIO.ForecastIO(api_key, latitude=Lisbon[0], longitude=Lisbon[1])
current = FIOCurrently.FIOCurrently(fio)
return current.pressure
def curhumi():
fio = ForecastIO.ForecastIO(api_key, latitude=Lisbon[0], longitude=Lisbon[1])
current = FIOCurrently.FIOCurrently(fio)
return current.humidity
def curusm():
fio = ForecastIO.ForecastIO(api_key, latitude=Lisbon[0], longitude=Lisbon[1])
current = FIOCurrently.FIOCurrently(fio)
return current.summary
def curpri():
fio = ForecastIO.ForecastIO(api_key, latitude=Lisbon[0], longitude=Lisbon[1])
current = FIOCurrently.FIOCurrently(fio)
return current.precipProbability
def houtem(x,y,z):
tem=[]
time = datetime.datetime(x, y, z, 0, 0, 0) #x=year,,y=mon,,z=date
forecast = forecastio.load_forecast(api_key, Lisbon[0], Lisbon[1], time=time)
    byHour = forecast.hourly()
for hourlyData in byHour.data:
tem.append(hourlyData.temperature)
return tem
def houtim(x,y,z):
tem=[]
time = datetime.datetime(x, y, z, 0, 0, 0)
forecast = forecastio.load_forecast(api_key, Lisbon[0], Lisbon[1], time=time)
    byHour = forecast.hourly()
for hourlyData in byHour.data:
tem.append(hourlyData.time)
return tem
def houpre(x,y,z):
tem=[]
time = datetime.datetime(x, y, z, 0, 0, 0)
forecast = forecastio.load_forecast(api_key, Lisbon[0], Lisbon[1], time=time)
    byHour = forecast.hourly()
for hourlyData in byHour.data:
tem.append(hourlyData.pressure)
return tem
def houhumi(x,y,z):
tem=[]
time = datetime.datetime(x, y, z, 0, 0, 0)
forecast = forecastio.load_forecast(api_key, Lisbon[0], Lisbon[1], time=time)
    byHour = forecast.hourly()
for hourlyData in byHour.data:
tem.append(hourlyData.humidity)
return tem
def housum(x,y,z):
tem=[]
time = datetime.datetime(x, y, z, 0, 0, 0)
forecast = forecastio.load_forecast(api_key, Lisbon[0], Lisbon[1], time=time)
    byHour = forecast.hourly()
for hourlyData in byHour.data:
tem.append(hourlyData.summary)
return tem
def houpri(x,y,z):
tem=[]
time = datetime.datetime(x, y, z, 0, 0, 0)
forecast = forecastio.load_forecast(api_key, Lisbon[0], Lisbon[1], time=time)
    byHour = forecast.hourly()
for hourlyData in byHour.data:
tem.append(hourlyData.precipProbability)
return tem
#namcty('bhimtal')
#x=houpre()
#print(x)
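# Usage sketch (the city and date below are illustrative):
#   namcty('Lisbon')              # geocode once to fill the Lisbon coords
#   print(curtem())               # current temperature
#   print(houtem(2018, 7, 10))    # hourly temperatures for 2018-07-10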
|
[
"rhl_92@yahoo.com"
] |
rhl_92@yahoo.com
|
814f23902fd16e2421040797070c339848a66750
|
33bddacfd54d9d85989d96298ebdd70af3a190f8
|
/user_profile/migrations/0002_userprofile_gender.py
|
2109b8db1c7e9ca3162ef92167a89401e0630d63
|
[] |
no_license
|
Jagdish010/Django-API
|
85a64342e1de500a39b9c91247fe2c212e4d9082
|
90615954e5d77a0022b5ad185461bbba18a800b9
|
refs/heads/master
| 2023-02-28T19:07:06.431872
| 2021-01-29T09:33:02
| 2021-01-29T09:33:02
| 334,087,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# Generated by Django 2.0.7 on 2021-01-28 19:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='gender',
field=models.CharField(choices=[('Male', 'Male'), ('Female', 'Female')], max_length=100, null=True),
),
]
|
[
"jagdishn@avu.net.in"
] |
jagdishn@avu.net.in
|
82d49a9ea24a6ef56776243ff4a21c12b816e9f6
|
eab72229ae04d1160704cbf90a08a582802a739c
|
/put_zero_den.py
|
34666a2ec393a250b458da9b91999832b8c281fe
|
[
"MIT"
] |
permissive
|
megatazm/Crowd-Counting
|
444d39b0e3d6e98995f53badf4c073829038b6b7
|
647a055baccee2c3b6b780f38930e2ffd14d1664
|
refs/heads/master
| 2022-04-01T04:49:16.409675
| 2020-01-31T21:24:02
| 2020-01-31T21:24:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
import cv2
import numpy as np
import os
import glob
from paint_den import paint
import params
path = params.input
dirs = [f for f in glob.glob(path + '/*/')]
images = []
for x in dirs:
images.append([f for f in glob.glob(x + '/*_pos.png')])
images.sort()
images = [item for sublist in images for item in sublist]
for img_path in images:
#paint(img_path, 36, 1785, 393, 75, 567, 60, 951, 1776)
paint(img_path, 0, 3234, 737, 198, 1034, 220, 1617, 3228)
|
[
"gpsunicamp016@gmail.com"
] |
gpsunicamp016@gmail.com
|
4ec352b7fec601c5527b86e77fd11e0ead795e56
|
a37219a018aab5a2c0d1934056b72aee8d32b9b8
|
/0409/0409/ntust/Scripts/django-admin.py
|
9c5695d0d64deb3f67f5536dbf3876dd66d52d87
|
[] |
no_license
|
Leesinbaka/JerryHW
|
6a573defac546740e4358837f9c390ddef395edf
|
6b7de91d69ae51fdc90601843ed1f46eb2ba2c3f
|
refs/heads/master
| 2022-12-25T14:05:30.745364
| 2018-06-09T10:17:02
| 2018-06-09T10:17:02
| 124,868,685
| 0
| 1
| null | 2022-12-09T08:24:52
| 2018-03-12T09:58:28
|
Python
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
#!c:\users\him\desktop\0409\ntust\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"kenneth72355700@gmail.com"
] |
kenneth72355700@gmail.com
|
82c2659eb4d6f99896c3ba268b2a4a1384b640aa
|
7375c15595cd1010d87c1616c598466a413b5d41
|
/HW3/NeuralNet.py
|
a596fea80d7eb168efa899f0751b9804e6875d93
|
[] |
no_license
|
gdr0ge/Fa19-ML
|
dfd12e8b550cdb09bce15adbfa81288e7be5e362
|
c65a76f59d05db46da342c57cd6a6ecdbd053c08
|
refs/heads/main
| 2023-03-31T15:58:28.409003
| 2021-04-02T02:44:03
| 2021-04-02T02:44:03
| 353,881,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,958
|
py
|
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
n_epochs = 6
lr = 0.01
batch_size = 32
log_interval = 100
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # self.bias = nn.Parameter(torch.ones((28*28),1))
        self.fc1 = nn.Linear(1 * 28 * 28, 128, bias=True)
        self.fc2 = nn.Linear(128, 10, bias=False)
    def forward(self, x):
        # input arrives already flattened by the caller
        # x = x + self.bias
        x = F.relu(self.fc1(x))
        # return raw logits: nn.CrossEntropyLoss applies log-softmax itself,
        # so the previous F.softmax(..., dim=0) double-normalized across the batch
        x = self.fc2(x)
        return x
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./data', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('./data', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True)
net = Net()
optimizer = optim.SGD(net.parameters(), lr=lr)
loss_fn = nn.CrossEntropyLoss()
train_losses = []
train_accuracies = []
test_losses = []
def train(epoch):
net.train()
correct = 0
train_loss = 0
t_losses = []
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
        output = net(data.reshape(-1, 28*28))  # -1 handles the final, possibly smaller batch
loss = loss_fn(output, target)
train_loss += loss.item()
t_losses.append(loss.item())
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
# torch.save(net.state_dict(), '/results/model.pth')
# torch.save(optimizer.state_dict(), '/results/optimizer.pth')
train_loss /= len(train_loader.dataset)
train_losses.append(t_losses)
train_accuracies.append(100. * correct / len(train_loader.dataset))
print('\nTrain set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
train_loss, correct, len(train_loader.dataset),
100. * correct / len(train_loader.dataset)))
def test():
net.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
            output = net(data.reshape(-1, 28*28))  # -1 handles the final, possibly smaller batch
            test_loss += F.cross_entropy(output, target, reduction='sum').item()  # size_average can't be passed to an nn.CrossEntropyLoss instance
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
for epoch in range(1, n_epochs + 1):
    train(epoch)
    test()  # evaluate each epoch so test_losses is actually populated
plt.figure()
for i,t in enumerate(train_losses):
plt.plot(range(len(t)),t,label="epoch-{}".format(i))
plt.xlabel("Iteration")
plt.ylabel("Loss")
plt.title("Loss functions for each epoch")
plt.legend()
plt.show()
plt.figure()
plt.plot(range(len(train_accuracies)),train_accuracies)
plt.xlabel("Epoch #")
plt.ylabel("Accuracy (%)")
plt.title("Accuracy Over Training Epochs")
plt.show()
|
[
"grahamdroge@gmail.com"
] |
grahamdroge@gmail.com
|
2cffed30653acf460e4754cf7749eaf6a5e2e45b
|
cc0cc5268223f9c80339d1bbc2e499edc828e904
|
/wallets/thrifty_wallets/manage.py
|
e4a50db512daef9656866ea7fe7ac714993b463d
|
[] |
no_license
|
deone/thrifty
|
0ba2b0445e7e9fd4cc378350de158dc6c89838b4
|
a0ee4af9447b2765f4139deb87a3c1464e7c7751
|
refs/heads/master
| 2021-01-10T12:00:00.618968
| 2015-11-01T23:36:36
| 2015-11-01T23:36:36
| 45,340,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thrifty_wallets.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"alwaysdeone@gmail.com"
] |
alwaysdeone@gmail.com
|
9e640d5e9035eb3c390422021a2cbf28beb05318
|
cc2411f7ab2f743b619294e332a07aceec82ea41
|
/faces-train.py
|
9aa8b4c3695c8ad43c161b5693e944924c26f737
|
[
"MIT"
] |
permissive
|
GoPerry/opencv-face-recognition
|
23c7a37e99d0769d0a90cacbc8ebf3763b7d09aa
|
53b008dff52582be909c8dd19c33d11aa79d10b7
|
refs/heads/main
| 2022-12-27T17:43:40.591684
| 2020-10-13T14:38:41
| 2020-10-13T15:27:45
| 303,733,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
import cv2
import os
import numpy as np
from PIL import Image
import pickle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
current_id = 0
label_ids = {}
y_labels = []
x_train = []
for root, dirs, files in os.walk(image_dir):
for file in files:
if file.endswith("png") or file.endswith("jpg"):
path = os.path.join(root, file)
label = os.path.basename(root).replace(" ", "-").lower()
#print(label, path)
            if label not in label_ids:
label_ids[label] = current_id
current_id += 1
id_ = label_ids[label]
#print(label_ids)
#y_labels.append(label) # some number
#x_train.append(path) # verify this image, turn into a NUMPY arrray, GRAY
pil_image = Image.open(path).convert("L") # grayscale
size = (550, 550)
final_image = pil_image.resize(size, Image.ANTIALIAS)
image_array = np.array(final_image, "uint8")
#print(image_array)
faces = face_cascade.detectMultiScale(image_array, scaleFactor=1.5, minNeighbors=5)
for (x,y,w,h) in faces:
roi = image_array[y:y+h, x:x+w]
x_train.append(roi)
y_labels.append(id_)
print(y_labels)
print(label_ids)
#print(x_train)
with open("pickles/face-labels.pickle", 'wb') as f:
pickle.dump(label_ids, f)
recognizer.train(x_train, np.array(y_labels))
recognizer.save("recognizers/face-trainner.yml")
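# Inference sketch (assumed follow-up, not part of this script): the saved
# recognizer and label map would be reloaded like this before predicting
# on a new grayscale face crop.
#   recognizer.read("recognizers/face-trainner.yml")
#   with open("pickles/face-labels.pickle", 'rb') as f:
#       labels = {v: k for k, v in pickle.load(f).items()}
#   id_, conf = recognizer.predict(roi)   # roi: uint8 grayscale face crop
#   print(labels[id_], conf)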
|
[
"perry_yuan@mail.com"
] |
perry_yuan@mail.com
|
16e0ae410ab9c5056f793ef00a29456e3926cbfc
|
3b9bf497cd29cea9c24462e0411fa8adbfa6ba60
|
/leetcode/Problems/116--Populating-Next-Right-Pointers-in-Each-Node-Medium.py
|
2e8b81530cf65226d4d6de3352b0c75892188c4a
|
[] |
no_license
|
niteesh2268/coding-prepation
|
918823cb7f4965bec096ec476c639a06a9dd9692
|
19be0766f6b9c298fb32754f66416f79567843c1
|
refs/heads/master
| 2023-01-02T05:30:59.662890
| 2020-10-17T13:12:34
| 2020-10-17T13:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
"""
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
def connect(self, root: 'Node') -> 'Node':
if not root:
return None
        def assign(r1, r2):
            # link r1 -> r2 (r2 may be None for the rightmost node), then
            # pair r1's own children and, when r2 exists, bridge the gap
            # between the two subtrees via r1.right -> r2.left
            if not r1:
                return
            r1.next = r2
            assign(r1.left, r1.right)
            if r2:
                assign(r1.right, r2.left)
                assign(r2.left, r2.right)
                assign(r2.right, None)
assign(root.left, root.right)
return root
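# Quick check sketch (hypothetical, mirrors the commented Node definition):
# in a tree of depth 2, level 1 should end up linked 2 -> 3 -> None.
#   root = Node(1, Node(2), Node(3))
#   Solution().connect(root)
#   assert root.left.next is root.right
#   assert root.right.next is None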
|
[
"akulajayaprakash@gmail.com"
] |
akulajayaprakash@gmail.com
|
5ef53c9e1394c1d2f92962a9f34217c5c9134413
|
11841e8fb1e44c69ae7e50c0b85b324c4d90abda
|
/chutu/exmapxx.py
|
5a8c550eb45031c938a4fb4f4a1d660bcf2fed3d
|
[] |
no_license
|
chenlong2019/python
|
1d7bf6fb60229221c79538234ad2f1a91bb03c50
|
fc9e239754c5715a67cb6d743109800b64d74dc8
|
refs/heads/master
| 2020-12-08T11:11:49.951752
| 2020-01-10T04:58:29
| 2020-01-10T04:59:50
| 232,968,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,386
|
py
|
# coding=utf-8
import arcpy
import os,glob,time
res=200
# modelpath: template mxd document path; mxdpath: output mxd document path
def createMxd(modelpath,mxdpath,symbologyLayer,jpgpath,string,lyrfile):
mxd=arcpy.mapping.MapDocument(modelpath)
if(os.path.exists(mxdpath)):
mxd=arcpy.mapping.MapDocument(mxdpath)
print("location as "+mxdpath)
        arcpy.AddWarning("The file already exists")
else:
mxd.saveACopy(mxdpath)
print(mxdpath+" saveAs succefssful")
if(os.path.exists(mxdpath)):
mxd=arcpy.mapping.MapDocument(mxdpath)
print("location in "+mxdpath)
        # Find the data frame
        df = arcpy.mapping.ListDataFrames(mxd, "*")[0]
        # Add the base raster layer
#symbologyLayer = "D:\\cs\\model\\lyr\\Rectangle_#1_常熟卫图_Level_16.tif.lyr"
#"F:\\xinxiang\\fil\\20190817mydPM25.tif"
        rasLayer = arcpy.mapping.Layer(lyrfile)
        symbologyLyr = arcpy.mapping.Layer(symbologyLayer)
        # rasLayer.symbology.
        arcpy.ApplySymbologyFromLayer_management(rasLayer, symbologyLyr)
arcpy.mapping.AddLayer(df, rasLayer, "AUTO_ARRANGE")
        arcpy.AddMessage(str(time.ctime())+":"+symbologyLayer+" added successfully...")
for legend in arcpy.mapping.ListLayoutElements(mxd, "LEGEND_ELEMENT", "Legend"):
print(legend.items)
arcpy.RefreshActiveView()
for legend in arcpy.mapping.ListLayoutElements(mxd, "LEGEND_ELEMENT", "Legend"):
print(legend.items)
mxd.save()
arcpy.mapping.ExportToJPEG(mxd, jpgpath, resolution = res)
if __name__ == '__main__':
rootpath=u'F:\\xx\\中心城区'
pathDir = os.listdir(rootpath)
try:
os.makedirs(u'F:\\xx\\AutoMap\\result\\mxd\\o3')
except:
pass
try:
os.makedirs(u'F:\\xx\\AutoMap\\result\\JpgOutput')
except:
pass
for filename in pathDir:
if filename[-4:].lower() == '.tif':
# o3
if filename[-5:-4].lower() == '3':
try:
filepath=os.path.join(rootpath,filename)
print(filename)
mxdpath=u"F:\\xx\\AutoMap\\result\\mxd\\xinxiang{}.mxd".format(filename[:-4])
modelpath=u"F:\\xx\\AutoMap\\Mxd\\xinxiang_O3.mxd"
                    # mxd template file path
#modelpath=arcpy.GetParameterAsText(0)
                    # output mxd file path
#mxdpath=arcpy.GetParameterAsText(1)
                    # tif file path
symbologyLayer=u'F:\\xx\\Lyr\\C_20191111modo356.lyr'
#filepath = "D:\\cs\\data\\pic3"
                    # shp folder path
#filepath=arcpy.GetParameterAsText(3)
                    # jpg output path
jpgpath=u"F:\\xx\\AutoMap\\result\\JpgOutput\\{}.jpg".format(filename[:-4])
# jpgpath=arcpy.GetParameterAsText(4)
arcpy.AddMessage('')
                    arcpy.AddMessage(str(time.ctime())+" output started!")
createMxd(modelpath,mxdpath,symbologyLayer,jpgpath,'',filepath)
print('successful')
                    arcpy.AddMessage(str(time.ctime())+" output finished!")
except Exception as e:
print(e.message)
|
[
"1174650816@qq.com"
] |
1174650816@qq.com
|
10c811755bbeff6b27cebbc77dbe356bb64edc11
|
15ed3ab4510677e6df9b11af8fd7a36fc6d826fc
|
/v1/og_mc_3/tau=0.01/eta=0.04/library/mc6.py
|
a573e2553235d58cd70aaa9530cdec9d32c14c5f
|
[] |
no_license
|
pe-ge/Computational-analysis-of-memory-capacity-in-echo-state-networks
|
929347575538de7015190d35a7c2f5f0606235f2
|
85873d8847fb2876cc8a6a2073c2d1779ea1b20b
|
refs/heads/master
| 2020-04-02T08:08:38.595974
| 2018-01-17T08:12:26
| 2018-01-17T08:12:26
| 61,425,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,280
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mc6.py
Created 21.3.2015
Based on mc5.py
Goal: Measuring Memory Capacity of reservoirs.
Changes:
- removed correlation coefficient correction MC <- MC - q / iterations_coef_measure
- added input-to-output connections
"""
from numpy import random, zeros, tanh, dot, corrcoef, average, hstack
from library.lyapunov import lyapunov_exp
import scipy.linalg
def memory_capacity(W, WI, memory_max=None,
iterations=1000, iterations_skipped=None, iterations_coef_measure=1000,
runs=1, input_dist=(-1., 1.),
use_input=False, target_later=False, calc_lyapunov=False):
"""Calculates memory capacity of a NN
[given by its input weights WI and reservoir weights W].
W = q x q matrix storing hidden reservoir weights
WI = q x 1 vector storing input weights
Returns: a non-negative real number MC
MC: memory capacity sum for histories 1..MEMORY_MAX
"""
# matrix shape checks
if len(WI.shape) != 1:
raise Exception("input matrix WI must be vector-shaped!")
q, = WI.shape
if W.shape != (q, q):
raise Exception("W and WI matrix sizes do not match")
if memory_max is None:
memory_max = q
if iterations_skipped is None:
iterations_skipped = max(memory_max, 100) + 1
iterations_measured = iterations - iterations_skipped
dist_input = lambda: random.uniform(input_dist[0], input_dist[1], iterations)
# vector initialization
X = zeros(q)
if use_input:
S = zeros([q + 1, iterations_measured])
else:
S = zeros([q, iterations_measured])
# generate random input
u = dist_input() # all input; dimension: [iterations, 1]
    # run the network for `iterations` steps and record the reservoir states into S
for it in range(iterations):
X = tanh(dot(W, X) + dot(WI, u[it]))
if it >= iterations_skipped:
# record the state of reservoir activations X into S
if use_input:
S[:, it - iterations_skipped] = hstack([X, u[it]])
else:
S[:, it - iterations_skipped] = X
# prepare matrix D of desired values (that is, shifted inputs)
assert memory_max < iterations_skipped
D = zeros([memory_max, iterations_measured])
if target_later:
# if we allow direct input-output connections, there is no point in measuring 0-delay corr. coef. (it is always 1)
for h in range(memory_max):
D[h,:] = u[iterations_skipped - (h+1) : iterations - (h+1)]
else:
for h in range(memory_max):
D[h,:] = u[iterations_skipped - h : iterations - h]
# calculate pseudoinverse S+ and with it, the matrix WO
S_PINV = scipy.linalg.pinv(S)
WO = dot(D, S_PINV)
# do a new run for an unbiased test of quality of our newly trained WO
# we skip memory_max iterations to have large enough window
MC = zeros([runs, memory_max]) # here we store memory capacity
LE = zeros(runs) # lyapunov exponent
for run in range(runs):
u = random.uniform(input_dist[0], input_dist[1], iterations_coef_measure + memory_max)
X = zeros(q)
o = zeros([memory_max, iterations_coef_measure]) # 200 x 1000
for it in range(iterations_coef_measure + memory_max):
X = tanh(dot(W, X) + dot(WI, u[it]))
if it >= memory_max:
# we calculate output nodes using WO
if use_input:
o[:, it - memory_max] = dot(WO, hstack([X, u[it]]))
else:
o[:, it - memory_max] = dot(WO, X)
# correlate outputs with inputs (shifted)
for h in range(memory_max):
k = h + 1
if target_later:
cc = corrcoef(u[memory_max - k : memory_max + iterations_coef_measure - k], o[h, : ]) [0, 1]
else:
cc = corrcoef(u[memory_max - h : memory_max + iterations_coef_measure - h], o[h, : ]) [0, 1]
MC[run, h] = cc * cc
# calculate lyapunov
if calc_lyapunov:
LE[run] = lyapunov_exp(W, WI, X)
return sum(average(MC, axis=0)), LE
def main():
print("I am a library. Please don't run me directly.")
if __name__ == '__main__':
main()
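# Usage sketch (illustrative, not from the original experiments; the reservoir
# size and scaling constants below are assumptions):
#   from numpy.linalg import eigvals
#   q = 100
#   W = random.normal(0, 0.1, [q, q])
#   W *= 0.9 / max(abs(eigvals(W)))        # rescale to spectral radius 0.9
#   WI = random.uniform(-0.1, 0.1, q)
#   mc, le = memory_capacity(W, WI, runs=3, target_later=True)
#   print("MC =", mc)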
|
[
"gergelp@gmail.com"
] |
gergelp@gmail.com
|
fc07e766feb0209e6a93a506e4dfd139b922334c
|
1c4306e8c32b05d3c61917d915da182703b473ba
|
/Classroom/hyperion/urls.py
|
71c307c218a3695f9e078dde0eb02914b5b59c0b
|
[] |
no_license
|
SabbirSzl/Software-Engineering-Project-Hyperion-ClassRoom
|
96cec2633883d933c7586bcdf6826b8bbdb65257
|
b9a808c2aa64ff8aa2d845f4e68492506e1589c2
|
refs/heads/main
| 2023-02-26T14:21:32.978147
| 2021-01-25T16:24:20
| 2021-01-25T16:24:20
| 332,654,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
from django.conf.urls import url, include
from django.urls import path
from hyperion import views
#from hyperion.views import AccountControllerTemplate,LoginController
urlpatterns = [
path("", views.classes, name = 'classes'), #this empty path will be redirect to classes
path("individualclass/", views.individualclass, name = 'individualclass'),
path("classes", views.classes, name = 'classes'),
path("createdclass/", views.createdclass, name = 'createdclass'),
path("calendar/", views.calendar, name = 'calendar'),
path("setting/", views.setting, name = 'setting'),
path("login/", views.login, name='login'),
path("logout/", views.logout, name='logout'),
path("registration/", views.registration, name = 'registration'),
path("classcreating/", views.classcreating, name = 'classcreating'),
path("joinclass/", views.joinclass, name='joinclass'),
#path('registrationView/', RegistrationController.as_view(), name='registration'),
#path('login/', LoginController.as_view(), name='login')
]
|
[
"sabbirsozol@gmail.com"
] |
sabbirsozol@gmail.com
|
3624606505d2c4d32dc651e65c3f931dac1fee9d
|
9ba95201a8e27799f6d3de0cfca766cb42b78b61
|
/venv/bin/easy_install-3.7
|
57db12113d976c9a92700e4f0d919c0810c0dad9
|
[] |
no_license
|
LuisaBarbalho/tasks-python
|
011b603aa15b6e0fc9aedfd8f6f164845d0020ad
|
fc5b75073d1021d90df4ae6b805ab9c173fdffc4
|
refs/heads/master
| 2020-05-29T16:04:56.622115
| 2019-05-29T14:16:32
| 2019-05-29T14:16:32
| 189,238,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
7
|
#!/Users/luisabarbalho/PycharmProjects/python1/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"luisa.barbalho@lais.huol.ufrn.br"
] |
luisa.barbalho@lais.huol.ufrn.br
|
abdc779c2d410101111196e525ec6947439523c7
|
b4fee5ef70c34e161c4fd8daf19a1bc233cd5f6b
|
/Python/Warmup-2/codebat_wm2_front_times.py
|
d92f281320236eb638b1324e92a33a9b92eaf3ea
|
[] |
no_license
|
Hoan1028/CodingBat-Solutions
|
2bf680a43cc5721f90ae731ac0996ca8f5f7f360
|
f64412a88345240f70c4b0f1e1461d61b310ecb7
|
refs/heads/master
| 2020-05-19T00:34:39.963638
| 2019-05-10T01:06:11
| 2019-05-10T01:06:11
| 184,738,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
#CodingBat Python Warmup-2>front_times:
def main():
#unit test
print('Expected Values: \nChoCho, ChoChoCho, AbcAbcAbc, AbAbAbAb, AAAA, \'\', \'\' \nActual:')
print(front_times('Chocolate', 2), front_times('Chocolate', 3), front_times('Abc', 3), front_times('Ab', 4), front_times('A', 4), front_times('', 4),front_times('Abc', 0))
#Problem:
#Given a string and a non-negative int n, we'll say that the front of the string is the first 3 chars,
#or whatever is there if the string is less than length 3.
#Return n copies of the front.
#Solution:
def front_times(str, n):
if len(str) < 3:
return str*n
else:
return str[:3]*n
main()
|
[
"hoannguyen1028@gmail.com"
] |
hoannguyen1028@gmail.com
|
1e68855aee7ae6eefdb3a2f5f9190bde7e590908
|
0e7d6dd418f6a7617ced67ac8c8e8c2a4270f030
|
/py_course/1-4/test3.py
|
0a6bbbe5c9f7d2dc723026bc9417df976a7f1369
|
[] |
no_license
|
alexoch/python_101_course
|
b72dfb8d0c71baf60cc8c0c62ec02bd505deb506
|
4f8c765c3ce7d55562c5f18f7b45d6b0199d4628
|
refs/heads/master
| 2021-01-19T04:05:20.754879
| 2017-03-12T20:47:34
| 2017-03-12T20:47:34
| 84,426,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
"""
: 10 20 30
: python lab3_1.py 10 20 30
: not triangle
: 1 1 1
: python lab3_1.py 1 1 1
: triangle
: 5.5 5.5 -2
: python lab3_1.py 5.5 5.5 -2
: not triangle
"""
import sys
X = float(sys.argv[1])
Y = float(sys.argv[2])
Z = float(sys.argv[3])
if X <= 0 or Y <= 0 or Z <= 0:
    print('not triangle')
# all three triangle inequalities; the original repeated X+Z <= Y and never checked Y+Z <= X
elif X+Y <= Z or X+Z <= Y or Y+Z <= X:
    print('not triangle')
else:
    print('triangle')
|
[
"s.ocheretyany@gmail.com"
] |
s.ocheretyany@gmail.com
|
3e95082625682532c740b5cbbb866d6699da683e
|
0f9cb900b0e7b07cc7b872ccd94fa25d0ebd5f99
|
/section-5-lists-and-tuples/magic_adder.py
|
51d733b4931b7742540a8f04da831ddeb0e2107d
|
[] |
no_license
|
petkonnikolov/python-masterclass
|
94aada4886ec808e9984059022b879079322855c
|
635ba0dca44661baeb73d4e3446d7e083985852f
|
refs/heads/main
| 2023-04-11T19:12:38.138602
| 2021-05-11T11:51:02
| 2021-05-11T11:51:02
| 359,227,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
string = input('Please enter three integers in format "a,b,c": ')
list1 = string.split(sep=',')
for index in range(len(list1)):
list1[index] = int(list1[index])
# calculation = (list1[0] + list1[1]) - list1[2]
a, b, c = list1
calculation = (a + b) - c
print(calculation)
|
[
"petko.nikolov@dxc.com"
] |
petko.nikolov@dxc.com
|
6a75a718dcc0386af8e66481b8e6baabcf4235b7
|
5b586be4d47d4d588e11ceb5b9213aea87b67257
|
/bambou/contextual/decorators.py
|
ea5b31b9b03a295068e324cf69bc67f85f0095ff
|
[
"BSD-3-Clause"
] |
permissive
|
Dogild/bambou
|
47a202941e1c67eef207711538878a3e0ac0742e
|
a148e2b926d227b2da501910522859cf3e35d0df
|
refs/heads/master
| 2021-01-21T09:53:06.691163
| 2016-03-05T02:08:18
| 2016-03-05T02:08:18
| 53,224,552
| 1
| 0
| null | 2016-03-05T21:36:41
| 2016-03-05T21:36:40
| null |
UTF-8
|
Python
| false
| false
| 21,707
|
py
|
from types import ClassType, FunctionType
import sys, os
__all__ = [
'decorate_class', 'metaclass_is_decorator', 'metaclass_for_bases',
'frameinfo', 'decorate_assignment', 'decorate', 'struct', 'classy',
'template_function', 'rewrap', 'cache_source', 'enclosing_frame',
'synchronized',
]
def decorate(*decorators):
"""Use Python 2.4 decorators w/Python 2.3+
Example::
class Foo(object):
decorate(classmethod)
def something(cls,etc):
\"""This is a classmethod\"""
You can pass in more than one decorator, and they are applied in the same
order that would be used for ``@`` decorators in Python 2.4.
This function can be used to write decorator-using code that will work with
both Python 2.3 and 2.4 (and up).
"""
if len(decorators)>1:
decorators = list(decorators)
decorators.reverse()
def callback(frame,k,v,old_locals):
for d in decorators:
v = d(v)
return v
return decorate_assignment(callback)
def enclosing_frame(frame=None, level=3):
"""Get an enclosing frame that skips DecoratorTools callback code"""
frame = frame or sys._getframe(level)
while frame.f_globals.get('__name__')==__name__: frame = frame.f_back
return frame
def name_and_spec(func):
from inspect import formatargspec, getargspec
funcname = func.__name__
if funcname=='<lambda>':
funcname = "anonymous"
args, varargs, kwargs, defaults = getargspec(func)
return funcname, formatargspec(args, varargs, kwargs)[1:-1]
def qname(func):
m = func.__module__
return m and m+'.'+func.__name__ or func.__name__
class Bomb:
def __str__(self):
raise RuntimeError("template functions must return a static string!")
bomb = Bomb()
def getbody(func):
from inspect import getargspec
args, varargs, kwargs, defaults = getargspec(func)
return func(*[bomb] * len(args))
def apply_template(wrapper, func, *args, **kw):
funcname, argspec = name_and_spec(func)
wrapname, wrapspec = name_and_spec(wrapper)
body = wrapper.__doc__ or getbody(wrapper)
if not body:
raise RuntimeError(
"Missing docstring or empty return value from"
" %s(%s) - please switch the calling code from using docstrings"
" to return values" % (wrapname, wrapspec)
)
body = body.replace('%','%%').replace('$args','%(argspec)s')
body = """
def %(wrapname)s(%(wrapspec)s):
def %(funcname)s(%(argspec)s): """+body+"""
return %(funcname)s
"""
body %= locals()
filename = "<%s wrapping %s at 0x%08X>" % (qname(wrapper), qname(func), id(func))
d ={}
exec compile(body, filename, "exec") in func.func_globals, d
f = d[wrapname](func, *args, **kw)
cache_source(filename, body, f)
f.func_defaults = func.func_defaults
f.__doc__ = func.__doc__
f.__dict__ = func.__dict__
return f
def rewrap(func, wrapper):
"""Create a wrapper with the signature of `func` and a body of `wrapper`
Example::
def before_and_after(func):
def decorated(*args, **kw):
print "before"
try:
return func(*args, **kw)
finally:
print "after"
return rewrap(func, decorated)
The above function is a normal decorator, but when users run ``help()``
or other documentation tools on the returned wrapper function, they will
see a function with the original function's name, signature, module name,
etc.
This function is similar in use to the ``@template_function`` decorator,
but rather than generating the entire decorator function in one calling
layer, it simply generates an extra layer for signature compatibility.
NOTE: the function returned from ``rewrap()`` will have the same attribute
``__dict__`` as the original function, so if you need to set any function
attributes you should do so on the function returned from ``rewrap()``
(or on the original function), and *not* on the wrapper you're passing in
to ``rewrap()``.
"""
def rewrap(__original, __decorated):
return """return __decorated($args)"""
return apply_template(rewrap, func, wrapper)
if sys.version<"2.5":
# We'll need this for monkeypatching linecache
def checkcache(filename=None):
"""Discard cache entries that are out of date.
(This is not checked upon each call!)"""
if filename is None:
filenames = linecache.cache.keys()
else:
if filename in linecache.cache:
filenames = [filename]
else:
return
for filename in filenames:
size, mtime, lines, fullname = linecache.cache[filename]
if mtime is None:
continue # no-op for files loaded via a __loader__
try:
stat = os.stat(fullname)
except os.error:
del linecache.cache[filename]
continue
if size != stat.st_size or mtime != stat.st_mtime:
del linecache.cache[filename]
def _cache_lines(filename, lines, owner=None):
if owner is None:
owner = filename
else:
from weakref import ref
owner = ref(owner, lambda r: linecache and linecache.cache.__delitem__(filename))
global linecache; import linecache
if sys.version<"2.5" and linecache.checkcache.__module__!=__name__:
linecache.checkcache = checkcache
linecache.cache[filename] = 0, None, lines, owner
def cache_source(filename, source, owner=None):
_cache_lines(filename, source.splitlines(True), owner)
def template_function(wrapper=None):
"""Decorator that uses its wrapped function's docstring as a template
Example::
def before_and_after(func):
@template_function
def wrap(__func, __message):
'''
print "before", __message
try:
return __func($args)
finally:
print "after", __message
'''
return wrap(func, "test")
The above code will return individually-generated wrapper functions whose
signature, defaults, ``__name__``, ``__module__``, and ``func_globals``
match those of the wrapped functions.
You can use define any arguments you wish in the wrapping function, as long
as the first argument is the function to be wrapped, and the arguments are
named so as not to conflict with the arguments of the function being
wrapped. (i.e., they should have relatively unique names.)
Note that the function body will *not* have access to the globals of the
calling module, as it is compiled with the globals of the *wrapped*
function! Thus, any non-builtin values that you need in the wrapper should
be passed in as arguments to the template function.
"""
if wrapper is None:
return decorate_assignment(lambda f,k,v,o: template_function(v))
return apply_template.__get__(wrapper)
def struct(*mixins, **kw):
"""Turn a function into a simple data structure class
This decorator creates a tuple subclass with the same name and docstring as
the decorated function. The class will have read-only properties with the
same names as the function's arguments, and the ``repr()`` of its instances
will look like a call to the original function. The function should return
a tuple of values in the same order as its argument names, as it will be
used by the class' constructor. The function can perform validation, add
defaults, and/or do type conversions on the values.
If the function takes a ``*``, argument, it should flatten this argument
into the result tuple, e.g.::
@struct()
def pair(first, *rest):
return (first,) + rest
The ``rest`` property of the resulting class will thus return a tuple for
the ``*rest`` arguments, and the structure's ``repr()`` will reflect the
way it was created.
The ``struct()`` decorator takes optional mixin classes (as positional
arguments), and dictionary entries (as keyword arguments). The mixin
classes will be placed before ``tuple`` in the resulting class' bases, and
the dictionary entries will be placed in the class' dictionary. These
entries take precedence over any default entries (e.g. methods, properties,
docstring, etc.) that are created by the ``struct()`` decorator.
"""
def callback(frame, name, func, old_locals):
def __new__(cls, *args, **kw):
result = func(*args, **kw)
if type(result) is tuple:
return tuple.__new__(cls, (cls,)+result)
else:
return result
def __repr__(self):
return name+tuple.__repr__(self[1:])
import inspect
args, star, dstar, defaults = inspect.getargspec(func)
d = dict(
__new__ = __new__, __repr__ = __repr__, __doc__=func.__doc__,
__module__ = func.__module__, __args__ = args, __star__ = star,
__slots__ = [],
)
for p,a in enumerate(args):
if isinstance(a,str):
d[a] = property(lambda self, p=p+1: self[p])
if star:
d[star] = property(lambda self, p=len(args)+1: self[p:])
d.update(kw)
return type(name, mixins+(tuple,), d)
return decorate_assignment(callback)
def synchronized(func=None):
"""Create a method synchronized by first argument's ``__lock__`` attribute
If the object has no ``__lock__`` attribute at run-time, the wrapper will
attempt to add one by creating a ``threading.RLock`` and adding it to the
object's ``__dict__``. If ``threading`` isn't available, it will use a
``dummy_threading.RLock`` instead. Neither will be imported unless the
method is called on an object that doesn't have a ``__lock__``.
This decorator can be used as a standard decorator (e.g. ``@synchronized``)
or as a Python 2.3-compatible decorator by calling it with no arguments
(e.g. ``[synchronized()]``).
"""
if func is None:
return decorate_assignment(lambda f,k,v,o: synchronized(v))
from inspect import getargspec
first_arg = getargspec(func)[0][0]
def wrap(__func):
return '''
try:
lock = $self.__lock__
except AttributeError:
try:
from threading import RLock
except ImportError:
from dummy_threading import RLock
lock = $self.__dict__.setdefault('__lock__',RLock())
lock.acquire()
try:
return __func($args)
finally:
lock.release()'''.replace('$self', first_arg)
return apply_template(wrap, func)
def frameinfo(frame):
"""Return (kind, module, locals, globals) tuple for a frame
'kind' is one of "exec", "module", "class", "function call", or "unknown".
"""
f_locals = frame.f_locals
f_globals = frame.f_globals
sameNamespace = f_locals is f_globals
hasModule = '__module__' in f_locals
hasName = '__name__' in f_globals
sameName = hasModule and hasName
sameName = sameName and f_globals['__name__']==f_locals['__module__']
module = hasName and sys.modules.get(f_globals['__name__']) or None
namespaceIsModule = module and module.__dict__ is f_globals
if not namespaceIsModule:
# some kind of funky exec
kind = "exec"
if hasModule and not sameNamespace:
kind="class"
elif sameNamespace and not hasModule:
kind = "module"
elif sameName and not sameNamespace:
kind = "class"
elif not sameNamespace:
kind = "function call"
else:
# How can you have f_locals is f_globals, and have '__module__' set?
# This is probably module-level code, but with a '__module__' variable.
kind = "unknown"
return kind,module,f_locals,f_globals
def decorate_class(decorator, depth=2, frame=None, allow_duplicates=False):
"""Set up `decorator` to be passed the containing class upon creation
This function is designed to be called by a decorator factory function
executed in a class suite. The factory function supplies a decorator that
it wishes to have executed when the containing class is created. The
decorator will be given one argument: the newly created containing class.
The return value of the decorator will be used in place of the class, so
the decorator should return the input class if it does not wish to replace
it.
The optional `depth` argument to this function determines the number of
frames between this function and the targeted class suite. `depth`
defaults to 2, since this skips the caller's frame. Thus, if you call this
function from a function that is called directly in the class suite, the
default will be correct, otherwise you will need to determine the correct
depth value yourself. Alternately, you can pass in a `frame` argument to
explicitly indicate what frame is doing the class definition.
This function works by installing a special class factory function in
place of the ``__metaclass__`` of the containing class. Therefore, only
decorators *after* the last ``__metaclass__`` assignment in the containing
class will be executed. Thus, any classes using class decorators should
declare their ``__metaclass__`` (if any) *before* specifying any class
decorators, to ensure that all class decorators will be applied."""
frame = enclosing_frame(frame, depth+1)
kind, module, caller_locals, caller_globals = frameinfo(frame)
if kind != "class":
raise SyntaxError(
"Class decorators may only be used inside a class statement"
)
elif not allow_duplicates and has_class_decorator(decorator, None, frame):
return
previousMetaclass = caller_locals.get('__metaclass__')
defaultMetaclass = caller_globals.get('__metaclass__', ClassType)
def advise(name,bases,cdict):
if '__metaclass__' in cdict:
del cdict['__metaclass__']
if previousMetaclass is None:
if bases:
# find best metaclass or use global __metaclass__ if no bases
meta = metaclass_for_bases(bases)
else:
meta = defaultMetaclass
elif metaclass_is_decorator(previousMetaclass):
# special case: we can't compute the "true" metaclass here,
# so we need to invoke the previous metaclass and let it
# figure it out for us (and apply its own advice in the process)
meta = previousMetaclass
else:
meta = metaclass_for_bases(bases, previousMetaclass)
newClass = meta(name,bases,cdict)
# this lets the decorator replace the class completely, if it wants to
return decorator(newClass)
# introspection data only, not used by inner function
# Note: these attributes cannot be renamed or it will break compatibility
# with zope.interface and any other code that uses this decoration protocol
advise.previousMetaclass = previousMetaclass
advise.callback = decorator
# install the advisor
caller_locals['__metaclass__'] = advise
def metaclass_is_decorator(ob):
"""True if 'ob' is a class advisor function"""
return isinstance(ob,FunctionType) and hasattr(ob,'previousMetaclass')
def iter_class_decorators(depth=2, frame=None):
frame = enclosing_frame(frame, depth+1)
m = frame.f_locals.get('__metaclass__')
while metaclass_is_decorator(m):
yield getattr(m, 'callback', None)
m = m.previousMetaclass
def has_class_decorator(decorator, depth=2, frame=None):
return decorator in iter_class_decorators(0, frame or sys._getframe(depth))
def metaclass_for_bases(bases, explicit_mc=None):
"""Determine metaclass from 1+ bases and optional explicit __metaclass__"""
meta = [getattr(b,'__class__',type(b)) for b in bases]
if explicit_mc is not None:
# The explicit metaclass needs to be verified for compatibility
# as well, and allowed to resolve the incompatible bases, if any
meta.append(explicit_mc)
if len(meta)==1:
# easy case
return meta[0]
classes = [c for c in meta if c is not ClassType]
candidates = []
for m in classes:
for n in classes:
if issubclass(n,m) and m is not n:
break
else:
# m has no subclasses in 'classes'
if m in candidates:
candidates.remove(m) # ensure that we're later in the list
candidates.append(m)
if not candidates:
# they're all "classic" classes
return ClassType
elif len(candidates)>1:
# We could auto-combine, but for now we won't...
raise TypeError("Incompatible metatypes",bases)
# Just one, return it
return candidates[0]
def decorate_assignment(callback, depth=2, frame=None):
"""Invoke 'callback(frame,name,value,old_locals)' on next assign in 'frame'
The frame monitored is determined by the 'depth' argument, which gets
passed to 'sys._getframe()'. When 'callback' is invoked, 'old_locals'
contains a copy of the frame's local variables as they were before the
assignment took place, allowing the callback to access the previous value
of the assigned variable, if any. The callback's return value will become
the new value of the variable. 'name' is the name of the variable being
created or modified, and 'value' is its value (the same as
'frame.f_locals[name]').
This function also returns a decorator function for forward-compatibility
with Python 2.4 '@' syntax. Note, however, that if the returned decorator
is used with Python 2.4 '@' syntax, the callback 'name' argument may be
'None' or incorrect, if the 'value' is not the original function (e.g.
when multiple decorators are used).
"""
frame = enclosing_frame(frame, depth+1)
oldtrace = [frame.f_trace]
old_locals = frame.f_locals.copy()
def tracer(frm,event,arg):
if event=='call':
# We don't want to trace into any calls
if oldtrace[0]:
# ...but give the previous tracer a chance to, if it wants
return oldtrace[0](frm,event,arg)
else:
return None
try:
if frm is frame and event !='exception':
# Aha, time to check for an assignment...
for k,v in frm.f_locals.items():
if k not in old_locals or old_locals[k] is not v:
break
else:
# No luck, keep tracing
return tracer
# Got it, fire the callback, then get the heck outta here...
frm.f_locals[k] = callback(frm,k,v,old_locals)
finally:
# Give the previous tracer a chance to run before we return
if oldtrace[0]:
# And allow it to replace our idea of the "previous" tracer
oldtrace[0] = oldtrace[0](frm,event,arg)
uninstall()
return oldtrace[0]
def uninstall():
# Unlink ourselves from the trace chain.
frame.f_trace = oldtrace[0]
sys.settrace(oldtrace[0])
# Install the trace function
frame.f_trace = tracer
sys.settrace(tracer)
def do_decorate(f):
# Python 2.4 '@' compatibility; call the callback
uninstall()
frame = sys._getframe(1)
return callback(
frame, getattr(f,'__name__',None), f, frame.f_locals
)
return do_decorate
def super_next(cls, attr):
for c in cls.__mro__:
if attr in c.__dict__:
yield getattr(c, attr).im_func
# Python 2.6 and above mix ABCMeta into various random places :-(
try:
from abc import ABCMeta as base
except ImportError:
base = type
class classy_class(base):
"""Metaclass that delegates selected operations back to the class"""
def __new__(meta, name, bases, cdict):
cls = super(classy_class, meta).__new__(meta, name, bases, cdict)
supr = super_next(cls, '__class_new__').next
return supr()(meta, name, bases, cdict, supr)
def __init__(cls, name, bases, cdict):
supr = super_next(cls, '__class_init__').next
return supr()(cls, name, bases, cdict, supr)
def __call__(cls, *args, **kw):
return cls.__class_call__.im_func(cls, *args, **kw)
if base is not type:
# Our instances do not support ABC-ness
def register(*args): raise NotImplementedError
__instancecheck__ = type.__dict__['__instancecheck__']
__subclasscheck__ = type.__dict__['__subclasscheck__']
class classy(object):
"""Base class for classes that want to be their own metaclass"""
__metaclass__ = classy_class
__slots__ = ()
def __class_new__(meta, name, bases, cdict, supr):
return type.__new__(meta, name, bases, cdict)
def __class_init__(cls, name, bases, cdict, supr):
return type.__init__(cls, name, bases, cdict)
def __class_call__(cls, *args, **kw):
return type.__call__(cls, *args, **kw)
__class_call__ = classmethod(__class_call__)
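# A minimal usage sketch (illustrative only; `Counted` is hypothetical and not
# part of this module): subclassing `classy` lets a class intercept its own
# instantiation without writing a separate metaclass.
if __name__ == '__main__':
    class Counted(classy):
        _instances = 0
        def __class_call__(cls, *args, **kw):
            cls._instances += 1
            return super(Counted, cls).__class_call__(*args, **kw)
    Counted(); Counted()
    assert Counted._instances == 2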
|
[
"antoine.mercadal@gmail.com"
] |
antoine.mercadal@gmail.com
|
20223d251cf7c1ee244f3ff6bda6aeac1170471e
|
02842943a8e8c5c53f5f8146234271446f1203ce
|
/102_morphological_analysis.py
|
eb9ada5fd4c6b12d0915f447c3a6585661eacd1e
|
[
"CC0-1.0"
] |
permissive
|
utda/portal_keyword
|
e38856747bdd413519fe249a2bf4a7c49011bc37
|
b83b5a70e766235361ec34e5d5d45610d649c248
|
refs/heads/master
| 2022-12-12T07:03:34.552994
| 2020-06-12T08:55:56
| 2020-06-12T08:55:56
| 252,589,741
| 0
| 0
|
CC0-1.0
| 2022-09-30T19:00:11
| 2020-04-02T23:48:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
# text-mining.py
# import janome, a Japanese morphological-analysis parser - 1
from janome.tokenizer import Tokenizer
import os
import glob
import csv
# create the object used for morphological analysis - 2
text = Tokenizer()
idir = "data/text"
odir = "data/ma"
os.makedirs(odir, exist_ok=True)
files = glob.glob(idir+'/*.txt')  # scan the input directory
for i in range(len(files)):
if i % 100 == 0:
print(i+1, len(files))
file = files[i]
output = file.replace(idir, odir)
    # load the data from the txt file - 3
    with open(file) as text_file:
        txt = text_file.read()
    # process the text line by line - 5
word_dic = {}
lines_1 = txt.split("\r\n")
for line in lines_1:
malist = text.tokenize(line)
for w in malist:
word = w.surface
            ps = w.part_of_speech  # part of speech - 6
if ps.find("名詞") < 0:
                continue  # count nouns ("名詞") only - 7
if not word.isalpha():
continue
if not word in word_dic:
word_dic[word] = 0
word_dic[word] += 1
if "『" in word:
print(word)
    # output the most frequently used words - 8
keys = sorted(word_dic.items(), key=lambda x: x[1], reverse=True)
f2 = open(output, 'w')
writer = csv.writer(f2, lineterminator='\n')
writer.writerow(["word", "cnt"])
for word, cnt in keys:
writer.writerow([word, cnt])
f2.close()
|
[
"na.kamura.1263@gmail.com"
] |
na.kamura.1263@gmail.com
|
fb3ec15864cfb1866c1711d0586b7d7b0fff7090
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/particles/particles_ie013_xsd/__init__.py
|
e0ffd1abcba9a881fbd645379ab76771f0c5d955
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 169
|
py
|
from output.models.ms_data.particles.particles_ie013_xsd.particles_ie013 import (
Base,
Doc,
Testing,
)
__all__ = [
"Base",
"Doc",
"Testing",
]
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
f5f6a28ca792b417bdb0991b2ed1d631db33abdd
|
87d4572262010ad6fab9a68e6fd3ffe956183f86
|
/SadguruProject/SadguruTeaProj/tea_app/serializer.py
|
778ff0cf149d3d3eecaa3aaa3c2dd2ced2190f35
|
[] |
no_license
|
NikhilDeshbhratar/ThinkBridgeTest
|
9f4b557660c0de2227bb4e0674a8c696498e2d60
|
4c39b418ba324defdc33807e6292204a2b5dc36d
|
refs/heads/master
| 2023-02-12T15:47:04.628706
| 2020-05-21T07:04:49
| 2020-05-21T07:04:49
| 265,541,580
| 0
| 0
| null | 2021-01-06T03:00:25
| 2020-05-20T11:18:01
|
Python
|
UTF-8
|
Python
| false
| false
| 346
|
py
|
from rest_framework import serializers
from .models import Tea
class TeaSerializer(serializers.ModelSerializer):
class Meta:
model = Tea
fields = "__all__"
class TeaListSerializer(serializers.ModelSerializer):
class Meta:
model = Tea
fields = ["id","name","price","description","created_at","image_url"]
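# Minimal usage sketch (hypothetical view code, not part of this module):
#   serializer = TeaSerializer(data=request.data)
#   if serializer.is_valid():
#       tea = serializer.save()
#   listing = TeaListSerializer(Tea.objects.all(), many=True).data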
|
[
"nikhil28.deshbhratar@gmail.com"
] |
nikhil28.deshbhratar@gmail.com
|
46d5774627783aa5520575af7c9d0f2cde6b0511
|
eaf3c103ec2e0aaf5e3660fccc8f3dda6e55b44e
|
/hw2/FindBestParam_ma.py
|
25b2f83a25f2a8f8f7818b62a51bd1eb806799f6
|
[] |
no_license
|
b06705039/FinTech
|
d0ca963fac3565c6de2c4de757496abe2f8034b1
|
f5dae8fe1ff869212f8dae4dc2b0afb2692fb234
|
refs/heads/master
| 2020-09-16T11:36:49.953237
| 2020-04-20T02:53:00
| 2020-04-20T02:53:00
| 257,004,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,005
|
py
|
import sys
import numpy as np
import pandas as pd
# Decision of the current day by the current price, with 3 modifiable parameters
def myStrategy(pastPriceVec, currentPrice, windowSize, alpha, beta):
action=0 # action=1(buy), -1(sell), 0(hold), with 0 as the default action
dataLen=len(pastPriceVec) # Length of the data vector
if dataLen==0:
return action
# Compute ma
if dataLen<windowSize:
        ma=np.mean(pastPriceVec) # If the given price vector is smaller than windowSize, compute the MA by averaging the whole vector
else:
windowedData=pastPriceVec[-windowSize:] # Compute the normal MA using windowSize
ma=np.mean(windowedData)
# Determine action
if (currentPrice-ma)>alpha: # If price-ma > alpha ==> buy
action=1
elif (currentPrice-ma)<-beta: # If price-ma < -beta ==> sell
action=-1
return action
# Compute return rate over a given price vector, with 3 modifiable parameters
def computeReturnRate(priceVec, windowSize, alpha, beta):
capital=1000 # Initial available capital
capitalOrig=capital # original capital
dataCount=len(priceVec) # day size
suggestedAction=np.zeros((dataCount,1)) # Vec of suggested actions
stockHolding=np.zeros((dataCount,1)) # Vec of stock holdings
total=np.zeros((dataCount,1)) # Vec of total asset
realAction=np.zeros((dataCount,1)) # Real action, which might be different from suggested action. For instance, when the suggested action is 1 (buy) but you don't have any capital, then the real action is 0 (hold, or do nothing).
# Run through each day
for ic in range(dataCount):
currentPrice=priceVec[ic] # current price
suggestedAction[ic]=myStrategy(priceVec[0:ic], currentPrice, windowSize, alpha, beta) # Obtain the suggested action
# get real action by suggested action
if ic>0:
stockHolding[ic]=stockHolding[ic-1] # The stock holding from the previous day
if suggestedAction[ic]==1: # Suggested action is "buy"
if stockHolding[ic]==0: # "buy" only if you don't have stock holding
stockHolding[ic]=capital/currentPrice # Buy stock using cash
capital=0 # Cash
realAction[ic]=1
elif suggestedAction[ic]==-1: # Suggested action is "sell"
if stockHolding[ic]>0: # "sell" only if you have stock holding
capital=stockHolding[ic]*currentPrice # Sell stock to have cash
stockHolding[ic]=0 # Stocking holding
realAction[ic]=-1
elif suggestedAction[ic]==0: # No action
realAction[ic]=0
else:
assert False
total[ic]=capital+stockHolding[ic]*currentPrice # Total asset, including stock holding and cash
returnRate=(total[-1]-capitalOrig)/capitalOrig # Return rate of this run
return returnRate
if __name__=='__main__':
returnRateBest=-1.00 # Initial best return rate
df=pd.read_csv(sys.argv[1]) # read stock file
adjClose=df["Adj Close"].values # get adj close as the price vector
windowSizeMin=3; windowSizeMax=6; # Range of windowSize to explore
alphaMin=5; alphaMax=10; # Range of alpha to explore
betaMin=13; betaMax=18 # Range of beta to explore
# Start exhaustive search
for windowSize in range(windowSizeMin, windowSizeMax+1): # For-loop for windowSize
print("windowSize=%d" %(windowSize))
for alpha in range(alphaMin, alphaMax+1): # For-loop for alpha
print("\talpha=%d" %(alpha))
for beta in range(betaMin, betaMax+1): # For-loop for beta
print("\t\tbeta=%d" %(beta), end="") # No newline
returnRate=computeReturnRate(adjClose, windowSize, alpha, beta) # Start the whole run with the given parameters
print(" ==> returnRate=%f " %(returnRate))
if returnRate > returnRateBest: # Keep the best parameters
windowSizeBest=windowSize
alphaBest=alpha
betaBest=beta
returnRateBest=returnRate
print("Best settings: windowSize=%d, alpha=%d, beta=%d ==> returnRate=%f" %(windowSizeBest,alphaBest,betaBest,returnRateBest)) # Print the best result
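# Usage sketch (assumes a Yahoo-Finance-style CSV with an "Adj Close" column;
# "SPY.csv" is only an example file name):
#   python FindBestParam_ma.py SPY.csv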
|
[
"b06705039@ntu.edu.tw"
] |
b06705039@ntu.edu.tw
|
353b3aac10cc74d54979bc18d4bb35675b95e170
|
00030a24bb6ca535240ba4c737c0b22b1b931acd
|
/flight_log_analyzer/flight_log_analyzer.py
|
92fc4733b73e8cadc7f6ccd4d0a83eb09fe3bee8
|
[] |
no_license
|
UCSD-E4E/e4e-tools
|
80ebee514a216769e0e7d29438441f2177c90a5f
|
e68c8bf29814b6a746d9b384cb06f7554134ad6a
|
refs/heads/master
| 2023-05-26T18:41:30.716768
| 2023-05-15T15:46:58
| 2023-05-15T15:46:58
| 9,737,213
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,854
|
py
|
#!/usr/bin/env python
import numpy as np
import argparse
import Tkinter as tk
import tkFileDialog
import datetime
import os
def leap(date):
"""
Return the number of leap seconds since 6/Jan/1980
:param date: datetime instance
:return: leap seconds for the date (int)
"""
if date < datetime.datetime(1981, 6, 30, 23, 59, 59):
return 0
leap_list = [(1981, 6, 30), (1982, 6, 30), (1983, 6, 30),
(1985, 6, 30), (1987, 12, 31), (1989, 12, 31),
(1990, 12, 31), (1992, 6, 30), (1993, 6, 30),
(1994, 6, 30), (1995, 12, 31), (1997, 6, 30),
(1998, 12, 31), (2005, 12, 31), (2008, 12, 31),
(2012, 6, 30), (2015, 6, 30)]
leap_dates = map(lambda x: datetime.datetime(x[0], x[1], x[2], 23, 59, 59),
leap_list)
for j in xrange(len(leap_dates[:-1])):
if leap_dates[j] < date < leap_dates[j + 1]:
return j + 1
return len(leap_dates)
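# Note: the table above ends at 2015-06-30 and omits the leap second added on
# 2016-12-31, so dates after that point come out one second early; e.g.
# leap(datetime.datetime(2017, 1, 1)) returns 17 with this table.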
def main():
parser = argparse.ArgumentParser(description='E4E ArduPilot Autopilot '
'Flight Log Analyzer')
parser.add_argument('-i', '--input', help = 'Input flight log',
metavar = 'log', dest = 'log', default = None)
parser.add_argument('-s', '--split_log', action = 'store_true',
help = 'If present, split log into individual flights',
dest = 'split_log')
args = parser.parse_args()
fileName = args.log
split_log = args.split_log
if fileName is None:
root = tk.Tk()
root.withdraw()
root.update()
options = {}
options['filetypes'] = [('MAVLINK Log Files', '.log')]
        # askopenfilename returns the selected path as a string (empty on cancel)
        fileName = tkFileDialog.askopenfilename(**options)
        if len(fileName) == 0:
            return
else:
if os.path.splitext(os.path.basename(fileName))[1] != '.log':
print("Error: Input .log files only!")
return
    timeInAir = 0
    prevCurr = -1
FMT = []
PARM = []
takeoff_lineNums = []
landing_lineNums = []
lineNum = 0
with open(fileName) as file:
for line in file:
elements = [element.strip() for element in line.split(',')]
if elements[0] == 'FMT':
FMT.append(line)
if elements[1] == 'PARM':
PARM.append(line)
if elements[0] == 'GPS':
gps_time = int(elements[2])
gps_week = int(elements[3])
apm_time = int(elements[13])
offset = gps_time - apm_time;
lastGPS = lineNum
if elements[0] == 'CURR':
if int(elements[4]) > 200:
if prevCurr != -1:
timeInAir = timeInAir + int(elements[1]) - prevCurr
prevCurr = int(elements[1])
else:
prevCurr = int(elements[1])
secs_in_week = 604800
gps_epoch = datetime.datetime(1980, 1, 6, 0, 0, 0)
date_before_leaps = (gps_epoch +
datetime.timedelta(seconds = gps_week *
secs_in_week + (prevCurr + offset) / 1000.0))
date = (date_before_leaps -
datetime.timedelta(seconds=leap(date_before_leaps)))
print("Takeoff at %s UTC" %
(date.strftime('%Y-%m-%d %H:%M:%S')))
takeoff_lineNums.append(lastGPS)
else:
if prevCurr != -1:
landing_lineNums.append(lineNum)
prevCurr = -1
lineNum = lineNum + 1
print('')
timeInAir = timeInAir / 1000.0 / 60 / 60
print("Time In Air: %.2f" % timeInAir)
if split_log:
for i in xrange(len(takeoff_lineNums)):
            # splitext cleanly removes the ".log" extension before adding the suffix
            output_filename = (os.path.splitext(os.path.basename(fileName))[0] +
                               "_%d.log" % (i + 1))
output_file = open(os.path.join(os.path.dirname(fileName),
output_filename), 'w')
for line in FMT:
output_file.write(line)
for line in PARM:
output_file.write(line)
lineNum = 0
with open(fileName) as file:
for line in file:
if (lineNum >= takeoff_lineNums[i] and lineNum <=
landing_lineNums[i]):
output_file.write(line)
lineNum = lineNum + 1
output_file.close()
if __name__ == '__main__':
main()
|
[
"ntlhui@gmail.com"
] |
ntlhui@gmail.com
|
300bf255e57aa3bb3508afab1994003379177da8
|
5a5cc86b58e8f09495e5e2f9dcfae77159b4cbf6
|
/练习1.py
|
1e683124ed011e3dfb1a7c7ccc3e6538a45326d1
|
[] |
no_license
|
Wangyinglihai/tijiaozuoye
|
b06711238d0e17a1fa2b9e0ba46d936a0a29691c
|
f7fa802839268a2a9d63acbccfc44b290900779a
|
refs/heads/master
| 2020-03-23T21:37:14.814676
| 2018-07-24T09:48:20
| 2018-07-24T09:48:20
| 142,120,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 17 17:38:43 2018
Exercise 1:
1. Define a list of weather temperatures, giving the temperature for each day
2. Print the week's weather, with Wednesday printed specially: 周三 + temperature
@author: user
"""
print('一周的温度')
a=[21,22,23,24,25,26,27]
print(a[0])
print(a[1])
print('the temp of 周三 is:'+str(a[2]))
print(a[3])
print(a[4])
print(a[5])
|
[
"noreply@github.com"
] |
Wangyinglihai.noreply@github.com
|
4894fa27068d286744754d2916a964a130d12341
|
f789170ebaef7c0dc06695e60d54981d5484de8d
|
/src/RSS_filter.py
|
ed3255b5b583099e7301d0f47b7ac6938fe8b51a
|
[
"MIT"
] |
permissive
|
cega/RSS-filter
|
39cfc05bc753fac1922ce2d256f0f745f6919b4f
|
84092e74fc9dccb8c2e169f2ff27e49134107e75
|
refs/heads/master
| 2020-12-30T17:45:04.079484
| 2013-03-23T16:02:39
| 2013-03-23T16:02:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,138
|
py
|
#!/usr/bin/env python2
"""
RSS Filter
Usage:
RSS-filter
RSS-filter -e | --edit
RSS-filter -l | --list
RSS-filter -h | --help
Options:
-h --help Show this message.
-e --edit Edit the filters with your default editor.
-l --list List feed titles.
"""
import os
import errno
import stat
from cStringIO import StringIO
import subprocess
import sys
import re
import logging
from collections import OrderedDict
import libgreader
import requests
import configobj
import validate
import appdirs
import docopt
import demjson
class GoogleReader:
"""A partial wrapper around libgreader."""
def __init__(self, client_ID, client_secret, refresh_token):
self.client_ID = client_ID
self.client_secret = client_secret
if refresh_token:
self.refresh_token = refresh_token
self.auth = self._authenticate()
self.libgreader = libgreader.GoogleReader(self.auth)
def _authenticate(self):
payload = {"client_id": self.client_ID, "client_secret": self.client_secret,
"refresh_token": self.refresh_token, "grant_type": "refresh_token"}
r = requests.post("https://accounts.google.com/o/oauth2/token", data=payload)
try:
access_token = r.json()["access_token"]
except KeyError:
logging.critical("Couldn't authenticate with Google Reader.")
print "Error. Couldn't authenticate with Google Reader."
exit(3)
auth = libgreader.OAuth2Method(self.client_ID, self.client_secret)
auth.setRedirectUri("urn:ietf:wg:oauth:2.0:oob")
auth.access_token = access_token
return auth
def get_refresh_token(self):
auth = libgreader.OAuth2Method(self.client_ID, self.client_secret)
auth.setRedirectUri("urn:ietf:wg:oauth:2.0:oob")
auth_URL = auth.buildAuthUrl()
params = {"format": "json", "url": auth_URL, "logstats": 0}
r = requests.get("http://is.gd/create.php", params=params)
if r.ok:
auth_URL = r.json()["shorturl"]
print ("To authorize access to Google Reader, visit this URL "
"and follow the instructions:\n\n{}\n").format(auth_URL)
auth_code = raw_input("Enter verification code: ")
print
payload = {"client_id": self.client_ID, "client_secret": self.client_secret, "code": auth_code,
"redirect_uri": "urn:ietf:wg:oauth:2.0:oob", "grant_type": "authorization_code"}
r = requests.post("https://accounts.google.com/o/oauth2/token", data=payload)
return r.json()["refresh_token"]
def user_info(self):
return self.libgreader.getUserInfo()
def subscription_list(self):
"""
Return an OrderedDict mapping tags to their contained feeds (sorted by tag label and feed
title, respectively). Non-tagged feeds are put in a tag called "<Untagged>" in the last position.
"""
self.libgreader.buildSubscriptionList()
categories = {cat: sorted(cat.getFeeds(), key=lambda f: f.title)
for cat in self.libgreader.getCategories()}
categories = sorted(categories.items(), key=lambda c: c[0].label)
feeds = [feed for feed in self.libgreader.getFeeds() if not feed.getCategories()]
if feeds:
untagged = type("Category", (), {"label": u"<Untagged>"})()
categories.append((untagged, feeds))
return OrderedDict(categories)
def category_list(self):
"""
Return an OrderedDict mapping category labels to a list of tuples containing the unread count and
title for each feed in the category.
"""
categories = self.subscription_list()
feeds = {cat.label: [(feed.unread, feed.title) for feed in categories[cat]] for cat in categories
if cat.label != u"<Untagged>"}
untagged = {cat.label: [(feed.unread, feed.title) for feed in categories[cat]] for cat in categories
if cat.label == u"<Untagged>"}
sorted_feeds = sorted(feeds.items())
sorted_feeds.extend(untagged.items())
return OrderedDict(sorted_feeds)
def get_unread_items(self, feed):
items = []
while len(items) < feed.unread:
if items:
feed.loadMoreItems(excludeRead=True)
else:
feed.loadItems(excludeRead=True)
items = [i for i in feed.getItems() if i.isUnread()]
return items
def _apply_filter(self, feed, patterns):
"""Apply filters to a feed. Returns the number of items marked-as-read"""
items = self.get_unread_items(feed)
count = 0
for pattern in patterns:
regex = re.compile(pattern)
for item in items:
if regex.search(item.title):
count += 1
item.markRead()
return count
def apply_filters(self, filters):
feed_count = 0
item_count = 0
self.auth.setActionToken()
filtered_feeds = set()
categories = self.subscription_list()
print u"Applying filters..."
try:
universal_patterns = filters[u"*"]
except KeyError:
universal_patterns = []
for category in categories:
try:
category_has_matching_feeds = False
for feed in categories[category]:
                patterns = list(universal_patterns)  # copy: extending must not mutate the shared universal list
try:
patterns.extend(filters[feed.title])
except KeyError:
pass
                if feed.id not in filtered_feeds:
filtered_feeds.add(feed.id)
if not category_has_matching_feeds:
category_has_matching_feeds = True
print u"\n{}\n{}".format(category.label, u"=" * len(category.label))
print u"Searching \"{}\" for matching items...".format(feed.title),
sys.stdout.flush()
feed_count += 1
items_found = self._apply_filter(feed, patterns)
item_count += items_found
print u"found {}.".format(items_found)
except KeyboardInterrupt:
print "skipped."
# skip to next category
return feed_count, item_count
def check_config(config_dir):
try:
os.makedirs(config_dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(config_dir):
pass
else:
raise
spec = """client_ID = string\nclient_secret = string\nrefresh_token = string"""
config_spec = configobj.ConfigObj(StringIO(spec))
config_path = os.path.join(config_dir, "settings.ini")
config = configobj.ConfigObj(config_path, configspec=config_spec)
valid = config.validate(validate.Validator())
if valid is True:
try:
with open(os.path.join(config_dir, "filters.json")) as filters_file:
filters_json = filters_file.read()
try:
filters = demjson.decode(filters_json, encoding="utf8", strict=True, allow_comments=True)
except demjson.JSONDecodeError as e:
filters = (False, e.pretty_description())
return config, filters
except IOError as e:
if e.errno == errno.ENOENT:
f = open(os.path.join(config_dir, "filters.json"), "w")
f.write('{\n'
' // comment\n'
' "feed name": ["filter regexp", "another filter regexp"]\n'
'}\n')
                f.close()
                # a fresh filters template was just created; there are no filters yet
                return config, None
else:
raise
elif valid is False:
config["client_ID"] = raw_input("Google OAuth Client ID\n: ")
config["client_secret"] = raw_input("Google OAuth Client Secret\n: ")
GR = GoogleReader(config["client_ID"], config["client_secret"], None)
config["refresh_token"] = GR.get_refresh_token()
try:
config.write()
os.chmod(config_path, stat.S_IRUSR | stat.S_IWUSR) # mode -rw-------
print "Config written successfully."
exit(1)
except Exception as e:
print "{}\nConfig file was not written.".format(e)
exit(2)
def edit_filters(filters, config_dir):
"""open the filters file with the default editor"""
if filters is None:
print "No filters specified."
elif isinstance(filters, tuple) and filters[0] is False:
print "Filters file is invalid: {}\n".format(filters[1])
# else: "--edit" option was passed
filters_path = os.path.join(config_dir, "filters.json")
print "Opening filters file (\"{}\")...".format(filters_path)
if sys.platform.startswith("darwin"): # OSX
subprocess.call(("open", filters_path))
elif os.name == "nt": # Windows
os.startfile(filters_path)
elif os.name == "posix": # other *nix
try:
with open(os.devnull, "w") as fnull:
retcode = subprocess.call(("xdg-open", filters_path), stderr=fnull)
if retcode != 0:
raise OSError
except OSError:
editor = os.environ["EDITOR"]
subprocess.call((editor, filters_path))
def list_feeds(reader):
"""
Print the user's subscribed feeds and their respective unread counts,
separated by category name and ordered alphabetically.
"""
categories = reader.category_list()
col_width = max(len(str(unread_count)) for unread_count in
[feed[0] for cat in categories for feed in categories[cat]]) + 4
for cat in categories:
try:
print "\n{}\n{}".format(cat, "=" * len(cat))
except UnicodeEncodeError:
print "\n{}\n{}".format(cat, "=" * len(cat)).encode("cp850", "backslashreplace")
for feed in categories[cat]:
try:
print "".join(unicode(column).ljust(col_width) for column in feed)
except UnicodeEncodeError:
print "".join(unicode(column).ljust(col_width)
for column in feed).encode("cp850", "backslashreplace")
def main():
args = docopt.docopt(__doc__)
config_dir = appdirs.user_data_dir("RSS-filter", "U2Ft")
config, filters = check_config(config_dir)
logging.basicConfig(filename=os.path.join(config_dir, "RSS-filter.log"), level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S", format="%(asctime)s: %(message)s")
# silence requests.packages.urllib3's logging of every connection at level INFO
requests_logger = logging.getLogger("requests.packages.urllib3")
requests_logger.setLevel(logging.WARNING)
if isinstance(filters, tuple) or args["--edit"]:
edit_filters(filters, config_dir)
exit(4)
reader = GoogleReader(config["client_ID"], config["client_secret"], config["refresh_token"])
if args["--list"]:
list_feeds(reader)
exit(0)
feed_count, item_count = reader.apply_filters(filters)
if feed_count == 1:
if item_count == 1:
logging.info("1 matching item was found in 1 matching feed.")
print "\n1 matching item was found in 1 matching feed."
else:
logging.info("{} matching items were found in 1 matching feed.".format(item_count))
print "\n{} matching items were found in 1 matching feed.".format(item_count)
else:
if item_count == 1:
logging.info("1 matching item was found in {} matching feeds.".format(feed_count))
print "\n1 matching item was found in {} matching feeds.".format(feed_count)
else:
logging.info("{} matching items were found in {} matching feeds.".format(item_count, feed_count))
print "\n{} matching items were found in {} matching feeds.".format(item_count, feed_count)
if __name__ == "__main__":
main()
|
[
"github@u2ft.me"
] |
github@u2ft.me
|
8a06cc0bb2e553d32d22186fb3d417bad7e6eca4
|
62e8affbac7b31f0075f67a12391031132ea42f6
|
/code/excluded/our/deepgrudgy1.py
|
6b8ce4ae8a8b8e33554f58c6661429349f5be55f
|
[
"MIT"
] |
permissive
|
CompetitiveMazeSolutions/PrisonersDilemmaTournament
|
0bab13ec9fc0963b2acb58791ec84e47897ed155
|
c8422af02d434399db8a7db05a8b944d76f0afaf
|
refs/heads/main
| 2023-05-05T19:14:16.525405
| 2021-05-27T01:28:58
| 2021-05-27T01:28:58
| 369,590,104
| 0
| 0
| null | 2021-05-21T16:12:00
| 2021-05-21T16:11:59
| null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
import numpy as np
def strategy(history, memory):
if memory is None:
MAX = 0
MIN = -2
coop_level = 0
else:
MAX, MIN, coop_level = memory
n = history.shape[1]
    if n >= 1:
        coop_level += 1 if history[1,-1] else -1.5
        MIN -= 1/n  # MAX stays at its initial value; only the floor drifts down
if coop_level > MAX:
coop_level = MAX
if coop_level < MIN:
coop_level = MIN
choice = coop_level >= 0
return choice, (MAX,MIN,coop_level)
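# Minimal usage sketch (assumes the tournament passes a 2xN numpy array where
# row 1 holds the opponent's past moves and 1 means "cooperate"):
if __name__ == "__main__":
    past = np.array([[1, 1, 0], [1, 0, 1]])
    move, mem = strategy(past, None)
    print(move, mem)  # True, (0, -2.333..., 0)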
|
[
"greenlee04@gmail.com"
] |
greenlee04@gmail.com
|
24351931a129027ae9d1350e8f467ed17eb4a146
|
3773bc425b338cf6b73451885a378ed5563d27c2
|
/char/ascii.py
|
125f9ca24c7ebe5f915de14a630944a53c70970f
|
[] |
no_license
|
xiangunremarkable/hiyanlou-code
|
009b2bf5bb23ef562892de9949b1dec424e9ecbe
|
13e5805727e37a080b460491f06f49cbafa50432
|
refs/heads/master
| 2023-01-21T03:06:58.698008
| 2020-11-27T03:22:39
| 2020-11-27T03:22:39
| 314,746,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
from PIL import Image  # image-processing library
import argparse  # command-line argument handling
# First, build the ArgumentParser instance that handles command-line input
parser = argparse.ArgumentParser()
# Define the input file, the output file, and the width and height of the ASCII art
parser.add_argument('file')  # input file
parser.add_argument('-o','--output')  # output file
parser.add_argument('--width',type = int, default = 80)  # output width
parser.add_argument('--height', type = int, default = 80)  # output height
# Parse and fetch the arguments
args = parser.parse_args()
# Path of the input image
IMG = args.file
# Width of the ASCII art
WIDTH = args.width
# Height of the ASCII art
HEIGHT = args.height
# Output path of the ASCII art
OUTPUT = args.output
# Character set used for the ASCII art
ascii_char = list("$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\|()1{}[]?-_+~<>i!lI;:,\"^`'. ")
# Function mapping an RGB value to a character
def get_char(r,g,b,alpha = 256):
    # check the alpha value
    if alpha == 0:
        return ' '
    # length of the character set (70 here)
    length = len(ascii_char)
    # convert the RGB value to a gray value in the range 0-255
    gray = int(0.2126*r+0.7152*g+0.0722*b)
    # gray spans 0-255 while the character set has only 70 entries,
    # so map the gray value onto a character index as follows
    unit = (256.0+1)/length
    # return the character corresponding to this gray value
return ascii_char[int(gray/unit)]
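# Worked example: black (0,0,0) has gray 0, which maps to index 0 ('$');
# white (255,255,255) has gray 255, index int(255/((256.0+1)/70)) = 69, i.e. ' '.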
if __name__ == '__main__':
    # open the image and resize it to the target width and height
    im = Image.open(IMG)
    im = im.resize((WIDTH,HEIGHT), Image.NEAREST)
    # initialize the output string
    txt = ""
    # iterate over every row of the image
    for i in range(HEIGHT):
        # iterate over every column in the row
        for j in range(WIDTH):
            # convert the RGB pixel at (j, i) to a character and append it to txt
            txt += get_char(*im.getpixel((j,i)))
        # append a newline after finishing each row
        txt += '\n'
    # print to the screen
    print(txt)
    # write the ASCII art to a file
if OUTPUT:
with open(OUTPUT,'w') as f:
f.write(txt)
else:
with open("output.txt",'w') as f:
f.write(txt)
|
[
"3553667047@qq.com"
] |
3553667047@qq.com
|
4ff812638778fa97edd09c504ffcd6dd6cdc11fa
|
239ea6447abfc78571a64e1f7e0014e8d6258381
|
/novice/02-02/unit_test_soring.py
|
e623454d5075aaee0a9f8f27b9585d14d703459d
|
[] |
no_license
|
faizal-alkaff/praxis-academy
|
b6110661968f94d2cfaff56ac6220ef38da05da6
|
9c3069999ffaecd39016a015cbe6e9ec031991bc
|
refs/heads/master
| 2020-09-21T05:33:25.644890
| 2019-12-20T08:53:22
| 2019-12-20T08:53:22
| 224,646,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
import unittest
from sorting_method import *
class TestMethods(unittest.TestCase):
def test_bubblesort(self):
bbllist = [8, 11, 3, 22, 16]
bubbleSort(bbllist)
self.assertEqual(bbllist, [3, 8, 11, 16, 22])
def test_quick_sort(self):
quickSort = [22, 5, 1, 18, 99]
quick_sort(quickSort)
self.assertEqual(quickSort, [1, 5, 18, 22, 99])
def test_insertion_sort(self):
insertionSort = [9, 8, 19, 0, 34]
insertion_sort(insertionSort)
self.assertEqual(insertionSort, [0, 8, 9, 19, 34])
def test_heap_sort(self):
heapSort = [35, 12, 43, 8, 51]
heap_sort(heapSort)
self.assertEqual(heapSort, [8, 12, 35, 43, 51])
def test_selection_sort(self):
selectionSort = [12, 8, 3, 20, 11]
selection_sort(selectionSort)
self.assertEqual(selectionSort, [3, 8, 11, 12, 20])
if __name__ == '__main__':
unittest.main()
|
[
"alkaff.faizal@gmail.com"
] |
alkaff.faizal@gmail.com
|
0cd0e4e8ac5f482d0c574c61b50f82a0ddd477af
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/f30589c91d8946586faff2c994e99395239bd50b-<main>-fix.py
|
1d51b1b9cb0c17280c516c955697eab9c96e41df
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(name=dict(required=True), eradicate=dict(default='false', type='bool'), state=dict(default='present', choices=['present', 'absent']), size=dict()))
required_if = [('state', 'present', ['size'])]
module = AnsibleModule(argument_spec, required_if=required_if, supports_check_mode=True)
if (not HAS_PURESTORAGE):
module.fail_json(msg='purestorage sdk is required for this module in volume')
state = module.params['state']
array = get_system(module)
volume = get_volume(module, array)
if ((state == 'present') and (not volume)):
create_volume(module, array)
elif ((state == 'present') and volume):
update_volume(module, array)
elif ((state == 'absent') and volume):
delete_volume(module, array)
elif ((state == 'absent') and (not volume)):
module.exit_json(changed=False)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
69b79f560be12c0e9e42677a4b97215c43d4af93
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_cuneiform.py
|
aa5197e96b7bd8efc91b06c79ac4112f74a72e7c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
# class header
class _CUNEIFORM():
def __init__(self,):
self.name = "CUNEIFORM"
self.definitions = [u'of a form of writing used for over 3,000 years until the 1st century BC in the ancient countries of Western Asia', u'pointed at one end and wide at the other: ']
self.parents = []
        self.children = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9a4e9897475ce33d6365819dd1de00ebac7154ee
|
16e54286bb66e4173f746936c829dda2c280ce68
|
/python/13369.世界时钟(时区处理).py
|
709107d93e28b099cd3926247907b96ea60c0dcf
|
[] |
no_license
|
569985011/Work-Space
|
653b932ada697d29178c2211a40d7de7650f9421
|
9afeb1846433d22514a0997b7a48254ce85f5009
|
refs/heads/master
| 2021-05-04T00:28:23.098655
| 2020-09-30T11:32:57
| 2020-09-30T11:32:57
| 71,839,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
import datetime
moscow_time = datetime.datetime(2019, 3, 18, 18, 13, 27)
london_time = moscow_time - datetime.timedelta(hours=+3)  # Moscow is UTC+3 year-round
beijing_time = london_time + datetime.timedelta(hours=+8)
tokyo_time = london_time + datetime.timedelta(hours=+9)
newyork_time = london_time + datetime.timedelta(hours=-4)
print("伦敦 " + datetime.datetime.strftime(london_time, "%Y-%m-%d %H:%M:%S"))
print("北京 " + datetime.datetime.strftime(beijing_time, "%Y-%m-%d %H:%M:%S"))
print("东京 " + datetime.datetime.strftime(tokyo_time, "%Y-%m-%d %H:%M:%S"))
print("纽约 " + datetime.datetime.strftime(newyork_time, "%Y-%m-%d %H:%M:%S"))
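# Fixed offsets ignore daylight-saving transitions; a zoneinfo-based sketch
# (Python 3.9+) handles them automatically:
#   from zoneinfo import ZoneInfo
#   moscow = datetime.datetime(2019, 3, 18, 18, 13, 27, tzinfo=ZoneInfo("Europe/Moscow"))
#   print(moscow.astimezone(ZoneInfo("America/New_York")))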
|
[
"Chauncy523@gmail.com"
] |
Chauncy523@gmail.com
|
25b0faff57a134389ac668ba40d1d3421f140816
|
08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc
|
/src/mnistk/networks/conv1dtanh_24.py
|
c8888a08e65cd985171dd5b7947bf12cd3c0dedf
|
[] |
no_license
|
ahgamut/mnistk
|
58dadffad204602d425b18549e9b3d245dbf5486
|
19a661185e6d82996624fc6fcc03de7ad9213eb0
|
refs/heads/master
| 2021-11-04T07:36:07.394100
| 2021-10-27T18:37:12
| 2021-10-27T18:37:12
| 227,103,881
| 2
| 1
| null | 2020-02-19T22:07:24
| 2019-12-10T11:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
# -*- coding: utf-8 -*-
"""
conv1dtanh_24.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class Conv1dTanh_24(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Conv1d(in_channels=16, out_channels=22, kernel_size=(11,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=True, padding_mode='zeros')
self.f1 = nn.Conv1d(in_channels=22, out_channels=16, kernel_size=(38,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f2 = nn.Conv1d(in_channels=16, out_channels=27, kernel_size=(2,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f3 = nn.Tanh()
self.f4 = nn.Conv1d(in_channels=27, out_channels=35, kernel_size=(1,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f5 = nn.Tanh()
self.f6 = nn.Conv1d(in_channels=35, out_channels=30, kernel_size=(1,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f7 = nn.Conv1d(in_channels=30, out_channels=10, kernel_size=(1,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f8 = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],16,49)
x = self.f0(x)
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
x = self.f4(x)
x = self.f5(x)
x = self.f6(x)
x = self.f7(x)
x = x.view(x.shape[0],10)
x = self.f8(x)
return x
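# Minimal usage sketch (illustrative; assumes MNIST-sized inputs, since forward()
# reshapes each sample to 16x49 = 784 values, e.g. a 1x28x28 image):
if __name__ == "__main__":
    net = Conv1dTanh_24()
    out = net(torch.randn(4, 1, 28, 28))
    print(out.shape)  # torch.Size([4, 10])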
|
[
"41098605+ahgamut@users.noreply.github.com"
] |
41098605+ahgamut@users.noreply.github.com
|
941023f561d7407ed3b0bee51288d9010a9e728c
|
cd22c49ca8e322ba91fa9688b144034a7711dc47
|
/defi_streaming/token_activity.py
|
dfed24ef7c641fddc25cf33f50d63591f9a3bbf4
|
[
"MIT"
] |
permissive
|
howardpen9/eth_analytics
|
ef5f306da973289dd3c17781bacdf5457797d2ea
|
cf9f35f867c88cd3ec11836253861908a9bc3d92
|
refs/heads/main
| 2023-06-21T07:15:03.168679
| 2021-08-09T04:43:32
| 2021-08-09T04:43:32
| 387,317,327
| 0
| 0
|
MIT
| 2021-07-23T06:37:25
| 2021-07-19T02:34:36
| null |
UTF-8
|
Python
| false
| false
| 3,639
|
py
|
# This script can be used to view all the transactions from a specific ERC-20 token over a defined # of blocks.
# This is useful for counting volume of transfers, viewing where it's being used, etc.
from web3 import Web3
# Import Alchemy API Key
from dotenv import dotenv_values
config = dotenv_values("../.env")
# I prefer Alchemy as the pricing plans are friendly for people who aren't loaded : )
w3 = Web3(Web3.HTTPProvider('https://eth-mainnet.alchemyapi.io/v2/' + config['API_KEY']))
# Get the block number for the latest block
latest_block = w3.eth.block_number
# Get this block number in another variable we will increment
block = latest_block
# Read data from last 270 blocks to get ~1 hour of data, change number to change historical length
num_blocks_to_scan = 1
# ERC-20 token you want to track transfers for, change address to view different token
erc_20_address = '0x1f9840a85d5af5bf1d1762f925bdaddc4201f984'
erc_20_history = []
# add the contract addresses you care about, plus the matching logic below, to attribute transfers to more protocols;
# without that logic the number of transfers is still tracked, but not the protocols the token is sent through
uni_v2_history = []
uni_v2_address = '0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D'
uni_v2_30_address = ''
#uni_v3_history = []
#metamask_router_history = []
#sushiswap_history = []
#compound_history = []
#aave_history = []
# Loop over num_blocks_to_scan starting from the latest mined block
while block > (latest_block - num_blocks_to_scan):
# Read data from the latest block
print(latest_block - num_blocks_to_scan)
print(block)
current_block = w3.eth.getBlock(block)
# Read transactions from this block
block_transactions = current_block['transactions']
print(len(block_transactions))
# Loop over all transactions in this block
for x in block_transactions:
# Get the data we need for each individual transaction
#print(x.hex())
block_receipt = w3.eth.getTransactionReceipt(x.hex())
try:
# Check if our ERC_20 of interest is being used
if str(block_receipt['logs'][0]['address']).lower() == erc_20_address:
# track number of transfers
print('yes')
erc_20_history.append("Uniswap Token Transfer")
looping = True
# view a list of all the EOAs and contracts the token goes through in this transaction
                # these nested loops are inefficient and can be improved; consider using checksum addresses instead of lowercasing everything, and fix these disastrous breaks xD
for x in block_receipt['logs']:
for y in x['topics']:
contract_address = str(y.hex())[-40:]
contract_address = '0x' + contract_address
if contract_address.lower() == uni_v2_address.lower():
uni_v2_history.append('Uniswap Token Traded Through Uniswap V2')
print('yes')
looping = False
break
if not looping:
break
except:
print('no erc-20 transfer')
print('subtract')
# increment block - 1 to move on to the next block and scan for transactions
block -= 1
# print how many times the ERC_20 in question was transferred over the last num_blocks_to_scan
print(len(erc_20_history))
# print how many of those ERC_20 transactions were transactions through Uniswap v2
print(len(uni_v2_history))
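# As the inline comment above suggests, comparing checksummed addresses avoids
# the repeated .lower() calls; a sketch using web3.py's helper (assuming a
# web3.py version that exposes Web3.toChecksumAddress):
#   target = Web3.toChecksumAddress(uni_v2_address)
#   if Web3.toChecksumAddress(contract_address) == target:
#       uni_v2_history.append('Uniswap Token Traded Through Uniswap V2')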
|
[
"lpose22@gmail.com"
] |
lpose22@gmail.com
|
fede356b76cd95095561b4fdc7bba90441403527
|
bfb28ec9c6504c08f874ea3f7fb069d29b47fd51
|
/testing/products/tests/test_urls.py
|
0198ea583bc5ef1f99ff38ee1d2a41b3c98915bd
|
[] |
no_license
|
anantonyk/python
|
c15d00108a1db0602f6ebd6ea1d8ec84e7858b8c
|
9b3a15984e810e6ea0e7116f30d89e95b84a2ffa
|
refs/heads/master
| 2020-04-01T16:53:26.677779
| 2018-12-05T08:16:29
| 2018-12-05T08:16:29
| 153,402,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from django.urls import reverse, resolve
class TestUrls:
def test_url(self):
path = reverse('detail', kwargs={'pk':1})
assert resolve(path).view_name=='detail'
|
[
"nastiaantonyk14@gmail.com"
] |
nastiaantonyk14@gmail.com
|
c695fddcefdc0efae0816934bae5aaba3b17ab7c
|
54ddb3f38cd09ac25213a7eb8743376fe778fee8
|
/topic_02_syntax/hw/tests/logic_1_arithmetic_test.py
|
24a92c5b8bec9f07cd079054c5fbfa6afd539e1c
|
[] |
no_license
|
ryndovaira/leveluppythonlevel1_300321
|
dbfd4ee41485870097ee490f652751776ccbd7ab
|
0877226e6fdb8945531775c42193a90ddb9c8a8b
|
refs/heads/master
| 2023-06-06T07:44:15.157913
| 2021-06-18T11:53:35
| 2021-06-18T11:53:35
| 376,595,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
import pytest
from topic_02_syntax.hw.logic_1_arithmetic import arithmetic
params = [
(0, 0, '+', 0),
(1, 0, '+', 1),
(0, 1, '+', 1),
(100, 100, '+', 200),
(100, -100, '+', 0),
(-100, 100, '+', 0),
(-100, -100, '+', -200),
(0, 0, '-', 0),
(1, 0, '-', 1),
(0, 1, '-', -1),
(100, 100, '-', 0),
(100, -100, '-', 200),
(-100, 100, '-', -200),
(-100, -100, '-', 0),
(0, 0, '*', 0),
(1, 0, '*', 0),
(0, 1, '*', 0),
(100, 100, '*', 10000),
(100, -100, '*', -10000),
(-100, 100, '*', -10000),
(-100, -100, '*', 10000),
(0, 1, '/', 0),
(1, 1, '/', 1),
(100, 100, '/', 1),
(100, -100, '/', -1),
(-100, 100, '/', -1),
(-100, -100, '/', 1),
(0, 1, '=', "Unknown operator"),
(1, 1, '%', "Unknown operator"),
(100, 100, '#', "Unknown operator"),
(100, -100, '.', "Unknown operator"),
(-100, 100, '0', "Unknown operator"),
(-100, -100, '&', "Unknown operator"),
]
ids = ["(%s) %s (%s) == (%s)" % (num1, op, num2, expected) for (num1, num2, op, expected) in params]
@pytest.mark.parametrize(argnames="num1, num2, op, expected",
argvalues=params,
ids=ids)
def test_arithmetic(num1, num2, op, expected):
assert arithmetic(num1, num2, op) == expected
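# With pytest, the ids above give each case a readable name, e.g.:
#   pytest logic_1_arithmetic_test.py -v
# reports cases such as test_arithmetic[(100) / (-100) == (-1)].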
|
[
"ryndovaira@gmail.com"
] |
ryndovaira@gmail.com
|
90b782501b6f2a82869a0606c0c34ed8fdefb653
|
228cf193205005b8057bfcc11cfe59a2cce99e25
|
/xianfengsg/online/V_1.3/XGBOOST_forecast_engineer.py
|
c79d9fc05b5d55310820164fd1b08f6542939a20
|
[] |
no_license
|
jimmyeva/AI-predict
|
a4b7a0adb4bf40497324332cbfe9106a78078403
|
2d7783ee0008e60bd04d27219b542b8abea235bd
|
refs/heads/master
| 2022-04-11T06:11:36.332609
| 2020-04-08T08:23:10
| 2020-04-08T08:23:10
| 250,241,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,756
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/9/13 11:16
# @Author : Ye Jinyu__jimmy
# @File    : XGBOOST_forecast
import pandas as pd
# show all columns
pd.set_option('display.max_columns', None)
# show all rows
pd.set_option('display.max_rows', 500)
# set the display width for values to 100 (the default is 50)
pd.set_option('max_colwidth', 100)
import numpy as np
import xgboost as xgb
from sklearn.model_selection import train_test_split
import features_engineering
import datetime
import pymysql
import os
from statsmodels.tsa.holtwinters import ExponentialSmoothing
import psycopg2
import multiprocessing
import math
#
# def mkdir(path):
#     folder = os.path.exists('/root/ai/wh_repl/program/prediction/'+path)
#     if not folder:  # create the folder if it does not already exist
#         os.makedirs('/root/ai/wh_repl/program/prediction/'+path)  # makedirs creates intermediate directories as needed
#         print(
#             "----生成新的文件目录----")
#     else:
#         print("当前文件夹已经存在")
#
#
# def print_in_log(string):
#     print(string)
#     date_1 = datetime.datetime.now()
#     str_10 = datetime.datetime.strftime(date_1, '%Y%m%d')
#     file = open('/root/ai/wh_repl/program/prediction/' + str(str_10) + '/' + 'log' + str(str_10) + '.txt', 'a')
#     file.write(str(string) + '\n')
def mkdir(path):
    folder = os.path.exists('./'+path)
    if not folder:  # create the folder if it does not already exist
        os.makedirs('./'+path)  # makedirs creates intermediate directories as needed
        print(
            "----生成新的文件目录----")
    else:
        print("当前文件夹已经存在")
def print_in_log(string):
print(string)
date_1 = datetime.datetime.now()
str_10 = datetime.datetime.strftime(date_1, '%Y%m%d')
file = open('./' + 'log_decision' + str(str_10) + '.txt', 'a')
file.write(str(string) + '\n')
#====================================================================
#------------------------------> fetch each SKU's detailed sales records by SKU id
def get_detail_sales_data(wh_code,sku_code,start_date,end_date):
conn = psycopg2.connect(database="dc_rpt", user="ads", password="ads@xfsg2019", host="192.168.1.205", port="3433")
print_in_log("Opened database successfully,connected with PG DB")
ads_rpt_ai_wh_d_sql = """SELECT * FROM ads_aig_supply_chain.ads_rpt_ai_wh_d WHERE wh_code ='%s' AND sty_code = '%s'
AND stat_date >'%s' AND stat_date <= '%s' """ % \
(wh_code,sku_code,start_date,end_date)
try:
wh_sales = pd.read_sql(ads_rpt_ai_wh_d_sql,conn)
except:
print("load data from postgres failure !")
wh_sales = pd.DataFrame()
exit()
conn.close()
wh_sales['stat_date'] = pd.to_datetime(wh_sales['stat_date'])
wh_sales = wh_sales.rename(index = str, columns = {'stat_date': 'Account_date'})
# print(wh_sales)
# wh_sales.columns = ['SENDER','DC_NAME','WRH','WAREHOUSE_NAME','NUM','GDGID','SKU_NAME',
# 'OCRDATE','CRTOTAL','MUNIT','QTY','QTYSTR','TOTAL','PRICE','QPC','RTOTAL']
print_in_log(str(sku_code)+'销售数据读取完成')
return wh_sales
#-------------------------------------------------------------------------------> group by account date to see each SKU's daily sales
def data_group(data):
    # the dataframe below holds the grouped data
sales_data = pd.DataFrame(columns = ["Account_date","Sku_code",'Dc_name',"Sales_qty",'Price','Dc_code',
'Wrh','Warehouse_name','Sku_name'])
sales_data["Sales_qty"]=data.groupby(["Account_date"],as_index = False).sum()["sal_qty_1d"]
sales_data["Price"] = data.groupby(["Account_date"],as_index = False).mean()["sal_amt_1d"]
sales_data["Account_date"]= data.groupby(['Account_date']).sum().index
sales_data["Sku_code"] = [data["sty_code"].iloc[0]]*len(sales_data["Sales_qty"])
sales_data["Dc_name"] = [data["wh_name"].iloc[0]] * len(sales_data["Sku_code"])
sales_data["Dc_code"] = [data["wh_code"].iloc[0]] * len(sales_data["Sku_code"])
sales_data["Sku_id"] = [data["sty_id"].iloc[0]] * len(sales_data["Sku_code"])
sales_data["Wrh"] = sales_data["Dc_code"]
sales_data["Warehouse_name"] = sales_data["Dc_name"]
sales_data["Sku_name"] = [data["sty_name"].iloc[0]] * len(sales_data["Sales_qty"])
sales_data = sales_data.sort_values( by = ['Account_date'], ascending = False)
return sales_data
#---------------------------------------------------------------------------------> convert the date and return the previous day as a string
def date_convert(end):
datetime_forma= datetime.datetime.strptime(end, "%Y%m%d")
yesterday = datetime_forma - datetime.timedelta(days=1)
yesterday = yesterday.strftime("%Y%m%d")
return yesterday
#---------------------------------> helper function for handling outlier points
def process_abnormal(data):
mid_data = data
Q1 = mid_data['Sales_qty'].quantile(q=0.25)
Q3 = mid_data['Sales_qty'].quantile(q=0.75)
IQR = Q3 - Q1
mid_data["Sales_qty"].iloc[np.where(mid_data["Sales_qty"] > Q3 + 1.5 * IQR)] = np.median(mid_data['Sales_qty'])
mid_data["Sales_qty"].iloc[np.where(mid_data["Sales_qty"] < Q1 - 1.5 * IQR)] = np.median(mid_data['Sales_qty'])
return mid_data
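# Worked example of the IQR rule in process_abnormal above: for Sales_qty
# [1, 2, 3, 4, 100], Q1 = 2, Q3 = 4, IQR = 2, so the upper fence is
# 4 + 1.5*2 = 7; the outlier 100 is replaced by the median, giving [1, 2, 3, 4, 3].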
'''Denoise the data with triple (Holt-Winters) exponential smoothing'''
def single_holt_winters(data):
    # first correct the obviously wrong records
# data["Sales_qty"].iloc[np.where(data["Sales_qty"] < 0) ] = 0
sales = data.drop(data[data.Sales_qty <= 0].index)
# sales.to_csv('D:/AI/xianfengsg/online/V_1.2/data/sale.csv', encoding='utf_8_sig')
y = pd.Series(sales['Sales_qty'].values)
date = pd.Series(sales['Account_date'].values)
    seasonal = round(len(sales) / 4)
    ets3 = ExponentialSmoothing(y, trend='add', seasonal='add', seasonal_periods=seasonal)
r3 = ets3.fit()
anomaly_data = pd.DataFrame({
'Account_date': date,
'fitted': r3.fittedvalues,
})
merge_data = pd.merge(data,anomaly_data,on='Account_date',how='inner')
# merge_data.to_csv('D:/AI/xianfengsg/online/V_1.2/data/merge_data.csv', encoding='utf_8_sig')
merge_data.drop('Sales_qty',axis=1, inplace=True)
merge_data = merge_data.rename(columns={'fitted':'Sales_qty'})
# except OSError as reason:
    # print('Error cause: ' + str(reason))
# pass
# merge_data = pd.DataFrame()
return merge_data
#-----------------------------------------------------> fill dates that lack sales/price information, and handle outliers
def date_fill(start_date, end, data):
yesterday = date_convert(end)
date_range_sku = pd.date_range(start = start_date, end=yesterday)
data_sku = pd.DataFrame({'Account_date': date_range_sku})
    # choose a denoising approach based on how much history there is
    '''two noise-handling approaches are switched between here'''
    if len(data) >= 150:
        print('使用三阶指数平滑处理')
        process_data = single_holt_winters(data)
    else:
        print('基于统计学处理')
        process_data = process_abnormal(data)
process_data['Sales_qty'] = process_data['Sales_qty'].astype(int)
result = pd.merge(process_data, data_sku, on=['Account_date'], how='right')
    # days with no sales are filled with zero
result["Sales_qty"].iloc[np.where(np.isnan(result["Sales_qty"]))] = 0
result["Sales_qty"].iloc[np.where(result["Sales_qty"] < 0)] = 0
result = result.fillna(method='ffill')
result = result.sort_values(["Account_date"], ascending=1)
return result
# get the gid of every SKU
def get_all_sku(data):
data.to_csv('./data.csv',encoding='utf_8_sig')
sku_colomn = set(data["Sku_code"])
sku_list = list(sku_colomn)
return sku_list
def get_features_target(data):
    data_array = pd.np.array(data)  # convert the incoming dataframe to an array so the rows can be iterated
features_list = []
target_list = []
columns = [column for column in data]
print_in_log('该资源的长度是:'+str(len(columns)))
if 'Sales_qty' in columns:
for line in data_array:
temp_list = []
            for i in range(2, int(data.shape[1])):  # 107 features in total
                if i == 2:  # index 2 holds the current target, i.e. the current sales quantity
target_temp = int(line[i])
target_list.append(target_temp)
else:
temp_list.append(int(line[i]))
features_list.append(temp_list)
else:
for line in data_array:
temp_list = []
target_list =[]
            # index 1 can be the goods code, whose first digit may be 0, so it is handled separately as a string
            for i in range(2, int(data.shape[1])):  # 107 features in total
if i == 1:
temp_list.append(str(line[i]))
else:
temp_list.append(int(line[i]))
features_list.append(temp_list)
features_data = pd.DataFrame(features_list)
target_data = pd.DataFrame(target_list)
return features_data, target_data
def get_sku_number_dict(data):
data_array = pd.np.array(data)
max_dict = {}
min_dict = {}
ave_dict = {}
sum_dict = {}
count_dict = {}
all_sku_list = []
for line in data_array:
all_sku_list.append(line[1])
all_sku_code_set = set(all_sku_list)
for sku in all_sku_code_set:
max_dict[sku] = 0
min_dict[sku] = 0
ave_dict[sku] = 0
sum_dict[sku] = 0
count_dict[sku] = 0
for line in data_array:
sales_qty = line[2]
sku = line[1]
sum_dict[sku] += sales_qty
count_dict[sku] += 1
        # track the maximum and minimum sales
if max_dict[sku] < sales_qty:
max_dict[sku] = sales_qty
if min_dict[sku] > sales_qty:
min_dict[sku] = sales_qty
for sku in all_sku_code_set:
ave_dict[sku] = sum_dict[sku] / count_dict[sku]
return max_dict, min_dict, ave_dict
# rmspe_xg: evaluation metric used while training the model
def rmspe_xg(yhat, y):
    # y is a DMatrix object
    y = y.get_label()
    # y.get_label() returns the label array
    y = np.exp(y)  # 2-d array
    yhat = np.exp(yhat)  # 1-d array
    rmspe = np.sqrt(np.mean((y - yhat) ** 2))
    return "rmspe", rmspe
# this metric is used to judge how good the model is
def rmspe(zip_list):
    # materialise first: in Python 3 zip() is a one-shot iterator
    zip_list = list(zip_list)
    sum_value = 0.0
    count = len(zip_list)
    for real, predict in zip_list:
        v1 = (real - predict) ** 2
        sum_value += v1
    v2 = sum_value / count
    v3 = np.sqrt(v2)
    return v3
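# Worked example: rmspe(zip([10, 20], [12, 18]))
#   = sqrt(((10-12)**2 + (20-18)**2) / 2) = sqrt(4.0) = 2.0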
def predict_with_XGBoosting(sku_code,data,test_data):
data_process = data
    data_process.loc[data_process['Sales_qty'] == 0, 'Sales_qty'] = 1
train_and_valid, test = train_test_split(data_process, test_size=0.2, random_state=10)
# train, valid = train_test_split(train_and_valid, test_size=0.1, random_state=10)
train_feature, train_target = get_features_target(train_and_valid)
# test_feature, test_target = get_features_target(test)
valid_feature, valid_target = get_features_target(test)
#--------------------------------------------------
    dtrain = xgb.DMatrix(train_feature, np.log(train_target))  # take the log to make the data more stable
dvalid = xgb.DMatrix(valid_feature, np.log(valid_target))
watchlist = [(dvalid, 'eval'), (dtrain, 'train')]
    # set the parameters
num_trees = 450
params = {"objective": "reg:linear",
"eta": 0.15,
"max_depth": 8,
"subsample": 0.8,
"colsample_bytree": 0.7,
"silent": 1
}
    # train the model
gbm = xgb.train(params, dtrain, num_trees, evals=watchlist,
early_stopping_rounds=60, feval=rmspe_xg, verbose_eval=True)
#---------------------------------------------------
    # slice out the rows used for the final output
print_in_log('test_data:length'+str(len(test_data)))
predict_feature,target_empty = get_features_target(test_data)
    # feed the future feature set to the model for prediction
print("Make predictions on the future set")
# print(predict_feature.columns)
# print('predict_feature',predict_feature)
predict_probs = gbm.predict(xgb.DMatrix(predict_feature))
predict_qty = list(np.exp(predict_probs))
    # correct the prediction results
max_dict, min_dict, ave_dict = get_sku_number_dict(data_process)
predict_qty_improve = []
for predict in predict_qty:
sku_code = str(sku_code)
print_in_log('predict'+'\n'+str(predict))
print_in_log('sku_code'+str(sku_code))
print_in_log('max_dict'+str(max_dict))
print_in_log('min_dict' + str(min_dict))
if predict > max_dict[sku_code]:
predict = ave_dict[sku_code]
if predict < min_dict[sku_code]:
predict = ave_dict[sku_code]
predict_qty_improve.append(predict)
    # compute the error
# list_zip_real_predict_improve = zip(test_target_list, predict_qty_improve)
# error = rmspe(list_zip_real_predict_improve)
# print('error', error)
return predict_qty_improve
# define a function that builds the dataset used for training
def separate_data(sales_data,feature_data):
    # the step below distinguishes DC and SKU, and merges in the feature columns
sales_data = sales_data[['Account_date', 'Sku_code', 'Sales_qty']]
merge_data = sales_data.merge(feature_data, on='Account_date', how='inner')
merge_data = merge_data.reset_index(drop=True)
return merge_data
# build a dataset used for prediction
def create_future(sku_code,features_data,end_date,date_7days_after):
sku_feature_data = features_data[features_data['Account_date'] >= end_date]
sku_feature_data = sku_feature_data[sku_feature_data['Account_date'] <= date_7days_after]
# sku_feature_data = sku_feature_data.drop(['index'],axis=1)
# sku_feature_data = sku_feature_data[sku_feature_data['Account_date'] < '2019-06-22']
sku_feature_data.insert(1,'Sku_code',pd.np.array(sku_code))
return sku_feature_data
# after the results are computed, fill in the missing descriptive columns
def fill_data(predict_data,sales_data,sku_code):
mid_sales_data = sales_data[sales_data['Sku_code'] == sku_code]
mid_predict = predict_data
mid_predict["Price"] = pd.np.array(mid_sales_data["Price"].iloc[0])
# mid_predict["Gross_profit_rate"] = pd.np.array(mid_sales_data["Gross_profit_rate"].iloc[0])
mid_predict["Dc_name"] = pd.np.array(mid_sales_data["Dc_name"].iloc[0])
mid_predict["Dc_code"] = pd.np.array(mid_sales_data["Dc_code"].iloc[0])
mid_predict["Sku_id"] = pd.np.array(mid_sales_data["Sku_id"].iloc[0])
mid_predict["Wrh"] = pd.np.array(mid_sales_data["Wrh"].iloc[0])
mid_predict["Warehouse_name"] = pd.np.array(mid_sales_data["Warehouse_name"].iloc[0])
mid_predict["Sku_name"] = pd.np.array(mid_sales_data["Sku_name"].iloc[0])
return mid_predict
# main forecasting entry point
# SKUs used to be selected from the inventory table, picking the SKU codes to forecast;
#-------------> the latest logic selects them from the ordering catalogue
def get_order_code(wh_code):
print_in_log('正在读取叫货目录的数据')
dbconn = pymysql.connect(host="rm-bp1jfj82u002onh2t.mysql.rds.aliyuncs.com", database="purchare_sys",
user="purchare_sys", password="purchare_sys@123", port=3306,
charset='utf8')
get_orders = """SELECT pcr.goods_code GOODS_CODE FROM p_call_record pcr WHERE pcr.warehouse_code LIKE '%s%%'"""%\
(wh_code)
orders = pd.read_sql(get_orders, dbconn)
orders['GOODS_CODE'] = orders['GOODS_CODE'].astype(str)
print_in_log('叫货目录读取完成')
dbconn.close()
return orders
# use multiple processes to read the raw sales data and do the smoothing
def format_multi(data,start_date,end_date,wh_code):
sku_code = data[['GOODS_CODE']]
    '''for testing only'''
# sku_code = sku_code.loc[0:24]
sku_code = sku_code.dropna(axis=0,how='any')
sku_code_list = sku_code['GOODS_CODE'].to_list()
length_res = len(sku_code_list)
    n = 8  # plan to split the work across 8 processes
    step = int(math.ceil(length_res / n))
    pool = multiprocessing.Pool(processes=8)  # create a pool of 8 workers
    results = []
    for i in range(0, 8):
        mid_sku_code = sku_code_list[(i*step) : ((i+1)*step)]
        print('mid_sku_code', mid_sku_code)
        results.append(pool.apply_async(get_his_sales, args=(mid_sku_code, start_date, end_date, wh_code,)))
    pool.close()  # close the pool (no more tasks may be submitted); must be called before join
    pool.join()  # wait for every worker in the pool to finish
final_data= pd.DataFrame(columns=['Account_date','Sku_code','Dc_name','Price','Dc_code','Wrh','Warehouse_name','Sku_name',
'Sku_id','Sales_qty'])
for i in results:
a = i.get()
final_data = final_data.append(a, ignore_index=True)
final_data = final_data.dropna(axis=0, how='any')
return final_data
# given the ordering-catalogue codes and the DC's warehouse code, read the sales history for each code
def get_his_sales(sku_code_list,start_date,end_date,wh_code):
# sku_code = df[['GOODS_CODE']]
    # '''for testing only'''
# sku_code = sku_code.loc[0:5]
#
# sku_code = sku_code.dropna(axis=0,how='any')
# sku_code_list = sku_code['GOODS_CODE'].to_list()
    # data holds the historical sales of every SKU that needs forecasting
data = pd.DataFrame()
for code in sku_code_list:
wh_sales = get_detail_sales_data(wh_code,code,start_date,end_date)
print_in_log('sku的CODE:'+str(code))
if wh_sales.empty == True:
print_in_log('sku的code:'+str(code)+'未获取到销售数据')
pass
else:
result_mid = data_group(wh_sales)
result = date_fill(start_date,end_date,result_mid)
data = data.append(result)
return data
# main computation function
def main_forecast(start_date,end_date,date_7days,wh_code,date_7days_after):
print_in_log(start_date+end_date+date_7days+wh_code)
df = get_order_code(wh_code)
# sales_data = get_his_sales(df,start_date,end_date,wh_code)format_multi
sales_data = format_multi(df, start_date, end_date, wh_code)
# print('sales_data',sales_data)
    '''the sales data read above needs one more round of cleaning here'''
features_data = features_engineering.made_feature()
train_data = separate_data(sales_data,features_data)
sku_list =get_all_sku(sales_data)
result = pd.DataFrame()
    # train and predict for each SKU separately
print_in_log('分别对每个sku进行学习预测:'+str(set(sku_list)))
for sku_code in sku_list:
test_data = create_future(sku_code, features_data, end_date,date_7days_after)
train_data_mid = train_data[train_data['Sku_code'] == sku_code]
print_in_log('sku_code:' + str(sku_code))
print_in_log('数据长度是:%d' % len(train_data_mid))
if len(train_data_mid) > 45:
train_data_mid = train_data_mid.reset_index(drop=True)
test_data = test_data.reset_index(drop=True)
predict_qty_improve = predict_with_XGBoosting(sku_code,train_data_mid,test_data)
test_data['Forecast_qty'] = pd.Series(predict_qty_improve)
predict_data = test_data[['Account_date','Sku_code','Forecast_qty']]
result_data = fill_data(predict_data,sales_data,sku_code)
print('predict_qty_improve',predict_qty_improve)
result = result.append(result_data)
else:
            print_in_log('%s has too little training data, so a rule-based forecast is used instead' % sku_code)
train_data_mid = train_data[train_data['Account_date'] > date_7days]
predict_qty_improve = train_data_mid['Sales_qty'].mean()
test_data['Forecast_qty'] = predict_qty_improve
predict_data = test_data[['Account_date','Sku_code','Forecast_qty']]
# print('predict_data',predict_data)
result_data = fill_data(predict_data, sales_data, sku_code)
# print('result_data',result_data)
result = result.append(result_data)
result['Update_time'] = pd.to_datetime(end_date).strftime('%Y-%m-%d')
# result['Update_time'] = datetime.date.today().strftime('%Y-%m-%d')
result['Account_date']= pd.to_datetime(result['Account_date'], unit='s').dt.strftime('%Y-%m-%d')
result = result.replace([np.inf, -np.inf], np.nan)
result = result.fillna(0)
return result
#-----------------------------------connect to the MySQL database
def connectdb():
    print_in_log('Connecting to the MySQL server...')
db = pymysql.connect(host="rm-bp1jfj82u002onh2t.mysql.rds.aliyuncs.com",
database="purchare_sys", user="purchare_sys",
password="purchare_sys@123",port=3306, charset='utf8')
    print_in_log('Connected successfully')
return db
#《----------------------------------------------------------------------delete duplicated rows for the same date
def drop_data(wh_code,end_date,db):
cursor = db.cursor()
sql = """delete from dc_forecast where Update_time = DATE ('%s') and Dc_code = '%s'"""%(end_date,wh_code)
cursor.execute(sql)
#<===========================================insert the data
def insertdb(db,data):
cursor = db.cursor()
# param = list(map(tuple, np.array(data).tolist()))
data_list = data.values.tolist()
print_in_log('data_list:len'+str(len(data_list)))
sql = """INSERT INTO dc_forecast (Account_date,Sku_code,Forecast_qty,Price,Dc_name,Dc_code,
Sku_id,Wrh,Warehouse_name,Sku_name,Update_time)
VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"""
try:
cursor.executemany(sql, data_list)
        print_in_log("SKU data for all brands inserted into the database successfully")
db.commit()
except OSError as reason:
        print_in_log('Failed because: %s' % str(reason))
db.rollback()
#<==================================================
def closedb(db):
db.close()
#<=============================================================================
def main(start_date,end_date,date_7days,wh_code,date_7days_after):
mkdir(end_date)
print_in_log(start_date+end_date+date_7days+wh_code)
result_forecast = main_forecast(start_date,end_date,date_7days,wh_code,date_7days_after)
print_in_log('result_forecast:len'+str(len(result_forecast)))
result_forecast.to_csv('./'+str(wh_code)+'result_forecast'+str(end_date)+'.csv',encoding='utf_8_sig')
db = connectdb()
drop_data(wh_code,end_date,db)
if result_forecast.empty:
print_in_log("The data frame is empty")
print_in_log("result:1")
closedb(db)
else:
insertdb(db,result_forecast)
closedb(db)
print_in_log("result:1")
#《============================================================================main entry point
if __name__ == '__main__':
    today = datetime.date.today() - datetime.timedelta(45)
    end_date = today.strftime('%Y%m%d')
    date_7days = (today - datetime.timedelta(7)).strftime('%Y%m%d')
    # main() also expects the end of the forecast horizon; assuming it is 7 days after `today`
    date_7days_after = (today + datetime.timedelta(7)).strftime('%Y%m%d')
    wh_code = '001'
    start_date = '20180101'
    print(start_date, end_date, date_7days, wh_code)
    try:
        main(start_date, end_date, date_7days, wh_code, date_7days_after)
    except OSError as reason:
        print_in_log('Failed because: %s' % str(reason))
        print_in_log("result:0")
|
[
"usstyjy@163.com"
] |
usstyjy@163.com
|
e7b14c0367b594b8adebfd94bd611149cf852b69
|
bec7cb71348269467e720876d1b262379129e138
|
/ask/ask/urls.py
|
113ba92f8651397964e4890672e116f908d80ddc
|
[] |
no_license
|
aftana/st-web31
|
6b18548891135454f6e5c822a92203feec04be2e
|
862a73e0b576f1a611232aac4afec504a909c0af
|
refs/heads/master
| 2020-04-05T12:38:04.863561
| 2017-07-12T13:40:13
| 2017-07-12T13:40:13
| 95,201,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
"""ask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('qa.urls')),
]
|
[
"art-xub@xubuntu.com"
] |
art-xub@xubuntu.com
|
52df8a6e0f658bda2e297a144ee28205503152b5
|
51a5c2772232690a34ba2030c643ffcb8d41023d
|
/cloudy-scripts/grid-density/plot-electron-temp.py
|
db0cff264676e6631603b21f62d7347a1f04e9d6
|
[] |
no_license
|
calvin-sykes/mmas-project
|
000e456f83f97c001d196721c2b3bcad7bf64f99
|
4a57334fd7036d49760811373ef2bc8c4fc9a89f
|
refs/heads/master
| 2021-09-05T19:18:24.325590
| 2018-01-30T13:49:00
| 2018-01-30T13:49:00
| 106,418,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
#!/usr/bin/python3
import numpy as np
import matplotlib, matplotlib.pyplot as plt
import cloudy_plots as clplt
if __name__ == '__main__':
fn_base = 'grid-density'
cp = clplt.CloudyPlotter(fn_base)
colourvals = np.linspace(1., 0., cp.nfiles())
colours = [matplotlib.cm.jet(x) for x in colourvals]
    # Plot of electron temperature vs depth for each H density
    plt.figure()
    ax = plt.gca()
    ax.set_xscale('log')
    ax.set_xlabel('$\log(\mathrm{depth/[cm]})$')
    ax.set_ylabel('$T_\mathrm{e}$ [K]')
    ax.set_title('Electron temperature as a function of depth')
ax.xaxis.set_major_formatter(matplotlib.ticker.LogFormatterExponent())
for idx in range(cp.nfiles()):
# find value of parameter with this index
hden = cp.get_grid_param(idx, 'hden')
depth = cp.get_col(idx, 'depth')
etemp = cp.get_col(idx, 'Te')
plt.plot(depth, etemp,
label='$\log(n_H)={}$'.format(hden),
color=colours[idx], marker='.', linestyle='')
plt.legend()
plt.show()
|
[
"sykescalvin09@gmail.com"
] |
sykescalvin09@gmail.com
|
3f83f46f9d7f4babfa8c81312e6fd61edc7c8c9a
|
cc52011cb420651cdd9d37af1ffbad68f935c7be
|
/junk/face_test.py
|
02765a052dfcd7d9789560f7bae2bde65f24a6ca
|
[] |
no_license
|
fatpat314/mask_detection
|
0988a341fd47849977bbb7babdc0ed2fce928a6d
|
025b420014e8aac71d867e06ef9202a473e5357c
|
refs/heads/master
| 2022-12-20T05:43:44.700740
| 2020-09-28T08:19:56
| 2020-09-28T08:19:56
| 290,686,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,137
|
py
|
import face_recognition
import cv2
import numpy as np
import os
import glob
# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)
# 2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)
"""img_dir = "images"
data_path = os.path.join(img_dir, '*g')
files = glob.glob(data_path)
data = []
masked_faces_encodings = []
for fl in files:
data.append(fl)
masked_faces_images = face_recognition.load_image_file(fl)
masked_faces_encoding = face_recognition.face_encodings(masked_faces_images)
masked_faces_encodings.append(masked_faces_encoding)
masked_faces = ["Masked"]
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
print(masked_faces_encodings)
while True:
ret, frame = video_capture.read()
small_frame = cv2.resize(frame,(0,0), fx=0.25, fy=0.25)
rgb_small_frame = small_frame[:, :, ::-1]
if process_this_frame:
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
# matches = face_recognition.compare_faces(masked_faces_encodings, face_encoding)
name = "Unmasked"
if name == "Unmasked":
print("ALERT!!!!", "\a")
# # If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = masked_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(masked_faces_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = masked_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
print("IMG DATA: ", data)"""
# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("face.jpeg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("face2.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
# Create arrays of known face encodings and their names
known_face_encodings = [
obama_face_encoding,
biden_face_encoding
]
known_face_names = [
"Barack Obama",
"Joe Biden"
]
# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# tolerance=0.0
# Grab a single frame of video
ret, frame = video_capture.read()
# Resize frame of video to 1/4 size for faster face recognition processing
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
rgb_small_frame = small_frame[:, :, ::-1]
# Only process every other frame of video to save time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unmasked"
if name == "Unmasked":
print("ALERT!!!!", "\a")
# # If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
# import face_recognition
# import cv2
# import numpy as np
#
# """Does this only need to trigger when it sees a face? Otherwise just keep looping through frames until a face is found.
# Because the facial recognition is not able to recognize masked faces"""
#
# video_capture = cv2.VideoCapture(0)
#
# # Initialize some variables
# face_locations = []
# face_encodings = []
# face_names = []
# process_this_frame = True
#
# while True:
# # Grab a single frame of video
# ret, frame = video_capture.read()
#
# # Resize frame of video to 1/4 size for faster face recognition processing
# small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
#
# # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
# rgb_small_frame = small_frame[:, :, ::-1]
#
# # Only process every other frame of video to save time
# if process_this_frame:
# # Find all the faces and face encodings in the current frame of video
# face_locations = face_recognition.face_locations(rgb_small_frame)
# face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
#
# face_names = []
# for face_encoding in face_encodings:
# name = "Unmasked"
# if name == "Unmasked":
# print("ALERT!!!", '\a')
#
# process_this_frame = not process_this_frame
#
#
# # Display the results
# for (top, right, bottom, left), name in zip(face_locations, face_names):
# # Scale back up face locations since the frame we detected in was scaled to 1/4 size
# top *= 4
# right *= 4
# bottom *= 4
# left *= 4
#
# # Draw a box around the face
# cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
#
# # Draw a label with a name below the face
# cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
# font = cv2.FONT_HERSHEY_DUPLEX
# cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
#
# # Display the resulting image
# cv2.imshow('Video', frame)
#
# # Hit 'q' on the keyboard to quit!
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
#
# # Release handle to the webcam
# video_capture.release()
# cv2.destroyAllWindows()
#
#
#
#
#
#
#
#
#
#
# # from PIL import Image
# # import face_recognition
# # import cv2
# # import sys
#
# # faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# #
# # video_capture = cv2.VideoCapture(0)
# #
# # while True:
# # ret, frame = video_capture.read()
# #
# # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# # faces = faceCascade.detectMultiScale(
# # gray,
# # scaleFactor=1.5,
# # minNeighbors=5,
# # minSize=(30, 30),
# # flags=cv2.CASCADE_SCALE_IMAGE
# # )
# #
# # for (x, y, w, h) in faces:
# # cv2.rectangle(frame, (x, y), (x+w, y+h), (0 ,255, 0), 2)
# #
# # cv2.imshow('FaceDetections', frame)
# #
# # if k%256 == 27:
# # break
# #
# # elif k%256 -- 32:
# # img_name = "facedetect_webcam_{}.png".format(img_counter)
# # cv2.imwrite(img_name, frame)
# # print("{} written!".format(img_name))
# # img_counter += 1
# #
# # video_capture.release()
# # cv2.destroyAllWindows()
#
#
# #
# # # cascPath = sys.argv[1]
# # faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# #
# # video_capture = cv2.VideoCapture(0)
# #
# # while True:
# # # Capture frame-by-frame
# # ret, frame = video_capture.read()
# #
# # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# #
# # faces = faceCascade.detectMultiScale(
# # gray,
# # scaleFactor=1.1,
# # minNeighbors=5,
# # minSize=(30, 30),
# # # flags=cv2.cv2.CV_HAAR_SCALE_IMAGE
# # )
# #
# # # Draw a rectangle around the faces
# # for (x, y, w, h) in faces:
# # cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# #
# # # Display the resulting frame
# # cv2.imshow('Video', frame)
# #
# # if cv2.waitKey(1) & 0xFF == ord('q'):
# # break
# #
# # # When everything is done, release the capture
# # video_capture.release()
# # cv2.destroyAllWindows()
# #
# #
# #
# #
# #
# #
# #
# #
# #
# # masked_faces = face_recognition
# #
# # known_image = face_recognition.load_image_file("mask.jpeg")
# # unknown_image = face_recognition.load_image_file("face.jpeg")
# #
# # try:
# # known_image_encoding = face_recognition.face_encodings(known_image)[0]
# # unknown_image_encoding = face_recognition.face_encodings(unknown_image)[0]
# # except IndexError:
# # print("I was not able to locate any faces in at least one of the images. Check the image files. Aborting...")
# # quit()
# #
# # known_faces = [
# # known_image_encoding
# # ]
# #
# # results = face_recognition.compare_faces(known_faces, unknown_image_encoding)
# #
# # print("Is the unknown face face.jpg {}".format(results[0]))
# # print("Is the unknown face a new person that we have never seen before? {}".format(not True in results))
# #
#
# #
# #
# # # def face_rec():
# # # known_image = face_recognition.load_image_file("face.jpg")
# # # unknown_image = face_recognition.load_image_file("face.jpeg")
# # #
# # # known_encoding = face_recognition.face_encodings(known_image)[0]
# # # unknown_encoding = face_recognition.face_encodings(unknown_image)[0]
# # #
# # # results = face_recognition.compare_faces([known_encoding], unknown_encoding)
# # # print(results)
# # # return results
# #
# #
# #
# #
# # image = face_recognition.load_image_file("group.jpg")
# # face_locations = face_recognition.face_locations(image)
# # print("I found {} face(s) in this photograph.".format(len(face_locations)))
# # for face_location in face_locations:
# # top, right, bottom, left = face_location
# # print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
# # face_image = image[top:bottom, left:right]
# # pil_image = Image.fromarray(face_image)
# # pil_image.show()
|
[
"w.patrick.kelly@gmail.com"
] |
w.patrick.kelly@gmail.com
|
02a65f8279d97c74d7a8b059a3e2e39c3ed462f9
|
f00ee3b1d271741ddd5d07e490cc300f4b468964
|
/nNums.py
|
383324688a5a32b7b4c0cb01318e42978adee673
|
[] |
no_license
|
HyungjinLee/codingtest_programmers_Nnums
|
a353f9c6ee14e8c34afb5fd0858b115db3468ece
|
127f193261eda6da1947e3b6695d5f71caf91bd9
|
refs/heads/master
| 2022-04-19T10:22:08.083652
| 2020-04-17T14:56:10
| 2020-04-17T14:56:10
| 256,531,697
| 0
| 0
| null | null | null | null |
UHC
|
Python
| false
| false
| 933
|
py
|
import copy
# Is there a way to do this without copy? Still thinking about it
def solution(N, number):
candidates = [set() for _ in range (8)]
for i in range (8) :
candidates[i].add(int(str(N) * (i+1)))
def search(count) :
global answer
if count >= 8 :
answer = -1
return False
for i in range (count) :
j = count-i-1
for x in copy.deepcopy(candidates[i]) :
for y in copy.deepcopy(candidates[j]) :
candidates[count].add(x+y)
candidates[count].add(x-y)
candidates[count].add(x*y)
if x != 0 :
candidates[count].add(y//x)
if number in candidates[count] :
answer = count+1
return True
search(count+1)
search(1)
return answer
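# Hypothetical usage (the classic programmers.co.kr example): expressing 12 with the
# digit 5 needs four 5s, e.g. 12 == (55 + 5) / 5.
# print(solution(5, 12))  # -> 4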
|
[
"2014104136@khu.ac.kr"
] |
2014104136@khu.ac.kr
|
974489579d1b3a01da4b771465ba222ddba4a144
|
d42bf95794c9c2ba90d5484c5760181f3f810a91
|
/my_answers.py
|
725b1bfc73279a32421531808fdb174c370c8360
|
[] |
no_license
|
shokir85/DLML_Bike_Sharing_project
|
61e1f239488f57a4835135ca3ebb677528956088
|
ec704287ebe5975df5e5c63cbcb0dd7db492f2dd
|
refs/heads/main
| 2023-03-09T23:30:04.096818
| 2021-02-24T22:17:14
| 2021-02-24T22:17:14
| 342,050,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,464
|
py
|
import numpy as np
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
#self.activation_function = lambda x : sigmoid(x) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
def sigmoid(x):
return 1/(1+ np.exp(-x)) # Replace 0 with your sigmoid calculation here
self.activation_function = sigmoid
    def train(self, features, targets):
        ''' Train the network on batch of features and targets.
            Arguments
            ---------
            features: 2D array, each row is one data record, each column is a feature
            targets: 1D array of target values
        '''
        n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
# Implement the forward pass function below
final_outputs, hidden_outputs = self.forward_pass_train(X)
# Implement the backproagation function below
delta_weights_i_h, delta_weights_h_o = self.backpropagation(final_outputs, hidden_outputs, X, y,
delta_weights_i_h, delta_weights_h_o)
self.update_weights(delta_weights_i_h, delta_weights_h_o, n_records)
def forward_pass_train(self, X):
''' Implement forward pass here
Arguments
---------
X: features batch
'''
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X,self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs, hidden_outputs
def backpropagation(self, final_outputs, hidden_outputs, X, y, delta_weights_i_h, delta_weights_h_o):
''' Implement backpropagation
Arguments
---------
final_outputs: output from forward pass
y: target (i.e. label) batch
delta_weights_i_h: change in weights from input to hidden layers
delta_weights_h_o: change in weights from hidden to output layers
'''
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
output_error_term = error
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = self.weights_hidden_to_output * output_error_term
# TODO: Backpropagated error terms - Replace these values with your calculations.
hidden_error_term = hidden_error.T * hidden_outputs *(1 - hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X.T[:, None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs.T[:,None]
return delta_weights_i_h, delta_weights_h_o
def update_weights(self, delta_weights_i_h, delta_weights_h_o, n_records):
''' Update weights on gradient descent step
Arguments
---------
delta_weights_i_h: change in weights from input to hidden layers
delta_weights_h_o: change in weights from hidden to output layers
n_records: number of records
'''
self.weights_hidden_to_output += self.lr * delta_weights_h_o/n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h/n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features , self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs,self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
#########################################################
# Set your hyperparameters here
##########################################################
iterations = 2000
learning_rate = 1
hidden_nodes = 10
output_nodes = 1
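# A minimal smoke test sketch; the random features/targets below are made-up data,
# not part of the project:
if __name__ == '__main__':
    net = NeuralNetwork(3, hidden_nodes, output_nodes, learning_rate)
    dummy_features = np.random.rand(8, 3)   # 8 records, 3 input features
    dummy_targets = np.random.rand(8)       # one target per record
    net.train(dummy_features, dummy_targets)
    print(net.run(dummy_features).shape)    # expect (8, 1)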
|
[
"57154234+shokir85@users.noreply.github.com"
] |
57154234+shokir85@users.noreply.github.com
|
8e3f79372433e9b9c6eb7aa9fd960c6a2ff2d754
|
8edbd1bf5c7cd8e42926596445fd0470ad942795
|
/model/Biller.py
|
b47bcf3ffc29bd61cdc403e96543422fb264a47c
|
[] |
no_license
|
Luca-Guettinger/122-LB2
|
3d14f81f432b0569db63350dd80e96bae1432a8b
|
c89094d9af3516389b8ca44ed8ed963be842ffd8
|
refs/heads/master
| 2023-06-19T19:48:14.180694
| 2021-06-16T09:44:08
| 2021-06-16T09:44:08
| 377,446,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
class Biller:
def __init__(
self,
client_id,
name,
street,
zip,
location,
company_id,
mail,
):
self.client_id = client_id
self.name = name
self.street = street
self.zip = zip
self.location = location
self.company_id = company_id
self.mail = mail
def __repr__(self):
return f'Client_id: {self.client_id};\n' \
f'Name: {self.name};\n' \
f'Street: {self.street};\n' \
f'Zip: {self.zip};\n' \
f'Location: {self.location};\n' \
f'Company_id: {self.company_id};\n' \
f'Mail: {self.mail};'
    def __str__(self):
        # __str__ output is identical to __repr__, so delegate to it
        return self.__repr__()
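# Hypothetical usage; every field value below is a made-up example:
if __name__ == '__main__':
    biller = Biller(1, 'ACME AG', 'Main Street 1', '8000', 'Zurich',
                    'CHE-123.456.789', 'billing@acme.example')
    print(biller)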
|
[
"luca.guettinger@cmiag.ch"
] |
luca.guettinger@cmiag.ch
|
7ee6dfd65f6902adeb07ab3e77ae072964561905
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/iam/_admin_get_list_country_age_restriction_v3.py
|
74d2f077e0a0ec3dedd98d7a8e75cccd7aeadc41
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,385
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Iam Service (6.2.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.iam import (
admin_get_list_country_age_restriction_v3 as admin_get_list_country_age_restriction_v3_internal,
)
from accelbyte_py_sdk.api.iam.models import ModelCountryV3Response
from accelbyte_py_sdk.api.iam.models import RestErrorResponse
@click.command()
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_get_list_country_age_restriction_v3(
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(admin_get_list_country_age_restriction_v3_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = admin_get_list_country_age_restriction_v3_internal(
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"AdminGetListCountryAgeRestrictionV3 failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
admin_get_list_country_age_restriction_v3.operation_id = (
"AdminGetListCountryAgeRestrictionV3"
)
admin_get_list_country_age_restriction_v3.is_deprecated = False
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
a0a7a472bbf79b3afa0328c6e60d3805b1c3f573
|
d63633a0370581fa9a4a603c9c3a6e05677846bf
|
/data_loader_val.py
|
f234185bd2ba3d564f2ef3fd69cae6f710f8b013
|
[] |
no_license
|
aazabal1/image-captioning-project
|
92393cf5f5cf3201f3d7f514ddbba851a4289624
|
a92922be1c0223d647c4d5b4d69d00dd6498f138
|
refs/heads/master
| 2022-11-26T14:51:29.210475
| 2020-07-29T08:40:31
| 2020-07-29T08:40:31
| 283,444,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,496
|
py
|
import nltk
import os
import torch
import torch.utils.data as data
from vocabulary import Vocabulary
from PIL import Image
from pycocotools.coco import COCO
import numpy as np
from tqdm import tqdm
import random
import json
def get_loader_val(transform,
mode='val',
batch_size=1,
vocab_threshold=None,
vocab_file='./vocab.pkl',
start_word="<start>",
end_word="<end>",
unk_word="<unk>",
vocab_from_file=True,
num_workers=0,
cocoapi_loc='/opt'):
"""Returns the data loader.
Args:
transform: Image transform.
        mode: One of 'train', 'val' or 'test'.
batch_size: Batch size (if in testing mode, must have batch_size=1).
vocab_threshold: Minimum word count threshold.
vocab_file: File containing the vocabulary.
start_word: Special word denoting sentence start.
end_word: Special word denoting sentence end.
unk_word: Special word denoting unknown words.
vocab_from_file: If False, create vocab from scratch & override any existing vocab_file.
                         If True, load vocab from the existing vocab_file, if it exists.
num_workers: Number of subprocesses to use for data loading
cocoapi_loc: The location of the folder containing the COCO API: https://github.com/cocodataset/cocoapi
"""
    assert mode in ['train', 'test', 'val'], "mode must be one of 'train', 'val' or 'test'."
if vocab_from_file==False: assert mode=='train', "To generate vocab from captions file, must be in training mode (mode='train')."
# Based on mode (train, val, test), obtain img_folder and annotations_file.
if mode == 'train':
if vocab_from_file==True: assert os.path.exists(vocab_file), "vocab_file does not exist. Change vocab_from_file to False to create vocab_file."
img_folder = os.path.join(cocoapi_loc, 'cocoapi/images/train2014/')
annotations_file = os.path.join(cocoapi_loc, 'cocoapi/annotations/captions_train2014.json')
if mode == 'test':
assert batch_size==1, "Please change batch_size to 1 if testing your model."
assert os.path.exists(vocab_file), "Must first generate vocab.pkl from training data."
assert vocab_from_file==True, "Change vocab_from_file to True."
img_folder = os.path.join(cocoapi_loc, 'cocoapi/images/test2014/')
annotations_file = os.path.join(cocoapi_loc, 'cocoapi/annotations/image_info_test2014.json')
if mode == 'val':
assert os.path.exists(vocab_file), "Must first generate vocab.pkl from training data."
assert vocab_from_file==True, "Change vocab_from_file to True."
img_folder = os.path.join(cocoapi_loc, 'cocoapi/images/train2014/')
annotations_file = os.path.join(cocoapi_loc, 'cocoapi/annotations/captions_val2014.json')
# COCO caption dataset.
dataset = CoCoDataset(transform=transform,
mode=mode,
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_file=vocab_file,
start_word=start_word,
end_word=end_word,
unk_word=unk_word,
annotations_file=annotations_file,
vocab_from_file=vocab_from_file,
img_folder=img_folder)
if mode == 'train':
# Randomly sample a caption length, and sample indices with that length.
indices = dataset.get_train_indices()
# Create and assign a batch sampler to retrieve a batch with the sampled indices.
initial_sampler = data.sampler.SubsetRandomSampler(indices=indices)
# data loader for COCO dataset.
data_loader = data.DataLoader(dataset=dataset,
num_workers=num_workers,
batch_sampler=data.sampler.BatchSampler(sampler=initial_sampler,
batch_size=dataset.batch_size,
drop_last=False))
else:
data_loader = data.DataLoader(dataset=dataset,
batch_size=dataset.batch_size,
shuffle=True,
num_workers=num_workers)
return data_loader
class CoCoDataset(data.Dataset):
def __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word,
end_word, unk_word, annotations_file, vocab_from_file, img_folder):
self.transform = transform
self.mode = mode
self.batch_size = batch_size
self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,
end_word, unk_word, annotations_file, vocab_from_file)
self.img_folder = img_folder
if self.mode == 'train':
self.coco = COCO(annotations_file)
self.ids = list(self.coco.anns.keys())
print('Obtaining caption lengths...')
all_tokens = [nltk.tokenize.word_tokenize(str(self.coco.anns[self.ids[index]]['caption']).lower()) for index in tqdm(np.arange(len(self.ids)))]
self.caption_lengths = [len(token) for token in all_tokens]
else:
test_info = json.loads(open(annotations_file).read())
self.paths = [item['file_name'] for item in test_info['images']]
def __getitem__(self, index):
# obtain image and caption if in training mode
if self.mode == 'train':
ann_id = self.ids[index]
caption = self.coco.anns[ann_id]['caption']
img_id = self.coco.anns[ann_id]['image_id']
path = self.coco.loadImgs(img_id)[0]['file_name']
# Convert image to tensor and pre-process using transform
image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')
image = self.transform(image)
# Convert caption to tensor of word ids.
tokens = nltk.tokenize.word_tokenize(str(caption).lower())
caption = []
caption.append(self.vocab(self.vocab.start_word))
caption.extend([self.vocab(token) for token in tokens])
caption.append(self.vocab(self.vocab.end_word))
caption = torch.Tensor(caption).long()
# return pre-processed image and caption tensors
return image, caption
# obtain image if in test mode
else:
path = self.paths[index]
# Convert image to tensor and pre-process using transform
PIL_image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')
orig_image = np.array(PIL_image)
image = self.transform(PIL_image)
# return original image and pre-processed image tensor
return orig_image, image
def get_train_indices(self):
sel_length = np.random.choice(self.caption_lengths)
all_indices = np.where([self.caption_lengths[i] == sel_length for i in np.arange(len(self.caption_lengths))])[0]
indices = list(np.random.choice(all_indices, size=self.batch_size))
return indices
def __len__(self):
if self.mode == 'train':
return len(self.ids)
else:
return len(self.paths)
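# A minimal usage sketch; the torchvision transform below is an assumption and is
# not defined in this file:
# from torchvision import transforms
# transform_val = transforms.Compose([
#     transforms.Resize(256),
#     transforms.CenterCrop(224),
#     transforms.ToTensor(),
#     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
# val_loader = get_loader_val(transform=transform_val, mode='val', batch_size=1)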
|
[
"alvaro.azabal95@gmail.com"
] |
alvaro.azabal95@gmail.com
|
a6deb99f06f534c972c5a08262ff9b30c27b248f
|
980f502d17e27339a3906b0877abac7e03c34538
|
/flask/bin/python-config
|
28d131869f2d77b140c84709d5a277e6de27a6b9
|
[] |
no_license
|
ben-holland-young/learning-flask
|
25e5f437bcbc835120578e5380ac91947141a18f
|
0fe4a3b9c43e1615b080cae7eed31b350337b79f
|
refs/heads/master
| 2021-01-12T06:37:11.377838
| 2016-12-29T15:46:09
| 2016-12-29T15:46:09
| 77,396,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,360
|
#!/Users/benholland/Desktop/flask_proj/flask/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
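# Example invocation (the printed paths depend on the local interpreter install):
#   ./python-config --includes --ldflags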
|
[
"bbne45@icloud.com"
] |
bbne45@icloud.com
|
|
cd7ca0848790ab8b6fa8f0a2dca430f44d1e1aea
|
362224f8a23387e8b369b02a6ff8690c200a2bce
|
/django/django_orm/courses/courses_app/migrations/0004_auto_20210507_1257.py
|
44b3c5750ec2e09c2a574516f4e4ef23d781992c
|
[] |
no_license
|
Helenyixuanwang/python_stack
|
ac94c7c532655bf47592a8453738daac10f220ad
|
97fbc77e3971b5df1fe3e79652b294facf8d6cee
|
refs/heads/main
| 2023-06-11T02:17:27.277551
| 2021-06-21T17:01:09
| 2021-06-21T17:01:09
| 364,336,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
# Generated by Django 2.2 on 2021-05-07 19:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses_app', '0003_auto_20210507_1107'),
]
operations = [
migrations.RemoveField(
model_name='description',
name='course',
),
migrations.AddField(
model_name='course',
name='description',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='course', to='courses_app.Description'),
),
]
|
[
"wangyixuan@msn.com"
] |
wangyixuan@msn.com
|
ca35561181fc69e2265ec83fc9065543dcf1bc0c
|
0e23d367851e002e706c5fff996cbaf921ee78cb
|
/problem_algorithms/python3/Offer/53-I.py
|
d9c3b7c33b71653aaff71189bbcf5b82b90515e2
|
[] |
no_license
|
SpenceGuo/my_leetcode
|
56d62650e96d26ee665a75dcb81109a2ffd854ba
|
3d2c61ef245d83a818b05614ff1306a42823110f
|
refs/heads/main
| 2023-04-15T15:11:03.766150
| 2021-04-09T10:33:01
| 2021-04-09T10:33:01
| 307,364,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> int:
return nums.count(target)
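# Example (count occurrences of the target in a sorted array); the target 8
# appears twice here:
# print(Solution().search([5, 7, 7, 8, 8, 10], 8))  # -> 2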
|
[
"spence@sjtu.edu.cn"
] |
spence@sjtu.edu.cn
|
c9e0dbd320e21b8f48a680fafbcf65f86dc2b503
|
8541f96e5f48780050a7995ed735c70d4f3c2642
|
/dedupe/merge.py
|
1a8143458547b86d4f06c3387b56a2f067410d9b
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
influence-usa/lobbying_federal_domestic
|
ede64752f81b6a8496a9f6b31b3f2a6c3a5f485e
|
117a1991e01a203c2c181c1c6163b044c4d5c56f
|
refs/heads/master
| 2021-01-02T23:02:57.140303
| 2016-11-30T22:47:46
| 2016-11-30T22:47:46
| 17,500,504
| 7
| 2
| null | 2014-05-08T21:32:29
| 2014-03-07T02:39:15
|
Python
|
UTF-8
|
Python
| false
| false
| 608
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, pickle, sys
import simplejson as json
import dedupe.serializer as serializer
def merger():
def load(filename):
return json.load(open(filename,"r"),
cls=serializer.dedupe_decoder)
input1 = load(sys.argv[1])
input2 = load(sys.argv[2])
output = open(sys.argv[3],"w")
outdict = {"distinct": input1["distinct"]+input2["distinct"],
"match": input1["match"]+input2["match"]}
json.dump(outdict,output,default=serializer._to_json)
if __name__ == "__main__":
merger()
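# Hypothetical invocation (the file names are made up): merge two dedupe
# training files into one:
#   python merge.py labeled_a.json labeled_b.json merged.json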
|
[
"zmaril@sunlightfoundation.com"
] |
zmaril@sunlightfoundation.com
|
c26cdbd6de229d90cf71d67bf49f6a27ab68512f
|
2d0bada349646b801a69c542407279cc7bc25013
|
/src/vai_optimizer/tensorflow/tf_nndct/utils/__init__.py
|
3274ff4b5eecc566b7b6ab5c8e5fb76d2336b986
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Xilinx/Vitis-AI
|
31e664f7adff0958bb7d149883ab9c231efb3541
|
f74ddc6ed086ba949b791626638717e21505dba2
|
refs/heads/master
| 2023-08-31T02:44:51.029166
| 2023-07-27T06:50:28
| 2023-07-27T06:50:28
| 215,649,623
| 1,283
| 683
|
Apache-2.0
| 2023-08-17T09:24:55
| 2019-10-16T21:41:54
|
Python
|
UTF-8
|
Python
| false
| false
| 616
|
py
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nndct_shared.utils import registry
|
[
"do-not-reply@gitenterprise.xilinx.com"
] |
do-not-reply@gitenterprise.xilinx.com
|
46d653f0ab2161be1b5b3920db66f39cf1230998
|
5321a9ec4589860d0f3ea803b54ea318c3c1cddf
|
/workplace/通光检验/Moudle/PreConfiguration.py
|
d1cba28bb1a6a4e11bbf643de9e585ef159fa458
|
[] |
no_license
|
Aphranda/PythonCode
|
7859b40d394dd9568823f120bcdb417fb256f312
|
3e2dfd2254b4051b9b408d5c661c0083af6e4c94
|
refs/heads/master
| 2021-07-13T13:31:29.866007
| 2021-03-01T16:13:41
| 2021-03-01T16:13:41
| 238,137,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,805
|
py
|
import configparser
from ErrorConfiguration import Logits
Logits = Logits()
class Configuration(object):
def __init__(self):
self.config = configparser.ConfigParser()
def check_section(self, value):
try:
result = self.config.has_section(value[1])
return result
except Exception as e:
print(e)
def add_section(self, value):
try:
self.config.read(f"{value[0]}.ini", encoding="gbk")
if self.check_section(value):
                receive = f"[{value[1]}] already exists, please choose a different name"
else:
self.config.add_section(f"{value[1]}")
for i in value[2]:
self.config.set(value[1], i, value[2][i])
                receive = f"[{value[1]}] has been added to the configuration"
self.write_config(value)
return receive
except Exception as e:
print(e)
def delete_section(self, value):
try:
self.config.read(f"{value[0]}.ini", encoding="gbk")
self.config.remove_section(value[1])
            receive = f"[{value[1]}] has been deleted"
self.write_config(value)
return receive
except Exception as e:
print(e)
def modify_section(self, value):
try:
self.config.read(f"{value[0]}.ini", encoding="gbk")
if self.check_section(value):
for i in value[2]:
self.config.set(value[1], i, value[2][i])
                receive = f"[{value[1]}] updated successfully"
else:
                receive = f"[{value[1]}] does not exist, please choose a different configuration name"
self.write_config(value)
return receive
except Exception as e:
print(e)
def count_section(self, value):
try:
receive = {}
self.config.read(f"{value}.ini", encoding="gbk")
data = self.config.sections()
for i in data:
options = self.config.items(i)
receive[i] = options
return receive
except Exception as e:
print(e)
def search_option(self, value, section):
try:
self.config.read(f"{value}.ini", encoding="gbk")
data = self.config.items(section)
return data
except Exception as e:
print(e)
def write_config(self, value):
try:
self.config.write(open(f"{value[0]}.ini", "w"))
except Exception as e:
print(e)
# def main():
# data = ["111", "section", {"item01":"123", "item02":"456"}]
# data01 = ["111", "section", {"item01":"1", "item02":"4"}]
# con = Configuration()
# con.add_section(data)
# if __name__ == "__main__":
# main()
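# Hypothetical usage, matching the `value` convention used above
# (ini file stem, section name, dict of options); all values are made up:
# con = Configuration()
# print(con.add_section(["settings", "device01", {"baud": "9600", "port": "COM3"}]))
# print(con.search_option("settings", "device01"))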
|
[
"1044770247@qq.com"
] |
1044770247@qq.com
|
8842513fbd0945b4aed9b8dbd6d3dd035a7e02d5
|
6a6b56b86967e0ff0ab123b74e19b38af5fd74cc
|
/Bruteforce/bruteforcetest.py
|
9b96d8b07669b10de16fc2ba247803882629555a
|
[] |
no_license
|
Lucas-Jollie/Heuristieken
|
bcd3a7e3bd011f2051105859d7be0b04d01c6353
|
561493bd58eec8ea21f79897eb0fa9c788663191
|
refs/heads/master
| 2021-01-10T16:56:00.366848
| 2015-12-16T21:37:13
| 2015-12-16T21:37:13
| 45,121,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,671
|
py
|
# def sort(ins, i):
# if i == 0: return
# sort(ins, i-1)
# j = i
# while j > 0 and ins[j-1] > ins[j]:
# ins[j-1], ins[j] = ins[j], ins[j-1]
# j -= 1
# return ins
#
# a = [3,4,5,2,1]
# i = 1
# print sort(a,i)
# def sort2(ins):
# for i in range(1, len(ins)):
# j = i
# while j > 0 and ins[j-1] > ins[j]:
# ins[j-1], ins[j] = ins[j], ins[j-1]
# j -= 1
#
# print sort2(a)
# def combinations(iterable, r):  # van der sende
# # combinations('ABCD', 2) --> AB AC AD BC BD CD
# pool = tuple(iterable)
# n = len(pool)
# if r > n:
# return
# indices = range(r)
# yield tuple(pool[i] for i in indices)
# while True:
# for i in reversed(range(r)):
# if indices[i] != i + n - r:
# break
# else:
# return
# indices[i] += 1
# for j in range(i+1, r):
# indices[j] = indices[j-1] + 1
# yield tuple(pool[i] for i in indices)
#
# print combinations(range(4), 3)
import itertools
genomes = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25]
# test = list(itertools.combinations(range(1,26), 25))
# print test == genomes
#
# bla = list(range(1,10))
# print bla
import random
# trial = ' '
# while trial != genomes:
# trial = str(random.randint(1,25))
# print trial
# if trial == genomes:
# print('Forced match found:' + trial)
# input()
# bforce = []
# while bforce != genomes:
# while len(bforce) != 25:
# gene = random.randint(1,25)
# if gene not in bforce:
# bforce.append(gene)
# print bforce
import logging
import time
# BRUTE FORCE SHORT
# genomes = [1,2,3,4,5]
# random_list = []
loops = 0
# while random_list != genomes:
# random_list = random.sample(xrange(1,6),5)
# print random_list
# loops += 1
# if random_list == genomes:
# print 'SUCCCES, number of loops: ', loops
# logger
# create logger
# logger = logging.getLogger("logging_tryout2")
# logger.setLevel(logging.DEBUG)
#
# # create console handler and set level to debug
# ch = logging.StreamHandler()
# ch.setLevel(logging.DEBUG)
#
# # create formatter
# formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s")
#
# # add formatter to ch
# ch.setFormatter(formatter)
#
# # add ch to logger
# logger.addHandler(ch)
# BRUTE FORCE LONG
random_list = []
solved_list = []
# begin = logger.info("Started")
# while random_list != genomes:
# random_list = random.sample(xrange(1,26),25)
# print random_list
# loops += 1
# if random_list == genomes:
# print 'SUCCCES, number of loops: ', loops
# break
# # "application" code
# print 'FINISHED: ' , logger.info('done')
# Brute force test 2
# random_list = random.sample(xrange(1,26),25)
# for i in genomes:
# while genomes[i] != random_list[i]:
# if i == len(random_list):
# i = 0
# temp = random_list[i]
# random_list[i] = random_list[i+1]
# random_list[i+1] = temp
# i += 1
# print i
# Extreme brute force test -----------------------------------------------------
# makes a random list and shuffles it until it matches
from random import shuffle
random_list = random.sample(xrange(1,26),25)  # must cover the same range as genomes, or the loop never ends
start_time = time.time()
while random_list != genomes:
    shuffle(random_list)
    if random_list not in solved_list:          # remember each distinct permutation we have tried
        solved_list.append(list(random_list))   # store a copy, not a reference
    print random_list
    if random_list == genomes:
        elapsed_time = time.time() - start_time
        print elapsed_time, ' seconds'
# breadth first
breadth_list = []
|
[
"lucas_jollie@outlook.com"
] |
lucas_jollie@outlook.com
|
286164908e84c5092d847553da2a904fea968b9a
|
905068636fd1e771f4cd2f4b3b57b7e1fac2686b
|
/python_code
|
2290fdd41c713e8767e8255de77bdfea8cf4e9bb
|
[] |
no_license
|
KinloongTiao/deribitconnection
|
0595ba6dcd95880471729234423b11da858692bd
|
f402ee56105d1a3d087556ed7b4abfbc20cec58f
|
refs/heads/master
| 2020-03-26T13:03:36.172971
| 2018-08-16T01:30:52
| 2018-08-16T01:30:52
| 144,920,963
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,232
|
#!/usr/bin/python
# -*- coding:utf8 -*-
import psycopg2 as psy
from deribit_api import RestClient
import time
import ssl
from threading import Timer
from datetime import datetime
class Deribit:
context = ssl._create_unverified_context()
    # username: 3RkEWsw1wFTkG, password: ES2AVXHWB5QOOWBWMHYXHSJDUBOQAZSB
    # connect to the deribit database
    # test database: 172.19.2.151
    # Hong Kong database: 47.75.174.88
deribit = RestClient('3RkEWsw1wFTkG', 'ES2AVXHWB5QOOWBWMHYXHSJDUBOQAZSB', 'https://www.deribit.com')
def insert_orderbook(self, tim):
        # create the database connection
        try:
            conn = psy.connect(database="pdt", user="user1", password="pass1", host="172.31.93.18", port="5432")
            print('Opened database successfully')
        except:
            print('Failed to connect to the database')
            print('Retrying the connection')
            self.insert_orderbook(300)
        # create a cursor object, used to run PostgreSQL commands in the database
        try:
            cur = conn.cursor()
        except:
            print('Failed to create the cursor object')
            print('Retrying the connection')
            self.insert_orderbook(300)
        # call change_orderbook to normalize the order book format
        # fetch the instruments
        instrument = self.deribit.getinstruments()
        # collect the names of all instruments
        instrument_namelist = []
        for i in range(len(instrument)):
            instrument_namelist.append(instrument[i].get('instrumentName'))
        # fetch the order books
try:
for n in instrument_namelist:
orderbook, updatedtime = self.change_orderbook(self.deribit.getorderbook(n))
summary = self.deribit.getsummary(self.deribit.getorderbook(n)['instrument'])
# timestamp = ''.join(list(filter(lambda ch: ch in '0123456789', updatedtime)))
                # write the row into the database
cur.execute("""INSERT INTO deribit
(contract_type, data_type, exchange, order_meta_data, symbol, updated_time, summary_meta_data)
VALUES(%s, %s, %s, %s, %s, %s, %s);""",
(orderbook['contract_type'], orderbook['data_type'], orderbook['exchange'], str(orderbook),
orderbook['symbol'], updatedtime, str(summary)))
conn.commit()
del summary
del orderbook
del updatedtime
del instrument
del instrument_namelist
del cur
except:
            print('Failed to write the data.')
self.insert_orderbook(300)
else:
conn.close()
timenow = time.ctime(time.time())
print("loop at", timenow)
global t
t = Timer(tim, self.insert_orderbook, (tim,))
t.start()
def change_orderbook(self, orderbook):
        # derive the contract type
if len(orderbook['instrument'].split('-')) == 2:
contract = 'future'
else:
contract = 'option'
        # extract the symbol
symbol = orderbook['instrument']
        # convert the millisecond timestamp to a datetime string
timeStamp = orderbook['tstamp']
updatedTime = datetime.fromtimestamp(timeStamp / 1000.0).strftime('%Y-%m-%d %H:%M:%S.%f')
timestamp = ''.join(list(filter(lambda ch: ch in '0123456789', updatedTime)))
        # collect the ask levels
ask = []
for n in orderbook['asks']:
ask1 = [n['price'], n['quantity']]
ask.append(ask1)
        # collect the bid levels
bid = []
for n in orderbook['bids']:
bid1 = [n['price'], n['quantity']]
bid.append(bid1)
        # build the meta_data payload
meta_data = orderbook
        # build the order book return value
orderbook = {"contract_type": contract, "data_type": "contract", "exchange": "deribit", "updated_time": timestamp,
"symbol": symbol, "meta_data": meta_data}
return orderbook, updatedTime
    def close_database_connection(self, conn):
        # the connection is created locally in insert_orderbook, so it has to be
        # passed in; the original referenced a non-existent self.conn
        conn.close()
        print('Closed database successfully')
if __name__ == '__main__':
D = Deribit()
D.insert_orderbook(300)
|
[
"noreply@github.com"
] |
KinloongTiao.noreply@github.com
|
|
e823daeff120484c8d0edc305d5b065826e3dc10
|
5f2178281d7bab664df29177307bbac1f00106b7
|
/settings/adx_config.py
|
5ab7911467d1f11758e1e4391902da09f180d3e1
|
[] |
no_license
|
pablin87/rtb_exchange_sim
|
f5da98edd35f88b5e36d125cbd27c5c5ba76281a
|
c579e4a7e257cda496724720da0821c2dd01d3bd
|
refs/heads/master
| 2020-05-26T00:35:55.361745
| 2017-01-31T18:08:57
| 2017-01-31T18:08:57
| 18,372,564
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,903
|
py
|
conf = {
    # Define what resource of the exchange connector endpoint is hit
# in order to place a bid request.
'http_resource' : 'adx',
# Keys for price encryption in string hex
'encryption_key' : "b08c70cfbcb0eb6cab7e82c6b75da52072ae62b2bf4b990bb80a48d8141eec07",
'integrity_key' : "bf77ec55c30130c1d8cd1862ed2a4cd2c76ac33bc0c4ce8a3d3bbd3ad5687792",
'initialization_vector' : "4a3a6f470001e2407b8c4a605b9200f2",
# BE CAREFUL when using this parameter set to true. When it is set to
# true, the requester will ignore the event template parameters below
# and the EVENT_ENDPOINT from the general settings.
    # It will directly use the impression url that came in the html_snippet
# from the adx bid response.
'use_html_snippet' : True,
# Define the url where the notifications of impressions and clicks will
    # be sent. Because of this, the adm field is not taken into account. The ip and
    # port set here are replaced by global parameters of the endpoint
# settings.
'adserver_endpt_imp_tmpl' : "http://localhost:8080/events?ev=imp&aid=%%AUCTION_ID%%&apr=%%WINNING_PRICE%%&sptid=%%AUCTION_IMP_ID%%",
'adserver_endpt_click_tmpl' : "http://localhost:8080/events?ev=cli&aid=%%AUCTION_ID%%&sptid=%%AUCTION_IMP_ID%%",
# If set to true, instead of using the 'adserver_endpt_imp_tmpl' and
# 'adserver_endpt_click_tmpl', it will use the following heh templates
# in order to hit heh instead of the ad server connector directly.
'use_heh_endpoint' : False,
'heh_endpt_imp_tmpl' : "http://localhost:8080/impression/adx/%%AUCTION_ID%%/%%WINNING_PRICE%%?impid=%%AUCTION_IMP_ID%%",
'heh_endpt_click_tmpl' : "http://localhost:8080/click/adx/%%AUCTION_ID%%?impid=%%AUCTION_IMP_ID%%"
}
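
# A reference-only decryption sketch (Python 3), assuming the keys above follow
# Google's documented AdX winning-price scheme: web-safe base64 of
# iv(16) || price XOR HMAC-SHA1(enc_key, iv)[:8] || 4-byte integrity tag.
# This helper is illustrative and is not used anywhere by the simulator.
import base64
import hashlib
import hmac
import struct

def decrypt_winning_price(encoded, enc_key_hex, int_key_hex):
    enc_key = bytes.fromhex(enc_key_hex)
    int_key = bytes.fromhex(int_key_hex)
    # restore the base64 padding that the web-safe encoding strips
    blob = base64.urlsafe_b64decode(encoded + '=' * (-len(encoded) % 4))
    iv, cipher, sig = blob[:16], blob[16:24], blob[24:28]
    # the price is XOR-ed with the first 8 bytes of HMAC-SHA1(enc_key, iv)
    pad = hmac.new(enc_key, iv, hashlib.sha1).digest()[:8]
    price_bytes = bytes(a ^ b for a, b in zip(cipher, pad))
    if hmac.new(int_key, price_bytes + iv, hashlib.sha1).digest()[:4] != sig:
        raise ValueError('winning price failed the integrity check')
    return struct.unpack('>Q', price_bytes)[0]  # price in micros of the currency unit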
|
[
"pabloberrilio@gmail.com"
] |
pabloberrilio@gmail.com
|
da6cdfe9ab180d0e96dc02d884b46c6a2f8a3e88
|
6e8f2e28479566dbaa338300b2d61f784ff83f97
|
/.history/code/preprocess_20210421153926.py
|
328dd81758da2a656921e2d8033defa2f29c1d4b
|
[] |
no_license
|
eeng5/CV-final-project
|
55a7d736f75602858233ebc380c4e1d67ab2b866
|
580e28819560b86f6974959efb1d31ef138198fc
|
refs/heads/main
| 2023-04-09T21:28:21.531293
| 2021-04-21T19:57:22
| 2021-04-21T19:57:22
| 352,703,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,518
|
py
|
import os
import random
import numpy as np
import cv2                       # used by equalize_hist / augmentIMG below
import pandas as pd              # used by createTrain / createTest below
import matplotlib.pyplot as plt  # used by showImages below
from pathlib import Path         # used by the clean*Dirs methods below
from PIL import Image
import tensorflow as tf

import hyperparameters as hp
class Datasets():
""" Class for containing the training and test sets as well as
other useful data-related information. Contains the functions
for preprocessing.
"""
def __init__(self, data_path, task, aug, generate):
self.data_path = data_path
self.emotions = ['angry', 'happy', 'disgust', 'sad', 'neutral', 'surprise', 'fear']
self.emotion_dict = self.createEmotionDict()
self.task = task
self.aug = aug
if generate == 1:
if self.aug == '1':
self.createSimpleData()
else:
self.createComplexData()
# Dictionaries for (label index) <--> (class name)
self.idx_to_class = {}
self.class_to_idx = {}
# For storing list of classes
self.classes = [""] * hp.num_classes
# Setup data generators
self.train_data = self.get_data(
os.path.join(self.data_path, "train/"), False)
self.test_data = self.get_data(
os.path.join(self.data_path, "test/"), False)
def cleanTestDirs(self,):
for e in self.emotions:
pathy = self.data_path+'test/'+e
pics = 1
for f in Path(pathy).glob('*.jpg'):
try:
#f.unlink()
os.remove(f)
except OSError as e:
print("Error: %s : %s" % (f, e.strerror))
def cleanTrainDirs(self,):
for e in self.emotions:
pathy = self.data_path+'train/'+e
for f in Path(pathy).glob('*.jpg'):
try:
#f.unlink()
os.remove(f)
except OSError as e:
print("Error: %s : %s" % (f, e.strerror))
def cleanAll(self,):
self.cleanTestDirs()
self.cleanTrainDirs()
def createPixelArray(self, arr):
arr = list(map(int, arr.split()))
array = np.array(arr, dtype=np.uint8)
array = array.reshape((48, 48))
return array
def equalize_hist(self, img):
img = cv2.equalizeHist(img)
return img
def showImages(self, imgs):
_, axs = plt.subplots(1, len(imgs), figsize=(20, 20))
axs = axs.flatten()
for img, ax in zip(imgs, axs):
ax.imshow(img,cmap=plt.get_cmap('gray'))
plt.show()
def augmentIMG(self, img, task):
imgs = [img]
img1 = self.equalize_hist(img)
imgs.append(img1)
img2 = cv2.bilateralFilter(img1, d=9, sigmaColor=75, sigmaSpace=75)
imgs.append(img2)
if task == 3:
kernel = np.array([[-1.0, -1.0, -1.0],
[-1.0, 9, -1.0],
[-1.0, -1.0, -1.0]])
img3 = cv2.filter2D(img2,-1,kernel)
imgs.append(img3)
img4 = self.equalize_hist(img3)
imgs.append(img4)
img5 = cv2.bilateralFilter(img4, d=9, sigmaColor=100, sigmaSpace=100)
imgs.append(img5)
img6 = cv2.flip(img, 1) # flip horizontally
imgs.append(img6)
return imgs
def saveIMG(self, arr, num, folderLoc):
im = Image.fromarray(arr)
filename = folderLoc + "image_"+ num+".jpg"
im.save(filename)
def createTrain(self, task):
path1 = self.data_path+"train.csv"
df = pd.read_csv(path1) # CHANGE ME
        base_filename = self.data_path + "train/" # CHANGE ME
for index, row in df.iterrows():
px = row['pixels']
emot = int(row['emotion'])
emot_loc = self.emotion_dict[emot]
filename = base_filename + emot_loc
img = self.createPixelArray(px)
img_arr = self.augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
self.saveIMG(i, num, filename)
def createTest(self, task):
        path1 = self.data_path + "icml_face_data.csv"
df = pd.read_csv(path1) # CHANGE ME
        base_filename = self.data_path + "test/" # CHANGE ME
for index, row in df.iterrows():
if (row[' Usage'] == "PublicTest"):
px = row[' pixels']
emot = int(row['emotion'])
emot_loc = self.emotion_dict[emot]
filename = base_filename + emot_loc
img = self.createPixelArray(px)
img_arr = self.augmentIMG(img, task)
idx = 0
for i in img_arr:
num = str(index) + "_" + str(idx)
idx +=1
                    self.saveIMG(i, num, filename)
def createEmotionDict(self,):
emotionDict = {}
emotionDict[0]="angry/"
emotionDict[1]="disgust/"
emotionDict[2]="fear/"
emotionDict[3]="happy/"
emotionDict[4]="sad/"
emotionDict[5]="surprise/"
emotionDict[6] = "neutral/"
return emotionDict
def createSimpleData(self,):
self.cleanAll()
print("Cleaning done")
self.createTrain(1)
print("Training Data Generation done")
self.createTest(1)
print("Testing Data Generation done")
def createComplexData(self,):
self.cleanAll()
self.createTrain(3)
self.createTest(3)
def preprocess_fn(self, img):
""" Preprocess function for ImageDataGenerator. """
img = img / 255.
return img
def get_data(self, path, shuffle):
""" Returns an image data generator which can be iterated
through for images and corresponding class labels.
Arguments:
path - Filepath of the data being imported, such as
"../data/train" or "../data/test"
shuffle - Boolean value indicating whether the data should
be randomly shuffled.
Returns:
An iterable image-batch generator
"""
data_gen = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=self.preprocess_fn)
# VGG must take images of size 224x224
img_size = hp.img_size
classes_for_flow = None
# Make sure all data generators are aligned in label indices
if bool(self.idx_to_class):
classes_for_flow = self.classes
# Form image data generator from directory structure
data_gen = data_gen.flow_from_directory(
path,
target_size=(img_size, img_size),
class_mode='sparse',
batch_size=hp.batch_size,
shuffle=shuffle,
classes=classes_for_flow)
# Setup the dictionaries if not already done
if not bool(self.idx_to_class):
unordered_classes = []
for dir_name in os.listdir(path):
if os.path.isdir(os.path.join(path, dir_name)):
unordered_classes.append(dir_name)
for img_class in unordered_classes:
self.idx_to_class[data_gen.class_indices[img_class]] = img_class
self.class_to_idx[img_class] = int(data_gen.class_indices[img_class])
self.classes[int(data_gen.class_indices[img_class])] = img_class
return data_gen
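# A minimal usage sketch (the data path and flags below are assumptions, not
# from the original file); generate=0 skips the expensive regeneration step.
if __name__ == '__main__':
    datasets = Datasets(data_path='../data/', task=1, aug='1', generate=0)
    print(datasets.classes)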
|
[
"natalie_rshaidat@brown.edu"
] |
natalie_rshaidat@brown.edu
|
729109b79364e3f3d02773c0155d279de1716fcf
|
185d401e9e6ab7e2bb59ed022ede1b25ead7c8bc
|
/gen_data.py
|
2dfd7d978660eacfeacf488fea5143d690209a3e
|
[] |
no_license
|
ShanghaitechGeekPie/StartHack
|
6cd857b6f0b82a3295c8817704dfea9708a10135
|
9a166128ca3d80a9952c8fd3f1464625a83222d2
|
refs/heads/master
| 2023-03-24T06:06:30.156563
| 2021-03-21T07:42:30
| 2021-03-21T07:42:30
| 349,931,155
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
import h5py
import pandas as pd
import numpy as np
import os
import glob
def readH5(filename, save_dir_name):
f = h5py.File(filename, 'r')
def visitandsave(name):
ele = f[name]
if isinstance(ele, h5py.Dataset):
data = pd.DataFrame(np.array(ele))
data.to_pickle(os.path.join(save_dir_name, name.replace('/', '_')))
print(data.keys())
f.visit(visitandsave)
readH5('data/overhang/bae8f52c-407e-5f89-a8e3-61fcca51ee0a.h5','data/overhang_exported')
readH5('data/overhang/bae8f52c-407e-5f89-a8e3-61fcca51ee0a_raw.h5','data/overhang_exported')
readH5('data/traverse/e897d166-1618-5bd3-ba3a-cb7577c64647.h5','data/traverse_exported')
readH5('data/traverse/e897d166-1618-5bd3-ba3a-cb7577c64647_raw.h5','data/traverse_exported')
# filelist = ['data/overhang', 'data/traverse']
# for ff in filelist:
# for q in glob.glob(os.path.join(ff, '*.h5')):
# print(q)
# os.mkdir(ff + "_exported")
# readH5(q, ff + "_exported")
|
[
"leomundspedoske@gmail.com"
] |
leomundspedoske@gmail.com
|
5d0f4cb826491c6d60bd55e2f82ff687aad64d45
|
9acbf0279c38d11e89f16831e9c43b49badabb00
|
/IPTVPlayer/tsiplayer/addons/resources/sites/hds_stream.py
|
9875340d9a861185488cd59312f8b1383ca23e95
|
[] |
no_license
|
dgbkn/e2iPlayer
|
4f101b87bc5f67bf14690d012a62cbe8755ab82c
|
e5f413ea032eb9012569d9d149a368a3e73d9579
|
refs/heads/master
| 2023-05-15T05:01:18.204256
| 2021-06-06T18:03:42
| 2021-06-06T18:03:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,881
|
py
|
# -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
import re
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.gui.hoster import cHosterGui
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.gui.gui import cGui
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.inputParameterHandler import cInputParameterHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import progress
# from resources.lib.util import cUtil  # tools that may be useful
SITE_IDENTIFIER = 'hds_stream'
SITE_NAME = 'Hds-stream'
SITE_DESC = 'Film streaming HD complet en vf. Des films et séries pour les fan de streaming hds.'
URL_MAIN = 'https://hds.club/'
MOVIE_MOVIES = (True, 'showMenuMovies')
MOVIE_NEWS = (URL_MAIN + 'films/', 'showMovies')
MOVIE_GENRES = (URL_MAIN, 'showGenres')
MOVIE_EXCLUS = (URL_MAIN + 'tendance/', 'showMovies')
# MOVIE_ANNEES = (True, 'showMovieYears')
SERIE_SERIES = (True, 'showMenuTvShows')
SERIE_NEWS = (URL_MAIN + 'series/', 'showMovies')
URL_SEARCH = (URL_MAIN + '?s=', 'showMovies')
URL_SEARCH_MOVIES = (URL_SEARCH[0], 'showMovies')
URL_SEARCH_SERIES = (URL_SEARCH[0], 'showMovies')
FUNCTION_SEARCH = 'showMovies'
def load():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', URL_SEARCH[0])
oGui.addDir(SITE_IDENTIFIER, 'showSearch', 'Recherche', 'search.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_EXCLUS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_EXCLUS[1], 'Films (Populaire)', 'news.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)
# oOutputParameterHandler.addParameter('siteUrl', MOVIE_ANNEES[0])
# oGui.addDir(SITE_IDENTIFIER, MOVIE_ANNEES[1], 'Films (Par années)', 'annees.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMenuMovies():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', URL_SEARCH_MOVIES[0])
oGui.addDir(SITE_IDENTIFIER, URL_SEARCH_MOVIES[1], 'Recherche Films', 'search.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_EXCLUS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_EXCLUS[1], 'Films (Populaire)', 'news.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_NEWS[1], 'Films (Derniers ajouts)', 'news.png', oOutputParameterHandler)
# oOutputParameterHandler.addParameter('siteUrl', MOVIE_ANNEES[0])
# oGui.addDir(SITE_IDENTIFIER, MOVIE_ANNEES[1], 'Films (Par années)', 'annees.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', MOVIE_GENRES[0])
oGui.addDir(SITE_IDENTIFIER, MOVIE_GENRES[1], 'Films (Genres)', 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMenuTvShows():
oGui = cGui()
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', URL_SEARCH_SERIES[0])
oGui.addDir(SITE_IDENTIFIER, URL_SEARCH_SERIES[1], 'Recherche Séries ', 'search.png', oOutputParameterHandler)
oOutputParameterHandler.addParameter('siteUrl', SERIE_NEWS[0])
oGui.addDir(SITE_IDENTIFIER, SERIE_NEWS[1], 'Séries (Derniers ajouts)', 'news.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showSearch():
oGui = cGui()
sSearchText = oGui.showKeyBoard()
if (sSearchText != False):
sUrl = URL_SEARCH[0] + sSearchText.replace(' ', '+')
showMovies(sUrl)
oGui.setEndOfDirectory()
return
def showGenres():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
oParser = cParser()
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = 'menu-item-object-genres.+?<a href="([^"]+)".*?>(.+?)<'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
genres = set(aResult[1])
genres = sorted(genres, key=lambda genre: genre[1])
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in genres:
sUrl = aEntry[0]
sTitle = aEntry[1]
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sTitle, 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMovieYears():
oGui = cGui()
oParser = cParser()
oRequestHandler = cRequestHandler(URL_MAIN)
sHtmlContent = oRequestHandler.request()
sHtmlContent = oParser.abParse(sHtmlContent, '<h2>Films Par Années</h2>', '<h2>Films Par Genres</h2>')
sPattern = '<li><a href="([^"]+)">([^<]+)<'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
sUrl = aEntry[0]
sYear = aEntry[1]
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oGui.addDir(SITE_IDENTIFIER, 'showMovies', sYear, 'genres.png', oOutputParameterHandler)
oGui.setEndOfDirectory()
def showMovies(sSearch=''):
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
if sSearch:
sUrl = sSearch
sPattern = 'class="result-item">.*?href="([^"]+)"><img src="([^"]+).*?class="title"><a.*?>([^<]+).*?class="year">([^<]+).*?class="contenido"><p>([^<]+)</p>'
elif 'tendance/' in sUrl:
sPattern = 'id="post-[0-9].+?<img src="([^"]+)".+?class="data".+?href="([^"]+)">([^<]+).*?, ([^<]+)</span>'
else:
sPattern = 'id="post-[0-9].+?<img src="([^"]+)".+?class="data".+?href="([^"]+)">([^<]+).*?, ([^<]+)</span>.*?<div class="texto">([^<]*)</div>'
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == False):
oGui.addText(SITE_IDENTIFIER)
if (aResult[0] == True):
total = len(aResult[1])
progress_ = progress().VScreate(SITE_NAME)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
if sSearch:
sUrl2 = aEntry[0]
sThumb = aEntry[1]
sTitle = aEntry[2]
sYear = aEntry[3]
sDesc = aEntry[4]
else:
sThumb = aEntry[0]
if sThumb.startswith('//'):
sThumb = 'https:' + sThumb
sUrl2 = aEntry[1]
sTitle = aEntry[2]
sYear = aEntry[3]
if 'tendance/' in sUrl:
sDesc = ''
else:
sDesc = aEntry[4]
sDisplayTitle = ('%s (%s)') % (sTitle, sYear)
oOutputParameterHandler.addParameter('siteUrl', sUrl2)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('sDesc', sDesc)
oOutputParameterHandler.addParameter('sYear', sYear)
if '/series' in sUrl2:
oGui.addTV(SITE_IDENTIFIER, 'showSxE', sTitle, '', sThumb, sDesc, oOutputParameterHandler)
else:
oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sDisplayTitle, '', sThumb, sDesc, oOutputParameterHandler)
progress_.VSclose(progress_)
if not sSearch:
sNextPage, sPaging = __checkForNextPage(sHtmlContent)
if (sNextPage != False):
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sNextPage)
oGui.addNext(SITE_IDENTIFIER, 'showMovies', 'Page ' + sPaging, oOutputParameterHandler)
oGui.setEndOfDirectory()
def __checkForNextPage(sHtmlContent):
oParser = cParser()
sPattern = '>Page \d+ de (\d+)</span>.*?<span class="current.+?href=["\']([^"\']+/page/\d+)/["\'] class="inactive'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
sNumberMax = aResult[1][0][0]
sNextPage = aResult[1][0][1]
sNumberNext = re.search('page/([0-9]+)', sNextPage).group(1)
sPaging = sNumberNext + '/' + sNumberMax
return sNextPage, sPaging
return False, 'none'
def showSxE():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sThumb = oInputParameterHandler.getValue('sThumb')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sDesc = oInputParameterHandler.getValue('sDesc')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = '<span class=\'title\'>([^<]+)|class=\'numerando\'>\d - ([^<]+).+?class=\'episodiotitle\'><a href=\'([^\']+)\'>([^<]+)'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
if aEntry[0]:
oGui.addText(SITE_IDENTIFIER, '[COLOR crimson]' + aEntry[0] + '[/COLOR]')
else:
sUrl = aEntry[2]
EpTitle = aEntry[3]
Ep = aEntry[1]
sTitle = sMovieTitle + ' Episode' + Ep + EpTitle
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('sDesc', sDesc)
oGui.addEpisode(SITE_IDENTIFIER, 'showSeriesHosters', sTitle, '', sThumb, sDesc, oOutputParameterHandler)
oGui.setEndOfDirectory()
def showHosters():
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
sDesc = oInputParameterHandler.getValue('sDesc')
oRequest = cRequestHandler(sUrl)
sHtmlContent = oRequest.request()
sPattern = "class='dooplay_player_option' data-type='([^']+)' data-post='([^']+)' data-nume='([^']+)'"
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
url_main = GET_REAL_URLMAIN(sUrl)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
sUrl2 = url_main + 'wp-admin/admin-ajax.php'
dType = aEntry[0]
dPost = aEntry[1]
dNum = aEntry[2]
pdata = 'action=doo_player_ajax&post=' + dPost + '&nume=' + dNum + '&type=' + dType
sHost = 'Serveur ' + dNum
sTitle = ('%s [COLOR coral]%s[/COLOR]') % (sMovieTitle, sHost)
oOutputParameterHandler.addParameter('siteUrl', sUrl2)
oOutputParameterHandler.addParameter('referer', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sMovieTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('pdata', pdata)
oGui.addLink(SITE_IDENTIFIER, 'showLink', sTitle, sThumb, sDesc, oOutputParameterHandler)
oGui.setEndOfDirectory()
def showSeriesHosters():
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
sDesc = oInputParameterHandler.getValue('sDesc')
oRequest = cRequestHandler(sUrl)
sHtmlContent = oRequest.request()
sPattern = "id='player-option-.+?data-type='([^']+)'.+?data-post='([^']+)'.+?data-nume='([^']+)'.+?'server'>([^.|^<]+)"
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
url_main = GET_REAL_URLMAIN(sUrl)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
sUrl2 = url_main + 'wp-admin/admin-ajax.php'
dType = aEntry[0]
dPost = aEntry[1]
dNum = aEntry[2]
pdata = 'action=doo_player_ajax&post=' + dPost + '&nume=' + dNum + '&type=' + dType
if (aEntry[3]).startswith('Unknown'):
sHost = 'Serveur ' + dNum
else:
sHost = aEntry[3].capitalize()
sTitle = ('%s [COLOR coral]%s[/COLOR]') % (sMovieTitle, sHost)
oOutputParameterHandler.addParameter('siteUrl', sUrl2)
oOutputParameterHandler.addParameter('referer', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sMovieTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oOutputParameterHandler.addParameter('pdata', pdata)
oGui.addLink(SITE_IDENTIFIER, 'showLink', sTitle, sThumb, sDesc, oOutputParameterHandler)
oGui.setEndOfDirectory()
def showLink():
oGui = cGui()
oParser = cParser()
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
sMovieTitle = oInputParameterHandler.getValue('sMovieTitle')
sThumb = oInputParameterHandler.getValue('sThumb')
referer = oInputParameterHandler.getValue('referer')
pdata = oInputParameterHandler.getValue('pdata')
oRequest = cRequestHandler(sUrl)
oRequest.setRequestType(1)
oRequest.addHeaderEntry('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:70.0) Gecko/20100101 Firefox/70.0')
oRequest.addHeaderEntry('Referer', referer)
oRequest.addHeaderEntry('Accept', '*/*')
oRequest.addHeaderEntry('Accept-Language', 'fr-FR,fr;q=0.9,en-US;q=0.8,en;q=0.7')
oRequest.addHeaderEntry('Content-Type', 'application/x-www-form-urlencoded')
oRequest.addParametersLine(pdata)
sHtmlContent = oRequest.request().replace('\\', '')
sPattern = '(http[^"]+)'
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
for aEntry in aResult[1]:
sHosterUrl = aEntry
oHoster = cHosterGui().checkHoster(sHosterUrl)
if (oHoster != False):
oHoster.setDisplayName(sMovieTitle)
oHoster.setFileName(sMovieTitle)
cHosterGui().showHoster(oGui, oHoster, sHosterUrl, sThumb)
oGui.setEndOfDirectory()
def GET_REAL_URLMAIN(url):
sd = url.split('.')
sdm = URL_MAIN.split('.')
return URL_MAIN.replace(sdm[0], sd[0])
|
[
"echosmart76@gmail.com"
] |
echosmart76@gmail.com
|
1771d6d530278607822e15854013169af3b2e70d
|
f5183c0cd7fe2ef16edccb32e3d1aafe06c77c9c
|
/tests/08.py
|
c1f47c281c5a70b73605736c28b1e3ec7e89d7a7
|
[] |
no_license
|
sayan-rc/ants-vs-somebees
|
0b6452d21dd68301865efb54f368a52e9b8ff113
|
3948a834bff25879eb4904fe8ddc33e39855a16e
|
refs/heads/master
| 2022-10-26T07:21:15.297425
| 2020-06-12T21:45:50
| 2020-06-12T21:45:50
| 111,988,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,596
|
py
|
test = {
'name': 'Problem 8',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> # Testing TankAnt parameters
>>> TankAnt.food_cost
6
>>> TankAnt.damage
1
>>> TankAnt.container
True
>>> tank = TankAnt()
>>> tank.armor
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Testing TankAnt action
>>> tank = TankAnt()
>>> place = colony.places['tunnel_0_1']
>>> place.add_insect(tank)
>>> for _ in range(3):
... place.add_insect(Bee(3))
>>> tank.action(colony)
>>> [bee.armor for bee in place.bees]
[2, 2, 2]
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Testing TankAnt container methods
>>> tank = TankAnt()
>>> thrower = ThrowerAnt()
>>> place = colony.places['tunnel_0_1']
>>> place.add_insect(thrower)
>>> place.add_insect(tank)
>>> place.ant is tank
True
>>> bee = Bee(3)
>>> place.add_insect(bee)
>>> tank.action(colony) # Both ants attack bee
>>> bee.armor
1
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from ants import *
>>> hive, layout = Hive(make_test_assault_plan()), dry_layout
>>> dimensions = (1, 9)
>>> colony = AntColony(None, hive, ant_types(), layout, dimensions)
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> # Testing TankAnt action
>>> tank = TankAnt()
>>> place = colony.places['tunnel_0_1']
>>> place.add_insect(tank)
>>> for _ in range(3):
... place.add_insect(Bee(1))
>>> tank.action(colony)
>>> len(place.bees)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Placement of ants
>>> tank0 = TankAnt()
>>> tank1 = TankAnt()
>>> harvester0 = HarvesterAnt()
>>> harvester1 = HarvesterAnt()
>>> place0 = colony.places['tunnel_0_0']
>>> place1 = colony.places['tunnel_0_1']
>>> # Add tank before harvester
>>> place0.add_insect(tank0)
>>> place0.add_insect(harvester0)
>>> colony.food = 0
>>> tank0.action(colony)
>>> colony.food
1
>>> for ant in [TankAnt(), HarvesterAnt()]:
... try:
... place0.add_insect(ant)
... except AssertionError:
... assert place0.ant is tank0,\
... 'Tank was kicked out by {0}'.format(ant)
... assert tank0.ant is harvester0,\
... 'Contained ant was kicked out by {0}'.format(ant)
... continue
... assert False, 'No AssertionError raised when adding {0}'.format(ant)
>>> # Add harvester before tank
>>> place1.add_insect(harvester1)
>>> place1.add_insect(tank1)
>>> tank1.action(colony)
>>> colony.food
2
>>> for ant in [TankAnt(), HarvesterAnt()]:
... try:
... place1.add_insect(ant)
... except AssertionError:
... assert place1.ant is tank1,\
... 'Tank was kicked out by {0}'.format(ant)
... assert tank1.ant is harvester1,\
... 'Contained ant was kicked out by {0}'.format(ant)
... continue
... assert False, 'No AssertionError raised when adding {0}'.format(ant)
>>> tank0.reduce_armor(tank0.armor)
>>> place0.ant is harvester0
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Removing ants
>>> tank = TankAnt()
>>> test_ant = Ant()
>>> place = Place('Test')
>>> place.add_insect(tank)
>>> place.add_insect(test_ant)
>>> place.remove_insect(test_ant)
>>> tank.ant is None
True
>>> test_ant.place is None
True
>>> place.remove_insect(tank)
>>> place.ant is None
True
>>> tank.place is None
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> tank = TankAnt()
>>> place = Place('Test')
>>> place.add_insect(tank)
>>> tank.action(colony) # Action without contained ant should not error
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from ants import *
>>> hive, layout = Hive(make_test_assault_plan()), dry_layout
>>> dimensions = (1, 9)
>>> colony = AntColony(None, hive, ant_types(), layout, dimensions)
""",
'teardown': '',
'type': 'doctest'
}
]
}
|
[
"sayanrayc@gmail.com"
] |
sayanrayc@gmail.com
|
e21691cac022d724653dc85d124ac0682dbbfdfa
|
ce85013a3041cd90dc820aa3a577acd07c9178f1
|
/opgo/data/parallel_processor.py
|
aff1aff37700acf2651175227e56ae3a50b1ee45
|
[] |
no_license
|
pranav1416/opc_go_engine
|
c9cfa009571e1297f2b7dd9863efbfc086b727f6
|
34fc9946831144be90910edbeaddfe9352ea131c
|
refs/heads/master
| 2020-08-14T11:40:24.870873
| 2019-10-21T03:00:09
| 2019-10-21T03:00:09
| 215,161,532
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,435
|
py
|
from __future__ import print_function
from __future__ import absolute_import
import os
import glob
import os.path
import tarfile
import gzip
import shutil
import numpy as np
import multiprocessing
from os import sys
from keras.utils import to_categorical
from opgo.gosgf import Sgf_game
from opgo.goboard_fast import Board, GameState, Move
from opgo.gotypes import Player, Point
from opgo.data.index_processor import KGSIndex
from opgo.data.sampling import Sampler
from opgo.data.generator import DataGenerator
from opgo.encoders.base import get_encoder_by_name
def worker(jobinfo):
try:
clazz, encoder, zip_file, data_file_name, game_list = jobinfo
clazz(encoder=encoder).process_zip(zip_file, data_file_name, game_list)
except (KeyboardInterrupt, SystemExit):
raise Exception('>>> Exiting child process.')
class GoDataProcessor:
def __init__(self, encoder='simple', data_directory='data'):
self.encoder_string = encoder
self.encoder = get_encoder_by_name(encoder, 19)
self.data_dir = data_directory
# tag::load_generator[]
def load_go_data(self, data_type='train', num_samples=1000,
use_generator=False):
index = KGSIndex(data_directory=self.data_dir)
index.download_files()
sampler = Sampler(data_dir=self.data_dir)
data = sampler.draw_data(data_type, num_samples)
self.map_to_workers(data_type, data) # <1>
if use_generator:
generator = DataGenerator(self.data_dir, data)
return generator # <2>
else:
features_and_labels = self.consolidate_games(data_type, data)
return features_and_labels # <3>
# <1> Map workload to CPUs
# <2> Either return a Go data generator...
# <3> ... or return consolidated data as before.
# end::load_generator[]
def unzip_data(self, zip_file_name):
this_gz = gzip.open(self.data_dir + '/' + zip_file_name)
tar_file = zip_file_name[0:-3]
this_tar = open(self.data_dir + '/' + tar_file, 'wb')
shutil.copyfileobj(this_gz, this_tar)
this_tar.close()
return tar_file
def process_zip(self, zip_file_name, data_file_name, game_list):
tar_file = self.unzip_data(zip_file_name)
zip_file = tarfile.open(self.data_dir + '/' + tar_file)
name_list = zip_file.getnames()
total_examples = self.num_total_examples(zip_file, game_list, name_list)
shape = self.encoder.shape()
feature_shape = np.insert(shape, 0, np.asarray([total_examples]))
features = np.zeros(feature_shape)
labels = np.zeros((total_examples,))
counter = 0
for index in game_list:
name = name_list[index + 1]
if not name.endswith('.sgf'):
raise ValueError(name + ' is not a valid sgf')
sgf_content = zip_file.extractfile(name).read()
sgf = Sgf_game.from_string(sgf_content)
game_state, first_move_done = self.get_handicap(sgf)
for item in sgf.main_sequence_iter():
color, move_tuple = item.get_move()
point = None
if color is not None:
if move_tuple is not None:
row, col = move_tuple
point = Point(row + 1, col + 1)
move = Move.play(point)
else:
move = Move.pass_turn()
if first_move_done and point is not None:
features[counter] = self.encoder.encode(game_state)
labels[counter] = self.encoder.encode_point(point)
counter += 1
game_state = game_state.apply_move(move)
first_move_done = True
feature_file_base = self.data_dir + '/' + data_file_name + '_features_%d'
label_file_base = self.data_dir + '/' + data_file_name + '_labels_%d'
chunk = 0 # Due to files with large content, split up after chunksize
chunksize = 1024
        while features.shape[0] >= chunksize:
feature_file = feature_file_base % chunk
label_file = label_file_base % chunk
chunk += 1
current_features, features = features[:chunksize], features[chunksize:]
current_labels, labels = labels[:chunksize], labels[chunksize:]
np.save(feature_file, current_features)
np.save(label_file, current_labels)
def consolidate_games(self, name, samples):
files_needed = set(file_name for file_name, index in samples)
file_names = []
for zip_file_name in files_needed:
file_name = zip_file_name.replace('.tar.gz', '') + name
file_names.append(file_name)
feature_list = []
label_list = []
for file_name in file_names:
file_prefix = file_name.replace('.tar.gz', '')
base = self.data_dir + '/' + file_prefix + '_features_*.npy'
for feature_file in glob.glob(base):
label_file = feature_file.replace('features', 'labels')
x = np.load(feature_file)
y = np.load(label_file)
x = x.astype('float32')
y = to_categorical(y.astype(int), 19 * 19)
feature_list.append(x)
label_list.append(y)
features = np.concatenate(feature_list, axis=0)
labels = np.concatenate(label_list, axis=0)
feature_file = self.data_dir + '/' + name
label_file = self.data_dir + '/' + name
np.save(feature_file, features)
np.save(label_file, labels)
return features, labels
@staticmethod
def get_handicap(sgf): # Get handicap stones
go_board = Board(19, 19)
first_move_done = False
move = None
game_state = GameState.new_game(19)
if sgf.get_handicap() is not None and sgf.get_handicap() != 0:
for setup in sgf.get_root().get_setup_stones():
for move in setup:
row, col = move
go_board.place_stone(Player.black, Point(row + 1, col + 1)) # black gets handicap
first_move_done = True
game_state = GameState(go_board, Player.white, None, move)
return game_state, first_move_done
def map_to_workers(self, data_type, samples):
zip_names = set()
indices_by_zip_name = {}
for filename, index in samples:
zip_names.add(filename)
if filename not in indices_by_zip_name:
indices_by_zip_name[filename] = []
indices_by_zip_name[filename].append(index)
zips_to_process = []
for zip_name in zip_names:
base_name = zip_name.replace('.tar.gz', '')
data_file_name = base_name + data_type
if not os.path.isfile(self.data_dir + '/' + data_file_name):
zips_to_process.append((self.__class__, self.encoder_string, zip_name,
data_file_name, indices_by_zip_name[zip_name]))
cores = multiprocessing.cpu_count() # Determine number of CPU cores and split work load among them
pool = multiprocessing.Pool(processes=cores)
p = pool.map_async(worker, zips_to_process)
try:
_ = p.get()
except KeyboardInterrupt: # Caught keyboard interrupt, terminating workers
pool.terminate()
pool.join()
sys.exit(-1)
def num_total_examples(self, zip_file, game_list, name_list):
total_examples = 0
for index in game_list:
name = name_list[index + 1]
if name.endswith('.sgf'):
sgf_content = zip_file.extractfile(name).read()
sgf = Sgf_game.from_string(sgf_content)
game_state, first_move_done = self.get_handicap(sgf)
num_moves = 0
for item in sgf.main_sequence_iter():
color, move = item.get_move()
if color is not None:
if first_move_done:
num_moves += 1
first_move_done = True
total_examples = total_examples + num_moves
else:
raise ValueError(name + ' is not a valid sgf')
return total_examples
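# A minimal usage sketch (the sample count is an assumption): this downloads
# KGS archives on first run, maps the zip files across CPU cores, and
# consolidates the encoded positions into numpy arrays.
if __name__ == '__main__':
    processor = GoDataProcessor(encoder='simple')
    features, labels = processor.load_go_data('train', num_samples=100)
    print(features.shape, labels.shape)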
|
[
"pranav.borole@gmail.com"
] |
pranav.borole@gmail.com
|
0060ceda36e78cb67618690effb675d9a7041d38
|
28887a9bfd18f64a045c419037e9aba2cfd1bf50
|
/typed/for/sw.py
|
46ac83c715ce619f759e6f1d7cf5e92de57aa3e1
|
[] |
no_license
|
Coobeliues/pp2_py
|
18ba46bc82edc3f0c189f5e51ec6950af5269751
|
89cffd04499691e72e9dbbf9626a84ad27fddf79
|
refs/heads/main
| 2023-06-28T01:13:12.602998
| 2021-07-29T05:43:17
| 2021-07-29T05:43:17
| 380,136,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
a, b=int(input()), int(input())
while a!=b:
if a//2>=b and a%2==0:
a//=2
print(":2")
else:
a-=1
print('-1')
|
[
"70967044+Coobeliues@users.noreply.github.com"
] |
70967044+Coobeliues@users.noreply.github.com
|
4db1b6a570c6c09cb4abbde4d2d5b91439464880
|
86a563e6eff56cf96bfa3c6dcdfb706e68114530
|
/ch05/layer_naive.py
|
f4262f3f86dabc938f835840d0e9ffd66c61601c
|
[] |
no_license
|
mingrammer/deep-learning-from-scratch
|
be322ee82fe5c8d2bcde3ac3e7d35792c5314d1f
|
4e158aa3f773ac7c60585f3f1627e94dac7a05ba
|
refs/heads/master
| 2021-01-01T06:36:44.414300
| 2017-08-10T17:15:55
| 2017-08-10T17:15:55
| 97,468,838
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
class MulLayer:
def __init__(self):
self.x = None
self.y = None
def forward(self, x, y):
self.x = x
self.y = y
out = x * y
return out
def backward(self, dout):
dx = dout * self.y
dy = dout * self.x
return dx, dy
class AddLayer:
def __init__(self):
pass
def forward(self, x, y):
out = x + y
return out
def backward(self, dout):
dx = dout * 1
dy = dout * 1
return dx, dy
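# A minimal usage sketch (the apple/tax numbers are illustrative): forward
# through two MulLayers, then backpropagate a gradient of 1.
if __name__ == '__main__':
    apple, apple_num, tax = 100, 2, 1.1
    mul_apple_layer = MulLayer()
    mul_tax_layer = MulLayer()
    apple_price = mul_apple_layer.forward(apple, apple_num)      # 200
    price = mul_tax_layer.forward(apple_price, tax)              # ~220.0
    dapple_price, dtax = mul_tax_layer.backward(1)               # 1.1, 200
    dapple, dapple_num = mul_apple_layer.backward(dapple_price)  # 2.2, ~110.0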
|
[
"k239507@gmail.com"
] |
k239507@gmail.com
|
47ae5977cf0f5aac3687f9b062a692f09c6f5c78
|
3b034c36c383af330f765c063d13537692f7361c
|
/alien_invasion/settings.py
|
1f168a0249e7b01d1a64a5ae79a2142ac5389f6c
|
[] |
no_license
|
ctestabu/python
|
87ba39a588afb3a88d738fac7743015e02c9e1a7
|
3d92c4ea161b32060757e5781cfb5688ecfab9b9
|
refs/heads/master
| 2020-08-30T04:23:28.229889
| 2019-11-28T15:11:40
| 2019-11-28T15:11:40
| 218,261,986
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
class Settings():
# Class for settings
def __init__(self):
# settings init
self.screen_width = 1200
self.screen_height = 800
self.bg_color = (230, 230, 230)
# ship settings
# self.ship_speed_factor = 25
self.ship_limit = 3
# bullet
# self.bullet_speed_factor = 10
self.bullet_width = 300
self.bullet_height = 15
self.bullet_color = (255, 0, 0)
self.bullets_allowed = 10
# alien
# self.alien_speed_factor = 50
self.fleet_drop_speed = 10
        # fleet direction: 1 means left, -1 means right
self.fleet_direction = 1
# change settings
self.speedup_scale = 1.1
self.initialize_dynamic_settings()
def increase_speed(self):
self.ship_speed_factor *= self.speedup_scale
self.bullet_speed_factor *= self.speedup_scale
self.alien_speed_factor *= self.speedup_scale
def initialize_dynamic_settings(self):
self.ship_speed_factor = 10
self.bullet_speed_factor = 10
self.alien_speed_factor = 1
self.fleet_direction = 1
self.alien_points = 50
|
[
"noreply@github.com"
] |
ctestabu.noreply@github.com
|
e91eee6139b204ca38f3a5eead878773fbaea690
|
07ba86aee8532f6a5b2bfcdc30f8c2e8234751bc
|
/LeetCode/Problems/Python/347. Top K Frequent Elements.py
|
4c8aaff201e9876cbd75d872e2e2be462a8fc479
|
[] |
no_license
|
yukai-chiu/CodingPractice
|
1b7d4b0ffe9a8583091a8165976f71c7b3df41b5
|
3390a0ca4eceff72c69721bdc425a3099670faff
|
refs/heads/master
| 2022-12-14T05:35:38.336552
| 2020-08-30T22:56:20
| 2020-08-30T22:56:20
| 263,502,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,849
|
py
|
from collections import Counter
from typing import List
import heapq
import random

# Heap
# Time: O(n + k log n)
# Space: O(n)
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
n = Counter(nums)
n = [(-value, key) for key,value in n.items()]
heapq.heapify(n)
return [heapq.heappop(n)[1] for _ in range(k)]
#quick select
#Time: O(n) average, worst O(n^2)
#Space: O(n) for hash map and unique elements array
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
count = Counter(nums)
unique = list(count.keys())
def partition(l, r, pivot):
pivot_freq = count[unique[pivot]]
#move pivot to end
unique[pivot], unique[r] = unique[r], unique[pivot]
#move all larger to left
store_idx = l
for i in range(l,r):
if count[unique[i]] > pivot_freq:
unique[store_idx], unique[i] = unique[i], unique[store_idx]
store_idx+=1
#put pivot to the correct idx
unique[store_idx], unique[r] = unique[r], unique[store_idx]
return store_idx
def quickSelect(left, right, k):
#base case: only one element
if left == right:
return
#select a pivot
pivot_idx = random.randint(left, right)
#find the pivot position in a sorted list
pivot_idx= partition(left, right, pivot_idx)
if pivot_idx == k:
return
elif pivot_idx > k:
quickSelect(left, pivot_idx-1, k)
else:
quickSelect(pivot_idx+1, right, k)
n = len(unique)
quickSelect(0, n-1, k)
return unique[:k]
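# A minimal usage sketch (the sample input is illustrative); either Solution
# class above returns the two most frequent values.
if __name__ == '__main__':
    print(Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2))  # [1, 2] (order may vary)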
|
[
"48868656+kevinyukaic@users.noreply.github.com"
] |
48868656+kevinyukaic@users.noreply.github.com
|
a6d693cdcbe37656bb5535ac4a05fe5cc9372d37
|
41d0bd94bbaec0299e6be6fc56a726545c1894cb
|
/sources/nytimes/__init__.py
|
6b17755df17000bfee582d94d3ef7ceaa7c83853
|
[
"Unlicense"
] |
permissive
|
AB9IL/stream-sources
|
f86eec0552d0992e7ee02a39076e0a1042ebfe27
|
ede8bd3ad7d51723d489192d0a6c5b2ea31ffe56
|
refs/heads/master
| 2023-02-03T23:09:25.582012
| 2020-12-23T08:12:42
| 2020-12-23T08:12:42
| 319,333,418
| 0
| 0
|
Unlicense
| 2020-12-07T13:47:06
| 2020-12-07T13:47:05
| null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
from sources.generic import FeedSource
class Source(FeedSource):
SOURCE = {
'name': 'The New York Times',
'url': 'https://www.nytimes.com',
}
FEED_URL = 'http://rss.nytimes.com/services/xml/rss/nyt/HomePage.xml'
|
[
"davy.wybiral@gmail.com"
] |
davy.wybiral@gmail.com
|
c21c0892e84b6e23f693f6f8b3b2a019f0daca3d
|
bbe9000b56086b7f8f52fcc3b49702c9045166a4
|
/timestamp_graphs.py
|
06dcc0944264bea2d4ed7625deca629e2f299cdf
|
[] |
no_license
|
galexa05/Thesis-Subject---Event-Detection-in-micro-blocks
|
7abd124f1f1d34b1d9062c96903ef56acb59a7a0
|
dd25f7ea27ab9773106039a7624875c9b86e1479
|
refs/heads/master
| 2022-08-30T00:57:49.142048
| 2020-05-22T17:00:03
| 2020-05-22T17:00:03
| 235,589,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,475
|
py
|
import datetime
from time import strptime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def getList(df,name):
data_sorted = df.sort_values(by=['date'],inplace=False)
data_sorted = data_sorted.reset_index(drop=True)
list_date = []
for temp in data_sorted[name]:
list_date.append(temp)
list_date = list(dict.fromkeys(list_date))
list_date = sorted(list_date)
return list_date
def create_date(text):
temp = text.split(" ")
x = str(datetime.datetime(int(temp[5]),int(strptime(temp[1],'%b').tm_mon),int(temp[2]))).split()[0]
return x
def createTimestamps(df):
df['date'] = df['created_at'].apply(lambda x: create_date(x))
df['time'] = np.array([tweet.split()[3] for tweet in df["created_at"]])
df['Datetime']= pd.to_datetime(df['date'].apply(str)+' '+df['time'].apply(lambda x: x.split(':')[0]) + df['time'].apply(lambda x: x.split(':')[1]))
df['DateHour'] = pd.to_datetime(df['date'].apply(str)+' '+df['time'].apply(lambda x: x.split(':')[0])+':00')
df['Date_Ten_Minutes'] = pd.to_datetime(df['date'].apply(str)+' '+df['time'].apply(lambda x: x.split(':')[0])+':'+df["time"].apply(lambda x: x.split(":")[1][0]+'0'))
return df
def monitorGraphPerTenMinutes(df_relevant,df_irrelevant):
    # plot relevant vs. irrelevant tweet counts per ten-minute bucket
dfchange = df_irrelevant.loc[df_irrelevant['date'] != -1]
ts = dfchange.set_index('Date_Ten_Minutes')
vc = ts.groupby('Date_Ten_Minutes').count()
col = ['id']
vc2 = vc[col]
vc3 = vc2.copy()
dfchange1 = df_relevant.loc[df_relevant["date"]!=-1]
rs = dfchange1.set_index('Date_Ten_Minutes')
rc = rs.groupby('Date_Ten_Minutes').count()
col = ['id']
rc2 = rc[col]
rc3 = rc2.copy()
vc3.rename(columns={'id':'Irrelevant Tweets'},inplace=True)
rc3.rename(columns={'id':'Relevant Tweets'},inplace=True)
ax = vc3.plot()
rc3.plot(ax = ax)
def monitorGraphPerMinute(df_relevant,df_irrelevant):
    # plot relevant vs. noise tweet counts per minute
dfchange = df_irrelevant.loc[df_irrelevant['date'] != -1]
ts = dfchange.set_index('Datetime')
vc = ts.groupby('Datetime').count()
col = ['id']
vc2 = vc[col]
vc3 = vc2.copy()
dfchange1 = df_relevant.loc[df_relevant["date"]!=-1]
rs = dfchange1.set_index('Datetime')
rc = rs.groupby('Datetime').count()
col1 = ['id']
rc2 = rc[col1]
rc3 = rc2.copy()
vc3.rename(columns={'id':'Noise Tweets'},inplace=True)
rc3.rename(columns={'id':'Relevant Tweets'},inplace=True)
ax = vc3.plot()
rc3.plot(ax = ax)
def monitorGraphPerHour(df_relevant,df_irrelevant):
    # plot relevant vs. noise tweet counts per hour
dfchange = df_irrelevant.loc[df_irrelevant['date'] != -1]
ts = dfchange.set_index('DateHour')
vc = ts.groupby('DateHour').count()
col = ['id']
vc2 = vc[col]
vc3 = vc2.copy()
dfchange1 = df_relevant.loc[df_relevant["date"]!=-1]
rs = dfchange1.set_index('DateHour')
rc = rs.groupby('DateHour').count()
col1 = ['id']
rc2 = rc[col1]
rc3 = rc2.copy()
vc3.rename(columns={'id':'Noise Tweets'},inplace=True)
rc3.rename(columns={'id':'Relevant Tweets'},inplace=True)
ax = vc3.plot()
rc3.plot(ax = ax)
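# A minimal sketch (the two rows below are fabricated for illustration):
# derive the bucketed timestamp columns from raw Twitter 'created_at' strings.
if __name__ == '__main__':
    df = pd.DataFrame({
        'id': [1, 2],
        'created_at': ['Mon Jan 20 10:03:00 +0000 2020',
                       'Mon Jan 20 10:14:00 +0000 2020'],
    })
    df = createTimestamps(df)
    print(df[['Datetime', 'DateHour', 'Date_Ten_Minutes']])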
|
[
"noreply@github.com"
] |
galexa05.noreply@github.com
|
eb61ed6b4228b822dcbf63b841b03b4644006653
|
1f52594461c78eda903f76e03aaf966ad15afaec
|
/iftest/condition03.py
|
aa16e57b2b8b347575c8db59d9710aa3bcb31e64
|
[] |
no_license
|
TDOGX86/mycode
|
9b3c3a8a9a3c796ec00d1698f9bb20a0364a333e
|
b0d85b89636d88447f123a5a71294ccb080f0f55
|
refs/heads/main
| 2023-04-06T20:12:42.999480
| 2021-04-08T16:44:11
| 2021-04-08T16:44:11
| 352,703,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
#!/usr/bin/env python3
hostname = input("What value should we set for hostname?")
## Notice how the next line has changed
## here we use the str.lower() method to return a lowercase string
if hostname.lower() == "mtg":
print("The hostname was found to be mtg")
print("hostname matches expected config")
elif hostname.lower() == "":
print("You didn't type anything in, dummy")
## Always print out to the user
print("Exiting the script")
##if hostname.lower() == "":
## print("You didn't type anything in, dummy")
|
[
"tamarris.jenkins@gmail.com"
] |
tamarris.jenkins@gmail.com
|
0684829db7c9258607788fa7ea09f77338d7d555
|
029cb41afd2b757fb9b80162259dc29d090e1d48
|
/Chapter_3_Working_with_the_Command_Line/Creating_Command_Line_Tools/implementing_plugins/plugin_a.py
|
c2cb31a42779e5ba3254a6cfaab9839f6a1bebe4
|
[] |
no_license
|
mdorante/Python_For_Devops
|
983ff667b20bbcba652930bcbed75f34fa1212de
|
570d80586e1c6ba26951d02d68daf84bb3f086a6
|
refs/heads/master
| 2023-02-18T00:12:49.423777
| 2021-01-18T12:38:55
| 2021-01-18T12:38:55
| 269,327,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
'''
simple plugin for plugin system example
'''
def run():
print('Plugin A doing some stuff!')
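# A minimal loader sketch (module discovery is simplified and the names are
# assumptions): import each plugin module and invoke its run() hook.
if __name__ == '__main__':
    import importlib
    for name in ('plugin_a',):
        importlib.import_module(name).run()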
|
[
"mdorante10@gmail.com"
] |
mdorante10@gmail.com
|
23531571afd810ca8392d8346a552f92c595e5c2
|
51c3fc9ebdf2a0b990e645810cbf8fce2f34c2b3
|
/pricelist_test/__manifest__.py
|
3c4b857c77cfacc73b51f9296c208edcabb2ae2a
|
[] |
no_license
|
boming0529/pricelist_demo
|
6e985fc39e1273af8f6c2fa087a44b2a97652357
|
fd60f5b7057924aca5ca1b0715e6bc419e65f9af
|
refs/heads/master
| 2020-05-07T21:50:53.067981
| 2019-04-12T02:59:44
| 2019-04-12T02:59:44
| 180,919,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
# -*- coding: utf-8 -*-
{
'name': "pricelist_test",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/12.0/odoo/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': [
'base',
'product',
'sale',
],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'data/product.xml',
'views/views.xml',
'views/templates.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
|
[
"boming0529@gmail.com"
] |
boming0529@gmail.com
|
8922342ba5b5f8e04704a9baea1599907bccb947
|
91c6806fc5ae20dcc26308b58cd5ca52b22bc16d
|
/payment.py
|
f516bd27571a583cc697c2987987a408a1ea88f4
|
[] |
no_license
|
samarthgupta29/Python-Based-Wallet
|
c2e330c3ef322e71a63cfe5f517fdf33c4f87ecf
|
1ea3c92385f9b66fa4f69395c2a4a3ee65f04229
|
refs/heads/master
| 2021-05-07T18:35:42.445053
| 2017-12-13T04:55:07
| 2017-12-13T04:55:07
| 108,803,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,845
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'payment.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
import sqlite3
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow3(object):
def paymentreg(self):
semail = self.lineEdit.text()
remail = self.lineEdit_2.text()
amt = self.lineEdit_3.text()
amt2 = int(amt)
connection = sqlite3.connect("login.db")
connection.execute("INSERT INTO TRANSACTIONTABLE3 VALUES(?,?,?)", (semail, remail, amt2))
connection.execute("UPDATE USERS2 SET BALANCE=BALANCE-(?) WHERE EMAIL=(?)", (amt2, semail))
connection.execute("UPDATE USERS2 SET BALANCE=BALANCE+(?) WHERE EMAIL=(?)", (amt2, remail))
connection.commit()
connection.close()
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(549, 425)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 80, 101, 16))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(10, 150, 81, 16))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(10, 210, 81, 16))
self.label_3.setObjectName("label_3")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(140, 80, 113, 20))
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(140, 150, 113, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(140, 210, 113, 20))
self.lineEdit_3.setObjectName("lineEdit_3")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(250, 280, 75, 23))
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(self.paymentreg)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(460, 340, 75, 23))
self.pushButton_2.setObjectName("pushButton_2")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 549, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Sender\'s E-Mail"))
self.label_2.setText(_translate("MainWindow", "Reciever\'s E-Mail"))
self.label_3.setText(_translate("MainWindow", "Amount"))
self.pushButton.setText(_translate("MainWindow", "Pay"))
self.pushButton_2.setText(_translate("MainWindow", "Home"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow3()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"samarthgupta29@gmail.com"
] |
samarthgupta29@gmail.com
|
42edb48f292eb66a4d4f302b8e4a79b7eac25f28
|
7834fd5292ab1678c129f92018cf9617f15c19dd
|
/hatboard/__init__.py
|
3df786d07a9e49308d01d07659cb7fd796bd50fe
|
[] |
no_license
|
3453-315h/HatBoard
|
f642ef9cf453dbc5bd8a52a1f43a198c9157b631
|
643df6535399e105c1c2a98e476f168bf122337b
|
refs/heads/main
| 2023-06-27T16:04:35.041007
| 2021-08-02T13:37:06
| 2021-08-02T13:37:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
|
[
"noreply@github.com"
] |
3453-315h.noreply@github.com
|
e28e29aba2f8f7f326b946462b4c962e23748f95
|
688e5a53f5f8a6760e499fc24ab0cbb20edbb4cc
|
/final/src/lidar_test.py
|
f82be0985e7f84875ac700dd4d2665fe38879d62
|
[] |
no_license
|
gravity101/Final_Project_TeamC
|
2f429065d54b1f6766da7aa6975e9f75c007ae66
|
5ffbc1397061cd1d3afe23e347cd6225408d560e
|
refs/heads/main
| 2023-08-30T19:01:43.130437
| 2021-09-24T07:50:56
| 2021-09-24T07:50:56
| 400,028,091
| 0
| 0
| null | 2021-08-30T02:40:21
| 2021-08-26T03:26:39
|
Lua
|
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import rospy
import time
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Int32MultiArray
from xycar_msgs.msg import xycar_motor
class test() :
def __init__(self):
rospy.Subscriber("/scan", LaserScan, self.callback_lidar, queue_size=1)
self.pub = rospy.Publisher('xycar_motor', xycar_motor, queue_size=1)
self.msg = xycar_motor()
self.lidar_points = None
self.fr_list = None
self.lidar_count = 0
self.inter_flag = False
def callback_lidar(self, data):
self.lidar_points = data.ranges
def Detect(self):
print("==================================================================================")
if self.lidar_points == None:
return
self.fr_list = self.lidar_points[-75:-60]
self.fr_list = list(self.fr_list)
for i in range(len(self.fr_list)):
print("index: ", i+430, ", lidar: ", self.fr_list[i])
# self.fr_list[:] = [value for value in self.fr_list if value != 0]
|
[
"khoram@ajou.ac.kr"
] |
khoram@ajou.ac.kr
|
9f174734342379109b5d2e786c230191b683d8dd
|
d49f1a4b64c33c024001306f1337c39ab09d88ee
|
/ncr/db.py
|
a9d50fc5e931e61fb52c5a4de8575837020062f5
|
[] |
no_license
|
nathanjordan/ncr
|
d1f299c8fc03db2bfa6c8b63d8ecb5fa1eb3f6ea
|
b8e7b94579a4c5721f5850d53fc91e0ebe592ad2
|
refs/heads/master
| 2020-12-26T02:23:01.564803
| 2014-06-15T03:41:05
| 2014-06-15T03:41:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
from __future__ import unicode_literals
from mongokit import Document, Connection
import datetime
from crypt import Crypt
class Controller(object):
def __init__(self):
self.conn = Connection()
self.conn.register([User, Session, Neuron])
# 12 hours?
self.conn['ncr']['session'].ensure_index('created', expireAfterSeconds=43200)
    def login(self, username, password):
        # Documents registered with mongokit are looked up through the
        # connection, not via classmethods on the Document subclasses.
        u = self.conn.User.find_one({"username": username})
        if u is None:
            return None
        hashed_pass = Crypt.hash_pw(password, u['salt'])
        if hashed_pass != u['password']:
            return None
        ses = self.conn.Session.find_one({"username": username})
        if ses:
            return ses['token']
        token = Crypt.gen_token()
        created = datetime.datetime.now()
        ses = self.conn.Session({"username": username, "token": token,
                                 "created": created})
        ses.save()
        return token
    def verify_token(self, token):
        ses = self.conn.Session.find_one({"token": token})
        return True if ses else False
class User(Document):
__database__ = "ncr"
__collection__ = "user"
structure = {
"username": str,
"password": str,
"salt": str,
"first_name": str,
"last_name": str,
"institution": str,
"email": str
}
class Session(Document):
__database__ = "ncr"
__collection__ = "session"
structure = {
"username": str,
"token": str,
"created": datetime.Datetime
}
class Entity(Document):
__database__ = "ncr"
structure = {
"_id": str,
"entity_type": str,
"entity_name": str,
"description": str,
"author": str,
"author_email": str,
"specification": dict
}
class Neuron(Entity):
__collection__ = "neuron"
|
[
"natedagreat27274@gmail.com"
] |
natedagreat27274@gmail.com
|
33ce6b4e0bf8cebf3b43a04e20f421954223437d
|
0fdabf061564cb809889eead9dce3efea16bf7b5
|
/test_regsiter.py
|
cde3f29bfd5bf390533b53317720ea21d8513d9c
|
[] |
no_license
|
huangyanping128/-
|
6662b56567fb6f4392aceaff1f3d44b04523fa5f
|
7aa315e18f821f7b083f735c953ebbdc83474319
|
refs/heads/master
| 2021-01-02T16:52:35.652646
| 2020-02-11T08:13:25
| 2020-02-11T08:13:25
| 239,710,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
import requests
from common.connect_mysql import execute_sql
import pytest
s = requests.session()
@pytest.fixture(scope="function")
def delete_user():
"""先删除已经注册的用户数据"""
delete_sql = 'DELETE from auth_user where username="小晓的筱";'
execute_sql(delete_sql)
def test_regsiter(delete_user):
"""注册登录接口"""
url = 'http://49.235.92.12:9000/api/v1/register'
body = {
"username": "小晓的筱",
"password": "123456",
"mail": "xiaoxiao@qq.com"
}
r = s.post(url, json=body)
# print(r.text)
print(r.json())
assert r.json()['msg'] == '注册成功!'
|
[
"wanglei_g@aspirecn.com"
] |
wanglei_g@aspirecn.com
|
e05c8b6335f2a71af28e423b1002904eee1db3b1
|
aad4370c12f4d7ca13b348eec5131840695956f8
|
/margin/ArcMarginProduct.py
|
8e233348fbc9a4c1573ff875a8f5184163b41225
|
[] |
no_license
|
onurcaydere/ArcFaceTrain
|
732051065135ced45746b26dc98a7f0d8f4bd764
|
2a49ad52d1f7596aefa68d952e1124ef7d187c3d
|
refs/heads/main
| 2023-08-21T21:57:22.326706
| 2021-10-13T11:35:16
| 2021-10-13T11:35:16
| 416,712,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
class ArcMarginProduct(nn.Module):
def __init__(self, in_feature=128, out_feature=10575, s=32.0, m=0.50, easy_margin=False):
super(ArcMarginProduct, self).__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.s = s
self.m = m
self.weight = Parameter(torch.Tensor(out_feature, in_feature))
nn.init.xavier_uniform_(self.weight)
self.easy_margin = easy_margin
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
        # make cos(theta + m) monotonically decreasing for theta in [0°, 180°]
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def forward(self, x, label):
# cos(theta)
cosine = F.linear(F.normalize(x), F.normalize(self.weight))
# cos(theta + m)
sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
phi = cosine * self.cos_m - sine * self.sin_m
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)
#one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu')
one_hot = torch.zeros_like(cosine)
one_hot.scatter_(1, label.view(-1, 1), 1)
output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
output = output * self.s
return output
if __name__ == '__main__':
pass
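    # A minimal smoke test (the shapes below are illustrative, not from the
    # original file): random embeddings and integer labels; expect (4, 10).
    margin = ArcMarginProduct(in_feature=128, out_feature=10, s=32.0, m=0.5)
    x = torch.randn(4, 128)
    label = torch.randint(0, 10, (4,))
    print(margin(x, label).shape)  # torch.Size([4, 10])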
|
[
"noreply@github.com"
] |
onurcaydere.noreply@github.com
|
56be18a63c0d30a9e4ba2dae5d07aad985c61656
|
40c4b8b618d67fc48b862809b6e2835bb7cf76eb
|
/leetcode/65.py
|
e19e991fccbe8881504df78c7650cbe96eaad2ad
|
[] |
no_license
|
berquist/ctci
|
9fa08ac724990eee32f8ad7cffc3517491570d41
|
f0a69d3e4dd1b73a43c96dcb7a9c7b9955c04c39
|
refs/heads/master
| 2022-08-18T01:53:16.994300
| 2022-08-15T00:36:07
| 2022-08-15T00:36:07
| 120,108,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
import re

class Solution(object):
    def isNumber(self, s):
        """
        :type s: str
        :rtype: bool
        """
        # Accept optional surrounding whitespace, an optional sign, digits
        # with an optional decimal point (or a bare decimal like ".5"), and
        # an optional signed exponent.
        pattern = r"^\s*[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?\s*$"
        return re.match(pattern, s) is not None
# assert Solution().isNumber("0") == True
# assert Solution().isNumber(" 0.1 ") == True
# assert Solution().isNumber("abc") == False
# assert Solution().isNumber("1 a") == False
# assert Solution().isNumber("2e10") == True
|
[
"eric.berquist@gmail.com"
] |
eric.berquist@gmail.com
|
57058094d1fac2a6430800baef3bfb044fb40353
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/plugin/core/searchtext/iterators/InstructionSearchAddressIterator.pyi
|
714c2a10a62f4d6f5eb1d692fba25f1bfdbdb764
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
pyi
|
from typing import Iterator, overload
import ghidra.program.model.address
import java.lang
import java.util
import java.util.function
class InstructionSearchAddressIterator(object, ghidra.program.model.address.AddressIterator):
def __init__(self, __a0: ghidra.program.model.listing.InstructionIterator): ...
def __iter__(self) -> Iterator[object]: ...
def equals(self, __a0: object) -> bool: ...
def forEach(self, __a0: java.util.function.Consumer) -> None: ...
def forEachRemaining(self, __a0: java.util.function.Consumer) -> None: ...
def getClass(self) -> java.lang.Class: ...
def hasNext(self) -> bool: ...
def hashCode(self) -> int: ...
def iterator(self) -> java.util.Iterator: ...
def next(self) -> object: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def remove(self) -> None: ...
def spliterator(self) -> java.util.Spliterator: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
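# Usage sketch (an addition; assumes Ghidra's script environment, where
# `currentProgram` is injected): this class adapts an InstructionIterator to
# the AddressIterator interface, yielding each instruction's address.
# from ghidra.app.plugin.core.searchtext.iterators import InstructionSearchAddressIterator
# instr_iter = currentProgram.getListing().getInstructions(True)  # forward walk
# addr_iter = InstructionSearchAddressIterator(instr_iter)
# while addr_iter.hasNext():
#     print(addr_iter.next())  # a ghidra.program.model.address.Address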
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
dd05a3286f39ae892ec0ac84a9eb8ba21a202787
|
5905c1dbcbec6c2748e92b6464e82505bae0d9b8
|
/sensor_utils/sensor_manager.py
|
fda91403c888257ebd809c4db625d2f55400f0ef
|
[] |
no_license
|
trevormcinroe/auto_garden
|
791dc9186e2a655229415264082309c7a02cf744
|
da19f34d0c311da6c741b21a1ad6c4b2cec4f77d
|
refs/heads/master
| 2020-12-05T01:55:48.781067
| 2020-01-06T00:51:20
| 2020-01-06T00:51:20
| 231,974,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
"""
"""
import numpy as np
import pandas as pd
class SensorManager:
def __init__(self,
camera):
self.camera = camera
def take_timelapse(self):
""""""
self.camera.take_timelapse()
def take_video(self):
""""""
self.camera.take_video()
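# Usage sketch (an addition, not part of the original module): `FakeCamera`
# is a hypothetical stand-in for whatever camera driver the project injects.
class FakeCamera:
    def take_timelapse(self):
        print("timelapse started")

    def take_video(self):
        print("video started")

manager = SensorManager(camera=FakeCamera())
manager.take_timelapse()  # prints "timelapse started"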
|
[
"ThomasBayes69!"
] |
ThomasBayes69!
|
bd348ac6e8fc24a4b1ef4555fbb10763fc02b7da
|
4cc6e438966b983c0f1e85cc093bd90bcd685953
|
/prob2.py
|
7d02753855377050cabc6c9daf4cf8ef195cfd95
|
[] |
no_license
|
eskguptha/assignments
|
e96eab1cea904895f00372c4a87e6a7d1f4b1772
|
66998bce363ad4c64eefd812562af1d5db4f3458
|
refs/heads/master
| 2020-07-04T16:39:31.987477
| 2019-11-04T09:30:03
| 2019-11-04T09:30:03
| 74,152,645
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,637
|
py
|
"""
Problem #2
Fill in the Blanks
You are given a puzzle like this:
7 __ 10 __ 2
Each blank may be filled with a '+' (plus) or '-' (minus), producing a value k.
For all combinations of plus and minus, find the value of k that is closest to
0.
In the above case, there are 4 combinations, each producing a different
value:
7 + 10 + 2 = 19
7 + 10 - 2 = 15
7 - 10 + 2 = -1
7 - 10 - 2 = -5
Of all these combinations, the value that is closest to zero is -1. So the
answer is -1. If there is more than one value that is closest, print the
absolute value.
Sample Input/Output:
Enter digits: 7,10,2
Value close to zero is -1
Enter digits: 1,2,3,4
Value close to zero is 0
"""
INPUT_DIGITS = input("Enter digits: ")

# Split the input string on commas
input_digit_list = INPUT_DIGITS.split(',')

# At least three numbers are required
if len(input_digit_list) > 2:
    # Use only the first three numbers
    input_digit_list = input_digit_list[:3]
    try:
        # Convert each string to a number, stripping any surrounding spaces
        first_number, second_number, third_number = [int(each_num.strip()) for each_num in input_digit_list]
        # Apply the four plus/minus combinations to the three numbers
        results = [
            first_number + second_number + third_number,
            first_number + second_number - third_number,
            first_number - second_number + third_number,
            first_number - second_number - third_number,
        ]
        # Pick the value closest to zero; sorting in descending order first
        # makes min() prefer the positive value on a tie, so the absolute
        # value is printed as the problem statement requires
        OUTPUT_DIGIT = min(sorted(results, reverse=True), key=abs)
        print("Value close to zero is {}".format(OUTPUT_DIGIT))
    except ValueError:
        # int() raises ValueError (not TypeError) on non-numeric input
        print("Please enter numbers only. Ex: 7,10,2")
else:
    print("Please enter any three numbers separated by commas. Ex: 7,10,2")
|
[
"noreply@github.com"
] |
eskguptha.noreply@github.com
|
97bc6fd7abdac7d0908aa7cdf1c52f8472cf91f6
|
dc8d6d4297a004fc98c0c998a0dc251565c1fb9d
|
/pythonScripts/redditbot/wordFinder.py
|
a2e5e078837c3f7c96f5ed4c7736f1fa51194955
|
[] |
no_license
|
CodingDonky/python_misc
|
a4a5c9e9bf89cbf4327e3c6ba9c3703d6d26a9f7
|
08aaecb150e55e90a2782b624a516183ce608ec1
|
refs/heads/master
| 2020-03-25T06:03:16.038747
| 2018-12-18T00:43:14
| 2018-12-18T00:43:14
| 143,480,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# First Reddit Bot
import praw
import config
def bot_login():
r = praw.Reddit(username = config.username,
password = config.password,
client_id = config.client_id,
client_secret = config.client_secret,
user_agent = "My first python bot v0.1")
return r
def run_bot(r):
word = "Rick"
word2 = "Morty"
word_users = set() # to avoid duplicates
word_users2 = set()
for comment in r.subreddit('rickandmorty').comments(limit=250):
if word in comment.body:
word_users.add(comment.author)
if word2 in comment.body:
word_users2.add(comment.author)
print "The users that mentioned "+ word +" are :"
for user in word_users:
print " " + str(user)
print "The users that mentioned "+ word2 +" are :"
for user in word_users2:
print " " + str(user)
print str(len(word_users)) + " users mentioned "+ word
print str(len(word_users2)) + " users mentioned "+ word2
r = bot_login()
run_bot(r)
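# Hedged variant (an addition; assumes praw >= 6, where
# `subreddit.stream.comments` exists): watch for new mentions continuously
# instead of re-polling a fixed batch of 250 comments.
def run_bot_streaming(r, word="Rick"):
    word_users = set()
    for comment in r.subreddit('rickandmorty').stream.comments(skip_existing=True):
        if word in comment.body:
            word_users.add(comment.author)
            print(str(comment.author) + " mentioned " + word)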
|
[
"saintdane75@gmail.com"
] |
saintdane75@gmail.com
|
61743775cdfc1f99eb0f4f97a39cac5ad4a23f9b
|
58b27192c8f83297666796e2ae81549abffc5061
|
/first.py
|
7e380193a03d362bb5e2cbf469bb938be6b81c83
|
[] |
no_license
|
echapma3/test
|
191c8891069660151df0dbae23b9c8281d553833
|
951816f8bdea4cea167f9630ed367735aaea5661
|
refs/heads/master
| 2022-09-05T08:11:09.118354
| 2020-06-01T09:29:33
| 2020-06-01T09:29:33
| 265,587,784
| 0
| 0
| null | 2020-06-01T09:29:34
| 2020-05-20T14:16:08
|
Python
|
UTF-8
|
Python
| false
| false
| 123
|
py
|
print("test the second time")
print("hello")
print("12345")
number = 4
if number > 0:
print('number is positive')
|
[
"echapma3@jaguarlandrover.com"
] |
echapma3@jaguarlandrover.com
|
6edd212f8ff5d8dc3dbcc92e23d29dc81c6da083
|
1b4f196d2e7ee7c0aab43c0b75cce7f3aeba87a7
|
/phase4/model.py
|
0f056c58ce3e7dec0e3803d844248588ef12b389
|
[] |
no_license
|
raphael-cohen/facial_recognition_CS584
|
4cb2fc0120c3abccc108eec2e9e96d16fddedfb6
|
14dd5cd1cd006d27f5d30c02a445877f80a278da
|
refs/heads/master
| 2020-06-21T09:48:49.737806
| 2019-07-17T15:26:53
| 2019-07-17T15:26:53
| 197,413,417
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,500
|
py
|
from keras.models import Sequential, Model
from keras.layers import Dense, Input, Conv2D, MaxPooling2D, UpSampling2D, Flatten, Dropout
from keras import backend as K
from keras import regularizers
import numpy as np
import pickle as pkl
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras import optimizers
from keras.utils.vis_utils import plot_model
from keras_sequential_ascii import sequential_model_to_ascii_printout
from os import listdir
import os
from os.path import isfile, join
import matplotlib.pyplot as plt
import cv2
#pip install keras_sequential_ascii
def main():
name = 'williams'
# data = np.array(pkl.load(open('../X.pkl', 'rb')))
npath = 'np_faces'
onlyfiles = [ p for p in listdir('np_faces') if isfile(join('np_faces',p)) ]
total = len(onlyfiles)
total_img = 0
# for n, f in enumerate(onlyfiles):
#
# imgs = np.load(join(npath,f))
# total_img += imgs.shape[0]
# if n%20 == 0:
# print("{0}/{1}\n{2}".format(n,total, total_img))
    # total_img is hard-coded below to skip the counting pass over every file
    total_img = 63564  # ugly, but no cleaner option without re-scanning all files
print(total_img)
data = np.empty((total_img, 64,64,1))
names = np.empty(total_img, dtype=object)
start = 0
sp = 0
for n, f in enumerate(onlyfiles):
imgs = np.load(join(npath,f)).astype(np.float32)
nimg = imgs.shape[0]
space = np.prod(imgs.shape)
# print(imgs)
np.put(data, range(sp, sp+space), imgs)
np.put(names, range(start, start+nimg), [f]*nimg)
start += nimg
sp += space
if n%10 == 0:
print("{0}/{1}".format(n,total))
print(names.shape)
# celebrities = np.array(['Anthony_Hopkins.npy', 'Burt_Reynolds.npy',
# 'Jack_Nicholson.npy', 'John_Cusack.npy',
# 'Jeffrey_Tambor.npy', 'Leslie_Neilsen.npy',
# 'Mark_Wahlberg.npy', 'Richard_E._Grant.npy'])
celebrities = np.array(['Lourdes_Benedicto.npy', 'Lisa_Bonet.npy',
'Samuel_L._Jackson.npy', 'Tatyana_M._Ali.npy',
'Tempestt_Bledsoe.npy', 'Wanda_De_Jesus.npy',
'Shannon_Kane.npy','Jasmine_Guy.npy'])
# print(celebrities.shape)
labels = np.zeros(names.shape[0])
for cel in celebrities:
print(cel)
labels += np.where(names == cel, 1, 0)
# print(names[0:30])
# print(labels)
print(sum(labels))
print(data.shape)
print(data[0].shape)
#
# plt.imshow((data[1]).reshape((64,64)))
# plt.show()
# print(data[0])
# labels = np.array(pkl.load(open('../y_bush_vs_others.pkl', 'rb'))).flatten()
# labels = np.array(pkl.load(open('../y_{0}_vs_others.pkl'.format(name), 'rb'))).flatten()
# one = np.ones(500)
# labels = np.hstack((one, np.zeros(data.shape[0]-500)))
# print(np.asarray(data[0] ,dtype=np.float32))
# for d
# print((data[0]).reshape(64,64,1).shape)
x_train, x_test, y_train, y_test = train_test_split(
data, labels, test_size=1. / 3, random_state=2518, stratify = labels, shuffle = True)
num_positive = np.sum(labels)
class_weight = {0: 1., 1: len(labels)/num_positive*2}
# class_weight = {0:1, 1:1}
print(class_weight)
model = conv_predict_model(acthidden='tanh', actoutput='sigmoid')
opt = optimizers.adadelta()
# opt = optimizers.nadam()
    # TODO: revisit this (optimizer choice)
# plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
#
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])#, precision, recall, f1])
model.fit(x_train, y_train, validation_data=(
x_test, y_test), shuffle=True, epochs=350, batch_size=256, class_weight=class_weight)
# sequential_model_to_ascii_printout(model)
#400
    name = 'initial-model-williams'
model.save("{0}.model".format(name))
#
# with open("{0}_history.pkl".format(name), 'wb') as file_pi:
# pkl.dump(model.history, file_pi)
def conv_predict_model(acthidden='tanh', actoutput='sigmoid'):
input_img = Input(shape=(64, 64, 1))
model = Sequential()
model.add(Conv2D(32, (3, 3), activation=acthidden, padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(32, (3, 3), activation=acthidden, padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Dropout(0.3))
model.add(Conv2D(32, (3, 3), activation=acthidden, padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Conv2D(64, (3, 3), activation=acthidden, padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), activation=acthidden, padding='same'))
model.add(MaxPooling2D((2, 2), padding='same'))
model.add(Flatten())
model.add(Dropout(0.3))
model.add(Dense(40, activation=acthidden))#, kernel_regularizer=regularizers.l2(0.01)))
model.add(Dense(20, activation=acthidden))
model.add(Dropout(0.3))
model.add(Dense(10, activation=acthidden))
model.add(Dense(1, activation=actoutput))
return model
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1(y_true, y_pred):
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
if __name__ == '__main__':
main()
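# Inference sketch (an addition; assumes main() above has saved
# "initial-model-williams.model" and that faces arrive as 64x64x1 arrays):
def predict_faces(face_batch):
    from keras.models import load_model
    model = load_model("initial-model-williams.model")
    probs = model.predict(face_batch)       # sigmoid outputs in [0, 1]
    return (probs > 0.5).astype(int)        # threshold into 0/1 labels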
|
[
"noreply@github.com"
] |
raphael-cohen.noreply@github.com
|
1f66d2f362e53b3976edac547b79f60c4e925f1b
|
fbd6c98a999d558f28a3cea0e8a2c43dbc095d6c
|
/tests/unit/tabular/test_copulas.py
|
bca913a4ca506fc6fb0afc3f52c0d27d30ce30fc
|
[
"MIT"
] |
permissive
|
daanknoors/SDV
|
702859d39506b1245f48dc08161a62eadb1a3bf8
|
c9234aa6fba2c6c23fa439736efd19fd06f19770
|
refs/heads/master
| 2023-01-12T11:30:57.003936
| 2020-11-19T09:10:42
| 2020-11-19T09:10:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,358
|
py
|
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
import pytest
from copulas.multivariate.gaussian import GaussianMultivariate
from copulas.univariate import GaussianKDE
from sdv.tabular.base import NonParametricError
from sdv.tabular.copulas import GaussianCopula
class TestGaussianCopula:
@patch('sdv.tabular.copulas.BaseTabularModel.__init__')
def test___init__no_metadata(self, init_mock):
"""Test ``__init__`` without passing a table_metadata.
In this case, the parent __init__ will be called and the metadata
will be created based on the given arguments
Input:
- field_names
- field_types
- field_transformers
- anonymize_fields
- primary_key
- constraints
- distribution
- default_distribution
- categorical_transformer
Side Effects
- attributes are set to the right values
- super().__init__ is called with the right arguments
"""
gc = GaussianCopula(
field_names=['a_field'],
field_types={
'a_field': {
'type': 'categorical',
}
},
field_transformers={'a_field': 'categorical'},
anonymize_fields={'a_field': 'name'},
primary_key=['a_field'],
constraints=['a_constraint'],
distribution={'a_field': 'gaussian'},
default_distribution='bounded',
categorical_transformer='categorical_fuzzy'
)
assert gc._distribution == {'a_field': 'gaussian'}
assert gc._default_distribution == 'bounded'
assert gc._categorical_transformer == 'categorical_fuzzy'
assert gc._DTYPE_TRANSFORMERS == {'O': 'categorical_fuzzy'}
init_mock.assert_called_once_with(
field_names=['a_field'],
primary_key=['a_field'],
field_types={
'a_field': {
'type': 'categorical',
}
},
field_transformers={'a_field': 'categorical'},
anonymize_fields={'a_field': 'name'},
constraints=['a_constraint'],
table_metadata=None,
)
@patch('sdv.tabular.copulas.Table.from_dict')
@patch('sdv.tabular.copulas.BaseTabularModel.__init__')
def test___init__metadata_dict(self, init_mock, from_dict_mock):
"""Test ``__init__`` without passing a table_metadata dict.
In this case, metadata will be loaded from the dict and passed
to the parent.
Input:
- table_metadata
- distribution
- default_distribution
- categorical_transformer
Side Effects
- attributes are set to the right values
- super().__init__ is called with the loaded metadata
"""
table_metadata = {
'fields': {
'a_field': {
'type': 'categorical'
},
},
'model_kwargs': {
'GaussianCopula': {
'distribution': {
'a_field': 'gaussian',
},
'categorical_transformer': 'categorical_fuzzy',
}
}
}
gc = GaussianCopula(
distribution={'a_field': 'gaussian'},
categorical_transformer='categorical_fuzzy',
table_metadata=table_metadata,
)
assert gc._distribution == {'a_field': 'gaussian'}
assert gc._categorical_transformer == 'categorical_fuzzy'
assert gc._DTYPE_TRANSFORMERS == {'O': 'categorical_fuzzy'}
init_mock.assert_called_once_with(
field_names=None,
primary_key=None,
field_types=None,
field_transformers=None,
anonymize_fields=None,
constraints=None,
table_metadata=from_dict_mock.return_value,
)
def test__update_metadata_existing_model_kargs(self):
"""Test ``_update_metadata`` if metadata already has model_kwargs.
If ``self._metadata`` already has ``model_kwargs`` in it, this
method should do nothing.
Setup:
- self._metadata.get_model_kwargs that returns a kwargs dict
Expected Output
- None
Side Effects
- ``self._metadata.set_model_kwargs`` is not called.
"""
# Setup
gaussian_copula = Mock(spec_set=GaussianCopula)
# Run
out = GaussianCopula._update_metadata(gaussian_copula)
# Asserts
assert out is None
assert not gaussian_copula._metadata.set_model_kwargs.called
def test__update_metadata_no_model_kwargs(self):
"""Test ``_update_metadata`` if metadata has no model_kwargs.
If ``self._metadata`` has no ``model_kwargs`` in it, this
method should prepare the ``model_kwargs`` dict and call
``self._metadata.set_model_kwargs`` with it.
Setup:
- self._metadata.get_model_kwargs that returns None.
- self.get_distributions that returns a distribution dict.
Expected Output
- None
Side Effects
- ``self._metadata.set_model_kwargs`` is called with the
expected dict.
"""
# Setup
gaussian_copula = Mock(spec_set=GaussianCopula)
gaussian_copula._metadata.get_model_kwargs.return_value = dict()
gaussian_copula._categorical_transformer = 'a_categorical_transformer_value'
gaussian_copula.get_distributions.return_value = {
'foo': 'copulas.univariate.gaussian.GaussianUnivariate'
}
# Run
out = GaussianCopula._update_metadata(gaussian_copula)
# Asserts
assert out is None
expected_kwargs = {
'distribution': {'foo': 'copulas.univariate.gaussian.GaussianUnivariate'},
'categorical_transformer': 'a_categorical_transformer_value',
}
gaussian_copula._metadata.set_model_kwargs.assert_called_once_with(
'GaussianCopula', expected_kwargs)
@patch('sdv.tabular.copulas.copulas.multivariate.GaussianMultivariate',
spec_set=GaussianMultivariate)
def test__fit(self, gm_mock):
"""Test the ``GaussianCopula._fit`` method.
The ``_fit`` method is expected to:
- Call the _get_distribution method to build the distributions dict.
- Set the output from _get_distribution method as self._distribution.
        - Create a GaussianMultivariate object with the self._distribution value.
- Store the GaussianMultivariate instance in the self._model attribute.
- Fit the GaussianMultivariate instance with the given table data, unmodified.
- Call the _update_metadata method.
Setup:
- mock _get_distribution to return a distribution dict
Input:
- pandas.DataFrame
Expected Output:
- None
Side Effects:
- self._distribution is set to the output from _get_distribution
- GaussianMultivariate is called with self._distribution as input
- GaussianMultivariate output is stored as self._model
- self._model.fit is called with the input dataframe
- self._update_metadata is called without arguments
"""
# Setup
gaussian_copula = Mock(spec_set=GaussianCopula)
gaussian_copula._get_distribution.return_value = {'a': 'a_distribution'}
# Run
data = pd.DataFrame({
'a': [1, 2, 3]
})
out = GaussianCopula._fit(gaussian_copula, data)
# asserts
assert out is None
assert gaussian_copula._distribution == {'a': 'a_distribution'}
gm_mock.assert_called_once_with(distribution={'a': 'a_distribution'})
assert gaussian_copula._model == gm_mock.return_value
expected_data = pd.DataFrame({
'a': [1, 2, 3]
})
call_args = gaussian_copula._model.fit.call_args_list
passed_table_data = call_args[0][0][0]
pd.testing.assert_frame_equal(expected_data, passed_table_data)
gaussian_copula._update_metadata.assert_called_once_with()
def test__sample(self):
"""Test the ``GaussianCopula._sample`` method.
The GaussianCopula._sample method is expected to:
- call ``self._model.sample`` method passing the given num_rows.
- Return the output from the ``self._model.sample call``.
Input:
- Integer
Expected Output:
- ``self._model.sample.return_value``
Side Effects:
- ``self._model.sample`` is called with the given integer as input
"""
# Setup
n_rows = 2
gaussian_copula = Mock(spec_set=GaussianCopula)
expected = pd.DataFrame([1, 2, 3])
gaussian_copula._model.sample.return_value = expected
# Run
out = GaussianCopula._sample(gaussian_copula, n_rows)
# Asserts
gaussian_copula._model.sample.assert_called_once_with(n_rows)
assert expected.equals(out)
def test_get_parameters(self):
"""Test the ``get_parameters`` method when model is parametric.
If all the distributions are parametric, ``get_parameters``
should return a flattened version of the parameters returned
by the ``GaussianMultivariate`` instance.
Setup:
- ``self._model`` will be set to a REAL GaussianMultivarite instance
with the following properties:
- Uses the following distributions:
- GaussianUnivariate
- Univariate(parametric=PARAMETRIC)
- Is fitted with a two column dataframe where the column
of the ``GaussianMultivariate`` is constant (to force
``scale==0``) and the other one is not constant (to
force ``scale!=0``). The dataframe can contain only
three rows:
gm = GaussianMultivariate(distribution={
'a': GaussianUnivariate,
'b': Univariate(parametric=PARAMETRIC)
})
pd.DataFrame({
'a': [1, 1, 1],
'b': [1, 2, 3],
})
Output:
- Flattened parameter dictionary with the right values in it:
- triangular covariance matrix
- ``np.log`` applied to the ``EPSILON`` value for the
univariate that had ``scale==0``.
- ``np.log`` applied to the other ``scale`` parameter.
"""
def test_get_parameters_non_parametric(self):
"""Test the ``get_parameters`` method when model is parametric.
If there is at least one distributions in the model that is not
parametric, a NonParametricError should be raised.
Setup:
- ``self._model`` is set to a ``GaussianMultivariate`` that
uses ``GaussianKDE`` as its ``distribution``.
Side Effects:
- A NonParametricError is raised.
"""
# Setup
gm = GaussianMultivariate(distribution=GaussianKDE())
data = pd.DataFrame([1, 1, 1])
gm.fit(data)
gc = Mock()
gc._model = gm
# Run, Assert
with pytest.raises(NonParametricError):
GaussianCopula.get_parameters(gc)
def test__rebuild_covariance_matrix_positive_definite(self):
"""Test the ``_rebuild_covariance_matrix``
method for a positive definide covariance matrix.
The _rebuild_covariance_matrix method is expected to:
- Rebuild a square covariance matrix out of a triangular one.
        - Call ``make_positive_definite`` if the input matrix is not positive definite.
        Input
        - numpy array, symmetric positive definite matrix in triangular format
        Output
        - numpy array, square positive definite matrix
Side Effects:
- ``make_positive_definite`` is not called.
"""
# Run
covariance = [[1], [0, 1]]
result = GaussianCopula._rebuild_covariance_matrix(Mock(), covariance)
# Asserts
expected = np.array([[1., 0.], [0., 1.0]])
np.testing.assert_almost_equal(result, expected)
def test__rebuild_covariance_matrix_not_positive_definite(self):
"""Test the ``_rebuild_covariance_matrix``
method for a not positive definide covariance matrix.
The _rebuild_covariance_matrix method is expected to:
- Rebuild a square covariance matrix out of a triangular one.
        - Call ``make_positive_definite`` if the input matrix is not positive definite.
        Input
        - numpy array, symmetric non-positive-definite matrix in triangular format
        Output
        - numpy array, square positive definite matrix
Side Effects:
- ``make_positive_definite`` is called.
"""
# Run
covariance = [[1], [-1, 1]]
result = GaussianCopula._rebuild_covariance_matrix(Mock(), covariance)
# Asserts
expected = np.array([[1, -1.0], [-1.0, 1.0]])
np.testing.assert_almost_equal(result, expected)
def test__rebuild_gaussian_copula(self):
"""Test the ``GaussianCopula._rebuild_gaussian_copula`` method.
        The ``_rebuild_gaussian_copula`` method is expected to:
- Rebuild a square covariance matrix out of a triangular one.
Input:
- numpy array, Triangular covariance matrix
Expected Output:
- numpy array, Square covariance matrix
"""
# Setup
gaussian_copula = Mock(autospec=GaussianCopula)
gaussian_copula._rebuild_covariance_matrix.return_value = [[0.4, 0.17], [0.17, 0.07]]
gaussian_copula._distribution = {'foo': 'GaussianUnivariate'}
# Run
model_parameters = {
'univariates': {
'foo': {
'scale': 0.0,
'loc': 5
},
},
'covariance': [[0.1], [0.4, 0.1]],
'distribution': 'GaussianUnivariate',
}
result = GaussianCopula._rebuild_gaussian_copula(gaussian_copula, model_parameters)
# Asserts
expected = {
'univariates': [
{
'scale': 1.0,
'loc': 5,
'type': 'GaussianUnivariate'
}
],
'columns': ['foo'],
'distribution': 'GaussianUnivariate',
'covariance': [[0.4, 0.17], [0.17, 0.07]]
}
assert result == expected
def test_set_parameters(self):
"""Test the ``set_parameters`` method with positive num_rows.
The ``GaussianCopula.set_parameters`` method is expected to:
- Transform a flattened dict into its original form with
the unflatten_dict function.
        - pass the unflattened dict to the ``self._rebuild_gaussian_copula``
method.
- Store the number of rows in the `self._num_rows` attribute.
- Create a GaussianMultivariate instance from the params dict
and store it in the 'self._model' attribute.
Input:
- flat parameters dict
Output:
- None
Side Effects:
        - Call ``_rebuild_gaussian_copula`` with the unflattened dict.
- ``self._num_rows`` gets the given value.
- ``GaussianMultivariate`` is called
- ``GaussianMultivariate`` return value is stored as `self._model`
"""
# Setup
gaussian_copula = Mock(autospec=GaussianCopula)
returned = {
'univariates': [
{
'scale': 1.0,
'loc': 5,
'type': 'copulas.univariate.gaussian.GaussianUnivariate'
}
],
'columns': ['foo'],
'num_rows': 3,
'covariance': [[0.4, 0.17], [0.17, 0.07]]
}
gaussian_copula._rebuild_gaussian_copula.return_value = returned
# Run
flatten_parameters = {
'univariates__foo__scale': 0.0,
'univariates__foo__loc': 5,
'covariance__0__0': 0.1,
'covariance__1__0': 0.4,
'covariance__1__1': 0.1,
'num_rows': 3
}
GaussianCopula.set_parameters(gaussian_copula, flatten_parameters)
# Asserts
expected = {
'covariance': [[0.1], [0.4, 0.1]],
'num_rows': 3,
'univariates': {
'foo': {
'loc': 5,
'scale': 0.0
}
}
}
gaussian_copula._rebuild_gaussian_copula.assert_called_once_with(expected)
assert gaussian_copula._num_rows == 3
assert isinstance(gaussian_copula._model, GaussianMultivariate)
def test_set_parameters_negative_max_rows(self):
"""Test the ``set_parameters`` method with negative num_rows.
If the max rows value is negative, it is expected to be set
to zero.
The ``GaussianCopula.set_parameters`` method is expected to:
- Transform a flattened dict into its original form with
the unflatten_dict function.
        - pass the unflattened dict to the ``self._rebuild_gaussian_copula``
method.
- Store ``0`` in the `self._num_rows` attribute.
- Create a GaussianMultivariate instance from the params dict
and store it in the 'self._model' attribute.
Input:
- flat parameters dict
Output:
- None
Side Effects:
        - Call ``_rebuild_gaussian_copula`` with the unflattened dict.
- ``self._num_rows`` is set to ``0``.
- ``GaussianMultivariate`` is called
- ``GaussianMultivariate`` return value is stored as `self._model`
"""
# Setup
gaussian_copula = Mock(autospec=GaussianCopula)
returned = {
'univariates': [
{
'scale': 1.0,
'loc': 5,
'type': 'copulas.univariate.gaussian.GaussianUnivariate'
}
],
'columns': ['foo'],
'num_rows': -3,
'covariance': [[0.4, 0.17], [0.17, 0.07]]
}
gaussian_copula._rebuild_gaussian_copula.return_value = returned
# Run
flatten_parameters = {
'univariates__foo__scale': 0.0,
'univariates__foo__loc': 5,
'covariance__0__0': 0.1,
'covariance__1__0': 0.4,
'covariance__1__1': 0.1,
'num_rows': -3
}
GaussianCopula.set_parameters(gaussian_copula, flatten_parameters)
# Asserts
expected = {
'covariance': [[0.1], [0.4, 0.1]],
'num_rows': -3,
'univariates': {
'foo': {
'loc': 5,
'scale': 0.0
}
}
}
gaussian_copula._rebuild_gaussian_copula.assert_called_once_with(expected)
assert gaussian_copula._num_rows == 0
assert isinstance(gaussian_copula._model, GaussianMultivariate)
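# Illustration sketch (an addition, not part of the test suite): how a
# triangular covariance such as [[1], [0, 1]] maps to a square symmetric
# matrix. The real logic lives in GaussianCopula._rebuild_covariance_matrix;
# this mirrors only the reshaping step, not the positive-definite correction.
import numpy as np

def triangular_to_square(triangle):
    size = len(triangle)
    square = np.zeros((size, size))
    for i, row in enumerate(triangle):
        square[i, :len(row)] = row           # fill the lower triangle
    return square + np.tril(square, -1).T    # mirror below-diagonal entries up

print(triangular_to_square([[1], [0, 1]]))   # [[1. 0.] [0. 1.]]
print(triangular_to_square([[1], [-1, 1]]))  # [[ 1. -1.] [-1.  1.]]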
|
[
"noreply@github.com"
] |
daanknoors.noreply@github.com
|
046c3669d38f4376af10f67bab79eef3d7c72f08
|
6633f57a55159651b1af03c60bed93fc077b49c5
|
/transfer.py
|
4f140df66f64c208bfcf38bb087737f28830c9e8
|
[] |
no_license
|
msfeldstein/airdrop-script
|
8e72fab5ea7a5aa7e30a882c88cdafa1696ac016
|
18099be2f122d49d2db41884918b390087d93ecb
|
refs/heads/master
| 2022-07-27T22:26:15.902208
| 2019-09-13T05:02:47
| 2019-09-13T05:02:47
| 208,136,560
| 1
| 1
| null | 2022-06-25T08:01:48
| 2019-09-12T20:02:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 8,775
|
py
|
#!/usr/bin/python3
import pickle
import pprint
import requests
import stellar_base
from stellar_base.keypair import Keypair
from stellar_base.address import Address
from stellar_base.asset import Asset
from stellar_base.operation import Payment, CreateAccount, ChangeTrust, SetOptions, AccountMerge
from stellar_base.transaction import Transaction
from stellar_base.transaction_envelope import TransactionEnvelope as Te
from stellar_base.memo import TextMemo
from stellar_base.horizon import horizon_testnet
horizon = horizon_testnet()
def setup_account(ID):
"""
    Set up and cache an account funded by friendbot.
"""
try:
return pickle.load(open(ID, "rb"))
    except (OSError, EOFError, pickle.UnpicklingError):  # no usable cached keypair
kp = Keypair.random()
publickey = kp.address().decode()
url = 'https://friendbot.stellar.org'
r = requests.get(url, params={'addr': publickey})
pickle.dump(kp, open(ID, "wb"))
return setup_account(ID)
def create_empty_acct(r_kp):
"""
Creates a tmp account which is empty except for min balance (no fees even)
"""
r = Address(address=r_kp.address().decode())
r.get()
kp = Keypair.random()
dest = kp.address().decode()
tx = Transaction(
source=r.address,
sequence=r.sequence,
fee=100,
operations=[
CreateAccount(destination=dest, starting_balance="1")
]
)
env = Te(tx=tx, network_id="TESTNET")
env.sign(r_kp)
horizon.submit(env.xdr())
return kp
def balance_to_asset(b):
if b['asset_type'] == 'native':
return Asset.native()
return Asset(b['asset_code'], b['asset_issuer'])
def transfer_send(sender_kp, receiver_address, asset, amount):
"""
Execute the send portion of a transfer. This is used by the issuer,
airdropper, or sender of an asset. When this is done, a new temporary
account exists, which contains the transferred asset and enough XLM to
merge it into the receiving account.
Args:
sender_kp (Keypair): keypair of sending account
receiver_address (string): address of account to receive asset
asset (Asset): asset to send
amount (string): amount to transfer, float encoded as string
Returns:
response, tmp_dest: the Horizon response and the newly created
account holding the transfer
"""
sender = Address(sender_kp.address())
sender.get()
# Generate a tmp keypair
tmp_kp = Keypair.random()
tmp_dest = tmp_kp.address().decode()
# This is a speculative transaction!
# It may fail if someone pre-empts the CreateAccount -- in which case, it
# should be either re-run with a new kp or it should be attempted again with
# a payment to ensure at least 2.00006 native instead of create account
# This has been left out of this demo for simplicity
txn = Transaction(
source=sender.address,
sequence=sender.sequence,
fee=400,
operations=[
CreateAccount(destination=tmp_dest,
starting_balance="4.1"),
ChangeTrust(asset, amount, tmp_dest),
Payment(tmp_dest, asset, amount),
SetOptions(master_weight=0, signer_weight=1,
source=tmp_dest, signer_address=receiver_address)
]
)
txe = Te(tx=txn, network_id="TESTNET")
txe.sign(sender_kp)
txe.sign(tmp_kp)
xdr = txe.xdr()
response = horizon.submit(xdr)
return response, tmp_dest
def transfer_receive(tmp_address, receiver_kp, asset):
"""
Receive a transfer. This is used by a wallet on behalf of the receiving
user to pull the new asset in. When it's done the receiving account has
all of the asset from tmp_address, and all of the XLM reserve required to
perform the transfer.
Args:
tmp_address (string): address of temporary account containing the transfer asset
receiver_kp (Keypair): Keypair for the (optionally created) receiving account
asset (Asset): asset to receive
Returns:
response: the Horizon response
"""
account_exists = False
receiver_address = receiver_kp.address()
receiver_acct = Address(receiver_address)
try:
receiver_acct.get()
account_exists = True
except stellar_base.exceptions.HorizonError:
pass
needs_trustline = True
if account_exists:
for b in receiver_acct.balances:
if balance_to_asset(b) == asset:
needs_trustline = False
break
tmp_acct = Address(tmp_address)
tmp_acct.get()
    # assumes that the temp account contains the specified asset
amount = [b['balance']
for b in tmp_acct.balances if balance_to_asset(b) == asset][0]
operations = []
if not account_exists:
operations.append(CreateAccount(receiver_address, "1"))
if needs_trustline:
operations.extend([
# enough for trustline and one offer
Payment(receiver_address, Asset.native(), "1"),
ChangeTrust(asset, source=receiver_kp.address())
])
else:
operations.append(
# enough for one offer
Payment(receiver_address, Asset.native(), "0.5"),
)
operations.extend([
# Send Asset
Payment(receiver_address, asset, amount),
# Clear signers
SetOptions(signer_weight=0, signer_address=receiver_address),
# Clear trustlines
ChangeTrust(asset, "0"),
# Merge Account
AccountMerge(receiver_address)
])
txn = Transaction(
source=tmp_acct.address,
sequence=tmp_acct.sequence,
fee=100 * len(operations),
operations=operations
)
txe = Te(tx=txn, network_id="TESTNET")
txe.sign(receiver_kp)
# Potentially the issuer needs to sign this too with an allow trust --
# depends on the asset in question!
response = horizon.submit(txe.xdr())
return response
def print_acct(name, acct):
print("{} Has:".format(name))
print_table(acct.balances, cols=['asset_code', 'balance', 'limit'], defaults={
'asset_code': 'XLM'})
print()
def print_table(dicts, cols=None, defaults={}):
""" Pretty print a list of dictionaries (myDict) as a dynamically sized table.
If column names (colList) aren't specified, they will show in random order.
Author: Thierry Husson - Use it as you want but don't blame me.
"""
if not dicts:
print('-')
return
if not cols:
cols = list(dicts[0].keys() if dicts else [])
table_list = [cols] # 1st row = header
for item in dicts:
table_list.append(
[str(item.get(col, defaults.get(col, ''))) for col in cols])
colSize = [max(map(len, col)) for col in zip(*table_list)]
formatStr = ' | '.join(["{{:<{}}}".format(i) for i in colSize])
    table_list.insert(1, ['-' * i for i in colSize])  # Separating line
for item in table_list:
print(formatStr.format(*item))
def main():
alice = (setup_account("key.dat"))
bob = setup_account("bob.dat")
carol = create_empty_acct(bob)
alice_addr = Address(address=alice.address())
bob_addr = Address(address=bob.address())
carol_addr = Address(address=carol.address())
dave = Keypair.random()
dave_addr = Address(address=dave.address())
alice_addr.get()
bob_addr.get()
carol_addr.get()
print("PRECONDITIONS\n")
print_acct("Alice", alice_addr)
print_acct("Bob", bob_addr)
print_acct("Carol", carol_addr)
print_acct("Dave", dave_addr)
print("========================")
fakeusd = Asset("USD", alice_addr.address)
# Transfer Funds to Bob, who has an account (and a prior trustline)
_, tmp_addr = transfer_send(
sender_kp=alice, receiver_address=bob_addr.address, asset=fakeusd, amount="10")
transfer_receive(tmp_address=tmp_addr, receiver_kp=bob, asset=fakeusd)
# Transfer funds to Carol who has a bare-bones account (no funds, no trustline)
_, tmp_addr = transfer_send(
sender_kp=alice, receiver_address=carol_addr.address, asset=fakeusd, amount="10")
transfer_receive(tmp_address=tmp_addr, receiver_kp=carol, asset=fakeusd)
# Transfer Funds to Dave, who has no account
_, tmp_addr = transfer_send(
sender_kp=alice, receiver_address=dave_addr.address, asset=fakeusd, amount="10")
transfer_receive(tmp_address=tmp_addr, receiver_kp=dave, asset=fakeusd)
alice_addr.get()
bob_addr.get()
carol_addr.get()
dave_addr.get()
print("POSTCONDITIONS\n")
print_acct("Alice", alice_addr)
print_acct("Bob", bob_addr)
print_acct("Carol", carol_addr)
print_acct("Dave", dave_addr)
if __name__ == "__main__":
main()
|
[
"msfeldstein@gmail.com"
] |
msfeldstein@gmail.com
|
07efd69bf4a0ce7a3e803a67475882a21e223353
|
c8896d14a56680217941f48dcc93468a134977ab
|
/devel/lib/python2.7/dist-packages/package/msg/_xx.py
|
c7ac51e4707f5150fa5d7bc6a815eaf2a2186a1c
|
[] |
no_license
|
masterwei1024/-
|
afa69dd3502ab18cbb5a3971e80ef40d60683969
|
1a177ee2b133011fc3210c76291cf56ac728eaac
|
refs/heads/master
| 2020-06-14T15:42:47.735972
| 2019-07-03T11:58:35
| 2019-07-03T11:58:35
| 195,044,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,452
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from package/xx.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class xx(genpy.Message):
_md5sum = "b3087778e93fcd34cc8d65bc54e850d1"
_type = "package/xx"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 value
"""
__slots__ = ['value']
_slot_types = ['int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
value
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(xx, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.value is None:
self.value = 0
else:
self.value = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_i().pack(self.value))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(self.value,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_i().pack(self.value))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(self.value,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_i = None
def _get_struct_i():
global _struct_i
if _struct_i is None:
_struct_i = struct.Struct("<i")
return _struct_i
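# Round-trip sketch (an addition, not part of the genpy-generated file):
# serialize one xx message into a BytesIO buffer and decode it back.
def _roundtrip_demo():
    from io import BytesIO
    msg = xx(value=42)
    buff = BytesIO()
    msg.serialize(buff)                      # packs 4 little-endian bytes
    decoded = xx().deserialize(buff.getvalue())
    assert decoded.value == 42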
|
[
"1520247204@qq.com"
] |
1520247204@qq.com
|
e639bb543b86f6c690e50ff7109bb4d460094e0d
|
d04403f2d3f49e0ce6141f7aa67dddd8bfdec0f8
|
/Configuration/CMSTotemStandardSequences/test/TOTEMCMS/separate/geometryRPT1T2CMS_cfi.py
|
8b14b4d08389bf60e6b08b9aadce5f4bec3ae0e2
|
[] |
no_license
|
developerpedroivo/src
|
2eefc792c7652c7116c83121722c6228ae81e00d
|
01506c383b389faa97dcd7f269daab724a2baf24
|
refs/heads/master
| 2021-01-23T21:38:21.330660
| 2015-12-07T06:49:12
| 2015-12-07T06:49:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,685
|
py
|
import FWCore.ParameterSet.Config as cms
XMLIdealGeometryESSource = cms.ESSource("XMLIdealGeometryESSource",
geomXMLFiles = cms.vstring(* ('Geometry/CMSCommonData/data/materials.xml',
'Geometry/CMSCommonData/data/rotations.xml',
'Geometry/CMSCommonData/data/normal/cmsextent.xml', # DIFF: TOTEM uses normal, CMS uses extend
'Geometry/CMSCommonData/data/cms.xml',
'Geometry/CMSCommonData/data/cmsMother.xml',
# 'Geometry/CMSCommonData/data/cmsTracker.xml',
# 'Geometry/CMSCommonData/data/caloBase.xml',
# 'Geometry/CMSCommonData/data/cmsCalo.xml',
# 'Geometry/CMSCommonData/data/muonBase.xml',
'Geometry/CMSCommonData/data/cmsMuon.xml',
'Geometry/CMSCommonData/data/mgnt.xml',
'Geometry/CMSCommonData/data/PhaseI/beampipe.xml',
'Geometry/CMSCommonData/data/cmsBeam.xml',
'Geometry/CMSCommonData/data/muonMB.xml',
'Geometry/CMSCommonData/data/muonMagnet.xml',
'Geometry/CMSCommonData/data/cavern.xml',
'Geometry/TrackerCommonData/data/pixfwdMaterials.xml',
'Geometry/TrackerCommonData/data/pixfwdCommon.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq1x2.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq1x5.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x3.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x4.xml',
'Geometry/TrackerCommonData/data/pixfwdPlaq2x5.xml',
'Geometry/TrackerCommonData/data/pixfwdPanelBase.xml',
'Geometry/TrackerCommonData/data/pixfwdPanel.xml',
'Geometry/TrackerCommonData/data/pixfwdBlade.xml',
'Geometry/TrackerCommonData/data/pixfwdNipple.xml',
'Geometry/TrackerCommonData/data/pixfwdDisk.xml',
'Geometry/TrackerCommonData/data/pixfwdCylinder.xml',
'Geometry/TrackerCommonData/data/pixfwd.xml',
'Geometry/TrackerCommonData/data/pixbarmaterial.xml',
'Geometry/TrackerCommonData/data/pixbarladder.xml',
'Geometry/TrackerCommonData/data/pixbarladderfull.xml',
'Geometry/TrackerCommonData/data/pixbarladderhalf.xml',
'Geometry/TrackerCommonData/data/pixbarlayer.xml',
'Geometry/TrackerCommonData/data/pixbarlayer0.xml',
'Geometry/TrackerCommonData/data/pixbarlayer1.xml',
'Geometry/TrackerCommonData/data/pixbarlayer2.xml',
'Geometry/TrackerCommonData/data/pixbar.xml',
'Geometry/TrackerCommonData/data/tibtidcommonmaterial.xml',
'Geometry/TrackerCommonData/data/tibmaterial.xml',
'Geometry/TrackerCommonData/data/tibmodpar.xml',
'Geometry/TrackerCommonData/data/tibmodule0.xml',
'Geometry/TrackerCommonData/data/tibmodule0a.xml',
'Geometry/TrackerCommonData/data/tibmodule0b.xml',
'Geometry/TrackerCommonData/data/tibmodule2.xml',
'Geometry/TrackerCommonData/data/tibstringpar.xml',
'Geometry/TrackerCommonData/data/tibstring0ll.xml',
'Geometry/TrackerCommonData/data/tibstring0lr.xml',
'Geometry/TrackerCommonData/data/tibstring0ul.xml',
'Geometry/TrackerCommonData/data/tibstring0ur.xml',
'Geometry/TrackerCommonData/data/tibstring0.xml',
'Geometry/TrackerCommonData/data/tibstring1ll.xml',
'Geometry/TrackerCommonData/data/tibstring1lr.xml',
'Geometry/TrackerCommonData/data/tibstring1ul.xml',
'Geometry/TrackerCommonData/data/tibstring1ur.xml',
'Geometry/TrackerCommonData/data/tibstring1.xml',
'Geometry/TrackerCommonData/data/tibstring2ll.xml',
'Geometry/TrackerCommonData/data/tibstring2lr.xml',
'Geometry/TrackerCommonData/data/tibstring2ul.xml',
'Geometry/TrackerCommonData/data/tibstring2ur.xml',
'Geometry/TrackerCommonData/data/tibstring2.xml',
'Geometry/TrackerCommonData/data/tibstring3ll.xml',
'Geometry/TrackerCommonData/data/tibstring3lr.xml',
'Geometry/TrackerCommonData/data/tibstring3ul.xml',
'Geometry/TrackerCommonData/data/tibstring3ur.xml',
'Geometry/TrackerCommonData/data/tibstring3.xml',
'Geometry/TrackerCommonData/data/tiblayerpar.xml',
'Geometry/TrackerCommonData/data/tiblayer0.xml',
'Geometry/TrackerCommonData/data/tiblayer1.xml',
'Geometry/TrackerCommonData/data/tiblayer2.xml',
'Geometry/TrackerCommonData/data/tiblayer3.xml',
'Geometry/TrackerCommonData/data/tib.xml',
'Geometry/TrackerCommonData/data/tidmaterial.xml',
'Geometry/TrackerCommonData/data/tidmodpar.xml',
'Geometry/TrackerCommonData/data/tidmodule0.xml',
'Geometry/TrackerCommonData/data/tidmodule0r.xml',
'Geometry/TrackerCommonData/data/tidmodule0l.xml',
'Geometry/TrackerCommonData/data/tidmodule1.xml',
'Geometry/TrackerCommonData/data/tidmodule1r.xml',
'Geometry/TrackerCommonData/data/tidmodule1l.xml',
'Geometry/TrackerCommonData/data/tidmodule2.xml',
'Geometry/TrackerCommonData/data/tidringpar.xml',
'Geometry/TrackerCommonData/data/tidring0.xml',
'Geometry/TrackerCommonData/data/tidring0f.xml',
'Geometry/TrackerCommonData/data/tidring0b.xml',
'Geometry/TrackerCommonData/data/tidring1.xml',
'Geometry/TrackerCommonData/data/tidring1f.xml',
'Geometry/TrackerCommonData/data/tidring1b.xml',
'Geometry/TrackerCommonData/data/tidring2.xml',
'Geometry/TrackerCommonData/data/tid.xml',
'Geometry/TrackerCommonData/data/tidf.xml',
'Geometry/TrackerCommonData/data/tidb.xml',
'Geometry/TrackerCommonData/data/tibtidservices.xml',
'Geometry/TrackerCommonData/data/tibtidservicesf.xml',
'Geometry/TrackerCommonData/data/tibtidservicesb.xml',
'Geometry/TrackerCommonData/data/tobmaterial.xml',
'Geometry/TrackerCommonData/data/tobmodpar.xml',
'Geometry/TrackerCommonData/data/tobmodule0.xml',
'Geometry/TrackerCommonData/data/tobmodule2.xml',
'Geometry/TrackerCommonData/data/tobmodule4.xml',
'Geometry/TrackerCommonData/data/tobrodpar.xml',
'Geometry/TrackerCommonData/data/tobrod0c.xml',
'Geometry/TrackerCommonData/data/tobrod0l.xml',
'Geometry/TrackerCommonData/data/tobrod0h.xml',
'Geometry/TrackerCommonData/data/tobrod0.xml',
'Geometry/TrackerCommonData/data/tobrod1l.xml',
'Geometry/TrackerCommonData/data/tobrod1h.xml',
'Geometry/TrackerCommonData/data/tobrod1.xml',
'Geometry/TrackerCommonData/data/tobrod2c.xml',
'Geometry/TrackerCommonData/data/tobrod2l.xml',
'Geometry/TrackerCommonData/data/tobrod2h.xml',
'Geometry/TrackerCommonData/data/tobrod2.xml',
'Geometry/TrackerCommonData/data/tobrod3l.xml',
'Geometry/TrackerCommonData/data/tobrod3h.xml',
'Geometry/TrackerCommonData/data/tobrod3.xml',
'Geometry/TrackerCommonData/data/tobrod4c.xml',
'Geometry/TrackerCommonData/data/tobrod4l.xml',
'Geometry/TrackerCommonData/data/tobrod4h.xml',
'Geometry/TrackerCommonData/data/tobrod4.xml',
'Geometry/TrackerCommonData/data/tobrod5l.xml',
'Geometry/TrackerCommonData/data/tobrod5h.xml',
'Geometry/TrackerCommonData/data/tobrod5.xml',
'Geometry/TrackerCommonData/data/tob.xml',
'Geometry/TrackerCommonData/data/tecmaterial.xml',
'Geometry/TrackerCommonData/data/tecmodpar.xml',
'Geometry/TrackerCommonData/data/tecmodule0.xml',
'Geometry/TrackerCommonData/data/tecmodule0r.xml',
'Geometry/TrackerCommonData/data/tecmodule0s.xml',
'Geometry/TrackerCommonData/data/tecmodule1.xml',
'Geometry/TrackerCommonData/data/tecmodule1r.xml',
'Geometry/TrackerCommonData/data/tecmodule1s.xml',
'Geometry/TrackerCommonData/data/tecmodule2.xml',
'Geometry/TrackerCommonData/data/tecmodule3.xml',
'Geometry/TrackerCommonData/data/tecmodule4.xml',
'Geometry/TrackerCommonData/data/tecmodule4r.xml',
'Geometry/TrackerCommonData/data/tecmodule4s.xml',
'Geometry/TrackerCommonData/data/tecmodule5.xml',
'Geometry/TrackerCommonData/data/tecmodule6.xml',
'Geometry/TrackerCommonData/data/tecpetpar.xml',
'Geometry/TrackerCommonData/data/tecring0.xml',
'Geometry/TrackerCommonData/data/tecring1.xml',
'Geometry/TrackerCommonData/data/tecring2.xml',
'Geometry/TrackerCommonData/data/tecring3.xml',
'Geometry/TrackerCommonData/data/tecring4.xml',
'Geometry/TrackerCommonData/data/tecring5.xml',
'Geometry/TrackerCommonData/data/tecring6.xml',
'Geometry/TrackerCommonData/data/tecring0f.xml',
'Geometry/TrackerCommonData/data/tecring1f.xml',
'Geometry/TrackerCommonData/data/tecring2f.xml',
'Geometry/TrackerCommonData/data/tecring3f.xml',
'Geometry/TrackerCommonData/data/tecring4f.xml',
'Geometry/TrackerCommonData/data/tecring5f.xml',
'Geometry/TrackerCommonData/data/tecring6f.xml',
'Geometry/TrackerCommonData/data/tecring0b.xml',
'Geometry/TrackerCommonData/data/tecring1b.xml',
'Geometry/TrackerCommonData/data/tecring2b.xml',
'Geometry/TrackerCommonData/data/tecring3b.xml',
'Geometry/TrackerCommonData/data/tecring4b.xml',
'Geometry/TrackerCommonData/data/tecring5b.xml',
'Geometry/TrackerCommonData/data/tecring6b.xml',
'Geometry/TrackerCommonData/data/tecpetalf.xml',
'Geometry/TrackerCommonData/data/tecpetalb.xml',
'Geometry/TrackerCommonData/data/tecpetal0.xml',
'Geometry/TrackerCommonData/data/tecpetal0f.xml',
'Geometry/TrackerCommonData/data/tecpetal0b.xml',
'Geometry/TrackerCommonData/data/tecpetal3.xml',
'Geometry/TrackerCommonData/data/tecpetal3f.xml',
'Geometry/TrackerCommonData/data/tecpetal3b.xml',
'Geometry/TrackerCommonData/data/tecpetal6f.xml',
'Geometry/TrackerCommonData/data/tecpetal6b.xml',
'Geometry/TrackerCommonData/data/tecpetal8f.xml',
'Geometry/TrackerCommonData/data/tecpetal8b.xml',
'Geometry/TrackerCommonData/data/tecwheel.xml',
'Geometry/TrackerCommonData/data/tecwheela.xml',
'Geometry/TrackerCommonData/data/tecwheelb.xml',
'Geometry/TrackerCommonData/data/tecwheelc.xml',
'Geometry/TrackerCommonData/data/tecwheeld.xml',
'Geometry/TrackerCommonData/data/tecwheel6.xml',
'Geometry/TrackerCommonData/data/tecservices.xml',
'Geometry/TrackerCommonData/data/tecbackplate.xml',
'Geometry/TrackerCommonData/data/tec.xml',
'Geometry/TrackerCommonData/data/trackermaterial.xml',
'Geometry/TrackerCommonData/data/tracker.xml',
'Geometry/TrackerCommonData/data/trackerpixbar.xml',
'Geometry/TrackerCommonData/data/trackerpixfwd.xml',
'Geometry/TrackerCommonData/data/trackertibtidservices.xml',
'Geometry/TrackerCommonData/data/trackertib.xml',
'Geometry/TrackerCommonData/data/trackertid.xml',
'Geometry/TrackerCommonData/data/trackertob.xml',
'Geometry/TrackerCommonData/data/trackertec.xml',
'Geometry/TrackerCommonData/data/trackerbulkhead.xml',
'Geometry/TrackerCommonData/data/trackerother.xml',
'Geometry/EcalCommonData/data/eregalgo.xml',
'Geometry/EcalCommonData/data/ebalgo.xml',
'Geometry/EcalCommonData/data/ebcon.xml',
'Geometry/EcalCommonData/data/ebrot.xml',
'Geometry/EcalCommonData/data/eecon.xml',
'Geometry/EcalCommonData/data/eefixed.xml',
'Geometry/EcalCommonData/data/eehier.xml',
'Geometry/EcalCommonData/data/eealgo.xml',
'Geometry/EcalCommonData/data/escon.xml',
'Geometry/EcalCommonData/data/esalgo.xml',
'Geometry/EcalCommonData/data/eeF.xml',
'Geometry/EcalCommonData/data/eeB.xml',
'Geometry/HcalCommonData/data/hcalrotations.xml',
'Geometry/HcalCommonData/data/hcalalgo.xml',
'Geometry/HcalCommonData/data/hcalbarrelalgo.xml',
'Geometry/HcalCommonData/data/hcalendcapalgo.xml',
'Geometry/HcalCommonData/data/hcalouteralgo.xml',
'Geometry/HcalCommonData/data/hcalforwardalgo.xml',
'Geometry/HcalCommonData/data/average/hcalforwardmaterial.xml',
'Geometry/MuonCommonData/data/v1/mbCommon.xml',
'Geometry/MuonCommonData/data/v1/mb1.xml',
'Geometry/MuonCommonData/data/v1/mb2.xml',
'Geometry/MuonCommonData/data/v1/mb3.xml',
'Geometry/MuonCommonData/data/v1/mb4.xml',
'Geometry/MuonCommonData/data/design/muonYoke.xml',
'Geometry/MuonCommonData/data/v2/mf.xml',
'Geometry/MuonCommonData/data/v2/rpcf.xml',
'Geometry/MuonCommonData/data/v2/csc.xml',
'Geometry/MuonCommonData/data/v2/mfshield.xml',
'Geometry/ForwardCommonData/data/forward.xml',
'Geometry/ForwardCommonData/data/v2/forwardshield.xml',
'Geometry/ForwardCommonData/data/brmrotations.xml',
'Geometry/ForwardCommonData/data/brm.xml',
'Geometry/ForwardCommonData/data/totemMaterials.xml',
'Geometry/ForwardCommonData/data/totemRotations.xml',
'Geometry/ForwardCommonData/data/totemt1.xml',
'Geometry/ForwardCommonData/data/totemt2.xml',
'Geometry/ForwardCommonData/data/ionpump.xml',
'Geometry/ForwardCommonData/data/castor.xml',
'Geometry/ForwardSimData/data/totemsensGem.xml', # DIFF: TOTEM only
#'Geometry/ForwardCommonData/data/zdcmaterials.xml',
'Geometry/ForwardCommonData/data/lumimaterials.xml',
#'Geometry/ForwardCommonData/data/zdcrotations.xml',
'Geometry/ForwardCommonData/data/lumirotations.xml',
#'Geometry/ForwardCommonData/data/zdc.xml',
#'Geometry/ForwardCommonData/data/zdclumi.xml',
#'Geometry/ForwardCommonData/data/cmszdc.xml',
'Geometry/MuonCommonData/data/v2/muonNumbering.xml',
'Geometry/TrackerCommonData/data/trackerStructureTopology.xml',
'Geometry/TrackerSimData/data/trackersens.xml',
'Geometry/TrackerRecoData/data/trackerRecoMaterial.xml',
'Geometry/EcalSimData/data/ecalsens.xml',
'Geometry/HcalCommonData/data/hcalsenspmf.xml',
'Geometry/HcalSimData/data/hf.xml',
'Geometry/HcalSimData/data/hfpmt.xml',
'Geometry/HcalSimData/data/hffibrebundle.xml',
'Geometry/HcalSimData/data/CaloUtil.xml',
'Geometry/MuonSimData/data/muonSens.xml',
'Geometry/DTGeometryBuilder/data/dtSpecsFilter.xml',
'Geometry/CSCGeometryBuilder/data/cscSpecsFilter.xml',
'Geometry/CSCGeometryBuilder/data/cscSpecs.xml',
'Geometry/RPCGeometryBuilder/data/RPCSpecs.xml',
'Geometry/ForwardCommonData/data/brmsens.xml',
'Geometry/ForwardSimData/data/castorsens.xml',
#'Geometry/ForwardSimData/data/zdcsens.xml',
'Geometry/HcalSimData/data/HcalProdCuts.xml',
'Geometry/EcalSimData/data/EcalProdCuts.xml',
'Geometry/EcalSimData/data/ESProdCuts.xml',
'Geometry/TrackerSimData/data/trackerProdCuts.xml',
'Geometry/TrackerSimData/data/trackerProdCutsBEAM.xml',
'Geometry/MuonSimData/data/muonProdCuts.xml',
'Geometry/ForwardSimData/data/CastorProdCuts.xml',
'Geometry/ForwardSimData/data/zdcProdCuts.xml',
'Geometry/ForwardSimData/data/ForwardShieldProdCuts.xml',
'Geometry/CMSCommonData/data/FieldParameters.xml',
'Geometry/ForwardSimData/data/TotemProdCuts.xml',
'Geometry/TotemRPData/data/RP_Box.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_000.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_001.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_002.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_003.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_004.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_005.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_020.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_021.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_022.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_023.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_024.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_025.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_100.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_101.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_102.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_103.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_104.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_105.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_120.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_121.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_122.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_123.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_124.xml',
'Geometry/TotemRPData/data/RP_Box/RP_Box_125.xml',
'Geometry/TotemRPData/data/RP_Hybrid.xml',
'Geometry/TotemRPData/data/RP_Materials.xml',
'Geometry/TotemRPData/data/RP_Transformations.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_000.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_001.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_002.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_003.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_004.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_005.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_020.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_021.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_022.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_023.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_024.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_025.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_100.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_101.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_102.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_103.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_104.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_105.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_120.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_121.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_122.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_123.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_124.xml',
'Geometry/TotemRPData/data/RP_Detectors_Assembly/RP_Detectors_Assembly_125.xml',
'Geometry/TotemRPData/data/RP_Device.xml',
'Geometry/TotemRPData/data/RP_Vertical_Device.xml',
'Geometry/TotemRPData/data/RP_Horizontal_Device.xml',
'Geometry/TotemRPData/data/RP_220_Right_Station.xml',
'Geometry/TotemRPData/data/RP_220_Left_Station.xml',
'Geometry/TotemRPData/data/RP_147_Right_Station.xml',
'Geometry/TotemRPData/data/RP_147_Left_Station.xml',
'Geometry/TotemRPData/data/RP_Stations_Assembly.xml',
'Geometry/TotemRPData/data/RP_Sensitive_Dets.xml',
'Geometry/TotemRPData/data/RP_Cuts_Per_Region.xml',
'Geometry/TotemRPData/data/TotemRPGlobal.xml',
'Geometry/TotemRPData/data/RP_Param_Beam_Region.xml',
'Geometry/TotemRPData/data/RP_Beta_90/RP_Dist_Beam_Cent.xml',
'Geometry/PPSCommonData/data/PPS_Transformations.xml',
'Geometry/PPSCommonData/data/Cylindrical_pot.xml',
'Geometry/PPSCommonData/data/RP_Timing_Lbar.xml',
'Geometry/PPSCommonData/data/RP_Timing_Box_Left.xml',
'Geometry/PPSCommonData/data/RP_Timing_Box_Right.xml',
'Geometry/PPSCommonData/data/RP_215_Left_Detector_Assembly.xml',
'Geometry/PPSCommonData/data/RP_215_Right_Detector_Assembly.xml',
'Geometry/PPSCommonData/data/RP_215_Right_Station.xml',
'Geometry/PPSCommonData/data/RP_215_Left_Station.xml',
'Geometry/PPSCommonData/data/PPS_Stations_Assembly.xml',
'Geometry/PPSCommonData/data/PPS_timing_Sensitive_Dets.xml'
)), # DIFF: TOTEM only
# rootNodeName = cms.string('cms:OCMS')
rootNodeName = cms.string('TotemRPGlobal:OTOTEM')
)
# real geometry
TotemRPGeometryESModule = cms.ESProducer("TotemRPGeometryESModule",
verbosity = cms.untracked.uint32(1)
)
|
[
"setesami@cern.ch"
] |
setesami@cern.ch
|