blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
91466fad4434ae3cac50d7862221c1f502a79389
|
c13b4b0d3ab59d76a91a0811c30110098f8e4e9d
|
/catalogo/views.py
|
1b3105a16718c6cbae293edd2b91c715d5ddbea7
|
[] |
no_license
|
ryujiin/store
|
8d44b1f70df28df855c8966b3e9b50c99d99c409
|
dab4e586daa9162d0a5d2fef0b3856669fd2795c
|
refs/heads/master
| 2021-01-23T01:41:43.503719
| 2017-05-31T00:29:33
| 2017-05-31T00:29:33
| 92,887,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,948
|
py
|
from django.shortcuts import render
from django.http import HttpResponse, Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
# Create your views here.
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated,IsAdminUser
from models import *
from serializers import *
from datetime import datetime, timedelta, time
how_many_days = 20
class CatalogoViewsets(viewsets.ReadOnlyModelViewSet):
    """Read-only catalogue endpoint returning full product detail.

    Query params:
      - categoria: 'ofertas' (on sale), 'novedades' (updated in the last
        ``how_many_days`` days), or a category slug.
      - slug: exact product slug.
      - limite: maximum number of results.
    """
    serializer_class = ProductoSingleSereializer
    ordering_fields = ('precio_sort', 'num_comentarios')

    def get_queryset(self):
        queryset = Producto.objects.filter(activo=True).order_by('-actualizado')
        categoria = self.request.query_params.get('categoria', None)
        slug = self.request.query_params.get('slug', None)
        limite = self.request.query_params.get('limite', None)
        if categoria:
            if categoria == 'ofertas':
                queryset = queryset.filter(en_oferta=True)
            elif categoria == 'novedades':
                queryset = queryset.filter(
                    actualizado__gte=datetime.now() - timedelta(days=how_many_days))
            else:
                queryset = queryset.filter(categorias__slug=categoria)
        if slug:
            queryset = queryset.filter(slug=slug)
        if limite:
            # BUG FIX: query params are strings and slicing a queryset with a
            # string raises TypeError.  Convert, and ignore junk values
            # instead of returning a 500.
            try:
                queryset = queryset[:int(limite)]
            except (TypeError, ValueError):
                pass
        return queryset
class ListaProductosViewsets(viewsets.ReadOnlyModelViewSet):
    """Read-only catalogue endpoint returning the lightweight list serializer.

    Accepts the same ``categoria``/``slug``/``limite`` query params as
    CatalogoViewsets.
    """
    serializer_class = ProductoListaSerializers

    def get_queryset(self):
        queryset = Producto.objects.filter(activo=True).order_by('-actualizado')
        categoria = self.request.query_params.get('categoria', None)
        slug = self.request.query_params.get('slug', None)
        limite = self.request.query_params.get('limite', None)
        if categoria:
            if categoria == 'ofertas':
                queryset = queryset.filter(en_oferta=True)
            elif categoria == 'novedades':
                queryset = queryset.filter(
                    actualizado__gte=datetime.now() - timedelta(days=how_many_days))
            else:
                queryset = queryset.filter(categorias__slug=categoria)
        if slug:
            queryset = queryset.filter(slug=slug)
        if limite:
            # BUG FIX: slicing with the raw query-param string raises
            # TypeError; convert to int and ignore non-numeric values.
            try:
                queryset = queryset[:int(limite)]
            except (TypeError, ValueError):
                pass
        return queryset
#from drf_haystack.viewsets import HaystackViewSet
##aun no se usa la busqueda mas adelante derrepente
#class ProductoBusquedaView(HaystackViewSet):
#index_models = [Producto]
#serializer_class = ProductoBusquedaSerializer
class CategoriaViewsets(viewsets.ReadOnlyModelViewSet):
    """Read-only list/detail endpoint exposing every Categoria."""
    serializer_class = CategoriaSerializer
    queryset = Categoria.objects.all()
#Vistas para la oficina (back-office / staff-only endpoints)
class ProductosOficinaViewsets(viewsets.ReadOnlyModelViewSet):
    """Read-only product listing for staff users, newest record first."""

    serializer_class = ProductoListaSerializer
    permission_classes = (IsAdminUser,)

    def get_queryset(self):
        # Active products only, most recently created first.
        return Producto.objects.filter(activo=True).order_by('-pk')
class ProductoSingleEditableViewsets(viewsets.ModelViewSet):
    """Full CRUD endpoint for staff to edit products (active or not)."""

    serializer_class = ProductoSingleEditable
    permission_classes = (IsAdminUser,)

    def get_queryset(self):
        # All products, newest record first.
        return Producto.objects.all().order_by('-pk')
|
[
"ryujiin22@gmail.com"
] |
ryujiin22@gmail.com
|
2cc2e4c133cd02104d71c53eb5e1e727fac86306
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayInsUnderwritePolicyQueryModel.py
|
37b60946a99f201e4b87ce3ea9ab5c476f897e8a
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsUnderwritePolicyQueryModel(object):
    """Request model for the Alipay insurance underwrite-policy query API.

    Carries three optional identifiers: an external business number, a
    policy number and a product code.
    """

    # Field names shared by to_alipay_dict / from_alipay_dict.
    _FIELDS = ('out_biz_no', 'policy_no', 'prod_code')

    def __init__(self):
        self._out_biz_no = None
        self._policy_no = None
        self._prod_code = None

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def policy_no(self):
        return self._policy_no

    @policy_no.setter
    def policy_no(self, value):
        self._policy_no = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    def to_alipay_dict(self):
        """Serialize truthy fields into a plain dict for the gateway."""
        params = {}
        for key in self._FIELDS:
            value = getattr(self, key)
            if not value:
                continue
            # Nested API objects know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a gateway dict; None for falsy input."""
        if not d:
            return None
        o = AlipayInsUnderwritePolicyQueryModel()
        for key in AlipayInsUnderwritePolicyQueryModel._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
07ec8a97e20250841881fe935a533613ac674c22
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/prime-big-253.py
|
4df2497c7f4ca82b29a6f23247f3508acd85d1c8
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,707
|
py
|
# Get the n-th prime starting from 2
def get_prime(n:int) -> int:
    """Return the n-th prime number, counting 2 as the first."""
    count:int = 0
    value:int = 2
    while True:
        if is_prime(value):
            count = count + 1
            if count == n:
                return value
        value = value + 1
    return 0  # unreachable: the loop only exits via return
def is_prime(x:int) -> bool:
    """Trial division: True iff x has no divisor in [2, x).

    Note: values below 2 are vacuously reported prime (loop never runs);
    that quirk is preserved from the original.  Dead padding locals
    (div2..div5) removed.
    """
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime2(x:int, x2:int) -> bool:
    """Trial division on x; x2 is unused padding (kept for the interface).

    BUG FIX: the loop bound was the literal text ``$Exp`` — an unexpanded
    template hole that made the file a syntax error.  Every sibling
    (is_prime, is_prime3..5) bounds the loop with ``x``.
    """
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime3(x:int, x2:int, x3:int) -> bool:
    """Trial division on x; extra parameters are unused padding.

    Dead locals (div2..div5) removed; behavior unchanged.
    """
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime4(x:int, x2:int, x3:int, x4:int) -> bool:
    """Trial division on x; extra parameters are unused padding.

    Dead locals (div2..div5) removed; behavior unchanged.
    """
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
def is_prime5(x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
    """Trial division on x; extra parameters are unused padding.

    Dead locals (div2..div5) removed; behavior unchanged.
    """
    div:int = 2
    while div < x:
        if x % div == 0:
            return False
        div = div + 1
    return True
# Input parameter
n:int = 15
# The *2..*5 variables below are unused padding typical of this
# generated benchmark file.
n2:int = 15
n3:int = 15
n4:int = 15
n5:int = 15
# Run [1, n]
i:int = 1
i2:int = 1
i3:int = 1
i4:int = 1
i5:int = 1
# Crunch: print the first n primes, one per line.
while i <= n:
    print(get_prime(i))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
dd95d06ea3f6f07d84040319aff47321d5d4a365
|
fc0c01ffcbb20dfcdfe177f0f527bcea68bb0909
|
/backend/home/migrations/0002_load_initial_data.py
|
f1bd274487616909656e29aa72105a5bae7c4c68
|
[] |
no_license
|
crowdbotics-apps/msm-mobile-0312145--16172
|
e6736783118f61d3b928042da41c983a27011c91
|
a4c5eef430966d7f3f528ff3c400cc86bb56cda0
|
refs/heads/master
| 2023-01-25T04:07:39.278417
| 2020-12-03T10:49:41
| 2020-12-03T10:49:41
| 318,110,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home.CustomText table with the app's display title."""
    CustomText = apps.get_model("home", "CustomText")
    customtext_title = "MSM-mobile-0312145"
    CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
    """Seed the home.HomePage table with the default landing-page HTML."""
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
<h1 class="display-4 text-center">MSM-mobile-0312145</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record (id=1) at our domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "msm-mobile-0312145--16172.botics.co"
    site_params = {
        "name": "MSM-mobile-0312145",
    }
    # Guard kept from the generator template; custom_domain is always truthy here.
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    """Data migration: load initial CustomText/HomePage rows and set the Site domain."""
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f7c3af69717fdd9fe8134409fada9606a5081c25
|
d247d27a9e48b53db2ff298467ceddc47368d963
|
/forExamples/SEA1stFlwithoutContinue.py
|
6f170c8b6906b58deb5e547552e15e4a0ba6423f
|
[] |
no_license
|
Panzl/PythonClass2016
|
4814fa6bb3eea33248199a2985684b9eb870bbf1
|
e486d149ebeba7358d7f50cf390facd403559f9e
|
refs/heads/master
| 2021-01-11T03:40:06.468815
| 2016-10-19T13:45:16
| 2016-10-19T13:45:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
# Print room numbers "SEA <floor>.<wing+room>" for floors 1-5 and wings
# 100/200/300/400 with rooms 0-25, skipping wings below 300 on floor 1.
for floor in range(1, 6):
    for wing in range(100, 401, 100):
        if not (floor == 1 and wing < 300):
            for room in range(26):
                print('SEA ' + str(floor) + '.' + str(wing + room))
|
[
"hayasaka@utexas.edu"
] |
hayasaka@utexas.edu
|
25471bac763649727c460bd35d3e989530910df7
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5636311922769920_1/Python/bquark/fractiles.py
|
d40a361db6a8c357bb963ba9a04b429c9ff10cbb
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
import math
fi = open("D-large.in")
fo = open("D-large.out", "w")
# First input line holds the number of test cases.
line = next(fi)
T = int(line)
for t in range(T):
    line = next(fi)
    [K, C, S] = [int(x) for x in line.split(' ')]
    # ceil(K / C): each answer can cover at most C of the K positions.
    smin = (K + C - 1) // C
    y = ['IMPOSSIBLE']
    if S >= smin:
        y = []
        k = 0
        while k < K:
            yy = 0
            # Build a C-digit base-K number whose digits are
            # min(k, K-1), min(k+1, K-1), ...; emitted 1-based as yy + 1.
            for c in range(C):
                yy += K**(C-c-1) * min(k, K-1)
                k += 1
            y.append(yy+1)
    fo.write("Case #" + str(t+1) + ": " + ' '.join([str(s) for s in y]) + "\n")
fi.close()
fo.close()
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
76bbf0b45b41d05234904bd18feaf90dc6401f40
|
7589cdaaf2f3eba5854028732792c1ef16172eb7
|
/plot_supp_mat_figures.py
|
c9e5cbc7c17c578b8024edda0adefac2ae9cb253
|
[
"MIT"
] |
permissive
|
IdoSpringer/TCR-PEP-Classification
|
96ea67e81c053d9a8bf68dc6dc3db557e188be3b
|
1cca1551ca71359239a5f5caea7f13ec01f4982b
|
refs/heads/master
| 2020-04-05T00:33:05.187020
| 2019-06-26T09:01:55
| 2019-06-26T09:01:55
| 156,401,275
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
import matplotlib.pyplot as plt
import compare_data_stats as cmp
from Kidera import kidera
import numpy as np
# Dataset file names (tab-separated; TCR sequence in column 0).
# NOTE(review): `t` is not used by the call at the bottom of this file.
w = 'McPAS-TCR_with_V'
t = 'TCRGP_with_V'
nt = 'TCRGP_negs_with_V'
def _kidera_factor_observations(path):
    """Collect per-factor Kidera scores for every TCR line in *path*.

    Each line is tab-separated with the TCR sequence in column 0; the
    first 3 and last characters are stripped before scoring.  Returns a
    list of 10 lists, one per Kidera factor.
    """
    observations = [[] for _ in range(10)]
    with open(path, 'r') as data:
        for line in data:
            tcr = line.split('\t')[0][3:-1]
            scores = kidera.score_sequence(tcr).values
            for i in range(len(scores)):
                observations[i].append(scores[i])
    return observations


def kidera_hists(data1, data2):
    """Plot overlaid histograms of the 10 Kidera factors for two datasets.

    Refactored: the identical file-parsing loop that appeared twice now
    lives in _kidera_factor_observations; also stopped shadowing the
    builtin ``bin`` and the module-level ``t``.
    """
    factor_observations1 = _kidera_factor_observations(data1)
    factor_observations2 = _kidera_factor_observations(data2)
    fig = plt.figure()
    for i in range(10):
        ax = fig.add_subplot(2, 5, i + 1)
        a = factor_observations1[i]
        b = factor_observations2[i]
        weights1 = np.ones_like(a) / float(len(a))
        weights2 = np.ones_like(b) / float(len(b))
        bins = np.linspace(-1.0, 1.0, 10)
        # Second dataset is shifted by 0.1 so the two bar sets sit side by side.
        ax.hist([v + 0.1 for v in b], weights=weights2,
                bins=[edge + 0.1 for edge in bins],
                color='salmon', label='TCRGP', width=0.1)
        ax.hist(a, weights=weights1, bins=bins,
                color='dodgerblue', label='McPAS', width=0.1)
        ax.set_title('Kidera ' + str(i + 1) + ' factor histogram')
        ax.legend()
    fig.tight_layout()
    plt.show()
# Compare the McPAS dataset against the TCRGP negatives.
kidera_hists(w, nt)
|
[
"idospringer@gmail.com"
] |
idospringer@gmail.com
|
cf2437bbb631b33ae6e6807748210e78b731a4f1
|
fb1d7f7dea35f992d5d7a80d2b76cb7ad12aec2f
|
/restart.py
|
1e24de7e7333dcaa48507049b172766fde8db826
|
[] |
no_license
|
kohnakagawa/implicit_dpd
|
1e418b038f7f4bc935f01e6403cca8b37334334d
|
592640bd6a70193eeabd9614f86ac907c846a9d1
|
refs/heads/master
| 2021-01-20T20:37:03.061054
| 2017-09-04T07:25:55
| 2017-09-04T07:25:55
| 47,946,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
#!/usr/bin/env python
import glob
import os
import sys
import re
import shutil
def copy_with_log(src, dst):
    """Copy src to dst, logging the pair to stdout (Python 2 print statement)."""
    print "%s -> %s" % (src, dst)
    shutil.copy(src, dst)
def get_backup_number(root_dir, f_back_pattern):
    """Return the next backup suffix for files in *root_dir*.

    Backup files match ``f_back_pattern`` ("<name>.<ext>.<n>", the number
    in group 3).  Returns max(n) + 1, or 0 when no backups exist.
    """
    backup_nums = set()
    for f in os.listdir(root_dir):
        f_number = re.search(f_back_pattern, f)
        if f_number is None:
            continue
        backup_nums.add(f_number.group(3))
    if len(backup_nums) == 0:
        return 0
    # BUG FIX: max() over the digit *strings* was lexicographic ('9' > '10'),
    # so the counter could collide with an existing suffix.  Compare as ints.
    return max(int(num) for num in backup_nums) + 1
def make_backup(root_dir):
    """Copy every non-backup file in root_dir to "<file>.<num>".

    All files copied in one invocation share the same number, one greater
    than the highest existing backup suffix.
    """
    f_back_pattern = r'(\w+)\.(\w+)\.(\d+)'
    num = get_backup_number(root_dir, f_back_pattern)
    for f in os.listdir(root_dir):
        sim_data = re.search(f_back_pattern, f)
        if sim_data is not None: # skip backup files
            continue
        f_back = f + "." + str(num)
        f = os.path.join(root_dir, f)
        f_back = os.path.join(root_dir, f_back)
        copy_with_log(f, f_back)
def make_init_config(root_dir):
    """Promote fin_config.xyz to init_config.xyz to seed the restarted run.

    Exits with status 1 if the final configuration file is empty.
    """
    init_config = os.path.join(root_dir, "init_config.xyz")
    fin_config = os.path.join(root_dir, "fin_config.xyz")
    if os.path.getsize(fin_config) == 0:
        print "WARNING! there is no trajectory data in %s" % fin_config
        sys.exit(1)
    copy_with_log(fin_config, init_config)
def main(argv):
    """CLI entry point: back up root_dir's files, then reset the start config."""
    if len(argv) != 2:
        print "Usage: %s root_dir" % argv[0]
        sys.exit(1)
    root_dir = argv[1]
    make_backup(root_dir)
    make_init_config(root_dir)
if __name__ == "__main__":
    main(sys.argv)
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
90f88061995145befde80c2bce56e91f3b03e14b
|
eaac679161dfd275964575193f82d24171321f43
|
/setup.py
|
654e245dd8752b93d1cecf50ae82bbfcf36ad6da
|
[
"MIT"
] |
permissive
|
aagallag/pubg-python
|
0ff3816cfcbeb185cc6a83ab04934bacd425badf
|
42d1a16d332ee53d5ebe136293bfcd65d34a4da6
|
refs/heads/master
| 2020-03-11T13:11:13.745847
| 2018-04-16T15:08:22
| 2018-04-16T15:08:22
| 130,018,032
| 0
| 0
| null | 2018-04-18T06:58:26
| 2018-04-18T06:58:25
| null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
from setuptools import (
find_packages,
setup)
# Distribution metadata for the pubg-python package.
setup(
    name='pubg-python',
    version='0.2.8',
    description='A python wrapper for the PUBG developer API',
    url='https://github.com/ramonsaraiva/pubg-python',
    author='Ramon Saraiva',
    author_email='ramonsaraiva@gmail.com',
    license='MIT',
    packages=find_packages(exclude=('tests*',)),
    install_requires=[
        'requests>=2.18.4',
        'furl>=1.0.1',
    ],
    # enum34 backports the enum module for Python <= 3.4.
    extras_require={
        ":python_version<='3.4'": ['enum34>=1.1.6'],
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Topic :: Utilities',
    ],
)
|
[
"ramonsaraiva@gmail.com"
] |
ramonsaraiva@gmail.com
|
cd3b531914b0f7e87c1fdf8e76c6404c03ae150d
|
9bf522a1716339fe928e83c9b416eeebaa1421af
|
/aiida_lammps/calculations/lammps/force.py
|
11f3143a6fb0dfaa6cc6ed3da3a21f84ab3c9b05
|
[
"MIT"
] |
permissive
|
zaidurrehman/aiida-lammps
|
132ccf6f6bc2b8e2a81fa3f852a76c8bd3bdcedd
|
e00d5501778c918b4333747398d4ae4df46fd3eb
|
refs/heads/master
| 2020-03-22T16:07:52.265272
| 2018-05-15T08:26:39
| 2018-05-15T08:26:39
| 140,304,946
| 0
| 0
|
MIT
| 2018-07-09T15:31:32
| 2018-07-09T15:25:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
from aiida.orm.calculation.job import JobCalculation
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.common.utils import classproperty
from aiida.orm import DataFactory
from aiida_lammps.calculations.lammps import BaseLammpsCalculation
from aiida_lammps.calculations.lammps.potentials import LammpsPotential
# Resolve AiiDA data classes once at import time.
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
def generate_LAMMPS_input(parameters_data,
                          potential_obj,
                          structure_file='data.gan',
                          trajectory_file='trajectory.lammpstr'):
    """Build a LAMMPS input script for a single-point ("run 0") force dump.

    parameters_data is accepted for interface compatibility but unused.
    NOTE(review): the default trajectory_file lacks the trailing 'j' of
    ForceCalculation._OUTPUT_TRAJECTORY_FILE_NAME; callers appear to pass
    it explicitly — confirm before relying on the default.
    """
    names_str = ' '.join(potential_obj._names)
    fragments = [
        'units metal\n',
        'boundary p p p\n',
        'box tilt large\n',
        'atom_style atomic\n',
        'read_data {}\n'.format(structure_file),
        potential_obj.get_input_potential_lines(),
        'neighbor 0.3 bin\n',
        'neigh_modify every 1 delay 0 check no\n',
        'dump aiida all custom 1 {0} element fx fy fz\n'.format(trajectory_file),
        'dump_modify aiida format line "%4s %16.10f %16.10f %16.10f"\n',
        'dump_modify aiida sort id\n',
        'dump_modify aiida element {}\n'.format(names_str),
        'run 0',
    ]
    return ''.join(fragments)
class ForceCalculation(BaseLammpsCalculation, JobCalculation):
    """AiiDA JobCalculation running a single-point LAMMPS force evaluation."""
    _OUTPUT_TRAJECTORY_FILE_NAME = 'trajectory.lammpstrj'
    _OUTPUT_FILE_NAME = 'log.lammps'
    def _init_internal_params(self):
        # Extend the base defaults with this plugin's parser and outputs.
        super(ForceCalculation, self)._init_internal_params()
        self._default_parser = 'lammps.force'
        # NOTE(review): the double underscore name-mangles this to
        # _ForceCalculation__retrieve_list; it looks like a typo for
        # self._retrieve_list, which is assigned two lines below anyway —
        # confirm and remove.
        self.__retrieve_list = []
        self._generate_input_function = generate_LAMMPS_input
        self._retrieve_list = [self._OUTPUT_TRAJECTORY_FILE_NAME, self._OUTPUT_FILE_NAME]
    @classproperty
    def _use_methods(cls):
        """
        Extend the parent _use_methods with further keys.
        """
        retdict = JobCalculation._use_methods
        retdict.update(BaseLammpsCalculation._baseclass_use_methods)
        return retdict
|
[
"abelcarreras83@gmail.com"
] |
abelcarreras83@gmail.com
|
5bb3365c6f397cdc145999193c20452bf0d67692
|
3cf0d750948a758d5771dd778fbb783d64a044ae
|
/src/basic/web/flask/01hello/do_flask.py
|
7765c119d4516759757f524df238b1c5042fa794
|
[
"CC-BY-NC-SA-4.0",
"Apache-2.0"
] |
permissive
|
hbulpf/pydemo
|
6552a08b3c85721ac1b2ba335b030e234ad03b6c
|
ea3e9f9086116a86ecef803e9e3179a34c94c20f
|
refs/heads/master
| 2022-11-30T21:06:29.933820
| 2022-01-15T17:05:16
| 2022-01-15T17:05:16
| 237,584,300
| 6
| 1
|
Apache-2.0
| 2022-11-22T09:49:38
| 2020-02-01T08:20:43
|
Python
|
UTF-8
|
Python
| false
| false
| 795
|
py
|
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
    """Render the home page (same response for GET and POST)."""
    return '<h1>Home</h1>'
@app.route('/signin', methods=['GET'])
def signin_form():
    """Serve the static HTML sign-in form, which POSTs to /signin."""
    return '''<form action="/signin" method="post">
<p>username:<input name="username"></p>
<p>password:<input name="password" type="password"></p>
<p><button type="submit">Sign In</button></p>
</form>'''
@app.route('/signin', methods=['POST'])
def signin():
    """Validate posted credentials and greet or reject the user."""
    # Read the submitted form fields from the request object.
    # NOTE(review): credentials are hard-coded demo values — not for production.
    if request.form['username']=='admin' and request.form['password']=='password':
        return '<h3>Hello, admin!</h3>'
    return '<h3>Bad username or password.</h3>'
if __name__ == '__main__':
    # Run Flask's built-in development server (not for production use).
    app.run()
|
[
"hudalpf@163.com"
] |
hudalpf@163.com
|
54bec16a1e8a091fb14b3314055c0bfc1ade59c2
|
08cfe7ccf78f098924efdcf0db72f32d56e995fe
|
/envosx/bin/pip
|
d805a53a4465a5e6982e53bfb89d17841e9f7151
|
[] |
no_license
|
carloxdev/veritas
|
6709031a020801181dc81751133433adc96dfb71
|
e91e42545b7c42b8fd5090f58572715c3f653095
|
refs/heads/master
| 2020-04-21T23:31:32.031949
| 2019-02-10T06:25:27
| 2019-02-10T06:25:27
| 169,949,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
#!/Users/carloxdev/Files/Trabajo/Sintaxyz/Proyectos/Veritas/envosx/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Console-script shim generated by pip: strip a "-script.pyw"/".exe"
    # suffix from argv[0] (Windows wrappers) so pip reports a clean program
    # name, then delegate to pip's main and propagate its exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"carloxdev@gmail.com"
] |
carloxdev@gmail.com
|
|
9d69eb0935be8ecd1893a4b8bbbb712dbb6b2c3f
|
2daa3894e6d6929fd04145100d8a3be5eedbe21c
|
/tests/artificial/transf_exp/trend_linear/cycle_12/ar_/test_artificial_1024_exp_linear_12__0.py
|
d3250a459fa582199ac7047cea2b5bd10721479e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Henri-Lo/pyaf
|
a1f73a0cc807873bd7b79648fe51de9cfd6c126a
|
08c968425d85dcace974d90db7f07c845a0fe914
|
refs/heads/master
| 2021-07-01T12:27:31.600232
| 2017-09-21T11:19:04
| 2017-09-21T11:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Generate a deterministic (seed=0) daily series of length 1024 with a linear
# trend, 12-step cycle, exp transform, no noise and no exogenous variables,
# then run the standard artificial-dataset pipeline over it.
dataset = tsds.generate_random_TS(N = 1024 , FREQ = 'D', seed = 0, trendtype = "linear", cycle_length = 12, transform = "exp", sigma = 0.0, exog_count = 0, ar_order = 0);
art.process_dataset(dataset);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
4abc683ed81e9286e0ff9350feee1b386ac7ffe5
|
4cbf572b446af438249911e2b07ae873234609df
|
/examples/postman_echo/request_methods/hardcode_test.py
|
e60254c428b1d0552596a765052181cb6c195cab
|
[
"Apache-2.0"
] |
permissive
|
jeremy8250/httprunner
|
0a1d164c18df43bf65754130615bab8a91b14862
|
a40c7892f3666dd1de200e53ecd5cee9fa8a68ee
|
refs/heads/master
| 2022-07-04T08:20:01.979326
| 2020-05-18T02:47:56
| 2020-05-18T02:47:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
# NOTICE: Generated By HttpRunner. DO'NOT EDIT!
# FROM: examples/postman_echo/request_methods/hardcode.yml
from httprunner import HttpRunner, TConfig, TStep
class TestCaseHardcode(HttpRunner):
    """Auto-generated testcase driving GET/POST/PUT against postman-echo.

    Regenerate from examples/postman_echo/request_methods/hardcode.yml
    (see the header notice) rather than editing this file by hand.
    """
    config = TConfig(
        **{
            "name": "request methods testcase in hardcode",
            "base_url": "https://postman-echo.com",
            "verify": False,
            "path": "examples/postman_echo/request_methods/hardcode_test.py",
        }
    )
    teststeps = [
        TStep(
            **{
                "name": "get with params",
                "request": {
                    "method": "GET",
                    "url": "/get",
                    "params": {"foo1": "bar1", "foo2": "bar2"},
                    "headers": {"User-Agent": "HttpRunner/3.0"},
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "post raw text",
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "text/plain",
                    },
                    "data": "This is expected to be sent back as part of response body.",
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "post form data",
                "request": {
                    "method": "POST",
                    "url": "/post",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "application/x-www-form-urlencoded",
                    },
                    "data": "foo1=bar1&foo2=bar2",
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
        TStep(
            **{
                "name": "put request",
                "request": {
                    "method": "PUT",
                    "url": "/put",
                    "headers": {
                        "User-Agent": "HttpRunner/3.0",
                        "Content-Type": "text/plain",
                    },
                    "data": "This is expected to be sent back as part of response body.",
                },
                "validate": [{"eq": ["status_code", 200]}],
            }
        ),
    ]
if __name__ == "__main__":
    # Allow running this generated testcase directly with Python.
    TestCaseHardcode().test_start()
|
[
"mail@debugtalk.com"
] |
mail@debugtalk.com
|
2c7f7315aae320bc1caeb77516b600a04ca3a90f
|
ace30d0a4b1452171123c46eb0f917e106a70225
|
/filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/keystone/conf/extra_headers.py
|
247d879764f82dcec291593525b0478080abbdef
|
[
"Python-2.0"
] |
permissive
|
juancarlosdiaztorres/Ansible-OpenStack
|
e98aa8c1c59b0c0040c05df292964520dd796f71
|
c01951b33e278de9e769c2d0609c0be61d2cb26b
|
refs/heads/master
| 2022-11-21T18:08:21.948330
| 2018-10-15T11:39:20
| 2018-10-15T11:39:20
| 152,568,204
| 0
| 3
| null | 2022-11-19T17:38:49
| 2018-10-11T09:45:48
|
Python
|
UTF-8
|
Python
| false
| false
| 960
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from keystone.conf import utils
# Config option: which OS distribution the keystone server runs on.
# NOTE(review): oslo option names are conventionally lowercase; this one is
# capitalized ('Distribution') — renaming would break existing config files.
distribution = cfg.StrOpt(
    'Distribution',
    default='Ubuntu',
    help=utils.fmt("""
Specifies the distribution of the keystone server.
"""))
# Option group is named after this module (i.e. 'extra_headers').
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
    distribution,
]
def register_opts(conf):
    """Register this module's options on *conf* under GROUP_NAME."""
    conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
    """Return {group_name: options} for oslo.config's option discovery."""
    return {GROUP_NAME: ALL_OPTS}
|
[
"jcdiaztorres96@gmail.com"
] |
jcdiaztorres96@gmail.com
|
2d0b99e0bd8d4068049d46c68973cd8748ee3dd6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_drove.py
|
1f3fa8910c98a175c0f442468e208abbc398f454
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# class header
class _DROVE():
def __init__(self,):
self.name = "DROVE"
self.definitions = [u'to move farm animals on foot from one place to another']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b8beb75f1384cd86e0be5cf1fca0bff678731ddc
|
b4fc645746dd2a88a951acea06db91eae1d0eda4
|
/fluent_blogs/__init__.py
|
e531338f42cc1e7e73c6966faff30d43d6e2196f
|
[
"Apache-2.0"
] |
permissive
|
nishchintg01/django-fluent-blogs
|
40a2a5c25f0afbdbb08af14852af5a128e564e75
|
86b148549a010eaca9a2ea987fe43be250e06c50
|
refs/heads/master
| 2020-06-13T16:45:11.847957
| 2018-07-31T10:52:10
| 2018-07-31T10:52:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Version string, following PEP 440.
__version__ = "2.0.3"
# Fix for internal messy imports.
# When base_models is imported before models/__init__.py runs, there is a
# circular import:
#   base_models -> models/managers.py -> invoking models/__init__.py
#   -> models/db.py -> base_models.py
#
# This doesn't occur when the models are imported first.
|
[
"vdboor@edoburu.nl"
] |
vdboor@edoburu.nl
|
50a51ca5d16e357fec283b6b5598b557ab490c41
|
63044bff27a0cf40ae9bd203e12562386b0fc925
|
/courses/management/commands/update.py
|
b323a77606477d99af35698445c40cbe516b99fb
|
[
"BSD-3-Clause"
] |
permissive
|
afg-archive/nthucourses
|
696f208b57b3f870fdae6a87030804fb70cda080
|
9f28f8e9480b9d7a9db1f9c023955fb23b1a28aa
|
refs/heads/master
| 2021-05-29T01:46:36.882086
| 2015-05-29T10:51:13
| 2015-05-29T10:51:13
| 36,482,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
from django.core.management.base import BaseCommand
import sys
from courses import adapter
from logs.models import Logger
class Command(BaseCommand):
    """Management command: update `count` items via courses.adapter."""
    help = 'update stuff'
    def add_arguments(self, parser):
        # Positional argument: how many items to update.
        parser.add_argument('count', type=int)
    def handle(self, *args, **options):
        # Record the run in the logs app, named after the CLI invocation.
        with Logger(' '.join(sys.argv[1:])):
            adapter.update_n(options['count'])
|
[
"afg984@gmail.com"
] |
afg984@gmail.com
|
3b9a978a922a5b196bc117898814b0cbec445d6c
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20180118/example_pandas/x.py
|
b249ecd7574e9204bf7e358494db3c0a1f66cdf8
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
import pandas as pd

# BUG FIX: the original referenced an undefined `df` (NameError at runtime)
# and passed scalar coordinates to Axes.annotate, which expects an (x, y)
# pair.  A small demo frame is created and the coordinates are tupled.
df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["a", "b", "c"])

# Scatter-plot the points and label each one with its index name.
ax = df.plot(kind="scatter", x="x", y="y", s=40)
for _, row in df.iterrows():
    print(row.name, row.x, row.y)
    ax.annotate(row.name, (row.x, row.y))
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
1c676289043ddc7f0d412e1cc3be5ddf29fa5bbd
|
31a9a6cd0c6d06c612705a6d572f97d6a6ec7286
|
/render.py
|
a077a40bd70d27e379908dc0a77abc7d79ca947c
|
[] |
no_license
|
mikeboers/RenderHeatmap
|
ce9860f1f6d451f65442a566477b97079282c44d
|
684a3fef7dc4c3fb0983eed69a983ceb9ecb473b
|
refs/heads/master
| 2023-06-08T05:52:28.983104
| 2013-07-16T01:49:07
| 2013-07-16T01:49:07
| 11,429,803
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,259
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import ctypes as c
import functools
import os
import re
import sys
from subprocess import call, check_output
import prman
from mako.template import Template
def debug(*args, **kwargs):
    """Like print(), but forced onto stderr."""
    kwargs['file'] = sys.stderr
    print(*args, **kwargs)
_memo_store = {}
def memo(func):
    """Memoize *func* for the life of the process.

    Results live in the module-level _memo_store, keyed by a
    "func(args..., kw=...)" spec built from repr() of the arguments.
    """
    @functools.wraps(func)
    def memoized(*args, **kwargs):
        arg_repr = [repr(x) for x in args]
        # BUG FIX: dict.iteritems() is Python-2-only; sorted(kwargs.items())
        # behaves identically on both 2 and 3.
        arg_repr.extend('%s=%r' % x for x in sorted(kwargs.items()))
        spec = '%s(%s)' % (func.__name__, ', '.join(arg_repr))
        try:
            return _memo_store[spec]
        except KeyError:
            ret = _memo_store[spec] = func(*args, **kwargs)
            return ret
    return memoized
# Consider grabbing the Rix API to get the shader path. But for now, I'm just
# going to hard-code it...
# lib = c.CDLL(os.path.join(os.environ['RMANTREE'], 'lib/libprman.dylib'))
# Search order: project-local "shaders" first, then the RenderMan install.
shader_path = ['shaders', os.path.join(os.environ['RMANTREE'], 'lib', 'shaders')]
@memo
def find_shader(name):
    """Return the path to "<name>.slo" on the shader search path, else None."""
    for dir_ in shader_path:
        path = os.path.join(dir_, name) + '.slo'
        if os.path.exists(path):
            return path
@memo
def get_shader_methods(path):
    """Return a compiled shader's method names via `sloinfo --methods`."""
    return tuple(check_output(['sloinfo', '--methods', path]).strip().split())
@memo
def get_shader_parameters(path):
    """Parse `sloinfo` output into {name: (storage, type, default)}.

    The default is None for parameters whose "Default value:" line never
    appears in the output.
    """
    params = {}
    last = None
    for line in check_output(['sloinfo', path]).splitlines():
        line = line.strip()
        if not line:
            continue
        m = re.match(r'^"(.+?)" "parameter (\S+) (\S+)"$', line)
        if m:
            name, storage, type_ = m.groups()
            last = name
            params[name] = [storage, type_, None]
            continue
        m = re.match(r'^Default value: (.+?)$', line)
        if m:
            # Attach the default to the most recently declared parameter.
            params[last][2] = m.group(1)
    # BUG FIX: dict.iteritems() is Python-2-only; items() works on 2 and 3.
    # NOTE(review): under Python 3 check_output returns bytes, so the output
    # would also need decoding before the str regexes — confirm target version.
    return dict((key, tuple(value)) for key, value in params.items())
@memo
def wrap_shader(name):
    """Generate and compile a wrapped copy of shader *name*.

    Renders wrapper.sl.mako with the shader's methods and parameters into
    var/shaders/wrapped_<name>.sl, compiles it with `shader`, and returns
    "wrapped_<name>" — or None when the shader cannot be found.
    """
    path = find_shader(name)
    if not path:
        debug('wrap_shader: Could not find shader %r.' % (name, ))
        return
    methods = get_shader_methods(path)
    params = get_shader_parameters(path)
    wrapped_name = 'wrapped_%s' % name
    wrapped_path = os.path.join('var', 'shaders', wrapped_name) + '.sl'
    with open(wrapped_path, 'w') as fh:
        template = Template(filename='wrapper.sl.mako')
        fh.write(template.render(name=wrapped_name, params=params, methods=set(methods)))
    # Compile next to the source: foo.sl -> foo.slo.
    call(['shader', '-Ilib', '-o', wrapped_path + 'o', wrapped_path])
    return wrapped_name
class ShaderWrapper(prman.Rif):
def __init__(self, *args, **kwargs):
super(ShaderWrapper, self).__init__(*args, **kwargs)
self._coshader_count = 0
def Surface(self, name, kw):
wrapped = wrap_shader(name)
if wrapped:
self._coshader_count += 1
count = self._coshader_count
handle = '%s_%d' % (wrapped, count)
self.m_ri.Shader(name, handle, kw)
self.m_ri.Surface(wrapped, {'string wrapped_handle': handle})
ri = prman.Ri()
rif = ShaderWrapper(ri)
prman.RifInit([rif])
ri.Begin(ri.RENDER)
for path in sys.argv[1:]:
ri.ReadArchive(path)
ri.End()
|
[
"github@mikeboers.com"
] |
github@mikeboers.com
|
62c027779e60eb717cd1ecc9b3f065887d5c151d
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/qNQkYzY8GpiFMmndh_9.py
|
1c13b93ebc6d37c3186cdfd2d117379f9db81968
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
def join(lst):
output = lst.pop(0)
n = 0
for word in lst:
for i in range(1, len(output) + 1):
substring = output[-i:]
if word.startswith(substring):
output += word[i:]
if n == 0 or i < n:
n = i
break
else:
output += word
return [output, n]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
ab7cc7d498bcf6e430f5f3ee54ce21077a31f0f6
|
e20ed90b9be7a0bcdc1603929d65b2375a224bf6
|
/generated-libraries/python/netapp/qos/qos_policy_group_delete_iter_info.py
|
b9c1249c80dd536e9d9b77e35d7b894771e5a5ac
|
[
"MIT"
] |
permissive
|
radekg/netapp-ontap-lib-gen
|
530ec3248cff5ead37dc2aa47ced300b7585361b
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
refs/heads/master
| 2016-09-06T17:41:23.263133
| 2015-01-14T17:40:46
| 2015-01-14T17:40:46
| 29,256,898
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,209
|
py
|
from netapp.qos.qos_policy_group_info import QosPolicyGroupInfo
from netapp.netapp_object import NetAppObject
class QosPolicyGroupDeleteIterInfo(NetAppObject):
"""
Information about the deletion operation that was
attempted/performed against qos-policy-group object.
were not deleted due to some error.
due to some error.
This element will be returned only if input element
'return-failure-list' is true.
"""
_qos_policy_group_key = None
@property
def qos_policy_group_key(self):
"""
The keys for the qos-policy-group object to which the
deletion applies.
"""
return self._qos_policy_group_key
@qos_policy_group_key.setter
def qos_policy_group_key(self, val):
if val != None:
self.validate('qos_policy_group_key', val)
self._qos_policy_group_key = val
_error_code = None
@property
def error_code(self):
"""
Error code, if the deletion operation caused an error.
"""
return self._error_code
@error_code.setter
def error_code(self, val):
if val != None:
self.validate('error_code', val)
self._error_code = val
_error_message = None
@property
def error_message(self):
"""
Error description, if the operation caused an error.
"""
return self._error_message
@error_message.setter
def error_message(self, val):
if val != None:
self.validate('error_message', val)
self._error_message = val
@staticmethod
def get_api_name():
return "qos-policy-group-delete-iter-info"
@staticmethod
def get_desired_attrs():
return [
'qos-policy-group-key',
'error-code',
'error-message',
]
def describe_properties(self):
return {
'qos_policy_group_key': { 'class': QosPolicyGroupInfo, 'is_list': False, 'required': 'required' },
'error_code': { 'class': int, 'is_list': False, 'required': 'optional' },
'error_message': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
|
[
"radek@gruchalski.com"
] |
radek@gruchalski.com
|
7996e73b8d40a38bcb034f1713d2b76544270a5a
|
c97b9ae1bf06757ba61f90905e4d9b9dd6498700
|
/venv/Lib/site-packages/skimage/util/setup.py
|
b9b8d3221b147d91483dc8bf4e7f656014dea973
|
[] |
no_license
|
Rahulk1p/image-processor
|
f7ceee2e3f66d10b2889b937cdfd66a118df8b5d
|
385f172f7444bdbf361901108552a54979318a2d
|
refs/heads/main
| 2023-03-27T10:09:46.080935
| 2021-03-16T13:04:02
| 2021-03-16T13:04:02
| 348,115,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4915141ae116202354212e66d14926ed94771f1e4478f2a19210d8f9d010464e
size 1206
|
[
"rksc.k1p@gmail.com"
] |
rksc.k1p@gmail.com
|
382e99a6a5e95ffe23cf57a4cfd4f4c83d66325f
|
a52f9758ace9a85adfb735609e0cb8839aab2977
|
/tests/sitetester
|
b1cfbddc4e01b1b26c0a31ef2a248c188e16be64
|
[] |
no_license
|
yadudoc/Swift
|
8ec085c4297861c2197d504571e10dce2df961a4
|
e96600ae9ce74f529a436d33d984534ca9566ee7
|
refs/heads/master
| 2021-01-22T09:09:37.081267
| 2011-07-25T08:47:03
| 2011-07-25T08:47:03
| 1,968,948
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,791
|
#!/usr/bin/env python
import commands
import os
# the workdir for a given site needs to exist on the site you're testing
# and should contain run-suite & suite.sh
# this script will ssh onto the site, pass run-suite the necesary variables
# for running suite.sh
class Site:
def __init__(self,login,logtype,workdir,queue,project):
self.login = login
self.logintype = logtype
self.workdir = workdir
self.queue = queue
self.project = project
def testSite(self,cogv,swiftv,test_type):
cmdline = self.logintype+" "+self.login+" "+self.workdir+"/run-suite.sh "+self.workdir+"/"+test_type+" "+self.workdir+" "+self.queue+" "+self.project+" "+cogv+" "+swiftv
print "running......"+cmdline
res = commands.getoutput(cmdline)
print res
cmdline2 = "scp -r "+s+":"+rundir+" ."
res2 = commands.getoutput(cmdline)
#---------------------main--------------------------------------------------------
sites = []
s_logins = ['skenny@login2.pads.ci.uchicago.edu','login-abe.ncsa.teragrid.org','login1-qb.loni-lsu.teragrid.org']
logintype = ['ssh','gsissh','gsissh']
workdirs = ['/home/skenny/swift_runs/tests','/u/ac/skenny/swift_runs/tests','/home/skenny/swift_runs/tests']
queues = ['short','debug','workq']
projects = ['CI-IBN000039','TG-DBS080004N','TG-DBS080004N']
test_type = "groups/local-pbs-coasters.sh"
cogv = 'branches/4.1.8'
swiftv = 'branches/release-0.92'
rundir = commands.getoutput('date +\"%Y-%m-%d\"')
rundir = "run-"+rundir
for idx,s in enumerate(s_logins):
sites.append(Site(s,logintype[idx],workdirs[idx],queues[idx],projects[idx]))
# loop over all or run single
sites[1].testSite(cogv,swiftv,test_type)
sites[2].testSite(cogv,swiftv,test_type)
|
[
"yadudoc1729@gmail.com"
] |
yadudoc1729@gmail.com
|
|
681c3498de14ed65faac6556b9a4a4fdd30bb14a
|
23b9600c8a5afb6451902c3c9b3fd3a6ba9ed1e3
|
/pycontour/cnt/tests/test_cnt_property.py
|
671b185d4386656c8b6478b5f6fbb0f5be19fe84
|
[
"Apache-2.0"
] |
permissive
|
codingPingjun/pycontour
|
a822cef6f40f80b978b6e24db660d46c4e5e3660
|
892f42dd8569bcffe50433c32ca3bb414163a293
|
refs/heads/master
| 2020-03-29T10:37:48.033915
| 2018-09-21T18:17:24
| 2018-09-21T18:17:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
# -*- coding: utf-8 -*-
import os, sys
from pycontour import point_list_to_np_arr
from pycontour import np_arr_to_cv_cnt
from pycontour.cnt import get_cnt_area
from pycontour.cnt import get_cnt_aspect_ratio
from pycontour.cnt import get_cnt_solidity
def test_property(cnt):
cnt_area = get_cnt_area(cnt)
cnt_aspect_ratio = get_cnt_aspect_ratio(cnt)
cnt_solidty = get_cnt_solidity(cnt)
print("Contour area is {}".format(cnt_area))
print("Contour aspect ratio is {}".format(cnt_aspect_ratio))
print("Contour solidity is {}".format(cnt_solidty))
if __name__ == "__main__":
point_list1 = [(0, 1), (1, 2), (2, 1), (1, 0)]
np_arr1 = point_list_to_np_arr(point_list1)
cnt1 = np_arr_to_cv_cnt(np_arr1)
test_property(cnt1)
point_list2 = [(0, 0), (1, 2), (2, 0), (1, 0), (1, -1)]
np_arr2 = point_list_to_np_arr(point_list2)
cnt2 = np_arr_to_cv_cnt(np_arr2)
test_property(cnt2)
|
[
"chenpingjun@gmx.com"
] |
chenpingjun@gmx.com
|
112b1c682d5f843710c2bb57bd848e533ac37db9
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python/generated/test/test_com_adobe_cq_social_commons_comments_scheduler_impl_search_scheduled_pos_properties.py
|
1694f07b9f29ee9d009be5fff57d48e5001c1ead
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
# coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_adobe_cq_social_commons_comments_scheduler_impl_search_scheduled_pos_properties import ComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties(unittest.TestCase):
"""ComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties(self):
"""Test ComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties"""
# FIXME: construct object with mandatory attributes with example values
# model = swaggeraemosgi.models.com_adobe_cq_social_commons_comments_scheduler_impl_search_scheduled_pos_properties.ComAdobeCqSocialCommonsCommentsSchedulerImplSearchScheduledPosProperties() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"michael.bloch@shinesolutions.com"
] |
michael.bloch@shinesolutions.com
|
2391b4f2d82d25411949f36512c5b7942e8622d5
|
c9094a4ed256260bc026514a00f93f0b09a5d60c
|
/homeassistant/util/process.py
|
6f8bafda7a70aed91d6d24f7703edddc5045ef30
|
[
"Apache-2.0"
] |
permissive
|
turbokongen/home-assistant
|
824bc4704906ec0057f3ebd6d92788e096431f56
|
4ab0151fb1cbefb31def23ba850e197da0a5027f
|
refs/heads/dev
| 2023-03-12T05:49:44.508713
| 2021-02-17T14:06:16
| 2021-02-17T14:06:16
| 50,231,140
| 4
| 1
|
Apache-2.0
| 2023-02-22T06:14:30
| 2016-01-23T08:55:09
|
Python
|
UTF-8
|
Python
| false
| false
| 442
|
py
|
"""Util to handle processes."""
from __future__ import annotations
import subprocess
from typing import Any
# mypy: disallow-any-generics
def kill_subprocess(
# pylint: disable=unsubscriptable-object # https://github.com/PyCQA/pylint/issues/4034
process: subprocess.Popen[Any],
) -> None:
"""Force kill a subprocess and wait for it to exit."""
process.kill()
process.communicate()
process.wait()
del process
|
[
"noreply@github.com"
] |
turbokongen.noreply@github.com
|
93dbb80bdcbe510bed8809ddb9090334d1b7cba5
|
edbe6966098d925e831b4e3054c76e4ae1c1891a
|
/cluster/code/test/test_requester.py
|
4ad4d87f316d1d3324176bd449314c690175eacf
|
[
"CC-BY-3.0",
"MIT",
"BSD-3-Clause"
] |
permissive
|
Sean10/Algorithm_code
|
242fcb21de97186ed1caea30ab967c3f4b4e9351
|
8ba923150102e16a9072b8f32ced45d15b18223b
|
refs/heads/master
| 2023-06-22T17:47:07.241192
| 2023-06-19T15:22:23
| 2023-06-19T15:22:23
| 107,443,471
| 0
| 0
|
BSD-3-Clause
| 2021-06-08T20:35:47
| 2017-10-18T17:51:56
|
C++
|
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
from cluster import *
from . import utils
import mock
CLIENT_ID = 999999
class Tests(utils.ComponentTestCase):
def setUp(self):
super(Tests, self).setUp()
self.callback = mock.Mock(name='callback')
with mock.patch.object(Requester, 'client_ids') as client_ids:
client_ids.next.return_value = CLIENT_ID
self.req = Requester(self.node, 10, self.callback)
self.assertEqual(self.req.client_id, CLIENT_ID)
def test_function(self):
"""Requester should repeatedly send INVOKE until receiving a matching INVOKED"""
self.req.start()
self.assertMessage(['F999'], Invoke(caller='F999', client_id=CLIENT_ID, input_value=10))
self.network.tick(INVOKE_RETRANSMIT)
self.assertMessage(['F999'], Invoke(caller='F999', client_id=CLIENT_ID, input_value=10))
# non-matching
self.node.fake_message(Invoked(client_id=333, output=22))
self.network.tick(INVOKE_RETRANSMIT)
self.assertMessage(['F999'], Invoke(caller='F999', client_id=CLIENT_ID, input_value=10))
self.failIf(self.callback.called)
self.node.fake_message(Invoked(client_id=CLIENT_ID, output=20))
self.callback.assert_called_with(20)
self.assertUnregistered()
|
[
"sean10reborn@gmail.com"
] |
sean10reborn@gmail.com
|
7dabd080543a7db593ce58222f1664ce3b14a7f9
|
e9c4239c8064d882691314fd5b37208f10447173
|
/leetcode/252meetingRoom.py
|
4cfdabad9de26d758a4eb4eebb2264529d6c4179
|
[] |
no_license
|
IronE-G-G/algorithm
|
6f030dae6865b2f4ff4f6987b9aee06874a386c1
|
6f6d7928207534bc8fb6107fbb0d6866fb3a6e4a
|
refs/heads/master
| 2020-09-21T03:02:20.908940
| 2020-03-22T15:19:41
| 2020-03-22T15:19:41
| 224,658,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 858
|
py
|
"""
252 会议室
给定一个会议时间安排的数组,每个会议时间都会包括开始和结束的时间 [[s1,e1],[s2,e2],...] (si < ei),请你判断一个人是否能够参加这里面的全部会议。
示例 1:
输入: [[0,30],[5,10],[15,20]]
输出: false
示例 2:
输入: [[7,10],[2,4]]
输出: true
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/meeting-rooms
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
def canAttendMeetings(self, intervals: List[List[int]]) -> bool:
if not intervals:
return True
intervals.sort()
last_end = intervals[0][1]
for item in intervals[1:]:
if item[0]<last_end:
return False
last_end = item[1]
return True
|
[
"linjh95@163.com"
] |
linjh95@163.com
|
51ef5cd2d9cfbed4111cec84b099160d93c11f59
|
9d418674a6cb6797656b15926f1f259964dabd71
|
/jokedbapp/image_profiles/basic_vertical.py
|
d702ca3843eaed294094d238a45faea1dfc4b388
|
[
"MIT"
] |
permissive
|
BL-Labs/jokedbapp
|
f54a7aedded95591e1719ef19d6ae1f72bb6d73f
|
a0e03eefbf04255623a9ad81db145f1508fade5f
|
refs/heads/master
| 2020-06-08T09:19:45.827254
| 2019-04-10T16:04:14
| 2019-04-10T16:04:14
| 23,437,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,369
|
py
|
import cairo
DEFAULT = {'font': 'American Typewriter', 'size':32.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_NORMAL, 'LINEHEIGHT':1.1}
DEFAULT_SMALL = {'font': 'American Typewriter', 'size':22.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_NORMAL, 'LINEHEIGHT':1.1}
BOLD = {'font': 'American Typewriter', 'size':32.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_BOLD, 'LINEHEIGHT':1}
BOLD_SMALL = {'font': 'American Typewriter', 'size':22.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_NORMAL, 'LINEHEIGHT':1}
EMPHASIS = {'font': 'American Typewriter', 'size':32.0, 'style': cairo.FONT_SLANT_OBLIQUE, 'weight': cairo.FONT_WEIGHT_NORMAL, 'LINEHEIGHT':1}
TITLE = {'font': 'American Typewriter', 'size':32.0, 'style': cairo.FONT_SLANT_NORMAL, 'weight': cairo.FONT_WEIGHT_BOLD, 'LINEHEIGHT':1.3}
LAYOUT = {'_SIZE': (2,2), 'DEFAULT': (0,0), 'IMAGE': (1, 0), 'ATTRIB': (0,1)}
BACKGROUND_COLOUR = (0.2, 0.2, 0.2, 1.0) #RGBA
TEXTS = {'DEFAULT': DEFAULT,
'BOLD': BOLD,
'EMPHASIS': EMPHASIS,
'TITLE': TITLE,
'DEFAULT_SMALL': DEFAULT_SMALL,
'BOLD_SMALL': BOLD_SMALL,}
PARTS = {'DEFAULT': {'BACKGROUND': ((171.0/256.0), (140.0/256.0), (16.0/256.0), 1.0), 'FOREGROUND': (0.0, 0.0, 0.0, 1.0),
'TEXT': TEXTS, 'TEXTWIDTH':46, "MARGIN":(30,0)},
'IMAGE': {'BACKGROUND': ((171.0/256.0), (140.0/256.0), (16.0/256.0), 1.0), 'FOREGROUND': (0.0, 0.0, 0.0, 1.0),
'TEXT': TEXTS, 'TEXTWIDTH':None, "MARGIN":(0,0),
'IMAGES': ["/home/ben/jokedb/jokedbapp/imgs/01.png",
"/home/ben/jokedb/jokedbapp/imgs/02.png",
"/home/ben/jokedb/jokedbapp/imgs/03.png",
"/home/ben/jokedb/jokedbapp/imgs/04.png",
"/home/ben/jokedb/jokedbapp/imgs/05.png",
"/home/ben/jokedb/jokedbapp/imgs/06.png",
"/home/ben/jokedb/jokedbapp/imgs/07.png",
"/home/ben/jokedb/jokedbapp/imgs/08.png",
"/home/ben/jokedb/jokedbapp/imgs/09.png",
"/home/ben/jokedb/jokedbapp/imgs/10.png",
"/home/ben/jokedb/jokedbapp/imgs/11.png",
"/home/ben/jokedb/jokedbapp/imgs/12.png",
"/home/ben/jokedb/jokedbapp/imgs/13.png",
"/home/ben/jokedb/jokedbapp/imgs/14.png",
"/home/ben/jokedb/jokedbapp/imgs/15.png",
"/home/ben/jokedb/jokedbapp/imgs/16.png",
"/home/ben/jokedb/jokedbapp/imgs/17.png",
"/home/ben/jokedb/jokedbapp/imgs/18.png",
"/home/ben/jokedb/jokedbapp/imgs/19.png",
"/home/ben/jokedb/jokedbapp/imgs/20.png",
"/home/ben/jokedb/jokedbapp/imgs/21.png",
"/home/ben/jokedb/jokedbapp/imgs/22.png",
"/home/ben/jokedb/jokedbapp/imgs/23.png",
"/home/ben/jokedb/jokedbapp/imgs/24.png",
"/home/ben/jokedb/jokedbapp/imgs/25.png",
"/home/ben/jokedb/jokedbapp/imgs/26.png",
"/home/ben/jokedb/jokedbapp/imgs/27.png",
"/home/ben/jokedb/jokedbapp/imgs/28.png",
"/home/ben/jokedb/jokedbapp/imgs/29.png",
"/home/ben/jokedb/jokedbapp/imgs/30.png",
"/home/ben/jokedb/jokedbapp/imgs/31.png",
"/home/ben/jokedb/jokedbapp/imgs/32.png",
"/home/ben/jokedb/jokedbapp/imgs/33.png",
"/home/ben/jokedb/jokedbapp/imgs/34.png",
"/home/ben/jokedb/jokedbapp/imgs/35.png",
"/home/ben/jokedb/jokedbapp/imgs/36.png",
"/home/ben/jokedb/jokedbapp/imgs/37.png",
"/home/ben/jokedb/jokedbapp/imgs/38.png",
"/home/ben/jokedb/jokedbapp/imgs/39.png",
"/home/ben/jokedb/jokedbapp/imgs/40.png",
"/home/ben/jokedb/jokedbapp/imgs/41.png",
"/home/ben/jokedb/jokedbapp/imgs/42.png",
"/home/ben/jokedb/jokedbapp/imgs/43.png",
"/home/ben/jokedb/jokedbapp/imgs/44.png",
"/home/ben/jokedb/jokedbapp/imgs/45.png",
"/home/ben/jokedb/jokedbapp/imgs/46.png",
"/home/ben/jokedb/jokedbapp/imgs/47.png",
"/home/ben/jokedb/jokedbapp/imgs/48.png",
"/home/ben/jokedb/jokedbapp/imgs/49.png",
"/home/ben/jokedb/jokedbapp/imgs/50.png"]
},
'ATTRIB': {'BACKGROUND': ((158.0/256.0), (11.0/256.0), (15.0/256.0), 1.0), 'FOREGROUND': (1.0, 1.0, 1.0, 1.0),
'TEXT': TEXTS, 'TEXTWIDTH':40, "MARGIN":(50,0)},}
|
[
"bosteen@gmail.com"
] |
bosteen@gmail.com
|
68b2a186aa88e4263e2be029b45f32089b87e6ec
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03855/s164056215.py
|
bdc63f1ec0e4debd072fc610599656e89ad76b4e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
import sys
from collections import deque
from collections import defaultdict
from collections import Counter
def conn(n,m,e):
d=dict(zip(range(1,n+1),range(-1,(-1)*n-1,-1)))
td=defaultdict(lambda:deque([])) #tdは同値類がキーで中の元が値
c=1
for edge in e:
a=edge[0]
b=edge[1]
da=d[a] #da,dbはa,bの含まれる同値流のラベル
db=d[b]
if da<0 and db<0:
d[a]=c
d[b]=c
td[c].append(a)
td[c].append(b)
c+=1
elif da>0 and db<0:
d[b]=da
td[d[a]].append(b)
elif da<0 and db>0:
d[a]=db
td[d[b]].append(a)
elif da>0 and db>0 and da!=db:
for x in td[db]:
d[x]=da
td[da].append(x)
return list(d.values())
def components(n,k,e):
ed=defaultdict(lambda:deque())
for edge in e:
ed[edge[0]].append(edge[1])
c=0
s=[0]*n
label=[0]*n
for i in range(1,n+1):
if s[i-1]==0:
c+=1
label[c-1]=c
stack=deque([i])
while stack:
w=stack.pop()
s[w-1]=c
while ed[w]:
wn=ed[w].pop()
if s[wn-1]==0:
s[wn-1]=c
if ed[wn]:
stack.append(w)
w=wn
elif s[wn-1]<c:
label[s[wn-1]-1]=c
return [label[s[i]-1] for i in range(n)]
def components2(n,k,e):
ed=defaultdict(lambda:deque())
for edge in e:
ed[edge[0]].append(edge[1])
ed[edge[1]].append(edge[0])
c=0
s=[0]*n
for i in range(1,n+1):
if s[i-1]==0:
c+=1
stack=deque([i])
while stack:
w=stack.pop()
s[w-1]=c
while ed[w]:
wn=ed[w].pop()
if s[wn-1]==0:
s[wn-1]=c
if ed[wn]:
stack.append(w)
w=wn
return [s[i] for i in range(n)]
def main(n,k,l,e1,e2):
d1=components2(n,k,e1)
d2=components2(n,l,e2)
p=tuple(zip(iter(d1),iter(d2)))
d=Counter(p)
# print(d1,d2,d,p)
print(' '.join([str(d[x]) for x in p]))
if __name__=='__main__':
ssr=sys.stdin.readline
n,k,l=map(int,ssr().strip().split())
e1=[]
e2=[]
for _ in range(k):
e1.append(tuple(map(int,ssr().strip().split())))
for _ in range(l):
e2.append(tuple(map(int,ssr().strip().split())))
main(n,k,l,e1,e2)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
19516fddcf46da7c958112de4b4a48b588f34952
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02262/s633587482.py
|
d753ca2ac61eed71618d108cbb94d29de6373baa
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
from sys import stdin
def insertionSort(A, n, g):
global cnt
for i in range(g, n):
v = A[i]
j = i - g
while j >= 0 and A[j] > v:
A[j+g] = A[j]
j = j - g
cnt += 1
A[j+g] = v
def shellSort(A, n):
global cnt
cnt = 0
g = 1
G = [g]
while 3 * g + 1 < n:
g = 3 * g + 1
G.append(g)
m = len(G)
G.reverse()
print(m)
print(' '.join(map(str, G)))
for i in range(m):
insertionSort(A, n, G[i])
n = int(stdin.readline())
A = [int(stdin.readline()) for i in range(n)]
shellSort(A, n)
print(cnt)
for a in A:
print(a)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
17783be31b334cd82e6068aab17d8f98bec6cca8
|
de9b8b7192a0a81e9249823bb2b86f0b7e452863
|
/.history/main_20171106171523.py
|
deb4ddd52f38171dcbe23d61cbf9d45aa121589b
|
[
"MIT"
] |
permissive
|
reecebenson/uwe-dadsa-tennis-a
|
f5eaeb1b96d4e61f29279514e68eeea8ad6533db
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
refs/heads/master
| 2023-07-08T16:13:23.963348
| 2017-11-30T12:07:01
| 2017-11-30T12:07:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
# DADSA - Assignment 1
# Reece Benson
from classes import Handler as Handler
from classes import Player as Player
from classes import Season as Season
from classes import Tournament as Tournament
from classes import Round as Round
from classes import Match as Match
class App():
def __hold__(self):
input(">>> Press <Return> to terminate the program")
exit()
def __main__(self):
handler = Handler.Handler()
# Hold the program
self.__hold__()
App()
|
[
"business@reecebenson.me"
] |
business@reecebenson.me
|
bfaf7ad3a1c88c89c66f90f56a241c967f7662e2
|
4f0e26b19f9b97c2a62605c039440fa984ebaaba
|
/scripts/easy_install-2.6-script.py
|
2c20b3f8a0a56b276e911c4435a3fbfc81403321
|
[] |
no_license
|
acmiyaguchi/buildbotve
|
61ff08955997445a2b38032692d1ba0fcc9235e0
|
8f2806e1b83ff1df5f6f6313089c0d1d1f2fe288
|
refs/heads/master
| 2020-12-25T19:04:13.485076
| 2015-08-10T21:48:24
| 2015-08-10T21:48:24
| 40,506,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
#!c:\mozilla-build\buildbotve\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'distribute==0.6.14','console_scripts','easy_install-2.6'
__requires__ = 'distribute==0.6.14'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('distribute==0.6.14', 'console_scripts', 'easy_install-2.6')()
)
|
[
"acmiyaguchi@gmail.com"
] |
acmiyaguchi@gmail.com
|
6306027d406174be1fb961bc6ff1ffcd7c4b825f
|
b4afb44b8f483c048716fe12d778186ce68ac846
|
/pages/ios/ffan/fei_fan_activity_page_configs.py
|
d12c7b41288668dca21a167d40b4743ce3b02f25
|
[] |
no_license
|
liu111xiao111/UItest
|
64309b2c85f6d2334d64bb0875ba9ced459ebb1e
|
67e2acc9a99da81022e286e8d8ec7ccb12636ff3
|
refs/heads/master
| 2021-09-01T18:30:28.044296
| 2017-12-28T04:36:46
| 2017-12-28T04:36:46
| 115,585,226
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class FeiFanActivityPageConfigs(object):
'''
This is a configuration class for FeiFanActivityPage class.
'''
# Assert view time out
assert_view_timeout = 10
# Assert invalid view time out
assert_invalid_view_time = 3
# Click button time out
click_on_button_timeout = 10
# Fei fan activity title
name_fei_fan_activity_title_st = u"飞凡活动"
def __init__(self):
pass
|
[
"tl@neusoft.com"
] |
tl@neusoft.com
|
a622006114c11724974981c8fde6b7a6250f0085
|
57e6f45405452526945c34c43d42c8f8fdbf1de4
|
/changeseeking_tracing/run_m6.py
|
fd887a5c4f2374d0d3d53b7e2b0b5f5750026482
|
[] |
no_license
|
mitroadmaps/map-maintainer
|
d48c4d58d4129672afb615674eb4718ca265a870
|
b9e125830ed177f182bbd87d18e8b76946408e7b
|
refs/heads/master
| 2023-08-29T13:00:09.442187
| 2021-11-02T23:52:43
| 2021-11-02T23:52:43
| 335,781,636
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,214
|
py
|
from discoverlib import geom, graph
import model_m6a as model
import tileloader as tileloader
import numpy
import math
import os
import os.path
import random
import scipy.ndimage
import sys
import tensorflow as tf
import time
model_path = sys.argv[1]
old_tile_path = sys.argv[2]
new_tile_path = sys.argv[3]
graph_path = sys.argv[4]
angle_path = sys.argv[5]
MODEL_BASE = model_path
tileloader.REGIONS = ['mass']
tileloader.TRAINING_REGIONS = tileloader.REGIONS
tileloader.tile_dir = [
old_tile_path,
new_tile_path,
]
tileloader.graph_dir = graph_path
tileloader.angles_dir = angle_path
WINDOW_SIZE = 256
NUM_TRAIN_TILES = 1024
TILE_SIZE = 4096
RECT_OVERRIDE = None
NUM_BUCKETS = 64
MASK_NEAR_ROADS = False
tileloader.tile_size = 4096
tileloader.window_size = 256
tiles = tileloader.Tiles(2, 20, NUM_TRAIN_TILES+8, 'sat')
tiles.prepare_training()
train_tiles = list(tiles.train_tiles)
random.shuffle(train_tiles)
num_val = len(train_tiles)//10
val_tiles = train_tiles[0:num_val]
train_tiles = train_tiles[num_val:]
print('pick {} train tiles from {}'.format(len(train_tiles), len(tiles.train_tiles)))
# initialize model and session
print('initializing model')
m = model.Model(input_channels=3, bn=True)
session = tf.Session()
model_path = os.path.join(MODEL_BASE, 'model_latest/model')
best_path = os.path.join(MODEL_BASE, 'model_best/model')
if os.path.isfile(model_path + '.meta'):
print('... loading existing model')
m.saver.restore(session, model_path)
else:
print('... initializing a new model')
session.run(m.init_op)
def get_tile_rect(tile):
if RECT_OVERRIDE:
return RECT_OVERRIDE
p = geom.Point(tile.x, tile.y)
return geom.Rectangle(
p.scale(TILE_SIZE),
p.add(geom.Point(1, 1)).scale(TILE_SIZE)
)
def get_tile_example(tile, tries=10):
rect = get_tile_rect(tile)
# pick origin: must be multiple of the output scale
origin = geom.Point(random.randint(0, rect.lengths().x//4 - WINDOW_SIZE//4), random.randint(0, rect.lengths().y//4 - WINDOW_SIZE//4))
origin = origin.scale(4)
origin = origin.add(rect.start)
tile_origin = origin.sub(rect.start)
big_ims = tiles.cache.get_window(tile.region, rect, geom.Rectangle(tile_origin, tile_origin.add(geom.Point(WINDOW_SIZE, WINDOW_SIZE))))
if len(tileloader.get_tile_keys()) > 1:
inputs = [big_ims[key] for key in tileloader.get_tile_keys()]
#input = numpy.concatenate(inputs, axis=2).astype('float32') / 255.0
input = random.choice(inputs).astype('float32') / 255.0
else:
input = big_ims['input'].astype('float32') / 255.0
target = big_ims['angles'].astype('float32') / 255.0
if numpy.count_nonzero(target.max(axis=2)) < 64 and tries > 0:
#return get_tile_example(tile, tries - 1)
return None
example = {
'region': tile.region,
'origin': origin,
'input': input,
'target': target,
}
if MASK_NEAR_ROADS:
mask = target.max(axis=2) > 0
mask = scipy.ndimage.morphology.binary_dilation(mask, iterations=9)
example['mask'] = mask
return example
def get_example(traintest='train'):
while True:
if traintest == 'train':
tile = random.choice(train_tiles)
elif traintest == 'test':
tile = random.choice(val_tiles)
example = get_tile_example(tile)
if example is not None:
return example
val_examples = [get_example('test') for _ in range(2048)]
def vis_example(example, outputs=None):
x = numpy.zeros((WINDOW_SIZE, WINDOW_SIZE, 3), dtype='uint8')
x[:, :, :] = example['input'] * 255
x[WINDOW_SIZE//2-2:WINDOW_SIZE//2+2, WINDOW_SIZE//2-2:WINDOW_SIZE//2+2, :] = 255
gc = tiles.get_gc(example['region'])
rect = geom.Rectangle(example['origin'], example['origin'].add(geom.Point(WINDOW_SIZE, WINDOW_SIZE)))
for edge in gc.edge_index.search(rect):
start = edge.src.point
end = edge.dst.point
for p in geom.draw_line(start.sub(example['origin']), end.sub(example['origin']), geom.Point(WINDOW_SIZE, WINDOW_SIZE)):
x[p.x, p.y, 0:2] = 0
x[p.x, p.y, 2] = 255
for i in range(WINDOW_SIZE):
for j in range(WINDOW_SIZE):
di = i - WINDOW_SIZE//2
dj = j - WINDOW_SIZE//2
d = math.sqrt(di * di + dj * dj)
a = int((math.atan2(dj, di) - math.atan2(0, 1) + math.pi) * NUM_BUCKETS / 2 / math.pi)
if a >= NUM_BUCKETS:
a = NUM_BUCKETS - 1
elif a < 0:
a = 0
elif d > 100 and d <= 120 and example['target'] is not None:
x[i, j, 0] = example['target'][WINDOW_SIZE//8, WINDOW_SIZE//8, a] * 255
x[i, j, 1] = example['target'][WINDOW_SIZE//8, WINDOW_SIZE//8, a] * 255
x[i, j, 2] = 0
elif d > 70 and d <= 90 and outputs is not None:
x[i, j, 0] = outputs[WINDOW_SIZE//8, WINDOW_SIZE//8, a] * 255
x[i, j, 1] = outputs[WINDOW_SIZE//8, WINDOW_SIZE//8, a] * 255
x[i, j, 2] = 0
return x
def get_learning_rate(epoch):
if epoch < 100:
return 1e-4
else:
return 1e-5
best_loss = None
for epoch in range(200):
start_time = time.time()
train_losses = []
for _ in range(1024):
examples = [get_example('train') for _ in range(model.BATCH_SIZE)]
feed_dict = {
m.is_training: True,
m.inputs: [example['input'] for example in examples],
m.targets: [example['target'] for example in examples],
m.learning_rate: get_learning_rate(epoch),
}
if MASK_NEAR_ROADS:
feed_dict[m.mask] = [example['mask'] for example in examples]
_, loss = session.run([m.optimizer, m.loss], feed_dict=feed_dict)
train_losses.append(loss)
train_loss = numpy.mean(train_losses)
train_time = time.time()
val_losses = []
for i in range(0, len(val_examples), model.BATCH_SIZE):
examples = val_examples[i:i+model.BATCH_SIZE]
feed_dict = {
m.is_training: False,
m.inputs: [example['input'] for example in examples],
m.targets: [example['target'] for example in examples],
}
if MASK_NEAR_ROADS:
feed_dict[m.mask] = [example['mask'] for example in examples]
loss = session.run([m.loss], feed_dict=feed_dict)
val_losses.append(loss)
val_loss = numpy.mean(val_losses)
val_time = time.time()
print('iteration {}: train_time={}, val_time={}, train_loss={}, val_loss={}/{}'.format(epoch, int(train_time - start_time), int(val_time - train_time), train_loss, val_loss, best_loss))
m.saver.save(session, model_path)
if best_loss is None or val_loss < best_loss:
best_loss = val_loss
m.saver.save(session, best_path)
|
[
"fbastani@perennate.com"
] |
fbastani@perennate.com
|
9687d37ecb9adfe8cd3cd82ced27afb31f1bafd2
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2653/60749/257218.py
|
8275f8a1cd5eee9113e42a0a5bae23d48fe94bb8
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
n=int(input())
res=[]
for _ in range(n):
res.append(input().split(" "))
for h in res:
a=h[0]
b=h[1]
res=str((a-1)*(10-b))
print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5eb4a0a88031d87a676379e40039387261a2a1cf
|
d967cf34a147f1bde1839fecfa1d356bb4c83d66
|
/scripts/releaser_hooks.py
|
5c115013ca4c09b6a6f7c21760ccdee4dbc72608
|
[
"BSD-3-Clause"
] |
permissive
|
Lessica/django-photologue
|
dbb44656b85c06f5c733ca50efb3595599b9387e
|
3e2e7b3cf02ba396ccb063927513930a9a711036
|
refs/heads/master
| 2022-04-18T01:52:04.228366
| 2022-02-23T10:01:06
| 2022-02-23T10:01:06
| 256,437,446
| 0
| 0
|
BSD-3-Clause
| 2020-04-17T07:49:46
| 2020-04-17T07:49:45
| null |
UTF-8
|
Python
| false
| false
| 3,436
|
py
|
import os
import subprocess
try:
import polib
except ImportError:
print('Msg to the package releaser: prerelease hooks will not work as you have not installed polib.')
raise
import codecs
import copy
def prereleaser_before(data):
    """Pre-release hook: run all code checks, then refresh CONTRIBUTORS.txt.

    1. Run the unit tests one last time before we make a release.
    2. Run flake8/isort checks and verify there are no pending migrations.
    3. Update the CONTRIBUTORS.txt file from git history and .po metadata,
       committing it if it changed.

    Note: requires polib (https://pypi.python.org/pypi/polib) and the lint
    tools (flake8, isort) to be installed.

    Raises: subprocess.CalledProcessError if any check fails; Exception if
    outstanding migrations are detected.
    """
    print('Running unit tests.')
    subprocess.check_output(["python", "example_project/manage.py", "test", "photologue"])
    print('Running flake8 check.')
    # See setup.cfg for configuration options.
    subprocess.check_output(["flake8"])
    print('Running isort check.')
    subprocess.check_output(["isort", ".", "--check", "--quiet"])
    print('Checking that we have no outstanding DB migrations.')
    output = subprocess.check_output(["python", "example_project/manage.py", "makemigrations", "--dry-run",
                                      "photologue"])
    if not output == b"No changes detected in app 'photologue'\n":
        raise Exception('There are outstanding migrations for Photologue.')
    print('Updating CONTRIBUTORS.txt')
    # This command will get the author of every commit.
    output = subprocess.check_output(["git", "log", "--format='%aN'"])
    # Convert to a list (names come back wrapped in single quotes).
    contributors_list = [contributor.strip("'") for contributor in output.decode('utf-8').split('\n')]
    # Now add info from the translator files. This is incomplete, we can only list
    # the 'last contributor' to each translation.
    for language in os.listdir('photologue/locale/'):
        filename = 'photologue/locale/{}/LC_MESSAGES/django.po'.format(language)
        po = polib.pofile(filename)
        last_translator = po.metadata['Last-Translator']
        # Keep only the name part, dropping the trailing ' <email>' portion.
        contributors_list.append(last_translator[:last_translator.find('<') - 1])
    # Now we want to only show each contributor once, and to list them by how many
    # contributions they have made - a rough guide to the effort they have put in.
    contributors_dict = {}
    for author in contributors_list:
        author_copy = copy.copy(author)
        if author_copy in ('', '(no author)', 'FULL NAME'):
            # Skip bad data.
            continue
        # The creator of this project should always appear first in the list - so
        # don't add him to this list, but hard-code his name.
        if author_copy in ('Justin Driscoll', 'justin.driscoll'):
            continue
        # Handle contributors who appear under multiple names.
        if author_copy == 'richardbarran':
            author_copy = 'Richard Barran'
        if author_copy in contributors_dict:
            contributors_dict[author_copy] += 1
        else:
            contributors_dict[author_copy] = 1
    with codecs.open('CONTRIBUTORS.txt', 'w', encoding='utf8') as f:
        f.write('Photologue is made possible by all the people who have contributed'
                ' to it. A non-exhaustive list follows:\n\n')
        f.write('Justin Driscoll\n')
        # Most-active contributors first.
        for i in sorted(contributors_dict, key=contributors_dict.get, reverse=True):
            f.write(i + '\n')
    # And commit the new contributors file (only if the content changed).
    if subprocess.check_output(["git", "diff", "CONTRIBUTORS.txt"]):
        subprocess.check_output(["git", "commit", "-m", "Updated the list of contributors.", "CONTRIBUTORS.txt"])
|
[
"richard@arbee-design.co.uk"
] |
richard@arbee-design.co.uk
|
03665678c340fd12dded68cb93404683636a2552
|
63ce91bae5eeadf885262b8fe0e769a64454d257
|
/ignite_template.py
|
860100b069677d6072b88c13df487527f4e5f296
|
[
"Apache-2.0"
] |
permissive
|
Data-drone/cv_experiments
|
c7349e7808f7f9c1315ce1efe33be1f86f4a9f80
|
d6e1d9716c03a9165e3d8a08f4cc1287323a56ca
|
refs/heads/master
| 2021-06-26T04:33:10.079771
| 2021-01-19T11:40:30
| 2021-01-19T11:40:30
| 196,596,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,638
|
py
|
### Ignite Example
from argparse import ArgumentParser
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
import torch
import torch.nn.functional as F
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.datasets import MNIST
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from tqdm import tqdm
class Net(nn.Module):
    """Small LeNet-style CNN producing log-probabilities over 10 classes.

    Expects single-channel 28x28 inputs (MNIST-shaped).
    """

    def __init__(self):
        super().__init__()
        # Attribute names kept identical so saved state dicts stay loadable.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        """Return log-softmax scores of shape (batch, 10)."""
        hidden = F.relu(F.max_pool2d(self.conv1(x), 2))
        hidden = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(hidden)), 2))
        flat = hidden.view(-1, 320)
        dense = F.relu(self.fc1(flat))
        dense = F.dropout(dense, training=self.training)
        logits = self.fc2(dense)
        return F.log_softmax(logits, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
    """Build MNIST train/val DataLoaders with standard mean/std normalization."""
    transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
    train_set = MNIST(download=True, root=".", transform=transform, train=True)
    val_set = MNIST(download=False, root=".", transform=transform, train=False)
    train_loader = DataLoader(train_set, batch_size=train_batch_size, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=val_batch_size, shuffle=False)
    return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
    """Train the CNN on MNIST with ignite, reporting progress via tqdm.

    Evaluates on both the train and validation sets after each epoch.
    """
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = Net()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
    evaluator = create_supervised_evaluator(
        model,
        metrics={'accuracy': Accuracy(), 'nll': Loss(F.nll_loss)},
        device=device)

    desc = "ITERATION - loss: {:.2f}"
    pbar = tqdm(initial=0, leave=False, total=len(train_loader), desc=desc.format(0))

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        # 1-based step index within the current epoch.
        step = (engine.state.iteration - 1) % len(train_loader) + 1
        if step % log_interval == 0:
            pbar.desc = desc.format(engine.state.output)
            pbar.update(log_interval)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        pbar.refresh()
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        tqdm.write(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, metrics['accuracy'], metrics['nll'])
        )

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        tqdm.write(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, metrics['accuracy'], metrics['nll']))
        # Rewind the progress bar so the next epoch starts from zero.
        pbar.n = pbar.last_print_n = 0

    trainer.run(train_loader, max_epochs=epochs)
    pbar.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64,
help='input batch size for training (default: 64)')
parser.add_argument('--val_batch_size', type=int, default=1000,
help='input batch size for validation (default: 1000)')
parser.add_argument('--epochs', type=int, default=10,
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5,
help='SGD momentum (default: 0.5)')
parser.add_argument('--log_interval', type=int, default=10,
help='how many batches to wait before logging training status')
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval)
|
[
"bpl.law@gmail.com"
] |
bpl.law@gmail.com
|
a0a3845ac7ef4d04ce903f51f533772194a7f499
|
a0c60bd23fbdc7a89786d1f775455057aeb32701
|
/torch/onnx/_constants.py
|
8b71a4f86c173d18eec7d8955e92137a7d42a1e7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
cpuhrsch/pytorch
|
474dd74a729c11970af0a010d3f076e8ef31b74f
|
be327ec08f320e256d444693dde65fe55831bc46
|
refs/heads/master
| 2023-06-23T04:33:16.514572
| 2022-09-30T18:51:43
| 2022-09-30T18:51:43
| 118,517,346
| 2
| 2
|
NOASSERTION
| 2022-05-24T00:58:21
| 2018-01-22T21:26:11
|
C++
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
"""Constant values used in ONNX."""
ONNX_ARCHIVE_MODEL_PROTO_NAME = "__MODEL_PROTO"
ONNX_BASE_OPSET = 9
ONNX_MIN_OPSET = 7
ONNX_MAX_OPSET = 17
# ONNX_DEFAULT_OPSET generated by tools/onnx/update_default_opset_version.py
ONNX_DEFAULT_OPSET = 14
ONNX_CONSTANT_FOLDING_MIN_OPSET = 9
PYTORCH_GITHUB_ISSUES_URL = "https://github.com/pytorch/pytorch/issues"
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
9dd608ba30dfbbde63634e389ca85fd9e593e4db
|
dbbb048a0e494d92ee3851b0e67836ae38b147b5
|
/util/cdx2db.py
|
b2bd77d54d92546ab1d77b31b793c795412c9883
|
[
"Unlicense"
] |
permissive
|
ArchiveTeam/justintv-index
|
4c5ce07711c8357dbfedaec14ffe0bc3e783ab69
|
3c6397f54a9e50456ad683a944c32db2aeee4153
|
refs/heads/master
| 2021-01-18T16:22:52.003475
| 2014-09-05T20:41:15
| 2014-09-05T20:41:15
| 22,726,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
'''Process the CDX files into a database.'''
import gzip
import argparse
import dbm
import re
def read_cdx(filename):
    """Yield one 11-field tuple per record in a gzipped CDX index file.

    Validates the expected ' CDX N b a m s k r M S V g' header line first;
    a record with the wrong field count raises ValueError.
    """
    with gzip.open(filename, 'rt') as cdx:
        first_line = cdx.readline()
        assert first_line.rstrip() == ' CDX N b a m s k r M S V g'
        for record in cdx:
            (massaged_url, date, url, mime_type, status_code,
             sha1_checksum, redirect, aif_meta_tags, compressed_archive_size,
             archive_offset, warc_name) = record.rstrip().split()
            yield (massaged_url, date, url, mime_type, status_code,
                   sha1_checksum, redirect, aif_meta_tags, compressed_archive_size,
                   archive_offset, warc_name)
def main():
    """Index CDX records into two dbm maps: video_id -> user, video_id -> source URL."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('cdx_file', nargs='+')
    args = arg_parser.parse_args()

    video_2_user_db = dbm.open('video_2_user.dbm', 'c')
    video_2_server_db = dbm.open('video_2_server.dbm', 'c')

    # Compile once; these run on every record.
    user_pattern = re.compile(r'justin\.tv/([^/]+)/\w/([\d]+)')
    store_pattern = re.compile(r'store.+_([\d]+)\.')

    for cdx_file in args.cdx_file:
        print('Opening', cdx_file)
        for row in read_cdx(cdx_file):
            url = row[2]  # original (non-massaged) URL field

            match = user_pattern.search(url)
            if match:
                user = match.group(1)
                video_id = match.group(2)
                print(video_id, user)
                video_2_user_db[video_id] = user

            match = store_pattern.search(url)
            if match:
                video_id = match.group(1)
                print(video_id, url)
                video_2_server_db[video_id] = url

    video_2_user_db.close()
    video_2_server_db.close()


if __name__ == '__main__':
    main()
|
[
"chris.foo@gmail.com"
] |
chris.foo@gmail.com
|
48da04f61e056962abffa6aab149f7ef9965f6c3
|
b6bcfd935f7876fc65416e7340fda1c9b0516fd7
|
/examples/pbc/12-gamma_point_post_hf.py
|
38f775af31be359c38bccf0b98f07a92246a91a7
|
[
"Apache-2.0"
] |
permissive
|
lzypotato/pyscf
|
62f849b9a3ec8480c3da63a5822ea780608796b2
|
94c21e2e9745800c7efc7256de0d628fc60afc36
|
refs/heads/master
| 2020-09-06T22:45:04.191935
| 2019-06-18T06:04:48
| 2019-06-18T06:04:48
| 220,578,540
| 1
| 0
|
Apache-2.0
| 2019-11-09T02:13:16
| 2019-11-09T02:13:15
| null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
#!/usr/bin/env python
'''
Gamma point post-HF calculation needs only real integrals.
Methods implemented in finite-size system can be directly used here without
any modification.
'''
import numpy
from pyscf.pbc import gto, scf
cell = gto.M(
a = numpy.eye(3)*3.5668,
atom = '''C 0. 0. 0.
C 0.8917 0.8917 0.8917
C 1.7834 1.7834 0.
C 2.6751 2.6751 0.8917
C 1.7834 0. 1.7834
C 2.6751 0.8917 2.6751
C 0. 1.7834 1.7834
C 0.8917 2.6751 2.6751''',
basis = '6-31g',
verbose = 4,
)
# Gamma-point restricted HF with density fitting; mesh sets the grid used by
# the density-fitting object.
mf = scf.RHF(cell).density_fit()
mf.with_df.mesh = [10]*3
mf.kernel()
#
# Import CC, TDDFT module from the molecular implementations
#
from pyscf import cc, tddft
mycc = cc.CCSD(mf)
mycc.kernel()
mytd = tddft.TDHF(mf)
# Solve for the 5 lowest excited states.
mytd.nstates = 5
mytd.kernel()
|
[
"warlocat@zju.edu.cn"
] |
warlocat@zju.edu.cn
|
bd55384ca7a0585407e1d2dfe91d875ad040fbdf
|
9b9f7546c9d4396bae7d9065b81b8c6c163b9a1d
|
/lectures/physics/old/NumericalIntegration003.py
|
04b881c55a92a5de32c593f54cd00e16b9b1b659
|
[] |
no_license
|
geo7/csci321
|
60db9454fab00fc63624a4fc32c4dd47f02fda41
|
527744c8d76c5c4aceb07e23a1ec3127be305641
|
refs/heads/master
| 2020-12-28T14:50:17.267837
| 2015-06-03T19:18:53
| 2015-06-03T19:18:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,786
|
py
|
import numpy as N
import pygame, time
from pygame.locals import *
from pygame.color import *
import numpy as N
from particlesystem import *
#### Globals
pygame.init()
# Main 640x480 window and a solid backdrop surface matching its size.
screen = pygame.display.set_mode((640,480))
background = pygame.Surface(screen.get_size())
background.fill((128,128,255))
# Default font used by textout() for the on-screen force labels.
myfont = pygame.font.Font(None, 24)
### Forces
def drag(k):
    """Return a force function applying linear drag -k*velocity to each particle."""
    def apply_drag(psystem):
        for particle in psystem.particles:
            velocity = particle.state[3:6]
            particle.force += -k * velocity
    return apply_drag
def spring(k, center = N.array((320.0, 240.0, 0.0))):
    """Return a force function pulling every particle toward *center* with -k*(x - center)."""
    def apply_spring(psystem):
        for particle in psystem.particles:
            displacement = particle.state[0:3] - center
            particle.force += -k * displacement
    return apply_spring
def gravity(k):
    """Return a force function: every particle is pulled toward every other
    particle with constant magnitude k (direction set by relative position)."""
    def apply_gravity(psystem):
        bodies = psystem.particles
        for body in bodies:
            for other in bodies:
                if body != other:
                    offset = body.state[0:3] - other.state[0:3]
                    distance = N.sqrt(N.dot(offset, offset))
                    body.force += -k * offset / distance
    return apply_gravity
#### Utilities
def newParticle(size):
    """Create a Particle with random mass (0-5), a position uniform over the
    centered size/2 region, and a small random velocity."""
    mass = N.random.random()*5.0
    state = N.zeros(6)
    for axis in range(3):
        state[axis] = N.random.random()*size/2.0 + size/4.0
    for axis in range(3):
        state[axis+3] = N.random.random()*2.0
    return Particle(mass, N.array(state))
def newSystem(n):
    """Create a ParticleSystem of n random particles sized to the window."""
    width, height = screen.get_size()
    extent = min(width, height)
    return ParticleSystem([newParticle(extent) for _ in range(n)])
def reset(n):
    """Clear the window to the background color and return a fresh n-particle system."""
    screen.blit(background, (0,0))
    return newSystem(n)
def textout(ls):
    """Render each string in *ls* as a row of black text in the top-left corner."""
    for row, message in enumerate(ls):
        rendered = myfont.render(message, 1, (0,0,0))
        bounds = rendered.get_rect()
        bounds.topleft = (0, row*22)
        screen.blit(rendered, bounds)
def main():
    """Interactive particle demo.

    F1-F5 select different force combinations (spring/drag/gravity),
    F12 toggles trail plotting, Escape quits.
    """
    nParticles = 20
    plotTime = False
    # Active forces and the matching on-screen labels.
    myforces = [spring(0.1)]
    mytext = ["spring(0.1)"]
    mysystem = newSystem(20)
    clock = pygame.time.Clock()
    running = 1
    deltaT = 0.1
    screen.blit(background, (0,0))
    while running:
        clock.tick(60)  # cap the loop at 60 frames/second
        for event in pygame.event.get():
            if event.type == QUIT:
                running = 0
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    running = 0
                elif event.key == K_F12:
                    # Restart and toggle trail ("time") plotting.
                    mysystem = reset(nParticles)
                    plotTime = not plotTime
                elif event.key == K_F1:
                    mysystem = reset(nParticles)
                    myforces = [spring(0.1)]
                    mytext = ['spring(0.1)']
                elif event.key == K_F2:
                    mysystem = reset(nParticles)
                    myforces = [spring(0.1), drag(0.05)]
                    mytext = ['spring(0.1)','drag(0.05)']
                elif event.key == K_F3:
                    mysystem = reset(nParticles)
                    myforces = [gravity(5.0)]
                    mytext = ['gravity(5.0)']
                elif event.key == K_F4:
                    mysystem = reset(nParticles)
                    myforces = [gravity(2.0),drag(0.1)]
                    mytext = ['gravity(2)','drag(0.1)']
                elif event.key == K_F5:
                    mysystem = reset(nParticles)
                    myforces = [gravity(2.0),spring(0.2),drag(0.05)]
                    mytext = ['gravity(2.0)','spring(0.2)','drag(0.05)']
        # Advance the simulation one Euler step, then redraw.
        EulerStep(mysystem, myforces, deltaT)
        if plotTime:
            # Trail mode: draw over the previous frame without clearing.
            mysystem.Draw(screen, time=True)
        else:
            screen.blit(background, (0,0))
            mysystem.Draw(screen)
        textout(mytext)
        pygame.display.flip()
if __name__ == "__main__":
try:
main()
finally:
pygame.quit()
|
[
"geoffrey.matthews@wwu.edu"
] |
geoffrey.matthews@wwu.edu
|
feb125c8c1bdc5ba19a5cbac3035d3dc811bf671
|
4e5d078e21cccd8ad2793055ca79865c2bb4c10a
|
/crawler/julyedu_crawler/julyedu_crawler/settings.py
|
d8b27127827e007dcc8ffec5a361ee941877fd59
|
[] |
no_license
|
gifts1912/PythonProject
|
13cabf395cd9efaebca19e2ea8519d39b772a3c6
|
e6bccdb37a60bee9c219eaf8f9514109074c3ce4
|
refs/heads/master
| 2021-01-09T09:37:47.114854
| 2017-04-01T07:45:09
| 2017-04-01T07:45:09
| 81,183,554
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,206
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for julyedu_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Project identity; scrapy uses BOT_NAME in logs and the default user-agent.
BOT_NAME = 'julyedu_crawler'
SPIDER_MODULES = ['julyedu_crawler.spiders']
NEWSPIDER_MODULE = 'julyedu_crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'julyedu_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'julyedu_crawler.middlewares.JulyeduCrawlerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'julyedu_crawler.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'julyedu_crawler.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"hengyliu@hotmail.com"
] |
hengyliu@hotmail.com
|
d7dfea7732fa3aeb28a4d33e5c072968ab7880a2
|
de0724c1b71dce624ae2fcef9044952a6360c8cf
|
/pca_masks/extract_signal_subrois.py
|
7733df08085d1f4ecf29433da676cda48a8c7937
|
[] |
no_license
|
Gilles86/bias_task
|
8c52914c55dc7866d5d679305be2ad4fcb96dc5e
|
18cce163e662c7edf8d42d7f32e87f0ed644875d
|
refs/heads/master
| 2021-07-09T01:45:43.381063
| 2020-07-29T08:17:20
| 2020-07-29T08:17:20
| 168,526,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,474
|
py
|
import argparse
import os.path as op
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
import nipype.interfaces.utility as niu
import nipype.interfaces.ants as ants
from niworkflows.interfaces.bids import DerivativesDataSink
def main(derivatives, ds):
    """Extract percent-signal-change time series from STN sub-ROIs.

    Builds and runs a nipype workflow that, for every subject of dataset *ds*,
    masks each preprocessed BOLD run with per-subject sub-ROI masks
    (stnl/stnr x A/B/C) and sinks one CSV of signal per run.

    derivatives: root directory of the derivatives tree.
    ds: dataset identifier, 'ds-01' or 'ds-02'.
    """
    # Subject lists differ per dataset; ds-02 excludes subject 04.
    if ds == 'ds-01':
        subjects = ['{:02d}'.format(s) for s in range(1, 20)]
    elif ds == 'ds-02':
        subjects = ['{:02d}'.format(s) for s in range(1, 16)]
        subjects.pop(3) # Remove 4
    subjects = subjects
    wf_folder = '/tmp/workflow_folders'
    templates = {'preproc':op.join(derivatives, ds, 'fmriprep', 'sub-{subject}', 'func',
                                   'sub-{subject}_task-randomdotmotion_run-*_space-T1w_desc-preproc_bold.nii.gz')}
    templates['individual_mask'] = op.join(derivatives, ds, 'pca_masks', 'sub-{subject}', 'anat',
                                           'sub-{subject}_desc-{mask}_space-T1w_subroi-{subroi}_roi.nii.gz')
    wf = pe.Workflow(name='extract_signal_submasks_{}'.format(ds),
                     base_dir=wf_folder)
    # Iterate over every mask/sub-ROI combination.
    mask_identity = pe.Node(niu.IdentityInterface(fields=['mask', 'subroi']),
                            name='mask_identity')
    mask_identity.iterables = [('mask', ['stnl', 'stnr']), ('subroi', ['A', 'B', 'C'])]
    selector = pe.Node(nio.SelectFiles(templates),
                       name='selector')
    selector.iterables = [('subject', subjects)]
    wf.connect(mask_identity, 'mask', selector, 'mask')
    wf.connect(mask_identity, 'subroi', selector, 'subroi')
    def extract_signal(preproc, mask):
        # Runs on the compute node: mask the run and save PSC signal as CSV.
        from nilearn import image
        from nilearn import input_data
        from nipype.utils.filemanip import split_filename
        import os.path as op
        import pandas as pd
        _, fn, ext = split_filename(preproc)
        masker = input_data.NiftiMasker(mask, standardize='psc')
        data = pd.DataFrame(masker.fit_transform(preproc))
        new_fn = op.abspath('{}_signal.csv'.format(fn))
        data.to_csv(new_fn)
        return new_fn
    extract_signal_node = pe.MapNode(niu.Function(function=extract_signal,
                                                  input_names=['preproc', 'mask'],
                                                  output_names=['signal']),
                                     iterfield=['preproc'],
                                     name='extract_signal_node')
    wf.connect(selector, 'preproc', extract_signal_node, 'preproc')
    wf.connect(selector, 'individual_mask', extract_signal_node, 'mask')
    datasink_signal = pe.MapNode(DerivativesDataSink(base_directory=op.join(derivatives, ds),
                                                     out_path_base='extracted_signal'),
                                 iterfield=['source_file', 'in_file'],
                                 name='datasink_signal')
    wf.connect(selector, 'preproc', datasink_signal, 'source_file')
    wf.connect(extract_signal_node, 'signal', datasink_signal, 'in_file')
    wf.connect(mask_identity, 'mask', datasink_signal, 'desc')
    def get_subroi_suffix(subroi):
        # Output filename suffix encoding the sub-ROI label.
        return 'subroi-{}_roi'.format(subroi)
    wf.connect(mask_identity, ('subroi', get_subroi_suffix), datasink_signal, 'suffix')
    wf.run(plugin='MultiProc',
           plugin_args={'n_procs':4})
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('ds', type=str,)
    args = parser.parse_args()
    # The derivatives root is hard-coded; only the dataset id comes from the CLI.
    main('/home/shared/2018/subcortex/bias_task/',
         args.ds)
|
[
"Gilles.de.Hollander@gmail.com"
] |
Gilles.de.Hollander@gmail.com
|
902e51bf7e36d601a8ba585d3269eb982f6f8d7c
|
a38670ee08ea64af33477899a68ee22936f70ce7
|
/luffy/第三模块/第6章网络编程/第6章每小节/5 文件传输/优化/服务端.py
|
d8ea33a935f6021bdfff842963a99952f97a3b17
|
[] |
no_license
|
foremostxiao/d
|
40ed37215f411e8b081a4cb92c8ecbd335cd9d76
|
fe80672adc6b2406365b05d5cedd02c6abf66c11
|
refs/heads/master
| 2020-03-29T13:51:19.589004
| 2018-09-23T09:29:56
| 2018-09-23T09:29:56
| 149,985,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,991
|
py
|
import socket
import subprocess
import struct
import json
import os,sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
from db import settings
def get(conn,cmds):
    """Send the file named in cmds[1] to the client over *conn*.

    Wire format: 4-byte packed header length, then a JSON header
    ({'filename', 'md5', 'file_size'}), then the raw file bytes.
    """
    filename = cmds[1]
    # Step 1: build the header describing the file about to be sent.
    # NOTE(review): 'md5' is a hard-coded placeholder, not a real checksum.
    header_dic = {
        'filename': filename,
        'md5': 'xxdxxx',
        'file_size': os.path.getsize(os.path.join(settings.path_server, filename))
    }
    header_json = json.dumps(header_dic)
    header_bytes = header_json.encode('utf-8')
    # Step 2: send the header length first
    conn.send(struct.pack('i', len(header_bytes)))  # number of header bytes the client must read
    # Step 3: then send the header itself
    conn.send(header_bytes)  # the client reads in two recv() calls
    # Step 4: stream the file body.
    with open(os.path.join(settings.path_server, filename), 'rb') as f:
        for line in f:
            conn.send(line)
def put(conn):
    """Receive an uploaded file from the client and store it under settings.path_server.

    Wire format (mirror of get()): 4-byte packed header length, then a JSON
    header ({'filename', 'md5', 'file_size'}), then the raw file bytes.
    """
    # Step 1: receive the fixed 4-byte header-length prefix (struct.pack('i', ...)).
    obj = conn.recv(4)
    header_size = struct.unpack('i', obj)[0]
    # Step 2: receive the JSON header itself.
    header_bytes = conn.recv(header_size)
    # Step 3: parse the header to learn the file name and total payload size.
    header_json = header_bytes.decode('utf-8')
    header_dic = json.loads(header_json)
    print(header_dic)
    total_size = header_dic['file_size']
    # Step 4: receive exactly total_size bytes of file data.
    filename = header_dic['filename']
    with open(os.path.join(settings.path_server, filename), 'wb') as f:
        recv_size = 0
        while recv_size < total_size:
            # Never ask for more than the remaining payload: a fixed recv(1024)
            # could swallow the start of the next command sent on the same
            # connection (the "1024 pitfall" noted in the original code).
            line = conn.recv(min(1024, total_size - recv_size))
            if not line:
                # Peer closed early; stop instead of spinning forever.
                break
            f.write(line)
            recv_size += len(line)
            print(f'总大小{total_size},已下载{recv_size}')
def run():
    """Accept client connections and serve 'get'/'put' file-transfer commands."""
    phone=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # phone.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
    phone.bind(('127.0.0.1',9909)) # valid ports are 0-65535; 0-1024 are reserved for the OS
    phone.listen(5)
    print('starting...')
    while True: # connection loop
        conn,client_addr=phone.accept()
        print(client_addr)
        while True: # communication loop
            try:
                # 1. receive the command line
                res=conn.recv(8096) # b'get 3.jpeg'
                if not res:break
                # 2. parse the command and extract its arguments
                cmds = res.decode('utf-8').split()
                if cmds[0] == 'get':
                    get(conn,cmds)
                if cmds[0] == 'put':
                    put(conn)
            except ConnectionResetError: # raised on Windows when a client disconnects abruptly
                break
        conn.close()
    phone.close()
if __name__ == '__main__':
    run()
|
[
"foremostxiao@163.com"
] |
foremostxiao@163.com
|
f27edaccdc64a506e287adb8921ebb20260c7a50
|
e0c00b126aecd06e0b914a6134c8c14f647ad620
|
/comment/models.py
|
43376e0cb71cc865babafdbc205c54541d018c96
|
[] |
no_license
|
ssk1987/MyBlog_django
|
e658eb29504968fdf5659f8befbb598d039e721e
|
12d9e38b5f1b1460f3525fb4a57a0a73ceac1435
|
refs/heads/master
| 2023-03-28T09:47:08.446446
| 2021-03-26T06:52:46
| 2021-03-26T06:52:46
| 351,415,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
from django.db import models
from django.contrib.auth.models import User
from article.models import ArticlePost
from mptt.models import MPTTModel, TreeForeignKey
from ckeditor.fields import RichTextField
# Article comment, stored as an MPTT tree so replies nest under their parent.
class Comment(MPTTModel):
    # The article this comment belongs to.
    article = models.ForeignKey(ArticlePost, on_delete=models.CASCADE, related_name='comments')
    # The user who posted the comment.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments')
    # body = models.TextField()
    # Rich-text comment body (CKEditor).
    body = RichTextField()
    created_time = models.DateTimeField(auto_now_add=True)
    # mptt tree structure: parent comment for threaded replies.
    parent = TreeForeignKey('self', on_delete=models.CASCADE,
                            null=True, blank=True, related_name='children')
    # Records which user a second-level reply is addressed to.
    reply_to = models.ForeignKey(User, null=True, blank=True,
                                 on_delete=models.CASCADE, related_name='replyers')
    class MPTTMeta:
        # Siblings are ordered chronologically within the tree.
        order_insertion_by = ['created_time']
    def __str__(self):
        # Show the first 20 characters of the body in admin/listings.
        return self.body[:20]
# class Comment(models.Model):
# # 被评论的文章
# article = models.ForeignKey(ArticlePost,on_delete=models.CASCADE,related_name='comments')
# # 评论的发布者
# user = models.ForeignKey(User,on_delete=models.CASCADE,related_name='comments')
# body = models.TextField()
# created_time = models.DateTimeField(auto_now_add=True)
#
# class Meta:
# ordering = ('created_time',)
#
# def __str__(self):
# return self.body[:20]
|
[
"10293665@qq.com"
] |
10293665@qq.com
|
b9464425e45d62a6f92da84b5b394988caf0a5a1
|
0c01446c765b9765b1dd1e95dfd1915e61e5d16d
|
/run.py
|
6191de1ff540b604c8b4e4269fa1b126ea901f0d
|
[
"MIT"
] |
permissive
|
saeedbeiraki/Second_Order_Parsing
|
3cf3ff75d62297236432d3efec895ee7f6e99c04
|
333c2dc5a72b2018f3e3331a232dfe3cd63f9a37
|
refs/heads/main
| 2022-12-29T15:22:16.908353
| 2020-10-22T09:15:57
| 2020-10-22T09:15:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
# -*- coding: utf-8 -*-
import argparse
import os
from parser.cmds import Evaluate, Predict, Train
from parser.config import Config
import torch
import pdb
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Create the Biaffine Parser model.'
    )
    subparsers = parser.add_subparsers(title='Commands', dest='mode')
    # Each command object registers its own subparser and options.
    subcommands = {
        'evaluate': Evaluate(),
        'predict': Predict(),
        'train': Train()
    }
    for name, subcommand in subcommands.items():
        subparser = subcommand.add_subparser(name, subparsers)
        # --conf is shared by every subcommand.
        subparser.add_argument('--conf', '-c', default='config.ini',
                               help='path to config file')
        # subparser.add_argument('--file', '-f', default='exp/ptb',
        #                        help='path to saved files')
        # subparser.add_argument('--preprocess', '-p', action='store_true',
        #                        help='whether to preprocess the data first')
        # subparser.add_argument('--seed', '-s', default=1, type=int,
        #                        help='seed for generating random numbers')
        # subparser.add_argument('--threads', '-t', default=16, type=int,
        #                        help='max num of threads')
        # subparser.add_argument('--tree', action='store_true',
        #                        help='whether to ensure well-formedness')
        # subparser.add_argument('--feat', default='tag',
        #                        choices=['tag', 'char', 'bert'],
        #                        help='choices of additional features')
    args = parser.parse_args()
    # os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Override the default configs with parsed arguments")
    # Config merges the .ini file with the CLI namespace; the result exposes
    # attributes like threads/seed/file (presumably from the config file --
    # they are not CLI flags here; verify against parser.config).
    args = Config(args.conf).update(vars(args))
    print(f"Set the max num of threads to {args.threads}")
    print(f"Set the seed for generating random numbers to {args.seed}")
    # print(f"Set the device with ID {args.device} visible")
    torch.set_num_threads(args.threads)
    torch.manual_seed(args.seed)
    args.fields = os.path.join(args.file, 'fields')
    args.model = os.path.join(args.file, 'model')
    print(args)
    print(f"Run the subcommand in mode {args.mode}")
    cmd = subcommands[args.mode]
    cmd(args)
|
[
"wangxy1@shanghaitech.edu.cn"
] |
wangxy1@shanghaitech.edu.cn
|
0f6336a4696e6bd762d1b4c51b39b6aaca2b9344
|
3eed647ca50411ce28072085e50aaf83ea792539
|
/config.py
|
1fb9b937ded6c83b3ff2ec66e7fd7142b35075df
|
[] |
no_license
|
valhuber/ApiLogicServerProto
|
132dcd6064b63fe0d02cb40e9c58ae191a3674f1
|
5425bf518e4201b103c7c943e23f18434284e6c7
|
refs/heads/main
| 2023-03-02T03:16:17.226783
| 2021-01-26T18:07:59
| 2021-01-26T18:07:59
| 328,511,148
| 1
| 1
| null | 2021-01-26T15:24:43
| 2021-01-11T00:46:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
"""Flask configuration variables."""
from os import environ, path
import util
from dotenv import load_dotenv
# for complete flask_sqlachemy config parameters,session handling,
# read: file flask_sqlalchemy/__init__.py AND flask/config.py
'''
app.config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:///:memory:')
app.config.setdefault('SQLALCHEMY_BINDS', None)
app.config.setdefault('SQLALCHEMY_NATIVE_UNICODE', None)
app.config.setdefault('SQLALCHEMY_ECHO', False)
app.config.setdefault('SQLALCHEMY_RECORD_QUERIES', None)
app.config.setdefault('SQLALCHEMY_POOL_SIZE', None)
app.config.setdefault('SQLALCHEMY_POOL_TIMEOUT', None)
app.config.setdefault('SQLALCHEMY_POOL_RECYCLE', None)
app.config.setdefault('SQLALCHEMY_MAX_OVERFLOW', None)
app.config.setdefault('SQLALCHEMY_COMMIT_ON_TEARDOWN', False)
'''
# Resolve paths relative to this file and load environment defaults.
basedir = path.abspath(path.dirname(__file__))
load_dotenv(path.join(basedir, "default.env"))
class Config:
    """Set Flask configuration from .env file."""
    # General Config
    SECRET_KEY = environ.get("SECRET_KEY")
    FLASK_APP = environ.get("FLASK_APP")
    FLASK_ENV = environ.get("FLASK_ENV")
    DEBUG = environ.get("DEBUG")
    # Database
    # SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
    # 'sqlite:///' + os.path.join(basedir, 'app.db') + '?check_same_thread=False'
    # NOTE(review): "replace_db_url" looks like a code-generation placeholder --
    # verify it is substituted with a real connection string before deployment.
    SQLALCHEMY_DATABASE_URI = "replace_db_url"
    """ FIXME what is this
    if 'sqlite' in SQLALCHEMY_DATABASE_URI:
        util.log('Basedir: '+basedir)
        SQLALCHEMY_DATABASE_URI = "sqlite:///" + path.join(basedir, "database/db.sqlite")+ '?check_same_thread=False'
    """
    # Logged at import time so the active DB target is visible on startup.
    util.log(SQLALCHEMY_DATABASE_URI)
    # SQLALCHEMY_ECHO = environ.get("SQLALCHEMY_ECHO")
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    PROPAGATE_EXCEPTIONS = False
|
[
"valjhuber@gmail.com"
] |
valjhuber@gmail.com
|
692f0ccbe03319a9173e638c8c084eaaaa48af69
|
96c6060e49418f87f49625fa2e141324aa809b5a
|
/setup.py
|
e2e56cba82e8fc128873f37e444afd18132ff835
|
[] |
no_license
|
paulosjd/aqrecs
|
9e08dc79b74d24610bfc2c360f6fafd988ec38e7
|
3ea59811aabfb9b67431ab971e7cc9630cfea920
|
refs/heads/master
| 2020-12-10T00:18:06.044062
| 2020-03-15T22:41:00
| 2020-03-15T22:41:00
| 233,456,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
# Packaging configuration for the ``aqrecs`` Pyramid application.
import os
from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))
# The long description is the README shipped beside this script.
with open(os.path.join(here, 'README.md')) as f:
    README = f.read()

# Runtime dependencies installed with the package.
requires = [
    'plaster_pastedeploy',
    'pyramid',
    'pyramid_mako',
    'pyramid_debugtoolbar',
    'waitress',
    'alembic',
    'pyramid_retry',
    'pyramid_tm',
    'psycopg2',
    'bs4',
    'lxml',
    'pytz',
    'requests',
    'Celery',
    'redis',
    'SQLAlchemy',
    'transaction',
    'zope.sqlalchemy',
    'graphene-sqlalchemy'
]

# Extra dependencies, installable via ``pip install aqrecs[testing]``.
tests_require = [
    'WebTest >= 1.3.1',
    'pytest >= 3.7.4',
    'pytest-cov',
]

setup(
    name='aqrecs',
    version='1.3',
    description='aqrecs',
    long_description=README,
    classifiers=[
        'Programming Language :: Python',
        'Framework :: Pyramid',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
    ],
    author='',
    author_email='',
    url='',
    keywords='web pyramid pylons',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    extras_require={
        'testing': tests_require,
    },
    install_requires=requires,
    # Entry points: the PasteDeploy WSGI app factory plus maintenance scripts.
    entry_points={
        'paste.app_factory': [
            'main = aqrecs:main',
        ],
        'console_scripts': [
            'initialize_aqrecs_db=aqrecs.scripts.initialize_db:main',
            'aurn_hourly_create=aqrecs.scripts.aurn_hourly_create:main',
        ],
    },
)
|
[
"pjdavis@gmx.com"
] |
pjdavis@gmx.com
|
2740018dd7730df6381dd6898796dac5699a78f5
|
a7d41aa056165fc33b0c1d8edd50b8557f642548
|
/Python/Map-1/map_ab3.py
|
b30e1d8ec495be592f788fc466020d41a6bd6c6e
|
[] |
no_license
|
jemtca/CodingBat
|
3243ec9c5309f8581e1a54fba0b076069cec7d74
|
8545a70348dd621070c8b3efa280ca79a24f9d5a
|
refs/heads/master
| 2023-04-05T03:20:17.416495
| 2023-03-31T06:35:08
| 2023-03-31T06:35:08
| 147,287,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# modify and return the given map as follows: if exactly one of the keys "a" or "b" has a value in the map (but not both), set the other to have that same value in the map
def map_ab3(d):
    """Mirror the keys 'a' and 'b' in *d*.

    If exactly one of 'a'/'b' is present, copy its value to the missing
    key. The dict is modified in place and returned.
    """
    # Idiom fix: ``'b' not in d`` instead of ``not 'b' in d``.
    if 'a' in d and 'b' not in d:
        d['b'] = d['a']
    elif 'a' not in d and 'b' in d:
        d['a'] = d['b']
    return d


print(map_ab3({'a': 'aaa', 'c': 'cake'}))
print(map_ab3({'b': 'bbb', 'c': 'cake'}))
print(map_ab3({'a': 'aaa', 'b': 'bbb', 'c': 'cake'}))
|
[
"30645648+jemtca@users.noreply.github.com"
] |
30645648+jemtca@users.noreply.github.com
|
3ab1275b9cc38744553596af5a248053d6f0c3cc
|
e83e8a3b7ef31b36b2c590b37bf2d1df1487fe5a
|
/ninja/security/apikey.py
|
ef210d43560322ac2c455bd8552c365eca0a3299
|
[
"MIT"
] |
permissive
|
duilio/django-ninja
|
19d66eae1b3b01f9910f3ea0f569ed6d3a561707
|
8dac3c981bcf431322d32acd34c8179564a3698d
|
refs/heads/master
| 2023-01-21T07:17:02.544071
| 2020-11-25T10:48:30
| 2020-11-25T10:48:30
| 316,243,580
| 0
| 0
|
MIT
| 2020-11-26T13:56:19
| 2020-11-26T13:45:12
| null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
from ninja.security.base import AuthBase
from ninja.compatibility.request import get_headers
class APIKeyBase(AuthBase):
    """Common base for API-key authentication (OpenAPI ``apiKey`` scheme).

    Subclasses set ``openapi_in`` and implement ``_get_key`` to say where
    the key is read from (query string, cookie, or header).
    """

    openapi_type = "apiKey"
    # Name of the query parameter / cookie / header that carries the key.
    param_name = "key"

    def __init__(self):
        # Expose the parameter name in the generated OpenAPI schema.
        self.openapi_name = self.param_name
        super().__init__()

    def __call__(self, request):
        # Extract the key from the request, then delegate the actual check.
        key = self._get_key(request)
        return self.authenticate(request, key)

    def authenticate(self, request, key):
        # Subclass hook: validate *key* and return the authenticated entity.
        raise NotImplementedError("Please implement authenticate(self, request, key)")


class APIKeyQuery(APIKeyBase):
    # Key passed in the query string, e.g. ?key=...
    openapi_in = "query"

    def _get_key(self, request):
        return request.GET.get(self.param_name)


class APIKeyCookie(APIKeyBase):
    # Key stored in a cookie.
    openapi_in = "cookie"

    def _get_key(self, request):
        return request.COOKIES.get(self.param_name)


class APIKeyHeader(APIKeyBase):
    # Key sent as a request header.
    openapi_in = "header"

    def _get_key(self, request):
        headers = get_headers(request)
        return headers.get(self.param_name)
|
[
"ppr.vitaly@gmail.com"
] |
ppr.vitaly@gmail.com
|
8dfd73233628b2b4e5705f550dd176e3c4993a6f
|
fd1612fb542fede6899c3f69ff124e7b2335ad95
|
/list/views.py
|
15db2a2bde30f4d531e2cf5ac7e5e847feec1fd7
|
[] |
no_license
|
Shovon588/toDoList
|
821a7163caa6d6abb4c7f8e6ecea34e6249b1b87
|
bf037097a37734a106c959729c05d9beb0f503e6
|
refs/heads/master
| 2021-09-25T08:07:45.355541
| 2020-04-19T05:18:23
| 2020-04-19T05:18:23
| 231,726,869
| 0
| 0
| null | 2021-09-22T18:52:30
| 2020-01-04T07:37:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
from django.shortcuts import render
from list.models import Item, UserIP
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from datetime import datetime
# Create your views here.
def index(request):
    """List the current user's to-do items; POST adds a new item.

    Users are identified purely by client IP (no login): the first address
    in X-Forwarded-For when behind a proxy, otherwise REMOTE_ADDR.
    """
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        # Behind a proxy, the original client is the first entry.
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    # get_or_create returns (obj, created); only the object is needed.
    user = UserIP.objects.get_or_create(ip=ip)[0]
    # Pending items first (done=False sorts before True), then by 'time'.
    items = Item.objects.filter(user=user).order_by('done', 'time')
    context = {'data': items}
    if request.method == 'POST':
        item = request.POST.get("item")
        Item.objects.create(user=user, item=item)
        # Redirect after POST so a refresh does not duplicate the item.
        return HttpResponseRedirect(reverse('list:index'))
    return render(request, 'index.html', context=context)
def update(request, pk):
    """Edit an existing to-do item; a POST saves the new text."""
    entry = Item.objects.get(id=pk)
    if request.method != 'POST':
        # GET: show the edit form pre-filled with the current item.
        return render(request, 'update.html', context={'data': entry})
    entry.item = request.POST.get('item')
    entry.save()
    messages.success(request, 'Updated Successfully!')
    return HttpResponseRedirect(reverse('list:index'))
def delete(request, pk):
    """Confirmation page for removing an item; POST with 'yes' deletes it."""
    entry = Item.objects.get(id=pk)
    if request.method != 'POST':
        # GET: show the confirmation page.
        return render(request, 'delete.html', context={'item': entry})
    if request.POST.get('yes'):
        entry.delete()
        messages.success(request, 'Item Successfully Deleted!')
    return HttpResponseRedirect(reverse('list:index'))
def mark(request, pk):
    """Toggle an item's done flag.

    POST with 'yes' marks the item done, any other POST marks it not done;
    GET shows the confirmation page.
    """
    item = Item.objects.get(id=pk)
    context = {'item': item}
    if request.method == 'POST':
        # Collapse the duplicated save branches: the flag simply mirrors
        # whether the 'yes' button was pressed; the confirmation message
        # is shown for both transitions.
        item.done = bool(request.POST.get('yes'))
        item.save()
        messages.success(request, 'Marker Updated!')
        return HttpResponseRedirect(reverse('list:index'))
    return render(request, 'mark.html', context=context)
|
[
"mainulislam588@gmail.com"
] |
mainulislam588@gmail.com
|
83f5415c5e682e0e5c90fe418860953455cc7050
|
cebe89b09271deb0dfff1baa5e1beb8b5a4f95c4
|
/pycurve/parser.py
|
d74028c6b2e9ddaec1f1845561fc620f793867bb
|
[
"Apache-2.0"
] |
permissive
|
thatch45/curve
|
1ff3bcd8f961e25cf6d6f38a6c509c287d98eb82
|
2684733ad2de51e0a0c78a46d99ae4442f879e87
|
refs/heads/master
| 2020-05-29T21:49:37.110085
| 2013-12-12T05:38:46
| 2013-12-12T05:38:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
'''
Parse command line options
'''
import optparse
def parse(argv=None):
    '''
    Parse options for a server.

    argv: optional list of argument strings; when None (the default, and
    the previous behavior) the arguments are taken from sys.argv[1:].
    Returns the parsed options as a plain dict.
    '''
    parser = optparse.OptionParser()
    parser.add_option(
        '--server-ip',
        dest='server_ip',
        default='127.0.0.1',
        help='the server ip')
    parser.add_option(
        '--server-port',
        dest='server_port',
        default=4510,
        type='int',
        help='the server port')
    parser.add_option(
        '--ret-ip',
        dest='ret_ip',
        default='127.0.0.1',
        help='the ret ip')
    parser.add_option(
        '--ret-port',
        dest='ret_port',
        default=4511,
        type='int',
        help='the ret port')
    parser.add_option(
        '-m',
        '--message',
        dest='message',
        default='foo',
        help='The message to send')
    # Passing args=None preserves the old sys.argv behavior; an explicit
    # list makes the function testable and reusable.
    options, args = parser.parse_args(args=argv)
    return options.__dict__
|
[
"thatch45@gmail.com"
] |
thatch45@gmail.com
|
47c7333bc96d80de441ad9dfc8c33af56bea3437
|
03f32cdb30e6a44decd529f9112a6459c655b1ef
|
/2_FormNetAndSomeStatics/createnet.py
|
bcedbee39a9a6babce8a0838f2967829f15e8837
|
[] |
no_license
|
curryli/AntiLanudry-Python
|
ee693480e0c62dc0795bd9b76499149cdec7a83a
|
4d69d911be5cea6aa30f6aeb263644614808cea2
|
refs/heads/master
| 2021-01-11T01:47:24.384978
| 2016-12-14T07:04:32
| 2016-12-14T07:04:32
| 70,667,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
import sys
import re
import os

# NOTE: Python 2 script -- uses the ``print >> file`` chevron syntax.


def GetInOut(filein, fileout):
    # Keep only the first two whitespace-separated fields of each record
    # (the in/out account pair).
    r = re.compile('\s')
    with open(filein, 'r') as FILEIN:
        with open(fileout, 'w') as FILEOUT:
            for line in FILEIN.readlines():
                ItemList = r.split(line)  # in, out, money, location, date
                print>>FILEOUT, ItemList[0], ItemList[1]


def createNet(filein, fileout):
    # Deduplicate (in, out) pairs to build the edge list of the network.
    bset = set()
    r = re.compile('\s')
    with open(filein, 'r') as FILEIN:
        for line in FILEIN.readlines():
            ItemList = r.split(line)
            ItemPair = (ItemList[0], ItemList[1])
            if ItemPair not in bset:
                bset.add(ItemPair)
    with open(fileout, 'w') as FILEOUT:
        for x in bset:
            # Note: print's comma separator plus the " " literal yields
            # three spaces between the two fields.
            print>>FILEOUT, x[0], " ", x[1]


if __name__ == '__main__':
    # Pipeline: strip transactions to pairs, then emit the unique edges.
    GetInOut('MappedInOut.txt', 'GetInOut.txt')
    createNet('GetInOut.txt', 'Net.txt')
|
[
"xurui.lee@msn.com"
] |
xurui.lee@msn.com
|
cfeb943a74dbd748829d195b83c317c63c5c287f
|
d838bed08a00114c92b73982a74d96c15166a49e
|
/docs/data/learn/Bioinformatics/input/ch3_code/src/Stepik.3.10.CodeChallenge.GenerateContigsFromReads.py
|
9dac465815b57c2cd32dbeb1129b39899c5e4a18
|
[] |
no_license
|
offbynull/offbynull.github.io
|
4911f53d77f6c59e7a453ee271b1e04e613862bc
|
754a85f43159738b89dd2bde1ad6ba0d75f34b98
|
refs/heads/master
| 2023-07-04T00:39:50.013571
| 2023-06-17T20:27:05
| 2023-06-17T23:27:00
| 308,482,936
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
from FindContigs import find_maximal_non_branching_paths
from Read import Read
from ToDeBruijnGraph import to_debruijn_graph

# Stepik 3.10 code challenge: assemble contigs from a set of k-mer reads.
# NOTE(review): the input path is hard-coded to a local download.
with open('/home/user/Downloads/dataset_240263_5.txt', mode='r', encoding='utf-8') as f:
    data = f.read()

lines = data.split('\n')
kmers = lines[:]
kmers = [l.strip() for l in kmers]  # get rid of whitespace
kmers = [l for l in kmers if len(l) > 0]  # get rid of empty lines

# Build the de Bruijn graph of the reads; every maximal non-branching
# path in it is one contig, stitched back into a sequence and printed.
reads = [Read(kmer) for kmer in kmers]
graph = to_debruijn_graph(reads)
contigs = find_maximal_non_branching_paths(graph)
for contig in contigs:
    output = contig[0].stitch(contig)
    print(f'{output}')
|
[
"offbynull@gmail.com"
] |
offbynull@gmail.com
|
cc46f4e6f27650f01b8dfc036fb07f4c738cc912
|
dc95dfb24f3cd12b823dfad2cca8607ab12e757b
|
/11-Lists-Mutation/Coding Exercises/while_testing.py
|
8fe180696ca79d5a7fb264b089e7bd7d79518111
|
[] |
no_license
|
RandyG3/Python
|
06213a361deac2d653d4cd4734728838ed34e733
|
86068d81ae037beb6fd6114d93074a92c2f3108e
|
refs/heads/master
| 2023-01-06T15:18:43.173886
| 2020-11-08T03:03:34
| 2020-11-08T03:03:34
| 236,549,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
def delete_all(strings, target):
    """Remove every occurrence of *target* from *strings* in place.

    Returns the same (mutated) list so the call can be chained.
    """
    # list.remove deletes one occurrence per call, so loop until none
    # remain. (An unused counter in the original has been dropped.)
    while target in strings:
        strings.remove(target)
    return strings


print(delete_all([4, 4, 4], 4))
|
[
"40631249+RandyG3@users.noreply.github.com"
] |
40631249+RandyG3@users.noreply.github.com
|
e10707058cbb09229792c940f0b0188728ca2335
|
c39f999cae8825afe2cdf1518d93ba31bd4c0e95
|
/PYME/ParallelTasks/taskQueue.py
|
c486b62becb2b78e8edbd235be56dd08794665e8
|
[] |
no_license
|
WilliamRo/CLipPYME
|
0b69860136a9b2533f2f29fc29408d7471cb934d
|
6596167034c727ad7dad0a741dd59e0e48f6852a
|
refs/heads/master
| 2023-05-11T09:50:58.605989
| 2023-05-09T02:17:47
| 2023-05-09T02:17:47
| 60,789,741
| 3
| 1
| null | 2016-06-17T08:52:44
| 2016-06-09T16:30:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,659
|
py
|
#!/usr/bin/python
##################
# taskQueue.py
#
# Copyright David Baddeley, 2009
# d.baddeley@auckland.ac.nz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
import time
import threading
CHUNKSIZE = 50  # maximum number of tasks handed out per getTasks() call


def doNix(taskQueue):  # do nothing
    """Default on-empty callback: do nothing."""
    pass


def popZero(workerN, NWorkers, NTasks):
    """Default pop policy: give every worker the oldest task."""
    return 0


class TaskQueue:
    """In-memory queue of tasks handed out to workers.

    Tasks are arbitrary objects; on dispatch they gain a ``queueID``
    attribute and have ``initializeWorkerTimeout(t)`` called. ``onEmpty``
    runs when the last outstanding task completes; ``fTaskToPop`` picks
    which open task a given worker receives (useful when workers share
    information with previous tasks, improving per-worker buffering).
    """

    def __init__(self, name, initialTasks=(), onEmpty=doNix, fTaskToPop=popZero):
        # Fix: the default was a mutable list literal; a tuple avoids the
        # shared-default pitfall (the value is copied via list() anyway).
        self.queueID = name
        self.openTasks = list(initialTasks)
        self.closedTasks = []
        self.tasksInProgress = []
        self.onEmpty = onEmpty  # function to call when queue is empty
        self.fTaskToPop = fTaskToPop
        self.inProgressLock = threading.Lock()

    def _now(self):
        # Fix: time.clock() was removed in Python 3.8; fall back to
        # perf_counter there while preserving clock() where it still exists
        # (the ``or`` short-circuits, so perf_counter is never touched on
        # interpreters that have clock()).
        return (getattr(time, 'clock', None) or time.perf_counter)()

    def postTask(self, task):
        """Append a single task to the queue."""
        self.openTasks.append(task)

    def postTasks(self, tasks):
        """Append a batch of tasks to the queue."""
        self.openTasks += tasks

    def getTask(self, workerN=0, NWorkers=1):
        """Get a task chosen by fTaskToPop; blocks (polling) until one is open."""
        while len(self.openTasks) < 1:
            time.sleep(0.01)
        task = self.openTasks.pop(
            self.fTaskToPop(workerN, NWorkers, len(self.openTasks)))
        task.queueID = self.queueID
        task.initializeWorkerTimeout(self._now())
        with self.inProgressLock:
            self.tasksInProgress.append(task)
        return task

    def getTasks(self, workerN=0, NWorkers=1):
        """Hand out up to CHUNKSIZE open tasks at once."""
        return [self.getTask(workerN, NWorkers)
                for i in range(min(CHUNKSIZE, len(self.openTasks)))]

    def returnCompletedTask(self, taskResult):
        """Retire the matching in-progress task and file its result."""
        with self.inProgressLock:
            for it in self.tasksInProgress[:]:
                if (it.taskID == taskResult.taskID):
                    self.tasksInProgress.remove(it)
        self.fileResult(taskResult)
        if (len(self.openTasks) + len(self.tasksInProgress)) == 0:  # no more tasks
            self.onEmpty(self)

    def returnCompletedTasks(self, taskResults):
        """Batch version of returnCompletedTask."""
        with self.inProgressLock:
            for taskResult in taskResults:
                for it in self.tasksInProgress[:]:
                    if (it.taskID == taskResult.taskID):
                        self.tasksInProgress.remove(it)
        # allow this to be over-ridden
        self.fileResults(taskResults)
        if (len(self.openTasks) + len(self.tasksInProgress)) == 0:  # no more tasks
            self.onEmpty(self)

    def fileResults(self, taskResults):
        # Allow this to be over-ridden in derived classes to file multiple
        # results at once.
        for taskResult in taskResults:
            self.fileResult(taskResult)

    def fileResult(self, taskResult):
        self.closedTasks.append(taskResult)

    def getCompletedTask(self):
        """Pop the oldest completed result, or None when there is none."""
        if len(self.closedTasks) < 1:
            return None
        else:
            return self.closedTasks.pop(0)

    def checkTimeouts(self):
        """Requeue any in-progress task whose worker timeout has expired."""
        with self.inProgressLock:
            curTime = self._now()
            # Fix: iterate a copy -- the original removed items from the
            # list it was iterating, which skips elements.
            for it in self.tasksInProgress[:]:
                if 'workerTimeout' in dir(it):
                    if curTime > it.workerTimeout:
                        self.openTasks.insert(0, it)
                        self.tasksInProgress.remove(it)

    def getNumberOpenTasks(self, exact=True):
        return len(self.openTasks)

    def getNumberTasksInProgress(self):
        return len(self.tasksInProgress)

    def getNumberTasksCompleted(self):
        return len(self.closedTasks)

    def cleanup(self):
        pass

    def purge(self):
        """Drop all queue state (open, in-progress and completed tasks)."""
        self.openTasks = []
        self.closedTasks = []
        self.tasksInProgress = []

    def setPopFcn(self, fcn):
        ''' sets the function which determines which task to give a worker'''
        self.fTaskToPop = fcn
class TaskQueueWithData(TaskQueue):
    """TaskQueue that also carries a fieldName -> value data dictionary."""

    def __init__(self, name, initialTasks=[], onEmpty=doNix, fTaskToPop=popZero):
        TaskQueue.__init__(self, name, initialTasks, onEmpty, fTaskToPop)
        self.data = {}

    def getTasks(self, workerN=0, NWorkers=1):
        # Override: hand out one task per request instead of a CHUNKSIZE batch.
        return [self.getTask(workerN, NWorkers)]

    def getQueueData(self, fieldName, *args):
        '''Return the value associated with *fieldName* (extra positional
        arguments are accepted for interface compatibility but unused).'''
        return self.data[fieldName]

    def setQueueData(self, fieldName, value):
        '''Store *value* under *fieldName* in the queue's data dictionary.'''
        self.data[fieldName] = value
|
[
"willi4m@zju.edu.cn"
] |
willi4m@zju.edu.cn
|
3804ffbc4338cf88a60d5ae74c2722e1a81e2149
|
09dd58f46b1e914278067a69142230c7af0165c2
|
/blackmamba/lib/rope/base/fscommands.py
|
3564ed919c9cfb40b50806b43940c8f8240d4135
|
[
"MIT"
] |
permissive
|
zrzka/blackmamba
|
4e70262fbe3702553bf5d285a81b33eb6b3025ea
|
b298bc5d59e5aea9d494282910faf522c08ebba9
|
refs/heads/master
| 2021-01-01T18:43:19.490953
| 2020-01-20T08:26:33
| 2020-01-20T08:26:33
| 98,410,391
| 72
| 12
|
MIT
| 2020-01-20T08:26:35
| 2017-07-26T10:21:15
|
Python
|
UTF-8
|
Python
| false
| false
| 7,983
|
py
|
"""Project file system commands.
This modules implements file system operations used by rope. Different
version control systems can be supported by implementing the interface
provided by `FileSystemCommands` class. See `SubversionCommands` and
`MercurialCommands` for example.
"""
import os
import shutil
import subprocess
import rope.base.utils.pycompat as pycompat
try:
unicode
except NameError:
unicode = str
def create_fscommands(root):
    """Pick the commands implementation matching the VCS marker directory
    found under *root*; fall back to plain filesystem operations."""
    vcs_markers = {'.hg': MercurialCommands,
                   '.svn': SubversionCommands,
                   '.git': GITCommands,
                   '_svn': SubversionCommands,
                   '_darcs': DarcsCommands}
    entries = set(os.listdir(root))
    for marker, factory in vcs_markers.items():
        if marker in entries:
            try:
                return factory(root)
            except (ImportError, OSError):
                # Bindings missing or tool unavailable -- keep looking.
                pass
    return FileSystemCommands()
class FileSystemCommands(object):
    """Plain filesystem implementation of the file operations (no VCS)."""

    def create_file(self, path):
        # Touch: create an empty file (truncating an existing one).
        open(path, 'w').close()

    def create_folder(self, path):
        os.mkdir(path)

    def move(self, path, new_location):
        shutil.move(path, new_location)

    def remove(self, path):
        # Files and directories require different removal primitives.
        if os.path.isfile(path):
            os.remove(path)
        else:
            shutil.rmtree(path)

    def write(self, path, data):
        # Idiom: a context manager replaces the manual try/finally close;
        # *data* is expected to be bytes (binary mode).
        with open(path, 'wb') as file_:
            file_.write(data)
class SubversionCommands(object):
    """File operations that keep a Subversion working copy in sync.

    ``pysvn`` is imported inside ``__init__`` so that create_fscommands can
    catch the ImportError and fall back to plain filesystem commands.
    """

    def __init__(self, *args):
        self.normal_actions = FileSystemCommands()
        import pysvn
        self.client = pysvn.Client()

    def create_file(self, path):
        self.normal_actions.create_file(path)
        self.client.add(path, force=True)

    def create_folder(self, path):
        self.normal_actions.create_folder(path)
        self.client.add(path, force=True)

    def move(self, path, new_location):
        self.client.move(path, new_location, force=True)

    def remove(self, path):
        self.client.remove(path, force=True)

    def write(self, path, data):
        # Plain write: svn tracks content changes without an extra command.
        self.normal_actions.write(path, data)
class MercurialCommands(object):
    """File operations that keep a Mercurial repository in sync.

    Fix: the fallback for older mercurial APIs used a bare ``except:``,
    which would also swallow KeyboardInterrupt/SystemExit; it now catches
    ``Exception`` only.
    """

    def __init__(self, root):
        self.hg = self._import_mercurial()
        self.normal_actions = FileSystemCommands()
        try:
            # Newer mercurial: ui options accepted by the constructor.
            self.ui = self.hg.ui.ui(
                verbose=False, debug=False, quiet=True,
                interactive=False, traceback=False, report_untrusted=False)
        except Exception:
            # Older mercurial: construct first, then configure.
            self.ui = self.hg.ui.ui()
            self.ui.setconfig('ui', 'interactive', 'no')
            self.ui.setconfig('ui', 'debug', 'no')
            self.ui.setconfig('ui', 'traceback', 'no')
            self.ui.setconfig('ui', 'verbose', 'no')
            self.ui.setconfig('ui', 'report_untrusted', 'no')
            self.ui.setconfig('ui', 'quiet', 'yes')
        self.repo = self.hg.hg.repository(self.ui, root)

    def _import_mercurial(self):
        # Deferred import: missing bindings surface as ImportError inside
        # create_fscommands' try/except, not at module import time.
        import mercurial.commands
        import mercurial.hg
        import mercurial.ui
        return mercurial

    def create_file(self, path):
        self.normal_actions.create_file(path)
        self.hg.commands.add(self.ui, self.repo, path)

    def create_folder(self, path):
        # hg tracks files only; nothing to register for a bare directory.
        self.normal_actions.create_folder(path)

    def move(self, path, new_location):
        self.hg.commands.rename(self.ui, self.repo, path,
                                new_location, after=False)

    def remove(self, path):
        self.hg.commands.remove(self.ui, self.repo, path)

    def write(self, path, data):
        self.normal_actions.write(path, data)
class GITCommands(object):
    """File operations that keep a git work tree in sync by shelling out."""

    def __init__(self, root):
        self.root = root
        # Fails (OSError) when the git executable is missing, letting
        # create_fscommands fall back to plain filesystem commands.
        self._do(['version'])
        self.normal_actions = FileSystemCommands()

    def create_file(self, path):
        self.normal_actions.create_file(path)
        self._do(['add', self._in_dir(path)])

    def create_folder(self, path):
        # git tracks files only; nothing to register for a bare directory.
        self.normal_actions.create_folder(path)

    def move(self, path, new_location):
        self._do(['mv', self._in_dir(path), self._in_dir(new_location)])

    def remove(self, path):
        self._do(['rm', self._in_dir(path)])

    def write(self, path, data):
        # XXX: should we use ``git add``?
        self.normal_actions.write(path, data)

    def _do(self, args):
        _execute(['git'] + args, cwd=self.root)

    def _in_dir(self, path):
        # Convert an absolute path into one relative to the repository root.
        if path.startswith(self.root):
            return path[len(self.root) + 1:]
        # NOTE(review): returning the *root* for a path outside the root
        # looks suspicious (one would expect ``path`` back) -- confirm
        # against upstream before changing.
        return self.root
class DarcsCommands(object):
    """File operations that keep a Darcs repository in sync by shelling out."""

    def __init__(self, root):
        self.root = root
        self.normal_actions = FileSystemCommands()

    def create_file(self, path):
        self.normal_actions.create_file(path)
        self._do(['add', path])

    def create_folder(self, path):
        # Unlike git/hg, directories are registered with darcs as well.
        self.normal_actions.create_folder(path)
        self._do(['add', path])

    def move(self, path, new_location):
        self._do(['mv', path, new_location])

    def remove(self, path):
        self.normal_actions.remove(path)

    def write(self, path, data):
        self.normal_actions.write(path, data)

    def _do(self, args):
        # Run the darcs executable inside the repository root.
        _execute(['darcs'] + args, cwd=self.root)
def _execute(args, cwd=None):
process = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE)
process.wait()
return process.returncode
def unicode_to_file_data(contents, encoding=None):
    """Encode *contents* to bytes for writing; non-text input passes through.

    When no encoding is supplied, honour a PEP 263 coding cookie found in
    the text; otherwise try the default codec, then fall back to utf-8.
    """
    if not isinstance(contents, unicode):
        return contents
    chosen = encoding if encoding is not None else read_str_coding(contents)
    if chosen is not None:
        return contents.encode(chosen)
    try:
        return contents.encode()
    except UnicodeEncodeError:
        return contents.encode('utf-8')
def file_data_to_unicode(data, encoding=None):
    """Decode raw file *data* to text and normalise line endings to LF."""
    text = _decode_data(data, encoding)
    if '\r' not in text:
        return text
    # Windows endings first, then bare carriage returns (order matters).
    return text.replace('\r\n', '\n').replace('\r', '\n')
def _decode_data(data, encoding):
    """Decode *data* to text, guessing the encoding when none is supplied."""
    if isinstance(data, unicode):
        return data
    guess = encoding if encoding is not None else read_str_coding(data)
    if guess is None:
        # No encoding cookie. PEP 263 says "not explicitly defined" means
        # ascii, but utf-8 fully covers ascii and is the only sane
        # non-latin default, so use it instead.
        guess = 'utf-8'
    try:
        return data.decode(guess)
    except (UnicodeError, LookupError):
        # latin1 maps every byte value, so this fallback never fails.
        return data.decode('latin1')
def read_file_coding(path):
    """Return the PEP 263 coding cookie declared in the file at *path*,
    or None when absent.

    Bug fixes: the file was opened with mode 'b', which is not a valid
    mode string ('rb' is required), so every call raised ValueError; the
    old code also mixed bytes and str under Python 3 ('\\n' counting and
    ''.join on bytes chunks) and computed a newline count it never used.
    """
    result = []
    with open(path, 'rb') as f:
        while True:
            current = f.read(10)
            if not current:
                break
            result.append(current)
    return _find_coding(b''.join(result))
def read_str_coding(source):
    """Return the coding cookie declared within the first two lines of
    *source* (bytes or str), or None if there is none."""
    # Pick the newline literal matching the input's type.
    newline = b'\n' if type(source) == bytes else '\n'
    try:
        first_end = source.index(newline) + 1
        second_end = source.index(newline, first_end) + 1
    except ValueError:
        # Fewer than two lines: scan everything we have.
        second_end = len(source)
    return _find_coding(source[:second_end])
def _find_coding(text):
    """Scan *text* for a PEP 263 ``coding[:=] <name>`` declaration and
    return the encoding name as str; returns None implicitly when absent."""
    if isinstance(text, pycompat.str):
        text = text.encode('utf-8')
    coding = b'coding'
    # On Python 3, indexing bytes yields ints; to_chr normalises each
    # element to a 1-char string so isspace()/isalnum() work on 2 and 3.
    to_chr = chr if pycompat.PY3 else lambda x: x
    try:
        start = text.index(coding) + len(coding)
        if text[start] not in b'=:':
            return
        start += 1
        # Skip whitespace between the separator and the encoding name.
        while start < len(text) and to_chr(text[start]).isspace():
            start += 1
        end = start
        # Encoding names consist of alphanumerics plus '-' and '_'.
        while end < len(text):
            c = text[end]
            if not to_chr(c).isalnum() and c not in b'-_':
                break
            end += 1
        result = text[start:end]
        if isinstance(result, bytes):
            result = result.decode('utf-8')
        return result
    except ValueError:
        # b'coding' not present at all.
        pass
|
[
"rvojta@me.com"
] |
rvojta@me.com
|
8443bf3ecaa41447fe625e2fe2294e82ecab398d
|
ccbfc7818c0b75929a1dfae41dc061d5e0b78519
|
/aliyun-openapi-python-sdk-master/aliyun-python-sdk-cloudesl/aliyunsdkcloudesl/request/v20180801/DescribeStoresRequest.py
|
5a7a3d07d56264658c0fd2ae92922535fd509779
|
[
"Apache-2.0"
] |
permissive
|
P79N6A/dysms_python
|
44b634ffb2856b81d5f79f65889bfd5232a9b546
|
f44877b35817e103eed469a637813efffa1be3e4
|
refs/heads/master
| 2020-04-28T15:25:00.368913
| 2019-03-13T07:52:34
| 2019-03-13T07:52:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,173
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeStoresRequest(RpcRequest):
    """RPC request for the cloudesl ``DescribeStores`` API (2018-08-01):
    paginated store lookup with optional filters.

    Generated-style accessor pairs below map one-to-one onto query
    parameters of the same name.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'cloudesl', '2018-08-01', 'DescribeStores')

    # Date-range filter: upper bound.
    def get_ToDate(self):
        return self.get_query_params().get('ToDate')

    def set_ToDate(self, ToDate):
        self.add_query_param('ToDate', ToDate)

    # Pagination: page size.
    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)

    def get_StoreName(self):
        return self.get_query_params().get('StoreName')

    def set_StoreName(self, StoreName):
        self.add_query_param('StoreName', StoreName)

    def get_Groups(self):
        return self.get_query_params().get('Groups')

    def set_Groups(self, Groups):
        self.add_query_param('Groups', Groups)

    def get_StoreId(self):
        return self.get_query_params().get('StoreId')

    def set_StoreId(self, StoreId):
        self.add_query_param('StoreId', StoreId)

    def get_Brand(self):
        return self.get_query_params().get('Brand')

    def set_Brand(self, Brand):
        self.add_query_param('Brand', Brand)

    # Pagination: page number.
    def get_PageNumber(self):
        return self.get_query_params().get('PageNumber')

    def set_PageNumber(self, PageNumber):
        self.add_query_param('PageNumber', PageNumber)

    # Date-range filter: lower bound.
    def get_FromDate(self):
        return self.get_query_params().get('FromDate')

    def set_FromDate(self, FromDate):
        self.add_query_param('FromDate', FromDate)
|
[
"1478458905@qq.com"
] |
1478458905@qq.com
|
b7cc73147ba4ef14c6838961e1aef059cb0b31c4
|
66bb3f65f0157a2b5475903c90a54d5173bc4f0a
|
/djthia/bin/thank_you.py
|
ee920d6ca181a5ffa1743eab7dad3af84a511194
|
[
"MIT"
] |
permissive
|
carthage-college/django-djthia
|
691233049bcb05391fd82e390edb717f3bc0588a
|
52401592291a980c7226c0573d415e7cdb8c20d3
|
refs/heads/master
| 2023-03-04T08:22:03.055448
| 2023-02-24T18:33:12
| 2023-02-24T18:33:12
| 249,989,382
| 0
| 0
|
MIT
| 2023-02-24T18:33:56
| 2020-03-25T13:43:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import django
import os
import sys

# env -- the settings module must be in the environment *before*
# django.setup() runs: setup() reads DJANGO_SETTINGS_MODULE, so the
# original ordering (setdefault after setup) had no effect.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djthia.settings.shell')

# load apps
django.setup()

from django.conf import settings
from djthia.gearup.models import Annotation
from djtools.utils.mail import send_mail

DEBUG = settings.DEBUG
def main():
    """Send emails to recipients of thank you notes."""
    notes = Annotation.objects.all()
    for note in notes:
        # note.status presumably flags "not yet sent"; it is cleared after
        # sending below -- TODO confirm the field's semantics in the model.
        if note.status:
            # NOTE(review): assumes at least one recipient exists; an empty
            # recipients set would raise IndexError here.
            to = [note.recipients.all()[0].email]
            frum = note.questionnaire.email
            if not frum:
                # Fall back to the author's address as the sender.
                frum = note.created_by.email
            if DEBUG:
                # Debug mode: stash the real addresses on the note and
                # divert the email to the first MANAGERS entry.
                note.to = to
                note.frum = frum
                to = [settings.MANAGERS[0][1]]
            subject = "A Thank You Note from {0} {1}".format(
                note.created_by.first_name, note.created_by.last_name,
            )
            print(to, frum, subject)
            send_mail(None, to, subject, frum, 'gearup/notes_email.html', note)
            # Mark as sent so subsequent runs do not resend this note.
            note.status = False
            note.save()


if __name__ == '__main__':
    sys.exit(main())
|
[
"plungerman@gmail.com"
] |
plungerman@gmail.com
|
7b98ad676a95b27ba930eb3ee2cf809754df15c6
|
9cc1b58d0319308da98187d071295b2fabf1f080
|
/0730_numpy/a0730_終於教到Numpy_02.py
|
3e6fefbfabd6cd5f2e6ebc2b2d00832df63433cd
|
[
"MIT"
] |
permissive
|
Arwen0905/Python_Test
|
60d1dee383c9cf27df6b93cfde7884c91092229c
|
c75357e4354a684a9fae41f751dae60d4cf0716c
|
refs/heads/master
| 2023-01-13T13:14:55.355898
| 2020-10-31T18:52:07
| 2020-10-31T18:52:07
| 265,150,874
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
import numpy as np

a = np.array([1,2,3])
# Scalar arithmetic broadcasts element-wise over the whole array.
a = a * 3
a = a + 2
print(f"原本的a: {a}")
b = np.array([2,2,0])
print(f"原本的b: {b}")
print("a+b: ",a+b)
# print("a/b: ",a/b)  # division is problematic here (b contains a zero)
print("a*b: ",a*b)
# Building arrays: np.array
# Building arrays: np.arange
c = np.arange(10)
print(c)
d = np.linspace(0,10,5)  # evenly spaced samples over the interval
print(d)
e = np.array([[1,2,3],[4,5,6]])
print(e)
f = np.arange(10).reshape(2,5)
print(f)
d = np.array([[[
    [[1,2,3],[4,5,6],[7,8,9],[10,11,12]],
    [[1,2,3],[4,5,6],[7,8,9],[10,11,12]],
    [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
]]])
print(d)  # judge dimensionality from nesting: each bracket level, starting
          # from rows/columns outwards, adds one dimension
print(d.ndim)  # just the number of dimensions
print(d.shape)  # length of every dimension, including rows/columns
print(d.sum(axis=2))  # pick an axis, then reduce (sum) along it
print(d.reshape(12,3))  # reshape; total size must match the element count
arr = np.array([[1, 2, 3],[4,5,6]], ndmin=5)  # ndmin forces a minimum rank
print(arr)
print(arr.ndim)
print('shape of array :', arr.shape)
|
[
"qq23378452@gmail.com"
] |
qq23378452@gmail.com
|
3ff08b6463ee0d3726a8807965d91aba577eb6f3
|
dd87194dee537c2291cf0c0de809e2b1bf81b5b2
|
/test/test_v1beta1_deployment_spec.py
|
661c7f5b82ef365e85cabe855eb49e525f5a2da1
|
[
"Apache-2.0"
] |
permissive
|
Arvinhub/client-python
|
3ea52640ab02e4bf5677d0fd54fdb4503ecb7768
|
d67df30f635231d68dc4c20b9b7e234c616c1e6a
|
refs/heads/master
| 2023-08-31T03:25:57.823810
| 2016-11-02T22:44:36
| 2016-11-02T22:44:36
| 73,865,578
| 1
| 0
|
Apache-2.0
| 2018-10-10T12:16:45
| 2016-11-15T23:47:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: unversioned
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import k8sclient
from k8sclient.rest import ApiException
from k8sclient.models.v1beta1_deployment_spec import V1beta1DeploymentSpec
class TestV1beta1DeploymentSpec(unittest.TestCase):
    """ V1beta1DeploymentSpec unit test stubs """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testV1beta1DeploymentSpec(self):
        """
        Test V1beta1DeploymentSpec
        """
        # Generated smoke test: only verifies the model can be constructed
        # with no arguments; the result is not asserted on.
        model = k8sclient.models.v1beta1_deployment_spec.V1beta1DeploymentSpec()


if __name__ == '__main__':
    unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
f28198c3ce34e8dc612664c6550ed1185d9c3b32
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5690574640250880_1/Python/kevinleeone/main.py
|
7a28010c1cded6c2928d58496a1d84262b6a4e88
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,338
|
py
|
import sys
def fillRow(data, row):
    """Overwrite every cell of *row* with a mine ('*'), in place."""
    data[row][:] = ['*'] * len(data[row])


def fillCol(data, col):
    """Overwrite column *col* of every row with a mine ('*'), in place."""
    for line in data:
        line[col] = '*'


def fill(data, row, col):
    """Place a single mine at (row, col)."""
    data[row][col] = '*'


def printSolution(data):
    """Render the grid as newline-joined rows of concatenated cells."""
    rendered_rows = (''.join(line) for line in data)
    return '\n'.join(rendered_rows)
def solve():
    """Solve one Minesweeper-layout case read from stdin.

    Input line: rows, cols, mine count. Builds a grid with the click cell
    'c' at (0,0) and mines '*' packed away from it so one click clears the
    board; returns the rendered grid or 'Impossible'.
    """
    row, col, mines = map(int, sys.stdin.readline().split())
    data = [['.'] * col for i in range(row)]
    data[0][0] = 'c'
    # From here on, row/col hold the *last* row/column indices.
    row -= 1
    col -= 1
    if row == 0:
        # Single row: fill mines from the far end towards the click.
        while mines:
            fill(data, row, col)
            col -= 1
            mines -= 1
        return printSolution(data)
    elif col == 0:
        # Single column: symmetric to the single-row case.
        while mines:
            fill(data, row, col)
            row -= 1
            mines -= 1
        return printSolution(data)
    else:
        # Consume whole far-edge rows/columns while a full line still fits
        # in the remaining mine budget (always filling the longer side).
        while row < mines or col < mines:
            if col < row:
                fillRow(data, row)
                row -= 1
                mines -= col + 1
            else:
                fillCol(data, col)
                col -= 1
                mines -= row + 1
        if not mines:
            # Valid iff at least a 2x2 clear region remains, or only the
            # click cell itself is left.
            if 1 <= row and 1 <= col or row == 0 and col == 0:
                return printSolution(data)
            else:
                return 'Impossible'
        else:
            # Fewer mines than a full line remain: lay them along the far
            # edge, keeping the two lines nearest the click clear.
            if col < row:
                i = row
                while 1 < i and mines:
                    fill(data, i, col)
                    mines -= 1
                    i -= 1
                assert(mines == 0)
                row -= 1
                col -= 1
                if 1 <= row and 1 <= col:
                    return printSolution(data)
                else:
                    return 'Impossible'
            else:
                i = col
                while 1 < i and mines:
                    fill(data, row, i)
                    mines -= 1
                    i -= 1
                if mines:
                    # One mine can still be tucked into the previous row.
                    assert(mines == 1)
                    row -= 1
                    fill(data, row, col)
                row -= 1
                col -= 1
                if 1 <= row and 1 <= col:
                    return printSolution(data)
                else:
                    return 'Impossible'


if __name__ == '__main__':
    # First stdin line: number of cases; print each solved grid in turn.
    cases = int(sys.stdin.readline().split()[0])
    for i in range(cases):
        print('Case #%d:' % (i + 1))
        print(solve())
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
050fdadc5b742ad96f26c3e3b74fc638ca7f9300
|
2cb2bc953975540de8dfe3aee256fb3daa852bfb
|
/kawagashira_nobuyuki/tyama_codeiq186.py
|
c27fa12584c938e2faec4bcece92b9bd39865131
|
[] |
no_license
|
cielavenir/codeiq_solutions
|
db0c2001f9a837716aee1effbd92071e4033d7e0
|
750a22c937db0a5d94bfa5b6ee5ae7f1a2c06d57
|
refs/heads/master
| 2023-04-27T14:20:09.251817
| 2023-04-17T03:22:57
| 2023-04-17T03:22:57
| 19,687,315
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
#!/usr/bin/python
#coding:utf-8
import nltk
#import re
#nltk.download() #Download Corpora -> gutenberg
from nltk.corpus import gutenberg

# Word-frequency table for Austen's "Sense and Sensibility": lowercase
# every purely alphabetic token, count occurrences, and print lines of
# "word,count,percentage".
#words1 = [w.lower() for w in gutenberg.words('austen-sense.txt')]
#words2 = [w for w in words1 if re.sub(r'[^a-z]','',w)==w]
words2 = [w.lower() for w in gutenberg.words('austen-sense.txt') if w.isalpha()] ###
freq=nltk.FreqDist(words2)
s = len(words2)
for e in freq.keys():
    print("%s,%d,%f" % (e,freq[e],float(freq[e])/s*100))
'''
keys = freq.keys() ###
values = freq.values() ###
#s=sum(freq.values())
s=sum(values) ### len(words2)
for i in range(len(freq)):
    #print "%s,%d,%f" % (freq.keys()[i],freq.values()[i],float(freq.values()[i])/s*100)
    print "%s,%d,%f" % (keys[i],values[i],float(values[i])/s*100) ###
'''
# Articles, prepositions, pronouns and conjunctions dominate the counts.
# The protagonists' names, elinor and marianne, also occur frequently.
|
[
"cielartisan@gmail.com"
] |
cielartisan@gmail.com
|
3ce44d8bde5dbf48a665dbc4c07d1ad54d105060
|
4db539a1fec5369d1970a10554a71f85b31a1855
|
/manage_command/migrations/0003_auto_20200707_2112.py
|
43943b20f0285a850ceef3038c56e8a6990cda4a
|
[] |
no_license
|
1SouravGhosh/API_MONGO
|
8f04f37892703fbac9d851028505252cf58886b8
|
b7071ec5797adf3bcccdf6749c560f50e1469839
|
refs/heads/master
| 2022-11-19T23:35:03.933449
| 2020-07-10T07:24:22
| 2020-07-10T07:24:22
| 278,567,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
# Generated by Django 3.0.5 on 2020-07-07 15:42
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Command.command to Command.command1 (schema-only rename)."""

    # Must be applied after the previous auto-generated migration.
    dependencies = [
        ('manage_command', '0002_auto_20200707_2111'),
    ]

    operations = [
        migrations.RenameField(
            model_name='command',
            old_name='command',
            new_name='command1',
        ),
    ]
|
[
"1SouravGhosh@noreply.github.com"
] |
1SouravGhosh@noreply.github.com
|
f86b14c00d63c82b17124a789bec6e8f3d9f89e5
|
37683c6f6c36f47ff4c7344576b268817e992ec3
|
/源代码/p17/p17_50.py
|
5b95523d6d60cf37313b3d1f9b97a73f93d0f7ac
|
[] |
no_license
|
WhiteSheep-y/Python
|
33e026a798e2a02d75908cefa2b02fa2c654e199
|
a166bdb8ec8bcea2f955b43d16e9c9b92c44f558
|
refs/heads/main
| 2023-06-01T02:09:32.641415
| 2021-06-16T15:00:09
| 2021-06-16T15:00:09
| 359,199,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# p17_50.py
from tkinter import *
root = Tk()

def create():
    # Spawn a child (Toplevel) window containing a short message;
    # called once per button click.
    top = Toplevel()
    top.title("FishC Demo")
    msg = Message(top, text="I love FishC.com")
    msg.pack()

# Button label means "create top-level window" (Chinese).
Button(root, text="创建顶级窗口", command=create).pack()
mainloop()
|
[
"xiaomie_y@163.com"
] |
xiaomie_y@163.com
|
53096071f7b1fc09e5f96abc1c67458157a34650
|
1b78ca7f3250ebed418717c6ea28b5a77367f1b8
|
/051.n-queens/n-queens.py
|
0d216ecb8ef8f38a36a8a90aa7459786820bdf17
|
[] |
no_license
|
JaniceLC/lc-all-solutions
|
ced854f31b94f44c0b03a0677988805e3b9ee718
|
3f2a4ee8c09a8890423c6a22c73f470eccf979a2
|
refs/heads/master
| 2020-04-05T19:53:31.307528
| 2018-11-12T04:18:45
| 2018-11-12T04:18:45
| 157,155,285
| 0
| 2
| null | 2018-11-12T04:13:22
| 2018-11-12T04:13:22
| null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
class Solution(object):
    def solveNQueens(self, n):
        """
        :type n: int
        :rtype: List[List[str]]
        """
        solutions = []

        def safe(cols, candidate):
            # A candidate column is safe when no placed queen shares a
            # diagonal with it (column clashes are filtered by the caller).
            rows_placed = len(cols)
            return all(abs(candidate - c) != rows_placed - r
                       for r, c in enumerate(cols))

        def render(cols):
            # Convert a column-per-row placement into board-row strings.
            return ['.' * c + 'Q' + '.' * (n - c - 1) for c in cols]

        def place(cols):
            if len(cols) == n:
                solutions.append(render(cols))
                return
            # Ascending column order keeps the output ordering stable.
            for c in range(n):
                if c not in cols and safe(cols, c):
                    cols.append(c)
                    place(cols)
                    cols.pop()

        place([])
        return solutions
|
[
"jedihy@yis-macbook-pro.local"
] |
jedihy@yis-macbook-pro.local
|
b7334aef4dd1fac6f082369bba23650ac0764e78
|
6f866eb49d0b67f0bbbf35c34cebe2babe2f8719
|
/tests/app/forms/field_handlers/test_field_handler.py
|
81289acd03f8c36048634f0175cd0fdb9e99385b
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
ONSdigital/eq-questionnaire-runner
|
681b0d081f9cff0ee4ae3017ecc61f7390d553bf
|
87e7364c4d54fee99e6a5e96649123f11c4b53f1
|
refs/heads/main
| 2023-09-01T21:59:56.733363
| 2023-08-31T15:07:55
| 2023-08-31T15:07:55
| 219,752,509
| 12
| 18
|
MIT
| 2023-09-14T11:37:31
| 2019-11-05T13:32:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,438
|
py
|
from wtforms import validators
from app.forms import error_messages
from app.forms.field_handlers.string_handler import StringHandler
from app.forms.validators import ResponseRequired
def test_get_mandatory_validator_optional(value_source_resolver, rule_evaluator):
    # A non-mandatory answer should yield a WTForms Optional validator.
    handler = StringHandler(
        {"mandatory": False}, value_source_resolver, rule_evaluator, error_messages
    )
    assert isinstance(handler.get_mandatory_validator(), validators.Optional)
def test_get_mandatory_validator_mandatory(value_source_resolver, rule_evaluator):
    # A mandatory answer should produce a ResponseRequired validator that
    # carries the default message from the supplied error-message mapping.
    messages = {"MANDATORY_TEXTFIELD": "This is the default mandatory message"}
    handler = StringHandler(
        {"mandatory": True}, value_source_resolver, rule_evaluator, messages
    )
    validator = handler.get_mandatory_validator()
    assert isinstance(validator, ResponseRequired)
    assert validator.message == "This is the default mandatory message"
def test_get_mandatory_validator_mandatory_with_error(
    value_source_resolver, rule_evaluator
):
    # An answer-level validation message overrides the default mapping.
    answer = {
        "mandatory": True,
        "validation": {
            "messages": {
                "MANDATORY_TEXTFIELD": "This is the mandatory message for an answer"
            }
        },
    }
    handler = StringHandler(
        answer, value_source_resolver, rule_evaluator, error_messages
    )
    validator = handler.get_mandatory_validator()
    assert isinstance(validator, ResponseRequired)
    assert validator.message == "This is the mandatory message for an answer"
def test_get_mandatory_validator_mandatory_with_question_in_error(
    value_source_resolver, rule_evaluator
):
    # A %(question_title)s placeholder in the message is interpolated with
    # the question title passed to the handler.
    answer = {
        "mandatory": True,
        "validation": {
            "messages": {
                "MANDATORY_TEXTFIELD": "Select an answer to ‘%(question_title)s’"
            }
        },
    }
    handler = StringHandler(
        answer,
        value_source_resolver,
        rule_evaluator,
        {"MANDATORY_TEXTFIELD": "This is the default mandatory message"},
        question_title="To be or not to be?",
    )
    validator = handler.get_mandatory_validator()
    assert isinstance(validator, ResponseRequired)
    assert validator.message == "Select an answer to ‘To be or not to be?’"
|
[
"noreply@github.com"
] |
ONSdigital.noreply@github.com
|
690ea698c2f6650c9785ed6877b332086552e8c7
|
6b4f38370ce1126a7f74e13c2012ab238a01df93
|
/azure-mgmt-compute/azure/mgmt/compute/compute/v2017_03_30/models/os_disk_image.py
|
52165b11bc0cd25a81a4315d22b3130f970e4f7d
|
[
"MIT"
] |
permissive
|
action/azure-sdk-for-python
|
52d8a278bfb2fbc9c7e11297e3bd21c604f906b1
|
f06553e45451f065c87ee9ed503ac4be81e64a71
|
refs/heads/master
| 2020-12-03T02:13:52.566291
| 2017-06-30T18:42:49
| 2017-06-30T18:42:49
| 95,917,797
| 1
| 0
| null | 2017-06-30T19:25:58
| 2017-06-30T19:25:58
| null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OSDiskImage(Model):
    """Contains the os disk image information.

    :param operating_system: The operating system of the osDiskImage. Possible
     values include: 'Windows', 'Linux'
    :type operating_system: str or :class:`OperatingSystemTypes
     <azure.mgmt.compute.compute.v2017_03_30.models.OperatingSystemTypes>`
    """

    # msrest serialization metadata: operating_system is required.
    _validation = {
        'operating_system': {'required': True},
    }

    # Maps the Python attribute to its wire-format key and msrest type name.
    _attribute_map = {
        'operating_system': {'key': 'operatingSystem', 'type': 'OperatingSystemTypes'},
    }

    def __init__(self, operating_system):
        # NOTE(review): this generated code does not call Model.__init__;
        # confirm against the msrest base class before altering.
        self.operating_system = operating_system
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
3c288fa74b379bbb4e9419282be8f2108292fa16
|
139715a923c8c82b172803d5bdc1b1bca46fbdf3
|
/leetcode/swap_node.py
|
5bfdd00934e97a6e295d9ed9688c73a3b12c36b5
|
[] |
no_license
|
haoccheng/pegasus
|
ab32dcc4265ed901e73790d8952aa3d72bdf72e7
|
76cbac7ffbea738c917e96655e206f8ecb705167
|
refs/heads/master
| 2021-01-10T11:33:23.038288
| 2016-03-18T04:17:28
| 2016-03-18T04:17:28
| 46,103,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
# Given a linked list, swap every two adjacent nodes and return the head.
# 1->2->3->4 return: 2->1->4->3.
# Use constant space. May not modify the values in the list; only nodes itself can be changed.
class ListNode:
    """Singly linked list node with helpers to build and dump whole lists."""

    def __init__(self, x):
        self.val = x
        self.next = None

    def pt(self):
        # Collect the values from this node through to the end of the list.
        values = [self.val]
        if self.next is not None:
            values.extend(self.next.pt())
        return values

    @staticmethod
    def create(input):
        # Recursively build a chain from a non-empty sequence of values.
        head = ListNode(input[0])
        if len(input) > 1:
            head.next = ListNode.create(input[1:])
        return head
def swap_nodes(head):
    """Swap each adjacent pair of nodes and return the new head.

    Recursive, pointer-only rewiring: values are never copied.
    """
    if head is None:
        return None
    second = head.next
    if second is None:
        # Odd tail node: nothing left to pair with.
        return head
    # Relink the first pair: second -> head -> (rest).
    head.next = second.next
    second.next = head
    # head is now the trailing node of the pair; recurse on the remainder.
    head.next = swap_nodes(head.next)
    return second
# Demo: build 1..5, print it, swap adjacent pairs, print the result.
# print is parenthesised so the file parses under both Python 2 and 3
# (a single parenthesised expression prints identically on both).
h = ListNode.create([1, 2, 3, 4, 5])
print(h.pt())
x = swap_nodes(h)
print(x.pt())
|
[
"haoc.cheng@gmail.com"
] |
haoc.cheng@gmail.com
|
b285f83ba9cb715e85abeb79cde46b6044797581
|
9507ff9e9bca2ca8104369c9e25acd74d308e9b3
|
/data_collect/novatel_pi.py
|
485fa7917c9ecbd01b49b1f1ad849886d092ea02
|
[] |
no_license
|
yangkang411/python_tool
|
03e483c7ec7e1e76284f93cf5b9086fdf98af826
|
713071a9fbabfabcbc3c16ce58d1382c410a7ea3
|
refs/heads/master
| 2023-03-17T16:14:03.332332
| 2020-09-10T02:37:05
| 2020-09-10T02:37:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,837
|
py
|
#!/usr/bin/python
import serial
import math
import time
import datetime
import os
def get_utc_day():
    """Return the current UTC day-of-year (1-based) as a string.

    Builds a naive local datetime from time.strftime fields, maps it to UTC
    through the epoch, then counts days since Jan 1 of the *local* year.
    NOTE(review): when the timedelta below is less than one day its str()
    form has no "N days," prefix, so int() would raise — verify behaviour
    on Jan 1 (UTC).  Also confirm the intended year around New Year, when
    local and UTC years can differ.
    """
    year = int(time.strftime("%Y"))
    month = int(time.strftime("%m"))
    day = int(time.strftime("%d"))
    hour = int(time.strftime("%H"))
    minute = int(time.strftime("%M"))
    second = int(time.strftime("%S"))
    local_time = datetime.datetime(year, month, day, hour, minute, second)
    # Local -> epoch seconds -> naive UTC datetime.
    time_struct = time.mktime(local_time.timetuple())
    utc_st = datetime.datetime.utcfromtimestamp(time_struct)
    d1 = datetime.datetime(year, 1, 1)
    utc_sub = utc_st - d1
    # str(timedelta) looks like "N days, H:MM:SS"; take the day count.
    utc_str = utc_sub.__str__()
    utc_day_int = int(utc_str.split( )[0])
    utc_day_str = str(utc_day_int + 1)
    return utc_day_str
def mkdir(path):
    """Create directory *path* (with parents) when it does not exist.

    Returns True when the directory was created, False when it already
    existed.  The print calls are parenthesised so the module also parses
    under Python 3; under Python 2 a single parenthesised expression
    prints exactly the same text as before.
    """
    path = path.strip()
    path = path.rstrip("\\")
    if not os.path.exists(path):
        os.makedirs(path)
        print(path + ' mkdir suc')
        return True
    else:
        print('mkdir exist')
        return False
def getweeknum(weekseconds):
    """Number of whole GPS weeks contained in a seconds count (1 week = 604800 s)."""
    seconds_per_week = 7 * 24 * 3600
    return math.floor(weekseconds / seconds_per_week)
def getweeksec(weekseconds):
    """Seconds into the current GPS week (remainder after whole weeks)."""
    whole_weeks = getweeknum(weekseconds)
    return weekseconds - whole_weeks * (7 * 24 * 3600)
def yearfour(year):
    """Expand a two-digit year: 0..80 -> 2000s, 81..1989 -> 1900s, else unchanged.

    (Values in 1000..1989 also get 1900 added — preserved from the original.)
    """
    if year <= 80:
        return year + 2000
    if year < 1990:
        return year + 1900
    return year
def isleapyear(year):
    """Gregorian leap-year test applied to the four-digit form of *year*."""
    y = yearfour(year)
    return (y % 4 == 0 and y % 100 != 0) or y % 400 == 0
def timefromGPS(weeknum,weeksec):
    """Convert GPS week number + seconds-of-week to a calendar date.

    Returns [year, month, day, hour, minute, second, doy].
    NOTE(review): the epoch handling (totalday < 360 -> 1980) folds the
    GPS epoch (Jan 6 1980) into hard-coded constants; the boundary values
    have not been independently verified here — confirm against a known
    GPS time before relying on day/doy.
    """
    year = 0
    month = 0
    day = 0
    hour = 0
    minute = 0
    second = 0
    doy = 0
    daypermon = [31,28,31,30,31,30,31,31,30,31,30,31]
    # Normalise: fold whole weeks hiding in weeksec into weeknum.
    weeknum += getweeknum(weeksec)
    weeksec = getweeksec(weeksec)
    weekmin = math.floor(weeksec/60.0)
    second = weeksec - weekmin*60.0
    weekhour = math.floor(weekmin/60)
    minute = weekmin - weekhour*60
    weekday = math.floor(weekhour/24)
    hour = weekhour - weekday*24
    totalday = weekday+weeknum*7
    # Walk forward a year at a time, consuming 365 (+1 leap) days per step.
    if totalday<360:
        year = 1980
    else:
        year = 1981
        totalday -= 360
        while True:
            if totalday<365:
                break
            if isleapyear(year): totalday -= 1
            totalday -= 365
            year += 1
    doy = totalday
    # Convert the remaining day-of-year into month/day, adjusting February
    # in leap years.
    if totalday <= daypermon[0]:
        month = 1
    else:
        totalday -= daypermon[0];
        if isleapyear(year): totalday -= 1
        month = 2
        while True:
            if totalday<=daypermon[month-1]:
                break
            else:
                totalday -= daypermon[month-1]
                month += 1
            if month==2 and isleapyear(year): totalday += 1
    day = totalday
    return [year,month,day,hour,minute,second,doy]
def configNovatel(ser):
    """Push the NovAtel receiver configuration over serial port *ser*.

    Sends NTRIP/RTK connection setup, antenna lever-arm offsets
    (setinstranslation), body-to-vehicle rotation, and the list of binary
    logs to record, then persists everything with 'saveconfig'.
    The command strings are receiver protocol — do not reformat them.
    """
    # need to change the following lever arm values when mounting in the car
    #'setimutoantoffset -0.2077 1.8782 1.0 0.10 0.10 0.10\r',\
    # 'setinstranslation ant2 x, y, z, std_x, std_y, std_z\r',\
    setupcommands7 = ['unlogall\r',\
        'serialconfig com1 230400 N 8 1 N OFF\r',\
        'ETHCONFIG ETHA AUTO AUTO AUTO AUTO\r',\
        'NTRIPCONFIG ncom1 client v1 106.12.40.121:2201 RTK rtkeasy 555555\r',\
        'interfacemode ncom1 rtcmv3 novatel off\r',\
        'interfacemode com1 novatel novatel on\r',\
        'alignmentmode automatic\r',\
        'setinstranslation ant1 0.0 0.0 0.0 0.10 0.10 0.10\r',\
        'setinstranslation ant2 0.0 0.0 0.0 0.10 0.10 0.10\r',\
        'setinsrotation rbv -180 0 90\r',\
        #'setinsrotation rbv 90 0 180\r',\
        'log RANGECMPB ONTIME 0.1\r',\
        'log RAWEPHEMB ONCHANGED\r',\
        'log GLOEPHEMERISB ONCHANGED\r',\
        'log GALFNAVEPHEMERISB ONCHANGED\r',\
        'log GALINAVEPHEMERISB ONCHANGED\r',\
        'log BDSEPHEMERISB ONCHANGED\r',\
        'log QZSSEPHEMERISB ONCHANGED\r',\
        'log INSCONFIGB ONCHANGED\r',\
        #'log RAWIMUSXB ONNEW\r',\
        'log versionb once\r',\
        'log rxconfigb once\r',\
        'log rxstatusb once\r',\
        'log thisantennatypeb once\r',\
        'log inspvasb ontime 0.1\r',\
        #'log bestposb ontime 0.1\r',\
        'log bestgnssposb ontime 0.1\r',\
        'log bestgnssvelb ontime 0.1\r',\
        #'log heading2b onnew\r',\
        'log ncom1 gpgga ontime 1\r',\
        'saveconfig\r']
    for cmd in setupcommands7:
        ser.write(cmd.encode())
# Main capture loop: open the receiver's serial port, push the config,
# then stream every raw line into a timestamped binary log file under a
# per-UTC-day directory.
ser = serial.Serial('/dev/ttyUSB0',230400,parity='N',bytesize=8,stopbits=1,timeout=None) #novatel
fname = ''
ser.flushInput()
fmode = 'wb'
while True:
    if ser.isOpen(): break
print ('\Port is open now\n')
configNovatel(ser)
ser.flushInput()
# get the time information from #INSPVAXA
##while True:
##    line = ser.readline().decode('utf-8')
##    if line.find('#INSPVAXA', 0, len(line)) >= 0:
##        info = line.split(',')
##        #print(info)
##        gpsweek = int(info[5]);
##        sow = float(info[6]);
##        #print(gpsweek)
##        #print(sow)
##        startime = timefromGPS(gpsweek,sow)
##        fname += '_%4d%02d%02d_%02d%02d%02d.txt' % (startime[0],startime[1],startime[2],startime[3],startime[4],startime[5])
##        print(fname)
##        break
#mk_time = time.strftime("%Y_%m_%d",time.localtime())
#mkpath='./' + mk_time
day = get_utc_day()
mkpath='./' + day
mkdir(mkpath)
fname += mkpath + '/' + 'novatel_' + time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime()) + '.bin'
with open(fname,fmode) as outf:
    while True:
        try:
            line = ser.readline()
            #print (line, end='\r\n')
            #outf.write(line.decode('utf-8'))
            # Write raw bytes; errors (e.g. serial hiccups) are ignored so
            # capture keeps running.
            outf.write(bytes(line)) #line.decode('utf-8')
        except:
            #break
            pass
outf.close()
|
[
"41727862+geqian@users.noreply.github.com"
] |
41727862+geqian@users.noreply.github.com
|
6dc99ea802bf1e2b5e2bec7686b08547ccf9f1ae
|
b71e4e576d242598d8cec5c552e1d66630b81328
|
/tools/generate_changelog.py
|
810bde02639a0ed417bc48dd5d0bd88b0c2d2b77
|
[
"Apache-2.0"
] |
permissive
|
Global19-atlassian-net/qiskit-bot
|
d77b7b326e3a9b3e4db3b7453fa62326ecc65be7
|
2cd2e27d0ff51bb517eee0ceab24cb57b2034f12
|
refs/heads/master
| 2023-03-31T08:05:38.992587
| 2021-04-09T20:23:50
| 2021-04-09T20:23:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,863
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import argparse
import tempfile
from github import Github
from qiskit_bot import config
from qiskit_bot import repos
from qiskit_bot import release_process
def main():
    """CLI entry point: print the generated changelog for a repo tag."""
    parser = argparse.ArgumentParser()
    parser.add_argument('repo_name')
    parser.add_argument('tag')
    parser.add_argument('--token', '-t', help="optional token for auth",
                        default=None)
    parser.add_argument(
        '--username', '-u',
        help="optional username for auth, password required if specified",
        default=None)
    parser.add_argument(
        '--password', '-p',
        help="optional password for auth, username required if specified.",
        default=None)
    args = parser.parse_args()
    with tempfile.TemporaryDirectory() as checkout_dir:
        repo = repos.Repo(checkout_dir, args.repo_name, args.token)
        if not args.token and args.username and args.password:
            # No token supplied: fall back to basic (user/password) auth.
            gh_session = Github(args.username, args.password)
            repo.gh_repo = gh_session.get_repo(args.repo_name)
        categories = repo.get_local_config().get(
            'categories', config.default_changelog_categories)
        print(release_process._generate_changelog(
            repo, '%s..' % args.tag, categories, show_missing=True))


if __name__ == '__main__':
    main()
|
[
"mtreinish@kortar.org"
] |
mtreinish@kortar.org
|
7c7f43bb605f5e933d8b743773578ddafeecd426
|
35e79b51f691b7737db254ba1d907b2fd2d731ef
|
/AtCoder/ARC/108/B.py
|
70f7cfc3ece60774a07f7d71f03ed6736989158b
|
[] |
no_license
|
rodea0952/competitive-programming
|
00260062d00f56a011f146cbdb9ef8356e6b69e4
|
9d7089307c8f61ea1274a9f51d6ea00d67b80482
|
refs/heads/master
| 2022-07-01T02:25:46.897613
| 2022-06-04T08:44:42
| 2022-06-04T08:44:42
| 202,485,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
# AtCoder ARC 108 B: repeatedly delete every occurrence of "fox" from the
# string using a stack, then print how many characters remain.
n = int(input())
s = input()
seq = []
for i in range(n):
    seq.append(s[i])
    if len(seq) <= 2: continue
    if seq[-3] + seq[-2] + seq[-1] == "fox":
        # Top three characters spell "fox": pop them all.
        for j in range(3):
            seq.pop()
print(len(seq))
|
[
"dragondoor0912@yahoo.co.jp"
] |
dragondoor0912@yahoo.co.jp
|
dacc6ba9a649d25e5bab950814610ce498f64c67
|
e77b92df446f0afed18a923846944b5fd3596bf9
|
/Programers_algo/DFS_BFS/pro_4_re_re_re.py
|
6c6c3154a70e176d89d9f76bbb775c1d43586c4c
|
[] |
no_license
|
sds1vrk/Algo_Study
|
e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e
|
fbbc21bb06bb5dc08927b899ddc20e6cde9f0319
|
refs/heads/main
| 2023-06-27T05:49:15.351644
| 2021-08-01T12:43:06
| 2021-08-01T12:43:06
| 356,512,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
from collections import defaultdict
def solution(tickets):
    """Return the lexicographically-first itinerary starting at "ICN" that
    uses every ticket exactly once (DFS with backtracking)."""

    def build_routes():
        # Adjacency list: departure airport -> list of destinations.
        routes = defaultdict(list)
        for origin, destination in tickets:
            routes[origin].append(destination)
        return routes

    def search(airport, path):
        # A complete itinerary visits one more airport than tickets used.
        if len(path) == ticket_count + 1:
            return path
        for idx, nxt in enumerate(routes[airport]):
            routes[airport].pop(idx)              # consume the ticket
            candidate = search(nxt, path[:] + [nxt])
            if candidate:
                return candidate
            routes[airport].insert(idx, nxt)      # backtrack: restore it
        return None

    routes = build_routes()
    for origin in routes:
        # Sorted destinations make the first completed path lexicographic.
        routes[origin].sort()
    ticket_count = len(tickets)
    return search("ICN", ["ICN"])
print(solution([["ICN", "A"], ["A", "B"], ["A", "C"], ["C", "A"], ["B", "D"]]))
|
[
"51287886+sds1vrk@users.noreply.github.com"
] |
51287886+sds1vrk@users.noreply.github.com
|
ec3d37871d0b7c038a83fbd98c57ece4b479fd40
|
ba4e73e43a419b2491c68ef1b64f6ff21c296fb8
|
/src/profiles_project/profiles_api/serializers.py
|
41de36ffefe6a3fc177d0ac206d764e10d37fdc8
|
[] |
no_license
|
adithyanps/profile-rest-api
|
7ca6c14f43bc072b4cd3815463950753867b5786
|
65421b9f3fd45f80ef86a8ca6591929a0e5453d8
|
refs/heads/master
| 2020-04-09T22:35:40.979205
| 2018-12-06T07:15:54
| 2018-12-06T07:15:54
| 160,632,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
from rest_framework import serializers
from . import models
class HelloSerializer(serializers.Serializer):
    """Serialize a single name field, used for testing our API view."""

    # Short free-text name; DRF enforces the 10-character cap on input.
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """A serializer for our user profile objects."""

    class Meta:
        model = models.UserProfile
        fields = ('id','email','name','password')
        # write_only keeps the password out of serialized API responses.
        extra_kwargs = {'password':{'write_only':True}} #this is to hide password from users

    def create(self, validated_data):
        """Create and return a new user with a hashed password."""
        user = models.UserProfile(
            email=validated_data['email'],
            name=validated_data['name'],
        )
        # set_password hashes the value instead of storing it raw.
        user.set_password(validated_data['password'])
        user.save()
        return user
class ProfilefeedItemSerializer(serializers.ModelSerializer):
    """A serializer for profile feed items."""

    class Meta:
        model = models.ProfileFeedItem
        fields = ('id','user_profile','status_text','created_on')
        # The owning profile is set by the view, never by the client.
        extra_kwargs = {'user_profile':{'read_only':True}}
|
[
"adithynps3@gmial.com"
] |
adithynps3@gmial.com
|
3cf0e33afa27ddc56d6846b1d08d09011df235f6
|
5c11c2731c736be4055639b9ddae74d2536a62b9
|
/cloudmesh_base/locations.py
|
f7d263a459b779d2e44c894868778aa42b558464
|
[
"Apache-2.0"
] |
permissive
|
zaber-paul/base
|
6694e1e12c0ca7e500e7e645df0336475bf0b11a
|
9c4d4e40db7a5059dcaa32d44be0146b6bb829c4
|
refs/heads/master
| 2020-08-11T09:37:08.805336
| 2020-01-08T01:24:44
| 2020-01-08T01:24:44
| 214,541,353
| 0
| 0
|
Apache-2.0
| 2019-10-11T22:56:48
| 2019-10-11T22:56:48
| null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
from cloudmesh_base.util import path_expand
from cloudmesh_base.Shell import Shell
import os
# Configuration directory, both as the raw "~/..." prefix and expanded.
__config_dir_prefix__ = os.path.join("~", ".cloudmesh")
__config_dir__ = path_expand(__config_dir_prefix__)


def config_file(filename):
    """
    The location of the config file: ~/.cloudmesh/filename. ~ will be expanded
    :param filename: the filename
    """
    return os.path.join(__config_dir__, filename)


def config_file_raw(filename):
    """
    The location of the config file: ~/.cloudmesh/filename. ~ will NOT be expanded
    :param filename: the filename
    """
    return os.path.join(__config_dir_prefix__, filename)


def config_file_prefix():
    """
    The prefix of the configuration file location
    """
    return __config_dir_prefix__


def config_dir_setup(filename):
    """Create the parent directory of *filename* when it does not exist."""
    parent = os.path.dirname(filename)
    if not os.path.isdir(parent):
        Shell.mkdir(parent)
|
[
"laszewski@gmail.com"
] |
laszewski@gmail.com
|
a78b3e5b9320fcde83a6189f7374d3fd0882b1bf
|
98dfa21cd26462658165c802a16b697cf1ba582f
|
/blog/models.py
|
e9c5ed76a0502a3bcd4b7f74d90fcf624cd8409b
|
[] |
no_license
|
samirpatil2000/startwith_rest-framework
|
9408c295bf08ed21551ae5e5fe45a54c45bde8d2
|
0b1e3978886ab3fea40d046814c3ca3bc258c8f7
|
refs/heads/master
| 2022-12-31T14:04:36.376080
| 2020-10-18T06:17:36
| 2020-10-18T06:17:36
| 304,104,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
py
|
import random
from django.db import models
from django.db.models.signals import pre_save
from django.urls import reverse
from django.utils.text import slugify
from django.conf import settings
from django.db.models.signals import post_delete
from django.dispatch import receiver
# Create your models here.
def upload_location(instance, filename):
    """Build the storage path for a blog image.

    Django calls this with the model instance and the uploaded file's
    original name; we return 'blog/<author id>/<title>-<filename>'.
    Fix: the previous version ignored *filename* entirely and appended a
    literal "(unknown)" to every path, so distinct uploads collided on
    the same name.
    """
    return f'blog/{instance.author.id}/{instance.title}-{filename}'
def default_title():
    """Generate a default post title of the form 'blogNN' (NN in 10..98)."""
    suffix = random.randrange(10, 99)
    return 'blog{}'.format(suffix)
class BlogPost(models.Model):
    """A user-authored blog entry with an optional image and a unique slug."""

    # Falls back to a random 'blogNN' title when none is supplied.
    title = models.CharField(max_length=50, default=default_title,null=False, blank=False)
    body = models.TextField(max_length=5000, default='This is the body',null=True, blank=True)
    # Stored under blog/<author id>/... (see upload_location above).
    image = models.ImageField(upload_to=upload_location, null=True, blank=True)
    date_published = models.DateTimeField(auto_now_add=True, verbose_name="date published")
    date_updated = models.DateTimeField(auto_now=True, verbose_name="date updated")
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # Auto-populated by pre_save_blog_post_receiver when left blank.
    slug = models.SlugField(blank=True, unique=True)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # The detail view is keyed by the unique slug.
        return reverse('blog_detail',kwargs={'slug':self.slug})
""" if blog post is deleted then it will delete image also from database"""
@receiver(post_delete, sender=BlogPost)
def submission_delete(sender, instance, **kwargs):
instance.image.delete(False)
""" Here we are creating slug if their is on slug """
# TODO slugify is user fro creating slug
def pre_save_blog_post_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = slugify(f'{instance.author.username} - {instance.title}')
pre_save.connect(pre_save_blog_post_receiver, sender=BlogPost)
|
[
"samirspatil742099@gmail.com"
] |
samirspatil742099@gmail.com
|
bf25d724880936aef9b3440e4517fa7ff12fc670
|
a024fe3b05dd320a7860165dd72ebd832ce6e484
|
/sale_order_portal/models/models.py
|
09856cd628a8bd90a34b8d5a2e76f83c0d408bf3
|
[] |
no_license
|
acostaw/erp_odoo
|
97d02a675908e441cf8e1ba4e3dcbc62691f8dec
|
2437997b650c9fdbf6a6f007c0a1fea2aab018e2
|
refs/heads/main
| 2023-04-19T14:52:48.877851
| 2021-04-22T18:40:07
| 2021-04-22T18:40:07
| 360,644,871
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
# class sale_order_portal(models.Model):
# _name = 'sale_order_portal.sale_order_portal'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100
|
[
"wacosta@INTN.GOV.PY"
] |
wacosta@INTN.GOV.PY
|
978ffec64bc93981e16854128c9f7aade48bfc3f
|
a4e6b080d17611853374577aaecb0367366b39b5
|
/glycresoft_sqlalchemy/web_app/services/json_api.py
|
222df0e6d93e068677cb4d7a0fa37616774c997e
|
[] |
no_license
|
mobiusklein/glycresoft_sqlalchemy
|
6235b1ea2c8da9ef6b2e725a60f0b6a925f1689d
|
e0edf12a8d6243cc2438a6236aa0564a28f92a8a
|
refs/heads/master
| 2020-04-06T05:38:35.849225
| 2016-11-21T03:25:26
| 2016-11-21T03:25:26
| 37,537,754
| 0
| 2
| null | 2016-11-21T03:25:27
| 2015-06-16T15:10:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
from flask import Response, Blueprint, g, jsonify
from glycresoft_sqlalchemy.data_model import GlycopeptideMatch, Hypothesis, HypothesisSampleMatch, json_type
from glycresoft_sqlalchemy.report import colors
# Encoder class aware of the project's SQLAlchemy models.
JSONEncoderType = json_type.new_alchemy_encoder()
# ----------------------------------------
# JSON Data API Calls
# ----------------------------------------
api = Blueprint("api", __name__)

@api.route("/api/glycopeptide_matches/<int:id>")
def get_glycopeptide_match_api(id):
    # Serialize a single GlycopeptideMatch with the SQLAlchemy-aware encoder.
    gpm = g.db.query(GlycopeptideMatch).get(id)
    return Response(JSONEncoderType().encode(gpm), mimetype="text/json")

@api.route("/api/tasks")
def api_tasks():
    # Map task id -> task JSON for every task the manager tracks.
    return jsonify(**{t.id: t.to_json() for t in g.manager.tasks.values()})

@api.route("/api/hypothesis_sample_matches")
def api_hypothesis_sample_matches():
    hsms = g.db.query(HypothesisSampleMatch).all()
    d = {str(h.id): h.to_json() for h in hsms}
    return jsonify(**d)

@api.route("/api/hypotheses")
def api_hypothesis():
    hypotheses = g.db.query(Hypothesis).all()
    d = {str(h.id): h.to_json() for h in hypotheses}
    return jsonify(**d)

@api.route("/api/samples")
def api_samples():
    # Samples come from the manager, not the database session.
    samples = g.manager.samples()
    d = {str(h.name): h.to_json() for h in samples}
    return jsonify(**d)

@api.route("/api/colors")
def api_colors():
    # Color palette used by the report front-end.
    return jsonify(**colors.color_dict())
|
[
"mobiusklein@gmail.com"
] |
mobiusklein@gmail.com
|
f1627ae27f127f6561a052c0246ee3cff0d0491e
|
b84955813634b3e64a82bc9c9bef13d2b596e4db
|
/us_addresses.py
|
fabfdb2b11e79431c34aab25c462521541da4a27
|
[] |
no_license
|
henocdz/postalcodes
|
6cf5157c5f6946a42abad875b0d6bddfd77ec7c2
|
3f95618b2423b6f0fd9970b7b92f8f53322d28d7
|
refs/heads/master
| 2021-01-03T04:48:22.295053
| 2020-02-14T16:25:13
| 2020-02-14T16:25:13
| 239,929,346
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,442
|
py
|
import json
from geopy.geocoders import GoogleV3
import concurrent.futures
MAPS_API_KEY = ""
STATES = {
"AA": "Armed Forces Americas",
"AE": "Armed Forces Middle East",
"AK": "Alaska",
"AL": "Alabama",
"AP": "Armed Forces Pacific",
"AR": "Arkansas",
"AS": "American Samoa",
"AZ": "Arizona",
"CA": "California",
"CO": "Colorado",
"CT": "Connecticut",
"DC": "District of Columbia",
"DE": "Delaware",
"FL": "Florida",
"FM": "Federated Stated of Micronesia",
"GA": "Georgia",
"GU": "Guam",
"HI": "Hawaii",
"IA": "Iowa",
"ID": "Idaho",
"IL": "Illinois",
"IN": "Indiana",
"KS": "Kansas",
"KY": "Kentucky",
"LA": "Louisiana",
"MA": "Massachusetts",
"MD": "Maryland",
"ME": "Maine",
"MH": "Marshall Islands",
"MI": "Michigan",
"MN": "Minnesota",
"MO": "Missouri",
"MP": "Northern Mariana Islands",
"MS": "Mississippi",
"MT": "Montana",
"NC": "North Carolina",
"ND": "North Dakota",
"NE": "Nebraska",
"NH": "New Hampshire",
"NJ": "New Jersey",
"NM": "New Mexico",
"NV": "Nevada",
"NY": "New York",
"OH": "Ohio",
"OK": "Oklahoma",
"OR": "Oregon",
"PA": "Pennsylvania",
"PR": "Puerto Rico",
"PW": "Palau",
"RI": "Rhode Island",
"SC": "South Carolina",
"SD": "South Dakota",
"TN": "Tennessee",
"TX": "Texas",
"UT": "Utah",
"VA": "Virginia",
"VI": "Virgin Islands",
"VT": "Vermont",
"WA": "Washington",
"WI": "Wisconsin",
"WV": "West Virginia",
"WY": "Wyoming",
}
raw_us_postal_codes_file = "us_zipcodes.json"
us_addresses_output = "addresses_us.json"
us_city_timezones_file = "us_timezones.json"
with open(us_city_timezones_file, "r") as pc:
TIME_ZONES = json.loads(pc.read())
with open(raw_us_postal_codes_file, "r") as usf:
postal_codes = json.loads(usf.read())
city_timezones = dict()
state_timezones = dict()
state_tzs = dict(PR="America/Puerto_Rico", VI="America/St_Thomas")
last_pc = 0
states = dict()
cities = set()
for i, (postal_code, info) in enumerate(postal_codes.items()):
state_code = info["state"]
state_name = STATES[state_code]
raw_city_name = info["city"]
city_name = " ".join([cc.capitalize() for cc in raw_city_name.split(" ")])
state_data = states.setdefault(
state_code, dict(code=state_code, name=state_name, cities={})
)
city_data = state_data["cities"].setdefault(
city_name, dict(name=city_name, towns=[], time_zone=None, postal_codes=[]),
)
city_key = f"{city_name} {state_code}"
cities.add(city_key)
timezone = city_data.get("time_zone", None)
if timezone is None:
try:
timezone = TIME_ZONES[city_key]
except KeyError:
timezone = "America/Denver"
city_data["time_zone"] = timezone
city_data["postal_codes"].append(postal_code)
pc = round((i + 1) * 100 / 41697, 1)
per = f"{pc}%"
if pc > last_pc:
last_pc = pc
print(per)
# def get_timezone(city_key):
# try:
# geolocator = GoogleV3(api_key=MAPS_API_KEY, timeout=5)
# location = geolocator.geocode(city_key)
# tz = geolocator.reverse_timezone(location.point)
# timezone = tz.raw["timeZoneId"]
# except Exception:
# timezone = ""
# return timezone
# with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
# fs = dict()
# for city_key in cities:
# if city_key not in TIME_ZONES:
# fs[executor.submit(get_timezone, city_key=city_key)] = city_key
# last_pc = 0
# i = 0
# for future in concurrent.futures.as_completed(fs):
# city = fs[future]
# tz = future.result()
# if tz:
# TIME_ZONES[city] = tz
# pc = round((i + 1) * 100 / len(cities), 1)
# per = f"{pc}%"
# if pc > last_pc:
# last_pc = pc
# i += 1
# with open(us_city_timezones_file, "w") as tzoutput:
# tzoutput.write(json.dumps(TIME_ZONES))
# Flatten each state's city mapping into a list and write the final
# JSON array of states.
with open(us_addresses_output, "w") as output:
    for _, state in states.items():
        cities = list(state["cities"].values())
        state["cities"] = cities
    output.write(json.dumps(list(states.values()), ensure_ascii=True))
|
[
"henocdz@gmail.com"
] |
henocdz@gmail.com
|
8bc19bc8b5f4e92d2e69058ec02dacb602eba280
|
36273a4ce1e01bdd3d80c71e22fb5fae751a264c
|
/elementary/first_word.py
|
dc268bd1542686110339791a8ac293dd90665da6
|
[] |
no_license
|
pavoli/checkIO
|
63e0e347a0a4800626e5d2d1be07f53e86de070f
|
27578ce460c45a82bd549206a4260fc3f9ec711b
|
refs/heads/master
| 2022-02-07T08:26:40.509503
| 2019-07-11T06:54:54
| 2019-07-11T06:54:54
| 114,545,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# -*- coding: utf-8 -*-
import re
# Sample inputs tried during development; only the last assignment is live.
#s = 'Hello world'
#s = 'greetings, friends'
#s = ' a word '
#s = "don't touch it"
#s = "... and so on ..."
s = "Hello.World"
def first_word(text):
    """Return the first token of *text* that starts with an ASCII letter.

    Commas and periods are treated as extra separators; the whole token is
    returned (so "don't" comes back intact, not just "don").  Returns None
    when no token starts with a letter.

    The parameter was renamed from ``str`` to ``text`` so the builtin is no
    longer shadowed; positional callers are unaffected.
    """
    # Normalize the two punctuation separators to spaces, then scan tokens.
    tokens = text.replace(',', ' ').replace('.', ' ').split(' ')
    # re.match anchors at the start — equivalent to the original '^...' search.
    return next((tok for tok in tokens if re.match(r'[A-Za-z]', tok)), None)
if __name__ == '__main__':
    print(first_word(s))
|
[
"pavel.olifer@gmail.com"
] |
pavel.olifer@gmail.com
|
3720c7d48077bc2eb852dfeb905da7a06b79d172
|
463c8ba5baad086d37819804af4ee10f43ab6dd5
|
/Algorithm/190826/서울3반_홍수경_4880_토너먼트 카드게임.py
|
5e28e22ac950f854a850cfbf5df5597d724a4a28
|
[] |
no_license
|
sooya14/TIL
|
dbbb0608d45ce273ddef6f7cea1b1195285f269d
|
232b0d38d8f6ee2e6e5517bfd6a2a15cf1000dad
|
refs/heads/master
| 2023-01-11T17:12:39.370178
| 2020-05-11T12:06:41
| 2020-05-11T12:06:41
| 195,916,241
| 0
| 0
| null | 2023-01-05T18:22:56
| 2019-07-09T02:17:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 231
|
py
|
import sys
sys.stdin = open('4880_토너먼트 카드게임.txt', 'r')
def gbb(people):
    """Read T test cases from stdin and echo each one back.

    NOTE(review): the *people* parameter is never read — it is immediately
    shadowed by the list parsed from stdin on every iteration; presumably a
    leftover from an earlier signature.
    """
    # First line: number of test cases.
    T = int(input())
    for tc in range(T):
        # Each case: a count line followed by a line of numbers.
        num = int(input())
        people = list(map(int, input().split()))
        # Echo: 1-based case index, declared count, parsed numbers.
        print(tc+1, num, people)
|
[
"soosmile9653@gmail.com"
] |
soosmile9653@gmail.com
|
9493189223318dadd253d8cd0089fddd799df474
|
60e38d3122cfb18cf8901e0d7fba02ef2a32affa
|
/notebooks/__code/ui_registration_profile_settings.py
|
557057a50a472250f2e9186f5148506fa616bf90
|
[
"BSD-3-Clause"
] |
permissive
|
earnestdl/python_notebooks
|
ac11b40d9d5e721b947b083b2f4c301079f206a8
|
4ef31711b70b90cf621e9e9d094fa2a43eeeae16
|
refs/heads/master
| 2023-03-12T19:41:44.229158
| 2021-02-22T15:41:57
| 2021-02-22T15:41:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,331
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/j35/git/python_notebooks/notebooks/ui/ui_registration_profile_settings.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated PyQt5 UI class (see the pyuic5 warning in the file
    header: hand edits will be lost on regeneration — comments only here)."""
    def setupUi(self, MainWindow):
        """Build the widget tree and wire signals onto *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(776, 184)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Row 1: label + spin box + read-only help text.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.spinBox = QtWidgets.QSpinBox(self.centralwidget)
        self.spinBox.setMinimum(3)
        self.spinBox.setProperty("value", 20)
        self.spinBox.setObjectName("spinBox")
        self.horizontalLayout.addWidget(self.spinBox)
        self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralwidget)
        self.plainTextEdit.setMinimumSize(QtCore.QSize(300, 60))
        self.plainTextEdit.setMaximumSize(QtCore.QSize(16777215, 50))
        self.plainTextEdit.setReadOnly(True)
        self.plainTextEdit.setObjectName("plainTextEdit")
        self.horizontalLayout.addWidget(self.plainTextEdit)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Row 2: spacer pushing the OK button to the right edge.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.ok_button = QtWidgets.QPushButton(self.centralwidget)
        self.ok_button.setObjectName("ok_button")
        self.horizontalLayout_2.addWidget(self.ok_button)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 776, 22))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        # MainWindow must provide an ok_button_clicked slot.
        self.ok_button.clicked.connect(MainWindow.ok_button_clicked)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Install all user-visible strings (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "Maximum Delta Pixel Offset"))
        self.plainTextEdit.setPlainText(_translate("MainWindow", "Maximum Pixel offset values allowed between two images. If the algorithm returns an offset value greater than this value, the program will use the previous offset value.\n"
""))
        self.ok_button.setText(_translate("MainWindow", "OK"))
|
[
"bilheuxjm@ornl.gov"
] |
bilheuxjm@ornl.gov
|
0838708c369b20173d3a097231fd65eda479e366
|
368c66467b78adf62da04cb0b8cedd2ef37bb127
|
/SW expert/python/회문2.py
|
c76690313edabce32dc132e0b0a60e8d53b94f55
|
[] |
no_license
|
DJHyun/Algorithm
|
c8786ddcd8b5693fc9b3b4721fdf1eeda21611c5
|
fd6ae800886dac4ec5ff6cf2618bc2c839a76e7a
|
refs/heads/master
| 2020-07-30T16:32:49.344329
| 2020-02-25T07:59:34
| 2020-02-25T07:59:34
| 210,289,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
import sys
sys.stdin = open("회문2.txt", "r")
def my_palindrome(a):
    """Return True if sequence *a* reads the same forwards and backwards."""
    # Compare each leading element against its mirror from the end;
    # an empty or single-element sequence is trivially a palindrome.
    half = len(a) // 2
    return all(a[i] == a[-1 - i] for i in range(half))
# SW Expert Academy #4880-style driver: 10 fixed test cases, each a 100x100
# character grid; reports the longest palindromic run found in any row or
# column.
for test_case in range(1,11):
    input()
    sh = []
    sv = []
    result = 0
    # Read the grid row-wise ...
    for i in range(100):
        sh.append(list(input()))
    # ... and build its transpose so columns can be scanned like rows.
    for i in range(100):
        sv.append([])
        for j in range(100):
            sv[i].append(sh[j][i])
    # NOTE(review): both inner branches break out of the d-loop, so only the
    # longest candidate starting at j is ever tested — confirm intended.
    for i in sh:
        for j in range(len(i)):
            for d in range(len(i)-1,j,-1):
                if len(i[j:d+1]) > result:
                    if my_palindrome(i[j:d+1]):
                        result = len(i[j:d+1])
                    break
                else:
                    break
    # Same scan over the transposed grid (columns).
    for i in sv:
        for j in range(len(i)):
            for d in range(len(i)-1,j,-1):
                if len(i[j:d+1]) > result:
                    if my_palindrome(i[j:d+1]):
                        result = len(i[j:d+1])
                    break
                else:
                    break
    print(f'#{test_case} {result}')
|
[
"djestiny4444@naver.com"
] |
djestiny4444@naver.com
|
4ebaf23e1ab2fff8d10ecedfe6fb866c36b15ff4
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-elasticsearch/aliyunsdkelasticsearch/request/v20170613/UpdateInstanceChargeTypeRequest.py
|
cab978c843cbe65c84557ce41d927fee367195fd
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,944
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkelasticsearch.endpoint import endpoint_data
class UpdateInstanceChargeTypeRequest(RoaRequest):
    """Generated ROA request for elasticsearch UpdateInstanceChargeType
    (2017-06-13): POST /openapi/instances/[InstanceId]/actions/convert-pay-type.
    Accessors map onto path, query, and body parameters respectively."""
    def __init__(self):
        RoaRequest.__init__(self, 'elasticsearch', '2017-06-13', 'UpdateInstanceChargeType','elasticsearch')
        self.set_uri_pattern('/openapi/instances/[InstanceId]/actions/convert-pay-type')
        self.set_method('POST')
        # Endpoint resolution data is attached only when the base class
        # exposes the corresponding attributes.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_InstanceId(self): # string
        return self.get_path_params().get('InstanceId')

    def set_InstanceId(self, InstanceId): # string
        self.add_path_param('InstanceId', InstanceId)

    def get_clientToken(self): # string
        return self.get_query_params().get('clientToken')

    def set_clientToken(self, clientToken): # string
        self.add_query_param('clientToken', clientToken)

    def get_body(self): # string
        return self.get_body_params().get('body')

    def set_body(self, body): # string
        self.add_body_params('body', body)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
f4a798c794dcd2f46d7d18e1f36c63693a7d3aec
|
232c2738dff4b89ca63d7d4ec3c812570e3860c3
|
/ch06/better_rnnlm.py
|
0f8b26a6b999cb089ed405c9a6e08498643d5b53
|
[] |
no_license
|
Soh1121/DeepLearningFromScratch2
|
0c115fcdf15c7b0cfd5d1ce7c6c32873354839d7
|
f2294156c6394fd105a6534801ff42a078b0a0af
|
refs/heads/main
| 2023-02-19T15:58:58.779465
| 2021-01-20T02:06:07
| 2021-01-20T02:06:07
| 319,550,802
| 0
| 0
| null | 2021-01-19T06:30:58
| 2020-12-08T06:45:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,722
|
py
|
import sys
sys.path.append('..')
from common.np import * # import numpy as np
from common.time_layers import *
from common.base_model import BaseModel
class BetterRnnlm(BaseModel):
    '''
    Language model with two stacked LSTM layers, dropout around every layer,
    and weight tying between the embedding and the output affine layer.

    Based on the model proposed in [1]; weight tying follows [2][3].
    [1] Recurrent Neural Network Regularization (https://arxiv.org/abs/1409.2329)
    [2] Using the Output Embedding to Improve Language Models (https://arxiv.org/abs/1608.05859)
    [3] Tying Word Vectors and Word Classifiers (https://arxiv.org/pdf/1611.01462.pdf)
    '''
    def __init__(self,
                 vocab_size=10000,
                 wordvec_size=650,
                 hidden_size=650,
                 dropout_ratio=0.5
                 ):
        V, D, H = vocab_size, wordvec_size, hidden_size
        rn = np.random.randn

        # Weight init: embeddings scaled down; LSTM weights use 1/sqrt(fan-in)
        # scaling; all biases start at zero; everything stored as float32.
        embed_W = (rn(V, D) / 100).astype('f')
        lstm_Wx1 = (rn(D, 4 * H) / np.sqrt(D)).astype('f')
        lstm_Wh1 = (rn(H, 4 * H) / np.sqrt(H)).astype('f')
        lstm_b1 = np.zeros(4 * H).astype('f')
        lstm_Wx2 = (rn(H, 4 * H) / np.sqrt(H)).astype('f')
        lstm_Wh2 = (rn(H, 4 * H) / np.sqrt(H)).astype('f')
        lstm_b2 = np.zeros(4 * H).astype('f')
        affine_b = np.zeros(V).astype('f')

        # The affine layer reuses embed_W.T: weight tying.
        self.layers = [
            TimeEmbedding(embed_W),
            TimeDropout(dropout_ratio),
            TimeLSTM(lstm_Wx1, lstm_Wh1, lstm_b1, stateful=True),
            TimeDropout(dropout_ratio),
            TimeLSTM(lstm_Wx2, lstm_Wh2, lstm_b2, stateful=True),
            TimeDropout(dropout_ratio),
            TimeAffine(embed_W.T, affine_b),
        ]
        self.loss_layer = TimeSoftmaxWithLoss()
        self.lstm_layers = [self.layers[2], self.layers[4]]
        self.drop_layers = [self.layers[1], self.layers[3], self.layers[5]]

        # Flatten every layer's parameters/gradients for the optimizer.
        self.params, self.grads = [], []
        for layer in self.layers:
            self.params += layer.params
            self.grads += layer.grads

    def predict(self, xs, train_flg=False):
        """Forward pass without the loss; dropout is active only in training."""
        for layer in self.drop_layers:
            layer.train_flg = train_flg
        for layer in self.layers:
            xs = layer.forward(xs)
        return xs

    def forward(self, xs, ts, train_flg=True):
        """Forward pass including the softmax cross-entropy loss."""
        score = self.predict(xs, train_flg)
        return self.loss_layer.forward(score, ts)

    def backward(self, dout=1):
        """Backpropagate through the loss layer and all layers in reverse."""
        dout = self.loss_layer.backward(dout)
        for layer in reversed(self.layers):
            dout = layer.backward(dout)
        return dout

    def reset_state(self):
        """Clear the hidden/cell state of both stateful LSTM layers."""
        for layer in self.lstm_layers:
            layer.reset_state()
|
[
"satou.shg@gmail.com"
] |
satou.shg@gmail.com
|
faaa83c242009275d9ca670497e67c056417638e
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=2.0_rd=1_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=2/params.py
|
a831fdb8216bcc697815a8f8e189c1e1b40951a2
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# Scheduler-experiment parameters (GSN-EDF trial) consumed by the harness.
{'cpus': 4,
 'duration': 30,
 'final_util': '2.040810',
 'max_util': '2.0',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '1',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'GSN-EDF',
 'trial': 2,
 'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
7c34b796aabf2898ce5b3391cd849ba3d47df125
|
5380c194fd3d97ce5779790abe8ffa1694daa519
|
/BackEnd/Account/views/Account.py
|
f6880e538f29d92e69825a4999c829b54ee73d67
|
[] |
no_license
|
frshman/LuffyCity
|
4efc5360256cb328cdc34091762078bde11610be
|
8f3143e832a457a1bfbf5aa46c0abb298198164a
|
refs/heads/master
| 2023-08-13T22:10:50.544455
| 2020-06-05T04:47:37
| 2020-06-05T04:47:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/5/18 23:12
# @File : Account.py
# ----------------------------------------------
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
# >>> Author : Alex 007
# >>> QQ : 2426671397
# >>> Mail : alex18812649207@gmail.com
# >>> Github : https://github.com/koking0
# >>> Blog : https://alex007.blog.csdn.net/
# ☆ ☆ ☆ ☆ ☆ ☆ ☆
from Stark.main import StarkHandler, getChoice
class AccountHandler(StarkHandler):
    """Stark admin handler for the account model: shows the username column
    plus the human-readable value of the 'identity' choice field."""
    def __init__(self, site, modelClass, prefix):
        super().__init__(site, modelClass, prefix)
        # getChoice renders the display label for the 'identity' choices.
        self.displayList = ["username", getChoice('身份', 'identity')]
|
[
"alex18812649207@gmail.com"
] |
alex18812649207@gmail.com
|
affef3944b882801c0913fe7ca31980c9b4f1b7f
|
de5be7e4d9e20bbfda3ce8697afc3433a3ccf55d
|
/python_tutorial/excercise_3/reverse_string_list_func.py
|
6b8f844aa74c95bd28dd0956de310a0c381b3d6d
|
[] |
no_license
|
poojataksande9211/python_data
|
42a88e0a0395f383d4375000a3d01b894bd38e62
|
64c952d622abfa77f2fdfd737c210014fce153c5
|
refs/heads/main
| 2023-04-16T10:24:27.213764
| 2021-04-27T16:34:32
| 2021-04-27T16:34:32
| 360,673,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
def rev_string(l):
    """Return a new list with every string in *l* reversed.

    The input list itself is not modified; element order is preserved.
    """
    # Comprehension replaces the manual append loop (same order, same result).
    return [item[::-1] for item in l]
words=["abc","def","ghi"]
print(rev_string(words))
#----------------------------
# def rev_string(l):
# element=[]
# for subelement in range(len(l)):
# element.append(subelement[::-1]) #why error for i in l ///for i in range(len(l))
# return element
# words=["abc","def","ghi"]
# print(rev_string(words))
#----------------------------
# words=["abc","def","ghi"]
# for i in words:
# print(i)
#----------------------------
# def rev_string(l):
# rev=[]
# for i in range(len(l)):
# pop_item=l.pop()
# rev.append(pop_item)
# return rev
# words=["abc","def","ghi"]
# print(rev_string(words))
|
[
"amitganvir6@gmail.com"
] |
amitganvir6@gmail.com
|
0fbc601cc05227e44edc2e2eea6511f140d8c776
|
d4f1d1a1657f94376c989b12a8e16c8ff1d86e01
|
/Stanford_ml/ex2/pylogistic/logistic.py
|
c0888790944abd48764a573c639db092c0d66ae3
|
[] |
no_license
|
sbmaruf/DataMing-MachineLearning
|
6e2335c4f16039bf7e5b4aad852e7426e10d2e6f
|
baf9d9b668588329504975c6586dbd31b2932900
|
refs/heads/master
| 2021-06-07T02:22:31.129127
| 2016-09-30T09:56:07
| 2016-09-30T09:56:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
#!/usr/bin/env python
# coding=utf-8
import numpy as np
import pandas as pd
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + exp(-z)); element-wise for array inputs."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)
def cost_function(theta, X, y):
    """Logistic-regression cross-entropy cost and gradient.

    theta, X, y are np.matrix objects (theta: column vector, X: m x n,
    y: m x 1 labels in {0, 1}); returns (J, grad) as matrices.
    """
    m = y.size
    J = (-y.T * np.log(sigmoid(X * theta)) - \
        (1 - y).T * np.log(1 - sigmoid(X * theta))) / m
    grad = ((sigmoid(X * theta) - y).T * X) / m
    return J, grad
def gradient_descent(X, y, theta, alpha):
    """One gradient-descent step; returns the updated theta as a 1-D array.

    NOTE(review): `m` and `row` are computed but never used.
    """
    m = y.size
    row, col = X.shape
    temp_theta = []
    # Update each coefficient against its own feature column.
    for i in range(col):
        _temp = theta[i] - alpha * ((sigmoid(X * theta) - y).T * X[:, i])
        # _temp is a 1x1 matrix; extract the scalar.
        temp_theta.append(_temp.tolist()[0][0])
    theta = np.array(temp_theta)
    return theta
def train(X, y, theta, alpha, num_iters):
    """Run `num_iters` gradient-descent steps and return the final theta.

    Bug fix: `cost_history` used to be a `range` object, so the item
    assignment below raised TypeError on Python 3; it is now a list.
    """
    cost_history = []
    for _ in range(num_iters):
        _theta = gradient_descent(X, y, theta, alpha)
        # gradient_descent returns a flat array; restore the column-matrix shape.
        theta = np.mat(_theta, dtype=float).T
        cost_history.append(cost_function(theta, X, y))
    return theta
def performance(testData, testY, theta):
    """Return the fraction of test rows whose predicted probability differs
    from the label.

    NOTE(review): `g` holds continuous sigmoid probabilities, so `v != 0`
    is true for essentially every row; presumably a 0.5 threshold was
    intended before comparing — confirm against the training pipeline.
    """
    z = testData * theta
    g = sigmoid(z)
    count = 0
    for v in g - testY:
        if v != 0:
            count += 1
    # float() keeps true division under Python 2 semantics as well.
    return count / float(testY.size)
def predict():
    """Placeholder for model prediction; not implemented yet."""
    pass
if __name__ == '__main__':
    # Load training data; last column is the label, the rest are features.
    data = np.mat(pd.read_csv('train.csv', header=None), dtype=float)
    _x = data[:, range(data.shape[1])[0:-1]]
    # Prepend the intercept column of ones.
    X = np.insert(_x, 0, values=1, axis=1)
    y = data[:, -1]
    theta = np.mat(np.zeros(X.shape[1], dtype=float), dtype=float).T
    alpha, num_iters = 0.01, 1000
    theta = train(X, y, theta, alpha, num_iters)
    print(theta)
    # get the performance of Model
    _test_data = np.mat(pd.read_csv('test.csv', header=None), dtype=float)
    test_data = _test_data[:, range(_test_data.shape[1])[0:-1]]
    testData = np.insert(test_data, 0, values=1, axis=1)
    testY = _test_data[:, -1]
    print(performance(testData, testY, theta))
|
[
"scutqiuwei@163.com"
] |
scutqiuwei@163.com
|
2ca6d767460661f1a4351a9b75835ee0d90e53b8
|
0ef933a7b019e9a754222464046aeaaf8a42b553
|
/django_facebook/tests/test.py
|
4343a14b74f0bd22fe50c3f8c76231417a08b410
|
[
"BSD-3-Clause"
] |
permissive
|
kennu/Django-facebook
|
1d858a53856a3f3997557ad5564c768b9c83c21d
|
88ba7025d24023756a417d1bae4f84e6ed6b67fd
|
refs/heads/master
| 2020-12-25T08:19:29.244207
| 2011-10-12T17:22:10
| 2011-10-12T17:22:10
| 2,562,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,749
|
py
|
from __future__ import with_statement
from django.contrib.auth.models import AnonymousUser
from django_facebook import exceptions as facebook_exceptions
from django_facebook.auth_backends import FacebookBackend
from django_facebook.connect import connect_user, CONNECT_ACTIONS
from django_facebook.tests.base import FacebookTest
from open_facebook.exceptions import *
from django_facebook.api import get_facebook_graph, FacebookUserConverter, get_persistent_graph
import logging
import unittest
logger = logging.getLogger(__name__)
__doctests__ = ['django_facebook.api']
'''
TODO
The views are currently untested,
only the underlying functionality is.
(need to fake facebook cookie stuff to correctly test the views)
'''
class UserConnectTest(FacebookTest):
    '''
    Tests the connect user functionality
    '''
    fixtures = ['users.json']
    def test_persistent_graph(self):
        # NOTE(review): RequestFactory() itself (not a built request) stands
        # in for an HttpRequest here, with session/user attached manually.
        from django.test import RequestFactory
        from django.contrib.auth.models import AnonymousUser
        request = RequestFactory()
        request.session = {}
        request.user = AnonymousUser()
        graph = get_persistent_graph(request, access_token='short_username')
    def test_full_connect(self):
        # Going for a register, then connect, then login with the same token.
        graph = get_facebook_graph(access_token='short_username')
        facebook = FacebookUserConverter(graph)
        action, user = connect_user(self.request, facebook_graph=graph)
        assert action == CONNECT_ACTIONS.REGISTER
        action, user = connect_user(self.request, facebook_graph=graph)
        assert action == CONNECT_ACTIONS.CONNECT
        # A fresh anonymous request with the same token should just log in.
        self.request.user = AnonymousUser()
        action, user = connect_user(self.request, facebook_graph=graph)
        assert action == CONNECT_ACTIONS.LOGIN
    def test_utf8(self):
        # Non-ASCII profile data must register without raising.
        graph = get_facebook_graph(access_token='unicode_string')
        facebook = FacebookUserConverter(graph)
        profile_data = facebook.facebook_profile_data()
        action, user = connect_user(self.request, facebook_graph=graph)
    def test_invalid_token(self):
        self.assertRaises(AssertionError, connect_user, self.request, access_token='invalid')
    def test_no_email_registration(self):
        self.assertRaises(facebook_exceptions.IncompleteProfileError, connect_user, self.request, access_token='no_email')
    def test_current_user(self):
        # An already-registered user should be logged in, not re-registered.
        facebook = get_facebook_graph(access_token='tschellenbach')
        action, user = connect_user(self.request, facebook_graph=facebook)
        assert action == CONNECT_ACTIONS.LOGIN
    def test_new_user(self):
        facebook = get_facebook_graph(access_token='new_user')
        action, user = connect_user(self.request, facebook_graph=facebook)
    def test_short_username(self):
        # Short Facebook names must be padded to a valid username length.
        facebook = get_facebook_graph(access_token='short_username')
        action, user = connect_user(self.request, facebook_graph=facebook)
        assert len(user.username) > 4
        assert action == CONNECT_ACTIONS.REGISTER
    def test_gender(self):
        graph = get_facebook_graph(access_token='new_user')
        facebook = FacebookUserConverter(graph)
        data = facebook.facebook_registration_data()
        assert data['gender'] == 'm'
    def test_double_username(self):
        '''
        This used to give an error with duplicate usernames with different capitalization
        '''
        facebook = get_facebook_graph(access_token='short_username')
        action, user = connect_user(self.request, facebook_graph=facebook)
        user.username = 'Thierry_schellenbach'
        user.save()
        self.request.user = AnonymousUser()
        facebook = get_facebook_graph(access_token='same_username')
        action, new_user = connect_user(self.request, facebook_graph=facebook)
        # The second user must end up with a distinct username and account.
        assert user.username != new_user.username and user.id != new_user.id
class AuthBackend(FacebookTest):
    """Tests that FacebookBackend authenticates by email, by facebook id,
    by both together, and rejects a call with no credentials."""
    def test_auth_backend(self):
        backend = FacebookBackend()
        # Register a fresh user to authenticate against.
        facebook = get_facebook_graph(access_token='new_user')
        action, user = connect_user(self.request, facebook_graph=facebook)
        facebook_email = user.email
        facebook_id = user.get_profile().facebook_id
        auth_user = backend.authenticate(facebook_email=facebook_email)
        assert auth_user == user
        auth_user = backend.authenticate(facebook_id=facebook_id)
        assert auth_user == user
        auth_user = backend.authenticate(facebook_id=facebook_id, facebook_email=facebook_email)
        assert auth_user == user
        # No credentials at all must not authenticate anyone.
        auth_user = backend.authenticate()
        assert not auth_user
if __name__ == '__main__':
unittest.main()
|
[
"thierryschellenbach@gmail.com"
] |
thierryschellenbach@gmail.com
|
110c2eaecef9a00e4485c50558865f58db4514b5
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/101/usersdata/173/49768/submittedfiles/av1_m3.py
|
d342df107865d5b70a00235643849cdcbd0af61c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
# -*- coding: utf-8 -*-
import math


def nilakantha(m):
    """Approximate pi with the first *m* terms of the Nilakantha series:

        pi = 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...

    Returns 3.0 when m <= 0.

    Bug fix: the original loop read `soma` before it was ever assigned
    (NameError) and divided by 0*1*2 on its first iteration
    (ZeroDivisionError); this rewrite initializes the accumulator and
    starts the denominators at 2*3*4.
    """
    soma = 0.0
    sinal = 1.0  # alternating sign of each term
    for k in range(1, m + 1):
        base = 2 * k
        soma += sinal * 4 / (base * (base + 1) * (base + 2))
        sinal = -sinal
    return soma + 3


if __name__ == '__main__':
    m = int(input('Digite o número de termos: '))
    print(nilakantha(m))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
ec6006c50785e0e8b157893e35b4789999df4d00
|
6be8aa517e679b33b47d35f100e6590902a8a1db
|
/Greedy/MST/Problem06.py
|
403924f5fc49aba57dc94d7603a32fc491a619fb
|
[] |
no_license
|
LeeJuhae/Algorithm-Python
|
7ca4762712e5e84d1e277abecb3bf39c9cbd4e56
|
729947b4428205adfbac194a5527b0eeafe1c525
|
refs/heads/master
| 2023-04-24T01:02:36.430970
| 2021-05-23T07:17:25
| 2021-05-23T07:17:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
# https://www.acmicpc.net/problem/17472
import sys
from collections import deque, defaultdict
# 4-neighbour offsets used for both flood fill and bridge scanning.
directions = ((1, 0), (0, 1), (-1, 0), (0, -1))
read = sys.stdin.readline
n, m = map(int, read().strip().split())
islands = [list(map(int, read().strip().split())) for _ in range(n)]
v = 0
# visit[x][y] will hold the 1-based island id of each land cell (0 = water).
visit = [[0 for _ in range(m)] for _ in range(n)]
# island id -> list of its (x, y) cells.
locations = defaultdict(list)
# BFS flood fill: label each connected land component with a fresh id.
for a in range(n):
    for j in range(m):
        if islands[a][j] and not visit[a][j]:
            visit[a][j] = v + 1
            locations[visit[a][j]].append((a, j))
            q = deque([(a, j)])
            while q:
                x, y = q.popleft()
                for dx, dy in directions:
                    nx, ny = x + dx, y + dy
                    if nx < 0 or ny < 0 or nx >= n or ny >= m or visit[nx][ny] != 0:
                        continue
                    if islands[a][j] != islands[nx][ny]:
                        continue
                    q.append((nx, ny))
                    visit[nx][ny] = v + 1
                    locations[visit[nx][ny]].append((nx, ny))
            v += 1
# From here on the grid holds island ids instead of the raw 0/1 map.
islands = visit
# Candidate bridges as (length, from_island, to_island) tuples.
bridges = []
def getEdge(island, x, y):
    """Scan straight lines from (x, y); return [(other_island, length)] for
    every reachable island.  Length-1 bridges are rejected (`d == 1`),
    matching the problem's minimum bridge length of 2."""
    ret = []
    for dx, dy in directions:
        nx, ny, d = x, y, 0
        while True:
            nx, ny = nx + dx, ny + dy
            if nx < 0 or ny < 0 or nx >= n or ny >= m:
                break
            if islands[nx][ny]:
                if island == islands[nx][ny] or d == 1:
                    break
                ret.append((islands[nx][ny], d))
                break
            d += 1
    return ret
# Collect every possible bridge from every border cell of every island.
for a in range(1, v + 1):
    for x, y in locations[a]:
        for b, c in getEdge(a, x, y):
            bridges.append((c, a, b))
# Kruskal: process bridges shortest-first.
bridges.sort()
ans = 0
# Union-find forest over island ids 1..v (index 0 unused).
tree = [i for i in range(v + 1)]
def find(idx):
    """Return the root of idx's set, compressing the path on the way."""
    if idx == tree[idx]:
        return idx
    tree[idx] = find(tree[idx])
    return tree[idx]
def merge(a, b):
    """Union the sets containing a and b (b's root attached under a's)."""
    a, b = find(a), find(b)
    tree[b] = a
cnt = 0
for c, a, b in bridges:
    if find(a) != find(b):
        merge(a, b)
        ans += c
        cnt += 1
# v islands need v-1 bridges; otherwise the graph is disconnected.
print(ans if cnt == v - 1 else -1)
|
[
"gusdn0657@gmail.com"
] |
gusdn0657@gmail.com
|
5ea58c5b4506ca5eba55364cae0aa1b3d9f5e864
|
b4afb44b8f483c048716fe12d778186ce68ac846
|
/pages/ios/ffan/movie_page_configs.py
|
cdc95760ceb5ada9045205274bfe84e52614c57f
|
[] |
no_license
|
liu111xiao111/UItest
|
64309b2c85f6d2334d64bb0875ba9ced459ebb1e
|
67e2acc9a99da81022e286e8d8ec7ccb12636ff3
|
refs/heads/master
| 2021-09-01T18:30:28.044296
| 2017-12-28T04:36:46
| 2017-12-28T04:36:46
| 115,585,226
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
class MoviePageConfigs(object):
    """Configuration constants (timeouts and locators) for the MoviePage class."""

    # Timeouts, in seconds.
    assert_view_timeout = 10        # waiting for an assert-view check
    verify_view_timeout = 10        # waiting for a verify-view check
    assert_invalid_view_time = 3    # confirming a view is absent
    click_on_button_timeout = 10    # waiting for a button tap
    get_timeout = 10                # waiting for an element fetch

    # Movie tab title (runtime string, kept verbatim).
    text_movie_title = u"电影"

    # Locators for the seat-picking / ticket-purchase flow.
    resource_id_seat_picking_and_buying_ticket_button = "com.wanda.app.wanhui:id/movie_buy_ticket"
    xpath_seat_picking_and_buying_ticket_bt = "//UIAApplication[1]/UIAWindow[1]/UIATableView[1]/UIATableCell[1]/UIAButton[1]"
    xpath_film_name = "//UIAApplication[1]/UIAWindow[1]/UIATableView[1]/UIATableCell[1]/UIAStaticText[1]"

    def __init__(self):
        pass
|
[
"tl@neusoft.com"
] |
tl@neusoft.com
|
a9316fca169c00cd96bfefb8ba8ffbead21d8a48
|
0cb72fac7926b7415af3bff1bf7dbe03e96fead5
|
/LC_Non_Decreasing_Array.py
|
ca41f20ed70e4daf05dfc1ccff9b789768309e9f
|
[] |
no_license
|
venkatsvpr/Problems_Solved
|
586b5ef9f5868785cb52552da674ed003e139278
|
11c81645893fd65f585c3f558ea837c7dd3cb654
|
refs/heads/master
| 2022-11-11T21:53:27.469213
| 2022-10-14T05:59:11
| 2022-10-14T05:59:11
| 114,329,154
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,785
|
py
|
"""
665. Non-decreasing Array
Given an array with n integers, your task is to check if it could become non-decreasing by modifying at most 1 element.
We define an array is non-decreasing if array[i] <= array[i + 1] holds for every i (1 <= i < n).
Example 1:
Input: [4,2,3]
Output: True
Explanation: You could modify the first 4 to 1 to get a non-decreasing array.
Example 2:
Input: [4,2,1]
Output: False
Explanation: You can't get a non-decreasing array by modify at most one element.
Note: The n belongs to [1, 10,000].
"""
""" Simple logic
Step 1:
Traverse from left to right.. there should be only one position where we find a high to low conversion.
if we find second it is not possible
Step 2:
Traverse from right to left... there should be only one position where we find a low to high conversion.
If we find second it is not possible.
Either of step 1 and step2 should be true.
"""
class Solution(object):
    def checkPossibility(self, nums):
        """
        :type nums: List[int]
        :rtype: bool

        Return True when `nums` can be made non-decreasing by modifying at
        most one element (LeetCode 665).

        Replaces the original duplicated two-pass flag/count logic with the
        canonical greedy single pass: at the first descent vals[i-1] >
        vals[i], repair it in the way that constrains later elements least
        (lower vals[i-1] when vals[i-2] allows it, otherwise raise vals[i]);
        a second descent means it is impossible.  Works on a copy, so the
        caller's list is never mutated.
        """
        vals = list(nums)
        modified = False
        for i in range(1, len(vals)):
            if vals[i - 1] <= vals[i]:
                continue  # already non-decreasing here
            if modified:
                return False  # would need a second modification
            modified = True
            if i < 2 or vals[i - 2] <= vals[i]:
                # Safe to lower the left element down to vals[i].
                vals[i - 1] = vals[i]
            else:
                # Must raise the right element up to vals[i - 1].
                vals[i] = vals[i - 1]
        return True
|
[
"venkatakrishnansvpr@gmail.com"
] |
venkatakrishnansvpr@gmail.com
|
32226188a7bed68e781ad02fb2608f42c19404c7
|
a3cc7286d4a319cb76f3a44a593c4a18e5ddc104
|
/lib/googlecloudsdk/core/util/keyboard_interrupt.py
|
8a1c992f84aeaeb3bed44caaeb72156c683c93d1
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
jordanistan/Google-Cloud-SDK
|
f2c6bb7abc2f33b9dfaec5de792aa1be91154099
|
42b9d7914c36a30d1e4b84ae2925df7edeca9962
|
refs/heads/master
| 2023-09-01T01:24:53.495537
| 2023-08-22T01:12:23
| 2023-08-22T01:12:23
| 127,072,491
| 0
| 1
|
NOASSERTION
| 2023-08-22T01:12:24
| 2018-03-28T02:31:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,941
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud SDK default keyboard interrupt handler."""
from __future__ import absolute_import
from __future__ import division
import os
import signal
import sys
from googlecloudsdk.core import log
def HandleInterrupt(signal_number=None, frame=None):
  """Handles keyboard interrupts (aka SIGINT, ^C).

  Disables the stack trace when a command is killed by keyboard interrupt.

  Args:
    signal_number: The interrupt signal number.
    frame: The signal stack frame context.
  """
  del signal_number, frame  # currently unused
  message = '\n\nCommand killed by keyboard interrupt\n'
  try:
    log.err.Print(message)
  except NameError:
    # NOTE(review): raw-stderr fallback; presumably covers the log module
    # being unavailable (e.g. during interpreter teardown) -- confirm.
    sys.stderr.write(message)
  # Kill ourselves with SIGINT so our parent can detect that we exited because
  # of a signal. SIG_DFL disables further KeyboardInterrupt exceptions.
  signal.signal(signal.SIGINT, signal.SIG_DFL)
  os.kill(os.getpid(), signal.SIGINT)
  # Just in case the kill failed ...
  sys.exit(1)
def InstallHandler():
  """Installs the default Cloud SDK keyboard interrupt handler.

  Must run on the main thread; on other threads signal.signal raises
  ValueError, which is swallowed below.
  """
  try:
    signal.signal(signal.SIGINT, HandleInterrupt)
  except ValueError:
    # Signal cannot be sent from non-main threads. Integration testing will
    # run parallel threads for performance reasons, occasionally hitting this
    # exception. Should not be reached in production.
    pass
|
[
"jordan.robison@gmail.com"
] |
jordan.robison@gmail.com
|
591261811f03add0430c0fb4333e9d46e62d9105
|
c3ff891e0e23c5f9488508d30349259cc6b64b4d
|
/python练习/老王开枪/老王开枪2.py
|
1897dcbc68707445a20bba1d7fbb9ffb4d19a52b
|
[] |
no_license
|
JacksonMike/python_exercise
|
2af2b8913ec8aded8a17a98aaa0fc9c6ccd7ba53
|
7698f8ce260439abb3cbdf478586fa1888791a61
|
refs/heads/master
| 2020-07-14T18:16:39.265372
| 2019-08-30T11:56:29
| 2019-08-30T11:56:29
| 205,370,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,754
|
py
|
class Person():
    """A shooter/target with hit points who may hold a gun.

    All user-facing strings are Chinese and must stay verbatim.
    """
    def __init__(self,name):
        super(Person,self).__init__()
        self.name = name
        self.gun = None   # held Gun, or None
        self.hp = 100     # hit points; <= 0 is rendered as dead
    def install_bullet(self,clip_temp,bullet_temp):
        # Load one bullet into the given clip.
        clip_temp.store_bullet(bullet_temp)
    def install_clip(self,gun_temp,clip_temp):
        # Insert the clip into the given gun.
        gun_temp.store_clip(clip_temp)
    def hold_gun(self,gun_temp):
        self.gun = gun_temp
    def __str__(self):
        # Three renderings: armed, unarmed-alive, dead.
        if self.gun:
            return "%s的血量为%d,他有枪%s"%(self.name,self.hp,self.gun)
        else:
            if self.hp > 0:
                return "%s的血量为%d,他没有枪"%(self.name,self.hp)
            else:
                return "%s已经死去"%self.name
    def use_gun(self,enemy):
        # Fire the held gun at *enemy*; raises AttributeError if unarmed.
        self.gun.fire(enemy)
    def lose_blood(self,power):
        self.hp -= power
class Gun():
    """A gun that fires the top bullet of its inserted clip."""
    def __init__(self,name):
        super(Gun,self).__init__()
        self.name = name
        self.clip = None  # inserted Clip, or None
    def store_clip(self,clip_temp):
        self.clip = clip_temp
    def __str__(self):
        if self.clip:
            return "枪的信息为%s,%s"%(self.name,self.clip)
        else:
            return "枪的信息为%s,这把枪没有弹夹"%(self.name)
    def fire(self,enemy):
        # Pop a bullet from the clip and shoot it; no-op result string
        # when the clip is empty (raises AttributeError when no clip).
        bullet_temp = self.clip.eject_clip()
        if bullet_temp:
            bullet_temp.shoot(enemy)
        else:
            return "弹夹没子弹了"
class Clip():
    """A magazine holding up to max_num bullets (LIFO order)."""

    def __init__(self, max_num):
        super(Clip, self).__init__()
        # Capacity and current contents of the magazine.
        self.max_num = max_num
        self.bullet_list = []

    def store_bullet(self, bullet_temp):
        """Load one bullet into the magazine."""
        self.bullet_list.append(bullet_temp)

    def __str__(self):
        # Runtime string kept byte-for-byte: "loaded/capacity".
        return "弹夹的信息为%d/%d" % (len(self.bullet_list), self.max_num)

    def eject_clip(self):
        """Pop and return the most recently loaded bullet, or None if empty."""
        return self.bullet_list.pop() if self.bullet_list else None
class Bullet():
    """A bullet with a fixed damage value."""

    def __init__(self, power):
        super(Bullet, self).__init__()
        self.power = power  # damage dealt per hit

    def shoot(self, enemy):
        """Deal this bullet's damage to *enemy* via its lose_blood hook."""
        enemy.lose_blood(self.power)
def main():
    """Demo: load 15 bullets into a clip, mount it, and fire five shots."""
    shooter = Person("Jim")
    rifle = Gun("巴雷特")
    magazine = Clip(20)
    # Load 15 bullets of power 20 each.
    for _ in range(15):
        shooter.install_bullet(magazine, Bullet(20))
    shooter.install_clip(rifle, magazine)
    print(rifle)
    print(magazine)
    shooter.hold_gun(rifle)
    target = Person("Kent")
    print(target)
    # Five shots of 20 damage: the target reaches 0 hp on the last one.
    for _ in range(5):
        shooter.use_gun(target)
        print(shooter)
        print(target)


if __name__ == '__main__':
    main()
|
[
"2101706902@qq.com"
] |
2101706902@qq.com
|
a24eae2a57094a02b726d84a5b8fa1291192c8f5
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/client/tutorial/doc_loader/sub_parsers/quests.py
|
ab4c7a0dda44086cdaa465d5711086060442461e
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009
| 2017-02-03T21:40:17
| 2017-02-03T21:40:17
| 80,870,824
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 7,831
|
py
|
# 2017.02.03 21:54:21 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/tutorial/doc_loader/sub_parsers/quests.py
from tutorial.control.quests import triggers
from tutorial.doc_loader import sub_parsers
from tutorial.doc_loader.sub_parsers import chains, readVarValue
from tutorial.doc_loader.sub_parsers import lobby
from items import _xml
from tutorial.data import chapter
from tutorial.data import effects
_EFFECT_TYPE = effects.EFFECT_TYPE
def _readAllTurorialBonusesTriggerSection(xmlCtx, section, chapter, triggerID):
    """Parse an 'allTutorialBonuses' trigger section.

    NOTE(review): 'Turorial' typo is kept — init() registers this exact name.
    """
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.AllTutorialBonusesTrigger)
def _readInvalidateFlagsTriggerSection(xmlCtx, section, chapter, triggerID):
    """Build an InvalidateFlagsTrigger; the XML section carries no extra data."""
    return triggers.InvalidateFlagsTrigger(triggerID)
def _readRandomBattlesCountTriggerSection(xmlCtx, section, chapter, triggerID):
    """Build a RandomBattlesCountTrigger; the XML section carries no extra data."""
    return triggers.RandomBattlesCountTrigger(triggerID)
def _readResearchModuleTriggerSection(xmlCtx, section, chapter, triggerID):
    """Build a ResearchModuleTrigger; the XML section carries no extra data."""
    return triggers.ResearchModuleTrigger(triggerID)
def _readInstallModuleTriggerSection(xmlCtx, section, chapter, triggerID):
    """Build an InstallModuleTrigger; the XML section carries no extra data."""
    return triggers.InstallModuleTrigger(triggerID)
def _readResearchVehicleTriggerSection(xmlCtx, section, chapter, triggerID):
    """Parse a 'researchVehicle' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.ResearchVehicleTrigger)
def _readBuyVehicleTriggerSection(xmlCtx, section, chapter, triggerID):
    """Parse a 'buyVehicle' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.BuyVehicleTrigger)
def _readInventoryVehicleTriggerSection(xmlCtx, section, chapter, triggerID):
    """Parse an 'inventoryVehicle' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.InventoryVehicleTrigger)
def _readPermanentVehicleOwnTriggerSection(xmlCtx, section, chapter, triggerID):
    """Parse a 'permanentOwnVehicle' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.PermanentVehicleOwnTrigger)
def _readXpExchangeTriggerSection(xmlCtx, section, chapter, triggerID):
    """Build an XpExchangeTrigger; the XML section carries no extra data."""
    return triggers.XpExchangeTrigger(triggerID)
def _readVehicleBattlesCountTriggerSection(xmlCtx, section, chapter, triggerID):
    """Parse a 'vehicleBattlesCount' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.VehicleBattleCountTrigger)
def readTutorialIntSettingTriggerSection(xmlCtx, section, _, triggerID):
    """Parse a 'tutorialIntSetting' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.TutorialIntSettingsTrigger)
def readTutorialAccountSettingTriggerSection(xmlCtx, section, _, triggerID):
    """Parse a 'tutorialAccountSetting' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.TutorialAccountSettingsTrigger)
def _readChapterBonusTriggerSection(xmlCtx, section, _, triggerID):
    """Parse a 'chapterBonus' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.ChapterBonusTrigger)
def _readItemsInstallTriggerSection(xmlCtx, section, _, triggerID):
    """Parse an 'installItems' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.InstallItemsTrigger)
def _readTimerTriggerSection(xmlCtx, section, _, triggerID):
    """Parse a 'timer' trigger (validate-var style section)."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.TimerTrigger)
def readSaveTutorialSettingSection(xmlCtx, section, _, conditions):
    """Parse a 'save-setting' effect targeting a tutorial-local setting ID."""
    settingID = sub_parsers.parseID(xmlCtx, section, 'Specify a setting ID')
    return effects.HasTargetEffect(settingID, _EFFECT_TYPE.SAVE_TUTORIAL_SETTING, conditions=conditions)
def readSaveAccountSettingSection(xmlCtx, section, _, conditions):
    """Parse a 'save-account-setting' effect targeting an account setting ID."""
    settingID = sub_parsers.parseID(xmlCtx, section, 'Specify a setting ID')
    return effects.HasTargetEffect(settingID, _EFFECT_TYPE.SAVE_ACCOUNT_SETTING, conditions=conditions)
def readTutorialSettingSection(xmlCtx, section, flags):
    """Parse a 'tutorial-setting' entity into a chapter.TutorialSetting.

    Requires a 'setting-name' (string) and a 'setting-value' (bool) subsection;
    reports via _xml.raiseWrongXml when either is missing.
    """
    entityID = sub_parsers.parseID(xmlCtx, section, 'Specify a setting ID')
    name = None
    if 'setting-name' in section.keys():
        name = _xml.readString(xmlCtx, section, 'setting-name')
    else:
        _xml.raiseWrongXml(xmlCtx, section.name, 'Specify a setting name')
    value = None
    if 'setting-value' in section.keys():
        value = _xml.readBool(xmlCtx, section, 'setting-value')
    else:
        _xml.raiseWrongXml(xmlCtx, section.name, 'Specify a setting value')
    return chapter.TutorialSetting(entityID, name, value)
def readQuestConditions(section):
    """Read the 'quest-conditions' subsection into a list of parsed var values.

    Returns an empty list when the subsection is absent.
    """
    conditionsRoot = section['quest-conditions']
    if conditionsRoot is None:
        return []
    parsed = []
    for _, conditionSec in conditionsRoot.items():
        # Each child holds a single (type, value) pair describing one condition.
        valueType, valueSec = conditionSec.items()[0]
        parsed.append(readVarValue(valueType, valueSec))
    return parsed
def _readSimpleWindowCloseTriggerSection(xmlCtx, section, _, triggerID):
    """Parse a 'windowClosed' trigger; honors the optional 'validate-update-only' key."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.SimpleWindowCloseTrigger, validateUpdateOnly='validate-update-only' in section.keys())
def _readSimpleWindowProcessTriggerSection(xmlCtx, section, _, triggerID):
    """Parse a 'windowProcessed' trigger; honors the optional 'validate-update-only' key."""
    return sub_parsers.readValidateVarTriggerSection(xmlCtx, section, triggerID, triggers.SimpleWindowProcessTrigger, validateUpdateOnly='validate-update-only' in section.keys())
def _readSelectVehicleInHangarSection(xmlCtx, section, flags, conditions):
    """Parse a 'select-in-hangar' effect; the section text is the target vehicle ID."""
    return effects.HasTargetEffect(section.asString, effects.EFFECT_TYPE.SELECT_VEHICLE_IN_HANGAR, conditions=conditions)
def init():
    """Register quest-chapter parsers with the shared sub_parsers registries.

    Keys in each mapping are the XML tag names used in chapter documents;
    values are the parser callables defined in this module or its siblings.
    """
    # Effect sections understood inside quest chapters.
    sub_parsers.setEffectsParsers({'save-setting': readSaveTutorialSettingSection,
     'save-account-setting': readSaveAccountSettingSection,
     'show-unlocked-chapter': chains.readShowUnlockedChapterSection,
     'switch-to-random': lobby.readSwitchToRandomSection,
     'select-in-hangar': _readSelectVehicleInHangarSection})
    # Entity sections (hints and client-side settings).
    sub_parsers.setEntitiesParsers({'hint': chains.readHintSection,
     'tutorial-setting': readTutorialSettingSection})
    # Trigger sections.
    sub_parsers.setTriggersParsers({'bonus': lobby.readBonusTriggerSection,
     'premiumDiscount': lobby.readPremiumDiscountsUseTriggerSection,
     'tankmanAcademyDiscount': chains.readTankmanPriceDiscountTriggerSection,
     'allTutorialBonuses': _readAllTurorialBonusesTriggerSection,
     'randomBattlesCount': _readRandomBattlesCountTriggerSection,
     'researchModule': _readResearchModuleTriggerSection,
     'installModule': _readInstallModuleTriggerSection,
     'researchVehicle': _readResearchVehicleTriggerSection,
     'buyVehicle': _readBuyVehicleTriggerSection,
     'inventoryVehicle': _readInventoryVehicleTriggerSection,
     'permanentOwnVehicle': _readPermanentVehicleOwnTriggerSection,
     'buySlot': lobby.readFreeVehicleSlotTriggerSection,
     'vehicleBattlesCount': _readVehicleBattlesCountTriggerSection,
     'xpExchange': _readXpExchangeTriggerSection,
     'tutorialIntSetting': readTutorialIntSettingTriggerSection,
     'tutorialAccountSetting': readTutorialAccountSettingTriggerSection,
     'chapterBonus': _readChapterBonusTriggerSection,
     'installItems': _readItemsInstallTriggerSection,
     'invalidateFlags': _readInvalidateFlagsTriggerSection,
     'timer': _readTimerTriggerSection,
     'fightBtn': chains.readFightBtnDisableTriggerSection,
     'windowClosed': _readSimpleWindowCloseTriggerSection,
     'windowProcessed': _readSimpleWindowProcessTriggerSection,
     'isInSandbox': chains.readIsInSandBoxPreQueueTriggerSection,
     'queue': chains.readQueueTrigger,
     'isInSandboxOrRandom': chains.readIsInSandBoxOrRandomPreQueueTriggerSection})
    # Window sections.
    sub_parsers.setWindowsParsers({'awardWindow': sub_parsers.readQuestAwardWindowSection})
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\tutorial\doc_loader\sub_parsers\quests.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:54:21 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
78651cd819bee71e2717fa7ef9e14dd50ca938b4
|
be1ebe1b2b7fa059e49922a4ba66cf74bb4bcbd2
|
/main/admin.py
|
ec32b6e207b024e42a4909134afc658c05fbb6a3
|
[] |
no_license
|
Offdevelopers/realestate
|
8c6c5fb222cb2372c223a31183080d6f3c366652
|
f40d131f0c3c05de1ea38839732835dc6f2b16c5
|
refs/heads/master
| 2020-03-21T18:31:39.094430
| 2018-06-15T16:00:43
| 2018-06-15T16:00:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
from django.contrib import admin
from .models import Developer, Agent, Property, Mortage
# Register your models here.
# Expose the real-estate domain models in the Django admin.
# NOTE(review): "Mortage" matches the model name imported above — likely a
# typo for "Mortgage"; fixing it would require renaming the model in models.py.
admin.site.register(Property)
admin.site.register(Developer)
admin.site.register(Agent)
admin.site.register(Mortage)
|
[
"abiodun.toluwanii@gmail.com"
] |
abiodun.toluwanii@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.