blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
26dcfc08a00b7aeb3c786eddbad0189fcb96d23a
|
21b0b4c27193898207751c91b8b2ed168a1b1638
|
/py/py_0637_flexible_digit_sum.py
|
1f93090be6969f91e0df5a37180db7d4318b6121
|
[
"MIT"
] |
permissive
|
lcsm29/project-euler
|
67560a4e66968f1671a3d7ecf2dda6c956893dca
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
refs/heads/main
| 2023-07-04T11:45:24.374841
| 2021-08-07T08:20:41
| 2021-08-07T08:20:41
| 371,808,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 984
|
py
|
# Solution of;
# Project Euler Problem 637: Flexible digit sum
# https://projecteuler.net/problem=637
#
# Given any positive integer $n$, we can construct a new integer by inserting
# plus signs between some of the digits of the base $B$ representation of $n$,
# and then carrying out the additions. For example, from $n=123_{10}$ ($n$ in
# base 10) we can construct the four base 10 integers $123_{10}$,
# $1+23=24_{10}$, $12+3=15_{10}$ and $1+2+3=6_{10}$Let $f(n,B)$ be the
# smallest number of steps needed to arrive at a single-digit number in base
# $B$. For example, $f(7,10)=0$ and $f(123,10)=1$. Let $g(n,B_1,B_2)$ be the
# sum of the positive integers $i$ not exceeding $n$ such that
# $f(i,B_1)=f(i,B_2)$. You are given $g(100,10,3)=3302$. Find $g(10^7,10,3)$
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder solver for Problem 637 (flexible digit sum); not implemented yet."""
    pass


if __name__ == '__main__':
    n = 1000
    i = 10000
    prob_id = 637
    # timed.caller presumably benchmarks `dummy` (i iterations, input n) -- TODO confirm
    timed.caller(dummy, n, i, prob_id)
|
[
"lcsm29@outlook.com"
] |
lcsm29@outlook.com
|
92e3cdf8225d45ea6513de9fe7fb005957dc43f2
|
dc2682f687a203dcf5f4f4260f857ef5099bbdab
|
/src/bootstrapping_olympics/interfaces/rep_nuisance_causal.py
|
fbe394ac400c52c679a98d561a9b9c3e359c92b9
|
[] |
no_license
|
AndreaCensi/boot_olympics
|
1bc3d0cd887ca6b47a159929b53032c298979450
|
dc05e283bde01cafc4843d82f17413b13c6ce1af
|
refs/heads/master
| 2020-07-08T10:49:37.368104
| 2013-07-19T07:00:22
| 2013-07-19T07:00:22
| 2,098,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
from .boot_spec import BootSpec
from abc import abstractmethod
from blocks import SimpleBlackBox
from contracts import ContractsMeta, contract
__all__ = ['RepresentationNuisanceCausal']


class RepresentationNuisanceCausal(object):
    '''
    Interface for a causal representation nuisance: transforms a BootSpec
    and supplies pre/post SimpleBlackBox wrappers for the data streams
    (see the @contract decorations below).
    '''
    __metaclass__ = ContractsMeta

    class NotInvertible(Exception):
        # Raised by inverse() when no inverse nuisance exists.
        pass

    def inverse(self):
        '''
        Returns the inverse representation nuisance,
        or raises NotInvertible
        '''
        # NOTE(review): unlike get_pre/get_post this is not marked
        # @abstractmethod; calling it on a subclass that forgot to
        # override silently returns None.

    @contract(spec=BootSpec, returns=BootSpec)
    def transform_spec(self, spec):
        '''
        Returns the BootSpec obtained by applying this nuisance to spec.
        '''

    @abstractmethod
    @contract(returns=SimpleBlackBox)
    def get_pre(self):
        pass

    @abstractmethod
    @contract(returns=SimpleBlackBox)
    def get_post(self):
        pass
|
[
"andrea@cds.caltech.edu"
] |
andrea@cds.caltech.edu
|
5afadcff75d577496520b4eb19d8797e2579c837
|
f68e0b205bd3eb036905c60bd03a8d9c7f3b1d88
|
/machine_learning/3.3.logistic-regression.py
|
1d88351ce1bc85e63bb039ea2ead4f43f3e9061a
|
[] |
no_license
|
SleepyBag/TrivialPractice
|
c31458d0c28afba158cb4090cb7013267ff54bb2
|
8e006fbe1425f62b52b2a5fe5b6404ea1883f3ab
|
refs/heads/master
| 2020-03-22T00:34:37.415074
| 2018-06-30T14:02:04
| 2018-06-30T14:02:04
| 139,253,389
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
import numpy as np
from math import log
from math import exp
# Two input features plus a bias row give a (3, 1) parameter vector.
input_dim = 2
output_dim = 1
# Random initialization; results vary run to run (no seed is set).
beta = np.random.normal(size=(input_dim + 1, output_dim))
def p1(xhat, beta):
    """Sigmoid posterior P(y=1 | xhat) under the logistic model beta."""
    logit = np.dot(beta.T, xhat)[0][0]
    odds = np.exp(logit)
    return odds / (1 + odds)
def iterate(X, Y, beta):
    """One Newton-Raphson update of beta for logistic regression.

    Accumulates the gradient (grad), the Hessian (grad2) and the negative
    log-likelihood (loss) over the whole dataset, then applies
    beta <- beta - H^-1 * grad.

    Returns (grad, grad2, updated_beta, loss).
    """
    import pdb
    # pdb.set_trace()
    grad = np.zeros(shape=beta.shape)
    grad2 = 0
    loss = 0
    for x, y in zip(X, Y):
        # Augment x with a trailing 1 for the bias term: xhat = (x; 1).
        xhat = np.concatenate((np.array([x]).T, np.array([[1]])))
        grad += - xhat * (y - p1(xhat, beta))
        # Hessian contribution: xhat xhat^T * p * (1 - p).
        grad2 += np.dot(xhat, xhat.T) * p1(xhat, beta) * (1 - p1(xhat, beta))
        # Per-sample negative log-likelihood; printed below for debugging.
        loss += log(1 + exp(np.dot(beta.T, xhat))) - y * np.dot(beta.T, xhat)
        print(log(1 + exp(np.dot(beta.T, xhat))) - y * np.dot(beta.T, xhat))
    # pdb.set_trace()
    # Newton step: invert the accumulated Hessian.
    beta = beta - np.dot(np.linalg.inv(grad2), grad)
    return grad, grad2, beta, loss
# Training data: 17 samples of 2 features each.
# NOTE(review): looks like the classic "watermelon 3.0a" density/sugar
# dataset -- verify against the textbook source.
X = np.array([[.697, .460], [.774, .376],
              [.634, .264], [.608, .318],
              [.556, .215], [.403, .237],
              [.481, .149], [.437, .211],
              [.666, .091], [.243, .267],
              [.245, .057], [.343, .099],
              [.639, .161], [.657, .198],
              [.360, .370], [.593, .042], [.719, .103]])
# First 8 samples are labelled positive, remaining 9 negative.
Y = np.array([[1]] * 8 + [[0]] * 9)
epoch = 50
for i in range(epoch):
    print('Epoch', i, 'started')
    grad, grad2, beta, loss = iterate(X, Y, beta)
    print('loss =', loss)
|
[
"xueqianming200@gmail.com"
] |
xueqianming200@gmail.com
|
555e47b52b537e75c5f7db4a5e347387352054ae
|
2de2437bbf480f6518554bcb204106dd37262023
|
/office365/sharepoint/portal/SPSiteCreationResponse.py
|
571a183cf080a80520369bacb01a8d04eb63bccb
|
[
"MIT"
] |
permissive
|
stardust85/Office365-REST-Python-Client
|
386e5bba16cdee1472b7e23d405a4bf9b6f5e73a
|
cd369c607c7d137a000734e9c5e8f03ae3e3c603
|
refs/heads/master
| 2022-09-29T19:44:02.166438
| 2020-06-03T23:12:40
| 2020-06-03T23:12:40
| 269,356,313
| 0
| 0
|
MIT
| 2020-06-04T12:41:03
| 2020-06-04T12:41:02
| null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from office365.runtime.client_value_object import ClientValueObject
class SPSiteCreationResponse(ClientValueObject):
    """Value object for the response of the SharePoint site-creation API."""

    def __init__(self):
        super(SPSiteCreationResponse, self).__init__()
        # Presumably populated from the service response by the
        # ClientValueObject machinery -- verify; PascalCase names mirror
        # the server-side property names.
        self.SiteId = None
        self.SiteStatus = None
        self.SiteUrl = None
|
[
"vvgrem@gmail.com"
] |
vvgrem@gmail.com
|
add2fdf8fbb97db4726458d6089e1bea384ed165
|
8fc7b22d6ea7444e0b90d5fb8e361ace06b4cb57
|
/setup.py
|
fad40934e3e44c29fddd2fe552a04cdead0b85d7
|
[
"Apache-2.0"
] |
permissive
|
rixx/django-hierarkey
|
80a9569eca317d997560fc92d3d67e5083ae081e
|
e61f03bd1a35489905f3b08fdc18755f1ed07973
|
refs/heads/master
| 2021-06-07T09:47:59.710988
| 2020-07-21T14:57:27
| 2020-07-21T14:57:27
| 195,140,375
| 0
| 0
|
Apache-2.0
| 2019-07-03T23:51:33
| 2019-07-03T23:51:32
| null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
from codecs import open
from os import path

from setuptools import find_packages, setup

here = path.abspath(path.dirname(__file__))

# Get the long description from the relevant file; fall back to an empty
# string when README.rst is missing or unreadable (e.g. a bare checkout).
try:
    with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
        long_description = f.read()
except OSError:
    # Was a bare `except:` which also hid NameError/KeyboardInterrupt etc.;
    # only I/O failures are expected here.
    long_description = ''

# Version is defined by the package itself; '?' when not importable yet
# (e.g. during a fresh source build before the package is on sys.path).
try:
    from hierarkey import version
except ImportError:
    version = '?'

setup(
    name='django-hierarkey',
    version=version,
    description='Hierarchical key-value store for django',
    long_description=long_description,
    url='https://github.com/raphaelm/django-hierarkey',
    author='Raphael Michel',
    author_email='mail@raphaelmichel.de',
    license='Apache License 2.0',
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Other Audience',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Framework :: Django :: 2.0',
        'Framework :: Django :: 2.1',
        'Framework :: Django :: 3.0',
    ],
    keywords='strings database models keyvalue',
    install_requires=[
        'python-dateutil'
    ],
    packages=find_packages(exclude=['tests', 'tests.*', 'demoproject', 'demoproject.*']),
    include_package_data=True,
)
|
[
"mail@raphaelmichel.de"
] |
mail@raphaelmichel.de
|
eb250e5339657728771d905ffbc0be84a8103fcc
|
4e353bf7035eec30e5ad861e119b03c5cafc762d
|
/QtCore/QXmlStreamNamespaceDeclaration.py
|
50587d69bdc7d2c462e766f31a2c38b6faa6a6d9
|
[] |
no_license
|
daym/PyQt4-Stubs
|
fb79f54d5c9a7fdb42e5f2506d11aa1181f3b7d5
|
57d880c0d453641e31e1e846be4087865fe793a9
|
refs/heads/master
| 2022-02-11T16:47:31.128023
| 2017-10-06T15:32:21
| 2017-10-06T15:32:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,849
|
py
|
# encoding: utf-8
# module PyQt4.QtCore
# from C:\Python27\lib\site-packages\PyQt4\QtCore.pyd
# by generator 1.145
# no doc
# imports
import sip as __sip
class QXmlStreamNamespaceDeclaration(): # skipped bases: <type 'sip.simplewrapper'>
    """
    QXmlStreamNamespaceDeclaration()
    QXmlStreamNamespaceDeclaration(QXmlStreamNamespaceDeclaration)
    QXmlStreamNamespaceDeclaration(QString, QString)
    """
    # NOTE(review): auto-generated PyQt4 stub. `QStringRef` below is not
    # defined in this module; the `return QStringRef` lines only document
    # the return type and would raise NameError if actually executed.

    def namespaceUri(self): # real signature unknown; restored from __doc__
        """ QXmlStreamNamespaceDeclaration.namespaceUri() -> QStringRef """
        return QStringRef

    def prefix(self): # real signature unknown; restored from __doc__
        """ QXmlStreamNamespaceDeclaration.prefix() -> QStringRef """
        return QStringRef

    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass

    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass

    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass

    def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
        pass

    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass

    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass

    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
|
[
"thekewlstore@gmail.com"
] |
thekewlstore@gmail.com
|
7adaffb1cbe579b1d161a731e9ac13a98af57b08
|
5a27471bc2ae4a815db2c58d047dbbea03cd8f77
|
/comparisonFiles/codigos/Simulacion/LV/PDmasIcf_comparacion.py
|
674865b1769c477c33b36cfaf1c0ec73d109887f
|
[] |
no_license
|
ezalorpro/LaboratorioDeControl
|
6ef52bb77b6a2283decb8c9fa153d7b43f019609
|
ac286214f9a4b32298aa1caec808717f4b2d9a29
|
refs/heads/master
| 2023-01-20T19:27:56.233542
| 2020-03-15T20:24:10
| 2020-03-15T20:24:10
| 190,772,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,102
|
py
|
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import correlate
from scipy.stats import energy_distance
from scipy.integrate import cumtrapz
from scipy import io
from matplotlib import pyplot as plt
import pickle
# Reference runs of the same simulation exported by MATLAB and SciLab.
MatFileMATLAB = io.loadmat('comparisonFiles/Data MATLAB/Simulacion/PDcfmasI10', squeeze_me=True)
MatFileSciLab = io.loadmat('comparisonFiles/Data SciLab/Simulacion/PDmasIcf10', squeeze_me=True)

# LVSCCD run: time vector, plant output, controller output, setpoint (pickled lists).
with open('comparisonFiles/Data LVSCCD/Simulacion/Controlador13.pkl', 'rb') as f:
    t_lv, yout_lv, yc_lv, set_point, _ = pickle.load(f)

t_lv = np.asarray(t_lv)
yout_lv = np.asarray(yout_lv)
yc_lv = np.asarray(yc_lv)
set_point = np.asarray(set_point)

t_mat = MatFileMATLAB['t']
yout_mat = MatFileMATLAB['yout']
yc_mat = MatFileMATLAB['yc']
t_sci = MatFileSciLab['t']
yout_sci = MatFileSciLab['yout']
yc_sci = MatFileSciLab['yc']

# Put all three runs on a common time grid (t_comun): whichever time vector
# has the most samples is clipped to the overlap of the other two, and the
# other signals are interpolated onto it.
if len(t_sci) > len(t_mat) and len(t_sci) > len(t_lv):
    mask1 = t_sci <= max(t_mat)
    mask2 = t_sci[mask1] <= max(t_lv)
    t_sci = t_sci[mask1][mask2]
    yout_sci = yout_sci[mask1][mask2]
    funcion1 = interp1d(t_mat, yout_mat)
    yout_mat = funcion1(t_sci)
    funcion2 = interp1d(t_lv, yout_lv)
    yout_lv = funcion2(t_sci)
    funcion3 = interp1d(t_lv, set_point)
    set_point = funcion3(t_sci)
    t_comun = t_sci
if len(t_lv) > len(t_mat) and len(t_lv) > len(t_sci):
    mask1 = t_lv <= max(t_mat)
    mask2 = t_lv[mask1] <= max(t_sci)
    t_lv = t_lv[mask1][mask2]
    yout_lv = yout_lv[mask1][mask2]
    set_point = set_point[mask1][mask2]
    funcion1 = interp1d(t_mat, yout_mat)
    yout_mat = funcion1(t_lv)
    funcion2 = interp1d(t_sci, yout_sci)
    yout_sci = funcion2(t_lv)
    t_comun = t_lv
if len(t_mat) > len(t_sci) and len(t_mat) > len(t_lv):
    mask1 = t_mat <= max(t_sci)
    mask2 = t_mat[mask1] <= max(t_lv)
    t_mat = t_mat[mask1][mask2]
    yout_mat = yout_mat[mask1][mask2]
    funcion1 = interp1d(t_lv, yout_lv)
    yout_lv = funcion1(t_mat)
    funcion2 = interp1d(t_sci, yout_sci)
    yout_sci = funcion2(t_mat)
    funcion3 = interp1d(t_lv, set_point)
    set_point = funcion3(t_mat)
    t_comun = t_mat

# Locate the sample where the LV trace deviates most from MATLAB/SciLab;
# that neighbourhood is what the inset axes zoom into below.
index_m = np.argmax([abs(yout_lv - yout_mat), abs(yout_lv - yout_sci)], axis=1)
index_temp = np.argmax([
    abs(yout_lv[index_m[0]] - yout_mat[index_m[0]]),
    abs(yout_lv[index_m[1]] - yout_sci[index_m[1]])
])
index_temp2 = np.argmax([
    yout_lv[index_m[index_temp]],
    yout_mat[index_m[index_temp]],
    yout_sci[index_m[index_temp]]
])
index_temp3 = np.argmin([
    yout_lv[index_m[index_temp]],
    yout_mat[index_m[index_temp]],
    yout_sci[index_m[index_temp]]
])
# NOTE(review): index_max and index_min are identical by construction;
# both name the sample of maximum discrepancy.
index_max = index_m[index_temp]
index_min = index_m[index_temp]
# YMAX/YMIN: the traces with the largest/smallest value at that sample,
# used to size the inset's y-limits.
if index_temp2 == 0:
    YMAX = yout_lv
elif index_temp2 == 1:
    YMAX = yout_mat
else:
    YMAX = yout_sci
if index_temp3 == 0:
    YMIN = yout_lv
elif index_temp3 == 1:
    YMIN = yout_mat
else:
    YMIN = yout_sci

# Main comparison plot: three simulator outputs plus the setpoint.
fig, ax = plt.subplots(figsize=(5.1, 4.2))
ax.plot(t_comun, yout_mat, color="#001C7F", label='MATLAB/ode45', linewidth=2)
ax.plot(t_comun, yout_lv, 'r', dashes=[1, 2], label='LV/RK2 sin filtro', linewidth=3)
ax.plot(t_comun, yout_sci, color="#12711C", dashes=[2, 2], label='SciLab/BDF-Newton', linewidth=2)
ax.plot(t_comun, set_point, 'k', linestyle='-.', label='SetPoint', linewidth=2)
ax.set_title('Controlador PD difuso mas integral con setpoint variable', fontsize=11)
ax.legend(loc=8, bbox_to_anchor=(0.37, 0))
ax.grid()

# Inset zoom around the point of maximum discrepancy.
axins = ax.inset_axes([0.42, 0.65, 0.25, 0.25])
axins.plot(t_comun, yout_mat, color="#001C7F", linewidth=2)
axins.plot(t_comun, yout_lv, 'r', dashes=[1, 2], linewidth=3)
axins.plot(t_comun, yout_sci, color="#12711C", dashes=[2, 2], linewidth=2)
axins.plot(t_comun, set_point, 'k', linestyle='-.', linewidth=2)
axins.grid()
axins.set_xlim(t_comun[index_max] - 0.1, t_comun[index_min] + 0.1)
axins.set_ylim(YMIN[index_min] - 1 * abs(YMIN[index_min] - YMAX[index_min]) / 2,
               YMAX[index_max] + 1 * abs(YMIN[index_min] - YMAX[index_min]) / 2)
ax.indicate_inset_zoom(axins)
fig.tight_layout()
plt.savefig('comparisonFiles/plots/Simulacion/PDmasIc.pdf')
plt.show()
|
[
"kleiver615@gmail.com"
] |
kleiver615@gmail.com
|
bea4ba346ee7ce82719f9664f3447a91400044e8
|
16f36b0fc607cb9c0d7b4eb7d5123a1b7ed40c62
|
/untitled1/.idea/sda.py
|
aa69dcd7d98aab5175474796216259bf79104703
|
[] |
no_license
|
IanChen6/python-learning
|
64c5137f536d10ffc10a9664da43ec02722c95de
|
fea998620ba0a354a741cdbc9d8455bca4080bae
|
refs/heads/master
| 2021-01-23T18:45:18.595877
| 2017-10-31T10:34:51
| 2017-10-31T10:34:51
| 102,805,586
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
# Demo: len() counts characters on str, but bytes after UTF-8 encoding.
import sys
print(len("中文"))                   # 2 characters
print(sys.getdefaultencoding())
print(len("中文".encode("utf-8")))   # 6 bytes: 3 bytes per CJK char in UTF-8
print(sys.getdefaultencoding())
import scrapy  # NOTE(review): unused import; fails if scrapy is not installed
|
[
"626614767@qq.com"
] |
626614767@qq.com
|
f4ae8716a1913caf616981c80109ad0bd68f39a5
|
e2bf489830e55a57945b8e696f8e2d6acefeb560
|
/05-系统编程-2/06-列表传递给线程.py
|
f6412c260a9060ce455498ed6ed3712e669c1585
|
[] |
no_license
|
taizilinger123/pythonjichu
|
e713de06fb050943a8a1e0256ccba8dea40a411d
|
5ee896e92edbac55d02aa63965d896200b8c2623
|
refs/heads/master
| 2023-04-01T02:00:37.557667
| 2023-03-31T05:08:40
| 2023-03-31T05:08:40
| 148,663,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
from threading import Thread
import time
def work1(nums):
    """Mutate the shared list in place, then report its contents."""
    nums += [44]
    print("----in work1---", nums)
def work2(nums):
    """Print the shared list after thread t1 has had time to modify it."""
    # Sleep briefly to make sure the work in thread t1 finishes first.
    time.sleep(1)
    print("----in work2---", nums)
# The list is shared by reference: work1's append is visible to work2.
g_nums = [11, 22, 33]
t1 = Thread(target=work1, args=(g_nums,))
t1.start()
t2 = Thread(target=work2, args=(g_nums,))
t2.start()
|
[
"837337164@qq.com"
] |
837337164@qq.com
|
6b318fa6df2f38457877794dba277c5ba5cc3a84
|
f26af24795d913a4dd17f467052255d256c95032
|
/apps/price/models.py
|
168110b8818fd660a191212deac9e181f91eaf29
|
[] |
no_license
|
minimedj/3dhero.ru
|
ccbd8d5d37fe149e6194457e66cfc338afe21bd6
|
5790f448fe03eecf79760c2e73154f0831abaf54
|
refs/heads/master
| 2021-01-22T21:08:00.632873
| 2016-03-28T13:11:26
| 2016-03-28T13:11:26
| 85,397,391
| 1
| 0
| null | 2017-03-18T11:49:44
| 2017-03-18T11:49:44
| null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
# -*- coding: utf-8 -*-
from apps.file.models import File
from google.appengine.ext import ndb
from model import Base
from werkzeug.wrappers import cached_property
class PriceFile(Base):
    """NDB model: a price-list file attached to the catalogue."""

    # Sort position within the price list.
    # NOTE(review): 'сортиовки' looks like a typo for 'сортировки'
    # ("sorting") -- runtime string left unchanged.
    order_id = ndb.IntegerProperty(
        default=0,
        verbose_name=u'Порядок сортиовки'
    )
    # Key of the uploaded File entity.
    file = ndb.KeyProperty(File)

    @cached_property
    def get_file(self):
        """Return the referenced File entity, or None when no file is linked."""
        if self.file:
            return self.file.get()
        else:
            return None

    @classmethod
    def _pre_delete_hook(cls, key):
        # NDB hook: deleting a PriceFile also deletes the underlying file entity.
        obj = key.get()
        if obj and obj.file:
            obj.file.delete()
|
[
"serg.baburin@gmail.com"
] |
serg.baburin@gmail.com
|
529d1708aadd414f217458769cc1134d4712d1e0
|
67e317d203ba478f0dda6d9014b1daa03acee080
|
/nidm/workflows/ProcessExecution.py
|
6be6844f43ea864b6b151d88ff96358edf493717
|
[
"Apache-2.0"
] |
permissive
|
tvanerp/PyNIDM
|
ec074dee9550dee91b21339c78105e8bf661cb6b
|
6a94875969c6bc5247b09d7d2793ed979b18ab3f
|
refs/heads/master
| 2020-07-25T16:54:03.905301
| 2019-09-13T23:56:18
| 2019-09-13T23:56:18
| 208,361,857
| 0
| 0
|
NOASSERTION
| 2019-09-13T23:23:06
| 2019-09-13T23:23:05
| null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
import prov.model as pm
from ..core import Constants
from ..experiment.Core import Core
from ..experiment.Core import getUUID
class ProcessExecution(pm.ProvActivity, Core):
    """Class for NIDM-Workflow ProcessExecution Objects.

    Default constructor uses empty graph with namespaces added from
    NIDM/Scripts/Constants.py. Additional alternate constructors for
    user-supplied graphs and default namespaces (i.e. from Constants.py)
    and user-supplied graph and namespaces
    """

    def __init__(self, parentDoc=None, attributes=None):
        """
        Default constructor, creates document and adds Process activity to graph
        with optional attributes

        :param parentDoc: optional ProvDocument
        :param attributes: optional dictionary of attributes to add
        """
        # set graph document: use the caller's document when given,
        # otherwise fall back to the shared default graph.
        if (parentDoc):
            self.graph = parentDoc
        else:
            self.graph = Constants.p_graph
        # execute default parent class constructor; activity gets a
        # fresh UUID-based identifier in the PROV namespace
        super(ProcessExecution, self).__init__(self.graph,
                                               pm.PROV[getUUID()],
                                               attributes)
        # NOTE(review): _add_record is a private prov API -- confirm it is
        # stable across prov versions.
        self.graph._add_record(self)
|
[
"satra@mit.edu"
] |
satra@mit.edu
|
8aa4f99dfc142943b8b42bf343e240017caf68eb
|
40c6f8449f25d30b16510d6b6da3893e5eae3641
|
/shorts/urls.py
|
60cdd103cf7055a38d253710c377d68d0a5a68c5
|
[] |
no_license
|
fergalmoran/shortio
|
b2188df44ebf08455ffd150fb6234dbff582f3c8
|
575dfd8438b37f383e1fc865baf5b7ad65e788ee
|
refs/heads/master
| 2020-03-29T13:03:02.682420
| 2014-04-17T22:10:41
| 2014-04-17T22:10:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
from django.conf.urls import patterns, url, include
from shorts import views
from .api import UserList, UserDetail
from .api import UrlList, UrlDetail, UserUrlList
# Nested user endpoints, included under ^users below.
user_urls = patterns(
    '',
    # NOTE(review): 'urlss' (double s) looks like a typo, but it is part of
    # the live URL scheme -- left unchanged.
    url(r'^/(?P<username>[0-9a-zA-Z_-]+)/urlss$',
        UserUrlList.as_view(), name='userurl-list'),
    url(r'^/(?P<username>[0-9a-zA-Z_-]+)$',
        UserDetail.as_view(), name='user-detail'),
    url(r'^$', UserList.as_view(), name='user-list')
)

# Nested short-url endpoints, included under ^urls below.
urls_urls = patterns(
    '',
    url(r'^/(?P<pk>\d+)$', UrlDetail.as_view(), name='urls-detail'),
    url(r'^$', UrlList.as_view(), name='urls-list')
)

# Top-level routing: API includes plus the HTML views.
urlpatterns = patterns(
    '',
    url(r'^users', include(user_urls)),
    url(r'^urls', include(urls_urls)),
    url(r'^$', views.index, name='index'),
    url(r'^create', views.create, name='create'),
    url(r'^(?P<url_id>\d+)/$', views.detail, name='detail')
)
|
[
"fergal.moran@gmail.com"
] |
fergal.moran@gmail.com
|
628022e1b0203108c42330f824295c40095a5238
|
0b312224bd5a9e6b1dd92b78ccf58049b5d69b1b
|
/compounds/migrations/0022_auto_20180724_2343.py
|
f8ceb737b47e0233ffe92b7a56a38ba85a895549
|
[] |
no_license
|
paulosjd/frags
|
e573cc9bc373a7e0847985478b5bf0bfca9b7153
|
4af65c7415dbbfa0a92f308bf93d5734c3583c5e
|
refs/heads/master
| 2020-03-17T00:58:15.530581
| 2018-12-12T23:48:15
| 2018-12-12T23:48:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
# Generated by Django 2.0.4 on 2018-07-24 21:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add CompoundSource.user_bioactive FK and redeclare
    the user_odorant FK (both nullable, cascade on delete)."""

    dependencies = [
        ('compounds', '0021_userbioactive'),
    ]

    operations = [
        migrations.AddField(
            model_name='compoundsource',
            name='user_bioactive',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='compounds.UserBioactive'),
        ),
        migrations.AlterField(
            model_name='compoundsource',
            name='user_odorant',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='compounds.UserOdorant'),
        ),
    ]
|
[
"pjdavis@gmx.com"
] |
pjdavis@gmx.com
|
3cf3ebd056bcb46c29d75f30833eea9c8d1dddc6
|
33110fa5ad8c47e31401769086a985eea1a991c7
|
/mmsegmentation/tests/test_data/test_dataset.py
|
3ebd20e28ed6168d7746eb9e04e12c532d11f73c
|
[
"Apache-2.0"
] |
permissive
|
UESTC-Liuxin/SKMT
|
32bc2781063de1da2a778659e6501762531b15a8
|
377bbe3e5d2777d6c3ccaae7a6c364bd9c85d651
|
refs/heads/master
| 2023-01-12T19:28:49.340298
| 2020-11-16T03:35:09
| 2020-11-16T03:35:09
| 283,365,017
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,658
|
py
|
import os.path as osp
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
from mmseg.core.evaluation import get_classes, get_palette
from mmseg.datasets import (ADE20KDataset, CityscapesDataset, ConcatDataset,
CustomDataset, PascalVOCDataset, RepeatDataset,USDataset,SkmtDataset)
def test_classes():
    """get_classes must mirror each dataset's CLASSES list for all aliases."""
    assert list(CityscapesDataset.CLASSES) == get_classes('cityscapes')
    assert list(PascalVOCDataset.CLASSES) == get_classes('voc') == get_classes(
        'pascal_voc')
    assert list(
        ADE20KDataset.CLASSES) == get_classes('ade') == get_classes('ade20k')
    # Unknown dataset names must raise.
    with pytest.raises(ValueError):
        get_classes('unsupported')
def test_palette():
    """get_palette must mirror each dataset's PALETTE for all aliases."""
    assert CityscapesDataset.PALETTE == get_palette('cityscapes')
    assert PascalVOCDataset.PALETTE == get_palette('voc') == get_palette(
        'pascal_voc')
    assert ADE20KDataset.PALETTE == get_palette('ade') == get_palette('ade20k')
    # Unknown dataset names must raise.
    with pytest.raises(ValueError):
        get_palette('unsupported')
@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__',
       MagicMock(side_effect=lambda idx: idx))
def test_dataset_wrapper():
    """Index arithmetic of ConcatDataset/RepeatDataset over mocked datasets.

    __getitem__ is patched to echo the per-dataset index, so the asserted
    values are the expected local indices after the wrapper's translation.
    """
    # CustomDataset.load_annotations = MagicMock()
    # CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx)
    dataset_a = CustomDataset(img_dir=MagicMock(), pipeline=[])
    len_a = 10
    dataset_a.img_infos = MagicMock()
    dataset_a.img_infos.__len__.return_value = len_a
    dataset_b = CustomDataset(img_dir=MagicMock(), pipeline=[])
    len_b = 20
    dataset_b.img_infos = MagicMock()
    dataset_b.img_infos.__len__.return_value = len_b

    # Concatenation: global index 25 maps to index 15 of dataset_b.
    concat_dataset = ConcatDataset([dataset_a, dataset_b])
    assert concat_dataset[5] == 5
    assert concat_dataset[25] == 15
    assert len(concat_dataset) == len(dataset_a) + len(dataset_b)

    # Repetition: index wraps modulo len(dataset_a).
    repeat_dataset = RepeatDataset(dataset_a, 10)
    assert repeat_dataset[5] == 5
    assert repeat_dataset[15] == 5
    assert repeat_dataset[27] == 7
    assert len(repeat_dataset) == 10 * len(dataset_a)
def test_custom_dataset():
    """End-to-end smoke test of USDataset: build a train split, pull the
    ground-truth segmentation maps, and evaluate random predictions,
    checking the structure of the returned metric dict.

    NOTE(review): data_root is a machine-specific absolute path, so this
    test only runs where the skmt5 dataset is present at that location.
    (A large body of commented-out CustomDataset scenarios was removed;
    see git history if those cases need to be revived.)
    """
    # dataset settings
    data_root = '/media/Program/CV/Project/SKMT/mmsegmentation/data/VOCdevkit/Seg/skmt5'
    img_norm_cfg = dict(
        mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
    crop_size = (512, 512)
    train_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations'),
        dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
        dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
        dict(type='RandomFlip', flip_ratio=0.5),
        dict(type='PhotoMetricDistortion'),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_semantic_seg']),
    ]

    # with img_dir and ann_dir
    train_dataset = USDataset(
        split='ImageSets/Segmentation/train.txt',
        data_root=data_root,
        img_dir='JPEGImages',
        ann_dir='SegmentationClass',
        pipeline=train_pipeline
    )

    # get gt seg map
    gt_seg_maps = train_dataset.get_gt_seg_maps()

    # evaluation: random label maps drawn from the 16-class label space
    pseudo_results = []
    for gt_seg_map in gt_seg_maps:
        h, w = gt_seg_map.shape
        pseudo_results.append(np.random.randint(low=0, high=16, size=(h, w)))
    eval_results = train_dataset.evaluate(pseudo_results)
    assert isinstance(eval_results, dict)
    assert 'mIoU' in eval_results
    assert 'mAcc' in eval_results
    assert 'aAcc' in eval_results

    # evaluation with CLASSES explicitly set
    train_dataset.CLASSES = tuple(['a'] * 16)
    eval_results = train_dataset.evaluate(pseudo_results)
    assert isinstance(eval_results, dict)
    assert 'mIoU' in eval_results
    assert 'mAcc' in eval_results
    assert 'aAcc' in eval_results


test_custom_dataset()
|
[
"625092890@qq.com"
] |
625092890@qq.com
|
3d513b4d49ec184a8c212f0e9e39bded5560e491
|
a9e60d0e5b3b5062a81da96be2d9c748a96ffca7
|
/configurations/i21-config/scripts/scannable/waveform_channel/BinpointWaveformChannelController.py
|
306459920ad78dce613a3566b8f00496e66b5507
|
[] |
no_license
|
openGDA/gda-diamond
|
3736718596f47607335ada470d06148d7b57526e
|
bbb64dcfd581c30eddb210c647db5b5864b59166
|
refs/heads/master
| 2023-08-16T08:01:11.075927
| 2023-08-15T16:01:52
| 2023-08-15T16:01:52
| 121,757,699
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,892
|
py
|
"""
define a Binpoint class to control data collection during continuous move.
Note that the Binpoint device is slaved from the ADC_ACQ_GRP, therefore there is no concept of exposure time.
However collection time is required for data pulling stream timing in order to retrieve collected data in
a more or less synchronised fashion between different channels.
@author: Fajin Yuan
@organization: Diamond Light Source Ltd
@since: 25 August 2020
"""
from gda.epics import CAClient
from scannable.waveform_channel.WaveformChannelPollingInputStream import WaveformChannelPollingInputStream
from org.slf4j import LoggerFactory
import installation
TIMEOUT = 5
class BinpointWaveformChannelController(object):
    """Controls Binpoint waveform capture during a continuous move.

    The Binpoint device is slaved from ADC_ACQ_GRP (see module docstring),
    so there is no exposure control here -- only reset, stream creation and
    acquisition-state reporting.
    """

    def __init__(self, name, binpoint_root_pv):
        self.logger = LoggerFactory.getLogger("BinpointWaveformChannelController:%s" % name)
        self.verbose = False
        self.name = name
        #ADC_ACQ_GRP in EPICS doing the Binpoint reset comes after PGME waveform reset
        self.pv_reset = CAClient(binpoint_root_pv + 'BPTS:BINPOINTALL:RESET.PROC')
        self.binpoint_root_pv = binpoint_root_pv
        self.configure()
        # Nominal collection time used for data-pulling stream timing only.
        self.exposure_time = 1
        self.number_of_positions = 0
        self.started = False
        self.hardware_trigger_provider = None
        self.stream = None

    def set_hardware_trigger_provider(self, hardwareTriggerProvider):
        self.hardware_trigger_provider = hardwareTriggerProvider

    def get_hardware_trigger_provider(self):
        return self.hardware_trigger_provider

    def configure(self):
        """Configure the reset PV channel (live mode only)."""
        if self.verbose: self.logger.info("%s %s" % (self.name, 'configure()...'))
        if installation.isLive():
            self.pv_reset.configure()

    def erase(self):
        """Clear the started flag; the slaved hardware needs no erase."""
        if self.verbose: self.logger.info("%s %s" % (self.name, 'erase()...'))
        self.started = False
        if self.verbose: self.logger.info("%s %s" % (self.name, '...erase()'))

    def erase_and_start(self):
        """Reset all Binpoint buffers (live mode) and mark acquisition started."""
        if self.verbose: self.logger.info("%s %s" % (self.name, 'erase_and_start()...'))
        if installation.isLive():
            self.pv_reset.caput(1)
        self.started = True
        if self.verbose: self.logger.info("%s %s" % (self.name, '...erase_and_start()'))

    def stop(self):
        if self.verbose: self.logger.info("%s %s" % (self.name, 'stop()...'))
        # Binpoint has no stop, since it is slaved from the ADC.
        if self.stream:
            self.stream.stop()
        self.started = False  # added after I10-145
        if self.verbose: self.logger.info("%s %s" % (self.name, '...stop()'))

    # Provide functions to configure WaveformChannelScannable
    def getChannelInputStream(self, channel_pv_suffix):
        # Channel suffix assumes trailing :
        self.stream = WaveformChannelPollingInputStream(self, channel_pv_suffix)
        # TODO: Investigate if the NLAST.B can be listened to, if so we can avoid using this polling class
        self.stream.verbose = self.verbose
        return self.stream

    def getChannelInputStreamFormat(self):
        return '%f'

    # Provide functions to configure WaveformChannelPollingInputStream
    def getChannelInputStreamType(self):
        return float

    def getChannelInputStreamCAClients(self, channel_pv_suffix):
        """Return (waveform PV client, element-count PV client) for a channel;
        in dummy mode returns an empty list and the expected position count."""
        if installation.isLive():
            pv_waveform = CAClient(self.binpoint_root_pv + channel_pv_suffix + 'BINPOINT')
            pv_count = CAClient(self.binpoint_root_pv + channel_pv_suffix + 'BINPOINT:NLAST.B')
        else:
            pv_waveform = []
            pv_count = self.number_of_positions
        return pv_waveform, pv_count

    def getExposureTime(self):
        return self.exposure_time

    def getChannelInputStreamAcquiring(self):
        #return true when continuous move started
        return self.started and self.hardware_trigger_provider.continuousMovingStarted
|
[
"fajin.yuan@diamond.ac.uk"
] |
fajin.yuan@diamond.ac.uk
|
6c8c3176d6fab6f847718ff9bf0b86f79b2e7b9f
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow/python/tpu/tpu_strategy_util.py
|
7c6396205ab5e127f88ed702d0b0bdcaa2a13c21
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:3ea6cc5b52461659dafea8bd5247a3e94f163ecadc1e3f2e4dc1b668a5ca730e
size 8774
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
2b0895d4db4313398af8c77ebcb6a061bcb4237a
|
73e07f0dc3d8b8625105c1528746c91e382567ed
|
/tests/__init__.py
|
79c17586ae066ff7d284c0c88d19930680dae095
|
[
"MIT"
] |
permissive
|
econchick/attrs
|
d10114f0e838ef0b63aadf5055f3e4a482cd0850
|
6a1a740c46e3071296eaa7b64d0120913ddadade
|
refs/heads/master
| 2021-01-21T00:22:29.237367
| 2015-02-07T09:06:08
| 2015-02-07T09:06:08
| 30,468,084
| 0
| 0
| null | 2015-02-07T19:54:49
| 2015-02-07T19:54:49
| null |
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from attr import Attribute
from attr._make import NOTHING, make_class
def simple_class(no_cmp=True, no_repr=True, no_hash=True):
"""
Return a new simple class.
"""
return make_class(
"C", ["a", "b"],
no_cmp=no_cmp, no_repr=no_repr, no_hash=no_hash, no_init=False,
)
def simple_attr(name, default=NOTHING, validator=None, no_repr=False,
no_cmp=False, no_hash=False, no_init=False):
"""
Return an attribute with a name and no other bells and whistles.
"""
return Attribute(
name=name, default=default, validator=validator, no_repr=no_repr,
no_cmp=no_cmp, no_hash=no_hash, no_init=no_init
)
class TestSimpleClass(object):
"""
Tests for the testing helper function `make_class`.
"""
def test_returns_class(self):
"""
Returns a class object.
"""
assert type is simple_class().__class__
def returns_distinct_classes(self):
"""
Each call returns a completely new class.
"""
assert simple_class() is not simple_class()
|
[
"hs@ox.cx"
] |
hs@ox.cx
|
63abffd0d5f913554789ad7d511d77f209c117cc
|
6aa36fee3f4fcc9ac8f5509e51ea6bd8fc05b39b
|
/virtualenv-flask/lib/python2.7/site-packages/cybox/objects/win_task_object.py
|
6349e56f3ccb9792cf8b1ede28ff5f69ce019358
|
[] |
no_license
|
syn-ack-zack/msg-stix-parser
|
8c46c4d897d579162f224360a077ac42f28ffe89
|
1edb7c3b6d60f76f24b91830a1ae7076d46ede14
|
refs/heads/master
| 2021-03-27T15:01:07.344754
| 2016-09-30T16:43:22
| 2016-09-30T16:43:22
| 69,684,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,857
|
py
|
# Copyright (c) 2013, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.bindings.win_task_object as win_task_binding
from cybox.common import (Base64Binary, DateTime, Duration, HashList, Long,
ObjectProperties, String, UnsignedLong)
from cybox.objects.email_message_object import EmailMessage
class Trigger(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.TriggerType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
trigger_begin = cybox.TypedField("Trigger_Begin", DateTime)
trigger_delay = cybox.TypedField("Trigger_Delay", Duration)
trigger_end = cybox.TypedField("Trigger_End", DateTime)
trigger_frequency = cybox.TypedField("Trigger_Frequency", String)
trigger_max_run_time = cybox.TypedField("Trigger_Max_Run_Time", Duration)
trigger_session_change_type = cybox.TypedField(
"Trigger_Session_Change_Type", String)
#TODO: add Trigger_Type (see CybOXProject/schemas issue #76)
class TriggerList(cybox.EntityList):
_binding = win_task_binding
_binding_class = win_task_binding.TriggerListType
_binding_var = 'Trigger'
_contained_type = Trigger
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
class IComHandlerAction(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.IComHandlerActionType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
com_data = cybox.TypedField("COM_Data", String)
com_class_id = cybox.TypedField("COM_Class_ID", String)
class IExecAction(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.IExecActionType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
exec_arguments = cybox.TypedField("Exec_Arguments", String)
exec_program_path = cybox.TypedField("Exec_Program_Path", String)
exec_working_directory = cybox.TypedField("Exec_Working_Directory", String)
exec_program_hashes = cybox.TypedField("Exec_Program_Hashes", HashList)
class IShowMessageAction(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.IShowMessageActionType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
show_message_body = cybox.TypedField("Show_Message_Body", String)
show_message_title = cybox.TypedField("Show_Message_Title", String)
class TaskAction(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.TaskActionType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
action_type = cybox.TypedField("Action_Type", String)
action_id = cybox.TypedField("Action_ID", String)
iemailaction = cybox.TypedField("IEmailAction", EmailMessage)
icomhandleraction = cybox.TypedField("IComHandlerAction",
IComHandlerAction)
iexecaction = cybox.TypedField("IExecAction", IExecAction)
ishowmessageaction = cybox.TypedField("IShowMessageAction",
IShowMessageAction)
class TaskActionList(cybox.EntityList):
_binding = win_task_binding
_binding_class = win_task_binding.TaskActionListType
_binding_var = 'Action'
_contained_type = TaskAction
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
class WinTask(ObjectProperties):
_binding = win_task_binding
_binding_class = win_task_binding.WindowsTaskObjectType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
_XSI_NS = "WinTaskObj"
_XSI_TYPE = "WindowsTaskObjectType"
status = cybox.TypedField("Status", String)
priority = cybox.TypedField("Priority", String)
name = cybox.TypedField("Name", String)
application_name = cybox.TypedField("Application_Name", String)
parameters = cybox.TypedField("Parameters", String)
flags = cybox.TypedField("Flags", String)
account_name = cybox.TypedField("Account_Name", String)
account_run_level = cybox.TypedField("Account_Run_Level", String)
account_logon_type = cybox.TypedField("Account_Logon_Type", String)
creator = cybox.TypedField("Creator", String)
creation_date = cybox.TypedField("Creation_Date", DateTime)
most_recent_run_time = cybox.TypedField("Most_Recent_Run_Time", DateTime)
exit_code = cybox.TypedField("Exit_Code", Long)
max_run_time = cybox.TypedField("Max_Run_Time", UnsignedLong)
next_run_time = cybox.TypedField("Next_Run_Time", DateTime)
action_list = cybox.TypedField("Action_List", TaskActionList)
trigger_list = cybox.TypedField("Trigger_List", TriggerList)
comment = cybox.TypedField("Comment", String)
working_directory = cybox.TypedField("Working_Directory", String)
work_item_data = cybox.TypedField("Work_Item_Data", Base64Binary)
|
[
"nagaich@localhost.localdomain"
] |
nagaich@localhost.localdomain
|
6a89ebca4f0ef920b63c07807d9ea8970a5dca97
|
7ef2308e51d1d5700fbd092177ee15e2a03ebdd8
|
/DisasterCrawler/ZHNewsCrawlerPostgreSql/gooseker/gooseeker.py
|
95f8c3f6537f0b93bfdd207b0375084375a77bfa
|
[] |
no_license
|
STAWZW/STAWZW1.0
|
741002eb35c2883e5739fee8d14ff430e9622c01
|
a835ac27aba17f968116e321bd201b26c9fb3578
|
refs/heads/master
| 2020-07-21T20:21:59.753992
| 2019-09-26T09:21:28
| 2019-09-26T09:21:28
| 206,965,347
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,601
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# 模块名: gooseeker
# 类名: gsExtractor
# Version: 2.0
# 说明: html内容提取器
# 功能: 使用xslt作为模板,快速提取HTML DOM中的内容。
# released by 集搜客(http://www.gooseeker.com) on May 18, 2016
# github: https://github.com/FullerHua/jisou/core/gooseeker.py
from urllib import request
from urllib.parse import quote
from lxml import etree
import time
class GsExtractor(object):
def _init_(self):
self.xslt = ""
# 从文件读取xslt
def setXsltFromFile(self , xsltFilePath):
file = open(xsltFilePath , 'r' , encoding='UTF-8')
try:
self.xslt = file.read()
finally:
file.close()
# 从字符串获得xslt
def setXsltFromMem(self , xsltStr):
self.xslt = xsltStr
# 通过GooSeeker API接口获得xslt
def setXsltFromAPI(self , APIKey , theme, middle=None, bname=None):
apiurl = "http://www.gooseeker.com/api/getextractor?key="+ APIKey +"&theme="+quote(theme)
if (middle):
apiurl = apiurl + "&middle="+quote(middle)
if (bname):
apiurl = apiurl + "&bname="+quote(bname)
apiconn = request.urlopen(apiurl)
self.xslt = apiconn.read()
print(apiurl)
# 返回当前xslt
def getXslt(self):
return self.xslt
# 提取方法,入参是一个HTML DOM对象,返回是提取结果
def extract(self , html):
xslt_root = etree.XML(self.xslt)
transform = etree.XSLT(xslt_root)
result_tree = transform(html)
return result_tree
|
[
"1223868042@qq.com"
] |
1223868042@qq.com
|
901b405f4a2a51fd6ca9bfd5094110f8809a137e
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1_neat/16_0_1_ankso_problem1.py
|
abdcde8c10348975b61eb615936ae90fb286edb5
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 736
|
py
|
def returnList(n):
n = str(n)
digits = list(n)
return digits
def check(all):
stat = False
for i in range(10):
if str(i) in all:
stat = True
else:
stat = False
break
return stat
testCases = int(raw_input())
for i in range(testCases):
N = int(raw_input())
if N == 0:
print "Case #"+str(i+1)+": INSOMNIA"
else:
listOfNum = returnList(N)
j=1
while True:
if check(listOfNum):
print "Case #"+str(i+1)+": "+str(newNumber)
break
j = j+1
newNumber = N*j
listOfNum.extend(returnList(newNumber))
listOfNum = list(set(listOfNum))
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
d4529fd488e177eff6820f5688b7d6fd9790eab3
|
c43fbcb4442428e85616f664964d1e27ca396070
|
/runs/malte/simple/config.py
|
5d9a5f0f13ad31b04abdecbc9011127e24d6fd1c
|
[] |
no_license
|
megalut/megalut
|
ddac89a0dca70e13979d31b80d52233226233ade
|
63bd4bec8000ad13f4963d464d7b7b4d470a36ab
|
refs/heads/master
| 2020-04-15T00:33:42.815988
| 2018-09-11T08:45:48
| 2018-09-11T08:45:48
| 20,882,727
| 2
| 1
| null | 2018-09-11T08:45:49
| 2014-06-16T11:39:14
|
Python
|
UTF-8
|
Python
| false
| false
| 317
|
py
|
import megalut
import megalut.learn
import os
import numpy as np
import logging
#logging.basicConfig(level=logging.INFO)
logging.basicConfig(format='PID %(process)06d | %(asctime)s | %(levelname)s: %(name)s(%(funcName)s): %(message)s',level=logging.INFO)
workdir = "/vol/fohlen11/fohlen11_1/mtewes/simplewd/"
|
[
"malte.tewes@gmail.com"
] |
malte.tewes@gmail.com
|
f677ca474fb5707bca7e6923f812c0f9b03202fe
|
aa6e1dd07a71a73bc08574b76f9e57a3ce8c8286
|
/077.Test_BeeWare_windows/beeware-tutorial/beeware-venv/Lib/site-packages/pip/_internal/network/cache.py
|
a0d55b5e992a5f85890fc06703f33dc53995a17b
|
[
"MIT"
] |
permissive
|
IvanaXu/PyTools
|
0aff5982f50bb300bfa950405192c78473b69537
|
358ae06eef418fde35f424909d4f13049ca9ec7b
|
refs/heads/master
| 2023-06-07T21:45:44.242363
| 2023-06-06T16:00:25
| 2023-06-06T16:00:25
| 163,940,845
| 60
| 8
|
MIT
| 2022-12-23T02:49:05
| 2019-01-03T07:54:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
"""HTTP cache implementation.
"""
import os
from contextlib import contextmanager
from pip._vendor.cachecontrol.cache import BaseCache
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.requests.models import Response
from pip._internal.utils.filesystem import adjacent_tmp_file, replace
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Iterator
def is_from_cache(response):
# type: (Response) -> bool
return getattr(response, "from_cache", False)
@contextmanager
def suppressed_cache_errors():
# type: () -> Iterator[None]
"""If we can't access the cache then we can just skip caching and process
requests as if caching wasn't enabled.
"""
try:
yield
except (OSError, IOError):
pass
class SafeFileCache(BaseCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, directory):
# type: (str) -> None
assert directory is not None, "Cache directory must not be None."
super(SafeFileCache, self).__init__()
self.directory = directory
def _get_cache_path(self, name):
# type: (str) -> str
# From cachecontrol.caches.file_cache.FileCache._fn, brought into our
# class for backwards-compatibility and to avoid using a non-public
# method.
hashed = FileCache.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
def get(self, key):
# type: (str) -> Optional[bytes]
path = self._get_cache_path(key)
with suppressed_cache_errors():
with open(path, 'rb') as f:
return f.read()
def set(self, key, value):
# type: (str, bytes) -> None
path = self._get_cache_path(key)
with suppressed_cache_errors():
ensure_dir(os.path.dirname(path))
with adjacent_tmp_file(path) as f:
f.write(value)
replace(f.name, path)
def delete(self, key):
# type: (str) -> None
path = self._get_cache_path(key)
with suppressed_cache_errors():
os.remove(path)
|
[
"1440420407@qq.com"
] |
1440420407@qq.com
|
b04538155bd3cd73f2f1271087a0b63e9be949e1
|
b6303baeaa840671f1ea747d47c905779a07ffce
|
/edital/migrations/0015_auto_20210928_1833.py
|
9a7475e433d719627343e80ac38fcc3a631bb3c5
|
[] |
no_license
|
amarantejoacil/sisnae
|
89954ef9e837799750dc56274ec1207e6d39daef
|
90e237a41e698cda357b8f555fbb0649f16a78b3
|
refs/heads/main
| 2023-08-24T23:06:47.628428
| 2021-10-27T16:26:12
| 2021-10-27T16:26:12
| 401,503,074
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 739
|
py
|
# Generated by Django 3.2.6 on 2021-09-28 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('edital', '0014_auto_20210907_2050'),
]
operations = [
migrations.AddField(
model_name='edital',
name='edital_quantidade_vaga',
field=models.IntegerField(default=100, verbose_name='Quantidade de vaga'),
preserve_default=False,
),
migrations.AddField(
model_name='edital',
name='edital_valor_auxilio',
field=models.DecimalField(decimal_places=2, default=100, max_digits=8, verbose_name='Valor do auxílio'),
preserve_default=False,
),
]
|
[
"joacil.amarante@gmail.com"
] |
joacil.amarante@gmail.com
|
768fff722cf0d2f12f0a7428a500a54db6db3a92
|
02952fc67147a2f11a9ed8c4eb29210bec5672ed
|
/business/service/urls/polardb.py
|
239025e7fe9457d9d1dfc284c34ba4bc1af18f10
|
[] |
no_license
|
cuijianzhe/cow
|
b110a70398b09a401dadc7d3ed24dfe2bae50f5b
|
3539cab6e73571f84b7f17391d9a363a756f12e1
|
refs/heads/main
| 2023-06-04T10:33:33.975885
| 2021-06-19T10:40:36
| 2021-06-19T10:40:36
| 340,634,448
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
from django.urls import path
from business.service.apis import polardb as polardb_api
urlpatterns = [
path('service/polardb/create/', polardb_api.CreateServicePolarDBApi.as_view()),
path('service/polardb/delete/', polardb_api.DeleteServicePolarDBApi.as_view()),
path('service/polardb/list/', polardb_api.ListServicePolarDBApi.as_view()),
]
|
[
"598941324@qq.com"
] |
598941324@qq.com
|
bffd945b3e55d605e9bdc96d37c366719e574dc5
|
32cb0be487895629ad1184ea25e0076a43abba0a
|
/LifePictorial/top/api/rest/CrmGroupsGetRequest.py
|
fdf3e997d2a6a1433995e490178adfa406e9a607
|
[] |
no_license
|
poorevil/LifePictorial
|
6814e447ec93ee6c4d5b0f1737335601899a6a56
|
b3cac4aa7bb5166608f4c56e5564b33249f5abef
|
refs/heads/master
| 2021-01-25T08:48:21.918663
| 2014-03-19T08:55:47
| 2014-03-19T08:55:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
'''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class CrmGroupsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.current_page = None
self.page_size = None
def getapiname(self):
return 'taobao.crm.groups.get'
|
[
"poorevil@gmail.com"
] |
poorevil@gmail.com
|
d3987022176ead2e9f190e5c0da47c1505c6fba0
|
dfdecc0f91c6fa0319325561ed0a20f8544f0312
|
/test.py
|
4b61a3c63775e29a496c1734d0afc1a30b4e6eeb
|
[] |
no_license
|
ShichaoMa/MultiThreadClosing
|
c3807047938329a8655d65dc011173c16375240c
|
43b556d9ee6a6ae11f1481675b822b2660a7c36b
|
refs/heads/master
| 2021-01-20T19:27:04.450710
| 2017-12-09T08:58:15
| 2017-12-09T08:58:15
| 64,533,341
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
import time
from threading import Thread
from multi_thread_closing import MultiThreadClosing
class Test(MultiThreadClosing):
name = "test_thread"
def start(self):
t1 = Thread(target=self.process)
t2 = Thread(target=self.process)
self.threads.append(t1)
self.threads.append(t2)
t1.start()
t2.start()
while filter(lambda x:x.is_alive(), self.threads):
print "main %s.."%self.alive
time.sleep(1)
def process(self):
while self.alive:
for i in range(20):
print i
time.sleep(3)
if __name__ == "__main__":
t = Test()
t.set_logger()
t.start()
|
[
"308299269@qq.com"
] |
308299269@qq.com
|
878f78437dc5e1bec4b5c66bd1443295fcebfb4e
|
bcf678908eb3e26f6172265406bfaaa7129f6b18
|
/Blog/myapp/views.py
|
64d2228c228fc572bf4ed3eb5100262b4f3071d9
|
[] |
no_license
|
loganjoon/0713-Blog
|
935cbd75c8682ff6bc6841bc414ad0db3225a917
|
71494795515753b6a354e1b93ed57858e852a4a5
|
refs/heads/master
| 2022-11-17T00:09:40.770351
| 2020-07-13T02:31:34
| 2020-07-13T02:31:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
from django.shortcuts import render
from .models import BlogFrame
from django.shortcuts import render, get_object_or_404
def main(request):
blogs = BlogFrame.objects
return render(request, 'main.html',{'blogs':blogs})
def detail(request, blog_id):
blog_detail = get_object_or_404(BlogFrame, pk=blog_id)
return render(request, '/detail.html', {'blogdetail': blog_detail})
# Create your views here.
|
[
"you@example.com"
] |
you@example.com
|
5649862f39c4121adba3f3cf54160b5251b6ff8e
|
242da8865e037f9fffb76269c3acddb73ce9fa14
|
/packages/pyright-internal/src/tests/samples/forLoop1.py
|
6f5ced2b691c9f7c57d066e0809a9261e765695a
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
khyveasna11111908/pyright
|
f42eceae044f6fbc27552c1765b03ebd345a451c
|
493d47807b96137995e4bb6ca341930e4de911f9
|
refs/heads/main
| 2023-08-30T00:08:36.191799
| 2021-09-25T19:17:13
| 2021-09-25T19:17:13
| 410,361,483
| 1
| 1
|
NOASSERTION
| 2021-09-25T19:15:23
| 2021-09-25T19:15:22
| null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
# This sample tests 'for' operations (both simple for loops
# and list comprehension for loops).
from typing import AsyncIterator, List, Iterator
def requires_int(val: int):
pass
list1 = [1, 2, 3] # type: List[int]
for a in list1:
requires_int(a)
int1 = 1
# This should generate an error because
# an int type is not iterable.
for foo1 in int1:
pass
async def func1():
# This should generate an error because
# list1 isn't an async iterator.
async for foo2 in list1:
requires_int(foo2)
class AsyncIterable1(object):
def __aiter__(self):
return self
async def __anext__(self):
return 1
iter1 = AsyncIterable1()
async def func2():
async for foo3 in iter1:
requires_int(foo3)
for d in [b for b in list1]:
requires_int(d)
for e in [b async for b in iter1]:
requires_int(e)
class ClassWithGetItem(object):
def __getitem__(self, item) -> str:
return "hello"
def testGetItemIterator() -> str:
objWithGetItem = ClassWithGetItem()
for f in objWithGetItem:
return f
return "none"
# This should generate a syntax error.
for in range(3):
pass
|
[
"erictr@microsoft.com"
] |
erictr@microsoft.com
|
484234961357522c403302d254ccabdc4df0e383
|
f3f10bb0ec28489d3111c72ce9811b01fa629d64
|
/setup.py
|
ada05e6131d7ef1e7ee185a5fae1c8a5dfe88d3b
|
[
"BSD-2-Clause"
] |
permissive
|
gitter-badger/labscript
|
db0e6f1a0c49a78f6dc08efea8607bce499a26a4
|
26f68923c71a56d84e19ae2ab894d2f4d6bdd9b4
|
refs/heads/master
| 2022-04-26T02:40:36.586340
| 2020-05-02T17:33:28
| 2020-05-02T17:33:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
# USAGE NOTES
#
# Make a PyPI release tarball with:
#
# python setup.py sdist
#
# Upload to test PyPI with:
#
# twine upload --repository-url https://test.pypi.org/legacy/ dist/*
#
# Install from test PyPI with:
#
# pip install --index-url https://test.pypi.org/simple/ labscript
#
# Upload to real PyPI with:
#
# twine upload dist/*
#
# Build conda packages for all platforms (in a conda environment with setuptools_conda
# installed) with:
#
# python setup.py dist_conda
#
# Upoad to your own account (for testing) on anaconda cloud (in a conda environment with
# anaconda-client installed) with:
#
# anaconda upload --skip-existing conda_packages/*/*
#
# (Trickier on Windows, as it won't expand the wildcards)
#
# Upoad to the labscript-suite organisation's channel on anaconda cloud (in a
# conda environment with anaconda-client installed) with:
#
# anaconda upload -u labscript-suite --skip-existing conda_packages/*/*
#
# If you need to rebuild the same version of the package for conda due to a packaging
# issue, you must increment CONDA_BUILD_NUMBER in order to create a unique version on
# anaconda cloud. When subsequently releasing a new version of the package,
# CONDA_BUILD_NUMBER should be reset to zero.
import os
from setuptools import setup
try:
from setuptools_conda import dist_conda
except ImportError:
dist_conda = None
SETUP_REQUIRES = ['setuptools', 'setuptools_scm']
INSTALL_REQUIRES = [
"labscript_utils >=2.14.0",
"numpy >=1.15",
"scipy",
"matplotlib",
]
setup(
name='labscript',
use_scm_version=True,
description="The labscript compiler",
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='The labscript suite community',
author_email='labscriptsuite@googlegroups.com ',
url='http://labscriptsuite.org',
license="BSD",
packages=["labscript"],
zip_safe=False,
setup_requires=SETUP_REQUIRES,
include_package_data=True,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5",
install_requires=INSTALL_REQUIRES if 'CONDA_BUILD' not in os.environ else [],
cmdclass={'dist_conda': dist_conda} if dist_conda is not None else {},
command_options={
'dist_conda': {
'pythons': (__file__, ['3.6', '3.7', '3.8']),
'platforms': (__file__, ['linux-64', 'win-32', 'win-64', 'osx-64']),
'force_conversion': (__file__, True),
},
},
)
|
[
"chrisjbillington@gmail.com"
] |
chrisjbillington@gmail.com
|
efa84b7b252d3f011527c3e5a96bab39d82863ad
|
c817d8c3daf2ea79dc02a2e624e49c2fd556007d
|
/audit/models.py
|
40eea1974af60239d983e11b9cab78dd9c239773
|
[] |
no_license
|
DUMBALINYOLO/stats-filtering
|
7a3d1ccd52527031a66946cdb06286a244be0b1f
|
64d62f84bcfb465cb8721cdbfbb00fe034ac9893
|
refs/heads/master
| 2023-03-17T11:09:17.522663
| 2021-03-12T12:01:16
| 2021-03-12T12:01:16
| 347,049,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
from django.db import models
class AuditLog(models.Model):
timestamp = models.DateTimeField(auto_now=True)
user = models.CharField(max_length=50, null=False, blank=False)
user_ip = models.CharField(max_length=100, null=False, blank=False)
action_name = models.CharField(max_length=20, null=False, blank=False)
table_name = models.CharField(max_length=50, null=True, blank=True)
task_name = models.CharField(max_length=50, null=True, blank=True)
action_details = models.CharField(max_length=200, null=True, blank=True)
data = models.TextField(null=True, blank=True)
def __str__(self):
return str(self.timestamp)+'_'+self.user
|
[
"baridzimaximillem@gmail.com"
] |
baridzimaximillem@gmail.com
|
0bce70d10cc3aaf768ca97f81cc8c150bf7dc968
|
e5483ab737acd9fb222f0b7d1c770cfdd45d2ba7
|
/ecommerce/core/migrations/0019_auto_20200617_1118.py
|
ac79742a44b8082f258a0b47704601705075a955
|
[] |
no_license
|
mxmaslin/otus_web
|
6c1e534047444d7a1fc4cd1bf8245c25d9fc4835
|
b90ad69e1b5c1828fa2ace165710422d113d1d17
|
refs/heads/master
| 2022-12-09T19:52:58.626199
| 2020-07-07T19:15:52
| 2020-07-07T19:15:52
| 226,154,128
| 1
| 1
| null | 2022-12-08T03:23:10
| 2019-12-05T17:25:11
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
# Generated by Django 2.2.12 on 2020-06-17 08:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0018_coupon_refund_userprofile'),
]
operations = [
migrations.AlterModelOptions(
name='coupon',
options={'verbose_name': 'Купон', 'verbose_name_plural': 'Купоны'},
),
migrations.AlterModelOptions(
name='refund',
options={'verbose_name': 'Возврат', 'verbose_name_plural': 'Возвраты'},
),
migrations.AlterModelOptions(
name='userprofile',
options={'verbose_name': 'Профиль пользователя', 'verbose_name_plural': 'Профили пользователей'},
),
migrations.AlterField(
model_name='address',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
),
migrations.AlterField(
model_name='coupon',
name='amount',
field=models.FloatField(verbose_name='Скидка'),
),
migrations.AlterField(
model_name='coupon',
name='code',
field=models.CharField(max_length=15, verbose_name='Код'),
),
migrations.AlterField(
model_name='refund',
name='accepted',
field=models.BooleanField(default=False, verbose_name='Выполнен'),
),
migrations.AlterField(
model_name='refund',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Order', verbose_name='Заказ'),
),
migrations.AlterField(
model_name='refund',
name='reason',
field=models.TextField(verbose_name='Причина'),
),
migrations.AlterField(
model_name='userprofile',
name='one_click_purchasing',
field=models.BooleanField(default=False, verbose_name='Покупка в один клик'),
),
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
),
]
|
[
"zapzarap@yandex.ru"
] |
zapzarap@yandex.ru
|
cfa5f4341bf4ff6482ca10400733edefed6df658
|
f3693916a8b118bf139364604dac3f51235ed613
|
/functional/Components/Clients/Clients_GET/test_TC_42892_Clients_GET_Invalid_Page_Size.py
|
8e24a8ec90dc24677d7dd10d3374b3b4a548f8b0
|
[] |
no_license
|
muktabehera/QE
|
e7d62284889d8241d22506f6ee20547f1cfe6db1
|
3fedde591568e35f7b80c5bf6cd6732f8eeab4f8
|
refs/heads/master
| 2021-03-31T02:19:15.369562
| 2018-03-13T02:45:10
| 2018-03-13T02:45:10
| 124,984,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,684
|
py
|
# -*- coding: UTF-8 -*-
"""PFE Component Tests - Clients.
* TC-42892 - Clients GET:
Verify that 20 records is displayed on providing 'page-size' value as 0 for 'page' parameter using request GET /clients.
Equivalent test CURL command:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X GET -H "Content-Type: application/json"
"<PF_host>://<client_host>/clients?page=1;0"
Same, with test data:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X GET -H "Content-Type: application/json"
"<PF_host>://<client_host>/clients?page=1;0"
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.components
@pytest.allure.story('Clients')
@pytest.allure.feature('GET')
class Test_PFE_Components(object):
"""PFE Clients test cases."""
@pytest.allure.link('https://jira.qumu.com/browse/TC-42892')
@pytest.mark.Clients
@pytest.mark.GET
def test_TC_42892_GET_Clients_Invalid_Page_Size(self, context):
"""TC-42892 - Clients-GET
Verify that 20 records is displayed on providing 'page-size' value as 0 for 'page' parameter using request GET /clients."""
# Define a test step
with pytest.allure.step("""Verify that 20 records is displayed on providing 'page-size' value as 0 for 'page' parameter using request GET /clients."""):
# listEntities the Clients.
# The `check` call validates return code
# and some of the swagger schema.
# Most schema checks are disabled.
check(
context.cl.Clients.listEntities(
page='1;0')
)
|
[
"mbehera@qumu.com"
] |
mbehera@qumu.com
|
4b1816e64d86e656e29c4d7e8747cabafc9b5f74
|
4a36b5979b0753b32cff3956fd97fb8ed8b11e84
|
/1.0/_downloads/a783c0b285deabf61a1ae7035b88256a/cluster_stats_evoked.py
|
30ed1d1a078ff13647503661206530566df17338
|
[] |
permissive
|
mne-tools/mne-tools.github.io
|
8aac7ae10bf2faeeb875b9a351a5530dc0e53154
|
495e878adc1ef3374e3db88604504d7542b01194
|
refs/heads/main
| 2023-09-03T07:06:00.660557
| 2023-09-03T04:10:18
| 2023-09-03T04:10:18
| 35,639,371
| 12
| 16
|
BSD-3-Clause
| 2023-05-05T19:04:32
| 2015-05-14T22:04:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,790
|
py
|
# -*- coding: utf-8 -*-
"""
.. _ex-cluster-evoked:
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. Multiple comparison problem is addressed
with cluster level permutation test.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
# %%
# Set parameters
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
# %%
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
# %%
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=1,
out_type='mask')
# %%
# Plot
times = epochs1.times
fig, (ax, ax2) = plt.subplots(2, 1, figsize=(8, 4))
ax.set_title('Channel : ' + channel)
ax.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
ax.set_ylabel("MEG (T / m)")
ax.legend()
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = ax2.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
ax2.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
ax2.legend((h, ), ('cluster p-value < 0.05', ))
ax2.set_xlabel("time (ms)")
ax2.set_ylabel("f-values")
|
[
"dan@mccloy.info"
] |
dan@mccloy.info
|
b21cc60288a12a525d33281ba13def79fd81b34a
|
597c4f48332251552a602122bb3d325bc43a9d7f
|
/etc/chapter09_stack_old/implement/04_empty.py
|
aacff5442075e0c2020872af886861589bfe5559
|
[] |
no_license
|
Kyeongrok/python_algorithm
|
46de1909befc7b17766a57090a7036886361fd06
|
f0cdc221d7908f26572ae67b5c95b12ade007ccd
|
refs/heads/master
| 2023-07-11T03:23:05.782478
| 2023-06-22T06:32:31
| 2023-06-22T06:32:31
| 147,303,654
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
class Stack1():
arr = []
last_index = 0
def __init__(self, size=10000):
self.arr = [None] * size
def push(self, value):
self.arr[self.last_index] = value
self.last_index += 1
def pop(self):
value = self.arr[self.last_index - 1]
self.last_index -= 1
return value
st = Stack1()
print(st.pop())
from _collections import deque
[].pop()
|
[
"oceanfog1@gmail.com"
] |
oceanfog1@gmail.com
|
b34a8a5649c8d6340f7eb3cfb2c1d166f74f221b
|
33736b585caa659ac4a5a8a1ac52df50bdf71f1b
|
/py_solution/5_SMS.py
|
53e953b2191981118abe61d67c0f78617db28fe7
|
[] |
no_license
|
oliverhuangchao/epic_interview
|
3d649fadab0728c629bfe9d8cc14b9045a593385
|
4cfdbc0b83e13e7552633e566b3ddbb4a250a6a0
|
refs/heads/master
| 2021-01-10T22:29:37.663863
| 2015-07-17T18:55:42
| 2015-07-17T18:55:42
| 38,897,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
# SMS
# You are given a telephone keyboard
# 0-0, 1-1, 2-ABC2, 3-DEF3, 4-GHI4, 5-JKL5, 6-MNO6,7-PQRS7, 8-TUV8, 9-WXYZ9, *-space,
# #-char separater
# if you type "2", you will get 'A', that is "2"-'A', "22"-'B' ,"222"-'C', "2222"-'D'
# However, the digits can repeated many times
# "22222"-you get 'A' again . Waral
# You can use "#" to separate characters, for example
# "2#22", you get "AB" .
# However, you may also have consecutive different digits without separator:"23"-'AD'
# If you type "*", it means space.
# You a given a sequence of digits, translate it into a text message
import string
#prepare at the begining
ori = {0:'0', 1:'1', 2:'ABC2', 3:'DEF3', 4:'GHI4', 5:'JKL5', 6:'MNO6',7:'PQRS7', 8:'TUV8', 9:'WXYZ9'}
all = string.ascii_uppercase
newdict = dict()
for i in ori:
newdict[str(i)] = ori[i][-1]
for i in range(2,10):
count = 1
for j in ori[i][:-1]:
newdict[str(i)*count] = j#[str(i) for k in range(count)]
count+=1
def transform(newdict,inputstring):
words = inputstring.split("*")
res = ""
for item in words:
z = item.split("#")
for each in z:
if each in newdict:
res += newdict[each]
else:
x = each[0]
for i in range(1,len(each)):
if each[i] != x:
res += newdict[each[:i]]
res += newdict[each[i:]]
x = each[i]
res += " "
print res
inputstring = "12*322#2*33"
print inputstring
transform(newdict, inputstring)
|
[
"chaoh@g.clemson.edu"
] |
chaoh@g.clemson.edu
|
5e405561491c9f8ec9865d157896c876e026bf58
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/mhmmik002/question2.py
|
f65b3961a2a06e9df45c62d5bc009ab70df32d71
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,274
|
py
|
print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
x=input("Did anyone see you? (yes/no)\n")
if (x) == "yes":
b=input("Was it a boss/lover/parent? (yes/no)\n")
if (b) == "no":
print("Decision: Eat it.")
else:
a=input("Was it expensive? (yes/no)\n")
if (a) == "yes":
s=input("Can you cut off the part that touched the floor? (yes/no)\n")
if (s) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Your call.")
else:
f=input("Is it chocolate? (yes/no)\n")
if (f) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Don't eat it.")
else:
z=input("Was it sticky? (yes/no)\n")
if (z) == "yes":
v=input("Is it a raw steak? (yes/no)\n")
if (v) == "yes":
m=input("Are you a puma? (yes/no)\n")
if (m) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Don't eat it.")
else:
n=input("Did the cat lick it? (yes/no)\n")
if (n) == "yes":
g=input("Is your cat healthy? (yes/no)\n")
if (g) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Your call.")
else:
print("Decision: Eat it.")
else:
c=input("Is it an Emausaurus? (yes/no)\n")
if (c) == "yes":
d=input("Are you a Megalosaurus? (yes/no)\n")
if (d) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Don't eat it.")
else:
n=input("Did the cat lick it? (yes/no)\n")
if (n) == "yes":
g=input("Is your cat healthy? (yes/no)\n")
if (g) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Your call.")
else:
print("Decision: Eat it.")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
b5b6b93910e5075aaefa207e44ac09ac7a47bada
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_89/41.py
|
4cb3ead2a2dc396c5a937e620544e8867f8ae5e0
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
#!/usr/bin/env python
primes=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997]
from math import log
from sys import stdin
for case in range(1, int(stdin.readline())+1):
N=int(stdin.readline())
if N==1: n=0
else:
n=1
for p in primes:
if p<=N:
n+=int(log(N,p))-1
else: break
print "Case #%d:"%case, n
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0cbeaf2442721ecc71a0cd8158504cac1b4e4f47
|
eade1861db1968645e0e17dfaa5250a4b8245b98
|
/instacart/faron.py
|
bf8a14a8e3fb487d4321cee3fb9b8eb7eb4a4b08
|
[] |
no_license
|
piupiuup/competition
|
5b5da56fed336e07cf99cef8f5bfe89a8f771900
|
076c30df3d2647cb3580c543e604375e84590ca7
|
refs/heads/master
| 2022-09-30T14:47:01.244084
| 2020-05-30T12:56:02
| 2020-05-30T12:56:02
| 268,074,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,764
|
py
|
"""
@author: Faron
"""
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import multiprocessing
from datetime import datetime
'''
Calculates (user, product) order_streak for the last n orders.
- abs(order_streak) is length of streak
- sgn(order_streak) encodes type of streak (non-ordered vs ordered)
'''
DATA_DIR = "../input/"
PRIOR_FILE = "order_products__prior"
ORDERS_FILE = "orders"
def load_input_data():
PATH = "{}{}{}".format(DATA_DIR, PRIOR_FILE, ".csv")
prior = pd.read_csv(PATH, dtype={'order_id': np.int32,
'product_id': np.uint16,
'add_to_cart_order': np.int16,
'reordered': np.int8})
PATH = "{}{}{}".format(DATA_DIR, ORDERS_FILE, ".csv")
orders = pd.read_csv(PATH, dtype={'order_id': np.int32,
'user_id': np.int64,
'order_number': np.int16,
'order_dow': np.int8,
'order_hour_of_day': np.int8,
'days_since_prior_order': np.float32})
return prior, orders
def apply_parallel(df_groups, _func):
nthreads = multiprocessing.cpu_count() >> 1
print("nthreads: {}".format(nthreads))
res = Parallel(n_jobs=nthreads)(delayed(_func)(grp.copy()) for _, grp in df_groups)
return pd.concat(res)
def add_order_streak(df):
tmp = df.copy()
tmp.user_id = 1
UP = tmp.pivot(index="product_id", columns='order_number').fillna(-1)
UP.columns = UP.columns.droplevel(0)
x = np.abs(UP.diff(axis=1).fillna(2)).values[:, ::-1]
df.set_index("product_id", inplace=True)
df['order_streak'] = np.multiply(np.argmax(x, axis=1) + 1, UP.iloc[:, -1])
df.reset_index(drop=False, inplace=True)
return df
if __name__ == '__main__':
prior, orders = load_input_data()
print("orders: {}".format(orders.shape))
print("take only recent 5 orders per user:")
orders = orders.groupby(['user_id']).tail(5 + 1)
print("orders: {}".format(orders.shape))
prior = orders.merge(prior, how='inner', on="order_id")
prior = prior[['user_id', 'product_id', 'order_number']]
print("prior: {}".format(prior.shape))
user_groups = prior.groupby('user_id')
s = datetime.now()
df = apply_parallel(user_groups, add_order_streak)
e = datetime.now()
print("time elapsed: {}".format(e - s))
df = df.drop("order_number", axis=1).drop_duplicates().reset_index(drop=True)
df = df[['user_id', 'product_id', 'order_streak']]
print(df.head(n=10))
df.to_csv("order_streaks.csv", index=False)
print("order_streaks.csv has been written")
|
[
"278044960@qq.com"
] |
278044960@qq.com
|
aeb73fa853bdfc044c427f1d12e75525607b2690
|
b7d766db43e1857bc1c886bbffa01817d201fb2e
|
/Algorithm PS/이것이 취업을 위한 코딩테스트다/Chapter 11 그리디 문제/볼링공 고르기.py
|
7b772fe7bd59365f6202c61571944cf8694f0793
|
[] |
no_license
|
Jongminfire/Python
|
ae4010b23b60b59cddd837344784ef9da33d1b1d
|
11219310cd13c18647c3220b89878c25fdc98922
|
refs/heads/main
| 2023-07-27T20:27:12.612883
| 2021-09-10T08:05:01
| 2021-09-10T08:05:01
| 307,398,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
n,m = map(int,input().split())
ball = list(map(int,input().split()))
# 중복을 제외한 종류를 얻기 위해 (문제에서는 m이 10이하 이므로 list로 선언해도 됨)
s = set(ball)
answer = 0
# 중복이 하나도 없는 경우 계산
for i in range(1,n):
answer += i
# 중복된 만큼 빼주기
for i in s:
answer -= ball.count(i)-1
print(answer)
|
[
"51112542+Jongminfire@users.noreply.github.com"
] |
51112542+Jongminfire@users.noreply.github.com
|
1b1ff1573ecfd049b15a8a82ced9916ca5a8548e
|
cd127231a354bf7a299667e65cbd83265988be7f
|
/COMPANIES/ness/ex.py
|
f54d1ef46bca7fff2e8525f57769802775ccf1a2
|
[] |
no_license
|
nagireddy96666/Interview_-python
|
de96c8d2dfd56343351bd0039adad561e79aac1a
|
6789d200ded60575682f467c880990937e4d4f0f
|
refs/heads/master
| 2020-03-10T01:53:17.185819
| 2018-04-11T16:22:05
| 2018-04-11T16:22:05
| 129,121,777
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
>>> x=(1,2,(4,5,6,7,8))
>>> x[2][1:4]
(5, 6, 7)
>>> "".join([str(i) for i in x[2][1:4]])
'567'
>>> s="apple banna and apple banna "
>>> s.count('apple')
2
>>> s.count('banna')==s.count('apple')
True
>>> l=['apple',['banna','apple']]
>>> l.count('apple')
1
>>> set(l)
Traceback (most recent call last):
File "<pyshell#8>", line 1, in <module>
set(l)
TypeError: unhashable type: 'list'
>>> i='apple'
>>> list(l)
['apple', ['banna', 'apple']]
>>> for i in l:
if i=="apple:"
SyntaxError: invalid syntax
>>> for i in l:
if i=="apple":
count+=1
else:
x=i.count('apple')
count+=x
Traceback (most recent call last):
File "<pyshell#18>", line 3, in <module>
count+=1
NameError: name 'count' is not defined
>>> count=0
>>> for i in l:
if i=="apple":
count+=1
else:
x=i.count('apple')
count+=x
>>> print count
2
>>> count=0
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
print count
SyntaxError: invalid syntax
>>>
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
print count
SyntaxError: invalid syntax
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
>>>
>>> count
2
>>> l=['apple','banaba',['apple','banan']]
>>> count=0
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
>>> count
2
>>> l=['banaba',['apple','banan']]
>>> count=0
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
>>> count
1
>>>
|
[
"nagireddy96666@gmail.com"
] |
nagireddy96666@gmail.com
|
e80898bbcbe582829b80d0cba3f32816f4b4f2e6
|
15102eb2c657a296eb00821dc378225b79fbc17e
|
/Homework/venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/__init__.py
|
719f41d61f65e2d4064afc9d24e406f6c2af3e92
|
[] |
no_license
|
yuju13488/pyworkspace
|
746446b3573fa6241d979b205e964e7d52af009b
|
0c77836185237450ee446542e6ff3856c7cd7de1
|
refs/heads/master
| 2020-08-02T03:56:55.577735
| 2019-10-04T05:50:56
| 2019-10-04T05:50:56
| 211,226,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,607
|
py
|
"""A collection of modules for building different kinds of trees from HTML
documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1. A set of classes for various types of elements: Document, Doctype, Comment,
Element. These must implement the interface of ``base.treebuilders.Node``
(although comment nodes have a different signature for their constructor,
see ``treebuilders.etree.Comment``) Textual content may also be implemented
as another node type, or not, as your tree implementation requires.
2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits
from ``treebuilders.base.TreeBuilder``. This has 4 required attributes:
* ``documentClass`` - the class_hw to use for the bottommost node of a document
* ``elementClass`` - the class_hw to use for HTML Elements
* ``commentClass`` - the class_hw to use for comments
* ``doctypeClass`` - the class_hw to use for doctypes
It also has one required method:
* ``getDocument`` - Returns the root node of the complete document tree
3. If you wish to run the unit tests, you must also create a ``testSerializer``
method on your treebuilder which accepts a node and returns a string
containing Node and its children serialized according to the format used in
the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from .._utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class_hw for various types of trees with built-in support
:arg treeType: the name of the tree type required (case-insensitive). Supported
values are:
* "dom" - A generic builder for DOM implementations, defaulting to a
xml.dom.minidom based implementation.
* "etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to xml.etree.cElementTree if
available and xml.etree.ElementTree if not.
* "lxml" - A etree-based builder for lxml.etree, handling limitations
of lxml's implementation.
:arg implementation: (Currently applies to the "etree" and "dom" tree
types). A module implementing the tree type e.g. xml.etree.ElementTree
or xml.etree.cElementTree.
:arg kwargs: Any additional options to pass to the TreeBuilder when
creating it.
Example:
>>> from html5lib.treebuilders import getTreeBuilder
>>> builder = getTreeBuilder('etree')
"""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
|
[
"shiyoo123@hotmail.com"
] |
shiyoo123@hotmail.com
|
6a8fd64fa290a4515022aa9b4be3f29099b8f7b8
|
537e2be29992f8bfd3fb2797003102f4e79f5f9f
|
/scripts/seq-composition
|
617c244546da86e1baf3cf510e368eabb5095e37
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
etal/biofrills
|
a0cf45700abbda865f71d55030717dee4d769446
|
36684bb6c7632f96215e8b2b4ebc86640f331bcd
|
refs/heads/master
| 2020-06-01T16:29:41.540511
| 2013-10-21T23:01:19
| 2013-10-21T23:01:19
| 5,113,363
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
#!/usr/bin/env python
"""Print the frequencies of each letter in a sequence set."""
# TODO - move the calculation to module (take input stream, return freqs)
import fileinput
from collections import Counter
# Count character types
counts = Counter()
for line in fileinput.input():
if line.startswith('>') or not line.strip():
continue
counts.update(Counter(line.strip()))
# Convert counts to frequencies
scale = 1.0 / sum(counts.values())
freqs = dict((char, cnt * scale)
for char, cnt in counts.iteritems())
# Print a nice table
for char, frq in sorted(freqs.iteritems()):
print '%s: %f' % (char, frq)
|
[
"eric.talevich@gmail.com"
] |
eric.talevich@gmail.com
|
|
7362e34f612448e62b39d7ee13d6f41730354825
|
ba27372850fd287f4e268f486103afb797c7f4f4
|
/setup.py
|
286ff3234f0365f85c60fc77027909c3e6576437
|
[
"BSD-3-Clause"
] |
permissive
|
django-blog-zinnia/feed2zinnia
|
9702a3b177f16009ac49907b2298f98243fab374
|
ec1a5e44f6175dab248e2f4f9ba3f9ecb2800e6b
|
HEAD
| 2016-09-16T05:26:56.676013
| 2015-01-15T11:19:01
| 2015-01-15T11:19:01
| 29,293,499
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,092
|
py
|
"""Setup script of feed2zinnia"""
from setuptools import setup
from setuptools import find_packages
import zinnia_feed
setup(
name='feed2zinnia',
version=zinnia_feed.__version__,
description='Import your RSS or Atom feed into Zinnia',
long_description=open('README.rst').read(),
keywords='django, zinnia, feed, rss, atom',
author=zinnia_feed.__author__,
author_email=zinnia_feed.__email__,
url=zinnia_feed.__url__,
packages=find_packages(exclude=['demo_zinnia_feed']),
classifiers=[
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries :: Python Modules'],
license=zinnia_feed.__license__,
include_package_data=True,
zip_safe=False,
install_requires=['feedparser>=5.1.3']
)
|
[
"fantomas42@gmail.com"
] |
fantomas42@gmail.com
|
177e721a596ee080d3343228f66a65ecd4fa0724
|
dc965a62709bbb2c6c9ad01859a83507d7457941
|
/Assignments/Class Assignments/AutoGenerateClass.py
|
ee53ba982de479f56ddc5aeb651099442d698a61
|
[] |
no_license
|
JyotiSathe/Python
|
ead31a84cde86d734acdf0ad83c27c6bb1c1a331
|
846371d678ba225c210493605233b262a51bd950
|
refs/heads/master
| 2021-05-11T22:38:30.299035
| 2018-06-24T14:08:37
| 2018-06-24T14:08:37
| 117,364,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
class AutoGenerate:
def __init__(self,start,stop,step=1):
self.start=start
self.stop=stop
self.step=step
def Next(self):
self.start+=self.step
if self.start>=self.stop:
raise StopIteration
yield self.start
def next(self):
return self.Next().next()
def __next__(self):
return self.Next().__next__()
#def __iter__(self):
# return self
def main():
x=AutoGenerate(0,100,5)
#for sets iterator so if need to give for needs to have iter method
#for y in x:
# print y
y=x.next()
print (y)
if __name__=='__main__':
main()
|
[
"noreply@github.com"
] |
JyotiSathe.noreply@github.com
|
a9dfdb93f377c71c1655c5383fe4d557af7f730b
|
6758974fd7046a3947f1387a788cfebe7ac85b22
|
/BilibiliUpVideosDownloader/BilibiliUpVideosDownload.py
|
d5eba781595ae6be622bd83dbf99b1ad88cb45dd
|
[] |
no_license
|
HkerVit/facebook-api-video-upload-py
|
d85ec55c3e7adacaf094b8440111ccdb8d065a6f
|
6bcf0f4c08512c5b3896c6f61a80de66c0a59744
|
refs/heads/main
| 2023-03-16T22:42:48.137863
| 2021-03-11T03:49:23
| 2021-03-11T03:49:23
| 346,636,830
| 1
| 0
| null | 2021-03-11T08:51:47
| 2021-03-11T08:51:46
| null |
UTF-8
|
Python
| false
| false
| 2,444
|
py
|
import sys, getopt
import requests
import json
import os
import pymysql
def get_history(bvid):
find_sql = "SELECT * FROM download_history_bilibili WHERE bvid='{}'".format(bvid)
findres = cursor.execute(find_sql)
if findres == 0:
res = False
else:
res = True
return res
def get_url_list(uid):
url = f"https://api.bilibili.com/x/space/arc/search?mid={uid}&ps=30&tid=0&pn=1&keyword=&order=pubdate&jsonp=jsonp"
data = json.loads(requests.get(url).text)
if data["code"] == 0:
count = data["data"]["page"]["count"]
page_count = int(count/30) + 1
for page in range(page_count):
pn = page + 1
url = f"https://api.bilibili.com/x/space/arc/search?mid={uid}&ps=30&tid=0&pn={pn}&keyword=&order=pubdate&jsonp=jsonp"
page_vdict = json.loads(requests.get(url).text)["data"]["list"]["vlist"]
for vdict in page_vdict:
bvid="https://www.bilibili.com/video/"+vdict["bvid"]
vdict['bvid']=bvid
vdict['pic']=vdict['pic'].replace("//",'')
bvidExits=get_history(bvid)
if not bvidExits:
values_list = list(vdict.values())
values_list = ["0"] + values_list
values = tuple(values_list)
add_sql = "INSERT INTO download_history_bilibili VALUES {}".format(values)
cursor.execute(add_sql)
db.commit()
print("Insert: ", bvid)
elif bvidExits:
print("Exist: ",bvid)
def downloadVideo(uid):
find_sql = "SELECT * FROM download_history_bilibili WHERE mid='{}'".format(uid)
cursor.execute(find_sql)
res=cursor.fetchall()
for r in res:
bvid = r[16]
author=r[10]
path = "./download/{}/".format(author)
pathExist=os.path.exists(path)
if not pathExist:
os.makedirs(path)
cmd = "annie -o {} {}".format(path,bvid)
os.system(cmd)
if __name__ == "__main__":
db_host = "45.76.170.159"
db_user = "db_poster"
db_name = "db_poster"
db_pass = "ysq1159889481"
db = pymysql.connect(host=db_host, user=db_user, password=db_pass, database=db_name)
cursor = db.cursor()
# get_url_list(15183062)
downloadVideo(15183062)
db.close()
|
[
"root@vultr.guest"
] |
root@vultr.guest
|
256bc94180a64e4adbbbbc23b29e319b6f40ded7
|
751b094918ae9200afe7824d58804549082caa95
|
/src/python/WMComponent/DBS3Buffer/Oracle/CreateBlocks.py
|
cad2bf954ebb5bcb10c781f908e8dbdf8a3500e2
|
[] |
no_license
|
cinquo/WMCore
|
7ebd13269f42eb97f416f8f2bdaca05fa93c6afc
|
122f9332f2e944154dd0df68b6b3f2875427b032
|
refs/heads/master
| 2021-01-09T06:28:58.947626
| 2013-06-05T08:31:53
| 2013-06-05T08:31:53
| 2,965,330
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
#!/usr/bin/env python
"""
_DBSBuffer.SetBlockStatus_
Create new block in dbsbuffer_block
Update file to reflect block information
"""
import threading
import exceptions
from WMComponent.DBS3Buffer.MySQL.CreateBlocks import CreateBlocks as MySQLCreateBlocks
class CreateBlocks(MySQLCreateBlocks):
"""
Oracle implementation
"""
|
[
"sfoulkes@4525493e-7705-40b1-a816-d608a930855b"
] |
sfoulkes@4525493e-7705-40b1-a816-d608a930855b
|
3b126b869bfccc6a9d0b195367775643248e1374
|
1caf4418f3549567637f5e9893a445f52a38c6a0
|
/CmsAdmin/user_content/api/resources/account_verify_api.py
|
69ed7dcf8e0a5f7723260d2e105ebf033f022654
|
[] |
no_license
|
Final-Game/social_network_backend
|
c601563e08c0fd7de72a614944f354ef8d2d31d8
|
8111787d1d20eb87733ae360d8baa745a65e2743
|
refs/heads/master
| 2023-03-04T21:12:43.147084
| 2021-02-23T03:45:22
| 2021-02-23T03:45:22
| 290,542,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,577
|
py
|
from user_content.api.resources.filters.account_verify_filter import AccountVerifyFilter
from user_content.api.serializers.resources.account_verify_serializer import (
AccountVerifySerializer,
)
from rest_framework.filters import OrderingFilter, SearchFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.viewsets import ModelViewSet
from user_content.models import AccountVerify
from core.api.rest_frameworks import StandardResultsSetPagination
from core.api.rest_frameworks.order_filter import CustomOrderingFilter
class AccountVerifyApi(ModelViewSet):
queryset = AccountVerify.objects.all()
serializer_class = AccountVerifySerializer
filter_class = AccountVerifyFilter
pagination_class = StandardResultsSetPagination
filter_backends = [
SearchFilter,
OrderingFilter,
CustomOrderingFilter,
DjangoFilterBackend,
]
search_fields = ["account__profile__first_name", "account__profile__last_name"]
ordering_fields = ["created_at", "updated_at"]
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
return super().update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
return super().destroy(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs)
|
[
"kakavip.198.kaka@gmail.com"
] |
kakavip.198.kaka@gmail.com
|
a7a6bbcef0abbc635c7fab15e94f6e05e49edb93
|
8a5ccfbd09fdc3eb42e8240c0b7ceaf981f27814
|
/astropy_stark/astropy_stark/myresample.py
|
3955dbeb0583809c82943f8a3f5932d6b34aed52
|
[] |
no_license
|
hlabathems/pycecream
|
97edfd388e32ab12b22765debab31ee8c4929ab4
|
cd52937c3ff053dede0b02803933ba58789d5ff3
|
refs/heads/master
| 2020-06-09T22:46:14.114693
| 2019-06-19T17:42:24
| 2019-06-19T17:42:24
| 193,521,752
| 0
| 1
| null | 2019-06-24T14:30:05
| 2019-06-24T14:30:04
| null |
UTF-8
|
Python
| false
| false
| 6,010
|
py
|
#### code to randomly resample a set of input light curves
#10/9/2017 sampcode 3 update includes sampmin the minum spacing between data points
#sampcode 4, dtave indicates the minimum space between data points, data points will be selected (with no interpolation) from the parent sample, skipping points until the minimum spacing dtave is achieved
##avesamp is the average length of time between the random samples
#set dir = '' and fname=[''] to have this function work on datin[nt,3] and output array rather than save to file
#new 10/9/2017 added option dtsdin need mean and standard deviation of spacing between points e.g setting
#dtsdin very small will give very regularly spaced points.
#if negative then the absolute value is the fraction relative to the mean e.g -0.2 will set the
#standard deviation as a fifth of the mean spacing between points
import numpy as np
import os
import sys
import astropy_stark.myrandom as mr
def myresample(dir,fname,dtave,dtsdin = -0.2, sampmin=0.8,sampcode=3,datin=[]):
if (dtsdin < 0):
dtsd = np.abs(dtsdin)*dtave
else:
dtsd = dtsdin
dirpy=os.getcwd()
#dir = sys.argv[1] #the directory storing the light curves e.g '../fort/fortcode/fakelc/kep_18mar'
#fname = sys.argv[2] # a list of the files .e.g ['file1.dat','file2.dat','...'] etc
#dtave = sys.argv[3] #e.g 0.5 will resample with mean half day cadence
#sampmin = 0.8
#sampcode = 2
#!!### user arguments above. Don't change sampcode or sampmin unless you know what they do (I don't and I wrote the code).###
if (dir != ''):
os.chdir(dir)
Nfile=len(fname)
for ifile in range(Nfile):
if (fname[ifile] == ''):
dat = datin
else:
dat=np.loadtxt(fname[ifile])
t=dat[:,0]
x=dat[:,1]
sig=dat[:,2]
Ndat=t.shape[0]
dt = (t[-1] - t[0])/(Ndat-1)
# below are two versions of the code (the 2nd should be more sophisticated and consider the approximate spacing between each point when making its idxsamp selection
if sampcode == 1:
nidx=(1.-sampmin)*np.random.ranom_sample(1)[0]+sampmin
idxsamp=np.random.rand(low=0,high=Ndat,size=nidx)
datsamp=np.zeros((nidx,3))
datsamp[:,0]=t[idxsamp]
datsamp[:,1]=x[idxsamp]
datsamp[:,2]=sig[idxsamp]
elif sampcode == 2:
idxcount=0
tthen=t[0]
idxsamp=[]
xn = []
sign = []
tn = []
while (idxcount < Ndat) & (tthen < t[-1]):
a = np.random.randn(1)*dt*2
tnow = tthen + dtave + a
tn.append(tnow)
xn.append(np.interp([tnow],t,x)[0])
sign.append(np.interp([tnow],t,sig)[0])
#idxsamp.append(np.abs(t-tnow).argmin()) ## index of closest time to tnow
tthen=tnow
idxcount=idxcount+1
#idxsamp=np.array(idxsamp)
tn = np.array(tn)
xn = np.array(xn)
sign = np.array(sign)
nn = xn.shape[0]
datsamp=np.zeros((nn,3))
datsamp[:,0]=tn[:,0]
datsamp[:,1]=xn[:,0]
datsamp[:,2]=sign[:,0]
elif sampcode == 3:
#print 'ararar'
idxcount=0
tthen=t[0]
idxsamp=[]
tlast = t[-1]
while (idxcount < Ndat-1) & (tthen < tlast - 4*sampmin):
#a = np.random.randn(1)*dtave
a = np.random.normal(dtave,dtsd,1)[0]
tnow = tthen + np.abs(a)
idxtemp = np.abs(t-tnow).argmin()
#print tnow,tthen,'before mrs'
#print tnow, tthen, idxcount, sampmin
if ((idxtemp not in idxsamp) and ((tnow - tthen > sampmin) or (tnow > tlast - sampmin))):
idxsamp.append(idxtemp) ## index of closest time to tnow
idxcount=idxcount+1
a= 1.*tthen-tnow
tthen=tnow
#print idxcount, Ndat, tthen, t[-1],'mrs'
idxsamp=np.array(idxsamp)
datsamp=np.zeros((idxsamp.shape[0],3))
datsamp[:,0]=t[idxsamp]
datsamp[:,1]=x[idxsamp]
datsamp[:,2]=sig[idxsamp]
ttemp_space = datsamp[1:,0]-datsamp[:-1,0]
#print('min,max,and ave spacing between elements',ttemp_space.min(), ttemp_space.max(), np.mean(ttemp_space))
elif sampcode == 4:
idxcount=0
tthen=t[0]
idxsamp=[]
while (idxcount < Ndat) & (tthen < t[-1]):
tnow = tthen + dtave
b = t>tnow
idxtemp = [i for i, elem in enumerate(b, 1) if elem]
if (len(idxtemp) ==0):
break
idxtemp = idxtemp[0]
if (idxtemp >= t.shape[0]):
break
if (idxtemp not in idxsamp):
idxsamp.append(idxtemp) ## index of closest time to tnow
idxcount=idxcount+1
a= tnow - tthen
tthen=t[idxtemp]
#if (t[idxtemp] - t[idxtemp-1] < dtave):
#print 'PROBLEM!!',
#print t[idxtemp] - t[idxtemp-1], dtave#print tnow, idxcount, dtave, dt, tthen, a
#raw_input()
#print idxsamp
idxsamp=np.array(idxsamp)
datsamp=np.zeros((idxsamp.shape[0],3))
datsamp[:,0]=t[idxsamp]
datsamp[:,1]=x[idxsamp]
datsamp[:,2]=sig[idxsamp]
ttemp_space = datsamp[1:,0]-datsamp[:-1,0]
ns = len(idxsamp)
#for i in range(ns):
# print i,datsamp[i,:]
#print('min,max,and ave spacing between elements',ttemp_space.min(), ttemp_space.max(), np.mean(ttemp_space))
#print('locations...',ttemp_space.argmin(),ttemp_space.argmax())
np.savetxt('resamp_'+fname[ifile],datsamp)
os.chdir(dirpy) # change back to python directory
return(datsamp)
|
[
"david.starkey@vortexa.com"
] |
david.starkey@vortexa.com
|
8867897937961a346c8a6e06b802258aec31ed5c
|
02f42acb280288acba54fc045c4163df2f069d55
|
/project/migrations/0008_merge_20210312_1000.py
|
9e0c30331fec84f947450cf9076177129855f675
|
[] |
no_license
|
banana1019/tumbluv-backend
|
a05b0faebc63e83248b2e81d026fe26d5a5c085b
|
8592112284672fefdf9e4e76aeee1d1cedaaee82
|
refs/heads/main
| 2023-04-12T20:16:09.992472
| 2021-04-26T10:59:47
| 2021-04-26T10:59:47
| 361,158,437
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# Generated by Django 3.1.6 on 2021-03-12 10:00
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration.

    Reconciles two divergent ``0007`` migrations in the ``project`` app;
    it introduces no schema changes of its own (``operations`` is empty).
    """

    # Both parent branches are listed so either history is considered applied.
    dependencies = [
        ('project', '0007_auto_20210309_2313'),
        ('project', '0007_auto_20210309_2055'),
    ]
    operations = [
    ]
|
[
"68374847+Kang-kyunghun@users.noreply.github.com"
] |
68374847+Kang-kyunghun@users.noreply.github.com
|
cdcb2620849cb8b9df057128cafd916549c05694
|
5fcc3fd608a794d260368318c62547f74d4c1416
|
/checksum_Luhn.py
|
c0db1474e1bb065768ca070528051853e468d68b
|
[] |
no_license
|
ds-gurukandhamoorthi/intro-python-exs
|
241fb9158096479a100ef378f291ba83e1a7d5d4
|
68c386e51c13d0f31e273016eefc4e29ddecdc04
|
refs/heads/master
| 2022-02-25T22:28:41.061722
| 2019-10-22T18:36:46
| 2019-10-22T18:36:46
| 103,829,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,247
|
py
|
import argparse
def sum_digits(n):
    """Return the digital root of *n* (iterated digit sum).

    The digital root of 0 is 0; for positive n it equals ``1 + (n - 1) % 9``.
    Bug fix: the previous ``n % 9`` formulation returned 9 for ``n == 0``,
    which corrupted Luhn sums whenever a doubled digit was 0.
    """
    return 1 + (n - 1) % 9 if n else 0
def sum_ISBN(num, mul=3):
    """ISBN-13 weighted digit sum: odd-indexed digits count *mul* times."""
    total = 0
    for idx, ch in enumerate(str(num)):
        digit = int(ch)
        total += digit if idx % 2 == 0 else digit * mul
    return total
def is_valid_ISBN(num):
    """True iff *num* is 13 digits long and its weighted sum is a multiple of 10."""
    return len(str(num)) == 13 and sum_ISBN(num) % 10 == 0
def is_valid_ISBN_str(str_isbn):
    """Validate an ISBN-13 string that may contain hyphen separators."""
    bare = str_isbn.replace('-', '')
    return is_valid_ISBN(bare)
def sum_digits_Luhn(num, mul=2):
    """Luhn sum: even-indexed digits as-is, odd-indexed scaled by *mul* then digit-summed."""
    return sum(
        int(ch) if pos % 2 == 0 else sum_digits(int(ch) * mul)
        for pos, ch in enumerate(str(num))
    )
#Luhn's algorithm
def checksum_Luhn(num):
    """True iff *num* passes the Luhn check (weighted digit sum divisible by 10)."""
    return not sum_digits_Luhn(num) % 10
def make_checksum_Luhn(num):
    """Append the Luhn check digit to *num* and return the result as an int.

    Bug fix: when the Luhn sum is already a multiple of 10 the check digit
    is 0, but ``10 - s % 10`` evaluated to 10 and appended the two-digit
    string "10".  ``(10 - s % 10) % 10`` maps that case to 0.
    """
    append = (10 - sum_digits_Luhn(num) % 10) % 10
    return int(str(num) + str(append))
if __name__ == "__main__":
    # CLI: a 10-digit number gets its Luhn check digit appended and printed;
    # an 11-digit number is verified instead (prints True/False).
    parser = argparse.ArgumentParser(description='Checksum as used in banks')
    parser.add_argument('number', type=int, help='checksum for number: 10 digits to create, 11 digits to check')
    args = parser.parse_args()
    # NOTE(review): the int round-trip drops leading zeros, so "0123456789"
    # would be treated as a 9-digit input -- confirm inputs never start with 0.
    number = str(args.number)
    if(len(number) == 10):
        res = make_checksum_Luhn(number)
        print(res)
    else:
        print(checksum_Luhn(number))
|
[
"ds.gurukandhamoorthi@gmail.com"
] |
ds.gurukandhamoorthi@gmail.com
|
117e37cdfea8e8164e6d4d2ce18c49fcaa106dfb
|
ab9cfa8aa28749ebd18c4fa4c8712c2198e72501
|
/复杂链表的复制.py
|
9af36bf98445059ae00b4559e5bdf9326c4271a6
|
[] |
no_license
|
joseph-mutu/JianZhiOfferCodePics
|
d71e780483909390b436f81989000a277daac11d
|
8d41326cb2b9bc1379682fa6364a68c0ce62dbee
|
refs/heads/master
| 2020-08-03T14:39:59.666806
| 2019-09-30T06:17:36
| 2019-09-30T06:17:36
| 211,788,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-08-26 09:10:31
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
import os
class RandomListNode:
    """Singly linked node carrying an extra `random` pointer."""

    def __init__(self, x):
        self.label = x      # payload value
        self.next = None    # successor in the chain
        self.random = None  # arbitrary extra pointer (may remain None)
class Solution:
    """Deep-copy a linked list whose nodes carry an extra `random` pointer.

    Three passes over the interleaved list A -> A' -> B -> B' -> ...:
    clone every node in place, wire the clones' random pointers, then
    split the two chains apart.  O(n) time, O(1) extra space.
    """

    # Returns the head of the cloned RandomListNode chain (None for empty input).
    def Clone(self, pHead):
        if not pHead:
            return None
        self.CloneNodes(pHead)
        self.CloneRanomPointer(pHead)
        return self.SplitChain(pHead)

    def CloneNodes(self, pHead):
        # Pass 1: insert a copy of every node right after the original.
        while pHead:
            temNode = RandomListNode(pHead.label)
            temNode.next = pHead.next
            pHead.next = temNode
            pHead = temNode.next

    def CloneRanomPointer(self, pHead):
        # Pass 2: each clone's random is the node *after* the original's random.
        # Bug fix: the previous `while pHead.next.next` loop exited before
        # visiting the last original node, so its clone's random pointer was
        # silently dropped whenever the tail node had one.
        while pHead:
            if pHead.random:
                pHead.next.random = pHead.random.next
            pHead = pHead.next.next

    def SplitChain(self, pHead):
        # Pass 3: detach the interleaved copies into their own list.
        newHead = pHead.next
        while pHead.next:
            tem = pHead.next
            pHead.next = tem.next
            pHead = tem
        return newHead
# Ad-hoc manual check: build a 5-node list (1 -> 4 -> 5 -> 7 -> 9)
# with two random pointers, clone it, and print through the clone.
a = RandomListNode(1)
a.next = RandomListNode(4)
a.next.next = RandomListNode(5)
a.next.next.next = RandomListNode(7)
a.next.next.next.next = RandomListNode(9)
a.random = a.next.next                  # node 1 .random -> node 5
a.next.random = a.next.next.next.next   # node 4 .random -> node 9
s = Solution()
# while a:
# 	print(a.label)
# 	a = a.next
newHead = s.Clone(a)
# Expected output: 5 (clone of node 1 points at the clone of node 5).
print(newHead.random.label)
|
[
"josephmathone@gmail.com"
] |
josephmathone@gmail.com
|
2363d85ed35aec90334d9ce83011ed94d674768c
|
fd7720dfc136eb92dbff8cc31e0f83bb8bbced16
|
/simulation/predictor.py
|
6a3c114a0e8e87f5a466f0c9e3d90c5c6f75dd2b
|
[] |
no_license
|
Villux/golden_goal
|
d134a1660dd32f0b4d05f720993dd23f8a064faf
|
f36f4dd0297e2e52c0f990cb3ac134f70fc16780
|
refs/heads/master
| 2020-03-27T01:53:09.863147
| 2018-11-15T15:40:04
| 2018-11-15T15:40:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
import numpy as np
from scipy.stats import poisson
class OutcomePredictor():
    """Predict a match outcome (-1/0/1) from a classifier's probabilities."""

    def __init__(self, model):
        # Any estimator exposing scikit-learn's `predict_proba` interface.
        self.model = model

    def predict_outcome_probabilities(self, x):
        """Class probabilities for a single feature vector."""
        probabilities = self.model.predict_proba(x)
        return probabilities[0]

    def predict(self, feature_vector):
        """Return (probabilities reversed, outcome) where outcome = argmax - 1."""
        proba = self.predict_outcome_probabilities(feature_vector)
        most_likely_class = np.argmax(proba)
        return np.flip(proba, axis=0), most_likely_class - 1
class ScorePredictor():
    """Predict match outcomes by modelling each side's goals as Poisson."""

    def __init__(self, model):
        # Regressor exposing `predict`; its output is the expected goal count.
        self.model = model

    @staticmethod
    def get_goal_matrix(home_mu, away_mu):
        """Joint probability matrix P(home=i, away=j) for 0..10 goals each."""
        goal_counts = range(0, 11)
        home_probs = [poisson.pmf(k, home_mu) for k in goal_counts]
        away_probs = [poisson.pmf(k, away_mu) for k in goal_counts]
        return np.outer(home_probs, away_probs)

    @staticmethod
    def get_outcome_probabilities(goal_matrix):
        """[P(home win), P(draw), P(away win)] from the joint goal matrix."""
        # Below the diagonal home scored more, on it it's a draw, above it
        # the away side scored more.
        home_win = np.tril(goal_matrix, -1).sum()
        draw = np.diag(goal_matrix).sum()
        away_win = np.triu(goal_matrix, 1).sum()
        return [home_win, draw, away_win]

    def predict_score(self, x):
        """Expected goals (Poisson mean) for one side's feature vector."""
        return self.model.predict(x)[0]

    def predict(self, home_fv, away_fv):
        """Return ([home, draw, away] probabilities, outcome = argmax - 1)."""
        mu_home = self.predict_score(home_fv)
        mu_away = self.predict_score(away_fv)
        matrix = self.get_goal_matrix(mu_home, mu_away)
        outcome_proba = self.get_outcome_probabilities(matrix)
        return outcome_proba, np.argmax(outcome_proba) - 1
|
[
"villej.toiviainen@gmail.com"
] |
villej.toiviainen@gmail.com
|
3ee0b6c42d9e38196d0f53cc53391ef21c05c5df
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03240/s759213189.py
|
1d4f6ebd1ea989b7a9e3ce2eca0c51eb2d3ea865
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
# Reads n observations (x, y, h) and searches the 101x101 grid for a center
# (i, j) and height s consistent with every observation under the model
# height(x, y) = max(s - |x - i| - |y - j|, 0).
n = int(input())
xyh = [list(map(int, input().split())) for _ in range(n)]
for i in range(101):
    for j in range(101):
        s = -1              # candidate center height implied by nonzero observations
        flag = True         # stays True while all nonzero observations agree
        limit = float('inf')  # tightest bound imposed by zero-height observations
        for x, y, h in xyh:
            if h != 0:
                # A nonzero reading pins the center height exactly.
                H = h + abs(x-i) + abs(y-j)
                if s != -1:
                    if s != H:
                        flag = False
                        break
                s = H
            else:
                # A zero reading only caps the height via its distance.
                limit = min(limit, abs(x-i) + abs(y-j))
        if flag:
            if s != -1 and s <= limit:
                print(i, j, s)
                exit()
            # NOTE(review): fallback when no nonzero observation fixes s --
            # assumes height 1 is forced when the tightest zero bound is 1;
            # confirm against the problem statement.
            elif s <= limit and limit == 1:
                print(i, j, 1)
                exit()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6d417a3569822b41ad57ff5456be22691250b6f6
|
b66c12a4304c6af00d58a1f83e453dbc739ae60d
|
/survey/features/page_objects/investigators.py
|
4e7cb29662126ec40e31708b493fda0f2b1d3c45
|
[
"BSD-2-Clause"
] |
permissive
|
madhavaramu/uSurvey
|
060dae008f975a7cdb77ef8b0c5d820842422637
|
681e1d91fbedf94e840858e1ef09538777ce3e50
|
refs/heads/uSurvey
| 2020-04-11T06:28:17.220192
| 2016-12-06T13:24:45
| 2016-12-06T13:24:45
| 68,372,980
| 0
| 1
| null | 2016-09-16T11:03:44
| 2016-09-16T11:03:43
| null |
UTF-8
|
Python
| false
| false
| 5,651
|
py
|
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from time import sleep
from survey.features.page_objects.base import PageObject
from survey.investigator_configs import COUNTRY_PHONE_CODE
from rapidsms.contrib.locations.models import Location
from lettuce.django import django_url
from survey.models import EnumerationArea
class NewInvestigatorPage(PageObject):
url = "/investigators/new/"
def valid_page(self):
fields = ['name', 'mobile_number', 'confirm_mobile_number', 'male', 'age', 'backend']
for field in fields:
assert self.browser.is_element_present_by_name(field)
assert self.browser.find_by_css("span.add-on")[0].text == COUNTRY_PHONE_CODE
def get_investigator_values(self):
return self.values
def validate_detail_page_url(self):
assert self.browser.url == django_url(self.url)
def fill_valid_values(self, values, ea):
self.browser.find_by_id("location-value").value = Location.objects.create(name="Uganda").id
kampala = Location.objects.get(name="Kampala")
kampala_county = Location.objects.get(name="Kampala County")
kampala_subcounty = Location.objects.get(name="Subcounty")
kampala_parish = Location.objects.get(name="Parish")
kampala_village = Location.objects.get(name="Village")
ea = EnumerationArea.objects.get(name="EA")
self.fill_in_with_js('$("#location-district")', kampala.id)
self.fill_in_with_js('$("#location-county")', kampala_county.id)
self.fill_in_with_js('$("#location-subcounty")', kampala_subcounty.id)
self.fill_in_with_js('$("#location-parish")', kampala_parish.id)
self.fill_in_with_js('$("#location-village")', kampala_village.id)
self.fill_in_with_js('$("#widget_ea")', ea.id)
self.values = values
self.browser.fill_form(self.values)
class InvestigatorsListPage(PageObject):
url = '/investigators/'
def validate_fields(self):
self.validate_fields_present(["Investigators List", "Name", "Mobile Number", "Action"])
def validate_pagination(self):
self.browser.click_link_by_text("2")
def validate_presence_of_investigator(self, values):
assert self.browser.is_text_present(values['name'])
assert self.browser.is_text_present(values['mobile_number'])
def no_registered_invesitgators(self):
assert self.browser.is_text_present("There are no investigators currently registered for this location.")
def visit_investigator(self, investigator):
self.browser.click_link_by_text(investigator.name)
def see_confirm_block_message(self, confirmation_type, investigator):
self.is_text_present("Confirm: Are you sure you want to %s investigator %s" % (confirmation_type, investigator.name))
def validate_successful_edited_message(self):
self.is_text_present("Investigator successfully edited.")
def validate_page_url(self):
assert self.browser.url == django_url(self.url)
class FilteredInvestigatorsListPage(InvestigatorsListPage):
def __init__(self, browser, location_id):
self.browser = browser
self.url = '/investigators/?location=' + str(location_id)
def no_registered_invesitgators(self):
assert self.browser.is_text_present("There are no investigators currently registered for this county.")
class EditInvestigatorPage(PageObject):
def __init__(self, browser, investigator):
self.browser = browser
self.investigator = investigator
self.url = '/investigators/' + str(investigator.id) + '/edit/'
def validate_edit_investigator_url(self):
assert self.browser.url == django_url(self.url)
def change_name_of_investigator(self):
self.values = {
'name': 'Updated Name',
'mobile_number': self.investigator.mobile_number,
'confirm_mobile_number': self.investigator.mobile_number,
'male': self.investigator.male,
'age': self.investigator.age,
'level_of_education': self.investigator.level_of_education,
'language': self.investigator.language,
'location': self.investigator.location,
}
self.browser.fill_form(self.values)
def assert_user_saved_sucessfully(self):
self.is_text_present("User successfully edited.")
class InvestigatorDetailsPage(PageObject):
def __init__(self, browser, investigator):
self.browser = browser
self.investigator = investigator
self.url = '/investigators/' + str(investigator.id) + '/'
def validate_page_content(self):
details = {
'Name': self.investigator.name,
'Mobile Number': self.investigator.mobile_number,
'Age': str(self.investigator.age),
'Sex': 'Male' if self.investigator.male else 'Female',
'Highest Level of Education': self.investigator.level_of_education,
'Preferred Language of Communication': self.investigator.language,
'Country': 'Uganda',
'City': 'Kampala',
}
for label, text in details.items():
self.is_text_present(label)
self.is_text_present(text)
def validate_navigation_links(self):
assert self.browser.find_link_by_text(' Back')
def validate_back_link(self):
self.browser.find_link_by_href(django_url(InvestigatorsListPage.url))
def validate_detail_page_url(self):
assert self.browser.url == django_url(self.url)
def validate_successful_edited_message(self):
self.is_text_present("Investigator successfully edited.")
|
[
"antsmc2@yahoo.com"
] |
antsmc2@yahoo.com
|
bf1ff2870f3e7095975be2860e2c3f653b3482ee
|
8948aa88edf47ca788ec464a9a6115e2ef46e4cf
|
/rastervision2/core/data/label_source/object_detection_label_source_config.py
|
5ff23847d45a255ae547ea060760244f53e3de16
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
etraiger/raster-vision
|
71687177f44ade3495e2adcbe87bcacb3948c168
|
dc2f6bc688002375b91acc5df59d60c476022a96
|
refs/heads/master
| 2022-11-21T16:24:27.467471
| 2020-06-17T14:43:25
| 2020-06-17T14:43:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
from rastervision2.core.data.label_source import (LabelSourceConfig,
ObjectDetectionLabelSource)
from rastervision2.core.data.vector_source import (VectorSourceConfig)
from rastervision2.pipeline.config import (register_config)
@register_config('object_detection_label_source')
class ObjectDetectionLabelSourceConfig(LabelSourceConfig):
"""Config for a read-only label source for object detection."""
vector_source: VectorSourceConfig
def build(self, class_config, crs_transformer, extent, tmp_dir):
vs = self.vector_source.build(class_config, crs_transformer)
return ObjectDetectionLabelSource(vs, extent)
|
[
"lewfish@gmail.com"
] |
lewfish@gmail.com
|
07907361b2de76395ad32702e0f64cc5fb4c1bd3
|
71d4381d6e78e1078720380fa5d26f323e8426bf
|
/python/services/firebase/beta/apple_app.py
|
18349529600e966e53542dc66b96a41f66c2c809
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/declarative-resource-client-library
|
0edb9c3fc4bbc8f2df014ad8b92c13611b45fe26
|
dd32fc7f3a041b5c73a1ad51f82871221d93b621
|
refs/heads/main
| 2023-08-19T00:12:42.859403
| 2023-08-16T16:46:52
| 2023-08-16T16:49:19
| 327,995,099
| 26
| 28
|
Apache-2.0
| 2023-08-09T22:16:39
| 2021-01-08T19:34:57
|
Go
|
UTF-8
|
Python
| false
| false
| 5,908
|
py
|
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.firebase import apple_app_pb2
from google3.cloud.graphite.mmv2.services.google.firebase import apple_app_pb2_grpc
from typing import List
class AppleApp(object):
    """Declarative client for the Firebase (beta) AppleApp resource.

    Generated-style wrapper: each public method builds a protobuf request
    from this object's fields, sends it over the shared gRPC channel, and
    (for ``apply``) copies the server's authoritative state back onto self.
    """

    def __init__(
        self,
        name: str = None,
        app_id: str = None,
        display_name: str = None,
        project_id: str = None,
        bundle_id: str = None,
        app_store_id: str = None,
        team_id: str = None,
        api_key_id: str = None,
        project: str = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        # NOTE(review): app_id and project_id are accepted but never stored;
        # they appear to be server-assigned outputs populated by apply() --
        # confirm against the service definition.
        self.name = name
        self.display_name = display_name
        self.bundle_id = bundle_id
        self.app_store_id = app_store_id
        self.team_id = team_id
        self.api_key_id = api_key_id
        self.project = project
        self.service_account_file = service_account_file

    def apply(self):
        """Create or update the remote resource and sync its state onto self."""
        stub = apple_app_pb2_grpc.FirebaseBetaAppleAppServiceStub(channel.Channel())
        request = apple_app_pb2.ApplyFirebaseBetaAppleAppRequest()
        # Only non-empty fields are copied into the request.
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)

        if Primitive.to_proto(self.bundle_id):
            request.resource.bundle_id = Primitive.to_proto(self.bundle_id)

        if Primitive.to_proto(self.app_store_id):
            request.resource.app_store_id = Primitive.to_proto(self.app_store_id)

        if Primitive.to_proto(self.team_id):
            request.resource.team_id = Primitive.to_proto(self.team_id)

        if Primitive.to_proto(self.api_key_id):
            request.resource.api_key_id = Primitive.to_proto(self.api_key_id)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        request.service_account_file = self.service_account_file

        response = stub.ApplyFirebaseBetaAppleApp(request)
        # Mirror the server response, including output-only fields.
        self.name = Primitive.from_proto(response.name)
        self.app_id = Primitive.from_proto(response.app_id)
        self.display_name = Primitive.from_proto(response.display_name)
        self.project_id = Primitive.from_proto(response.project_id)
        self.bundle_id = Primitive.from_proto(response.bundle_id)
        self.app_store_id = Primitive.from_proto(response.app_store_id)
        self.team_id = Primitive.from_proto(response.team_id)
        self.api_key_id = Primitive.from_proto(response.api_key_id)
        self.project = Primitive.from_proto(response.project)

    def delete(self):
        """Delete the remote resource identified by this object's fields."""
        stub = apple_app_pb2_grpc.FirebaseBetaAppleAppServiceStub(channel.Channel())
        request = apple_app_pb2.DeleteFirebaseBetaAppleAppRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)

        if Primitive.to_proto(self.bundle_id):
            request.resource.bundle_id = Primitive.to_proto(self.bundle_id)

        if Primitive.to_proto(self.app_store_id):
            request.resource.app_store_id = Primitive.to_proto(self.app_store_id)

        if Primitive.to_proto(self.team_id):
            request.resource.team_id = Primitive.to_proto(self.team_id)

        if Primitive.to_proto(self.api_key_id):
            request.resource.api_key_id = Primitive.to_proto(self.api_key_id)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        response = stub.DeleteFirebaseBetaAppleApp(request)

    @classmethod
    def list(self, project, service_account_file=""):
        """List AppleApps in *project*; returns the raw response items."""
        stub = apple_app_pb2_grpc.FirebaseBetaAppleAppServiceStub(channel.Channel())
        request = apple_app_pb2.ListFirebaseBetaAppleAppRequest()
        request.service_account_file = service_account_file
        # NOTE(review): capitalized `Project` attribute -- presumably matches
        # the generated proto field; verify against apple_app_pb2.
        request.Project = project

        return stub.ListFirebaseBetaAppleApp(request).items

    def to_proto(self):
        """Build a FirebaseBetaAppleApp proto from the locally-set fields."""
        resource = apple_app_pb2.FirebaseBetaAppleApp()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.display_name):
            resource.display_name = Primitive.to_proto(self.display_name)
        if Primitive.to_proto(self.bundle_id):
            resource.bundle_id = Primitive.to_proto(self.bundle_id)
        if Primitive.to_proto(self.app_store_id):
            resource.app_store_id = Primitive.to_proto(self.app_store_id)
        if Primitive.to_proto(self.team_id):
            resource.team_id = Primitive.to_proto(self.team_id)
        if Primitive.to_proto(self.api_key_id):
            resource.api_key_id = Primitive.to_proto(self.api_key_id)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        return resource
class Primitive(object):
    """Helpers mapping Python scalars to/from proto scalar fields."""

    @classmethod
    def to_proto(cls, s):
        # Falsy values (None, "", 0, ...) serialize as the empty string.
        return s if s else ""

    @classmethod
    def from_proto(cls, s):
        # Proto scalars come back unchanged.
        return s
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
557ec87e81eefffe537c57d04561b95ab3ddc620
|
426a3f961982db52360a3e66c326b36a1d0a91ae
|
/object oriented programming/Polymorphism/Methodoverloading.py
|
cba84c2f6aac3d0ff0a41cbedb6b0535ba633546
|
[] |
no_license
|
Sibinvarghese/PythonPrograms
|
c121f8eaf06b2fcbbd15f64b54c13771e82e5d99
|
c0e0951dab3ff0921ae2e25dfb086bb94d805464
|
refs/heads/master
| 2023-02-17T10:27:59.876242
| 2021-01-13T03:51:51
| 2021-01-13T03:51:51
| 329,186,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
from smtplib import LMTP
class Maths:
def add(self):
num1,num2=10,20
print(num1+num2)
def add(self,num1):
num2=50
print(num1+num2)
def add(self,num1,num2): # This is Called recently implemented method
print(num1+num2)
obj=Maths()
obj.add(10,20)
# obj.add(30)
# obj.add()
|
[
"sibinmvarghese@gmail.com"
] |
sibinmvarghese@gmail.com
|
04d24f794aa036733d2b00b2aa67933a0d67d36a
|
37977ac3865502cac5b4d61f5e628d9129c711e2
|
/2019-RoundG/shifts.py
|
ba90f6c58ef4541539cd7b38b92b8031a7fca127
|
[] |
no_license
|
YeahHuang/Kickstart
|
932c3ee14a4afee3c1cbe7acb58cb65789c1225a
|
08d141768cf93294be8a9c45fa39a4d14f9aa592
|
refs/heads/master
| 2021-07-08T05:52:00.769972
| 2020-07-19T15:43:13
| 2020-07-19T15:43:13
| 130,649,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
from bisect import bisect_left, bisect_right, insort_left, insort_right
from string import ascii_lowercase
from heapq import heappush, heappop, heapify
from collections import Counter, defaultdict
from itertools import product
# NOTE(review): `global` at module level is a no-op in Python; the
# declarations that matter are the ones inside dfs() below.
global ans
global a,b,suma, sumb, n, h
def dfs(i, cur_a, cur_b):
    """Pruned DFS over items i..n-1, accumulating the count into global `ans`.

    Each item may be assigned to A, to B, or to both (three branches).
    Once both running totals reach h, every remaining item is free, so the
    whole subtree contributes 3**(n-i) at once.  `suma`/`sumb` are prefix
    sums of `a`/`b`; the guard below prunes branches where the remaining
    items cannot lift both totals to h.
    """
    global ans,a,b,suma, sumb, n, h
    #print(i, cur_a, cur_b)
    if cur_a>=h and cur_b>=h:
        ans += 3**(n-i)
        return
    # Recurse only while both targets are still reachable with what's left.
    if (i<n) and (suma[-1]-suma[i]+cur_a>=h) and (sumb[-1]-sumb[i]+cur_b>=h):
        dfs(i+1, cur_a+a[i], cur_b)       # item i -> A only
        dfs(i+1, cur_a, cur_b+b[i])       # item i -> B only
        dfs(i+1, cur_a+a[i], cur_b+b[i])  # item i -> both
# Per-test-case driver: reads n items with values (a_i, b_i) and target h,
# then counts valid assignments via the global-state dfs() above.
T = int(input())
for it in range(T):
    n, h = map(int, input().split())
    aa = list(map(int, input().split()))
    bb = list(map(int, input().split()))
    # Sort item pairs by descending a-value so the DFS prunes earlier.
    tmp = []
    for i in range(n):
        tmp.append((aa[i],bb[i]))
    tmp.sort(reverse=True)
    a,b = [],[]
    for i in range(n):
        a.append(tmp[i][0])
        b.append(tmp[i][1])
    # Prefix sums: suma[i] = a[0] + ... + a[i-1] (used by dfs for pruning).
    suma, sumb = [0] * (n+1), [0]*(n+1)
    for i in range(n):
        suma[i+1] = suma[i] + a[i]
        sumb[i+1] = sumb[i] + b[i]
    ans = 0
    dfs(0, 0, 0)
    print("Case #%d: %d"%(it+1, ans))
|
[
"noreply@github.com"
] |
YeahHuang.noreply@github.com
|
c70990a5217ff8db6ecdb0e8f3057427189a090b
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/next-greater-element-i/282267514.py
|
ff135633679788c1ecdd8891789789e9ac47b07c
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601
| 2021-04-06T11:30:21
| 2021-04-06T11:30:21
| 201,942,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# title: next-greater-element-i
# detail: https://leetcode.com/submissions/detail/282267514/
# datetime: Thu Nov 28 20:22:56 2019
# runtime: 44 ms
# memory: 12.8 MB
class Solution:
    def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """For each value in nums1, return the first greater element to its
        right in nums2, or -1 if none exists (values in nums2 are unique).

        Monotonic-stack precompute over nums2: O(len(nums1) + len(nums2))
        instead of the original quadratic rescan of nums2 for every query.
        """
        # next_greater[v] = first element after v in nums2 that is larger.
        next_greater = {}
        stack = []  # values still awaiting a greater element (descending)
        for v in nums2:
            while stack and stack[-1] < v:
                next_greater[stack.pop()] = v
            stack.append(v)
        return [next_greater.get(v, -1) for v in nums1]
|
[
"ljm51689@gmail.com"
] |
ljm51689@gmail.com
|
addab2413e8bdacb53c294cc23deaaa6c4a83ccd
|
3a5a9f79e53f30a719cf51dedcc71a7153570103
|
/apicatslist/models.py
|
97c24a1b8c4348349a4bfab35b4d03ab1358e76b
|
[] |
no_license
|
KiaraRaynS/Catslist-23-06-16-
|
b5ab80d6c363a161806133cfa1a1187728811395
|
7b8ec46684ca64e185e047d61379248e94390c64
|
refs/heads/master
| 2021-01-19T01:23:46.383024
| 2016-08-17T07:26:26
| 2016-08-17T07:26:26
| 61,828,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
from rest_framework.authtoken.models import Token
from django.dispatch import receiver
from django.db.models.signals import post_save
@receiver(post_save, sender='auth.User')
def usertoken(**kwargs):
created = kwargs.get('created')
instance = kwargs.get('instance')
if created:
Token.objects.create(user=instance)
|
[
"sherman.rayn@outlook.com"
] |
sherman.rayn@outlook.com
|
d88b07c53a5a5106cb51936f34c9493a3c87903b
|
a6c7d1b57c7d4804fc651adc13416853ec6c86cd
|
/test/test_samples/test_samples.py
|
c8015cb6d081a6b61475b36b22a5d15d3bfdadf4
|
[
"MIT"
] |
permissive
|
executablebooks/mistletoe-ebp
|
367c9c031a457f9f3c089bfb7a86b81309f8840b
|
229812436726fd9b1af85c6e66ff8c81b415758d
|
refs/heads/master
| 2022-12-05T03:17:00.975786
| 2020-08-06T17:53:59
| 2020-08-06T17:53:59
| 245,217,804
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
import os
from mistletoe import markdown
PATH = os.path.dirname(__file__)
def test_syntax(file_regression):
    """Regression-check the HTML rendered from the syntax.md sample."""
    with open(os.path.join(PATH, "syntax.md")) as handle:
        file_regression.check(markdown(handle.read()), extension=".html")
def test_jquery(file_regression):
    """Regression-check the HTML rendered from the jquery.md sample."""
    with open(os.path.join(PATH, "jquery.md")) as handle:
        file_regression.check(markdown(handle.read()), extension=".html")
|
[
"chrisj_sewell@hotmail.com"
] |
chrisj_sewell@hotmail.com
|
0a7042dfc022c5fa43ff7bc37a587ad6d7f6f90b
|
ebbcd3815d00123238cff2ae4ab47d8a0155df55
|
/apps/xfzauth/forms.py
|
86f8c7426b0e32abec5dba02d80ae78dd85648c6
|
[] |
no_license
|
zhaocheng1996/xfz_test
|
f4e1393e8a416371761b628a72af38bed2e3b5a0
|
724b17632ac7671db0baa460b988a59e45d5fc0d
|
refs/heads/master
| 2020-04-30T16:26:28.833609
| 2019-04-02T06:51:54
| 2019-04-02T06:51:54
| 176,948,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
from django import forms
from apps.forms import FormMixin
from django.core.cache import cache
from .models import User
class LoginForm(forms.Form,FormMixin):
    """Login form: mobile number + password, with an optional remember flag."""
    telephone = forms.CharField(max_length=11)  # 11-digit mobile number
    password = forms.CharField(max_length=20,min_length=6,error_messages={"max_length":"密码最多不能超过20个字符!","min_length":"密码最少不能少于6个字符!"})
    remember = forms.IntegerField(required=False)  # truthy => persistent session
class RegisterForm(forms.Form,FormMixin):
    """Registration form: phone, username, matching passwords, image captcha."""
    telephone = forms.CharField(max_length=11)
    username = forms.CharField(max_length=20)
    password1 = forms.CharField(max_length=20, min_length=6,
                                error_messages={"max_length": "密码最多不能超过20个字符!", "min_length": "密码最少不能少于6个字符!"})
    password2 = forms.CharField(max_length=20, min_length=6,
                                error_messages={"max_length": "密码最多不能超过20个字符!", "min_length": "密码最少不能少于6个字符!"})
    img_captcha = forms.CharField(min_length=4, max_length=4)

    def clean(self):
        """Cross-field validation: password match, captcha, unique phone."""
        cleaned_data = super(RegisterForm, self).clean()
        password1 = cleaned_data.get('password1')
        password2 = cleaned_data.get('password2')
        if password1 != password2:
            raise forms.ValidationError('两次密码输入不一致!')
        # Captcha codes are cached lowercase; compare case-insensitively.
        img_captcha = cleaned_data.get('img_captcha')
        cached_img_captcha = cache.get(img_captcha.lower())
        if not cached_img_captcha or cached_img_captcha.lower() != img_captcha.lower():
            raise forms.ValidationError("图像验证码错误")
        telephone = cleaned_data.get('telephone')
        exists = User.objects.filter(telephone=telephone).exists()
        if exists:
            # Bug fix: the ValidationError was constructed but never raised,
            # so duplicate phone numbers slipped through registration.
            raise forms.ValidationError('该手机号码已经被注册!')
        return cleaned_data
|
[
"34829837+zhaocheng1996@users.noreply.github.com"
] |
34829837+zhaocheng1996@users.noreply.github.com
|
9373a09c9d4289563607e011d4788444a4b72147
|
9bbb00c09aaaa19565d3fb8091af568decb5820f
|
/4_Intermediate_Importing_Python/2_Interacting_With_APIs_To_Import_Data_From_The_Internet/4_Checking_The_Wikipedia_API.py
|
7bba80f6ccb09003b1c81c42f2c8dd0cf911ea65
|
[] |
no_license
|
PeterL64/UCDDataAnalytics
|
4417fdeda9c64c2f350a5ba53b2a01b4bdc36fc7
|
d6ff568e966caf954323ecf641769b7c79ccb83a
|
refs/heads/master
| 2023-06-14T04:10:41.575025
| 2021-07-07T15:23:50
| 2021-07-07T15:23:50
| 349,780,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
# Checking out the Wikipedia API (nested JSON response).

# Import Packages
import requests

# MediaWiki query API: intro extract of the "pizza" article, as JSON.
url = 'https://en.wikipedia.org/w/api.php?action=query&prop=extracts&format=json&exintro=&titles=pizza'

# Package the request, send it, and catch the response.
r = requests.get(url)

# Decode the JSON body into a dictionary.
json_data = r.json()

# pizza_extract holds the HTML extract of the Pizza page as a string.
# NOTE(review): '24768' is the hard-coded page id of the Pizza article; this
# lookup breaks if the API returns a different id -- iterating
# json_data['query']['pages'].values() would be more robust.
pizza_extract = json_data['query']['pages']['24768']['extract']
print(pizza_extract)
|
[
"peterlyonscbar@gmail.com"
] |
peterlyonscbar@gmail.com
|
4c801b6dbe162b6d05d2b0f9077b249bed8522be
|
dad12d5cfdae207fd5391eca45c86ef1bd6447bd
|
/zmq_plugin_bridge/monitor.py
|
b76e1cbc0f7e6c0935494b55ebcc6f65e0750259
|
[] |
no_license
|
JozzOfLyfe/Threejs
|
08f9282bba37d2a15b3d9d585930e0293f3b138f
|
50f585b3afa0dcaacced7bec7727a75fc40c0f99
|
refs/heads/master
| 2021-01-20T19:39:40.302550
| 2016-07-22T16:39:12
| 2016-07-22T16:39:12
| 62,829,783
| 0
| 1
| null | 2016-07-22T16:39:12
| 2016-07-07T18:33:59
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,995
|
py
|
# coding: utf-8
import json
import logging
import sys
from zmq import green as zmq
from zmq_plugin.plugin import Plugin
from zmq_plugin.schema import (validate, PandasJsonEncoder,
encode_content_data,
decode_content_data)
import IPython
import arrow
import gevent
import jsonschema
logger = logging.getLogger(__name__)
def run_plugin(sio, plugin, log_level=None, namespace=None):
    """Forward ZeroMQ plugin messages to a Socket.IO server (Python 2 code).

    Polls the plugin's SUB socket, validates each JSON message against the
    zmq_plugin schema, re-encodes decoded content as JSON, prints a console
    trace, and emits the message on `sio` under the 'zmq' event.
    Runs forever; Ctrl-C drops into an IPython shell for inspection.
    """
    if log_level is not None:
        logging.basicConfig(level=log_level)
    plugin.reset()

    def get_message():
        # Non-blocking receive; raises zmq.Again when nothing is queued.
        msg_frames = plugin.subscribe_socket.recv_multipart(zmq.NOBLOCK)
        message_str = msg_frames[-1]
        try:
            # Decode message from first (and only expected) frame.
            message = json.loads(message_str)
            # Validate message against schema.
            validate(message)
        except jsonschema.ValidationError:
            logger.error('Unexpected message', exc_info=True)
            raise
        else:
            return message

    start = arrow.now()
    while True:
        try:
            try:
                message = get_message()
            except zmq.Again:
                # No message pending; yield to other greenlets briefly.
                gevent.sleep(.01)
                continue
            msg_timestamp = arrow.get(message['header']['date'])
            delta_time = (msg_timestamp - start).total_seconds()
            time_info = msg_timestamp.strftime('%H:%M:%S')
            if delta_time > .25:
                # Flag messages arriving noticeably after the previous one.
                time_info += (' +%-5.1f' % delta_time)
            print 72 * '-'
            if message['header']['msg_type'] == 'execute_reply':
                msg_info = (time_info +
                            ' [{header[target]}<-{header[source]}] '
                            '{content[command]}'.format(**message))
                # NOTE(review): msg_info is printed here AND again after the
                # if/elif/else below, so execute_reply traces appear twice --
                # confirm whether that is intentional.
                print msg_info
                data = decode_content_data(message)
                # HACK: bare except drops into pdb -- leftover debugging aid.
                try:
                    json_data = json.dumps(data, cls=PandasJsonEncoder)
                except:
                    import pdb; pdb.set_trace()
                content = encode_content_data(json_data, mime_type=
                                              'application/json')
                message['content'].update(content)
            elif 'content' in message:
                msg_info = (time_info +
                            ' [{header[source]}->{header[target]}] '
                            '{content[command]}'.format(**message))
                data = decode_content_data(message)
                # HACK: same bare-except pdb debugging aid as above.
                try:
                    json_data = json.dumps(data, cls=PandasJsonEncoder)
                except:
                    import pdb; pdb.set_trace()
                content = encode_content_data(json_data, mime_type=
                                              'application/json')
                message['content'].update(content)
            else:
                msg_info = (time_info +
                            ' [{header[source]}->{header[target]}] '
                            '<{header[msg_type]}>'.format(**message))
            print msg_info
            # Relay the (possibly re-encoded) message to Socket.IO clients.
            sio.emit('zmq', message, namespace=namespace)
            start = arrow.now()
        except KeyboardInterrupt:
            IPython.embed()
def parse_args(args=None):
    """Parse command-line arguments for the plugin monitor process.

    Args:
        args: argument list to parse; defaults to ``sys.argv[1:]``.  Bug
            fix: the previous implementation accepted this parameter but
            ignored it and always re-read ``sys.argv``.

    Returns:
        ``argparse.Namespace`` with ``hub_uri``, ``name``, and a numeric
        ``log_level`` (resolved through the ``logging`` module).
    """
    from argparse import ArgumentParser

    if args is None:
        args = sys.argv[1:]
    parser = ArgumentParser(description='ZeroMQ Plugin process.')
    log_levels = ('critical', 'error', 'warning', 'info', 'debug', 'notset')
    parser.add_argument('-l', '--log-level', type=str, choices=log_levels,
                        default='info')
    parser.add_argument('hub_uri')
    parser.add_argument('name', type=str)
    args = parser.parse_args(args)
    # Map the textual level to its logging constant (e.g. 'debug' -> 10).
    args.log_level = getattr(logging, args.log_level.upper())
    return args
if __name__ == '__main__':
    args = parse_args()
    # Subscribe to every message on the hub (empty topic filter).
    plugin = Plugin(args.name, args.hub_uri, {zmq.SUBSCRIBE: ''})
    # NOTE(review): run_plugin's signature is (sio, plugin, log_level, ...);
    # this call omits `sio`, so `plugin` binds to `sio` and `args.log_level`
    # binds to `plugin` -- this would fail at runtime.  Confirm the intended
    # entry point / where the Socket.IO server is meant to come from.
    run_plugin(plugin, args.log_level)
|
[
"christian@fobel.net"
] |
christian@fobel.net
|
48653f7afa85e42005da9783c9c96c4c43582d04
|
a8c0867109974ff7586597fe2c58521277ab9d4d
|
/LC88.py
|
b314c31d431d89596bf96bcee8a1b853a8a3b00a
|
[] |
no_license
|
Qiao-Liang/LeetCode
|
1491b01d2ddf11495fbc23a65bb6ecb74ac1cee2
|
dbdb227e12f329e4ca064b338f1fbdca42f3a848
|
refs/heads/master
| 2023-05-06T15:00:58.939626
| 2021-04-21T06:30:33
| 2021-04-21T06:30:33
| 82,885,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """
        Merge sorted nums2 into sorted nums1 in place.

        nums1 has length >= m + n; its first m entries and all n entries of
        nums2 are sorted ascending.  Rewritten as the standard backwards
        three-pointer merge: O(m + n) instead of the original shift-based
        O(m * n), and without the leftover Python-2 debug `print`.

        :type nums1: List[int]
        :type m: int
        :type nums2: List[int]
        :type n: int
        :rtype: void Do not return anything, modify nums1 in-place instead.
        """
        i, j = m - 1, n - 1
        # Fill from the back so unread nums1 entries are never overwritten.
        for k in range(m + n - 1, -1, -1):
            if j < 0:
                break  # nums2 exhausted; nums1's remaining prefix is in place
            if i >= 0 and nums1[i] > nums2[j]:
                nums1[k] = nums1[i]
                i -= 1
            else:
                nums1[k] = nums2[j]
                j -= 1
nums1 = [1,2,3,0,0,0]
m = 3
nums2 = [2,5,6]
n = 3
# nums1 = [-1,0,0,3,3,3,0,0,0]
# m = 6
# nums2 = [1,2,2]
# n = 3
sol = Solution()
sol.merge(nums1, m, nums2, n)
|
[
"qiaoliang@Qiaos-MacBook-Pro.local"
] |
qiaoliang@Qiaos-MacBook-Pro.local
|
94a551407625272ce72f502bb937aa316c9dff30
|
9abc2f4fbf1b31b5a56507437b4a8d9c3f3db7e6
|
/movies/urls.py
|
5b2dfb674ea70f3227f015265650fdd1faabd2f2
|
[] |
no_license
|
odbalogun/ticketr
|
e9fe8461d66dabe395f0e1af8fbecc67dbb16e97
|
94f24c82f407f861f1614a151feb3fdd62b283e5
|
refs/heads/master
| 2022-11-30T22:40:30.931160
| 2019-08-09T14:34:38
| 2019-08-09T14:34:38
| 188,833,600
| 0
| 0
| null | 2022-11-22T03:50:30
| 2019-05-27T11:50:07
|
Python
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
from django.urls import path
from .views import MovieDetailView, MovieListView
app_name = 'movies'
urlpatterns = [
path('', MovieListView.as_view(), name='list'),
path('<pk>/', MovieDetailView.as_view(), name='detail'),
]
|
[
"oduntan@live.com"
] |
oduntan@live.com
|
ece1320d825400cb0e92383a86db4b36ec2f815f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03845/s420639446.py
|
2bb83b6030c44ccd6ef52dd5ec54eab56c4cdf2c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from sys import stdin
n = int(input())
t = list(map(int, input().split()))
m = int(input())
sum_t = sum(t)
ans = []
for _ in range(m):
p, x = map(int, stdin.readline().strip().split())
ans.append(sum_t - t[p-1] + x)
for i in ans:
print(i)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a85b28586d5142d231dad051aaec6bbade136a1b
|
b501a5eae1018c1c26caa96793c6ee17865ebb2d
|
/data_compression_and_archiving/gzip/gzip_seek.py
|
a927d0dc0cf73da48f9b2242a96373f98fa54184
|
[] |
no_license
|
jincurry/standard_Library_Learn
|
12b02f9e86d31ca574bb6863aefc95d63cc558fc
|
6c7197f12747456e0f1f3efd09667682a2d1a567
|
refs/heads/master
| 2022-10-26T07:28:36.545847
| 2018-05-04T12:54:50
| 2018-05-04T12:54:50
| 125,447,397
| 0
| 1
| null | 2022-10-02T17:21:50
| 2018-03-16T01:32:50
|
Python
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
import gzip
with gzip.open('example.txt.gz', 'rb') as input_file:
print('Entire file:')
all_data = input_file.read()
print(all_data)
expected = all_data[5:15]
input_file.seek(0)
input_file.seek(5)
print('Starting at position 5 for 10 bytes:')
partial = input_file.read(10)
print(partial)
print()
print(expected == partial)
|
[
"jintao422516@gmail.com"
] |
jintao422516@gmail.com
|
f0aefe5cd151f1d2f53ec1a7689f18e5fbc73561
|
36957a9ce540846d08f151b6a2c2d582cff1df47
|
/VR/Python/Python36/Lib/test/test_tcl.py
|
ba93edb736e64213e90fc8006166fdef79cc82e4
|
[] |
no_license
|
aqp1234/gitVR
|
60fc952307ef413e396d31e0d136faffe087ed2b
|
e70bd82c451943c2966b8ad1bee620a0ee1080d2
|
refs/heads/master
| 2022-12-29T15:30:12.540947
| 2020-10-07T15:26:32
| 2020-10-07T15:26:32
| 290,163,043
| 0
| 1
| null | 2020-08-25T09:15:40
| 2020-08-25T08:47:36
|
C#
|
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:5d2b96fe46f7dcde01e44c07777df5125ffa70a5ff4a41c8f3d9ac5b08420a16
size 30385
|
[
"aqp1234@naver.com"
] |
aqp1234@naver.com
|
fa8fee3c0516297125b7949177ef554e11e8ddc6
|
e9524305812608ae488884e5af133655f385a08a
|
/VueDjangoFrameWorkShop/settings.py
|
c2a37f3d41801705d17a8950fab2b5b60c610331
|
[
"MIT"
] |
permissive
|
GryffindorMuggle/python3.6-django2.1-django-rest-framework
|
66c43ba70477c1d099309c6a80d0b788d2636de3
|
4e21db7ce9eff77d030deb74de33189352010765
|
refs/heads/master
| 2020-03-26T13:48:52.823676
| 2018-08-16T08:33:24
| 2018-08-16T08:33:24
| 144,958,050
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,848
|
py
|
"""
Django settings for VueDjangoFrameWorkShop project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import datetime
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from past.builtins import execfile
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.insert(0,BASE_DIR)
sys.path.insert(0,os.path.join(BASE_DIR, 'apps'))
sys.path.insert(0,os.path.join(BASE_DIR, 'extra_apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y5yew=o5yey*9ydgt74-st11qkt$3n_i9r-c+aw$lt0%x3%a^)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# 设置邮箱和用户名和手机号均可登录
AUTHENTICATION_BACKENDS = (
'users.views.CustomBackend',
'social_core.backends.weibo.WeiboOAuth2',
'social_core.backends.qq.QQOAuth2',
'social_core.backends.weixin.WeixinOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# 此处重载是为了使我们的UserProfile生效
AUTH_USER_MODEL = "users.UserProfile"
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users.apps.UsersConfig',
'goods.apps.GoodsConfig',
'trade.apps.TradeConfig',
'user_operation.apps.UserOperationConfig',
'xadmin',
'crispy_forms',
'DjangoUeditor',
'rest_framework',
'django_filters',
'corsheaders',
'rest_framework.authtoken',
'social_django',
'raven.contrib.django.raven_compat',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
CORS_ORIGIN_WHITELIST = (
'127.0.0.1:3000'
)
ROOT_URLCONF = 'VueDjangoFrameWorkShop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'VueDjangoFrameWorkShop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'vue_shop',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
"OPTIONS": {"init_command": "SET default_storage_engine=INNODB;"}
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
# 语言改为中文
LANGUAGE_CODE = 'zh-hans'
# 时区改为上海
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
# 数据库存储使用时间,True时间会被存为UTC的时间
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# 设置上传文件,图片访问路径
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# 所有与drf相关的设置写在这里面
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle'
),
'DEFAULT_THROTTLE_RATES': {
'anon': '100/day',
'user': '1000/day'
}
}
# 与drf的jwt相关的设置
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=3600),
'JWT_AUTH_HEADER_PREFIX': 'Bearer',
}
# 手机号码正则表达式
REGEX_MOBILE = "^1[358]\d{9}$|^147\d{8}$|^176\d{8}$"
# 云片网设置
APIKEY = ''
# 缓存过期时间
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 15
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# 支付宝相关的key路径
private_key_path = os.path.join(BASE_DIR, 'apps/trade/keys/private_2048.txt')
ali_pub_key_path = os.path.join(BASE_DIR, 'apps/trade/keys/alipay_key_2048.txt')
# 第三方登录相关
SOCIAL_AUTH_WEIBO_KEY = 'foobar'
SOCIAL_AUTH_WEIBO_SECRET = 'bazqux'
SOCIAL_AUTH_QQ_KEY = 'foobar'
SOCIAL_AUTH_QQ_SECRET = 'bazqux'
SOCIAL_AUTH_WEIXIN_KEY = 'foobar'
SOCIAL_AUTH_WEIXIN_SECRET = 'bazqux'
# sentry设置
import os
import raven
RAVEN_CONFIG = {
'dsn': 'https://<key>:<secret>@sentry.io/<project>',
}
REMOTE_DEBUG = True
PROJECT_ROOT = os.path.join(BASE_DIR, 'VueDjangoFrameWorkShop')
if DEBUG and REMOTE_DEBUG:
try:
execfile(os.path.join(PROJECT_ROOT, 'dev_settings.py'))
except IOError:
pass
elif DEBUG:
try:
execfile(os.path.join(PROJECT_ROOT, 'local_settings.py'))
except IOError:
pass
else:
try:
execfile(os.path.join(PROJECT_ROOT, 'dev_settings.py'))
except IOError:
pass
|
[
"1147727180@qq.com"
] |
1147727180@qq.com
|
c1c531d2e16c942466ed4ed651adf50c6a1f6ed3
|
9d64a438cdfe4f3feb54f2f0dc7431139c4b9fb9
|
/type_converter/icon_type_converter/actions/string_to_boolean/action.py
|
fe85b8b0d3c9bb94d56678f388240e518fe856a5
|
[
"MIT"
] |
permissive
|
PhilippBehmer/insightconnect-plugins
|
5ad86faaccc86f2f4ed98f7e5d518e74dddb7b91
|
9195ddffc575bbca758180473d2eb392e7db517c
|
refs/heads/master
| 2021-07-25T02:13:08.184301
| 2021-01-19T22:51:35
| 2021-01-19T22:51:35
| 239,746,770
| 0
| 0
|
MIT
| 2020-02-11T11:34:52
| 2020-02-11T11:34:51
| null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
import insightconnect_plugin_runtime
from .schema import StringToBooleanInput, StringToBooleanOutput, Input, Output, Component
# Custom imports below
from insightconnect_plugin_runtime.exceptions import PluginException
class StringToBoolean(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='string_to_boolean',
description=Component.DESCRIPTION,
input=StringToBooleanInput(),
output=StringToBooleanOutput())
def run(self, params={}):
try:
return {
Output.OUTPUT: params.get(Input.INPUT).lower() == "true"
}
except Exception as e:
raise PluginException(
cause="Converting error.",
assistance="Check input",
data=e
)
|
[
"noreply@github.com"
] |
PhilippBehmer.noreply@github.com
|
b5f2f15ab28f76cf471a10e139fe25ecda72997b
|
489574745e7823d1dc22bda0676d6fa1b42ef547
|
/src/django_wools/templatetags/wools_for_wt_images.py
|
77493569dd7609d8a85ed03caa5f1fcbb2ef3fd6
|
[] |
no_license
|
juliengueperoux/django-wools
|
9419239b27170fc701708817f1c3e19c57edcf7c
|
d7a0dd98a873cb2d41a3b26d18ddd243fe6d22b6
|
refs/heads/master
| 2023-09-05T07:31:34.831561
| 2021-05-17T13:45:41
| 2021-05-17T13:45:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,162
|
py
|
import re
from enum import Enum
from typing import Iterator, NamedTuple, Optional
from django import template
from wagtail.images.models import AbstractImage
from ..settings import wool_settings
register = template.Library()
class WidthInfo(NamedTuple):
"""
Computed width from generate_widths()
"""
width: int
pixel_ratio: float
def generate_widths(spec_width) -> Iterator[WidthInfo]:
"""
Generates width and device pixel ratios based on settings bounds and the
initial spec width of the image.
Parameters
----------
spec_width
On-screen width of the image
"""
width = spec_width
max_width = spec_width * wool_settings.MAX_PIXEL_RATIO
for _ in range(0, 1000):
yield WidthInfo(width, round(float(width) / float(spec_width), 4))
if width > max_width:
return
width *= 1 + wool_settings.INCREMENT_STEP_PERCENT / 100.0
class WagtailSizeOperation(Enum):
"""
Allowed Wagtail operations
"""
max = "max"
min = "min"
width = "width"
fill = "fill"
class WagtailSizeSpec(NamedTuple):
"""
Parsed Wagtail size specification
"""
operation: WagtailSizeOperation
width: int
height: Optional[int]
zoom: int = 0
def __str__(self):
"""
Un-parses the string
"""
out = f"{self.operation.value}-{self.width}"
if self.height:
out += f"x{self.height}"
if self.zoom:
out += f"-c{self.zoom}"
return out
@classmethod
def parse(cls, spec) -> "WagtailSizeSpec":
"""
Parses a spec and returns the parsed tuple
"""
ops = "|".join(WagtailSizeOperation._member_names_) # noqa
exp = re.compile(
rf"(?P<op>{ops})-(?P<width>\d+)x(?P<height>\d+)?(-c(?P<zoom>\d+))?"
)
if not (m := exp.match(spec)):
raise ValueError(
f'Provided spec "{spec}" cannot be parsed. Please bear in '
f'mind that "scale" and "height" operations are not permitted '
f"since they do not have any width constraint."
)
return cls(
operation=WagtailSizeOperation(m.group("op")),
width=int(m.group("width")),
height=(int(m.group("height")) if m.group("height") else None),
zoom=(int(m.group("zoom")) if m.group("zoom") else 0),
)
def at_width(self, width: int) -> "WagtailSizeSpec":
"""
Returns a scaled version of this spec to fit the new width
"""
ratio = float(width) / float(self.width)
if self.height:
new_height = ratio * self.height
else:
new_height = None
return self._replace(height=round(new_height), width=round(width))
@register.inclusion_tag("wools/images/fixed_size.html")
def image_fixed_size(
image: AbstractImage,
spec: str,
css_class: str = "",
fallback_format: str = "png",
lossless: bool = False,
):
"""
This tag manages images whose size on screen stay the same and simply
needs larger images for larger pixel ratios.
Image will be encoded in WebP with a fallback of the choosing of the
caller, by default PNG to make sure to lose nothing (neither quality
neither alpha channel).
Parameters
----------
image
Original Wagtail image
spec
Wagtail size spec
css_class
CSS class that will be added to the root <picture> element
fallback_format
The format to use for browsers that do not support WebP
lossless
Enables lossless compression for WebP. If you want the fallback to also
be lossless, you need to use "png" as fallback_format.
"""
parsed_spec = WagtailSizeSpec.parse(spec)
if fallback_format not in {"png", "jpeg"}:
raise ValueError('Only "png" and "jpeg" are allowed as fallbacks')
if not isinstance(image, AbstractImage):
return {}
base_rendition = image.get_rendition(f"{spec}|format-{fallback_format}")
sources = {}
if lossless:
webp_format = "webp-lossless"
else:
webp_format = "webp"
for fmt in [webp_format, fallback_format]:
sources[fmt] = dict(set=[], base=image.get_rendition(f"{spec}|format-{fmt}"))
for width, density in generate_widths(parsed_spec.width):
if int(density) == density:
density = int(density)
rendition = image.get_rendition(
f"{parsed_spec.at_width(width)}|format-{fmt}"
)
sources[fmt]["set"].append(
dict(
rendition=rendition,
density=density,
string=f"{rendition.url} {density}x",
)
)
sources[fmt]["srcset"] = ", ".join(x["string"] for x in sources[fmt]["set"])
return dict(
base_url=base_rendition.url,
size=dict(width=base_rendition.width, height=base_rendition.height),
alt=image.default_alt_text,
sources=sources,
css_class=css_class,
)
|
[
"remy.sanchez@hyperthese.net"
] |
remy.sanchez@hyperthese.net
|
6edf5f48c2d80e7fa0fef3d643563c3a07612cb8
|
512b388a53022f561e2375b4621f78572d3b4f04
|
/catalogues/migrations/0021_identifications_campox.py
|
dd681d8898ea94145bf51e32bebc8937817497db
|
[] |
no_license
|
Madoka09/Worker15
|
006d5ac44dc55c3ae7f72d3b8300f3567395cdff
|
181012d309052b2df3d4ef99a197e8acef73a185
|
refs/heads/master
| 2023-03-24T05:29:02.060796
| 2021-03-16T21:56:21
| 2021-03-16T21:56:21
| 336,394,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
# Generated by Django 3.0.4 on 2020-11-23 20:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogues', '0020_auto_20201123_1401'),
]
operations = [
migrations.AddField(
model_name='identifications',
name='campox',
field=models.CharField(default='X', max_length=1),
),
]
|
[
"personal.galvan.francisco@gmail.com"
] |
personal.galvan.francisco@gmail.com
|
0e87d7a6cea6e6444077e686b495635459e2db8c
|
33febf8b617ef66d7086765f1c0bf6523667a959
|
/test/automatic/density.py
|
a584970a7d4dc681c0c7d31e59502b15bc8a5b87
|
[] |
no_license
|
JonasRSV/probpy
|
857201c7f122461463b75d63e5c688e011615292
|
5203063db612b2b2bc0434a7f2a02c9d2e27ed6a
|
refs/heads/master
| 2022-07-07T06:17:44.504570
| 2020-04-15T14:52:20
| 2020-04-15T14:52:20
| 245,820,195
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
import unittest
import time
import numpy as np
from probpy.distributions import normal
import numba
from probpy.density import UCKD, RCKD, URBK
from probpy.sampling import fast_metropolis_hastings
from probpy.search import search_posterior_estimation
from probpy.distributions import normal, exponential, jit
from probpy.learn.posterior.common import jit_log_probabilities
def distribution(x):
return 0.3333 * normal.p(x, -2, 1) + 0.3333 * normal.p(x, 2, 0.2) + 0.3333 * normal.p(x, 4, 0.2)
def log_distribution(x):
return np.log(0.3333 * normal.p(x, -2, 1) + 0.3333 * normal.p(x, 2, 0.2) + 0.3333 * normal.p(x, 4, 0.2))
class AutomaticDensityTest(unittest.TestCase):
def test_running_uckd(self):
timestamp = time.time()
samples = fast_metropolis_hastings(5000, distribution, initial=np.random.rand(10, 1), energy=1.0)
print("making samples", time.time() - timestamp)
density = UCKD(variance=5.0)
density.fit(samples)
lb, ub = -6, 6
n = 2000
x = np.linspace(lb, ub, n)
y = density.p(x)
y = y / (y.sum() / (n / (ub - lb)))
delta = (n / (ub - lb))
self.assertAlmostEqual(y.sum() / delta, 1, delta=0.1)
fast_p = density.get_fast_p()
fast_p(x) # is slower than normal p.. but numba need numba functions
def test_running_rckd(self):
timestamp = time.time()
samples = fast_metropolis_hastings(5000, distribution, initial=np.random.rand(50, 1), energy=1.0)
print("making samples", time.time() - timestamp)
density = RCKD(variance=5.0, error=0.001, verbose=True)
timestamp = time.time()
density.fit(samples)
print("fitting samples", time.time() - timestamp)
lb, ub = -6, 6
n = 2000
x = np.linspace(lb, ub, n)
print("x", len(x))
y = density.p(x)
delta = (n / (ub - lb))
self.assertAlmostEqual(y.sum() / delta, 1, delta=0.5)
fast_p = density.get_fast_p()
fast_p(x) # is slower than normal p.. but numba need numba functions
def test_running_urbk(self):
prior_rv = normal.med(mu=0.5, sigma=1.0)
n = normal.fast_p
prior = jit.jit_probability(prior_rv)
@numba.jit(fastmath=True, nopython=True, forceobj=False)
def likelihood(y, w):
return n(y - w, mu=0.0, sigma=1.0)
data = normal.sample(mu=3.0, sigma=1.0, size=100)
log_likelihood, log_prior = jit_log_probabilities((data,), likelihood, prior)
samples, densities = search_posterior_estimation(
size=300, log_likelihood=log_likelihood,
log_prior=log_prior,
initial=prior_rv.sample(size=10),
energy=0.1,
volume=100
)
density = URBK(variance=5.0, verbose=True)
density.fit(samples, densities)
lb, ub = -6, 6
n = 2000
x = np.linspace(lb, ub, n)
y = density.p(x)
self.assertEqual(y.size, 2000)
fast_p = density.get_fast_p()
fast_p(x) # is slower than normal p.. but numba need numba functions
if __name__ == '__main__':
unittest.main()
|
[
"jonas@valfridsson.net"
] |
jonas@valfridsson.net
|
162735502472a321677a2df6fbcd5c3543c436a1
|
868e94b7121b1f4c27fdf1d5ff88cb2fa6786e47
|
/polling_stations/apps/data_finder/urls.py
|
189b4d82cd5de12b0d179d75ff039766677fb31d
|
[] |
no_license
|
JoeMitchell/UK-Polling-Stations
|
df0ebc2343a9b18928263a60be5718f08588782e
|
861157b431f14eb24fdd17fb7380ac5e90d03a65
|
refs/heads/master
| 2021-01-24T04:35:49.749879
| 2016-02-29T11:55:46
| 2016-02-29T11:55:46
| 52,805,675
| 1
| 0
| null | 2016-02-29T16:27:05
| 2016-02-29T16:27:05
| null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.decorators.cache import cache_page
from constituencies.views import ConstituencyList, ConstituencyView
urlpatterns = patterns(
'',
url(r'^/$', cache_page(60*60)(ConstituencyList.as_view()), name='constituencies'),
# url(r'^/notspots/', view_not_spots, name='constituency_notspots'),
url(r'^/(?P<pk>[^/]+)(?:/(?P<ignored_slug>.*))?$',
cache_page(60 * 60 * 24)(ConstituencyView.as_view()),
name='constituency-view'),
)
|
[
"sym.roe@talusdesign.co.uk"
] |
sym.roe@talusdesign.co.uk
|
ea4e5b13a18d090736df358341d6202a061d1db4
|
612325535126eaddebc230d8c27af095c8e5cc2f
|
/src/build/android/gyp/touch.py
|
d3a8735504614a4f317a231b2d10a4808cf31e0c
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/proto-quic_1V94
|
1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673
|
feee14d96ee95313f236e0f0e3ff7719246c84f7
|
refs/heads/master
| 2023-04-01T14:36:53.888576
| 2019-10-17T02:23:04
| 2019-10-17T02:23:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from util import build_utils
def main(argv):
for f in argv[1:]:
build_utils.Touch(f)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[
"2100639007@qq.com"
] |
2100639007@qq.com
|
aefb6400427a8ca905416089a51c9b81d336cb16
|
7701773efa258510951bc7d45325b4cca26b3a7d
|
/tkinter_explore/positionAppWindow.py
|
f479edd5f1b498b414abe22911f53753c94429d3
|
[] |
no_license
|
Archanciel/explore
|
c170b2c8b5eed0c1220d5e7c2ac326228f6b2485
|
0576369ded0e54ce7ff9596ec4df076e69067e0c
|
refs/heads/master
| 2022-06-17T19:15:03.647074
| 2022-06-01T20:07:04
| 2022-06-01T20:07:04
| 105,314,051
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
from tkinter import *
import tkinter.messagebox as msgb
root = Tk() # create a Tk root window
w = 600 # width for the Tk root
h = 300 # height for the Tk root
# get screen width and height
ws = root.winfo_screenwidth() # width of the screen
hs = root.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
x = (ws - w) / 2
y = (hs - h) / 2
# set the dimensions of the screen
# and where it is placed
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
root.title('Order medicaments')
def on_sendMail():
global entryform
strng = entryform.get()
if strng == '1':
msgb.showinfo(message='You typed 1') # modif 3
else:
msgb.showinfo(message='Please type 1') # modif 4
entryform = Entry(root)
entryform.pack()
sendmail = Button(root, text="Send mail", command=on_sendMail)
sendmail.pack(side=BOTTOM)
root.mainloop() # starts the mainloop
|
[
"jp.schnyder@gmail.com"
] |
jp.schnyder@gmail.com
|
ecd2bf9d8a8924b7839511254ca2f355886cbded
|
85601d534fbcc6df900af7509c189075a3112422
|
/src/aulas/03_desafio/datas.py
|
2c90d82c0273c05fb57f2b3dcbc61d6150d19ba8
|
[] |
no_license
|
claudimf/python_oo_1
|
a58229abe5dc8a784e887ca94168bcdbbfc0f6ef
|
1749456f709b850f5340adecd8b47f860184ca5a
|
refs/heads/main
| 2023-03-22T14:17:16.232643
| 2021-03-09T20:14:23
| 2021-03-09T20:14:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
class Data:
def __init__(self, dia, mes, ano):
print("Construindo objeto...{}".format(self))
self.dia = dia
self.mes = mes
self.ano = ano
def formatada(self):
data = "{}/{}/{}".format(self.dia, self.mes, self.ano)
print(data)
return data
|
[
"claudi.freitas.prs@synergiaconsultoria.com.br"
] |
claudi.freitas.prs@synergiaconsultoria.com.br
|
540ec371e20da9710f8b7d384d2aebac11bd4566
|
5d2d214fff5892d381d0328bca3db04b14e358fb
|
/final_design/tests/led_button/test.py
|
6de08985003f3434fd31485b02d71f28402fba87
|
[
"MIT"
] |
permissive
|
DFEC-R2D2/r2d2
|
9552705188ed6e3d8c144881eb7c9ddfacfd8072
|
9b64233865ebfe9f0ca3f1b400b55cc8d6494adf
|
refs/heads/master
| 2021-01-20T13:27:36.953410
| 2018-10-30T21:37:50
| 2018-10-30T21:37:50
| 90,496,130
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 823
|
py
|
#!/usr/bin/env python
from __future__ import print_function
from time import sleep
import numpy as np
import sys
sys.path.append('../../python')
from library import ButtonLED
if __name__ == "__main__":
button = ButtonLED(16,26,20)
try:
while True:
choice = input("Enter LED color:\n0-Off\n1-Red\n2-Green\n3-Blue\n4-Quit\n>>")
if choice == 0:
button.setRGB(False, False, False)
elif choice == 1:
button.setRGB(True, False, False)
elif choice == 2:
button.setRGB(False, True, False)
elif choice == 3:
button.setRGB(False, False, True)
elif choice == 4:
break
except KeyboardInterrupt:
print("ctl-c")
button.setRGB(False, False, False)
|
[
"walchko@users.noreply.github.com"
] |
walchko@users.noreply.github.com
|
b0a5d2a9d7f6eb5e27604901ec38a320cfa7aed6
|
fe39d984440b0dbb612b9d3e20b93a5e795ebefa
|
/part-1/py-10-recursia/r_04.py
|
7aa71334dc89df0abd03f6784bd5e568ee65376f
|
[] |
no_license
|
maximkavm/py-algorithmization-and-programming
|
f7d6b1b13d2557511aaccec6d9b006ac871c5477
|
c1d8228601c3cea17e11e6a2736659ef395f675d
|
refs/heads/master
| 2023-08-14T09:11:02.433801
| 2021-03-01T19:30:26
| 2021-03-01T19:30:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
'''
найти минимальный элемент списка
'''
def get(lst):
if len(lst) == 1:
return lst[0]
else:
# return lst[0] if lst[0] < get(lst[1:]) else get(lst[1:]) # дублирование вызова = это плохо
tmp = get(lst[1:])
return lst[0] if lst[0] < tmp else tmp # нет дублирования
lst = [8, 4, 6, 5, -7, 9]
# lst = [] # нет проверки на дурака
print(get(lst))
|
[
"ttxiom@gmail.com"
] |
ttxiom@gmail.com
|
838da3ee7f8801a79052070457d4856066a9b52b
|
ced56909016fb7c2175c3911fc8481bd5fdf0800
|
/pytext/contrib/pytext_lib/resources/models.py
|
da9e9a7457bed0a62a52c84eb1994d4f3eb04695
|
[
"BSD-3-Clause"
] |
permissive
|
coderbyr/pytext
|
e258a3aae625e6a2fd386b60f25ac44a7b4149fe
|
72c1ad835a30bef425494b02a6210f2e3232b1a4
|
refs/heads/master
| 2022-11-20T09:11:44.991716
| 2020-07-20T22:05:42
| 2020-07-20T22:07:15
| 281,286,078
| 1
| 0
|
NOASSERTION
| 2020-07-21T03:32:42
| 2020-07-21T03:32:41
| null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
ROBERTA_BASE_TORCH = "roberta_base_torch"
ROBERTA_PUBLIC = "roberta_public"
XLMR_BASE = "xlmr_base"
XLMR_DUMMY = "xlmr_dummy"
URL = {
ROBERTA_BASE_TORCH: "https//dl.fbaipublicfiles.com/pytext/models/roberta/roberta_base_torch.pt", # noqa
ROBERTA_PUBLIC: "https//dl.fbaipublicfiles.com/pytext/models/roberta/roberta_public.pt1", # noqa
XLMR_BASE: "https://dl.fbaipublicfiles.com/pytext/models/xlm_r/checkpoint_base_1500k.pt", # noqa
XLMR_DUMMY: "https://dl.fbaipublicfiles.com/pytext/models/xlm_r/xlmr_dummy.pt", # noqa
}
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
f8e579e2133a16d5c607a651e58a7752082a7494
|
2324dea2cb3003c8ab7e8fd80588d44973eb8c77
|
/Euler_5_251b.py
|
e0502f1b9bb84fe6bcae764690f07d67cf20cb16
|
[] |
no_license
|
MikeOcc/MyProjectEulerFiles
|
5f51bc516cb6584732dc67bb2f9c7fd9e6d51e56
|
4d066d52380aade215636953589bf56d6b88f745
|
refs/heads/master
| 2021-01-16T18:45:44.133229
| 2015-05-27T18:28:43
| 2015-05-27T18:28:43
| 5,876,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
#
# Euler Problem 251
#
#
#
from math import *
from time import time
from itertools import combinations
from operator import mul
def RetFact(n):
#from time import time
#st = time()
ndiv = n
factlist=[ ]
ctr = 2
while ndiv >1:
#temp = ndiv
if (ndiv)%(ctr)==0:
factlist.append(ctr)
ndiv /= (ctr)
else:
ctr +=1
#print "process time",time()-st
return factlist
st = time()
ctr = 0
lim = 10000# 110000000
maxi,maxj,maxk=0,0,0
numturns =2000 # 60000
print "Cardano Numbers for limit with turns:", lim, numturns
for i in xrange(1,numturns):
a = i * 3 -1
f = (i**2) * (8*i -3)
if f< lim:
b=1;c=f
if a + b + c > lim:continue
ctr+=1
#print ctr,")",i,":",a, b,c,":",a+b+c,f
L = RetFact(f)
#M= L #remove
S = sorted(set(L))
P=[]
for k in S:
x=L.count(k)
if (x/2)>=1:
P.append(x/2)
else:
L.remove(k)
S = sorted(set(L))
L=list(S)
#print L
for m in xrange(len( P)):
if P[m]>1:
#print "P",P[m]
for j in xrange(1,P[m]):
#print i,j,S[m]
L.append(S[m])
R=[]
for jj in range(1, len(L)+1):
for subset in combinations(L, jj):
#print(subset)
R.append(reduce(mul,subset))
R=list(set(R))
R=sorted(R)
for j in R:
#print "J",j,R,f,M,S
if f/float(j*j) == int( f/j/j):
b = j; c = f/(j*j)
if a + b + c > lim:continue
ctr+=1
#print ctr,")",i,":",a, b,c,":",a+b+c,f
print "Number of Cardano Triplets <=",lim, "is", ctr
print "Process time is", time()-st
|
[
"mike.occhipinti@mlsassistant.com"
] |
mike.occhipinti@mlsassistant.com
|
c10f65b33d4bbfd4a45b433b2db3adb4e1b984bb
|
3d8b4e0415bd4d818c17dcf0b3fc1676caad47b6
|
/examples/switch/lan_host.py
|
9d44db8a2e8d2843b1dd1a31f598978dc82fb447
|
[
"MIT"
] |
permissive
|
ShigemoriHakura/NintendoClients
|
68c08ab2f54e36fb41eab2b2461bedd5bd1522be
|
da56e272f8504bcc8e5238128d63ab6ee44fa2c7
|
refs/heads/master
| 2022-03-09T19:20:05.647911
| 2022-03-01T23:35:01
| 2022-03-01T23:35:01
| 252,926,498
| 0
| 1
|
MIT
| 2022-03-01T23:35:02
| 2020-04-04T06:30:32
| null |
UTF-8
|
Python
| false
| false
| 1,256
|
py
|
from nintendo.games import Splatoon2
from nintendo.pia import lan, settings, types
from nintendo.nex import common
import secrets
import random
import anyio
import math
import logging
logging.basicConfig(level=logging.INFO)
SESSION_ID = random.randint(1, 0xFFFFFFFF)
HOST_ID = random.randint(1, 0xFFFFFFFFFFFFFFFF)
HOST_NAME = "Yannik"
def handler():
host_address = types.StationLocation()
host = lan.LanStationInfo()
host.role = lan.LanStationInfo.HOST
host.username = HOST_NAME
host.id = HOST_ID
session = lan.LanSessionInfo()
session.game_mode = 0
session.session_id = SESSION_ID
session.attributes = [0, 0, 0, 0, 0, 0]
session.num_participants = 1
session.min_participants = 1
session.max_participants = 10
session.system_version = 5
session.application_version = 66
session.session_type = 0
session.application_data = HOST_NAME.encode("utf-16le").ljust(74, b"\0")
session.is_opened = True
session.host_location = host_address
session.stations[0] = host
session.session_param = secrets.token_bytes(32)
return [session]
async def main():
	"""Serve a LAN session advertisement forever (until interrupted)."""
	s = settings.default(Splatoon2.PIA_VERSION)
	# lan.serve answers browse requests with the sessions returned by handler.
	async with lan.serve(s, handler, Splatoon2.PIA_KEY):
		print("LAN server is running...")
		# Keep the server alive indefinitely; Ctrl+C cancels the sleep.
		await anyio.sleep(math.inf)
anyio.run(main)
|
[
"ymarchand@me.com"
] |
ymarchand@me.com
|
574ff80f57a6e8ad726a37f117325a4420e6af8d
|
31945f926e766e2a30ec8cfa720c32a943d84cea
|
/tests/data/test_split.py
|
f661482dece4d010326141046042b736a2003ba6
|
[
"MIT"
] |
permissive
|
mcorniere/rxsci
|
3854129bbf1af727b40cfbc2cb4dedaa27bc8e5f
|
7c2bf433a760d4ecab8aee56227803a190458fbe
|
refs/heads/master
| 2023-02-10T05:56:03.607556
| 2021-01-05T14:41:09
| 2021-01-05T14:41:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
import rx
import rx.operators as ops
import rxsci as rs
def test_split():
    """rs.data.split must partition a stream on a key and wrap each run in mux events.

    Every source item ends with a letter that serves as the split key; each
    run of equal keys is expected to be re-emitted between OnCreateMux and
    OnCompletedMux events on the inner observable, while the outer stream
    passes all events through unchanged.
    """
    # Removed two unused locals from the original: a plain-string `source`
    # list that was immediately shadowed, and an `expected_result` list that
    # no assertion referenced.
    source = [
        rs.OnCreateMux((1, None)),
        rs.OnNextMux((1, None), '1a'),
        rs.OnNextMux((1, None), '2a'),
        rs.OnNextMux((1, None), '3b'),
        rs.OnNextMux((1, None), '4b'),
        rs.OnNextMux((1, None), '5c'),
        rs.OnNextMux((1, None), '6c'),
        rs.OnNextMux((1, None), '7c'),
        rs.OnNextMux((1, None), '8d'),
        rs.OnNextMux((1, None), '9d'),
        rs.OnCompletedMux((1, None)),
    ]
    actual_result = []
    mux_actual_result = []

    def on_next(i):
        actual_result.append(i)

    store = rs.state.StoreManager(store_factory=rs.state.MemoryStore)
    rx.from_(source).pipe(
        rs.cast_as_mux_observable(),
        rs.state.with_store(
            store,
            rs.data.split(lambda i: i[-1], rx.pipe(
                ops.do_action(mux_actual_result.append),
            )),
        ),
    ).subscribe(on_next)

    # The first inner event probes the state topology before any data flows.
    assert type(mux_actual_result[0]) is rs.state.ProbeStateTopology
    assert mux_actual_result[1:] == [
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), '1a', store),
        rs.OnNextMux((1, (1, None)), '2a', store),
        rs.OnCompletedMux((1, (1, None)), store),
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), '3b', store),
        rs.OnNextMux((1, (1, None)), '4b', store),
        rs.OnCompletedMux((1, (1, None)), store),
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), '5c', store),
        rs.OnNextMux((1, (1, None)), '6c', store),
        rs.OnNextMux((1, (1, None)), '7c', store),
        rs.OnCompletedMux((1, (1, None)), store),
        rs.OnCreateMux((1, (1, None)), store),
        rs.OnNextMux((1, (1, None)), '8d', store),
        rs.OnNextMux((1, (1, None)), '9d', store),
        rs.OnCompletedMux((1, (1, None)), store),
    ]
    # The outer observable must forward every source event untouched.
    assert actual_result == source
|
[
"romain.picard@oakbits.com"
] |
romain.picard@oakbits.com
|
cc35a48b6c83a8e7b4678d1337571699e26f2e26
|
8acbd7fcfe1bcf94e4e895e58ac5c81f8ed13741
|
/fees/migrations/0001_initial.py
|
4d2a79465920cf7560126359a04d00bea99675fb
|
[] |
no_license
|
Rajangupta09/School-beta
|
440af5d5d078a46036cfa3c50865f980c5ff1ace
|
3ca6ca9992d2b47bcfe1762beb8c88609d519ea5
|
refs/heads/master
| 2022-12-07T19:42:19.562804
| 2020-08-04T09:53:04
| 2020-08-04T09:53:04
| 284,509,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
# Generated by Django 3.0.3 on 2020-05-16 14:24
from django.db import migrations, models
import django.db.models.deletion
import fees.models
class Migration(migrations.Migration):
    """Initial schema for the fees app (auto-generated by Django 3.0.3).

    Creates FeeCategory, FeeCycle, Fine, FeeDiscount, Fee and
    ClassSectionFees.  Several models hold foreign keys into the classform
    app, hence the dependency on its initial migration.
    """
    # NOTE(review): generated file — field names mirror the models (mixed
    # camelCase/snake_case); do not edit a committed migration by hand.

    initial = True

    dependencies = [
        ('classform', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='FeeCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('once', models.BooleanField(default=True)),
                ('submission_type', models.CharField(max_length=50)),
                ('description', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='FeeCycle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lastSubmissionDate', models.DateField()),
                ('firstSubmissionDate', models.DateField()),
                ('cycle', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Fine',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fine', models.IntegerField()),
                ('category', models.CharField(max_length=100)),
                ('submissionDate', models.DateField(null=True)),
                ('description', models.TextField()),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoomStudent')),
            ],
        ),
        migrations.CreateModel(
            name='FeeDiscount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('discount', models.IntegerField()),
                ('category', models.CharField(max_length=50)),
                ('description', models.TextField()),
                ('total_off', models.IntegerField()),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoomStudent')),
            ],
        ),
        migrations.CreateModel(
            name='Fee',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('regNo', models.IntegerField()),
                ('payment_method', models.CharField(max_length=50)),
                ('submissionDate', models.DateField()),
                ('amount', models.IntegerField()),
                ('monthsPaid', models.CharField(max_length=50)),
                ('feeSlip', models.FileField(upload_to=fees.models.user_directory_path)),
                ('classSection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoom')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoomStudent')),
            ],
        ),
        migrations.CreateModel(
            name='ClassSectionFees',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fees', models.IntegerField()),
                ('classSection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='classform.ClassRoom')),
                ('feeCategory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='fees.FeeCategory')),
            ],
        ),
    ]
|
[
"rjnkumar05@gmail.com"
] |
rjnkumar05@gmail.com
|
609d8b4f4188c8ac49f0d80a912a0960915ab279
|
ab221e6778959a17a40585defdcf17b2ebf34908
|
/SpellingBeePlay.py
|
fad7da80b2739504abe8ad2716ca673962296735
|
[] |
no_license
|
Mystified131/NYTImesGamesSolutions
|
f1e6a0905120764e9079666e6b59aa4201bf3270
|
349118cd1784888177b7475bcc7bd3b8ea20ba62
|
refs/heads/master
| 2023-03-23T17:02:25.341274
| 2021-03-07T09:02:13
| 2021-03-07T09:02:13
| 287,679,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,519
|
py
|
import enchant
from itertools import permutations
import datetime
import random
from subprocess import call
right_now = datetime.datetime.now().isoformat()
tlist = []
for i in right_now:
if i.isnumeric():
tlist.append(i)
tim = ("".join(tlist))
d = enchant.Dict("en_US")
conlst = ["B", "C", "D", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "V", "W", "X", "Z"]
vowlst = ["A", "E", "I", "O", "U", "Y"]
sublst = []
for x in range(26):
ctr = random.randrange(10)
if ctr < 5:
vch = random.randrange(6)
letch = vowlst[vch]
if ctr > 4:
cch = random.randrange(20)
letch = conlst[cch]
if letch not in sublst:
sublst.append(letch)
oplst = []
for x1 in range(7):
astr = sublst[x1]
oplst.append(astr)
x2 = random.randrange(7)
keyletr = oplst[x2]
biglst = []
for elem in oplst:
for x in range(2):
biglst.append(elem)
print("")
print(oplst)
print("")
print("The key letter is: ", keyletr)
print("")
print("This may take a moment, as a puzzle and results are generated.")
print("")
#print("Here are the solutions, if there are any:")
#print("")
#print("Check for modern parlance. Some may not appear in NY Times word list:")
#print("")
wdlst = []
worm = list(permutations(biglst, 4))
for elem in worm:
astr = ""
for wor in elem:
astr += wor
if d.check(astr) and astr not in wdlst and keyletr in astr:
print("Generating possible answers- ", len(wdlst))
print("")
wdlst.append(astr)
worm = []
worn = list(permutations(biglst, 5))
for elem in worn:
astr = ""
for wor in elem:
astr += wor
if d.check(astr) and astr not in wdlst and keyletr in astr:
print("Generating possible answers- ", len(wdlst))
print("")
wdlst.append(astr)
worn = []
#woro = list(permutations(biglst, 6))
#for elem in woro:
#astr = ""
#for wor in elem:
#astr += wor
#if d.check(astr) and astr not in wdlst and keyletr in astr:
#print("Generating possible answers- ", len(wdlst))
#print("")
#wdlst.append(astr)
#woro = []
#worp = list(permutations(biglst, 7))
#for elem in worp:
#astr = ""
#for wor in elem:
#astr += wor
#if d.check(astr) and astr not in wdlst and keyletr in astr:
#print("Generating possible answers- ", len(wdlst))
#print("")
#wdlst.append(astr)
#worp = []
#print("")
#print(wdlst)
#print("")
titstr = "SpellingBeeCreation." + tim + ".txt"
outfile = open(titstr, "w")
outfile.write ("A Tenable Spelling Bee Creation." + '\n' )
outfile.write ("" + '\n' )
outfile.write ("Time: " + tim + '\n')
outfile.write ("" + '\n' )
for elem in oplst:
outfile.write(elem + ",")
outfile.write('\n')
outfile.write (keyletr + '\n' )
outfile.write ("" + '\n' )
for elem in wdlst:
outfile.write(elem + '\n')
outfile.close()
print("")
print("See the answers document in the same folder as your code.")
print("")
pts = 0  # running score for the interactive game below
if len(wdlst) == 0:
    print("")
    print("Trying again!")
    print("")
    # No valid words were generated: re-run the whole script in a subprocess.
    # NOTE(review): execution continues past this point after the child exits;
    # presumably an explicit exit was intended here — confirm.
    call(["python", "SpellingBeePlay.py"])
# Main game loop: the player guesses words until quitting or emptying the list.
for ctr in range(1000):
    print("")
    print("Here are the letter possibilities: ", oplst)
    print("")
    print("Here is the key letter, to be in each guess at least once: ", keyletr)
    print("")
    anstr = input("Please enter a guess, 4 or 5 total letters: ")
    guess = anstr.upper()
    if guess in wdlst:
        print("")
        print("You got one!")
        scr = len(anstr)  # score one point per letter of the guess
        pts += scr
        print("")
        print("You get ", scr, " points added for a total of ", pts, " total points.")
        print("")
        wdlst.remove(guess)
    else:
        print("")
        print("That guess does not match any of our saved words, or is a repeat. Check that only given letters appear, and that the key letter is there.")
        print("")
        print("We also may miss some words.")
        print("")
        print("Your current score is: ", pts)
        print("")
    qtstr = input("Please press q to quit, or anything else to continue: ")
    # BUG FIX: the original tested `len(wdlst == 0)`, which calls len() on a
    # bool and raises TypeError; the intended check is an empty word list.
    if qtstr == "q" or len(wdlst) == 0:
        break
print("")
print("You have indicated q, or the list of words is finished.")
print("")
print("Your final score is: ", pts)
print("")
print("Thanks for playing!")
print("")
## THE GHOST OF THE SHADOW ##
|
[
"mystifiedthomas@gmail.com"
] |
mystifiedthomas@gmail.com
|
779f6b0f802f2a2118bacba5ad0e88316ae714b0
|
d58bc2475a41e7c36e22947565c099908f84cfd6
|
/samples/openapi3/client/petstore/python-experimental/petstore_api/paths/store_inventory/get.py
|
76bb8cb65578587169f50f0290859796eaadb706
|
[
"Apache-2.0"
] |
permissive
|
yaronius/openapi-generator
|
d8390dc2cfd9330d3f05a1f517612d793e332ead
|
9f3fac53c1689b82bf4c99b664e10e4a5decfb8e
|
refs/heads/master
| 2022-11-03T02:27:44.670087
| 2022-08-17T12:17:30
| 2022-08-17T12:17:30
| 175,407,506
| 0
| 0
|
Apache-2.0
| 2023-09-04T20:41:29
| 2019-03-13T11:30:05
|
Java
|
UTF-8
|
Python
| false
| false
| 5,532
|
py
|
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
import functools # noqa: F401
from urllib3._collections import HTTPHeaderDict
from petstore_api import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
NoneClass,
BoolClass,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
from . import path
_auth = [
'api_key',
]
class SchemaFor200ResponseBodyApplicationJson(
    DictSchema
):
    """Schema for the 200 response body: a map of status name -> int32 count."""
    # Any key is accepted; each value must validate as an int32.
    _additional_properties = Int32Schema

    def __new__(
        cls,
        *args: typing.Union[dict, frozendict, ],
        _configuration: typing.Optional[Configuration] = None,
        **kwargs: typing.Type[Schema],
    ) -> 'SchemaFor200ResponseBodyApplicationJson':
        """Validate *args against this schema and return the typed instance."""
        return super().__new__(
            cls,
            *args,
            _configuration=_configuration,
            **kwargs,
        )
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    """Deserialized 200 response: raw urllib3 response plus the typed body."""
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor200ResponseBodyApplicationJson,
    ]
    # This endpoint defines no response headers, so headers stay unset.
    headers: Unset = unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
    """Shared implementation of the GET /store/inventory call."""

    def _get_inventory(
        self: api_client.Api,
        # Annotation corrected: a variadic tuple of content types, not a 1-tuple.
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization
    ]:
        """
        Returns pet inventories by status
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        used_path = path.value

        _headers = HTTPHeaderDict()
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)

        # Delegate transport (auth, retries, streaming) to the shared client.
        response = self.api_client.call_api(
            resource_path=used_path,
            method='get'.upper(),
            headers=_headers,
            auth_settings=_auth,
            stream=stream,
            timeout=timeout,
        )

        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Pick the typed deserializer for this status code, if one exists.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)

        # Non-2xx responses are surfaced as exceptions carrying the response.
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)

        return api_response
class GetInventory(BaseApi):
    # this class is used by api classes that refer to endpoints with operationId fn names
    def get_inventory(
        self: BaseApi,
        # Annotation corrected: a variadic tuple of content types, not a 1-tuple.
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization
    ]:
        """Return pet inventories by status; thin wrapper over _get_inventory."""
        return self._get_inventory(
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForget(BaseApi):
    # this class is used by api classes that refer to endpoints by path and http method names
    def get(
        self: BaseApi,
        # Annotation corrected: a variadic tuple of content types, not a 1-tuple.
        accept_content_types: typing.Tuple[str, ...] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization
    ]:
        """HTTP-method-named alias for GET /store/inventory."""
        return self._get_inventory(
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
|
[
"noreply@github.com"
] |
yaronius.noreply@github.com
|
8b51d93cbec4433a5ff954290538addaaeac3696
|
4bc2af514877135a222826b2c5ac48632045f2fa
|
/django/juziom/userauth/form.py
|
d1f13721d408eb598e6074dd8485a31d79aa2690
|
[] |
no_license
|
18734865664/python
|
1853481ac1dcd515f691cfc11557f76fbbb083de
|
25bc355ddb2abefc5a3736fb99e6345138ebbefc
|
refs/heads/master
| 2020-03-17T09:37:57.469741
| 2018-06-28T08:41:37
| 2018-06-28T08:41:37
| 133,482,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
#coding:utf8
from django import forms
from models import *
class LoginForm(forms.Form):
    """Login form: username and password with Bootstrap styling."""
    username = forms.CharField(max_length=255, widget=forms.TextInput(attrs={'class': 'form-control','placeholder':u'用户名'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control','placeholder':u'密码'}))
class ChangepwdForm(forms.Form):
    """Password-change form: the old password plus the new password twice."""
    oldpassword = forms.CharField(
        required=True,
        label=u"原密码",
        error_messages={'required': u'请输入原密码'},
        widget=forms.PasswordInput(
            attrs={
                'placeholder':u"原密码",
            }
        ),
    )
    newpassword1 = forms.CharField(
        required=True,
        label=u"新密码",
        error_messages={'required': u'请输入新密码'},
        widget=forms.PasswordInput(
            attrs={
                'placeholder':u"新密码",
            }
        ),
    )
    newpassword2 = forms.CharField(
        required=True,
        label=u"确认密码",
        error_messages={'required': u'请再次输入新密码'},
        widget=forms.PasswordInput(
            attrs={
                'placeholder':u"确认密码",
            }
        ),
    )
    def clean(self):
        """Cross-field validation: all fields present and new passwords match."""
        if not self.is_valid():
            raise forms.ValidationError(u"所有项都为必填项")
        # Fixed: the Python-2-only `<>` operator is a syntax error on Python 3;
        # `!=` behaves identically on both.
        elif self.cleaned_data['newpassword1'] != self.cleaned_data['newpassword2']:
            raise forms.ValidationError(u"两次输入的新密码不一样")
        else:
            cleaned_data = super(ChangepwdForm, self).clean()
        return cleaned_data
|
[
"you@example.com"
] |
you@example.com
|
cb7986e98a23496bf14138ac78df7fe003dfd9e5
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/deployment_operation_properties_py3.py
|
5f38d5397daca470b2e2a824ae06d5fcf2d20902
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,927
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DeploymentOperationProperties(Model):
    """Deployment operation properties.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar provisioning_state: The state of the provisioning.
    :vartype provisioning_state: str
    :ivar timestamp: The date and time of the operation.
    :vartype timestamp: datetime
    :ivar service_request_id: Deployment operation service request id.
    :vartype service_request_id: str
    :ivar status_code: Operation status code.
    :vartype status_code: str
    :ivar status_message: Operation status message.
    :vartype status_message: object
    :ivar target_resource: The target resource.
    :vartype target_resource:
     ~azure.mgmt.resource.resources.v2016_09_01.models.TargetResource
    :ivar request: The HTTP request message.
    :vartype request:
     ~azure.mgmt.resource.resources.v2016_09_01.models.HttpMessage
    :ivar response: The HTTP response message.
    :vartype response:
     ~azure.mgmt.resource.resources.v2016_09_01.models.HttpMessage
    """

    # Every property is server-populated; msrest drops read-only attributes
    # when serializing a request body.
    _validation = {
        'provisioning_state': {'readonly': True},
        'timestamp': {'readonly': True},
        'service_request_id': {'readonly': True},
        'status_code': {'readonly': True},
        'status_message': {'readonly': True},
        'target_resource': {'readonly': True},
        'request': {'readonly': True},
        'response': {'readonly': True},
    }

    # Maps Python attribute names to their REST wire names and msrest types.
    _attribute_map = {
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
        'service_request_id': {'key': 'serviceRequestId', 'type': 'str'},
        'status_code': {'key': 'statusCode', 'type': 'str'},
        'status_message': {'key': 'statusMessage', 'type': 'object'},
        'target_resource': {'key': 'targetResource', 'type': 'TargetResource'},
        'request': {'key': 'request', 'type': 'HttpMessage'},
        'response': {'key': 'response', 'type': 'HttpMessage'},
    }

    def __init__(self, **kwargs) -> None:
        """Initialize all read-only properties to None; the server fills them in."""
        super(DeploymentOperationProperties, self).__init__(**kwargs)
        self.provisioning_state = None
        self.timestamp = None
        self.service_request_id = None
        self.status_code = None
        self.status_message = None
        self.target_resource = None
        self.request = None
        self.response = None
|
[
"noreply@github.com"
] |
xiafu-msft.noreply@github.com
|
53a93667c441c6cbc84bc7b42295359c9203f3ab
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Logit/trend_PolyTrend/cycle_7/ar_12/test_artificial_1024_Logit_PolyTrend_7_12_20.py
|
356fc68289ce4b41a07aca9517fc5a4f10b4de0d
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art

# Auto-generated benchmark case: a 1024-point daily series with a polynomial
# trend, cycle length 7, Logit transform, no noise (sigma=0.0), 20 exogenous
# variables and AR order 12.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
c78f25abc910c9fb03d1dc449d75d91bae270bc1
|
0fb0dba210ff0f63515c464d7acc95ae32d7603c
|
/File Operations/Hide a File or Folder/hide-a-file-or-folder.py
|
4bfe8e23f58b91670e5d641e1f6a6d46d39681d6
|
[] |
no_license
|
slad99/pythonscripts
|
7cbe6b8bb27c8c06e140c46e7c8cf286cbc56d8e
|
4e0ebb023899a602cb041ef6f153fd3b7ab032e9
|
refs/heads/master
| 2022-01-04T21:49:10.486758
| 2019-06-28T14:29:28
| 2019-06-28T14:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
PATH =r'C:\Users\ac\Desktop\AdobeAIRInstaller.exe' # give the path of the file or folder to hidden
import ctypes
class disable_file_system_redirection:
    """Context manager that temporarily disables WOW64 file-system redirection.

    On 64-bit Windows, 32-bit processes get accesses to System32 silently
    redirected; this wrapper turns that off for the duration of the `with`
    block so the real paths are used.
    """
    _disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
    _revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
    def __enter__(self):
        # old_value receives the opaque redirection state to restore on exit.
        self.old_value = ctypes.c_long()
        self.success = self._disable(ctypes.byref(self.old_value))
    def __exit__(self, type, value, traceback):
        # Only revert if the disable call reported success.
        if self.success:
            self._revert(self.old_value)
import os
from subprocess import Popen, PIPE
if os.path.exists(PATH):
    with disable_file_system_redirection():
        # Set the Hidden attribute via the Windows `attrib` command.
        # NOTE(review): shell=True with an unquoted PATH breaks on paths that
        # contain spaces and is shell-injection prone — prefer a list argv.
        OBJ = Popen('attrib +H '+PATH, stdin = PIPE, stdout = PIPE, stderr = PIPE, shell=True)
        RES = OBJ.communicate()  # (stdout, stderr) after the process exits
        RET = OBJ.returncode
        if RET == 0:
            print PATH+' is hidden successfully'
        else:
            print RES[1]
else:
    print '1: Sorry! Given path is not available.'
|
[
"noreply@github.com"
] |
slad99.noreply@github.com
|
48db028ef8f7d1a4e3cb06f35a3d87d695fad4f9
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipaySecurityProdFacePayResponse.py
|
1fe1ab1693136b166a60fe8ae2da572173c1432e
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 652
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipaySecurityProdFacePayResponse(AlipayResponse):
    """Response for alipay.security.prod.face.pay, exposing one field ``aa``."""

    def __init__(self):
        super(AlipaySecurityProdFacePayResponse, self).__init__()
        self._aa = None

    @property
    def aa(self):
        return self._aa

    @aa.setter
    def aa(self, value):
        self._aa = value

    def parse_response_content(self, response_content):
        # Let the base class decode the payload, then pick out our field
        # if present (EAFP form of the original membership check).
        response = super(AlipaySecurityProdFacePayResponse, self).parse_response_content(response_content)
        try:
            self.aa = response['aa']
        except KeyError:
            pass
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
d15d0cc4b11412348704da95bb09c8c43cc6c08d
|
2ed86a79d0fcd299ad4a01310954c5eddcf01edf
|
/tests/components/devolo_home_network/test_init.py
|
99b6053e1bac778817fc9ab95799930fd4813530
|
[
"Apache-2.0"
] |
permissive
|
konnected-io/home-assistant
|
037f12c87bb79e19220192eb918e49db1b1a8b3e
|
2e65b77b2b5c17919939481f327963abdfdc53f0
|
refs/heads/dev
| 2023-05-11T08:57:41.891518
| 2023-05-07T20:03:37
| 2023-05-07T20:03:37
| 109,931,626
| 24
| 10
|
Apache-2.0
| 2023-02-22T06:24:01
| 2017-11-08T05:27:21
|
Python
|
UTF-8
|
Python
| false
| false
| 4,125
|
py
|
"""Test the devolo Home Network integration setup."""
from unittest.mock import patch
from devolo_plc_api.exceptions.device import DeviceNotFound
import pytest
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.button import DOMAIN as BUTTON
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.devolo_home_network.const import DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import CONF_IP_ADDRESS, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import async_get_platforms
from . import configure_integration
from .const import IP
from .mock import MockDevice
from tests.common import MockConfigEntry
@pytest.mark.usefixtures("mock_device")
async def test_setup_entry(hass: HomeAssistant) -> None:
"""Test setup entry."""
entry = configure_integration(hass)
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
), patch("homeassistant.core.EventBus.async_listen_once"):
assert await hass.config_entries.async_setup(entry.entry_id)
assert entry.state is ConfigEntryState.LOADED
@pytest.mark.usefixtures("mock_device")
async def test_setup_without_password(hass: HomeAssistant) -> None:
"""Test setup entry without a device password set like used before HA Core 2022.06."""
config = {
CONF_IP_ADDRESS: IP,
}
entry = MockConfigEntry(domain=DOMAIN, data=config)
entry.add_to_hass(hass)
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
), patch("homeassistant.core.EventBus.async_listen_once"):
assert await hass.config_entries.async_setup(entry.entry_id)
assert entry.state is ConfigEntryState.LOADED
async def test_setup_device_not_found(hass: HomeAssistant) -> None:
    """Test that setup enters SETUP_RETRY when the device is unreachable."""
    entry = configure_integration(hass)
    with patch(
        "homeassistant.components.devolo_home_network.Device.async_connect",
        side_effect=DeviceNotFound(IP),
    ):
        await hass.config_entries.async_setup(entry.entry_id)

        assert entry.state is ConfigEntryState.SETUP_RETRY
@pytest.mark.usefixtures("mock_device")
async def test_unload_entry(hass: HomeAssistant) -> None:
"""Test unload entry."""
entry = configure_integration(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
await hass.config_entries.async_unload(entry.entry_id)
assert entry.state is ConfigEntryState.NOT_LOADED
async def test_hass_stop(hass: HomeAssistant, mock_device: MockDevice) -> None:
    """Test that the device connection is closed on the Home Assistant stop event."""
    entry = configure_integration(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
    await hass.async_block_till_done()
    # The integration must disconnect exactly once when HA shuts down.
    mock_device.async_disconnect.assert_called_once()
@pytest.mark.parametrize(
    ("device", "expected_platforms"),
    [
        ["mock_device", (BINARY_SENSOR, BUTTON, DEVICE_TRACKER, SENSOR, SWITCH)],
        ["mock_repeater_device", (BUTTON, DEVICE_TRACKER, SENSOR, SWITCH)],
        ["mock_nonwifi_device", (BINARY_SENSOR, BUTTON, SENSOR, SWITCH)],
    ],
)
async def test_platforms(
    hass: HomeAssistant,
    device: str,
    # Annotation corrected: the parametrize values above are tuples, not sets.
    expected_platforms: tuple[str, ...],
    request: pytest.FixtureRequest,
) -> None:
    """Test that each device type assembles exactly the expected platforms."""
    # Resolve the device fixture by name so one body covers all variants.
    request.getfixturevalue(device)
    entry = configure_integration(hass)
    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    platforms = [platform.domain for platform in async_get_platforms(hass, DOMAIN)]
    assert len(platforms) == len(expected_platforms)
    assert all(platform in platforms for platform in expected_platforms)
|
[
"noreply@github.com"
] |
konnected-io.noreply@github.com
|
a0974ac4da7ff77096e13917b61406bc2ef64c90
|
2dd26e031162e75f37ecb1f7dd7f675eeb634c63
|
/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py
|
9fe64c1105b8b5680b9561071435c59c048f0156
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NeMo
|
1b001fa2ae5d14defbfd02f3fe750c5a09e89dd1
|
c20a16ea8aa2a9d8e31a98eb22178ddb9d5935e7
|
refs/heads/main
| 2023-08-21T15:28:04.447838
| 2023-08-21T00:49:36
| 2023-08-21T00:49:36
| 200,722,670
| 7,957
| 1,986
|
Apache-2.0
| 2023-09-14T18:49:54
| 2019-08-05T20:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 6,403
|
py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to after google_data_preprocessing_before_alignment.py
to obtain separate "parallel" corpora for each semiotic class.
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`).
Then there will a folder named `en_with_types`.
3. Run python google_data_preprocessing_before_alignment.py
which will produce a file data.tsv in its --output-dir
4. [Optional]. sort -u and rewrite data.tsv
5. Clone https://github.com/moses-smt/giza-pp.git, run "make" from its root folder.
6. Run this script
python ${NEMO}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py \
--data_dir=<--output-dir from the previous step> \
--out_dir=<destination directory for giza alignment folders> \
--giza_dir=/.../giza-pp/GIZA++-v2 \
--mckls_binary=/.../giza-pp/mkcls-v2/mkcls \
--lang={en,ru}
Each corpus will be stored within <--data-dir> in the subdirectory with the name of the semiotic class,
containing files ready to be fed to Giza++:
src - written form, tokenized as characters
dst - spoken form, tokenized as words
run.sh - script for running Giza++
"""
from argparse import ArgumentParser
from collections import Counter
from os import listdir, mkdir
from os.path import isdir, join
from shutil import rmtree
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import get_src_and_dst_for_alignment
# Command-line interface: paths to the preprocessed data, the output folder
# for per-class GIZA++ corpora, the GIZA++/mkcls binaries, and the language.
parser = ArgumentParser(description='Split corpus to subcorpora for giza alignment')
parser.add_argument('--data_dir', type=str, required=True, help='Path to folder with data')
parser.add_argument('--out_dir', type=str, required=True, help='Path to output folder')
parser.add_argument('--giza_dir', type=str, required=True, help='Path to folder with GIZA++ binaries')
parser.add_argument('--mckls_binary', type=str, required=True, help='Path to mckls binary')
parser.add_argument('--lang', type=str, required=True, help='Language')
args = parser.parse_args()
def prepare_subcorpora_from_data() -> None:
    """Preprocess a corpus in Google TN Dataset format, extract TN-ITN phrase pairs, prepare input for GIZA++ alignment.

    Reads every file under ``<args.data_dir>/train`` and ``<args.data_dir>/dev``,
    groups (written, spoken) pairs by semiotic class, and writes into
    ``<args.out_dir>/<class>``: ``src``/``dst`` parallel files, a ``freq`` file
    with pair counts, and a ``run.sh`` script that invokes GIZA++ in both
    alignment directions.
    """
    semiotic_vcb = Counter()  # number of instances seen per semiotic class
    cache_vcb = {}  # semiotic class -> Counter of (src, dst) pairs
    filenames = []
    for fn in listdir(args.data_dir + "/train"):
        filenames.append(args.data_dir + "/train/" + fn)
    for fn in listdir(args.data_dir + "/dev"):
        filenames.append(args.data_dir + "/dev/" + fn)
    for fn in filenames:
        with open(fn, "r", encoding="utf-8") as f:
            # Loop through each line of the file; expected format is
            # "<semiotic class>\t<written>\t<spoken>".
            for line in f:
                parts = line.strip().split("\t")
                if len(parts) < 3:
                    continue
                if len(parts) != 3:
                    raise ValueError("Expect 3 parts, got " + str(len(parts)))
                semiotic_class, written, spoken = parts[0], parts[1].strip(), parts[2].strip()
                if spoken == "<self>":
                    # "<self>" marks tokens that are not normalized; nothing to align.
                    continue
                semiotic_class = semiotic_class.casefold()
                semiotic_vcb[semiotic_class] += 1
                classdir = join(args.out_dir, semiotic_class)
                if not isdir(classdir):
                    mkdir(classdir)
                src, dst, _, _ = get_src_and_dst_for_alignment(semiotic_class, written, spoken, args.lang)
                if src == "" or dst == "":
                    continue
                if len(src.split(" ")) >= 100:
                    # Skip extremely long sequences; GIZA++ handles them poorly.
                    continue
                if semiotic_class not in cache_vcb:
                    cache_vcb[semiotic_class] = Counter()
                cache_vcb[semiotic_class][(src, dst)] += 1
    for sem in semiotic_vcb:
        classdir = join(args.out_dir, sem)
        if not isdir(classdir):
            raise ValueError("No such directory: " + classdir)
        print(classdir, " has ", semiotic_vcb[sem], " instances")
        with open(join(classdir, "run.sh"), "w") as out:
            out.write("GIZA_PATH=\"" + args.giza_dir + "\"\n")
            out.write("MKCLS=\"" + args.mckls_binary + "\"\n")
            out.write("\n")
            out.write("${GIZA_PATH}/plain2snt.out src dst\n")
            out.write("${MKCLS} -m2 -psrc -c15 -Vsrc.classes opt >& mkcls1.log\n")
            out.write("${MKCLS} -m2 -pdst -c15 -Vdst.classes opt >& mkcls2.log\n")
            out.write("${GIZA_PATH}/snt2cooc.out src.vcb dst.vcb src_dst.snt > src_dst.cooc\n")
            out.write(
                "${GIZA_PATH}/GIZA++ -S src.vcb -T dst.vcb -C src_dst.snt -coocurrencefile src_dst.cooc -p0 0.98 -o GIZA++ >& GIZA++.log\n"
            )
            out.write("##reverse direction\n")
            out.write("${GIZA_PATH}/snt2cooc.out dst.vcb src.vcb dst_src.snt > dst_src.cooc\n")
            out.write(
                "${GIZA_PATH}/GIZA++ -S dst.vcb -T src.vcb -C dst_src.snt -coocurrencefile dst_src.cooc -p0 0.98 -o GIZA++reverse >& GIZA++reverse.log\n"
            )
        # Context managers replace the previous manual open/close calls so the
        # files are closed even if a write fails mid-way.
        with open(join(classdir, "src"), 'w', encoding="utf-8") as out_src, open(
            join(classdir, "dst"), 'w', encoding="utf-8"
        ) as out_dst, open(join(classdir, "freq"), 'w', encoding="utf-8") as out_freq:
            for src, dst in cache_vcb[sem]:
                freq = cache_vcb[sem][(src, dst)]
                out_src.write(src + "\n")
                out_dst.write(dst + "\n")
                out_freq.write(str(freq) + "\n")
# Main code
if __name__ == '__main__':
    # Remove per-class subdirectories left over from a previous run so that
    # stale corpora are never mixed with fresh output.
    for name in listdir(args.out_dir):
        path = join(args.out_dir, name)
        if isdir(path):
            rmtree(path)
    # Processing
    prepare_subcorpora_from_data()
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
caf65928426dfc47a7b74d4a1e41cd9f7f7b4eb7
|
844bec60f1a769b2a5e68e4beeb6531edc3ce4e3
|
/my_instagram/member/tests.py
|
d76a21e9183a029a7c58801d925daa20cceb0141
|
[] |
no_license
|
shoark7/my-instagram
|
04796a8c263009ef9128ec788ce8991c417fa918
|
42d1b43645c142c9d7f8c6df31865877ea1873a8
|
refs/heads/master
| 2020-12-24T07:53:07.248369
| 2016-11-15T06:36:44
| 2016-11-15T06:36:44
| 73,359,919
| 0
| 0
| null | 2016-11-13T09:05:29
| 2016-11-10T08:05:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
from django.test import TestCase, LiveServerTestCase
from .models import MyUser
# Create your tests here.
class FollowTest(LiveServerTestCase):
    """Tests for the MyUser follow relationship.

    The original tests only printed querysets and could never fail; they now
    assert the expected state of ``follow_users`` and ``followers``.
    """

    def create_user(self, username, last_name, first_name):
        """Create and return a MyUser with the given name fields."""
        return MyUser.objects.create_user(
            username=username,
            last_name=last_name,
            first_name=first_name,
        )

    def test_create_user(self):
        """Users can be created and are persisted."""
        u1 = self.create_user('u1', '방', '민아')
        u2 = self.create_user('u2', 'dl', 'ㅇ하녕')
        u3 = self.create_user('u3', '박', '성환')
        for user in (u1, u2, u3):
            self.assertTrue(MyUser.objects.filter(username=user.username).exists())

    def test_follow_user(self):
        """follow() adds the target to follow_users and the reverse followers set."""
        u1 = self.create_user('u1', '방', '민아')
        u2 = self.create_user('u2', 'dl', 'ㅇ하녕')
        u3 = self.create_user('u3', '박', '성환')
        u2.follow(u1)
        u3.follow(u2)
        u3.follow(u1)
        self.assertIn(u1, u2.follow_users.all())
        self.assertIn(u1, u3.follow_users.all())
        self.assertIn(u2, u3.follow_users.all())
        self.assertIn(u2, u1.followers.all())
        self.assertIn(u3, u1.followers.all())
|
[
"shoark7@gmail.com"
] |
shoark7@gmail.com
|
8eaf470b58b0830ec791b74d821eb954a7fd1a02
|
dbfdbe3c1d5e3ad38625d8c971fe8dd45c8c3885
|
/device_agent/snmp/libs/pysnmp-4.4.5/examples/smi/agent/custom-managed-object.py
|
0b175193bfa76c391ed6cdfa364259d6af77d5df
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
fyfdoc/IntegrateTest
|
a58f6d0ea7cff5f67d79d7e042c0bb39c6b8bbbb
|
0d8374406c10c313d6627699879215841e0ebdb6
|
refs/heads/master
| 2022-12-03T02:32:37.388556
| 2019-01-25T02:36:42
| 2019-01-25T02:36:42
| 167,468,256
| 0
| 1
| null | 2022-11-29T20:58:41
| 2019-01-25T01:59:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,604
|
py
|
"""
Implementing MIB objects
++++++++++++++++++++++++
This script explains how SNMP Agent application could model
real-world data as Managed Objects defined in MIB.
"""#
from pysnmp.smi import builder
# MIB Builder is normally pre-created by SNMP engine
mibBuilder = builder.MibBuilder()
#
# This may be done in a stand-alone file and then loaded up
# by SNMP Agent
#
# A base class for a custom Managed Object
MibScalarInstance, = mibBuilder.importSymbols(
'SNMPv2-SMI', 'MibScalarInstance'
)
# Managed object specification
sysLocation, = mibBuilder.importSymbols('SNMPv2-MIB', 'sysLocation')
# Custom Managed Object
class MySysLocationInstance(MibScalarInstance):
# noinspection PyUnusedLocal
def readGet(self, name, *args):
# Just return a custom value
return name, self.syntax.clone('The Leaky Cauldron')
sysLocationInstance = MySysLocationInstance(
sysLocation.name, (0,), sysLocation.syntax
)
# Register Managed Object with a MIB tree
mibBuilder.exportSymbols(
# '__' prefixed MIB modules take precedence on indexing
'__MY-LOCATION-MIB', sysLocationInstance=sysLocationInstance
)
if __name__ == '__main__':
#
# This is what is done internally by Agent.
#
from pysnmp.smi import instrum, exval
mibInstrum = instrum.MibInstrumController(mibBuilder)
print('Remote manager read access to MIB instrumentation (table walk)')
oid, val = (), None
while 1:
oid, val = mibInstrum.readNextVars(((oid, val),))[0]
if exval.endOfMib.isSameTypeWith(val):
break
print(oid, val.prettyPrint())
|
[
"fengyanfeng@datangmobile.cn"
] |
fengyanfeng@datangmobile.cn
|
95206566ebb073185d5e386f1509f946bc331050
|
e953c138d3808d92fcc9848824985be5bc42f034
|
/python/multiprocessing/lock.py
|
bb10bc3b3357cb248b6dc23f5ba3a856a0415d2b
|
[] |
no_license
|
hotoku/samples
|
1cf3f7006ae8ba9bae3a52113cdce6d1e1d32c5a
|
ce0d95d87e08386d9eb83d7983bd2eaff0682793
|
refs/heads/main
| 2023-08-09T09:05:15.185012
| 2023-08-04T09:29:06
| 2023-08-04T09:29:06
| 222,609,036
| 0
| 0
| null | 2022-03-30T01:44:03
| 2019-11-19T04:35:27
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 536
|
py
|
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
import time
import multiprocessing
def do_f(x, y):
    """Simulate slow work: log the call with a timestamp, sleep 3 s, return x + y."""
    print(f"{datetime.now()}: do_f x={x}, y={y}")
    # The sleep makes lock serialization observable in the printed timestamps.
    time.sleep(3)
    return x + y
def f(x, y, lock):
    """Worker entry point: run do_f(x, y) while holding `lock`.

    Only one process at a time executes do_f, which shows up as 3-second
    gaps between the do_f timestamps in the output.
    """
    print(f"{datetime.now()}: f x={x}, y={y}")
    lock.acquire()
    try:
        return do_f(x, y)
    finally:
        lock.release()
# The __main__ guard is required for multiprocessing: worker processes
# re-import this module, and an unguarded Manager/ProcessPoolExecutor at
# import time would spawn workers recursively (an error with the "spawn"
# start method, the default on Windows and macOS).
if __name__ == "__main__":
    m = multiprocessing.Manager()
    # A Manager proxy lock can be pickled and shared with pool workers,
    # unlike a plain multiprocessing.Lock passed through submit().
    lock = m.Lock()
    with ProcessPoolExecutor(max_workers=4) as ex:
        fs = [
            ex.submit(f, i, 1, lock) for i in range(3)
        ]
        # Renamed the loop variable (was `f`) so it no longer shadows the
        # worker function's name.
        res = [fut.result() for fut in fs]
    print(res)
|
[
"hotoku@users.noreply.github.com"
] |
hotoku@users.noreply.github.com
|
5bfb68f5064b121eb578adf71745af4cd3d8ac5e
|
6c9ed1bbf924591b9044ddcba03dee701ba39b2b
|
/recordwhat/records/seq.py
|
c1c9bf568716b234c6400657038caeed66d2edde
|
[
"BSD-3-Clause"
] |
permissive
|
klauer/recordwhat
|
986b1a8cd0d57902f25fb09573e96823bffd333e
|
2921a9852361cc682ec8441bb5f4cc54b6af6e80
|
refs/heads/master
| 2022-02-03T15:36:27.648497
| 2022-01-25T22:56:58
| 2022-01-25T22:56:58
| 51,451,764
| 2
| 4
|
BSD-3-Clause
| 2022-01-25T22:56:59
| 2016-02-10T15:56:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,605
|
py
|
from ophyd import (EpicsSignal, EpicsSignalRO)
from .. import (RecordBase, _register_record_type,
FieldComponent as Cpt)
@_register_record_type('seq')
class SeqRecord(RecordBase):
    """Ophyd mapping for the EPICS ``seq`` (sequence) record type.

    Each FieldComponent exposes one record field (e.g. ``.DLY1``) as a signal.
    Link fields carry a trailing ``$`` with ``string=True`` — presumably the
    EPICS long-string access mode for links longer than 40 characters; TODO
    confirm against the other record classes in this package.
    """
    alarm_status = Cpt(EpicsSignalRO, '.STAT')  # read-only alarm status
    # Constant inputs DO1..DOA (EPICS uses the letter A for the 10th slot).
    constant_input_1 = Cpt(EpicsSignal, '.DO1')
    constant_input_10 = Cpt(EpicsSignal, '.DOA')
    constant_input_2 = Cpt(EpicsSignal, '.DO2')
    constant_input_3 = Cpt(EpicsSignal, '.DO3')
    constant_input_4 = Cpt(EpicsSignal, '.DO4')
    constant_input_5 = Cpt(EpicsSignal, '.DO5')
    constant_input_6 = Cpt(EpicsSignal, '.DO6')
    constant_input_7 = Cpt(EpicsSignal, '.DO7')
    constant_input_8 = Cpt(EpicsSignal, '.DO8')
    constant_input_9 = Cpt(EpicsSignal, '.DO9')
    link_selection = Cpt(EpicsSignal, '.SELN')
    # - display
    display_precision = Cpt(EpicsSignal, '.PREC')
    # - inputs
    link_selection_loc = Cpt(EpicsSignal, '.SELL$', string=True)
    select_mechanism = Cpt(EpicsSignal, '.SELM')
    # - seq1
    delay_1 = Cpt(EpicsSignal, '.DLY1')
    delay_2 = Cpt(EpicsSignal, '.DLY2')
    delay_3 = Cpt(EpicsSignal, '.DLY3')
    input_link_2 = Cpt(EpicsSignal, '.DOL2$', string=True)
    input_link_3 = Cpt(EpicsSignal, '.DOL3$', string=True)
    # NOTE(review): name lacks an underscore (cf. input_link_2); renaming
    # would break existing callers, so it is only flagged here.
    input_link1 = Cpt(EpicsSignal, '.DOL1$', string=True)
    output_link_1 = Cpt(EpicsSignal, '.LNK1$', string=True)
    output_link_2 = Cpt(EpicsSignal, '.LNK2$', string=True)
    output_link_3 = Cpt(EpicsSignal, '.LNK3$', string=True)
    # - seq2
    delay_4 = Cpt(EpicsSignal, '.DLY4')
    delay_5 = Cpt(EpicsSignal, '.DLY5')
    delay_6 = Cpt(EpicsSignal, '.DLY6')
    input_link_4 = Cpt(EpicsSignal, '.DOL4$', string=True)
    input_link_5 = Cpt(EpicsSignal, '.DOL5$', string=True)
    input_link_6 = Cpt(EpicsSignal, '.DOL6$', string=True)
    output_link_4 = Cpt(EpicsSignal, '.LNK4$', string=True)
    output_link_5 = Cpt(EpicsSignal, '.LNK5$', string=True)
    output_link_6 = Cpt(EpicsSignal, '.LNK6$', string=True)
    # - seq3
    delay_10 = Cpt(EpicsSignal, '.DLYA')
    delay_7 = Cpt(EpicsSignal, '.DLY7')
    delay_8 = Cpt(EpicsSignal, '.DLY8')
    delay_9 = Cpt(EpicsSignal, '.DLY9')
    input_link_10 = Cpt(EpicsSignal, '.DOLA$', string=True)
    input_link_7 = Cpt(EpicsSignal, '.DOL7$', string=True)
    input_link_8 = Cpt(EpicsSignal, '.DOL8$', string=True)
    input_link_9 = Cpt(EpicsSignal, '.DOL9$', string=True)
    output_link_10 = Cpt(EpicsSignal, '.LNKA$', string=True)
    output_link_7 = Cpt(EpicsSignal, '.LNK7$', string=True)
    output_link_8 = Cpt(EpicsSignal, '.LNK8$', string=True)
    output_link_9 = Cpt(EpicsSignal, '.LNK9$', string=True)
|
[
"klauer@bnl.gov"
] |
klauer@bnl.gov
|
c9e6afdec8ce261e8a2f43fc4b54c4dbdb5e3542
|
0547c3ebab814e3fdf2616ae63f8f6c87a0ff6c5
|
/374.guess-number-higher-or-lower.py
|
700808b1ba1e1ec628739b2454d3092046d932a9
|
[] |
no_license
|
livepo/lc
|
b8792d2b999780af5d5ef3b6050d71170a272ca6
|
605d19be15ece90aaf09b994098716f3dd84eb6a
|
refs/heads/master
| 2020-05-15T03:57:15.367240
| 2019-07-30T03:11:46
| 2019-07-30T03:11:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
class Solution(object):
    def guessNumber(self, n):
        """Find the picked number in [1, n] via binary search over the guess() API.

        guess(num) returns -1 if the picked number is lower than num,
        1 if it is higher, and 0 on an exact match (see the header comment).

        :type n: int
        :rtype: int
        """
        lo, hi = 1, n
        while lo <= hi:
            mid = (lo + hi) // 2
            verdict = guess(mid)
            if verdict == 0:
                return mid
            if verdict < 0:
                # Picked number is lower: discard the upper half.
                hi = mid - 1
            else:
                # Picked number is higher: discard the lower half.
                lo = mid + 1
|
[
"qgmfky@gmail.com"
] |
qgmfky@gmail.com
|
fe57d823e3f211eb951f7d95c44156eae836df34
|
34ffb243303300ccb233dc8394c78d6cb1212127
|
/registration/tests.py
|
a785686bfbf3b73736e23c595b6775a5b2005a25
|
[] |
no_license
|
Larrygf02/webplayground
|
139955880e9e1a935cf7fbc89df9f1ebf45009f0
|
09579f3705e74ddd3380d905f5e0b8df0f93032a
|
refs/heads/master
| 2020-04-28T01:01:36.451838
| 2019-03-10T14:59:48
| 2019-03-10T14:59:48
| 174,838,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
from django.test import TestCase
from .models import Profile
from django.contrib.auth.models import User
# Create your tests here.
class ProfileTestCase(TestCase):
    """Verify that a Profile exists for a newly created User.

    NOTE(review): this presumes a signal (or similar hook) creates the Profile
    on user creation — confirm against the registration app's models/signals.
    """

    def setUp(self):
        User.objects.create_user('test', 'test@test.com', 'test1234')

    def test_profile_exists(self):
        exists = Profile.objects.filter(user__username='test').exists()
        # assertTrue replaces the deprecated assertEquals alias, which was
        # removed from unittest in Python 3.12.
        self.assertTrue(exists)
|
[
"raulgf_02@hotmail.com"
] |
raulgf_02@hotmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.