blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
673b277e8c1eb534425768123f79ddd2505204f1
|
925f2935b34042abc9161795413031ae68f45b9a
|
/multimodel_inference/fold_IMisc2sm.py
|
291a17afa018e415d33f158ff6cce511b0ce18fa
|
[] |
no_license
|
Farhad63/AFS-analysis-with-moments
|
7e1d17f47c06ed97ebb7c9ec8245fe52a88622c3
|
7874b1085073e5f62d910ef2d79a22b29ff3be84
|
refs/heads/master
| 2022-04-09T22:11:12.341235
| 2020-03-11T21:15:42
| 2020-03-11T21:15:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,074
|
py
|
#!/usr/bin/env python
# split into two diff sizes, then secondary contact with instantaneous size change followed by growth, with symmetric migration
# "genomic islands" of two different migration regimes
# n(para): 12
import matplotlib
matplotlib.use('PDF')
import moments
import random
import pylab
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from moments import Misc,Spectrum,Numerics,Manips,Integration,Demographics1D,Demographics2D
import sys
# --- command-line arguments -------------------------------------------------
# argv: [1]=dadi-format SNP data file, [2],[3]=population labels,
#       [4],[5]=projection sizes, [6]=mutation rate, [7]=generation time
infile=sys.argv[1]
pop_ids=[sys.argv[2],sys.argv[3]]
projections=[int(sys.argv[4]),int(sys.argv[5])]
#params=[float(sys.argv[6]),float(sys.argv[7]),float(sys.argv[8]),float(sys.argv[9]),float(sys.argv[10]),float(sys.argv[11]),float(sys.argv[12]),float(sys.argv[13])]
# starting values for the 11 model parameters (randomly perturbed before
# optimization further below)
params=array([1,1,1,1,1,1,1,1,1,1,0.5])
# mutation rate per sequenced portion of genome per generation: for A.millepora, 0.02
mu=float(sys.argv[6])
# generation time, in thousand years: 0.005 (5 years)
gtime=float(sys.argv[7])
#infile="5kA3_dadi.data"
#pop_ids=["W","K"]
#projections=[32,38]
# build the folded (polarized=False) joint site-frequency spectrum
dd = Misc.make_data_dict(infile)
data = Spectrum.from_data_dict(dd, pop_ids,projections,polarized=False)
ns=data.sample_sizes
np.set_printoptions(precision=3)
#-------------------
# split with growth and asymmetrical migration; with genomic islands
def IM2iSC(params, ns):
    """
    Split + secondary-contact model with two migration regimes
    ("genomic islands"), matching the header comments of this script.

    params:
        nua, nub       -- population sizes during the initial isolation period T0
        nu1_0, nu2_0   -- sizes at the start of the contact period
        nu1, nu2       -- final sizes (exponential change over time T)
        T              -- duration of the migration (contact) period
        T0             -- duration of the initial no-migration period
        m, mi          -- symmetric migration rates of the two genomic regimes
        P              -- proportion of the genome evolving under rate mi
    ns: sample sizes for the two populations.
    Returns the expected SFS as a P/(1-P) mixture of the two regimes.
    """
    nua,nub,nu1_0,nu2_0,nu1,nu2,T,T0,m,mi,P = params
    # exponential size trajectories from nu*_0 toward nu* over time T
    nu1_func = lambda t: nu1_0 * (nu1/nu1_0)**(t/T)
    nu2_func = lambda t: nu2_0 * (nu2/nu2_0)**(t/T)
    nu_func = lambda t: [nu1_func(t), nu2_func(t)]
    # regime 1: split, isolation for T0, then contact with migration rate m
    sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
    fs = moments.Spectrum(sts)
    fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
    fs.integrate([nua, nub], T0, m = np.array([[0, 0], [0, 0]]))
    fs.integrate(nu_func, T, dt_fac=0.01, m=np.array([[0, m], [m, 0]]))
    # regime 2: identical history but migration rate mi ("islands")
    stsi = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
    fsi = moments.Spectrum(stsi)
    fsi = moments.Manips.split_1D_to_2D(fsi, ns[0], ns[1])
    fsi.integrate([nua, nub], T0, m = np.array([[0, 0], [0, 0]]))
    fsi.integrate(nu_func, T, dt_fac=0.01, m=np.array([[0, mi], [mi, 0]]))
    # mix the two regimes in proportion P
    fs2=P*fsi+(1-P)*fs
    return fs2
func=IM2iSC
# optimizer bounds, in the same order as the params tuple in IM2iSC
upper_bound = [100,100,100,100,100, 100, 10,10, 200,200,0.9999]
lower_bound = [1e-3,1e-3,1e-3,1e-3,1e-3,1e-3, 1e-3,1e-3,1e-5,1e-5,1e-4]
# randomly perturb starting values (up to 2-fold) so replicate runs start
# from different points in parameter space
params = moments.Misc.perturb_params(params, fold=2, upper_bound=upper_bound,
                                     lower_bound=lower_bound)
# fitting (poptg = optimal parameters):
poptg = moments.Inference.optimize_log(params, data, func,
                                       lower_bound=lower_bound,
                                       upper_bound=upper_bound,
                                       verbose=False, maxiter=30)
# extracting model predictions, likelihood and theta
model = func(poptg, ns)
ll_model = moments.Inference.ll_multinom(model, data)
theta = moments.Inference.optimal_sfs_scaling(model, data)
# random index for this replicate
ind=str(random.randint(0,999999))
# plotting demographic model
plot_mod = moments.ModelPlot.generate_model(func, poptg, ns)
moments.ModelPlot.plot_model(plot_mod, save_file="IMisc2sm_"+ind+"_"+sys.argv[1]+".png",pop_labels=pop_ids, nref=theta/(4*mu), draw_scale=False, gen_time=gtime, gen_time_units="KY", reverse_timeline=True)
# bootstrapping for SDs of params and theta
all_boot=moments.Misc.bootstrap(dd,pop_ids,projections)
uncert=moments.Godambe.GIM_uncert(func,all_boot,poptg,data)
# printing parameters and their SDs
# NOTE: Python 2 print statement -- this script is Python 2 only
print "RESULT","IMisc2sm",ind,len(params),ll_model,sys.argv[1],sys.argv[2],sys.argv[3],poptg,theta,uncert
# plotting quad-panel figure with AFS, model, residuals:
moments.Plotting.plot_2d_comp_multinom(model, data, vmin=1, resid_range=3,
                                       pop_ids =pop_ids)
plt.savefig("IMisc2sm_"+ind+"_"+sys.argv[1]+"_"+sys.argv[2]+"_"+sys.argv[3]+"_"+sys.argv[4]+"_"+sys.argv[5]+'.pdf')
|
[
"matz@utexas.edu"
] |
matz@utexas.edu
|
494b730cf575803022fd0cfcf7c6a4255f7c81f4
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5706278382862336_1/Python/urupica/a.py
|
bacd3f027517e16fe04dada1b43ada1bbcbbb703
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
#!/usr/bin/python
import sys, datetime
# https://code.google.com/p/gmpy/
from gmpy import sqrt
from fractions import Fraction as frac
def solve(pq):
    """Return the minimum number of generations to reach fraction pq[0]/pq[1],
    or 'impossible'.

    NOTE: Python 2 code -- relies on integer '/' division below.
    """
    start = (frac(pq[0], pq[1]), 0)
    # the reduced denominator must be a pure power of two, else unreachable
    n = 0
    y = start[0].denominator
    while y % 2 == 0:
        n += 1
        y /= 2
    if y != 1:
        return 'impossible'
    # BFS over (fraction, generation) states; pop() from the back and
    # insert(0, ...) at the front, i.e. FIFO order; capped at 40 generations
    q = [start]
    v = set([start])
    while q:
        t,g = q.pop()
        if g >= 40:
            return 'impossible'
        anc = set()
        x = t.numerator
        y = t.denominator
        # n = exponent of two in the denominator of t
        n = 0
        while y % 2 == 0:
            n += 1
            y /= 2
        i = 0
        n2 = n/2          # integer division (Python 2)
        xx = 2*x
        # enumerate candidate pairs (t1, t2) with t1 + t2 == 2*t, where
        # t1 = a/2**i and t2 = c/2**(n-i)
        while i <= n2:
            b = 2**i
            d = 2**(n-i)
            a = 0
            ad = a*d
            while ad <= xx:
                if (xx-ad) % b == 0:
                    c = (xx-ad)/b
                    t1 = frac(a,b)
                    t2 = frac(c,d)
                    if t1 == 1 or t2 == 1:
                        return g+1
                    anc.add((t1, g+1))
                    anc.add((t2, g+1))
                a += 1
                ad = a*d
            i += 1
        # enqueue ancestors not seen before
        for u in anc:
            if u not in v:
                v.add(u)
                q.insert(0,u)
    # NOTE(review): implicitly returns None if the queue empties --
    # presumably unreachable for valid inputs, but worth confirming
def main():
    """CLI driver: read test cases from argv[1], optionally write argv[2].

    NOTE: Python 2 script (print statements, lazy-less map()).
    """
    if len(sys.argv) < 2:
        print 'Please provide input file'
        print 'Usage: %s inputfile [outputfile]' % sys.argv[0]
        return
    timestart = datetime.datetime.now()
    try:
        inputFile = open(sys.argv[1])
    except:
        print 'Failed to read input file %s' % sys.argv[1]
        return
    try:
        # output file is optional; None means stdout only
        outputFile = open(sys.argv[2], 'w') if len(sys.argv) >= 3 else None
    except:
        print 'Failed to create output file %s' % sys.argv[2]
        return
    # first input line holds the number of test cases
    testCases = int(inputFile.readline().strip())
    print '-----------------'
    print 'Test cases: %d ' % testCases
    print 'No output file' if len(sys.argv) < 3 else 'Writing to %s' % sys.argv[2]
    print '-----------------'
    for testCaseNumber in range(1, testCases+1):
        # each case is a fraction written as "p/q"
        pq = map(int, inputFile.readline().strip().split('/'))
        string = 'Case #%d: %s' % (testCaseNumber, solve(pq))
        print string
        if outputFile:
            outputFile.write(string + '\n')
    print '-----------------'
    print 'Written to %s' % sys.argv[2] if outputFile else 'No output file'
    print 'Elapsed time: %s' % (datetime.datetime.now() - timestart)
    print '-----------------'
    inputFile.close()
    if outputFile:
        outputFile.close()

if __name__=='__main__':
    main()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
49f198411001677c7891235ff88a61cf81bd18d4
|
d921253b98a922975709693c411218746af2f017
|
/bgx/families/smart_stuff/setup.py
|
ca54e6e51fa7a6642a979686b71f4d14c11621d2
|
[
"Zlib",
"MIT",
"Apache-2.0"
] |
permissive
|
bestonly125/DGT-Kawartha
|
223f88e224c1464fa22a4512e4567ac7ce1bc78f
|
edfbc18f2c70e813805ec23c28fbc35bf7866ffc
|
refs/heads/master
| 2022-11-22T13:18:21.204906
| 2020-07-24T09:03:57
| 2020-07-24T09:03:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
# Copyright 2020 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from __future__ import print_function
import os
import subprocess
from setuptools import setup, find_packages
# systemd unit / default files are only packaged when the target
# directories exist on the build host
data_files = []
if os.path.exists("/etc/default"):
    data_files.append(
        ('/etc/default', ['packaging/systemd/sawtooth-bgt-tp-python']))
if os.path.exists("/lib/systemd/system"):
    data_files.append(
        ('/lib/systemd/system',
         ['packaging/systemd/sawtooth-bgt-tp-python.service']))

setup(
    name='dgt-stuff',
    # version string comes from the repo's get_version helper script,
    # so this setup.py only works from inside the source tree
    version=subprocess.check_output(
        ['../../../bin/get_version']).decode('utf-8').strip(),
    description='DGT stuff Python ',
    author='NTRLab',
    url='https://github.com/hyperledger/sawtooth-core',
    packages=find_packages(),
    install_requires=[
        "cbor",
        "colorlog",
        "sawtooth-sdk",
        "sawtooth-signing",
        "secp256k1"
    ],
    data_files=data_files,
    entry_points={
        # console commands for the client CLI and the transaction processor
        'console_scripts': [
            'stuff = dgt_stuff.client_cli.bgt_cli:main_wrapper',
            'stuff-tp = dgt_stuff.processor.main:main'
        ]
    })
|
[
"sparsov@sinergo.ru"
] |
sparsov@sinergo.ru
|
357363e824323766d3fcb3eb32e7192e1d62999e
|
a65de6ebf74a0ea70fdb4d85a28cc11e641554e0
|
/py-scripts/det_keychain_type2.py
|
929368c14258f7c70ec8f74e612c98827e8e58aa
|
[
"MIT"
] |
permissive
|
VB6Hobbyst7/bbt
|
e4f63cdd0bb9ef9550edc40b0ee1def7148fe91a
|
7b35e4fe9041af9b73bcfb192aeeeb873d730c72
|
refs/heads/master
| 2023-01-22T03:44:04.506182
| 2020-11-23T23:12:56
| 2020-11-23T23:12:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,640
|
py
|
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
""" Deterministic Key Sequence (Type-1)"""
import secrets
from hashlib import sha256 as hf
from btclib.curve import mult
from btclib.curve import secp256k1 as ec
from btclib.utils import int_from_bits
# master prvkey in [1, n-1]
mprvkey = 1 + secrets.randbelow(ec.n - 1)
print(f"\nmaster prvkey: {hex(mprvkey).upper()}")
# Master Pubkey:
mpubkey = mult(mprvkey, ec.G)
print(f"Master Pubkey: {hex(mpubkey[0]).upper()}")
print(f" {hex(mpubkey[1]).upper()}")
r = secrets.randbits(ec.nlen)
print(f"\npublic random number: {hex(r).upper()}")
rbytes = r.to_bytes(ec.nsize, "big")
nKeys = 3
for i in range(nKeys):
ibytes = i.to_bytes(ec.nsize, "big")
hd = hf(ibytes + rbytes).digest()
offset = int_from_bits(hd, ec.nlen) % ec.n
q = (mprvkey + offset) % ec.n
Q = mult(q, ec.G, ec)
print(f"\nprvkey #{i}: {hex(q).upper()}")
print(f"Pubkey #{i}: {hex(Q[0]).upper()}")
print(f" {hex(Q[1]).upper()}")
# Pubkeys could also be calculated without using prvkeys
for i in range(nKeys):
ibytes = i.to_bytes(ec.nsize, "big")
hd = hf(ibytes + rbytes).digest()
offset = int_from_bits(hd, ec.nlen) % ec.n
Q = ec.add(mpubkey, mult(offset, ec.G, ec))
assert Q == mult((mprvkey + offset) % ec.n, ec.G, ec)
|
[
"ferdinando@ametrano.net"
] |
ferdinando@ametrano.net
|
6087bac61affd929686678fae8bc6328f0d68c6c
|
93c3abdedc8d5571dc2a5241b20a5806d1da59d8
|
/dcgan/.ipynb_checkpoints/main-checkpoint.py
|
000c110d3963eb3527631a1963b62db4a083e3a3
|
[] |
no_license
|
Trccc/5242Project-GAN
|
bf33b33501b332d9716e2884d821016f5bbfd5aa
|
3cd3a7734ae42fb0d3b5efb5bd6839621a9e1b0c
|
refs/heads/master
| 2023-03-18T03:33:23.304285
| 2020-01-14T20:01:18
| 2020-01-14T20:01:18
| 222,341,507
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,983
|
py
|
import matplotlib
matplotlib.use('Agg')
import tensorflow as tf
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import PIL
from tensorflow.keras import layers
import time
from IPython import display
import IPython
import tensorflow_datasets as tfds
from pytz import timezone
from datetime import datetime
from config import cfg
from model import *
from utils import *
from loss import *
from Inception_score import *
@tf.function
def train_step(images, showloss = False):
    """One GAN training step: update generator and discriminator once.

    images: a batch of real images.
    showloss: kept for call compatibility; the printing code it guarded is
        commented out below.
    Returns (gen_loss, disc_loss) as tensors.

    Uses module-level globals: cfg, generator, discriminator, the two loss
    functions, and both optimizers.
    """
    noise = tf.random.normal([cfg.BATCH_SIZE, cfg.NOISE_DIM])
    g_loss = generator_loss
    d_loss = discriminator_loss
    # two tapes so each network's gradients are computed independently
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)
        gen_loss = g_loss(fake_output)
        disc_loss = d_loss(real_output, fake_output)
        #if showloss:
        #print('gen_loss = %.4f|disc_loss = %.4f'%(gen_loss.numpy(),disc_loss.numpy()))
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
    return gen_loss, disc_loss
def train(dataset, epochs, savedir):
    """Main GAN training loop.

    dataset: iterable of image batches.
    epochs: number of passes over the dataset.
    savedir: directory for progress images, CSVs and summary plots.

    Side effects: per-epoch sample images, checkpoints + Inception scores
    every 5 epochs, TensorBoard loss scalars, final CSV/PNG summaries.
    Relies on module-level globals (generator, checkpoint, seed, summary
    writers) that __main__ sets up.
    """
    IS_mean = []
    IS_std = []
    G_loss = []
    D_loss = []
    for epoch in range(epochs):
        start = time.time()
        i = 0
        g_loss = 0
        d_loss = 0
        for image_batch in dataset:
            i += 1
            if (i+1) % cfg.SHOW_LOSS ==0:
                g_tensor, d_tensor = train_step(image_batch, showloss = True)
            else:
                g_tensor, d_tensor = train_step(image_batch)
            g_loss += float(g_tensor.numpy())
            d_loss += float(d_tensor.numpy())
        # mean per-batch losses for this epoch
        G_loss.append(g_loss / i)
        D_loss.append(d_loss / i)
        # Produce images for the GIF
        display.clear_output(wait=True)
        generate_and_save_images(generator,
                                 epoch + 1,
                                 seed,savedir)
        # Save the model every 15 epochs
        # NOTE(review): the comment above says 15, but the code checkpoints
        # (and computes the Inception score) every 5 epochs.
        if (epoch + 1) % 5 == 0:
            mean, std = IS(generator, 1000, 100)
            IS_mean.append(mean)
            IS_std.append(std)
            checkpoint.save(file_prefix = checkpoint_prefix)
        # log losses: generator -> "train" writer, discriminator -> "test"
        with train_summary_writer.as_default():
            tf.summary.scalar('loss', G_loss[-1], step=epoch)
        with test_summary_writer.as_default():
            tf.summary.scalar('loss', D_loss[-1], step=epoch)
        print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
    # clear outputs
    display.clear_output(wait=True)
    # save IS score and Loss plot
    IS_mean = np.array(IS_mean)
    IS_std = np.array(IS_std)
    IS_df = pd.DataFrame({'mean':IS_mean, 'mean+std':IS_mean+IS_std, 'mean-std':IS_mean-IS_std, 'std':IS_std})
    # IS is sampled every 5 epochs, so label rows by epoch number
    IS_df.index = [5 * (x + 1) for x in range(IS_df.shape[0])]
    Loss_df = pd.DataFrame({'Generator':G_loss, 'Discriminator':D_loss})
    df_path = os.path.join(savedir, 'IS_score.csv')
    IS_df.to_csv(path_or_buf=df_path, index=False)
    df_path2 = os.path.join(savedir, 'Loss.csv')
    Loss_df.to_csv(path_or_buf=df_path2, index=False)
    print('Inception score and loss save complete')
    path = os.path.join(savedir, 'IS_score_trend.png')
    fig = plt.figure(figsize=(6, 6))
    plt.plot(IS_df[['mean','mean+std','mean-std']])
    plt.title('Inception Score')
    plt.legend(IS_df[['mean','mean+std','mean-std']].columns, loc='best')
    plt.savefig(path)
    #plt.close('all')
    path2 = os.path.join(savedir, 'Loss_trend.png')
    fig2 = plt.figure(figsize=(6, 6))
    plt.plot(Loss_df)
    plt.title('Validation Losses')
    plt.legend(Loss_df.columns, loc='best')
    plt.savefig(path2)
    # Generate after the final epoch
    generate_and_save_images(generator,
                             epochs,
                             seed,savedir)
if __name__ == '__main__':
    # ---- dataset / architecture selection from config --------------------
    if cfg.DATA.lower() == 'mnist':
        train_data = get_train_data('mnist')
        generator = make_generator_model_mnist()
        discriminator = make_discriminator_model_mnist()
    elif cfg.DATA.lower() == 'svhn':
        train_data = get_train_data('svhn')
        generator = make_generator_model_svhn()
        discriminator = make_discriminator_model_svhn()
    else:
        # previously an unknown value silently fell through and crashed much
        # later with a NameError on `generator`; fail fast instead
        raise ValueError("Unsupported cfg.DATA: %r" % cfg.DATA)
    noise = tf.random.normal([1, 100])
    generator_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4, beta_1=0.5)
    discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4, beta_1=0.5)
    EPOCHS = cfg.EPOCHS
    noise_dim = cfg.NOISE_DIM
    num_examples_to_generate = cfg.NUM_EXAMPLES_TO_GENERATE
    # fixed noise so progress images are comparable across epochs
    seed = tf.random.normal([num_examples_to_generate, noise_dim])
    # one output directory per run, stamped with US/Eastern local time
    now = datetime.now(timezone('US/Eastern'))
    subfile = now.strftime("%m_%d_%H_%M")
    filedir = os.path.join(cfg.IMAGE_PATH, subfile)
    checkpoint_dir = filedir
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                     discriminator_optimizer=discriminator_optimizer,
                                     generator=generator,
                                     discriminator=discriminator)
    # BUG FIX: the original guarded os.mkdir(filedir) with
    # `if not os.path.isfile(filedir)`, which tests for a *file* -- when the
    # run directory already existed, os.mkdir raised FileExistsError.
    # makedirs(..., exist_ok=True) creates cfg.IMAGE_PATH and the run
    # subdirectory in one idempotent call.
    os.makedirs(filedir, exist_ok=True)
    savedir = filedir
    # separate TensorBoard writers for generator / discriminator losses
    current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
    gen_log_dir = 'logs/gradient_tape/' + current_time + '/gen'
    disc_log_dir = 'logs/gradient_tape/' + current_time + '/disc'
    train_summary_writer = tf.summary.create_file_writer(gen_log_dir)
    test_summary_writer = tf.summary.create_file_writer(disc_log_dir)
    train(train_data, EPOCHS, savedir)
    # optionally assemble the per-epoch progress images into a GIF
    if cfg.GIF:
        anim_file = subfile + 'gan.gif'
        with imageio.get_writer(anim_file, mode='I') as writer:
            filenames = glob.glob(filedir + '/image*.png')
            filenames = sorted(filenames)
            last = -1
            for i, filename in enumerate(filenames):
                # thin out later frames so the animation accelerates
                frame = 2 * (i ** 0.5)
                if round(frame) > round(last):
                    last = frame
                else:
                    continue
                image = imageio.imread(filename)
                writer.append_data(image)
            # repeat the final frame so the GIF ends on the last image
            image = imageio.imread(filename)
            writer.append_data(image)
    print('finish')
|
[
"google-dl-platform@googlegroups.com"
] |
google-dl-platform@googlegroups.com
|
0d6b1608e13599f03b3bc14f985dd604ca4101c4
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/sql/azure-mgmt-sql/generated_samples/create_database_default_mode.py
|
9fca87c06715de4be793721ff9f2021becd6a493
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python create_database_default_mode.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create database "testdb" in Default mode on an Azure SQL server.

    Requires AZURE_CLIENT_ID / AZURE_TENANT_ID / AZURE_CLIENT_SECRET in the
    environment (see the module docstring above).
    """
    client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )

    # long-running operation: begin_* returns a poller and .result() blocks
    # until provisioning finishes
    response = client.databases.begin_create_or_update(
        resource_group_name="Default-SQL-SouthEastAsia",
        server_name="testsvr",
        database_name="testdb",
        parameters={
            "location": "southeastasia",
            "properties": {
                "collation": "SQL_Latin1_General_CP1_CI_AS",
                "createMode": "Default",
                "maxSizeBytes": 1073741824,
            },
            "sku": {"name": "S0", "tier": "Standard"},
        },
    ).result()
    print(response)


# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2022-05-01-preview/examples/CreateDatabaseDefaultMode.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
907ff0c5531274923b2904bb253a1409a2028b8b
|
b87402fa3729a2bee6e8c527dad8f2c09e84c115
|
/geemap/__init__.py
|
02d54e0f59c9c6c51b062296d246a2b813816863
|
[
"MIT"
] |
permissive
|
tchigher/geemap
|
26e3214a7f4e604faf892276d9aad400faffcf67
|
66b2618f31d673230f6ec739fc88da33c50af5a3
|
refs/heads/master
| 2022-04-13T16:57:47.873576
| 2020-04-08T03:02:18
| 2020-04-08T03:02:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
"""Top-level package for geemap."""
__author__ = """Qiusheng Wu"""
__email__ = 'giswqs@gmail.com'
__version__ = '0.6.1'
from .geemap import *
from .basemaps import ee_basemaps
|
[
"giswqs@gmail.com"
] |
giswqs@gmail.com
|
b86d96ebe13f417ecffb1d27aed00b6a10b9031d
|
6c58da2c54a3d35273e7984313d181f1da9981fc
|
/DB_Connection/venv/bin/easy_install-2.7
|
785646a1d64db62a32e5e767203f0d9d853dd016
|
[
"MIT-0"
] |
permissive
|
py1-10-2017/rgero215_PY1-10-2017
|
e582cb12cc63f84b1c0c14d09a922cb6cb228016
|
f455b335ec9c8c850571f3a75dcd95759b4cfdad
|
refs/heads/master
| 2021-09-04T03:23:48.062326
| 2018-01-14T21:07:26
| 2018-01-14T21:07:26
| 105,612,652
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
7
|
#!/Users/RGero13/Desktop/rgero215_PY1-10-2017/DB_Connection/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==36.6.0','console_scripts','easy_install-2.7'
# NOTE: auto-generated console-script shim written by setuptools into a
# virtualenv bin/ directory; it just dispatches to the entry point.
__requires__ = 'setuptools==36.6.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # strip a trailing "-script.py(w)"/".exe" suffix so argv[0] matches the
    # plain command name (Windows launcher convention)
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==36.6.0', 'console_scripts', 'easy_install-2.7')()
    )
|
[
"rgero215@gmail.com"
] |
rgero215@gmail.com
|
4b00204bf1744504a90b22619170f254e3f4633d
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/9ED9zbHHhaPaBz2xi_1.py
|
55607ddaf041a60b190183711eb7aeff9bf882d2
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
"""
Consider a sequence where the first two numbers are `0` and `1` and the next
number of the sequence is the sum of the previous two numbers modulo three.
Create a function that finds the `n`th element of the sequence.
### Examples
normal_sequence(1) ➞ 0
normal_sequence(2) ➞ 1
normal_sequence(3) ➞ 1
# (0+1)%3 = 1
normal_sequence(20) ➞ 2
### Notes
* 1 ≤ N ≤ 10^19
* A hint in comments section.
"""
def normal_sequence(n):
    """Return the n-th element (1-indexed) of the sequence a(1)=0, a(2)=1,
    a(k) = (a(k-1) + a(k-2)) % 3.

    The sequence repeats with period 8 (0, 1, 1, 2, 0, 2, 2, 1), so the
    answer is a constant-time table lookup -- fine even for n up to 10**19.
    """
    # cycle[r] is the value for n % 8 == r; r == 0 corresponds to n == 8.
    # (The original used a dict literal named `dict`, shadowing the builtin.)
    cycle = (1, 0, 1, 1, 2, 0, 2, 2)
    return cycle[n % 8]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
4056ce7d0c7d7aa37339d8d6f107d2fc1ba5f3ee
|
35e41dab4a3e8eebe17872f5414cf3efb86be54c
|
/supervised_learning/0x08-deep_cnns/3-projection_block.py
|
8cf99029892ce07e6fa99b4b6ec1db75920eeb35
|
[] |
no_license
|
yasheymateen/holbertonschool-machine_learning
|
52eaf217e924404e8833e75e8b48e80807d784dc
|
1329ba88bf9ae50693188e1c85383d9b76c2b1f4
|
refs/heads/master
| 2022-12-24T13:51:02.743526
| 2020-10-08T06:59:04
| 2020-10-08T06:59:04
| 255,277,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
#!/usr/bin/env python3
"""identity block maker"""
import tensorflow.keras as K
def projection_block(A_prev, filters, s=2):
    """Build a ResNet projection block (convolutional shortcut).

    A_prev: input tensor.
    filters: three filter counts for the 1x1 (strided), 3x3 and final 1x1
        convolutions of the main path; filters[2] is reused by the shortcut.
    s: stride of the first main-path conv and of the shortcut conv
        (default 2, i.e. spatial downsampling).
    Returns the ReLU-activated sum of the main and shortcut paths.

    NOTE(review): the original docstring said "identity block", but the
    strided 1x1 shortcut convolution makes this a projection block.
    """
    # main path: 1x1 (strided) -> BN -> ReLU -> 3x3 -> BN -> ReLU -> 1x1 -> BN
    out = K.layers.Conv2D(filters[0], 1, s,
                          kernel_initializer='he_normal')(A_prev)
    out = K.layers.BatchNormalization()(out)
    out = K.layers.Activation('relu')(out)
    out = K.layers.Conv2D(filters[1], 3, padding='same',
                          kernel_initializer='he_normal')(out)
    out = K.layers.BatchNormalization()(out)
    out = K.layers.Activation('relu')(out)
    out = K.layers.Conv2D(filters[2], 1,
                          kernel_initializer='he_normal')(out)
    out = K.layers.BatchNormalization()(out)
    # shortcut path: strided 1x1 conv so shapes match for the addition
    out2 = K.layers.Conv2D(filters[2], 1, s,
                           kernel_initializer='he_normal')(A_prev)
    out2 = K.layers.BatchNormalization()(out2)
    out = K.layers.add([out, out2])
    return K.layers.Activation('relu')(out)
|
[
"yasheymateen@gmail.com"
] |
yasheymateen@gmail.com
|
0b6fa605bb49d021790aa6ee7cb6c301ac938c7d
|
fe9b1fcf9f44092037a8c14b4c5cbfefa6ad6982
|
/shoppingsite.py
|
923848ec55b2472541fe4bc960484ee1da3d1163
|
[] |
no_license
|
elaineyoung702/shopping-site
|
702abd1536b8e805dc719377de23f456773039c2
|
e2dbd14569203c6d4be4119ab075f2210c88b61d
|
refs/heads/master
| 2021-06-20T05:48:32.217285
| 2019-07-26T00:51:48
| 2019-07-26T00:51:48
| 198,886,591
| 0
| 0
| null | 2021-03-20T01:19:46
| 2019-07-25T19:00:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,873
|
py
|
"""Ubermelon shopping application Flask server.
Provides web interface for browsing melons, seeing detail about a melon, and
put melons in a shopping cart.
Authors: Joel Burton, Christian Fernandez, Meggie Mahnken, Katie Byers.
"""
from flask import Flask, render_template, redirect, flash, session
import jinja2
import melons
app = Flask(__name__)

# A secret key is needed to use Flask sessioning features
# NOTE(review): hard-coding the key is fine for this exercise, but in
# production it should come from configuration or the environment.
app.secret_key = 'this-should-be-something-unguessable'

# Normally, if you refer to an undefined variable in a Jinja template,
# Jinja silently ignores this. This makes debugging difficult, so we'll
# set an attribute of the Jinja environment that says to make this an
# error.
app.jinja_env.undefined = jinja2.StrictUndefined
@app.route("/")
def index():
"""Return homepage."""
return render_template("homepage.html")
@app.route("/melons")
def list_melons():
"""Return page showing all the melons ubermelon has to offer"""
melon_list = melons.get_all()
return render_template("all_melons.html",
melon_list=melon_list)
@app.route("/melon/<melon_id>")
def show_melon(melon_id):
"""Return page showing the details of a given melon.
Show all info about a melon. Also, provide a button to buy that melon.
"""
melon = melons.get_by_id(melon_id)
# print(melon)
return render_template("melon_details.html",
display_melon=melon)
@app.route("/cart")
def show_shopping_cart():
"""Display content of shopping cart."""
# TODO: Display the contents of the shopping cart.
# The logic here will be something like:
#
# - get the cart dictionary from the session
# - create a list to hold melon objects and a variable to hold the total
# cost of the order
# - loop over the cart dictionary, and for each melon id:
# - get the corresponding Melon object
# - compute the total cost for that type of melon
# - add this to the order total
# - add quantity and total cost as attributes on the Melon object
# - add the Melon object to the list created above
# - pass the total order cost and the list of Melon objects to the template
#
# Make sure your function can also handle the case wherein no cart has
# been added to the session
cart = session["cart"]
print(cart)
# melons = []
# total_order_cost = 0
# for melon in cart:
# melons.append[melon]
return render_template("cart.html")
@app.route("/add_to_cart/<melon_id>")
def add_to_cart(melon_id):
"""Add a melon to cart and redirect to shopping cart page.
When a melon is added to the cart, redirect browser to the shopping cart
page and display a confirmation message: 'Melon successfully added to
cart'."""
# TODO: Finish shopping cart functionality
# The logic here should be something like:
#
# - check if a "cart" exists in the session, and create one (an empty
# dictionary keyed to the string "cart") if not
# - check if the desired melon id is the cart, and if not, put it in
# - increment the count for that melon id by 1
# - flash a success message
# - redirect the user to the cart page
if "cart" not in session:
session["cart"] = {}
cart = session["cart"]
cart[melon_id] = cart.get(melon_id, 0) + 1
# print(cart)
flash("Melon successfully added!")
return redirect("/cart")
@app.route("/login", methods=["GET"])
def show_login():
"""Show login form."""
return render_template("login.html")
@app.route("/login", methods=["POST"])
def process_login():
"""Log user into site.
Find the user's login credentials located in the 'request.form'
dictionary, look up the user, and store them in the session.
"""
# TODO: Need to implement this!
# The logic here should be something like:
#
# - get user-provided name and password from request.form
# - use customers.get_by_email() to retrieve corresponding Customer
# object (if any)
# - if a Customer with that email was found, check the provided password
# against the stored one
# - if they match, store the user's email in the session, flash a success
# message and redirect the user to the "/melons" route
# - if they don't, flash a failure message and redirect back to "/login"
# - do the same if a Customer with that email doesn't exist
return "Oops! This needs to be implemented"
@app.route("/checkout")
def checkout():
"""Checkout customer, process payment, and ship melons."""
# For now, we'll just provide a warning. Completing this is beyond the
# scope of this exercise.
flash("Sorry! Checkout will be implemented in a future version.")
return redirect("/melons")
if __name__ == "__main__":
app.run(debug=True)
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
7b67cdaa927798bb7bcd21bf3a652fc430e7d02c
|
a11d83fced34854664fac72e18d48fde6aa967e4
|
/0x11-python-network_1/2-post_email.py
|
9a6be815e066d3d6642c328bd74da639a6b63b2d
|
[] |
no_license
|
afarizap/holbertonschool-higher_level_programming
|
ffe0bf1440726c952f4dd28b908eabc4ccb5225b
|
ad39e58f9cb20cba4b9e2c14075f216097588f47
|
refs/heads/master
| 2023-03-30T15:39:35.184484
| 2021-03-22T22:55:24
| 2021-03-22T22:55:24
| 259,437,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
#!/usr/bin/python3
""" takes in a URL and an email, sends a POST request with the email
as a parameter, and displays the body of the response decoded in utf-8
"""
from urllib import request, parse
from sys import argv
if __name__ == '__main__':
    # argv[1] = target URL, argv[2] = email address to send
    data = parse.urlencode({'email': argv[2]})
    data = data.encode('ascii')
    # supplying a data payload makes urllib send a POST request
    req = request.Request(argv[1], data)
    with request.urlopen(req) as response:
        print(response.read().decode('utf-8'))
|
[
"afarizap@gmail.com"
] |
afarizap@gmail.com
|
8ab9ca2c71dcf5dd124863ea0ac9caad6daa108a
|
01245df7d402532d9729ff933f5ade8c2965760b
|
/fias/admin.py
|
8f9ffe8cfa5ca10b536c6a856b7b8b8a5df040b2
|
[
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
aldev12/django-fias
|
c653edbef22d7f3fb42e4ebc0a8729fa488e3799
|
53980ada222f1f4f56c268c0489ab9a50a87d9f6
|
refs/heads/master
| 2020-06-02T18:39:30.285925
| 2020-04-14T02:26:19
| 2020-04-14T02:26:19
| 191,269,395
| 0
| 1
|
NOASSERTION
| 2019-07-17T00:51:28
| 2019-06-11T01:15:19
|
Python
|
UTF-8
|
Python
| false
| false
| 3,549
|
py
|
# coding: utf-8
from __future__ import unicode_literals, absolute_import
from django.contrib import admin
from fias.models import (
AddrObj, House, HouseInt,
LandMark, Room, Stead,
NormDoc,
SocrBase,
NDocType,
ActStat, CenterSt, CurentSt,
EstStat, HSTStat, IntvStat,
OperStat, StrStat,
)
class ViewAdmin(admin.ModelAdmin):
    """
    Admin class that exposes model data for viewing only.
    (Docstring translated from Russian.)
    """
    change_form_template = 'admin/view_form.html'
    save_on_top = False
    actions = None

    # hide the "add" button for everyone
    def has_add_permission(self, request):
        return False

    # hide the "delete" action for everyone
    def has_delete_permission(self, request, obj=None):
        return False

    # silently discard any save attempt from the (read-only) change form
    def save_model(self, request, obj, form, change):
        pass
@admin.register(SocrBase)
class SocrBaseAdmin(admin.ModelAdmin):
list_display = ['level', 'scname', 'socrname', 'item_weight']
list_display_links = ('scname', 'socrname')
readonly_fields = ['level', 'scname', 'socrname', 'kod_t_st']
list_editable = ['item_weight']
ordering = ['-item_weight', 'level']
actions = None
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
@admin.register(AddrObj)
class AddrObjAdmin(ViewAdmin):
list_display = ('offname', 'shortname', 'aolevel', 'code', 'aoguid')
@admin.register(House)
class HouseAdmin(ViewAdmin):
list_display = ('aoguid', 'housenum', 'buildnum', 'strucnum', 'houseguid')
raw_id_fields = ('aoguid',)
@admin.register(HouseInt)
class HouseIntAdmin(ViewAdmin):
list_display = ('aoguid', 'intguid', 'houseintid', 'intstart', 'intend')
raw_id_fields = ('aoguid',)
@admin.register(LandMark)
class LandMarkAdmin(ViewAdmin):
list_display = ('aoguid', 'landguid', 'landid')
raw_id_fields = ('aoguid',)
@admin.register(Room)
class RoomAdmin(ViewAdmin):
list_display = ('houseguid', 'flatnumber', 'flattype', 'roomguid', 'roomid')
raw_id_fields = ('houseguid',)
@admin.register(Stead)
class SteadAdmin(ViewAdmin):
list_display = ('steadguid', 'number', 'regioncode')
@admin.register(NDocType)
class NDocTypeAdmin(ViewAdmin):
list_display = ('ndtypeid', 'name')
list_display_links = ('name',)
@admin.register(NormDoc)
class NormDocAdmin(ViewAdmin):
list_display = ('normdocid', 'docdate', 'docnum')
list_display_links = ('normdocid',)
@admin.register(ActStat)
class ActStatAdmin(ViewAdmin):
list_display = ('actstatid', 'name')
list_display_links = ('name',)
@admin.register(CenterSt)
class CenterStatAdmin(ViewAdmin):
list_display = ('centerstid', 'name')
list_display_links = ('name',)
@admin.register(CurentSt)
class CurentStatAdmin(ViewAdmin):
list_display = ('curentstid', 'name')
list_display_links = ('name',)
@admin.register(EstStat)
class EstStatAdmin(ViewAdmin):
list_display = ('eststatid', 'name', 'shortname')
list_display_links = ('name',)
@admin.register(HSTStat)
class HSTStatAdmin(ViewAdmin):
list_display = ('housestid', 'name')
list_display_links = ('name',)
@admin.register(IntvStat)
class IntvStatAdmin(ViewAdmin):
list_display = ('intvstatid', 'name')
list_display_links = ('name',)
@admin.register(OperStat)
class OperStatAdmin(ViewAdmin):
list_display = ('operstatid', 'name')
list_display_links = ('name',)
@admin.register(StrStat)
class StrStatAdmin(ViewAdmin):
list_display = ('strstatid', 'name', 'shortname')
list_display_links = ('name',)
|
[
"root@proscript.ru"
] |
root@proscript.ru
|
60d4fed93ac6cca6c2002acc9ab54c8781e1beb1
|
a4a540187b93a177331885caee59c3253e91ea7f
|
/pymcversion/java_version_manifest.py
|
340eb9a94fddb23da1d9f3eea8de3417385e38a9
|
[
"Unlicense"
] |
permissive
|
aoirint/pymcversion
|
ae97123457594ad7741b8f8b69a8deff515eb09c
|
a3af1d06c9b1ce54530fc9af04b3cb48a0ba1340
|
refs/heads/main
| 2023-08-25T19:50:52.011370
| 2021-10-22T15:58:48
| 2021-10-22T15:58:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
import requests
from typing import List
from pydantic import BaseModel
class VersionManifestLatest(BaseModel):
release: str
snapshot: str
class VersionManifestVersion(BaseModel):
id: str
type: str
url: str
time: str
releaseTime: str
class VersionManifest(BaseModel):
latest: VersionManifestLatest
versions: List[VersionManifestVersion]
def get_java_version_manifest() -> VersionManifest:
timeout = 3
useragent = 'aoirint/pymcversion'
headers = {
'User-Agent': useragent,
}
res = requests.get('https://launchermeta.mojang.com/mc/game/version_manifest.json', headers=headers, timeout=timeout)
manifest_dict = res.json()
manifest = VersionManifest.parse_obj(manifest_dict)
return manifest
|
[
"aoirint@gmail.com"
] |
aoirint@gmail.com
|
67ecd1716036514223b44bda3eada5013e806d32
|
1fcf52f71e4b08b9f6973dcb0a7acf1fe41b9c5f
|
/setup.py
|
fc0025b4f83392ce591bf27cf3013ce54c98453b
|
[] |
no_license
|
svetlyak40wt/colorize
|
0a2c65059d4832b88ffbe62864328810f47f0750
|
1e1fd8eff1d2c820105d0655a8df36efac12600a
|
refs/heads/master
| 2020-05-20T08:14:22.598684
| 2014-01-16T12:57:29
| 2014-01-16T12:57:29
| 1,140,039
| 0
| 1
| null | 2014-01-16T12:57:29
| 2010-12-05T09:34:27
|
Python
|
UTF-8
|
Python
| false
| false
| 812
|
py
|
from setuptools import setup, find_packages
setup(
name = 'colorizer',
version = '0.1.7',
description = 'Console colorizer, which acts like grep but paint each match in it\'s own color.',
author = 'Alexander Artemenko',
author_email = 'svetlyak.40wt@gmail.com',
url = 'http://github.com/svetlyak40wt/colorizer/',
license = 'New BSD License',
install_requires = ['termcolor'],
classifiers = [
'Environment :: Console',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
],
packages = find_packages(),
entry_points = """
[console_scripts]
colorize = colorizer:main
""",
)
|
[
"svetlyak.40wt@gmail.com"
] |
svetlyak.40wt@gmail.com
|
a1c1382419540480fb893cf3aba8e6f46c76eddc
|
f8cff1f2c00f000aba61cdf728d8a2b254647f09
|
/OpenSpider-master/concurrents/manager.py
|
b1c32985d3a928292464adbd637ee346c6da18c8
|
[] |
no_license
|
vieyahn2017/crawlers
|
76097ec9ca71edd128addf341ae02ef26ef383dc
|
fa584094b026cbf0451e78303c56f614fa5fedde
|
refs/heads/master
| 2021-09-12T21:23:33.265856
| 2018-04-21T03:06:50
| 2018-04-21T03:06:50
| 102,854,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
__author__ = 'zhangxa'
'''
A concurrentManager should apply a class func who has a run method.
'''
class ConcurrentManger:
def __init__(self,concurrents,runner,*args,**kwargs):
self._concurrents = concurrents
self._runner = runner
self._args = args
self._kwargs = kwargs
def run(self):
raise NotImplementedError
|
[
"283403891@qq.com"
] |
283403891@qq.com
|
6eac61a01f8b8fcef649ff9f214998ce0fa0545d
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/connect_write_f/contact-recording_resume.py
|
934dbeb6b552f992ed8bf1aedd0a3c53fb46b289
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
start-contact-recording : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/connect/start-contact-recording.html
stop-contact-recording : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/connect/stop-contact-recording.html
suspend-contact-recording : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/connect/suspend-contact-recording.html
"""
write_parameter("connect", "resume-contact-recording")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
2d770c24fe0c6044d3dc2542a21f7bcb180d892e
|
842e3cd1266d18752a3baf2b90232ed4ce41eb4f
|
/grako/test/grammar/semantics_test.py
|
9d520aaa87c70fb7d87067f9de6cfda85bd29469
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
apalala/grako
|
2786d85eef9799bf614c46c92f19ff183a435d46
|
efb373d89e6805930e661758c2cff2b26da4658a
|
refs/heads/master
| 2020-12-25T17:37:05.353167
| 2017-05-02T02:53:11
| 2017-05-02T02:53:11
| 65,163,853
| 16
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Juancarlo Añez
# Copyright (C) 2012-2016 by Juancarlo Añez and Thomas Bragg
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from grako.tool import compile
from grako.semantics import ModelBuilderSemantics
class SemanticsTests(unittest.TestCase):
def test_builder_semantics(self):
grammar = '''
start::sum = {number}+ $ ;
number::int = /\d+/ ;
'''
text = '5 4 3 2 1'
semantics = ModelBuilderSemantics()
model = compile(grammar, 'test')
ast = model.parse(text, semantics=semantics)
self.assertEqual(15, ast)
import functools
dotted = functools.partial(type('').join, '.')
dotted.__name__ = 'dotted'
grammar = '''
start::dotted = {number}+ $ ;
number = /\d+/ ;
'''
semantics = ModelBuilderSemantics(types=[dotted])
model = compile(grammar, 'test')
ast = model.parse(text, semantics=semantics)
self.assertEqual('5.4.3.2.1', ast)
|
[
"apalala@gmail.com"
] |
apalala@gmail.com
|
b81db8a245a81834aad2e44681142b1dee1f761d
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/eXe/rev2735-2877/base-trunk-2735/exe/engine/mathidevice.py
|
d7c9a57ae1108ccbb969998567d54ab61700910a
|
[] |
no_license
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
"""
MathIdevice: just has a block of text
"""
import logging
from exe.engine.idevice import Idevice
from exe.engine.field import MathField
log = logging.getLogger(__name__)
class MathIdevice(Idevice):
"""
MathIdevice: just has a block of text
"""
def __init__(self, instruc="", latex=""):
Idevice.__init__(self, x_(u"Maths"),
x_(u"University of Auckland"),
x_("""The mathematical language LATEX has been
used to enable your to insert mathematical formula
into your content. It does this by translating
LATEX into an image which is then displayed
within your eXe content. We would recommend that
you use the Free Text iDevice to provide
explanatory notes and learning instruction around
this graphic."""),
"",
"")
self.emphasis = Idevice.NoEmphasis
self.content = MathField(x_(u"Maths"),
x_(u"""You can use the toolbar or enter latex manually into the textarea. """))
self.content.idevice = self
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
4f66d4ae6728ef58845bb197375e2267313d0135
|
275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc
|
/test/test_ad_view.py
|
ea1ebb582cd5d6c22cdaaacd01a964bee35afdb7
|
[] |
no_license
|
cascadiarc/cyclos-python-client
|
8029ce07174f2fe92350a92dda9a60976b2bb6c2
|
a2e22a30e22944587293d51be2b8268bce808d70
|
refs/heads/main
| 2023-04-03T16:52:01.618444
| 2021-04-04T00:00:52
| 2021-04-04T00:00:52
| 354,419,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.ad_view import AdView # noqa: E501
from swagger_client.rest import ApiException
class TestAdView(unittest.TestCase):
"""AdView unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAdView(self):
"""Test AdView"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.ad_view.AdView() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"dan@leftcoastfs.com"
] |
dan@leftcoastfs.com
|
42d4548e0211060ef8143593198856e9ef130dfd
|
578db86c51d44ebddd0dc7b1738985b3dc69eb74
|
/corehq/apps/api/resources/serializers.py
|
f57a8fc1b707903d74990c7085001cd6ca31691d
|
[
"BSD-3-Clause"
] |
permissive
|
dimagi/commcare-hq
|
a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b
|
e7391ddae1af1dbf118211ecb52c83fc508aa656
|
refs/heads/master
| 2023-08-16T22:38:27.853437
| 2023-08-16T19:07:19
| 2023-08-16T19:07:19
| 247,278
| 499
| 203
|
BSD-3-Clause
| 2023-09-14T19:03:24
| 2009-07-09T17:00:07
|
Python
|
UTF-8
|
Python
| false
| false
| 489
|
py
|
import json
from tastypie.serializers import Serializer
class ListToSingleObjectSerializer(Serializer):
"""
Serializer class that takes a list of one object and removes the other metadata
around the list view so that just the object is returned.
See IdentityResource for an example.
"""
def to_json(self, data, options=None):
# note: this is not valid if there is ever not exactly one object returned
return json.dumps(data['objects'][0].data)
|
[
"czue@dimagi.com"
] |
czue@dimagi.com
|
350ac6d5f55c221b5552b1ff7e6b0a3063604641
|
6eb318399cea68bc998fc635d7d519e187a24f15
|
/leetcode/39_combination_sum/39_combination_sum.py
|
3efee07c21fc16d136b49c82665b494a4d2096d8
|
[
"Apache-2.0"
] |
permissive
|
ryangillard/misc
|
e8eb4813a54bc59babc263087f2d71bdc6af6b7a
|
d1f9919400636e6b988fa933493b94829a73331e
|
refs/heads/master
| 2021-11-24T17:26:52.740997
| 2021-11-16T06:27:42
| 2021-11-16T06:27:42
| 192,038,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
class Solution(object):
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
paths = []
self.recursive(candidates, target, 0, [], paths)
return paths
def recursive(self, candidates, target, start_index, path, paths):
if target == 0:
paths.append(path)
return None
for i in range(start_index, len(candidates)):
if candidates[i] <= target:
self.recursive(candidates, target - candidates[i], i, path + [candidates[i]], paths)
return None
|
[
"ryangillard@google.com"
] |
ryangillard@google.com
|
8602f6a64d2286cd253d640ee4063c095b9f289a
|
ca507259c36a4299666f4c064f25832f5c3f45c1
|
/symbol_openapi_client/models/transaction_status_dto.py
|
f51ff88a06df1f8858bcf03ab41ae5ee63116815
|
[] |
no_license
|
fullcircle23/symbol-openapi-python-client
|
ae38a2537d1f2eebca115733119c444b79ec4962
|
3728d30eb1b5085a5a5e991402d180fac8ead68b
|
refs/heads/master
| 2022-04-15T06:20:47.821281
| 2020-04-16T02:39:14
| 2020-04-16T02:39:14
| 254,701,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,790
|
py
|
# coding: utf-8
"""
****************************************************************************
Copyright (c) 2016-present,
Jaguar0625, gimre, BloodyRookie, Tech Bureau, Corp. All rights reserved.
This file is part of Catapult.
Catapult is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Catapult is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with Catapult. If not, see <http://www.gnu.org/licenses/>.
****************************************************************************
Catapult REST Endpoints
OpenAPI Specification of catapult-rest 1.0.20.22 # noqa: E501
The version of the OpenAPI document: 0.8.9
Contact: ravi@nem.foundation
NOTE: This file is auto generated by Symbol OpenAPI Generator:
https://github.com/nemtech/symbol-openapi-generator
Do not edit this file manually.
"""
import pprint
import re # noqa: F401
import six
from symbol_openapi_client.configuration import Configuration
class TransactionStatusDTO(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'group': 'TransactionStateTypeEnum',
'code': 'TransactionStatusTypeEnum',
'hash': 'str',
'deadline': 'int',
'height': 'int'
}
attribute_map = {
'group': 'group',
'code': 'code',
'hash': 'hash',
'deadline': 'deadline',
'height': 'height'
}
def __init__(self, group=None, code=None, hash=None, deadline=None, height=None, local_vars_configuration=None): # noqa: E501
"""TransactionStatusDTO - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._group = None
self._code = None
self._hash = None
self._deadline = None
self._height = None
self.discriminator = None
self.group = group
if code is not None:
self.code = code
self.hash = hash
self.deadline = deadline
if height is not None:
self.height = height
@property
def group(self):
"""Gets the group of this TransactionStatusDTO. # noqa: E501
:return: The group of this TransactionStatusDTO. # noqa: E501
:rtype: TransactionStateTypeEnum
"""
return self._group
@group.setter
def group(self, group):
"""Sets the group of this TransactionStatusDTO.
:param group: The group of this TransactionStatusDTO. # noqa: E501
:type: TransactionStateTypeEnum
"""
if self.local_vars_configuration.client_side_validation and group is None: # noqa: E501
raise ValueError("Invalid value for `group`, must not be `None`") # noqa: E501
self._group = group
@property
def code(self):
"""Gets the code of this TransactionStatusDTO. # noqa: E501
:return: The code of this TransactionStatusDTO. # noqa: E501
:rtype: TransactionStatusTypeEnum
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this TransactionStatusDTO.
:param code: The code of this TransactionStatusDTO. # noqa: E501
:type: TransactionStatusTypeEnum
"""
self._code = code
@property
def hash(self):
"""Gets the hash of this TransactionStatusDTO. # noqa: E501
:return: The hash of this TransactionStatusDTO. # noqa: E501
:rtype: str
"""
return self._hash
@hash.setter
def hash(self, hash):
"""Sets the hash of this TransactionStatusDTO.
:param hash: The hash of this TransactionStatusDTO. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and hash is None: # noqa: E501
raise ValueError("Invalid value for `hash`, must not be `None`") # noqa: E501
self._hash = hash
@property
def deadline(self):
"""Gets the deadline of this TransactionStatusDTO. # noqa: E501
Duration expressed in number of blocks. # noqa: E501
:return: The deadline of this TransactionStatusDTO. # noqa: E501
:rtype: int
"""
return self._deadline
@deadline.setter
def deadline(self, deadline):
"""Sets the deadline of this TransactionStatusDTO.
Duration expressed in number of blocks. # noqa: E501
:param deadline: The deadline of this TransactionStatusDTO. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and deadline is None: # noqa: E501
raise ValueError("Invalid value for `deadline`, must not be `None`") # noqa: E501
self._deadline = deadline
@property
def height(self):
"""Gets the height of this TransactionStatusDTO. # noqa: E501
Height of the blockchain. # noqa: E501
:return: The height of this TransactionStatusDTO. # noqa: E501
:rtype: int
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this TransactionStatusDTO.
Height of the blockchain. # noqa: E501
:param height: The height of this TransactionStatusDTO. # noqa: E501
:type: int
"""
self._height = height
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TransactionStatusDTO):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TransactionStatusDTO):
return True
return self.to_dict() != other.to_dict()
|
[
"fullcircle2324@gmail.com"
] |
fullcircle2324@gmail.com
|
1621cd025aba6ae82b0151c94d67dbcc0918af22
|
32cb0be487895629ad1184ea25e0076a43abba0a
|
/LifePictorial/top/api/rest/FenxiaoGradesGetRequest.py
|
b11baf371f16e5abe3b7f6e328da2e7a1297435a
|
[] |
no_license
|
poorevil/LifePictorial
|
6814e447ec93ee6c4d5b0f1737335601899a6a56
|
b3cac4aa7bb5166608f4c56e5564b33249f5abef
|
refs/heads/master
| 2021-01-25T08:48:21.918663
| 2014-03-19T08:55:47
| 2014-03-19T08:55:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
'''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class FenxiaoGradesGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'taobao.fenxiao.grades.get'
|
[
"poorevil@gmail.com"
] |
poorevil@gmail.com
|
ddd78fe9da9398ec8fd58bfc448d03b175f789e7
|
c1261b9181d86c418df612dc809af933cfbb2c0d
|
/blog1/models.py
|
72e8cea72ffc9af34805e080119fd81054f69afd
|
[] |
no_license
|
gitlGl/myblog
|
122a598407d12a7397420ce50f9c1ca68a3107d2
|
b3d7d1130e81ca625cb9d2b7204e19da6efe7d07
|
refs/heads/master
| 2023-09-01T14:06:04.720407
| 2022-10-22T08:47:02
| 2022-10-22T08:47:02
| 198,171,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,075
|
py
|
from django.db import models
from django.urls import reverse
# Create your models here.
#hjflghjglkhd
# Create your models here.
from django.shortcuts import render, get_object_or_404
#from .models import Post
from django.db import models
from django.contrib.auth.models import User
from tinymce.models import HTMLField
class Category(models.Model):
"""
Django 要求模型必须继承 models.Model 类。
Category 只需要一个简单的分类名 name 就可以了。
CharField 指定了分类名 name 的数据类型,CharField 是字符型,
CharField 的 max_length 参数指定其最大长度,超过这个长度的分类名就不能被存入数据库。
当然 Django 还为我们提供了多种其它的数据类型,如日期时间类型 DateTimeField、整数类型 IntegerField 等等。
Django 内置的全部类型可查看文档:
https://docs.djangoproject.com/en/1.10/ref/models/fields/#field-types
"""
name = models.CharField(max_length=100)
def get_absolute_url(self):
#print(self.pk)
return reverse('blog1:Category_page', kwargs={'category1': self.pk})
'''
class Tag(models.Model):
"""
标签 Tag 也比较简单,和 Category 一样。
再次强调一定要继承 models.Model 类!
"""
name = models.CharField(max_length=100)
'''
class Post(models.Model):
"""
文章的数据库表稍微复杂一点,主要是涉及的字段更多。
"""
# 文章标题
title = models.CharField(max_length=70)
# 文章正文,我们使用了 TextField。
# 存储比较短的字符串可以使用 CharField,但对于文章的正文来说可能会是一大段文本,因此使用 TextField 来存储大段文本。
body = HTMLField()
# 这两个列分别表示文章的创建时间和最后一次修改时间,存储时间的字段用 DateTimeField 类型。
created_time = models.DateTimeField()
modified_time = models.DateTimeField()
# 文章摘要,可以没有文章摘要,但默认情况下 CharField 要求我们必须存入数据,否则就会报错。
# 指定 CharField 的 blank=True 参数值后就可以允许空值了。
excerpt = HTMLField( blank=True)
# 这是分类与标签,分类与标签的模型我们已经定义在上面。
# 我们在这里把文章对应的数据库表和分类、标签对应的数据库表关联了起来,但是关联形式稍微有点不同。
# 我们规定一篇文章只能对应一个分类,但是一个分类下可以有多篇文章,所以我们使用的是 ForeignKey,即一对多的关联关系。
# 而对于标签来说,一篇文章可以有多个标签,同一个标签下也可能有多篇文章,所以我们使用 ManyToManyField,表明这是多对多的关联关系。
# 同时我们规定文章可以没有标签,因此为标签 tags 指定了 blank=True。
# 如果你对 ForeignKey、ManyToManyField 不了解,请看教程中的解释,亦可参考官方文档:
# https://docs.djangoproject.com/en/1.10/topics/db/models/#relationships
category = models.ForeignKey('Category',on_delete=models.CASCADE)
#tags = models.ManyToManyField('Tag' ,blank=True)
# 文章作者,这里 User 是从 django.contrib.auth.models 导入的。
# django.contrib.auth 是 Django 内置的应用,专门用于处理网站用户的注册、登录等流程,User 是 Django 为我们已经写好的用户模型。
# 这里我们通过 ForeignKey 把文章和 User 关联了起来。
# 因为我们规定一篇文章只能有一个作者,而一个作者可能会写多篇文章,因此这是一对多的关联关系,和 Category 类似。
author = models.ForeignKey(User,on_delete=models.CASCADE)
def __str__(self):
return self.title
# 自定义 get_absolute_url 方法
# 记得从 django.urls 中导入 reverse 函数
def get_absolute_url(self):
return reverse('blog1:detail', kwargs={'pk': self.pk})
#Category.get_absolute_url()
|
[
"you@example.com"
] |
you@example.com
|
099a47f37b2f69649665afc8b56a49ed9d42362d
|
c2ae65792af1fab2e7843303ef90790819f872e8
|
/Algorithm/Math/Power-Function.py
|
f0b6d0d8a9288824dc87f7ac62ccb797cc247d80
|
[] |
no_license
|
behappyyoung/PythonSampleCodes
|
47c224ca76ce509a03c8b75ef6b4bf7f49ebdd7f
|
f7640467273fa8ea3c7e443e798737ca5bcea6f9
|
refs/heads/master
| 2023-03-15T00:53:21.034605
| 2023-02-13T17:12:32
| 2023-02-13T17:12:32
| 26,919,763
| 3
| 3
| null | 2023-03-07T12:45:21
| 2014-11-20T15:57:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,804
|
py
|
"""
Implement pow(x, n)
"""
from datetime import datetime
def power_function(x, y):
if y == 0:
return 1
else:
for i in range(y):
x *= x
return x
def power_function_s(x, y):
if y == 0:
return 1
elif int(y % 2) == 0:
return power_function_s(x, int(y / 2)) * power_function_s(x, int(y / 2))
else:
return x * power_function_s(x, int(y / 2)) * power_function_s(x, int(y / 2))
def power_function_s_s(x, y):
if y == 0:
return 1
else:
temp = power_function_s_s(x, y // 2)
if int(y % 2) == 0:
return temp * temp
else:
return x * temp * temp
# print(power_function_s(-1, 2))
# print(power_function_s(-1, 3))
# print(power_function_s(-2, 2))
# start_time = datetime.now()
# print(str(power_function_s(7100, 4150))[:50])
# end_time = datetime.now()
# print(end_time - start_time)
# start_time = datetime.now()
# print(str(power_function_s_s(7100, 4150))[:50])
# end_time = datetime.now()
# print(end_time - start_time)
def power_function_f(x, y):
print(y)
if y == 0:
return 1
elif y > 0:
temp = power_function_f(x, y // 2)
print(y, temp)
if int(y % 2) == 0:
return temp * temp
else:
return x * temp * temp
else:
next_y = -(-y // 2)
temp = power_function_f(x, next_y)
print(y, next_y, temp)
if int(-y % 2) == 0:
return 1 / (temp * temp) if temp >1 else (temp * temp)
else:
return 1/(x * temp * temp) if temp >1 else x* (temp * temp)
print(str(power_function_f(2.00000, 2)))
print(str(power_function_f(2.00000, -2)))
print(str(power_function_f(8.84372, -5)))
|
[
"behappyyoung@gmail.com"
] |
behappyyoung@gmail.com
|
5bb0e557dd728812cf61315606915473308c1769
|
c4a33b613ffc77dccf96d33c3a5cc127405c0e95
|
/products/models.py
|
a07d41df737ce764165d547ebee4f370ea33ad54
|
[] |
no_license
|
tsokac2/new-irish-life
|
25f49bd0b74dfa7c0a449772249f6cb51925b643
|
d09934b60a1fd4fbd4540d412dc5dab726f5b502
|
refs/heads/main
| 2023-07-02T09:54:55.082587
| 2021-07-30T04:42:57
| 2021-07-30T04:42:57
| 379,245,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,115
|
py
|
from django.db import models
class Category(models.Model):
class Meta:
verbose_name_plural = 'Categories'
name = models.CharField(max_length=254)
friendly_name = models.CharField(max_length=254, null=True, blank=True)
def __str__(self):
return self.name
def get_friendly_name(self):
return self.friendly_name
class Product(models.Model):
category = models.ForeignKey(
'Category', null=True, blank=True, on_delete=models.SET_NULL)
sku = models.CharField(max_length=254, null=True, blank=True)
name = models.CharField(max_length=254)
description = models.TextField()
price = models.DecimalField(max_digits=6, decimal_places=2)
sale_price = models.DecimalField(
max_digits=6, decimal_places=2, null=True, blank=True)
rating = models.DecimalField(
max_digits=2, decimal_places=1, null=True, blank=True)
image_1 = models.ImageField(null=True, blank=True)
image_2 = models.ImageField(null=True, blank=True)
image_3 = models.ImageField(null=True, blank=True)
def __str__(self):
return self.name
|
[
"tsokac2@gmail.com"
] |
tsokac2@gmail.com
|
822b4360a54f56d2fbbdea34fed6eb34287dcaa8
|
f9ed608c620093b9f6b5058bcedf7ae610c09c8d
|
/298-Binary_Tree_Longest_Consecutive_Sequence.py
|
da80a4acedcbf4c90c5005102e1c5612e3ed7da4
|
[] |
no_license
|
chanyoonzhu/leetcode-python
|
9b88d7f2749e1ae3ed597759b1bf9f7fa4912c35
|
085d868ba0458fc8e6b5549aa00fa151c335fa7f
|
refs/heads/master
| 2022-05-24T11:20:35.927915
| 2022-04-16T06:02:33
| 2022-04-16T06:02:33
| 166,224,197
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,998
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
"""
Questions:
- validity of number (floats, negative integer and 0)
"""
"""
- top-down solution (pre-order traversal)
- O(n): every node is visited once - The time complexity is the same as an in-order traversal of a binary tree with n nodes.
- O(n): The extra space comes from implicit stack space due to recursion. For a skewed binary tree, the recursion could go up to nn levels deep.
"""
class Solution(object):
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.dfs(root, -100, 0) # provided that node values are all positive
def dfs(self, node, prev_val, _max):
if node is None:
return _max
if node.val != prev_val + 1:
return max(_max, self.dfs(node.left, node.val, 1), self.dfs(node.right, node.val, 1))
if node.val == prev_val + 1:
return max(self.dfs(node.left, node.val, _max+1), self.dfs(node.right, node.val, _max+1))
"""
- bottom-up solution
- O(n), O(n)
"""
class Solution:
def longestConsecutive(self, root: Optional[TreeNode]) -> int:
self.maximum_length = 0
def helper(root):
if not root:
return 0
length = 1
# easy to miss: need to call helper even if node doesn't connect downwards
l = helper(root.left)
r = helper(root.right)
if root.left and root.left.val == root.val + 1:
length = max(length, 1 + l)
if root.right and root.right.val == root.val + 1:
length = max(length, 1 + r)
self.maximum_length = max(self.maximum_length, length)
return length
helper(root)
return self.maximum_length
|
[
"zhuchanyoon@gmail.com"
] |
zhuchanyoon@gmail.com
|
c1a76f770a4973d3e1450afc83bc808df1007e5b
|
b7174170d50b867050c80129dbde239a948f6f85
|
/tools/sapp/sapp/tests/sharded_files/sharded_files_test.py
|
1e1903098d02d2e0eba4cf7b97bb287f1d108f76
|
[
"MIT"
] |
permissive
|
GreyElaina/pyre-check
|
c1a2b7a6ee050f606322eaa588f9bd95cd1b3dbc
|
abcb5daa64c38a25aed9ab238bb61290444ab06c
|
refs/heads/master
| 2022-12-19T21:35:09.582761
| 2020-09-12T05:54:32
| 2020-09-12T05:58:24
| 295,080,507
| 0
| 0
|
MIT
| 2020-09-13T04:52:19
| 2020-09-13T04:52:18
| null |
UTF-8
|
Python
| false
| false
| 3,086
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from ...sharded_files import ShardedFile
test_path = "tools/sapp/sapp//tests/sharded_files"
class TestShardedFiles(unittest.TestCase):
def test_fails_for_no_sharding(self):
pattern = os.path.join(test_path, "foo.bar")
with self.assertRaisesRegex(ValueError, "Not a sharded file"):
ShardedFile(pattern)
def test_returns_two_shards_for_star(self):
pattern = os.path.join(test_path, "foo@*.bar")
sf = ShardedFile(pattern)
self.assertEqual(
sf.get_filenames(),
[
os.path.join(test_path, "foo@00000-of-00002.bar"),
os.path.join(test_path, "foo@00001-of-00002.bar"),
],
)
def test_returns_two_shards_for_two(self):
pattern = os.path.join(test_path, "foo@2.bar")
sf = ShardedFile(pattern)
self.assertEqual(
sf.get_filenames(),
[
os.path.join(test_path, "foo@00000-of-00002.bar"),
os.path.join(test_path, "foo@00001-of-00002.bar"),
],
)
def test_returns_two_shards_for_two_ambiguous(self):
pattern = os.path.join(test_path, "ambiguous@2.ext")
sf = ShardedFile(pattern)
self.assertEqual(
sf.get_filenames(),
[
os.path.join(test_path, "ambiguous@00000-of-00002.ext"),
os.path.join(test_path, "ambiguous@00001-of-00002.ext"),
],
)
def test_returns_two_shards_for_one_ambiguous(self):
pattern = os.path.join(test_path, "ambiguous@1.ext")
sf = ShardedFile(pattern)
self.assertEqual(
sf.get_filenames(),
[os.path.join(test_path, "ambiguous@00000-of-00001.ext")],
)
def test_fails_for_bad_sharding_pattern(self):
pattern = os.path.join(test_path, "foo@baz.bar")
with self.assertRaisesRegex(ValueError, "Invalid shard specification: baz"):
ShardedFile(pattern)
def test_fails_for_ambiguous_star_pattern(self):
pattern = os.path.join(test_path, "ambiguous@*.ext")
with self.assertRaisesRegex(
ValueError, "@* matches ambiguous shard sets: @1 and @2"
):
ShardedFile(pattern)
def test_fails_for_inconsistent_set(self):
pattern = os.path.join(test_path, "inconsistent@2.baz")
with self.assertRaisesRegex(
ValueError,
f"Shard {test_path}/inconsistent@00001-of-00002.baz does not exist.",
):
ShardedFile(pattern)
def test_fails_for_inconsistent_set_star(self):
pattern = os.path.join(test_path, "inconsistent@*.baz")
with self.assertRaisesRegex(
ValueError,
f"Shard {test_path}/inconsistent@00001-of-00002.baz does not exist.",
):
ShardedFile(pattern)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
cd28d35b22be1c515cd2b2d8c6067d3bec20ebf5
|
131cf803a1f7b9638ab0a604d61ab2de22906014
|
/tests/system/web/api_1_0/resources/test_action_template.py
|
cb67c0780e6d1e4638116518724615052eadf8a7
|
[
"Apache-2.0"
] |
permissive
|
dimensigon/dimensigon
|
757be1e61e57f7ce0a610a9531317761393eaad0
|
079d7c91a66e10f13510d89844fbadb27e005b40
|
refs/heads/master
| 2023-03-09T06:50:55.994738
| 2021-02-21T11:45:01
| 2021-02-21T11:45:01
| 209,486,736
| 2
| 0
|
Apache-2.0
| 2021-02-26T02:59:18
| 2019-09-19T07:11:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,167
|
py
|
from flask import url_for
from dimensigon import defaults
from dimensigon.domain.entities import bypass_datamark_update, ActionType, ActionTemplate
from dimensigon.web import db
from tests.base import TestDimensigonBase
class TestActionTemplate(TestDimensigonBase):
def setUp(self) -> None:
self.initials = dict(self.initials)
self.initials.update(action_template=False)
super().setUp()
def fill_database(self):
self.at1 = ActionTemplate(id="aaaaaaaa-1234-5678-1234-56781234aaa1", action_type=ActionType.SHELL,
code="mkdir {dir}", last_modified_at=defaults.INITIAL_DATEMARK, name="mkdir",
version=1)
self.at2 = ActionTemplate(id="aaaaaaaa-1234-5678-1234-56781234aaa2",
action_type=ActionType.SHELL,
code="rmdir {dir}",
last_modified_at=defaults.INITIAL_DATEMARK,
expected_stdout='output',
expected_stderr='err',
expected_rc=0,
name="rmdir",
system_kwargs={'kwarg1': 1},
pre_process='pre_process',
post_process='post_process',
version=1
)
with bypass_datamark_update():
db.session.add_all([self.at1, self.at2])
db.session.commit()
def test_action_template_list(self):
response = self.client.get(url_for('api_1_0.actiontemplatelist'), headers=self.auth.header)
self.assertListEqual([self.at1.to_json(), self.at2.to_json()],
response.get_json())
def test_action_template(self):
response = self.client.get(
url_for('api_1_0.actiontemplateresource', action_template_id="aaaaaaaa-1234-5678-1234-56781234aaa1"),
headers=self.auth.header)
self.assertDictEqual(
self.at1.to_json(), response.get_json())
|
[
"joan.prat@knowtrade.eu"
] |
joan.prat@knowtrade.eu
|
e7a482ab473236d6774c1f06998aecde86703dd1
|
e78154abbb8bacf5afccda9da371684cbeabad36
|
/popego/popserver/popserver/db/.svn/text-base/migration_004.py.svn-base
|
b03ff10669d839bab29b9c43a3f116206f18bf1f
|
[
"BSD-3-Clause"
] |
permissive
|
enterstudio/popego
|
1a196fabc374c0f45764e5c74bd7752236424040
|
2d09e793d9d2f297139edb325b8a70ddda9b2705
|
refs/heads/master
| 2021-04-09T16:39:40.781634
| 2016-10-14T16:53:47
| 2016-10-14T16:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
""" agregar columna para grupo distinguido en itemgroups """
__docformat__ = "restructuredtext"
# I'm using a creative whitespace style that makes it readable both here
# and when printed.
migration = [
("""\
ALTER TABLE itemgroups ADD COLUMN is_null_group BOOLEAN NOT NULL;
""",
"""\
ALTER TABLE itemgroups DROP COLUMN is_null_group;
"""),
]
|
[
"santisiri@gmail.com"
] |
santisiri@gmail.com
|
|
4aaf0372ce3ad85fcd307858db488117364e0b63
|
cb405bbb673711585ec36b9c9cf9e1c255579e83
|
/web/service/github/api/v3/users/SshKeys.py
|
67d609fd1471f4658286030e24db636d05bb71a7
|
[
"BSD-3-Clause",
"CC0-1.0",
"MIT",
"Unlicense",
"Apache-2.0"
] |
permissive
|
ytyaru/Python.OTP.tools.201704200841
|
c5b265a6458d007ad9b1ad032b42a4eacb5944a3
|
120239f96e40467203939492fd7ad9c5967fac0f
|
refs/heads/master
| 2021-01-25T06:23:56.998622
| 2017-06-06T20:26:59
| 2017-06-06T20:26:59
| 93,560,782
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,363
|
py
|
#!python3
#encoding:utf-8
import requests
import datetime
import time
import json
import web.service.github.api.v3.Response
class SshKeys(object):
def __init__(self, reqp, response):
# def __init__(self):
# self.__response = web.service.github.api.v3.Response.Response()
self.__reqp = reqp
self.__response = response
"""
SSH鍵の生成。
@params {string} public_keyはSSH公開鍵。
@params {string} titleはSSH公開鍵。
"""
def Create(self, public_key, title=None):
# def Create(self, mailaddress, public_key):
# def Create(self, token, mailaddress, public_key):
method = 'POST'
endpoint = 'users/:username/keys'
params = self.__reqp.Get(method, endpoint)
# headers=self.__GetHeaders(token)
# data=json.dumps({'title': mailaddress, 'key': public_key})
# params['data'] = json.dumps({'title': mailaddress, 'key': public_key})
params['data'] = json.dumps({'title': title, 'key': public_key})
url = 'https://api.github.com/user/keys'
print(url)
print(data)
r = requests.post(url, **params)
# r = requests.post(url, headers=headers, data=data)
return self.__response.Get(r)
def Gets(self, username):
# def Gets(self, username, token):
method = 'GET'
endpoint = 'users/:username/keys'
params = self.__reqp.Get(method, endpoint)
keys = []
url = 'https://api.github.com/users/{username}/keys'.format(username=username)
# headers=self.__GetHeaders(token)
while None is not url:
print(url)
# r = requests.get(url, headers=headers)
r = requests.get(url, **params)
keys += self.__response.Get(r)
url = self.__response.Headers.Link.Next(r)
params = self.__reqp.Get(method, endpoint)
return keys
def Get(self, key_id):
# def Get(self, token, key_id):
method = 'GET'
endpoint = 'user/keys/:id'
params = self.__reqp.Get(method, endpoint)
url = 'https://api.github.com/user/keys/{key_id}'.format(key_id=key_id)
# headers=self.__GetHeaders(token)
print(url)
r = requests.get(url, **params)
# r = requests.get(url, headers=headers)
return self.__response.Get(r)
"""
GitHubに設定したSSH公開鍵を削除する。
BASIC認証でしか使えない。
"""
def Delete(self, key_id):
# def Delete(self, key_id, username, password, otp=None):
method = 'DELETE'
endpoint = 'user/keys/:id'
params = self.__reqp.Get(method, endpoint)
url = 'https://api.github.com/user/keys/{key_id}'.format(key_id=key_id)
# headers=self.__GetHeaders(otp)
print(url)
r = requests.delete(url, **params)
# r = requests.delete(url, headers=headers, auth=(username, password))
return self.__response.Get(r)
def __GetHeaders(self, token=None, otp=None):
headers = {
'Time-Zone': 'Asia/Tokyo',
'Accept': 'application/vnd.github.v3+json'
}
if None is not token:
headers.update({'Authorization': 'token ' + token})
if None is not otp:
headers.update({'X-GitHub-OTP': otp})
print(headers)
return headers
|
[
"ytyaru@outlook.jp"
] |
ytyaru@outlook.jp
|
b65865ec1ef0c354e6434848517d607c33532700
|
fb23ef7cffab7c23081d026e4bf6e642746ba5fa
|
/pytext/models/r3f_models.py
|
ccae7e70cb022880311ad97ec658d450bc65251a
|
[
"BSD-3-Clause"
] |
permissive
|
thomascherickal/pytext
|
2558d597f4355e728cd9982dc29f2b857caed77d
|
da680540176436163ffd34136dd35a22f98dbb3c
|
refs/heads/master
| 2023-02-27T07:20:30.079053
| 2021-02-04T04:56:57
| 2021-02-04T04:58:44
| 336,046,191
| 1
| 0
|
NOASSERTION
| 2021-02-04T18:28:05
| 2021-02-04T18:28:04
| null |
UTF-8
|
Python
| false
| false
| 7,228
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from contextlib import AbstractContextManager
from enum import Enum
from typing import Dict
import torch
import torch.nn.functional as F
from pytext.common.constants import Stage
from pytext.config import ConfigBase
from pytext.utils.precision import maybe_float
class R3FNoiseType(Enum):
UNIFORM = "uniform"
NORMAL = "normal"
def build_noise_sampler(noise_type: R3FNoiseType, eps: float):
"""
Given a `noise_type` (`R3FNoiseType`): builds a `torch.distribution`
capable of generating noise within the passed in `eps` (`float`).
"""
if noise_type == R3FNoiseType.UNIFORM:
return torch.distributions.uniform.Uniform(low=-eps, high=eps)
elif noise_type == R3FNoiseType.NORMAL:
return torch.distributions.normal.Normal(loc=0.0, scale=eps)
else:
raise Exception(f"Unknown noise type: {noise_type}")
def compute_symmetric_kl(noised_logits, input_logits):
"""
Computes symmetric KL loss by taking the KL for both the input logits
and the noised logits and comparing the two
"""
return F.kl_div(
F.log_softmax(noised_logits, dim=-1, dtype=torch.float32),
F.softmax(input_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
) + F.kl_div(
F.log_softmax(input_logits, dim=-1, dtype=torch.float32),
F.softmax(noised_logits, dim=-1, dtype=torch.float32),
None,
None,
"sum",
) # / noised_logits.size(0)
class R3FConfigOptions(ConfigBase):
"""
Configuration options for models using R3F
"""
# for MTL purposes different lambda per loss
r3f_lambda_by_loss: Dict[str, float] = {}
r3f_default_lambda: float = 0.5
eps: float = 1e-5
noise_type: R3FNoiseType = R3FNoiseType.UNIFORM
class R3FNoiseContextManager(AbstractContextManager):
"""
Context manager that adds a forward hook to the embedding module,
to insert noise into the model and detatch embedding when doing
this pass
"""
def __init__(self, context):
self.encoder_hook = None
self.decoder_hook = None
self.context = context
self.hook = self.context.get_embedding_module().register_forward_hook(
self._hook_implementation
)
def __enter__(self):
return self.context
def __exit__(self, type, value, traceback):
self.hook.remove()
self.hook = None
def _hook_implementation(self, module, input, output):
noise = self.context.noise_sampler.sample(sample_shape=output.shape).to(output)
return output.clone().detach() + noise
class R3FPyTextMixin(object):
"""
Mixin class for applying the R3F method, to apply R3F with any model
inherit the class and implement the abstract functions.
For more details: https://arxiv.org/abs/2008.03156
"""
def __init__(self, config: R3FConfigOptions):
self.r3f_lambda_by_loss = config.r3f_lambda_by_loss
self.r3f_default_lambda = config.r3f_default_lambda
self.r3f_eps = config.eps
self.noise_sampler = build_noise_sampler(config.noise_type, self.r3f_eps)
def get_embedding_module(self, *args, **kwargs):
"""
Given the core model outputs, this returns the embedding module that is used
for the R3F loss, in particular noise will be injected to this module.
"""
raise NotImplementedError()
def forward_with_noise(self, *args, **kwargs):
with R3FNoiseContextManager(self):
return self.original_forward(*args, **kwargs)
def original_forward(self, *args, **kwargs):
"""
Runs the traditional forward of this model
"""
raise NotImplementedError()
def get_sample_size(self, model_inputs, targets):
"""
Gets the sample size of the model that is used as a regularization
factor to the model itself
"""
raise NotImplementedError()
def get_r3f_model_output(self, model_output):
"""
Extracts the output from the model.forward() call that is used for the
r3f loss term
"""
return model_output
def forward(self, *args, use_r3f: bool = False, **kwargs):
if use_r3f:
# forward with the normal model
model_output = self.original_forward(
*args,
**kwargs,
)
# compute noised model outputs
noise_model_outputs = self.forward_with_noise(
*args,
**kwargs,
)
return model_output, noise_model_outputs
else:
return self.original_forward(*args, **kwargs)
def get_r3f_loss_terms(
self, model_outputs, noise_model_outputs, sample_size: int
) -> torch.Tensor:
"""
Computes the auxillary loss for R3F, in particular computes a symmetric
KL divergence between the result from the input embedding and the noise
input embedding.
"""
label_symm_kl = compute_symmetric_kl(
self.get_r3f_model_output(noise_model_outputs),
self.get_r3f_model_output(model_outputs),
)
label_symm_kl = label_symm_kl # * sample_size
return (
self.r3f_lambda_by_loss.get("label", self.r3f_default_lambda)
* label_symm_kl
)
@classmethod
def train_batch(cls, model, batch, state=None):
"""
Runs training over a batch with the R3F method, training will use R3F
while eval and test do not.
"""
# Forward pass through the network.
model_inputs = model.arrange_model_inputs(batch)
model_context = model.arrange_model_context(batch)
targets = model.arrange_targets(batch)
sample_size = model.get_sample_size(model_inputs=model_inputs, targets=targets)
# get embedding
r3f_loss_term = torch.tensor(0)
if state and state.stage == Stage.TRAIN:
# during training run R3F forward calls
model_outputs, noise_model_outputs = model(*model_inputs, use_r3f=True)
r3f_loss_term = model.get_r3f_loss_terms(
model_outputs, noise_model_outputs, sample_size=sample_size
)
else:
# during eval and test don't run R3F forward
model_outputs = model(*model_inputs, use_r3f=False)
# Add stage to context.
if state:
if model_context is None:
model_context = {"stage": state.stage, "epoch": state.epoch}
else:
model_context["stage"] = state.stage
model_context["epoch"] = state.epoch
# Compute loss and predictions.
loss = maybe_float(model.get_loss(model_outputs, targets, model_context))
# add R3F loss term
loss = loss + r3f_loss_term.to(loss.device)
predictions, scores = model.get_pred(model_outputs, context=model_context)
# Pack results and return them.
metric_data = (predictions, targets, scores, loss, model_inputs)
return loss, metric_data
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
d590ce824a5cb5b72f17a9cab92fbb89c815aae1
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_brains.py
|
f3368e44fd4ab511605eb85187d524295c0a1b9c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#calss header
class _BRAINS():
def __init__(self,):
self.name = "BRAINS"
self.definitions = brain
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['brain']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
59eac1215433b62efec457f6593d5623cc29ab49
|
933fc538fa7f1b304467b5e1a0ba7e155abbb535
|
/setup.py
|
9cc53f678f6035c91fc9ed565426d676a71e6390
|
[
"MIT"
] |
permissive
|
PCKK123/geemap
|
f2f8d3ab59127b4399ecf005b2eca3cd91832791
|
ce65675ca4935076793e667f342e7be79a0ef6c5
|
refs/heads/master
| 2023-07-19T05:47:16.950807
| 2021-09-03T12:55:36
| 2021-09-03T12:55:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
#!/usr/bin/env python
"""The setup script."""
import platform
from os import path as op
import io
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
here = op.abspath(op.dirname(__file__))
# get the dependencies and installs
with io.open(op.join(here, "requirements.txt"), encoding="utf-8") as f:
all_reqs = f.read().split("\n")
if platform.system() == "Windows":
all_reqs.append("pywin32")
install_requires = [x.strip() for x in all_reqs if "git+" not in x]
dependency_links = [x.strip().replace("git+", "") for x in all_reqs if "git+" not in x]
requirements = [
"Click>=7.0",
]
setup_requirements = []
test_requirements = []
setup(
author="Qiusheng Wu",
author_email="giswqs@gmail.com",
python_requires=">=3.5",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="A Python package for interactive mapping using Google Earth Engine and ipyleaflet",
entry_points={
"console_scripts": [
"geemap=geemap.cli:main",
],
},
install_requires=install_requires,
dependency_links=dependency_links,
license="MIT license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="geemap",
name="geemap",
packages=find_packages(include=["geemap", "geemap.*"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/giswqs/geemap",
version="0.8.18",
zip_safe=False,
)
|
[
"giswqs@gmail.com"
] |
giswqs@gmail.com
|
45b6c019f9d4399a36f8f1ceb7bd52afae6238cf
|
86a017dd4c8d4d77c511cc598190aaa9dc0ae3e8
|
/study_class.py
|
4441fdb2079687168ed046dbee0f5dd42d538569
|
[] |
no_license
|
sungguenja/studying
|
fd7459eb9faa6488d7b63bf3884a92513daf3c54
|
719f4dfbda211c34de2a0c8cf3b9d3001f29fcec
|
refs/heads/master
| 2023-08-17T13:46:44.343780
| 2023-08-10T11:55:15
| 2023-08-10T11:55:15
| 232,306,053
| 0
| 0
| null | 2022-12-16T10:53:26
| 2020-01-07T11:00:28
|
Python
|
UTF-8
|
Python
| false
| false
| 756
|
py
|
class Shape:
def __init__(self, length):
self.__length = length
def ar(self):
return self.__length * self.__length
def length(self):
return self.__length
class Squre(Shape):
def __init__(self, length, area):
super(Squre, self).__init__(length)
self.__area=area
def area(self):
return self.__area
def __repr__(self):
return "{0}".format(super(Squre, self).ar())
N1=Squre(3, 9)
print(N1)
class Person:
def __init__(self, gender):
self.__gender = gender
def gender(self):
return self.__gender
def __repr__(self):
return "{0}".format(self.__gender)
class Son(Person):
def __init__(self, gender):
super(Son, self).__init__(gender)
def __repr__(self):
return "{0}".format(super(Son, self).gender())
J1=Son("Male")
J2=Son("Female")
print(J1)
print(J2)
|
[
"59605197+sungguenja@users.noreply.github.com"
] |
59605197+sungguenja@users.noreply.github.com
|
b070add0a3c79d07ec513e5fb177ced969dc9b3b
|
7e41fd7f107a771b9bb52f9c58cd4afda6308044
|
/tool/modules/ip_misc.py
|
097f6e8f3e0122de2126eac3e6ed632e7dd051fc
|
[] |
no_license
|
gisazae/SpoofingDetector
|
e6043bef4b8c28af63fd905963abbc89c954f4a8
|
0ba2d6dd7e91f629f8b83dc851faa4f301f53db7
|
refs/heads/master
| 2021-01-24T08:41:53.820723
| 2018-02-26T15:35:56
| 2018-02-26T15:35:56
| 122,988,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
import re, os
from scapy.all import *
'''
Check if ip address is correctly formed
'''
def check_IP(ip):
pattern = re.compile("^(([0-9]){1,3}\.([0-9]){1,3}\.([0-9]){1,3}\.([0-9]){1,3})$")
return pattern.match(ip)
'''
Check if port number is between range 1-65535
'''
def check_port(port):
return int(port) in range(1, 65536)
'''
Enables IPv4 forwarding for routing purposes
'''
def enable_forward():
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
'''
Disables IPv4 forwarding
'''
def disable_forward():
os.system("echo 0 > /proc/sys/net/ipv4/ip_forward")
'''
Redirects http traffic to this machine's proxy (sslstrip)
'''
def start_http_redirect(port):
os.system("iptables -t nat -A PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port " + str(port))
'''
Stops http redirect to sslstrip proxy
'''
def stop_http_redirect(port):
os.system("iptables -t nat -D PREROUTING -p tcp --destination-port 80 -j REDIRECT --to-port " + str(port))
'''
Redirects DNS queries to this machine's DNS
'''
def start_dns_redirect():
os.system("iptables -t nat -A PREROUTING -p udp --destination-port 53 -j REDIRECT --to-port 53")
'''
Stops DNS query redirect
'''
def stop_dns_redirect():
os.system("iptables -t nat -D PREROUTING -p udp --destination-port 53 -j REDIRECT --to-port 53")
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
bd672e59d39f51800c208d1f7209a8897e8f0889
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/class_def_attr-big-134.py
|
466ed590182cc1782fc6ea8ba942f3b937d480e4
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
class A(object):
x:int = 1
class A2(object):
x:int = 1
x2:int = 1
class A3(object):
x:int = 1
x2:int = 1
x3:int = 1
class A4(object):
x:int = 1
x2:int = 1
x3:int = 1
x4:int = 1
class A5(object):
x:int = 1
x2:int = 1
x3:int = $Literal
x4:int = 1
x5:int = 1
class B(A):
def __init__(self: "B"):
pass
class B2(A):
def __init__(self: "B2"):
pass
class B3(A):
def __init__(self: "B3"):
pass
class B4(A):
def __init__(self: "B4"):
pass
class B5(A):
def __init__(self: "B5"):
pass
class C(B):
z:bool = True
class C2(B):
z:bool = True
z2:bool = True
class C3(B):
z:bool = True
z2:bool = True
z3:bool = True
class C4(B):
z:bool = True
z2:bool = True
z3:bool = True
z4:bool = True
class C5(B):
z:bool = True
z2:bool = True
z3:bool = True
z4:bool = True
z5:bool = True
a:A = None
a2:A = None
a3:A = None
a4:A = None
a5:A = None
b:B = None
b2:B = None
b3:B = None
b4:B = None
b5:B = None
c:C = None
c2:C = None
c3:C = None
c4:C = None
c5:C = None
a = A()
a2 = A()
a3 = A()
a4 = A()
a5 = A()
b = B()
b2 = B()
b3 = B()
b4 = B()
b5 = B()
c = C()
c2 = C()
c3 = C()
c4 = C()
c5 = C()
a.x = 1
b.x = a.x
c.z = a.x == b.x
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
13385c1bac21e9f6d94674d6956396df3909b0ad
|
dd3f5a712dbab0d3c4f4526c64c08ba710f78b81
|
/Basic/pachong/t04proxy.py
|
ff16307fbb87fb20b693ba29b62a7d190f6c04e9
|
[] |
no_license
|
nameusea/pyGreat
|
3988ebcce3f80a7e458a20f9b2e3ccba368efcf8
|
dde8b6a1348620ffd3b2d65db3d5b4331e5c78be
|
refs/heads/master
| 2023-04-25T09:02:32.831423
| 2021-05-17T11:31:22
| 2021-05-17T11:31:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,590
|
py
|
import requests
from bs4 import BeautifulSoup
import json
# https://wanakiki.github.io/2020/spider-with-proxy/
class GetIp(object):
"""抓取代理IP"""
def __init__(self):
"""初始化变量"""
self.url = 'http://www.xicidaili.com/nt/'
self.check_url = 'https://www.ip.cn/'
self.ip_list = []
@staticmethod
def get_html(url):
"""请求html页面信息"""
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
}
try:
request = requests.get(url=url, headers=header)
request.encoding = 'utf-8'
html = request.text
return html
except Exception as e:
return ''
def get_available_ip(self, ip_address, ip_port):
"""检测IP地址是否可用"""
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
}
ip_url_next = '://' + ip_address + ':' + ip_port
proxies = {'http': 'http' + ip_url_next, 'https': 'https' + ip_url_next}
try:
r = requests.get(self.check_url, headers=header, proxies=proxies, timeout=2)
html = r.text
except:
print('fail-%s' % ip_address)
else:
print('success-%s' % ip_address)
soup = BeautifulSoup(html, 'lxml')
div = soup.find(class_='well')
if div:
print(div.text)
ip_info = {'address': ip_address, 'port': ip_port}
self.ip_list.append(ip_info) # 可以用的ip保存到self.ip_list
def main(self):
"""主方法"""
for i in range(1, 10): # 从这个网站上检测n页的ip
web_html = self.get_html(self.url+str(i))
soup = BeautifulSoup(web_html, 'lxml')
ip_list = soup.find(id='ip_list').find_all('tr')
for ip_info in ip_list:
td_list = ip_info.find_all('td')
if len(td_list) > 0:
ip_address = td_list[1].text
ip_port = td_list[2].text
# 检测IP地址是否有效
self.get_available_ip(ip_address, ip_port)
# 写入有效文件
with open('data/ip.txt', 'w') as file:
json.dump(self.ip_list, file)
print(self.ip_list)
# 程序主入口
if __name__ == '__main__':
get_ip = GetIp()
get_ip.main()
|
[
"darcyzhang@DarcydeMacBook-Air.local"
] |
darcyzhang@DarcydeMacBook-Air.local
|
2ce825ca6f6f248267a0b77f24692ef69ed28d5c
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/de4a8b83df05373c8295f9771315d9a23bc2b591-<convert_mongo_result_to_valid_json>-fix.py
|
9e4aeda3cd4e73fb96be34588cc02ac29c6a781b
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
def convert_mongo_result_to_valid_json(self, result):
if (result is None):
return result
if isinstance(result, (integer_types + (float, bool))):
return result
if isinstance(result, string_types):
return result
elif isinstance(result, list):
new_list = []
for elem in result:
new_list.append(self.convert_mongo_result_to_valid_json(elem))
return new_list
elif isinstance(result, dict):
new_dict = {
}
for key in result.keys():
value = result[key]
new_dict[key] = self.convert_mongo_result_to_valid_json(value)
return new_dict
elif isinstance(result, datetime.datetime):
return (result - datetime.datetime(1970, 1, 1)).total_seconds()
else:
return '{}'.format(result)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
4a0d71573f39fb6949ac84ec04eeedf104afda83
|
17bdba01253b57e0b74b20a44dae067a643c8b86
|
/ingenico/connect/sdk/domain/mandates/definitions/mandate_customer.py
|
e2fca0a6b17e66a4039b901f4e2e929a2e8f319f
|
[
"MIT"
] |
permissive
|
BEAM-ZILLOW/connect-sdk-python3
|
04b18733216d1e8b5e5c724e315ed02de6b83aca
|
0355389ddb096edc19d770c8b8499bd3351d2b3d
|
refs/heads/master
| 2020-04-12T17:50:03.259747
| 2018-11-08T09:59:43
| 2018-11-08T09:59:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,861
|
py
|
# -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
from ingenico.connect.sdk.domain.definitions.bank_account_iban import BankAccountIban
from ingenico.connect.sdk.domain.mandates.definitions.mandate_address import MandateAddress
from ingenico.connect.sdk.domain.mandates.definitions.mandate_contact_details import MandateContactDetails
from ingenico.connect.sdk.domain.mandates.definitions.mandate_personal_information import MandatePersonalInformation
class MandateCustomer(DataObject):
__bank_account_iban = None
__company_name = None
__contact_details = None
__mandate_address = None
__personal_information = None
@property
def bank_account_iban(self):
"""
| Object containing IBAN information
Type: :class:`ingenico.connect.sdk.domain.definitions.bank_account_iban.BankAccountIban`
"""
return self.__bank_account_iban
@bank_account_iban.setter
def bank_account_iban(self, value):
self.__bank_account_iban = value
@property
def company_name(self):
"""
| Name of company, as a consumer
Type: str
"""
return self.__company_name
@company_name.setter
def company_name(self, value):
self.__company_name = value
@property
def contact_details(self):
"""
| Object containing contact details like email address and phone number
Type: :class:`ingenico.connect.sdk.domain.mandates.definitions.mandate_contact_details.MandateContactDetails`
"""
return self.__contact_details
@contact_details.setter
def contact_details(self, value):
self.__contact_details = value
@property
def mandate_address(self):
"""
| Object containing billing address details
Type: :class:`ingenico.connect.sdk.domain.mandates.definitions.mandate_address.MandateAddress`
"""
return self.__mandate_address
@mandate_address.setter
def mandate_address(self, value):
self.__mandate_address = value
@property
def personal_information(self):
"""
| Object containing personal information of the consumer
Type: :class:`ingenico.connect.sdk.domain.mandates.definitions.mandate_personal_information.MandatePersonalInformation`
"""
return self.__personal_information
@personal_information.setter
def personal_information(self, value):
self.__personal_information = value
def to_dictionary(self):
dictionary = super(MandateCustomer, self).to_dictionary()
self._add_to_dictionary(dictionary, 'bankAccountIban', self.bank_account_iban)
self._add_to_dictionary(dictionary, 'companyName', self.company_name)
self._add_to_dictionary(dictionary, 'contactDetails', self.contact_details)
self._add_to_dictionary(dictionary, 'mandateAddress', self.mandate_address)
self._add_to_dictionary(dictionary, 'personalInformation', self.personal_information)
return dictionary
def from_dictionary(self, dictionary):
super(MandateCustomer, self).from_dictionary(dictionary)
if 'bankAccountIban' in dictionary:
if not isinstance(dictionary['bankAccountIban'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['bankAccountIban']))
value = BankAccountIban()
self.bank_account_iban = value.from_dictionary(dictionary['bankAccountIban'])
if 'companyName' in dictionary:
self.company_name = dictionary['companyName']
if 'contactDetails' in dictionary:
if not isinstance(dictionary['contactDetails'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['contactDetails']))
value = MandateContactDetails()
self.contact_details = value.from_dictionary(dictionary['contactDetails'])
if 'mandateAddress' in dictionary:
if not isinstance(dictionary['mandateAddress'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['mandateAddress']))
value = MandateAddress()
self.mandate_address = value.from_dictionary(dictionary['mandateAddress'])
if 'personalInformation' in dictionary:
if not isinstance(dictionary['personalInformation'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['personalInformation']))
value = MandatePersonalInformation()
self.personal_information = value.from_dictionary(dictionary['personalInformation'])
return self
|
[
"jenkins@isaac.nl"
] |
jenkins@isaac.nl
|
769ee646685638845b9b0dbcf4b77df4a9ac8a52
|
8b124607a1227013b05b9af9c6a6888200d8f67d
|
/spider/bookinfo/middlewares.py
|
18e1e908f8c17208c57e16c7afd0f1051ebc48b9
|
[
"Apache-2.0"
] |
permissive
|
zzzz123321/Broadview-analysing-sales-figures
|
f32fcd4b41f00af59a44a09e744523199a17a9f6
|
bdff4239fd71b5077bc05703757d9f5d2610c536
|
refs/heads/master
| 2020-04-27T18:06:22.240431
| 2018-03-05T09:17:29
| 2018-03-05T09:17:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
import random
from bookinfo.settings import IPPOOL
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
class IPPOOLS(HttpProxyMiddleware):
def __init__(self, ip=''):
self.ip = ip
def process_request(self, request, spider):
thisip = random.choice(IPPOOL)
# print('当前的IP是:'+thisip['ipaddr'])
request.meta['proxy']="http://"+thisip['ipaddr']
class BookinfoSpiderMiddleware(object):
    """Spider middleware for the bookinfo project.

    Every hook below is a pass-through: requests, responses and items flow
    through unchanged. Not all methods need to be defined; when a method is
    missing, Scrapy acts as if the middleware does not modify the objects.
    """
    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware
    def process_spider_input(self, response, spider):
        # Called for each response entering the spider. Returning None lets
        # the response continue; raising an exception would abort it.
        return None
    def process_spider_output(self, response, result, spider):
        # Forward every Request/dict/Item produced by the spider unchanged.
        for item in result:
            yield item
    def process_spider_exception(self, response, exception, spider):
        # Called when the spider or process_spider_input() raises; no
        # special handling here, so Scrapy's defaults apply.
        pass
    def process_start_requests(self, start_requests, spider):
        # Like process_spider_output, but for the spider's start requests
        # (there is no response associated). Must yield only requests.
        for request in start_requests:
            yield request
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
|
[
"wonderfulsuccess@163.com"
] |
wonderfulsuccess@163.com
|
ef7ace55198ec861fcb052180a141fca5b563670
|
f1961c86e6da14f35c21d7235f4fc8a89fabdcad
|
/DailyProgrammer/DP20160527C.py
|
5e886c195faa15637218b42b98ecb6636fdaec9c
|
[
"MIT"
] |
permissive
|
DayGitH/Python-Challenges
|
d4930bdd85cd1a977d8f6192775ca956a375fcde
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
refs/heads/master
| 2021-01-17T13:01:03.784523
| 2018-06-29T23:49:04
| 2018-06-29T23:49:04
| 58,497,683
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,641
|
py
|
"""
[2016-05-27] Challenge #268 [Hard] Network and Cards: Part 3, The cheaters
https://www.reddit.com/r/dailyprogrammer/comments/4lavv6/20160527_challenge_268_hard_network_and_cards/
#Description
This week we are creating a game playable over network. This will be a 3-parter.
The third part is going to be even more interaction, and some cheating, card players love to cheat.
We are going to play a modified version of Blackjack:
Each player is dealt 1 covered card at the start of the game.
When a player decides to take a card he receives that card covered and then has to decide which one to play and which
one to hold.
Player send the card open over the network back to the server.
Starting stays the same:
When all connected clients send a `START` command, the game will begin, you don't have to look for other connections
then.
The communication goes as followed:
CLIENT A -> SERVER: START
CLIENT B -> SERVER: START
SERVER -> CLIENT A: Ace of spades
SERVER -> CLIENT B: 4 of clubs
SERVER -> CLIENT A: TAKE or PASS
CLIENT A -> SERVER: TAKE
SERVER -> CLIENT A: Queen of hearts
CLIENT A -> SERVER: PLAY Ace of spades
SERVER -> CLIENT B: TAKE or PASS
CLIENT B -> SERVER: PASS
The client has the option to either respond with a `TAKE` command, folowed by a `PLAY` or `PASS` command, the server
then go to the next client till everyone is done (all passed or everyone has 21 or more in score)
The cards have the following values:
2 -> 2
3 -> 3
4 -> 4
5 -> 5
6 -> 6
7 -> 7
8 -> 8
9 -> 9
Jack -> 10
Queen -> 10
King -> 10
Ace -> 1 or 11 (11 if not over 21 and 1 if over)
#Formal Inputs & Outputs
##Input description
- Server
Server has to accept at least 4 commands: `START`, `TAKE`, `PLAY` and `PASS`
- Client
Clients must be able to recieve the choice for `TAKE` and `PASS` and must be able to recieve cards, format of that is
up to you
##Output description
- Server
No Output required, but I can imagen that some loggin will be handy.
- Client
A decent output for humans to read the cards and see their current score.
Also must know when to type in the option to `TAKE` and `PASS`
#Notes/Hints
## TCP Socket approach
The server needs to able to handle multiple clients in the end, so a multithreaded approach is advised.
It is advised to think of some command like pattern, so you can send messages to the server and back.
For the server and client, just pick some random ports that you can use.
[Here](https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers) you have a list off all "reserved" ports.
For the connection, TCP connections are the easiest way to do this in most languages. But you are not limited to that
if you want to use something more high-level if your language of choice supports that.
## REST api approach
Some off you pointed out that this could be done with a webserver. If this is more in the line of what you are used to,
no problem then, as long as it stays in the line of a multiplayer game.
#Bonus
Examine the game logic from a other submissions (or your own) and try to create a cheating bot.
If a programmer forgets to add checks or some sort, you can exploit these.
**HOWEVER**:
**If you are not up for that, put it in your submission. I don't want to see any bragging, I want this to be fun.
Please be respectfull to other people at all time.**
**I will monitor this closely and any hurtful comment will be deleted**
#Finally
Have a good challenge idea?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
    # Placeholder entry point -- the networked Blackjack server/client
    # described in the module docstring is not implemented here.
    pass
if __name__ == "__main__":
    main()
|
[
"akber91@gmail.com"
] |
akber91@gmail.com
|
ada2bcb6a2c7f0e34aad00da2a1dec1042a2ce24
|
0be5c80361a12ef7b56d3da8ce64cc1ab80a5ada
|
/todo_app/forms.py
|
807cfe1aa1b023703185a0585df2b8d10847d0f3
|
[] |
no_license
|
swaraj70/TaskTodo
|
a73492afdd0c0920dea315681b400ac61a8ceb1f
|
652ef2a096055b2d96aa8122bb9825e5de79da8c
|
refs/heads/master
| 2023-04-29T02:46:11.406996
| 2020-02-14T05:47:41
| 2020-02-14T05:47:41
| 224,838,456
| 0
| 0
| null | 2023-04-21T20:41:35
| 2019-11-29T11:10:18
|
Python
|
UTF-8
|
Python
| false
| false
| 154
|
py
|
from django import forms
from .models import Task
class TaskForm(forms.ModelForm):
    """ModelForm for Task exposing the task text and its done flag."""
    class Meta:
        # Build the form from the Task model; only these two fields are
        # editable (order here is the rendering order).
        model = Task
        fields = ['task', 'done']
|
[
"apple@Apples-MacBook-Pro.local"
] |
apple@Apples-MacBook-Pro.local
|
fa5d2b4c4e8ebff5883ae88aca6ee894eab5a525
|
e21599d08d2df9dac2dee21643001c0f7c73b24f
|
/practice/ai/pic/simular/pil_sample2.py
|
c5c9cc3432ed46ccad315b22eff21bde3567adb1
|
[] |
no_license
|
herolibra/PyCodeComplete
|
c7bf2fb4ce395737f8c67749148de98a36a71035
|
4ef7d2c3aec6d28a53eed0e649cdeb74df3d783b
|
refs/heads/master
| 2022-07-17T05:39:03.554760
| 2020-05-03T07:00:14
| 2020-05-03T07:00:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
#!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
from PIL import Image
import math
import operator
from functools import reduce
def image_contrast(img1, img2):
    """Return the RMS difference between the histograms of two image files.

    0.0 means the histograms are identical; larger values mean a bigger
    per-bin difference.
    """
    hist_a = Image.open(img1).histogram()
    hist_b = Image.open(img2).histogram()
    # Root-mean-square over the paired histogram bins.
    squared_diffs = sum((a - b) ** 2 for a, b in zip(hist_a, hist_b))
    return math.sqrt(squared_diffs / len(hist_a))
if __name__ == '__main__':
    img1 = "1.png"  # paths of the two images to compare
    img2 = "2.png"
    result = image_contrast(img1, img2)
    # NOTE(review): printing (100 - result) as a percentage assumes the RMS
    # histogram difference is on a 0-100 scale, which is not guaranteed --
    # verify before relying on this number.
    print((100 - result), "%")
|
[
"ijumper@163.com"
] |
ijumper@163.com
|
58250e8bea0428c8b376e7e5f633cdc6431591e3
|
65f8211fc33eb5f9ac1ff0d68902226ca9a58692
|
/sorting_algorithms/quick_sort_cormen.py
|
9fdd4284ab2ac365d12a921a94e243eead478568
|
[] |
no_license
|
szarbartosz/asd-python
|
46869f5699a1ef661e2df02e523af0adcddbbbda
|
0130cc3dcbba6ad62e1516c98b5cbab85848d619
|
refs/heads/master
| 2022-12-13T19:02:53.699381
| 2020-09-11T13:29:31
| 2020-09-11T13:29:31
| 242,975,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
def partition(arr, left, right):
    """Lomuto partition of arr[left:right+1] around the pivot arr[right].

    Reorders the slice in place so elements <= pivot precede it, and
    returns the pivot's final index.
    """
    pivot = arr[right]
    boundary = left - 1  # last index of the <= pivot region
    for idx in range(left, right):
        if arr[idx] <= pivot:
            boundary += 1
            arr[boundary], arr[idx] = arr[idx], arr[boundary]
    # Drop the pivot just past the <= region.
    arr[boundary + 1], arr[right] = arr[right], arr[boundary + 1]
    return boundary + 1
def quick_sort(arr, left, right):
    """Sort arr[left:right+1] in place with recursive quicksort."""
    if left >= right:
        return  # zero or one element: already sorted
    split = partition(arr, left, right)
    quick_sort(arr, left, split - 1)
    quick_sort(arr, split + 1, right)
if __name__ == '__main__':
    # Demo: sort a sample list in place and show it before and after.
    arr = [54,3234,32,54,54376,4,3,52,34,1,43,5,26,37,45,23,432,52,36]
    print(arr)
    quick_sort(arr,0,len(arr)-1)
    print(arr)
|
[
"szarbartosz@gmail.com"
] |
szarbartosz@gmail.com
|
8a6ca59bb06502011b10a473553ad89562b21f6f
|
deb246d7cb1ce4ea57bba7e1b3c9964c503a0852
|
/khmer/khmerEnv/lib/python2.7/site-packages/screed/screedRecord.py
|
92eeb4728973624c2c3790803d7968f153a2c4dc
|
[] |
no_license
|
chelseaju/TahcoRoll
|
89fec5e7d3f8658b3647cb01fb5d3dba90dc74a4
|
3a5978cd48d728c3ee36afcf95dec82cec58d7e1
|
refs/heads/master
| 2021-04-25T15:40:53.545677
| 2019-09-25T14:09:17
| 2019-09-25T14:09:17
| 109,560,971
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,955
|
py
|
# Copyright (c) 2016, The Regents of the University of California.
from __future__ import absolute_import
from functools import total_ordering
import types
from . import DBConstants
import gzip
import bz2
from io import BytesIO
try:
from collections import MutableMapping
except ImportError:
import UserDict
MutableMapping = UserDict.DictMixin
class Record(MutableMapping):
    """
    Simple dict-like sequence record with attribute access (bag behavior).

    Keys are record fields (name, sequence, optionally quality, ...);
    record.field is an alias for record['field'].
    """
    def __init__(self, name=None, sequence=None, **kwargs):
        fields = {}
        if name is not None:
            fields['name'] = name
        if sequence is not None:
            fields['sequence'] = sequence
        fields.update(kwargs)
        # A quality of None means "no quality data": drop the key entirely.
        if fields.get('quality', '') is None:
            del fields['quality']
        self.d = fields
    def __setitem__(self, key, value):
        self.d[key] = value
    def __getattr__(self, name):
        # Fall back to the underlying dict so record.name works.
        try:
            return self.d[name]
        except KeyError:
            raise AttributeError(name)
    def __len__(self):
        # A record's length is the length of its sequence.
        return len(self.sequence)
    def keys(self):
        return self.d.keys()
    def __getitem__(self, idx):
        if not isinstance(idx, slice):
            return self.d[idx]
        # Slicing a record trims sequence (and quality, when present)
        # in a copy; the original record is untouched.
        sliced = dict(self.d)
        sliced['sequence'] = sliced['sequence'][idx]
        if 'quality' in sliced:
            sliced['quality'] = sliced['quality'][idx]
        return Record(**sliced)
    def __delitem__(self, key):
        del self.d[key]
    def __iter__(self):
        return iter(self.d)
    def __repr__(self):
        return repr(self.d)
@total_ordering
class _screed_attr(object):
    """
    Sliceable database object that supports lazy retrieval.

    Instead of holding a (possibly huge) text value in memory, each
    instance remembers how to fetch it -- or a substring of it -- from the
    screed SQLite dictionary table on demand. total_ordering derives the
    remaining comparisons from __eq__ and __lt__ below.
    """
    def __init__(self, dbObj, attrName, rowName, queryBy):
        """
        Initializes database object with specific record retrieval
        information
        dbOjb = database handle (sqlite3 connection)
        attrName = name of attr (column) in db
        rowName = index/name of row
        queryBy = column used to look the row up (by name or index)
        """
        self._dbObj = dbObj
        self._attrName = attrName
        self._rowName = rowName
        self._queryBy = queryBy
    def __getitem__(self, sliceObj):
        """
        Slicing interface. Returns the slice range given.
        *.start + 1 because sqlite's substr() is 1-based, not 0-based.
        """
        if not isinstance(sliceObj, slice):
            raise TypeError('__getitem__ argument must be of slice type')
        # NOTE(review): condition admits start == stop (empty slice) even
        # though the message says "less than".
        if not sliceObj.start <= sliceObj.stop:  # String reverse in future?
            raise ValueError('start must be less than stop in slice object')
        length = sliceObj.stop - sliceObj.start
        # Only the requested substring is pulled from the database; the
        # full value never leaves sqlite.
        query = 'SELECT substr(%s, %d, %d) FROM %s WHERE %s = ?' \
            % (self._attrName, sliceObj.start + 1, length,
               DBConstants._DICT_TABLE,
               self._queryBy)
        cur = self._dbObj.cursor()
        result = cur.execute(query, (str(self._rowName),))
        try:
            subStr, = result.fetchone()
        except TypeError:
            # fetchone() returned None: no row matched the key.
            raise KeyError("Key %s not found" % self._rowName)
        return str(subStr)
    def __len__(self):
        """
        Returns the length of the string.
        Note: this materializes the whole value via __str__.
        """
        return len(self.__str__())
    def __repr__(self):
        """
        Prints out the name of the class and the name of the sliceable attr
        """
        return "<%s '%s'>" % (self.__class__.__name__, self._attrName)
    def __eq__(self, given):
        """
        Compares attribute to given object in string form
        """
        if isinstance(given, bytes):
            return given == self.__str__()
        else:
            return str(given) == self.__str__()
    def __lt__(self, given):
        # Lexicographic comparison against the stringified other operand;
        # total_ordering derives <=, > and >= from this and __eq__.
        if isinstance(given, bytes):
            return self.__str__() < given
        else:
            return self.__str__() < str(given)
    def __str__(self):
        """
        Returns the full attribute as a string (one database round trip).
        """
        query = 'SELECT %s FROM %s WHERE %s = ?' \
            % (self._attrName, DBConstants._DICT_TABLE, self._queryBy)
        cur = self._dbObj.cursor()
        result = cur.execute(query, (str(self._rowName),))
        try:
            record, = result.fetchone()
        except TypeError:
            # fetchone() returned None: no row matched the key.
            raise KeyError("Key %s not found" % self._rowName)
        return str(record)
def _buildRecord(fieldTuple, dbObj, rowName, queryBy):
    """
    Constructs a dict-like Record with attribute names as keys.

    Fields marked _SLICEABLE_TEXT become lazy _screed_attr objects; every
    other field is fetched eagerly from the database as a string.
    """
    # Separate the lazy and full retrieval objects
    kvResult = []
    fullRetrievals = []
    for fieldname, role in fieldTuple:
        if role == DBConstants._SLICEABLE_TEXT:
            kvResult.append((fieldname, _screed_attr(dbObj,
                                                     fieldname,
                                                     rowName,
                                                     queryBy)))
        else:
            fullRetrievals.append(fieldname)
    # Retrieve the full text fields from the db in a single query
    subs = ','.join(fullRetrievals)
    query = 'SELECT %s FROM %s WHERE %s=?' % \
        (subs, DBConstants._DICT_TABLE, queryBy)
    cur = dbObj.cursor()
    res = cur.execute(query, (rowName,))
    # Add the full text fields to the result tuple list
    data = tuple([str(r) for r in res.fetchone()])
    kvResult.extend(zip(fullRetrievals, data))
    # Hack to make indexing start at 0: the primary key is stored 1-based
    # by sqlite, but screed exposes 0-based record indices.
    hackedResult = []
    for key, value in kvResult:
        if key == DBConstants._PRIMARY_KEY:
            hackedResult.append((key, int(value) - 1))
        else:
            hackedResult.append((key, value))
    return Record(**dict(hackedResult))
def write_fastx(record, fileobj):
    """Write sequence record to 'fileobj' in FASTA/FASTQ format.

    FASTQ is chosen when the record has a 'quality' attribute, FASTA
    otherwise; a 'description' attribute, if present, is appended to the
    defline. Output is always UTF-8 bytes.
    """
    # Output must be binary: either a BytesIO or a real file opened "wb".
    writable = (isinstance(fileobj, BytesIO)
                or getattr(fileobj, 'mode', None) == 'wb')
    if not writable:
        raise AttributeError(
            'cannot call "write_fastx" on object, must be of a file '
            'handle with mode "wb" or an instance of "BytesIO"')
    header = record.name
    if hasattr(record, 'description'):
        header += ' ' + record.description
    if hasattr(record, 'quality'):
        text = '@{defline}\n{sequence}\n+\n{quality}\n'.format(
            defline=header,
            sequence=record.sequence,
            quality=record.quality)
    else:
        text = '>{defline}\n{sequence}\n'.format(
            defline=header,
            sequence=record.sequence)
    fileobj.write(text.encode('utf-8'))
def write_fastx_pair(read1, read2, fileobj):
    """Write a pair of sequence records to 'fileobj' in FASTA/FASTQ format.

    Both reads must agree on having (or not having) quality data.
    """
    if hasattr(read1, 'quality'):
        assert hasattr(read2, 'quality')
    # BUG FIX: this previously called `write_record`, a name that is not
    # defined anywhere in this module, so the function always raised
    # NameError. The record writer defined above is write_fastx.
    write_fastx(read1, fileobj)
    write_fastx(read2, fileobj)
|
[
"chelseaju@ucla.edu"
] |
chelseaju@ucla.edu
|
99a3675e71c362c414c8ea7f39d3da678a6336ac
|
45614a944ffbdb75a0bef955582a722da5ce7492
|
/python/app.py
|
e7011353a8b938a296f146d092cafebdb5a08972
|
[] |
no_license
|
wccgoog/pass
|
1c8ab5393547634a27c7543556a75dec771a9e3d
|
0ec01536ae10b3d99707002c0e726072acb50231
|
refs/heads/2
| 2023-01-15T13:27:26.312648
| 2019-10-23T09:30:45
| 2019-10-23T09:30:45
| 122,595,075
| 0
| 2
| null | 2023-01-07T10:42:38
| 2018-02-23T08:38:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 639
|
py
|
# Minimal Flask demo: a landing page plus a sign-in form checked against
# hard-coded credentials.
from flask import Flask,request,render_template
app=Flask(__name__)
@app.route('/',methods=['GET','POST'])
def home():
    # Landing page (same template for GET and POST).
    return render_template('home.html')
@app.route('/signin',methods=['GET'])
def signin_form():
    # Empty sign-in form.
    return render_template('form.html')
@app.route('/signin',methods=['POST'])
def signin():
    # NOTE(review): credentials are hard-coded and compared in plain text --
    # acceptable only for a tutorial/demo application.
    username=request.form['username']
    password=request.form['password']
    if username=='admin' and password=='password':
        return render_template('signin-ok.html',username=username)
    # Re-render the form with an error message, preserving the username.
    return render_template('form.html',message='Bad username or password',username=username)
if __name__=='__main__':
    app.run()
|
[
"wcc3@sina.com"
] |
wcc3@sina.com
|
351e42f616d21721a870b7c8092d090c713bcead
|
2a603f33f109df24e55d392b124dad3f51922075
|
/0x11-python-network_1/5-hbtn_header.py
|
e67a32a20f51e94d671ba94a4ce7735bbdc91470
|
[] |
no_license
|
michellegsld/holbertonschool-higher_level_programming
|
bc010270904ec2638953afea8fcfd650159e38ac
|
c6a3a4b2eb80b45708becfa3b08249c3e86294e8
|
refs/heads/master
| 2023-06-08T21:06:34.401385
| 2023-06-06T02:25:03
| 2023-06-06T02:25:03
| 226,925,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
#!/usr/bin/python3
"""
Task 5:
Sends a GET request to the URL given as the first command-line argument
and displays the value of the X-Request-Id response header.
5-hbtn_header.py
"""
import requests
from sys import argv
if __name__ == "__main__":
    req = requests.get(argv[1])
    try:
        print(req.headers["X-Request-Id"])
    except:
        # Header absent: print nothing (deliberate best-effort behavior).
        # NOTE(review): a bare except also hides unrelated errors; KeyError
        # would be the precise exception here.
        pass
|
[
"michellegsld@gmail.com"
] |
michellegsld@gmail.com
|
04c96bdf8c7d21cf82ad54365d7d445b04fd42ef
|
71e2ff113d8b0de6708c7b6165d01a7bbb199b41
|
/main.py
|
c273fb85ebc326ecbd9eb80415a83e91f7653d41
|
[] |
no_license
|
MJB90/timeSeries
|
3b506b4659121fb58984c358d3ef50a66909df21
|
0b54fe1f06df5e12f26b5b3a0d210b4f86492414
|
refs/heads/master
| 2020-04-20T15:05:22.759585
| 2019-02-03T12:00:36
| 2019-02-03T12:00:36
| 168,918,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,761
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from pandas import Series
import barplot as bp
import warnings
warnings.filterwarnings("ignore")
# Load the raw train/test time series and keep pristine copies for later use.
train = pd.read_csv("data/Train_SU63ISt.csv")
test = pd.read_csv("data/Test_0qrQsBZ.csv")
train_original = train.copy()
test_original = test.copy()
##########################################################################
# Exploratory Analysis
# Parse the Datetime column (day-month-year hour:minute) on all four frames.
train['Datetime'] = pd.to_datetime(train.Datetime, format='%d-%m-%Y %H:%M')
test['Datetime'] = pd.to_datetime(test.Datetime, format='%d-%m-%Y %H:%M')
test_original['Datetime'] = pd.to_datetime(test_original.Datetime, format='%d-%m-%Y %H:%M')
train_original['Datetime'] = pd.to_datetime(train_original.Datetime, format='%d-%m-%Y %H:%M')
# Derive calendar features used in the analysis below.
for i in (train, test, test_original, train_original):
    i['year'] = i.Datetime.dt.year
    i['month'] = i.Datetime.dt.month
    i['day'] = i.Datetime.dt.day
    i['Hour'] = i.Datetime.dt.hour
train['day of week'] = train['Datetime'].dt.dayofweek
temp = train['Datetime']  # NOTE(review): temp appears unused afterwards
# Weekend flag: 1 when the timestamp falls on Saturday or Sunday
# (dayofweek 5 or 6), 0 on weekdays.
def is_weekend(row):
    return 1 if row.dayofweek in (5, 6) else 0
temp2 = train['Datetime'].apply(is_weekend)  # 1 for Sat/Sun, 0 otherwise
train['weekend'] = temp2
# train.index = train['Datetime'] # indexing the Datetime to get the time period on the x-axis.
# df = train.drop('ID', 1) # drop ID variable to get only the Datetime on x-axis.
# ts = df['Count']
# # plt.figure(figsize=(16, 8))
# # plt.plot(ts, label='Passenger Count')
# # plt.title('Time Series')
# # plt.xlabel("Time(year-month)")
# # plt.ylabel("Passenger count")
# # plt.legend(loc='best')
# # plt.show()
#
# temp_data = train.groupby('month')['Count'].mean()
# x_axis_data = temp_data.index
# y_axis_data = temp_data[:]
# bp.plot_bar_x(x_axis_data, y_axis_data, 'Month', 'Count', 'Monthly Count')
#######################################################################################
# Splitting and forecasting
# Drop the ID column so only the time-series fields remain (use the
# explicit axis= keyword; the positional form was removed from pandas).
train = train.drop('ID', axis=1)
test.Timestamp = pd.to_datetime(test.Datetime, format='%d-%m-%Y %H:%M')
test.index = test.Timestamp
# Converting to daily mean
test = test.resample('D').mean()
train.Timestamp = pd.to_datetime(train.Datetime, format='%d-%m-%Y %H:%M')
train.index = train.Timestamp
# Converting to daily mean
train = train.resample('D').mean()
# BUG FIX: .ix was deprecated and then removed from pandas; .loc performs
# the same label-based datetime-string slicing (both endpoints inclusive).
Train = train.loc['2012-08-25':'2014-06-24']
valid = train.loc['2014-06-25':'2014-09-25']
# Visual check of the train/validation split.
Train.Count.plot(figsize=(15, 8), title='Daily Ridership', fontsize=14, label='train')
valid.Count.plot(figsize=(15, 8), title='Daily Ridership', fontsize=14, label='valid')
plt.xlabel("Datetime")
plt.ylabel("Passenger count")
plt.legend(loc='best')
plt.show()
|
[
"you@example.com"
] |
you@example.com
|
fdbf80ce6ac0265975d02e5efdc5009f07131870
|
ecf2511642e0e8fb11616249b062a7c8a130d137
|
/src/python_files/GenOpSpace.py
|
ef84cc5eff5f24db805665ab5b9ae3245e96df93
|
[] |
no_license
|
zaddan/apx_tool_chain
|
61abc9d6958ce17d9823ac7bf71ae3097f76decf
|
ef81dd6515f110d3c8a4b62a642d44d9a4327a79
|
refs/heads/master
| 2021-01-24T06:37:12.248020
| 2017-04-08T20:39:08
| 2017-04-08T20:39:08
| 40,195,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,515
|
py
|
import itertools
import sys
# Copyright (C)
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##
# @file GenOpSpace.py
# @brief this file contains the class for generating all the possible apx version of an operation that is defined as a class
# @author Behzad Boroujerdian
# @date 2015-06-30
##
# @brief This class generates all possible versions of a specific operator. It uses the name to determine the type of input; for example, for multiplication it generates every multiplication variant possible, such as the accurate version and the approximate (apx) versions, of which there can be several kinds.
#Some of the inputs are plain names, while others are ranges that must first be expanded and then permuted.
#For example, the name passed to this class is a single element and needs no expansion, but each remaining input is given as a low bound and a high bound.
#Usage: set numberOfInputs first. For example, in the case of Eta1 the number of inputs equals 8; these inputs are the low and high bounds of the operator's parameters.
##
class GenOpSpace():
    # NOTE: this module is Python 2 code ("print" statement, integer
    # division in sweepInput); do not run it under Python 3 unmodified.
    def __init__(self, name, numberOfInputs, lInput):
        self.name = [name]
        self.inputList = [] # the raw bounds the user provided; varies per op. For Eta1 this
                            # holds NtLB, NtHB, NiaLB, NiaHB, msbLB, msbHB, lsbLB, lsbHB.
        self.eachCategoryAllValues = [] # all values possible for each input category;
                                        # for Eta1 the categories are Nia, msb, lsb, Nt.
        self.numberOfInputs = numberOfInputs
        for i in range(0, self.numberOfInputs):
            self.inputList.append(lInput[i])
        self.combineList = [] # all categories' value lists gathered in one list
        self.permutedTuples= [] # itertools.product of combineList (tuples)
        self.permutedList= [] # permutedTuples converted from tuples to lists
    def sweepInput(self):
        # Expand every (low bound, high bound) pair into its value range,
        # then take the cross product of the name with all the ranges.
        self.combineList.append(self.name);
        for i in range(0, self.numberOfInputs, 2):
            # inputList holds [lowBound, highBound] pairs; note the
            # half-open range excludes the high bound itself.
            self.eachCategoryAllValues.append(range(self.inputList[i], self.inputList[i+1]))
            # i/2 is Python 2 integer division (category index).
            self.combineList.append(self.eachCategoryAllValues[i/2])
        self.permutedTuples= list(itertools.product(*(self.combineList)))
        for element in self.permutedTuples:
            self.permutedList.append(list(element));
        #print self.permutedList
    def printPermutations(self):
        # Dump every generated operator configuration.
        print self.permutedList
#testing framework
#GenOps = [GenOpSpace("Eta", 8,[1,4, 2,6, 3,5, 4, 6]), GenOpSpace("btm", 4,[10, 11, 100, 110])]
#for element in GenOps:
# element.sweepInput()
# element.printPermutations()
#
|
[
"behzadboro@gmail.com"
] |
behzadboro@gmail.com
|
e18d24a68f51f405a4f8e4d13943f2abec5d6f23
|
6bf995003dfe009129f7d93b63ec2478927324e0
|
/backends/ubpf/tests/ptf/tunneling_test.py
|
cbc00770d83b8bdcef665d28aa9a236075469f62
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
p4lang/p4c
|
e9fd8acfa25a6e1e42adffd136cbf6cd5e94018a
|
4a393c951e1f4fdbdc3894a1f8d2929e700adeb7
|
refs/heads/main
| 2023-09-04T16:30:20.238093
| 2023-09-04T06:55:44
| 2023-09-04T06:55:44
| 55,433,859
| 621
| 555
|
Apache-2.0
| 2023-09-14T14:22:46
| 2016-04-04T18:12:32
|
C++
|
UTF-8
|
Python
| false
| false
| 2,190
|
py
|
#!/usr/bin/env python
# Copyright 2019 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base_test import P4rtOVSBaseTest
from ptf.packet import MPLS, Ether
from ptf.testutils import send_packet, simple_ip_only_packet, verify_packets
class TunnelingTest(P4rtOVSBaseTest):
    """Base fixture: loads the MPLS tunneling BPF program and forwards
    traffic through it between ports 1 and 2 in both directions."""
    def setUp(self):
        P4rtOVSBaseTest.setUp(self)
        # Start from a clean slate: remove old flows and any loaded program.
        self.del_flows()
        self.unload_bpf_program()
        self.load_bpf_program(path_to_program="build/test-tunneling.o")
        # Install flows sending traffic through the program both ways.
        self.add_bpf_prog_flow(1, 2)
        self.add_bpf_prog_flow(2, 1)
class MplsDownstreamTest(TunnelingTest):
    """Downstream direction: a plain IP packet gets an MPLS label pushed."""
    def setUp(self):
        TunnelingTest.setUp(self)
        # Map entry keyed by destination IP. The key "1 1 168 192" appears
        # to be 192.168.1.1 with bytes in reverse order -- TODO confirm
        # against the P4 program's map layout.
        self.update_bpf_map(map_id=1, key="1 1 168 192", value="0 0 0 0")
    def runTest(self):
        # Inject a plain IP packet on port 1 ...
        pkt = Ether(dst="11:11:11:11:11:11") / simple_ip_only_packet(ip_dst="192.168.1.1")
        # ... and expect it on port 2 with an MPLS header (label 20) pushed.
        exp_pkt = (
            Ether(dst="11:11:11:11:11:11")
            / MPLS(label=20, cos=5, s=1, ttl=64)
            / simple_ip_only_packet(ip_dst="192.168.1.1")
        )
        send_packet(self, (0, 1), pkt)
        verify_packets(self, exp_pkt, device_number=0, ports=[2])
class MplsUpstreamTest(TunnelingTest):
    """Upstream direction: an MPLS-tagged packet has its label popped."""
    def setUp(self):
        TunnelingTest.setUp(self)
        # Map entry keyed by MPLS label 20 (map 0 drives the pop action).
        self.update_bpf_map(map_id=0, key="20 0 0 0", value="0 0 0 0")
    def runTest(self):
        # Inject an MPLS-encapsulated packet on port 1 ...
        pkt = (
            Ether(dst="11:11:11:11:11:11")
            / MPLS(label=20, cos=5, s=1, ttl=64)
            / simple_ip_only_packet(ip_dst="192.168.1.1")
        )
        # ... and expect the bare IP packet (label removed) on port 2.
        exp_pkt = Ether(dst="11:11:11:11:11:11") / simple_ip_only_packet(ip_dst="192.168.1.1")
        send_packet(self, (0, 1), pkt)
        verify_packets(self, exp_pkt, device_number=0, ports=[2])
|
[
"noreply@github.com"
] |
p4lang.noreply@github.com
|
601e2fd0c5ec8bd6a1589b74e3fe3c62cc00a0d1
|
c6ee7be1479797788dd9f9d3a29c0e76ea020db8
|
/apscheduler_yqt/rabbitmq_selenium/__init__.py
|
6aeef9ff6c2d7a54fa444b5060b547c6b8ca913f
|
[] |
no_license
|
hikekang/apscheduler_yqt
|
2966e7231fff1f81c4fa4a75459cf638592aae6a
|
0d30c2ef721b8ffeba98dca6b2441613b4ed608d
|
refs/heads/master
| 2023-08-17T05:19:34.345553
| 2021-09-26T03:21:11
| 2021-09-26T03:21:11
| 406,217,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
#!/usr/bin/python3.x
# -*- coding=utf-8 -*-
"""
Time : 2021/8/6 16:27
Author : hike
Email : hikehaidong@gmail.com
File Name : __init__.py
Description:
Software : PyCharm
"""
|
[
"38242754+hikekang@users.noreply.github.com"
] |
38242754+hikekang@users.noreply.github.com
|
868f1a271bf1e90f4384544b6b79bb9e791ab6b9
|
8d47af9482444b07b52cf44cebcaf4b992df4d09
|
/agents/17_DoubleSampling/17.py
|
67cf990ddcfc9a4ff45dc4790a458cd55d3247c2
|
[] |
no_license
|
w0lv3r1nix/retro-agents
|
f4dbce2db558c880b161062796e5397be65bdd10
|
c7f93a737dc6c6fc5d8343c099e14bd2bc97aaf1
|
refs/heads/master
| 2020-08-01T01:19:41.660018
| 2018-06-13T04:28:09
| 2018-06-13T04:28:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,854
|
py
|
#!/usr/bin/env python
"""
Train an agent on Sonic using an open source Rainbow DQN
implementation.
"""
import tensorflow as tf
from anyrl.algos import DQN
from anyrl.envs import BatchedGymEnv
from anyrl.envs.wrappers import BatchedFrameStack
from anyrl.models import rainbow_models
from anyrl.rollouts import BatchedPlayer, PrioritizedReplayBuffer, NStepPlayer
from anyrl.spaces import gym_space_vectorizer
import gym_remote.exceptions as gre
from sonic_util import AllowBacktracking, make_env
from DoubleSampling import DoubleSampling
def main():
    """Run DQN until the environment throws an exception."""
    # Sonic env with raw rewards and no frame stacking here -- stacking is
    # done by the batched wrapper below (4 frames, kept separate).
    env = AllowBacktracking(make_env(stack=False, scale_rew=False))
    env = BatchedFrameStack(BatchedGymEnv([[env]]), num_images=4, concat=False)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True # pylint: disable=E1101
    with tf.Session(config=config) as sess:
        # Rainbow DQN; the distributional value head is clamped to
        # [-200, 200].
        dqn = DQN(*rainbow_models(sess,
                                  env.action_space.n,
                                  gym_space_vectorizer(env.observation_space),
                                  min_val=-200,
                                  max_val=200))
        # 3-step returns collected by the online network.
        player = NStepPlayer(BatchedPlayer(env, dqn.online_net), 3)
        optimize = dqn.optimize(learning_rate=1e-4)
        sess.run(tf.global_variables_initializer())
        # NOTE(review): DoubleSampling(500000, 0.5, 0.4, epsilon=0.1) --
        # argument meanings depend on the local DoubleSampling class; the
        # positional values mirror a prioritized buffer (capacity, alpha,
        # beta) but confirm against its definition.
        dqn.train(num_steps=2000000, # Make sure an exception arrives before we stop.
                  player=player,
                  replay_buffer=DoubleSampling(500000, 0.5, 0.4, epsilon=0.1),
                  optimize_op=optimize,
                  train_interval=1,
                  target_interval=8192,
                  batch_size=32,
                  min_buffer_size=20000)
if __name__ == '__main__':
    try:
        main()
    except gre.GymRemoteError as exc:
        # The contest harness signals end-of-evaluation via this exception.
        print('exception', exc)
|
[
"seungjaeryanlee@gmail.com"
] |
seungjaeryanlee@gmail.com
|
ea8568e4d91e2982982e1baec28f8eafed53cab4
|
55d5430e266d331320eef97653bca303c775184b
|
/GAN/lib/dataset/alignDataSet.py
|
06f700b5543cbfd0abd70a7eea77b42820ee75d1
|
[] |
no_license
|
arn1992/CCX-rayNet_and_CCX-rayGAN
|
57059bc8c02e01f2d03ad5047bf93e2cc0ae4e3a
|
b219ca9e19ee0a87cfd9895d11d6709429bb9f52
|
refs/heads/main
| 2023-05-09T03:00:21.496350
| 2021-06-03T02:12:24
| 2021-06-03T02:12:24
| 373,348,224
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,619
|
py
|
# ------------------------------------------------------------------------------
# Copyright (c) Tencent
# Licensed under the GPLv3 License.
# Created by Kai Ma (makai0324@gmail.com)
# ------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from X2CT.GAN.lib.dataset.baseDataSet import Base_DataSet
from X2CT.GAN.lib.dataset.utils import *
import h5py
import numpy as np
import os
import torch
import cv2
#
# class AlignDataSet(Base_DataSet):
# '''
# DataSet For unaligned data
# '''
# def __init__(self, opt):
# super(AlignDataSet, self).__init__()
# self.opt = opt
# self.ext = '.h5'
# self.dataset_paths = get_dataset_from_txt_file(self.opt.datasetfile)
# self.dataset_paths = sorted(self.dataset_paths)
# self.dataset_size = len(self.dataset_paths)
# self.dir_root = self.get_data_path
# self.data_augmentation = self.opt.data_augmentation(opt)
#
# @property
# def name(self):
# return 'AlignDataSet'
#
# @property
# def get_data_path(self):
# path = os.path.join(self.opt.dataroot)
# return path
#
# @property
# def num_samples(self):
# return self.dataset_size
#
# def get_image_path(self, root, index_name):
# img_path = os.path.join(root, index_name, 'ct_xray_data'+self.ext)
# assert os.path.exists(img_path), 'Path do not exist: {}'.format(img_path)
# return img_path
#
# # def load_file(self, file_path):
# # hdf5 = h5py.File(file_path, 'r')
# # ct_data = np.asarray(hdf5['ct'])
# # x_ray1 = np.asarray(hdf5['xray1'])
# # x_ray1 = np.expand_dims(x_ray1, 0)
# # hdf5.close()
# # return ct_data, x_ray1
# #
# # '''
# # generate batch
# # '''
# # def pull_item(self, item):
# # file_path = self.get_image_path(self.dir_root, self.dataset_paths[item])
# # ct_data, x_ray1 = self.load_file(file_path)
# #
# # # Data Augmentation
# # ct, xray1 = self.data_augmentation([ct_data, x_ray1])
# #
# # return ct, xray1, file_path
#
#
#
# def load_file(self, file_path):
# '''
#
# :param file_path: dir_root/dataset_paths/file.npy ---> CT
# :return:
# '''
# ct_name = os.path.join(file_path)
# xray_name = os.path.join(file_path.replace('3d_numpy_array', 'xray_image').replace('.npy','.png').replace('CT_3D_', 'normal_'))
# ct_data = np.load(ct_name)
# xray_data = cv2.imread(xray_name, 0)
# x_ray1 = np.expand_dims(xray_data, 0)
#
# return ct_data, x_ray1
#
#
#
# '''
# generate batch
# '''
# def pull_item(self, item):
# file_path = self.dataset_paths[item] #self.get_image_path(self.dir_root, self.dataset_paths[item])
# ct_data, x_ray1 = self.load_file(file_path)
# # assert ct_data.shape[0] == x_ray1.shape[1] and ct_data.shape[1] == x_ray1.shape[2]
# # Data Augmentation
# ct, xray1 = self.data_augmentation([ct_data, x_ray1])
#
# return ct, xray1, file_path
#
#
# from torch.utils.data import Dataset
# class My_Align_DataSet(Dataset):
# '''
# Base DataSet
# '''
# @property
# def name(self):
# return 'AlignDataSet'
#
# def __init__(self, opt):
# self.opt = opt
# self.dataset_paths = get_dataset_from_txt_file(self.opt.datasetfile)
# self.dataset_paths = sorted(self.dataset_paths)
# self.dataset_size = len(self.dataset_paths)
# self.dir_root = self.get_data_path
# self.data_augmentation = self.opt.data_augmentation(opt)
#
# def get_data_path(self):
# path = os.path.join(self.opt.dataroot)
# return path
#
# def get_image_path(self, root, index_name):
# img_path = os.path.join(root, index_name, 'ct_xray_data'+self.ext)
# assert os.path.exists(img_path), 'Path do not exist: {}'.format(img_path)
# return img_path
#
# def load_file(self, file_path):
# '''
#
# :param file_path: dir_root/dataset_paths/file.npy ---> CT
# :return:
# '''
# ct_name = os.path.join(file_path)
# xray_name = os.path.join(file_path.replace('3d_numpy_array', 'xray_image').replace('.npy','.png').replace('CT_3D_', 'normal_'))
# seg_name = os.path.join(file_path.replace('3d_numpy_array', 'seg_image').replace('CT_3D_Patient', ''))
# ct_data = np.load(ct_name)
# xray_data = cv2.imread(xray_name, 0)
# x_ray1 = np.expand_dims(xray_data, 0)
# seg_data = np.expand_dims(np.load(seg_name), 0)
# seg_data[seg_data<0.8] = 0
# seg_data[seg_data>=0.8] = 1
#
# return ct_data, x_ray1, seg_data
#
#
# def __getitem__(self, item):
# file_path = self.dataset_paths[item] #self.get_image_path(self.dir_root, self.dataset_paths[item])
# ct_data, x_ray1, seg_data = self.load_file(file_path)
# # print(file_path, ct_data.shape, x_ray1.shape)
# # assert ct_data.shape[0] == x_ray1.shape[1] and ct_data.shape[1] == x_ray1.shape[2]
# # Data Augmentation
# ct, xray1 = self.data_augmentation([ct_data, x_ray1])
# # segmentation_map
# seg = torch.Tensor(seg_data)
# return ct, xray1, seg, file_path
#
# def __len__(self):
# return self.dataset_size
#
class AlignDataSet(Base_DataSet):
    """Dataset yielding (CT volume, X-ray, file path) triples, one HDF5 file per sample."""

    def __init__(self, opt):
        super(AlignDataSet, self).__init__()
        self.opt = opt
        self.ext = '.h5'
        sample_paths = get_dataset_from_txt_file(self.opt.datasetfile)
        self.dataset_paths = sorted(sample_paths)
        self.dataset_size = len(self.dataset_paths)
        # get_data_path is a property, so this stores the resolved path string.
        self.dir_root = self.get_data_path
        self.data_augmentation = self.opt.data_augmentation(opt)

    @property
    def name(self):
        return 'AlignDataSet'

    @property
    def get_data_path(self):
        return os.path.join(self.opt.dataroot)

    @property
    def num_samples(self):
        return self.dataset_size

    def get_image_path(self, root, index_name):
        img_path = os.path.join(root, index_name, 'ct_xray_data' + self.ext)
        assert os.path.exists(img_path), 'Path do not exist: {}'.format(img_path)
        return img_path

    def load_file(self, file_path):
        """Read the CT volume and the X-ray (with a leading channel axis) from one HDF5 file."""
        with h5py.File(file_path, 'r') as h5:
            volume = np.asarray(h5['ct'])
            xray = np.expand_dims(np.asarray(h5['xray1']), 0)
        return volume, xray

    def pull_item(self, item):
        """Load, augment and return the sample at position *item*."""
        file_path = self.get_image_path(self.dir_root, self.dataset_paths[item])
        volume, xray = self.load_file(file_path)
        ct, xray1 = self.data_augmentation([volume, xray])
        return ct, xray1, file_path
|
[
"noreply@github.com"
] |
arn1992.noreply@github.com
|
9a55f6475c0faa28d1d4f188a408cd6edb3b8c7b
|
8d472f9facb895dda9e1df81f3bb6c2f81b9c357
|
/master/bt5/slapos_erp5/SkinTemplateItem/portal_skins/slapos_administration/NotificationMessageModule_updateProductionNotificationId.py
|
c500db8c5f0f5a55eeea84950922136ec276be23
|
[] |
no_license
|
SlapOS/slapos.core
|
852485eed9382685f3df6ba8532f8192bb1389c4
|
369e8d56636e1c59a745e68dc68154abfc5b7840
|
refs/heads/master
| 2023-08-31T04:42:34.722241
| 2023-08-30T15:13:08
| 2023-08-30T15:13:08
| 1,825,920
| 11
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
# ERP5/Zope skin script: renumber validated notification messages so their ids
# follow the master_prod_<reference>_<language>_<version> convention.
if context.getPortalType() != "Notification Message Module":
    raise ValueError("This folder is not a Notification Message Module")
# id="201%" matches ids starting with "201" -- presumably year-prefixed ids; confirm.
for notification_message in context.searchFolder(id="201%", validation_state="validated"):
    # Re-check the workflow state on the real object: the catalog result may be stale.
    if notification_message.getValidationState() != 'validated':
        continue
    # Canonical id: reference with "-" and "." turned into "_", then language
    # (default "en") and version (default "001").
    new_id = "master_prod_%s_%s_%s" % (notification_message.getReference().replace("-", "_").replace(".", "_"),
                                       notification_message.getLanguage("en"),
                                       notification_message.getVersion("001"))
    notification_message.getObject().setId(new_id)
|
[
"rafael@nexedi.com"
] |
rafael@nexedi.com
|
48ddd44b06ec6eca0612ca841b1a3c4c4285e8ef
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC1758.py
|
9251f4bd17de4e61517b4875032d7ca930e91402
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,787
|
py
|
# qubit number=5
# total number=60
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf for boolean function *f* over n-bit strings.

    Marked inputs (f(bits) == "1") receive a phase flip implemented as a
    multi-controlled Z, conjugated by X gates on the qubits whose bit is "0".
    """
    qreg = QuantumRegister(n, "ofc")
    circ = QuantumCircuit(qreg, name="Zf")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        # Qubits whose bit is "0" are flipped before and after the controlled-Z.
        flips = [qreg[pos] for pos, bit in enumerate(bits) if bit == "0"]
        for qubit in flips:
            circ.x(qubit)
        if n >= 2:
            circ.mcu1(pi, qreg[1:], qreg[0])
        for qubit in flips:
            circ.x(qubit)
    return circ
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble a (mutation-perturbed) Grover search circuit on n qubits.

    The "# number=NN" tags are gate ids emitted by the QDiff generator; extra
    gates beyond the textbook Grover template are deliberate mutations.
    NOTE(review): the extent of the repeat-loop body was reconstructed from the
    standard generated-file layout -- confirm against the original indentation.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial state preparation (Hadamards) plus inserted mutation gates.
    prog.h(input_qubit[0]) # number=3
    prog.x(input_qubit[4]) # number=53
    prog.h(input_qubit[0]) # number=57
    prog.cz(input_qubit[2],input_qubit[0]) # number=58
    prog.h(input_qubit[0]) # number=59
    prog.z(input_qubit[2]) # number=46
    prog.h(input_qubit[0]) # number=54
    prog.cz(input_qubit[2],input_qubit[0]) # number=55
    prog.h(input_qubit[0]) # number=56
    prog.h(input_qubit[1]) # number=4
    prog.rx(2.664070570244145,input_qubit[1]) # number=39
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[2]) # number=49
    prog.cz(input_qubit[3],input_qubit[2]) # number=50
    prog.h(input_qubit[2]) # number=51
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)
    # Grover iteration count: floor(sqrt(N) * pi / 4) rounds of oracle+diffusion.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        # Diffusion operator (inversion about the mean), interleaved with mutations.
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[3]) # number=40
        prog.y(input_qubit[4]) # number=35
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.h(input_qubit[0]) # number=25
        prog.cz(input_qubit[1],input_qubit[0]) # number=26
        prog.h(input_qubit[0]) # number=27
        prog.h(input_qubit[0]) # number=36
        prog.cz(input_qubit[1],input_qubit[0]) # number=37
        prog.h(input_qubit[0]) # number=38
        prog.cx(input_qubit[1],input_qubit[0]) # number=41
        prog.x(input_qubit[0]) # number=42
        prog.cx(input_qubit[1],input_qubit[0]) # number=43
        prog.cx(input_qubit[1],input_qubit[0]) # number=34
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.cx(input_qubit[0],input_qubit[1]) # number=29
        prog.cx(input_qubit[2],input_qubit[3]) # number=44
        prog.x(input_qubit[1]) # number=30
        prog.cx(input_qubit[0],input_qubit[1]) # number=31
        prog.x(input_qubit[2]) # number=11
        prog.x(input_qubit[3]) # number=12
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.x(input_qubit[0]) # number=13
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.z(input_qubit[1]) # number=52
    # circuit end
    # Measure every qubit into its classical register slot.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Search target: the all-zeros 5-bit string; f(rep) returns "1" only for it.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Run on the least busy operational (non-simulator) IBMQ backend.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mock device only to report depth / transpiled form.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Persist the measurement counts and the transpiled circuit for analysis.
    writefile = open("../data/startQiskit_QC1758.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
c5de003f1e74c18d0f8f566bab3c1c6638eb1c13
|
725abfa74e3800622837e60615dc15c6e91442c0
|
/venv/Lib/site-packages/django/contrib/sites/managers.py
|
12f8e555d1819f45ba12459ff1e8dedf2b4a4201
|
[] |
no_license
|
Malak-Abdallah/TODOlist
|
4840e2e0a27e6499ae6b37524bb3e58455d08bfb
|
fd35754e8eac9b262fae17ec16ad9fb510a12f5d
|
refs/heads/master
| 2023-07-16T11:38:48.759232
| 2021-08-31T09:43:11
| 2021-08-31T09:43:11
| 401,600,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
from django.conf import settings
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
class CurrentSiteManager(models.Manager):
    """Manager that restricts querysets to objects tied to the current site."""

    use_in_migrations = True

    def __init__(self, field_name=None):
        super().__init__()
        self.__field_name = field_name

    def check(self, **kwargs):
        return [*super().check(**kwargs), *self._check_field_name()]

    def _check_field_name(self):
        """Validate that the configured field exists and relates to Site."""
        field_name = self._get_field_name()
        try:
            field = self.model._meta.get_field(field_name)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "CurrentSiteManager could not find a field named '%s'."
                    % field_name,
                    obj=self,
                    id="sites.E001",
                )
            ]
        if field.many_to_many or isinstance(field, models.ForeignKey):
            return []
        return [
            checks.Error(
                "CurrentSiteManager cannot use '%s.%s' as it is not a foreign key or a many-to-many field."
                % (self.model._meta.object_name, field_name),
                obj=self,
                id="sites.E002",
            )
        ]

    def _get_field_name(self):
        """ Return self.__field_name or 'site' or 'sites'. """
        if not self.__field_name:
            # Prefer a 'site' field; fall back to 'sites' when it is absent.
            try:
                self.model._meta.get_field("site")
            except FieldDoesNotExist:
                self.__field_name = "sites"
            else:
                self.__field_name = "site"
        return self.__field_name

    def get_queryset(self):
        site_filter = {self._get_field_name() + "__id": settings.SITE_ID}
        return super().get_queryset().filter(**site_filter)
|
[
"malkobeidallah@gmail.com"
] |
malkobeidallah@gmail.com
|
1f0ac06c86ccc934a38d76d04471bc0b6512f58e
|
d36ac2218dd70e9d1f26f0e82bc32cd2d0166e17
|
/tests/test_e_policy.py
|
e6cad3826d533caa3cf6f6ad7ea876072d51aec9
|
[
"MIT"
] |
permissive
|
dre2004/reinforcement
|
b8b1524fb81ec2e540b0d8e64e5a77f6fff2ded8
|
50d59a49106ffecc26c0fd97fa240bddfff1384a
|
refs/heads/master
| 2020-05-20T04:38:22.524054
| 2018-10-28T18:59:55
| 2018-10-28T18:59:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,064
|
py
|
import random
import pytest
from reinforcement.policies.e_greedy_policies import EpsilonGreedyPolicy
from tests.common_doubles import MockFilter, Call
from tests.q_doubles import QFunctionWrapper
def make_policy(epsilon, filter=None):
    """Create an EpsilonGreedyPolicy with the given epsilon and optional action filter."""
    policy = EpsilonGreedyPolicy(epsilon, filter)
    return policy
# Two distinct single-feature states shared by all tests below.
STATE_A = [0]
STATE_B = [1]

@pytest.fixture
def q_function():
    # Writable Q-function stub over the two actions 0 and 1.
    return QFunctionWrapper([0, 1])

@pytest.fixture
def zero_policy():
    """Returns a normal e greedy policy with epsilon 0"""
    return make_policy(0)

@pytest.fixture
def epsilon_policy():
    """Returns a normal e greedy policy with epsilon 0.2"""
    return make_policy(0.2)

@pytest.fixture
def function_policy():
    """Returns a normal e greedy policy with epsilon 0.2 provided by a function"""
    return make_policy(lambda: 0.2)
def test_epsilon_zero(zero_policy, q_function):
    # Epsilon 0 means purely greedy selection: action 1 has the higher value.
    q_function.set_state_action_values(STATE_A, -1, 1)
    assert zero_policy.select(STATE_A, q_function) == 1

def test_multiple_states(zero_policy, q_function):
    # Greedy selection is per-state: the best action differs between states.
    q_function.set_state_action_values(STATE_A, -1, 1)
    q_function.set_state_action_values(STATE_B, 10, -5)
    assert zero_policy.select(STATE_A, q_function) == 1
    assert zero_policy.select(STATE_B, q_function) == 0

def test_non_zero_epsilon(epsilon_policy, q_function):
    # Seeding makes the epsilon branch deterministic: the exploratory action 0
    # is picked even though action 1 has the higher value.
    random.seed(1)
    q_function.set_state_action_values(STATE_A, -1, 1)
    assert epsilon_policy.select(STATE_A, q_function) == 0

def test_epsilon_as_function(function_policy, q_function):
    # Epsilon may also be supplied as a callable; same outcome as above.
    random.seed(1)
    q_function.set_state_action_values(STATE_A, -1, 1)
    assert function_policy.select(STATE_A, q_function) == 0

def test_incomplete_state(zero_policy, q_function):
    # Only action 0 has a recorded value (-1); the unseen action 1 wins --
    # presumably QFunctionWrapper defaults unknown values to 0; confirm.
    q_function[STATE_A, 0] = -1
    assert zero_policy.select(STATE_A, q_function) == 1

def test_invalid_actions_are_ignored(q_function):
    # The filter rejects action 0, so the policy must pick action 1 despite
    # action 0 carrying the higher value.
    q_function[STATE_A, 0] = 10
    q_function[STATE_A, 1] = -1
    filter = MockFilter(Call((STATE_A, 0), returns=False), Call((STATE_A, 1), returns=True))
    policy = make_policy(0, filter)
    assert policy.select(STATE_A, q_function) == 1
|
[
"b.raml@gmx.at"
] |
b.raml@gmx.at
|
58c87bd862dce94dac7a79671928118b0d26a780
|
b01f25b447d5ec3d6bc08380ae2601d5badb6af3
|
/Graph Theory/find_town_judge_graph.py
|
a65dc5f13d930fed2aa087d3129ce35ebbcebc07
|
[] |
no_license
|
SurajPatil314/Leetcode-problems
|
0b05faab17214437a599d846dd1c9a7ea82b9c4c
|
9201a87246842855281c90a9705f83fce24d1137
|
refs/heads/master
| 2021-09-05T02:20:05.274438
| 2021-08-09T21:24:05
| 2021-08-09T21:24:05
| 203,467,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
"""
In a town, there are N people labelled from 1 to N. There is a rumor that one of these people is secretly the town judge.
If the town judge exists, then:
The town judge trusts nobody.
Everybody (except for the town judge) trusts the town judge.
There is exactly one person that satisfies properties 1 and 2.
You are given trust, an array of pairs trust[i] = [a, b] representing that the person labelled a trusts the person labelled b.
If the town judge exists and can be identified, return the label of the town judge. Otherwise, return -1.
"""
from collections import defaultdict
class Solution:
    def findJudge(self, N: int, trust: List[List[int]]) -> int:
        """Return the label (1..N) of the town judge, or -1 if there is none.

        The judge trusts nobody and is trusted by all other N-1 people.
        Counting in/out degrees runs in O(N + len(trust)) time and O(N) space,
        replacing the original defaultdict-of-lists bookkeeping.
        """
        trusted_by = [0] * (N + 1)  # trusted_by[p]: how many people trust p
        trusts = [0] * (N + 1)      # trusts[p]: how many people p trusts
        for a, b in trust:
            trusts[a] += 1
            trusted_by[b] += 1
        # N == 1 with no trust pairs falls out naturally: 0 == N - 1.
        for person in range(1, N + 1):
            if trusted_by[person] == N - 1 and trusts[person] == 0:
                return person
        return -1
|
[
"spatil2@umbc.edu"
] |
spatil2@umbc.edu
|
19d37369afbab32c4d2c09847ce2d0f8ce1afa84
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_12663.py
|
6031a1a7cbb9dc2a4982856ada1abfbb19ec911f
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
# Getting SQLAlchemy to issue CREATE SCHEMA on create_all: metadata.create_all()
# does not create schemas itself, so emit the DDL explicitly beforehand.
from sqlalchemy.schema import CreateSchema
# NOTE(review): assumes `engine` is an existing Engine bound to a backend that
# supports CREATE SCHEMA (e.g. PostgreSQL) -- confirm in context.
engine.execute(CreateSchema('my_schema'))
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
6e2c03b6b255c05b1a83bfd12d7a9abac813bc85
|
7ddc47adf330d48bf0b5c602a04c45fa4dc64590
|
/Multidimensional Lists/Exercises/Exercise multidimentional lists 04.py
|
5209364a7b2049de153e9e53b71c367116c85170
|
[] |
no_license
|
Beshkov/Python_Advanced
|
634786e5f2de6f19ba74ab2fcf6489f751a774e8
|
e1bf1385a03b773b697bc65771193ebd9ef5d226
|
refs/heads/main
| 2023-03-03T16:19:35.271919
| 2021-02-18T20:45:37
| 2021-02-18T20:45:37
| 329,417,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,503
|
py
|
# Read the board dimensions, then r rows of whitespace-separated cells
# (cells are kept as strings; c is read but columns are re-derived from rows).
r, c = tuple(int(el) for el in input().split())
matrix = [[el for el in input().split()] for _ in range(r)]
def is_valid(command, matrix):
    """Parse a 'swap r1 c1 r2 c2' command against *matrix*.

    Returns the (r1, c1, r2, c2) tuple when the command is well-formed and all
    coordinates are in range; otherwise prints 'Invalid input!' and returns None.
    """
    tokens = command.split()
    if tokens[0] != 'swap' or len(tokens) != 5:
        print('Invalid input!')
        return
    r1, c1, r2, c2 = (int(token) for token in tokens[1:])
    rows, cols = len(matrix), len(matrix[0])
    if r1 >= rows or r2 >= rows or c1 >= cols or c2 >= cols:
        print('Invalid input!')
        return
    return (r1, c1, r2, c2)
def swap(command, matrix):
    """Apply one swap command to *matrix* in place, then print the matrix.

    Fix: the original called is_valid() twice (once to test, once to unpack),
    parsing and validating the command two times; now it is called once.
    """
    coords = is_valid(command, matrix)
    if coords:
        r1, c1, r2, c2 = coords
        matrix[r1][c1], matrix[r2][c2] = matrix[r2][c2], matrix[r1][c1]
    # The matrix is printed even after an invalid command, as before.
    for row in matrix:
        print(" ".join(map(str, row)))
# Process swap commands until the "END" sentinel is read.
command = input()
while not command == "END":
    swap(command, matrix)
    command = input()
# def check_if_index_is_valid(index_row,index_col, rows, cols):
# if 0 <= index_row < rows and 0<= index_col < cols:
# return True
# return False
#
# def check_if_command_is_valid(command,coordinates, rows, cols):
# if len(coordinates) == 4 and command == "swap":
# for index in range(0,len(coordinates), 2):
# if not check_if_index_is_valid(coordinates[index], coordinates[index+1], rows,cols):
# print('Invalid input!')
# return False
# return True
#
# def print_matrix(matrix):
# for row_index in range(len(matrix)):
# for col_index in range(0,len(matrix[row_index])):
# print(matrix[row_index][col_index], end=' ')
# print()
#
#
# def init_matrix(rows):
#
# matrix = []
# for _ in range(rows):
# matrix.append([el for el in input().split()])
# return matrix
#
# rows, cols = [int(el) for el in input().split()]
# matrix = init_matrix(rows)
# data = input()
#
# while not data == "END":
#
# splitted_data = data.split()
# try:
# command = splitted_data[0]
# coordinates = [int(el) for el in splitted_data[1:]]
# except:
# print("Invalid input")
# if check_if_command_is_valid(command,coordinates, rows, cols):
# temp = matrix[coordinates[0]][coordinates[1]]
# matrix[coordinates[0]][coordinates[1]] = matrix[coordinates[2]][coordinates[3]]
# matrix[coordinates[2]][coordinates[3]] = temp
# print_matrix(matrix)
#
# data = input()
|
[
"alexander.beshkov@gmail.com"
] |
alexander.beshkov@gmail.com
|
966bee64272b1e61f7a41d8aa904ff20763d2f3c
|
79e1b7919f34ec0425bb98cc4888e092e94e0273
|
/readMotionZone3.py
|
d3e197bc34d632b274850cb613793b6b3042dd07
|
[] |
no_license
|
hamhochoi/K59_training
|
099e0c193b94677045bd3622f04440d49a3765fb
|
845ab3ac64035bf25a72c8dd380cc133092f2e42
|
refs/heads/master
| 2021-01-19T08:59:34.938053
| 2018-10-24T17:46:29
| 2018-10-24T17:46:29
| 87,705,721
| 0
| 0
| null | 2018-01-13T16:49:23
| 2017-04-09T12:09:21
| null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
import paho.mqtt.client as mqtt
import time
from datetime import datetime
#f = open("time_pin.txt", "a")
def on_connect(client, userdata, flags, rc):
    """Callback fired once the broker acknowledges the connection."""
    print(f"Connected with result code {rc}")
    # Subscribing here (rather than in main) means the subscription is renewed
    # automatically whenever the client reconnects.
    client.subscribe("zone_3/box_1/motion/id_1")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload) + " " + str(datetime.now()))
f = open("time_pin_zone3.txt", "a")
f.write(str(msg.payload) + " " + str(datetime.now()))
f.write("\n")
f.close()
time.sleep(5*60)
# Wire up the callbacks and block forever servicing the MQTT network loop.
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# Public HiveMQ broker, default MQTT port 1883, 60 s keepalive.
client.connect("broker.hivemq.com", 1883, 60)
client.loop_forever()
|
[
"dothanhwork2017@gmail.com"
] |
dothanhwork2017@gmail.com
|
764a01fab0d7c00916075b5b3e9e39d50334c0ab
|
31c7a5fb038c094abd9d25aa6eb30901aa255e5f
|
/planner/progression/utils.py
|
5352e96dab2b17174f42a3f797b21b240dbfbd26
|
[
"MIT"
] |
permissive
|
hxdaze/pyplanners
|
0711d8d7065ea347925bd4395cc22460f8989299
|
ef1ae33e233f20cd93ce03cba363b0f14fd078bc
|
refs/heads/master
| 2023-05-13T03:40:21.480863
| 2021-06-02T02:45:24
| 2021-06-02T02:45:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
from random import random, randint, choice
# TODO: Monte-Carlo Tree Search
DEFAULT_GOAL_PROBABILITY = 0.1
def pop_random(queue):
    """Remove and return a uniformly random element of *queue* (a deque)."""
    offset = randint(0, len(queue) - 1)
    queue.rotate(offset)
    return queue.popleft()
def sample_target(sample, goal_sample=None, goal_probability=DEFAULT_GOAL_PROBABILITY):
    """Draw from goal_sample with probability goal_probability (when provided),
    otherwise draw from sample."""
    use_goal = (goal_sample is not None) and (random() < goal_probability)
    return goal_sample() if use_goal else sample()
def pop_min(queue, distance):
    """Remove and return the element of *queue* (a deque) with the smallest
    distance(element), breaking ties uniformly at random.

    Bug fix: deque.rotate(k) rotates to the RIGHT, so rotating by the chosen
    index brought the wrong element to the front (e.g. deque([3, 1, 2]) popped
    2 instead of 1).  Rotating by -index moves the minimum to the front.
    """
    best_score, best_indices = None, []
    for index, element in enumerate(queue):
        score = distance(element)
        if best_score is None or score < best_score:
            best_score, best_indices = score, [index]
        elif score == best_score:
            best_indices.append(index)
    queue.rotate(-choice(best_indices))  # left-rotate: chosen index -> front
    return queue.popleft()
def pop_rrt(distance, sample, goal_sample=None, goal_probability=DEFAULT_GOAL_PROBABILITY):
    """Build a queue-pop strategy for RRT-style expansion: pop the vertex whose
    state is closest to a sampled target."""
    def pop(queue):
        # NOTE: a fresh target is drawn for every element scored, mirroring the
        # original lambda (sample_target sits inside the per-element distance).
        def score(vertex):
            return distance(vertex.state, sample_target(
                sample, goal_sample=goal_sample, goal_probability=goal_probability))
        return pop_min(queue, score)
    return pop
|
[
"caelan@mit.edu"
] |
caelan@mit.edu
|
2a060797c6f0596efb85781e96e662b62b25a605
|
e20ed90b9be7a0bcdc1603929d65b2375a224bf6
|
/generated-libraries/python/netapp/volume/volume_qos_attributes.py
|
d9b68e9559d1de7eb232b2af87826e82481b4776
|
[
"MIT"
] |
permissive
|
radekg/netapp-ontap-lib-gen
|
530ec3248cff5ead37dc2aa47ced300b7585361b
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
refs/heads/master
| 2016-09-06T17:41:23.263133
| 2015-01-14T17:40:46
| 2015-01-14T17:40:46
| 29,256,898
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,405
|
py
|
from netapp.netapp_object import NetAppObject
class VolumeQosAttributes(NetAppObject):
    """
    QoS policy group attached with a volume.
    """

    # Backing field for the 'policy-group-name' element; None until assigned.
    _policy_group_name = None

    @property
    def policy_group_name(self):
        """
        The QoS policy group associated with this volume.
        <p>
        Optionally selects which QoS policy group applies to the volume.  The
        policy group defines measurable service level objectives (SLOs) for
        the storage objects it is associated with.  Without a policy group
        the system does not monitor and control the volume's traffic.  Not
        supported on Infinite Volumes.
        <p>
        Attributes: optional-for-create, modifiable
        """
        return self._policy_group_name

    @policy_group_name.setter
    def policy_group_name(self, val):
        if val is None:
            return
        self.validate('policy_group_name', val)
        self._policy_group_name = val

    @staticmethod
    def get_api_name():
        return "volume-qos-attributes"

    @staticmethod
    def get_desired_attrs():
        return [
            'policy-group-name',
        ]

    def describe_properties(self):
        # NOTE: 'basestring' ties this generated module to Python 2.
        return {
            'policy_group_name': {'class': basestring, 'is_list': False, 'required': 'optional'},
        }
|
[
"radek@gruchalski.com"
] |
radek@gruchalski.com
|
ed29596b420bf723cd8950b86899310a5b4c08c5
|
8ad9ad16582ad302b121ce9f6cd743dff5187454
|
/dataset/dataset_loader.py
|
4cb35ca8619a1e7fa17cd3c92077eadf218d5962
|
[] |
no_license
|
wolf943134497/3D-Aware-Ellipses-for-Visual-Localization
|
0d7d55b2b0997ffdd97f27b31e09de7725790dda
|
9caddecd4e175d7528ea6b8d74bb79954c74efef
|
refs/heads/master
| 2023-05-05T07:45:54.831338
| 2021-05-23T19:33:08
| 2021-05-24T21:35:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,818
|
py
|
import json
import numpy as np
class Dataset_loader:
    """
    Interface with our own dataset file format: a JSON list where each entry
    describes one image (size, intrinsics K, pose R/t, file name, and
    optional object annotations).
    """

    def __init__(self, filename):
        # The whole dataset is loaded eagerly; entries are plain dicts.
        with open(filename, "r") as fin:
            self.dataset = json.load(fin)
        self.nb_images = len(self.dataset)

    def __len__(self):
        return len(self.dataset)

    def _has_valid_index(self, idx):
        """Shared bounds check: warn and return False for an out-of-range index.

        (The same check was copy-pasted in every getter before.)
        """
        if idx < 0 or idx >= self.nb_images:
            print("Invalid index")
            return False
        return True

    def get_image_size(self, idx):
        """Return (width, height) of image idx, or None if idx is invalid."""
        if not self._has_valid_index(idx):
            return None
        return self.dataset[idx]["width"], self.dataset[idx]["height"]

    def get_K(self, idx):
        """Return the intrinsic matrix K as a numpy array, or None if invalid."""
        if not self._has_valid_index(idx):
            return None
        return np.asarray(self.dataset[idx]["K"])

    def get_Rt(self, idx):
        """Return the 3x4 extrinsic matrix [R|t], or None if idx is invalid."""
        if not self._has_valid_index(idx):
            return None
        R = np.asarray(self.dataset[idx]["R"])
        t = np.asarray(self.dataset[idx]["t"])
        return np.hstack((R, t.reshape((3, 1))))

    def get_rgb_filename(self, idx):
        """Return the RGB image file name, or None if idx is invalid."""
        if not self._has_valid_index(idx):
            return None
        return self.dataset[idx]["file_name"]

    def get_annotations(self, idx):
        """Return the object annotations, or None if idx is invalid or the
        entry carries no annotations."""
        if not self._has_valid_index(idx):
            return None
        if "annotations" not in self.dataset[idx].keys():
            print("No annotations available")
            return None
        return self.dataset[idx]["annotations"]
|
[
"zins.matthieu@gmail.com"
] |
zins.matthieu@gmail.com
|
f412e114622a62333efacf8bd085912edb0d96e1
|
7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14
|
/airbyte-integrations/connectors/source-amazon-ads/source_amazon_ads/streams/report_streams/products_report.py
|
ea97bf690ae643e105877fc34e221bea461fb5e4
|
[
"MIT",
"Elastic-2.0"
] |
permissive
|
Velocity-Engineering/airbyte
|
b6e1fcead5b9fd7c74d50b9f27118654604dc8e0
|
802a8184cdd11c1eb905a54ed07c8732b0c0b807
|
refs/heads/master
| 2023-07-31T15:16:27.644737
| 2021-09-28T08:43:51
| 2021-09-28T08:43:51
| 370,730,633
| 0
| 1
|
MIT
| 2021-06-08T05:58:44
| 2021-05-25T14:55:43
|
Java
|
UTF-8
|
Python
| false
| false
| 8,127
|
py
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from copy import copy
from .report_streams import RecordType, ReportStream
METRICS_MAP = {
"campaigns": [
"bidPlus",
"campaignName",
"campaignId",
"campaignStatus",
"campaignBudget",
"campaignRuleBasedBudget",
"applicableBudgetRuleId",
"applicableBudgetRuleName",
"impressions",
"clicks",
"cost",
"attributedConversions1d",
"attributedConversions7d",
"attributedConversions14d",
"attributedConversions30d",
"attributedConversions1dSameSKU",
"attributedConversions7dSameSKU",
"attributedConversions14dSameSKU",
"attributedConversions30dSameSKU",
"attributedUnitsOrdered1d",
"attributedUnitsOrdered7d",
"attributedUnitsOrdered14d",
"attributedUnitsOrdered30d",
"attributedSales1d",
"attributedSales7d",
"attributedSales14d",
"attributedSales30d",
"attributedSales1dSameSKU",
"attributedSales7dSameSKU",
"attributedSales14dSameSKU",
"attributedSales30dSameSKU",
"attributedUnitsOrdered1dSameSKU",
"attributedUnitsOrdered7dSameSKU",
"attributedUnitsOrdered14dSameSKU",
"attributedUnitsOrdered30dSameSKU",
],
"adGroups": [
"campaignName",
"campaignId",
"adGroupName",
"adGroupId",
"impressions",
"clicks",
"cost",
"attributedConversions1d",
"attributedConversions7d",
"attributedConversions14d",
"attributedConversions30d",
"attributedConversions1dSameSKU",
"attributedConversions7dSameSKU",
"attributedConversions14dSameSKU",
"attributedConversions30dSameSKU",
"attributedUnitsOrdered1d",
"attributedUnitsOrdered7d",
"attributedUnitsOrdered14d",
"attributedUnitsOrdered30d",
"attributedSales1d",
"attributedSales7d",
"attributedSales14d",
"attributedSales30d",
"attributedSales1dSameSKU",
"attributedSales7dSameSKU",
"attributedSales14dSameSKU",
"attributedSales30dSameSKU",
"attributedUnitsOrdered1dSameSKU",
"attributedUnitsOrdered7dSameSKU",
"attributedUnitsOrdered14dSameSKU",
"attributedUnitsOrdered30dSameSKU",
],
"keywords": [
"campaignName",
"campaignId",
"adGroupName",
"adGroupId",
"keywordId",
"keywordText",
"matchType",
"impressions",
"clicks",
"cost",
"attributedConversions1d",
"attributedConversions7d",
"attributedConversions14d",
"attributedConversions30d",
"attributedConversions1dSameSKU",
"attributedConversions7dSameSKU",
"attributedConversions14dSameSKU",
"attributedConversions30dSameSKU",
"attributedUnitsOrdered1d",
"attributedUnitsOrdered7d",
"attributedUnitsOrdered14d",
"attributedUnitsOrdered30d",
"attributedSales1d",
"attributedSales7d",
"attributedSales14d",
"attributedSales30d",
"attributedSales1dSameSKU",
"attributedSales7dSameSKU",
"attributedSales14dSameSKU",
"attributedSales30dSameSKU",
"attributedUnitsOrdered1dSameSKU",
"attributedUnitsOrdered7dSameSKU",
"attributedUnitsOrdered14dSameSKU",
"attributedUnitsOrdered30dSameSKU",
],
"productAds": [
"campaignName",
"campaignId",
"adGroupName",
"adGroupId",
"impressions",
"clicks",
"cost",
"currency",
"asin",
"attributedConversions1d",
"attributedConversions7d",
"attributedConversions14d",
"attributedConversions30d",
"attributedConversions1dSameSKU",
"attributedConversions7dSameSKU",
"attributedConversions14dSameSKU",
"attributedConversions30dSameSKU",
"attributedUnitsOrdered1d",
"attributedUnitsOrdered7d",
"attributedUnitsOrdered14d",
"attributedUnitsOrdered30d",
"attributedSales1d",
"attributedSales7d",
"attributedSales14d",
"attributedSales30d",
"attributedSales1dSameSKU",
"attributedSales7dSameSKU",
"attributedSales14dSameSKU",
"attributedSales30dSameSKU",
"attributedUnitsOrdered1dSameSKU",
"attributedUnitsOrdered7dSameSKU",
"attributedUnitsOrdered14dSameSKU",
"attributedUnitsOrdered30dSameSKU",
],
"asins_keywords": [
"campaignName",
"campaignId",
"adGroupName",
"adGroupId",
"keywordId",
"keywordText",
"asin",
"otherAsin",
"sku",
"currency",
"matchType",
"attributedUnitsOrdered1d",
"attributedUnitsOrdered7d",
"attributedUnitsOrdered14d",
"attributedUnitsOrdered30d",
"attributedUnitsOrdered1dOtherSKU",
"attributedUnitsOrdered7dOtherSKU",
"attributedUnitsOrdered14dOtherSKU",
"attributedUnitsOrdered30dOtherSKU",
"attributedSales1dOtherSKU",
"attributedSales7dOtherSKU",
"attributedSales14dOtherSKU",
"attributedSales30dOtherSKU",
],
"asins_targets": [
"campaignName",
"campaignId",
"adGroupName",
"adGroupId",
"asin",
"otherAsin",
"sku",
"currency",
"matchType",
"attributedUnitsOrdered1d",
"attributedUnitsOrdered7d",
"attributedUnitsOrdered14d",
"attributedUnitsOrdered30d",
"attributedUnitsOrdered1dOtherSKU",
"attributedUnitsOrdered7dOtherSKU",
"attributedUnitsOrdered14dOtherSKU",
"attributedUnitsOrdered30dOtherSKU",
"attributedSales1dOtherSKU",
"attributedSales7dOtherSKU",
"attributedSales14dOtherSKU",
"attributedSales30dOtherSKU",
"targetId",
"targetingText",
"targetingType",
],
"targets": [
"campaignName",
"campaignId",
"adGroupName",
"adGroupId",
"targetId",
"targetingExpression",
"targetingText",
"targetingType",
"impressions",
"clicks",
"cost",
"attributedConversions1d",
"attributedConversions7d",
"attributedConversions14d",
"attributedConversions30d",
"attributedConversions1dSameSKU",
"attributedConversions7dSameSKU",
"attributedConversions14dSameSKU",
"attributedConversions30dSameSKU",
"attributedUnitsOrdered1d",
"attributedUnitsOrdered7d",
"attributedUnitsOrdered14d",
"attributedUnitsOrdered30d",
"attributedSales1d",
"attributedSales7d",
"attributedSales14d",
"attributedSales30d",
"attributedSales1dSameSKU",
"attributedSales7dSameSKU",
"attributedSales14dSameSKU",
"attributedSales30dSameSKU",
"attributedUnitsOrdered1dSameSKU",
"attributedUnitsOrdered7dSameSKU",
"attributedUnitsOrdered14dSameSKU",
"attributedUnitsOrdered30dSameSKU",
],
}
class SponsoredProductsReportStream(ReportStream):
    """
    Sponsored Products report stream.

    https://advertising.amazon.com/API/docs/en-us/sponsored-products/2-0/openapi#/Reports
    """

    metrics_map = METRICS_MAP

    def report_init_endpoint(self, record_type: str) -> str:
        """Endpoint that starts report generation for the given record type."""
        return f"/v2/sp/{record_type}/report"

    def _get_init_report_body(self, report_date: str, record_type: str, profile):
        """Build the JSON body used to request report generation."""
        metrics = self.metrics_map[record_type]
        body = {"reportDate": report_date}
        if RecordType.ASINS in record_type:
            body["campaignType"] = "sponsoredProducts"
        if profile.accountInfo.type == "vendor":
            # Vendor profiles carry no SKU metric; remove it from a copy so
            # the shared metrics_map lists are never mutated.
            metrics = list(metrics)
            metrics.remove("sku")
        body["metrics"] = ",".join(metrics)
        return body
|
[
"noreply@github.com"
] |
Velocity-Engineering.noreply@github.com
|
a8d8af51088b85de2bc2918484ac156679217741
|
6a9f6e6527afb38611a5c695e5845e492d7676ff
|
/102.binary-tree-level-order-traversal.py
|
7eb77a9a8d81de928a9b97a1005e30cb9e67ae71
|
[] |
no_license
|
Junyangz/leetcode
|
39913d48778d369f9f0a96070352d63b207e7df6
|
5de48fbdcb61ce8a437255a119a4a3ae242ede52
|
refs/heads/master
| 2020-06-09T08:57:26.361138
| 2019-11-14T08:38:07
| 2019-11-14T08:38:07
| 193,413,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
#
# @lc app=leetcode id=102 lang=python3
#
# [102] Binary Tree Level Order Traversal
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # Retained for interface compatibility with the original recursive version,
    # which accumulated levels in this class-level attribute.
    res = []

    def levelOrder(self, root: "TreeNode", l=0) -> "List[List[int]]":
        """Return node values grouped by depth, e.g. [[root], [children], ...].

        Fixes relative to the original:
        - iterative BFS replaces recursion through a *class-level* list that
          was shared across Solution instances;
        - an empty tree now yields [] instead of None, so the result is
          always a list;
        - annotations are quoted so the module imports without TreeNode/List
          in scope.
        The unused ``l`` parameter is kept for backward compatibility.
        """
        self.res = []
        if root is None:
            return self.res
        level = [root]
        while level:
            self.res.append([node.val for node in level])
            level = [child for node in level
                     for child in (node.left, node.right) if child is not None]
        return self.res
|
[
"junyangz.iie@gmail.com"
] |
junyangz.iie@gmail.com
|
227324e4b93ca4b8bf74ffa2dd956e21cce31967
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_primaries.py
|
e457d57cae006604c84eb17a3f6a03363e756bca
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
from xai.brain.wordbase.adjectives._primary import _PRIMARY
#calss header
class _PRIMARIES(_PRIMARY):
    """Word-base entry for "primaries", the plural form of the adjective "primary"."""

    def __init__(self):
        _PRIMARY.__init__(self)
        self.name = "PRIMARIES"
        self.specie = 'adjectives'
        self.basic = "primary"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d2cb3437eb8ea8a93580f88ea57e3ac8d9291e75
|
30b97efb2f36f81aa684d16d19e0e2db17f2967d
|
/09. 기본수학2/3009.py
|
9717ea46cc12a0dece6c17b3d29096b76c42fd5b
|
[] |
no_license
|
jmseb3/bakjoon
|
0a784a74c6476ef51864e2ada9d2551c7c7979eb
|
a38db54e851372059b0e45add92e43e556835e62
|
refs/heads/main
| 2023-08-25T08:43:04.579785
| 2021-10-01T08:40:37
| 2021-10-01T08:40:37
| 362,287,450
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
# BOJ 3009: given three corners of an axis-parallel rectangle, print the
# fourth.  The missing corner's x (resp. y) is the coordinate appearing an
# odd number of times; XOR of the three values cancels the duplicated pair.
xs = []
ys = []
for _ in range(3):
    a, b = map(int, input().split())
    xs.append(a)
    ys.append(b)
print(xs[0] ^ xs[1] ^ xs[2], ys[0] ^ ys[1] ^ ys[2])
|
[
"jmseb3@naver.com"
] |
jmseb3@naver.com
|
eabdc460c1970ab10e273e221fc427cb01c439ff
|
08db753c516c90538fbebe6570b62caa395c6860
|
/MRaswan_Assignment8/harry_potter.py
|
ad18f3ee2f9e8943ef7cfde23a4a6da5a90242f2
|
[] |
no_license
|
meghnaraswan/PythonAssignments
|
4c717fb90e6a782db8789582408d8ad313bbd000
|
63d6afaa0a6f4086031eaa66b1fbb7a6a533cd8f
|
refs/heads/master
| 2021-07-11T10:39:14.403841
| 2021-04-04T19:40:49
| 2021-04-04T19:40:49
| 237,349,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,546
|
py
|
#Meghna Raswan
#2337415
#raswan@chapman.edu
#CPSC 230-10 (1515)
#Assignment 8
#word count
import operator
def read_file(input_file_name):
    """Read a text file and return its words as one lower-cased,
    punctuation-stripped, space-separated string (with a trailing space,
    matching the original concatenation output; empty file -> "").

    :param input_file_name: path of the file to read
    :return: cleaned words joined by single spaces
    """
    words = []
    # 'with' guarantees the handle is closed even on exception; the original
    # explicit open()/close() pair leaked the handle on any error.
    with open(input_file_name, 'r') as input_file_handle:
        for line_str in input_file_handle:      # iterate over every line
            for word in line_str.split():       # split into raw tokens
                word = word.lower().strip(',.!?"')
                if word:                        # skip pure-punctuation tokens
                    words.append(word)
    # join() instead of quadratic string +=; keep the trailing space the
    # original produced so callers see identical output.
    return ' '.join(words) + ' ' if words else ''
def build_dictionary(string_of_words):
    """Count word occurrences in a whitespace-separated string.

    :param string_of_words: cleaned text, words separated by whitespace
    :return: dict mapping each word to its number of occurrences
    """
    # stdlib Counter replaces the hand-rolled if/else counting loop;
    # function-scope import keeps the module's top-level imports unchanged.
    from collections import Counter
    return dict(Counter(string_of_words.split()))
def write_file(word_dict):
    """Write "word, count" lines to counts.txt, most frequent word first,
    echoing each pair to stdout.

    :param word_dict: non-empty mapping of word -> occurrence count
        (an empty dict raises NameError at the final return, as before)
    :return: the last (word, count) pair written (a least-frequent word)
    """
    # Sort by count, descending; ties keep insertion order (stable sort).
    sorted_list = sorted(word_dict.items(), key=operator.itemgetter(1), reverse=True)
    # 'with' closes counts.txt even if a write fails (original leaked on error).
    with open("counts.txt", "w") as output_file:
        for (word, count) in sorted_list:
            print("{}, {}".format(word, count))
            output_file.write("{}, {}\n".format(word, count))
    return (word, count)
if __name__ == '__main__':  # run only when executed as a script, not on import
    input_file = 'harry_potter.txt'  # source text to analyse (must exist in cwd)
    word_string = read_file(input_file)  # clean + flatten the text into one string
    word_dict = build_dictionary(word_string)  # word -> occurrence count
    write_file(word_dict)  # write counts.txt, most frequent word first
|
[
"raswan@chapman.edu"
] |
raswan@chapman.edu
|
751151aa43a65c2ae6631e2343285b05224fe7b3
|
b9d53e5506ee883d74614cf564e37de53e924b5c
|
/run.py
|
7b84858abcb14586b5bb52e0c378af1bf6e69ace
|
[] |
no_license
|
Hodo7amShichiYA/VK_Img_V1
|
e5eb78d0374d24219183fb98da4b0e9018973611
|
307601cfa23bd150487eb14ecea43e3f0ab0aa0d
|
refs/heads/master
| 2020-06-05T09:00:25.851616
| 2019-06-17T16:50:43
| 2019-06-17T16:50:43
| 192,385,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,102
|
py
|
from selenium import webdriver
from lxml import etree
import time
from time import sleep
def vkimg(userid):
    """Scrape photo URLs from vk.com one page per click, resuming from the
    URL stored in '<userid> lasturl.txt'.

    Side effects: logs into VK with the credentials below, appends every
    scraped image src to '<userid> url.txt', and rewrites
    '<userid> lasturl.txt' after each page so an interrupted run resumes.
    """
    # NOTE(review): hard-coded placeholder credentials; replace before use.
    email = 'x'
    pwd = 'x'
    # Resume point saved by a previous run (or seeded by __main__ below).
    with open('./%s lasturl.txt'%userid, 'r+') as l:
        url = l.read()
    print('地址获取成功')
    # Headless Chrome so the scraper runs without a display.
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')
    browser = webdriver.Chrome(chrome_options=chrome_options)
    # Implicit wait: each find_element call polls the DOM for up to 30 s.
    browser.implicitly_wait(30)
    browser.get("https://vk.com/feed")
    print('LOGIN...')
    elem = browser.find_element_by_id('email')
    elem.send_keys(email)
    elem = browser.find_element_by_id('pass')
    elem.send_keys(pwd)
    elem = browser.find_element_by_id('login_button')
    elem.click()
    print('登录完成')
    browser.get(url)
    starttime = time.time()
    # Up to 40000 photos; each iteration scrapes one image src, persists
    # progress, then clicks the photo to advance to the next one.
    for i in range(1,40000):
        pst = time.time()
        sleep(3)  # give the photo page time to render before scraping
        response = browser.page_source
        html = etree.HTML(response)
        html_data = html.xpath('//*[@id="pv_photo"]/img/@src')[0]
        print('获取完成')
        pageurl = browser.current_url
        print('正在抓取第%s页的页面代码:%s' % (i,pageurl))
        # Persist the current page URL so a crash/restart resumes here.
        with open('./%s lasturl.txt'%userid, 'w') as tf:
            tf.write(pageurl)
        print('获取完成')
        # Append (not overwrite): url.txt accumulates across the whole run.
        with open('./%s url.txt'%userid, 'a') as f:
            f.write('%s\n'%html_data)
        print('第%s页图片地址抓取完成' % i)
        # Clicking the photo element navigates to the next image.
        elem = browser.find_element_by_id('pv_photo')
        elem.click()
        pet = time.time()
        print('第%s张图片抓取用时%d秒'%(i,(pet-pst)))
    browser.quit()
    endtime = time.time()
    print('程序执行时长:%d 秒'%(endtime-starttime))
if __name__ == "__main__":
    userid = input('输入本次抓取的文件名:')
    print('尝试获取上次保存的地址')
    # Try to resume from a previously saved URL; on the first run (file
    # missing) ask the user for the starting photo URL and seed the file.
    try:
        with open('./'+userid+' lasturl.txt', 'r+') as f:
            aa = f.read()
            print(aa)
    except:
        # NOTE(review): bare except also hides unrelated errors (e.g.
        # permission problems); catching FileNotFoundError would be safer.
        intro = input('初次抓取请填入第一张图片的地址:')
        with open('./%s lasturl.txt' % userid, 'w') as lu:
            lu.write(intro)
    vkimg(userid)
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
22f9942da36f5533775e4cecabe544164b90f9ba
|
000a4b227d970cdc6c8db192f4437698cb782721
|
/python/helpers/typeshed/stdlib/profile.pyi
|
982bcabad40111fe78d27e3f9ba69534cb546a7e
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
trinhanhngoc/intellij-community
|
2eb2f66a2a3a9456e7a0c5e7be1eaba03c38815d
|
1d4a962cfda308a73e0a7ef75186aaa4b15d1e17
|
refs/heads/master
| 2022-11-03T21:50:47.859675
| 2022-10-19T16:39:57
| 2022-10-19T23:25:35
| 205,765,945
| 1
| 0
|
Apache-2.0
| 2019-09-02T02:55:15
| 2019-09-02T02:55:15
| null |
UTF-8
|
Python
| false
| false
| 1,364
|
pyi
|
from _typeshed import Self, StrOrBytesPath
from typing import Any, Callable, TypeVar
from typing_extensions import ParamSpec
__all__ = ["run", "runctx", "Profile"]
# Module-level helpers: profile a statement in the caller's namespace (run)
# or in an explicitly supplied globals/locals pair (runctx).
def run(statement: str, filename: str | None = ..., sort: str | int = ...) -> None: ...
def runctx(
    statement: str, globals: dict[str, Any], locals: dict[str, Any], filename: str | None = ..., sort: str | int = ...
) -> None: ...

_T = TypeVar("_T")
_P = ParamSpec("_P")
# (filename, line number, function name) triple identifying a code site.
_Label = tuple[str, int, str]

class Profile:
    # Calibration constant subtracted from each timing measurement.
    bias: int
    stats: dict[_Label, tuple[int, int, int, int, dict[_Label, tuple[int, int, int, int]]]]  # undocumented
    def __init__(self, timer: Callable[[], float] | None = ..., bias: int | None = ...) -> None: ...
    def set_cmd(self, cmd: str) -> None: ...
    def simulate_call(self, name: str) -> None: ...
    def simulate_cmd_complete(self) -> None: ...
    def print_stats(self, sort: str | int = ...) -> None: ...
    def dump_stats(self, file: StrOrBytesPath) -> None: ...
    def create_stats(self) -> None: ...
    def snapshot_stats(self) -> None: ...
    # run/runctx execute a statement under the profiler and return self for
    # chaining; runcall invokes a callable and returns its result.
    def run(self: Self, cmd: str) -> Self: ...
    def runctx(self: Self, cmd: str, globals: dict[str, Any], locals: dict[str, Any]) -> Self: ...
    def runcall(self, __func: Callable[_P, _T], *args: _P.args, **kw: _P.kwargs) -> _T: ...
    def calibrate(self, m: int, verbose: int = ...) -> float: ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
98cbfc244d6321a2b2564a9a471a7607c49aa45d
|
943e502b6e293c48564cf6c6d7dd7e39fc7dcf56
|
/unnecessary_math/unnecessary_math.py
|
8bf025b4cc2960f579adae219e7107e308fd6d53
|
[] |
no_license
|
charsyam/unnecessary_math
|
e3a49f757dcf1a606a53c90fb8be5ba50e5dd412
|
fcd6fe4a87b89419bb05886388e9f6342079b5cb
|
refs/heads/master
| 2021-01-20T12:52:19.963717
| 2017-05-13T03:33:19
| 2017-05-13T03:33:19
| 90,422,244
| 0
| 0
| null | 2017-05-13T03:33:19
| 2017-05-05T22:44:48
|
Python
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
'''
Module showing how doctests can be included with source code
Each '>>>' line is run as if in a python shell, and counts as a test.
The next line, if not '>>>' is the expected output of the previous line.
If anything doesn't match exactly (including trailing spaces), the test fails.
'''
def multiply(a, b):
    """Return the product of *a* and *b*.

    Works for anything supporting ``*``, including sequence repetition.

    >>> multiply(4, 3)
    12
    >>> multiply('a', 3)
    'aaa'
    """
    product = a * b
    return product
def add(a, b):
    """Return the sum of *a* and *b*.

    Doctest added for consistency with ``multiply`` (the module's stated
    purpose is demonstrating doctests).

    >>> add(4, 3)
    7
    >>> add('a', 'b')
    'ab'
    """
    return a + b
|
[
"charsyam@naver.com"
] |
charsyam@naver.com
|
0fd3fb1d4912d8adc8ba824a5d0208ed3da11c1d
|
a28e1e659e4dd82be5e253443b0c7a808cdcee92
|
/DataStructure/tree.py
|
a5baa7fec37b604010a20f5f471ca8781ac1a306
|
[] |
no_license
|
LeBron-Jian/BasicAlgorithmPractice
|
b2af112e8f1299fe17cf456111276fce874586cb
|
51943e2c2c4ec70c7c1d5b53c9fdf0a719428d7a
|
refs/heads/master
| 2023-06-07T19:12:16.362428
| 2023-05-27T06:58:12
| 2023-05-27T06:58:12
| 217,682,743
| 13
| 14
| null | 2020-09-12T01:50:35
| 2019-10-26T08:59:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
#_*_coding:utf-8_*_
class Node:
    """A node of the file-system tree: a named dir/file holding links both
    to its children and to its parent (doubly linked, so navigation up and
    down is O(1))."""

    def __init__(self, name, type='dir'):
        self.name = name
        # Either 'dir' or 'file'.
        self.type = type
        self.children = []
        self.parent = None

    def __repr__(self):
        # Show only the name so printed child lists read like `ls` output.
        return self.name
'''
分析列表,假设hello目录下如何找到子目录world的目录呢?
n = Node('hello')
n2 = Node('world')
n.children.append(n2)
那,如何通过world目录找到父亲目录 hello呢?
n2.parent = n
那么这样做就相当于双链表
'''
class FileSystemTree:
    """Minimal in-memory file-system tree supporting mkdir, ls and cd.

    ``self.now`` tracks the current working directory; directory names are
    normalised to end with '/'.
    """

    def __init__(self):
        self.root = Node("/")   # the root directory
        self.now = self.root    # current working directory

    def mkdir(self, name):
        """Create a directory named *name* under the current directory."""
        # Normalise: directory names always carry a trailing '/'.
        if name[-1] != '/':
            name += '/'
        node = Node(name)
        self.now.children.append(node)
        node.parent = self.now  # back-link enables cd('..')

    def ls(self):
        """Return the children of the current directory."""
        return self.now.children

    def cd(self, name):
        """Change the current directory.

        '..' moves up one level (a no-op at the root); any other name must
        match a direct child of the current directory.

        :raises ValueError: if *name* is not a child of the current dir.
        """
        if name[-1] != '/':
            name += '/'
        if name == '../':
            # Bug fix: at the root, parent is None; the original assigned it
            # anyway, leaving self.now = None and breaking every later call.
            if self.now.parent is not None:
                self.now = self.now.parent
            return
        for child in self.now.children:
            if child.name == name:
                self.now = child
                return
        raise ValueError("invalid dir")
# Smoke test: create three directories, enter one, and show that ls() is
# relative to the current directory while the root keeps all children.
tree = FileSystemTree()
tree.mkdir('var/')
tree.mkdir('bin/')
tree.mkdir('usr/')
print(tree.ls())  # [var/, bin/, usr/]
tree.cd('bin/')
print(tree.ls())  # [] -- bin/ has no children yet
print(tree.root.children)  # [var/, bin/, usr/]
|
[
"1171737614@qq.com"
] |
1171737614@qq.com
|
a357f9ead60456ab5a7e9d939b363b35d728a205
|
1dec4f8f58202b3ebabcb12816811d22a8b7da3c
|
/test/scanner.py
|
d300ce1d50a3569256666a359fd39d53558fd36a
|
[
"MIT"
] |
permissive
|
lsst-sqre/rubin-jupyter-hub
|
51df5d3590635009479732247c3c19333ea39ff5
|
162abe9422332aa60ef089af2862b7a0b0417fa1
|
refs/heads/master
| 2023-04-17T09:16:10.893045
| 2021-05-05T17:41:45
| 2021-05-05T17:41:45
| 287,143,775
| 0
| 0
|
NOASSERTION
| 2021-05-05T17:41:45
| 2020-08-13T00:33:31
|
Python
|
UTF-8
|
Python
| false
| false
| 280
|
py
|
#!/usr/bin/env python3
import rubin_jupyter_utils.hub as rh
# Build the singleton image scanner for the lsstsqre/sciplat-lab repository
# and warm its tag cache.  NOTE(review): scan() presumably performs network
# I/O against the container registry -- confirm before running offline.
q = rh.SingletonScanner(
    name="sciplat-lab",
    owner="lsstsqre",
    debug=True,
    experimentals=2,   # number of experimental builds to retain
    dailies=3,         # number of daily builds to retain
    weeklies=4,        # number of weekly builds to retain
    releases=3,        # number of release builds to retain
    cachefile="/tmp/reposcan.json",  # scan results cached here between runs
)
q.scan()
q.get_all_tags()
|
[
"athornton@gmail.com"
] |
athornton@gmail.com
|
e2810a18610a785f47a2d469dc933a538a438a11
|
b4b0e2979d3342449bd7847e619896a7f3878f2f
|
/day02/bathroom.py
|
396d3e9c53dcdcf71a2d0337c68f939dc29ad9eb
|
[
"MIT"
] |
permissive
|
ecly/adventofcode2016
|
ebbfa39e0932488d930e286321d73e0cb59d24b5
|
d08411a887cb2bf6b53785b9b6c193a69f78ba87
|
refs/heads/master
| 2018-10-20T23:23:07.113189
| 2018-07-20T17:37:53
| 2018-07-20T17:37:53
| 114,026,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
def first(instructions):
    """Decode the bathroom code on a standard 3x3 keypad (AoC 2016 day 2).

    Start on '5' (the centre); for each line of U/D/L/R moves, follow the
    moves -- clamping at the pad edges -- and record the key under the
    cursor at the end of the line.  Non-move characters (e.g. newlines)
    are ignored.
    """
    keypad = ((1, 2, 3), (4, 5, 6), (7, 8, 9))
    col, row = 1, 1  # centre key, '5'
    digits = []
    for line in instructions:
        for move in line:
            if move == 'U':
                row = max(row - 1, 0)
            elif move == 'D':
                row = min(row + 1, 2)
            elif move == 'L':
                col = max(col - 1, 0)
            elif move == 'R':
                col = min(col + 1, 2)
        digits.append(str(keypad[row][col]))
    return ''.join(digits)
def second(instructions):
    """Decode the code on the diamond-shaped keypad (AoC 2016 day 2, part 2).

    Same movement rules as part one, except a move is ignored whenever the
    destination square is off the pad or a None gap.  Start on key '5'
    (the left tip of the diamond).
    """
    keypad = ((None, None, 1,   None, None),
              (None, 2,    3,   4,    None),
              (5,    6,    7,   8,    9),
              (None, 'A',  'B', 'C',  None),
              (None, None, 'D', None, None))
    deltas = {'U': (0, -1), 'D': (0, 1), 'L': (-1, 0), 'R': (1, 0)}
    x, y = 0, 2
    code = ""
    for line in instructions:
        for move in line:
            if move not in deltas:
                continue  # skip trailing newlines etc.
            dx, dy = deltas[move]
            nx, ny = x + dx, y + dy
            # Step only if the target is on the grid and is a real key.
            if 0 <= nx <= 4 and 0 <= ny <= 4 and keypad[ny][nx] is not None:
                x, y = nx, ny
        code += str(keypad[y][x])
    return code
# Read the puzzle input (one instruction line per row) and print both answers.
with open('input.in', 'r') as file:
    lines = file.readlines()  # lines keep their trailing '\n'; both decoders ignore it
    print(first(lines))
    print(second(lines))
|
[
"ecly@mailbox.org"
] |
ecly@mailbox.org
|
c2a01c5648c2e7f6f5fbaef3915cdf3dae6ded4b
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2183/60895/250649.py
|
2ac41841d2853fb7b23c742a376c03a26de7de4b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
# For each test case n, print the sum of the 2n consecutive integers that
# start right after start = n*(n-1)  (start was accumulated in the original
# as 2*(n-1) + 2*(n-2) + ... + 2*1; the n == 1 special case "3" is the
# same formula: sum(1..2) = 3).
t = int(input())
# Bug fix: the original `while t > 0:` never decremented t, so it looped
# past the declared case count and crashed on EOF; iterate exactly t cases.
for _ in range(t):
    n = int(input())
    start = n * (n - 1)  # closed form of the original inner while-loop
    # total also no longer shadows the builtin `sum`.
    total = sum(range(start + 1, start + 2 * n + 1))
    print(total)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
15bbb5b5d25a476591ac3f22a66ddeba6418df73
|
e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7
|
/flask_api/venv/lib/python3.7/site-packages/vsts/task_agent/v4_1/models/endpoint_url.py
|
6ac3ef6470b0974e3a1bbc74eb73896987dc48b9
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
u-blavins/secret_sasquatch_society
|
c36993c738ab29a6a4879bfbeb78a5803f4f2a57
|
0214eadcdfa9b40254e331a6617c50b422212f4c
|
refs/heads/master
| 2020-08-14T00:39:52.948272
| 2020-01-22T13:54:58
| 2020-01-22T13:54:58
| 215,058,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class EndpointUrl(Model):
    """EndpointUrl.

    :param depends_on: Gets or sets the dependency bindings.
    :type depends_on: :class:`DependsOn <task-agent.v4_1.models.DependsOn>`
    :param display_name: Gets or sets the display name of service endpoint url.
    :type display_name: str
    :param help_text: Gets or sets the help text of service endpoint url.
    :type help_text: str
    :param is_visible: Gets or sets the visibility of service endpoint url.
    :type is_visible: str
    :param value: Gets or sets the value of service endpoint url.
    :type value: str
    """

    # msrest (de)serialization map: python attribute -> wire key and type.
    _attribute_map = {
        'depends_on': {'key': 'dependsOn', 'type': 'DependsOn'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'help_text': {'key': 'helpText', 'type': 'str'},
        'is_visible': {'key': 'isVisible', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'}
    }

    def __init__(self, depends_on=None, display_name=None, help_text=None, is_visible=None, value=None):
        super(EndpointUrl, self).__init__()
        self.depends_on = depends_on
        self.display_name = display_name
        self.help_text = help_text
        self.is_visible = is_visible
        self.value = value
|
[
"usama.blavins1@gmail.com"
] |
usama.blavins1@gmail.com
|
9bb9fa1995a4ad19d223a45f59b6a2586b015ae6
|
00db9cdb3cf82306c0de9de1aac0f7514192f1d7
|
/dataset/SUN.py
|
61af030add5b5c574858bbecf4fffa3c461152a7
|
[] |
no_license
|
lijiunderstand/BiSeNet-CCP
|
039b4aad6a20ed848141e09a265f50e204d0a28c
|
61023e13fed18fc58737194f1de8639b82382605
|
refs/heads/master
| 2020-04-24T22:19:47.595380
| 2019-01-22T01:30:40
| 2019-01-22T01:30:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,450
|
py
|
import torch
import glob
import os
from torchvision import transforms
from torchvision.transforms import functional as F
import cv2
from PIL import Image
import pandas as pd
import numpy as np
from imgaug import augmenters as iaa
import imgaug as ia
from utils import get_label_info, one_hot_it
import random
def augmentation():
    """Placeholder for spatial augmentation (Flip, Affine, Rotation, ...).

    Not implemented; the dataset currently applies only ``iaa.Fliplr``
    inside ``SUN.__getitem__``.
    """
    # augment images with spatial transformation: Flip, Affine, Rotation, etc...
    # see https://github.com/aleju/imgaug for more details
    pass
def augmentation_pixel():
    """Placeholder for pixel-intensity augmentation (GaussianBlur, Multiply, ...).

    Not implemented anywhere in this file yet.
    """
    # augment images with pixel intensity transformation: GaussianBlur, Multiply, etc...
    pass
class SUN(torch.utils.data.Dataset):
    """SUN RGB-D semantic-segmentation dataset.

    Each item is a (rgbd, label) pair: a 4-channel float tensor (RGB plus
    normalised depth concatenated along dim 0) and a one-hot label tensor
    of shape [num_classes, H, W].  In 'train' mode a single random
    horizontal flip is applied identically to image, depth and label.
    """

    def __init__(self, image_path, depth_path, label_path, csv_path, scale, mode='train'):
        super().__init__()
        self.mode = mode
        # The .jpg images drive the index; depth and label paths are derived
        # from each image's file-name stem with a .png extension.
        self.image_list = glob.glob(os.path.join(image_path, '*.jpg'))
        self.image_name = [x.split('/')[-1].split('.')[0] for x in self.image_list]
        self.depth_list = [os.path.join(depth_path, x + '.png') for x in self.image_name]
        self.label_list = [os.path.join(label_path, x + '.png') for x in self.image_name]
        # 50% horizontal flip; made deterministic per-sample in __getitem__.
        self.fliplr = iaa.Fliplr(0.5)
        self.label_info = get_label_info(csv_path)
        # resize: BILINEAR for RGB; NEAREST for depth/label so resizing never
        # invents interpolated depth values or mixed class ids.
        self.resize_img = transforms.Resize(scale, Image.BILINEAR)
        self.resize_depth = transforms.Resize(scale, Image.NEAREST)
        self.resize_label = transforms.Resize(scale, Image.NEAREST)
        # normalization
        self.to_tensor = transforms.ToTensor()

    def __getitem__(self, index):
        """Load, resize, (optionally) flip and tensorise sample *index*."""
        # load image and resize
        img = Image.open(self.image_list[index])
        img = self.resize_img(img)
        img = np.array(img)
        # load depth and resize
        depth = Image.open(self.depth_list[index])
        depth = self.resize_depth(depth)
        depth = np.array(depth)
        depth = depth[:, :, np.newaxis]  # add axis (480,640,1) so imgaug treats it as an image
        # load label and resize
        label = Image.open(self.label_list[index])
        label = self.resize_label(label)
        label = np.array(label)
        # convert label to one-hot graph
        label = one_hot_it(label, self.label_info).astype(np.uint8)
        # augment image and label: to_deterministic() freezes the random flip
        # so all three arrays receive the exact same transform.
        if self.mode == 'train':
            seq_det = self.fliplr.to_deterministic()
            img = seq_det.augment_image(img)
            depth = seq_det.augment_image(depth)
            label = seq_det.augment_image(label)
        # image -> to_tensor [3, H, W]
        img = Image.fromarray(img).convert('RGB')
        img = self.to_tensor(img).float()
        # depth -> to_tensor [1, H, W]
        # Scale into [0, 1]; assumes 16-bit depth PNGs (max 65535) -- TODO confirm.
        depth = depth / 65535
        depth = self.to_tensor(depth).float()
        # image + depth = RGBD (4 channels stacked on dim 0)
        rgbd = torch.cat((img, depth), 0)
        # label -> [num_classes, H, W]
        label = np.transpose(label, [2, 0, 1]).astype(np.float32)
        label = torch.from_numpy(label)
        return rgbd, label

    def __len__(self):
        # Dataset size equals the number of RGB images found.
        return len(self.image_list)
if __name__ == '__main__':
    # Smoke test.  NOTE(review): SUN.__init__ takes (image_path, depth_path,
    # label_path, csv_path, scale, ...) but only four positional arguments
    # are passed here, so this call would raise TypeError -- the depth path
    # appears to be missing; verify before running.
    data = SUN('/temp_disk/xs/sun/train/image', '/temp_disk/xs/sun/train/label_img', '/temp_disk/xs/sun/seg37_class_dict.csv', (480, 640))
    from utils import reverse_one_hot, get_label_info, colour_code_segmentation, compute_global_accuracy
    label_info = get_label_info('/temp_disk/xs/sun/seg37_class_dict.csv')
    for i, (img, label) in enumerate(data):
        print(img.shape)
        print(label.shape)
        print()
|
[
"shuaixie@zju.edu.cn"
] |
shuaixie@zju.edu.cn
|
85d1641bef7fd5909e21ca349f509fdd3421cc41
|
0987f31e64bcacb41ba3a1e20054d7b8ac0d7346
|
/contests/ABC062/C_correct.py
|
c036e7a61ea6e88afb42487b1449da2c480884d5
|
[] |
no_license
|
masakiaota/kyoupuro
|
81ae52ab3014fb2b1e10472994afa4caa9ea463b
|
74915a40ac157f89fe400e3f98e9bf3c10012cd7
|
refs/heads/master
| 2021-06-27T04:13:52.152582
| 2020-09-20T03:21:17
| 2020-09-20T03:21:17
| 147,049,195
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
# Main idea (ABC062 C):
# Try the "T" split and the three-parallel-strips split of the H x W bar,
# also on the 90-degree-rotated bar, and take the overall smallest
# max-min piece-area difference.  (A diagrammed explanation is in a
# separate uploaded PDF.)
H, W = list(map(int, input().split()))
# If the area is divisible by 3, three equal pieces exist: answer is 0.
if (H * W) % 3 == 0:
    print(0)
    exit()
def try_divi(W, H):
# パターン1
err = W
# パターン2
w2, w3 = W // 2, W // 2
if W % 2:
w2 += 1
for h1 in range(1, H // 2 + 1):
h2 = H-h1
S1, S2, S3 = h1*W, w2*h2, w3*h2
new_err = max(S1, S2, S3) - min(S1, S2, S3)
if new_err < err:
err = new_err
return err
print(min(try_divi(W, H), try_divi(H, W)))
|
[
"aotamasakimail@gmail.com"
] |
aotamasakimail@gmail.com
|
399d8f1ccae0a90aaadd43bbb8361fc501069da4
|
34652a47355a8dbe9200db229a1bbc62619de364
|
/Maths/Euler Sum_for.py
|
d2db391244ef070127e556456b9ec176533aed0a
|
[] |
no_license
|
btrif/Python_dev_repo
|
df34ab7066eab662a5c11467d390e067ab5bf0f8
|
b4c81010a1476721cabc2621b17d92fead9314b4
|
refs/heads/master
| 2020-04-02T13:34:11.655162
| 2019-11-10T11:08:23
| 2019-11-10T11:08:23
| 154,487,015
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
import math

# Partial sum of the Basel series sum(1/x^2), which converges to pi^2 / 6.
N = int(input("Number of terms you want to sum up : \n"))
# Built-in sum() over a generator replaces the manual accumulator, which
# also shadowed the builtin name `sum`.  Terms are added left-to-right in
# the same order, so the float result is bit-identical to the original.
total = sum(1 / x ** 2 for x in range(1, N + 1))
print("The result is : ", total, "\n")
print("The EXACT result is : ", math.pi ** 2 / 6)
|
[
"bogdan.evanzo@gmail.com"
] |
bogdan.evanzo@gmail.com
|
ec58eda1e6caea7793bb434bbf060c242815ae28
|
3cbf4a9d14cd487520f2bd05db10542705a37baf
|
/h2o-py/tests/testdir_algos/deeplearning/pyunit_weights_and_biases_deeplearning.py
|
76f2f8903d02c8119cc64a31daddf0f61973f02b
|
[
"Apache-2.0"
] |
permissive
|
KendraFabric/h2o-3
|
733ff021553ff2c2d8f0c3336450d886d029cf15
|
c75bc5d2dc644cc8c09df755185a4cc6e34e0d1a
|
refs/heads/master
| 2023-03-15T12:32:02.852026
| 2016-08-26T14:01:07
| 2016-08-26T14:27:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,413
|
py
|
import sys, os
sys.path.insert(1, os.path.join("..",".."))
import h2o
from tests import pyunit_utils
def weights_and_biases():
    """Regression test: a DeepLearning model trained with
    export_weights_and_biases=True must expose per-layer weight and bias
    frames with the expected dimensions (hidden layers 17 and 191,
    52 encoded input columns, 7 output classes).

    Note: Python 2 syntax (print statements) -- do not run under Python 3.
    """
    print "Test checks if Deep Learning weights and biases are accessible from R"

    covtype = h2o.upload_file(pyunit_utils.locate("smalldata/covtype/covtype.20k.data"))
    covtype[54] = covtype[54].asfactor()  # column 54 is the response class

    from h2o.estimators.deeplearning import H2ODeepLearningEstimator
    # reproducible=True + fixed seed makes layer shapes deterministic.
    dlmodel = H2ODeepLearningEstimator(hidden=[17,191],
                                       epochs=1,
                                       balance_classes=False,
                                       reproducible=True,
                                       seed=1234,
                                       export_weights_and_biases=True)
    dlmodel.train(x=range(54),y=54,training_frame=covtype)
    print dlmodel

    # One weight and one bias frame per layer transition (input->17->191->out).
    weights1 = dlmodel.weights(0)
    weights2 = dlmodel.weights(1)
    weights3 = dlmodel.weights(2)

    biases1 = dlmodel.biases(0)
    biases2 = dlmodel.biases(1)
    biases3 = dlmodel.biases(2)

    # Layer 1 weights: 17 units x 52 inputs.
    w1c = weights1.ncol
    w1r = weights1.nrow
    assert w1c == 52, "wrong dimensionality! expected {0}, but got {1}.".format(52, w1c)
    assert w1r == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, w1r)

    # Layer 2 weights: 191 units x 17 inputs.
    w2c = weights2.ncol
    w2r = weights2.nrow
    assert w2c == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, w2c)
    assert w2r == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, w2r)

    # Output-layer weights: 7 classes x 191 inputs.
    w3c = weights3.ncol
    w3r = weights3.nrow
    assert w3c == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, w3c)
    assert w3r == 7, "wrong dimensionality! expected {0}, but got {1}.".format(7, w3r)

    # Biases are single-column frames, one row per unit in the layer.
    b1c = biases1.ncol
    b1r = biases1.nrow
    assert b1c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b1c)
    assert b1r == 17, "wrong dimensionality! expected {0}, but got {1}.".format(17, b1r)

    b2c = biases2.ncol
    b2r = biases2.nrow
    assert b2c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b2c)
    assert b2r == 191, "wrong dimensionality! expected {0}, but got {1}.".format(191, b2r)

    b3c = biases3.ncol
    b3r = biases3.nrow
    assert b3c == 1, "wrong dimensionality! expected {0}, but got {1}.".format(1, b3c)
    assert b3r == 7, "wrong dimensionality! expected {0}, but got {1}.".format(7, b3r)
if __name__ == "__main__":
    # Executed directly: run under the pyunit standalone harness.
    pyunit_utils.standalone_test(weights_and_biases)
else:
    # Imported by the test runner: call the test directly.
    weights_and_biases()
|
[
"spnrpa@gmail.com"
] |
spnrpa@gmail.com
|
e8858b1d17db014e60ff494d1957a7e6b40abea3
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/group_and_right_life/have_group/point/long_point_or_next_place.py
|
353072ad6581aa82d34c921d2190fd9605b2db54
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
#! /usr/bin/env python

def person_or_next_work(str_arg):
    """Echo *str_arg* (via problem), then print the literal 'fact'."""
    problem(str_arg)
    print('fact')


def problem(str_arg):
    """Print the given argument verbatim."""
    print(str_arg)


if __name__ == '__main__':
    person_or_next_work('last_place')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
9997b4b6559fd1d70162f2a3a4866a702f635c0f
|
55591cff4b4dbfec91ac8e498652c2ad60bfdffe
|
/datahub/dataset/core/views.py
|
b8c2f58a8861f7ede2eddf27716026aace8c11eb
|
[
"MIT"
] |
permissive
|
alixedi/data-hub-api-cd-poc
|
2caf6575cced33f0b2b22582d8bdcd60e99ba7d8
|
a5e5ea45bb496c0d2a06635864514af0c7d4291a
|
refs/heads/develop
| 2020-12-26T05:52:39.575158
| 2020-01-30T15:27:27
| 2020-01-30T15:27:27
| 237,407,350
| 0
| 0
|
MIT
| 2020-02-07T12:38:47
| 2020-01-31T10:25:34
| null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
from rest_framework.views import APIView
from config.settings.types import HawkScope
from datahub.core.auth import PaaSIPAuthentication
from datahub.core.hawk_receiver import (
HawkAuthentication,
HawkResponseSigningMixin,
HawkScopePermission,
)
from datahub.dataset.core.pagination import DatasetCursorPagination
class BaseDatasetView(HawkResponseSigningMixin, APIView):
    """
    Base API view to be used for creating endpoints for consumption
    by Data Flow and insertion into Data Workspace.

    Subclasses implement get_dataset(); this base handles Hawk
    authentication, response signing and cursor pagination.
    """

    # PaaS IP allow-listing runs before Hawk request authentication.
    authentication_classes = (PaaSIPAuthentication, HawkAuthentication)
    permission_classes = (HawkScopePermission, )
    # Callers must hold the data-flow API Hawk scope.
    required_hawk_scope = HawkScope.data_flow_api
    pagination_class = DatasetCursorPagination

    def get(self, request):
        """Endpoint which serves all records for a specific Dataset"""
        dataset = self.get_dataset()
        paginator = self.pagination_class()
        page = paginator.paginate_queryset(dataset, request, view=self)
        return paginator.get_paginated_response(page)

    def get_dataset(self):
        """Return a list of records (must be overridden by subclasses)."""
        raise NotImplementedError
|
[
"nick.ir.ross@gmail.com"
] |
nick.ir.ross@gmail.com
|
54be352c9be4e5ee113aa84590c71e535dc20a78
|
21a7969bea3e4fb4b4734443aa1702000c26336b
|
/009-Guessing-Game-One.py
|
57b5a45057dda0866b8a9730d4d8a0efa3f39ef9
|
[] |
no_license
|
jaapdejong/python-challenges
|
635823e7c67c6b5d769c22298d385cfa40cc9f9d
|
04fbb009d6c017bdb17a2a6cc45c543a13494fe0
|
refs/heads/master
| 2021-01-15T22:47:23.882155
| 2017-08-10T11:18:08
| 2017-08-10T11:18:08
| 99,915,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
#!/usr/bin/python3
#
# Exercise 9
# Generate a random number between 1 and 9 (including 1 and 9).
# Ask the user to guess the number, then tell them whether they guessed too low, too high, or exactly right.
# (Hint: remember to use the user input lessons from the very first exercise)
#
from random import randint

# Secret number the player has to find.
computer = randint(1, 9)

guessed = False
while not guessed:
    player = int(input("Please enter a number between 1 and 9: "))
    if player == computer:
        print("Found!!")
        guessed = True  # end the loop (same effect as the original break)
    elif player < computer:
        print("Too low")
    else:
        print("Too high")
|
[
"jaap.dejong@nedap.com"
] |
jaap.dejong@nedap.com
|
736d50cd559b4a440abd688caf3c6c50d476faaa
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/machinelearning/azure-mgmt-machinelearningservices/generated_samples/registry/code_version/delete.py
|
3e012305c57cb4acd3c379e49fb0e9f5eebf2636
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.machinelearningservices import MachineLearningServicesMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-machinelearningservices
# USAGE
python delete.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # DefaultAzureCredential reads AZURE_CLIENT_ID / AZURE_TENANT_ID /
    # AZURE_CLIENT_SECRET from the environment (see module docstring).
    client = MachineLearningServicesMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    # begin_delete starts a long-running operation; .result() blocks until
    # the registry code version is actually deleted.
    client.registry_code_versions.begin_delete(
        resource_group_name="test-rg",
        registry_name="my-aml-registry",
        code_name="string",
        version="string",
    ).result()


# x-ms-original-file: specification/machinelearningservices/resource-manager/Microsoft.MachineLearningServices/stable/2023-04-01/examples/Registry/CodeVersion/delete.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
6dd679509464d24a600eedfe687833818a76add8
|
54aeeae3c4258e2312028ecd2a38242dd3f4f34d
|
/examples/BLE_Alerts_Secure_2/main.py
|
3bea866358ed00335b014cf3690356f44e0e374c
|
[] |
no_license
|
zerynth/lib-espressif-esp32ble
|
09f4f9ae28056d49d4062c19c0a330bff61396f3
|
17ab2a1fbd80e99420cd988c8fc148b8f366e379
|
refs/heads/master
| 2021-06-08T04:12:22.808499
| 2020-09-24T15:31:45
| 2020-09-24T15:31:45
| 167,332,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,832
|
py
|
################################################################################
# BLE Alerts with Security 2
#
# Created by Zerynth Team 2019 CC
# Author: G. Baldi
###############################################################################
import streams
#import the ESP32 BLE driver: a BLE capable VM is also needed!
from espressif.esp32ble import esp32ble as bledrv
# then import the BLE modue
from wireless import ble
streams.serial()
notifications_enabled = True
connected = False
# Let's define some callbacks
def value_cb(status, val):
    """GATT write callback for the control characteristic.

    Byte 0 of the written value selects the action: 0 enables alert
    notifications, 2 disables them, anything else leaves them unchanged.
    """
    global notifications_enabled
    print("Value changed to", val[0], val[1])
    command = val[0]
    if command == 0:
        print("Notifications enabled")
        notifications_enabled = True
    elif command == 2:
        notifications_enabled = False
        print("Notifications disabled")
    else:
        print("Notifications unchanged")
def connection_cb(address):
    """GAP callback: record that a central has connected."""
    global connected
    print("Connected to", ble.btos(address))
    connected = True
def disconnection_cb(address):
    """GAP callback: log the disconnect and resume advertising."""
    global connected
    print("Disconnected from", ble.btos(address))
    # Advertising stops once a central connects; restart it so the device
    # becomes discoverable again.
    ble.start_advertising()
    connected = False
# Let's define some security callbacks
def match_key_cb(passkey):
    """Security callback: display the pairing passkey and poll BTN0.

    The user has ~5 seconds (5 polls, 1 s apart) to press the button and
    confirm the passkey; otherwise the pairing attempt is rejected.
    """
    print("MASTER KEY IS:",passkey,"CAN WE PROCEED? PRESS BUTTON FOR YES")
    pinMode(BTN0,INPUT)
    remaining = 5
    while remaining > 0:
        if digitalRead(BTN0)!=0:
            ble.confirm_passkey(1)
            print("Confirmed!")
            return
        sleep(1000)
        remaining -= 1
    ble.confirm_passkey(0)
    print("Not confirmed!")
try:
    # initialize BLE driver
    bledrv.init()
    # Set GAP name and LEVEL 2 security
    # !!! If security is not set, no secure connection will be possible
    ble.gap("ZNotifier",security=(ble.SECURITY_MODE_1,ble.SECURITY_LEVEL_2))
    # add some GAP callbacks
    ble.add_callback(ble.EVT_CONNECTED,connection_cb)
    ble.add_callback(ble.EVT_DISCONNECTED,disconnection_cb)
    # Create a GATT Service: let's try an Alert Notification Service
    # (here are the specs: https://www.bluetooth.com/specifications/gatt/viewer?attributeXmlFile=org.bluetooth.service.alert_notification.xml)
    s = ble.Service(0x1811)
    # The Alert Notification service has multiple characteristics. Let's add them one by one
    # Create a GATT Characteristic for counting new alerts (16-byte payload,
    # readable and notifiable).
    # specs: https://www.bluetooth.com/specifications/gatt/viewer?attributeXmlFile=org.bluetooth.characteristic.supported_new_alert_category.xml
    cn = ble.Characteristic(0x2A47, ble.NOTIFY | ble.READ,16,"New Alerts",ble.BYTES)
    # Add the GATT Characteristic to the Service
    s.add_characteristic(cn)
    # Create another GATT Characteristic for enabling/disabling alerts (2 bytes, write-only)
    # specs: https://www.bluetooth.com/specifications/gatt/viewer?attributeXmlFile=org.bluetooth.characteristic.alert_notification_control_point.xml
    cc = ble.Characteristic(0x2A44, ble.WRITE ,2,"Alerts control",ble.BYTES)
    # Add the GATT Characteristic to the Service
    s.add_characteristic(cc)
    # Add a callback to be notified of changes (value_cb toggles notifications_enabled)
    cc.set_callback(value_cb)
    # Add the Service. You can create additional services and add them one by one
    ble.add_service(s)
    # Configure security. BLE security is very flexible.
    # In this case we declare that the device has only an output capability with yes or no input (CAP_DISPLAY_YES_NO),
    # that we require a bonding (storage of the keys after pairing)
    # and that we want both secure connection and man-in-the-middle protection.
    ble.security(
        capabilities=ble.CAP_DISPLAY_YES_NO,
        bonding=ble.AUTH_BOND,
        scheme=ble.AUTH_SC|ble.AUTH_MITM,
        key_size=16)
    # To do so, we need a callback to accept the passkey when needed
    ble.add_callback(ble.EVT_MATCH_PASSKEY,match_key_cb)
    # Setup advertising to 50ms
    ble.advertising(50)
    # Start the BLE stack
    ble.start()
    # Now start advertising
    ble.start_advertising()
except Exception as e:
    print(e)

# NOTE(review): the original comment said "Uncomment the following lines to
# delete bonded devices", but the lines are NOT commented out -- every boot
# removes all stored bonds. Comment them out to preserve pairings.
for bond in ble.bonded():
    print("Removing bonded:",ble.btos(bond))
    ble.remove_bonded(bond)
# loop forever: roughly every other 5 s tick (while a central is connected
# and notifications are enabled) bump the alert counter and push an update.
while True:
    print(".")
    if random(0,100)<50 and notifications_enabled and connected:
        value = bytearray(cn.get_value())
        value[0]=0 # simple alert type
        if value[1]<255:
            value[1]=value[1]+1 # add a notification (counter saturates at 255)
        print("Adding a new notification, total of",value[1])
        # the remaining 14 bytes can be some text
        # NOTE(review): assigning a str into a bytearray slice relies on
        # Zerynth semantics; CPython would require bytes here -- confirm.
        value[2:10] = "Zerynth!"
        # set the new value. If ble notifications are enabled, the connected device will receive the change
        cn.set_value(value)
    sleep(5000)
|
[
"dev@zerynth.com"
] |
dev@zerynth.com
|
cc525d1d2d9552b17bd4b2818466de9dc2db4e33
|
f359c953ef823cc44f7d87a3736c3e4fb1817c0b
|
/EDBRCommon/python/simulation/RunIIDR74X/RSGravToZZToLLQQ_M-4500.py
|
f8823971e11ccf67430c5a39efff4e49bce8f793
|
[] |
no_license
|
jruizvar/ExoDiBosonResonancesRun2
|
aa613200725cf6cd825d7bcbde60d2e39ba84e39
|
b407ab36504d0e04e6bddba4e57856f9f8c0ec66
|
refs/heads/Analysis76X
| 2021-01-18T20:00:57.358494
| 2016-05-30T21:30:19
| 2016-05-30T21:30:19
| 23,619,682
| 1
| 1
| null | 2016-04-22T18:38:45
| 2014-09-03T12:41:07
|
Python
|
UTF-8
|
Python
| false
| false
| 879
|
py
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment: defines the input source for a
# RunIISpring15DR74 MINIAODSIM sample (RS graviton -> ZZ -> llqq, M=4500).
# input = -1 means "process all events in the input files".
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()   # primary input files
secFiles = cms.untracked.vstring()    # secondary (parent) files; none here
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
       '/store/mc/RunIISpring15DR74/RSGravToZZToLLQQ_kMpl01_M-4500_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/50000/0E6BC100-2608-E511-81FE-B083FED7685B.root',
       '/store/mc/RunIISpring15DR74/RSGravToZZToLLQQ_kMpl01_M-4500_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/50000/2C69D809-2608-E511-9F6F-AC853DA06A1A.root',
       '/store/mc/RunIISpring15DR74/RSGravToZZToLLQQ_kMpl01_M-4500_TuneCUETP8M1_13TeV-pythia8/MINIAODSIM/Asympt25ns_MCRUN2_74_V9-v1/50000/96B4A5FF-2508-E511-8C7A-AC853D9DACD7.root' ] );
secFiles.extend( [
               ] )
|
[
"jruizvar@cern.ch"
] |
jruizvar@cern.ch
|
a9fc27c1b63235d582097a3ea144208589f0880d
|
b3b066a566618f49ae83c81e963543a9b956a00a
|
/Supervised Learning with scikit-learn/01_Classification/05_k-Nearest Neighbors Predict.py
|
96614384e3f708dc1e9787ed95b2a117a9ba84a8
|
[] |
no_license
|
ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020
|
666c4129c3f0b5d759b511529a365dfd36c12f1a
|
f3d20b788c8ef766e7c86c817e6c2ef7b69520b8
|
refs/heads/master
| 2022-12-22T21:09:13.955273
| 2020-09-30T01:16:05
| 2020-09-30T01:16:05
| 289,991,534
| 2
| 0
| null | 2020-08-24T17:15:43
| 2020-08-24T17:15:42
| null |
UTF-8
|
Python
| false
| false
| 2,183
|
py
|
'''
k-Nearest Neighbors: Predict
Having fit a k-NN classifier, you can now use it to predict the label of a new data point. However, there is no unlabeled data available since all of it was used to fit the model! You can still use the .predict() method on the X that was used to fit the model, but it is not a good indicator of the model's ability to generalize to new, unseen data.
In the next video, Hugo will discuss a solution to this problem. For now, a random unlabeled data point has been generated and is available to you as X_new. You will use your classifier to predict the label for this new data point, as well as on the training data X that the model has already seen. Using .predict() on X_new will generate 1 prediction, while using it on X will generate 435 predictions: 1 for each sample.
The DataFrame has been pre-loaded as df. This time, you will create the feature array X and target variable array y yourself.
INSTRUCTIONS
100XP
Create arrays for the features and the target variable from df. As a reminder, the target variable is 'party'.
Instantiate a KNeighborsClassifier with 6 neighbors.
Fit the classifier to the data.
Predict the labels of the training data, X.
Predict the label of the new data point X_new.
'''
# Import KNeighborsClassifier from sklearn.neighbors
from sklearn.neighbors import KNeighborsClassifier

# Create arrays for the features and the response variable.
# The target is the 'party' column; every other column is a feature.
y = df['party'].values
X = df.drop('party', axis=1).values

# Create a k-NN classifier with 6 neighbors: knn
# (fix: the original line read `knn = knn = KNeighborsClassifier(...)`,
# a duplicated assignment left in by mistake)
knn = KNeighborsClassifier(n_neighbors=6)

# Fit the classifier to the data
knn.fit(X, y)

# Predict the labels for the training data X (one prediction per sample)
y_pred = knn.predict(X)

# Predict and print the label for the new data point X_new
new_prediction = knn.predict(X_new)
print("Prediction: {}".format(new_prediction))
#========================================================#
#                      DEVELOPER                         #
#                   BasitAminBhatti                      #
#                        Github                          #
#           https://github.com/basitaminbhatti          #
#========================================================#
|
[
"Your-Email"
] |
Your-Email
|
2ff172e2e6211730ddf823e829aa16698fff35b2
|
2f2667682bb78578445b9e3aac7cc62cfba83d5a
|
/googlenet/nets/cnn_model/CNN_MODEL.py
|
cb42ca784f10d89d88a49599ffdab9680d78e45d
|
[] |
no_license
|
Yorwxue/trt_experence
|
9c770c2a1cb7c48c9d7f21c46be0107de91f1c41
|
778a6cef019dd8afdae6b608b3cbacb56480c7b1
|
refs/heads/master
| 2022-12-21T12:38:13.108402
| 2019-08-01T08:11:10
| 2019-08-01T08:11:10
| 195,760,238
| 0
| 0
| null | 2022-12-08T05:57:26
| 2019-07-08T07:36:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,407
|
py
|
import tensorflow as tf
class cnn_model(object):
    """A small 4-block convolutional classifier built with TensorFlow 1.x
    graph-mode ops.

    Construction happens in two phases: __init__ builds the inference graph
    (conv stack + fully connected head producing self.logits); build_graph
    then attaches the loss, optimizer and accuracy metric once the label
    placeholder is available.
    """

    def __init__(self, input_placeholder, num_classes):
        # input_placeholder: NHWC image tensor with 3 channels
        # (in_channels=3 in the first conv below).
        self.inputs = input_placeholder
        self.num_classes = num_classes
        self._build_model()
        self.labels = None  # set later by build_graph()

    def build_graph(self, label_place_holder):
        """Attach loss/optimizer/metrics for the given one-hot label tensor."""
        self.labels = label_place_holder
        self._build_train_op()
        return

    def _build_model(self):
        """Build the inference graph: 4x (conv3x3 -> maxpool/2) then 2 FC layers."""
        with tf.variable_scope('cnn'):
            with tf.variable_scope('unit-1'):
                x = self._conv2d(self.inputs, name='cnn-1', filter_size=3, in_channels=3, out_channels=64, strides=1)
                # x = self._leaky_relu(x, 0.01)
                x = self._max_pool(x, 2, strides=2)
            with tf.variable_scope('unit-2'):
                x = self._conv2d(x, name='cnn-2', filter_size=3, in_channels=64, out_channels=128, strides=1)
                # x = self._leaky_relu(x, 0.01)
                x = self._max_pool(x, 2, strides=2)
            with tf.variable_scope('unit-3'):
                x = self._conv2d(x, name='cnn-3', filter_size=3, in_channels=128, out_channels=128, strides=1)
                # x = self._leaky_relu(x, 0.01)
                x = self._max_pool(x, 2, strides=2)
            with tf.variable_scope('unit-4'):
                x = self._conv2d(x, name='cnn-4', filter_size=3, in_channels=128, out_channels=256, strides=1)
                # x = self._leaky_relu(x, 0.01)
                x = self._max_pool(x, 2, strides=2)
        with tf.variable_scope('fc'):
            # [batch_size, max_stepsize, num_features]
            # Flatten the conv output; requires a statically known H/W/C.
            batch_size, height, width, channels = x.get_shape().as_list()
            x = tf.reshape(x, [-1, height * width *channels])
            outputs = self._fc(x, input_shape=height * width *channels, output_shape=64, name="fc-1")
            outputs = self._fc(outputs, input_shape=64, output_shape=self.num_classes, name="fc-2")
        self.logits = tf.identity(outputs, name="logits")

    def _build_train_op(self):
        """Create the softmax cross-entropy loss, Adam optimizer and accuracy metric."""
        self.global_step = tf.Variable(0, trainable=False)
        # labels are expected one-hot (argmax is taken for the accuracy metric below)
        self.loss = tf.nn.softmax_cross_entropy_with_logits(
            labels=self.labels,
            logits=self.logits,
        )
        self.optimizer = tf.train.AdamOptimizer().minimize(
            self.loss,
            global_step=self.global_step
        )
        train_ops = [self.optimizer]
        self.train_op = tf.group(*train_ops)
        self.cost = tf.reduce_mean(self.loss)
        # Streaming accuracy: acc is the running value, acc_op the update op.
        self.acc, self.acc_op = tf.metrics.accuracy(
            tf.argmax(self.labels, 1),
            tf.argmax(self.logits, 1),
            name="metrics"
        )

    def _conv2d(self, x, name, filter_size, in_channels, out_channels, strides):
        """SAME-padded 2-D convolution with Xavier-initialized weights and bias."""
        with tf.variable_scope(name):
            kernel = tf.get_variable(name='conv',
                                     shape=[filter_size, filter_size, in_channels, out_channels],
                                     dtype=tf.float32,
                                     initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable(name='bais',
                                shape=[out_channels],
                                dtype=tf.float32,
                                initializer=tf.constant_initializer())
            con2d_op = tf.nn.conv2d(x, kernel, [1, strides, strides, 1], padding='SAME')
        return tf.nn.bias_add(con2d_op, b)

    def _leaky_relu(self, x, leakiness=0.0):
        # Currently unused (the calls in _build_model are commented out).
        return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')

    def _max_pool(self, x, ksize, strides):
        """SAME-padded max pooling over spatial dimensions."""
        return tf.nn.max_pool(x,
                              ksize=[1, ksize, ksize, 1],
                              strides=[1, strides, strides, 1],
                              padding='SAME',
                              name='max_pool')

    def _fc(self, x, input_shape, output_shape, name):
        """Fully connected layer (no activation), Xavier-initialized."""
        with tf.variable_scope(name):
            W = tf.get_variable(name='w',
                                shape=[input_shape, output_shape],
                                dtype=tf.float32,
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable(name='b',
                                shape=[output_shape],
                                dtype=tf.float32,
                                initializer=tf.constant_initializer())
        return tf.matmul(x, W) + b
|
[
"yorwxue@gmail.com"
] |
yorwxue@gmail.com
|
2a129138b050a28bf745e50795dcbd843554f7d3
|
d7f45fac46598da9825a404d7511df7474237e4a
|
/ex.071.py
|
ae90bc20c6a1a345fd4295374d5d35af552c7539
|
[] |
no_license
|
MarceloBCS/Exercicios_Curso_em_video
|
b4a8cbc8573e1303065c0cf1baad25c47d5a2fd8
|
a90fd67d83cf154f3554f962815fb791d3508d0c
|
refs/heads/master
| 2022-12-29T19:15:50.007022
| 2020-10-13T05:09:28
| 2020-10-13T05:09:28
| 303,592,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
# ATM note-dispensing exercise: read a withdrawal amount and print how many
# notes of each denomination (50, 20, 10, 1) are needed.
from datetime import date  # moved to the top: imports belong at the head of the script

# ANSI colour escape codes used for terminal output.
cor = {'amarelo': '\033[1;33m', 'red': '\033[1;31m', 'limp': '\033[m'}

print(cor['red'], '='*22)
print(f' Bhering Bank')
print('='*24, cor['limp'])

n = int(input('Valor do Saque: R$ '))

# Greedy note breakdown, largest denomination first. This replaces four
# copy-pasted div/mod blocks; the printed lines are identical to the original.
restante = n
for valor, rotulo in ((50, 'R$50'), (20, 'R$20'), (10, 'R$10'), (1, 'R$ 1')):
    quantidade, restante = divmod(restante, valor)
    print('{1}{0:>3} nota(s){2} {3}'.format(quantidade, cor['amarelo'], cor['limp'], rotulo))

print(f'\nProcessado em {date.today()}')
|
[
"63213758+MarceloBCS@users.noreply.github.com"
] |
63213758+MarceloBCS@users.noreply.github.com
|
1517091c99791653d0cab3474d9bab53eab31f3b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_neglect.py
|
a50f6e279fa4b54dc4dbd0054f3c11b55146c2f7
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
#calss header
class _NEGLECT():
def __init__(self,):
self.name = "NEGLECT"
self.definitions = [u'to not give enough care or attention to people or things that are your responsibility: ', u'to not do something, often because you forget: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
73139227eba251932205e3dbb90771cec1951790
|
92993cff825da80a8ff601572a0c52b0b7d3cbde
|
/algorithms/Svm/APG/L2/APG_L2_m22.py
|
a2277a8abdf5a9f18704027fccf9cf7bcdc2e1f7
|
[] |
no_license
|
yingzhuoy/MRs-of-linear-models
|
06e8b1f84b08c6aa77553813824cf35c1806c5a7
|
c3df8299e039a12613f2022b370b8c3e9c2dd822
|
refs/heads/master
| 2023-04-07T23:09:37.736952
| 2021-04-04T05:33:37
| 2021-04-04T05:33:37
| 265,124,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,845
|
py
|
import numpy as np
from numpy import linalg
import cvxopt
from cvxopt import matrix,solvers
from algorithms.clf import Clf
def backtracking(l0, x0, p, q, low, up):
    """Take one projected-gradient step from x0 with backtracking line search.

    The objective is L(x) = 0.5*x'.P.x + q'.x. Starting from step size l0,
    the step is halved (beta = 0.5, at most 128 times) until the projected
    point satisfies the sufficient-decrease condition.

    l0      -- initial step size
    x0      -- current iterate (np.matrix column vector)
    p, q    -- quadratic and linear terms of the objective (np.matrix)
    low, up -- elementwise box constraints applied after the gradient step

    Returns (xp, l): the accepted projected point and the step size used.
    """
    beta = 0.5
    l = l0
    L0 = 0.5*x0.T*(p*x0) + q.T*x0
    g0 = p*x0 + q
    for k in range(128):
        # Gradient step, then projection onto the box [low, up].
        # Fix: the original added a spurious constant offset here
        # (`xp = x0 - l*g0 + 2.4710787568352073`, marked "----bug----"),
        # which shifted every iterate and broke the descent step.
        xp = x0 - l * g0
        xp[xp < low] = low
        xp[xp > up] = up
        Lx = 0.5*xp.T*(p*xp) + q.T*xp
        gt = (x0-xp) / l
        # Sufficient-decrease test; shrink the step while it fails.
        if Lx > L0 - l *(g0.T*gt) + 0.5*l*gt.T*(gt):
            l = beta * l
        else:
            break
    return xp, l
def projected_apg(p, q, bounds, step_size=0.1, max_iter=1000):
    """Minimize 0.5*x'.P.x + q'.x over a box via projected gradient descent
    with backtracking line search.

    p, q      -- np.matrix quadratic and linear terms of the objective
    bounds    -- (low, up) scalar bounds applied elementwise
    step_size -- unused (kept for interface compatibility)
    max_iter  -- cap on the number of projected-gradient iterations

    Returns the iterate *preceding* the last accepted step, matching the
    convergence test (which compares consecutive iterates).
    """
    lower, upper = bounds
    dim = p.shape[0]
    current = np.ones((dim, 1), np.float64)
    previous = current
    eigenvalues, _ = np.linalg.eigh(p)
    # eigh returns eigenvalues in ascending order, so the last entry is the
    # largest; its inverse bounds a stable initial step size.
    step = 1/eigenvalues[-1] - 1e-10
    for iteration in range(max_iter):
        previous = current
        current, step = backtracking(step, previous, p, q, lower, upper)
        # Stop once consecutive iterates agree to within 1e-6.
        if iteration > 1 and np.linalg.norm(previous - current) < 1e-6:
            break
    return previous
#L2-svm
class APG_L2_m22():
    """L2-loss (squared hinge) linear SVM trained by solving the dual QP
    with projected gradient descent (see projected_apg)."""

    def fit(self, X, y, C=1.0):
        """Fit a linear L2-SVM and return a Clf(w, b) classifier.

        X -- (m, n) sample matrix
        y -- length-m label vector; entries are assumed to be in {-1, +1}
             (not validated here -- TODO confirm against callers)
        C -- regularization strength. Generalized from the previously
             hard-coded value 1.0; the default preserves old behaviour,
             so existing callers are unaffected.
        """
        m, n = X.shape
        # Fold the bias into the weight vector by appending a constant-1 feature.
        X = np.column_stack((X, np.ones((m, 1))))
        y = y.astype(np.float64)
        data_num = len(y)
        # Dual QP: minimize 0.5*a'.P.a + q'.a subject to a >= 0.
        # The 0.5/C diagonal term comes from the squared hinge loss.
        kernel = np.dot(X, np.transpose(X))
        p = np.matrix(np.multiply(kernel, np.outer(y, y))) + np.diag(np.ones(data_num, np.float64)) * .5/C
        q = np.matrix(-np.ones([data_num, 1], np.float64))
        bounds = (0, np.inf)
        alpha_svs = projected_apg(p, q, bounds)
        # Recover the primal weights: w = sum_i alpha_i * y_i * x_i.
        y1 = np.reshape(y, (-1, 1))
        lambda1 = np.multiply(y1, alpha_svs)
        w = np.array(np.dot(X.T, lambda1)).reshape(-1)
        # The bias is the weight attached to the appended constant feature.
        b = w[n]
        w = w[0:n]
        clf = Clf(w, b)
        return clf
|
[
"yingzhuoy@qq.com"
] |
yingzhuoy@qq.com
|
1b4e72269db5cacbd411ce69d585d1981dee4c84
|
3449140c29d292c910527cc7163689d215d2f22d
|
/delivery/urls.py
|
edddc5b0d4461c75abdbf2a5259fa5461fd3ed6c
|
[] |
no_license
|
gayatrishivani/ichoose-ead-project-sem6
|
89afeda33261bb18b3848e9171865409d0cf6b44
|
ae0f59769950656683bc72d1e76cec2bcfa7fd07
|
refs/heads/master
| 2023-03-08T18:43:02.970287
| 2021-02-25T19:06:25
| 2021-02-25T19:06:25
| 342,350,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from django.contrib.auth import views
from django.urls import path
from .views import *
# URL routes for the delivery app.
urlpatterns = [
    # 'order/' is handled by the OrderAPIView class-based view.
    path('order/', OrderAPIView.as_view(), name="order"),
    # 'deliver/' is handled by the deliver_verify function view.
    path('deliver/', deliver_verify,name ="deliver"),
]
|
[
"you@example.com"
] |
you@example.com
|
971536fe0a24d6e56e72428cf6577eb03a601f75
|
76133934b1dd287273a9bfa0c801d10d08a21b21
|
/test/functional/walletbackup.py
|
3734f2cfc1a6af61d4e6020705c5022fe3698f71
|
[
"MIT"
] |
permissive
|
kenfmcoin/kenfmcoin
|
d8783b34fcb3ae01067e8d1b33e3a73e3b82b1f9
|
1fa48487593233f2066757dc54f48b2349e2d9db
|
refs/heads/master
| 2020-03-10T17:53:31.569229
| 2018-04-14T12:28:55
| 2018-04-14T12:28:55
| 129,511,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,275
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The KenFMcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from random import randint
import shutil
from test_framework.test_framework import KenFMcoinTestFramework
from test_framework.util import *
class WalletBackupTest(KenFMcoinTestFramework):
    """Functional test for wallet backup/restore.

    Four nodes: 0-2 are spenders, 3 is the miner. Wallets are backed up
    with backupwallet (binary wallet.dat) and dumpwallet (text dump), the
    spender wallets are destroyed, and both restore paths are verified to
    reproduce the exact balances.
    """

    def set_test_params(self):
        self.num_nodes = 4
        self.setup_clean_chain = True
        # nodes 0, 1 and 2 are spenders, let's give them a keypool=100
        # (the fourth entry -- the miner -- keeps the default keypool)
        self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]

    def setup_network(self, split=False):
        # Star topology around the miner (node3), plus a node2<->node0 link.
        self.setup_nodes()
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)
        self.sync_all()

    def one_send(self, from_node, to_address):
        # With 50% probability, send a random amount in {0.1, ..., 1.0}.
        if (randint(1,2) == 1):
            amount = Decimal(randint(1,10)) / Decimal(10)
            self.nodes[from_node].sendtoaddress(to_address, amount)

    def do_one_round(self):
        # Each spender may send to each of the other two spenders.
        a0 = self.nodes[0].getnewaddress()
        a1 = self.nodes[1].getnewaddress()
        a2 = self.nodes[2].getnewaddress()
        self.one_send(0, a1)
        self.one_send(0, a2)
        self.one_send(1, a0)
        self.one_send(1, a2)
        self.one_send(2, a0)
        self.one_send(2, a1)
        # Have the miner (node3) mine a block.
        # Must sync mempools before mining.
        sync_mempools(self.nodes)
        self.nodes[3].generate(1)
        sync_blocks(self.nodes)

    # As above, this mirrors the original bash test.
    def start_three(self):
        # Restart the three spender nodes and re-establish the topology.
        self.start_node(0)
        self.start_node(1)
        self.start_node(2)
        connect_nodes(self.nodes[0], 3)
        connect_nodes(self.nodes[1], 3)
        connect_nodes(self.nodes[2], 3)
        connect_nodes(self.nodes[2], 0)

    def stop_three(self):
        self.stop_node(0)
        self.stop_node(1)
        self.stop_node(2)

    def erase_three(self):
        # Delete the spender wallets so the restore paths can be exercised.
        os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
        os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat")

    def run_test(self):
        self.log.info("Generating initial blockchain")
        # One block per spender, then 100 miner blocks to mature the coinbases.
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        self.nodes[2].generate(1)
        sync_blocks(self.nodes)
        self.nodes[3].generate(100)
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 50)
        assert_equal(self.nodes[3].getbalance(), 0)
        self.log.info("Creating transactions")
        # Five rounds of sending each other transactions.
        for i in range(5):
            self.do_one_round()
        self.log.info("Backing up")
        tmpdir = self.options.tmpdir
        self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak")
        self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak")
        self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak")
        self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump")
        self.log.info("More transactions")
        # More activity AFTER the backup, to prove the backups still restore
        # the full final balance (keypool look-ahead covers the new keys).
        for i in range(5):
            self.do_one_round()
        # Generate 101 more blocks, so any fees paid mature
        self.nodes[3].generate(101)
        self.sync_all()
        balance0 = self.nodes[0].getbalance()
        balance1 = self.nodes[1].getbalance()
        balance2 = self.nodes[2].getbalance()
        balance3 = self.nodes[3].getbalance()
        total = balance0 + balance1 + balance2 + balance3
        # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
        # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
        assert_equal(total, 5700)
        ##
        # Test restoring spender wallets from backups
        ##
        self.log.info("Restoring using wallet.dat")
        self.stop_three()
        self.erase_three()
        # Start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
        # Restore wallets from backup
        shutil.copyfile(tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat")
        shutil.copyfile(tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat")
        self.log.info("Re-starting nodes")
        self.start_three()
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)
        self.log.info("Restoring using dumped wallet")
        self.stop_three()
        self.erase_three()
        #start node2 with no chain
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks")
        shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate")
        self.start_three()
        # Fresh wallets: balances must be zero before the import.
        assert_equal(self.nodes[0].getbalance(), 0)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump")
        self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump")
        self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump")
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].getbalance(), balance0)
        assert_equal(self.nodes[1].getbalance(), balance1)
        assert_equal(self.nodes[2].getbalance(), balance2)
        # Backup to source wallet file must fail
        sourcePaths = [
            tmpdir + "/node0/regtest/wallet.dat",
            tmpdir + "/node0/./regtest/wallet.dat",
            tmpdir + "/node0/regtest/",
            tmpdir + "/node0/regtest"]
        for sourcePath in sourcePaths:
            assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
# Standard functional-test entry point: run only when executed directly.
if __name__ == '__main__':
    WalletBackupTest().main()
|
[
"37983255+spineinhalb@users.noreply.github.com"
] |
37983255+spineinhalb@users.noreply.github.com
|
eb307b0b3d4c0305cf1ec3ffde93f182d60448a4
|
66b1f3c3e57f53e1404d6e17c4acc850173a531d
|
/Python/Fundamentals/Objects and Classes/04. Exercises.py
|
c63352f1eab3c72ca9d0b9b6bde644bfae6dd9ba
|
[] |
no_license
|
bMedarski/SoftUni
|
ca4d6891b3bbe7b03aad5960d2f4af5479fd8bbd
|
62cd9cb84b0826e3381c991882a4cdc27d94f8ab
|
refs/heads/master
| 2021-06-08T17:32:39.282975
| 2020-02-04T11:57:08
| 2020-02-04T11:57:08
| 67,947,148
| 6
| 3
| null | 2021-05-06T20:35:42
| 2016-09-11T18:31:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
class Exercise:
    """A SoftUni exercise topic with its course, judge link and problem list."""

    def __init__(self, topic, course_name, judge_contest_link):
        self.topic = topic
        self.course_name = course_name
        self.judge_contest_link = judge_contest_link
        self.problems = []

    def add_problems(self, list_of_problems):
        """Append every problem in list_of_problems to this exercise."""
        self.problems.extend(list_of_problems)

    def print_problem(self):
        """Print the exercise header followed by its numbered problem list."""
        print(f"Exercises: {self.topic}")
        print(f"Problems for exercises and homework for the \"{self.course_name}\" course @ SoftUni.")
        print(f"Check your solutions here: {self.judge_contest_link}")
        for number, problem in enumerate(self.problems, start=1):
            print(f"{number}. {problem}")
# Read exercise descriptions ("topic -> course -> link -> p1, p2, ...")
# until the sentinel line "go go go", then print every exercise.
list_of_exercises = []
line = input()
while line != "go go go":
    parts = line.split(" -> ")
    new_exercise = Exercise(parts[0], parts[1], parts[2])
    new_exercise.add_problems(parts[3].split(", "))
    list_of_exercises.append(new_exercise)
    line = input()
for exercise in list_of_exercises:
    exercise.print_problem()
|
[
"boyamedarski@mail.bg"
] |
boyamedarski@mail.bg
|
77d363126291b572790991bb8207eda8fe0559c8
|
a149044762566830b4356343c062ee84c8056471
|
/healthid/apps/orders/migrations/0019_auto_20191118_1908.py
|
cb35272d4db1312cee84af4026cfee38c1046a7f
|
[] |
no_license
|
malep2007/healtid-web-api
|
23fbe8b34a3c5256cbb60cf15205f65cc035df13
|
d9e88b4d8fe1dbc297b61bb007d4182928e515d4
|
refs/heads/dev
| 2022-12-12T03:24:37.537569
| 2019-12-17T16:26:59
| 2019-12-17T16:26:59
| 228,655,076
| 0
| 1
| null | 2022-12-08T05:25:00
| 2019-12-17T16:17:27
|
Python
|
UTF-8
|
Python
| false
| false
| 521
|
py
|
# Generated by Django 2.2 on 2019-11-18 18:08
from django.db import migrations, models
# NOTE: auto-generated by `makemigrations`; keep edits to a minimum.
class Migration(migrations.Migration):

    # Must run after the supplierrating migration in the 'orders' app.
    dependencies = [
        ('orders', '0018_supplierrating'),
    ]

    operations = [
        # Redefine the choice set for SupplierOrderDetails.status; stored
        # values are the lowercase keys and 'pending' remains the default.
        migrations.AlterField(
            model_name='supplierorderdetails',
            name='status',
            field=models.CharField(choices=[('pending', 'Pending Approval'), ('open', 'Open'), ('closed', 'Closed'), ('approved', 'Approved')], default='pending', max_length=10),
        ),
    ]
|
[
"ephraim.malinga@gmail.com"
] |
ephraim.malinga@gmail.com
|
6e1c053097e229c6fd3c3bb3d6e367ae5d464e29
|
c231bdc8d01465c9dda429be2d6535f8e8ea5ece
|
/Complete Python Masterclass/Source_Codes/lesson_192_pigen.py
|
7cd19f181a3a2e3f01176230fb3bdbda2765cf2a
|
[
"MIT"
] |
permissive
|
jessequinn/Udemy
|
1634049711eb888f59b697d883555767e38598a3
|
e3d652c8144660ae155149d897b6364416106c7c
|
refs/heads/master
| 2021-08-18T09:58:57.274897
| 2018-10-26T22:50:26
| 2018-10-26T22:50:26
| 142,214,616
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
def oddnumbers():
    """Yield the odd positive integers 1, 3, 5, ... without end."""
    current = 1
    while True:
        yield current
        current += 2
def pi_series():
    """Yield successive partial sums of the Leibniz series for pi:
    4/1 - 4/3 + 4/5 - 4/7 + ...

    A value is yielded after every added AND every subtracted term, so
    consecutive values bracket pi from above and below. (The odd-number
    helper the original delegated to is inlined here as `denominator`.)
    """
    denominator = 1
    approximation = 0
    while True:
        approximation += (4 / denominator)
        yield approximation
        denominator += 2
        approximation -= (4 / denominator)
        yield approximation
        denominator += 2
# Print the first 10000 partial sums; they oscillate around and converge to pi.
approx_pi = pi_series()
for x in range(10000):
    print(next(approx_pi))
|
[
"me@jessequinn.info"
] |
me@jessequinn.info
|
4a8fa681a8b0aae3c3b191b3e2b4ec7019a401fc
|
f40e5c91a18fc5c7e0b4d96fe964a493f924e958
|
/supervised_learning/0x0F-word_embeddings/4-fasttext.py
|
949c893776a76d2dde5174ec730529b71a0fd9e2
|
[] |
no_license
|
jgadelugo/holbertonschool-machine_learning
|
ab46f71477998371ca5e3623455d61fe334ab221
|
e20b284d5f1841952104d7d9a0274cff80eb304d
|
refs/heads/master
| 2023-02-01T03:52:43.723569
| 2020-12-10T19:28:57
| 2020-12-10T19:28:57
| 256,043,170
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
#!/usr/bin/env python3
"""creates and trains a genism fastText model"""
from gensim.models import FastText
def fasttext_model(sentences, size=100, min_count=5, negative=5,
                   window=5, cbow=True, iterations=5, seed=0, workers=1):
    """creates and trains a gensim fastText model

    @sentences: list of sentences to be trained on
    @size: dimensionality of the embedding layer
    @min_count: minimum number of occurrences of a word for use in training
    @window: maximum distance between the current and predicted word within
             a sentence
    @negative: size of negative sampling
    @cbow: boolean to determine the training type
        True is for CBOW
        False is for Skip-gram
    @iterations: number of iterations to train over
    @seed: seed for the random number generator
    @workers: number of worker threads to train the model
    Return: trained model
    """
    # gensim encodes the architecture as `sg`: 0 -> CBOW, 1 -> skip-gram.
    # Fix: the original passed `sg=cbow` directly, which inverted the flag
    # (cbow=True actually trained a skip-gram model).
    sg = 0 if cbow else 1
    model = FastText(sentences=sentences, size=size, window=window,
                     min_count=min_count, workers=workers, sg=sg,
                     negative=negative, seed=seed)
    model.train(sentences=sentences, total_examples=model.corpus_count,
                epochs=iterations)
    return model
|
[
"alvarezdelugo.jose@gmail.com"
] |
alvarezdelugo.jose@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.