blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74da5a87b5ec42b3916f337f6510325ceb0175cc
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_223/ch147_2020_04_12_20_47_12_558411.py
|
48a99e9557a4ecf4ea235c6fb2d47c61fe37004e
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
def mais_frequente(l1):
    """Return the list of the most frequent element(s) of l1.

    All elements tied for the highest occurrence count are returned, in
    first-seen order. An empty input yields an empty list.
    """
    # Count occurrences of each element.
    dic = {}
    for e in l1:
        dic[e] = dic.get(e, 0) + 1
    # Find the highest occurrence count (0 when the input is empty).
    maior = 0
    for n in dic.values():
        if n > maior:
            maior = n
    # BUG FIX: the original had `if dic[p] = ocorrencias` (assignment instead
    # of `==`, missing colon) and compared against the *list* holding the max
    # rather than the max itself; it also looped redundantly over dic.items(),
    # which would have duplicated every result len(dic) times.
    palavra = [p for p in dic if dic[p] == maior]
    return palavra
|
[
"you@example.com"
] |
you@example.com
|
cfbbccfbb28499d825414a4c03770d71a0783f86
|
0ad5abffdd15bca072ab8db068aab7e1bc6df167
|
/NanoGardener/python/modules/LeptonMaker.py
|
c081cb86e230635a3145a1ee358104a8582dccfd
|
[] |
no_license
|
pfackeldey/LatinoAnalysis
|
bf603af9c370b079c3d92e3ed49a5d7d05b87379
|
484a48ec6bfdb7edb06897be984eecfd1aae62fd
|
refs/heads/master
| 2020-03-14T22:42:22.226962
| 2018-04-27T16:02:56
| 2018-04-27T16:02:56
| 131,827,114
| 0
| 0
| null | 2018-05-02T09:16:59
| 2018-05-02T09:16:59
| null |
UTF-8
|
Python
| false
| false
| 8,092
|
py
|
import ROOT
import os
import re
ROOT.PyConfig.IgnoreCommandLineOptions = True
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from LatinoAnalysis.NanoGardener.data.LeptonMaker_cfg import List_newVar, Lep_var
from LatinoAnalysis.NanoGardener.data.common_cfg import Type_dict
#from LatinoAnalysis.NanoGardener.data.Trigger_names import TrigNames, SPTrigNames
class LeptonMaker(Module):
    '''
    put this file in LatinoAnalysis/NanoGardener/python/modules/
    Add extra variables to NANO tree

    Builds pt-ordered Electron/Muon/Jet collections and a merged,
    pt-ordered "Lepton" collection, then writes them to the output tree.
    '''
    def __init__(self):
        pass
    def beginJob(self):
        pass
    def endJob(self):
        pass
    def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        # Declare all output branches: the new variables from List_newVar plus
        # re-declared copies of the existing Electron/Muon/Jet branches.
        self.initReaders(inputTree) # initReaders must be called in beginFile
        self.out = wrappedOutputTree
        # New branches
        for typ in List_newVar:
            for var in List_newVar[typ]:
                if 'Lepton_' in var: self.out.branch(var, typ, lenVar='nLepton')
                # NOTE(review): SPTrigNames/TrigNames come from the Trigger_names
                # import that is commented out above — these two branches would
                # raise NameError if ever hit. Confirm intended.
                elif 'SPTrigger' in var: self.out.branch(var, typ, len(SPTrigNames))
                elif 'Trigger' in var: self.out.branch(var, typ, len(TrigNames))
                else: self.out.branch(var, typ)
        # Old branches to reorder
        self.list_old_br = {}
        self.list_old_br['Electron'] = []
        self.list_old_br['Muon'] = []
        self.list_old_br['Jet'] = []
        for br in inputTree.GetListOfBranches():
            bname = br.GetName()
            btype = Type_dict[br.GetListOfLeaves()[0].GetTypeName()]
            if re.match('\AElectron_', bname):
                self.list_old_br['Electron'].append(bname)
                self.out.branch(bname, btype, lenVar='nElectron')
            if re.match('\AMuon_', bname):
                self.list_old_br['Muon'].append(bname)
                self.out.branch(bname, btype, lenVar='nMuon')
            if re.match('\AJet_', bname):
                self.list_old_br['Jet'].append(bname)
                self.out.branch(bname, btype, lenVar='nJet')
    def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):
        pass
    def initReaders(self,tree): # this function gets the pointers to Value and ArrayReaders and sets them in the C++ worker class
        # Cache an arrayReader per Electron_/Muon_/Jet_ branch, keyed by branch name.
        self.electron_var = {}
        self.muon_var = {}
        self.jet_var = {}
        for br in tree.GetListOfBranches():
            bname = br.GetName()
            if re.match('\AElectron_', bname): self.electron_var[bname] = tree.arrayReader(bname)
            if re.match('\AMuon_', bname): self.muon_var[bname] = tree.arrayReader(bname)
            if re.match('\AJet_', bname): self.jet_var[bname] = tree.arrayReader(bname)
        self.nElectron = tree.valueReader('nElectron')
        self.nMuon = tree.valueReader('nMuon')
        self.nJet = tree.valueReader('nJet')
        self._ttreereaderversion = tree._ttreereaderversion # self._ttreereaderversion must be set AFTER all calls to tree.valueReader or tree.arrayReader
    def analyze(self, event):
        """process event, return True (go to next module) or False (fail, go to next event)"""
        if event._tree._ttreereaderversion > self._ttreereaderversion: # do this check at every event, as other modules might have read further branches
            self.initReaders(event._tree)
        # do NOT access other branches in python between the check/call to initReaders and the call to C++ worker code
        #--- Set vars
        nEl = int(self.nElectron)
        nMu = int(self.nMuon)
        nJt = int(self.nJet)
        nLep = nMu + nEl
        # Pre-size one output list per variable; entries are filled by pt rank below.
        lep_dict = {}
        for lv in Lep_var:
            lep_dict[lv] = [0]*nLep
        lep_dict['instance'] = [0]*nLep
        ele_dict = {}
        for lv in self.list_old_br['Electron']:
            ele_dict[lv] = [0]*nEl
        muo_dict = {}
        for lv in self.list_old_br['Muon']:
            muo_dict[lv] = [0]*nMu
        jet_dict = {}
        for lv in self.list_old_br['Jet']:
            jet_dict[lv] = [0]*nJt
        #--- Electron Loops
        # pt_idx = number of other electrons with strictly larger pt, i.e. the
        # slot of this electron in descending-pt order.
        for iEle1 in range(nEl):
            pt_idx = 0
            pt1 = self.electron_var['Electron_pt'][iEle1]
            # Start comparing electrons
            for iEle2 in range(nEl):
                if iEle2 == iEle1: continue
                pt2 = self.electron_var['Electron_pt'][iEle2]
                if pt1 < pt2:
                    pt_idx += 1
            #if pt_idx != iEle1: print('Electrons reordered')
            # Now index is set, fill the vars
            for var in ele_dict:
                # char-typed leaves arrive as 1-char strings; store their ordinal
                if type(self.electron_var[var][iEle1]) is str:
                    ele_dict[var][pt_idx] = ord(self.electron_var[var][iEle1])
                else:
                    ele_dict[var][pt_idx] = self.electron_var[var][iEle1]
        #--- Muon Loops
        for iMu1 in range(nMu):
            pt_idx = 0
            pt1 = self.muon_var['Muon_pt'][iMu1]
            # Start comparing muons
            for iMu2 in range(nMu):
                if iMu2 == iMu1: continue
                pt2 = self.muon_var['Muon_pt'][iMu2]
                if pt1 < pt2:
                    pt_idx += 1
            #if pt_idx != iMu1: print('Muons reordered')
            # Now index is set, fill the vars
            for var in muo_dict:
                if type(self.muon_var[var][iMu1]) is str:
                    muo_dict[var][pt_idx] = ord(self.muon_var[var][iMu1])
                else:
                    muo_dict[var][pt_idx] = self.muon_var[var][iMu1]
        #--- Lepton Loops
        # Merged collection: indices [0, nEl) are electrons, [nEl, nLep) muons
        # (both already pt-sorted above); rank each lepton across the union.
        for iLep1 in range(nLep):
            pt_idx = 0
            if iLep1 < nEl:
                pt1 = ele_dict['Electron_pt'][iLep1]
                pdgId1 = ele_dict['Electron_pdgId'][iLep1]
            else:
                pt1 = muo_dict['Muon_pt'][iLep1 - nEl]
                pdgId1 = muo_dict['Muon_pdgId'][iLep1 - nEl]
            # Start comparing leptons
            for iLep2 in range(nLep):
                if iLep2 == iLep1: continue
                if iLep2 < nEl:
                    pt2 = ele_dict['Electron_pt'][iLep2]
                else:
                    pt2 = muo_dict['Muon_pt'][iLep2 - nEl]
                if pt1 < pt2:
                    pt_idx += 1
            # Now index is set, fill the vars
            # |pdgId| 11 = electron, 13 = muon; 'instance' records the index
            # into the original per-flavour collection.
            if abs(pdgId1) == 11:
                for var in lep_dict:
                    if not 'instance' in var:
                        lep_dict[var][pt_idx] = ele_dict['Electron_'+var][iLep1]
                    else:
                        lep_dict[var][pt_idx] = iLep1
            elif abs(pdgId1) == 13:
                for var in lep_dict:
                    if not 'instance' in var and not 'eCorr' in var:
                        lep_dict[var][pt_idx] = muo_dict['Muon_'+var][iLep1 - nEl]
                    elif 'eCorr' in var:
                        # muons have no energy correction; store neutral factor 1.
                        lep_dict[var][pt_idx] = 1.
                    else:
                        lep_dict[var][pt_idx] = iLep1 - nEl
        #--- Jet Loops
        for iJ1 in range(nJt):
            pt_idx = 0
            pt1 = self.jet_var['Jet_pt'][iJ1]
            # Start comparing jets
            for iJ2 in range(nJt):
                if iJ2 == iJ1: continue
                pt2 = self.jet_var['Jet_pt'][iJ2]
                if pt1 < pt2:
                    pt_idx += 1
            #if pt_idx != iJ1: print('Jets reordered')
            # Now index is set, fill the vars
            for var in jet_dict:
                if type(self.jet_var[var][iJ1]) is str:
                    jet_dict[var][pt_idx] = ord(self.jet_var[var][iJ1])
                else:
                    jet_dict[var][pt_idx] = self.jet_var[var][iJ1]
        #--- Fill branches
        for var in lep_dict:
            self.out.fillBranch('Lepton_' + var, lep_dict[var])
        for var in ele_dict:
            self.out.fillBranch(var, ele_dict[var])
        for var in muo_dict:
            self.out.fillBranch(var, muo_dict[var])
        for var in jet_dict:
            self.out.fillBranch(var, jet_dict[var])
        return True
# define modules using the syntax 'name = lambda : constructor' to avoid having them loaded when not needed
# (the postprocessing framework calls this factory to instantiate the module on demand)
lepMkr = lambda : LeptonMaker()
|
[
"senne.vanputte@student.uantwerpen.be"
] |
senne.vanputte@student.uantwerpen.be
|
2faa1a2aaf34ff00d50c35afead93ace9bc949fb
|
993ef8924418866f932396a58e3ad0c2a940ddd3
|
/Production/test/condorSub/dict_Summer20UL16APV_gjets_dr0p4.py
|
817994fbf5c7ee0aa69b211e2a51ecb254f23838
|
[] |
no_license
|
TreeMaker/TreeMaker
|
48d81f6c95a17828dbb599d29c15137cd6ef009a
|
15dd7fe9e9e6f97d9e52614c900c27d200a6c45f
|
refs/heads/Run2_UL
| 2023-07-07T15:04:56.672709
| 2023-07-03T16:43:17
| 2023-07-03T16:43:17
| 29,192,343
| 16
| 92
| null | 2023-07-03T16:43:28
| 2015-01-13T13:59:30
|
Python
|
UTF-8
|
Python
| false
| false
| 446
|
py
|
# Condor submission config: Summer20UL16APV GJets (DR-0p4) HT-binned samples,
# produced with the 'emerging=True' argument.
flist = {
  "scenario": "Summer20UL16APV",
  "args": "emerging=True",
  "samples": [
   ['Summer20UL16APV.GJets_DR-0p4_HT-100To200_TuneCP5_13TeV-madgraphMLM-pythia8'],
   ['Summer20UL16APV.GJets_DR-0p4_HT-200To400_TuneCP5_13TeV-madgraphMLM-pythia8'],
   ['Summer20UL16APV.GJets_DR-0p4_HT-400To600_TuneCP5_13TeV-madgraphMLM-pythia8'],
   ['Summer20UL16APV.GJets_DR-0p4_HT-600ToInf_TuneCP5_13TeV-madgraphMLM-pythia8'],
  ]
}
|
[
"kpedro88@gmail.com"
] |
kpedro88@gmail.com
|
190d1b1092d241c85f0feb82ec4fbde905277a25
|
871e1b0295c0fbbfca8191236d674866cf62ff01
|
/TrainB5_NAR1_imagenet_64.py
|
532b33d65353507986ad9cfe7bb6f9818cee5de2
|
[] |
no_license
|
Peckkie/USAI_ABnormal_Screening
|
ce31a813e9303a7d43def912ab731cc633268cb7
|
82cd63ac9ab72fbe68eae254c15c7bf7ef906022
|
refs/heads/master
| 2023-02-16T13:32:33.678500
| 2021-01-07T02:36:35
| 2021-01-07T02:36:35
| 277,981,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,080
|
py
|
import PIL
from keras import models
from keras import layers
from tensorflow.keras import optimizers
import os
import glob
import shutil
import sys
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
import os
from tensorflow.keras import callbacks
import pandas as pd
from keras.utils import generic_utils
# Pin training to GPU 1 and tolerate truncated image files during decoding.
os.environ["CUDA_VISIBLE_DEVICES"]="1"
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 64
epochs = 200
#Train
# CSVs list image paths ('Path Crop') and labels ('Class') for the generators below.
dataframe = pd.read_csv( '/media/tohn/SSD/ImageForTrainTest/train.csv')
base_dir = '/media/tohn/SSD/ImageForTrainTest/'
os.chdir(base_dir)
train_dir = os.path.join(base_dir, 'train')
#validation
valframe = pd.read_csv( '/media/tohn/SSD/ImageForTrainTest/validation.csv')
validation_dir = os.path.join(base_dir, 'validation')
from efficientnet.keras import EfficientNetB5 as Net
from efficientnet.keras import center_crop_and_resize, preprocess_input
# First instantiation only serves to read B5's native input resolution.
conv_base = Net(weights='imagenet')
height = width = conv_base.input_shape[1]
input_shape = (height, width, 3)
# loading pretrained conv base model
conv_base = Net(weights='imagenet', include_top=False, input_shape=input_shape)
# create new model with a new classification layer (2-class softmax head)
x = conv_base.output
global_average_layer = layers.GlobalAveragePooling2D(name = 'head_pooling')(x)
dropout_layer_1 = layers.Dropout(0.50,name = 'head_dropout')(global_average_layer)
prediction_layer = layers.Dense(2, activation='softmax',name = 'prediction_layer')(dropout_layer_1)
model = models.Model(inputs= conv_base.input, outputs=prediction_layer)
model.summary()
#showing before&after freezing
print('This is the number of trainable layers '
      'before freezing the conv base:', len(model.trainable_weights))
#conv_base.trainable = False # freeze to preserve the convolutional base's weights
for layer in conv_base.layers:
    layer.trainable = False
print('This is the number of trainable layers '
      'after freezing the conv base:', len(model.trainable_weights)) # after freezing, only the new head (pooling/dropout/dense) remains trainable
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Heavy augmentation for training; validation only gets rescaling.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    brightness_range=[0.5,1.5],
    shear_range=0.4,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
# Batches are read straight from the CSV-described paths, resized to the
# network's native resolution, with one-hot (categorical) labels.
train_generator = train_datagen.flow_from_dataframe(
        dataframe = dataframe,
        directory = train_dir,
        x_col = 'Path Crop',
        y_col = 'Class',
        target_size = (height, width),
        batch_size=batch_size,
        color_mode= 'rgb',
        class_mode='categorical')
test_generator = test_datagen.flow_from_dataframe(
        dataframe = valframe,
        directory = validation_dir,
        x_col = 'Path Crop',
        y_col = 'Class',
        target_size = (height, width),
        batch_size=batch_size,
        color_mode= 'rgb',
        class_mode='categorical')
os.chdir('/media/tohn/SSD/trainEffbyB/R1')
root_logdir = '/media/tohn/SSD/trainEffbyB/R1/my_logsB5imagenet_64'
def get_run_logdir():
    # One timestamped TensorBoard log directory per run.
    import time
    run_id = time.strftime("run_%Y_%m_%d_%H_%M_%S")
    return os.path.join(root_logdir,run_id)
run_logdir = get_run_logdir()
tensorboard_cb = callbacks.TensorBoard(log_dir = run_logdir)
# os.makedirs("./models", exist_ok=True)
def avoid_error(gen):
    """Endlessly pull (data, labels) batches from *gen*, skipping failures.

    Any exception raised while fetching a batch (e.g. a corrupt image) is
    swallowed and the next batch is tried, so Keras' fit_generator never
    sees the error. NOTE(review): the bare except also catches the
    StopIteration of an exhausted generator, so this never terminates —
    acceptable for fit_generator, which expects an infinite stream.
    """
    while True:
        try:
            batch_x, batch_y = next(gen)
            yield batch_x, batch_y
        except:
            pass
#Training
# RMSprop with a small learning rate: only the new head is trainable here.
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])
# Generators are wrapped in avoid_error so unreadable images don't abort the fit.
history = model.fit_generator(
      avoid_error(train_generator),
      steps_per_epoch= len(dataframe)//batch_size,
      epochs=epochs,
      validation_data=avoid_error(test_generator),
      validation_steps= len(valframe) //batch_size,
      callbacks = [tensorboard_cb])
# NOTE(review): assumes ./models already exists (see commented os.makedirs above).
model.save('./models/B5_R1_imnet_64.h5')
|
[
"w_yupaporn@kkumail.com"
] |
w_yupaporn@kkumail.com
|
b9a48a3fa6173aaf6e71b3ae6f50b4791ceb6e34
|
e49a07ad215172e9c82cb418b10371bf0ce1c0f7
|
/第1章 python基础/Python基础09/1-创建模块/msgnew.py
|
c70f8f258a39e5f8bc8e8298c973427f1890cdb5
|
[] |
no_license
|
taogangshow/python_Code
|
829c25a7e32ead388c8b3ffa763cb9cf587bfd7b
|
4b3d6992ec407d6069f3187ca7e402a14d863fff
|
refs/heads/master
| 2022-12-16T01:26:17.569230
| 2018-11-16T10:07:59
| 2018-11-16T10:07:59
| 157,832,985
| 0
| 1
| null | 2022-11-25T09:55:32
| 2018-11-16T08:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 142
|
py
|
# Only test2 and Test are exported by `from msgnew import *`;
# test1 and num remain reachable via an explicit import only.
__all__ = ["test2", "Test"]


def test1():
    """Print the test1 marker line."""
    print("---test1---")


def test2():
    """Print the test2 marker line."""
    print("---test2---")


class Test(object):
    """Empty placeholder class used to demonstrate __all__ exports."""
    pass


num = 100
|
[
"cdtaogang@163.com"
] |
cdtaogang@163.com
|
fdd59d240a4c0bb10c89d75d4e9a62b0b1c7f939
|
2e990ff03f23c3f82e1f3fb7acee1ddd8fb72e0e
|
/whoislive.py
|
450d38b0d33cacce38abf02fe08ffd66a715315d
|
[] |
no_license
|
HeNine/ekimbot_plugins
|
e25bd5326b13603a8671d4089317185bb7a7821c
|
354978cc8a632aec57ef79d2948ada21dc2502cd
|
refs/heads/master
| 2021-01-24T18:26:28.146480
| 2017-03-06T17:37:11
| 2017-03-06T17:37:11
| 84,441,447
| 0
| 0
| null | 2017-03-09T12:47:12
| 2017-03-09T12:47:12
| null |
UTF-8
|
Python
| false
| false
| 4,547
|
py
|
import functools
import itertools
import gevent
import gtools
import requests
import twitch
from ekimbot.botplugin import ClientPlugin
from ekimbot.commands import CommandHandler
from ekimbot.utils import reply_target
def encode_recursive(o, encoding='utf-8'):
    """Recursively encode every unicode string inside *o* to byte strings.

    Dicts and lists are rebuilt with both keys and values (resp. elements)
    encoded; any other value is passed through unchanged.
    (Python 2 only: relies on the `unicode` builtin.)
    """
    if isinstance(o, unicode):
        return o.encode(encoding)
    if isinstance(o, dict):
        return dict(
            (encode_recursive(key), encode_recursive(value))
            for key, value in o.items()
        )
    if isinstance(o, list):
        return [encode_recursive(element) for element in o]
    return o
def requires_oauth(fn):
    """Decorator for handlers that need a configured twitch login.

    When either config.oauth or config.target is missing, the wrapped
    handler is not invoked; the user gets an error reply instead.
    """
    @functools.wraps(fn)
    def wrapper(self, msg, *args):
        # Both settings are required before any authenticated action.
        if self.config.oauth is not None and self.config.target is not None:
            return fn(self, msg, *args)
        self.reply(msg, "No twitch login configured")
    return wrapper
class TwitchPlugin(ClientPlugin):
    """Should be a client plugin for a client logged into twitch.
    Upon request, will list all live channels out of the list of channels that config.target
    (default client.nick) is following.
    """
    name = 'whoislive'
    defaults = {
        'target': None, # None makes no args an error
        'limit': 3,     # max replies when asked in a public channel
        'private_limit': 10, # max replies in a private message
        'client_id': None,
        'oauth': None, # if not none, can do follow actions
    }
    def init(self):
        # One API client for the plugin's lifetime, authenticated if configured.
        self.api = twitch.TwitchClient(oauth=self.config.oauth, client_id=self.config.client_id)
    def limit(self, msg):
        # Choose the reply cap based on where the request came from.
        if msg.target == reply_target(self.client, msg):
            # public channel
            return self.config.limit
        else:
            # private message
            return self.config.private_limit
    @CommandHandler("live", 0)
    def live(self, msg, *channels):
        """List currently live streamers
        Specify list of channels, or list of all channels followed by a channel by prepending a ~
        If nothing given, a default follow list is used depending on bot config
        """
        found = []
        errors = False
        if not channels:
            if self.config.target:
                channels = ['~{}'.format(self.config.target)]
            else:
                self.reply(msg, "Please list some channels to check")
                return
        limit = self.limit(msg)
        try:
            # flatten iterators of follows and direct channel names into single iterable
            # TODO this could be better parallelised so follow fetches happen in parallel
            # but we need to refactor to use gevent queues or it gets real ugly real fast
            channels = itertools.chain(*[
                self.following(channel.lstrip('~')) if channel.startswith('~') else (channel,)
                for channel in channels
            ])
            # Check channels concurrently; reply inline up to the limit.
            for name, channel in gtools.gmap_unordered(self.get_channel_if_live, channels):
                if not channel:
                    continue
                found.append(name)
                if len(found) < limit:
                    self.reply(msg, "https://twitch.tv/{name} is playing {game}: {status}".format(**channel))
        except Exception:
            self.logger.exception("Error while checking who is live")
            errors = True
        if errors:
            self.reply(msg, "I had some issues talking to twitch, maybe try again later?")
        elif len(found) >= limit:
            # Overflow: summarize the channels beyond the detailed replies.
            found = found[limit - 1:]
            self.reply(msg, "And also {}".format(', '.join(found)))
        elif not found:
            self.reply(msg, "No-one is live right now, sorry!")
    def following(self, target):
        """Yields channel names that target is following"""
        for result in self.api.get_all("follows", "users", target, "follows", "channels"):
            yield encode_recursive(result['channel']['name'])
    def get_channel_if_live(self, name):
        """Returns an up-to-date channel object if channel is currently live, else None"""
        # Fetch stream and channel info concurrently via gevent.
        stream = gevent.spawn(lambda: self.api.get("streams", name))
        channel = gevent.spawn(lambda: self.api.get("channels", name))
        if stream.get().get("stream") is None:
            return
        return encode_recursive(channel.get())
    def _follow_op(self, msg, channels, method, op_name):
        """Apply an HTTP method (PUT/DELETE) to each channel's follow endpoint
        and reply with a summary of successes/failures."""
        channels = sorted(list(set(channels)))
        failures = {}
        for channel in channels:
            try:
                self.api.request(method, 'users', self.config.target, 'follows', 'channels', channel, json=False)
            except requests.HTTPError as e:
                failures[channel] = str(e)
        if len(failures) == 0:
            self.reply(msg, "{}ed channels: {}".format(op_name, ' '.join(channels)))
        elif len(failures) == 1:
            (channel, error), = failures.items()
            self.reply(msg, "failed to {} channel {}: {}".format(op_name, channel, error))
        else:
            self.reply(msg, "failed to {} channels: {}".format(op_name, ' '.join(sorted(failures))))
    @CommandHandler("twitch follow", 1)
    @requires_oauth
    def follow(self, msg, *channels):
        # Follow each listed channel as the configured target user.
        self._follow_op(msg, channels, 'PUT', 'follow')
    @CommandHandler("twitch unfollow", 1)
    @requires_oauth
    def unfollow(self, msg, *channels):
        # Unfollow each listed channel as the configured target user.
        self._follow_op(msg, channels, 'DELETE', 'unfollow')
|
[
"mikelang3000@gmail.com"
] |
mikelang3000@gmail.com
|
7a2c9eb7044540d777bca9c0f68a4a888895eb00
|
06904f68018fbd42bba1909e12a79c2106af71f4
|
/mirror_en.py
|
733cf287ae4ed857491c9bb00206dfa953eb9428
|
[] |
no_license
|
rzbfreebird/MCDR-Mirror-Server
|
2d079ac30c073805045f97302b2379937b8f95e2
|
fbaebc8eeddaefe3675efff8abe98e7e69d83e30
|
refs/heads/master
| 2022-12-07T01:14:01.603244
| 2020-09-03T14:30:43
| 2020-09-03T14:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,443
|
py
|
# -*- coding: utf-8 -*-
import shutil
import datetime
import os
import json as js
import platform
from os.path import abspath, dirname
from utils import rcon
current_path = abspath(dirname(__file__))
def read_config():
    # Load the plugin configuration (path, rcon settings, start command, worlds).
    with open("config/mirror.json") as json_file:
        config = js.load(json_file)
    return config
# Module-level config: read once at import time.
conf=read_config()
mirror_folder=conf['path']
remote_enable=conf['remote']['enable']
address=conf['remote']['address']
port=conf['remote']['port']
secret=conf['remote']['secret']
start_command=conf['command']
world=conf["world"]
source=[]
target=[]
mirror_started=False
# Heuristic: if the mirror folder contains MCDReforged.py, worlds live under server/.
# NOTE(review): looks like a missing '/' before MCDReforged.py — verify the path join.
MCDRJudge=os.path.exists("{}MCDReforged.py".format(mirror_folder))
for i in range(len(world)):
    source.append('./server/{}'.format(world[i-1]))
if(MCDRJudge):
    for i in range(len(world)):
        target.append('{}/server/{}'.format(mirror_folder,world[i-1]))
else:
    for i in range(len(world)):
        target.append('{}/{}'.format(mirror_folder,world[i-1]))
if(remote_enable):
    connection=rcon.Rcon(address,port,secret)
remote_info='''
    §6[Mirror]§bRemote Information:
    §5Rcon Address: §b{}
    §5Rcon Port: §b{}
'''.format(address,port)
help_msg='''
§r======= §6Minecraft Mirror Plugin §r=======
Use §6!!mirror sync§r to sync the main server's world to the mirror one
Use §6!!mirror start§r to turn on the mirror server
§4BE CAUTIOUS: IF YOU DON'T ENABLE THE RCON FREATURE OF THE MIRROR SERVER, YOU CANNOT SHUTDOWN THE SERVER BY REMOTE COMMAND
§4YOU CAN ONLY SHUTDOWN IT IN THE MIRROR SERVER, TO DO THIS, YOU CAN CHECKOUT THE FOLLOWING MCDR PLUGINS
§4SimpleOP without MCDR-Admin permission required
§4StartStopHelper with MCDR-Admin permission required
-----Rcon Features-----
Use §6!!mirror info§r to checkout rcon information(MCDR-Admin Permission is Required)
Use §6!!mirror stop§r to stop mirror server
Use §6!!mirror status§r to checkout whether the mirror has been turned on or not
Use §6!!mirror rcon <command>§r to send command to mirror server(MCDR-Admin Permission is Required, use it WITHOUT SLASH)
'''
# tellraw payloads with clickable links to the suggested companion plugins.
SimpleOP=' {"text":"§6Checkout SimpleOP","clickEvent":{"action":"open_url","value":"https://github.com/GamerNoTitle/SimpleOP"}}'
StartStopHelper=' {"text":"§6Checkout StartStopHelper","clickEvent":{"action":"open_url","value":"https://github.com/MCDReforged-Plugins/StartStopHelper"}}'
def helpmsg(server, info):
    """Send the mirror help text plus clickable plugin suggestions to a player."""
    # Only react to a player typing exactly '!!mirror'.
    if not (info.is_player and info.content == '!!mirror'):
        return
    server.reply(info, help_msg, encoding=None)
    server.execute('tellraw ' + info.player + SimpleOP)
    server.execute('tellraw ' + info.player + StartStopHelper)
def sync(server,info):
    """Copy every world folder from the main server into the mirror, timing the run.

    Three-tier fallback: plain copy -> remove target then copy -> remove target
    then copy ignoring session.lock (held open while the mirror is running).
    `i` carries over between tiers so already-copied worlds are not redone.
    """
    start_time=datetime.datetime.now()
    server.execute('save-all')
    server.say('§6[Mirror]Syncing...')
    i=0
    try:
        # First attempt: straight copy (fails if target already exists).
        while True:
            if(i>len(world)-1): break
            shutil.copytree(source[i],target[i])
            i=i+1
    except:
        try:
            # Second attempt: delete the stale target first.
            while True:
                if(i>len(world)-1): break
                shutil.rmtree(target[i],True)
                shutil.copytree(source[i],target[i])
                i=i+1
        except Exception:
            # Last resort: also skip session.lock, which a running mirror keeps locked.
            while True:
                if(i>len(world)-1): break
                shutil.rmtree(target[i],True)
                ignore=shutil.ignore_patterns('session.lock')
                shutil.copytree(source[i],target[i],ignore=ignore)
                i=i+1
    end_time=datetime.datetime.now()
    server.say('§6[Mirror]Sync completed in {}'.format(end_time-start_time))
def start(server,info):
    """Launch the mirror server and block until it exits.

    os.system blocks for the mirror's whole lifetime; when it returns the
    mirror has shut down, so the started flag is cleared again.
    """
    server.say('§6[Mirror]Mirror server is launching, please wait...')
    if platform.system()=='Windows':
        os.system('cd {} && powershell {}'.format(mirror_folder,start_command))
    else:
        os.system('cd {} && {}'.format(mirror_folder,start_command))
    # NOTE(review): this 'cd' runs in a throwaway subshell and has no lasting effect.
    os.system('cd {}'.format(current_path))
    global mirror_started
    mirror_started=False
    server.say('§6[Mirror]Mirror server has been shutdown!')
def command(server,info):
    """Forward an arbitrary command ('!!mirror rcon <cmd>') to the mirror via rcon.

    Requires the remote command feature to be enabled in config and the sender
    to have permission level > 2. info.content[14:] strips '!!mirror rcon '.
    """
    if(conf['remote']['command']):
        if(server.get_permission_level(info)>2):
            try:
                connection.connect()
                connection.send_command(info.content[14:])
                connection.disconnect()
                server.reply(info,'§6[Mirror]Command Sent!', encoding=None)
            except Exception as e:
                server.reply(info,'§6[Mirror]§4Error: {}'.format(e), encoding=None)
        else:
            server.reply(info,'§6[Mirror]§4Error: Permission Denied!', encoding=None)
    else:
        server.reply(info,' §6[Mirror]§4Error: Rcon feature is disabled!', encoding=None)
def stop(server, info):
    """Shut the mirror server down by sending 'stop' over rcon.

    Any failure (rcon disabled, mirror offline, network error) is reported
    back to the requester instead of raising.
    """
    try:
        connection.connect()
        connection.send_command('stop')
        connection.disconnect()
    except Exception as err:
        server.reply(info, '§6[Mirror]§4Connection Failed: {}'.format(err), encoding=None)
def information(server, info):
    """Show the rcon address/port summary to admins (permission level > 2)."""
    # Guard clause: non-admins only get the denial message.
    if server.get_permission_level(info) <= 2:
        server.reply(info, "§6[Mirror]§4Error: Permission Denied!", encoding=None)
        return
    server.reply(info, remote_info)
def status(server,info):
    """Probe the mirror via rcon and report online/starting/offline.

    A successful rcon connect means the mirror is up; on failure, the
    mirror_started flag distinguishes 'still starting (or rcon broken)'
    from 'not running at all'.
    """
    global mirror_started
    try:
        connection.connect()
        server.reply(info,'§6[Mirror]§lMirror Server is online!', encoding=None)
        connection.disconnect()
    except:
        if mirror_started:
            server.reply(info,'§6[Mirror]§lMirror Server is Starting...(or mirror has been started but rcon feature didn\'t work well', encoding=None)
        else:
            server.reply(info,'§4[Mirror]§lMirror Server is offline!', encoding=None)
def on_load(server, old_module):
    # MCDR entry point: register the plugin's help line on (re)load.
    server.add_help_message('!!mirror', '§6Get the usage of Mirror')
def on_info(server,info):
    """MCDR entry point: dispatch '!!mirror ...' chat/console commands.

    Each subcommand is matched independently; only '!!mirror rcon' uses a
    substring test because it carries a free-form argument.
    """
    if info.is_player and info.content == '!!mirror':
        helpmsg(server,info)
    if info.content == '!!mirror sync':
        sync(server,info)
    if info.content == '!!mirror start':
        global mirror_started
        if mirror_started:
            server.reply(info,'§b[Mirror]Mirror server has already started, please don\'t run the command again!', encoding=None)
        else:
            # Set the flag before start() blocks for the mirror's lifetime.
            mirror_started=True
            start(server,info)
    if('!!mirror rcon' in info.content):
        command(server,info)
    if(info.content=='!!mirror info'):
        information(server,info)
    if(info.content=='!!mirror stop'):
        stop(server,info)
    if(info.content=='!!mirror status'):
        status(server,info)
|
[
"bili33@87ouo.top"
] |
bili33@87ouo.top
|
0f6b4c0e8a7fc2507d68d242905734ba1e2e2592
|
6b033e3dddc280417bb97500f72e68d7378c69d6
|
/IV. COLAB/Enemy-Spotted/2. Uniform Classification/crawling/crawling_version_2_deprecated.py
|
fa6711bbd2b14e88c54793181f0ffa2d0b600bb1
|
[] |
no_license
|
inyong37/Study
|
e5cb7c23f7b70fbd525066b6e53b92352a5f00bc
|
e36252a89b68a5b05289196c03e91291dc726bc1
|
refs/heads/master
| 2023-08-17T11:35:01.443213
| 2023-08-11T04:02:49
| 2023-08-11T04:02:49
| 128,149,085
| 11
| 0
| null | 2022-10-07T02:03:09
| 2018-04-05T02:17:17
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
from urllib.request import urlopen
import argparse
import requests as req
from bs4 import BeautifulSoup
# reference: https://enjoysomething.tistory.com/42
# CLI: '-data' sets the Google image-search query (defaults to 'acu pattern').
parser = argparse.ArgumentParser()
parser.add_argument("-data", required=False, default='acu pattern')
args = parser.parse_args()
data = args.data
def main():
    """Search Google Images for `data` and save the thumbnails as img_<n>.jpg."""
    url_info = "https://www.google.com/search?"
    params = {
        "q": data
    }
    # NOTE(review): this hits the plain search page, not the image tab —
    # presumably only the inline thumbnails are retrieved; confirm intent.
    html_object = req.get(url_info, params)
    if html_object.status_code == 200:
        bs_object = BeautifulSoup(html_object.text, "html.parser")
        img_data = bs_object.find_all("img")
        # Skip img_data[0] (first <img> is not a result thumbnail), then
        # download each remaining image src into a numbered jpg file.
        for i in enumerate(img_data[1:]):
            t = urlopen(i[1].attrs['src']).read()
            filename = "img_" + str(i[0] + 1) + '.jpg'
            with open(filename, "wb") as f:
                f.write(t)
            print("Image Save Success")
if __name__ == "__main__":
    main()
|
[
"inyong1020@gmail.com"
] |
inyong1020@gmail.com
|
5901cd761f795addb37355ab5dfb91b136524937
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/45/usersdata/118/15614/submittedfiles/lista1.py
|
e7b973d29fb37d041373635daf0586e519cab283
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
# Reads a list of integers, then reports the sum and count of the odd and
# even entries, followed by the list itself. (Python 2: `input` evals.)
n = input('Digite o número de termos:')
a = []
# NOTE(review): range(0, n+1) reads n+1 values even though the prompt asks
# for the number of terms — kept as-is, confirm against the exercise spec.
for i in range(0,n+1,1):
    # BUG FIX: the original line was missing its closing parenthesis
    # (SyntaxError: a.append(input('Digite o valor:') ).
    a.append(input('Digite o valor:'))
somap = 0
somai = 0
contp = 0
conti = 0
for j in range(0,len(a),1):
    # BUG FIX: the original tested a[i], reusing the stale index left over
    # from the input loop, so every iteration inspected the same element.
    if a[j]%2 == 0:
        contp = contp +1
        # BUG FIX: somap/somai are sums ("soma") but the original added 1,
        # merely duplicating the counters; accumulate the value instead.
        somap = somap + a[j]
    else:
        conti = conti +1
        somai = somai + a[j]
print(somai)
print(somap)
print(conti)
print(contp)
print(a)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
65862506e7c2a0b1eba9b24168fb76d1f57c32fd
|
87fb0ae5563512bf4cfe2754ea92e7f4173f753f
|
/Chap_05/Ex_129.py
|
67451fbd6333873e212e51249f4b024c92250365
|
[] |
no_license
|
effedib/the-python-workbook-2
|
87291f5dd6d369360288761c87dc47df1b201aa7
|
69532770e6bbb50ea507e15f7d717028acc86a40
|
refs/heads/main
| 2023-08-21T13:43:59.922037
| 2021-10-12T20:36:41
| 2021-10-12T20:36:41
| 325,384,405
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# Tokenizing a String
# Tokenizing is the process of converting a string into a list of substrings, known as tokens.
def tokenbystring(string: str) -> list:
    """Split a mathematical expression into tokens.

    Spaces are discarded; consecutive digits are grouped into one number
    token, and each of * / ^ + - ( ) becomes its own token.

    >>> tokenbystring('2+2')
    ['2', '+', '2']
    """
    string = string.replace(' ', '')
    tokens = []
    dgt = ''  # run of digits currently being accumulated
    for s in string:
        if s in ['*', '/', '^', '+', '-', '(', ')']:
            # An operator ends any pending number token.
            if dgt != '':
                tokens.append(dgt)
                dgt = ''
            tokens.append(s)
        elif s.isdigit():
            dgt += s
    # BUG FIX: the original flushed the pending number whenever the current
    # character happened to *equal* the string's last character
    # (`if s == string[len(string)-1]`), which mis-tokenizes inputs like
    # '2+2' (first '2' matches the last char). Flush once, after the loop.
    # Also uses str.isdigit() instead of `0 <= int(s) <= 9`, which raised
    # ValueError on any unexpected character.
    if dgt != '':
        tokens.append(dgt)
    return tokens
def main():
    """Tokenize a hard-coded sample expression and print the token list."""
    # exp = input("Enter a mathematical expressione: ")
    sample = '52 + 3 - 86 * (936 / 2)'
    print('The tokens are: {}'.format(tokenbystring(sample)))


if __name__ == "__main__":
    main()
|
[
"cicciodb@hotmail.it"
] |
cicciodb@hotmail.it
|
c69d55d3f7500378e3a928dff4e8a0e47d70916b
|
09db0d94ef90ff4df3b17cf8d9c2cca7f79b2c65
|
/buffer.py
|
317b3835a2a7a73b712441fc4f3f631cdf1c3eb1
|
[] |
no_license
|
tgbugs/desc
|
5e17e7e35445908b14c7cbaed766764bb3cbab6b
|
b68a07af90f87f55c4b5be6ff433f310a0bc7e2c
|
refs/heads/master
| 2020-04-09T12:20:02.650756
| 2019-05-08T07:34:29
| 2019-05-08T07:34:29
| 20,045,270
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
#!/usr/bin/env python3.4
""" Example for how to load vertex data from numpy directly
"""
import numpy as np
from panda3d.core import Geom, GeomVertexFormat, GeomVertexData
from .util.ipython import embed
size = 1000
# size x 3 random integer vertex positions in [0, 1000).
data = np.random.randint(0,1000,(size,3))
#color = np.random.randint(0,255,(size,4))
# One random RGBA colour, repeated for every vertex.
color = np.repeat(np.random.randint(0,255,(1,4)), size, 0)
#full = np.hstack((data,color))
# List of per-vertex (x, y, z, r, g, b, a) tuples — the structured layout
# expected when assigning into the vertex array below.
full = [tuple(d) for d in np.hstack((data,color))]
#full = [tuple(*d,*color) for d in data]
# V3c4 format: 3-component position + 4-component colour per row.
geom = GeomVertexData('points', GeomVertexFormat.getV3c4(), Geom.UHDynamic)
geom.setNumRows(len(full))
array = geom.modifyArray(0) # need a writeable version
handle = array.modifyHandle()
#options are then the following:
# Zero-copy path: view the vertex array's buffer through numpy and fill it.
view = memoryview(array)
arr = np.asarray(view)
arr[:] = full
# Drop into an interactive IPython shell for inspection.
embed()
#OR
#handle.copyDataFrom('some other handle to a GVDA')
#handle.copySubataFrom(to_start, to_size, buffer, from_start, from_size)
|
[
"tgbugs@gmail.com"
] |
tgbugs@gmail.com
|
b7dd7a197154d308863a5d0f9d1d548a6a166d6e
|
dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5
|
/eggs/plone.app.controlpanel-2.1.1-py2.7.egg/plone/app/controlpanel/skins.py
|
a649d961b9669e9e19a497770d9f1e3f809ad3e2
|
[] |
no_license
|
nacho22martin/tesis
|
ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5
|
e137eb6225cc5e724bee74a892567796166134ac
|
refs/heads/master
| 2020-12-24T13:20:58.334839
| 2013-11-09T12:42:41
| 2013-11-09T12:42:41
| 14,261,570
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,314
|
py
|
from zope.interface import Interface
from zope.component import adapts
from zope.formlib.form import FormFields
from zope.interface import implements
from zope.schema import Bool
from zope.schema import Choice
from Products.CMFCore.utils import getToolByName
from Products.CMFDefault.formlib.schema import SchemaAdapterBase
from Products.CMFPlone import PloneMessageFactory as _
from Products.CMFPlone.interfaces import IPloneSiteRoot
from form import ControlPanelForm
from widgets import DropdownChoiceWidget
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
# Human-readable (translatable) labels mapped to the stored icon-visibility
# setting values.
ICON_VISIBILITY_CHOICES = {
    _(u"Only for users who are logged in"): 'authenticated',
    _(u"Never show icons"): 'disabled',
    _(u"Always show icons"): 'enabled',
}
# SimpleTerm(value, token, title): the stored string doubles as the token,
# the label is the display title.
ICON_VISIBILITY_VOCABULARY = SimpleVocabulary(
    [SimpleTerm(v, v, k) for k, v in ICON_VISIBILITY_CHOICES.items()]
)
class ISkinsSchema(Interface):
    """Schema for the theme/skins control panel form.

    Each field is surfaced as one setting on the "Theme settings"
    control panel; the adapter below maps them onto portal tools.
    """
    theme = Choice(title=_(u'Default theme'),
                  description=_(u'''Select the default theme for the site.'''),
                  required=True,
                  missing_value=tuple(),
                  vocabulary="plone.app.vocabularies.Skins")
    mark_special_links = Bool(title=_(u'Mark external links'),
                              description=_(u"If enabled all external links "
                                             "will be marked with link type "
                                             "specific icons."),
                              default=True)
    ext_links_open_new_window = Bool(title=_(u"External links open in new "
                                              "window"),
                                     description=_(u"If enabled all external "
                                                    "links in the content "
                                                    "region open in a new "
                                                    "window."),
                                     default=False)
    icon_visibility = Choice(title=_(u'Show content type icons'),
                             description=_(u"If disabled the content icons "
                                            "in folder listings and portlets "
                                            "won't be visible."),
                             vocabulary=ICON_VISIBILITY_VOCABULARY)
    use_popups = Bool(title=_(u'Use popup overlays for simple forms'),
                      description=_(u"If enabled popup overlays will be "
                                     "used for simple forms like login, "
                                     "contact and delete confirmation."),
                      default=True)
class SkinsControlPanelAdapter(SchemaAdapterBase):
    """Adapts the Plone site root to :class:`ISkinsSchema`.

    Each schema field is implemented as a property that reads/writes the
    relevant portal tool (portal_skins, portal_javascripts,
    site_properties), so the control panel form edits live site state.
    """
    adapts(IPloneSiteRoot)
    implements(ISkinsSchema)
    def __init__(self, context):
        super(SkinsControlPanelAdapter, self).__init__(context)
        # Cache the tools the property accessors below operate on.
        self.context = getToolByName(context, 'portal_skins')
        self.jstool = getToolByName(context, 'portal_javascripts')
        self.csstool = getToolByName(context, 'portal_css')
        self.ksstool = getToolByName(context, 'portal_kss')
        ptool = getToolByName(context, 'portal_properties')
        self.props = ptool.site_properties
        # Set by set_theme(); read by the form's _on_save() to decide
        # whether a page refresh is needed.
        self.themeChanged = False
    def get_theme(self):
        """Return the current default skin name."""
        return self.context.getDefaultSkin()
    def set_theme(self, value):
        # Record the change so the form can force a page reload.
        self.themeChanged = True
        self.context.default_skin = value
    theme = property(get_theme, set_theme)
    def _update_jsreg_mark_special(self):
        # mark_special_links.js is needed when either feature is on.
        self.jstool.getResource('mark_special_links.js').setEnabled(
            self.mark_special_links or self.ext_links_open_new_window
        )
        self.jstool.cookResources()
    def get_mark_special_links(self):
        # Stored as the string 'true'/'false' in site_properties.
        msl = getattr(self.props, 'mark_special_links', False)
        if msl == 'true':
            return True
        return False
        # return self.jstool.getResource('mark_special_links.js').getEnabled()
    def set_mark_special_links(self, value):
        if value:
            mark_special_links='true'
        else:
            mark_special_links='false'
        # Add the property on first use, update it afterwards.
        if self.props.hasProperty('mark_special_links'):
            self.props.manage_changeProperties(mark_special_links=mark_special_links)
        else:
            self.props.manage_addProperty('mark_special_links', mark_special_links, 'string')
        self._update_jsreg_mark_special()
    mark_special_links = property(get_mark_special_links,
                                  set_mark_special_links)
    def get_ext_links_open_new_window(self):
        # Also stored as the string 'true'/'false'.
        elonw = self.props.external_links_open_new_window
        if elonw == 'true':
            return True
        return False
    def set_ext_links_open_new_window(self, value):
        if value:
            self.props.manage_changeProperties(external_links_open_new_window='true')
        else:
            self.props.manage_changeProperties(external_links_open_new_window='false')
        self._update_jsreg_mark_special()
    ext_links_open_new_window = property(get_ext_links_open_new_window,
                                         set_ext_links_open_new_window)
    def get_icon_visibility(self):
        return self.props.icon_visibility
    def set_icon_visibility(self, value):
        self.props.manage_changeProperties(icon_visibility=value)
    icon_visibility = property(get_icon_visibility,set_icon_visibility)
    def get_use_popups(self):
        # Popups are controlled purely by enabling/disabling the script.
        return self.jstool.getResource('popupforms.js').getEnabled()
    def set_use_popups(self, value):
        self.jstool.getResource('popupforms.js').setEnabled(value)
        self.jstool.cookResources()
    use_popups = property(get_use_popups, set_use_popups)
class SkinsControlPanel(ControlPanelForm):
    """The "Theme settings" control panel form itself."""
    form_fields = FormFields(ISkinsSchema)
    # Use a dropdown rather than the default radio widget for themes.
    form_fields['theme'].custom_widget = DropdownChoiceWidget
    label = _("Theme settings")
    description = _("Settings that affect the site's look and feel.")
    form_name = _("Theme settings")
    def _on_save(self, data=None):
        # Force a refresh of the page so that a new theme choice fully takes
        # effect.
        if not self.errors and self.adapters['ISkinsSchema'].themeChanged:
            self.request.response.redirect(self.request.URL)
|
[
"ignacio@plone.(none)"
] |
ignacio@plone.(none)
|
9ee36689f1628a59d8a7f28c1af469ca7adedfe2
|
b5e15fc6fe0132f18c72a1bf035b3edab618e35c
|
/microfinance/project_data/helpers.py
|
4e75b923715a09285f8ea6047a5c9c702562fcbf
|
[] |
no_license
|
Jubair70/BRAC-Customer-Service-Assisstant
|
ced72b4c81e0f4670c4be9efdb7d0d113f285b28
|
fe35de8b96e2d8a44bf8ed811faa628ea27861d2
|
refs/heads/master
| 2021-06-27T06:38:35.239131
| 2020-01-13T05:17:48
| 2020-01-13T05:17:48
| 233,516,095
| 0
| 0
| null | 2021-06-10T22:28:56
| 2020-01-13T05:12:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 608
|
py
|
import paho.mqtt.client as mqtt
from microfinance.settings import MQTT_SERVER_PATH, MQTT_SERVER_PORT
def send_push_msg(topic = "/CSA/1/11111", payload = None, qos = 1, retained = False):
    """Publish *payload* to *topic* on the configured MQTT broker.

    Opens a fresh anonymous, clean-session client for each call,
    publishes a single message and disconnects.

    NOTE(review): disconnect() is called immediately after publish()
    without running the client network loop -- confirm that QoS-1
    delivery actually completes before the connection is torn down.
    """
    # MQTT_SERVER_PATH = "192.168.22.114"
    # MQTT_SERVER_PORT = 1884
    # MQTT_SUBSCRIBE_TOKEN = "/CSA/1/11111"
    # MQTT_SERVER_RESPONSE = "response from view=> ayayayayya :)"
    mqttc = mqtt.Client("",True)  # empty client id, clean_session=True
    mqttc.connect(MQTT_SERVER_PATH, MQTT_SERVER_PORT,100)  # keepalive 100s
    print "sending.. token: %s: response text: %s" % (topic, payload)
    mqttc.publish(topic, payload, qos , retained)
    mqttc.disconnect()
|
[
"jubair@mpower-social.com"
] |
jubair@mpower-social.com
|
e7bc5b408596623a5bf610c7bba934e4da24efab
|
197420c1f28ccb98059888dff214c9fd7226e743
|
/elements, blocks and directions/classes/class5_A_funcs.py
|
3f2a7d6da1786dea286652d45ddc788ab0d67f48
|
[] |
no_license
|
Vovanuch/python-basics-1
|
fc10b6f745defff31364b66c65a704a9cf05d076
|
a29affec12e8b80a1d3beda3a50cde4867b1dee2
|
refs/heads/master
| 2023-07-06T17:10:46.341121
| 2021-08-06T05:38:19
| 2021-08-06T05:38:19
| 267,504,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
''' class A '''
class A:
    """Teaching example: class attributes vs. instance attributes.

    ``foo`` rebinds the shared class attribute ``A.val``; ``bar`` reads
    ``val`` (falling back to the class attribute on first use) and
    stores the incremented result on the instance, shadowing ``A.val``.
    """
    val = 1
    def foo(self):
        # Mutates the class-level counter, visible to all instances
        # that have not shadowed it.
        A.val = A.val + 2
    def bar(self):
        # First read resolves to A.val; the assignment then creates an
        # instance attribute that shadows the class one.
        self.val = self.val + 1
a = A()
b = A()
a.bar()  # creates instance attribute a.val == 2, shadowing A.val
a.foo()  # bumps the class attribute: A.val becomes 3
c = A()
print(a.val)  # 2 -- the instance attribute wins over the class one
print(b.val)  # 3 -- b has no instance attribute, reads A.val
print(c.val)  # 3 -- likewise reads the (already updated) A.val
|
[
"vetohin.vladimir@gmail.com"
] |
vetohin.vladimir@gmail.com
|
73a34062044e8bbacbf5e735782bef6c3a6cbc5a
|
85df75bec1ea604c21db36b8892c90e0d7b7574f
|
/armstrong/core/arm_layout/utils.py
|
c7bb882623ba0881a93e8ae89a446d49251f0d1a
|
[
"Apache-2.0"
] |
permissive
|
niran/armstrong.core.arm_layout
|
a569a64f84085b55509b26c004a9a41af3952047
|
229106581439c370ba51b1395e5e5e4db111a0bc
|
refs/heads/master
| 2021-01-16T19:29:16.017160
| 2012-03-16T16:29:58
| 2012-03-16T16:29:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
def get_layout_template_name(model, name):
    """Build the list of candidate layout template paths for *model*.

    Walks the instance's class MRO (most specific class first) so that
    a template registered for a base model class serves as a fallback.
    Classes without Django ``_meta`` (e.g. plain mixins, ``object``)
    are skipped.
    """
    return [
        "layout/%s/%s/%s.html" % (klass._meta.app_label,
                                  klass._meta.object_name.lower(),
                                  name)
        for klass in model.__class__.mro()
        if hasattr(klass, "_meta")
    ]
def render_model(object, name, dictionary=None, context_instance=None):
    """Render *object* with its layout template *name*; return safe HTML.

    The template is located via :func:`get_layout_template_name`, and
    the object itself is injected into the template context as
    ``object``.
    """
    dictionary = dictionary or {}
    dictionary["object"] = object
    candidates = get_layout_template_name(object, name)
    rendered = render_to_string(candidates, dictionary=dictionary,
                                context_instance=context_instance)
    return mark_safe(rendered)
|
[
"development@domain51.com"
] |
development@domain51.com
|
60c721e6c7d21277963b95af8fdc2aa107b72302
|
21df7cd93e156af8357596143792c22b44e14747
|
/regression/SimpleLinearRegression.py
|
963a2797127498672b735dbc7c59e572c6b024fa
|
[] |
no_license
|
yanyongyong/machineLearn
|
0cac90c1d0b4f7021e3f9ca658268f3c433b481f
|
d77a13f83679ba4b06bf24c6c6019dc2af55986f
|
refs/heads/master
| 2021-09-03T08:25:33.933996
| 2018-01-07T14:15:52
| 2018-01-07T14:15:52
| 107,839,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 546
|
py
|
import numpy as np
#简单的线性回归
def fitSLR(x, y):
    """Fit a simple linear regression y = b0 + b1*x by least squares.

    Parameters
    ----------
    x, y : equal-length sequences of numbers.  Needs at least two
        points with non-constant x, otherwise the slope is undefined
        (division by zero).

    Returns
    -------
    (b0, b1) : intercept and slope of the fitted line.
    """
    n = len(x)
    # Hoisted: the original recomputed np.mean(x)/np.mean(y) on every
    # loop iteration, making the fit accidentally O(n^2).
    x_mean = np.mean(x)
    y_mean = np.mean(y)
    numerator = 0.0    # sum of (x_i - x_mean) * (y_i - y_mean)
    denominator = 0.0  # sum of (x_i - x_mean)**2
    for i in range(0, n):
        numerator += (x[i] - x_mean) * (y[i] - y_mean)
        denominator += (x[i] - x_mean) ** 2
    b1 = numerator / float(denominator)
    b0 = y_mean - b1 * x_mean
    return b0, b1
def predict(x, bo, b1):
    """Return the regression estimate b0 + b1 * x for input *x*."""
    estimate = bo + x * b1
    return estimate
# Fit a line to five sample points and predict the value at x = 8.
x = [1,3,2,1,3]
y = [14,24,18,17,27]
b0,b1 = fitSLR(x,y)
x_test = 8
y_test = predict(8,b0,b1)  # NOTE(review): x_test is defined above but the literal 8 is passed
print(y_test)
|
[
"123456"
] |
123456
|
465b87dd2605a4e591b7693d9ff7ef6ed379c2e6
|
f39c2c500873180d953ab9a7b22a4f6df95fb1c3
|
/Amazon/Pyramid Transition Matrix.py
|
24f0dd8cd85fc636e5f6ed3c3ff56adc101c0a4e
|
[] |
no_license
|
Jason003/interview
|
458516f671d7da0d3585f89b098c5370edcd9f04
|
e153306b85c3687b23a332812a0885d25ecce904
|
refs/heads/master
| 2021-07-15T15:28:07.175276
| 2021-02-05T03:21:59
| 2021-02-05T03:21:59
| 224,898,150
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
'''
Input: bottom = "BCD", allowed = ["BCG", "CDE", "GEA", "FFF"]
Output: true
Explanation:
We can stack the pyramid like this:
A
/ \
G E
/ \ / \
B C D
We are allowed to place G on top of B and C because BCG is an allowed triple. Similarly, we can place E on top of C and D, then A on top of G and E.
'''
import collections
class Solution:
    def pyramidTransition(self, bottom: str, allowed: List[str]) -> bool:
        """Return True if a full pyramid can be stacked on *bottom*.

        Each adjacent pair of blocks may only be topped by the letters
        that *allowed* lists for that pair.  Depth-first search tries
        every permitted letter for each position, building the next row
        left to right and recursing on it once the current row is done.
        """
        # Map each two-letter base to the set of letters allowed on top.
        tops = collections.defaultdict(set)
        for triple in allowed:
            tops[triple[:2]].add(triple[2])
        def dfs(row, i, next_row):
            # A single block left means the apex has been reached.
            if len(row) == 1:
                return True
            # Current row consumed: recurse on the row built above it.
            if i == len(row) - 1:
                return dfs(next_row, 0, '')
            pair = row[i:i + 2]
            # Try every letter allowed on this pair; an empty set for an
            # unseen pair makes this branch fail immediately.
            return any(dfs(row, i + 1, next_row + top)
                       for top in tops[pair])
        return dfs(bottom, 0, '')
|
[
"jiefanli97@gmail.com"
] |
jiefanli97@gmail.com
|
ae60e9b1424a37519eecbabcfeb13e32fe0e0f59
|
df1348a67a54fa530f620ba1145c34d914710fde
|
/examples/sandbox/sandbox_export.py
|
0279085899b7e8a7bfb5c5464169c3afb8f28481
|
[
"MIT"
] |
permissive
|
SilverLabUCL/netpyne
|
bf00991cec1ca44c44476e0a0fff2a15bc28b08c
|
72ce78d8c79c060d44513bafa7843756ee06cc45
|
refs/heads/master
| 2020-07-12T12:45:39.959342
| 2016-11-16T10:26:23
| 2016-11-16T10:26:23
| 73,908,592
| 0
| 0
| null | 2016-11-16T10:21:48
| 2016-11-16T10:21:48
| null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
import sandbox # import parameters file
from netpyne import sim # import netpyne sim module
# Build the network from the parameter/config objects defined in
# sandbox.py and write it out as a NeuroML 2 document named 'sandbox'.
sim.createExportNeuroML2(netParams = sandbox.netParams,
                        simConfig = sandbox.simConfig,
                        reference = 'sandbox')  # create and export network to NeuroML 2
|
[
"salvadordura@gmail.com"
] |
salvadordura@gmail.com
|
e0c05f71ba2c1ec6b84d1cee2e49a9f3fd585618
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_clutched.py
|
e70757cfd80eae64be874dd7819e132a2b0a95da
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from xai.brain.wordbase.nouns._clutch import _CLUTCH
#calss header
class _CLUTCHED(_CLUTCH, ):
    """Auto-generated wordbase entry: "clutched", a form of "clutch"."""
    def __init__(self,):
        _CLUTCH.__init__(self)
        self.name = "CLUTCHED"   # surface form of the word
        self.specie = 'nouns'    # part-of-speech bucket this entry lives in
        self.basic = "clutch"    # lemma / base form
        self.jsondata = {}       # no extra data for this derived entry
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
59d94563bfa6f5937003f4b1bdd3072c24cc7d4c
|
e9f111b913255e2a8963556a638017c6c4729f01
|
/randomize_four_digits.py
|
0384492097a4e58757931549c4dab66f38246c1c
|
[] |
no_license
|
dojinkimm/daddy
|
d609c38333358a6119ad71b4c89f418ae8c071eb
|
77e79324da3e7deb11d0a045d888e432a499d388
|
refs/heads/master
| 2023-01-31T08:21:26.544482
| 2020-12-15T12:25:26
| 2020-12-15T12:25:26
| 285,579,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,668
|
py
|
"""
문장 리스트에서 4자리 혹은 3자리 숫자를 찾아서 랜덤 숫자로 변경해주는 GUI 프로그램
GUI Program that finds four digit or three digit number in a list of sentences,
and changes to random number
"""
import random
import re
import pandas as pd
import PySimpleGUI as sg
def arg_parse():
    """Show the input dialog and return the values the user entered.

    Returns the PySimpleGUI ``values`` dict:
      [0] the sentences to process, one per line,
      [1] the digit-run length to replace ("3" or "4", may be empty),
      [2] the output file name (without extension, may be empty).
    Exits the process when the window is closed or Cancel is pressed.
    """
    layout = [
        [sg.Text("문장을 입력하세요", size=(25, 1))],
        [sg.InputText()],
        [sg.Text("변경할 숫자의 길이를 입력해주세요")],
        [sg.InputText()],
        [sg.Text("저장할 파일의 이름을 입력하세요")],
        [sg.InputText()],
        [sg.Submit(), sg.Cancel()],
    ]
    window = sg.Window("문장 숫자 랜덤 생성기", layout)
    event, values = window.read()
    window.close()
    if event is None or event == "Cancel":
        exit()
    return values
# Main script: replace 3- or 4-digit runs in each phrase with a random
# number of the same length and write the results to a CSV file.
args = arg_parse()
phrases = args[0].split("\n")
digit = args[1]
file_name = args[2] + ".csv"
if args[2] == "":
    file_name = "test.csv"
generated_words = []
# Default: match a 4-digit run not followed by a unit suffix, or a
# 4-digit run at end of string.  (NOTE(review): [^kg|^Kg|...] is a
# negated character *class*, not the alternation it resembles --
# confirm it filters units as intended on real data.)
digit_regexp = "\d\d\d\d((?=[^kg|^Kg|^ml|^cm|^mm|^MM|^WT]))|\d\d\d\d$"
if digit != "" and int(digit) == 3:
    # BUG FIX: the 3-digit branch previously reassigned the identical
    # 4-digit pattern, so requesting length 3 still matched 4 digits.
    digit_regexp = "\d\d\d((?=[^kg|^Kg|^ml|^cm|^mm|^MM|^WT]))|\d\d\d$"
for p in phrases:
    if p == "":
        continue
    match = re.search(digit_regexp, p)
    if match is None:
        # No digit run found: keep the phrase unchanged.
        generated_words.append(p)
        continue
    rand = random.randint(1000, 9999)
    if digit != "" and int(digit) == 3:
        rand = random.randint(100, 999)
    # NOTE(review): seeding *after* randint means the seed influences
    # the next phrase's number, not this one -- confirm this is intended.
    random.seed(p)
    new_p = re.sub(digit_regexp, str(rand), p)
    generated_words.append(new_p)
df = pd.DataFrame(generated_words)
df.to_csv(file_name, encoding="utf-8-sig")
|
[
"dojinkim119@gmail.com"
] |
dojinkim119@gmail.com
|
125c76db9f1f9f7db1a60cc1fac82e87519e6ac9
|
c342df24a9e2a94c5b952b57d73e45ee35adea80
|
/dqn_bullet_cartpole.py
|
f1a52ad5b053b903b878f9a354642da5683ba6ec
|
[
"MIT"
] |
permissive
|
vyraun/cartpoleplusplus
|
4b652d4ba0210e5abdb78931153d6076839cf6df
|
87c0f1b896e6d6919c4dbfcd0bf4306f807b60ef
|
refs/heads/master
| 2020-12-31T02:48:49.650551
| 2016-08-29T03:29:05
| 2016-08-29T03:29:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
#!/usr/bin/env python
# copy pasta from https://github.com/matthiasplappert/keras-rl/blob/master/examples/dqn_cartpole.py
# with some extra arg parsing
import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
import bullet_cartpole
import argparse
# Command-line options for training/evaluating the DQN agent.
parser = argparse.ArgumentParser()
parser.add_argument('--gui', action='store_true')
parser.add_argument('--initial-force', type=float, default=55.0,
                    help="magnitude of initial push, in random direction")
parser.add_argument('--action-force', type=float, default=50.0,
                    help="magnitude of action push")
parser.add_argument('--num-train', type=int, default=100)
parser.add_argument('--num-eval', type=int, default=0)
parser.add_argument('--load-file', type=str, default=None)
parser.add_argument('--save-file', type=str, default=None)
parser.add_argument('--delay', type=float, default=0.0)
opts = parser.parse_args()
print "OPTS", opts
ENV_NAME = 'BulletCartpole'
# Get the environment and extract the number of actions.
env = bullet_cartpole.BulletCartpole(gui=opts.gui, action_force=opts.action_force,
                                     initial_force=opts.initial_force, delay=opts.delay)
nb_actions = env.action_space.n
# Next, we build a very simple model: one 32-unit tanh hidden layer
# followed by a linear Q-value output per action.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(32))
model.add(Activation('tanh'))
#model.add(Dense(16))
#model.add(Activation('relu'))
#model.add(Dense(16))
#model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Replay buffer and Boltzmann (softmax over Q-values) exploration.
memory = SequentialMemory(limit=50000)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
               target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
if opts.load_file is not None:
  print "loading weights from from [%s]" % opts.load_file
  dqn.load_weights(opts.load_file)
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=opts.num_train, visualize=True, verbose=2)
# After training is done, we save the final weights.
if opts.save_file is not None:
  print "saving weights to [%s]" % opts.save_file
  dqn.save_weights(opts.save_file, overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=opts.num_eval, visualize=True)
|
[
"matthew.kelcey@gmail.com"
] |
matthew.kelcey@gmail.com
|
a2ee918ee914a6a2440aeba1db575f22ba3e78bf
|
458b1133df5b38a017f3a690a624a54f0f43fda7
|
/PaperExperiments/XHExp041/parameters.py
|
62f97ccd29ad9f45eebb6360c8de059e6a0f209d
|
[
"MIT"
] |
permissive
|
stefan-c-kremer/TE_World2
|
9c7eca30ee6200d371183c5ba32b3345a4cc04ee
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
refs/heads/master
| 2020-12-18T14:31:00.639003
| 2020-02-04T15:55:49
| 2020-02-04T15:55:49
| 235,413,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,604
|
py
|
# parameters.py
"""
Exp 41 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.15, 0, 0.55, 1, 0.30, 2', 'TE_Insertion_Distribution': 'Flat()', 'Carrying_capacity': '30', 'TE_excision_rate': '0.1', 'Junk_BP': '1.4', 'Gene_Insertion_Distribution': 'Flat()', 'mutation_effect': '0.10', 'TE_death_rate': '0.0005'}
"""
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
# Per-event console logging switches consumed by TESim.py.
output = {
    "SPLAT": False,
    "SPLAT FITNESS": False,
    "INITIALIZATION": False,
    "GENERATION": True,
    "HOST EXTINCTION": True,
    "TE EXTINCTION": True,
    "TRIAL NO": True,
    "GENE INIT": False,
    "TE INIT": False,
};
# Where new TEs / genes land on the genome (uniform in this experiment).
TE_Insertion_Distribution = Flat();
Gene_Insertion_Distribution = Flat();
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.0005;
TE_excision_rate = 0.1; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.15, 0, 0.55, 1, 0.30, 2 );
Initial_genes = 5000;
Append_gene = True; # True: when the intialization routine tries to place
                    # a gene inside another gene, it instead appends it
                    # at the end of the original gene (use this with small
                    # amounts of Junk_BP).
                    # False: when the intialization routine tries to place
                    # a gene inside another gene, try to place it somewhere
                    # else again (don't use theis option with samll amounts
                    # of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 1.4 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.30;
# Outcome distribution for a host mutation event (prob, fitness-map).
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
                                  0.30, lambda fit: fit - random.random()*0.10,
                                  0.15, lambda fit: fit,
                                  0.15, lambda fit: fit + random.random()*0.10
                                  );
# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
                                    0.20, lambda fit: fit - random.random()*0.10,
                                    0.30, lambda fit: fit,
                                    0.20, lambda fit: fit + random.random()*0.10
                                    );
Carrying_capacity = 30;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
             # set "randomly"
save_frequency = 50; # Frequency with with which to save state of experiment
saved = None; # if saved = None then we start a new simulation from scratch
              # if saves = string, then we open that file and resume a simulation
|
[
"stefan@kremer.ca"
] |
stefan@kremer.ca
|
a03230f460994f28b677a293aea19162a7708eb2
|
8ff12c53e31f134b9f39f59b9a6f7d4f9142cea7
|
/lvlist/teacherPython/lianxi.py
|
bf2cc0047479923ed84cd01299189d22e12ed361
|
[] |
no_license
|
quhuohuo/python
|
5b0a80dbec7d22a0b274e4a32d269e85d254718c
|
5732c5974519da8e8919dab42b36ab0ab2c99b37
|
refs/heads/master
| 2021-06-13T11:41:12.356329
| 2017-04-07T08:58:05
| 2017-04-07T08:58:05
| 75,054,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
#!/usr/bin/python
def fun(char):
    """Return *char* with every space character removed."""
    return char.replace(" ", "")
# Read lines until an empty one is entered, showing each line before
# and after space removal (Python 2: raw_input / print statements).
while True:
    s = raw_input()
    if not len(s):
        break
    print "before:",s
    s = fun(s)
    print "after:",s
|
[
"813559081@qq.com"
] |
813559081@qq.com
|
19774af108915387eb5f2ee44608d270c5137efc
|
6d4d69c91eb14150778468f7cf73d8e2a9aa9124
|
/t/variant/read_expresses_del.py
|
a0eba72d31d3b41050cde4c2746b8eee0690634a
|
[] |
no_license
|
phonybone/Nof1
|
847acf7ce785319590f99271d20b7c126b59b699
|
22d877a96cd4481fdb7bf860c4d0721fcb34ddbe
|
refs/heads/master
| 2021-01-23T13:56:53.606351
| 2013-09-20T23:50:30
| 2013-09-20T23:50:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
import unittest, sys, os
from warnings import warn
libdir=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','..','lib'))
sys.path.append(libdir)
from variant import *
class TestExpressesSNP(unittest.TestCase):
    """Tests Variant.is_expressed_in_seq for a DEL-type variant."""
    def setUp(self):
        # Python 2 bare print: emits a blank line between test cases.
        print
    def test_expresses_del(self):
        # DEL variant at chr1:3827-3836; NOTE(review): positional
        # argument meanings inferred from values -- confirm against
        # Variant's constructor signature.
        var=Variant('ABC', 23, 'center', 'hg45', 'chr1', 3827, 3836, '+', 'Missense_Mutation', 'DEL',
                    'GTATCCGTCA', 'GTATCCGTCA', '')
        seq='AAAAACCGAGCCCGGGGGTT'*4 # note presence of 'GAG' at correct location
        pos=3820 # has to encompass variant position of 3829
        self.assertTrue(var.is_expressed_in_seq(seq, pos))
        seq='AAAAACGGTATCCGTCAAGC'*4 # note presence of 'GAG' at incorrect location
        self.assertFalse(var.is_expressed_in_seq(seq, pos))
#-----------------------------------------------------------------------
# Run the suite directly (verbose output) when invoked as a script.
if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(TestExpressesSNP)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"phonybone@gmail.com"
] |
phonybone@gmail.com
|
c310f33e1c8dbb6251814466ec5e07be15b0a61f
|
637fe43cb3b858be426e9b9ce10485430ae1f146
|
/fsleyes/gl/gl14/glmask_funcs.py
|
3d9bfb590cf1c45bff2b61b253fd436eaac571e6
|
[
"BSD-3-Clause",
"CC-BY-3.0",
"Apache-2.0"
] |
permissive
|
laurenpan02/fsleyes
|
9dda45c1b1b77f0f042488ddf40fed46e5c77360
|
eed8940d422994b6c1f1787381ebac2361b81408
|
refs/heads/master
| 2023-03-11T16:49:16.994945
| 2021-02-25T18:07:39
| 2021-02-25T18:07:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
#!/usr/bin/env python
#
# glmask_funcs.py - OpenGL 1.4 functions used by the GLMask class.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module provides functions which are used by the :class:`.GLMask`
class to render :class:`.Image` overlays in an OpenGL 1.4 compatible manner.
"""
import fsleyes.gl.shaders as shaders
from . import glvolume_funcs
def init(self):
    """Calls the :func:`compileShaders` and :func:`updateShaderState`
    functions.
    """
    # Attribute created here; compileShaders replaces it with a real
    # ARBPShader instance.
    self.shader = None
    compileShaders( self)
    updateShaderState(self)
def destroy(self):
    """Destroys the shader programs. """
    self.shader.destroy()
    # Drop the reference so a later compileShaders starts fresh.
    self.shader = None
def compileShaders(self):
    """Loads the vertex/fragment shader source code, and creates a
    :class:`.ARBPShader` program.
    """
    # Replace any previously compiled program.
    if self.shader is not None:
        self.shader.destroy()
    vertSrc = shaders.getVertexShader( 'glvolume')
    fragSrc = shaders.getFragmentShader('glmask')
    # The image texture is bound to texture unit 0.
    textures = {
        'imageTexture' : 0,
    }
    self.shader = shaders.ARBPShader(vertSrc,
                                     fragSrc,
                                     shaders.getShaderDir(),
                                     textures)
def updateShaderState(self):
    """Updates all shader program variables. """
    if not self.ready():
        return
    shader = self.shader
    opts = self.opts
    colour = self.getColour()
    threshold = list(self.getThreshold())
    # The +/-1 flag carries the opts.invert setting into the fragment
    # program (presumably a sign test there); the trailing 0 pads the
    # parameter to four components.
    threshold.extend([1, 0] if opts.invert else [-1, 0])
    shader.load()
    shader.setFragParam('threshold', threshold)
    shader.setFragParam('colour', colour)
    shader.unload()
    return True
def draw2D(self, zpos, axes, xform=None, bbox=None):
    """Draws a 2D slice at the given ``zpos``. Uses the
    :func:`.gl14.glvolume_funcs.draw2D` function.
    """
    # Shader program and its vertex attributes must be active for the
    # duration of the draw call.
    self.shader.load()
    self.shader.loadAtts()
    glvolume_funcs.draw2D(self, zpos, axes, xform, bbox)
    self.shader.unloadAtts()
    self.shader.unload()
def drawAll(self, axes, zposes, xforms):
    """Draws all specified slices. Uses the
    :func:`.gl14.glvolume_funcs.drawAll` function.
    """
    # Same load/unload bracket as draw2D, but for a batch of slices.
    self.shader.load()
    self.shader.loadAtts()
    glvolume_funcs.drawAll(self, axes, zposes, xforms)
    self.shader.unloadAtts()
    self.shader.unload()
|
[
"pauldmccarthy@gmail.com"
] |
pauldmccarthy@gmail.com
|
7641a1c1f9068abb40afb542114f32591bf63472
|
f645ebae84e973cb42cffbe7f1d112ff2e3b0597
|
/no/edgebox_final/edgebox_final/settings.py
|
8cc80e236c92caef201e903858278cbcd6d1bf38
|
[] |
no_license
|
bopopescu/file_trans
|
709ce437e7aa8ce15136aa6be2f5d696261c30bd
|
fadc3faf6473539ed083ccd380df92f43115f315
|
refs/heads/master
| 2022-11-19T18:54:17.868828
| 2020-03-11T04:30:41
| 2020-03-11T04:30:41
| 280,964,974
| 0
| 0
| null | 2020-07-19T22:57:41
| 2020-07-19T22:57:40
| null |
UTF-8
|
Python
| false
| false
| 3,754
|
py
|
"""
Django settings for edgebox_final project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i(67r0=ud0l6ti(1sr&d0)m6fl6+_^bus41y&h92%i_ynp(-ov'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): "*" accepts any Host header -- restrict to the real
# host names before exposing this service publicly.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
    "Agent",
    "Device",
    "Drive",
    "SmartDevice",
    'djcelery',
    'rest_framework',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection is disabled here -- confirm this is
    # intentional for the deployed API.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'edgebox_final.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'edgebox_final.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Redis-backed default cache (db 10 on the local Redis instance).
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://127.0.0.1:6379/10",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        }
    }
}
# Pagination / DRF settings
REST_FRAMEWORK = {
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.NamespaceVersioning', # enable DRF namespace-based versioning
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 5
}
from .celery_config import *
|
[
"871488533@qq.com"
] |
871488533@qq.com
|
bed8fdd79205932c1f16505cfd9077aa45156f68
|
2d9a17e2b896d2f6a90913a4ba02d41f0ede5dd0
|
/_58job/page_store.py
|
ab4764e39b62286c71dc816045dbe148722d6785
|
[] |
no_license
|
wolfwhoami/xxxxx
|
1cf2ed2c8ed78048d87cccf2953ca86c0871a783
|
670787ec71127bc05c1645cc3d8ef7c3a91fe84b
|
refs/heads/master
| 2020-03-30T00:44:55.864817
| 2016-12-16T01:45:03
| 2016-12-16T01:45:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,600
|
py
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
from spider.ipin.savedb import PageStoreBase
from spider.runtime import Log
from spider.util import htmlfind
from spider.util import TimeHandler
import spider
import time
import re
class Jd58PageStore(PageStoreBase):
    """Page store for 58.com job-detail pages (namespace 'jd_58job')."""
    def __init__(self):
        super(Jd58PageStore, self).__init__('jd_58job')
    def extract_content(self):
        """Return the job-description HTML, or None when not found."""
        content = htmlfind.findTag(self.get_cur_doc().cur_content, 'div', 'posMsg borb')
        try:
            content = htmlfind.remove_tag(content[0], 1)
        except:
            Log.errorbin("invalid jd content %s" % self.get_cur_doc().cur_url, self.get_cur_doc().cur_content)
            return None
        return content
    def page_time(self):
        """Return the page's publish time as epoch milliseconds."""
        tag = htmlfind.findTag(self.get_cur_doc().cur_content, 'ul', 'class="headTag"')
        try:
            tag = htmlfind.remove_tag(tag[0], 1)
        except:
            Log.errorbin("invalid jd pubtime %s" % self.get_cur_doc().cur_url, self.get_cur_doc().cur_content)
            raise
        if isinstance(tag, unicode):
            tag = tag.encode('utf-8')
        # "天前" means "days ago"; absent it, treat the page as fresh.
        if "天前" not in tag:
            return int(time.time() * 1000)
        else:
            # First captured number is the "N days ago" count.
            find = re.search('(\d+).*?(\d+).*?(\d+)', tag, re.S)
            if find:
                day = find.group(1)
                return TimeHandler.getTimeOfNDayBefore(day)
            raise Exception("not copy time pattern: {}".format(tag))
    def check_should_fetch(self, jobid):
        # Defers entirely to the base-class decision.
        if not super(Jd58PageStore, self).check_should_fetch(jobid):
            return False
        return True
|
[
"jianghao@ipin.com"
] |
jianghao@ipin.com
|
217bd2af0238293662a1d0bef1aaf8b835af57ff
|
a4830a0189c325c35c9021479a5958ec870a2e8b
|
/lib/pyutil/django/mixins.py
|
1f6e5aee271e26288ffc4fda4263d7ba951ea772
|
[] |
no_license
|
solutionprovider9174/steward
|
044c7d299a625108824c854839ac41f51d2ca3fd
|
fd681593a9d2d339aab0f6f3688412d71cd2ae32
|
refs/heads/master
| 2022-12-11T06:45:04.544838
| 2020-08-21T02:56:55
| 2020-08-21T02:56:55
| 289,162,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,931
|
py
|
# Django
from django.http import JsonResponse
from django.forms import BaseFormSet, formset_factory
from django.forms.models import model_to_dict
from django.views.generic.edit import FormMixin
from django.core.exceptions import ImproperlyConfigured
from django.views.generic.detail import SingleObjectTemplateResponseMixin
class JSONResponseMixin(object):
    """
    A mixin that can be used to render a JSON response.
    """
    def render_to_response(self, context, **response_kwargs):
        """
        Returns a JSON response, transforming 'context' to make the payload.
        """
        return JsonResponse(
            self.get_data(context),
            **response_kwargs
        )
    def get_data(self, context):
        """
        Returns an object that will be serialized as JSON by json.dumps().
        """
        # Note: This is *EXTREMELY* naive; in reality, you'll need
        # to do much more complex handling to ensure that arbitrary
        # objects -- such as Django model instances or querysets
        # -- can be serialized as JSON.
        return context
class JSONModelMixin(object):
    """
    A mixin that can be used to render a Model as a JSON response.
    """
    def render_to_response(self, context):
        # Serve JSON for AJAX requests or an explicit ?format=json;
        # fall back to the normal template response otherwise.
        if self.request.is_ajax() or self.request.GET.get('format') == 'json':
            return JSONResponseMixin.render_to_response(self, model_to_dict(self.get_object()))
        else:
            return SingleObjectTemplateResponseMixin.render_to_response(self, context)
class ProcessFormMixin(FormMixin):
    """
    Handles POST requests, instantiating a form instance (and an
    optional formset) with the passed POST variables and checking them
    for validity.
    """
    # Set formset_class on a subclass to enable formset handling.
    formset_class = None
    formset_extra = 0
    def get_formset_class(self):
        """Return the form class used to build the formset, or None."""
        return self.formset_class
    def form_invalid(self, form, formset):
        """Re-render the page with the bound (invalid) form/formset."""
        return self.render_to_response(self.get_context_data(form=form, formset=formset))
    def get_formset(self, formset_class=None, formset_extra=None):
        """Build and bind the formset, or return None when unconfigured."""
        if formset_class is None:
            formset_class = self.get_formset_class()
        if formset_extra is None:
            formset_extra = self.formset_extra
        if formset_class is None:
            return None
        else:
            formset = formset_factory(formset_class, extra=formset_extra)
            return formset(**self.get_form_kwargs())
    def get_context_data(self, **kwargs):
        # Make the formset available to templates alongside the form.
        if 'formset' not in kwargs:
            kwargs['formset'] = self.get_formset()
        return super(ProcessFormMixin, self).get_context_data(**kwargs)
    def post(self, request, *args, **kwargs):
        """Validate the form (and formset, when present) and dispatch.

        BUG FIX: the original fell through and returned None (an HTTP
        error in Django) when validation failed; invalid input is now
        routed to ``form_invalid`` so the page re-renders with errors.
        """
        form = self.get_form()
        formset = self.get_formset()
        if formset:
            if form.is_valid() and formset.is_valid():
                return self.form_valid(form, formset)
        else:
            if form.is_valid():
                return self.form_valid(form, None)
        return self.form_invalid(form, formset)
|
[
"guangchengwang9174@yandex.com"
] |
guangchengwang9174@yandex.com
|
4bbfd3063d60db8bdd0ba24404b6cba6e8214f32
|
d916a3a68980aaed1d468f30eb0c11bfb04d8def
|
/2021_06_14_Linked_list_cycle.py
|
2cfffe4d21e1cf1685d43336acfba01f596912c7
|
[] |
no_license
|
trinhgliedt/Algo_Practice
|
32aff29ca6dc14f9c74308af1d7eaaf0167e1f72
|
480de9be082fdcbcafe68e2cd5fd819dc7815e64
|
refs/heads/master
| 2023-07-10T23:49:16.519671
| 2021-08-11T05:11:34
| 2021-08-11T05:11:34
| 307,757,861
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
# https://leetcode.com/problems/linked-list-cycle/
# Given head, the head of a linked list, determine if the linked list has a cycle in it.
# There is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. Internally, pos is used to denote the index of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
# Return true if there is a cycle in the linked list. Otherwise, return false.
# Example 1:
# Input: head = [3,2,0,-4], pos = 1
# Output: true
# Explanation: There is a cycle in the linked list, where the tail connects to the 1st node (0-indexed).
# Example 2:
# Input: head = [1,2], pos = 0
# Output: true
# Explanation: There is a cycle in the linked list, where the tail connects to the 0th node.
# Example 3:
# Input: head = [1], pos = -1
# Output: false
# Explanation: There is no cycle in the linked list.
# Constraints:
# The number of the nodes in the list is in the range [0, 104].
# -105 <= Node.val <= 105
# pos is -1 or a valid index in the linked-list.
# Follow up: Can you solve it using O(1) (i.e. constant) memory?
# Definition for singly-linked list.
from typing import List
class ListNode:
    """A node of a singly-linked list: a payload plus a forward pointer."""
    def __init__(self, x):
        # `next` is None until the node is linked into a list.
        self.val, self.next = x, None
class Solution:
    """Floyd's tortoise-and-hare cycle detector for singly-linked lists."""
    def hasCycle(self, head: ListNode) -> bool:
        """Return True iff the list starting at *head* contains a cycle.

        O(n) time, O(1) extra memory (the follow-up requirement): the fast
        pointer advances two steps per iteration, the slow pointer one; they
        can only ever meet if the list loops back on itself.
        """
        slow = fast = head
        while slow and fast and fast.next:
            fast = fast.next.next
            slow = slow.next
            if slow == fast:
                return True
        return False
# Ad-hoc smoke test: build 1 -> 5 -> 11 -> 8 -> 9 -> (back to 5), a list
# whose tail loops into its second node, and confirm the cycle is detected.
s = Solution()
node1 = ListNode(1)
node5 = ListNode(5)
node11 = ListNode(11)
node8 = ListNode(8)
node9 = ListNode(9)
for src, dst in ((node1, node5), (node5, node11), (node11, node8), (node8, node9), (node9, node5)):
    src.next = dst
answer = s.hasCycle(node1)
print(answer)
|
[
"chuot2008@gmail.com"
] |
chuot2008@gmail.com
|
7f976f5b8142c14de1f5a2d2cbea50a1fe36c512
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/appconfiguration/azure-appconfiguration/azure/appconfiguration/aio/_sync_token_async.py
|
9d2441dc438ea9e84f222b0768eefed6c3454998
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,236
|
py
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from typing import Any, Dict
from asyncio import Lock
from azure.core.pipeline import PipelineRequest, PipelineResponse
from azure.core.pipeline.policies import SansIOHTTPPolicy
from .._sync_token import SyncToken
class AsyncSyncTokenPolicy(SansIOHTTPPolicy):
    """Pipeline policy that tracks App Configuration "Sync-Token" headers.

    Tokens harvested from responses are cached per token id (keeping the
    highest sequence number seen) and replayed on every outgoing request.
    """
    def __init__(self, **kwargs: Any) -> None:  # pylint: disable=unused-argument
        self._sync_token_header = "Sync-Token"
        self._sync_tokens = {}  # type: Dict[str, Any]
        self._lock = Lock()
    async def on_request(self, request: PipelineRequest) -> None:  # type: ignore # pylint: disable=arguments-differ, invalid-overridden-method
        """Attach all cached sync tokens to the outgoing request.

        :param request: The PipelineRequest object.
        :type request: ~azure.core.pipeline.PipelineRequest
        """
        async with self._lock:
            header_value = ",".join(str(token) for token in self._sync_tokens.values())
            if header_value:
                request.http_request.headers.update({self._sync_token_header: header_value})
    async def on_response(self, request: PipelineRequest, response: PipelineResponse) -> None:  # type: ignore # pylint: disable=arguments-differ, invalid-overridden-method
        """Harvest sync tokens from the response headers.

        :param request: The PipelineRequest object.
        :type request: ~azure.core.pipeline.PipelineRequest
        :param response: The PipelineResponse object.
        :type response: ~azure.core.pipeline.PipelineResponse
        """
        header_value = response.http_response.headers.get(self._sync_token_header)
        if not header_value:
            return
        # The header may carry several comma-separated tokens; merge each one.
        for raw_token in header_value.split(","):
            await self._update_sync_token(SyncToken.from_sync_token_string(raw_token))
    async def add_token(self, full_raw_tokens: str) -> None:
        """Parse a comma-separated token string and merge every token."""
        for raw_token in full_raw_tokens.split(","):
            await self._update_sync_token(SyncToken.from_sync_token_string(raw_token))
    async def _update_sync_token(self, sync_token: SyncToken) -> None:
        """Store *sync_token*, keeping the highest sequence number per id."""
        if not sync_token:
            return
        async with self._lock:
            current = self._sync_tokens.get(sync_token.token_id, None)
            # Replace when there is no usable entry yet, or the new token is
            # strictly newer.
            if not current or current.sequence_number < sync_token.sequence_number:
                self._sync_tokens[sync_token.token_id] = sync_token
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
ae59f02eab72110000b74d8503fae65c3fc36ecd
|
e164fd9dce5fef093f85ca009f78570ec2b1c492
|
/324. Wiggle Sort II.py
|
c63081d423ce9f82a653401f08c2dc5fb6ed93ff
|
[] |
no_license
|
havenshi/leetcode
|
58fde93a1f1cbdd3c2faa9566c00383e5812f3a7
|
bcb79f329bcb133e6421db8fc1f4780a4eedec39
|
refs/heads/master
| 2021-01-22T04:15:23.748793
| 2019-11-30T04:25:54
| 2019-11-30T04:25:54
| 92,447,327
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,541
|
py
|
# Sorting and reoder solution. (92ms)
class Solution(object):
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Sort, then interleave: even slots take the lower half reversed,
        odd slots take the upper half reversed, yielding
        nums[0] < nums[1] > nums[2] < nums[3] ...
        """
        nums.sort()
        # BUG FIX: use floor division -- plain `/` yields a float on
        # Python 3 and breaks the slice below; `//` is identical on Python 2.
        med = (len(nums) - 1) // 2
        # nums[med::-1] is the lower half reversed; nums[:med:-1] is the
        # upper half reversed.  Both slices are evaluated before assignment.
        nums[::2], nums[1::2] = nums[med::-1], nums[:med:-1]
# Time: O(n) ~ O(n^2)
# Space: O(1)
# Tri Partition (aka Dutch National Flag Problem) with virtual index solution. (TLE)
from random import randint
class Solution2(object):
    # NOTE(review): Python 2 code (`xrange`, integer `/` division) -- the
    # header comment marks this variant TLE; kept for reference only.
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Randomized quickselect: after the call, the k-th LARGEST element
        # sits at its final (descending-order) index; O(n) average time.
        def findKthLargest(nums, k):
            left, right = 0, len(nums) - 1
            while left <= right:
                pivot_idx = randint(left, right)
                new_pivot_idx = partitionAroundPivot(left, right, pivot_idx, nums)
                if new_pivot_idx == k - 1:
                    return nums[new_pivot_idx]
                elif new_pivot_idx > k - 1:
                    right = new_pivot_idx - 1
                else:  # new_pivot_idx < k - 1.
                    left = new_pivot_idx + 1
        # Lomuto-style partition around nums[pivot_idx]; elements GREATER than
        # the pivot end up on the left (we select in descending order).
        def partitionAroundPivot(left, right, pivot_idx, nums):
            pivot_value = nums[pivot_idx]
            new_pivot_idx = left
            nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
            for i in xrange(left, right):
                if nums[i] > pivot_value:
                    nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                    new_pivot_idx += 1
            nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
            return new_pivot_idx
        # Dutch-national-flag 3-way partition around `val`, performed through
        # a "virtual index": position i maps to (1 + 2*i) % N, so values
        # larger than the median land in the odd slots first, smaller values
        # fill the even slots from the back -- producing the wiggle in place.
        def reversedTriPartitionWithVI(nums, val):
            def idx(i, N):
                return (1 + 2 * (i)) % N
            # N must be odd so the virtual mapping walks all positions.
            N = len(nums) / 2 * 2 + 1
            i, j, n = 0, 0, len(nums) - 1
            while j <= n:
                if nums[idx(j, N)] > val:
                    nums[idx(i, N)], nums[idx(j, N)] = nums[idx(j, N)], nums[idx(i, N)]
                    i += 1
                    j += 1
                elif nums[idx(j, N)] < val:
                    nums[idx(j, N)], nums[idx(n, N)] = nums[idx(n, N)], nums[idx(j, N)]
                    n -= 1
                else:
                    j += 1
        # Find the median (the (mid+1)-th largest), then tri-partition
        # around it through the virtual index.
        mid = (len(nums) - 1) / 2
        findKthLargest(nums, mid + 1)
        reversedTriPartitionWithVI(nums, nums[mid])
|
[
"haiwen.shi01@gmail.com"
] |
haiwen.shi01@gmail.com
|
91577320a6ad2fab7a30f0640acbdbcf621586e1
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/list/NMTOKEN/Schema+Instance/NISTXML-SV-IV-list-NMTOKEN-enumeration-3-5.py
|
41c46ea0f9f6ec5c12819d5834a5ba585aeda8a2
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 717
|
py
|
from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_enumeration_3_xsd.nistschema_sv_iv_list_nmtoken_enumeration_3 import NistschemaSvIvListNmtokenEnumeration3
from output.models.nist_data.list_pkg.nmtoken.schema_instance.nistschema_sv_iv_list_nmtoken_enumeration_3_xsd.nistschema_sv_iv_list_nmtoken_enumeration_3 import NistschemaSvIvListNmtokenEnumeration3Type
# Instantiate the generated NMTOKEN-list sample with one of the schema's
# fixed enumeration members (an xsdata-generated conformance-test fixture).
obj = NistschemaSvIvListNmtokenEnumeration3(
    value=NistschemaSvIvListNmtokenEnumeration3Type.IDENTIFY_THE_FURTHERMORE_PARTNERS_VERSIONS_TO_TECHNOL_THAT_COMMERCE_D_FROM_FRAMEWORKS_WOULD_PA_SAME_FIVE_SIMULATION_COMPLEX_OASIS_TO_THE_NAVAL_DATA_IN_AROMA_DESCRIPTION_BASE_EC_RECOMMEN_SOME_THESE_TOOLS_CO_RELATED
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
61ffe08a041bf9ab8125c750c6710d2416c6f292
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/slxos/v17s_1_02/mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/__init__.py
|
9db90b9b03d86d82c89d8808869b01b69cf370e7
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,549
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import protocol_stats
class ldp_protocol_stats_instance_since_clear(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module brocade-mpls-operational - based on the path /mpls-state/ldp/statistics/ldp-protocol-stats-instance-since-clear. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    """
    # NOTE: generated code -- do not hand-edit; regenerate from the YANG model.
    __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__protocol_stats',)
    _yang_name = 'ldp-protocol-stats-instance-since-clear'
    _rest_name = 'ldp-protocol-stats-instance-since-clear'
    _pybind_generated_by = 'container'
    def __init__(self, *args, **kwargs):
        # Resolve the XPath helper: explicit kwarg wins, then the parent's
        # helper, otherwise helper support is disabled.
        path_helper_ = kwargs.pop("path_helper", None)
        if path_helper_ is False:
            self._path_helper = False
        elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
            self._path_helper = path_helper_
        elif hasattr(self, "_parent"):
            path_helper_ = getattr(self._parent, "_path_helper", False)
            self._path_helper = path_helper_
        else:
            self._path_helper = False
        # Same resolution strategy for extension methods.
        extmethods = kwargs.pop("extmethods", None)
        if extmethods is False:
            self._extmethods = False
        elif extmethods is not None and isinstance(extmethods, dict):
            self._extmethods = extmethods
        elif hasattr(self, "_parent"):
            extmethods = getattr(self._parent, "_extmethods", None)
            self._extmethods = extmethods
        else:
            self._extmethods = False
        # Backing store for the single child element: the protocol-stats list.
        self.__protocol_stats = YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
        load = kwargs.pop("load", None)
        # Copy-constructor path: a single positional argument must expose all
        # pyangbind elements; changed values are copied via their setters.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # YANG data path of this container, inherited from the parent chain.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return [u'mpls-state', u'ldp', u'statistics', u'ldp-protocol-stats-instance-since-clear']
    def _rest_path(self):
        # REST path; a falsy _rest_name means this node is elided in REST.
        if hasattr(self, "_parent"):
            if self._rest_name:
                return self._parent._rest_path()+[self._rest_name]
            else:
                return self._parent._rest_path()
        else:
            return [u'mpls-state', u'ldp', u'statistics', u'ldp-protocol-stats-instance-since-clear']
    def _get_protocol_stats(self):
        """
        Getter method for protocol_stats, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/protocol_stats (list)
        YANG Description: protocol stats rx/tx
        """
        return self.__protocol_stats
    def _set_protocol_stats(self, v, load=False):
        """
        Setter method for protocol_stats, mapped from YANG variable /mpls_state/ldp/statistics/ldp_protocol_stats_instance_since_clear/protocol_stats (list)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_protocol_stats is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_protocol_stats() directly.
        YANG Description: protocol stats rx/tx
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """protocol_stats must be of a type compatible with list""",
                'defined-type': "list",
                'generated-type': """YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
            })
        self.__protocol_stats = t
        if hasattr(self, '_set'):
            self._set()
    def _unset_protocol_stats(self):
        # Reset the child list back to an empty generated default.
        self.__protocol_stats = YANGDynClass(base=YANGListType("stat_type",protocol_stats.protocol_stats, yang_name="protocol-stats", rest_name="protocol-stats", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='stat-type', extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}), is_container='list', yang_name="protocol-stats", rest_name="protocol-stats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-protocol-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
    # Read-only public property (config: false in the YANG model).
    protocol_stats = __builtin__.property(_get_protocol_stats)
    _pyangbind_elements = {'protocol_stats': protocol_stats, }
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
2b60f88f7128b020f21fa8e9351b9fb82c26385d
|
13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab
|
/home--tommy--mypy/mypy/lib/python2.7/site-packages/gensim/corpora/lowcorpus.py
|
e293c998a14d288506947a9fd241acf64a343952
|
[
"Unlicense"
] |
permissive
|
tommybutler/mlearnpy2
|
8ec52bcd03208c9771d8d02ede8eaa91a95bda30
|
9e5d377d0242ac5eb1e82a357e6701095a8ca1ff
|
refs/heads/master
| 2022-10-24T23:30:18.705329
| 2022-10-17T15:41:37
| 2022-10-17T15:41:37
| 118,529,175
| 0
| 2
|
Unlicense
| 2022-10-15T23:32:18
| 2018-01-22T23:27:10
|
Python
|
UTF-8
|
Python
| false
| false
| 7,100
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Corpus in GibbsLda++ format of List-Of-Words.
"""
from __future__ import with_statement
import logging
from gensim import utils
from gensim.corpora import IndexedCorpus
from six import iterkeys
from six.moves import xrange, zip as izip
logger = logging.getLogger('gensim.corpora.lowcorpus')


def split_on_space(s):
    """Split *s* on single spaces into a list of non-empty unicode tokens."""
    tokens = utils.to_unicode(s).strip().split(' ')
    return [token for token in tokens if token]
class LowCorpus(IndexedCorpus):
    """
    List_Of_Words corpus handles input in GibbsLda++ format.
    Quoting http://gibbslda.sourceforge.net/#3.2_Input_Data_Format::
    Both data for training/estimating the model and new data (i.e., previously
    unseen data) have the same format as follows:
    [M]
    [document1]
    [document2]
    ...
    [documentM]
    in which the first line is the total number for documents [M]. Each line
    after that is one document. [documenti] is the ith document of the dataset
    that consists of a list of Ni words/terms.
    [documenti] = [wordi1] [wordi2] ... [wordiNi]
    in which all [wordij] (i=1..M, j=1..Ni) are text strings and they are separated
    by the blank character.
    """
    def __init__(self, fname, id2word=None, line2words=split_on_space):
        """
        Initialize the corpus from a file.
        `id2word` and `line2words` are optional parameters.
        If provided, `id2word` is a dictionary mapping between word_ids (integers)
        and words (strings). If not provided, the mapping is constructed from
        the documents.
        `line2words` is a function which converts lines into tokens. Defaults to
        simple splitting on spaces.
        """
        IndexedCorpus.__init__(self, fname)
        logger.info("loading corpus from %s", fname)
        self.fname = fname # input file, see class doc for format
        self.line2words = line2words # how to translate lines into words (simply split on space by default)
        self.num_docs = self._calculate_num_docs()
        if not id2word:
            # build a list of all word types in the corpus (distinct words)
            logger.info("extracting vocabulary from the corpus")
            all_terms = set()
            # no id mapping yet, so iterate over ourselves in raw-word mode
            # (line2doc yields (word, count) pairs while use_wordids is False)
            self.use_wordids = False # return documents as (word, wordCount) 2-tuples
            for doc in self:
                all_terms.update(word for word, wordCnt in doc)
            all_terms = sorted(all_terms) # sort the list of all words; rank in that list = word's integer id
            # build a mapping of word id(int) -> word (string)
            self.id2word = dict(izip(xrange(len(all_terms)), all_terms))
        else:
            logger.info("using provided word mapping (%i ids)", len(id2word))
            self.id2word = id2word
        # the id2word setter (bottom of this class) also rebuilt self.word2id
        self.num_terms = len(self.word2id)
        self.use_wordids = True # return documents as (wordIndex, wordCount) 2-tuples
        logger.info(
            "loaded corpus with %i documents and %i terms from %s",
            self.num_docs, self.num_terms, fname
        )
    def _calculate_num_docs(self):
        # the first line in input data is the number of documents (integer). throws exception on bad input.
        with utils.smart_open(self.fname) as fin:
            try:
                result = int(next(fin))
            except StopIteration:
                # completely empty file -> zero documents
                result = 0
        return result
    def __len__(self):
        # number of documents, as declared on the file's first line
        return self.num_docs
    def line2doc(self, line):
        """Convert one text line (= one document) into bag-of-words format."""
        words = self.line2words(line)
        if self.use_wordids:
            # get all distinct terms in this document, ignore unknown words
            uniq_words = set(words).intersection(iterkeys(self.word2id))
            # the following creates a unique list of words *in the same order*
            # as they were in the input. when iterating over the documents,
            # the (word, count) pairs will appear in the same order as they
            # were in the input (bar duplicates), which looks better.
            # if this was not needed, we might as well have used useWords = set(words)
            use_words, marker = [], set()
            for word in words:
                if (word in uniq_words) and (word not in marker):
                    use_words.append(word)
                    marker.add(word)
            # construct a list of (wordIndex, wordFrequency) 2-tuples
            doc = [(self.word2id.get(w), words.count(w)) for w in use_words]
        else:
            uniq_words = set(words)
            # construct a list of (word, wordFrequency) 2-tuples
            doc = [(w, words.count(w)) for w in uniq_words]
        # return the document, then forget it and move on to the next one
        # note that this way, only one doc is stored in memory at a time, not the whole corpus
        return doc
    def __iter__(self):
        """
        Iterate over the corpus, returning one bag-of-words vector at a time.
        """
        with utils.smart_open(self.fname) as fin:
            for lineno, line in enumerate(fin):
                if lineno > 0: # ignore the first line = number of documents
                    yield self.line2doc(line)
    @staticmethod
    def save_corpus(fname, corpus, id2word=None, metadata=False):
        """
        Save a corpus in the List-of-words format.
        This function is automatically called by `LowCorpus.serialize`; don't
        call it directly, call `serialize` instead.
        """
        if id2word is None:
            logger.info("no word id mapping provided; initializing from corpus")
            id2word = utils.dict_from_corpus(corpus)
        logger.info("storing corpus in List-Of-Words format into %s" % fname)
        truncated = 0
        offsets = []
        with utils.smart_open(fname, 'wb') as fout:
            fout.write(utils.to_utf8('%i\n' % len(corpus)))
            for doc in corpus:
                words = []
                for wordid, value in doc:
                    # the format stores plain word repetitions, so fractional
                    # counts must be truncated to integers
                    if abs(int(value) - value) > 1e-6:
                        truncated += 1
                    words.extend([utils.to_unicode(id2word[wordid])] * int(value))
                # remember each document's byte offset for random access
                offsets.append(fout.tell())
                fout.write(utils.to_utf8('%s\n' % ' '.join(words)))
        if truncated:
            logger.warning(
                "List-of-words format can only save vectors with integer elements; "
                "%i float entries were truncated to integer value", truncated
            )
        return offsets
    def docbyoffset(self, offset):
        """
        Return the document stored at file position `offset`.
        """
        with utils.smart_open(self.fname) as f:
            f.seek(offset)
            return self.line2doc(f.readline())
    @property
    def id2word(self):
        return self._id2word
    @id2word.setter
    def id2word(self, val):
        # keep the reverse mapping (word -> id) in sync with the forward one
        self._id2word = val
        self.word2id = utils.revdict(val)
|
[
"tbutler.github@internetalias.net"
] |
tbutler.github@internetalias.net
|
d95b85d157c5e47a6a21e27eabf4525b5afea52e
|
d0a84d97aaa8dcc2dff4a6b33ce98dee6d474496
|
/com.CheckProofing/Test_Campaign_2021/scripts/python/Page/extract_images.py
|
3b80f87b21db865b5932d0164080417339bd2fe7
|
[] |
no_license
|
ahmed-test001/python
|
21a27248c4571a13c0ed4dccab256aede1beea3a
|
eab59b9a54fae1a51fbc18c391599eb3b0e28b3d
|
refs/heads/master
| 2023-03-10T21:00:54.634028
| 2021-02-27T05:31:58
| 2021-02-27T05:31:58
| 342,778,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,915
|
py
|
# import json
# import re
# import os
# import sys
# import requests
# import pytesseract
# # import cv2
# from urllib.parse import urlparse
#
# from bs4 import BeautifulSoup
# from selenium.webdriver.support.wait import WebDriverWait
#
# from Test_Campaign_2021.scripts.python.Util_Data import ReadConfig
#
#
# class extract_images:
# output_dir = "../../data/output/"
#
# def __init__(self, driver):
# self.driver = driver
# self.wait = WebDriverWait(self.driver, 10)
#
# def check_key_exist(self, test_dict, key):
# try:
# value = test_dict[key]
# return True
# except KeyError:
# return False
#
# def extract_images(self):
#
# with open(ReadConfig.readFilePathData('FilePaths', 'url_list'), 'w') as f:
# urls = f.read().splitlines()
# contents = urls[0]
# input_html_file = BeautifulSoup(contents, 'html.parser')
# f.close()
# print("#################### Extract Images Start ####################")
# pytesseract.pytesseract.tesseract_cmd = (r"C:\\Program Files\\Tesseract-OCR\\tesseract.exe")
#
# png_images = input_html_file.find_all('img', {'src': re.compile('.png')})
# jpg_images = input_html_file.find_all('img', {'src': re.compile('.jpg')})
# ahref_links = []
# hyper_links_json = {}
# for image in jpg_images:
# d_cols = {}
# d_cols['src'] = image['src']
# source = urlparse(image['src'])
# print("Image Source: ", source)
# filename = os.path.basename(source.path)
# response = requests.get(image['src'])
# image_file = open(self.output_dir+"/proof_images/" + filename, "wb+")
# image_file.write(response.content)
# image_file.close()
# d_cols['filename'] = filename
# # if image['alt'] == "":
# # continue
# d_cols['alt'] = image['alt'] if self.check_key_exist(image, 'alt') else ""
# # d_cols['alt'] = image['alt']
# img = cv2.imread(self.output_dir+"/proof_images/" + filename)
# img = cv2.resize(img, None, fx=7, fy=7)
# data = pytesseract.image_to_string(img)
# d_cols['data'] = data.strip()
# ahref_links.append(d_cols)
#
# for image in png_images:
# d_cols = {}
# d_cols['src'] = image['src']
# source = urlparse(image['src'])
# print("Image Source: ", source)
# filename = os.path.basename(source.path)
# response = requests.get(image['src'])
# image_file = open(self.output_dir+"/proof_images/" + filename, "wb+")
# image_file.write(response.content)
# image_file.close()
# d_cols['filename'] = filename
#
# # if image['alt']=="":
# # continue
# d_cols['alt'] = image['alt'] if self.check_key_exist(image, 'alt') else ""
# # d_cols['alt'] = image['alt']
# img = cv2.imread(self.output_dir+"/proof_images/" + filename)
# img = cv2.resize(img, None, fx=7, fy=7)
# data = pytesseract.image_to_string(img)
# d_cols['data'] = data
# ahref_links.append(d_cols)
#
# # hyper_links_json['alerts'] = ahref_links
# # final_hyber_links = json.dumps(hyper_links_json, indent=4, sort_keys=False, ensure_ascii=False)
# # file = open(self.output_dir+"proof_files/" + "abc" + ".json", "w", encoding="utf-8")
# # # file = open(self.output_dir+"proof_files/" + self.output_file_name + '_' + '-'.join(self.filename.split('-')[-3:-1]) + ".json", "w", encoding="utf-8")
# # # file.write(final_hyber_links)
# # file.close()
# print("#################### Extract Images End ####################")
|
[
"ahmedu.ferdous@gmail.com"
] |
ahmedu.ferdous@gmail.com
|
b9063f096b96d5a75a310bc8ea0a8636adf03b5a
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/BHBXNfeMsA43d8Tys_22.py
|
a4efdfcb90caae2db151d39c9a348261e7d74a67
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
"""
As far as we currently know, approximations for the mathematical constant
**pi** (π) in the history of mathematics started surfacing with Ancient
Babylonians, who found its correct truncation up to 1 decimal place. During
the 5th century, the Chinese mathematician Zu Chongzhi raised it to 7 decimal
places and from the 18th century onwards the number of correct pi decimal
places has seen steady growth.
Since the middle of the 20th century, the approximation of pi has been the
task of electronic digital computers. During the 2019 Pi Day on the 14th of
March, the Japanese computer scientist _Emma Haruka Iwao_ released the
currently most accurate value of pi with more than 31.4 trillion digits, using
170 Terabytes of data.
Your task is to create a function that takes a positive integer `n` as an
argument and returns the value of **pi** with its first `n` decimal digits.
Taylor series are usually used to get finer approximations. To make this
challenge approachable to anyone, the following formula is suggested:

### Examples
pi(1) ➞ "3.1"
pi(2) ➞ "3.14"
pi(30) ➞ "3.141592653589793238462643383279"
### Notes
N/A
"""
def pi(n):
    """Return pi truncated to *n* decimal digits, as a string "3.xxx...".

    Uses an integer-only accelerated series with 10 guard digits, so no
    floating-point rounding is involved.
    """
    # Carry 10 extra guard digits to absorb error from the series tail.
    scale = 10 ** (n + 10)
    term = total = 3 * scale
    k = 1
    while term:
        term = term * k // ((k + 1) * 4)
        k += 2
        total += term // k
    # Drop the guard digits; the leading "3" becomes the integer part.
    return '3.' + str(total // 10 ** 10)[1:]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
72fb6a38c5f5d698ef3de0e95fd431195f0c6c1c
|
4522fc52bc43654aadd30421a75bae00a09044f0
|
/riley/dev.py
|
1c924685ce9976e08ff5b678bf63dcb402aa2ce4
|
[] |
no_license
|
qesoalpe/anelys
|
1edb8201aa80fedf0316db973da3a58b67070fca
|
cfccaa1bf5175827794da451a9408a26cd97599d
|
refs/heads/master
| 2020-04-07T22:39:35.344954
| 2018-11-25T05:23:21
| 2018-11-25T05:23:21
| 158,779,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,321
|
py
|
from dict import Dict as dict
import os
import os.path
from isodate import datetime_isoformat
from datetime import datetime
from pathlib import Path
# Root of the project tree this script indexes.
path_root = Path(r'/home/picazo/anelys')
# NOTE(review): rebinding os.path.sep only mutates the module attribute; the
# os.path join/split functions do not consult it afterwards, so this appears
# to have no effect on Windows -- confirm intent.
if os.path.sep != '/':
    os.path.sep = '/'
from katherine import d6
def get_datetime(timestamp):
    """Convert a POSIX *timestamp* to an ISO-8601 datetime string (local time)."""
    moment = datetime.fromtimestamp(timestamp)
    return datetime_isoformat(moment)
def parse_dir(dirpath):
    # Recursively build a tree of 'riley/directory' / 'riley/file' nodes,
    # MD5-hashing every .py/.pyw file found under dirpath.
    # NOTE(review): `Dict` and `path_relative` are not defined in this
    # module's visible scope (the import statement binds `Dict` under the
    # name `dict`) — confirm where these names come from before running.
    dir = Dict()
    dir.children = list()
    dir.path = '/' + dirpath + '/'
    dir.type = 'riley/directory'
    if path_relative:
        paths = os.listdir(dirpath)
        if dirpath != '.':
            # Prefix entries with the parent path, normalising to '/'.
            paths = [os.path.join(dirpath, path).replace('\\', '/') for path in paths]
        else:
            paths = [path.replace('\\', '/') for path in paths]
        for path in paths:
            if os.path.isdir(path) and os.path.basename(path) not in ['__cache__', '__pycache__']:
                dir.children.append(parse_dir(path))
            elif os.path.isfile(path) and os.path.splitext(path)[1] in ['.py', '.pyw']:
                # Stream the file in 4 KiB chunks to keep memory bounded.
                f = open(path, 'rb')
                import hashlib
                md5_hashlib = hashlib.md5()
                for chunk in iter(lambda: f.read(4096), b''):
                    md5_hashlib.update(chunk)
                f.close()
                file = Dict()
                file.md5 = md5_hashlib.hexdigest().upper()
                file.path = '/' + path
                file.size = os.path.getsize(path)
                file.modified_datetime = get_datetime(os.path.getmtime(path))
                file.type = 'riley/file'
                dir.children.append(file)
    return dir
os.chdir(path_root)
tree = parse_dir('.')
def get_locals(dir):
    # Flatten the directory tree into a list: this directory's file nodes
    # first, then (recursively) each subdirectory's contents followed by a
    # stripped-down copy of the subdirectory node itself (path/type only).
    rr = [child for child in dir.children if child.type == 'riley/file']
    for m in [child for child in dir.children if child.type == 'riley/directory']:
        rr.extend(get_locals(m))
        from copy import deepcopy
        # Deep-copy before pruning so the original tree stays intact.
        m = deepcopy(m)
        for k in list(m.keys()):
            if k not in ['path', 'type']:
                del m[k]
        rr.append(m)
    return rr
locals = get_locals(tree)
# cursor_db = db_mariadb.cursor()
# from pprint import pprint
#
# cursor_db = db_mariadb.cursor(pymysql.cursors.DictCursor)
# cursor_db.execute('select filepath as path, md5, size, modified_datetime from riley.file;')
#
# remotes = [Dict(file) for file in cursor_db]
#
# for file in remotes:
# file.modified_datetime = datetime_isoformat(file.modified_datetime)
#
#
#
# for katherine in locals:
# if 'path' in katherine:
# if katherine.path[0] != '/':
# katherine.path = '/' + katherine.path
#
# from pymongo import MongoClient
# db_mongo_local = MongoClient(port=27020)
# db_riley = db_mongo_local.get_database('riley')
# coll_snapshot_sync = db_riley.get_collection('snapshot_sync')
#
# snapshot = coll_snapshot_sync.find_one(projection={'_id': False},
# sort=[('datetime', -1)])
# if snapshot is not None:
# snapshots = snapshot.snapshots
# else:
# snapshots = None
#
# persisted_path = [file.path for file in persisted]
# locals_path = [file.path for file in locals]
#
#
# def persist_file(file):
# pass
#
# pprint(locals_path)
# pprint(persisted_path)
#
#
# snapshots = Dict({'snapshot': locals, 'datetime': datetime_isoformat(datetime.now())})
|
[
"qesoalpe@gmail.com"
] |
qesoalpe@gmail.com
|
92f6925b2a9cfb31a62f32e59f35f03425e5c4ee
|
fd25231975acd147e04dc3ed3627c92cb1a4f86c
|
/FlaskAPI/vir_env/lib/python3.7/site-packages/scipy/io/matlab/tests/test_mio.py
|
a2fff9f37f188018118bed9ef6dc4f9d6725e5b8
|
[] |
no_license
|
sumitkutty/Flight-Price-Prediction
|
832a2802a3367e655b46d3b44f073d917abd2320
|
d974a8b75fbcbfa42f11703602af3e45a3f08b3c
|
refs/heads/master
| 2022-12-25T07:13:06.375888
| 2020-10-08T18:46:44
| 2020-10-08T18:46:44
| 302,366,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:8ddd2d36df500761f2d9af0be22308dd2450ebd281c3b0e945bc89b26ebb413d
size 42136
|
[
"sumitkutty37@gmail.com"
] |
sumitkutty37@gmail.com
|
3496db296e088ab5b474d57d635d971b8e919291
|
923a14dd594191d77e30465027ece8371f28a7a6
|
/web-serpng/code/serpng/jobs/services/search/user_data_tests.py
|
a41f50ac118c451b073c3ebd84206912b868bae7
|
[] |
no_license
|
alyago/django-web
|
3af7b3389df59104eaf5e50ed9cc2c3e730fed7f
|
da3073eec6d676dfe0164502b80d2a1c75e89575
|
refs/heads/master
| 2021-01-10T19:33:45.425520
| 2013-11-21T09:43:37
| 2013-11-21T09:43:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,400
|
py
|
"""User Data Tests."""
from django.test import TestCase
import user_data
# JSON responses from the bridge to be used in the tests.
JSON_RESPONSE_WITH_NO_USER_DATA = {
'abc': 'I am not user data'
}
JSON_RESPONSE_WITH_GOOD_USER_DATA = {
'user_data': {
'recent_searches': ['rs1', 'rs2'],
'user_email': 'meow@cat.com',
'saved_jobs': {
'job1': {'comment': 'abc'},
'job2': {'comment': 'def'}
}
}
}
JSON_RESPONSE_WITH_BAD_USER_DATA = {
'user_data': {}
}
JSON_RESPONSE_WITH_EMPTY_ARRAY_SAVED_JOBS = {
'user_data': {
'saved_jobs': []
}
}
JSON_RESPONSE_WITH_NULL_COMMENT_SAVED_JOB = {
'user_data': {
'saved_jobs': {
'job1': {'comment': 'abc'},
'job2': {'comment': None}
}
}
}
# Tests
class UserDataTestCase(TestCase):
    """Unit tests for user_data.UserData parsing of bridge JSON responses."""
    # pylint: disable=R0904

    def test_no_user_data_in_json_response(self):
        """Default values should be correct when there is no user data."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_NO_USER_DATA)
        self.assertIsNone(parsed.recent_searches)
        self.assertIsNone(parsed.user_email)
        self.assertEqual(parsed.saved_jobs, {})

    def test_good_recent_searches(self):
        """Attribute 'recent_searches' should be correctly populated."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
        self.assertEqual(parsed.recent_searches[1], 'rs2')

    def test_good_user_email(self):
        """Attribute 'user_email' should be correctly populated."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
        self.assertEqual(parsed.user_email, 'meow@cat.com')

    def test_good_saved_jobs(self):
        """Attribute 'saved_jobs' should map each job id to its comment."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_GOOD_USER_DATA)
        self.assertEqual(parsed.saved_jobs['job1'], 'abc')

    def test_no_recent_searches(self):
        """'recent_searches' should default sensibly when user_data is empty."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
        self.assertIsNone(parsed.recent_searches)

    def test_no_user_email(self):
        """'user_email' should default sensibly when user_data is empty."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
        self.assertIsNone(parsed.user_email)

    def test_no_saved_jobs(self):
        """'saved_jobs' should default to {} when user_data is empty."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_BAD_USER_DATA)
        self.assertEqual(parsed.saved_jobs, {})

    def test_empty_array_saved_jobs(self):
        """'saved_jobs' should default to {} when the bridge sends an empty array."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_EMPTY_ARRAY_SAVED_JOBS)
        self.assertEqual(parsed.saved_jobs, {})

    def test_null_comment_saved_job(self):
        """Null saved-job comments should be converted to empty strings."""
        parsed = user_data.UserData(JSON_RESPONSE_WITH_NULL_COMMENT_SAVED_JOB)
        self.assertEqual(parsed.saved_jobs['job2'], '')
|
[
"oleg@simplyhired.com"
] |
oleg@simplyhired.com
|
ea0247a08d3dbfcc08f7339be1353955119ac626
|
f5ef25c84e9b4846f98d520bc9a20d20b3d1b65c
|
/OOP/oop3.py
|
e7b6dedf4ec707a9c8abd83d79293c2b25573e9b
|
[] |
no_license
|
amiraHag/python-basic-course2
|
45757ffdfa677c2accd553330cd2fd825208b0aa
|
1fbfd08b34f3993299d869bd55c6267a61dc7810
|
refs/heads/main
| 2023-03-31T06:48:11.587127
| 2021-03-30T03:43:10
| 2021-03-30T03:43:10
| 327,271,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
# --------------------------------------------------------------------
# -- Object Oriented Programming => Instance Attributes and Methods --
# --------------------------------------------------------------------
# Self: Point To Instance Created From Class
# Instance Attributes: Instance Attributes Defined Inside The Constructor
# -----------------------------------------------------------------------
# Instance Methods: Take Self Parameter Which Point To Instance Created From Class
# Instance Methods Can Have More Than One Parameter Like Any Function
# Instance Methods Can Freely Access Attributes And Methods On The Same Object
# Instance Methods Can Access The Class Itself
# -----------------------------------------------------------
class Member:
    """Simple record holding a member's three-part name.

    The parts are exposed as the instance attributes fname, mname, lname.
    """

    def __init__(self, first_name, middle_name, last_name):
        # Instance attributes are created inside the constructor via self.
        self.fname, self.mname, self.lname = first_name, middle_name, last_name
# Demo: create three members and print parts of their names.
member_one = Member("Amira", "Mustafa", "HM")
member_two = Member("Ahmed", "Hag", "Imam")
member_three = Member("Sara", "HI", "Mustafa")
# print(dir(member_one))
print(member_one.fname, member_one.mname, member_one.lname)
print(member_two.fname)
print(member_three.fname)
|
[
"amira071846@feng.bu.edu.eg"
] |
amira071846@feng.bu.edu.eg
|
8af41c09b124f2ec5b82fef8804ae4eefd794aa5
|
4759db9f7e74cec91edbb4c18c553b92913d1695
|
/adafruit_atecc/adafruit_atecc_cert_util.py
|
415c17ab0cb4833d4b867b6891196d9eb11ca90d
|
[
"MIT",
"LGPL-2.1-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
brentru/Adafruit_CircuitPython_ATECC
|
9702e8e06123ab258fee39baf3462640401f9f28
|
cceac6431ff28edcf410c53fc2db0c357533d774
|
refs/heads/master
| 2020-07-27T13:53:31.604065
| 2019-09-17T20:17:00
| 2019-09-17T20:17:00
| 209,113,921
| 1
| 0
|
MIT
| 2019-09-17T17:15:21
| 2019-09-17T17:15:21
| null |
UTF-8
|
Python
| false
| false
| 6,488
|
py
|
# Copyright (c) 2018 Arduino SA. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Brent Rubell for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_atecc_cert_util`
================================================================================
Certification Generation and Helper Utilities for the Adafruit_ATECC Module.
* Author(s): Brent Rubell
Implementation Notes
--------------------
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from adafruit_binascii import b2a_base64
import adafruit_atecc.adafruit_atecc_asn1 as asn1
class CSR:
    """Certificate Signing Request Builder.

    :param adafruit_atecc atecc: ATECC module.
    :param slot_num: ATECC module slot (from 0 to 4).
    :param bool private_key: Generate a new private key in selected slot?
    :param str country: 2-letter country code.
    :param str state_prov: State or Province name.
    :param str city: City name.
    :param str org: Organization name.
    :param str org_unit: Organizational unit name.
    """
    # pylint: disable=too-many-arguments, too-many-instance-attributes
    def __init__(self, atecc, slot_num, private_key, country, state_prov,
                 city, org, org_unit):
        self._atecc = atecc
        self.private_key = private_key
        self._slot = slot_num
        self._country = country
        self._state_province = state_prov
        self._locality = city
        self._org = org
        self._org_unit = org_unit
        # The device serial number doubles as the certificate Common Name.
        self._common = self._atecc.serial_number
        self._version_len = 3
        self._cert = None
        self._key = None

    def generate_csr(self):
        """Generates and returns a certificate signing request."""
        self._csr_begin()
        csr = self._csr_end()
        return csr

    def _csr_begin(self):
        """Initializes CSR generation: allocates the 64-byte public-key
        buffer and asks the ATECC to generate the key in the chosen slot."""
        assert 0 <= self._slot <= 4, "Provided slot must be between 0 and 4."
        # Create a new key
        self._key = bytearray(64)
        # NOTE(review): both branches issue the identical gen_key call, so
        # the early return looks redundant — confirm against the driver.
        if self.private_key:
            self._atecc.gen_key(self._key, self._slot, self.private_key)
            return
        self._atecc.gen_key(self._key, self._slot, self.private_key)

    def _csr_end(self):
        """Generates and returns
        a certificate signing request as a base64 string."""
        # Pre-compute ASN.1 lengths so sequence headers can be written
        # before the bodies they describe.
        len_issuer_subject = asn1.issuer_or_subject_length(self._country, self._state_province,
                                                          self._locality, self._org,
                                                          self._org_unit, self._common)
        len_sub_header = asn1.get_sequence_header_length(len_issuer_subject)
        len_csr_info = self._version_len + len_issuer_subject
        len_csr_info += len_sub_header + 91 + 2
        len_csr_info_header = asn1.get_sequence_header_length(len_csr_info)
        # CSR Info Packet
        csr_info = bytearray()
        # Append CSR Info --> [0:2]
        asn1.get_sequence_header(len_csr_info, csr_info)
        # Append Version --> [3:5]
        asn1.get_version(csr_info)
        # Append Subject --> [6:7]
        asn1.get_sequence_header(len_issuer_subject, csr_info)
        # Append Issuer or Subject
        asn1.get_issuer_or_subject(csr_info, self._country, self._state_province,
                                   self._locality, self._org, self._org_unit, self._common)
        # Append Public Key
        asn1.get_public_key(csr_info, self._key)
        # Terminator
        csr_info += b"\xa0\x00"
        # Init. SHA-256 Calculation: hash csr_info in 64-byte chunks; the
        # final (short) chunk call produces the digest.
        csr_info_sha_256 = bytearray(64)
        self._atecc.sha_start()
        for i in range(0, len_csr_info + len_csr_info_header, 64):
            chunk_len = (len_csr_info_header + len_csr_info) - i
            if chunk_len > 64:
                chunk_len = 64
            if chunk_len == 64:
                self._atecc.sha_update(csr_info[i:i+64])
            else:
                csr_info_sha_256 = self._atecc.sha_digest(csr_info[i:])
        # Sign the SHA256 Digest
        signature = bytearray(64)
        signature = self._atecc.ecdsa_sign(self._slot, csr_info_sha_256)
        # Calculations for signature and csr length
        len_signature = asn1.get_signature_length(signature)
        len_csr = len_csr_info_header + len_csr_info + len_signature
        asn1.get_sequence_header_length(len_csr)
        # append signature to csr
        csr = bytearray()
        asn1.get_sequence_header(len_csr, csr)
        # append csr_info
        csr += csr_info
        asn1.get_signature(signature, csr)
        # encode and return
        csr = b2a_base64(csr)
        return csr
|
[
"robots199@me.com"
] |
robots199@me.com
|
432c01b9bc0749e080f5030723946eea795b05b6
|
eeade223e39130cac09fb4907da6410101af5935
|
/setup.py
|
3e74c7d16553cc22209fc859a2c55f4468a03da1
|
[] |
no_license
|
TrendingTechnology/jaxfg
|
67cac95f7e37c2eac75574fa8473b89cc222137e
|
7f19668b344944be196e6b61fdc36f1441bac819
|
refs/heads/master
| 2023-06-20T17:20:43.928788
| 2021-07-30T23:24:42
| 2021-07-31T00:33:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
from setuptools import find_packages, setup

# Packaging configuration for jaxfg (factor graphs in JAX).
setup(
    name="jaxfg",
    version="0.0",
    description="Factor graphs in Jax",
    url="http://github.com/brentyi/jaxfg",
    author="brentyi",
    author_email="brentyi@berkeley.edu",
    license="BSD",
    packages=find_packages(),
    # Ship the py.typed marker so type checkers read inline annotations.
    package_data={"jaxfg": ["py.typed"]},
    python_requires=">=3.7",
    install_requires=[
        "datargs",
        "jax>=0.2.13",
        "jaxlib",
        "jaxlie>=1.0.0",
        "jax_dataclasses>=1.0.0",
        "overrides",
        "scikit-sparse",
        "termcolor",
        "tqdm",
        "typing_utils",  # We can phase this out if we drop support for Python 3.7
        "matplotlib",
    ],
    extras_require={
        "testing": [
            "pytest",
            # "pytest-cov",
            # "hypothesis",
            # "hypothesis[numpy]",
        ],
        "type-checking": [
            "mypy",
            "types-termcolor",
        ],
    },
)
|
[
"yibrenth@gmail.com"
] |
yibrenth@gmail.com
|
88fde4953ea93f45918c4891940b3b494d26ae2f
|
7623d4ca5cacb259a1b2e7a98b1e8a3011592348
|
/SICP/examples/ex2_83.py
|
b372d8db084f7f17d5cb1e2e2f63db57d0db0e8f
|
[] |
no_license
|
nextdesusu/Learn-Python
|
3b875ab5093844fe64cc13e717a3637bdfe62a9a
|
3212059408eec27ee2ed359ac9d691b3d061372f
|
refs/heads/master
| 2022-01-29T07:39:11.915177
| 2019-07-21T14:18:10
| 2019-07-21T14:18:10
| 198,063,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
def gcd(a, b):
    """Greatest common divisor by repeated remainders.

    If one argument is 0, the other is returned (gcd(x, 0) == x).
    """
    while a and b:
        if a > b:
            a %= b
        else:
            b %= a
    return a + b
#print(gcd(50, 130))
class Complex:
    """Minimal complex number with a real and an (optional) imaginary part."""

    def __init__(self, real, imag=0):
        self.real = real
        self.imag = imag

    def __str__(self):
        # Render as "a + bi".
        return '{} + {}i'.format(self.real, self.imag)
class Rational:
    """Exact rational number n/m.

    Raises ZeroDivisionError when constructed with a zero denominator
    (same exception type the old `raise 1 / 0` trick produced, raised
    explicitly now).
    """

    def __init__(self, n, m):
        self.n = n
        if m == 0:
            raise ZeroDivisionError('Rational denominator must be non-zero')
        self.m = m

    @property
    def equate(self):
        """Float value of the fraction."""
        return self.n / self.m

    def __add__(self, other):
        """Exact fraction addition, reduced to lowest terms.

        The previous implementation added numerators and denominators
        separately (the mediant, not the sum) and used float division;
        this computes n1*m2 + n2*m1 over m1*m2 and reduces with gcd.
        """
        if isinstance(other, Rational):
            from math import gcd as _gcd  # local import: no module-level deps change
            num = self.n * other.m + other.n * self.m
            den = self.m * other.m
            g = _gcd(num, den)
            return Rational(num // g, den // g)
        return NotImplemented

    def __str__(self):
        return '{0} / {1}'.format(self.n, self.m)
def raise_(num):
    """Lift num one step up the numeric tower:
    int -> Rational -> float -> Complex (anything else yields None)."""
    if isinstance(num, int):
        return Rational(num, 1)
    elif isinstance(num, Rational):
        return float(num.equate)
    elif isinstance(num, float):
        return Complex(num, 0)
    return None  # already at the top of the tower
a = 1
print(a)
a = raise_(a)
print(a)
a = raise_(a)
print(a)
a = raise_(a)
print(a)
|
[
"noreply@github.com"
] |
nextdesusu.noreply@github.com
|
ecadda233d55e5a381cea2a473aabeb40e553cf4
|
f32e9b464a8c9fb7f5238935cfb5f83e840269e6
|
/chat.py
|
9bba623185a4235e003e9897cc735374256095c4
|
[] |
no_license
|
DavidArmendariz/python-chatbot
|
c192fc5f310d7c069c2a58b165ff8d90a1ceff2b
|
c7df66d4e0ae64c79ab75cc5cb58690efa677c23
|
refs/heads/master
| 2022-12-18T18:38:38.375681
| 2020-09-28T19:10:11
| 2020-09-28T19:10:11
| 258,566,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
from app import app, db
from app.models import User, Message, Chatroom
@app.shell_context_processor
def make_shell_context():
    """Names automatically available inside the `flask shell` REPL."""
    models = {'User': User, 'Message': Message, 'Chatroom': Chatroom}
    return dict(db=db, **models)
|
[
"darmendariz1998@outlook.com"
] |
darmendariz1998@outlook.com
|
32bc36980bd85af045910d5303f1b1c037b8938f
|
3b60e6f4bbc011003ac4929f01eb7409918deb79
|
/Analysis_v1/Simulation/Pythia/RSG/RSGfragments/RSGravitonToGammaGamma_kMpl01_M_5750_TuneCP5_13TeV_pythia8_cfi.py
|
dbf0343665487d8f89199e7c5e5a6aaec7a57103
|
[] |
no_license
|
uzzielperez/Analyses
|
d1a64a4e8730325c94e2bc8461544837be8a179d
|
1d66fa94763d7847011ea551ee872936c4c401be
|
refs/heads/master
| 2023-02-09T04:54:01.854209
| 2020-09-07T14:57:54
| 2020-09-07T14:57:54
| 120,850,137
| 0
| 0
| null | 2020-06-17T16:48:16
| 2018-02-09T03:14:04
|
C++
|
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
from Configuration.Generator.Pythia8aMCatNLOSettings_cfi import *
# Pythia8 setup: RS graviton (PDG 5100039) -> diphoton, M = 5750 GeV, CP5 tune.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),  # 13 TeV centre-of-mass energy
    crossSection = cms.untracked.double(1.095e-3),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CP5SettingsBlock,
        pythia8aMCatNLOSettingsBlock,
        processParameters = cms.vstring(
            'ExtraDimensionsG*:all = on',
            'ExtraDimensionsG*:kappaMG = 0.541643794389',
            '5100039:m0 = 5750.0',   # graviton mass in GeV
            '5100039:onMode = off',
            '5100039:onIfAny = 22',  # allow only decays to photons (PDG 22)
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CP5Settings',
                                    'pythia8aMCatNLOSettings',
                                    'processParameters',
                                    )
    )
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"uzzie.perez@cern.ch"
] |
uzzie.perez@cern.ch
|
8aeb1300f85a1aaafb71ce05a4910eda695d01de
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_461.py
|
5bdb3315298efd0854676b72294e5e643b54f60a
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# How to query filter in django without multiple occurrences
ParentModel.objects.filter(childmodel__in=ChildModel.objects.all()).distinct()
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
25894a978235e5a7ba954ec8cdc0e0047e8254e1
|
2fd087fbc5faf43940153693823969df6c8ec665
|
/pyc_decrypted/latest/dropbox/metadata/vorbis.py
|
e7e48da8552e55eb862035894baafb7a71cedce1
|
[] |
no_license
|
mickeystone/DropBoxLibrarySRC
|
ed132bbffda7f47df172056845e5f8f6c07fb5de
|
2e4a151caa88b48653f31a22cb207fff851b75f8
|
refs/heads/master
| 2021-05-27T05:02:30.255399
| 2013-08-27T13:16:55
| 2013-08-27T13:16:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
#Embedded file name: dropbox/metadata/vorbis.py
from collections import defaultdict
import struct
from .utils import safe_read
def readVorbisComment(file_obj):
    """Parse a Vorbis comment block into {lowercased field name: [values]}.

    Layout: 32-bit LE vendor-string length + vendor string (skipped), then
    a 32-bit LE comment count followed by length-prefixed "NAME=value"
    entries. Returns an empty dict on any parse error.
    """
    toret = defaultdict(list)
    try:
        vendor_length = struct.unpack('<I', safe_read(file_obj, 4))[0]
        safe_read(file_obj, vendor_length)
        user_comment_list_length = struct.unpack('<I', safe_read(file_obj, 4))[0]
        for i in range(user_comment_list_length):
            length = struct.unpack('<I', safe_read(file_obj, 4))[0]
            comment = ''.join(struct.unpack('<%dc' % length, safe_read(file_obj, length)))
            # Split on the FIRST '=' only: the Vorbis spec allows '=' inside
            # values, and the old split('=') raised ValueError on such
            # entries, silently discarding the whole comment block.
            k, v = comment.split('=', 1)
            toret[k.lower()].append(v)
        return toret
    except Exception:
        return {}
def decodeBlockPicture(file_obj):
    """Decode a FLAC METADATA_BLOCK_PICTURE into a dict of its fields.

    All multi-byte integers are big-endian. Returns {} on any parse error.
    """
    try:
        pic_type, mime_length = struct.unpack('>II', safe_read(file_obj, 8))
        mime = ''.join(struct.unpack('>%dc' % mime_length, safe_read(file_obj, mime_length)))
        desc_length = struct.unpack('>I', safe_read(file_obj, 4))[0]
        # The description is UTF-8 text (Python 2 `unicode`); picture bytes
        # that follow are left raw.
        description = unicode(''.join(struct.unpack('>%dc' % desc_length, safe_read(file_obj, desc_length))), 'utf-8')
        width, height, depth, colors, data_len = struct.unpack('>IIIII', safe_read(file_obj, 20))
        data = safe_read(file_obj, data_len)
        return {'type': pic_type,
                'mime': mime,
                'description': description,
                'width': width,
                'height': height,
                'depth': depth,
                'colors': colors,
                'data': data}
    except Exception:
        return {}
def readBlockPicture(file_obj):
    """Read a raw METADATA_BLOCK_PICTURE without decoding it.

    Accumulates the exact bytes of the block and returns them under the
    Vorbis comment key 'metadata_block_picture'. Returns {} on parse error.
    """
    try:
        buf = ''
        buf += safe_read(file_obj, 8)
        pic_type, mime_length = struct.unpack('>II', buf[-8:])
        buf += safe_read(file_obj, mime_length)
        buf += safe_read(file_obj, 4)
        desc_length = struct.unpack('>I', buf[-4:])[0]
        buf += safe_read(file_obj, desc_length)
        buf += safe_read(file_obj, 20)
        # The last 20 bytes hold width/height/depth/colors and the data size.
        width, height, depth, colors, data_len = struct.unpack('>IIIII', buf[-20:])
        buf += safe_read(file_obj, data_len)
        return {'metadata_block_picture': [buf]}
    except Exception:
        return {}
|
[
"bizonix@me.com"
] |
bizonix@me.com
|
0c4d74fc244e79ebb2b0c11a0c7f7fcf431d901f
|
079c07c5d97eb60d36269e27309e84b25ea0aaeb
|
/guidehero-backend/app/managers/call_manager.py
|
2df061c86f9dcff1932fc86ea2e7e2a95baf97e2
|
[] |
no_license
|
itdream-dev/python
|
3aa44329673f05e2a86e1cba56cb88101c777233
|
eda81b802b99f45933bdf0d22b508837cfa538f0
|
refs/heads/master
| 2023-03-05T12:27:42.776870
| 2020-05-11T15:54:45
| 2020-05-11T15:54:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
# -*- coding: utf-8 -*-
from config import Ivysaur
from lib.registry import get_registry
from lib.models.call import Call
from lib.push_notifications import PushNotifications
class CallManager(object):
    """Coordinates TokBox call sessions between two users.

    Pulls the call/user/device repositories and the TokBox client from the
    service registry, and pushes an APNs notification to the callee's most
    recent device when a call starts.
    """

    def __init__(self):
        registry = get_registry()
        self.call_repo = registry['CALL_REPO']
        self.user_repo = registry['USER_REPO']
        self.device_repo = registry['DEVICE_REPO']
        self.tokbox = registry['TOKBOX']
        self.push_notifications = PushNotifications()

    def start_session(self, user, user_id_2):
        # Create a fresh TokBox session and a token the caller joins with.
        session = self.tokbox.create_session()
        session_id = session.session_id
        token = self.tokbox.generate_token(session_id)
        recepient = self.user_repo.get_user(user_id_2)
        self.call_repo.start_session(user, recepient, session_id)
        # Notify only the recipient's latest registered device, if any.
        device = self.device_repo.get_latest_device(user_id_2)
        if device:
            self.push_notifications.send_notification(
                device.device_token,
                'Incoming call from %s' % user.name,
                sound='calling.caf'
            )
        return {
            'api_key': Ivysaur.Config.TOKBOX_API_KEY,
            'session_id': session_id,
            'token': token
        }

    def get_pending_call(self, user):
        # An empty dict (not None) signals "no pending call" to callers.
        pending_call = self.call_repo.get_pending_call(user)
        if not pending_call:
            return {}
        session_id = pending_call.session_id
        token = self.tokbox.generate_token(session_id)
        return {
            'api_key': Ivysaur.Config.TOKBOX_API_KEY,
            'session_id': session_id,
            'token': token,
            'caller_name': pending_call.caller.name
        }

    def report_connected(self, session_id):
        # Only calls still in the INITIATED state transition to connected.
        call = self.call_repo.get_call_from_session_id(session_id)
        if not call or call.status != Call.INITIATED:
            return
        self.call_repo.report_connected(call)

    def report_ended(self, session_id):
        # Idempotent: unknown sessions and already-ended calls are ignored.
        call = self.call_repo.get_call_from_session_id(session_id)
        if not call or call.status == Call.ENDED:
            return
        self.call_repo.report_ended(call)
|
[
"skyclean906@gmail.com"
] |
skyclean906@gmail.com
|
aaac7828f0ebe58e41fab34c975790676ce05ef9
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_116/ch32_2020_04_08_11_44_06_529462.py
|
cc4a1b9f2ef0e4e497cbb913dca1ed7af116e79d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
def lista_primos(n):
    """Return a list with the first n prime numbers (n=5 -> [2, 3, 5, 7, 11]).

    Bug fix: the original loop mis-tracked its trial divisor across
    candidates, so it skipped the primes 3 and 7 and accepted composites
    such as 9. This version does plain trial division up to sqrt(candidate).
    """
    primos = []
    candidato = 2
    while len(primos) < n:
        eh_primo = True
        divisor = 2
        while divisor * divisor <= candidato:
            if candidato % divisor == 0:
                eh_primo = False
                break
            divisor += 1
        if eh_primo:
            primos.append(candidato)
        candidato += 1
    return primos
|
[
"you@example.com"
] |
you@example.com
|
dcd05b317337bac479b22dcaea4f461603eaa11b
|
02e23da0431623db86c8138bda350a1d526d4185
|
/Archivos Python Documentos/Graficas/.history/matriz_20200222132010.py
|
8534cb3045cb3b356fb2e42fe4a210b62a5a9f3b
|
[] |
no_license
|
Jaamunozr/Archivos-python
|
d9996d3d10ff8429cd1b4c2b396016a3a5482889
|
1f0af9ba08f12ac27e111fcceed49bbcf3b39657
|
refs/heads/master
| 2022-08-05T14:49:45.178561
| 2022-07-13T13:44:39
| 2022-07-13T13:44:39
| 244,073,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
import numpy as np
import os
import pylab as pl
import matplotlib.pyplot as plt
os.system("clear")
# 2x2 matrix as a nested list. Python has no MATLAB-style ';' row
# separator inside list literals (the original `[12, 23; 34, 34,]` was a
# SyntaxError), so each row is its own inner list.
g = [
    [12, 23],
    [34, 34],
]
print(g)
"""
raiz=np.sqrt
ln=np.log
X = np.arange(-2, 12, 0.1)
Y = np.arange(-2, 12, 0.1)
J=np.count_nonzero(Y)
print (J)
a = [0] * J
for i in range(J):
a[i] = Y[i]
X[25]=0.49
X[65]=4.49
X[105]=8.49
Y[25]=0.49
Y[65]=4.49
Y[105]=8.49
ax, ay = 0.5, 0.5
bx, by = 4.5, 0.4
cx, cy = 8.5, 0.5
dx, dy = 0.5, 4.5
ex, ey = 8.5, 4.5
fx, fy = 0.5, 8.5
gx, gy = 4.5, 8.5
hx, hy = 8.5, 8.5
l = 2
rho= 100
ik=25
ma=raiz((X-ax)**2+(Y-ay)**2)
mb=raiz((X-bx)**2+(Y-by)**2)
mc=raiz((X-cx)**2+(Y-cy)**2)
md=raiz((X-dx)**2+(Y-dy)**2)
me=raiz((X-ex)**2+(Y-ey)**2)
mf=raiz((X-fx)**2+(Y-fy)**2)
mg=raiz((X-gx)**2+(Y-gy)**2)
mh=raiz((X-hx)**2+(Y-hy)**2)
va=ln((l+raiz(ma**2+l**2))/ma)
vb=ln((l+raiz(mb**2+l**2))/mb)
vc=ln((l+raiz(mc**2+l**2))/mc)
vd=ln((l+raiz(md**2+l**2))/md)
ve=ln((l+raiz(me**2+l**2))/me)
vf=ln((l+raiz(mf**2+l**2))/mf)
vg=ln((l+raiz(mg**2+l**2))/mg)
vh=ln((l+raiz(mh**2+l**2))/mh)
Vt=((rho*ik)/(2*np.pi))*(va+vb+vc+vd+ve+vf+vg+vh)
print (Vt[::].max())
print(type(Vt))
print(Vt.shape)
plt.figure(figsize=(X,Y))
plt.imshow(Vt, cmap = "summer")
plt.colorbar(
plt.show()
)"""
|
[
"jaamunozr@gmail.com"
] |
jaamunozr@gmail.com
|
c711158cdb65871fda79be945d0bae0d04d531a8
|
50afc0db7ccfc6c80e1d3877fc61fb67a2ba6eb7
|
/challenge20(T-primes)/solutions/Coder45.py
|
32ba0e7800485a552e5637112a3fad818e939995
|
[
"MIT"
] |
permissive
|
banana-galaxy/challenges
|
792caa05e7b8aa10aad8e04369fc06aaf05ff398
|
8655c14828607535a677e2bb18689681ee6312fa
|
refs/heads/master
| 2022-12-26T23:58:12.660152
| 2020-10-06T13:38:04
| 2020-10-06T13:38:04
| 268,851,516
| 11
| 8
|
MIT
| 2020-09-22T21:21:30
| 2020-06-02T16:24:41
|
Python
|
UTF-8
|
Python
| false
| false
| 165
|
py
|
def solution(n):
    """Return True if n is a T-prime: a number with exactly three divisors,
    i.e. the square of a prime (4, 9, 25, 49, ...).

    Fixes in this revision:
    - removed the stray leftover ``python`` markdown-fence word that made
      the module unimportable (NameError);
    - the original returned from inside the first iteration of a useless
      outer loop and returned None for n == 0; this version always returns
      a bool and runs in O(n^(1/4)) instead of O(n).
    """
    from math import isqrt
    if n < 4:
        return False  # 0..3 have fewer than three divisors
    root = isqrt(n)
    if root * root != n:
        return False  # only perfect squares can have an odd divisor count
    # n = p*p has exactly three divisors iff its square root p is prime.
    return all(root % d for d in range(2, isqrt(root) + 1))
|
[
"cawasp@gmail.com"
] |
cawasp@gmail.com
|
62f3762c1f4cc277c8f0b20c4777ee5c189eb345
|
e593f5b34050eba13fbadeee3563346fa0f1c25b
|
/tests/plugins/test_speedrunslive.py
|
81731cbdc212b4e31f3887da8a114f76df267300
|
[
"BSD-2-Clause",
"CC-BY-SA-2.0",
"MIT",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
fuglede/streamlink
|
f9e56e434b01ae426edd83f13037384af294838a
|
2661d40164986f979edc2e6867f8daeceba73a44
|
refs/heads/master
| 2020-03-25T08:36:43.175618
| 2018-08-05T15:10:15
| 2018-08-05T15:10:15
| 143,622,979
| 0
| 0
|
BSD-2-Clause
| 2018-08-05T15:07:12
| 2018-08-05T15:07:12
| null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
import unittest
from streamlink.plugins.speedrunslive import SpeedRunsLive
class TestPluginSpeedRunsLive(unittest.TestCase):
    """URL-matching tests for the SpeedRunsLive plugin."""

    def test_can_handle_url(self):
        """The plugin should claim speedrunslive.com stream URLs."""
        for url in ('http://www.speedrunslive.com/#!/twitch',):
            self.assertTrue(SpeedRunsLive.can_handle_url(url))

    def test_can_handle_url_negative(self):
        """The plugin should not claim unrelated URLs."""
        for url in ('https://www.twitch.tv',):
            self.assertFalse(SpeedRunsLive.can_handle_url(url))
|
[
"gravyboat@users.noreply.github.com"
] |
gravyboat@users.noreply.github.com
|
5ec6d29eb18f4b5a615c47c002f54ce12402b6b1
|
611847354ec077c5bc65fdb08c9f45ff45b4bfcc
|
/code/docker/python/flask_2/app.py
|
234a0768f1eb0f2a9ab328dd7b3a0fc9be9cf1a3
|
[
"MIT"
] |
permissive
|
afcarl/pythoh_machine_learning_excerise
|
1a572e4c6db11ee28d5c245f20fc81b334d04995
|
f2b6e93eb02345f9078642cff3066e3e65557e51
|
refs/heads/master
| 2020-03-21T06:00:08.623962
| 2017-06-08T23:03:35
| 2017-06-08T23:03:35
| 138,193,305
| 1
| 0
| null | 2018-06-21T16:05:53
| 2018-06-21T16:05:52
| null |
UTF-8
|
Python
| false
| false
| 2,365
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from flask import Flask, render_template, request
from wtforms import Form, TextAreaField, validators
import pickle
import sqlite3
import os
import numpy as np
from vectorizer import vect
app = Flask(__name__)
cur_dir = os.path.dirname(__file__)
clf = pickle.load(open(os.path.join(cur_dir,
'pickle_objects',
'classifier.pkl'), 'rb'))
db = os.path.join(cur_dir, 'reviews.sqlite')
def classify(document):
    # Vectorize one review, predict its sentiment with the pickled
    # classifier, and return (label, max class probability).
    label = {0: 'negative', 1: 'positive'}
    X =vect.transform([document])
    y = clf.predict(X)[0]
    proba = np.max(clf.predict_proba(X))
    return label[y], proba
def train(document, y):
    # Online learning: incrementally fit the classifier on one labelled review.
    X = vect.transform([document])
    clf.partial_fit(X, [y])
def sqlite_entry(path, document, y):
    """Insert one (review, sentiment) row into review_db, stamped with the
    current time.

    The connection is now closed in a finally block, so it is no longer
    leaked when the INSERT or COMMIT raises.
    """
    conn = sqlite3.connect(path)
    try:
        c = conn.cursor()
        c.execute("INSERT INTO review_db (review, sentiment, date)"
                  " VALUES (?, ?, DATETIME('now'))", (document, y))
        conn.commit()
    finally:
        conn.close()
class ReviewForm(Form):
    # Single textarea; submission requires at least 15 characters.
    moviereview = TextAreaField('',
                                [validators.DataRequired(),
                                validators.length(min=15)])
@app.route('/')
def index():
    """Render the empty review-submission form."""
    form = ReviewForm(request.form)
    return render_template('reviewform.html', form=form)
@app.route('/requests', methods=['POST'])
def results():
    """Classify the submitted review and render the prediction page.

    Bug fix: the form field is named 'moviereview' (see ReviewForm), but
    the old code read request.form['movierview'], raising a KeyError on
    every valid submission.
    """
    form = ReviewForm(request.form)
    if request.method == 'POST' and form.validate():
        review = request.form['moviereview']
        y, proba = classify(review)
        return render_template('results.html',
                               content=review,
                               prediction=y,
                               probability=round(proba * 100, 2))
    # Validation failed: show the form again.
    return render_template('reviewform.html', form=form)
@app.route('/thanks', methods=['POST'])
def feedback():
    """Record the user's verdict on a prediction and keep learning online.

    If the user marked the prediction 'Incorrect', the label is flipped
    before the incremental model update and the database insert.
    """
    feedback = request.form['feedback_button']
    review = request.form['review']
    prediction = request.form['prediction']
    inv_label = {'negative': 0, 'positive': 1}
    y = inv_label[prediction]
    if feedback == 'Incorrect':
        y = int(not(y))
    train(review, y)
    sqlite_entry(db, review, y)
    return render_template('thanks.html')
if __name__ == '__main__':
app.run(debug=True)
|
[
"snow.akogi.pgel@gmail.com"
] |
snow.akogi.pgel@gmail.com
|
9bf502aa53de5ff285b04513b8db97f45b9147ae
|
64d923ab490341af97c4e7f6d91bf0e6ccefdf4b
|
/tensorforce/core/policies/state_value.py
|
3f2776d338b73a577f31700a2da5f1127a5c3642
|
[
"Apache-2.0"
] |
permissive
|
tensorforce/tensorforce
|
38d458fedeeaa481adf083397829cea434d020cd
|
1bf4c3abb471062fb66f9fe52852437756fd527b
|
refs/heads/master
| 2023-08-17T17:35:34.578444
| 2023-08-14T20:14:08
| 2023-08-14T20:14:08
| 85,491,050
| 1,312
| 246
|
Apache-2.0
| 2023-08-14T20:14:10
| 2017-03-19T16:24:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce.core import SignatureDict, TensorSpec, tf_function
from tensorforce.core.policies import BasePolicy
class StateValue(BasePolicy):
    """
    Base class for state-value functions, here categorized as "degenerate" policy.

    Args:
        device (string): Device name
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        l2_regularization (float >= 0.0): Scalar controlling L2 regularization
            (<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
        name (string): <span style="color:#0000C0"><b>internal use</b></span>.
        states_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
        auxiliaries_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
        actions_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
    """

    def __init__(
        self, *, device=None, l2_regularization=None, name=None, states_spec=None,
        auxiliaries_spec=None, actions_spec=None
    ):
        # Pure pass-through to the BasePolicy initializer.
        BasePolicy.__init__(
            self=self, device=device, l2_regularization=l2_regularization, name=name,
            states_spec=states_spec, auxiliaries_spec=auxiliaries_spec, actions_spec=actions_spec
        )

    def input_signature(self, *, function):
        # Only 'state_value' is declared here; defer everything else upward.
        if function != 'state_value':
            return super().input_signature(function=function)
        return SignatureDict(
            states=self.states_spec.signature(batched=True),
            horizons=TensorSpec(type='int', shape=(2,)).signature(batched=True),
            internals=self.internals_spec.signature(batched=True),
            auxiliaries=self.auxiliaries_spec.signature(batched=True)
        )

    def output_signature(self, *, function):
        if function != 'state_value':
            return super().output_signature(function=function)
        # A single batched float scalar: the estimated state value.
        return SignatureDict(
            singleton=TensorSpec(type='float', shape=()).signature(batched=True)
        )

    @tf_function(num_args=4)
    def state_value(self, *, states, horizons, internals, auxiliaries):
        # Abstract: concrete state-value functions must implement this.
        raise NotImplementedError
|
[
"alexkuhnle@t-online.de"
] |
alexkuhnle@t-online.de
|
98fb0dcf64f5486c42788855054e4d8f97762dd7
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-nlp/huaweicloudsdknlp/v2/model/post_sentence_embedding_req.py
|
70377e135a6ed8d93d796857d996707216550b46
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,253
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PostSentenceEmbeddingReq:
    """Request model for the sentence-embedding NLP API (generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'sentences': 'list[str]',
        'domain': 'str'
    }

    attribute_map = {
        'sentences': 'sentences',
        'domain': 'domain'
    }

    def __init__(self, sentences=None, domain=None):
        """PostSentenceEmbeddingReq

        The model defined in huaweicloud sdk

        :param sentences: List of texts; each 1-512 characters long, list size
            1-1000, UTF-8 encoded.
        :type sentences: list[str]
        :param domain: Supported domain type; currently only the general
            domain is supported, default is 'general'.
        :type domain: str
        """
        self._sentences = None
        self._domain = None
        self.discriminator = None
        self.sentences = sentences
        # domain is optional; only stored when explicitly provided.
        if domain is not None:
            self.domain = domain

    @property
    def sentences(self):
        """Gets the sentences of this PostSentenceEmbeddingReq.

        List of texts; each 1-512 characters long, list size 1-1000, UTF-8
        encoded.

        :return: The sentences of this PostSentenceEmbeddingReq.
        :rtype: list[str]
        """
        return self._sentences

    @sentences.setter
    def sentences(self, sentences):
        """Sets the sentences of this PostSentenceEmbeddingReq.

        :param sentences: The sentences of this PostSentenceEmbeddingReq.
        :type sentences: list[str]
        """
        self._sentences = sentences

    @property
    def domain(self):
        """Gets the domain of this PostSentenceEmbeddingReq.

        Supported domain type; currently only the general domain is
        supported, default is 'general'.

        :return: The domain of this PostSentenceEmbeddingReq.
        :rtype: str
        """
        return self._domain

    @domain.setter
    def domain(self, domain):
        """Sets the domain of this PostSentenceEmbeddingReq.

        :param domain: The domain of this PostSentenceEmbeddingReq.
        :type domain: str
        """
        self._domain = domain

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model elements; pass primitives through.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes in serialized output.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PostSentenceEmbeddingReq):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
3eed1b10050537ad9781069bb46ed2f3703cf569
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/intermediate-bite-14-generate-a-table-of-n-sequences.py
|
55c17a68b9ae77cc101badbd72287e480fb740fa
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,471
|
py
|
"""
DATE: 05 Nov 2020
TASK DESCRIPTION:
Write a function that receives one or more sequences. The sequences are already defined for you.
The function should return a table (list of strings) where the columns are the sequences
(example below).
To keep it simple we work with equally sized sequences so you don't have to worry about
handling a missing value (you should end up with a grid of 6 rows x n columns).
There are some Pythonic idioms you can use here, hint: think of pants ;)
Example call (look at the tests for more detail):
>>> generate_table(names, aliases)
['Julian | Pythonista', 'Bob | Nerd', 'PyBites | Coder',
'Dante | Pythonista', 'Martin | Nerd', 'Rodolfo | Coder']
Bonus: use a generator to build up the table rows.
"""
import random
# Fixture sequences of equal length (6): names, aliases (three repeated
# twice), six distinct random scores in [81, 100], alternating awake flags.
names = 'Julian Bob PyBites Dante Martin Rodolfo'.split()
aliases = 'Pythonista Nerd Coder'.split() * 2
points = random.sample(range(81, 101), 6)
awake = [True, False] * 3
# Column separator used when joining one table row.
SEPARATOR = ' | '
### ----------- My solution ---------------------------
def my_generate_table(*args):
    """Return one table-row string per position, columns joined by ' | '.

    Row i is built from the i-th element of every input sequence, each
    converted to str.  As with zip(), the shortest sequence bounds the
    number of rows.
    """
    # Idiom fix: str.join over a generator replaces the original manual
    # "build s with repeated concatenation" loop.
    return [" | ".join(str(value) for value in row) for row in zip(*args)]
### ---------- PyBites original solution ---------------
def pyb_generate_table(*sequences):
    """Lazily yield one table row per zipped position, joined with SEPARATOR."""
    for row in zip(*sequences):
        yield SEPARATOR.join(str(value) for value in row)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
43ef6671cbd2943a73a2201439c31fdfc5c0ad9c
|
54a745510b16111f5e5f610a07be49ea1e79fccf
|
/py1810/hello_mysql_01.py
|
086070e7afa84b55e4f9c256485a983058c32dcc
|
[] |
no_license
|
SonDog0/bigdata
|
84a5b7c58ad9680cdc0e49ac6088f482e09118a5
|
e6cd1e3bbb0bfec0c89a31b3fb4ef66d50c272be
|
refs/heads/master
| 2020-04-22T02:24:16.469718
| 2019-03-13T08:59:26
| 2019-03-13T08:59:26
| 170,047,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,590
|
py
|
# 파이썬으로 MySQL, MariaDB 다루기
# python에서 MySQL 데이터베이스를 지원하려면
# python DB API 규약에 맞게 작성된 mySQL DB 모듈 필요
# 일반적으로 pyMySQL 모듈을 많이 사용
import pymysql
# # mysql connection 생성
# conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')
#
# curs = conn.cursor()
#
# curs.execute('DROP TABLE items')
# curs.execute('''create table items( item_id INTEGER PRIMARY KEY AUTO_INCREMENT, name TEXT, price INTEGER)''' )
# # sql 질의문 실행
# sql = 'select * from books'
# curs.execute(sql)
#
# # 결과 집합 처리
# for rs in curs.fetchall():
# print(rs[0], rs[1], rs[2], rs[3]) #배열 기반 커서
#
#
#
#
# #mysql connection 닫기
# conn.close()
# # mysql connection 생성
# conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')
# # connection 으로부터 dict cursor 생성
# curs = conn.cursor(pymysql.cursors.DictCursor)
#
# # sql 질의문 실행
# sql = 'select * from books'
# curs.execute(sql)
#
# # 결과 집합 처리
# for rs in curs.fetchall():
# print(rs['bno'], rs['bname'], rs['bpub'], rs['bprice']) #사전기반 커서
#
# #mysql connection 닫기
# conn.close()
# 1~100 까지 2배수, 3배수, 5배수 저장
# 테이블 이름은 numbers
# 필드는 no, no2, no3, no5
# mysql connection 생성
# Create the MySQL/MariaDB connection.
# NOTE(review): host/user/password are hardcoded here (and committed) --
# move credentials to configuration or environment variables.
conn = pymysql.connect(host='13.209.88.188', user= 'son', password= '931027',db='SON_MARIADB', charset='utf8')

# Create a dict-based cursor from the connection (rows keyed by column name).
curs = conn.cursor(pymysql.cursors.DictCursor)

# SQL statements: recreate the `numbers` table, then insert per-row values.
create_sql = 'create table numbers( no2 int, no3 int, no5 int )'
drop_sql = 'drop table numbers'
sql = 'insert into numbers values(%s,%s,%s)'
# NOTE(review): drop fails if the table does not exist yet -- presumably it
# always does on this server; confirm or use DROP TABLE IF EXISTS.
curs.execute(drop_sql)
curs.execute(create_sql)

# For 1..100 record the number in a column when it is a multiple of
# 2 / 3 / 5 respectively, and 0 otherwise.
num1 = 0
num2 = 0
num3 = 0
for i in range (1,101):
    if i % 2 == 0:
        num1 = i
    else:
        num1 = 0
    if i % 3 == 0:
        num2 = i
    else:
        num2 = 0
    if i % 5 == 0:
        num3 = i
    else:
        num3 = 0
    curs.execute(sql, (num1, num2, num3))

# Apply the pending inserts on the server.
conn.commit()

# Read back and print the result set (dict-based cursor).
select_sql = 'select * from numbers'
curs.execute(select_sql)
for rs in curs.fetchall():
    print(rs['no2'], rs['no3'], rs['no5'])

# Close the MySQL connection.
conn.close()
|
[
"noreply@github.com"
] |
SonDog0.noreply@github.com
|
eb3d9991bac5d69b10d1a291e2e099785c5e1bdb
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/Backtracking/90_SubsetsII.py
|
f0018bb559069fbfa983759b7fcba413f3f6cb4b
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE(review): this is a fill-in-the-blank exercise TEMPLATE, not runnable
# Python -- keywords and identifiers are deliberately elided (apparently
# `c..` = class, `___` = def, `r_` = return, `__` = if/in, `a..` = append,
# `l..` = len, `r..` = range, `s..` = sort).  Kept byte-identical; do not
# attempt to import this module.
c.. Solution o..
    ___ subsetsWithDup nums
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        __ n.. nums:
            r_ []
        nums.s.. )
        nums_len = l..(nums)
        # Keep the subsets without duplicate subsets
        subsets = [[nums[0]]]
        # Keep the previous subsets which contains previous nums.
        pre_subset = [[nums[0]]]
        ___ i __ r..(1, nums_len
            # Combine current num with the previous subsets,
            # Then update the previous subsets
            __ nums[i] __ nums[i-1]:
                ___ j __ r..(l..(pre_subset)):
                    one_set = pre_subset[j][:]
                    one_set.a.. nums[i])
                    subsets.a.. one_set)
                    pre_subset[j] = one_set
            # Combine current num with all the subsets before.
            # Then update the previous subsets
            ____
                pre_subset  # list
                ___ j __ r..(l..(subsets)):
                    one_set = subsets[j][:]
                    one_set.a.. nums[i])
                    subsets.a.. one_set)
                    pre_subset.a.. one_set)
                pre_subset.a.. [nums[i]])
                subsets.a.. [nums[i]])
        subsets.a.. [])
        r_ subsets
"""
[]
[1,2]
[1,2,2]
[1,2,2,3,3,4,5]
"""
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
f24083eb0c7654f23ecf8369b85752a9772562e2
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/ads/googleads/v4/googleads-py/google/ads/googleads/v4/enums/types/targeting_dimension.py
|
76a70911a6f15e25b79765af970fefb4bcd708ba
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826
| 2021-06-16T20:42:38
| 2021-06-16T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v4.enums',
marshal='google.ads.googleads.v4',
manifest={
'TargetingDimensionEnum',
},
)
class TargetingDimensionEnum(proto.Message):
    r"""The dimensions that can be targeted. """

    class TargetingDimension(proto.Enum):
        r"""Enum describing possible targeting dimensions."""
        # Generated from the Google Ads v4 proto definition; the numeric
        # values are wire-format tags and must not be reordered or changed.
        UNSPECIFIED = 0
        UNKNOWN = 1
        KEYWORD = 2
        AUDIENCE = 3
        TOPIC = 4
        GENDER = 5
        AGE_RANGE = 6
        PLACEMENT = 7
        PARENTAL_STATUS = 8
        INCOME_RANGE = 9


# Public API of this generated module, derived from the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
adb2babffe1e8af59930020f6c17f6d45db5f76f
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/request/KoubeiTradeOrderConsultRequest.py
|
2defd325c725861c41724ed3832b3e090ad2407b
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,936
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiTradeOrderConsultModel import KoubeiTradeOrderConsultModel
class KoubeiTradeOrderConsultRequest(object):
    """Request wrapper for the 'koubei.trade.order.consult' OpenAPI method.

    Generated SDK code: holds the business payload plus common gateway
    parameters (version, terminal info, callback URLs) and flattens them
    into the form expected by the Alipay gateway via get_params().
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict, which is
        # coerced into the model type.
        if isinstance(value, KoubeiTradeOrderConsultModel):
            self._biz_content = value
        else:
            self._biz_content = KoubeiTradeOrderConsultModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (generated-SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Add one extra user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Flatten all set fields into the gateway parameter dict.

        biz_model takes the canonical P_BIZ_CONTENT slot; a separately set
        biz_content is serialized under the literal 'biz_content' key.
        """
        params = dict()
        params[P_METHOD] = 'koubei.trade.order.consult'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """This request carries no file uploads; always an empty dict."""
        multipart_params = dict()
        return multipart_params
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
6e3f3b1486dc70ef90cb88af554179fd8f6dc4d5
|
59090da2fe4e848b986c704b1ecf06ebe2d730b1
|
/conferences/items.py
|
459b144b8d8631dcf42753824838e783ac88a031
|
[
"MIT"
] |
permissive
|
manuphatak/conferences
|
75449d2b16d546d4c66e9363369331239c74c9bd
|
67e8880fe7049c003650d83e090b95cc09b45da5
|
refs/heads/master
| 2021-06-05T04:03:18.071859
| 2015-12-25T01:13:18
| 2015-12-25T01:13:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ConferencesItem(scrapy.Item):
    """Scraped-item container for the conferences spider (no fields yet)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
|
[
"bionikspoon@gmail.com"
] |
bionikspoon@gmail.com
|
b2801badf5bd6284bd289b522b327b3edbb347b5
|
6131b2738a7c087dfa6907c624453576f6f0e393
|
/银行转账pmysql版本/Bank_Transfer.py
|
34c606ac76f8f664750def22983d82d05855ec09
|
[] |
no_license
|
heheddff/myPythonProcess
|
60ef240130cd02906dc500eedb397a9662c02e5a
|
885a25dd2a9cd43801306d9e70b9ce89daec4406
|
refs/heads/master
| 2020-04-08T19:09:18.192738
| 2019-08-06T02:52:54
| 2019-08-06T02:52:54
| 159,642,468
| 4
| 5
| null | null | null | null |
GB18030
|
Python
| false
| false
| 2,049
|
py
|
# coding=gbk
import pymysql
class Money():
    """Transfer `mon` units from account `sid` to account `tid`.

    All checks and updates run on a single pymysql connection against the
    `money` table; run() commits the whole transfer or rolls everything
    back on any failure.
    """
    def __init__(self,sid,tid,mon):
        # NOTE(review): hardcoded local DB credentials (password masked) --
        # move to configuration before real use.
        self.conn = pymysql.connect(
            host="127.0.0.1",
            port=3306,
            user='root',
            passwd='****',
            db='test'
        )
        self.cursor = self.conn.cursor();
        self.table = "money"
        self.sid = sid  # source account id
        self.tid = tid  # target account id
        self.mon = mon  # amount to transfer

    def checkuser(self,userid):
        """Raise if `userid` does not exist in the money table."""
        try:
            sql = "select userid from "+self.table+" where userid=%s"
            self.cursor.execute(sql,(userid,))
            res = self.cursor.fetchone()
            if res is None:
                raise Exception("账号{}不存在".format(userid))
        finally:
            # Connection/cursor stay open: run() manages their lifetime.
            pass
            #self.cursor.close()
            #self.conn.close()

    def reducemoney(self,userid,money):
        """Debit `money` from `userid`; raise if no row was updated."""
        try:
            sql = "update "+self.table+" set money=money-%s where userid=%s"
            self.cursor.execute(sql,(money,userid))
            if self.cursor.rowcount != 1:
                raise Exception("账号{}转账失败".format(userid))
        finally:
            pass
            #self.cursor.close()
            #self.conn.close()

    def addmoney(self,userid,money):
        """Credit `money` to `userid`; raise if no row was updated."""
        try:
            sql = "update "+self.table+" set money=money+%s where userid=%s"
            self.cursor.execute(sql,(money,userid,))
            if self.cursor.rowcount != 1:
                raise Exception("账号{}收账失败".format(userid))
        finally:
            pass
            #self.cursor.close()
            #self.conn.close()

    def checkmoney(self,userid,money):
        """Raise unless `userid` has a balance greater than `money`.

        NOTE(review): strict '>' means transferring the exact balance
        fails -- confirm whether '>=' was intended.
        """
        try:
            sql = "select userid from "+self.table+" where userid=%s and money>%s"
            self.cursor.execute(sql,(userid,money))
            res = self.cursor.fetchone()
            if res is None:
                raise Exception("账号{}余额小于{}".format(userid,money))
        finally:
            pass
            #self.cursor.close()
            #self.conn.close()

    def run(self):
        """Execute the transfer atomically: commit on success, rollback on error."""
        try:
            self.checkuser(self.sid)
            self.checkuser(self.tid)
            self.checkmoney(self.sid,self.mon)
            self.reducemoney(self.sid,self.mon)
            self.addmoney(self.tid,self.mon)
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            raise e
        finally:
            # Always release DB resources, success or failure.
            self.cursor.close()
            self.conn.close()
# Ad-hoc smoke test: transfer 100 from account 11 to account 13 and
# report the outcome on stdout.
try:
    m = Money(11,13,100)
    m.run()
except Exception as e:
    print(e)
else:
    print("转账成功")
|
[
"qq2003qq@126.com"
] |
qq2003qq@126.com
|
2e89a9bd74c09e3531412e11b310be4b94ae18d1
|
2a39fe8bd203531c9bcdb470d19b80beac665eae
|
/model_cluster.py
|
288cf9409b0694d16f6334c5ea877ffeafd2e726
|
[] |
no_license
|
davidharvey1986/lenstoolTools
|
7bf11af1a38700503a731c6fe7e83fdc92bf58c1
|
85bcf729603d34341f5f41c57c4e233b08055baa
|
refs/heads/master
| 2021-09-08T14:29:52.695461
| 2018-03-10T13:54:50
| 2018-03-10T13:54:50
| 124,657,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,715
|
py
|
'''
This script has 2 functions:
1. model_cluster( ra, dec, cluster, \
halos=None, \
best_file=None)
This models the input cluster and returns a structure
from simulate_project with shear, chi, etc.
'''
import numpy as np
import ipdb as pdb
import astro_tools as at
import idlsave as idlsave
import lensing as l
import copy as copy
import glob as glob
import os
def model_cluster( ra, dec, cluster, \
                       halos=None, \
                       best_file=None):
    '''
    Model the NFW weak-lensing signal of `cluster` at the given source
    positions.

    Args:
        ra, dec: arrays of source galaxy positions (degrees, presumably --
            confirm against the lensing package's conventions).
        cluster: cluster name, used to locate the default best-fit file.
        halos: optional structure whose per-halo galaxy positions override
            the lens centres from the best-fit file.
        best_file: lenstool best-fit parameter file; defaults to
            <dataDir>/<cluster>/best.par.

    Returns:
        The populated `space` simulation object after weak_lensing() has
        been run (shear etc. attached by the lensing package).
    '''
    # Fall back to the hardcoded rerun directory for this cluster.
    # NOTE(review): user-specific absolute path -- consider parameterizing.
    if best_file is None:
        dataDir = '/Users/DavidHarvey/Documents/Work/Trails/data/rerun/'+cluster
        best_file = dataDir+'/best.par'

    # Best-fit potentials (one entry per halo) from the lenstool output.
    runmode, potentials = l.lenstool.read_best( filename=best_file)

    space = l.simulations.templates.space()
    # First lens/source pair: drop the default isothermal profile and pin
    # the sources (no intrinsic ellipticity) at the requested positions.
    space.lens[0].del_profile('isothermal')
    space.source[0].ell_disp = 0.
    space.source[0].ra = ra
    space.source[0].dec = dec
    space.telescope.nGalaxies = len(dec)
    space.lens[0].redshift = potentials[0]['z_lens']['float']
    space.source[0].redshift = 1.0
    space.lens[0].ra = potentials[0]['ra']['float']
    space.lens[0].dec = potentials[0]['dec']['float']
    # halos, when given, overrides the lens centre with the halo galaxy position.
    if halos is not None:
        space.lens[0].ra = halos['halos'][0]['gal']['ra'][0]
        space.lens[0].dec = halos['halos'][0]['gal']['dec'][0]

    # NFW parameters for the primary halo, taken from the best-fit file.
    space.lens[0].profiles['nfw'].args['mass'] = \
      potentials[0]['m200']['str'].astype(np.double)
    space.lens[0].profiles['nfw'].args['conc'] = \
      potentials[0]['concentration']['float']
    space.lens[0].profiles['nfw'].args['ellipticity'] = \
      potentials[0]['ellipticite']['float']
    space.lens[0].profiles['nfw'].args['potential_angle'] = \
      potentials[0]['angle_pos']['float']
    scale_radius = l.profiles.nfw.scale_radius(space.lens[0].profiles['nfw'].args['mass'], \
                                    space.lens[0].profiles['nfw'].args['conc'],\
                                    potentials[0]['z_lens']['float'])
    space.lens[0].profiles['nfw'].args['scale_radius'] = scale_radius

    # Additional halos: same setup as halo 0, one extra lens each.
    for iHalo in range(1,len(potentials)):
        space.add_lens()
        space.lens[iHalo].redshift = potentials[0]['z_lens']['float']
        space.source[iHalo].redshift = 1.0
        space.lens[iHalo].ra = potentials[iHalo]['ra']['float']
        space.lens[iHalo].dec = potentials[iHalo]['dec']['float']
        if halos is not None:
            space.lens[iHalo].ra = halos['halos'][iHalo]['gal']['ra'][0]
            space.lens[iHalo].dec = halos['halos'][iHalo]['gal']['dec'][0]

        space.lens[iHalo].profiles['nfw'].args['mass'] = \
          potentials[iHalo]['m200']['str'].astype(np.double)
        space.lens[iHalo].profiles['nfw'].args['conc'] = \
          potentials[iHalo]['concentration']['float']
        space.lens[iHalo].profiles['nfw'].args['ellipticity'] = \
          potentials[iHalo]['ellipticite']['float']
        space.lens[iHalo].profiles['nfw'].args['potential_angle'] = \
          potentials[iHalo]['angle_pos']['float']
        scale_radius = l.profiles.nfw.scale_radius(space.lens[iHalo].profiles['nfw'].args['mass'], \
                                    space.lens[iHalo].profiles['nfw'].args['conc'],\
                                    potentials[iHalo]['z_lens']['float'])
        space.lens[iHalo].profiles['nfw'].args['scale_radius'] = scale_radius

    # Recompute derived state (keeping positions) and run the lensing pass.
    space.reload(positions=False)
    space.weak_lensing()

    return space
|
[
"davidharvey1986@googlemail.com"
] |
davidharvey1986@googlemail.com
|
37dcfb2bd2200cc648ab737e317d319edfd9d269
|
b87f66b13293782321e20c39aebc05defd8d4b48
|
/maps/build/EnthoughtBase/enthought/logger/agent/attachments.py
|
4d8f00f577f6d82f2c2ec0b1a7f4b5a14dd94aef
|
[] |
no_license
|
m-elhussieny/code
|
5eae020932d935e4d724c2f3d16126a0d42ebf04
|
5466f5858dbd2f1f082fa0d7417b57c8fb068fad
|
refs/heads/master
| 2021-06-13T18:47:08.700053
| 2016-11-01T05:51:06
| 2016-11-01T05:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,835
|
py
|
""" Attach relevant project files.
FIXME: there are no public project plugins for Envisage 3, yet. In any case,
this stuff should not be hard-coded, but extensible via extension points. The
code remains here because we can reuse the zip utility code in that extensible
rewrite.
"""
import logging
import os.path
from email import Encoders
from email.MIMEBase import MIMEBase
from enthought.traits.api import Any, HasTraits
logger = logging.getLogger(__name__)
class Attachments(HasTraits):
    """Collects project directories and attaches them (zipped) to a message."""

    # Optional Envisage application used to look up project services.
    application = Any()

    # The MIME multipart message that attachments are appended to.
    message = Any()

    def __init__(self, message, **traits):
        """Store *message* as the target for subsequent attachments."""
        traits = traits.copy()
        traits['message'] = message
        super(Attachments, self).__init__(**traits)

    # FIXME: all of the package_*() methods refer to deprecated project plugins.

    def package_workspace(self):
        """Attach a zip of the workspace directory, if that service exists."""
        # Bug fix: the original guard was `if self.application is None: pass`,
        # which fell through and dereferenced None on the next line.
        if self.application is None:
            return

        workspace = self.application.get_service('enthought.envisage.project.IWorkspace')
        if workspace is not None:
            dir = workspace.path
            self._attach_directory(dir)
        return

    def package_single_project(self):
        """Attach a zip of the single-project directory, if that service exists."""
        # Same fix as package_workspace: early return instead of `pass`.
        if self.application is None:
            return

        single_project = self.application.get_service('enthought.envisage.single_project.ModelService')
        if single_project is not None:
            dir = single_project.location
            self._attach_directory(dir)

    def package_any_relevant_files(self):
        """Attach whichever known project layouts are present."""
        self.package_workspace()
        self.package_single_project()
        return

    def _attach_directory(self, dir):
        """Zip *dir* in memory and attach it to the message as project.zip."""
        relpath = os.path.basename(dir)

        import zipfile
        from cStringIO import StringIO

        ctype = 'application/octet-stream'
        maintype, subtype = ctype.split('/', 1)
        msg = MIMEBase(maintype, subtype)

        file_object = StringIO()
        zip = zipfile.ZipFile(file_object, 'w')
        _append_to_zip_archive(zip, dir, relpath)
        zip.close()

        msg.set_payload(file_object.getvalue())
        Encoders.encode_base64(msg)  # Encode the payload using Base64
        msg.add_header('Content-Disposition', 'attachment', filename='project.zip')
        self.message.attach(msg)
        file_object.close()
def _append_to_zip_archive(zip, dir, relpath):
""" Add all files in and below directory dir into zip archive"""
for filename in os.listdir(dir):
path = os.path.join(dir, filename)
if os.path.isfile(path):
name = os.path.join(relpath, filename)
zip.write(path, name)
logger.debug('adding %s to error report' % path)
else:
if filename != ".svn": # skip svn files if any
subdir = os.path.join(dir, filename)
_append_to_zip_archive(zip, subdir, os.path.join(relpath, filename))
return
|
[
"fspaolo@gmail.com"
] |
fspaolo@gmail.com
|
a4becb7bb74d1bc89c50607b9bb58cfd03ce77ee
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/littlecodersh_EasierLife/EasierLife-master/Programs/PCMusicViaWechat/run.py
|
711a41774b068b3a371af0624be25cf996578762
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 785
|
py
|
#coding=utf8
import os
import itchat
from NetEaseMusicApi import interact_select_song
HELP_MSG = u'''\
欢迎使用微信网易云音乐

帮助: 显示帮助
关闭: 关闭歌曲
歌名: 按照引导播放音乐\
'''

# Create an empty (hence silent) stop.mp3; "playing" it preempts the
# currently playing track.
with open('stop.mp3', 'w') as f: pass

def close_music():
    """Stop playback by starting the silent placeholder file (Windows-only API)."""
    os.startfile('stop.mp3')
@itchat.msg_register(itchat.content.TEXT)
def music_player(msg):
    """Dispatch a text message sent to the file helper.

    '关闭' stops playback, '帮助' shows the help text, and any other text
    is treated as a song name to search and play.
    """
    if msg['ToUserName'] != 'filehelper': return
    if msg['Text'] == u'关闭':
        close_music()
        itchat.send(u'音乐已关闭', 'filehelper')
    # Bug fix: this was an independent `if`, so a '关闭' message also fell
    # into the final else-branch and queued a song search for the literal
    # text '关闭'.  The elif chain makes the commands mutually exclusive.
    elif msg['Text'] == u'帮助':
        itchat.send(HELP_MSG, 'filehelper')
    else:
        itchat.send(interact_select_song(msg['Text']), 'filehelper')
# Log in via a console QR code, announce the command list to the file
# helper, then block on itchat's message loop.
itchat.auto_login(True, enableCmdQR=True)
itchat.send(HELP_MSG, 'filehelper')
itchat.run()
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
31a22036f099c73ba1c28df51244f2704b311551
|
891902687207fb335b65dbb8d31d6e20301764f9
|
/pe007.py
|
3521b25495844f22a773b26856805b717f686ada
|
[] |
no_license
|
maecchi/PE
|
93bd050eaca2733aa37db6ca493b820fe3d7a351
|
3d9092635807f0036719b65adb16f1c0926c2321
|
refs/heads/master
| 2020-05-04T16:38:36.476355
| 2012-06-10T05:26:10
| 2012-06-10T05:26:10
| 1,746,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# pe007.py - Project Euler
#
# Project Euler 7: find the 10001st prime by trial division against all
# previously found primes.  NOTE(review): Python 2 source (print statement).
prime_array = []
# Misnamed historically: this tracks the COUNT of primes found, not a sum.
sum_of_array = len(prime_array)
number = 1
while (len(prime_array) < 10001) :
    is_prime = True
    number += 1
    if sum_of_array == 0:
        # Seed the list with the first candidate, 2 (number != 1 always
        # holds here since number was just incremented from 1).
        if number != 1:
            prime_array.append(number)
    else:
        for i in prime_array :
            if not number % i:
                # Divisible by a known prime -> composite.
                is_prime = False
                break
        if is_prime:
            prime_array.append(number)
    sum_of_array = len(prime_array)

# The last element appended is the 10001st prime.
print prime_array[len(prime_array)-1]
|
[
"aos81922710@yahoo.co.jp"
] |
aos81922710@yahoo.co.jp
|
2229d4df4a6585402d2b9b02a44445d1e7e39d2e
|
71f55955d7115763f9267704328f8c738aafaa15
|
/euca2ools/commands/iam/addrolepolicy.py
|
bb9e1d6ddeabb20857861ae24bcc791d2dbbad40
|
[
"BSD-2-Clause"
] |
permissive
|
fr33jc/euca2ools
|
66da4a866e9a0873ce225f9f931019b0bbd82fff
|
f4d8052000601e59e4e7d4dec4aa4094df4e39a0
|
refs/heads/master
| 2021-01-21T08:20:44.646393
| 2015-05-07T06:16:30
| 2015-05-07T06:26:57
| 35,200,788
| 0
| 0
| null | 2015-05-07T05:34:42
| 2015-05-07T05:34:42
| null |
UTF-8
|
Python
| false
| false
| 3,335
|
py
|
# Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT
from euca2ools.commands.iam.putrolepolicy import PutRolePolicy
from euca2ools.util import build_iam_policy
class AddRolePolicy(IAMRequest):
    # CLI command that builds a simple one-statement IAM policy from the
    # -e/-a/-c options and attaches it to a role via PutRolePolicy.
    DESCRIPTION = ('Add a new policy to a role. To add more complex policies '
                   'than this tool supports, see euare-roleuploadpolicy.')
    ARGS = [Arg('-r', '--role-name', metavar='ROLE', required=True,
                help='role to attach the policy to (required)'),
            Arg('-p', '--policy-name', metavar='POLICY', required=True,
                help='name of the new policy (required)'),
            Arg('-e', '--effect', choices=('Allow', 'Deny'), required=True,
                help='whether the new policy should Allow or Deny (required)'),
            Arg('-a', '--action', dest='actions', action='append',
                required=True, help='''action(s) the policy should apply to
                (at least one required)'''),
            Arg('-c', '--resource', dest='resources', action='append',
                required=True, help='''resource(s) the policy should apply to
                (at least one required)'''),
            Arg('-o', '--output', action='store_true',
                help='also display the newly-created policy'),
            AS_ACCOUNT]

    def main(self):
        """Build the policy document, upload it with PutRolePolicy, and
        return the service response augmented with the generated JSON."""
        policy = build_iam_policy(self.args['effect'], self.args['resources'],
                                  self.args['actions'])
        policy_doc = json.dumps(policy)
        req = PutRolePolicy.from_other(
            self, RoleName=self.args['role_name'],
            PolicyName=self.args['policy_name'],
            PolicyDocument=policy_doc,
            DelegateAccount=self.params['DelegateAccount'])
        response = req.main()
        # Stash the document in the response so print_result can show it.
        response['PolicyDocument'] = policy_doc
        return response

    def print_result(self, result):
        # Only print anything when -o/--output was given.
        if self.args['output']:
            print result['PolicyDocument']
|
[
"gholms@devzero.com"
] |
gholms@devzero.com
|
a8fe19a5ebd13e3f499880b38d3a0c9b3e2e1a01
|
77c641fd0708b279dddbe01f6af32a8531b93185
|
/marketsim/gen/_out/orderbook/_TwoWayLink.py
|
32086a419dd50f0c39f7d88416660a33f3316484
|
[] |
no_license
|
abensrhir/marketsimulator
|
aea286afd2bb2e0c8a547bfa879601aef21c0cd5
|
f9f55c72fb34cdbec42b96737ca20839f26c6299
|
refs/heads/master
| 2020-12-13T20:55:55.795344
| 2014-02-24T22:52:24
| 2014-02-24T22:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
from marketsim import registry
from marketsim.gen._out._itwowaylink import ITwoWayLink
from marketsim.gen._intrinsic.orderbook.link import _TwoWayLink_Impl
from marketsim.gen._out._ilink import ILink
@registry.expose(["Asset", "TwoWayLink"])
class TwoWayLink_ILinkILink(ITwoWayLink,_TwoWayLink_Impl):
    """Bidirectional link (normally between a trader and a market).

    Ensures that sending packets via links preserves their order.
    Holds two one-way links in opposite directions.
    """
    def __init__(self, up = None, down = None):
        # Function-scope imports; presumably needed to avoid import cycles
        # between generated modules -- TODO confirm.
        from marketsim.gen._out.orderbook._link import Link_IObservableFloat as _orderbook_Link_IObservableFloat
        from marketsim import rtti
        # Each direction defaults to a fresh one-way Link when not supplied.
        self.up = up if up is not None else _orderbook_Link_IObservableFloat()
        self.down = down if down is not None else _orderbook_Link_IObservableFloat()
        # Framework check of the fields against the _properties declaration.
        rtti.check_fields(self)
        _TwoWayLink_Impl.__init__(self)
    @property
    def label(self):
        # The repr doubles as the display label.
        return repr(self)
    # Type declarations consumed by the registry/rtti machinery.
    _properties = {
        'up' : ILink,
        'down' : ILink
    }
    def __repr__(self):
        return "TwoWayLink(%(up)s, %(down)s)" % self.__dict__
def TwoWayLink(up = None,down = None):
    """Overload-dispatching factory for TwoWayLink instances.

    Each argument must be either None or castable to ILink; otherwise an
    Exception describing the rejected argument types is raised.
    """
    from marketsim.gen._out._ilink import ILink
    from marketsim import rtti

    def _acceptable(candidate):
        # A missing argument is fine; anything else must cast to ILink.
        return candidate is None or rtti.can_be_casted(candidate, ILink)

    # `and` keeps the original short-circuit: `down` is only checked
    # once `up` has been accepted.
    if _acceptable(up) and _acceptable(down):
        return TwoWayLink_ILinkILink(up,down)
    raise Exception('Cannot find suitable overload for TwoWayLink('+str(up) +':'+ str(type(up))+','+str(down) +':'+ str(type(down))+')')
|
[
"anton.kolotaev@gmail.com"
] |
anton.kolotaev@gmail.com
|
223d15aa59b73791af3a6b0a32075d3e44e5e0e1
|
12c41119156dd3783c3801e07f5f973289f26bb0
|
/aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/ModifyInstanceCrossBackupPolicyRequest.py
|
73f1bb1dc4142387214d25bfcc8845befb0ee4b4
|
[
"Apache-2.0"
] |
permissive
|
toywei/aliyun-openapi-python-sdk
|
bfe0893da38af9b222ce072fd7587d5b6cdce204
|
ce8f683e3201fca8c473512267f50a34f71e31d3
|
refs/heads/master
| 2020-08-07T23:42:00.053692
| 2019-10-08T08:50:21
| 2019-10-08T08:50:21
| 213,626,962
| 1
| 0
|
NOASSERTION
| 2019-10-08T11:43:15
| 2019-10-08T11:43:15
| null |
UTF-8
|
Python
| false
| false
| 3,625
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyInstanceCrossBackupPolicyRequest(RpcRequest):
	"""RPC request for the RDS ``ModifyInstanceCrossBackupPolicy`` API
	(product 'Rds', API version 2014-08-15).

	Generated-accessor style: every query parameter is exposed as a
	trivial get_*/set_* pair that proxies ``get_query_params()`` /
	``add_query_param()`` on the ``RpcRequest`` base class.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifyInstanceCrossBackupPolicy','rds')

	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_RetentType(self):
		return self.get_query_params().get('RetentType')

	def set_RetentType(self,RetentType):
		self.add_query_param('RetentType',RetentType)

	def get_BackupEnabled(self):
		return self.get_query_params().get('BackupEnabled')

	def set_BackupEnabled(self,BackupEnabled):
		self.add_query_param('BackupEnabled',BackupEnabled)

	def get_RelService(self):
		return self.get_query_params().get('RelService')

	def set_RelService(self,RelService):
		self.add_query_param('RelService',RelService)

	def get_StorageType(self):
		return self.get_query_params().get('StorageType')

	def set_StorageType(self,StorageType):
		self.add_query_param('StorageType',StorageType)

	def get_Endpoint(self):
		return self.get_query_params().get('Endpoint')

	def set_Endpoint(self,Endpoint):
		self.add_query_param('Endpoint',Endpoint)

	def get_DBInstanceId(self):
		return self.get_query_params().get('DBInstanceId')

	def set_DBInstanceId(self,DBInstanceId):
		self.add_query_param('DBInstanceId',DBInstanceId)

	def get_Retention(self):
		return self.get_query_params().get('Retention')

	def set_Retention(self,Retention):
		self.add_query_param('Retention',Retention)

	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	def get_CrossBackupType(self):
		return self.get_query_params().get('CrossBackupType')

	def set_CrossBackupType(self,CrossBackupType):
		self.add_query_param('CrossBackupType',CrossBackupType)

	def get_LogBackupEnabled(self):
		return self.get_query_params().get('LogBackupEnabled')

	def set_LogBackupEnabled(self,LogBackupEnabled):
		self.add_query_param('LogBackupEnabled',LogBackupEnabled)

	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)

	def get_CrossBackupRegion(self):
		return self.get_query_params().get('CrossBackupRegion')

	def set_CrossBackupRegion(self,CrossBackupRegion):
		self.add_query_param('CrossBackupRegion',CrossBackupRegion)

	def get_StorageOwner(self):
		return self.get_query_params().get('StorageOwner')

	def set_StorageOwner(self,StorageOwner):
		self.add_query_param('StorageOwner',StorageOwner)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
2a3f06373320ecc765b4bb93855f011b6abd1874
|
8773e8c9b9a0a6e407f91b6f7c6321141d7e8356
|
/P0028.py
|
b36e66bdef3c5322dae8da2cc24b78e41d00f479
|
[] |
no_license
|
westgate458/LeetCode
|
1836bb21e8dd95386ccab390f5fd04567a429a02
|
36d7f9e967a62db77622e0888f61999d7f37579a
|
refs/heads/master
| 2021-12-28T04:16:36.875737
| 2021-12-17T05:48:09
| 2021-12-17T05:48:09
| 152,928,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 21:30:22 2018
@author: Tianqi Guo
"""
class Solution(object):
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of needle in haystack.

        Returns -1 when needle does not occur, and 0 when needle is empty
        (both by convention and matching str.find's behavior).

        :type haystack: str
        :type needle: str
        :rtype: int
        """
        # The original hand-rolled scan (find first char, slice-compare,
        # advance) re-implemented str.find with several fiddly index
        # adjustments.  str.find already has exactly the required
        # contract, including '' -> 0 and too-long needles -> -1.
        return haystack.find(needle)
# Quick smoke run: searching "a" inside "a" prints 0.
haystack, needle = "a", "a"
test = Solution()
p = test.strStr(haystack, needle)
print(p)
|
[
"tqguo246@gmail.com"
] |
tqguo246@gmail.com
|
531a90f48670b96708ad059976d2ba5bf25937fd
|
cbadf1c08435abc91bd221d2fd9d096717685cc0
|
/cses/introductory/t1068/task.py
|
4d13a4212760b1f74adb4ec357a1211f2f7534e6
|
[] |
no_license
|
x3mka/code-contests-python
|
9b54738941187284e1f70aad850ae1016ca6cd39
|
57f473ca84735f9312913967e20a3ac0da32baa8
|
refs/heads/master
| 2022-09-01T20:39:05.329559
| 2022-08-04T13:05:22
| 2022-08-04T13:05:22
| 263,626,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
import sys
# Competitive-programming I/O helpers: r* read from stdin, w* write a
# newline-terminated value/sequence to stdout.
def rs(): return sys.stdin.readline().rstrip()
def ri(): return int(sys.stdin.readline())
def ria(): return list(map(int, sys.stdin.readline().split()))
def ws(s): sys.stdout.write(s); sys.stdout.write('\n')
def wi(n): sys.stdout.write(str(n)); sys.stdout.write('\n')
def wia(a, sep=' '): sys.stdout.write(sep.join([str(x) for x in a])); sys.stdout.write('\n')
def solve(n, k):
    # Leftover contest-template stub; not called by main() below.
    return 0
def main():
    """Read an integer n and print its Collatz sequence down to 1."""
    current = ri()
    sequence = []
    while current > 1:
        sequence.append(current)
        # Halve even values; apply 3n+1 to odd ones.
        current = current // 2 if current % 2 == 0 else 3 * current + 1
    sequence.append(1)
    wia(sequence)
if __name__ == '__main__':
    main()
|
[
"bdimonik@gmail.com"
] |
bdimonik@gmail.com
|
c38f64648780fe24938819e7a021e775e5b9144a
|
2aba3c043ce4ef934adce0f65bd589268ec443c5
|
/codility/lessons/lesson15/abs_distinct.py
|
20cee5b98361ce77f0c60a5866368cb270aedd84
|
[] |
no_license
|
kambehmw/algorithm_python
|
4f66593b77039d90515d1fcbecacdab8c811b92f
|
17222399dcc92fd8f908e5774a9883e2e89c486e
|
refs/heads/master
| 2020-06-02T12:44:11.322356
| 2020-05-18T13:22:05
| 2020-05-18T13:22:05
| 191,157,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
def solution(A):
    """Return the number of distinct absolute values in the list A.

    Handles the empty list (returns 0) and duplicates/sign pairs
    (e.g. -3 and 3 count once).
    """
    # A set deduplicates on insert, so the original's explicit
    # "not in result" check before add() was redundant; a set
    # comprehension does the whole job in one pass.
    return len({abs(a) for a in A})
if __name__ == '__main__':
    # Sample from the Codility statement: |{5, 3, 1, 0, 6}| == 5.
    A = [-5, -3, -1, 0, 3, 6]
    result = solution(A)
    print(result)
|
[
"kanbe.hmw@gmail.com"
] |
kanbe.hmw@gmail.com
|
96195a397e80348016e9ddf846478112f9dadba0
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_02_01/models/connectivity_parameters_py3.py
|
2f6d375168c517e8f45f0201c2a3c695caf2c4b8
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,034
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityParameters(Model):
    """Parameters that determine how the connectivity check will be performed.

    All required parameters must be populated in order to send to Azure.

    :param source: Required.
    :type source: ~azure.mgmt.network.v2018_02_01.models.ConnectivitySource
    :param destination: Required.
    :type destination:
     ~azure.mgmt.network.v2018_02_01.models.ConnectivityDestination
    :param protocol: Network protocol. Possible values include: 'Tcp', 'Http',
     'Https', 'Icmp'
    :type protocol: str or ~azure.mgmt.network.v2018_02_01.models.Protocol
    :param protocol_configuration:
    :type protocol_configuration:
     ~azure.mgmt.network.v2018_02_01.models.ProtocolConfiguration
    """

    # msrest convention: _validation drives client-side validation,
    # _attribute_map drives (de)serialization (python name -> wire key/type).
    _validation = {
        'source': {'required': True},
        'destination': {'required': True},
    }

    _attribute_map = {
        'source': {'key': 'source', 'type': 'ConnectivitySource'},
        'destination': {'key': 'destination', 'type': 'ConnectivityDestination'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'protocol_configuration': {'key': 'protocolConfiguration', 'type': 'ProtocolConfiguration'},
    }

    def __init__(self, *, source, destination, protocol=None, protocol_configuration=None, **kwargs) -> None:
        super(ConnectivityParameters, self).__init__(**kwargs)
        self.source = source
        self.destination = destination
        self.protocol = protocol
        self.protocol_configuration = protocol_configuration
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
fb6e59194cd56c41ffbf2f949fdb863868fbed1e
|
82f998aec53e7bc49eb5aad4fdb18cbe72976b89
|
/transformers/configuration_albert.py
|
144678774cdc1e5b1ea30145fdf9204c810d854a
|
[] |
no_license
|
MatNLP/SMedBERT
|
6ab8d2749a8a26005eef36dc347f779c9e6a217b
|
8dd549f902ca59ad2b84bf3b951213565fde4dc0
|
refs/heads/main
| 2023-09-02T03:22:13.298661
| 2021-11-17T05:44:50
| 2021-11-17T05:44:50
| 372,204,217
| 75
| 13
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,303
|
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ALBERT model configuration """
from .configuration_utils import PretrainedConfig
# Map from shortcut model names to the URLs of their hosted config files.
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'albert-base-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-config.json",
    'albert-large-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-config.json",
    'albert-xlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-config.json",
    'albert-xxlarge-v1': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-config.json",
    'albert-base-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-config.json",
    'albert-large-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-config.json",
    'albert-xlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-config.json",
    'albert-xxlarge-v2': "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-config.json",
}
class AlbertConfig(PretrainedConfig):
    """Configuration for `AlbertModel`.

    The default settings match the configuration of model `albert_xxlarge`.
    """
    pretrained_config_archive_map = ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size_or_config_json_file=30000,
                 embedding_size=128,
                 hidden_size=4096,
                 num_hidden_layers=12,
                 num_hidden_groups=1,
                 num_attention_heads=64,
                 intermediate_size=16384,
                 inner_group_num=1,
                 hidden_act="gelu_new",
                 hidden_dropout_prob=0,
                 attention_probs_dropout_prob=0,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12, **kwargs):
        """Construct an AlbertConfig.

        Args:
            vocab_size_or_config_json_file: vocabulary size of `inputs_ids`
                in `AlbertModel` (stored as ``vocab_size``).
            embedding_size: size of the vocabulary embeddings.
            hidden_size: size of the encoder layers and the pooler layer.
            num_hidden_layers: number of hidden layers in the Transformer
                encoder.
            num_hidden_groups: number of groups for the hidden layers;
                parameters in the same group are shared.
            num_attention_heads: number of attention heads per attention
                layer in the Transformer encoder.
            intermediate_size: size of the "intermediate" (feed-forward)
                layer in the Transformer encoder.
            inner_group_num: number of inner repetitions of attention + ffn.
            hidden_act: non-linear activation function (callable or string)
                used in the encoder and pooler.
            hidden_dropout_prob: dropout probability for all fully connected
                layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: dropout ratio for the attention
                probabilities.
            max_position_embeddings: maximum sequence length this model
                might ever be used with (e.g. 512, 1024 or 2048).
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stdev of the truncated_normal_initializer
                for initializing all weight matrices.
            layer_norm_eps: epsilon used by layer-normalization layers.
        """
        super(AlbertConfig, self).__init__(**kwargs)
        # NOTE(review): despite its name, this argument is stored directly
        # as the vocabulary size -- no JSON-file handling happens here.
        self.vocab_size = vocab_size_or_config_json_file
        # Embedding / encoder geometry.
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        # Regularization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        # Input limits and weight initialization.
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
|
[
"czr@daddy.com"
] |
czr@daddy.com
|
d57c34be95b4a4e63226be4b67e05cb99573eb54
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03326/s618374476.py
|
ab457fda2c9fc24c21e8f4fbf5a51f82f641ff89
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from heapq import heapify, heappop, heappush
# Fast-input helpers.  Note: input() deliberately shadows the builtin
# with a stripped sys.stdin.readline(); the other helpers build on it.
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7

N, M = MAP()
L = [LIST() for _ in range(N)]

# Try every +/-1 sign assignment for the three coordinates and keep the
# best achievable sum of the M largest signed scores.
num = 3
bit_list = list(product([-1, 1], repeat=num))

ans = 0
for a, b, c in bit_list:
    scores = sorted((a * x + b * y + c * z for x, y, z in L), reverse=True)
    ans = max(ans, sum(scores[:M]))
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5993102db04b63b021c0792c45e33184b33f0e7e
|
dc3d310934705034ab2f5bc4d3a96f07dab9b48b
|
/venv/Scripts/pip3.8-script.py
|
d6438c0257f98b46c7098f54e08d33868c8e9a97
|
[] |
no_license
|
createnewdemo/istudy_test
|
82197488d9e9fa05e0c6cc91362645fc4555dc1d
|
806693f2bee13e3c28571d0d75f6b6ea70acf7a0
|
refs/heads/master
| 2022-04-19T05:52:53.780973
| 2020-04-17T17:04:10
| 2020-04-17T17:04:10
| 256,507,355
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 400
|
py
|
#!F:\pycharmÁ·Ď°\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
# Auto-generated setuptools console-script wrapper: it normalizes
# argv[0] (stripping the -script.py / .exe suffix Windows launchers
# add) and dispatches to the 'pip3.8' entry point of pip 19.0.3.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
    )
|
[
"320783214@qq.com"
] |
320783214@qq.com
|
10cabdec7e10d144746752e0c1d59045fc66cc76
|
9d1c769fb97c9287fc86cf582ac84bbf9cfdeec8
|
/PythonFunctionalProgramming(Advanced)/7.Lambda Expression.py
|
8b180d4009858955fcd193726d37957f15f09c82
|
[] |
no_license
|
rohan9769/Python-Coding-and-Practice
|
a0bb1b560e995b2f484b6e6a9cc42e4bac9e84cc
|
27da1d4c3d0a1067fb8ce7f937d469bc4a2d2189
|
refs/heads/master
| 2021-02-10T09:15:17.999508
| 2020-03-22T13:12:44
| 2020-03-22T13:12:44
| 244,368,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# Lambda Expression - one-time anonymous functions
# lambda parameter : action to take on the parameter
from functools import reduce
# Sample data shared by the map/filter demos below.
my_list = [1,2,3]
# def multi_by2(i):
#     return i*2
def check_odd(i):
    # True when i is odd.  bool(i % 2) is truth-equivalent to the
    # original `i % 2 != 0` for every numeric input.
    return bool(i % 2)
def accumulator(acc, i):
    """Reduce step: log the operands, then return their sum."""
    total = acc + i
    print(acc, i)
    return total
# Inline lambdas replacing the commented-out named functions above.
double = lambda value: value * 2
print(list(map(double, my_list)))
print(list(filter(lambda value: value % 2 != 0, my_list)))
|
[
"rohannayak2071@gmail.com"
] |
rohannayak2071@gmail.com
|
24b0d94b1351c0914bc927de94b884458de108d5
|
b82057c77dd4d00ff9bca9a979a1a3075f0528c4
|
/Exicom_gateway/checks/ec500_dc_battcumdischarge_ah_status
|
753f682ed293b267a705e11b7e3da516b6af3913
|
[] |
no_license
|
subhash-007/photography-blog
|
7ee0c4f930fee29d76106c45b09e6b76cb19cf56
|
b1ae66794b48bfe3862cb6e727a3a15a6ef79024
|
refs/heads/master
| 2020-03-31T04:33:00.276628
| 2019-07-12T06:00:39
| 2019-07-12T06:00:39
| 151,910,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,982
|
#!/usr/bin/python
"""
dc_battcumdischarge_ah_status of poller device.
This is part of device application.
Poller script determines the dc_battcumdischarge_ah_status of device.
poller script takes the snmp value of OID .1.3.6.1.4.1.38016.14.2.10.3 from snmp agent of device at specific interval.
Device dc_battcumdischarge_ah_status is sent to device application
"""
# ######################################################################
# Function: check_ec500_dc_battcumdischarge_ah_status
#
# Parameters: info (SNMP Ouput) params (No Parameters)
#
# Output: device dc_battcumdischarge_ah
# ######################################################################
# Default thresholds for this check (none -- the check never alarms).
ec500_dc_battcumdischarge_ah_default_levels = ()
def check_ec500_dc_battcumdischarge_ah_status(item, params, info):
    """Check_MK check function: report the EC500 battery cumulative
    discharge (Ah) read via SNMP.

    Args:
        item (str): item filter from the service (unused here).
        params (tuple): warn/crit parameters (unused -- state is 0 or 3).
        info (list): SNMP table rows; info[0][0] holds the raw value.
    Returns:
        (state, infotext, perf_data) where state is 0 (OK) when a value
        was read or 3 (UNKNOWN) otherwise.  Spaces in the output are
        replaced with '@' (Check_MK plugin-output convention here).
    """
    state = 3
    infotext = "unknown_value"
    perf_data = ['']
    if info:
        try:
            state = 0
            # Prefer a numeric reading; fall back to the raw string
            # (with spaces escaped) when the agent returns non-numeric data.
            try :
                ec500_dc_battcumdischarge_ah = float(info[0][0])
            except Exception,e:
                ec500_dc_battcumdischarge_ah = str(info[0][0].replace(" ","@"))
            perf_data = [("ec500_dc_battcumdischarge_ah", ec500_dc_battcumdischarge_ah)]
            return (state, "ec500_dc_battcumdischarge_ah=%s" % ec500_dc_battcumdischarge_ah, perf_data)
        except Exception,e:
            # Any other failure: report UNKNOWN with the placeholder text.
            return (3, "ec500_dc_battcumdischarge_ah=%s" % infotext.replace(" ","@"), perf_data)
    else:
        # No SNMP data retrieved at all.
        return (state, "ec500_dc_battcumdischarge_ah=%s" %"No data retrieved".replace(" ","@"), perf_data)
# This check works on all SNMP hosts
"""
Dictionary-based declaration of all check types
"""
# check_info / snmp_info are provided by the Check_MK runtime that
# execs this plugin; they are not defined in this file.
check_info["ec500_dc_battcumdischarge_ah_status"] = {
    'check_function': check_ec500_dc_battcumdischarge_ah_status,
    'service_description': 'ec500_dc_battcumdischarge_ah_status',
    'has_perfdata': True,
}
#########################################################################
# SNMP OID for the device dc_battcumdischarge_ah_status
#########################################################################
snmp_info["ec500_dc_battcumdischarge_ah_status"] = ('.1.3.6.1.4.1.38016.14.2', ['10.3'])
|
[
"sbmoond@gmail.com"
] |
sbmoond@gmail.com
|
|
21fe2f6b03ac56dd43ae3a5e3577404f05819754
|
11cd362cdd78c2fc48042ed203614b201ac94aa6
|
/desktop/core/ext-py3/boto-2.49.0/tests/devpay/test_s3.py
|
86665702dcb3bef91d879e2c7171a5c6e2e32913
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
cloudera/hue
|
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
|
dccb9467675c67b9c3399fc76c5de6d31bfb8255
|
refs/heads/master
| 2023-08-31T06:49:25.724501
| 2023-08-28T20:45:00
| 2023-08-28T20:45:00
| 732,593
| 5,655
| 2,244
|
Apache-2.0
| 2023-09-14T03:05:41
| 2010-06-21T19:46:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 7,410
|
py
|
#!/usr/bin/env python
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3Connection
"""
import time
import os
import urllib
from boto.s3.connection import S3Connection
from boto.exception import S3PermissionsError
# this test requires a devpay product and user token to run:
# Placeholder DevPay credentials; every request must carry the user
# token in the x-amz-security-token header.
AMAZON_USER_TOKEN = '{UserToken}...your token here...'
DEVPAY_HEADERS = { 'x-amz-security-token': AMAZON_USER_TOKEN }
def test():
    """End-to-end S3Connection exercise against live S3 with DevPay
    headers: bucket lifecycle, logging, key upload/download, generated
    URLs, listings, metadata, ACLs, then full cleanup."""
    print '--- running S3Connection tests (DevPay) ---'
    c = S3Connection()
    # create a new, empty bucket
    bucket_name = 'test-%d' % int(time.time())
    bucket = c.create_bucket(bucket_name, headers=DEVPAY_HEADERS)
    # now try a get_bucket call and see if it's really there
    bucket = c.get_bucket(bucket_name, headers=DEVPAY_HEADERS)
    # test logging
    logging_bucket = c.create_bucket(bucket_name + '-log', headers=DEVPAY_HEADERS)
    logging_bucket.set_as_logging_target(headers=DEVPAY_HEADERS)
    bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name, headers=DEVPAY_HEADERS)
    bucket.disable_logging(headers=DEVPAY_HEADERS)
    c.delete_bucket(logging_bucket, headers=DEVPAY_HEADERS)
    # create a new key and store it's content from a string
    k = bucket.new_key()
    k.name = 'foobar'
    s1 = 'This is a test of file upload and download'
    s2 = 'This is a second string to test file upload and download'
    k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
    fp = open('foobar', 'wb')
    # now get the contents from s3 to a local file
    k.get_contents_to_file(fp, headers=DEVPAY_HEADERS)
    fp.close()
    fp = open('foobar')
    # check to make sure content read from s3 is identical to original
    assert s1 == fp.read(), 'corrupted file'
    fp.close()
    # test generated URLs
    url = k.generate_url(3600, headers=DEVPAY_HEADERS)
    file = urllib.urlopen(url)
    assert s1 == file.read(), 'invalid URL %s' % url
    url = k.generate_url(3600, force_http=True, headers=DEVPAY_HEADERS)
    file = urllib.urlopen(url)
    assert s1 == file.read(), 'invalid URL %s' % url
    bucket.delete_key(k, headers=DEVPAY_HEADERS)
    # test a few variations on get_all_keys - first load some data
    # for the first one, let's override the content type
    phony_mimetype = 'application/x-boto-test'
    headers = {'Content-Type': phony_mimetype}
    headers.update(DEVPAY_HEADERS)
    k.name = 'foo/bar'
    k.set_contents_from_string(s1, headers)
    k.name = 'foo/bas'
    k.set_contents_from_filename('foobar', headers=DEVPAY_HEADERS)
    k.name = 'foo/bat'
    k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
    k.name = 'fie/bar'
    k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
    k.name = 'fie/bas'
    k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
    k.name = 'fie/bat'
    k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
    # try resetting the contents to another value
    md5 = k.md5
    k.set_contents_from_string(s2, headers=DEVPAY_HEADERS)
    assert k.md5 != md5
    os.unlink('foobar')
    # NOTE: 'all' shadows the builtin; kept for compatibility.
    all = bucket.get_all_keys(headers=DEVPAY_HEADERS)
    assert len(all) == 6
    rs = bucket.get_all_keys(prefix='foo', headers=DEVPAY_HEADERS)
    assert len(rs) == 3
    rs = bucket.get_all_keys(prefix='', delimiter='/', headers=DEVPAY_HEADERS)
    assert len(rs) == 2
    rs = bucket.get_all_keys(maxkeys=5, headers=DEVPAY_HEADERS)
    assert len(rs) == 5
    # test the lookup method
    k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS)
    assert isinstance(k, bucket.key_class)
    assert k.content_type == phony_mimetype
    k = bucket.lookup('notthere', headers=DEVPAY_HEADERS)
    assert k == None
    # try some metadata stuff
    k = bucket.new_key()
    k.name = 'has_metadata'
    mdkey1 = 'meta1'
    mdval1 = 'This is the first metadata value'
    k.set_metadata(mdkey1, mdval1)
    mdkey2 = 'meta2'
    mdval2 = 'This is the second metadata value'
    k.set_metadata(mdkey2, mdval2)
    k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
    k = bucket.lookup('has_metadata', headers=DEVPAY_HEADERS)
    assert k.get_metadata(mdkey1) == mdval1
    assert k.get_metadata(mdkey2) == mdval2
    k = bucket.new_key()
    k.name = 'has_metadata'
    k.get_contents_as_string(headers=DEVPAY_HEADERS)
    assert k.get_metadata(mdkey1) == mdval1
    assert k.get_metadata(mdkey2) == mdval2
    bucket.delete_key(k, headers=DEVPAY_HEADERS)
    # test list and iterator
    rs1 = bucket.list(headers=DEVPAY_HEADERS)
    num_iter = 0
    for r in rs1:
        num_iter = num_iter + 1
    rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
    num_keys = len(rs)
    assert num_iter == num_keys
    # try a key with a funny character
    k = bucket.new_key()
    k.name = 'testnewline\n'
    k.set_contents_from_string('This is a test', headers=DEVPAY_HEADERS)
    rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
    assert len(rs) == num_keys + 1
    bucket.delete_key(k, headers=DEVPAY_HEADERS)
    rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
    assert len(rs) == num_keys
    # try some acl stuff
    bucket.set_acl('public-read', headers=DEVPAY_HEADERS)
    policy = bucket.get_acl(headers=DEVPAY_HEADERS)
    assert len(policy.acl.grants) == 2
    bucket.set_acl('private', headers=DEVPAY_HEADERS)
    policy = bucket.get_acl(headers=DEVPAY_HEADERS)
    assert len(policy.acl.grants) == 1
    k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS)
    k.set_acl('public-read', headers=DEVPAY_HEADERS)
    policy = k.get_acl(headers=DEVPAY_HEADERS)
    assert len(policy.acl.grants) == 2
    k.set_acl('private', headers=DEVPAY_HEADERS)
    policy = k.get_acl(headers=DEVPAY_HEADERS)
    assert len(policy.acl.grants) == 1
    # try the convenience methods for grants
    # this doesn't work with devpay
    #bucket.add_user_grant('FULL_CONTROL',
    #                      'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67',
    #                      headers=DEVPAY_HEADERS)
    try:
        bucket.add_email_grant('foobar', 'foo@bar.com', headers=DEVPAY_HEADERS)
    except S3PermissionsError:
        pass
    # now delete all keys in bucket
    for k in all:
        bucket.delete_key(k, headers=DEVPAY_HEADERS)
    # now delete bucket
    c.delete_bucket(bucket, headers=DEVPAY_HEADERS)
    print '--- tests completed ---'
if __name__ == '__main__':
    test()
|
[
"noreply@github.com"
] |
cloudera.noreply@github.com
|
7e9bc624c393e992e18b67e221b977c09ff141f9
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_245/ch19_2020_03_04_18_17_22_799262.py
|
8b22222de5aff9dc08c38d569b8da5a45aa29742
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
# Classify a triangle from three user-supplied integer side lengths.
l1 = int(input("Insira o primeiro lado: "))
l2 = int(input("Insira o segundo lado: "))
l3 = int(input("Insira o terceiro lado: "))
if l1 == l2 and l2 == l3:
    print("o triângulo é equilátero")
elif l1 == l2 or l1 == l3 or l2 == l3:
    # BUG FIX: the original condition tested ``l1 == l2`` twice and never
    # ``l1 == l3``, so triangles such as (5, 4, 5) were misclassified
    # as escaleno instead of isóceles.
    print("o triângulo é isóceles")
else:
    print("o triângulo é escaleno")
|
[
"you@example.com"
] |
you@example.com
|
7afa2e261bc06fbe2b86157c44db2697afb12753
|
d3e2f5b8c9505301bfc782cd3f152630565ccfdd
|
/djangoecommerce/catalog/apps.py
|
44e8d6051b4ba24e67229587b176dbefc4b24c95
|
[] |
no_license
|
ingafter60/django3-ecom-portugis
|
81b5b862b01a8bc7ce9a5a2ccd1a306bf7268c56
|
ddf0b68836f54629d830e08a9831d7ad42514d45
|
refs/heads/master
| 2022-01-25T05:19:51.225435
| 2020-02-04T15:39:21
| 2020-02-04T15:39:21
| 238,185,317
| 0
| 0
| null | 2022-01-21T19:56:23
| 2020-02-04T10:59:51
|
Python
|
UTF-8
|
Python
| false
| false
| 230
|
py
|
from django.apps import AppConfig


class CatalogConfig(AppConfig):
    """AppConfig for the product catalog app.

    ``name`` must be the full dotted path because the app package lives
    inside the ``djangoecommerce`` project package.
    """
    # Removed the dead commented-out duplicate of this class that used the
    # bare ``catalog`` label.
    name = 'djangoecommerce.catalog'
    verbose_name = 'Catalog'
|
[
"inyoman_gurnitha@yahoo.com"
] |
inyoman_gurnitha@yahoo.com
|
063f00b5a48ae92362e6306ce6da50adda629431
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_211/128.py
|
e12ff8364323062a8ce942d6507e417d019c70c9
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,849
|
py
|
"""
http://networkx.readthedocs.io/en/networkx-1.11/tutorial/index.html
https://docs.python.org/3/library/re.html
https://docs.python.org/3/library/math.html
https://docs.python.org/3/library/collections.html
https://docs.python.org/3/library/itertools.html
https://docs.python.org/3/library/functools.html#functools.lru_cache
"""
# import numpy as np
# import networkx as nx
# import re
# import math
# import time # start_time = time.time(); elapsed_time = time.time() - start_time
# from collections import Counter
# from collections import OrderedDict
# from collections import deque
# from queue import PriorityQueue # q = PriorityQueue(); q.put((pr1, pr2, ...)); item = q.get()
# from itertools import combinations
# from itertools import permutations
# from functools import lru_cache # @lru_cache(maxsize=None)
# from copy import copy, deepcopy
# from sys import stdin, stdout
# from sys import maxsize # 9 * 10**18
# inf = float('inf')
def main():
    """Read T Code Jam cases from stdin and print one answer per case."""
    case_count = int(input())
    for case_no in range(1, case_count + 1):
        # Each case: "N K" on one line, budget U, then N probabilities.
        n, k = (int(tok) for tok in input().strip().split(' '))
        u = float(input())
        probs = [float(tok) for tok in input().strip().split(' ')]
        print("Case #{}: {}".format(case_no, solve(n, k, u, probs)))
def solve(N, K, U, P):
    """Greedy water-filling: spend budget U raising the smallest success
    probabilities in P until they level up, then return the product of all
    probabilities (the chance every task succeeds).

    Note: P is sorted and extended in place (a sentinel 1 is appended).
    """
    P.sort()
    P.append(1)
    for i in range(len(P) - 1):
        if U <= 0:
            break
        # Cost of lifting the i+1 smallest entries up to the next level.
        gap = P[i + 1] - P[i]
        cost = gap * (i + 1)
        if cost <= U:
            U -= cost
            for j in range(i + 1):
                P[j] += gap
        else:
            # Budget exhausted: split what remains evenly.
            share = U / (i + 1)
            for j in range(i + 1):
                P[j] += share
            U = 0
    prob = 1
    for p in P:
        prob *= p
    return prob
if __name__ == '__main__':
    # Run the stdin/stdout Code Jam driver only when executed as a script.
    main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1c6ae8af04aac7f1c2b8ece017e09f30b3cbe5e8
|
c92398a728817578850ecf508ec4197afe91a88f
|
/DemopatterninForloop1.py
|
129e82619895026a6e97d482bf04495e827222b5
|
[] |
no_license
|
HitanshuSoni/Python_practice
|
4d0ec0378124da85e364a15a7b94ddbbfe2fc929
|
7a3d0977b218ef76f91517d88518b1c0b68b9528
|
refs/heads/main
| 2023-04-18T21:55:12.709161
| 2021-05-08T15:39:08
| 2021-05-08T15:39:08
| 365,550,407
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
# Print a left-aligned number triangle: row r shows the digits 1..r.
n = int(input("Enter the number of rows "))
for row in range(1, n + 1):
    print("".join(str(col) for col in range(1, row + 1)))
|
[
"hitanshusoni10@gmail.com"
] |
hitanshusoni10@gmail.com
|
f218147dd10fb666b03b90331069ea33e88098df
|
2940f5416082dadd9c646cd9a46d2d0a99883efb
|
/venv/Lib/site-packages/pandas/tests/series/test_subclass.py
|
86330b7cc69937ddca6b4a69d3796dfe6f93618c
|
[
"MIT"
] |
permissive
|
tpike3/SugarScape
|
4813e4fefbfb0a701f5913d74f045fd0eaed1942
|
39efe4007fba2b12b75c72f7795827a1f74d640b
|
refs/heads/main
| 2021-06-20T03:55:46.288721
| 2021-01-20T17:06:35
| 2021-01-20T17:06:35
| 168,583,530
| 11
| 3
|
MIT
| 2021-01-20T17:19:53
| 2019-01-31T19:29:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
import numpy as np
import pandas as pd
import pandas._testing as tm
class TestSeriesSubclassing:
def test_indexing_sliced(self):
s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"))
res = s.loc[["a", "b"]]
exp = tm.SubclassedSeries([1, 2], index=list("ab"))
tm.assert_series_equal(res, exp)
res = s.iloc[[2, 3]]
exp = tm.SubclassedSeries([3, 4], index=list("cd"))
tm.assert_series_equal(res, exp)
res = s.loc[["a", "b"]]
exp = tm.SubclassedSeries([1, 2], index=list("ab"))
tm.assert_series_equal(res, exp)
def test_to_frame(self):
s = tm.SubclassedSeries([1, 2, 3, 4], index=list("abcd"), name="xxx")
res = s.to_frame()
exp = tm.SubclassedDataFrame({"xxx": [1, 2, 3, 4]}, index=list("abcd"))
tm.assert_frame_equal(res, exp)
def test_subclass_unstack(self):
# GH 15564
s = tm.SubclassedSeries([1, 2, 3, 4], index=[list("aabb"), list("xyxy")])
res = s.unstack()
exp = tm.SubclassedDataFrame({"x": [1, 3], "y": [2, 4]}, index=["a", "b"])
tm.assert_frame_equal(res, exp)
def test_subclass_empty_repr(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
sub_series = tm.SubclassedSeries()
assert "SubclassedSeries" in repr(sub_series)
def test_asof(self):
N = 3
rng = pd.date_range("1/1/1990", periods=N, freq="53s")
s = tm.SubclassedSeries({"A": [np.nan, np.nan, np.nan]}, index=rng)
result = s.asof(rng[-2:])
assert isinstance(result, tm.SubclassedSeries)
def test_explode(self):
s = tm.SubclassedSeries([[1, 2, 3], "foo", [], [3, 4]])
result = s.explode()
assert isinstance(result, tm.SubclassedSeries)
def test_equals(self):
# https://github.com/pandas-dev/pandas/pull/34402
# allow subclass in both directions
s1 = pd.Series([1, 2, 3])
s2 = tm.SubclassedSeries([1, 2, 3])
assert s1.equals(s2)
assert s2.equals(s1)
|
[
"tpike3@gmu.edu"
] |
tpike3@gmu.edu
|
a9c3328717b43707b2bf21b0c04fd68897484d1a
|
53ab530408135b31dce247ec76d5c70d143cae69
|
/commands/deviot_languages.py
|
7bf2dd70fa1de9434f58aff7c744c2adc80d5242
|
[
"Apache-2.0"
] |
permissive
|
hoat23/Deviot
|
d3ede1b5884cb421fa17832cc7fe56dcc598ce44
|
77a9e08059f9226ebf23a216b00c6ebb5b1cd054
|
refs/heads/master
| 2021-07-22T20:25:07.840839
| 2017-10-07T19:53:21
| 2017-10-07T19:53:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from sublime_plugin import WindowCommand
from ..libraries.quick_menu import QuickMenu
class DeviotLanguagesCommand(WindowCommand):
    """Sublime Text window command that opens Deviot's language quick panel."""
    def run(self):
        # Delegate the UI flow to the shared quick-menu helper.
        QuickMenu().quick_language()
|
[
"guillermoepd@hotmail.com"
] |
guillermoepd@hotmail.com
|
a794cd368971ddd7da52ea42ef11f525d6acfa03
|
4ebfb207661bafcebb9b75936318c7dc84db3d80
|
/myvenv/Scripts/rst2s5.py
|
8f3e1f9599c87a7c8383bf0cf545291231482abe
|
[] |
no_license
|
YooInKeun/ToDo-App
|
2c72a91488cb376ff6c380ccbe5106dfdae09ecb
|
c231e5b13936a33bf60e42268c1ad856495aa432
|
refs/heads/master
| 2022-12-11T18:41:06.856657
| 2019-09-12T07:39:25
| 2019-09-12T07:39:25
| 200,203,474
| 0
| 0
| null | 2022-11-22T04:13:58
| 2019-08-02T09:07:51
|
Python
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
#!c:\users\keun0\onedrive\바탕 화면\창업 경진대회\todo\todo-app\myvenv\scripts\python.exe

# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""

try:
    import locale
    # Best-effort: honor the user's locale for output; silently fall back to
    # the "C" locale if it is unsupported.
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt are no
    # longer swallowed; the best-effort behavior is otherwise preserved.
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates S5 (X)HTML slideshow documents from standalone '
               'reStructuredText sources. ' + default_description)

publish_cmdline(writer_name='s5', description=description)
|
[
"keun0390@naver.com"
] |
keun0390@naver.com
|
657fe89cd2b81c2f222c3e4b5ec744eb2230ebac
|
0f40272f221acd09932e5e4b7f6287300526c902
|
/Programmers/Python/Code/줄 서는 방법.py
|
c1ff2fd5f910d0a0f0592e1a715b923f87a01fb8
|
[] |
no_license
|
wansang93/Algorithm
|
60cfa0d5a3cda7e41096cb22537a35c4789fc9e8
|
65425d1bf8e49cc3a732680c0c1030a2dc0333ca
|
refs/heads/master
| 2023-08-05T04:35:44.128549
| 2023-07-29T17:57:00
| 2023-07-29T17:57:00
| 225,189,092
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
import math
def solution(n, k):
    """Return the k-th (1-indexed) lexicographic permutation of [1..n].

    Uses the factorial number system: with (n-1)! permutations per choice of
    leading element, ``k // (n-1)!`` selects the next element each step.

    Improvement over the original: ``math.factorial(n-1)`` was computed twice
    per loop iteration; it is now computed once per iteration.
    """
    answer = []
    nums = list(range(1, n + 1))
    k -= 1  # switch to 0-indexed rank
    while nums:
        block = math.factorial(n - 1)  # permutations per leading element
        answer.append(nums.pop(k // block))
        k %= block
        n -= 1
    return answer
# Quick manual check of the permutation solver (expected: [3, 1, 2]).
print(solution(3, 5))
|
[
"wansang93@naver.com"
] |
wansang93@naver.com
|
999b382c9ad57d0ddfec93969cb49d317a2255d2
|
a1a86ccffff5f1a8fdab92f58fe46cd66f9cc0e2
|
/docrec/ocr__/recognition.py
|
63a91f818d84a4929e0fb5014006eafe4fed7c67
|
[] |
no_license
|
ZhengHui-Z/deeprec-sib18
|
8ec2c3b5b2fb6bfc6879f28a28c56776a7aa4faa
|
036171c33bc2f90645d8b9794aa0850c34a3ad05
|
refs/heads/master
| 2020-06-19T20:09:57.953934
| 2019-06-24T14:03:00
| 2019-06-24T14:03:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
import numpy as np
from pytesseract import image_to_string
from PIL import Image
from ..text.textprocessing import text2words
# http://www.nltk.org/howto/portuguese_en.html
# http://stanford.edu/~rjweiss/public_html/IRiSS2013/text2/notebooks/cleaningtext.html
# Run OCR
def image2text(image, language='en_US'):
    """Run Tesseract OCR on *image* and return the recognized text.

    image: numpy array convertible to uint8 (converted via ``astype``).
    language: 'en_US' or 'pt_BR'; any other value raises KeyError on the
    lang-code lookup below.

    NOTE(review): ``.encode('utf-8', 'ignore')`` suggests Python 2-era code;
    on Python 3 this returns ``bytes``, not ``str`` — confirm what callers
    (e.g. ``text2words``) expect.
    """
    lang = {'en_US': 'eng', 'pt_BR': 'por'}[language]
    text = image_to_string(
        Image.fromarray(image.astype(np.uint8)), lang=lang
    ).encode('utf-8', 'ignore')
    return text
def image2words(image, language='en_US', min_length=3):
    """OCR *image* and split the result into words of >= *min_length* chars."""
    text = image2text(image, language=language)
    return text2words(text, min_length=min_length)
def number_of_words(image, language='en_US', min_length=3):
    """Count the OCR-extracted words of at least *min_length* characters."""
    words = image2words(image, language=language, min_length=min_length)
    return len(words)
|
[
"paixao@gmail.com"
] |
paixao@gmail.com
|
7c07abfe45a78368fccc1684dd15011fba059c07
|
56ca0c81e6f8f984737f57c43ad8d44a84f0e6cf
|
/src/bpp/migrations/0293_pbn_api_kasowanie_przed_nie_eksp_zero.py
|
e14f1181b74a62c95a3a32e731aa17b60a5f7220
|
[
"MIT",
"CC0-1.0"
] |
permissive
|
iplweb/bpp
|
c40f64c78c0da9f21c1bd5cf35d56274a491f840
|
a3d36a8d76733a479e6b580ba6ea57034574e14a
|
refs/heads/dev
| 2023-08-09T22:10:49.509079
| 2023-07-25T04:55:54
| 2023-07-25T04:55:54
| 87,017,024
| 2
| 0
|
NOASSERTION
| 2023-03-04T04:02:36
| 2017-04-02T21:22:20
|
Python
|
UTF-8
|
Python
| false
| false
| 784
|
py
|
# Generated by Django 3.0.14 on 2021-09-15 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two PBN-API boolean switches to the ``uczelnia`` model."""
    dependencies = [
        ("bpp", "0292_przypinanie_dyscyplin"),
    ]
    operations = [
        # Switch: delete a record's statements in PBN before (re)sending it.
        migrations.AddField(
            model_name="uczelnia",
            name="pbn_api_kasuj_przed_wysylka",
            field=models.BooleanField(
                default=False,
                verbose_name="Kasuj oświadczenia rekordu przed wysłaniem do PBN",
            ),
        ),
        # Switch: do not send works with PK=0 to PBN.
        migrations.AddField(
            model_name="uczelnia",
            name="pbn_api_nie_wysylaj_prac_bez_pk",
            field=models.BooleanField(
                default=False, verbose_name="Nie wysyłaj do PBN prac z PK=0"
            ),
        ),
    ]
|
[
"michal.dtz@gmail.com"
] |
michal.dtz@gmail.com
|
040780f0a66c35d9feada04c693a6b39fc7f7acc
|
70f564990215f47b139a777826f211477e9b44f6
|
/plan2vec_experiments/analysis_icml_2020/local_metric_img_visualization.py
|
832991f25694a02b928ea7dc8ae3ba5d41a0fb3b
|
[] |
no_license
|
geyang/plan2vec
|
de87f2d77732c4aacdefd00067ebebacb7cd763f
|
aeeb50aed3d7da4c266b4ca163e96d4c0747e3c1
|
refs/heads/master
| 2022-11-16T03:40:42.638239
| 2022-10-28T04:01:29
| 2022-10-28T04:01:29
| 261,273,420
| 65
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
from plan2vec.plotting.maze_world.connect_the_dots_image_maze import Args, main
if __name__ == "__main__":
    # Launcher script: discover trained local-metric checkpoints on the
    # logging server, then fan out one visualization job per checkpoint.
    import jaynes
    from plan2vec_experiments import instr, RUN, config_charts, dir_prefix
    from os.path import join as pJoin, dirname, normpath
    from ml_logger import logger
    logger.configure(log_directory=RUN.server, register_experiment=False)
    # Earlier experiment roots kept for reference:
    # glob_root = dir_prefix()
    # glob_root = "/geyang/plan2vec/2019/12-16/analysis/local-metric-analysis/all_local_metric"
    # glob_root = "/geyang/plan2vec/2020/02-08/neo_plan2vec/uvpn_image/quick_eval_new_local_metric/local_metric/10.50"
    glob_root = "/geyang/plan2vec/2020/02-08/neo_plan2vec/uvpn_image/quick_eval_new_local_metric/local_metric/hige_loss/lr-sweep/12.24"
    kwargs = []
    with logger.PrefixContext(glob_root):
        # note: rope uses {}-{} as postfix. maze do not.
        weight_paths = logger.glob("**/models/**/f_lm.pkl")
        logger.print('found these experiments')
        logger.print(*weight_paths, sep="\n")
        for p in weight_paths:
            # parameters.pkl lives two directories above the checkpoint.
            parameter_path = normpath(pJoin(dirname(p), '..', '..', 'parameters.pkl'))
            env_id, local_metric, latent_dim = \
                logger.get_parameters(
                    'Args.env_id', 'Args.local_metric', 'Args.latent_dim',
                    path=parameter_path, default=None)
            # NOTE(review): this call's return value is unused; the abspath
            # is recomputed in the dict below — confirm it has no side effect.
            logger.abspath(p)
            kwargs.append(dict(env_id=env_id, load_local_metric=logger.abspath(p),
                               local_metric=local_metric, latent_dim=latent_dim))
    jaynes.config()
    # One remote run per discovered checkpoint.
    for _ in kwargs:
        jaynes.run(instr(main, n_rollouts=100, **_))
    config_charts("""
charts:
- type: file
  glob: "**/*render.png"
- type: file
  glob: "**/*data.png"
- type: file
  glob: "**/*connected.png"
- type: file
  glob: "**/*gt.png"
- type: file
  glob: "**/*gt_wider.png"
keys:
- run.status
- Args.env_id
- Args.load_local_metric
""")
    jaynes.listen()
|
[
"yangge1987@gmail.com"
] |
yangge1987@gmail.com
|
29fabdd37b5eee248069bcbcc7c7fc5826ff0d69
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/1ed1c792986545bca6955b9771a56a39.py
|
d6cd9ce8061599c2e2f464b8921262b171bc563d
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
class Bob:
    """Replies like a lackadaisical teenager (Exercism "Bob" exercise)."""

    def __init__(self):
        pass

    def hey(self, msg):
        """Return Bob's reply to *msg*.

        Silence (None/blank) -> 'Fine. Be that way!'; shouting -> 'Woah,
        chill out!'; a question -> 'Sure.'; anything else -> 'Whatever.'
        Shouting takes precedence over questions, as in the original.
        """
        # Idiom fixes vs. original: ``is None`` instead of ``== None`` and
        # ``msg.isupper()`` instead of ``str.isupper(msg)``; behavior unchanged.
        if msg is None or msg.strip() == '':
            return 'Fine. Be that way!'
        if msg.isupper():
            return 'Woah, chill out!'
        if msg.endswith('?'):
            return 'Sure.'
        return 'Whatever.'
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
474c5c6d151d1b4d7a8b912ad1a74352ab53ca44
|
988205a1bc674399272d2b77f06bb6ae2c0b96ab
|
/student/urls.py
|
0b3401187a6dbb34b251f3f7cd4c6d64ac2fea80
|
[] |
no_license
|
safwanvk/as
|
7797f63f84d52ba2857e99ae99b4aa5b9fd67983
|
1de9e1a6855a0b3f16ffdb4d693cd90579a37e40
|
refs/heads/main
| 2023-04-01T14:44:18.047169
| 2021-03-31T07:03:15
| 2021-03-31T07:03:15
| 349,309,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
from django.urls import path
from .views import *
# URL routes for the student app; view callables come from ``.views``.
urlpatterns = [
    path('login', student_login),          # student login endpoint
    path('session-check', session_check),  # presumably checks an active session — verify in views
    path('create', create_student),        # create a student record
    path('delete/<pk>', delete_student),   # delete student by primary key
    path('', get_student)                  # app root: list/fetch students
]
|
[
"safwanvalakundil@gmail.com"
] |
safwanvalakundil@gmail.com
|
444a7786b5c7a8bd07929aceea4b8c0e8d44e648
|
a7853b95403b527a527f58cc4b94783161eaaa1d
|
/graph/graph_drawing_objective.py
|
40aa76c8871f0204c2b489d8310a9b482574fa7a
|
[] |
no_license
|
jinnaiyuu/covering-options
|
6c3d2b2818d4074893d2eb1eed72fb77920bb3c3
|
00539a00842d40ba2b397496ec351a683f43d38f
|
refs/heads/master
| 2020-12-19T23:54:41.080277
| 2020-01-23T21:33:21
| 2020-01-23T21:33:21
| 235,890,627
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
import numpy as np
import itertools
from scipy.optimize import minimize
from options.util import GetRandomWalk
def Attr(rho, P, fs):
    """Attractive term of the graph-drawing objective.

    rho: numpy array of size N — state distribution.
    P:   numpy array of size NxN — transition probabilities (rows sum to 1).
    fs:  list of numpy arrays of size N — one embedding coordinate per array.

    Returns 1/2 * sum_{u,v} rho[u] * P[u,v] * ||f(u) - f(v)||^2.

    BUG FIX: the original computed ``prob = rho[u] * P[u, v]`` but never used
    it, so the attractive term ignored the edge weights entirely; each squared
    difference is now weighted by that probability.
    """
    ret = 0.0
    N = rho.shape[0]
    for u in range(N):
        for v in range(N):
            prob = rho[u] * P[u, v]
            for f in fs:
                ret += prob * (f[u] - f[v]) * (f[u] - f[v])
    return ret / 2.0
def Repl(rho, P, delta, fs):
    """Repulsive term of the graph-drawing objective.

    For every coordinate pair (j, k), accumulates
    sum_{u,v} rho[u] * rho[v] * (f_j(u) f_k(u) - res) * (f_j(v) f_k(v) - res),
    where ``res`` is ``delta`` on the diagonal (j == k) and 0 otherwise.
    ``P`` is unused here (kept for signature symmetry with ``Attr``).

    BUG FIX: the original computed ``prob = rho[u] * rho[v]`` but never used
    it; each term is now weighted by that probability as the expectation over
    rho requires.
    """
    ret = 0.0
    N = rho.shape[0]
    for u in range(N):
        for v in range(N):
            prob = rho[u] * rho[v]  # expectation is over pairs drawn from rho
            for j in range(len(fs)):
                for k in range(j, len(fs)):
                    res = delta if j == k else 0
                    ret += prob * (fs[j][u] * fs[k][u] - res) \
                                * (fs[j][v] * fs[k][v] - res)
    return ret
def GraphDrawingObjective(rho, P, delta, beta):
    """Build the scalar objective Attr + beta * Repl over a flat vector F.

    The returned closure splits F into len(F)/N coordinate arrays of size N.
    TODO: delta should be a function instead of a constant value.
    """
    N = rho.shape[0]

    def GDO(F):
        # Slice the flat parameter vector into per-coordinate arrays.
        fs = [F[N * i:N * (i + 1)] for i in range(int(F.shape[0] / N))]
        return Attr(rho, P, fs) + beta * Repl(rho, P, delta, fs)

    return GDO
if __name__ == "__main__":
    # Smoke test: embed a 3x3 grid graph by minimizing the drawing objective.
    # Earlier 3-node example kept for reference:
    # rho = np.array([0.25, 0.50, 0.25])
    # P = np.array([[0.0, 1.0, 0.0],
    #               [0.5, 0.0, 0.5],
    #               [0.0, 1.0, 0.0]])
    # Uniform state distribution over the 9 grid cells.
    rho = np.full(9, 1.0/9.0, dtype=float)
    # Adjacency of a 3x3 grid (nodes 0..8, 4-neighborhood).
    A = np.zeros((9, 9), dtype=float)
    A[0, 1] = 1.0
    A[0, 3] = 1.0
    A[1, 0] = 1.0
    A[1, 2] = 1.0
    A[1, 4] = 1.0
    A[2, 1] = 1.0
    A[2, 5] = 1.0
    A[3, 0] = 1.0
    A[3, 4] = 1.0
    A[3, 6] = 1.0
    A[4, 1] = 1.0
    A[4, 3] = 1.0
    A[4, 5] = 1.0
    A[4, 7] = 1.0
    A[5, 2] = 1.0
    A[5, 4] = 1.0
    A[5, 8] = 1.0
    A[6, 3] = 1.0
    A[6, 7] = 1.0
    A[7, 4] = 1.0
    A[7, 6] = 1.0
    A[7, 8] = 1.0
    A[8, 5] = 1.0
    A[8, 7] = 1.0
    # Presumably converts the adjacency into a random-walk transition
    # matrix — confirm in options.util.GetRandomWalk.
    P = GetRandomWalk(A)
    print('P=', P)
    delta = 0.1
    beta = 5.0
    GDO_fn = GraphDrawingObjective(rho, P, delta, beta)
    # Optimize a 3-dimensional embedding, initialized at a constant 0.1.
    dim = 3
    x0 = np.full(int(rho.shape[0]) * dim, 0.1)
    # Nelder-Mead is derivative-free; adequate for this 27-variable toy.
    res = minimize(GDO_fn, x0, method='nelder-mead')
    sol = res.x.reshape((dim, int(rho.shape[0])))
    print('solution=\n', sol)
    # gdo_val = GDO_fn([f1, f2])
    # print('gdo=', gdo_val)
    # For our purpose, we want to draw an edge from minimum to maximum.
|
[
"ddyuudd@gmail.com"
] |
ddyuudd@gmail.com
|
b4143060947db0249cb4b379bf6a879771385ca7
|
cf111b440f33ba9741ff45c60ac33dfade24e2ac
|
/Projects/Autocal/attic/autocal-20100708/libautocal/autocal.py
|
2c143bfd55996a5b498ff00611aacad771a6a1c2
|
[
"Unlicense"
] |
permissive
|
fredmorcos/attic
|
cd08e951f56c3b256899ef5ca4ccd030d3185bc1
|
36d5891a959cfc83f9eeef003b4e0b574dd7d7e1
|
refs/heads/master
| 2023-07-05T10:03:58.115062
| 2023-06-21T22:55:38
| 2023-06-22T07:07:58
| 154,962,425
| 4
| 1
|
Unlicense
| 2023-06-22T07:08:00
| 2018-10-27T12:30:38
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 795
|
py
|
#!/usr/bin/env python
import sys
import config, optimize, loader
# CLI usage text (Python 2 script).
_help = '''AutoCal 0.1 - Fred Morcos <fred.morcos@gmail.com>
Usage: ./autocal.py [COMMANDS] < <input-file>
Commands:
\t--qt\t\t\tShow the Qt user interface.
\t--verbose,-v\t\tShow debug output.
\t--quiet,-q\t\tDo not output errors.
\t--help,-h\t\tShow this help.
'''
if __name__ == '__main__':
    # Scan command-line flags; --help and --qt exit early.
    for a in sys.argv:
        if a == '--verbose' or a == '-v':
            config.debug = True
        elif a == '--quiet' or a == '-q':
            config.verbose_error = False
        elif a == '--help' or a == '-h':
            print _help
            sys.exit(0)
        elif a == '--qt':
            # Imported lazily so Qt is only required for the GUI path.
            from autocalqt import qt_start
            qt_start()
            sys.exit(0)
    # Headless mode: read the whole schedule from stdin, optimize, emit it.
    input_data = ''
    for line in sys.stdin:
        input_data += line
    s = loader.load(input_data)
    s = optimize.start(s)
    print loader.save(s)
|
[
"fred.morcos@gmail.com"
] |
fred.morcos@gmail.com
|
3f563dd24da29a3808436df13732d8d92dc6540f
|
baaff7bac9cf0e18bddc27ed7866885637db9dac
|
/Studentportal/principle/migrations/0005_auto_20200427_1626.py
|
7f2dcf3448db240598d8725aa7c1bbb6ff382970
|
[] |
no_license
|
pratikgosavii/School-College-management-portal
|
0d477718a315c73b483b3885fce38d94f8cf7227
|
79ca0be6891067379b1544f4a8cd8bd82b177b51
|
refs/heads/master
| 2022-06-03T23:07:11.080921
| 2020-04-30T22:40:58
| 2020-04-30T22:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
# Generated by Django 3.0.2 on 2020-04-27 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-create ``addteacher.Teacher_Subjects`` as a many-to-many field."""
    dependencies = [
        ('principle', '0004_auto_20200426_0042'),
    ]
    operations = [
        # NOTE(review): the remove/add pair replaces the old column with a
        # ManyToManyField; existing assignments in the old column are
        # presumably dropped — confirm before running on production data.
        migrations.RemoveField(
            model_name='addteacher',
            name='Teacher_Subjects',
        ),
        migrations.AddField(
            model_name='addteacher',
            name='Teacher_Subjects',
            field=models.ManyToManyField(to='principle.subjects'),
        ),
    ]
|
[
"pratikgosavi654@gmail.com"
] |
pratikgosavi654@gmail.com
|
a91efe540d7ef22c8d8655ae84aaf3bad07462a7
|
58375cb29ebcd2da7adc182fd10c6f76d6deee6e
|
/FOTS/rroi_align/modules/rroi_align.py
|
7f3e5b4a21b030ab660d57f0a6ec05d9db7d052b
|
[
"BSD-3-Clause"
] |
permissive
|
jiangxiluning/FOTS.PyTorch
|
d0d5a53595308335f20803d7a1d73274a4dad5a7
|
7484a81417f35b4f5c48edd3eb3e855416797379
|
refs/heads/master
| 2023-02-19T23:45:06.317493
| 2023-02-14T15:30:18
| 2023-02-14T15:30:18
| 142,129,096
| 726
| 236
|
BSD-3-Clause
| 2022-10-06T17:19:51
| 2018-07-24T08:31:45
|
Python
|
UTF-8
|
Python
| false
| false
| 545
|
py
|
from torch.nn.modules.module import Module
from ..functions.rroi_align import RRoiAlignFunction
class _RRoiAlign(Module):
def __init__(self, pooled_height, pooled_width, spatial_scale):
super(_RRoiAlign, self).__init__()
self.pooled_width = int(pooled_width)
self.pooled_height = int(pooled_height)
self.spatial_scale = float(spatial_scale)
def forward(self, features, rois):
return RRoiAlignFunction.apply(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale)
|
[
"jiangxiluning@gmail.com"
] |
jiangxiluning@gmail.com
|
55a3ea9dd99ff3bd699c788ab07cea3e89d23de7
|
3f73ce74b6fdfb7966abb71a98f4986edd727c5f
|
/lib/pandas_option.py
|
0db90923fd1ec5c8f68491c947fc9cd7b40b1acc
|
[
"MIT"
] |
permissive
|
yuta-komura/amateras
|
9c2efd310b18f159b1354864d65f9894ab93737f
|
cf8cc8fe0b5d8c382090fd1784a3ce96e6953157
|
refs/heads/master
| 2023-01-21T19:57:18.763894
| 2020-11-25T04:02:28
| 2020-11-25T04:02:28
| 297,432,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
import pandas as pd
def display_max_columns():
    """Remove pandas' column-count limit when printing frames."""
    pd.set_option("display.max_columns", None)
def display_max_rows():
    """Remove pandas' row-count limit when printing frames."""
    pd.set_option("display.max_rows", None)
def display_round_down():
    """Show floats with two decimals in pandas output.

    Note: despite the name, this formats (rounds) values; it does not floor.
    """
    pd.set_option("display.float_format", '{:.2f}'.format)
|
[
"you@example.com"
] |
you@example.com
|
c583e85a4941db164985b6a0183b73927b75c83d
|
48d86947d5f3b5896c4a05cfcddcff01582a26ef
|
/amnesia/number/forms.py
|
fa9f1b18200101af1e9d576c327b0b05b5d1afa4
|
[] |
no_license
|
pratulyab/amnesia
|
181874288c97fbf7e73d10c64e214c2a17574773
|
6b0b3428a27f98e0e2f6bb8aefdc8a4459e7b8cc
|
refs/heads/master
| 2021-01-20T12:49:16.592335
| 2017-05-07T20:38:06
| 2017-05-07T20:38:06
| 90,409,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,969
|
py
|
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth import password_validation
from django.core.exceptions import ValidationError
from django.core import validators
from django.db.utils import IntegrityError
from django.utils.translation import ugettext_lazy as _
from number.models import PhoneNumber
from sms import lookup_number
from material import *
class PhoneNumberForm(forms.ModelForm):
    """ModelForm for a user's phone number.

    Adds a free-text ``calling_code`` field and validates the assembled
    number (calling code + 10-digit local part) against Twilio's Lookup API
    via ``sms.lookup_number``.
    """

    calling_code = forms.CharField(label=_('Calling Code'), widget=forms.TextInput(attrs={'maxlength': 4}))

    def __init__(self, *args, **kwargs):
        super(PhoneNumberForm, self).__init__(*args, **kwargs)
        # Restrict the local part to exactly 10 digits.
        self.fields['number'].validators = [validators.RegexValidator(r'^\d{10}$')]

    def clean(self, *args, **kwargs):
        """Validate the full number with Twilio's Lookup API."""
        super(PhoneNumberForm, self).clean(*args, **kwargs)
        if self.cleaned_data.get('number', ''):
            phone_number = self.cleaned_data.get('calling_code', '') + self.cleaned_data['number']
            if not lookup_number(phone_number, self.cleaned_data['country'].code):
                raise forms.ValidationError(_('Not a valid number according to Twilio\'s Lookup API'))
        return self.cleaned_data

    def save(self, commit=True, *args, **kwargs):
        """Save the phone number, backfilling the country's calling code."""
        obj = super(PhoneNumberForm, self).save(commit=False, *args, **kwargs)
        # BUG FIX: the original test was ``not a or b`` which, by precedence,
        # raised whenever kwargs supplied a calling code and passed when both
        # sources were missing. Raise only when NEITHER the form data NOR the
        # keyword arguments provide one.
        if not (self.cleaned_data.get('calling_code', '') or kwargs.get('calling_code', '')):
            raise forms.ValidationError(_('Calling code is required.'))
        if not obj.country.calling_code:
            # Prefer the form-supplied code; fall back to the kwarg.
            obj.country.calling_code = self.cleaned_data['calling_code'] if self.cleaned_data.get('calling_code', '') else kwargs['calling_code']
        if commit:
            try:
                obj.save()
            except (ValidationError, IntegrityError):
                raise forms.ValidationError(_('Error Occurred. User with this number has already registered.'))
        return obj

    class Meta:
        model = PhoneNumber
        fields = ['country', 'number']
        help_texts = {
            'number': 'Make sure to enter a valid 10 digit number. It will be verified using Twilio\'s Lookup API',
        }
|
[
"pratulyabubna@outlook.com"
] |
pratulyabubna@outlook.com
|
6ec05d3bc0cd2747d542611cb02e8455d14a425b
|
4e0f2938b003f5d68a57f213e652fbffb2f72ba2
|
/venv/Lib/site-packages/cx_OracleObject/Utils.py
|
e9107e5e7501b1942bcd28955a5ca2210e161f27
|
[] |
no_license
|
adcGG/Lianxi
|
e4b1ce0d3cfc76e625e1e1caca0a58f25ba5d692
|
3659c3ca11a13b4ad54dbd2e669949701bae10b5
|
refs/heads/master
| 2022-12-13T05:45:41.312292
| 2019-08-14T07:38:19
| 2019-08-14T07:38:19
| 201,189,540
| 0
| 1
| null | 2022-04-22T22:08:16
| 2019-08-08T06:07:53
|
Python
|
UTF-8
|
Python
| false
| false
| 6,568
|
py
|
"""Defines utility functions."""
import cx_Exceptions
import sys
__all__ = [ "OrderObjects" ]
def ClausesForOutput(clauses, firstString, restString, joinString):
    """Return a list of clauses suitable for output in a SQL statement.

    The first clause is prefixed with *firstString*; each subsequent clause
    is placed on its own line prefixed with *restString*, separated by
    *joinString*. An empty clause list yields "".
    """
    if not clauses:
        return ""
    separator = joinString + "\n" + restString
    return firstString + separator.join(clauses)
def DependenciesOfInterest(key, objectsOfInterest, dependencies,
        dependenciesOfInterest):
    """Collect into *dependenciesOfInterest* every object of interest that
    *key* depends on, transitively walking through objects that are not of
    interest.

    NOTE(review): assumes the dependency graph is acyclic — a cycle of
    not-of-interest keys would recurse forever.
    """
    for refKey in dependencies.get(key, ()):
        if refKey in objectsOfInterest:
            dependenciesOfInterest[refKey] = None
        else:
            # Not of interest itself: keep walking its own dependencies.
            DependenciesOfInterest(refKey, objectsOfInterest, dependencies,
                    dependenciesOfInterest)
def OrderObjects(objects, dependencies):
    """Put the objects in the order necessary for creation without errors.

    objects: iterable of (owner, name, type) keys.
    dependencies: iterable of 6-tuples
        (owner, name, type, refOwner, refName, refType) meaning the first
        object depends on the second.
    Returns a list of keys topologically sorted so that every object appears
    after everything it depends on, grouping output by owner.
    Raises CircularReferenceDetected (after printing the cycle to stderr)
    when no progress can be made.
    """
    # initialize the mapping that indicates which items this object depends on
    iDependOn = {}
    dependsOnMe = {}
    for key in objects:
        iDependOn[key] = {}
        dependsOnMe[key] = {}
    # populate a mapping which indicates all of the dependencies for an object
    mappedDependencies = {}
    for owner, name, type, refOwner, refName, refType in dependencies:
        key = (owner, name, type)
        refKey = (refOwner, refName, refType)
        subDict = mappedDependencies.get(key)
        if subDict is None:
            subDict = mappedDependencies[key] = {}
        subDict[refKey] = None
    # now populate the mapping that indicates which items this object depends on
    # note that an object may depend on an object which is not in the list of
    # interest, but it itself depends on an object which is in the list so the
    # chain of dependencies is traversed until no objects of interest are found
    for key in iDependOn:
        refKeys = {}
        DependenciesOfInterest(key, iDependOn, mappedDependencies, refKeys)
        for refKey in refKeys:
            iDependOn[key][refKey] = None
            dependsOnMe[refKey][key] = None
    # order the items until no more items left
    outputObjs = {}
    orderedObjs = []
    while iDependOn:
        # acquire a list of items which do not depend on anything
        references = {}
        keysToOutput = {}
        for key, value in list(iDependOn.items()):
            if not value:
                owner, name, type = key
                if owner not in keysToOutput:
                    keysToOutput[owner] = []
                keysToOutput[owner].append(key)
                del iDependOn[key]
            else:
                # count, per owner, how many unsatisfied references remain
                for refKey in value:
                    owner, name, type = refKey
                    if owner not in references:
                        references[owner] = 0
                    references[owner] += 1
        # detect a circular reference and avoid an infinite loop
        if not keysToOutput:
            keys = list(iDependOn.keys())
            keys.sort()
            for key in keys:
                print("%s.%s (%s)" % key, file = sys.stderr)
                refKeys = list(iDependOn[key].keys())
                refKeys.sort()
                for refKey in refKeys:
                    print(" %s.%s (%s)" % refKey, file = sys.stderr)
            raise CircularReferenceDetected()
        # for each owner that has something to describe
        while keysToOutput:
            # determine the owner with the most references
            outputOwner = ""
            maxReferences = 0
            keys = list(references.keys())
            keys.sort()
            for key in keys:
                value = references[key]
                if value > maxReferences and key in keysToOutput:
                    maxReferences = value
                    outputOwner = key
            if not outputOwner:
                # no referenced owner pending: pick an arbitrary one
                for key in keysToOutput:
                    outputOwner = key
                    break
            # remove this owner from the list
            keys = keysToOutput[outputOwner]
            del keysToOutput[outputOwner]
            if outputOwner in references:
                del references[outputOwner]
            # process this list, removing dependencies and adding additional
            # objects
            tempKeys = keys
            keys = []
            while tempKeys:
                nextKeys = []
                tempKeys.sort()
                for key in tempKeys:
                    # NOTE(review): refKeys is computed/sorted but the loop
                    # below iterates the dict directly — presumably it was
                    # meant to iterate refKeys for determinism; confirm.
                    refKeys = list(dependsOnMe[key].keys())
                    refKeys.sort()
                    for refKey in dependsOnMe[key]:
                        del iDependOn[refKey][key]
                        if not iDependOn[refKey]:
                            owner, name, type = refKey
                            if owner == outputOwner:
                                del iDependOn[refKey]
                                nextKeys.append(refKey)
                            elif owner in keysToOutput:
                                del iDependOn[refKey]
                                keysToOutput[owner].append(refKey)
                keys += tempKeys
                tempKeys = nextKeys
            # output the list of objects that have their dependencies satisfied
            for key in keys:
                if key not in outputObjs:
                    orderedObjs.append(key)
                    outputObjs[key] = None
    # return the ordered list
    return orderedObjs
def SetOptions(obj, options):
    """Copy matching attributes from a parsed-options object onto *obj*.

    Only attributes that already exist on *obj* are copied; private names
    (leading underscore) are skipped. List values are flattened by splitting
    each element on commas: ["a,b", "c"] -> ["a", "b", "c"].
    """
    if not options:
        return
    for name in dir(options):
        if name.startswith("_") or not hasattr(obj, name):
            continue
        value = getattr(options, name)
        if isinstance(value, list):
            value = [part for item in value for part in item.split(",")]
        setattr(obj, name, value)
def SizeForOutput(size):
    """Return the size suitable for output in a SQL statement.

    A negative size is treated as unlimited; exact multiples of 1024 and
    1024**2 are rendered with "k" and "m" suffixes respectively.
    """
    if size < 0:
        return "unlimited"
    kilobytes, remainder = divmod(size, 1024)
    if remainder:
        return "%g" % size
    megabytes, remainder = divmod(kilobytes, 1024)
    if remainder:
        return "%gk" % kilobytes
    return "%gm" % megabytes
class CircularReferenceDetected(cx_Exceptions.BaseException):
    """Raised by OrderObjects when the dependency graph contains a cycle."""
    message = "Circular reference detected!"
|
[
"979818137@11.com"
] |
979818137@11.com
|
9f80f8e98ba81fbc03fffe07a29c8ce878090be2
|
7971a30e49246a1080490c9641c29cb8fd575c12
|
/Subset_DataStructures/remove_duplicates.py
|
6781a36207b4e74472372b44658b8e5dafccec4e
|
[] |
no_license
|
ymwondimu/HackerRank
|
3870922a29a1e4271a1d3cfd238fd83fd80749c8
|
6481d7ddf61868108a071b44e3fdb098e8cbd61e
|
refs/heads/master
| 2020-03-21T19:05:00.406134
| 2018-07-26T01:11:05
| 2018-07-26T01:11:05
| 138,929,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,068
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
    """A single node of a singly linked list."""

    def __init__(self, node_data):
        # No successor until the node is linked into a list.
        self.next = None
        self.data = node_data
class SinglyLinkedList:
    """Minimal singly linked list keeping head and tail for O(1) appends."""

    def __init__(self):
        self.head = None
        self.tail = None

    def insert_node(self, node_data):
        """Append a new node holding *node_data* at the end of the list."""
        node = SinglyLinkedListNode(node_data)
        if self.head is None:
            self.head = node
        else:
            self.tail.next = node
        self.tail = node
def removeDuplicates(head):
    """Remove consecutive duplicate values from a sorted linked list.

    BUG FIX: the original stub assigned ``curr``/``prev`` and returned the
    list unchanged, so duplicates were never removed even though main()'s
    fixture (1, 2, 3, 3, 4, 5) clearly expects deduplication. This walks the
    list and splices out any node whose value equals its predecessor's.
    Returns the (possibly None) head of the deduplicated list.
    """
    current = head
    while current is not None and current.next is not None:
        if current.next.data == current.data:
            # Skip the duplicate node.
            current.next = current.next.next
        else:
            current = current.next
    return head
def main():
    """Build the sample list 1->2->3->3->4->5, run removeDuplicates, and
    print each remaining value on its own line."""
    nodes = [SinglyLinkedListNode(v) for v in (1, 2, 3, 3, 4, 5)]
    for left, right in zip(nodes, nodes[1:]):
        left.next = right
    h = removeDuplicates(nodes[0])
    while h:
        print(h.data)
        h = h.next


if __name__ == "__main__":
    main()
|
[
"ywondimu6@gatech.edu"
] |
ywondimu6@gatech.edu
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.