blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0b420050e1479b0904e29b59e1c48a5160989fd1
|
f392a5e4193d44c41e234696d093140cdf301497
|
/tests/example2.py
|
8b1a937795148bdddefeb027df7948a1d1727c74
|
[
"Apache-2.0"
] |
permissive
|
GateNLP/gate-lf-python-data
|
fb151132c94e25f59947d6400692f23914dfa89e
|
89880a82458f09702c1d6828ae341997e0b45f73
|
refs/heads/master
| 2021-03-27T08:55:26.304655
| 2019-05-31T11:44:29
| 2019-05-31T11:44:29
| 113,597,027
| 4
| 1
|
Apache-2.0
| 2019-05-30T08:50:59
| 2017-12-08T16:52:39
|
Python
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
from __future__ import print_function
from gatelfdata import Dataset
import sys
if len(sys.argv) != 2:
raise Exception("Need one parameter: meta file")
file = sys.argv[1]
ds = Dataset(file)
valset = ds.convert_to_file()
for b in ds.batches_converted(batch_size=20, as_numpy=False, pad_left=True):
print("Batch: len=", len(b))
print("Batch: data=", b)
|
[
"johann.petrak@gmail.com"
] |
johann.petrak@gmail.com
|
8b9843406d7206f8d8eb6ef33274a88f5669773e
|
b727870804e5c7a474c271e1cf0ebfe05619ddfb
|
/keras44_5_wine_conv1d.py
|
38577cf7df599d8d5b61c45ee04523731daff3ff
|
[] |
no_license
|
marattang/keras
|
843227592f7b3cb08034bfdc2e6319200e62e990
|
cc78d1d70bfbe99e78f19ae11053ebbb87f20864
|
refs/heads/main
| 2023-08-03T21:50:53.438394
| 2021-09-10T05:11:15
| 2021-09-10T05:11:15
| 383,742,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,365
|
py
|
import numpy as np
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, GlobalAveragePooling2D, Flatten, LSTM, Conv1D
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, RobustScaler, StandardScaler, PowerTransformer, QuantileTransformer
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import EarlyStopping
from matplotlib import font_manager, rc
from tensorflow.python.keras.layers.core import Dropout

# Windows-specific Korean font setup so matplotlib can render Korean labels.
font_path = "C:/Windows/Fonts/gulim.ttc"
font = font_manager.FontProperties(fname=font_path).get_name()
rc('font', family=font)

# Goal: classify the sklearn wine dataset with a Conv1D net (target acc >= 0.8).
dataset = load_wine()
x = dataset.data
y = dataset.target
print(dataset.DESCR)
print(dataset.feature_names)
print(np.unique(y))

# One-hot encode the 3 wine classes.
y = to_categorical(y)
print(y.shape)

x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, shuffle=True, random_state=66)
print(x_train)
print(x_train.shape)

# Feature scaling; PowerTransformer chosen after comparing scalers (results below).
scaler = PowerTransformer()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

# FIX: derive reshape sizes from the data instead of hard-coding 124/54 so the
# script keeps working if the split ratio or dataset size changes.
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)

model = Sequential()
model.add(Conv1D(16, kernel_size=1, activation='relu',
                 input_shape=(x_train.shape[1], 1)))
model.add(Conv1D(8, 1))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(32, activation='relu'))
model.add(Dense(3, activation='softmax'))

# Early stopping is defined but deliberately not passed to fit() in this run.
es = EarlyStopping(monitor='val_loss', mode='min', patience=15)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
hist = model.fit(x_train, y_train, batch_size=1, epochs=70, validation_split=0.05)

loss = model.evaluate(x_test, y_test)  # [loss, accuracy]
print('loss : ', loss[0])
print('accuracy : ', loss[1])

# Results log (test accuracy by architecture/scaler):
# DNN: QuantileTransformer 0.9259 | MaxAbsScaler 0.9259 | MinMaxScaler 0.9630
#      RobustScaler 0.9815 | StandardScaler 0.9815 | PowerTransformer 0.9815
# CNN: 0.9815
# RNN: epochs 50 -> 70; after hyperparameter tuning 0.9444 -> 1.0
# Conv1D: 0.9815
|
[
"tlawlfp0322@gmail.com"
] |
tlawlfp0322@gmail.com
|
582e0a4f9404ffe497957148713488fb28333b04
|
49f23f530d0cda7aadbb27be9c5bdefaa794d27f
|
/server/common_models/user.py
|
a5b3f4d6f5e5d6819209dd9b15cdda3c1a15dacb
|
[
"MIT"
] |
permissive
|
Soopro/totoro
|
198f3a51ae94d7466136ee766be98cb559c991f1
|
6be1af50496340ded9879a6450c8208ac9f97e72
|
refs/heads/master
| 2020-05-14T09:22:21.942621
| 2019-08-03T20:55:23
| 2019-08-03T20:55:23
| 181,738,167
| 0
| 1
|
MIT
| 2019-10-29T13:43:24
| 2019-04-16T17:42:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
# coding=utf-8
from __future__ import absolute_import
from document import BaseDocument, ObjectId, INDEX_DESC
from utils.misc import now
class User(BaseDocument):
    """MongoDB-backed user document: schema, defaults, indexes and queries."""
    # Lifecycle statuses stored in the `status` field.
    STATUS_BEGINNER, STATUS_VIP, STATUS_BANNED = (0, 1, 2)
    # Hard cap applied to the list queries below.
    MAX_QUERY = 120
    # Field types enforced by BaseDocument (Python 2: `unicode` strings).
    structure = {
        'login': unicode,
        'password_hash': unicode,
        'openid': unicode,
        'unionid': unicode,
        'credit': int,
        'meta': dict,
        'creation': int,
        'updated': int,
        'status': int,
    }
    # Fields excluded from ordinary serialization by BaseDocument.
    sensitive_fields = ['meta']
    required_fields = ['openid']
    default_values = {
        'login': u'',
        'password_hash': u'',
        'unionid': u'',
        'credit': 0,
        'meta': {},
        'creation': now,  # utils.misc.now supplies the timestamp
        'updated': now,
        'status': STATUS_BEGINNER
    }
    indexes = [
        {
            'fields': ['openid'],
            'unique': True,
        },
        {
            'fields': ['login'],
            'unique': True,
        },
        {
            'fields': ['creation'],
        },
        {
            'fields': ['status'],
        }
    ]
    def find_all(self):
        # All users, newest first.
        return self.find().sort('creation', INDEX_DESC)
    def find_activated(self):
        # NOTE(review): `STATUS_ACTIVATED` is not defined on this class
        # (only STATUS_BEGINNER/STATUS_VIP/STATUS_BANNED exist), so calling
        # this raises AttributeError — confirm which status was intended.
        return self.find({
            'status': self.STATUS_ACTIVATED
        }).sort('creation', INDEX_DESC).limit(self.MAX_QUERY)
    def find_by_status(self, status):
        # Users with the given status, newest first, capped at MAX_QUERY.
        return self.find({
            'status': status
        }).sort('creation', INDEX_DESC).limit(self.MAX_QUERY)
    def find_one_by_id(self, user_id):
        return self.find_one({
            '_id': ObjectId(user_id),
        })
    def find_one_by_login(self, login):
        # An empty login would collide with the u'' default value, so treat
        # falsy input as "no match".
        if not login:
            return None
        return self.find_one({
            'login': login,
        })
    def find_one_by_openid(self, openid):
        return self.find_one({
            'openid': openid,
        })
    def displace_login(self, login, openid):
        # A login may exist only once: clear it from any *other* openid and
        # reset that user's status to beginner.
        return self.collection.update(
            {'openid': {'$ne': openid}, 'login': login},
            {'$set': {'login': u'', 'status': self.STATUS_BEGINNER}},
            multi=True)
    def count_used(self):
        # Total number of user documents in the collection.
        return self.find().count()
|
[
"redy.ru@gmail.com"
] |
redy.ru@gmail.com
|
63b86d3db2a788557594680b422fe05b9c77afcf
|
d01f9ff2d7ba3c7c99158678adeaf082f3f15dbc
|
/model/cpn/ade.cpn.R50_v1c.v38.v2/train.py
|
294cb1add0f70efdd177b711e5ca1fc5df2170d0
|
[
"MIT"
] |
permissive
|
akinoriosamura/TorchSeg-mirror
|
d8e76d99e80d55c2555f4f8f7a7fc3f30ef5dec4
|
34033fe85fc24015bcef7a92aad39d2a25a001a5
|
refs/heads/master
| 2021-06-18T15:47:00.946788
| 2019-10-26T04:46:07
| 2019-10-26T04:46:07
| 217,657,156
| 0
| 0
|
MIT
| 2021-06-08T20:36:44
| 2019-10-26T04:46:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,088
|
py
|
from __future__ import division
import os.path as osp
import sys
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from config import config
from dataloader import get_train_loader
from network import CPNet
from datasets import ADE
from utils.init_func import init_weight, group_weight
from engine.lr_policy import PolyLR
from engine.logger import get_logger
from engine.engine import Engine
from seg_opr.sync_bn import DataParallelModel, Reduce, BatchNorm2d
from seg_opr.seg_oprs import one_hot
# apex supplies fused SyncBatchNorm and DistributedDataParallel; it is a hard
# requirement for this training script.
try:
    from apex.parallel import SyncBatchNorm, DistributedDataParallel
except ImportError:
    raise ImportError(
        "Please install apex from https://www.github.com/nvidia/apex .")

logger = get_logger()

# Seed CPU and GPU RNGs for reproducibility.
torch.manual_seed(config.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(config.seed)

parser = argparse.ArgumentParser()

with Engine(custom_parser=parser) as engine:
    args = parser.parse_args()

    cudnn.benchmark = True
    if engine.distributed:
        torch.cuda.set_device(engine.local_rank)

    # data loader
    train_loader, train_sampler = get_train_loader(engine, ADE)

    # config network and criterion
    criterion = nn.CrossEntropyLoss(reduction='mean',
                                    ignore_index=-1)

    if engine.distributed:
        logger.info('Use the Multi-Process-SyncBatchNorm')
        BatchNorm2d = SyncBatchNorm
    else:
        BatchNorm2d = BatchNorm2d
    model = CPNet(config.num_classes, criterion=criterion,
                  pretrained_model=config.pretrained_model,
                  norm_layer=BatchNorm2d)
    init_weight(model.business_layer, nn.init.kaiming_normal_,
                BatchNorm2d, config.bn_eps, config.bn_momentum,
                mode='fan_in', nonlinearity='relu')

    # group weight and config optimizer
    base_lr = config.lr
    # if engine.distributed:
    #     base_lr = config.lr * engine.world_size

    params_list = []
    params_list = group_weight(params_list, model.backbone,
                               BatchNorm2d, base_lr)
    for module in model.business_layer:
        # Head layers train at 10x the backbone learning rate.
        params_list = group_weight(params_list, module, BatchNorm2d,
                                   base_lr * 10)

    # config lr policy
    total_iteration = config.nepochs * config.niters_per_epoch
    lr_policy = PolyLR(base_lr, config.lr_power, total_iteration)
    optimizer = torch.optim.SGD(params_list,
                                lr=base_lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)

    if engine.distributed:
        if torch.cuda.is_available():
            model.cuda()
            model = DistributedDataParallel(model)
    else:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = DataParallelModel(model, engine.devices)
        model.to(device)

    engine.register_state(dataloader=train_loader, model=model,
                          optimizer=optimizer)
    if engine.continue_state_object:
        engine.restore_checkpoint()

    optimizer.zero_grad()
    model.train()

    for epoch in range(engine.state.epoch, config.nepochs):
        if engine.distributed:
            train_sampler.set_epoch(epoch)
        bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
        pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout,
                    bar_format=bar_format)
        dataloader = iter(train_loader)
        for idx in pbar:
            engine.update_iteration(epoch, idx)

            # BUG FIX: `dataloader.next()` is the Python 2 iterator protocol
            # and raises AttributeError on Python 3 iterators; use next().
            minibatch = next(dataloader)
            imgs = minibatch['data']
            gts = minibatch['label']
            imgs = imgs.cuda(non_blocking=True)
            gts = gts.cuda(non_blocking=True)

            b, h, w = gts.size()
            # Downsample labels 8x (scale_factor=0.125) with nearest-neighbour
            # so they match the lower-resolution prediction map.
            scaled_gts = F.interpolate((gts.view(b, 1, h, w)).float(),
                                       scale_factor=0.125,
                                       mode="nearest")
            b, c, h, w = scaled_gts.size()
            scaled_gts = scaled_gts.squeeze_()
            C = config.num_classes + 1
            one_hot_gts = one_hot(scaled_gts, C).view(b, C, -1)
            # Pairwise same-class indicator matrix (one-hot^T @ one-hot).
            similarity_gts = torch.bmm(one_hot_gts.permute(0, 2, 1),
                                       one_hot_gts)
            gts = gts - 1  # shift labels so ignore class becomes -1

            loss = model(imgs, gts, similarity_gts)

            # reduce the whole loss over multi-gpu
            if engine.distributed:
                dist.all_reduce(loss, dist.ReduceOp.SUM)
                loss = loss / engine.world_size
            else:
                loss = Reduce.apply(*loss) / len(loss)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Poly LR schedule: groups 0/1 are the backbone, the remaining
            # (head) groups use 10x lr, matching group_weight above.
            current_idx = epoch * config.niters_per_epoch + idx
            lr = lr_policy.get_lr(current_idx)
            optimizer.param_groups[0]['lr'] = lr
            optimizer.param_groups[1]['lr'] = lr
            for i in range(2, len(optimizer.param_groups)):
                optimizer.param_groups[i]['lr'] = lr * 10

            print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \
                        + ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \
                        + ' lr=%.2e' % lr \
                        + ' loss=%.2f' % loss.item()
            pbar.set_description(print_str, refresh=False)

        # Checkpoint periodically and during the final 20 epochs
        # (rank 0 only when distributed).
        if (epoch >= config.nepochs - 20) or (
                epoch % config.snapshot_iter == 0):
            if engine.distributed and (engine.local_rank == 0):
                engine.save_and_link_checkpoint(config.snapshot_dir,
                                                config.log_dir,
                                                config.log_dir_link)
            elif not engine.distributed:
                engine.save_and_link_checkpoint(config.snapshot_dir,
                                                config.log_dir,
                                                config.log_dir_link)
|
[
"osamura.akinori@gmail.com"
] |
osamura.akinori@gmail.com
|
6f6c6ada1f54e063deb49df940dc1cc3650971d6
|
b9008dc6326b30de1a16ba01a1f3143aa248f7c3
|
/python/chapter3/ex01_10.py
|
0eed47d612f626182ab96704303341e3153a0b74
|
[] |
no_license
|
wonjongah/multicampus_IoT
|
ce219f8b9875aa7738ef952a8702d818a571610e
|
765a5cd7df09a869a4074d8eafce69f1d6cfda4a
|
refs/heads/master
| 2023-02-13T12:30:19.924691
| 2021-01-08T10:17:42
| 2021-01-08T10:17:42
| 292,800,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Demo of string equality and lexicographic ordering, ending with the
# character codes that explain the case-sensitive comparisons.
country = "Korea"

if country == "Korea":
    print("한국입니다")
else:
    print("한국이 아닙니다")

if "korea" > "japan":
    print("한국이 더 크다")
if "japan" > "korea":
    print("일본이 더 크다")

if "Korea" > "korea":
    print("Korea가 더 큽니다")
if "korea" > "Korea":
    print("korea가 더 큽니다")

# Uppercase letters sort before lowercase (smaller code points).
print(ord("K"))
print(ord("k"))
|
[
"wonjongah@gmail.com"
] |
wonjongah@gmail.com
|
65a186d1f261b126882e2435ee2ae83f22c7970b
|
48c47c714502fdc8cb8bb59601f30c344945a6d0
|
/sdt/util/logging.py
|
2269f55874bf16b4b6e19049b6a40526ff113113
|
[] |
no_license
|
ronvree/SoftDecisionTree
|
327ef0e89eb600f0ee16d3f9cb0ad619b8bb9ba7
|
b3ad17be8870c08be66d78974e0f78ae6f0439c7
|
refs/heads/master
| 2022-12-12T13:35:51.805748
| 2020-08-22T15:05:13
| 2020-08-22T15:05:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
import os
class TrainLog:
    """
    Object for managing the train log directory.

    On construction the log directory (with metadata/ and checkpoints/
    subdirectories) is created; named CSV logs and a free-form message log
    live inside it.
    """
    def __init__(self, log_dir: str):  # Store log in log_dir
        self._log_dir = log_dir
        self._logs = dict()  # log_name -> (key_name, value_names)
        # Ensure the directories exist. FIX: os.makedirs(exist_ok=True) also
        # creates missing parent directories, where the original os.mkdir
        # raised FileNotFoundError for a nested log_dir.
        os.makedirs(self.log_dir, exist_ok=True)
        os.makedirs(self.metadata_dir, exist_ok=True)
        os.makedirs(self.checkpoint_dir, exist_ok=True)
    @property
    def log_dir(self):
        return self._log_dir
    @property
    def checkpoint_dir(self):
        return os.path.join(self._log_dir, 'checkpoints')
    @property
    def metadata_dir(self):
        return os.path.join(self._log_dir, 'metadata')
    def log_message(self, msg: str):
        """
        Append a message to the log file.

        :param msg: the message string to be written to the log file

        FIX: the original opened the file in 'w' mode, so every call erased
        all previously logged messages; open in append mode and terminate
        each message with a newline instead.
        """
        with open(os.path.join(self.log_dir, 'log.txt'), 'a') as f:
            f.write(msg + '\n')
    def create_log(self, log_name: str, key_name: str, *value_names):
        """
        Create a csv for logging information
        :param log_name: The name of the log. The log filename will be <log_name>.csv.
        :param key_name: The name of the attribute that is used as key (e.g. epoch number)
        :param value_names: The names of the attributes that are logged
        :raises Exception: if a log with this name was already created
        """
        if log_name in self._logs:
            raise Exception('Log already exists!')
        # Add to existing logs
        self._logs[log_name] = (key_name, value_names)
        # Create log file. Create columns
        with open(os.path.join(self.log_dir, f'{log_name}.csv'), 'w') as f:
            f.write(','.join((key_name,) + value_names) + '\n')
    def log_values(self, log_name, key, *values):
        """
        Log values in an existent log file
        :param log_name: The name of the log file
        :param key: The key attribute for logging these values
        :param values: value attributes that will be stored in the log
        :raises Exception: if the log does not exist or the value count
            does not match the columns declared in create_log
        """
        if log_name not in self._logs:
            raise Exception('Log not existent!')
        if len(values) != len(self._logs[log_name][1]):
            raise Exception('Not all required values are logged!')
        # Write a new line with the given values
        with open(os.path.join(self.log_dir, f'{log_name}.csv'), 'a') as f:
            f.write(','.join(str(v) for v in (key,) + values) + '\n')
|
[
"ronvbree@gmail.com"
] |
ronvbree@gmail.com
|
8bd85a71ed32a09c3f871431ee97970c9134121b
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/FrontCache/FPythonCode/FC_TCOLL_01_ATS_40.py
|
11d35ced4c4937f267029d8778ff4dfce2f825ed
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,150
|
py
|
'''----------------------------------------------------------------------------------------------------------
MODULE : FC_TCOLL_01_ATS_40
PROJECT : FX onto Front Arena
PURPOSE : This module is the entry point for the Trade Collection ATSs. These ATSs will
subscribe to Trade Collection Requests. They will pull the relevant Front Cache
data from Front Cache Trading Manager Template for the specific trades in the
incoming request. Once a Request and/or Batch is complete, a Response message
will be posted onto the AMB so that the Response can be sent to subscribing
consumers to notify them that the data for the Request or Batch is available
for consumption.
DEPARTMENT AND DESK : All Departments and all Desks.
REQUESTER : FX onto Front Arena Project
DEVELOPER : Heinrich Cronje
CR NUMBER : XXXXXX
-------------------------------------------------------------------------------------------------------------
'''
'''----------------------------------------------------------------------------------------------------------
Importing all relevant Python and custom modules needed for the ATS to start up. Initializing the FC_UTILS
module to load all Parameters, Logging, Error Handler.
----------------------------------------------------------------------------------------------------------'''
# NOTE: this module uses Python 2 syntax (`except ImportError, e`) throughout.
import FC_ERROR_HANDLER_DEFAULT as ERROR_HANDLER_DEFAULT
import traceback
# FC_UTILS carries parameters, logging and error handling; until it is loaded
# only the default error handler is available for reporting failures.
try:
    from FC_UTILS import FC_UTILS as UTILS
except ImportError, e:
    ERROR_HANDLER_DEFAULT.handelError('Import Error in module %s.' %__name__, e, traceback)
    raise ImportError('Import Error in module %s. ERROR: %s.' %(__name__, str(e)))
try:
    UTILS.Initialize(__name__)
except Exception, e:
    ERROR_HANDLER_DEFAULT.handelError('Initialization Error in module %s. FC_UTILS could not be initialized. '
                                      'No Parameters, Logging or Error Handling could be loaded. '
                                      'The ATS will not start until the root issue is resolved.' %__name__, e, traceback)
    raise Exception('Initialization Error in module %s. FC_UTILS could not be initialized. '
                    'No Parameters, Logging or Error Handling could be loaded. '
                    'The ATS will not start until the root issue is resolved. ERROR: %s. ' %(__name__, str(e)))
# From here on errors are reported through UTILS.ErrorHandler with
# FC_EXCEPTION wrappers instead of the default handler.
try:
    from FC_EXCEPTION import FC_EXCEPTION as EXCEPTION
except ImportError, e:
    ERROR_HANDLER_DEFAULT.handelError('Import Error in module %s. FC_EXCEPTION could not be imported. '
                                      'No Error Handling could be loaded. '
                                      'The ATS will not start until the root issue is resolved.' %__name__, e, traceback)
    raise Exception('Import Error in module %s. FC_EXCEPTION could not be imported. '
                    'No Error Handling could be loaded. '
                    'The ATS will not start until the root issue is resolved. ERROR: %s. ' %(__name__, str(e)))
try:
    from datetime import datetime
except ImportError, e:
    UTILS.ErrorHandler.processError(None, EXCEPTION('Import Error in module %s. Module datetime could not be imported. '
                                                    'The ATS will not start until the root issue is resolved.' %__name__, traceback, 'CRITICAL', e), __name__)
    raise Exception('Import Error in module %s. Module datetime could not be imported. '
                    'The ATS will not start until the root issue is resolved. ERROR: %s' %(__name__, str(e)))
try:
    from FC_TCOLL_ATS_WORKER import FC_TCOLL_ATS_WORKER as TCOLL_ATS_WORKER
except ImportError, e:
    UTILS.ErrorHandler.processError(None, EXCEPTION('Could not import the worker module in module %s' %__name__, traceback, 'CRITICAL', None), __name__)
    raise Exception('Could not import the worker module in module %s. ERROR: %s' %(__name__, str(e)))
'''----------------------------------------------------------------------------------------------------------
Global variables
-------------------------------------------------------------------------------------------------------------
'''
# Module-level singleton worker; created by start(), used by work()/stop().
global worker
worker = None
'''----------------------------------------------------------------------------------------------------------
work function which the ATS will call once started.
-------------------------------------------------------------------------------------------------------------
'''
def work():
    """Called repeatedly by the ATS framework; delegates to the worker."""
    global worker
    if not worker:
        UTILS.ErrorHandler.processError(None, EXCEPTION(UTILS.Constants.fcExceptionConstants.WORKER_VARIABLE_S_IS_NOT_INSTANTIATED %__name__, traceback, UTILS.Constants.fcGenericConstants.CRITICAL, None), __name__)
    else:
        worker.work()
'''----------------------------------------------------------------------------------------------------------
start function which the ATS will call when the ATS is starting.
-------------------------------------------------------------------------------------------------------------
'''
def start():
    """Called on ATS start-up: create the worker once and start it."""
    UTILS.Logger.flogger.info(UTILS.Constants.fcFloggerConstants.STARTING_ATS_S_AT_S %(__name__, datetime.now()))
    global worker
    if not worker:
        worker = TCOLL_ATS_WORKER()
    worker.start()
'''----------------------------------------------------------------------------------------------------------
stop function which the ATS will call when the ATS is stopping.
-------------------------------------------------------------------------------------------------------------
'''
def stop():
    """Called on ATS shutdown: stop the worker if it was ever created."""
    global worker
    if not worker:
        UTILS.ErrorHandler.processError(None, EXCEPTION(UTILS.Constants.fcExceptionConstants.WORKER_VARIABLE_IN_S_IS_NOT_INSTANTIATED_STOP %__name__, traceback, UTILS.Constants.fcGenericConstants.MEDIUM, None), __name__)
    else:
        worker.stop()
#start()
#work()
#stop()
|
[
"nencho.georogiev@absa.africa"
] |
nencho.georogiev@absa.africa
|
97f54bfaf9957347fb4254fc70ebbe9c10c2fb2f
|
03e4331a8d5c107f7cc1d814215ed1547ba6a0f0
|
/xTool/models/models.py
|
315e176f675bac1087ca7cd370482d941f7dd775
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
KqSMea8/xTool
|
ba61a4f56e2e5ddda7caaf429cfb452be06a65c6
|
eade656ca77347d2c05e66a3d680e236c8779683
|
refs/heads/master
| 2020-04-11T19:01:29.673695
| 2018-12-16T16:15:50
| 2018-12-16T16:15:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
#coding: utf-8
# Shared SQLAlchemy setup and constants for the xTool models package.
# The declarative Base defined here is presumably subclassed by model
# classes later in this file — confirm against the full module.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.standard_library import install_aliases
from builtins import str
from builtins import object, bytes
import dill
from urllib.parse import urlparse, quote, parse_qsl
from sqlalchemy import (
    Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType,
    Index, Float, LargeBinary)
from sqlalchemy import func, or_, and_, true as sqltrue
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import reconstructor, relationship, synonym
install_aliases()
Base = declarative_base()
# Length of primary-key ID string columns
ID_LEN = 250
# Default key for the intermediate (XCom-style) exchange table
XCOM_RETURN_KEY = 'return_value'
|
[
"jinyinqiao@gmail.com"
] |
jinyinqiao@gmail.com
|
8123f8b823863a2cdfac01616013fec780ac3e16
|
ef4a1748a5bfb5d02f29390d6a66f4a01643401c
|
/algorithm/2_algo/strting_search.py
|
cd08c5997becfb887981008a564f5f0a36907fff
|
[] |
no_license
|
websvey1/TIL
|
aa86c1b31d3efc177df45503d705b3e58b800f8e
|
189e797ba44e2fd22a033d1024633f9e0128d5cf
|
refs/heads/master
| 2023-01-12T10:23:45.677578
| 2019-12-09T07:26:59
| 2019-12-09T07:26:59
| 162,102,142
| 0
| 1
| null | 2022-12-11T16:31:08
| 2018-12-17T08:57:58
|
Python
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
# Substring-search exercise: for each test case read a pattern and a text
# and print 1 if the pattern occurs in the text, else 0, plus elapsed time.
import sys, time
start = time.time()
# Redirect stdin so input() reads the bundled test-case file.
sys.stdin = open("./tc/strting_search.txt","r")
T = int(input())
for tc in range(1,T+1):
    data = input()
    text = input()  # renamed from `all`, which shadowed the builtin
    # Easy version (~0.0 s): the `in` substring operator.
    # (FIX: dropped the dead `result = 5` sentinel — it was always overwritten.)
    result = 1 if data in text else 0
    # Manual version (~0.001 s), kept for reference:
    # for i in range(len(text)-len(data)+1):
    #     if text[i:i+len(data)] == data:
    #         result = 1
    #         break
    #     else:
    #         result = 0
    print("#%d %d" %(tc,result), time.time()- start)
|
[
"websvey1@gmail.com"
] |
websvey1@gmail.com
|
bd090aca89d155016d194168fac8a7c7b8509f17
|
ea393959886a5cd13da4539d634f2ca0bbcd06a2
|
/82.py
|
7ccff9c2594f1e475a361dff197c8395f4f63aba
|
[] |
no_license
|
zhangchizju2012/LeetCode
|
f605f35b82f16282559af71e4e61ec2629a90ebc
|
0c4c38849309124121b03cc0b4bf39071b5d1c8c
|
refs/heads/master
| 2020-04-05T12:12:14.810639
| 2018-08-09T10:24:52
| 2018-08-09T10:24:52
| 81,021,830
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 01:07:11 2017
@author: zhangchi
"""
# Definition for singly-linked list.
class ListNode(object):
    """Singly-linked list node."""
    def __init__(self, x):
        self.next = None
        self.val = x


class Solution(object):
    def deleteDuplicates(self, head):
        """
        Remove every value that appears more than once in a sorted list,
        keeping only values that occur exactly once.

        :type head: ListNode
        :rtype: ListNode
        """
        if head is None:
            return None
        dummy = ListNode(0)       # anchor for the output list
        tail = dummy              # last emitted node
        current_val = head.val    # value of the run being examined
        is_unique = True          # has current_val appeared only once so far?
        while head.next is not None:
            head = head.next
            if head.val == current_val:
                # Same value again: the current run is a duplicate run.
                is_unique = False
            elif is_unique:
                # Run ended after a single occurrence: emit it.
                tail.next = ListNode(current_val)
                tail = tail.next
                current_val = head.val
            else:
                # Duplicate run ended: discard it and start a new run.
                is_unique = True
                current_val = head.val
        # Emit the final run if it was unique.
        if is_unique:
            tail.next = ListNode(head.val)
        return dummy.next
|
[
"zhangchizju2012@zju.edu.cn"
] |
zhangchizju2012@zju.edu.cn
|
67545b2050a0a9a4e4595f07aeedbc7bf6d89031
|
5945903ff7b3c0be799d8b228aa96309e8d6b68a
|
/PTA_AL_1011.py
|
ccbd0a00df6ca36344b78bfa9460a3742a7ea3c2
|
[] |
no_license
|
freesan44/LeetCode
|
44fd01fa37e2d7e729ae947da2350b1649c163ae
|
2ed9f1955c527d43fe1a02e5bebf5a6f981ef388
|
refs/heads/master
| 2021-12-07T20:07:02.308097
| 2021-11-01T23:58:11
| 2021-11-01T23:58:11
| 245,178,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
# PAT 1011-style betting problem: read three rows of three floats (the odds
# for Win / Tie / Lose), pick the highest odd per row, and print the chosen
# letters followed by the computed profit.
inputList = []
for _ in range(3):
    inputData = list(map(float,input().split()))
    # inputData = list(map(float, "1.1 2.5 1.7".split()))
    inputList.append(inputData)
retList = []
res = 1  # running product of the chosen (maximum) odds
for i in inputList:
    maxVal = max(i)
    indexVal = i.index(maxVal)
    # Column 0/1/2 maps to W (win) / T (tie) / L (lose).
    if indexVal == 0:
        retList.append("W")
    elif indexVal == 1:
        retList.append("T")
    else:
        retList.append("L")
    res *= maxVal
# Profit formula: 2 * (product * 0.65 - 1).
res = 2 * (res*0.65 - 1)
# The tricky part is the output formatting: two decimal places.
res = "%.2f" % res
retList.append(res)
print(" ".join(retList))
|
[
"freesan44@163.com"
] |
freesan44@163.com
|
35e7366e76f6e50c77b6fa3fcf1065b6905128ef
|
05780fe9a74b116832611a35fce38fa24b4d4ffc
|
/madgraph/madgraph_binaries/models/taudecay_UFO/__init__.py
|
3c9f65445319ce87b7af17ac5a5968bbe0ceae11
|
[] |
no_license
|
cesarotti/Dark-Photons
|
d810658190297528470abe757c4a678075ef48f6
|
c6dce1df70c660555bf039a78765e4efbffb4877
|
refs/heads/master
| 2021-01-22T19:26:13.892225
| 2015-01-28T05:43:20
| 2015-01-28T05:49:54
| 20,692,647
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
# UFO model package init for the tau-decay model: import the generated
# sub-modules and re-export their aggregate lists at package level.
import particles
import couplings
import lorentz
import parameters
import vertices
import coupling_orders
import function_library  # BUG FIX: was missing, but all_functions below requires it
import write_param_card
import propagators

all_particles = particles.all_particles
all_vertices = vertices.all_vertices
all_couplings = couplings.all_couplings
all_lorentz = lorentz.all_lorentz
all_parameters = parameters.all_parameters
all_orders = coupling_orders.all_orders
all_functions = function_library.all_functions
all_propagators = propagators.all_propagators

# Optional components: only some exported models ship these modules.
try:
    import decays
except ImportError:
    pass
else:
    all_decays = decays.all_decays
try:
    import form_factors
except ImportError:
    pass
else:
    all_form_factors = form_factors.all_form_factors
try:
    import CT_vertices
except ImportError:
    pass
else:
    all_CTvertices = CT_vertices.all_CTvertices

gauge = [0]

__author__ = "K. Mawatari, J. Nakamura"
__date__ = "2014.05.08"
__version__= "2.0"
|
[
"eyvind.niklasson@gmail.com"
] |
eyvind.niklasson@gmail.com
|
071cf340c23a15c39e29549b47c35d45036859f0
|
551ef0567aca428a535775d3949f5d9670c0d29c
|
/abc/173/d/main.py
|
17b7c7ad3e7efd9f8db31585e1d42505e573aa4d
|
[] |
no_license
|
komo-fr/AtCoder
|
7451a9402466ce8d487d0c521128732061c647df
|
c916889294cb12f21e74254de43b3e17e1b354bc
|
refs/heads/master
| 2023-07-22T07:05:52.955188
| 2023-03-01T14:22:16
| 2023-03-01T14:22:16
| 213,109,943
| 0
| 0
| null | 2023-07-06T22:01:28
| 2019-10-06T04:44:49
|
Python
|
UTF-8
|
Python
| false
| false
| 370
|
py
|
#!/usr/bin/env python3
from collections import deque  # kept from the original (unused here)


def solve(n, values):
    """Return the maximum total value for the circle-seating greedy
    (AtCoder ABC 173 D shape).

    Sort descending and take the first (n+1)//2 values: the largest counts
    once, the rest count twice, except that for odd n the last chosen value
    counts only once.
    """
    ordered = sorted(values, reverse=True)
    chosen = ordered[: (n + 1) // 2]
    if len(chosen) == 1:
        return chosen[0]
    if n % 2 == 0:
        return chosen[0] + sum(chosen[1:]) * 2
    return chosen[0] + sum(chosen[1:-1]) * 2 + chosen[-1]


if __name__ == "__main__":
    # Refactored: parsing is unchanged; the computation moved into solve()
    # so it can be tested without stdin.
    N = int(input().split()[0])
    a_list = list(map(int, input().split()))
    print(solve(N, a_list))
|
[
"komo.mdrms@gmail.com"
] |
komo.mdrms@gmail.com
|
fe9f5ac55217dfc033c9cc3c4fd89943726640c8
|
614e01d08c8bb5adbe4d263d9dba04688502a12f
|
/toggl_driver/commands/start_timer.py
|
fe1b1ed883f441770071e74e3ae2ab9cf118f09e
|
[
"MIT"
] |
permissive
|
cloew/TogglDriver
|
28b2b2ebd396d08000fc92e0013f15722975ae06
|
7b0528710e7686690a88a22cf5cca1f3ac55ebbf
|
refs/heads/master
| 2021-01-10T13:05:58.759515
| 2015-10-01T03:43:52
| 2015-10-01T03:43:52
| 43,025,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
from ..args import OptionalProjectArg
from ..config import GlobalConfig
from kao_command.args import Arg, BareWords
class StartTimer:
    """ Command that starts the Toggl Timer """
    description = "Start the Toggl Timer"
    args = [Arg('description', nargs='+', provider=BareWords),
            OptionalProjectArg(help="start the timer within")]

    def run(self, *, description, project=None, workspace=None):
        """ Build a time entry scoped to the project when one is given,
        otherwise to the workspace, then start it """
        scope = {'pid': project.id} if project else {'wid': workspace.id}
        entry = GlobalConfig.connection.TimeEntry(description=description, **scope)
        entry.start()
|
[
"cloew123@gmail.com"
] |
cloew123@gmail.com
|
9a77046a8b02899002884bdbcb8f4b15478e20c2
|
eff7effdc4ada534be1c76ca83ac026ace4f4c05
|
/leetcode/242.py
|
715f3b2f70d33cf3920ab152d51059243bef0a29
|
[] |
no_license
|
ceuity/algorithm
|
470951d9fe77de3b0b28ae06f8224cf8a619d5b5
|
dd28a842709ae00c3793741e411f2cb8e5086fda
|
refs/heads/master
| 2023-06-20T11:32:56.994859
| 2021-07-19T20:31:07
| 2021-07-19T20:31:07
| 279,136,037
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
from collections import Counter
# 48ms
class Solution:
    def isAnagram(self, s: str, t: str) -> bool:
        """Anagram check: two strings are anagrams iff their sorted
        character sequences are equal."""
        return sorted(s) == sorted(t)
# 32ms
class Solution:
    def isAnagram(self, s: str, t: str) -> bool:
        """Anagram check: two strings are anagrams iff their character
        multisets (Counters) are equal."""
        return Counter(s) == Counter(t)
"""
처음엔 간단하게 sorted 함수를 이용하여 풀었으나, Counter 함수를 이용했을 때 더 빨랐다.
"""
|
[
"everland7942@gmail.com"
] |
everland7942@gmail.com
|
2d740379b638a22df79119c84d3b7dddf824aa09
|
4ef80242cf22a1ccd0d7a2042476b5b6ac1eb03e
|
/build/lib/scadparser/ScadModel.py
|
c00db947830a69e33733ad984fc06ea2a68a7bc0
|
[] |
no_license
|
rblack42/ScadParser
|
71081adb99ec03e78bc78b4101562b7fa1bab134
|
a9cc10b23c6515a53065dfb58b23881d0145f88d
|
refs/heads/master
| 2023-07-11T03:51:53.434534
| 2021-08-27T02:03:37
| 2021-08-27T02:03:37
| 397,718,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,172
|
py
|
from __future__ import annotations
import os
import tatsu
from tatsu.ast import AST
# Module-level mapping from identifier name to its bound value.
symbol_table = {}
def store(id, val):
    """Bind *val* to identifier *id* in the global symbol table."""
    symbol_table[id] = val
def lookup(id):
    """Return the value bound to *id*, or *id* itself when unbound."""
    # dict.get with the identifier as its own default replaces the
    # original membership-test-then-index two-step.
    return symbol_table.get(id, id)
class ScadSemantics(object):
    """TatSu semantic actions for the SCAD grammar.

    Converts literal tokens to Python values, resolves identifiers
    through the module-level symbol table, and folds arithmetic nodes.
    """
    def include(self, ast):
        """Parse an include'd .scad file recursively and return its AST.

        NOTE(review): both open() calls leak their file handles, and the
        grammar/include paths are resolved relative to the current
        working directory — confirm callers always run from the project
        root.
        """
        print("CWD" + str(os.getcwd()))
        grammar = open('grammar/scad.tatsu').read()
        incfile = os.path.join('scad', ast.file)
        prog = open(incfile).read()
        parser = tatsu.compile(grammar)
        # Recursive parse: the included file gets a fresh semantics object.
        ast = parser.parse(prog,
            trace=False, colorize=True, semantics=ScadSemantics())
        return ast
    def int(self, ast):
        # Integer literal -> Python int (the method name shadows the
        # builtin only as an attribute; the call resolves globally).
        return int(ast)
    def fract(self, ast):
        # Fractional literal -> Python float.
        return float(ast)
    def ident(self, ast):
        # Identifier: substitute its stored value, or itself if unbound.
        return lookup(ast)
    def declaration(self, ast):
        # `name = value` — record the binding, pass the node through.
        store(ast.id, ast.value)
        return ast
    def addition(self, ast):
        return ast.left + ast.right
    def subtraction(self, ast):
        return ast.left - ast.right
    def multiplication(self, ast):
        return ast.left * ast.right
    def division(self, ast):
        return ast.left / ast.right
|
[
"roie.black@gmail.com"
] |
roie.black@gmail.com
|
bbc92ccd5d682422acb5a8364021fb0f1838bea1
|
3ec32f5aba8624125918adad5cfbc174d698526d
|
/test/functional/zmq_test.py
|
3a7b77bc01b67f696e4585ac2662d14f1115b421
|
[
"MIT"
] |
permissive
|
aixinwang/atc
|
b51b85bd91956657d70b72ca128d30132754269e
|
9f0b53af19735ce0d6a5a6feed6733a51f109019
|
refs/heads/master
| 2021-04-03T06:48:24.497048
| 2018-03-14T04:53:58
| 2018-03-14T04:59:04
| 125,152,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,297
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Ai the coins developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ API."""
import configparser
import os
import struct
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import (assert_equal,
bytes_to_hex_str,
)
class ZMQTest (BitcoinTestFramework):
    """Functional test of the node's ZeroMQ notification interface.

    Subscribes to the `hashblock` and `hashtx` topics published by
    node 0 and checks that the hashes and per-topic sequence numbers
    match what the RPC interface reports for generated blocks and for a
    wallet transaction sent from node 1.
    """
    def __init__(self):
        super().__init__()
        # Node 0 publishes ZMQ notifications; node 1 only mines/sends.
        self.num_nodes = 2
    def setup_nodes(self):
        """Skip when zmq support is unavailable, else wire up a SUB socket."""
        # Try to import python3-zmq. Skip this test if the import fails.
        try:
            import zmq
        except ImportError:
            raise SkipTest("python3-zmq module not available.")
        # Check that bitcoin has been built with ZMQ enabled
        config = configparser.ConfigParser()
        if not self.options.configfile:
            self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
        # NOTE(review): open() here is never closed explicitly.
        config.read_file(open(self.options.configfile))
        if not config["components"].getboolean("ENABLE_ZMQ"):
            raise SkipTest("bitcoind has not been built with zmq enabled.")
        self.zmqContext = zmq.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        # 60s receive timeout so a missing notification fails the test
        # rather than hanging it forever.
        self.zmqSubSocket.set(zmq.RCVTIMEO, 60000)
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
        ip_address = "tcp://127.0.0.1:28332"
        self.zmqSubSocket.connect(ip_address)
        # Only node 0 gets the -zmqpub* options (second element is empty).
        extra_args = [['-zmqpubhashtx=%s' % ip_address, '-zmqpubhashblock=%s' % ip_address], []]
        self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
    def run_test(self):
        # Ensure the ZMQ context is torn down even when the test fails.
        try:
            self._zmq_test()
        finally:
            # Destroy the zmq context
            self.log.debug("Destroying zmq context")
            self.zmqContext.destroy(linger=None)
    def _zmq_test(self):
        """Exercise hashtx/hashblock notifications for mined blocks and a tx."""
        genhashes = self.nodes[0].generate(1)
        self.sync_all()
        self.log.info("Wait for tx")
        # Multipart message layout: [topic, body, sequence-number].
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"hashtx")
        body = msg[1]
        # Sequence number is a little-endian uint32 in the last frame.
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, 0) # must be sequence 0 on hashtx
        self.log.info("Wait for block")
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, 0) # must be sequence 0 on hashblock
        blkhash = bytes_to_hex_str(body)
        assert_equal(genhashes[0], blkhash) # blockhash from generate must be equal to the hash received over zmq
        self.log.info("Generate 10 blocks (and 10 coinbase txes)")
        n = 10
        genhashes = self.nodes[1].generate(n)
        self.sync_all()
        zmqHashes = []
        blockcount = 0
        # Each mined block produces two notifications: one hashtx (the
        # coinbase) and one hashblock — hence n * 2 receives.
        for x in range(n * 2):
            msg = self.zmqSubSocket.recv_multipart()
            topic = msg[0]
            body = msg[1]
            if topic == b"hashblock":
                zmqHashes.append(bytes_to_hex_str(body))
                msgSequence = struct.unpack('<I', msg[-1])[-1]
                # Sequence numbers are per-topic and strictly increasing.
                assert_equal(msgSequence, blockcount + 1)
                blockcount += 1
        for x in range(n):
            assert_equal(genhashes[x], zmqHashes[x]) # blockhash from generate must be equal to the hash received over zmq
        self.log.info("Wait for tx from second node")
        # test tx from a second node
        hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
        self.sync_all()
        # now we should receive a zmq msg because the tx was broadcast
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        body = msg[1]
        assert_equal(topic, b"hashtx")
        hashZMQ = bytes_to_hex_str(body)
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        assert_equal(msgSequence, blockcount + 1)
        assert_equal(hashRPC, hashZMQ) # txid from sendtoaddress must be equal to the hash received over zmq
|
[
"your_email@youremail.com"
] |
your_email@youremail.com
|
6e07ce4368cf52e75c822a991a574494f9378a4d
|
f2575444e57696b83ce6dcec40ad515b56a1b3a9
|
/Algorithms/Implementation/JumpingOnTheCloudsRevisited.py
|
6ac69a0db82d7aeb16eaa9fcb0a6ad2d256bdec5
|
[] |
no_license
|
abhi10010/Hackerrank-Solutions
|
046487d79fc5bf84b4df5ef2117578d29cb19243
|
da2a57b8ebfcc330d94d104c1755b8c62a9e3e65
|
refs/heads/master
| 2021-07-24T09:41:49.995295
| 2020-07-12T09:31:58
| 2020-07-12T09:31:58
| 195,647,097
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
import math
import os
import random
import re
import sys
# (extraction artifact: a stray "." — HackerRank boilerplate omitted here)
def jumpingOnClouds(c, k):
    """Return the energy left after jumping around the circular cloud array.

    The game starts on cloud 0 with 100 energy.  Each jump moves k clouds
    forward (wrapping around) and costs 1 energy; landing on a
    thundercloud (c[i] == 1) costs 2 extra.  The game ends on returning
    to cloud 0.

    Bug fix: the original iterated `range(0, len(c), k)`, which charges
    the right set of landings only when k divides len(c), and its two
    `if i >= len(c)` / `if i > len(c)` branches were unreachable (range
    never yields such i).  The explicit wrap-around simulation below is
    correct for every valid k.
    """
    e = 100
    i = 0
    while True:
        i = (i + k) % len(c)  # land on the next cloud, wrapping around
        e -= 1                # every jump costs 1 unit of energy
        if c[i] == 1:
            e -= 2            # thunderclouds cost 2 extra
        if i == 0:            # back at the start: game over
            return e
|
[
"noreply@github.com"
] |
abhi10010.noreply@github.com
|
164e025c757cbef908707f5219e2c665aaa5261b
|
be84495751737bbf0a8b7d8db2fb737cbd9c297c
|
/sdl/tests/sdl/rnd_test.py
|
bd5ccfdfe9baac2abbb86c6b90739265c39087ae
|
[] |
no_license
|
mario007/renmas
|
5e38ff66cffb27b3edc59e95b7cf88906ccc03c9
|
bfb4e1defc88eb514e58bdff7082d722fc885e64
|
refs/heads/master
| 2021-01-10T21:29:35.019792
| 2014-08-17T19:11:51
| 2014-08-17T19:11:51
| 1,688,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
import unittest
from tdasm import Runtime
from sdl.vector import Vector2, Vector3, Vector4
from sdl.shader import Shader
from sdl.args import IntArg, FloatArg, Vec2Arg, Vec3Arg, Vec4Arg
# Shader DSL source exercising every random-number builtin of the sdl
# shading language.
code = """
p1 = rand_int()
p2 = random()
p3 = random2()
p4 = random3()
p5 = random4()
"""
# Output arguments the shader writes into; the initial values (333 etc.)
# are dummies that should be overwritten by the random builtins.
p1 = IntArg('p1', 333)
p2 = FloatArg('p2', 333.0)
p3 = Vec2Arg('p3', Vector2(0.0, 0.0))
p4 = Vec3Arg('p4', Vector3(0.0, 0.0, 0.0))
p5 = Vec4Arg('p5', Vector4(0.0, 0.0, 0.0, 0.0))
shader = Shader(code=code, args=[p1, p2, p3, p4, p5])
shader.compile()
# Assemble for a single Runtime instance and execute once.
shader.prepare([Runtime()])
shader.execute()
# Print the produced values for manual inspection (no assertions —
# this is a smoke test, not an automated one).
print(shader.get_value('p1'))
print(shader.get_value('p2'))
print(shader.get_value('p3'))
print(shader.get_value('p4'))
print(shader.get_value('p5'))
|
[
"mvidov@yahoo.com"
] |
mvidov@yahoo.com
|
c6872feee88fe1b197782ffe58764561cf3b2807
|
9f78c2bfadd1e87d779a786e7cd0952b6fbc96f1
|
/common/models/log/AppErrorLog.py
|
918ff63b12f1ffc3cbcf7a180a16e09a55e0cc6a
|
[] |
no_license
|
Erick-LONG/order
|
08393ed9b315cf2c6af5e2b9bfd6917605fe8d94
|
4b853403c9c949b3ecbe2766ec77750557cf11fc
|
refs/heads/master
| 2022-11-11T09:32:53.570524
| 2020-06-30T09:20:18
| 2020-06-30T09:20:18
| 262,786,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
# coding: utf-8
from application import db
class AppErrorLog(db.Model):
    """ORM model for the `app_error_log` table: one row per captured
    application error (request context plus error payload)."""
    __tablename__ = 'app_error_log'
    id = db.Column(db.Integer, primary_key=True)
    # Referring URL of the failing request (presumably HTTP Referer).
    referer_url = db.Column(db.String(255), nullable=False, server_default=db.FetchedValue())
    # URL that was being requested when the error occurred.
    target_url = db.Column(db.String(255), nullable=False, server_default=db.FetchedValue())
    # Raw request parameters / query string.
    query_params = db.Column(db.Text, nullable=False)
    # Error content; NOTE(review): db.String without a length — an explicit
    # length or db.Text is usually expected here, confirm against the schema.
    content = db.Column(db.String, nullable=False)
    # Populated by the database's server-side default at insert time.
    created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
|
[
"834424581@qq.com"
] |
834424581@qq.com
|
75a863a592f82faf0099f420daadea5edbe253db
|
074655fbb70dc7dad1807597efa267abb0fb3500
|
/desafios/desafio-106.py
|
a3efcfa39014af63e7cdc60e9f066c83e8da1fb4
|
[] |
no_license
|
rodrigojgrande/python-mundo
|
bfa57ff12c537084aeeb5469451e13e74c6fb9f1
|
d482c84d5c6ae8cfec79317b85390e17ede17f58
|
refs/heads/master
| 2023-04-23T08:22:45.251817
| 2021-05-19T13:08:21
| 2021-05-19T13:08:21
| 351,783,397
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
# Python exercise 106: build a mini-system that uses Python's Interactive
# Help. The user types a command and its manual is shown. When the user
# types the word 'FIM' ("END"), the program terminates. Important: use colors.
from time import sleep
# ANSI escape sequences, indexed by the colour codes used by titulo()/ajuda().
cor = ('\033[m', # 0 - reset (no colour)
       '\033[0;30;41m', # 1 - red background
       '\033[0;30;42m', # 2 - green background
       '\033[0;30;43m', # 3 - yellow background
       '\033[0;30;44m', # 4 - blue background
       '\033[0;30;45m', # 5 - purple background
       '\033[7;30m' # 6 - white (inverse video)
       );
def ajuda(comando):
    """Show Python's interactive help for *comando* inside coloured framing.

    The banner text is Portuguese for "Accessing the manual of command
    '<comando>'".
    """
    titulo(f'Acessando o manual do comando \'{comando}\'', 4)
    print(cor[6], end='')  # switch to the white/inverse colour scheme
    help(comando)
    print(cor[0], end='')  # reset colours
    sleep(2)
def titulo(msg, c=0):
    """Print *msg* framed by '~' rules, using colour index *c* of `cor`."""
    # Width of the frame: message length plus two spaces of padding per side.
    tamanho = len(msg) + 4
    print(cor[c], end='')
    print('~' * tamanho)
    print(f'  {msg}')
    print('~' * tamanho)
    print(cor[0], end='')  # reset colours
    sleep(1)
# Main program: repeatedly ask for a function/library name and show its
# help; stop when the user types 'FIM' (Portuguese for "END").
comando = ''
while True:
    titulo('SISTEMA DE AJUDA PyHELP', 2)
    comando = str(input("Função ou Biblioteca > "))
    if comando.upper() == 'FIM':
        break
    else:
        ajuda(comando)
titulo('ATÉ LOGO', 1)
|
[
"rodrigojgrande@gmail.com"
] |
rodrigojgrande@gmail.com
|
efc52d8b10f9081ff6a555ce6d84839a77e88f05
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02973/s843936905.py
|
1546a742a043329f3c3d9e840dbeb8dda98ec3c7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
from bisect import bisect_right
def main():
    """Read n integers from stdin and print a patience-style pile count.

    `work` holds the current pile tops padded with +inf; scanning the
    input right-to-left, each value is placed at the first slot strictly
    greater than it (bisect_right keeps `work` sorted).  The printed
    answer is the number of slots actually used.
    NOTE(review): this appears to equal the minimum number of monotone
    sequences the input can be partitioned into — confirm against the
    AtCoder problem statement for p02973.
    """
    inf=float("inf")
    n=int(input())
    alst=[int(input()) for _ in range(n)]
    work=[inf]*n
    for i in range(n-1,-1,-1):
        j=bisect_right(work,alst[i])
        work[j]=alst[i]
    # Slots still at +inf were never used.
    print(n-work.count(inf))
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
332db90717d18029d34aa1bbca1ce2d43fdd2a1d
|
e495c3f9227d790f3c08a56c357f2a95c167ec9c
|
/zerojudge.tw/a780.py
|
5d40bbaa97c9b43d36b1a9634123d570ef876590
|
[] |
no_license
|
InfiniteWing/Solves
|
84d894d66588693c73ec1dcaebe3b8b148e1d224
|
a0f8f09fac5e462d7d12a23ccd8414bd5ff8ffad
|
refs/heads/master
| 2021-10-11T00:17:37.367776
| 2019-01-19T15:07:54
| 2019-01-19T15:07:54
| 103,742,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
def main():
    """Solve ZeroJudge a780: read lines of three numbers until EOF or an
    all-zero terminator line.

    For each line (o, e, a) it prints m = o / e and f = a / m, each
    rounded and formatted to two decimals.
    NOTE(review): what o/e/a denote is not visible here — confirm
    against the problem statement.
    """
    while True:
        try:
            s = input()
        except EOFError:
            # Input may also simply end without the 0 0 0 sentinel.
            break
        o, e, a = float(s.split()[0]),float(s.split()[1]),float(s.split()[2])
        # An all-zero line is the explicit terminator.
        if(o == 0 and e == 0 and a == 0):
            break
        m = o / e
        f = a / m
        print('{0:.2f}'.format(round(m,2)),'{0:.2f}'.format(round(f,2)))
main()
|
[
"sars6608@gmail.com"
] |
sars6608@gmail.com
|
d8f453cbc8d6faf8544ab9e6c7c8f3f69eca3db6
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part007491.py
|
e36cb723cb2eaeafc9ed54508427de7131e5b47c
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher25426(CommutativeMatcher):
    """Auto-generated matchpy many-to-one matcher for commutative Mul
    patterns (rubi_generated output) — edits are normally regenerated away.
    """
    # Shared singleton instance, created lazily by get().
    _instance = None
    # pattern id -> (index, constant-subject multiset, variable slots).
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.1.1.2.2.2.1.0', 1, 1, S(1)), Mul),
            (VariableWithCount('i2.2.1.0', 1, 1, None), Mul)
        ]),
        1: (1, Multiset({}), [
            (VariableWithCount('i2.1.1.2.2.2.1.0', 1, 1, S(1)), Mul),
            (VariableWithCount('i2.2.1.1', 1, 1, None), Mul)
        ]),
        2: (2, Multiset({}), [
            (VariableWithCount('i2.1.1.2.2.2.1.0', 1, 1, S(1)), Mul),
            (VariableWithCount('i2.3.1.0', 1, 1, None), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    # The commutative/associative operation these patterns match under.
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily create and return the shared singleton instance.
        if CommutativeMatcher25426._instance is None:
            CommutativeMatcher25426._instance = CommutativeMatcher25426()
        return CommutativeMatcher25426._instance
    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 25425
        # Intentional: the bare `return` before `yield` makes this a
        # generator function that yields nothing (empty state machine).
        return
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
e646e840415066474e090129909b4fa89a674716
|
334d0a4652c44d0c313e11b6dcf8fb89829c6dbe
|
/checkov/terraform/checks/provider/bridgecrew/credentials.py
|
fac41da6ae3817889168443670a4b19c7c89d1ea
|
[
"Apache-2.0"
] |
permissive
|
schosterbarak/checkov
|
4131e03b88ae91d82b2fa211f17e370a6f881157
|
ea6d697de4de2083c8f6a7aa9ceceffd6b621b58
|
refs/heads/master
| 2022-05-22T18:12:40.994315
| 2022-04-28T07:44:05
| 2022-04-28T07:59:17
| 233,451,426
| 0
| 0
|
Apache-2.0
| 2020-03-23T12:12:23
| 2020-01-12T20:07:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import re
from typing import Dict, List, Any
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.provider.base_check import BaseProviderCheck
from checkov.common.models.consts import bridgecrew_token_pattern
class BridgecrewCredentials(BaseProviderCheck):
    """Checkov provider check: flag hard-coded Bridgecrew API tokens in a
    `bridgecrew` provider block (check id CKV_BCW_1)."""
    def __init__(self) -> None:
        name = "Ensure no hard coded API token exist in the provider"
        id = "CKV_BCW_1"
        supported_provider = ["bridgecrew"]
        categories = [CheckCategories.SECRETS]
        super().__init__(name=name, id=id, categories=categories, supported_provider=supported_provider)
    def scan_provider_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
        """FAIL when the provider's `token` matches the Bridgecrew token pattern."""
        if self.secret_found(conf, "token", bridgecrew_token_pattern):
            return CheckResult.FAILED
        return CheckResult.PASSED
    @staticmethod
    def secret_found(conf: Dict[str, List[Any]], field: str, pattern: str) -> bool:
        """Return True when conf[field][0] matches *pattern*.

        NOTE(review): re.match anchors only at the start of the value;
        presumably bridgecrew_token_pattern constrains the full string —
        confirm against its definition.
        """
        if field in conf.keys():
            # Terraform parser wraps scalar values in a one-element list.
            value = conf[field][0]
            if re.match(pattern, value) is not None:
                return True
        return False
check = BridgecrewCredentials()
|
[
"noreply@github.com"
] |
schosterbarak.noreply@github.com
|
71ad3f0dc161322df687c69ddedcd303e2fee89f
|
3cd75f3d62911ba3d2114f95203e81d91be32877
|
/1day/Str09.py
|
432786c3f756f162e0a411ef5774f40ee0cbf828
|
[] |
no_license
|
kukukuni/Python_ex
|
3667e2fe1db3a161d9e6acd5d8145a3e692f8e89
|
e56d10bbcf3dc33b4422462a5b3c2dedb082b8c3
|
refs/heads/master
| 2022-11-05T13:58:55.770984
| 2019-04-14T00:57:18
| 2019-04-14T00:57:18
| 181,244,073
| 0
| 1
| null | 2022-10-23T06:38:06
| 2019-04-14T00:50:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 359
|
py
|
# Str09.py
'''
print('숫자 1번 입력'); n1 = int(input())
print('숫자 2번 입력'); n2 = int(input())
print(n1+n2)
print('숫자 2개 입력')
n1, n2 = input().split(',')
print(int(n1)+int(n2))
print('숫자 2개 입력')
n1, n2 = map(int,input().split(','))
print(n1+n2)
'''
# Read two comma-separated integers from one input line (the prompt is
# Korean for "enter 2 numbers") and print their sum.
n1, n2 = map(int,input('숫자 2개 입력\n').split(','))
print(n1+n2)
|
[
"mxneyelo@gmail.com"
] |
mxneyelo@gmail.com
|
d6db57ca78ffbfbe55bff62613e68d9b7b0a32b5
|
d78e59d285cdd1e16f1d7d836d39715e8a581c8b
|
/machine_learning/ml_basic/lab10_NN_ReLu_Xavier_Dropout_and_Adam/06_dropout_for_mnist.py
|
7ddee527ad9b4d1754910dc4f1f7f773f5825539
|
[] |
no_license
|
egaoneko/study
|
79c11e0df9d713b05babde3461f2e74f3906ad80
|
b965654c7cc8e8361f5ec0596af57c55d35137cc
|
refs/heads/master
| 2020-04-12T09:04:55.131290
| 2017-09-03T10:13:04
| 2017-09-03T10:13:04
| 54,833,896
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,649
|
py
|
'''
A logistic regression learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
# Import MINST data
from random import randint
import math
from matplotlib import pyplot as plt
# from lab07_App_and_Tip import input_data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
import tensorflow as tf
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# tf Graph Input
X = tf.placeholder("float", [None, 784]) # mnist data image of shape 28*28=784
Y = tf.placeholder("float", [None, 10]) # 0-9 digits recognition => 10 classes
def xavier_init(n_inputs: int, n_outputs: int, uniform: bool = True):
    """Set the parameter initialization using the method described.
    This method is designed to keep the scale of the gradients roughly the same
    in all layers.
    Xavier Glorot and Yoshua Bengio (2010):
             Understanding the difficulty of training deep feedforward neural
             networks. International conference on artificial intelligence and
             statistics.
    Args:
      n_inputs: The number of input nodes into each output.
      n_outputs: The number of output nodes for each input.
      uniform: If true use a uniform distribution, otherwise use a normal.
    Returns:
      An initializer.
    """
    # NOTE(review): returns TF1-era initializer objects
    # (tf.random_uniform_initializer / tf.truncated_normal_initializer).
    if uniform:
        # 6 was used in the paper.
        init_range = math.sqrt(6.0 / (n_inputs + n_outputs))
        return tf.random_uniform_initializer(-init_range, init_range)
    else:
        # 3 gives us approximately the same limits as above since this repicks
        # values greater than 2 standard deviations from the mean.
        stddev = math.sqrt(3.0 / (n_inputs + n_outputs))
        return tf.truncated_normal_initializer(stddev=stddev)
# Set model weights
W1 = tf.get_variable("W1", shape=[784, 256], initializer=xavier_init(784, 256))
W2 = tf.get_variable("W2", shape=[256, 256], initializer=xavier_init(256, 256))
W3 = tf.get_variable("W3", shape=[256, 256], initializer=xavier_init(256, 256))
W4 = tf.get_variable("W4", shape=[256, 256], initializer=xavier_init(256, 256))
W5 = tf.get_variable("W5", shape=[256, 10], initializer=xavier_init(256, 10))
B1 = tf.Variable(tf.random_normal([256]))
B2 = tf.Variable(tf.random_normal([256]))
B3 = tf.Variable(tf.random_normal([256]))
B4 = tf.Variable(tf.random_normal([256]))
B5 = tf.Variable(tf.random_normal([10]))
# Construct model
dropout_rate = tf.placeholder("float")
_L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), B1)) # Hidden layer with RELU activation
L1 = tf.nn.dropout(_L1, dropout_rate)
_L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), B2)) # Hidden layer with RELU activation
L2 = tf.nn.dropout(_L2, dropout_rate)
_L3 = tf.nn.relu(tf.add(tf.matmul(L2, W3), B3)) # Hidden layer with RELU activation
L3 = tf.nn.dropout(_L3, dropout_rate)
_L4 = tf.nn.relu(tf.add(tf.matmul(L3, W4), B4)) # Hidden layer with RELU activation
L4 = tf.nn.dropout(_L4, dropout_rate)
hypothesis = tf.add(tf.matmul(L2, W5), B5) # No need to use softmax here
# Minimize error using cross entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(hypothesis, Y)) # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Gradient Descent
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys, dropout_rate: 0.7}) / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy:", accuracy.eval({X: mnist.test.images, Y: mnist.test.labels, dropout_rate: 1}))
|
[
"egaoneko@naver.com"
] |
egaoneko@naver.com
|
442f038c532429a81876bc8698542bb72ca76432
|
f8376e83352d2dfab28c41f24345071a77a45fd9
|
/Regular Expression/re/phn.py
|
80c12d5a86a9d9db9511ad8c56d94500a7275064
|
[] |
no_license
|
harihavwas/pythonProgram
|
2111ee98eccda68165159db0305c413ee53ee38a
|
126df8b3a418dbaf618575b450fd4cfde44c80a7
|
refs/heads/master
| 2023-07-27T23:39:10.867329
| 2021-09-16T15:35:00
| 2021-09-16T15:35:00
| 402,320,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
import re
# Indian phone number: literal "+91" followed by exactly ten digits.
# Raw string avoids relying on Python leaving the unknown '\d' escape
# intact; compiled once since it is applied per line.
x = re.compile(r'[+][9][1]\d{10}$')
# `with` guarantees the file is closed (the original leaked the handle).
with open('phn', 'r') as f:
    for i in f:
        s = i.rstrip('\n')
        # fullmatch: the whole line must be the phone number.
        if x.fullmatch(s) is not None:
            print(s)
|
[
"hkk4harikrishnan@gmail.com"
] |
hkk4harikrishnan@gmail.com
|
5210afc84531e89637ca9db677df678b13d46e8d
|
f89cd872172489785df20354c7a78bc332c4d894
|
/equationTemplate.py
|
e46c6c29a3f9ac3c018bd91bbd236fa72b2eb375
|
[] |
no_license
|
amararora07/CodeFights
|
d565ed21b1f5c2fbe4d902159db61bee8244e1c8
|
51e3cb75eb32d22dac60f380b1f5b87822678c20
|
refs/heads/master
| 2021-09-06T15:45:08.716269
| 2018-02-08T06:06:52
| 2018-02-08T06:06:52
| 109,230,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from itertools import permutations as p
def equationTemplate(v):
    """Return True if the four values in *v* can be arranged (in some
    order a, b, c, d) to satisfy either a*b*c == d or a*b == c*d.

    Bug fix: the original tested `x[3]` — an undefined name — instead of
    `i[3]`, so the first permutation raised NameError.
    """
    for a, b, c, d in p(v, 4):
        if a * b * c == d:
            return True
        if a * b == c * d:
            return True
    return False
|
[
"noreply@github.com"
] |
amararora07.noreply@github.com
|
380cb5a1e2cf85fb3c7431a312ad036cab0a410f
|
000e9c92b8b86402ab506a191cc60302f2c269a3
|
/orders/migrations/0004_translations.py
|
c2c7f568841f74a299851a573b8e9ccd93dfb5b5
|
[
"MIT"
] |
permissive
|
FrankCasanova/onlineshop
|
71c29fe3cc6a1dbb715474ffb09bde98443591af
|
1a9011ce3d49976e2584cdadc33893d04947a73b
|
refs/heads/master
| 2023-08-25T20:24:15.754513
| 2021-10-22T16:59:34
| 2021-10-22T16:59:34
| 406,788,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
# Generated by Django 3.2.8 on 2021-10-17 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: attaches verbose_name labels to
    the Order contact/address fields (field-metadata change only;
    presumably no actual column alteration at the database level)."""
    dependencies = [
        ('orders', '0003_auto_20211011_1122'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='address',
            field=models.CharField(max_length=250, verbose_name='address'),
        ),
        migrations.AlterField(
            model_name='order',
            name='city',
            field=models.CharField(max_length=100, verbose_name='city'),
        ),
        migrations.AlterField(
            model_name='order',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='e-mail'),
        ),
        migrations.AlterField(
            model_name='order',
            name='first_name',
            field=models.CharField(max_length=50, verbose_name='first_name'),
        ),
        migrations.AlterField(
            model_name='order',
            name='last_name',
            field=models.CharField(max_length=50, verbose_name='last_name'),
        ),
        migrations.AlterField(
            model_name='order',
            name='postal_code',
            field=models.CharField(max_length=20, verbose_name='postal code'),
        ),
    ]
|
[
"frankcasanova.info@gmail.com"
] |
frankcasanova.info@gmail.com
|
8a06a2e7dfcfe5bf589af9767e48dd05d03919eb
|
cc6e7f63eaf4b3570771c46fb8b24b88e6e1f59e
|
/typical/TDPC/A.py
|
0980d25005faca221dd08fe47b5fde2dab33484c
|
[] |
no_license
|
kamojiro/atcoderall
|
82a39e9be083a01c14445417597bf357e6c854a8
|
973af643c06125f52d302a5bc1d65f07a9414419
|
refs/heads/master
| 2022-07-12T00:14:38.803239
| 2022-06-23T10:24:54
| 2022-06-23T10:24:54
| 161,755,381
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from copy import deepcopy
# TDPC A: count how many distinct total scores are attainable by taking
# any subset of the N point values (classic subset-sum over a set).
N = int(input())
P = list(map(int, input().split()))
Q = {0}
for i in range(N):
    # A set comprehension of shifted sums replaces the original
    # deepcopy(Q) snapshot, which was needlessly expensive for a set of
    # ints (a shallow copy — or no copy at all with |= — suffices).
    Q |= {q + P[i] for q in Q}
print(len(Q))
|
[
"tamagoma002@yahoo.co.jp"
] |
tamagoma002@yahoo.co.jp
|
2bedb12cb2b704a1990287c3c9623e526b68825e
|
833a83e8fd32041b20c0a13f6bf0759c4067bee6
|
/homeworkpal_project/maximo/migrations/0003_auto_20151104_2238.py
|
acc9ef30e6f720f7fc7d129fd7a46d5c7829d0d7
|
[
"MIT"
] |
permissive
|
luiscberrocal/homeworkpal
|
ac92de1dcbd43d9290fde8174f4d4544ed2cad23
|
342acf876264fade818b107f4af13cac067f1ace
|
refs/heads/master
| 2020-12-12T13:13:47.022473
| 2015-12-29T19:38:43
| 2015-12-29T19:38:43
| 44,059,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: makes DataDocument.processed
    optional (null=True in the database, blank=True in forms)."""
    dependencies = [
        ('maximo', '0002_datadocument'),
    ]
    operations = [
        migrations.AlterField(
            model_name='datadocument',
            name='processed',
            field=models.DateTimeField(null=True, blank=True),
        ),
    ]
|
[
"luis.berrocal.1942@gmail.com"
] |
luis.berrocal.1942@gmail.com
|
21fbee6df470e1100c9990e738892141bd260770
|
a364f53dda3a96c59b2b54799907f7d5cde57214
|
/easy/278-First Bad Version.py
|
b8730330fdf693d30feb90c5163921ac734e1e16
|
[
"Apache-2.0"
] |
permissive
|
Davidxswang/leetcode
|
641cc5c10d2a97d5eb0396be0cfc818f371aff52
|
d554b7f5228f14c646f726ddb91014a612673e06
|
refs/heads/master
| 2022-12-24T11:31:48.930229
| 2020-10-08T06:02:57
| 2020-10-08T06:02:57
| 260,053,912
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,668
|
py
|
"""
You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of your product fails the quality check. Since each version is developed based on the previous version, all the versions after a bad version are also bad.
Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the following ones to be bad.
You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to find the first bad version. You should minimize the number of calls to the API.
Example:
Given n = 5, and version = 4 is the first bad version.
call isBadVersion(3) -> false
call isBadVersion(5) -> true
call isBadVersion(4) -> true
Then 4 is the first bad version.
"""
# The tricky part here is the case start == end.
# time complexity: O(logn), space complexity: O(logn) due to the function call stack
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return an integer
# def isBadVersion(version):
class Solution:
    def firstBadVersion(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Recursive binary search over versions [1, n]; `isBadVersion`
        # is the judge-provided API defined outside this file.
        return self.check(1, n)
    def check(self, start: int, end: int) -> int:
        """Return the first bad version within [start, end] inclusive.

        Invariant: the first bad version always lies inside the range.
        """
        if start == end:
            # Range narrowed to a single candidate: it must be the answer.
            return start
        # start + (end - start) // 2 mirrors the classic overflow-safe
        # midpoint idiom (Python ints cannot overflow, but the form is kept).
        middle = start + (end - start) // 2
        middleResult = isBadVersion(middle)
        if middleResult:
            # middle is bad -> the first bad version is at or before middle.
            return self.check(start, middle)
        else:
            middle1Result = isBadVersion(middle + 1)
            if middle1Result:
                # middle good but middle+1 bad -> middle+1 is the answer.
                return middle + 1
            else:
                # Both good -> the answer lies strictly after middle+1's
                # left neighbourhood; recurse on the right half.
                return self.check(middle + 1, end)
|
[
"wxs199327@hotmail.com"
] |
wxs199327@hotmail.com
|
5b74f7b4264a2bdcf246eb141174ffb4f69616fe
|
9fbe90eab4cb25022e7c93776da3a5733656a09a
|
/examples/boardgame/player.py
|
039c9731b60bbac9246f112b24ac637561d86a8e
|
[
"MIT"
] |
permissive
|
Nathanator/networkzero
|
453e218d6e0b8080158cb968f4acc5e0cb0fb65c
|
e6bf437f424660c32cf1ef81f83d9eee925f44e7
|
refs/heads/master
| 2021-01-15T13:14:53.101742
| 2016-04-07T20:32:28
| 2016-04-07T20:32:28
| 55,724,894
| 0
| 0
| null | 2016-04-07T20:12:18
| 2016-04-07T20:12:17
| null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
import networkzero as nw0
# Locate the "board" service advertised on the local network.
address = nw0.discover("board")
player = input("Which player? ")
# Forward each move the user types to the board as a MOVE command
# (runs until interrupted; there is no quit command).
while True:
    move = input("Move: ")
    nw0.send_command(address, "MOVE '%s' '%s'" % (player, move))
|
[
"mail@timgolden.me.uk"
] |
mail@timgolden.me.uk
|
13fa8feb12381497d43c29fb6b3033f1e053a654
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/090d103842942eab3616de2464e26c4db3b402611e2f44f446e8b4086b8df170/xml/parsers/expat/model.py
|
a7f24af4b3f303679b553ba09459d025a5309dbf
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
# encoding: utf-8
# module xml.parsers.expat.model calls itself pyexpat.model
# from C:\Users\Doly\Anaconda3\lib\site-packages\numba\npyufunc\workqueue.cp37-win_amd64.pyd
# by generator 1.147
""" Constants used to interpret content model information. """
# no imports
# Variables with simple values
XML_CQUANT_NONE = 0
XML_CQUANT_OPT = 1
XML_CQUANT_PLUS = 3
XML_CQUANT_REP = 2
XML_CTYPE_ANY = 2
XML_CTYPE_CHOICE = 5
XML_CTYPE_EMPTY = 1
XML_CTYPE_MIXED = 3
XML_CTYPE_NAME = 4
XML_CTYPE_SEQ = 6
__loader__ = None
__spec__ = None
# no functions
# no classes
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
aaef2d15129a5165f1996b41811e74b2bb8706b9
|
34599596e145555fde0d4264a1d222f951f49051
|
/pcat2py/class/20f21bf6-5cc5-11e4-af55-00155d01fe08.py
|
291d4124651b9efe02966dcbd2fccda4c97ca607
|
[
"MIT"
] |
permissive
|
phnomcobra/PCAT2PY
|
dc2fcbee142ce442e53da08476bfe4e68619346d
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
refs/heads/master
| 2021-01-11T02:23:30.669168
| 2018-02-13T17:04:03
| 2018-02-13T17:04:03
| 70,970,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
#!/usr/bin/python
################################################################################
# 20f21bf6-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Compliance finding for one benchmark item: the Windows registry
    value HKLM\\...\\Winlogon\\Allocatefloppies must be "0".

    `check` inspects the value over the given cli session; `fix` forces
    it to 0 via PowerShell.
    """
    def __init__(self):
        # Human-readable lines describing what was inspected (for reports).
        self.output = []
        self.is_compliant = False
        # Catalogue identifier of this finding.
        self.uuid = "20f21bf6-5cc5-11e4-af55-00155d01fe08"
    def check(self, cli):
        """Return True when the registry value reads as the string "0"."""
        # Initialize Compliance
        self.is_compliant = False
        # Get Registry DWORD
        sz = cli.get_reg_sz(r'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon', 'Allocatefloppies')
        # Output Lines
        self.output = [r'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon', ('Allocatefloppies=' + sz)]
        if sz == "0":
            self.is_compliant = True
        return self.is_compliant
    def fix(self, cli):
        """Create the registry path step by step, then set the value to 0."""
        cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion'")
        cli.powershell(r"New-Item -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon'")
        cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Microsoft\Windows NT\CurrentVersion\Winlogon' -name 'Allocatefloppies' -value 0")
|
[
"phnomcobra@gmail.com"
] |
phnomcobra@gmail.com
|
d979d6055cd3b9523c5c7306b9146672c4d1ba5a
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/journey.py
|
fbec726990b63d058dbf48495740315328d8d4e3
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
from __future__ import annotations
from dataclasses import dataclass, field
from xsdata.models.datatype import XmlDuration
from travelport.models.air_segment_ref import AirSegmentRef
__NAMESPACE__ = "http://www.travelport.com/schema/air_v52_0"
@dataclass
class Journey:
    """
    Information about all connecting segment list and total traveling time.
    Parameters
    ----------
    air_segment_ref
    travel_time
        Total traveling time that is difference between the departure time
        of the first segment and the arrival time of the last segments for
        that particular entire set of connection.
    """
    class Meta:
        # XML namespace the element is serialized under (xsdata binding).
        namespace = "http://www.travelport.com/schema/air_v52_0"
    # Repeated <AirSegmentRef> child elements (schema allows up to 999).
    air_segment_ref: list[AirSegmentRef] = field(
        default_factory=list,
        metadata={
            "name": "AirSegmentRef",
            "type": "Element",
            "max_occurs": 999,
        }
    )
    # Optional TravelTime XML attribute; None when absent from the document.
    travel_time: None | XmlDuration = field(
        default=None,
        metadata={
            "name": "TravelTime",
            "type": "Attribute",
        }
    )
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
6f7393b8be9b1f7cdda141ca678315df0f7d0786
|
288a00d2ab34cba6c389b8c2444455aee55a8a95
|
/tests/data23/recipe-435885.py
|
e88c6b46b1f95afffedafd3382b1d82cbf0470bf
|
[
"BSD-2-Clause"
] |
permissive
|
JohannesBuchner/pystrict3
|
ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb
|
18b0dd369082422f9bf0f89c72e7acb53a49849c
|
refs/heads/master
| 2023-08-14T06:37:37.954880
| 2023-07-13T11:16:38
| 2023-07-13T11:16:38
| 268,571,175
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
# dispatch.py
# definitions:
import threading
class Dispatcher(object):
    """Multicast delegate: register callables with ``+=``, remove with ``-=``,
    and invoke every registered target by calling the dispatcher itself.

    Parameters
    ----------
    targets : list of callables, optional
        Initial targets.  The list object is stored as-is (not copied), so
        external mutation of the passed list remains visible here.
    nonBlocking : bool
        When True (the default), dispatch runs on a background thread via
        ``threading.Timer(0, ...)`` so the caller does not block; ordering
        relative to the caller is then not deterministic.
    """

    def __init__(self, targets=None, nonBlocking=True):
        # Fix: the original tested `not targets or targets is None`; the
        # `is None` clause is redundant because None is already falsy.
        self._targets = targets if targets else []
        self._nonBlocking = nonBlocking

    def __iadd__(self, target):
        """Register *target*; duplicates are allowed and invoked once each."""
        self._targets.append(target)
        return self

    def __isub__(self, target):
        """Remove one registration of *target* (ValueError if not present)."""
        self._targets.remove(target)
        return self

    def isNonBlocking(self):
        """Return True when dispatch happens on a background thread."""
        return self._nonBlocking

    nonBlocking = property(isNonBlocking)

    def __call__(self, *listArgs, **kwArgs):
        """Invoke every registered target with the given arguments."""
        def invokeTargets():
            for target in self._targets:
                target(*listArgs, **kwArgs)
        if self.nonBlocking:
            # Timer(0, ...) fires as soon as possible on its own thread.
            threading.Timer(0, invokeTargets).start()
        else:
            invokeTargets()
# demos:
def Test1():
    """
    A simple example demonstrating most functionality.

    Uses the default (non-blocking) dispatcher, so the printed output from
    targets may interleave with the 'Dispatching:' lines.
    """
    def m1():
        print('m1 invoked')
    def m2():
        print('m2 invoked')
    e = Dispatcher()
    e += m1
    e += m2
    e += m2  # registered twice -> invoked twice per dispatch
    print('Dispatching:')
    e()
    e -= m1
    print('Dispatching:')
    e()
    e -= m2  # removes only one of the two m2 registrations
    print('Dispatching:')
    e()
def Test2():
    """
    A more realistic example for the OO programmer.

    NOTE(review): `locationChanged` is a CLASS attribute, so the one
    Dispatcher instance is shared by every Sprite -- handlers registered on
    one sprite fire for all sprites.  Verify that is the intended semantics.
    """
    class Sprite(object):
        def __init__(self, location):
            self._location = location
        locationChanged = Dispatcher()
        def getLocation(self):
            return self._location
        def setLocation(self, newLocation):
            oldLocation = self._location
            self._location = newLocation
            # Dispatch a "property change event"
            self.locationChanged(oldLocation, newLocation)
        location = property(getLocation, setLocation)
    s = Sprite((2,4))
    def SpriteLocationChanged(oldLocation, newLocation):
        print('oldLocation =', oldLocation)
        print('newLocation =', newLocation)
    s.locationChanged += SpriteLocationChanged
    s.location = (3,4)
    s.location = (4,4)
# Run both demos when executed as a script.
if __name__ == '__main__':
    Test1()
    Test2()
|
[
"johannes.buchner.acad@gmx.com"
] |
johannes.buchner.acad@gmx.com
|
cb0e7bf0d07ab3a63cbf6e86a3f500d771f3843e
|
aced407b41f6669f69e9eb8bd599260d50c0bd3f
|
/server/libs/top/api/rest/TradeShippingaddressUpdateRequest.py
|
0f5dd43dd498846c1ab1208cb7481da7a49cf645
|
[] |
no_license
|
alswl/music_sofa
|
42f7d15431f11b97bf67b604cfde0a0e9e3860cc
|
c4e5425ef6c80c3e57c91ba568f7cbfe63faa378
|
refs/heads/master
| 2016-09-12T18:37:34.357510
| 2016-05-20T11:49:52
| 2016-05-20T11:49:52
| 58,946,171
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
'''
Created by auto_sdk on 2013-11-07 12:53:22
'''
from top.api.base import RestApi
class TradeShippingaddressUpdateRequest(RestApi):
    """Auto-generated request model for the Taobao REST API method
    'taobao.trade.shippingaddress.update'.

    Each attribute mirrors one request parameter; leave a field as None to
    omit it from the request.
    """
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        self.receiver_address = None
        self.receiver_city = None
        self.receiver_district = None
        self.receiver_mobile = None
        self.receiver_name = None
        self.receiver_phone = None
        self.receiver_state = None
        self.receiver_zip = None
        self.tid = None  # order (trade) id the new address applies to

    def getapiname(self):
        """Return the remote API method name used by RestApi dispatch."""
        return 'taobao.trade.shippingaddress.update'
|
[
"alswlx@gmail.com"
] |
alswlx@gmail.com
|
0203ee949e80c337db199c170eae4e2cfd879524
|
0930b6c994225d44818887716ce4e8771af86b81
|
/exercisesDosCapitulos/10-arquivosEExcecoes/10.1-aprendendoPython/aprendendoPython.py
|
6e32d8cb6c3d8180a69b10b03a53d70b8a10c8cd
|
[] |
no_license
|
jonasht/cursoIntesivoDePython
|
44d81b08f1652c4fa7a6d14a0e3f62ee8e06944c
|
fb5f5c9884fb1a6062a7c4e7676e5cc3b13c0ebb
|
refs/heads/master
| 2023-05-23T20:44:34.372825
| 2021-06-19T12:13:46
| 2021-06-19T12:13:46
| 293,325,804
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
# To find the file on Ubuntu, run in the terminal:
#   code "<directory>" of the file, so it can work
arquivoDeTexto = 't.txt'

def l(): print('=-'*40+'=')

print()
l()
# Read the entire file in one go with arquivo.read().
print('mostrando um arquivo todo de uma so vez com arquivo.read()')
with open(arquivoDeTexto) as arq:
    palavras = arq.read()
    print(palavras)
l()
# Iterate over the file object line by line with a "for" loop.
print('percorrendo o objeto arquivo com um laço "for" ')
with open(arquivoDeTexto) as arquivo:
    for frase in arquivo:
        print(frase.rstrip())
l()
# Store the lines in a list and use them outside the "with" block.
print('armazendo linhas em uma lista e trabalhando com ela fora do "with" usando um "for"')
with open(arquivoDeTexto) as arquivo:
    linhas = arquivo.readlines()
for linha in linhas:
    print(linha.rstrip())
l()
|
[
"jhenriquet@outlook.com.br"
] |
jhenriquet@outlook.com.br
|
f9a3905061d392da39f24c565147913325dbd3f4
|
3b625b6a8867c71399b421615f2391269e6dee53
|
/appfordomaintests_de_2065/wsgi.py
|
1c0dab674258642ab854785653e2665b332b5146
|
[] |
no_license
|
crowdbotics-apps/appfordomaintests-de-2065
|
cd691f1b94ed3f792724f7d0316518400c07619c
|
78e2519a37f767953064c31e898d08b7b395b6bb
|
refs/heads/master
| 2022-04-15T19:07:57.805517
| 2020-03-19T16:48:08
| 2020-03-19T16:48:08
| 248,555,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
"""
WSGI config for appfordomaintests_de_2065 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'appfordomaintests_de_2065.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
0670598f0b9e25686ea6b06c303213ef4d557478
|
5e2dddce9c67d5b54d203776acd38d425dbd3398
|
/spacy/lang/bn/tokenizer_exceptions.py
|
82f3cfaf78640cf4e4be76697544dcba61533595
|
[
"MIT"
] |
permissive
|
yuxuan2015/spacy_zh_model
|
8164a608b825844e9c58d946dcc8698853075e37
|
e89e00497ab3dad0dd034933e25bc2c3f7888737
|
refs/heads/master
| 2020-05-15T11:07:52.906139
| 2019-08-27T08:28:11
| 2019-08-27T08:28:11
| 182,213,671
| 1
| 0
| null | 2019-04-19T06:27:18
| 2019-04-19T06:27:17
| null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
# coding=utf-8
from __future__ import unicode_literals
from ...symbols import ORTH, LEMMA
# Bengali abbreviations that must stay a single token, each paired with its
# fully expanded lemma.
_abbrev_entries = [
    {ORTH: "ডঃ", LEMMA: "ডক্টর"},
    {ORTH: "ডাঃ", LEMMA: "ডাক্তার"},
    {ORTH: "ড.", LEMMA: "ডক্টর"},
    {ORTH: "ডা.", LEMMA: "ডাক্তার"},
    {ORTH: "মোঃ", LEMMA: "মোহাম্মদ"},
    {ORTH: "মো.", LEMMA: "মোহাম্মদ"},
    {ORTH: "সে.", LEMMA: "সেলসিয়াস"},
    {ORTH: "কি.মি.", LEMMA: "কিলোমিটার"},
    {ORTH: "কি.মি", LEMMA: "কিলোমিটার"},
    {ORTH: "সে.মি.", LEMMA: "সেন্টিমিটার"},
    {ORTH: "সে.মি", LEMMA: "সেন্টিমিটার"},
    {ORTH: "মি.লি.", LEMMA: "মিলিলিটার"},
]

# One tokenizer exception per abbreviation: surface form -> one-token analysis.
_exc = {entry[ORTH]: [entry] for entry in _abbrev_entries}

TOKENIZER_EXCEPTIONS = _exc
|
[
"yuxuan2015@example.com"
] |
yuxuan2015@example.com
|
34be8784c8de3e7f0b2d38864291b7b19e58d65a
|
d9a4121ac2872bbe3f76564caebe6818dc5888a7
|
/tests/test_analysis_submission_response.py
|
84d476d5916af4ccb26e3b41aa77c5f6c8d8d179
|
[
"MIT"
] |
permissive
|
s0b0lev/mythx-models
|
ecb07abada43eb9c26929bfd6cd76dca9105207f
|
0fc14fef9e41a68a7d97e0bb170fd0eca5693d9a
|
refs/heads/master
| 2020-08-20T19:22:14.320454
| 2019-10-11T08:32:04
| 2019-10-11T08:32:04
| 216,057,981
| 0
| 0
|
MIT
| 2019-10-18T15:47:10
| 2019-10-18T15:47:09
| null |
UTF-8
|
Python
| false
| false
| 2,212
|
py
|
import json
import pytest
from mythx_models.exceptions import ValidationError
from mythx_models.response import Analysis, AnalysisSubmissionResponse
from mythx_models.util import serialize_api_timestamp
from . import common as testdata
def assert_analysis_data(expected, analysis: Analysis):
    """Assert every field of *analysis* matches the camelCase keys of the
    *expected* API response dict (timestamp re-serialized for comparison)."""
    assert expected["apiVersion"] == analysis.api_version
    assert expected["maruVersion"] == analysis.maru_version
    assert expected["mythrilVersion"] == analysis.mythril_version
    assert expected["harveyVersion"] == analysis.harvey_version
    assert expected["queueTime"] == analysis.queue_time
    assert expected["runTime"] == analysis.run_time
    assert expected["status"] == analysis.status
    # submitted_at is a datetime on the model; serialize back to compare
    # against the raw string in the response dict.
    assert expected["submittedAt"] == serialize_api_timestamp(analysis.submitted_at)
    assert expected["submittedBy"] == analysis.submitted_by
    assert expected["uuid"] == analysis.uuid
def test_analysis_submission_from_valid_json():
    # Round-trip: JSON string -> response object; fields must match the dict.
    resp = AnalysisSubmissionResponse.from_json(
        json.dumps(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT)
    )
    assert_analysis_data(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT, resp.analysis)


def test_analysis_submission_from_empty_json():
    # An empty JSON object must be rejected with the model ValidationError.
    with pytest.raises(ValidationError):
        AnalysisSubmissionResponse.from_json("{}")


def test_analysis_submission_from_valid_dict():
    # Same round-trip, starting from a dict instead of a JSON string.
    resp = AnalysisSubmissionResponse.from_dict(
        testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT
    )
    assert_analysis_data(testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT, resp.analysis)


def test_analysis_submission_from_empty_dict():
    # An empty dict must be rejected with the model ValidationError.
    with pytest.raises(ValidationError):
        AnalysisSubmissionResponse.from_dict({})


def test_analysis_submission_to_dict():
    # Serialization back to a dict reproduces the reference payload exactly.
    d = testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT.to_dict()
    assert d == testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT


def test_analysis_submission_to_json():
    # Serialization to JSON parses back to the reference payload.
    json_str = testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT.to_json()
    assert json.loads(json_str) == testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT


def test_analysis_submission_property_delegation():
    # The response object itself delegates Analysis fields (compare directly).
    assert_analysis_data(
        testdata.ANALYSIS_SUBMISSION_RESPONSE_DICT,
        testdata.ANALYSIS_SUBMISSION_RESPONSE_OBJECT,
    )
|
[
"dmuhs@protonmail.ch"
] |
dmuhs@protonmail.ch
|
40e701e304cdc95780f0c60fa96c57f9e665568e
|
ab269258a76b4a7f9af01de0b73144db23d6f005
|
/System Scripting/Problem06/6P/pythonwmi-simplified.py
|
c16e30867db2ed09bc26032ce471117879c17a56
|
[] |
no_license
|
jubic/RP-Misc
|
24715770b457c3f40db145f4f34d0fb775b71653
|
3c8e12646779e060180870475c0ef10773140e0f
|
refs/heads/master
| 2016-09-06T07:00:36.032240
| 2013-03-30T09:10:02
| 2013-03-30T09:10:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
from win32com.client import Dispatch
import os
server = Dispatch("WbemScripting.SWbemLocator")
conn = server.ConnectServer("localhost", "root\\cimv2")
def query(what):
    """Run ``SELECT * FROM <what>`` against the module-level WMI connection
    ``conn`` and return a list of dicts (property name -> value), one per
    WMI result object."""
    results = conn.ExecQuery("Select * from " + what)
    collections = []
    for item in results:
        data = {}
        # Properties_ is the COM collection of the object's WMI properties.
        for each in item.Properties_:
            data[each.Name] = each.Value
        collections.append(data)
    return collections
def write_to_file(fd, results):
    """Write each result dict to *fd* as right-aligned 'key = value' lines,
    separating records with a dashed rule.

    Fix: the body previously wrote to the module-global ``f`` instead of the
    ``fd`` parameter, so the function only worked by accident when callers
    happened to pass that same global file object.
    """
    for result in results:
        for key, value in result.items():
            fd.write("%40s = %s\n" % (key, value) )
        fd.write("%50s" % "---------------------")
        fd.write("\n")
# Dump a hardware/software inventory of this machine, one WMI class at a
# time, into a file named after the computer (CSName).
results = query("Win32_OperatingSystem")
filename = results[0]["CSName"]
# NOTE(review): opening in binary mode but writing str is Python-2-era
# behavior; under Python 3 these writes would raise TypeError -- confirm
# the target interpreter before reuse.
f = open(filename, "wb")
f.write("%50s" % "====== OperatingSystem ======\n")
write_to_file(f, results)
f.write("%50s" % "====== Win32_Processor ======\n")
results = query("Win32_Processor")
write_to_file(f, results)
f.write("%50s" % "====== Win32_PhysicalMemory ======\n")
results = query("Win32_PhysicalMemory")
write_to_file(f, results)
f.write("%50s" % "====== Win32_LogicalDisk ======\n")
results = query("Win32_LogicalDisk")
write_to_file(f, results)
f.write("%50s" % "====== Win32_NetworkAdapterConfiguration ======\n")
results = query("Win32_NetworkAdapterConfiguration")
write_to_file(f, results)
f.write("%50s" % "====== Win32_Product ======\n")
results = query("Win32_Product")
write_to_file(f, results)
f.close()
|
[
"jubic@live.com.sg"
] |
jubic@live.com.sg
|
d988c1a01af2913efd6faa4787ac8de7865acd11
|
4875d4e4ad63310e44086be4d8e450eba151ecaf
|
/code/pyworkshop/05_Dictionaries/02_Dict.py
|
d414fa02e0804626a53aafdee1dc7412c5c5c1ef
|
[
"MIT"
] |
permissive
|
archeranimesh/pythonFundamentals
|
7a066ee1ee23a5a78623e5ed50da5167e2c59c16
|
35662181d95406505002fe6a39f577822bfd560b
|
refs/heads/master
| 2020-06-01T12:15:51.828280
| 2019-07-13T17:48:21
| 2019-07-13T17:48:21
| 190,776,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
# Dictionary basics walkthrough.
nums = {"one": 1, "two": 2, "three": 3}

# add new key/Value pair.
nums["four"] = 4
print(nums)

# There are no duplicate keys in Dictionaries.
# If a new value is assigned to an existing key, it
# overrides the old value.
nums["two"] = "2222"
print(nums) # {'one': 1, 'two': '2222', 'three': 3, 'four': 4}

# Existence of a key in dict.
print("one" in nums)

nums["two"] = 2
print(nums)

# Combine two dicts: update() merges nums into rainbow in place.
rainbow = {"Green": "G", "Red": "R", "Blue": "B"}
rainbow.update(nums)
print(rainbow)

# Append value to a list in dict.
color = {"Green": ["Spinich"]}
print(color)
# Aliasing, not a copy: vegetable and color name the SAME dict object,
# so the append below is visible through both names.
vegetable = color
print(type(vegetable["Green"]))
vegetable["Green"].append("Lettuce")
print(color)

# 3 important functions on Dictionaries
# .keys(): returns a special view called dict_keys
print(nums.keys())
# .values(): returns a special view called dict_values
print(nums.values())
# .items(): returns a view of (key, value) tuples, called dict_items
print(nums.items())
|
[
"animeshb@archerimagine.com"
] |
animeshb@archerimagine.com
|
b75a006234cd636a9f0b674101009b376cf4ede1
|
e5a0a77a66563511c72feda18229712f109ab16d
|
/code/Chapter 14 - configparser/crud_config.py
|
f82dbd809b9feb0fd1c7fc3301b61832c269eb04
|
[] |
no_license
|
driscollis/Python-101-Russian
|
0136b3fe3edee025e4408a89c0461bb79ab4be07
|
28ce6727ef56dee8b6966526c5f80d8323ec9d73
|
refs/heads/master
| 2021-10-20T23:31:05.413934
| 2018-10-23T06:54:30
| 2018-10-23T06:54:30
| 149,648,717
| 0
| 2
| null | 2018-10-23T06:54:31
| 2018-09-20T17:53:06
|
Python
|
UTF-8
|
Python
| false
| false
| 760
|
py
|
import configparser
import os
def crudConfig(path):
    """
    Create, read, update, delete config
    """
    # createConfig is defined elsewhere in the original module; it writes a
    # fresh settings file when none exists yet.
    if not os.path.exists(path):
        createConfig(path)

    config = configparser.ConfigParser()
    config.read(path)

    # read some values from the config
    # (read but deliberately unused -- the example only demonstrates get())
    font = config.get("Settings", "font")
    font_size = config.get("Settings", "font_size")

    # change a value in the config
    config.set("Settings", "font_size", "12")

    # delete a value from the config
    config.remove_option("Settings", "font_style")

    # write changes back to the config file
    with open(path, "w") as config_file:
        config.write(config_file)


if __name__ == "__main__":
    path = "settings.ini"
    crudConfig(path)
|
[
"mike@pythonlibrary.org"
] |
mike@pythonlibrary.org
|
7ceae1ad282b1059676d6451c86751575d7e1e6b
|
a40950330ea44c2721f35aeeab8f3a0a11846b68
|
/OpenCV-python读取监控/发送照片.py
|
7485633f350fd958947abde317da53ecd06ae10f
|
[] |
no_license
|
huang443765159/kai
|
7726bcad4e204629edb453aeabcc97242af7132b
|
0d66ae4da5a6973e24e1e512fd0df32335e710c5
|
refs/heads/master
| 2023-03-06T23:13:59.600011
| 2023-03-04T06:14:12
| 2023-03-04T06:14:12
| 233,500,005
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
import cv2
import socket
import pickle
import base64
import numpy as np
# UDP socket is created but never used below; the script only demonstrates
# the serialize/deserialize round-trip locally.
network = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

frame = cv2.imread('B.jpeg')
# Downscale the frame to 20% before "sending".
new_frame = cv2.resize(frame, (int(frame.shape[1] * 0.2), int(frame.shape[0] * 0.2)))
# Payload = (1-byte message-type header, frame array), pickled together.
tx_data = pickle.dumps((b'\x01', new_frame))
print(len(tx_data))
# "Receive" side: unpickle, then scale back up 2x for display.
head, rx_data = pickle.loads(tx_data)
new_rx = cv2.resize(rx_data, (int(rx_data.shape[1] * 2), int(rx_data.shape[0] * 2)))
# print(head, rx_data)
cv2.imshow('1', new_rx)
cv2.waitKey(0)
|
[
"443765159@qq.com"
] |
443765159@qq.com
|
0a50a5878a88fadb82d274ab672c616160eb913b
|
79e8e93a6807a4e2bd4923e0d9b78e3a7b38bb0b
|
/python/round2/permutations.py
|
9ef6c0e553d85cf6940d2bfd03b7f8a1e35da930
|
[] |
no_license
|
djole103/algo
|
2885c30e927898c749e99ee05ff6c8f43033c9eb
|
5c60dc77fcc091d1b2c52de99ee3071d82e1e17f
|
refs/heads/master
| 2020-04-12T02:28:58.300269
| 2017-04-19T23:18:04
| 2017-04-19T23:18:04
| 43,453,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
import collections
#O(n) storage
def isPermutation(str1, str2):
    """Return True iff *str2* is a permutation of *str1*.

    O(n) time, O(n) extra space via a character-count table.  ``None``
    inputs are never permutations of anything.

    Fix: the original never compared lengths, so a longer first string whose
    counts dominated the second (e.g. isPermutation("aab", "ab")) was
    incorrectly reported as a permutation.
    """
    if str1 is None or str2 is None:
        return False
    if len(str1) != len(str2):
        return False
    d = collections.defaultdict(int)
    for l in str1:
        d[l] += 1
    for l in str2:
        if l not in d:
            return False
        d[l] -= 1
        if d[l] < 0:
            return False
    return True
def isPermutationLol(str1, str2):
    """One-liner permutation check: two strings are permutations of each
    other exactly when their sorted character sequences are equal."""
    canonical_a = sorted(str1)
    canonical_b = sorted(str2)
    return canonical_a == canonical_b
def allPermutations(str):
    """Return a list of all permutations of *str* (duplicates preserved).

    Note: the parameter shadows the builtin ``str``; the name is kept for
    backward compatibility with existing callers.

    Fix: the base case now returns ``[str]`` so the result is always a list
    -- the empty string yields [""] (its one empty permutation) instead of
    "" ; for single characters the iterated contents are unchanged.
    """
    if len(str) <= 1:
        return [str]
    perms = []
    for i in range(len(str)):
        # Fix character i at the front, permute the remaining characters.
        perms += [str[i] + x for x in allPermutations(str[:i] + str[i+1:])]
    return perms
print(allPermutations("abc"))
def swapPermute(xs, low=0):
    """Yield every permutation of list *xs* by in-place element swapping,
    recursing on the suffix that starts at index *low*.

    WARNING: every yield hands out the SAME list object, mutated in place;
    consumers that keep results must copy them (list(p) / tuple(p)).
    Each swap is undone after its recursive call, so xs is restored to its
    original order once iteration completes.
    """
    if low+1 >= len(xs):
        yield xs
    else:
        # First: the permutations that keep xs[low] in place.
        for p in swapPermute(xs, low+1):
            yield p
        # Then: move each later element to position `low`, recurse, and
        # swap back to undo the change.
        for i in range(low+1,len(xs)):
            xs[low], xs[i] = xs[i], xs[low]
            for p in swapPermute(xs, low+1):
                yield p
            xs[low], xs[i] = xs[i], xs[low]
for i in swapPermute(['a','b','c']):
print(i)
|
[
"djordje_7@hotmail.com"
] |
djordje_7@hotmail.com
|
5fd162325d1b76f2416508c204ac01e3912b2b7c
|
eb067a086adce4571a8d69db5423db41d8817d0d
|
/test.py
|
e28ceca3d03757572d445f642b7afc80d5a00003
|
[] |
no_license
|
thomasballinger/simplerest
|
09f47d1e6a3a4f5b6dc5de0f511dfd9d14783e8a
|
d0309b5a9439de8c16d107d33e4784e0a9b068a9
|
refs/heads/master
| 2016-09-15T19:00:37.995063
| 2014-05-22T13:12:18
| 2014-05-22T13:12:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
import socket

# Minimal single-threaded TCP server: accept a connection, read one request
# chunk, log it, reply with a fixed body, and close the connection.
server = socket.socket()
# Allow quick restarts on the same port (avoids TIME_WAIT bind errors).
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('', 7000))
server.listen(5)
while True:
    s, (ip, port) = server.accept()
    # recv() may return fewer bytes than requested; this toy server treats a
    # single read as the whole request.
    data = s.recv(10000)
    # Fixes vs. the original (which was Python-2-only syntax): print is
    # called as a function, and the response goes out as bytes via
    # sendall() so the whole payload is transmitted.
    print(data)
    s.sendall(b"your page")
    s.close()
|
[
"thomasballinger@gmail.com"
] |
thomasballinger@gmail.com
|
7924494e333eaaa3fc1fb45014a438dff96f2abb
|
c5542154b44f1b228cdadeaf44c6a5998ed37ed4
|
/base/day3/if/2numif.py
|
cf3ae469fede9e992a02e8e751cd5ee19d44e9a9
|
[] |
no_license
|
ThrallOtaku/python3Test
|
a31a2de1576b3a3c1062a7d6908119d7cbf21b36
|
c62e6024bbbeafd396b68e40332991758914ba0b
|
refs/heads/master
| 2020-03-07T22:45:47.403999
| 2018-06-08T10:19:42
| 2018-06-08T10:19:42
| 127,763,269
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
'''
x=10
num= 10 if x>18 else 20
print(num)
'''
#ages=10
#price= 20 if ages>=16 else 10
import os
# Conditional-expression demo with side effects: since 3 > 12 is False, the
# else branch runs and launches Notepad (Windows-only; os.system blocks
# until the launched program exits).
os.system("calc") if 3>12 else os.system("notepad")
|
[
"tanght@11wlw.com"
] |
tanght@11wlw.com
|
3b34c3aa261597bb0b7a20265a7d26473b548dd0
|
a50a4e874d3d203344a47bc7ad9c317b213eab90
|
/base/config.py
|
28c8fb077efd365c3408ab3d90723e234358ad31
|
[] |
no_license
|
fjl121029xx/yarn-api-python
|
d5b61ca0695d5fdc4f8923d5814f6576c3c87509
|
4468609dea2d7630fd9fc3dabbe7c02ded7aa4a1
|
refs/heads/master
| 2020-12-04T02:02:40.913088
| 2020-02-27T08:08:18
| 2020-02-27T08:08:18
| 231,563,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,782
|
py
|
# Per-environment Livy/YARN/Spark configuration.
# 'DOHKO' carries dev/test sizing, 'PRODUCT' production sizing.  Each
# environment defines service endpoints plus two Livy session templates:
#   readApp  -- sizing for read/query sessions
#   writeApp -- sizing for write sessions
AppConfig = {
    'DOHKO': {
        # 'livyServerUri': 'http://172.20.44.6:8999/sessions/',
        # 'yarnServerUri': 'http://172.20.44.6:8088/ws/v1/cluster/apps/',
        # 'livyServerPath': '/usr/hdp/current/livy2-server/bin/livy-server',
        'livyServerUri': 'http://172.26.25.148:8999/sessions/',
        'yarnServerUri': 'http://172.26.25.148:8088/ws/v1/cluster/apps/',
        'livyServerPath': '/home/hadoop/livy/bin/livy-server',
        'readApp': {
            "jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
            "pyFiles": [],
            "files": [],
            "archives": [],
            "kind": 'spark',
            "driverMemory": '2g',
            "driverCores": 1,
            "executorMemory": '2g',
            "executorCores": 2,
            "numExecutors": 4,
            "queue": 'default',
            "heartbeatTimeoutInSecond": 86400,
            "proxyUser": 'yqs',
            'conf': {
                "spark.default.parallelism": 12,
                "spark.rdd.compress": True,
                "spark.io.compression.codec": "snappy"
            }
        },
        'writeApp': {
            "jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
            "pyFiles": [],
            "files": [],
            "archives": [],
            "kind": 'spark',
            "driverMemory": '512m',
            "driverCores": 1,
            "executorMemory": '2g',
            "executorCores": 2,
            "numExecutors": 2,
            "queue": 'default',
            "heartbeatTimeoutInSecond": 86400,
            "proxyUser": 'yqs',
            'conf': {
                "spark.default.parallelism": 12,
                "spark.rdd.compress": True,
                "spark.io.compression.codec": "snappy"
            }
        }
    },
    'PRODUCT': {
        # 'livyServerUri': 'http://rm.yqs.hualala.com:8999/sessions/',
        # 'yarnServerUri': 'http://rm.yqs.hualala.com:8088/ws/v1/cluster/apps/',
        # 'livyServerPath': '/home/olap/tools/apps/livy/bin/livy-server',
        'livyServerUri': 'http://172.26.25.148:8999/sessions/',
        'yarnServerUri': 'http://172.26.25.148:8088/ws/v1/cluster/apps/',
        'livyServerPath': '/home/hadoop/livy/bin/livy-server',
        'readApp': {
            "jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
            "pyFiles": [],
            "files": [],
            "archives": [],
            "kind": 'spark',
            "driverMemory": '16g',
            "driverCores": 8,
            "executorMemory": '10g',
            "executorCores": 6,
            "numExecutors": 35,
            "queue": 'default',
            "heartbeatTimeoutInSecond": 86400,
            "proxyUser": None,
            'conf': {
                "spark.default.parallelism": 400,
                "spark.scheduler.mode": "FAIR",
                "spark.serializer": "org.apache.spark.serializer.KryoSerializer",
                "spark.rdd.compress": True,
                "spark.io.compression.codec": "snappy",
                "spark.sql.inMemoryColumnarStorage.batchSize": 300000,
                "spark.sql.files.maxPartitionBytes": 134217728,
                "spark.sql.broadcastTimeout": 60,
                "spark.sql.orc.enabled": True,
                "spark.sql.orc.impl": "native",
                "spark.sql.orc.enableVectorizedReader": True,
                "spark.sql.hive.convertMetastoreOrc": True
            }
        },
        'writeApp': {
            "jars": ["hdfs://cluster/yqs/tools/engine-0.0.1-SNAPSHOT.jar"],
            "pyFiles": [],
            "files": [],
            "archives": [],
            "kind": 'spark',
            "driverMemory": '10g',
            "driverCores": 4,
            "executorMemory": '10g',
            "executorCores": 6,
            "numExecutors": 10,
            "queue": 'default',
            "heartbeatTimeoutInSecond": 86400,
            "proxyUser": None,
            'conf': {
                "spark.default.parallelism": 400,
                "spark.scheduler.mode": "FAIR",
                "spark.serializer": "org.apache.spark.serializer.KryoSerializer",
                "spark.rdd.compress": True,
                "spark.io.compression.codec": "snappy",
                "spark.sql.inMemoryColumnarStorage.batchSize": 300000,
                "spark.sql.files.maxPartitionBytes": 134217728,
                "spark.sql.broadcastTimeout": 60,
                "spark.sql.orc.enabled": True,
                "spark.sql.orc.impl": "native",
                "spark.sql.orc.enableVectorizedReader": True,
                "spark.sql.hive.convertMetastoreOrc": True,
                "spark.sql.orc.filterPushdown": True,
                "spark.sql.orc.char.enabled": True
            }
        }
    }
}
|
[
"sishengqikuo_xx@163.com"
] |
sishengqikuo_xx@163.com
|
688a69eeefdd18fc59f72c8a0c55e7ada6bac042
|
00ccdc877771cb0cf493526d1e201e0f625bf5e7
|
/dohq_teamcity/models/vcs_root_entry.py
|
71eac569239c8a77d6638dac74da2b020c9df602
|
[
"MIT"
] |
permissive
|
expobrain/teamcity
|
a52928045166bb5d34f4a0396cb840bfee8f43d5
|
9f04c0692a2c5b277a608c2f11cc1fb48e0c87e2
|
refs/heads/master
| 2020-04-13T13:11:07.270515
| 2018-10-18T01:40:06
| 2018-10-18T01:40:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,526
|
py
|
# coding: utf-8
from dohq_teamcity.custom.base_model import TeamCityObject
# from dohq_teamcity.models.vcs_root import VcsRoot # noqa: F401,E501
class VcsRootEntry(TeamCityObject):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Model for a TeamCity VCS root entry: a reference to a VCS root plus the
    checkout rules that apply to it.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'checkout_rules': 'str',
        'id': 'str',
        'inherited': 'bool',
        'vcs_root': 'VcsRoot'
    }

    # Maps Python attribute names to the hyphenated JSON keys of the API.
    attribute_map = {
        'checkout_rules': 'checkout-rules',
        'id': 'id',
        'inherited': 'inherited',
        'vcs_root': 'vcs-root'
    }

    def __init__(self, checkout_rules=None, id=None, inherited=False, vcs_root=None, teamcity=None):  # noqa: E501
        """VcsRootEntry - a model defined in Swagger"""  # noqa: E501
        self._checkout_rules = None
        self._id = None
        self._inherited = None
        self._vcs_root = None
        self.discriminator = None
        if checkout_rules is not None:
            self.checkout_rules = checkout_rules
        if id is not None:
            self.id = id
        if inherited is not None:
            self.inherited = inherited
        if vcs_root is not None:
            self.vcs_root = vcs_root
        super(VcsRootEntry, self).__init__(teamcity=teamcity)

    @property
    def checkout_rules(self):
        """Gets the checkout_rules of this VcsRootEntry.  # noqa: E501

        :return: The checkout_rules of this VcsRootEntry.  # noqa: E501
        :rtype: str
        """
        return self._checkout_rules

    @checkout_rules.setter
    def checkout_rules(self, checkout_rules):
        """Sets the checkout_rules of this VcsRootEntry.

        :param checkout_rules: The checkout_rules of this VcsRootEntry.  # noqa: E501
        :type: str
        """
        self._checkout_rules = checkout_rules

    @property
    def id(self):
        """Gets the id of this VcsRootEntry.  # noqa: E501

        :return: The id of this VcsRootEntry.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this VcsRootEntry.

        :param id: The id of this VcsRootEntry.  # noqa: E501
        :type: str
        """
        self._id = id

    @property
    def inherited(self):
        """Gets the inherited of this VcsRootEntry.  # noqa: E501

        :return: The inherited of this VcsRootEntry.  # noqa: E501
        :rtype: bool
        """
        return self._inherited

    @inherited.setter
    def inherited(self, inherited):
        """Sets the inherited of this VcsRootEntry.

        :param inherited: The inherited of this VcsRootEntry.  # noqa: E501
        :type: bool
        """
        self._inherited = inherited

    @property
    def vcs_root(self):
        """Gets the vcs_root of this VcsRootEntry.  # noqa: E501

        :return: The vcs_root of this VcsRootEntry.  # noqa: E501
        :rtype: VcsRoot
        """
        return self._vcs_root

    @vcs_root.setter
    def vcs_root(self, vcs_root):
        """Sets the vcs_root of this VcsRootEntry.

        :param vcs_root: The vcs_root of this VcsRootEntry.  # noqa: E501
        :type: VcsRoot
        """
        self._vcs_root = vcs_root
|
[
"allburov@gmail.com"
] |
allburov@gmail.com
|
55107bfc0b70cbf0ce0416d7d702e61475dc14dd
|
f26521284741a1f730e2d52de7426807247e08b6
|
/Python/Topics/Class/Who is who/main.py
|
2bcfcf86bdc076784c5ae9755f976ba6ac78e8bc
|
[
"MIT"
] |
permissive
|
drtierney/hyperskill-problems
|
0e6fe8ca418d1af700a5a1b1b2eed1f1f07b8e9e
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
refs/heads/main
| 2021-10-25T07:02:50.838216
| 2021-10-16T19:08:49
| 2021-10-16T19:08:49
| 253,045,232
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 349
|
py
|
class Angel:
    # Class attributes shared by every Angel instance.
    color = "white"
    feature = "wings"
    home = "Heaven"


class Demon:
    color = "red"
    feature = "horns"
    home = "Hell"


the_angel = Angel()
# Attribute access falls through from the instance to the class attributes.
print(the_angel.color)
print(the_angel.feature)
print(the_angel.home)

the_demon = Demon()
# Same lookups, performed dynamically by name via getattr().
for attribute in ['color', 'feature', 'home']:
    print(getattr(the_demon, attribute))
|
[
"d.r.tierney@hotmail.co.uk"
] |
d.r.tierney@hotmail.co.uk
|
2dc9330666f5fbcb6526ba3adaba2c90af3de318
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03032/s021397547.py
|
d664139ae2e131d0ee24b03e0e38925747d285b4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
from bisect import bisect_left

# Competitive-programming solution (reads stdin).  Presumably: take up to K
# values total from the ends of V (n from the left, m from the right), then
# spend remaining operations discarding the worst picks -- TODO confirm
# against the original problem statement.
N,K = map(int,input().split())
V = list(map(int,input().split()))
vmax = -10**9
# n = count taken from the left, m = count taken from the right; the inner
# range keeps n + m <= min(N, K).
for n in range(min(N,K)+1):
    A = V[:n]
    for m in range(min(N,K)-n+1):
        B = V[N-m:]
        B = B+A
        B = sorted(B)
        # bisect_left on the sorted picks counts how many are negative;
        # up to K-n-m of them may be dropped.
        ind = bisect_left(B,0)
        k = min(ind,K-n-m)
        v = sum(B[k:])
        vmax = max(vmax,v)
print(vmax)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9aff4776e7a2ddf4284e16a6e8a0267f27c5ea27
|
1cb7aeb570630c9743a5b0dc7a254197d26016de
|
/py/testdir_ec2_only/test_parse_syn_s3n_thru_hdfs.py
|
877490a8fb08c2a833f6b77182c24885c8f4cc5c
|
[
"Apache-2.0"
] |
permissive
|
devinshields/h2o
|
576dbebc663265190cfca3fe8341b10d2243213c
|
9d8b782e5cb7f38f3cb0086fef15ecec7d9282d5
|
refs/heads/master
| 2021-01-18T04:57:34.703132
| 2013-08-27T00:57:51
| 2013-08-27T00:57:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts
import h2o_browse as h2b
import h2o_import as h2i
# NOTE(review): this file uses Python 2 print statements; it is not valid
# Python 3 source.
class Basic(unittest.TestCase):
    """Integration test: import a synthetic dataset from s3n through HDFS
    into an H2O cloud and parse it (requires live cluster infrastructure)."""

    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # assume we're at 0xdata with it's hdfs namenode
        global localhost
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(1)
        else:
            # all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
            h2o_hosts.build_cloud_with_hosts(1,
                # this is for our amazon ec hdfs
                # see https://github.com/0xdata/h2o/wiki/H2O-and-s3n
                hdfs_name_node='10.78.14.235:9000',
                hdfs_version='0.20.2')

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_parse_syn_s3n_thru_hdfs(self):
        # I put these file copies on s3 with unique suffixes
        # under this s3n "path"
        csvFilename = "*_10000x200*"
        trialMax = 1
        timeoutSecs = 500
        URI = "s3n://home-0xdiag-datasets/syn_datasets"
        s3nKey = URI + "/" + csvFilename

        for trial in range(trialMax):
            # since we delete the key, we have to re-import every iteration
            # s3n URI thru HDFS is not typical.
            importHDFSResult = h2o.nodes[0].import_hdfs(URI)
            s3nFullList = importHDFSResult['succeeded']
            ### print "s3nFullList:", h2o.dump_json(s3nFullList)
            self.assertGreater(len(s3nFullList),1,"Didn't see more than 1 files in s3n?")

            key2 = "syn_datasets_" + str(trial) + ".hex"
            print "Loading s3n key: ", s3nKey, 'thru HDFS'
            start = time.time()
            parseKey = h2o.nodes[0].parse(s3nKey, key2,
                timeoutSecs=500, retryDelaySecs=10, pollTimeoutSecs=60)
            elapsed = time.time() - start
            print s3nKey, 'parse time:', parseKey['response']['time']
            print "parse result:", parseKey['destination_key']
            print "Trial #", trial, "completed in", elapsed, "seconds.", \
                "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)

            inspect = h2o_cmd.runInspect(None, parseKey['destination_key'])
            print "\n" + key2 + \
                " num_rows:", "{:,}".format(inspect['num_rows']), \
                " num_cols:", "{:,}".format(inspect['num_cols'])

            print "Deleting key in H2O so we get it from s3n (if ec2) or nfs again.", \
                "Otherwise it would just parse the cached key."
            storeView = h2o.nodes[0].store_view()
            ### print "storeView:", h2o.dump_json(storeView)
            print "BROKE: we can't delete keys with a pattern match yet..this fails"
            print "So we only do 1 trial and don't delete"
            # print "Removing", s3nKey
            # removeKeyResult = h2o.nodes[0].remove_key(key=s3nKey)


if __name__ == '__main__':
    h2o.unit_main()
|
[
"kevin@0xdata.com"
] |
kevin@0xdata.com
|
d6664e32d60cee4b037eec8832a8a5700d57d63a
|
e81576012330e6a6024d14f3e241f88ca34b73cd
|
/python_code/vnev/Lib/site-packages/jdcloud_sdk/services/resourcetag/models/ResourceReqVo.py
|
d520231ef4b8a9e9d0ce52cfcf8615761daffcb6
|
[
"MIT"
] |
permissive
|
Ureimu/weather-robot
|
eba6a84147755aa83c941a306bac1a7c4e95e23e
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
refs/heads/master
| 2021-01-15T07:23:42.274413
| 2020-03-23T02:30:19
| 2020-03-23T02:30:19
| 242,912,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,909
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ResourceReqVo(object):
    """Auto-generated JDCloud request model: filter/sort/paging parameters
    for querying tagged resources."""

    def __init__(self, serviceCodes=None, resourceIds=None, tagFilters=None, orderCondition=None, descOrAsc=None, pageSize=None, currentPage=None):
        """
        :param serviceCodes: (Optional) List of product-line (service) codes.
            Product lines supported by the tag system:
            - vm disk sqlserver es mongodb ip
            - memcached redis drds rds database db_ro
            - percona percona_ro mariadb mariadb_ro pg cdn
            - nativecontainer pod zfs jqs kubernetesNodegroup jcq
        :param resourceIds: (Optional) List of resource ids.
        :param tagFilters: (Optional) List of tag filters.
        :param orderCondition: (Optional) Sort key.
        :param descOrAsc: (Optional) Sort direction; one of ASC, DESC.
        :param pageSize: (Optional) Number of records per page.
        :param currentPage: (Optional) Current page number.
        """
        self.serviceCodes = serviceCodes
        self.resourceIds = resourceIds
        self.tagFilters = tagFilters
        self.orderCondition = orderCondition
        self.descOrAsc = descOrAsc
        self.pageSize = pageSize
        self.currentPage = currentPage
|
[
"a1090693441@163.com"
] |
a1090693441@163.com
|
b6b87d81e355a4f8bff6abb5d8f6e610fc0bb9d5
|
68271a37c3c4dd3d31b24c0cddbf574472f9f6a5
|
/backend.py
|
7e19cee00e6f11342bb1cc90a70ae13c0cee22f0
|
[
"MIT"
] |
permissive
|
LSaldyt/automata
|
8bcbb269bdfdf01803d66b77eb31be0a7eddb83b
|
ff0ba058f087fbcd7958866019b4b7cb43e924bd
|
refs/heads/master
| 2020-04-28T19:01:49.703783
| 2019-04-22T21:06:01
| 2019-04-22T21:06:01
| 175,497,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
def cuboid(o, size=(1,1,1)):
X = [[[0, 1, 0], [0, 0, 0], [1, 0, 0], [1, 1, 0]],
[[0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]],
[[1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1]],
[[0, 0, 1], [0, 0, 0], [0, 1, 0], [0, 1, 1]],
[[0, 1, 0], [0, 1, 1], [1, 1, 1], [1, 1, 0]],
[[0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1]]]
X = np.array(X).astype(float)
for i in range(3):
X[:,:,i] *= size[i]
X += np.array(o)
return X
def renderCubes(positions, sizes=None, colors=None, **kwargs):
if not isinstance(sizes,(list,np.ndarray)): sizes=[(1,1,1)]*len(positions)
if not isinstance(colors,(list,np.ndarray)): colors=["C0"]*len(positions)
g = []
for p,s,c in zip(positions,sizes,colors):
g.append( cuboid(p, size=s) )
return Poly3DCollection(np.concatenate(g),
facecolors=np.repeat(colors,6, axis=0), **kwargs)
|
[
"lucassaldyt@gmail.com"
] |
lucassaldyt@gmail.com
|
afbc0ea56e7cb155afec46f10c5e11b4625c3058
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/7kyu/so-easy-charge-time-calculation/Python/test.py
|
0c7363e56959f0d28c875a4cc5375e8de39a6d2e
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081
| 2019-07-05T09:42:40
| 2019-07-05T09:42:40
| 88,911,035
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
# Python - 3.6.0
Test.describe('Basic Tests')
Test.assert_equals(calculate_time(1000, 500), 2.6)
Test.assert_equals(calculate_time(1500, 500), 3.9)
Test.assert_equals(calculate_time(2000, 1000), 2.6)
Test.assert_equals(calculate_time(5000, 1000), 6.5)
Test.assert_equals(calculate_time(1000, 5000), 0.26)
|
[
"d79523@hotmail.com"
] |
d79523@hotmail.com
|
22f53a51f9d56c1020e164b962c2a83a03669e8f
|
a1c9c55e1520356113a320be18e8fcb31654a944
|
/archive/0.10/generated/seaborn-jointplot-5.py
|
97b6478b704f4f926f64361289dfc029bff368a4
|
[] |
no_license
|
seaborn/seaborn.github.io
|
bac12a9255b41c7971e9e94ea393d372ef66ef62
|
f70445bc3456f0216169806c2daf03452ca1eba4
|
refs/heads/master
| 2023-01-06T10:50:10.789810
| 2022-12-30T19:59:55
| 2022-12-30T19:59:55
| 70,731,605
| 16
| 5
| null | 2022-06-28T00:32:07
| 2016-10-12T18:56:12
|
HTML
|
UTF-8
|
Python
| false
| false
| 147
|
py
|
g = (sns.jointplot("sepal_length", "sepal_width",
data=iris, color="k")
.plot_joint(sns.kdeplot, zorder=0, n_levels=6))
|
[
"mwaskom@nyu.edu"
] |
mwaskom@nyu.edu
|
56f93826cccd3b8c8efa2400ea3934ed95d6102e
|
db0e991d02a50eda222aaebeb7a5772b9cba467f
|
/account/admin.py
|
6db31fc81344fb402617759b0d2e5180d5105ae8
|
[] |
no_license
|
iamrraj/Djanog_Learn
|
1ba90ac797b284c5e2a7dd733fd61353ee8af241
|
c522b41411415585468cadfe6999262a6c9b487d
|
refs/heads/master
| 2020-04-17T09:48:09.414269
| 2019-01-18T21:24:53
| 2019-01-18T21:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
from django.contrib import admin
from .models import Profile,Categoty,Product,Slide
# Register your models here.
class ProfileAdmin(admin.ModelAdmin):
list_display = ('user','location')
list_display = ('user','birth_date','email_confirmed', 'location')
list_filter = [ 'location', 'user' ]
search_fields = ['location', 'user' ]
class ProductAdmin(admin.ModelAdmin):
list_display = ('name','disprice','seller')
list_display = ('name','disprice','seller', 'acprice','categoty')
list_display_links = ('name','disprice','seller', 'acprice','categoty')
list_filter = ['pub_date','categoty','seller']
search_fields = ['categoty','seller','name']
class CategoryAdmin(admin.ModelAdmin):
list_display = ('name','pub_date')
list_display_links = ('name','pub_date')
list_filter = ['pub_date','name']
search_fields = ['id','name','pub_date']
class SlideAdmin(admin.ModelAdmin):
list_display = ('image','pub_date')
list_display_links = ('image','pub_date')
list_filter = ['pub_date','image']
admin.site.register(Profile,ProfileAdmin)
admin.site.register(Product,ProductAdmin)
admin.site.register(Categoty,CategoryAdmin)
admin.site.register(Slide,SlideAdmin)
|
[
"rajr97555@gmail.com"
] |
rajr97555@gmail.com
|
e7dc87d8f60339b7be557f18f118cc68c3545903
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2098/49405/273048.py
|
cff498a9698319a280edf449fde35bbd033aa422
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
a = int(input())
b = 1
while b < a:
b *= 26
for i in range(b, 0, -1):
print(chr(a // b + ord("A") - 1), end="")
a %= 26
b //= 26
if a > 0: print(chr(a + ord("A") - 1))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
dcd624ef125ecb43865c6cf90b0020339955f483
|
87f31b789750f6b545d6a79bd0b7028ebf4126c7
|
/vislab/_results.py
|
1716115a35aa0d860c71db36fd53bb1bdebffacc
|
[
"BSD-2-Clause"
] |
permissive
|
hertzmann/vislab
|
db4d1b9e63e9bb8a33e491cff433e02c0315ca81
|
fcded208637fb51edfeaef1bde0bf766f9af1941
|
refs/heads/master
| 2021-01-24T04:20:32.382941
| 2016-07-12T20:21:28
| 2016-07-12T20:21:28
| 13,571,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,575
|
py
|
import os
import pandas as pd
import cPickle
import numpy as np
import vislab
def load_pred_results(collection_name, cache_dirname, multiclass=False, force=False):
"""
Return DataFrame of prediction experiment results and Panel of per-image
predictions.
"""
if not os.path.exists(cache_dirname):
vislab.util.makedirs(cache_dirname)
results_df_filename = os.path.join(
cache_dirname, '{}_results_df.pickle'.format(collection_name))
preds_panel_filename = os.path.join(
cache_dirname, '{}_preds_panel.pickle'.format(collection_name))
# If cache exists, load and return.
if (os.path.exists(results_df_filename) and
os.path.exists(preds_panel_filename) and
not force):
results_df = pd.read_pickle(results_df_filename)
preds_panel = pd.read_pickle(preds_panel_filename)
print("Loaded from cache: {} records".format(results_df.shape[0]))
return results_df, preds_panel
# Otherwise, construct from database.
client = vislab.util.get_mongodb_client()
collection = client['predict'][collection_name]
print("Results in collection {}: {}".format(collection_name, collection.count()))
df = pd.DataFrame(list(collection.find()))
df.index = df.index.astype(str)
# Make the features list hashable for filtering/joins.
df['features_str'] = df['features'].apply(lambda x: ','.join(sorted(x)))
# We need a unique representation of the predictor settings.
df['setting'] = df.apply(lambda x: '{} {} {}'.format(x['features_str'], x['quadratic'], 'vw'), axis=1)
# And of the task performed.
df['full_task'] = df.apply(lambda x: '{} {}'.format(x['task'], x['data']), axis=1)
df = df.drop_duplicates(cols=['features_str', 'full_task'], take_last=True)
# Just for printing, if needed.
df = df.sort(['full_task', 'setting'])
# Get all predictions in a separate panel and drop the pickled ones.
if multiclass:
data = {}
for setting in df['setting'].unique():
el = df[df['setting'] == setting].iloc[0]
try:
pred_df = cPickle.loads(el['pred_df'])
except:
assert('results_name' in el)
pred_df_filename = '{}/{}.h5'.format(
vislab.config['paths']['results'], el['results_name'])
#print(pred_df_filename)
pred_df = pd.read_hdf(pred_df_filename, 'df')
# Not sure why there should ever be duplicate indices, but
# there are for one of the wikipaintings results...
pred_df['__index'] = pred_df.index
pred_df.drop_duplicates(cols='__index', take_last=True, inplace=True)
del pred_df['__index']
data[setting] = pred_df
preds_panel = pd.Panel(data).swapaxes('items', 'minor')
else:
preds_panel = get_all_preds_panel(df)
try:
del df['pred_df']
except KeyError:
pass
df.to_pickle(results_df_filename)
preds_panel.to_pickle(preds_panel_filename)
return df, preds_panel
def get_all_preds_panel(df):
all_full_tasks = df['full_task'].unique()
data = dict((
(full_task, get_all_preds_df(df, full_task))
for full_task in all_full_tasks
))
all_preds_panel = pd.Panel(data)
return all_preds_panel
def get_all_preds_df(df, full_task):
"""
Get the DataFrame of predictions from the results dataframe.
Tip: get all predictions of an image with
all_preds_panel.major_xs('f_1604904579').T
"""
tdf = df[df['full_task'] == full_task]
# Make sure that there are no duplicate settings.
if len(tdf.setting.unique()) != tdf.shape[0]:
try:
del df['pred_df']
except KeyError:
pass
print(tdf.to_string())
raise Exception("Non-unique feature-setting pairs")
pred_dfs = []
for i, row in tdf.iterrows():
try:
pred_df = cPickle.loads(row['pred_df'])
except:
assert('results_name' in row)
pred_df_filename = '{}/{}.h5'.format(
vislab.config['paths']['results'], row['results_name'])
pred_df = pd.read_hdf(pred_df_filename, 'df')
pred_df.index = pred_df.index.astype(str)
pred_dfs.append(pred_df)
# Make sure that all the settings had the same label and split information
arbitrary_pred_df = pred_dfs[0]
assert(np.all(df_['label'] == arbitrary_pred_df['label'] for df_ in pred_dfs))
assert(np.all(df_['split'] == arbitrary_pred_df['split'] for df_ in pred_dfs))
data = []
for df_ in pred_dfs:
df_["index"] = df_.index
# TODO: why the fuck are the duplicate indices???
df_ = df_.drop_duplicates('index')
if 'score' in df_.columns:
data.append(df_['score'])
else:
# TODO: temporary, remove when all experiments are re-run
data.append(df_['pred'])
all_preds_df = pd.DataFrame(data, index=tdf['setting']).T
all_preds_df['label'] = arbitrary_pred_df['label']
all_preds_df['split'] = arbitrary_pred_df['split']
# # Re-order columns
# # columns = all_preds_df.columns.values
# # reordered_columns = ['split', 'label'] + (columns - ['split', 'label']).tolist()
# # all_preds_df = all_preds_df[:, reordered_columns]
all_preds_df.index = all_preds_df.index.astype(str)
return all_preds_df
if __name__ == '__main__':
load_pred_results('wikipaintings_oct25', 'whatever', multiclass=True)
|
[
"sergeykarayev@gmail.com"
] |
sergeykarayev@gmail.com
|
0f002addf74bef460a8840967aaf1a0aba19ff6d
|
47136f769b2e870242f438927cee8dabcbca94c0
|
/week8/informatics/4/F.py
|
ac3e3e385b9712dcdb1ec40e27313b118220704f
|
[] |
no_license
|
Almanova/WebDevelopment-Spring2020
|
de97b5aba1f13a766e2ef183151e39db3c8bba53
|
0abdee8f25dee1a4d32da2b633903d33936b6e77
|
refs/heads/master
| 2023-01-11T08:20:27.232203
| 2020-04-17T01:31:01
| 2020-04-17T01:31:01
| 236,373,539
| 0
| 0
| null | 2023-01-07T16:25:00
| 2020-01-26T20:42:31
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 177
|
py
|
n = int(input())
list = input().split()
cnt = 0
for i in range(1, n - 1):
if int(list[i - 1]) < int(list[i]) and int(list[i + 1]) < int(list[i]):
cnt += 1
print(cnt)
|
[
"almanovamadina@yahoo.com"
] |
almanovamadina@yahoo.com
|
e54d5f06e5fc1b80bc598b447f5332574e03328c
|
35f7970d0423dac96f1fefda6fb2246ada0bd483
|
/catkin_ws/build/rotors_joy_interface/catkin_generated/pkg.installspace.context.pc.py
|
fe71778c0472e11f9e3595aebb9f1e531858b5eb
|
[] |
no_license
|
HugoGrellier/ros_project_bebop
|
7c169635fa5ffe664bdb4155bac212a0a5f7b941
|
d6c8c3ada879747a7b070dc88646d4c3b86d28c5
|
refs/heads/master
| 2023-02-09T10:37:22.209574
| 2020-10-20T18:52:04
| 2020-10-20T18:52:04
| 306,311,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;geometry_msgs;mav_msgs;sensor_msgs;trajectory_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rotors_joy_interface"
PROJECT_SPACE_DIR = "/home/student/Documents/ros_project_bebop/catkin_ws/install"
PROJECT_VERSION = "2.1.1"
|
[
"hugo.grellier@cpe.fr"
] |
hugo.grellier@cpe.fr
|
6291822cb31b4bf8385ea3f7c22d79a5f2a4e13f
|
fb5b1b8dce103dea28be52f7bbd9ea84da2cec81
|
/kolibri/core/discovery/api.py
|
1a8389a92afb0d82f12c4c2108850fecba086a18
|
[
"MIT"
] |
permissive
|
lyw07/kolibri
|
d7f6f92656faa0483cd2cbdf57a3b6c54d52c2f2
|
11e0d01e2bc43850a6dfd4238e6408004449c3dc
|
refs/heads/develop
| 2021-01-02T09:40:04.457976
| 2019-05-20T21:29:27
| 2019-05-20T21:29:27
| 99,255,547
| 1
| 0
|
MIT
| 2018-03-08T18:43:36
| 2017-08-03T16:53:09
|
Python
|
UTF-8
|
Python
| false
| false
| 380
|
py
|
from rest_framework import viewsets
from .models import NetworkLocation
from .serializers import NetworkLocationSerializer
from kolibri.core.content.permissions import CanManageContent
class NetworkLocationViewSet(viewsets.ModelViewSet):
permission_classes = (CanManageContent,)
serializer_class = NetworkLocationSerializer
queryset = NetworkLocation.objects.all()
|
[
"jamalex@gmail.com"
] |
jamalex@gmail.com
|
d06a16fc3cb7202fdd9058558cf45839be272a0b
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/menus/wagtail_hooks_20201030120530.py
|
0719f7961b7e5d8a34a3fef28930d93e6014a64d
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
""" Kategoria zostanie dodana w pasku bocznym u admina"""
from wagtail.contrib.modeladmin.options import ModelAdmin
import ModelAdmin, decorator
class MenuAdmin(ModelAdmin)
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
5f7296a2c63f51459b3ce77f09584dbef613d994
|
76b064a76ffd23b0d0dff57d266abd6a111e9247
|
/Ch04 Counting Elements/MissingIntegers.py
|
819800f4b1f6de69459764150ccb264152e3f8ce
|
[] |
no_license
|
startFromBottom/codility_problems
|
78e0e0fcd914730e0dd8f725dde3dc96be83a255
|
c8e128b5768e8140e658274e7cc8fee95c1bce9a
|
refs/heads/master
| 2022-12-05T12:38:01.595250
| 2020-08-25T11:49:44
| 2020-08-25T11:49:44
| 289,836,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
"""
problem link : https://app.codility.com/programmers/lessons/4-counting_elements/missing_integer/
result : https://app.codility.com/demo/results/trainingETTG9E-G32/
"""
def solution(A):
A = sorted(list(set(A)))
# ex) A = [98, 99, 100] -> 1
if A[0] > 1:
return 1
for i in range(1, len(A)):
# ex) A = [1,2,4,5] -> 3
if A[i - 1] >= 0 and A[i] > A[i - 1] + 1:
return A[i - 1] + 1
# ex) A = [-3,-1, 3] -> 1
elif A[i - 1] <= 0 and A[i] > 1:
return 1
# ex) A = [-3, -1] -> 1
if A[-1] <= 0:
return 1
# ex) A = [1, 2, 3] -> 4
return A[-1] + 1
|
[
"uhh0701@gmail.com"
] |
uhh0701@gmail.com
|
88f092084337bcd4d9073c16381898f674a18ef3
|
81bad22641705683c68ff89f19362ba202891652
|
/napari/plugins/exceptions.py
|
e9979de0d9e5c07e6d09e0f1592bcca062d4cf1c
|
[
"BSD-3-Clause"
] |
permissive
|
sofroniewn/napari
|
ee2a39a1a1132910db6f2a47994671e8138edb51
|
beaa98efe5cf04ba659086e7a514b2ade05277af
|
refs/heads/main
| 2023-07-12T02:46:41.185932
| 2022-09-14T21:57:15
| 2022-09-14T21:57:15
| 154,751,137
| 2
| 3
|
BSD-3-Clause
| 2023-07-01T10:26:45
| 2018-10-25T23:43:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,970
|
py
|
from napari_plugin_engine import PluginError, standard_metadata
from ..utils.translations import trans
def format_exceptions(
plugin_name: str, as_html: bool = False, color="Neutral"
):
"""Return formatted tracebacks for all exceptions raised by plugin.
Parameters
----------
plugin_name : str
The name of a plugin for which to retrieve tracebacks.
as_html : bool
Whether to return the exception string as formatted html,
defaults to False.
Returns
-------
str
A formatted string with traceback information for every exception
raised by ``plugin_name`` during this session.
"""
_plugin_errors = PluginError.get(plugin_name=plugin_name)
if not _plugin_errors:
return ''
from napari import __version__
from ..utils._tracebacks import get_tb_formatter
format_exc_info = get_tb_formatter()
_linewidth = 80
_pad = (_linewidth - len(plugin_name) - 18) // 2
msg = [
trans._(
"{pad} Errors for plugin '{plugin_name}' {pad}",
deferred=True,
pad='=' * _pad,
plugin_name=plugin_name,
),
'',
f'{"napari version": >16}: {__version__}',
]
err0 = _plugin_errors[0]
if err0.plugin:
package_meta = standard_metadata(err0.plugin)
if package_meta:
msg.extend(
[
f'{"plugin package": >16}: {package_meta["package"]}',
f'{"version": >16}: {package_meta["version"]}',
f'{"module": >16}: {err0.plugin}',
]
)
msg.append('')
for n, err in enumerate(_plugin_errors):
_pad = _linewidth - len(str(err)) - 10
msg += ['', f'ERROR #{n + 1}: {str(err)} {"-" * _pad}', '']
msg.append(format_exc_info(err.info(), as_html, color))
msg.append('=' * _linewidth)
return ("<br>" if as_html else "\n").join(msg)
|
[
"noreply@github.com"
] |
sofroniewn.noreply@github.com
|
28fe05503890e1d58e8f3360c2c2d65753534bd2
|
8d6ae21b78b3b40382e21198c571a7957e055be5
|
/July20/Statements/factors.py
|
0711fc04d9ad0d4b19c9061f222235e998ee6070
|
[] |
no_license
|
vj-reddy/PythonBatch1
|
6c1a429e0ac57ea1db7b04af18187e84cd52f2d5
|
b86a5a16b1004d1e4f855a57b019704c71425bbf
|
refs/heads/master
| 2023-03-16T06:05:48.104363
| 2020-10-16T13:55:03
| 2020-10-16T13:55:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
number = int(input("Enter the number: "))
index = 2
while index <= number//2:
if number%index == 0:
print(index)
index = index+1
|
[
"qtdevops@gmail.com"
] |
qtdevops@gmail.com
|
aa6025ca3f596c50a066dfe23bd9e32f3de84ba2
|
ebe422519443dbe9c4acd3c7fd527d05cf444c59
|
/modular_equation.py
|
117dc8a748507e9c28c073df70bd420e73642f56
|
[] |
no_license
|
SaiSudhaV/coding_platforms
|
2eba22d72fdc490a65e71daca41bb3d71b5d0a7b
|
44d0f80104d0ab04ef93716f058b4b567759a699
|
refs/heads/master
| 2023-06-19T18:05:37.876791
| 2021-07-15T18:02:19
| 2021-07-15T18:02:19
| 355,178,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
# cook your dish here
def modular_equation(n, m):
res, tem = [], [1] * (n + 1)
for i in range(2, n + 1):
res.append(tem[m % i])
j = m % i
while j < n + 1:
tem[j] += 1
j += i
return sum(res)
if __name__ == "__main__":
t = int(input())
for i in range(t):
n, m = map(int, input().split())
print(modular_equation(n, m))
|
[
"saisudhavadisina@gmail.com"
] |
saisudhavadisina@gmail.com
|
e6af48993c7c26fd4ed95950dd100596814de47c
|
05ceedee44c66ece52a9d7df9dc8ac2df536557b
|
/monero_glue/messages/StellarGetPublicKey.py
|
0e6305747025ab95cfb7e0ed611cbf5e6901497d
|
[
"MIT"
] |
permissive
|
tsusanka/monero-agent
|
1e48042f7cbb77b3d3f6262c97de71da4f6beb3d
|
526ca5a57714cdca3370021feda3ed5ad3e3ea1a
|
refs/heads/master
| 2020-03-25T11:43:16.967931
| 2018-08-06T15:07:05
| 2018-08-06T15:07:05
| 143,745,130
| 1
| 0
| null | 2018-08-06T15:06:04
| 2018-08-06T15:06:03
| null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
# Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import List
except ImportError:
List = None # type: ignore
class StellarGetPublicKey(p.MessageType):
MESSAGE_WIRE_TYPE = 200
FIELDS = {
1: ('address_n', p.UVarintType, p.FLAG_REPEATED),
2: ('show_display', p.BoolType, 0),
}
def __init__(
self,
address_n: List[int] = None,
show_display: bool = None,
) -> None:
self.address_n = address_n if address_n is not None else []
self.show_display = show_display
|
[
"dusan.klinec@gmail.com"
] |
dusan.klinec@gmail.com
|
3fe2c84bde72e2715727d3d95441bd71841b53b0
|
f5a4f340da539520c60c4bce08356c6f5c171c54
|
/tests/integration/reqs/test_tx.py
|
e8551a73473e6e6ef046ce1ffa96278212f25855
|
[
"ISC",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yyolk/xrpl-py
|
e3935c0a0f488793153ca29e9d71c197cf88f857
|
e5bbdf458ad83e6670a4ebf3df63e17fed8b099f
|
refs/heads/master
| 2023-07-17T03:19:29.239838
| 2021-07-03T01:24:57
| 2021-07-03T01:24:57
| 355,299,041
| 1
| 0
|
ISC
| 2021-04-08T05:29:43
| 2021-04-06T18:57:06
| null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.it_utils import test_async_and_sync
from tests.integration.reusable_values import OFFER
from xrpl.models.requests import Tx
class TestTx(IntegrationTestCase):
@test_async_and_sync(globals())
async def test_basic_functionality(self, client):
response = await client.request(
Tx(
transaction=OFFER.result["hash"],
),
)
self.assertTrue(response.is_successful())
|
[
"noreply@github.com"
] |
yyolk.noreply@github.com
|
50f1b2c6c3f6bec0a574850bc96f48c8683609c8
|
cd0987589d3815de1dea8529a7705caac479e7e9
|
/webkit/WebKitTools/Scripts/webkitpy/tool/steps/build.py
|
10fe1a806ce51955b95b23099c0fc1bcef93962e
|
[] |
no_license
|
azrul2202/WebKit-Smartphone
|
0aab1ff641d74f15c0623f00c56806dbc9b59fc1
|
023d6fe819445369134dee793b69de36748e71d7
|
refs/heads/master
| 2021-01-15T09:24:31.288774
| 2011-07-11T11:12:44
| 2011-07-11T11:12:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,415
|
py
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
from webkitpy.common.system.deprecated_logging import log
class Build(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.build,
Options.quiet,
Options.build_style,
]
def build(self, build_style):
self._tool.executive.run_and_throw_if_fail(self.port().build_webkit_command(build_style=build_style), self._options.quiet)
def run(self, state):
if not self._options.build:
return
log("Building WebKit")
if self._options.build_style == "both":
self.build("debug")
self.build("release")
else:
self.build(self._options.build_style)
|
[
"sdevitt@rim.com"
] |
sdevitt@rim.com
|
2a0b3a93e513b7f33f12ced12c7f3a932ee7c77e
|
7111511ef0cca1bcf84a76d49419fad504d78f6e
|
/test331scaping_DictWriter.py
|
a15348ff338735c3fd8aa09bcf4f71bffd95733e
|
[] |
no_license
|
blockchain99/pythonlecture
|
7800033cd62251b0eec8cf3b93f253175d9cb2e8
|
198e1b6d68db72e4a5009f988c503958ad7ab444
|
refs/heads/master
| 2020-12-12T14:21:53.626918
| 2020-01-15T19:02:07
| 2020-01-15T19:02:07
| 234,148,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,469
|
py
|
import requests
from bs4 import BeautifulSoup
from csv import writer, DictWriter
response = requests.get("https://www.rithmschool.com/blog")
# print(response.text)
with open('test331out.text', 'w') as file:
file.write(response.text)
print("==============================================================")
#go to above url -> open developer tool in chrome.
soup = BeautifulSoup(response.text, "html.parser")
articles = soup.find_all("article")
# print(articles)
print("-------------------csv DicWriter---------------------")
with open("test331blogDict.csv", "w") as file:
headers = ["title", "link", "date"]
csv_dictwriter = DictWriter(file, fieldnames=headers)
csv_dictwriter.writeheader()
for article in articles:
#get_text: access the inner text in an element("a")
# print(article.find("a").get_text()) #anchor tag -> convert to text
a_tag = article.find("a")
title = a_tag.get_text() #anchor tag -> convert to text
url = a_tag['href']
# print(article.find("time")) #<time datetime="2019-10-22" pubdate=""></time>
time = article.find("time")
date = time['datetime']
# print(date) #2019-09-03
# print(title, url, date)
# csv_writer.writerow(title, url, date) #TypeError: writerow() takes exactly one argument (3 given)
csv_dictwriter.writerow({
"title" : title,
"link" : url,
"date" : date
})
|
[
"shinebytheriver@yahoo.com"
] |
shinebytheriver@yahoo.com
|
bc43583f980dc0e9c3943616e02cb5acb73ba03c
|
2695d586778c3a19cad843f14f505f3e534f470d
|
/practice/Dynamic_Programming/Sticker_9465.py
|
cac0d3e543a6e3506d7453dc877d19c9cfa72409
|
[] |
no_license
|
kjh03160/Algorithm_Basic
|
efdb2473186d0aff983a8c0f961d6b86ce66b0d1
|
24842569237db95629cec38ca9ea8e459857c77e
|
refs/heads/master
| 2023-07-14T21:34:29.074373
| 2021-09-11T10:13:00
| 2021-09-11T10:13:00
| 276,395,958
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
# https://www.acmicpc.net/problem/9465
def answer(L):
DP = [[0, 0, 0] for _ in range(len(L[0]))]
DP[0] = [L[0][0], L[1][0], 0]
for i in range(1, len(L[0])):
DP[i][0] = max(DP[i - 1][1], DP[i - 1][2]) + L[0][i] # 현재 위를 선택할 때
DP[i][1] = max(DP[i - 1][0], DP[i - 1][2]) + L[1][i] # 현재 아래를 선택할 때
DP[i][2] = max(DP[i - 1]) # 아무것도 선택 안할 때
return max(DP[-1])
import sys
input = sys.stdin.readline
t = int(input())
T = []
for i in range(t):
x = []
n = int(input())
x.append(list(map(int, input().split())))
x.append(list(map(int, input().split())))
T.append(x)
# print(x)
for i in T:
print(answer(i))
|
[
"kis03160@likelion.org"
] |
kis03160@likelion.org
|
deae57d8466c67c0588f984d9edd9a77a8bac904
|
ed38a50a81aeb206e7c735971bb874eb481e2e82
|
/2A_2/python/funkcje03.py
|
a39d5694b51d6b1c312abac8c1751b4b94a38b3a
|
[] |
no_license
|
lo1cgsan/rok202021
|
e70db45494d891f179c08ddf3ef1ac55de25e76f
|
07af7ea54d61d03f851de675744bada9083ecdca
|
refs/heads/main
| 2023-06-01T03:38:20.534102
| 2021-06-21T12:21:42
| 2021-06-21T12:21:42
| 347,921,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# funkcje02.py
#
def zwieksz(a):
a += 2 # powiększenie wartości o dwa, tj: a = a + 2
print(a)
def zwieksz2(b):
b[0] += 2
print(b)
def main(args):
a = int(input("Podaj liczbę: ")) # zmienna lokalna
print(a)
zwieksz(a)
print(a)
b = [1]; # lista 1-elementowa
b[0] = int(input("Podaj liczbę: "))
zwieksz2(b)
print(b)
return 0
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
|
[
"lo1cgsan@gmail.com"
] |
lo1cgsan@gmail.com
|
3deacc17b483fd79573c192526fc20b8ae69b30f
|
be2a81f03e8a2dac7d356dde7a3ffdcfe3f77e00
|
/providers/com/biomedcentral/migrations/0002_favicon.py
|
f3d7c663a9a58c872689d4481f4d3d62cbe13f76
|
[
"Apache-2.0"
] |
permissive
|
Stevenholloway/SHARE
|
4193bbd3ca50765a24bf21c0cc14438175fbb678
|
b9759106d12c2ff548bad22c4be8650e9f41e61e
|
refs/heads/develop
| 2021-01-21T19:13:35.205983
| 2017-02-23T14:45:46
| 2017-02-23T14:45:46
| 63,431,390
| 0
| 0
| null | 2016-07-15T15:17:45
| 2016-07-15T15:17:44
| null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-10 14:32
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
dependencies = [
('com.biomedcentral', '0001_initial'),
('share', '0018_store_favicons'),
]
operations = [
migrations.RunPython(
code=share.robot.RobotFaviconMigration('com.biomedcentral'),
),
]
|
[
"chriskseto@gmail.com"
] |
chriskseto@gmail.com
|
81d64fbe8e61f3bfd56fd9fe45446ed82ad92b0e
|
3ee5f3f013cbb6ab8620c973c191ccc5e5d47aec
|
/nps_examples_py/setup.py
|
e7d39e367baae191821864d00af970b90237b4ce
|
[] |
no_license
|
nps-ros2/nps-ros2-examples
|
96837de908c7c76089f1eda6c10fb28c23452fdf
|
557244746b370f04288a7de74b4b12991cf331e0
|
refs/heads/master
| 2020-05-07T20:51:12.577750
| 2019-09-10T17:43:42
| 2019-09-10T17:43:42
| 180,880,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
from setuptools import find_packages
from setuptools import setup
package_name = 'nps_examples_py'
setup(
name=package_name,
version='0.6.2',
packages=find_packages(exclude=['test']),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
author='your name',
author_email='you@yours.com',
maintainer='your name',
maintainer_email='you@yours.com',
keywords=['ROS'],
classifiers=[
'Programming Language :: Python'
],
description=(
'Adapted from ROS2 demos.'
),
license='your license',
entry_points={
'console_scripts': [
'listener = nps_examples_py.topics.listener:main',
'talker = nps_examples_py.topics.talker:main'
],
},
)
|
[
"bdallen@nps.edu"
] |
bdallen@nps.edu
|
621c24156116cefc947d5504a6bd59729de62186
|
5d74051293a4740c597abb016870a56a58cecf5b
|
/modules/persons/application/controllers/v1/delete_user_api.py
|
76a6e1ba2a6489b962b624f13f7d278ed3e95ec2
|
[
"BSD-3-Clause"
] |
permissive
|
eduardolujan/hexagonal_architecture_django
|
98e707148745f5a36f166c0584cfba21cca473f0
|
8055927cb460bc40f3a2651c01a9d1da696177e8
|
refs/heads/develop
| 2023-02-21T22:46:20.614779
| 2021-01-16T02:48:37
| 2021-01-16T02:48:37
| 305,813,872
| 5
| 2
|
BSD-3-Clause
| 2021-01-16T18:00:26
| 2020-10-20T19:32:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,043
|
py
|
# -*- coding: utf-8 -*-
from modules.shared.infrastructure.log import LoggerDecorator, PyLoggerService
from modules.shared.domain.http import status as http_status
from modules.shared.domain.requests import Request
from modules.shared.domain.responses import Response
from modules.shared.domain.serializers import SerializerManager
from modules.users.domain.repository import UserRepository
from modules.users.application.delete import UserDeleter as DeleteUserService
@LoggerDecorator(logger=PyLoggerService(file_path=__file__))
class DeleteUserApi:
"""
Delete User API
"""
def __init__(self,
request: Request,
response: Response,
request_serializer_manager: SerializerManager,
user_repository: UserRepository):
# Http objects
self.__request = request
self.__response = response
self.__request_serializer_manager = request_serializer_manager
# Delete user
self.__user_repository = user_repository
def __call__(self, id: int) -> None:
"""
Delete user by id
@param id: user id
@type id: int
"""
try:
delete_user_data = dict(id=id)
delete_user_dto = self.__request_serializer_manager.get_dto_from_dict(delete_user_data)
delete_user = DeleteUserService(self.__user_repository)
delete_user(**delete_user_dto)
response_data = dict(
success=True,
message='All ok',
)
return self.__response(response_data, status=http_status.HTTP_200_OK)
except Exception as err:
self.log.exception(f"Error in {__class__}::post, err:{err}")
response_data = dict(
success=False,
message=f"{err}"
)
if hasattr(err, 'errors'):
response_data.update(errors=err.errors)
return self.__response(response_data, status=http_status.HTTP_400_BAD_REQUEST)
|
[
"eduardo.lujan.p@gmail.com"
] |
eduardo.lujan.p@gmail.com
|
d5b778e30438fb5003e9ab8f5be37d0e342c02cc
|
380712a4d3436b5997cebdaf2d6bdd5227ffef99
|
/06_using_classess.py
|
d142dc0ceb8bdaa3387069624f66825710ea5553
|
[] |
no_license
|
rifqirosyidi/tkinter-basic
|
76b80095a0063a5e184fa12a1fb9193f3ea91fb6
|
75d9ae83af4b555335b95ac177bdd361529550ed
|
refs/heads/master
| 2020-08-30T08:52:43.162243
| 2019-11-01T13:32:50
| 2019-11-01T13:32:50
| 218,325,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
from tkinter import *
class SomeClass:
def __init__(self, master):
self.frame = Frame(master)
self.frame.pack()
self.print_button = Button(self.frame, text="Print Me", command=self.print_message)
self.print_button.pack(side=LEFT)
self.close_button = Button(self.frame, text="Close Me", command=self.frame.quit)
self.close_button.pack(side=LEFT)
def print_message(self):
print("Print Me, Hi You Click Print Me")
root = Tk()
b = SomeClass(root)
root.mainloop()
|
[
"rief.rosyidi@gmail.com"
] |
rief.rosyidi@gmail.com
|
ed7e12c276248207dcadefe405fbe058b20652dd
|
258e47d8e55db0fb12437aa1e7f9860a8bef6623
|
/agilex/configuracion_agilex/doctype/tipo_de_documento/tipo_de_documento_dashboard.py
|
6f3e46469731e8b52ecba7c28fb32c310398f215
|
[
"MIT"
] |
permissive
|
Nirchains/agilex
|
003894bed211c71004f37beb22fd96fc1df6576f
|
04470873abdea5d0023a1ccadf02a932fb3e834b
|
refs/heads/master
| 2021-06-12T11:23:48.027599
| 2021-05-28T21:48:00
| 2021-05-28T21:48:00
| 166,990,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
from frappe import _
def get_data():
return {
'heatmap': False,
'heatmap_message': _('This is based on the Time Sheets created against this project'),
'fieldname': 'tipo_de_documento',
'transactions': [
{
'label': _('Ver expedientes'),
'items': ['Expediente']
}
]
}
|
[
"nirchains@gmail.com"
] |
nirchains@gmail.com
|
51fbd3042c1ab812d5c5f8d4532d7226469186bd
|
d0d845cc5c77ec62cb5f5268527efadc5ff68e12
|
/tests/linsys_test.py
|
01b8acbc61b5dff2c53bf5ee4ce03f50f6297486
|
[
"MIT"
] |
permissive
|
madhavajay/ud953
|
2134a267ccf15ff95d717b9d76633bfd83ea5e40
|
6c101ae15adefa98ad4950275b52ef03419a0f40
|
refs/heads/master
| 2021-01-21T04:44:51.628018
| 2016-06-18T08:58:20
| 2016-06-18T08:58:20
| 50,235,584
| 2
| 6
| null | 2016-06-18T09:02:22
| 2016-01-23T11:29:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,192
|
py
|
# -*- coding: utf-8 -*-
# Author: github.com/madhavajay
"""This is a test for the Linear System Class"""
from decimal import Decimal, getcontext
from vector import Vector
from line import Line
from plane import Plane
from linsys import LinearSystem
# set the decimal precision
getcontext().prec = 30
def test_linsys_basepoint():
"""Test Linear System Base Point"""
plane_1 = Plane(Vector([1, 1, 1]), 1)
plane_2 = Plane(Vector([0, 1, 0]), 2)
plane_3 = Plane(Vector([1, 1, -1]), 3)
plane_4 = Plane(Vector([1, 0, -2]), 2)
system = LinearSystem([plane_1, plane_2, plane_3, plane_4])
system[0] = plane_1
vector1 = Vector([1, 2])
constant = 2
answer = Vector([2, 0])
line = Line(vector1, constant)
basepoint = line.basepoint
assert basepoint == answer
def test_linsys_swap_row():
"""Test Linear System Swap Row"""
plane_1 = Plane(Vector([1, 1, 1]), 1)
plane_2 = Plane(Vector([0, 1, 0]), 2)
plane_3 = Plane(Vector([1, 1, -1]), 3)
plane_4 = Plane(Vector([1, 0, -2]), 2)
lin_sys = LinearSystem([plane_1, plane_2, plane_3, plane_4])
lin_sys.swap_rows(0, 1)
assert lin_sys[0] == plane_2 # swapped
assert lin_sys[1] == plane_1 # swapped
assert lin_sys[2] == plane_3
assert lin_sys[3] == plane_4
lin_sys.swap_rows(1, 3)
assert lin_sys[0] == plane_2
assert lin_sys[1] == plane_4 # swapped
assert lin_sys[2] == plane_3
assert lin_sys[3] == plane_1 # swapped
lin_sys.swap_rows(3, 1)
assert lin_sys[0] == plane_2
assert lin_sys[1] == plane_1 # swapped
assert lin_sys[2] == plane_3
assert lin_sys[3] == plane_4 # swapped
def test_linsys_multiply_row():
"""Test Linear System Multiply Coefficient and Row"""
plane_1 = Plane(Vector([1, 1, 1]), 1)
plane_2 = Plane(Vector([0, 1, 0]), 2)
plane_3 = Plane(Vector([1, 1, -1]), 3)
plane_4 = Plane(Vector([1, 0, -2]), 2)
# same as the end of the last test
lin_sys = LinearSystem([plane_2, plane_1, plane_3, plane_4])
lin_sys.multiply_coefficient_and_row(1, 0)
assert lin_sys[0] == plane_2
assert lin_sys[1] == plane_1
assert lin_sys[2] == plane_3
assert lin_sys[3] == plane_4
lin_sys.multiply_coefficient_and_row(-1, 2)
new_plane_3 = Plane(Vector([-1, -1, 1]), -3)
assert lin_sys[0] == plane_2
assert lin_sys[1] == plane_1
assert lin_sys[2] == new_plane_3
assert lin_sys[3] == plane_4
lin_sys.multiply_coefficient_and_row(10, 1)
new_plane_1 = Plane(Vector([10, 10, 10]), 10)
assert lin_sys[0] == plane_2
assert lin_sys[1] == new_plane_1
assert lin_sys[2] == new_plane_3
assert lin_sys[3] == plane_4
def test_linsys_multiply_row_add():
"""Test Linear System Multiply Times Row and add to Row"""
plane_2 = Plane(Vector([0, 1, 0]), 2)
new_plane_1 = Plane(Vector([10, 10, 10]), 10)
new_plane_3 = Plane(Vector([-1, -1, 1]), -3)
plane_4 = Plane(Vector([1, 0, -2]), 2)
# same as the end of the last test
lin_sys = LinearSystem([plane_2, new_plane_1, new_plane_3, plane_4])
# multiply the first row by 0 and add to the second row
# this should have no affect
lin_sys.add_multiple_times_row_to_row(0, 0, 1)
assert lin_sys[0] == plane_2
assert lin_sys[1] == new_plane_1
assert lin_sys[2] == new_plane_3
assert lin_sys[3] == plane_4
# multiply the first row by 1 and add it to the second row
lin_sys.add_multiple_times_row_to_row(1, 0, 1)
plane_1_added = Plane(Vector([10, 11, 10]), 12)
assert lin_sys[0] == plane_2
assert lin_sys[1] == plane_1_added
assert lin_sys[2] == new_plane_3
assert lin_sys[3] == plane_4
# multiply the second row by -1 and add to the first row
lin_sys.add_multiple_times_row_to_row(-1, 1, 0)
plane_2_subtracted = Plane(Vector([-10, -10, -10]), -10)
assert lin_sys[0] == plane_2_subtracted
assert lin_sys[1] == plane_1_added
assert lin_sys[2] == new_plane_3
assert lin_sys[3] == plane_4
def test_triangular_form():
"""Test for Triangular Form"""
plane_1 = Plane(Vector([0, 1, 1]), 1)
plane_2 = Plane(Vector([1, -1, 1]), 2)
plane_3 = Plane(Vector([1, 2, -5]), 3)
lin_sys = LinearSystem([plane_1, plane_2, plane_3])
triangular = lin_sys.compute_triangular_form()
assert triangular[0] == Plane(Vector([1, -1, 1]), 2)
assert triangular[1] == Plane(Vector([0, 1, 1]), 1)
assert triangular[2] == Plane(Vector([0, 0, -9]), -2)
def test_rref_form():
"""Test for RREF Reduced Row Echelon Form"""
plane_1 = Plane(Vector([0, 1, 1]), 1)
plane_2 = Plane(Vector([1, -1, 1]), 2)
plane_3 = Plane(Vector([1, 2, -5]), 3)
lin_sys = LinearSystem([plane_1, plane_2, plane_3])
rref = lin_sys.compute_rref_form()
assert rref[0] == Plane(Vector([1, 0, 0]), Decimal(23) / Decimal(9))
assert rref[1] == Plane(Vector([0, 1, 0]), Decimal(7) / Decimal(9))
assert rref[2] == Plane(Vector([0, 0, 1]), Decimal(2) / Decimal(9))
def test_no_consistent_solutions():
"""Test the system has no solutions"""
plane_1 = Plane(Vector([1, 1, -1]), 2)
plane_2 = Plane(Vector([2, 3, -1]), 0)
plane_3 = Plane(Vector([3, 4, -2]), 1)
lin_sys_1 = LinearSystem([plane_1, plane_2, plane_3])
solutions_1 = lin_sys_1.system_solutions()
assert solutions_1 == 'system has no consistent solutions'
def test_infinite_solutions():
"""Test the system has infinite solutions"""
plane_4 = Plane(Vector([1, 1, 1]), 3)
plane_5 = Plane(Vector([2, 4, 1]), 8)
plane_6 = Plane(Vector([6, 10, 4]), 22)
lin_sys_2 = LinearSystem([plane_4, plane_5, plane_6])
solutions_2 = lin_sys_2.system_solutions()
assert solutions_2 == 'system has infinite solutions'
def test_single_solution():
"""Test the system has a single solution"""
plane_7 = Plane(Vector([1, 1, 1]), 1)
plane_8 = Plane(Vector([0, 1, 0]), 2)
plane_9 = Plane(Vector([1, 1, -1]), 3)
plane_10 = Plane(Vector([1, 0, -2]), 2)
lin_sys_3 = LinearSystem([plane_7, plane_8, plane_9, plane_10])
solutions_3 = lin_sys_3.system_solutions()
assert solutions_3 == 'solution is: a = 0.000, b = 2.000, c = -1.000'
|
[
"me@madhavajay.com"
] |
me@madhavajay.com
|
41a53bbfa73c42d13714aa95f8a6f780a4bd9f0f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/299/66464/submittedfiles/testes.py
|
83cd0f4164df46ab44cfb4ae691bbced548efa8a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
# -*- coding: utf-8 -*-
print("Nikolas Sivini Borges Galvão")
print("20")
print(11+1037)
print((9*35+160)/5)
print(3.14159*5**2*3)
print((2+5)**2)
#add
x=0
y=2
while x<100:
x=x+y
print(x)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
48a03867f4833bc8161bc39682ab3974887a8612
|
d0fe1112743cc36b2089b695fb7c527a3b8bb9f7
|
/LifeCycleAnalyzer/Simulators/__init__.py
|
da272a632315bffa45e44941c0af211cb81b23f6
|
[
"MIT"
] |
permissive
|
vd1371/GIAMS
|
bfff465c69f02a5dd1a2544bfe7170087a8e181d
|
cf8b7cb028b6cc6cd7facd6f45dd288067e9ff65
|
refs/heads/master
| 2023-04-14T07:09:00.801898
| 2022-08-01T01:16:06
| 2022-08-01T01:16:06
| 278,012,609
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
from .MainSimulator import MainSimulator
from .DummyRiskAnalyzer import DummyRiskAnalyzer
from .EnvSimulator import EnvSimulator
|
[
"vd1371@gmail.com"
] |
vd1371@gmail.com
|
f421a6af40ae5171cceff2d4962cb7c99889310d
|
fe87192240c3d5ffe7deb5c9f2b7f02f347a2c00
|
/peptide-permable/analyze_result.py
|
69ef531b3d8aa7f477fdaf44fe9133b385513008
|
[] |
no_license
|
leexa90/dl_dev_course
|
ccfae0bbef4790b0b75fc9da0679f23c1da3bcf5
|
10a9e826cd7e752ce607deadc63826b313de39d2
|
refs/heads/master
| 2022-08-17T05:07:35.280305
| 2017-12-07T09:04:14
| 2017-12-07T09:04:14
| 105,847,852
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,368
|
py
|
import pandas as pd
import os
files = sorted([x for x in os.listdir('results') if ('.csv' in x and 'results5' in x)])
dict_files = {}
data = pd.read_csv('results/'+files[0])
data['diff'] = 0
dict_files[0] = files[0]
counter =1
for i in files[1:]:
print i
counter += 1
dict_files[counter] = i
temp = pd.read_csv('results/'+i)
temp['diff'] = counter
data = pd.concat([data,temp])
import numpy as np
import matplotlib.pyplot as plt
size= np.log10(data.prob)
plt.hist(size,bins=100)
dictt_inv = {0: 'A', 1: 'C', 2: 'E', 3: 'D', 4: 'G', 5: 'F', 6: 'I', 7: 'H', 8: 'K', 9: 'M',
10: 'L', 11: 'N', 12: 'Q', 13: 'P', 14: 'S', 15: 'R', 16: 'T', 17: 'W', 18: 'V', 19: 'Y'}
def string(arr):
result = ''
for i in arr:
result += dictt_inv[i]
return result
p53_seq='ETFSDLWKLLPEN'
p53_seq_vec = np.array([2., 16., 5., 14., 3., 10., 17., 8., 10., 10., 13., 2., 11.])
data['var'] = map(np.std, np.array(data[['fold' +str(x) for x in range(0,60)]]))
data['var'] = data['var']/(59**.5)
best = data.sort_values('prob')[list(data.keys()[0:13])+['diff','prob']].reset_index(drop=True)
def get_diff(x):
return np.argsort(p53_seq_vec != x[[str(y) for y in range(13)]].values)[-3:]
for i in range(1,10):
print p53_seq
#print best.iloc[-i][range(0,13)].values, best.iloc[-i].prob,'\n'
#print np.argsort(p53_seq_vec != best.iloc[-i][range(0,13)].values)[-3:],'\n'
print string(best.iloc[-i][range(0,13)].values), best.iloc[-i].prob,'\n'
#best['prob'] = np.log10(best['prob']+0.01)
for diff in pd.unique(data[data['prob']-data['var'] >= 0.60]['diff']):
above_30 = data[data['prob']-data['var'] >= 0.60]
above_30 = above_30[above_30['diff']== diff]
score = np.zeros((13,20))
float_formatter = lambda x: "%.3f" % x
np.set_printoptions(formatter={'float_kind':float_formatter})
for aa in range(0,20):
for pos in range(0,13):
score[pos,aa] = np.sum(above_30[above_30[str(pos)] == aa].prob)/np.sum(above_30.prob)
import matplotlib as mpl
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch
from matplotlib.font_manager import FontProperties
fp = FontProperties(family="monospace", weight="bold")
globscale = 1.35
LETTERS = {
"A" : TextPath((-0.35, 0), "A", size=1, prop=fp),
"C" : TextPath((-0.35, 0), "C", size=1, prop=fp),
"E" : TextPath((-0.35, 0), "E", size=1, prop=fp),
"D" : TextPath((-0.35, 0), "D", size=1, prop=fp) ,
"G" : TextPath((-0.35, 0), "G", size=1, prop=fp),
"F" : TextPath((-0.35, 0), "F", size=1, prop=fp),
"I" : TextPath((-0.35, 0), "I", size=1, prop=fp),
"H" : TextPath((-0.35, 0), "H", size=1, prop=fp) ,
"K" : TextPath((-0.35, 0), "K", size=1, prop=fp),
"M" : TextPath((-0.35, 0), "M", size=1, prop=fp),
"L" : TextPath((-0.35, 0.003), "L", size=1, prop=fp),
"N" : TextPath((-0.35, 0), "N", size=1, prop=fp) ,
"Q" : TextPath((-0.35, 0.01), "Q", size=1, prop=fp),
"P" : TextPath((-0.35, 0), "P", size=1, prop=fp),
"S" : TextPath((-0.35, 0.01), "S", size=1, prop=fp),
"R" : TextPath((-0.35, 0), "R", size=1, prop=fp),
"T" : TextPath((-0.35, 0), "T", size=1, prop=fp),
"W" : TextPath((-0.35, 0), "W", size=1, prop=fp),
"V" : TextPath((-0.35, 0), "V", size=1, prop=fp),
"Y" : TextPath((-0.35, 0), "Y", size=1, prop=fp) }
COLOR_SCHEME = {'A': 'grey', 'C': 'lightBlue', 'E': 'red', 'D': 'red',
'G': 'grey', 'F': 'green', 'I': 'grey', 'H': 'blue', 'K': 'blue',
'M': 'grey', 'L': 'grey', 'N': 'lightBlue', 'Q': 'lightBlue', 'P': 'orange',
'S': 'lightBlue', 'R': 'blue', 'T': 'lightBlue', 'W': 'green', 'V': 'grey',
'Y': 'green'}
def letterAt(letter, x, y, yscale=1, ax=None):
text = LETTERS[letter]
t = mpl.transforms.Affine2D().scale(1*globscale, yscale*globscale) + \
mpl.transforms.Affine2D().translate(x,y) + ax.transData
p = PathPatch(text, lw=0, fc=COLOR_SCHEME[letter], transform=t)
if ax != None:
ax.add_artist(p)
return p
def plot(thres=0.05,name='temp'):
fig, ax = plt.subplots(figsize=(10,8))
for i in range(0,13):
y = 0
for aa in np.argsort(score[i,:]):#for aa in range(0,20)[::-1]:
temp_score = score[i,aa]
if temp_score >= thres:
letter = dictt_inv[aa]
a=letterAt(letter,i+1,y,temp_score,ax)
y += temp_score
plt.xlim((0,14))
plt.ylim((-0.1,1))
plt.title(dict_files[diff]+',num samples:'+str(len(above_30)))
plt.xlabel('peptide position')
plt.ylabel('probabilities')
plt.tight_layout()
plt.xticks(range(1,14),['E1', 'T2', 'F3', 'S4', 'D5', 'L6', 'W7', 'K8', 'L9', 'L10', 'P11', 'E12', 'N13'])
for i in range(0,13):
a=letterAt(p53_seq[i],i+1,-0.1,0.09,ax)
plt.plot((0,14),(0,0),color='black',linewidth='5')
plt.savefig(name+'.png',dpi=300)
#plt.show()
plt.close()
for i in (5,):
plot(i*1.0/100,'Fig_60percent%s_thres%s_var'%(diff,i))
|
[
"lee.x.a90@gmail.com"
] |
lee.x.a90@gmail.com
|
98adc2f86d297b8a79c2eb2c4ad0528d7e435bc9
|
a056e699bb03614563dc9090c4c3bc65479fc2d9
|
/buffered_normal.py
|
052155b79cc21ef651f9092a14a4e1be4c1a18a0
|
[
"BSD-3-Clause"
] |
permissive
|
iandees/marblecutter
|
278890faaf7a4d7b604bf2520aff8adb3c5d1b95
|
779b9c597bbd69ca3044f2c246721dc4eeeef61d
|
refs/heads/mapzen
| 2021-01-25T04:36:13.210028
| 2017-06-15T19:35:53
| 2017-06-15T19:35:53
| 93,455,208
| 0
| 0
| null | 2017-06-05T23:13:17
| 2017-06-05T23:13:17
| null |
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
# noqa
# coding=utf-8
from __future__ import division
import logging
from StringIO import StringIO
import numpy as np
from PIL import Image
from normal import render_normal
LOG = logging.getLogger(__name__)
BUFFER = 4
COLLAR = 2
CONTENT_TYPE = 'image/png'
EXT = 'png'
NAME = 'Buffered Normal'
def render(tile, (data, buffers)): # noqa
buffers = map(lambda x: max(0, x - COLLAR), buffers)
data = data[0][buffers[3]:data.shape[1] - buffers[1],
buffers[0]:data.shape[2] - buffers[2]]
if buffers[0] == 0:
# empty left
cols = data[:, :COLLAR]
data = np.hstack((cols, data))
pass
if buffers[2] == 0:
# empty right
cols = data[:, -COLLAR:]
data = np.hstack((data, cols))
pass
if buffers[3] == 0:
# empty top buffer; repeat
rows = data[:COLLAR]
data = np.vstack((rows, data))
buffers[3] = COLLAR
if buffers[1] == 0:
# empty bottom buffer; repeat
data = np.vstack((data, rows))
buffers[1] = COLLAR
imgarr = render_normal(tile, data, buffers)
out = StringIO()
im = Image.fromarray(imgarr, 'RGBA')
im.save(out, 'png')
return (CONTENT_TYPE, out.getvalue())
|
[
"seth@mojodna.net"
] |
seth@mojodna.net
|
855f7cf66e2f45a2fe4d5bc6c25db3575a14ec1d
|
a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea
|
/airflow/providers/amazon/aws/example_dags/example_redshift_to_s3.py
|
8116e02dc165ce82f017a21ede850dece6254ec9
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ishiis/airflow
|
4305794e36b611d01f49e3f2401be3dc49782670
|
292440d54f4db84aaf0c5a98cf5fcf34303f2fa8
|
refs/heads/master
| 2022-07-30T00:51:28.806940
| 2022-07-14T12:07:11
| 2022-07-14T12:07:11
| 209,801,072
| 1
| 0
|
Apache-2.0
| 2019-09-20T13:47:26
| 2019-09-20T13:47:26
| null |
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from os import getenv
from airflow import DAG
from airflow.providers.amazon.aws.transfers.redshift_to_s3 import RedshiftToS3Operator
S3_BUCKET_NAME = getenv("S3_BUCKET_NAME", "s3_bucket_name")
S3_KEY = getenv("S3_KEY", "s3_key")
REDSHIFT_TABLE = getenv("REDSHIFT_TABLE", "redshift_table")
with DAG(
dag_id="example_redshift_to_s3",
start_date=datetime(2021, 1, 1),
schedule_interval=None,
catchup=False,
tags=['example'],
) as dag:
# [START howto_transfer_redshift_to_s3]
task_transfer_redshift_to_s3 = RedshiftToS3Operator(
task_id='transfer_redshift_to_s3',
s3_bucket=S3_BUCKET_NAME,
s3_key=S3_KEY,
schema='PUBLIC',
table=REDSHIFT_TABLE,
)
# [END howto_transfer_redshift_to_s3]
|
[
"noreply@github.com"
] |
ishiis.noreply@github.com
|
469e579b0a396a30e46ed93bc267b76bed2218c9
|
b088d5dc4321f9f145c7bceb20a0b9479b374c65
|
/level1&2/42883.py
|
8169482655042d081bd9380cf7217e0935b0e85c
|
[] |
no_license
|
heojungeun/codingtestPractice
|
55bfc2b13791f5cb3133b0815991a0c696f8482c
|
65d668bf6df82967f89d4ec4eb3a1e11de603729
|
refs/heads/master
| 2022-09-17T00:34:05.887237
| 2020-05-30T06:45:30
| 2020-05-30T06:45:30
| 261,093,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
def solution(number, k):
# import itertools
# dig = []
# for i in range(0,len(number)):
# dig.append(i)
# dig = list(itertools.combinations(dig,k))
# lenn = len(number)
# arr = []
# for x in dig:
# tmp = ''
# for i in range(lenn):
# if i in x:
# continue
# tmp += number[i]
# arr.append(int(tmp))
# answer = str(max(arr))
st = []
for x in number:
if k==0 or not st:
st.append(x)
else:
if st[-1] < x:
tmp = reversed(st)
for e in tmp:
if e < x:
st.pop()
k -= 1
if k==0 or not st:
st.append(x)
break
else:
st.append(x)
break
else:
st.append(x)
while k > 0:
st.pop()
k -= 1
answer = "".join(st)
return answer
def standardsolution(number,k):
st = []
for i, num in enumerate(number):
while st and k>0 and st[-1]<num:
st.pop()
k -= 1
if k==0:
st += number[i:]
break
st.append(num)
st = st[:-k] if k>0 else st
return "".join(st)
n = "12"
nk = 1
print(solution(n,nk))
|
[
"heocube@naver.com"
] |
heocube@naver.com
|
33864e4c4e10988ab56cdf4ba1f15fbbd344f0e0
|
d2f50124ff3bec70b9b3139ecb063b06e526781d
|
/despachos_mercancias/migrations/0012_auto_20170113_1639.py
|
17429c15b3153edf6a6c8081aaad3e0199999d20
|
[] |
no_license
|
odecsarrollo/odecopack-componentes
|
e8d993f089bf53bbf3c53d1265e70ac5c06b59b8
|
b583a115fb30205d358d97644c38d66636b573ff
|
refs/heads/master
| 2022-12-12T00:33:02.874268
| 2020-08-13T18:45:01
| 2020-08-13T18:45:01
| 189,262,705
| 0
| 0
| null | 2022-12-08T11:23:46
| 2019-05-29T16:37:21
|
Python
|
UTF-8
|
Python
| false
| false
| 618
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-13 21:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('despachos_mercancias', '0011_enviotransportadoratcc_ciudad'),
]
operations = [
migrations.AlterField(
model_name='enviotransportadoratcc',
name='ciudad',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='mis_envios_tcc', to='geografia_colombia.Ciudad'),
),
]
|
[
"fabio.garcia.sanchez@gmail.com"
] |
fabio.garcia.sanchez@gmail.com
|
6d9ad99200ba4e3cdf7b88a7da2787de0df12c8b
|
afde521f50b6be4be9e5c3071ed6459419fb5edb
|
/env/lib/python3.6/site-packages/pyecharts/charts/scatter3D.py
|
35198ac9849fd58ec18641bff9956994438195d7
|
[] |
no_license
|
guhongcheng/myblog
|
ddef4aa0888dedfb70933b34bfd0c5da5bb5d5cd
|
b11f5ee26125b9551b1f27814b96a845dd4e6a76
|
refs/heads/master
| 2022-12-18T20:26:46.596014
| 2018-07-26T02:46:07
| 2018-07-26T02:46:07
| 134,683,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
# coding=utf-8
from pyecharts.chart import Chart
import pyecharts.constants as constants
class Scatter3D(Chart):
"""
<<< 3D 散点图 >>>
"""
def __init__(self, title="", subtitle="", **kwargs):
kwargs["renderer"] = constants.CANVAS_RENDERER
super(Scatter3D, self).__init__(title, subtitle, **kwargs)
self._js_dependencies.add("echartsgl")
def add(self, *args, **kwargs):
self.__add(*args, **kwargs)
def __add(self, name, data, grid3d_opacity=1, **kwargs):
"""
:param name:
系列名称,用于 tooltip 的显示,legend 的图例筛选
:param data:
数据项,数据中,每一行是一个『数据项』,每一列属于一个『维度』
:param grid3d_opacity:
3D 笛卡尔坐标系组的透明度(点的透明度),默认为 1,完全不透明。
:param kwargs:
"""
kwargs.update(
xaxis3d_type="value", yaxis3d_type="value", zaxis3d_type="value"
)
chart = self._get_all_options(**kwargs)
self._option.get("legend")[0].get("data").append(name)
self._option.update(
xAxis3D=chart["xaxis3D"],
yAxis3D=chart["yaxis3D"],
zAxis3D=chart["zaxis3D"],
grid3D=chart["grid3D"],
)
self._option.get("series").append(
{
"type": "scatter3D",
"name": name,
"data": data,
"label": chart["label"],
"itemStyle": {"opacity": grid3d_opacity},
}
)
self._config_components(**kwargs)
|
[
"1051712303@qq.com"
] |
1051712303@qq.com
|
dba87a9b580d39b7e8694daed7b9a5cb06a8db56
|
998a180e5c974d89c9ad33532d4fd33298c806a4
|
/chapter1_arrays_and_strings/palindrome_permutation_1_4.py
|
9fff45e7b07a41cdbf04a5422ddc172fcfa0d501
|
[] |
no_license
|
Taycode/cracking-the-coding-interview-solutions
|
c542a047a37b5af406469ba3f912b4bbdc326b05
|
0c2dcc4d4558dc4766b5ddcce470a60986eb39a6
|
refs/heads/master
| 2023-02-08T16:31:59.683541
| 2020-12-27T16:59:12
| 2020-12-27T16:59:12
| 324,807,557
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
"""
Given a string, write a function to check if it is a permutation of a palin
drome. A palindrome is a word or phrase that is the same forwards and backwards. A permutation
is a rearrangement of letters. The palindrome does not need to be limited to just dictionary words.
EXAMPLE
Input: Tact Coa
Output: True (permutations: "taco cat", "atco eta", etc.)
"""
def palindrome_permutation(string):
"""
:param string: string
:return: boolean
"""
the_dict = {}
string = string.replace(' ', '')
string = string.lower()
for _ in string:
if _ not in the_dict.keys():
the_dict[_] = 1
else:
the_dict[_] += 1
values = list(the_dict.values())
length = len(string)
if length % 2 == 0:
for _ in values:
if _ % 2 != 0:
return False
else:
return True
else:
count = 0
for _ in values:
if _ % 2 != 0:
count += 1
if count > 1:
return False
else:
return True
print(palindrome_permutation('Tact Coa'))
|
[
"tay2druh@gmail.com"
] |
tay2druh@gmail.com
|
ce37c43e76430750154401851a00fca84140d317
|
abd9537f8b90a990e195ded5f9fafdcc108d2a48
|
/swea/d4/1486/1486_shelf_powerset.py
|
487056a865e5b4c70509b2d17d0851b107ba7e2c
|
[] |
no_license
|
ohdnf/algorithms
|
127171744631406c1d08cc2583aa569a094fa2cd
|
6f286753dab827facc436af4f2130f11dad2d44f
|
refs/heads/master
| 2023-08-09T11:19:56.445351
| 2021-08-31T13:11:46
| 2021-08-31T13:11:46
| 236,180,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
import sys
sys.stdin = open('input.txt')
t = int(input())
for test_case in range(1, t+1):
n, b = map(int, input().split()) # 점원 수, 선반 높이
clerks = list(map(int, input().split()))
# clerks.sort(reverse=True)
heights = list()
for i in range(1<<n):
tmp = 0
for j in range(n+1):
if i & (1<<j):
tmp += clerks[j]
if tmp >= b:
heights.append(tmp - b)
heights.sort()
print('#{} {}'.format(test_case, heights[0]))
|
[
"jupyohong7@gmail.com"
] |
jupyohong7@gmail.com
|
7f2df471b94bb54376e154486267ebd828d91fe3
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_deepens.py
|
5ee1f3387e1e9e28ab6fb75803b9751b7df84712
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
#calss header
class _DEEPENS():
def __init__(self,):
self.name = "DEEPENS"
self.definitions = deepen
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['deepen']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
809cb39be1c498b2dc3381f28cb369e0fa000dd1
|
d404fb72dee51f8c2791bf21cc5d9ee91d2d6a45
|
/ch03_if/0118_grade.py
|
994ed4ab4c6a92af05a7cb316a7605ce19cac7b7
|
[] |
no_license
|
kangwonlee/18pf_welcome_template
|
6c5c997e7aac08d8a7d94d4a146037c2d3b4a813
|
9279559c7cde37a18b8e1d5e596f161087493218
|
refs/heads/master
| 2021-04-12T07:52:29.577562
| 2018-03-18T21:29:28
| 2018-03-18T21:29:28
| 125,769,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
score = int(input("성적을 입력하시오: "))
if score >= 90:
print("학점 A")
elif score >= 80:
print("학점 B")
elif score >= 70:
print("학점 C")
elif score >= 60:
print("학점 D")
else:
print("학점 F")
|
[
"kangwon.lee@kpu.ac.kr"
] |
kangwon.lee@kpu.ac.kr
|
12e01a17bd141b1e82d95006b641e5bb0343d272
|
484c462c29e3c2f8ac280b79c11db6982c6a8ca6
|
/neurolab-0.2.3/neurolab/__init__.py
|
1c9445506114073da6f75ac4c16ca8634f996e27
|
[] |
no_license
|
thelma1944/Python_Stuff
|
b5fa53bf008bb5e865204201b144fe20e7f87565
|
077131a2c9f247396dca86fdf18933d38ae8d501
|
refs/heads/master
| 2021-06-05T12:25:35.779070
| 2020-10-03T18:20:16
| 2020-10-03T18:20:16
| 16,077,931
| 0
| 1
| null | 2021-03-26T00:30:14
| 2014-01-20T17:36:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
# -*- coding: utf-8 -*-
"""
Neurolab is a simple and powerful Neural Network Library for Python.
Contains based neural networks, train algorithms and flexible framework
to create and explore other neural network types.
:Features:
- Pure python + numpy
- API like Neural Network Toolbox (NNT) from MATLAB
- Interface to use train algorithms form scipy.optimize
- Flexible network configurations and learning algorithms. You may change: train, error, initializetion and activation functions
- Unlimited number of neural layers and number of neurons in layers
- Variety of supported types of Artificial Neural Network and learning algorithms
:Example:
>>> import numpy as np
>>> import neurolab as nl
>>> # Create train samples
>>> input = np.random.uniform(-0.5, 0.5, (10, 2))
>>> target = (input[:, 0] + input[:, 1]).reshape(10, 1)
>>> # Create network with 2 inputs, 5 neurons in input layer and 1 in output layer
>>> net = nl.net.newff([[-0.5, 0.5], [-0.5, 0.5]], [5, 1])
>>> # Train process
>>> err = net.train(input, target, show=15)
Epoch: 15; Error: 0.150308402918;
Epoch: 30; Error: 0.072265865089;
Epoch: 45; Error: 0.016931355131;
The goal of learning is reached
>>> # Test
>>> net.sim([[0.2, 0.1]]) # 0.2 + 0.1
array([[ 0.28757596]])
:Links:
- `Home Page <http://code.google.com/p/neurolab/>`_
- `PyPI Page <http://pypi.python.org/pypi/neurolab>`_
- `Documentation <http://packages.python.org/neurolab/>`_
- `Examples <http://packages.python.org/neurolab/example.html>`_
"""
import net
from tool import load
__version__ = '0.2.3'
# Development Status :: 1 - Planning, 2 - Pre-Alpha, 3 - Alpha,
# 4 - Beta, 5 - Production/Stable
__status__ = '4 - Beta'
|
[
"thelma1944@gmail.com"
] |
thelma1944@gmail.com
|
14575f006fa799ab2f3698289711bf9ad024a62a
|
86813bf514f3e0257f92207f40a68443f08ee44b
|
/0338 比特位计数/0338 比特位计数.py
|
96ff96f4ec3d4463f1b79f1a90552912d3e21da3
|
[] |
no_license
|
Aurora-yuan/Leetcode_Python3
|
4ce56679b48862c87addc8cd870cdd525c9d926c
|
720bb530850febc2aa67a56a7a0b3a85ab37f415
|
refs/heads/master
| 2021-07-12T13:23:19.399155
| 2020-10-21T03:14:36
| 2020-10-21T03:14:36
| 212,998,500
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
#label: math/dynamic programming difficulty: medium
"""
思路一:
麻瓜思想,每个数转成二进制计数
"""
class Solution:
def countBits(self, num: int) -> List[int]:
res = list()
for i in range(num+1):
res.append(bin(i).count('1'))
return res
“”“
思路二:
《剑指Offer》里提到的结论:如果一个数 i 和 i - 1 做与运算,那么 i 的二进制表示形式中的最右边一个 1 会变成0 。
利用动态规划的思想。
如果我们已经知道了 i & i -1 这个数字的1的个数cnt,那么根据上面的提到的结论, i 这个数字中 1 的个数就是 cnt + 1。
所以不难得到状态转移方程: dp[i] = dp[i & (i - 1)] + 1
”“”
class Solution(object):
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
dp = [0 for i in range(num + 1)]
for i in range(1, num + 1):
dp[i] = dp[i & (i - 1)] + 1
return dp
|
[
"noreply@github.com"
] |
Aurora-yuan.noreply@github.com
|
0eefce36ea159a3ee01e3a8648d44a932052a570
|
679e31fe16e92e1d0bc3448c25845103f19a622f
|
/web_flask/3-python_route.py
|
96ec3a910bab88bd161ba28e53c1573167ff9a05
|
[] |
no_license
|
Gikaro/AirBnB_clone_v2
|
ab7d63ce3e942253ded54d30d68c631eb055308c
|
5744e747f2fdb722d7e6843bd1e4a67abf9c8243
|
refs/heads/master
| 2023-03-20T01:34:49.172584
| 2020-09-02T23:22:39
| 2020-09-02T23:22:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
#!/usr/bin/python3
"""WebFlask module"""
from flask import Flask
from os import environ
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hello_route():
"""Display Hello HBNB"""
return 'Hello HBNB!'
@app.route('/hbnb', strict_slashes=False)
def hbnb_route():
"""Display HBNB"""
return 'HBNB'
@app.route('/c/<text>', strict_slashes=False)
def c_route(text):
"""Display text"""
real_text = text.replace('_', ' ')
return 'C {}'.format(real_text)
@app.route('/python/', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def python_route(text='is cool'):
    """Respond with 'Python ' followed by *text* (default 'is cool'),
    underscores shown as spaces."""
    return 'Python {}'.format(text.replace('_', ' '))
if __name__ == '__main__':
    # advertise this file to the flask CLI, then serve on all interfaces
    environ['FLASK_APP'] = __file__
    app.run(host='0.0.0.0', port=5000)
|
[
"sebri.issam@gmail.com"
] |
sebri.issam@gmail.com
|
1e80f050379c3620ecae456eef6480ff547b77d4
|
13f5c66af02a64aa8c5d988e9560b82bcf058fd0
|
/learning_sql/views.py
|
8cec81152d26fe1d92d386c49f799cf9269d320b
|
[] |
no_license
|
heitorchang/reading-list
|
a1090b969d0f16cbc7c0e371671e85dca0bde201
|
3dcfd68cb02179e75216ff459fda693ec1fb8684
|
refs/heads/master
| 2023-04-27T03:04:28.122341
| 2023-04-21T14:04:20
| 2023-04-21T14:04:20
| 67,825,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
# p. 249
def create_totals_vw():
    """Create the customer_totals_vw view (book p. 249).

    One row per customer: id, type code, resolved display name
    (business name, or individual first+last), active-account count
    and total available balance.

    NOTE(review): relies on a module-level connection ``cnx`` defined
    elsewhere in this file — confirm it is open before calling.
    """
    cursor = cnx.cursor()
    try:
        cursor.execute("""
        CREATE VIEW customer_totals_vw
        (cust_id,
        cust_type_cd,
        cust_name,
        num_accounts,
        tot_deposits
        )
        AS
        SELECT cst.cust_id, cst.cust_type_cd,
        CASE
        WHEN cst.cust_type_cd = 'B' THEN
        (SELECT bus.name FROM business AS bus WHERE bus.cust_id = cst.cust_id)
        ELSE
        (SELECT concat(ind.fname, ' ', ind.lname)
        FROM individual AS ind
        WHERE ind.cust_id = cst.cust_id)
        END AS cust_name,
        SUM(CASE WHEN act.status = 'ACTIVE' THEN 1 ELSE 0 END) AS tot_active_accounts,
        SUM(CASE WHEN act.status = 'ACTIVE' THEN act.avail_balance ELSE 0 END) AS tot_balance
        FROM customer AS cst INNER JOIN account AS act
        ON act.cust_id = cst.cust_id
        GROUP BY cst.cust_id, cst.cust_type_cd;""")
    finally:
        # original leaked the cursor when execute() raised; always close it
        cursor.close()
def create_totals_tbl():
    """Materialize customer_totals_vw into the customer_totals table.

    NOTE: creating this table freezes data; new data will not be reflected.
    NOTE(review): relies on a module-level connection ``cnx`` defined
    elsewhere in this file.
    """
    cursor = cnx.cursor()
    try:
        cursor.execute("""
        CREATE TABLE customer_totals
        AS
        SELECT * FROM customer_totals_vw;""")
    finally:
        # original leaked the cursor when execute() raised; always close it
        cursor.close()
|
[
"heitorchang@gmail.com"
] |
heitorchang@gmail.com
|
799f27d7bd6278066b4a0c11259c76d826704d80
|
48e9d0e84238daf0de290551e3588e9ff3f49549
|
/calculadora.py
|
43fadb96364fb7c2a09a91ee806895c89e916e0c
|
[] |
no_license
|
celord/PythonGreencore
|
9606af569738703b66d80bce6e423c9a313fa539
|
259aadcc346203f8092f6c6d286e3fca2e9fc550
|
refs/heads/master
| 2020-05-30T23:18:15.542876
| 2019-06-19T14:39:59
| 2019-06-19T14:39:59
| 190,014,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
def Menu():
    """Print the calculator's option menu on stdout."""
    banner = """*****************
          Calculadora
         ************
            Menu
 1) Suma
 2) Resta
 3) Multiplicacion
 4) Division
 """
    print(banner)
def Calculadora():
    """Funcion para calcular Operaciones Aritmeticas.

    Interactive loop: shows the menu, then repeatedly reads an option
    (1=suma, 2=resta, 3=multiplicacion, 4=division) and two integers,
    printing the result. Any option outside 1-4 ends the loop.
    Division by zero is reported instead of crashing.
    """
    Menu()
    opc = int(input("Seleccion Opcion \n"))
    while 0 < opc < 5:
        x = int(input("Ingrese Numero\n"))
        y = int(input("Ingrese otro numero\n"))
        if opc == 1:
            print("La suma es: ", x + y)
        elif opc == 2:
            print("La resta es:", x - y)
        elif opc == 3:
            print("La multiplicacion es:", x * y)
        elif opc == 4:
            try:
                print("La division es: ", x / y)
            except ZeroDivisionError:
                print("No se permite la division entre 0")
        # single re-prompt instead of one duplicated per branch
        opc = int(input("Seleccione Opcion"))


if __name__ == "__main__":
    # original called Calculadora() unconditionally, which blocked on
    # input() as soon as the module was imported; guard the entry point
    Calculadora()
|
[
"celord@gmail.com"
] |
celord@gmail.com
|
34e03ed4d72971d6ba7816fbfd77917897ceb6db
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_103/ch81_2020_04_08_14_08_30_221505.py
|
3d1850c8883fa9998cbf766d0212133a37e9b36c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
def interseccao_valores(dicio1, dicio2):
    """Return each value of dicio1 repeated once per equal value in dicio2.

    Values are compared with ==, so unhashable values (e.g. lists) work;
    duplicates in dicio2 produce duplicate entries, as in the original.
    """
    return [v1
            for v1 in dicio1.values()
            for v2 in dicio2.values()
            if v1 == v2]
|
[
"you@example.com"
] |
you@example.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.