blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a317b31dcfbca9e5076548df8be19fa41cca8ce3
|
65347ef29a8d580a4a1d3f72e50db2d6cff04ec8
|
/Autoencoders-Experiments/VAE-CF-PyTorch-Version2/loggers.py
|
b689469e636bc7b20bfff4f2e6f640b4793c2898
|
[] |
no_license
|
zhy5186612/transfer-rec
|
61ae52a0b940d51930b728333efb8df9e594f01f
|
932c16e8e016785e7edc1a8174e86df18a1a53c1
|
refs/heads/master
| 2022-05-17T13:31:26.514594
| 2020-04-23T13:20:23
| 2020-04-23T13:20:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,172
|
py
|
import os
from abc import ABCMeta, abstractmethod
import torch
def save_state_dict(state_dict, path, filename):
    """Serialize *state_dict* to ``path/filename`` using torch.save."""
    destination = os.path.join(path, filename)
    torch.save(state_dict, destination)
class LoggerService(object):
    """Fans logging events out to registered train/validation loggers."""

    def __init__(self, train_loggers=None, val_loggers=None):
        # Fall back to empty lists so iteration below is always safe.
        self.train_loggers = train_loggers or []
        self.val_loggers = val_loggers or []

    def complete(self, log_data):
        """Forward the final log payload to every registered logger."""
        for listener in self.train_loggers + self.val_loggers:
            listener.complete(**log_data)

    def log_train(self, log_data):
        """Forward a training-step payload to the train loggers only."""
        for listener in self.train_loggers:
            listener.log(**log_data)

    def log_val(self, log_data):
        """Forward a validation payload to the validation loggers only."""
        for listener in self.val_loggers:
            listener.log(**log_data)
class AbstractBaseLogger(metaclass=ABCMeta):
    """Interface for loggers driven by LoggerService.

    Subclasses must implement log(); complete() is an optional end-of-run
    hook that defaults to a no-op.
    """
    @abstractmethod
    def log(self, *args, **kwargs):
        """Record one logging event (called with the log_data kwargs)."""
        raise NotImplementedError

    def complete(self, *args, **kwargs):
        # Optional finalization hook; intentionally a no-op by default.
        pass
class RecentModelLogger(AbstractBaseLogger):
    """Persists the most recent model checkpoint, at most once per epoch."""

    def __init__(self, checkpoint_path, filename='checkpoint-recent.pth'):
        self.checkpoint_path = checkpoint_path
        # makedirs(exist_ok=True) also creates missing parent directories and
        # avoids the check-then-create race of os.path.exists + os.mkdir.
        os.makedirs(self.checkpoint_path, exist_ok=True)
        self.recent_epoch = None
        self.filename = filename

    def log(self, *args, **kwargs):
        """Save kwargs['state_dict'] (tagged with kwargs['epoch']) unless this
        epoch has already been checkpointed."""
        epoch = kwargs['epoch']
        if self.recent_epoch != epoch:
            self.recent_epoch = epoch
            state_dict = kwargs['state_dict']
            # Note: mutates the caller's dict by embedding the epoch tag.
            state_dict['epoch'] = epoch
            save_state_dict(state_dict, self.checkpoint_path, self.filename)

    def complete(self, *args, **kwargs):
        """Write the final checkpoint under '<filename>.final'."""
        save_state_dict(kwargs['state_dict'], self.checkpoint_path, self.filename + '.final')
class BestModelLogger(AbstractBaseLogger):
    """Checkpoints the model whenever the tracked metric improves."""

    def __init__(self, checkpoint_path, metric_key='mean_iou', filename='best_acc_model.pth'):
        self.checkpoint_path = checkpoint_path
        # makedirs(exist_ok=True) also creates missing parent directories and
        # avoids the check-then-create race of os.path.exists + os.mkdir.
        os.makedirs(self.checkpoint_path, exist_ok=True)
        # Best value seen so far; 0. means any positive metric triggers a save.
        self.best_metric = 0.
        self.metric_key = metric_key
        self.filename = filename

    def log(self, *args, **kwargs):
        """Save kwargs['state_dict'] when kwargs[metric_key] beats the best so far."""
        current_metric = kwargs[self.metric_key]
        if self.best_metric < current_metric:
            print("Update Best {} Model at {}".format(self.metric_key, kwargs['epoch']))
            self.best_metric = current_metric
            save_state_dict(kwargs['state_dict'], self.checkpoint_path, self.filename)
class MetricGraphPrinter(AbstractBaseLogger):
    """Writes a single scalar metric to a TensorBoard-style writer."""

    def __init__(self, writer, key='train_loss', graph_name='Train Loss', group_name='metric'):
        self.key = key
        self.graph_label = graph_name
        self.group_name = group_name
        self.writer = writer

    def log(self, *args, **kwargs):
        """Plot kwargs[self.key] at step kwargs['accum_iter'] (0 when absent)."""
        tag = self.group_name + '/' + self.graph_label
        value = kwargs[self.key] if self.key in kwargs else 0
        self.writer.add_scalar(tag, value, kwargs['accum_iter'])

    def complete(self, *args, **kwargs):
        """Flush and close the underlying writer."""
        self.writer.close()
|
[
"le_j6@denison.edu"
] |
le_j6@denison.edu
|
6876f8b79652c236366a047dbb8480193fb49b09
|
3b786d3854e830a4b46ee55851ca186becbfa650
|
/SystemTesting/pylib/vmware/nsx/manager/cluster_profile/cluster_profile.py
|
17c2433dff9f9c71fb66ecf51a75199cd369ed5f
|
[] |
no_license
|
Cloudxtreme/MyProject
|
d81f8d38684333c22084b88141b712c78b140777
|
5b55817c050b637e2747084290f6206d2e622938
|
refs/heads/master
| 2021-05-31T10:26:42.951835
| 2015-12-10T09:57:04
| 2015-12-10T09:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
import vmware.base.profile as profile
class ClusterProfile(profile.Profile):
    """Thin wrapper over profile.Profile representing a cluster profile.

    All behaviour is inherited; this class only adds id accessors.
    """
    def __init__(self, parent=None, id_=None):
        super(ClusterProfile, self).__init__(parent=parent, id_=id_)

    def get_id_(self):
        # Generic accessor; returns the identifier stored by the base class.
        return self.id_

    def get_cluster_profile_id(self):
        # Domain-specific alias for the same underlying identifier.
        return self.id_
|
[
"bpei@vmware.com"
] |
bpei@vmware.com
|
02bedd3b8649c9858a34d4ea7357625607b60bdc
|
924cce00272c808c9bea774ab79b85b1ea273263
|
/setup.py
|
6a8a0ea0d3c1bb2f139a810861fa0d52a281fb09
|
[
"MIT"
] |
permissive
|
robot-ai-machinelearning/lagom
|
8804d054598f03a2ab3e7a19a8676333feeba4b8
|
19f77e43cd3c83dd7c2ea4d7ad5c52b7cd287784
|
refs/heads/master
| 2020-03-29T16:22:26.750541
| 2018-09-24T12:32:42
| 2018-09-24T12:32:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
# Packaging configuration for the lagom distribution.
from setuptools import setup
from setuptools import find_packages
from lagom.version import __version__

# Read content of README.md (used verbatim as the PyPI long description).
with open('README.md', 'r') as f:
    long_description = f.read()

setup(name='lagom',
      version=__version__,
      author='Xingdong Zuo',
      author_email='zuoxingdong@hotmail.com',
      description='lagom: A light PyTorch infrastructure to quickly prototype reinforcement learning algorithms.',
      # Long description of README markdown, shows in Python Package Index
      long_description=long_description,
      long_description_content_type='text/markdown',
      url='https://github.com/zuoxingdong/lagom',
      # Minimal required dependencies (full dependencies in requirements.txt)
      install_requires=['numpy',
                        'scipy',
                        'matplotlib',
                        'scikit-image',
                        'imageio',
                        'pandas',
                        'seaborn',
                        'jupyterlab',
                        'gym',
                        'cma',
                        'flake8',
                        'sphinx',
                        'sphinx_rtd_theme',
                        'cloudpickle',
                        'pyyaml',
                        'colored'],
      tests_require=['pytest'],
      # Only Python 3+
      python_requires='>=3',
      # List all lagom packages (folder with __init__.py), useful to distribute a release
      packages=find_packages(),
      # tell pip some metadata (e.g. Python version, OS etc.)
      classifiers=['Programming Language :: Python :: 3',
                   'License :: OSI Approved :: MIT License',
                   'Operating System :: OS Independent',
                   'Natural Language :: English',
                   'Topic :: Scientific/Engineering :: Artificial Intelligence']
      )
|
[
"zuoxingdong@hotmail.com"
] |
zuoxingdong@hotmail.com
|
c1d15d9e0318b44e03a91d4e873a5a11680aa2d4
|
951b605ea41da28dccba6d3de63fb9211b7ad5b1
|
/MNIST/Mains/main_01205.py
|
e5fcc1eec7a618a15ef61d1fe3a39de1376e6d49
|
[
"MIT"
] |
permissive
|
tiangeluo/DefectiveCNN
|
99296f7a86efd3c4d044701f4e94388989cbd66a
|
fdbf5235adffa846630fadb4ff910de50870c077
|
refs/heads/master
| 2022-01-29T14:23:10.198712
| 2022-01-08T22:20:54
| 2022-01-08T22:20:54
| 222,830,775
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,621
|
py
|
'''Train CIFAR10 with PyTorch.'''
# NOTE(review): the docstring says CIFAR10, but the loaders below use
# datasets.MNIST — likely inherited from the pytorch-cifar template; confirm.
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import IPython
import torchvision
#import torchvision.transforms as transforms
from torchvision import datasets, transforms
import os
import argparse
#from models import *
# Model variant under test: ResNet18 with the "12_05 first" defective/dropout scheme.
from resnet_drop_12_05_first import ResNet18
#from resnet_drop_12_05 import ResNet18
#logfile = open('csv/12_05_first.csv','a')
#from resnet import ResNet18
# Per-epoch train/test accuracy rows are appended to this CSV; the file is
# never explicitly closed, so rows are flushed only at interpreter exit.
logfile = open('csv/01205.csv','a')
import csv
w = csv.writer(logfile)
from utils import progress_bar
from torch.optim.lr_scheduler import MultiStepLR

parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy seen so far (updated by test())
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
# NOTE(review): transform_train/transform_test appear unused — the
# DataLoaders below build their own Compose pipelines. Also note (0.3081)
# and (0.1307) without trailing commas are plain floats, not 1-tuples;
# if these pipelines are ever enabled, they were probably meant as (0.3081,).
transform_train = transforms.Compose([
    #transforms.RandomCrop(28, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307), (0.3081)),
])
#trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform_train)
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
#
#testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)
#testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
#classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
kwargs = {'num_workers': 1, 'pin_memory': True} if device == 'cuda' else {}
# 0.1307 / 0.3081 are the conventional MNIST mean/std.
trainloader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.RandomHorizontalFlip(),
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=128, shuffle=True, **kwargs)
# NOTE(review): shuffle=True on the test loader is unusual (harmless for the
# final accuracy number, but per-batch logs are not reproducible) — confirm intent.
testloader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=128, shuffle=True, **kwargs)

# Model
print('==> Building model..')
# net = VGG('VGG19')
net = ResNet18()
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True
if args.resume:
    # Load checkpoint (restores weights, best accuracy and epoch counter).
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/ckpt.t7')
    net.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
# LR decays x0.1 at epochs 20 and 40 (the run below lasts 50 epochs).
scheduler = MultiStepLR(optimizer, milestones=[20,40], gamma=0.1)
# Training
def train(epoch):
    """Run one training epoch over trainloader; return accuracy in percent."""
    scheduler.step()
    print('\nEpoch: %d' % epoch)
    net.train()
    running_loss, n_correct, n_seen = 0, 0, 0
    for step, (batch_x, batch_y) in enumerate(trainloader):
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        optimizer.zero_grad()
        logits = net(batch_x)
        batch_loss = criterion(logits, batch_y)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
        predicted = logits.max(1)[1]
        n_seen += batch_y.size(0)
        n_correct += predicted.eq(batch_y).sum().item()
        progress_bar(step, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (running_loss / (step + 1), 100. * n_correct / n_seen, n_correct, n_seen))
    return 100. * n_correct / n_seen
def test(epoch):
    """Evaluate on testloader; checkpoint the model when accuracy improves.

    Returns the test accuracy in percent and updates the global best_acc.
    """
    global best_acc
    net.eval()
    running_loss, n_correct, n_seen = 0, 0, 0
    with torch.no_grad():
        for step, (batch_x, batch_y) in enumerate(testloader):
            batch_x, batch_y = batch_x.to(device), batch_y.to(device)
            logits = net(batch_x)
            running_loss += criterion(logits, batch_y).item()
            predicted = logits.max(1)[1]
            n_seen += batch_y.size(0)
            n_correct += predicted.eq(batch_y).sum().item()
            progress_bar(step, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (running_loss / (step + 1), 100. * n_correct / n_seen, n_correct, n_seen))
    # Save checkpoint.
    acc = 100. * n_correct / n_seen
    if acc > best_acc:
        print('Saving..')
        snapshot = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        torch.save(snapshot, './checkpoint/01205.t7')
        best_acc = acc
    return acc
# Train for 50 epochs, writing one CSV row of (train_acc, test_acc) per epoch.
for epoch in range(start_epoch, start_epoch + 50):
    w.writerow([train(epoch), test(epoch)])
|
[
"tiangelce@gmail.com"
] |
tiangelce@gmail.com
|
9fb0d6e102b861baf24eb39d9eee70ba2a3cf078
|
968aa9bac548662b49af4e2b873b61873ba6f680
|
/buildframework/helium/sf/python/blockspackager/setup.py
|
0484901f98089bdeaf01bf0195eb846afb12f0c2
|
[] |
no_license
|
anagovitsyn/oss.FCL.sftools.dev.build
|
b3401a1ee3fb3c8f3d5caae6e5018ad7851963f3
|
f458a4ce83f74d603362fe6b71eaa647ccc62fee
|
refs/heads/master
| 2021-12-11T09:37:34.633852
| 2010-12-01T08:05:36
| 2010-12-01T08:05:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
#============================================================================
#Name : .py
#Part of : Helium
#Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
#All rights reserved.
#This component and the accompanying materials are made available
#under the terms of the License "Eclipse Public License v1.0"
#which accompanies this distribution, and is available
#at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
#Initial Contributors:
#Nokia Corporation - initial contribution.
#
#Contributors:
#
#Description:
#===============================================================================
import os
from setuptools import setup, find_packages
# Module names for every top-level .py file under lib/ (extension stripped;
# note .replace matches any '.py' substring, preserved for compatibility).
pyfiles = [entry.replace('.py', '') for entry in os.listdir('lib') if entry.endswith('.py')]

setup(
    name='blockspackager',
    version='0.1',
    description="blockspackager",
    license='EPL',
    package_dir={'': 'lib'},
    py_modules=pyfiles,
    packages=find_packages('lib', exclude=["*tests"]),
    test_suite='nose.collector',
    package_data={'': ['*.xml', '*.conf', '*.xsd', '*.nsi']},
    zip_safe=False,
)
|
[
"none@none"
] |
none@none
|
c2be7f9b9b2a746a7ca42771ddb8abe8a63b93ab
|
dc3c88f1fe5c80147e4c52ee6ec3136307ec9702
|
/findUsages/test/command_test.py
|
5974a9599ae0dba4b2b31ade975d806786b20488
|
[] |
no_license
|
ypapax/all_sublime_plugins
|
062f9b9992a093a02e6b905c1329c681c8532034
|
8b10e471233bd6c2e77907cf5569b0ddccfc88f9
|
refs/heads/master
| 2021-01-15T21:10:08.029750
| 2015-08-16T06:32:51
| 2015-08-16T06:32:51
| 40,391,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
import unittest
import sys
import os

# Make the plugin root and the shared util folder importable before the
# project-local imports below; order matters here.
currentFolder = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(currentFolder, '..'))
sys.path.insert(0, os.path.join(currentFolder, '../../util'))
import color
import assertMy
from findUsagesModel import FindUsagesModel


class Test(unittest.TestCase):
    def test_testName(self):
        """Issuing "next" from (first.py, 1) should land on the second of three usages."""
        color.blue("test here baby")
        findUsagesModel = FindUsagesModel(os.path.join(currentFolder, 'dataPoligon/'))
        # Position the cursor on the first usage before issuing the command.
        findUsagesModel.i.currentSet(('/path/to/first.py', 1))
        result = findUsagesModel.command("next")
        expected = ('/path/to/second.py', 2, '2 of 3')
        assertMy.equals(result, expected)


if __name__ == '__main__':
    unittest.main()
|
[
"maxYefr@gmail.com"
] |
maxYefr@gmail.com
|
5705542ccfac4178bfdc9969f7151bd732dd78b8
|
f8f2536fa873afa43dafe0217faa9134e57c8a1e
|
/aliyun-python-sdk-drds/aliyunsdkdrds/request/v20190123/SubmitHotExpandPreCheckTaskRequest.py
|
bf5b5b21e2df81da0980c7d0119439679ad23ed7
|
[
"Apache-2.0"
] |
permissive
|
Sunnywillow/aliyun-openapi-python-sdk
|
40b1b17ca39467e9f8405cb2ca08a85b9befd533
|
6855864a1d46f818d73f5870da0efec2b820baf5
|
refs/heads/master
| 2022-12-04T02:22:27.550198
| 2020-08-20T04:11:34
| 2020-08-20T04:11:34
| 288,944,896
| 1
| 0
|
NOASSERTION
| 2020-08-20T08:04:01
| 2020-08-20T08:04:01
| null |
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class SubmitHotExpandPreCheckTaskRequest(RpcRequest):
    """RPC request object for the Drds 2019-01-23 SubmitHotExpandPreCheckTask action."""

    def __init__(self):
        RpcRequest.__init__(self, 'Drds', '2019-01-23', 'SubmitHotExpandPreCheckTask','Drds')
        self.set_method('POST')
        # Endpoint tables are optional attributes provided by newer SDK cores.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_TableLists(self):
        return self.get_query_params().get('TableLists')

    def set_TableLists(self, TableLists):
        # Repeated query parameters are 1-based: TableList.1, TableList.2, ...
        for position, table in enumerate(TableLists, start=1):
            if table is not None:
                self.add_query_param('TableList.' + str(position), table)

    def get_DrdsInstanceId(self):
        return self.get_query_params().get('DrdsInstanceId')

    def set_DrdsInstanceId(self, DrdsInstanceId):
        self.add_query_param('DrdsInstanceId', DrdsInstanceId)

    def get_DbName(self):
        return self.get_query_params().get('DbName')

    def set_DbName(self, DbName):
        self.add_query_param('DbName', DbName)

    def get_DbInstType(self):
        return self.get_query_params().get('DbInstType')

    def set_DbInstType(self, DbInstType):
        self.add_query_param('DbInstType', DbInstType)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
a4d2039633e795db8cd201647a8a32acfed59ce7
|
6bff309542d142259ba5ccbc9cb6286415aaf9a1
|
/hackbeil/utils.py
|
573ac5b01a3c7c7481e9269e9158bff8b667575f
|
[] |
no_license
|
RonnyPfannschmidt-Attic/hackbeil
|
c898f0065a5c8b59fafe05886a4b8c5355e0765c
|
c0d08b9c556de76a7f73c1a8707b4c8fa4f6cf91
|
refs/heads/master
| 2020-12-05T19:25:01.357747
| 2011-02-01T11:40:53
| 2011-02-01T11:40:53
| 66,011,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
from itertools import islice
from collections import deque
def window(s, width):
    """Yield successive overlapping tuples of up to *width* items from *s*.

    The first tuple is yielded even if *s* has fewer than *width* items.
    Works on arbitrary iterables, not just sequences.
    """
    iterator = iter(s)  # normalize: works for generators and sequences alike
    buf = deque(islice(iterator, width), maxlen=width)
    yield tuple(buf)
    for element in iterator:
        buf.append(element)  # maxlen causes the oldest item to fall off
        yield tuple(buf)
|
[
"opensource@ronnypfannschmidt.de"
] |
opensource@ronnypfannschmidt.de
|
aafb4e26305823ccceae86cca6d23f70c29b97fb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03777/s125701871.py
|
be82f746ef480a14fa9e30705a3dc2ce88655c45
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
# AtCoder: reads two cards; "H" keeps the second card, "D" flips it (H<->D).
icase = 0
if icase == 0:
    first, second = input().split()
    if first == "H":
        print(second)
    elif first == "D":
        if second == "H":
            print("D")
        elif second == "D":
            print("H")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
85e61b194f6013cc5bdd5ec8c25a8bbdc66079df
|
58acf9b3f80c13b67e15db2a352d737045edac7a
|
/server/retail/migrations/0002_store_business_hours_end.py
|
104786f41822d31fc8a93e5ae0c4314b2d740173
|
[] |
no_license
|
andyk1278/biz_mgmt_drf
|
40daf3bbc874f2253d16582627b342b7bba01e34
|
3d6d3438a1a790e0f95683648cc4fd0b68436c7d
|
refs/heads/master
| 2021-01-23T18:22:04.600776
| 2017-02-25T03:15:52
| 2017-02-25T03:15:52
| 82,996,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 533
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
    """Auto-generated migration: adds Store.business_hours_end.

    The field is an hour-of-day integer constrained to 0-23, defaulting
    to 17 (5 pm).
    """

    dependencies = [
        ('retail', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='store',
            name='business_hours_end',
            field=models.IntegerField(default=17, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(23)]),
        ),
    ]
|
[
"andyk1278@gmail.com"
] |
andyk1278@gmail.com
|
dfef1033df4c24b10566c7e5ec119248f7f49d3c
|
ecf1ce6f8b592f76c7b7c253608c1264ae0676a3
|
/days/day101/Bite 47. Write a new password field validator/password.py
|
007097825ba4bd20523fa3a54f74749d537087a3
|
[] |
permissive
|
alex-vegan/100daysofcode-with-python-course
|
94e99880a50ac412e398ad209ed53796f253641f
|
b6c12316abe18274b7963371b8f0ed2fd549ef07
|
refs/heads/master
| 2021-07-20T23:05:59.721661
| 2019-01-21T16:18:25
| 2019-01-21T16:18:25
| 150,115,516
| 0
| 0
|
MIT
| 2018-09-24T14:28:16
| 2018-09-24T14:28:15
| null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
import string
# Kept for backward compatibility with importers of this module.
PUNCTUATION_CHARS = list(string.punctuation)

# Passwords that were already accepted once and may not be reused.
used_passwords = set('PassWord@1 PyBit$s9'.split())


def validate_password(password):
    """Validate *password* against the Bite 47 rules.

    Rules: 6-12 characters, at least one digit, at least two lowercase
    letters, at least one uppercase letter, at least one punctuation
    character, and not previously used.  A valid password is recorded in
    ``used_passwords`` so it cannot be accepted again.

    Returns True when all rules pass, False otherwise.
    """
    checks = {
        'length': 5 < len(password) < 13,
        'digit': any(c in string.digits for c in password),
        # The original required two lowercase characters, not just one.
        'lowercase': sum(c in string.ascii_lowercase for c in password) >= 2,
        'uppercase': any(c in string.ascii_uppercase for c in password),
        'punctuation': any(c in string.punctuation for c in password),
        'unused': password not in used_passwords,
    }
    result = all(checks.values())
    if result:
        used_passwords.add(password)
    return result
|
[
"alex-vegan@outlook.com"
] |
alex-vegan@outlook.com
|
441d575f09cb0d9f6737a402764f968e1cbd991d
|
98a1c37ccda91f2c4be14683f5899393f6b97d29
|
/05-Spider/01-爬虫基础/01-urllib模块相关/v20-ajax.py
|
00f1643929a27bf3517217637d08657304f00ad4
|
[] |
no_license
|
yeshixuan/Python
|
1b564d9e46b469f62f59a3a04b340abd68ea4419
|
98ba749ca9ea12004cdff1fdb7e002dea2f42096
|
refs/heads/master
| 2020-04-29T22:17:01.014401
| 2019-05-14T05:15:29
| 2019-05-14T05:15:29
| 176,442,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
'''
Fetch Douban movie chart data from its ajax endpoint.

Demonstrates the basic way to scrape an ajax API,
which typically returns JSON.
'''
from urllib import request
import json

TOP_LIST_URL = "https://movie.douban.com/j/chart/top_list?type=11&interval_id=100%3A90&action=&start=40&limit=20"

response = request.urlopen(TOP_LIST_URL)
raw_text = response.read().decode()
print(type(raw_text))

parsed = json.loads(raw_text)
print(type(parsed))
print(parsed)
|
[
"979697327@qq.com"
] |
979697327@qq.com
|
a2b78f6a5b669caba0cdc24a8412586b1db48fba
|
5945903ff7b3c0be799d8b228aa96309e8d6b68a
|
/LeetCode_121.py
|
45b3ab58b459e2cf5e93da61a259a75cffb8e313
|
[] |
no_license
|
freesan44/LeetCode
|
44fd01fa37e2d7e729ae947da2350b1649c163ae
|
2ed9f1955c527d43fe1a02e5bebf5a6f981ef388
|
refs/heads/master
| 2021-12-07T20:07:02.308097
| 2021-11-01T23:58:11
| 2021-11-01T23:58:11
| 245,178,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
class Solution:
    def maxProfit(self, prices: [int]) -> int:
        """Return the maximum profit from one buy/sell pair (LeetCode 121).

        Dynamic programming in O(n): track the cheapest price seen so far
        and the best profit achievable selling at each later price.
        Returns 0 for empty input or when no profitable trade exists.
        """
        if not prices:
            return 0
        min_price = prices[0]
        best_profit = 0
        for price in prices[1:]:
            # Cheapest buy-in available up to this point.
            min_price = min(min_price, price)
            # Best profit is either the previous best or selling here.
            best_profit = max(best_profit, price - min_price)
        return best_profit
if __name__ == '__main__':
    # Quick manual check against the classic LeetCode example (expected: 5).
    sample_prices = [7, 1, 5, 3, 6, 4]
    print(Solution().maxProfit(sample_prices))
|
[
"freesan44@163.com"
] |
freesan44@163.com
|
1aaa960fae097a2b618319b702a83ddf1a9a06d7
|
2d358ffb51f03cc64cc2da0f684b0928aebe139c
|
/test4/test4/urls.py
|
43ae073a9b754f2867ce7cdb3baaa58c51b4e172
|
[] |
no_license
|
853695319/learningdjango
|
195ffabdbd3a5b6bc4386cbb678504c0d2cd0095
|
d2aac1117bb2ca31e4f247a9d206adcf3a9f39a2
|
refs/heads/master
| 2020-05-03T04:59:16.094900
| 2019-04-23T06:25:02
| 2019-04-23T06:25:02
| 178,437,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
"""test4 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based vor you can load the media manually as it is done in the demo app:
{% load static %}
<script type="text/javascript" src="{% static "ckeditor/ckeditor-init.js" %}"></script>
<script type="text/javascript" src="{% static "ckeditor/ckeditor/ckeditor.js" %}"></script>
iews
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('booktest.urls', namespace='booktest')),
url(r'^captcha/', include('captcha.urls')),
]
|
[
"853695319@qq.com"
] |
853695319@qq.com
|
bed0addb8d125bfdbad7509237b48bb8e5298a78
|
1fe8d4133981e53e88abf633046060b56fae883e
|
/venv/lib/python3.8/site-packages/tensorflow/python/keras/utils/kernelized_utils 2.py
|
74651b35917eb8222c2f3664d0f356d2a35dfcab
|
[] |
no_license
|
Akira331/flask-cifar10
|
6c49db8485038731ce67d23f0972b9574746c7a7
|
283e7a2867c77d4b6aba7aea9013bf241d35d76c
|
refs/heads/master
| 2023-06-14T16:35:06.384755
| 2021-07-05T14:09:15
| 2021-07-05T14:09:15
| 382,864,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:7e475cdd3df33b8b101b18a249883bb43ee2863ad84a5196b08e7c9365880478
size 4521
|
[
"business030301@gmail.com"
] |
business030301@gmail.com
|
79caad3774a563c959a24e594778535930e71b9a
|
5ca4a0d91f5bd119e80478b5bd3d43ed30133a42
|
/film20/showtimes/management/commands/showtimes.py
|
48d40f1d5b801dd4dd3f183b1fd3254d3c9d6993
|
[] |
no_license
|
thuvh/filmmaster
|
1fc81377feef5a9e13f792b329ef90f840404ec5
|
dd6a2ee5a4951b2397170d5086c000169bf91350
|
refs/heads/master
| 2021-01-17T16:10:54.682908
| 2012-04-29T18:19:52
| 2012-04-29T18:19:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q, F
from django.conf import settings
from optparse import make_option
import logging
logger = logging.getLogger(__name__)
from film20.showtimes.models import Town, FilmOnChannel, Screening
from film20.filmbasket.models import BasketItem
from film20.core.models import Recommendation, Film
from django.contrib.auth.models import User
from decimal import Decimal
import datetime, time
from film20.showtimes.showtimes_helper import *
from django.core.urlresolvers import reverse
from django.conf import settings
class Command(BaseCommand):
    """Management command for showtimes maintenance.

    With --rematch, tries to re-match FilmOnChannel rows that were not
    matched to a film; --days limits the rematch to recently created rows.
    (Uses the legacy optparse-based option_list API of older Django.)
    """
    help = ''
    option_list = BaseCommand.option_list + (
        make_option('--debug',
            action='store_true',
            dest='debug',
            default=False,
        ),
        make_option('--rematch',
            action='store_true',
            dest='rematch',
            default=False,
            help='Tries to rematch movies',
        ),
        make_option('--days',
            dest='days',
            default=0,
            type='int',
        ),
    )

    def rematch(self):
        """Re-run matching for unmatched FilmOnChannel rows, optionally
        restricted to those created within the last --days days."""
        days = self.opts.get('days')
        # Imported here (not at module level) — presumably to avoid import
        # cycles at Django startup; confirm before moving to the top.
        from film20.showtimes.models import UNMATCHED, MATCHED, FilmOnChannel
        movies = FilmOnChannel.objects.filter(match=UNMATCHED)
        if days:
            since = datetime.date.today() - datetime.timedelta(days=days)
            movies = movies.filter(created_at__gte=since)
        logger.debug("%s movies to rematch", len(movies))
        cnt = 0
        for movie in movies:
            movie.match_and_save()
            if movie.match==MATCHED:
                logger.info("%s: matched", movie)
                cnt += 1
        if cnt:
            logger.info("%s movies has been matched", cnt)

    def handle(self, *args, **opts):
        # Entry point: stash parsed options, then dispatch by flag.
        self.opts = opts
        if opts.get('rematch'):
            self.rematch()
|
[
"email@ibrahimcesar.com"
] |
email@ibrahimcesar.com
|
1c98241e2a84408738a904c610b39141eedecf5b
|
466912406272829982f75854cf0104c6ce8c9814
|
/data/spider2/aggregator/funding/debug_funding_aggregator.py
|
95e4c7df1d1473d6b0e1784f1588de6b0b184e96
|
[] |
no_license
|
logonmy/Codes
|
9631fa103fc499663361fa7eeccd7cedb9bb08e4
|
92723efdeccfc193f9ee5d0ab77203c254f34bc2
|
refs/heads/master
| 2021-09-21T18:07:22.985184
| 2018-08-30T05:53:26
| 2018-08-30T05:53:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,635
|
py
|
# -*- coding: utf-8 -*-
import os, sys
import time, datetime
# Python 2 idiom: force the process default string encoding to UTF-8 so the
# mixed Chinese/ASCII strings handled below don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding("utf-8")
# Make the shared util/support packages importable relative to this file;
# the order of path inserts and imports below matters.
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../util'))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../support'))
import loghelper
import util
import db
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../util'))
import helper

# logger
loghelper.init_logger("funding_aggregator", stream=True)
logger = loghelper.get_logger("funding_aggregator")
def aggregate(source_funding):
    """Aggregate one source_funding row into the canonical funding table.

    Returns False when the source company is missing or not yet linked to a
    canonical company; otherwise delegates to aggregate1() and returns its
    result.
    """
    #flag = True
    sf = source_funding
    logger.info("source_funding_id: %s" % sf["id"])
    #logger.info(sf)
    # TODO
    # If the funding date is more than a year after the company founding date, skip it.
    conn = db.connect_torndb()
    source_company = conn.get("select * from source_company where id=%s", sf["sourceCompanyId"])
    #logger.info(source_company)
    conn.close()
    if source_company is None:
        return False
    if source_company["companyId"] is None:
        # The source company has not been matched to a canonical company yet.
        return False
    company_id = source_company["companyId"]
    logger.info("company_id: %s", company_id)
    return aggregate1(sf, source_company, company_id)
def aggregate1(sf,source_company,company_id, test=False):
flag = True
table_names = helper.get_table_names(test)
conn = db.connect_torndb()
sfirs = conn.query("select * from source_funding_investor_rel where sourceFundingId=%s", sf["id"])
if sf["investment"] == 0 and len(sfirs)==0:
conn.close()
return True
f = conn.get("select * from " + table_names["funding"] + " where companyId=%s and round=%s and (active is null or active!='N') limit 1",
company_id, sf["round"])
if f is None and sf["fundingDate"] is not None and sf["round"]<=1020:
'''
f = conn.get("select * from " + table_names["funding"] + " where companyId=%s and year(fundingDate)=%s and month(fundingDate)=%s and (active is null or active!='N') limit 1",
company_id, sf["fundingDate"].year, sf["fundingDate"].month)
'''
f = conn.get("select * from funding where companyId=%s and fundingDate>date_sub(%s,interval 1 month) and fundingDate<date_add(%s,interval 1 month) and (active is null or active!='N') limit 1",
company_id, sf["fundingDate"], sf["fundingDate"])
if f is None:
sql = "insert " + table_names["funding"] + "(companyId,preMoney,postMoney,investment,\
round,roundDesc,currency,precise,fundingDate,fundingType,\
active,createTime,modifyTime) \
values(%s,%s,%s,%s, %s,%s,%s,%s,%s,%s,'Y',now(),now())"
fundingId=conn.insert(sql,
company_id,
sf["preMoney"],
sf["postMoney"],
sf["investment"],
sf["round"],
sf["roundDesc"],
sf["currency"],
sf["precise"],
sf["fundingDate"],
8030
)
else:
fundingId = f["id"]
if f["round"] == 1110 and sf["round"] == 1105:
conn.update("update " + table_names["funding"] + " set round=1105 where id=%s",fundingId)
logger.info("fundingId: %s", fundingId)
for sfir in sfirs:
investor_id = None
investor_company_id = None
if sfir["investorType"] == 38001:
source_investor = conn.get("select * from source_investor where id=%s", sfir["sourceInvestorId"])
if source_investor is None:
flag = False
continue
investor_id = source_investor["investorId"]
if investor_id is None:
flag = False
continue
investor = conn.get("select * from investor where id=%s", investor_id)
if investor is None or investor["active"] == 'N':
flag = False
continue
else:
source_company = conn.get("select * from source_company where id=%s", sfir["sourceCompanyId"])
if source_company is None or source_company["companyId"] is None:
flag = False
continue
investor_company_id = source_company["companyId"]
if sfir["investorType"] == 38001:
funding_investor_rel = conn.get("select * from " + table_names["funding_investor_rel"] + " \
where investorId=%s and fundingId=%s limit 1",
investor_id, fundingId)
else:
funding_investor_rel = conn.get("select * from " + table_names["funding_investor_rel"] + " \
where companyId=%s and fundingId=%s limit 1",
investor_company_id, fundingId)
if funding_investor_rel is None:
sql = "insert " + table_names["funding_investor_rel"] + "(fundingId, investorType, investorId, companyId, currency, investment,\
precise,active,createTime,modifyTime) \
values(%s,%s,%s,%s,%s,%s,%s,'Y',now(),now())"
conn.insert(sql,
fundingId,
sfir["investorType"],
investor_id,
investor_company_id,
sfir["currency"],
sfir["investment"],
sfir["precise"]
)
# update company stage
if not test:
funding = conn.get("select * from funding where companyId=%s order by round desc, fundingDate desc limit 1",
company_id)
if funding is not None:
conn.update("update company set round=%s, roundDesc=%s where id=%s",
funding["round"],funding["roundDesc"],company_id)
conn.close()
return flag
if __name__ == '__main__':
#2671 investor聚合错误的处理
logger.info("funding aggregator start")
conn = db.connect_torndb()
source_fundings = conn.query("select * from source_funding where processStatus=-1 order by id")
conn.close()
for source_funding in source_fundings:
flag = aggregate(source_funding)
if flag:
conn = db.connect_torndb()
conn.update("update source_funding set processStatus=2 where id=%s", source_funding["id"])
conn.close()
pass
logger.info("funding aggregator end.")
|
[
"hush_guo@163.com"
] |
hush_guo@163.com
|
9f13e226573356dc8bab614ec9567e813db6a94a
|
f889bc01147869459c0a516382e7b95221295a7b
|
/test/test_gift_message_cart_repository_v1_api.py
|
3dba14bcb4d13cd918869870e9d5b5b39067c7b1
|
[] |
no_license
|
wildatheart/magento2-api-client
|
249a86f5c0289743f8df5b0324ccabd76f326512
|
e6a707f85b37c6c3e4ef3ff78507a7deb8f71427
|
refs/heads/master
| 2021-07-14T16:01:17.644472
| 2017-10-18T13:33:08
| 2017-10-18T13:33:08
| 107,412,121
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
# coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.gift_message_cart_repository_v1_api import GiftMessageCartRepositoryV1Api
class TestGiftMessageCartRepositoryV1Api(unittest.TestCase):
""" GiftMessageCartRepositoryV1Api unit test stubs """
def setUp(self):
self.api = swagger_client.apis.gift_message_cart_repository_v1_api.GiftMessageCartRepositoryV1Api()
def tearDown(self):
pass
def test_gift_message_cart_repository_v1_get_get(self):
"""
Test case for gift_message_cart_repository_v1_get_get
"""
pass
def test_gift_message_cart_repository_v1_get_get_0(self):
"""
Test case for gift_message_cart_repository_v1_get_get_0
"""
pass
def test_gift_message_cart_repository_v1_save_post(self):
"""
Test case for gift_message_cart_repository_v1_save_post
"""
pass
def test_gift_message_cart_repository_v1_save_post_0(self):
"""
Test case for gift_message_cart_repository_v1_save_post_0
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"sander@wildatheart.eu"
] |
sander@wildatheart.eu
|
84d813947981e994b93499fd286b907abe127477
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/288/87938/submittedfiles/minha_bib.py
|
914176dc060030cfa538765eccdbddb1cf8bc8fa
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
#def multiplicacao(x,y):
# m= x * y
# return m
opt=input("Escolha um simbolo [X ou O]: ")
while (opt!= 'X') or (opt!= 'O'):
print ("SMBOLO INVALIDO!")
opt=input("Escolha um simbolo [X ou O]: ")
print ("pronto")
"""
def fatorial(n):
f=1
for i in range(2,n+1,1):
f*=i
return f
def ler_inteiro():
i=input(mensagem):
return
def cronometro(s):
for i in range(s,-1,-1):
print ("%d segundos" %i)
def ler carta_baralho():
while(True):
carta= ler_inteiro("digite um numero")
if (carta >=1 and carta <=13):
break
else:
print ("CARTA INVALIDA")
return carta
"""
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7b3bfb0814e6c001961a2c7f97fd06a7dfa53607
|
4589a9ea76e458793ad78059839b81d365f433de
|
/athena_automation/athenataf/tests/configuration/wireless_network/access_rules/network_based/single_rule/captive_portal/CaptivePortal.py
|
cee48be9e290a9fa148dda689de7bd889877d997
|
[] |
no_license
|
cash2one/reautomation_handoff
|
5e2c4c432d8f658d1b57211782744bd0b56c52f6
|
7ef83572d659db35036189eb394f99de1369db5a
|
refs/heads/master
| 2020-05-22T17:56:33.214080
| 2015-07-13T07:51:18
| 2015-07-13T07:51:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,957
|
py
|
from athenataf.lib.functionality.test.ConfigurationTest import ConfigurationTest
import time
class CaptivePortal(ConfigurationTest):
'''
Test class for action allow of Captive Portal.
'''
def test_ath_2135_create_internal_captive_portal(self):
self.NetworkPage.delete_network_if_present()
self.take_s1_snapshot()
basic_info = self.NetworkPage.create_new_network()
vlan = basic_info.employee_network_info()
security = vlan.use_vlan_defaults()
security.enable_mac_authentication()
access_page = security.click_on_next()
access_page.network_based.click()
access_page.create_captive_portal()
access_page.save_settings.click()
access_page.finish_network_setup()
self.take_s2_snapshot()
edit_network = self.NetworkPage.edit_network()
edit_network.access_accordion.click()
edit_network.assert_captive_portal()
self.LeftPanel.go_to_network_page()
self.NetworkPage.delete_network_if_present()
self.take_s3_snapshot()
self.assert_s1_s2_diff(0)
self.assert_s1_s3_diff()
self.clear()
def test_ath_2136_create_external_captive_portal(self):
self.NetworkPage.delete_network_if_present()
self.take_s1_snapshot()
basic_info = self.NetworkPage.create_new_network()
vlan = basic_info.employee_network_info()
security = vlan.use_vlan_defaults()
security.enable_mac_authentication()
access_page = security.click_on_next()
access_page.network_based.click()
access_page.create_captive_portal(external=True)
access_page.save_settings.click()
access_page.finish_network_setup()
self.take_s2_snapshot()
edit_network = self.NetworkPage.edit_network()
edit_network.access_accordion.click()
edit_network.assert_captive_portal()
self.LeftPanel.go_to_network_page()
self.NetworkPage.delete_network_if_present()
self.take_s3_snapshot()
self.assert_s1_s2_diff(0)
self.assert_s1_s3_diff()
self.clear()
def test_ath_8825_internal_captive_portal_exclude_3g_4g_wifi_exit(self):
conf = self.config.config_vars
self.NetworkPage.delete_network_if_present()
self.take_s1_snapshot()
basic_info = self.NetworkPage.create_new_network()
vlan = basic_info.guest_network_info()
security = vlan.use_vlan_defaults()
security.set_splash_page_type_value(conf.Splash_page_Acknowledged)
security.enable_mac_authentication()
security.set_disable_if_uplink_type_is(True,True,False)
access_page = security.click_on_next()
access_page.click_role_access()
access_page.create_new_roles_with_different_rules()
access_page.finish_network_setup()
edit_network = self.NetworkPage.edit_network()
edit_network._click_access_accordion()
edit_network.move_rules()
edit_network.delete_access_role()
edit_network.networks.click()
self.take_s2_snapshot()
self.LeftPanel.go_to_network_page()
self.NetworkPage.delete_network_if_present()
self.take_s3_snapshot()
self.assert_s1_s2_diff(0)
self.assert_s1_s3_diff()
self.clear()
def test_ath_3358_change_internal_splash_page_settings(self):
self.NetworkPage.delete_network_if_present()
self.take_s1_snapshot()
basic_info = self.NetworkPage.create_new_network()
vlan = basic_info.employee_network_info()
security = vlan.use_vlan_defaults()
security.enable_mac_authentication()
access_page = security.click_on_next()
access_page.network_based.click()
access_page.create_captive_portal()
access_page.splash_page_settings()
access_page.finish_network_setup()
self.take_s2_snapshot()
edit_network = self.NetworkPage.edit_network()
edit_network.access_accordion.click()
edit_network.assert_captive_portal()
self.LeftPanel.go_to_network_page()
self.NetworkPage.delete_network_if_present()
self.take_s3_snapshot()
self.assert_s1_s2_diff(0)
self.assert_s1_s3_diff()
self.clear()
def test_ath_3359_change_external_splash_page_settings(self):
self.NetworkPage.delete_network_if_present()
self.take_s1_snapshot()
basic_info = self.NetworkPage.create_new_network()
vlan = basic_info.employee_network_info()
security = vlan.use_vlan_defaults()
security.enable_mac_authentication()
access_page = security.click_on_next()
access_page.network_based.click()
access_page.create_captive_portal(external=True)
access_page.create_new_external_captive_portal()
time.sleep(10)
access_page.save_settings.click()
time.sleep(10)
access_page.finish_network_setup()
time.sleep(10)
self.take_s2_snapshot()
time.sleep(5)
edit_network = self.NetworkPage.edit_network()
edit_network.access_accordion.click()
edit_network.assert_captive_portal()
self.LeftPanel.go_to_network_page()
time.sleep(5)
security_page = self.LeftPanel.go_to_security()
time.sleep(5)
security_page.go_to_roles()
security_page.external_captive_profile.click()
security_page.delete_captive_portal()
self.LeftPanel.go_to_network_page()
self.NetworkPage.delete_network_if_present()
self.take_s3_snapshot()
self.assert_s1_s2_diff(0)
self.assert_s1_s3_diff()
self.clear()
|
[
"raju_set@testmile.com"
] |
raju_set@testmile.com
|
c538db30c06025857f964a8147d610c90bdc9912
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R4/benchmark/startPyquil814.py
|
4850b493521c3c99e7e11f0e59c1765be7172ca2
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
# qubit number=4
# total number=14
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=5
prog += SWAP(1,0) # number=6
prog += SWAP(1,0) # number=7
prog += Y(3) # number=8
prog += Z(2) # number=11
prog += Y(3) # number=9
prog += RX(0.5466371217246238,1) # number=10
prog += CNOT(1,0) # number=12
prog += CNOT(1,0) # number=13
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil814.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
4c9a8abb2018a549057f8beb05fa7780556a7ce6
|
77e8b7bc211624687eb61fdb071020642b2c0080
|
/bc/Q3/q31.py
|
ca7455879b9c8f2bdbdf985b524f2e514d0d1dd6
|
[] |
no_license
|
wherby/hackerrank
|
fab46ea208042ce8055c2755545896bf69f88895
|
84345f56690ea6b1d5db181b12d2a2669007456c
|
refs/heads/master
| 2020-09-26T23:53:06.841052
| 2019-09-15T12:11:43
| 2019-09-15T12:11:43
| 67,225,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,713
|
py
|
filename = "input/input02.txt"
f=open(filename,'r')
# Enter your code here. Read input from STDIN. Print output to STDOUT
import math
import sys
import bisect
if "f" in locals():
inputA=f
else:
inputA=sys.stdin
class BigBisect:
def __init__(self):
self.Count =80
self.array=[]
for i in range(self.Count):
self.array.append([])
def insort_left(self,index,num):
n = index % self.Count
bisect.insort_left(self.array[n],num)
def remove(self,index,num):
n = index % self.Count
self.array[n].remove(num)
def bisectleft(self,num):
index = 0
for i in range(self.Count):
a1 =self.array[i]
t1 = bisect.bisect_left(a1,num)
index = index +t1
return index
def printS(self):
print self.array
bs=BigBisect()
ins=[]
for line in inputA:
ins.append(line)
n,m = map(int , ins[0].strip().split())
al =map(int , ins[1].strip().split())
tnum=0
ttnum =0
dic1 = {}
sl= []
for i in range(m):
t = al[i]
if t not in dic1:
dic1[t] =[1]
else:
dic1[t].append(i)
bs.insort_left(i,t)
it = bs.bisectleft(t)
#it = bisect.bisect_left(sl,t)
#bisect.insort_left(sl,t)
#it = sl.index(t)
tnum = tnum + i -it - len(dic1[t]) +1
ttnum = tnum
#print ttnum
#print ttnum
#print n -m
for i in range(n-m):
t1 = al[i]
t = al[i +m]
it1 = bs.bisectleft(t1)
tnum = tnum - it1
#sl.remove(t1)
bs.remove(i,t1)
#print sl
dic1[t1] =dic1[t1][1:]
if t not in dic1:
dic1[t] =[1]
else:
dic1[t].append(m+i)
bs.insort_left(i+m,t)
#bisect.insort(sl,t)
#it = sl.index(t)
it = bs.bisectleft(t)
tnum = tnum + m -it - len(dic1[t])
#print tnum,sl
ttnum = ttnum +tnum
print ttnum
|
[
"187225577@qq.com"
] |
187225577@qq.com
|
c462a70aa495ba2662ca1fe303c4eacc3aacd585
|
d8a79f43a7a23f297c9c3a71482506769ca5ab55
|
/NLP Core/Program TextVisualization 3.py
|
d1ddc15652531edbdf9a630c481e7e0689526adb
|
[] |
no_license
|
rishavhack/Natural-Language-Processing-NLP-
|
0d4c66744f97712cb7a24c29523c949588fd3cff
|
42a7271ee20dcaca37468c2a2faff2b5e09fe4e4
|
refs/heads/master
| 2022-11-23T09:30:29.124443
| 2019-09-24T08:12:00
| 2019-11-05T16:32:25
| 148,737,049
| 0
| 1
| null | 2022-11-21T21:11:48
| 2018-09-14T04:48:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,957
|
py
|
import nltk
import heapq
import re
import numpy as np
paragraph = """Thank you all so very much. Thank you to the Academy.
Thank you to all of you in this room. I have to congratulate
the other incredible nominees this year. The Revenant was
the product of the tireless efforts of an unbelievable cast
and crew. First off, to my brother in this endeavor, Mr. Tom
Hardy. Tom, your talent on screen can only be surpassed by
your friendship off screen … thank you for creating a t
ranscendent cinematic experience. Thank you to everybody at
Fox and New Regency … my entire team. I have to thank
everyone from the very onset of my career … To my parents;
none of this would be possible without you. And to my
friends, I love you dearly; you know who you are. And lastly,
I just want to say this: Making The Revenant was about
man's relationship to the natural world. A world that we
collectively felt in 2015 as the hottest year in recorded
history. Our production needed to move to the southern
tip of this planet just to be able to find snow. Climate
change is real, it is happening right now. It is the most
urgent threat facing our entire species, and we need to work
collectively together and stop procrastinating. We need to
support leaders around the world who do not speak for the
big polluters, but who speak for all of humanity, for the
indigenous people of the world, for the billions and
billions of underprivileged people out there who would be
most affected by this. For our children’s children, and
for those people out there whose voices have been drowned
out by the politics of greed. I thank you all for this
amazing award tonight. Let us not take this planet for
granted. I do not take tonight for granted. Thank you so very much."""
dataset = nltk.sent_tokenize(paragraph)
for i in range(len(dataset)):
dataset[i] = dataset[i].lower()
dataset[i] = re.sub(r'\W',' ',dataset[i])
dataset[i] = re.sub(r'\s+',' ',dataset[i])
#Create the histogram
word2count = {}
for data in dataset:
words = nltk.word_tokenize(data)
for word in words:
if word not in word2count.keys():
word2count[word] = 1
else:
word2count[word] += 1
freq_words = heapq.nlargest(100,word2count,key=word2count.get)
X = []
for data in dataset:
vector = []
for word in freq_words:
if word in nltk.word_tokenize(data):
vector.append(1)
else:
vector.append(0)
X.append(vector)
X = np.asarray(X)
|
[
"rishav.jnit@gmail.com"
] |
rishav.jnit@gmail.com
|
1b1b556b50ad5f3c3e7cb9298f559fd6979c54f3
|
303bac96502e5b1666c05afd6c2e85cf33f19d8c
|
/solutions/python3/991.py
|
04761dbc2602deba947593d4f33dd4905289176e
|
[
"MIT"
] |
permissive
|
jxhangithub/leetcode
|
5e82f4aeee1bf201e93e889e5c4ded2fcda90437
|
0de1af607557d95856f0e4c2a12a56c8c57d731d
|
refs/heads/master
| 2022-05-22T12:57:54.251281
| 2022-03-09T22:36:20
| 2022-03-09T22:36:20
| 370,508,127
| 1
| 0
|
MIT
| 2022-03-09T22:36:20
| 2021-05-24T23:16:10
| null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
class Solution:
def brokenCalc(self, X, Y):
res = 0
while X < Y:
res += Y % 2 + 1
Y = (Y + 1) // 2
return res + X - Y
|
[
"cenkay.arapsagolu@gmail.com"
] |
cenkay.arapsagolu@gmail.com
|
35f52912a608ccbcb805ac26295fbff68a2533ff
|
e4efa86abf3dbcea07d5931524d1b06c7f504a68
|
/SIEM/parsers/windows/windows_dism_logs_parser.py
|
1471ff85fb2f2b93248faad5634231a1c08534e0
|
[] |
no_license
|
rajeevsareen/System-Information-and-Event-Management-SIEM-
|
f8db07642e7b361033bc919a9cd1edf8b424a52b
|
fca2b6c075ecd71d22d844f58d547567c1d01f10
|
refs/heads/master
| 2020-06-20T03:41:23.076790
| 2019-07-15T11:07:37
| 2019-07-15T11:07:37
| 196,979,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
import MySQLdb
db = MySQLdb.connect("localhost","root","root","siem")
cursor = db.cursor()
f = open("pathto\\dism.txt")
lines = f.readlines()
length = len(lines)
i = 0
for i in range(0,length):
i = lines[i].strip()
i = i.split()
if(i[2] == 'Info' or i[2] == 'Warning'):
time = i[1]
time = time[:-1]
desc = i[4:]
desc = ' '.join(desc)
desc = desc.replace("'","")
desc = desc.replace('"',"")
sql = "Insert into info values('" + i[0] + "','" + time + "','" + i[2] + "','" + i[3] + "','" + desc + "')"
cursor.execute(sql)
else:
desc = i[2:]
desc = ' '.join(desc)
desc = desc.replace("'","")
desc = desc.replace('"',"")
sql = "Insert into error values('" + i[0] + "','" + i[1] + "','" + desc + "')"
cursor.execute(sql)
db.commit()
print("LOG FILE HAS BEEN SCANNED AND RESLUTS ARE STORED IN DATABASE")
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
35a3b9674d483bfa15f5a5ceb01408cfb6c14286
|
59cc4c5983dd486b93e409da3df9e20cd8dbd04e
|
/tfdiffeq_examples/temp/plot_tf.py
|
d84b94304d00df65a61988e715ac390d97980bab
|
[] |
no_license
|
titu1994/Python-Work
|
a95866bf47a3aba274376ec72b994e2e4fbda634
|
bc7f201ed01e2f7c49ae8d143d29e87e94420dc9
|
refs/heads/master
| 2023-04-27T02:33:30.731988
| 2023-04-22T19:13:03
| 2023-04-22T19:13:03
| 53,428,058
| 13
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
import time
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tfdiffeq import odeint
tf.enable_eager_execution()
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=2000)
parser.add_argument('--rtol', type=float, default=1e-3)
parser.add_argument('--atol', type=float, default=1e-4)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
# parser.add_argument('--adjoint', type=eval, default=False)
parser.set_defaults(viz=True)
args = parser.parse_args()
device = 'gpu:' + str(args.gpu) if tf.test.is_gpu_available() else 'cpu:0'
true_y0 = tf.convert_to_tensor([[1, -1]], dtype=tf.float64)
t_n = np.linspace(-2, 1, num=args.data_size)
t = tf.convert_to_tensor(t_n, dtype=tf.float32)
true_A = tf.convert_to_tensor([[1, -0.2], [-0.2, 1]], dtype=tf.float64)
class Lambda(tf.keras.Model):
def call(self, t, y):
dydt = tf.matmul(y, true_A)
return dydt
with tf.device(device):
t1 = time.time()
pred_y = odeint(Lambda(), true_y0, t, rtol=args.rtol, atol=args.atol, method=args.method)
t2 = time.time()
print("Number of solutions : ", pred_y.shape)
print("Time taken : ", t2 - t1)
pred_y = pred_y.numpy()
plt.plot(t_n, pred_y[:, 0, 0], t_n, pred_y[:, 0, 1], 'r-', label='trajectory')
# plt.plot(time, pred_y.numpy(), 'b--', label='y')
plt.legend()
plt.xlabel('time')
plt.ylabel('magnitude')
plt.show()
plt.plot(pred_y[:, 0, 0], pred_y[:, 0, 1], 'b-', label='phase')
plt.legend()
plt.xlabel('x')
plt.ylabel('y')
plt.show()
|
[
"titu1994@gmail.com"
] |
titu1994@gmail.com
|
ab7d94ff6509aac101925b1ab246dbb74581f0b3
|
f79102231c83674a4c01e56e3953b2a65cb14da2
|
/leetcode/reverse_interger.py
|
9571569e2cbce94a55731f622a4276c52da040ca
|
[] |
no_license
|
Activity00/Python
|
4971b177beaf72df0de97f7e78f400d48104dce1
|
166d97f36bbeea74c84ec57466bd0a65b608ed09
|
refs/heads/master
| 2020-12-24T07:53:06.782982
| 2020-09-29T10:55:43
| 2020-09-29T10:55:43
| 73,362,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
# coding: utf-8
"""
@author: 武明辉
@time: 2018/6/10 10:49
"""
import doctest
"""
Given a 32-bit signed integer, reverse digits of an integer.
Example 1:
Input: 123
Output: 321
Example 2:
Input: -123
Output: -321
Example 3:
Input: 120
Output: 21
Note:
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range:
[−231, 231 − 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer
overflows.
"""
class Solution:
def reverse(self, x):
"""
>>> s = Solution()
>>> s.reverse(123)
321
>>> s.reverse(-123)
-321
>>> s.reverse(120)
21
"""
ret = 0
sign = 1 if x > 0 else -1
x = abs(x)
while x:
x, y = divmod(x, 10)
ret = ret * 10 + y
return ret * sign if -2 ** 31 <= ret <= 2 ** 31 - 1 else 0
def reverse2(self, x):
"""
使用中间字符串
"""
"""
>>> s = Solution()
>>> s.reverse(123)
321
>>> s.reverse(-123)
-321
>>> s.reverse(120)
21
"""
ret = int(str(abs(x))[::-1])
return ret * ((x > 0) - (x < 0)) if -2 ** 31 <= ret <= 2 ** 31 - 1 else 0
if __name__ == '__main__':
doctest.testmod()
|
[
"1032662429@qq.com"
] |
1032662429@qq.com
|
0eb496a235d0f3495fa48fb0f2c58dfd9336be28
|
5d304c6ec0f01edee73e3b612f84307060c0da54
|
/p0067_add_binary.py
|
a7010bf789f911bea168c98e054c3008f15def98
|
[] |
no_license
|
xartisan/leetcode-solutions-in-python
|
cfa06b9e02f7ec0446cf6b71df4ea46caa359adc
|
7e3929a4b5bd0344f93373979c9d1acc4ae192a7
|
refs/heads/master
| 2020-03-14T17:10:07.957089
| 2018-07-29T10:11:01
| 2018-07-29T10:11:01
| 131,713,447
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
class Solution:
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
from itertools import zip_longest
carry = 0
rv = []
table = {
'1': 1,
'0': 0
}
for x, y in zip_longest(reversed(a), reversed(b), fillvalue='0'):
carry, d = divmod(carry + table[x] + table[y], 2)
rv.append(str(d))
if carry != 0:
rv.append(str(carry))
return ''.join(reversed(rv))
|
[
"codeartisan@outlook.com"
] |
codeartisan@outlook.com
|
c0f2b8a927a899507254586333c1131f5e8e00cd
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-cloudide/huaweicloudsdkcloudide/v2/model/show_category_list_response.py
|
ef0346dc2024937e92742cb24b10f878f5c2405d
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,898
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowCategoryListResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'result': 'object',
'status': 'str'
}
attribute_map = {
'result': 'result',
'status': 'status'
}
def __init__(self, result=None, status=None):
"""ShowCategoryListResponse
The model defined in huaweicloud sdk
:param result: 返回值
:type result: object
:param status: 状态
:type status: str
"""
super(ShowCategoryListResponse, self).__init__()
self._result = None
self._status = None
self.discriminator = None
if result is not None:
self.result = result
if status is not None:
self.status = status
@property
def result(self):
"""Gets the result of this ShowCategoryListResponse.
返回值
:return: The result of this ShowCategoryListResponse.
:rtype: object
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this ShowCategoryListResponse.
返回值
:param result: The result of this ShowCategoryListResponse.
:type result: object
"""
self._result = result
@property
def status(self):
"""Gets the status of this ShowCategoryListResponse.
状态
:return: The status of this ShowCategoryListResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ShowCategoryListResponse.
状态
:param status: The status of this ShowCategoryListResponse.
:type status: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowCategoryListResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
bddbcef7b8e45e1edf3a29fb93241e6408279d8e
|
9219c678926a5d28d7441f9b62f1fce85b1716ed
|
/cgi-bin/request/gis/nexrad_storm_attrs.py
|
6d8efe796e60fb903f3e05805c94c84d66a36e96
|
[
"MIT"
] |
permissive
|
mbalonso/iem
|
610b041cb1a171ecbc77071c43596a5f25328ab3
|
66553cbd4bd53d75eb4d5b59b4188f1532127b68
|
refs/heads/master
| 2021-01-11T03:30:09.802178
| 2016-10-22T15:23:59
| 2016-10-22T15:23:59
| 71,001,283
| 0
| 0
| null | 2016-10-22T15:24:00
| 2016-10-15T16:59:28
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,075
|
py
|
#!/usr/bin/python
"""
Dump storm attributes from the database to a shapefile for the users
"""
import datetime
import zipfile
import sys
import cgi
# import cgitb
import psycopg2
import shapefile
import pytz
import cStringIO
# cgitb.enable()
def get_context():
"""Figure out the CGI variables passed to this script"""
form = cgi.FieldStorage()
if 'year' in form:
year1 = form.getfirst('year')
year2 = year1
else:
year1 = form.getfirst('year1')
year2 = form.getfirst('year2')
month1 = form.getfirst('month1')
month2 = form.getfirst('month2')
day1 = form.getfirst('day1')
day2 = form.getfirst('day2')
hour1 = form.getfirst('hour1')
hour2 = form.getfirst('hour2')
minute1 = form.getfirst('minute1')
minute2 = form.getfirst('minute2')
sts = datetime.datetime(int(year1), int(month1), int(day1),
int(hour1), int(minute1))
sts = sts.replace(tzinfo=pytz.timezone("UTC"))
ets = datetime.datetime(int(year2), int(month2), int(day2),
int(hour2), int(minute2))
ets = ets.replace(tzinfo=pytz.timezone("UTC"))
if ets < sts:
s = ets
ets = sts
sts = s
radar = form.getlist('radar')
fmt = form.getfirst('fmt', 'shp')
return dict(sts=sts, ets=ets, radar=radar, fmt=fmt)
def run(ctx):
    """Stream NEXRAD storm attributes for the requested time window and
    RADAR list to stdout as either CSV or a zipped shapefile (CGI response).

    ctx keys: sts/ets (aware datetimes), radar (list of site IDs or 'ALL'),
    fmt ('csv' or anything else for shapefile).
    NOTE(review): legacy Python 2 code (cStringIO); psycopg2/shapefile/zipfile
    are imported at module top, outside this view.
    """
    pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
    cursor = pgconn.cursor()
    # The triple-quoted string below is a bare expression (no-op), kept as
    # in-source commentary from the original author.
    """
    Need to limit what we are allowing them to request as the file would get
    massive. So lets set arbitrary values of
    1) If 2 or more RADARs, less than 7 days
    """
    # Pad a single-radar request with a dummy ID so str(tuple(...)) below
    # renders a well-formed SQL IN clause instead of "('XXX',)".
    if len(ctx['radar']) == 1:
        ctx['radar'].append('XXX')
    radarlimit = ''
    if 'ALL' not in ctx['radar']:
        radarlimit = " and nexrad in %s " % (str(tuple(ctx['radar'])), )
    # Cap multi-radar requests at seven days to bound the response size.
    if len(ctx['radar']) > 2 and (ctx['ets'] - ctx['sts']).days > 6:
        ctx['ets'] = ctx['sts'] + datetime.timedelta(days=7)
    sql = """
    SELECT to_char(valid at time zone 'UTC', 'YYYYMMDDHH24MI') as utctime,
    storm_id, nexrad, azimuth, range, tvs, meso, posh, poh, max_size,
    vil, max_dbz, max_dbz_height, top, drct, sknt,
    ST_y(geom) as lat, ST_x(geom) as lon
    from nexrad_attributes_log WHERE
    valid >= '%s' and valid < '%s' %s ORDER by valid ASC
    """ % (ctx['sts'].strftime("%Y-%m-%d %H:%M+00"),
           ctx['ets'].strftime("%Y-%m-%d %H:%M+00"), radarlimit)
    # print 'Content-type: text/plain\n'
    # print sql
    # sys.exit()
    # sys.stderr.write("Begin SQL...")
    cursor.execute(sql)
    if cursor.rowcount == 0:
        sys.stdout.write("Content-type: text/plain\n\n")
        sys.stdout.write("ERROR: no results found for your query")
        return
    fn = "stormattr_%s_%s" % (ctx['sts'].strftime("%Y%m%d%H%M"),
                              ctx['ets'].strftime("%Y%m%d%H%M"))
    # sys.stderr.write("End SQL with rowcount %s" % (cursor.rowcount, ))
    # CSV path: dump rows directly, no shapefile packaging needed.
    if ctx['fmt'] == 'csv':
        sys.stdout.write("Content-type: application/octet-stream\n")
        sys.stdout.write(("Content-Disposition: attachment; "
                          "filename=%s.csv\n\n") % (fn,))
        sys.stdout.write(("VALID,STORM_ID,NEXRAD,AZIMUTH,RANGE,TVS,MESO,POSH,"
                          "POH,MAX_SIZE,VIL,MAX_DBZ,MAZ_DBZ_H,TOP,DRCT,SKNT,"
                          "LAT,LON\n"))
        for row in cursor:
            sys.stdout.write(",".join([str(s) for s in row])+"\n")
        return
    # Shapefile path: one POINT record per attribute row.
    w = shapefile.Writer(shapeType=shapefile.POINT)
    w.field('VALID', 'C', 12)
    w.field('STORM_ID', 'C', 2)
    w.field('NEXRAD', 'C', 3)
    w.field('AZIMUTH', 'I')
    w.field('RANGE', 'I')
    w.field('TVS', 'C', 10)
    w.field('MESO', 'C', 10)
    w.field('POSH', 'I')
    w.field('POH', 'I')
    w.field('MAX_SIZE', 'F', 5, 2)
    w.field('VIL', 'I')
    w.field('MAX_DBZ', 'I')
    w.field('MAX_DBZ_H', 'F', 5, 2)
    w.field('TOP', 'F', 5, 2)
    w.field('DRCT', 'I')
    w.field('SKNT', 'I')
    w.field('LAT', 'F', 7, 4)
    w.field('LON', 'F', 9, 4)
    for row in cursor:
        # lon/lat are the last two SELECTed columns.
        w.point(row[-1], row[-2])
        w.record(*row)
    # sys.stderr.write("End LOOP...")
    shp = cStringIO.StringIO()
    shx = cStringIO.StringIO()
    dbf = cStringIO.StringIO()
    w.save(shp=shp, shx=shx, dbf=dbf)
    # sys.stderr.write("End of w.save()")
    # Bundle the shapefile parts plus the EPSG:4326 projection file into
    # an in-memory zip and stream it to the client.
    zio = cStringIO.StringIO()
    zf = zipfile.ZipFile(zio, mode='w',
                         compression=zipfile.ZIP_DEFLATED)
    zf.writestr(fn+'.prj',
                open(('/mesonet/www/apps/iemwebsite/data/gis/meta/4326.prj'
                      )).read())
    zf.writestr(fn+".shp", shp.getvalue())
    zf.writestr(fn+'.shx', shx.getvalue())
    zf.writestr(fn+'.dbf', dbf.getvalue())
    zf.close()
    sys.stdout.write(("Content-Disposition: attachment; "
                      "filename=%s.zip\n\n") % (fn,))
    sys.stdout.write(zio.getvalue())
def main():
    """Parse the CGI form into a context dict and emit the requested file."""
    ctx = get_context()
    run(ctx)

if __name__ == '__main__':
    # Go Main!
    main()
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
45e0f517072b23638872c42b0114a276ef0b55c9
|
f63028878311f21f73ed21f9bc88a0fd2ba8ba88
|
/07.raspberrypi/02.GPIO-ZERO/01.LED/ex02.py
|
e4969cb786791b32475ef09c44ffba63e63d8b52
|
[] |
no_license
|
nugeat23/workspace
|
ac12b93b0cb826206138aa2262382b0e6389977b
|
221344b95daa40c3ba66d27e04cbf9dae3172edc
|
refs/heads/master
| 2023-07-14T20:37:32.851769
| 2021-09-01T08:55:01
| 2021-09-01T08:55:01
| 383,780,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# Blink an LED on GPIO pin 19 at 1 Hz (0.5 s on / 0.5 s off) until the
# process is interrupted.
from gpiozero import LED
from signal import pause

green = LED(19)
green.blink(on_time=0.5, off_time=0.5)

try:
    # pause() sleeps forever; any exception (e.g. KeyboardInterrupt on
    # Ctrl-C) falls through to the final print below.
    pause()
except:
    pass
print("end")
|
[
"nugeat23@gmail.com"
] |
nugeat23@gmail.com
|
c63e796f7add4cd1460f1ec2e563614475804e2d
|
3ae36a5791c26bb7b41a6ed7d81d16cb45cfb8c9
|
/python-practice/old_practice/practice_replace_words.py
|
c079eec121325f09930a6d453633cd5933f3d7da
|
[] |
no_license
|
crossin/Crossin-practices
|
0ef23022e3f298862aa831a7cb9684dc4aa04653
|
1b0cbe8db9b947122c40dcfca4ae883cd99b6087
|
refs/heads/master
| 2021-01-01T16:42:52.298084
| 2017-07-11T01:17:38
| 2017-07-11T01:17:38
| 97,899,778
| 1
| 0
| null | 2017-07-21T02:58:33
| 2017-07-21T02:58:33
| null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
#-*- coding:utf-8 -*-
# Write the banned-word list to words.txt, one word per line.
def generate_file():
    """Create words.txt containing the hard-coded banned words."""
    banned = ['a1', 'b2', 'cc', 'de', '23']
    with open('words.txt', 'w') as out:
        out.writelines(word + '\n' for word in banned)
# Load the banned-word file into the module-level list b_words.
def block():
    """Read words.txt and store the stripped lines in global b_words."""
    global b_words
    with open('words.txt', 'r') as src:
        b_words = [line.strip() for line in src.readlines() if line]
    # print(b_words)
# Mask every banned word in the input and print the result.
def replace(word):
    """Print *word* with each entry of the global b_words list replaced by
    an equal-length run of asterisks."""
    for banned in b_words:
        word = word.replace(banned, '*' * len(banned))
    print(word)
# Build the banned-word file, load it, then censor user input in a loop
# until an empty line is entered.
generate_file()
block()
while True:
    word = input('>>>>')
    if word:
        replace(word)
    else:
        # Empty input terminates the program.
        exit()
|
[
"782744680@qq.com"
] |
782744680@qq.com
|
0f5b840bf23c437aff4c68afa7cd02783ff22ff1
|
c9fbb0884039f381a874f92d2d786ea7df26df8f
|
/matrix.py
|
7420b94b7d98544c445966502a4ddb3462189236
|
[] |
no_license
|
rakeshbalakrishnan08/enter-the-matrix
|
20b605c63fb969f780e2cf5232ccc5f4e3ebe266
|
307fee16c449fbd6aa678796a4b04eb1e6ed1f28
|
refs/heads/master
| 2020-04-22T22:43:46.822306
| 2019-02-14T02:09:40
| 2019-02-14T02:09:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
from random import random
from vector import Vector
class Matrix:
    """Minimal dense matrix whose rows are Vector instances."""

    @staticmethod
    def random(rows, cols):
        """Return a rows x cols matrix of uniform random values in [0, 1)."""
        randrows = [[random() for _ in range(cols)] for _ in range(rows)]
        return Matrix(*[Vector(*row) for row in randrows])

    def __init__(self, *vectors):
        # Each positional argument is one row; shape is (rows, cols).
        self.vectors = vectors
        self.shape = (len(vectors), len(vectors[0].nums))

    def __str__(self):
        nl = '\n '
        return f"[{nl.join([str(v) for v in self.vectors])}]"

    def __add__(self, other):
        """Element-wise sum; returns None when the shapes differ."""
        if self.shape != other.shape:
            return
        return Matrix(*[self.vectors[row] + other.vectors[row] for row in range(self.shape[0])])

    def scale(self, scalar):
        """Return a new matrix with every entry multiplied by scalar."""
        return Matrix(*[v.scale(scalar) for v in self.vectors])

    def dot(self, other):
        """Matrix product self @ other; returns None when shapes are incompatible.

        Bug fix: the original guard required other.shape to equal
        reversed(self.shape), which wrongly rejected valid products such as
        (2,3) @ (3,4). Multiplication only requires self's column count to
        equal other's row count.
        """
        if self.shape[1] != other.shape[0]:
            return
        # Transpose `other` so each of its columns becomes a row Vector.
        column_vectors = [Vector(*col) for col in zip(*[v.nums for v in other.vectors])]
        transposed = Matrix(*column_vectors)
        vectors = []
        for row in self.vectors:
            nums = [row.dot(col) for col in transposed.vectors]
            vectors.append(Vector(*nums))
        return Matrix(*vectors)
|
[
"chyld.medford@gmail.com"
] |
chyld.medford@gmail.com
|
cf8dc9cd19e111494722163a89e9429aab62862d
|
ea4567b4388ea97c8ca718d9e331dc796439ee44
|
/app_autonation/appium_learn/hupu_project/utils/read_config.py
|
ee9a1d228323e1834db8ea158c48a7cf1693128d
|
[] |
no_license
|
Kingwolf9527/python_knowledge
|
ace65470ec706cae195b228b8e8d6ca8db574db8
|
1ccb3a788c172f3122a7c119d0607aa90934e59b
|
refs/heads/master
| 2020-12-04T06:12:49.809020
| 2020-02-10T18:22:36
| 2020-02-10T18:22:44
| 231,647,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
# ! /usr/bin/env python
# - * - coding:utf-8 - * -
# __author__ : KingWolf
# createtime : 2019/6/5 20:18
import configparser
import os
class Read_config(object):
    """Thin wrapper around configparser for files stored in ../config/."""

    def __init__(self, file):
        # Config files live in <package root>/config/, two directory levels
        # above this module.
        base_dir = os.path.dirname(os.path.dirname(__file__))
        config_path = os.path.join(base_dir, 'config', file)
        self.cf = configparser.ConfigParser()
        self.cf.read(config_path, encoding='utf-8')

    def get_selection(self, selection_name, parms):
        """Return the value of option *parms* in section *selection_name*."""
        return self.cf.get(selection_name, parms)
if __name__ == '__main__':
    # Ad-hoc smoke test: requires ../config/hupu_app_config.ini to exist.
    r = Read_config(file='hupu_app_config.ini')
    activity = r.get_selection('capability_app','appActivity')
    print(activity)
|
[
"lccr777@163.com"
] |
lccr777@163.com
|
62f3ebe3da98be68c4292a35064555348fa7afcd
|
725ac5a0bf72829be627bf8dc82fdc51ba0f94ae
|
/Distillation/three_layer_self-attention_to_distill_bert/model.py
|
e55977c74e165bf3dcd217c8a1a6c53ca094e275
|
[] |
no_license
|
shawroad/NLP_pytorch_project
|
fa14b6e4a156229765e1d552901d0492d8e1def3
|
1272fed2dc8fef78a9ded0f1ae1644d613a3b57b
|
refs/heads/master
| 2023-06-25T02:37:35.503251
| 2023-06-12T10:57:11
| 2023-06-12T10:57:11
| 229,694,655
| 530
| 104
| null | 2020-12-08T09:21:47
| 2019-12-23T06:54:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/9/16 9:33
# @Author : xiaolu
# @FileName: model.py
# @Software: PyCharm
from torch import nn
from transformers import BertModel, BertConfig
from torch.nn import CrossEntropyLoss
class Model(nn.Module):
    """BERT/RoBERTa-based two-class sequence classifier: a linear head on
    the pooled [CLS] output."""

    def __init__(self):
        super(Model, self).__init__()
        # Config and weights are loaded from hard-coded local paths.
        self.config = BertConfig.from_pretrained('./roberta_pretrain/bert_config.json')
        self.roberta = BertModel.from_pretrained('./roberta_pretrain/pytorch_model.bin', config=self.config)
        self.num_labels = 2
        self.output = nn.Linear(self.config.hidden_size, self.num_labels)

    def forward(self, input_ids=None, attention_mask=None, segment_ids=None, labels=None):
        """Return (loss, logits) when labels are given, otherwise logits."""
        # input_ids, input_mask, segment_ids, labels=labels_ids
        # NOTE(review): the tuple unpacking assumes an older transformers
        # release where BertModel returns (sequence_output, pooled_output);
        # newer versions return a ModelOutput -- confirm the pinned version.
        sequence_output, cls_output = self.roberta(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=attention_mask)
        logits = self.output(cls_output)
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            return loss, logits
        return logits
|
[
"luxiaonlp@163.com"
] |
luxiaonlp@163.com
|
292d3c40993bd425955fa6ca2659a1c6fb8f5970
|
876de904572c611b8cbad21f50877cdc812f2946
|
/Leetcode/525. 连续数组.py
|
a1a22371c2d95ccef3746307643fce3107cb3c25
|
[
"MIT"
] |
permissive
|
QDylan/Learning-
|
66a33de0e15f26672fb63c0b393866721def27ae
|
f09e0aa3de081883b4a7ebfe4d31b5f86f24b64f
|
refs/heads/master
| 2023-02-08T02:34:26.616116
| 2020-12-25T05:02:32
| 2020-12-25T05:02:32
| 263,805,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
# -*- coding: utf-8 -*-
"""
@Time : 2020-10-08 11:32
@Author : QDY
@FileName: 525. 连续数组.py
@Software: PyCharm
"""
"""
给定一个二进制数组, 找到含有相同数量的 0 和 1 的最长连续子数组(的长度)。
示例 1:
输入: [0,1]
输出: 2
说明: [0, 1] 是具有相同数量0和1的最长连续子数组。
示例 2:
输入: [0,1,0]
输出: 2
说明: [0, 1] (或 [1, 0]) 是具有相同数量0和1的最长连续子数组。
"""
class Solution:
    def findMaxLength(self, nums) -> int:
        """Length of the longest contiguous subarray with equal 0s and 1s.

        Treat each 0 as -1; a balanced subarray is then one whose running
        sum is unchanged between its endpoints. Record the first index at
        which each prefix sum appears and measure the span to any repeat.
        """
        first_seen = {0: -1}  # prefix sum -> earliest index it occurred
        running = 0
        best = 0
        for idx, value in enumerate(nums):
            running += 1 if value > 0 else -1
            if running in first_seen:
                best = max(best, idx - first_seen[running])
            else:
                first_seen[running] = idx
        return best
|
[
"qdy960411@outlook.com"
] |
qdy960411@outlook.com
|
2783c07eb0d27a43d07d1abde5dc6d95c2170639
|
edcbe265ae91291ef0ffbf776b2cd4d8aab1756c
|
/store/migrations/0006_auto_20160628_0800.py
|
e2e403f22302bf0432b4aa03e260b0f85b4cfc55
|
[] |
no_license
|
mehdi1361/E_Shop
|
15e0db32d059e6da163ac2f8cc7ec2e6a3398828
|
2ca55f98ca1f3bcfbf1e03bc2de63ec56cf11022
|
refs/heads/master
| 2020-04-06T07:05:35.496303
| 2017-10-08T13:37:45
| 2017-10-08T13:37:45
| 61,431,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Fix the misspelled StoreHouse field: drop 'repostory', add 'repository'.

    NOTE(review): RemoveField + AddField discards the old column's data;
    default=1 backfills existing rows with CoRepository pk 1.
    """

    dependencies = [
        ('store', '0005_auto_20160628_0748'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='storehouse',
            name='repostory',
        ),
        migrations.AddField(
            model_name='storehouse',
            name='repository',
            field=models.ForeignKey(to='store.CoRepository', default=1, related_name='repository', verbose_name='repository'),
        ),
    ]
|
[
"mhd.mosavi@gmail.com"
] |
mhd.mosavi@gmail.com
|
b767506677b7f314037fe1046c6f2db5fd5774d7
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/XALogvSrMr3LRwXPH_8.py
|
196f0b7e20b80f59076253fccf2f6364d9000f14
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
def is_shuffled_well(lst):
    """Return False when the list contains three consecutive elements that
    ascend or descend by exactly one (e.g. 4,5,6 or 9,8,7); True otherwise."""
    ascending = descending = 0
    for prev, cur in zip(lst, lst[1:]):
        if cur == prev + 1:
            ascending, descending = ascending + 1, 0
        elif cur == prev - 1:
            ascending, descending = 0, descending + 1
        else:
            ascending = descending = 0
        # Two consecutive unit steps means a run of three elements.
        if ascending == 2 or descending == 2:
            return False
    return True
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
78bb74c0825151c5a21945e33b69b42d02e0b56f
|
0ba3f4517ba70564e8d0049eec2612c99c04a887
|
/python-introducton/lesson6-2.py
|
f95a11fedfcfb38f5478cf1755c17bce1e770641
|
[] |
no_license
|
akihiro-hosoya/study-python
|
238c90e35aad4062bc08c7f56334bbdcc714a676
|
f41430cab8e448b79728121a0d50bf62295f96f5
|
refs/heads/master
| 2020-12-27T15:17:51.510587
| 2020-03-05T18:45:28
| 2020-03-05T18:45:28
| 237,949,984
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
#coding:utf-8
import tkinter as tk
import tkinter.messagebox as tmsg
# Callback executed when the button is clicked.
def ButtonClick():
    # Fetch the text currently in the entry widget (module-level editbox1).
    b = editbox1.get()
    # Display it in an info message box.
    tmsg.showinfo('入力されたテキスト', b)
# Create the main window (standard tkinter boilerplate).
root = tk.Tk()
# geometry: set the window size.
root.geometry('400x150')
# title: set the window title.
root.title('数当てゲーム')
# Label: the prompt text.
label1 = tk.Label(root, text='数を入力してね', font=('Helvetica', 14))
# place: absolute positioning inside the window.
label1.place(x = 20, y = 20)
# Entry: single-line text input for the guessed number.
editbox1 = tk.Entry(width = 4, font=('Helvetica', 28))
editbox1.place(x=120, y=60)
# Button: wired to the ButtonClick callback defined above.
button1 = tk.Button(root, text ='チェック', font=('Helvetica', 14), command=ButtonClick)
button1.place(x=220, y=60)
root.mainloop()  # enter the Tk event loop (blocks until the window closes)
|
[
"iicina.yftj.33@gmail.com"
] |
iicina.yftj.33@gmail.com
|
700e427971a5ef455b2667c5e74494d0e83009b9
|
11aac6edab131293027add959b697127bf3042a4
|
/widthOfBinaryTree.py
|
9172a61c3a6b5bd303543c7b7f2af3066bce1f73
|
[] |
no_license
|
jdanray/leetcode
|
a76b3436002b31865967b757b73c85992636383b
|
fd736af3e79899b86dac89d4d925d5bd985944ad
|
refs/heads/master
| 2023-08-15T01:20:05.110565
| 2023-08-14T00:25:58
| 2023-08-14T00:25:58
| 148,686,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,503
|
py
|
# https://leetcode.com/problems/maximum-width-of-binary-tree/
class Solution:
    def widthOfBinaryTree(self, root):
        """Maximum width of any level of the tree, counting null gaps.

        Each node gets a heap-style position (left = 2p-1, right = 2p
        relative to its parent's position p); a level's width is
        rightmost - leftmost + 1. Traversal is iterative DFS, collecting
        the occupied positions per depth.
        """
        if not root:
            return 0
        positions = {}  # depth -> list of occupied positions
        pending = [(1, 1, root)]
        while pending:
            depth, pos, node = pending.pop()
            if not node:
                continue
            positions.setdefault(depth, []).append(pos)
            base = 2 * pos
            pending.append((depth + 1, base - 1, node.left))
            pending.append((depth + 1, base, node.right))
        return 1 + max(max(row) - min(row) for row in positions.values())
"""
This is an improvement on the previous solution
The code is prettier and simpler, and the program uses less space
Invariants:
-Of course, curlevel represents the current level
-For each curlevel, leftpos/rightpos represents the position of the leftmost/rightmost node on that level
Initially, they both equal 1. At the first level, there's only root, and it's both the leftmost and rightmost node
As we scan a level, rightpos must change
-maxwidth represents the maximum width we've seen so far while scanning levels
-width is equal to the difference between the leftmost node and the rightmost node
It increases as we scan a level
We always scan the whole level, so we find the biggest width for each level
We use BFS, so the first time we encounter a node on a next level, we know we've moved on to that level
So, update curlevel and leftpos
The BFS runs left to right, which makes it easier to keep track of leftpos and rightpos
"""
class Solution:
    def widthOfBinaryTree(self, root):
        """Maximum level width via left-to-right BFS.

        The leftmost position resets when the scan first reaches a new
        level; every processed node updates the rightmost position, so the
        running width is always rightmost - leftmost + 1 for the current
        level. BFS order guarantees the first node of a level is its
        leftmost one.
        """
        widest = 0
        leftmost = rightmost = 1
        level = 1
        pending = [(level, leftmost, root)]
        while pending:
            depth, pos, node = pending.pop(0)
            if not node:
                continue
            rightmost = pos
            if depth == level + 1:
                # First node seen on the next level: restart the window.
                leftmost = pos
                level = depth
            widest = max(widest, rightmost - leftmost + 1)
            base = 2 * pos
            pending.append((depth + 1, base - 1, node.left))
            pending.append((depth + 1, base, node.right))
        return widest
# New solution
# Don't need to keep track of the current level
class Solution(object):
    def widthOfBinaryTree(self, root):
        """Maximum width of the tree, gaps included, via level-order BFS.

        Positions follow binary-heap numbering (left = 2p, right = 2p+1);
        a level's width is rightmost - leftmost + 1 over its non-null nodes.

        Bug fix: the original returned 1 for an empty tree because the
        sentinels left == right == -1 still produced a width of 1. An empty
        tree now correctly reports 0, and levels containing only null
        placeholders no longer contribute a spurious width.
        """
        if not root:
            return 0
        queue = collections.deque([[root, 1]])
        res = 0
        while queue:
            left = -1
            right = -1
            for _ in range(len(queue)):
                node, pos = queue.popleft()
                if not node:
                    continue
                if left == -1:
                    left = pos
                right = pos
                queue.append([node.left, pos * 2])
                queue.append([node.right, pos * 2 + 1])
            # Only count levels that actually contained a node.
            if left != -1:
                res = max(res, right - left + 1)
        return res
|
[
"jdanray@users.noreply.github.com"
] |
jdanray@users.noreply.github.com
|
836ae114da35b6737a7f57a372a5f74d0ba2bc7e
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/Django/南京邮电大学/sacc_sign/users/migrations/0006_remove_emailverifyrecord_send_type.py
|
d37793f76a0bec3545a72faf7ed013cff0d875f7
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885
| 2020-05-15T09:02:08
| 2020-05-15T09:02:08
| 195,261,757
| 1
| 0
| null | 2021-06-10T23:33:33
| 2019-07-04T15:01:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 339
|
py
|
# Generated by Django 2.0.3 on 2018-03-27 12:12
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused send_type column from EmailVerifyRecord."""

    dependencies = [
        ('users', '0005_emailverifyrecord'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='emailverifyrecord',
            name='send_type',
        ),
    ]
|
[
"13269469526@163.com"
] |
13269469526@163.com
|
5131f22e9027b94118beca9e65efc7f45212a6af
|
2a266dda00578ea177b231e8f0dfd14a1824d2e6
|
/abbrev/freq_ab.py
|
c7373d4ac5df760fbcdbf2b775d66989165cff3d
|
[] |
no_license
|
sanskrit-lexicon/PWK
|
fbb51c19d9169e4c28d5c9056484c4a53def78eb
|
57d07725b828a95b22b859422287474bfd858ffe
|
refs/heads/master
| 2023-08-17T04:32:37.387691
| 2023-08-15T18:34:46
| 2023-08-15T18:34:46
| 15,903,957
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
#-*- coding:utf-8 -*-
"""freq_ab.py
"""
from __future__ import print_function
import sys, re,codecs
import digentry
def write_freq(fileout, freq, abbrevd):
    """Write one '<abbrev> <count> <tooltip>' line per key of freq to fileout.

    Keys absent from abbrevd get the '[unknown]' tooltip and are reported
    on stdout; matched Abbrev records have their .used flag set (side
    effect consumed later by check_used).
    """
    ordered = sorted(freq.keys(), key=lambda k: k.lower())
    lines = []
    missing = []
    for key in ordered:
        record = abbrevd.get(key)
        if record is None:
            missing.append(key)
            tip = '[unknown]'
        else:
            tip = record.tip
            record.used = True
        lines.append('%s %s %s' % (key, freq[key], tip))
    with codecs.open(fileout, "w", "utf-8") as f:
        for line in lines:
            f.write(line + '\n')
    print(len(ordered), "records written to", fileout)
    print(len(missing), "abbreviations without tooltip")
    for key in missing:
        print(' %s %s unknown' % (key, freq[key]))
def find_freq(entries):
    """Count occurrences of <ab>X</ab> and <lex>X</lex> markup across the
    datalines of every entry; returns {inner string: count}."""
    counts = {}
    patterns = (r'<ab>([^<]*)</ab>', r'<lex>([^<]*)</lex>')
    for entry in entries:
        for line in entry.datalines:
            for pattern in patterns:
                for match in re.finditer(pattern, line):
                    text = match.group(1)
                    counts[text] = counts.get(text, 0) + 1
    print(len(counts.keys()), 'different greek strings')
    return counts
class Abbrev:
    """One abbreviation record parsed from a tab-separated tooltip line of
    the form: <abbrev>TAB<id>abbrev</id> <disp>tooltip text</disp>."""

    def __init__(self, line):
        m = re.search(r'^([^\t]+)\t<id>(.*?)</id> *<disp>(.*?)</disp>', line)
        if m is None:
            # Malformed input is fatal for this batch tool.
            print('Abbrev error:', line)
            exit(1)
        self.abbrev = m.group(1)
        id_field = m.group(2)
        self.tip = m.group(3)
        # The leading field and the <id> element are expected to agree.
        if self.abbrev != id_field:
            print('Abbrev warning: %s != %s' % (self.abbrev, id_field))
        # Set to True by write_freq when the tooltip is actually used.
        self.used = False
def init_abbrev(filein):
    """Read abbreviation records from filein (lines starting with ';' are
    comments).

    Returns (records, {abbrev: record}); duplicate abbreviations are
    reported and the later record wins in the dictionary.
    """
    with codecs.open(filein, encoding='utf-8', mode='r') as f:
        lines = [line.rstrip('\r\n') for line in f if not line.startswith(';')]
    recs = [Abbrev(line) for line in lines]
    d = {}
    for rec in recs:
        if rec.abbrev in d:
            print('init_abbrev: duplicate abbreviation', rec.abbrev)
        d[rec.abbrev] = rec
    print(len(recs), "abbreviations read from", filein)
    return recs, d
def check_used(abbrevs):
    """Report every Abbrev record whose tooltip was never consumed by
    write_freq (i.e. .used is still False)."""
    unused = [rec for rec in abbrevs if not rec.used]
    print(len(unused), "abbreviation tips unused:")
    for rec in unused:
        print(' %s %s' % (rec.abbrev, rec.tip))
if __name__=="__main__":
filein = sys.argv[1] # digitization consisten with option
filein1 = sys.argv[2] # tooltip file for abbreviations
fileout = sys.argv[3] # changes for filein
entries = digentry.init(filein)
abbrevs,dabbrevs= init_abbrev(filein1)
d = find_freq(entries)
write_freq(fileout,d,dabbrevs) # also modifies abbrev used field
check_used(abbrevs)
|
[
"funderburkjim@gmail.com"
] |
funderburkjim@gmail.com
|
29e331b49fe69da564b8f76ca88314dd473d7072
|
178a155566be0a59bdf9095d7ba2c2e0625d9fd3
|
/bin/lazyhtml.py
|
583176a8c2c92a0a0c1da561c4a40c74202d3365
|
[] |
no_license
|
pfuntner/toys
|
a4a1e78f0ed836ac86bf5dd918c67153444acb04
|
194ebc5ab693e928a809b6dfe5783c08e6a9d081
|
refs/heads/master
| 2023-08-23T23:41:36.781341
| 2023-08-09T12:01:10
| 2023-08-09T12:01:10
| 99,418,283
| 10
| 2
| null | 2022-12-24T14:15:09
| 2017-08-05T11:17:13
|
Python
|
UTF-8
|
Python
| false
| false
| 6,402
|
py
|
#! /usr/bin/env python3
import sys
import json
import signal
import logging
import argparse
from html.parser import HTMLParser
import xml.etree.ElementTree as ET
class LazyHtml(object):
    """
    This outer class is the interface we expect callers to use:

        parser = LazyHtml(log)
        root = parser.parse(html_string)
    """

    class Node(object):
        """
        Helper class to represent a node in an HTML tree.
        We don't expect the caller to reference this class so it's an inner
        class of LazyHtml.
        """
        def __init__(self, tag, attrs):
            self.path = None        # '/'-joined tag path, set when pushed on the stack
            self.tag = tag
            self.attrs = attrs      # list of (name, value) tuples from HTMLParser
            self.children = list()  # mix of Node objects and raw strings until normalized
            self.text = None        # filled by normalize_strings (ElementTree convention)
            self.tail = None        # filled by normalize_strings (ElementTree convention)

        def __str__(self):
            return f'<{self.tag} {self.attrs}/>'

    class BaseLazyHtmlParser(HTMLParser):
        """
        Helper class which extends the base HTMLParser class to forgive common
        HTML errors such as neglecting to close tags that prevent the source
        from being parsed as XHTML. We don't expect the caller to reference
        this class so it's an inner class of LazyHtml.
        """
        def __init__(self, log):
            self.root = None      # first tag encountered becomes the tree root
            self.log = log
            self.stack = list()   # currently-open tags, innermost last
            super().__init__()

        def handle_starttag(self, tag, attrs):
            """
            Remember a new tag
            """
            self.log.debug(f'Encountered a <{tag}>')
            node = LazyHtml.Node(tag, attrs)
            if self.root is None:
                self.root = node
            else:
                # NOTE(review): if a second top-level element appears after
                # the root has been closed, self.stack is empty here and this
                # raises IndexError -- confirm inputs have a single root.
                self.stack[-1].children.append(node)
            self.stack.append(node)
            node.path = '/' + '/'.join([stack_node.tag for stack_node in self.stack])

        def handle_endtag(self, tag):
            """
            Finish a tag - we will forgive if the "current" node is not the
            one being closed... We'll work our way up the stack to find the
            LAST node that matches the tag being closed.
            It is an error if there are no open nodes matching the tag being
            closed.
            """
            self.log.debug(f'Encountered a </{tag}>')
            if tag not in [node.tag for node in self.stack]:
                self.log.error(f'Encountered unmatched </{tag}>')
                exit(1)
            # Forgive unclosed inner tags by popping until `tag` is on top.
            while self.stack and self.stack[-1].tag != tag:
                self.log.debug(f'Encountered </{tag}> but expected </{self.stack[-1].tag}> first')
                self.stack.pop()
            self.stack.pop()

        def handle_data(self, data):
            """
            Add data to a node.
            It is an error if we read non-whitespace data without an open node.
            """
            self.log.debug(f'Encountered data {data!r}')
            if self.stack:
                self.stack[-1].children.append(data.strip())
            elif not data.strip():
                self.log.debug('Ignoring whitespace without a node')
            else:
                self.log.error(f'Encountered data {data!r} without a node')
                exit(1)

    def __init__(self, log=None):
        # Callers may supply their own logger; otherwise configure a default.
        if log:
            self.log = log
        else:
            logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
            self.log = logging.getLogger()
        self.parser = self.BaseLazyHtmlParser(self.log)

    def get_root(self, html):
        """Parse `html` and return the root Node (strings still inline)."""
        self.parser.feed(html)
        return self.parser.root

    def parse(self, html):
        """Parse `html` and return an xml.etree.ElementTree element tree."""
        root = self.get_root(html)
        self.normalize_strings(root)
        return self.to_xml(root)

    @staticmethod
    def get_attrs(tuples):
        # Convert HTMLParser's [(name, value), ...] attribute list to a dict.
        return {name:value for (name, value) in tuples}

    def to_json(self, node):
        """Recursively convert a Node tree to plain dicts/strings for JSON."""
        ret = None
        if node:
            if isinstance(node, str):
                return node
            else:
                ret = {'tag': node.tag, 'attrs': self.get_attrs(node.attrs), 'children': [], 'path': node.path}
                for child in node.children:
                    ret['children'].append(self.to_json(child))
        return ret

    def normalize_strings(self, node):
        """
        Prepare a tree for XML by moving string nodes to either:
        - parent.text if the string node is the first node of the children of the parent
        - older_sibling.tail if the string node is not the first node of the children of the parent
        """
        if node:
            pos = 0
            if node.children:
                if isinstance(node.children[0], str):
                    self.log.debug(f'{node.tag} text: {node.children[0]!r}')
                    node.text = node.children.pop(0)
                else:
                    self.normalize_strings(node.children[0])
                    pos += 1
                while pos < len(node.children):
                    if isinstance(node.children[pos], str):
                        self.log.debug(f'{node.tag} tail: {node.children[pos]!r}')
                        node.children[pos-1].tail = node.children.pop(pos)
                    else:
                        self.normalize_strings(node.children[pos])
                        pos += 1

    def to_xml(self, node, parent=None):
        """Recursively build ElementTree elements from a normalized Node tree."""
        params = {'attrib': self.get_attrs(node.attrs)}
        xml_node = ET.Element(node.tag, **params) if parent is None else ET.SubElement(parent, node.tag, **params)
        if node.text:
            xml_node.text = node.text
        if node.tail:
            xml_node.tail = node.tail
        for child in node.children:
            self.to_xml(child, xml_node)
        return xml_node
def visit(node, indent=0):
    """Print the raw Node tree as indented pseudo-HTML (default output mode).

    Relies on the module-level `html_parser` created in the __main__ block
    for attribute formatting, so it is only callable after that global exists.
    """
    if node:
        if isinstance(node, str):
            print(f'{" " * (indent*2)}{node}')
        else:
            attrs = (' ' + ' '.join([f'{name}={value!r}' for (name, value) in html_parser.get_attrs(node.attrs).items()])) if node.attrs else ''
            print(f'{" " * (indent*2)}<{node.tag}{attrs}>')
            for child in node.children:
                visit(child, indent+1)
            print(f'{" " * (indent*2)}</{node.tag}>')
if __name__ == '__main__':
    # CLI: read HTML on stdin; emit JSON (-j), XML (-x), or an indented dump.
    parser = argparse.ArgumentParser(description=sys.argv[0])
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-j', '--json', action='store_true', help='Generate JSON output')
    group.add_argument('-x', '--xml', action='store_true', help='Generate XML output')
    parser.add_argument('-v', '--verbose', action='count', help='Enable debugging')
    args = parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
    log = logging.getLogger()
    # Each -v raises verbosity one level (WARNING -> INFO -> DEBUG).
    log.setLevel(logging.WARNING - (args.verbose or 0)*10)

    # Exit quietly when stdout closes early (e.g. piping into `head`).
    signal.signal(signal.SIGPIPE, lambda signum, stack_frame: exit(0))

    if sys.stdin.isatty():
        parser.error('stdin must be redirected')

    html_parser = LazyHtml(log)
    root = html_parser.get_root(sys.stdin.read())
    if args.json:
        json.dump(html_parser.to_json(root), sys.stdout)
    elif args.xml:
        html_parser.normalize_strings(root)
        ET.dump(html_parser.to_xml(root))
    else:
        # Default: human-readable indented dump via visit().
        visit(root)
|
[
"jpfuntne@cisco.com"
] |
jpfuntne@cisco.com
|
e76ea8046de2f60095cd2ccd24fede57d5ea31e1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2096/60763/281373.py
|
e2192515db85dfcca18346ca2edfb6e6b957b896
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
# Read a non-negative integer and print floor(sqrt(x)) by linear search.
x = int(input())
sqrt = 0
# Advance until sqrt**2 hits x exactly or the next square would overshoot.
while sqrt*sqrt <= x:
    if sqrt*sqrt == x or (sqrt+1)*(sqrt+1) > x:
        break
    sqrt+=1
print(sqrt)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
47976502d48e9a7befc8f25a4a40b5c79b62478c
|
efc690a4c42b1511deb0fe80bf146872c45aed69
|
/conf_site/reviews/views/results.py
|
8e9c337d3fcfa45024658285ca45002dd3f015b0
|
[
"MIT"
] |
permissive
|
jasongrout/conf_site
|
34aa1197727fbbbdf8811338764a7451445f1803
|
6b3beb21de8d847cba65dcb6da84464b40739d48
|
refs/heads/master
| 2021-03-31T06:35:59.696561
| 2020-03-17T20:39:57
| 2020-03-17T20:39:57
| 248,086,087
| 0
| 0
|
MIT
| 2020-03-17T22:32:02
| 2020-03-17T22:32:01
| null |
UTF-8
|
Python
| false
| false
| 6,671
|
py
|
# -*- coding: utf-8 -*-
# Views relating to accepting/rejecting a reviewed proposal.
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.http import HttpResponseRedirect
from django.template.defaultfilters import pluralize
from django.urls import reverse
from django.views.generic import View
from conf_site.proposals.models import Proposal
from conf_site.reviews.models import ProposalNotification, ProposalResult
from conf_site.reviews.views import ProposalListView
from symposion.schedule.models import Presentation
class SuperuserOnlyView(UserPassesTestMixin, View):
    """A view which only allows access to superusers."""

    def test_func(self):
        # Superusers pass. Logged-in non-superusers get a 403 error page
        # (raise_exception) instead of the mixin's default redirect to the
        # login form; anonymous users keep the redirect behavior.
        if self.request.user.is_superuser:
            return True
        elif not self.request.user.is_anonymous:
            # Non-anonymous, non-superuser users should see an error page.
            self.raise_exception = True
            return False
class ProposalChangeResultPostView(SuperuserOnlyView):
    """A view to allow superusers to change a proposal's voting result."""

    http_method_names = ["get"]

    def get(self, *args, **kwargs):
        """Update an individual ProposalResult object."""
        # URL kwargs carry the proposal pk and the new status value.
        proposal = Proposal.objects.get(pk=kwargs["pk"])
        # get_or_create handles proposals that have no result row yet.
        result = ProposalResult.objects.get_or_create(proposal=proposal)[0]
        result.status = kwargs["status"]
        result.save()
        return HttpResponseRedirect(
            reverse("review_proposal_detail", args=[proposal.id])
        )
class ProposalResultListView(SuperuserOnlyView, ProposalListView):
    """List proposals filtered by review-result status (superusers only)."""

    def get(self, request, *args, **kwargs):
        self.status = kwargs["status"]
        if self.status == ProposalResult.RESULT_UNDECIDED:
            # Create ProposalResults for proposals that do not have them.
            proposals_without_result = Proposal.objects.filter(
                review_result=None
            )
            for proposal in proposals_without_result:
                ProposalResult.objects.create(
                    proposal=proposal, status=ProposalResult.RESULT_UNDECIDED
                )
        return super(ProposalResultListView, self).get(
            request, *args, **kwargs
        )

    def get_queryset(self):
        # Only proposals whose review result matches the requested status.
        return Proposal.objects.order_by("pk").filter(
            review_result__status=self.status
        )

    def get_context_data(self, **kwargs):
        context = super(ProposalResultListView, self).get_context_data(
            **kwargs
        )
        # Transient ProposalResult used purely to render the status label.
        temp_result = ProposalResult(status=self.status)
        context["proposal_category"] = temp_result.get_status_display()
        return context
class ProposalMultieditPostView(SuperuserOnlyView):
    """A view to let superusers modify multiple proposals' results."""

    http_method_names = ["post"]

    def post(self, *args, **kwargs):
        """Apply one bulk action to the checked proposals.

        The action is chosen by which POST field is present: mark_status,
        send_notification, or create_presentations.
        NOTE(review): if none of the three fields is present this method
        returns None, which Django reports as an error -- confirm the form
        always submits one of them.
        """
        proposal_pks = self.request.POST.getlist("proposal_pk")
        proposals = Proposal.objects.filter(pk__in=proposal_pks)
        new_status = self.request.POST.get("mark_status")
        if new_status:
            # <queryset>.update() will not work here because
            # the status field lives in the related model
            # ProposalResult.
            for proposal in proposals:
                try:
                    proposal.review_result.status = new_status
                    proposal.review_result.save()
                except ProposalResult.DoesNotExist:
                    proposal.review_result = ProposalResult.objects.create(
                        proposal=proposal, status=new_status
                    )
            return HttpResponseRedirect(
                reverse("review_proposal_result_list", args=[new_status])
            )
        elif self.request.POST.get("send_notification"):
            # Save ProposalNotification to database, as a type
            # of rudimentary logging.
            notification = ProposalNotification.objects.create(
                from_address=self.request.POST.get("from_address"),
                subject=self.request.POST.get("subject"),
                body=self.request.POST.get("body"),
            )
            notification.proposals.set(proposals)
            # send_email returns the speakers it could not reach.
            unemailed_speakers = notification.send_email()
            for speaker in unemailed_speakers:
                messages.warning(
                    self.request,
                    "Speaker {} does not have an email address "
                    "and has not been notified.".format(speaker.name)
                )
            return HttpResponseRedirect(reverse("review_proposal_list"))
        elif self.request.POST.get("create_presentations"):
            num_presentations_created = 0
            for proposal in proposals:
                # We don't need to add all of the proposal's metadata
                # to the presentation. Most fields will automatically be
                # added when we save the proposal.
                # See https://github.com/pydata/conf_site/pull/176.
                # Note that the title needs to be added here so that
                # a presentation's slugs are created properly.
                presentation, created = Presentation.objects.get_or_create(
                    proposal_base=proposal.proposalbase_ptr,
                    section=proposal.section,
                    speaker=proposal.speaker,
                    title=proposal.title,
                )
                # If the presentation already existed, we do not need
                # to attach it to the proposal.
                if created:
                    proposal.presentation = presentation
                    proposal.save()
                    num_presentations_created += 1
            # Create a message if any new presentations were created.
            if num_presentations_created:
                messages.success(
                    self.request,
                    "{} presentation{} created.".format(
                        num_presentations_created,
                        pluralize(num_presentations_created),
                    ),
                )
            else:
                messages.warning(
                    self.request,
                    "All selected proposals already had presentations.",
                )
            # Since the "create presentations" action can only
            # be initated from the "Accepted Proposals"
            # category listing, we return the user there.
            return HttpResponseRedirect(
                reverse(
                    "review_proposal_result_list",
                    args=[ProposalResult.RESULT_ACCEPTED],
                )
            )
|
[
"martey@mobolic.com"
] |
martey@mobolic.com
|
a6163925f7b8ca2e2f2092068ada1812096825e1
|
536ec8e275d0e4ac826ed492a818802f17eb29da
|
/ABC/160/d.py
|
02dddae132428e1112cce372965697f32401ed8e
|
[] |
no_license
|
tamanyan/coding-problems
|
3d74ee708a943348ee06f1a25c45ee3a35cfd9ee
|
415e8230c8386163e1abf5eea217a1e5be8a15bc
|
refs/heads/master
| 2020-07-03T21:36:23.566534
| 2020-06-10T16:33:55
| 2020-06-10T16:33:55
| 202,057,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,544
|
py
|
from heapq import heappush, heappop, heapify
from collections import deque, defaultdict, Counter
import itertools
from itertools import permutations, combinations, accumulate
import sys
import bisect
import string
import math
import time
# --- Competitive-programming I/O helpers and global constants ---
def I(): return int(input())                        # read one int
def S(): return input()                             # read one raw line
def MI(): return map(int, input().split())          # ints from one line (iterator)
def MS(): return map(str, input().split())          # tokens from one line (iterator)
def LI(): return [int(i) for i in input().split()]  # list of ints
def LI_(): return [int(i)-1 for i in input().split()]  # 0-based list of ints
def StoI(): return [ord(i)-97 for i in input()]     # lowercase word -> 0..25 codes
def ItoS(nn): return chr(nn+97)                     # 0..25 code -> lowercase letter
# Deliberately shadows builtins.input with a faster readline-based version.
def input(): return sys.stdin.readline().rstrip()
def show(*inp, end='\n'):
    # Debug print, gated by the show_flg toggle below.
    if show_flg:
        print(*inp, end=end)
def print_matrix(mat):
    # Print a 2-D list one row per line, space-separated.
    for i in range(len(mat)):
        print(*mat[i])
yn = {False: 'No', True: 'Yes'}
YN = {False: 'NO', True: 'YES'}
MOD = 10**9+7
inf = float('inf')
IINF = 10**19
l_alp = string.ascii_lowercase
u_alp = string.ascii_uppercase
ts = time.time()
sys.setrecursionlimit(10**6)
nums = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
show_flg = False
# show_flg = True
def bfs(graph, initial):
    """Breadth-first search over an adjacency-list graph.

    Nodes are numbered 1..N (index 0 is unused padding).  Returns a list
    where entry v is the hop distance from *initial* to v, or -1 when v
    is unreachable.
    """
    node_count = len(graph) - 1
    distance = [-1] * (node_count + 1)
    distance[initial] = 0
    frontier = deque([initial])
    while frontier:
        current = frontier.popleft()
        # distance == -1 doubles as the "not yet visited" marker.
        for neighbor in graph[current]:
            if distance[neighbor] == -1:
                distance[neighbor] = distance[current] + 1
                frontier.append(neighbor)
    return distance
def main():
    # NOTE(review): dead code — this definition is immediately shadowed by the
    # second main() below, and `graph` is never defined in this scope, so
    # calling it would raise NameError on the bfs() line.
    N, M = MI()
    ret = bfs(graph, 1)
    print(ret)
def main():
    """Solve the distance-histogram problem (AtCoder ABC 160 D).

    The graph is the path 1-2-...-N plus one extra edge X-Y.  Reads
    "N X Y" from stdin and, for each k in 1..N-1, prints the number of
    unordered node pairs whose shortest distance is exactly k.
    """
    N, X, Y = MI()
    # Build the path graph on nodes 1..N, then add the shortcut edge X-Y.
    graph = [[] for i in range(N+1)]
    for i in range(1, N):
        graph[i].append(i+1)
        graph[i+1].append(i)
    graph[X].append(Y)
    graph[Y].append(X)
    # Histogram of pairwise distances.  Running BFS from every node counts
    # each unordered pair twice (once per endpoint), hence the //2 below.
    d = defaultdict(int)
    for i in range(1, N+1):
        ret = bfs(graph, i)
        for j in range(1, len(ret)):
            if i != j:
                d[ret[j]] += 1
        # print(ret)
    # print(d)
    for i in range(1, N):
        if i in d:
            print(d[i]//2)
        else:
            print(0)
    # d = defaultdict(int)
    # for i in range(1, N+1):
    #     for j in range(i+1, N+1):
    #         a = abs(j - i)
    #         b = abs(X - i) + 1 + abs(Y - j)
    #         c = abs(Y - i) + 1 + abs(X - j)
    #         d[min(a, b, c)] += 1
    # for i in range(1, N):
    #     if i in d:
    #         print(d[i])
    #     else:
    #         print(0)
if __name__ == '__main__':
main()
|
[
"tamanyan.sss@gmail.com"
] |
tamanyan.sss@gmail.com
|
ad20541f220ca3f25625474ef3e4b31b78b5f186
|
c94a89b95140bd1e348318ecb2d4867e676ba18c
|
/Kids_ProjectPro/urls.py
|
f49350e44751bce58a5c970419d7491ddc5ed535
|
[] |
no_license
|
Bek-End/Kids_ProjectPro
|
c499a0319726f2c819527ce3b12f00e48edc708f
|
e1a9b6a4825bccbe2b620ddecc0b7cfac519d4ca
|
refs/heads/master
| 2023-02-06T09:09:49.302103
| 2020-12-30T18:42:30
| 2020-12-30T18:42:30
| 325,620,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
"""Kids_ProjectPro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# from django.views.static import serve
# from django.conf.urls import url
urlpatterns = [
path('verify/', include('verification.urls')),
path('account/',include('accounts.urls')),
path('admin/', admin.site.urls),
# url(r'^media/(?P<path>.*)$', serve,{'document_root': settings.MEDIA_ROOT}),
# url(r'^static/(?P<path>.*)$', serve,{'document_root': settings.STATIC_ROOT}),
]
|
[
"="
] |
=
|
79b1bcd49c77b250f57382414962ded3e5d58e28
|
dd8faa90ee03ff52c571994ff797e4e4db38726d
|
/lib/python2.7/site-packages/google_compute_engine/instance_setup/instance_setup.py
|
1677fc765c6b90328a859867099fa926d89b8411
|
[] |
no_license
|
bopopescu/elasticluster_forked
|
16453b6d322ed26156a9759a8a3507ba9023883b
|
ba5433bc4a6e7b876161030f06b76ff1837e0e71
|
refs/heads/master
| 2022-11-21T20:49:13.533208
| 2017-10-25T22:39:00
| 2017-10-25T22:39:00
| 282,557,817
| 0
| 0
| null | 2020-07-26T02:02:21
| 2020-07-26T02:02:20
| null |
UTF-8
|
Python
| false
| false
| 7,502
|
py
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run initialization code the first time the instance boots."""
import logging.handlers
import optparse
import os
import re
import shutil
import subprocess
import tempfile
from google_compute_engine import file_utils
from google_compute_engine import logger
from google_compute_engine import metadata_watcher
from google_compute_engine.boto import boto_config
from google_compute_engine.instance_setup import instance_config
class InstanceSetup(object):
"""Initialize the instance the first time it boots."""
def __init__(self, debug=False):
"""Constructor.
Args:
debug: bool, True if debug output should write to the console.
"""
self.debug = debug
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='instance-setup', debug=self.debug, facility=facility)
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
self.metadata_dict = None
self.instance_config = instance_config.InstanceConfig(logger=self.logger)
if self.instance_config.GetOptionBool('InstanceSetup', 'network_enabled'):
self.metadata_dict = self.watcher.GetMetadata()
instance_config_metadata = self._GetInstanceConfig()
self.instance_config = instance_config.InstanceConfig(
logger=self.logger, instance_config_metadata=instance_config_metadata)
if self.instance_config.GetOptionBool('InstanceSetup', 'set_host_keys'):
self._SetSshHostKeys()
if self.instance_config.GetOptionBool('InstanceSetup', 'set_boto_config'):
self._SetupBotoConfig()
if self.instance_config.GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'):
self._RunScript('optimize_local_ssd')
if self.instance_config.GetOptionBool('InstanceSetup', 'set_multiqueue'):
self._RunScript('set_multiqueue')
try:
self.instance_config.WriteConfig()
except (IOError, OSError) as e:
self.logger.warning(str(e))
def _GetInstanceConfig(self):
"""Get the instance configuration specified in metadata.
Returns:
string, the instance configuration data.
"""
try:
instance_data = self.metadata_dict['instance']['attributes']
except KeyError:
instance_data = {}
self.logger.warning('Instance attributes were not found.')
try:
project_data = self.metadata_dict['project']['attributes']
except KeyError:
project_data = {}
self.logger.warning('Project attributes were not found.')
return (instance_data.get('google-instance-configs') or
project_data.get('google-instance-configs'))
def _RunScript(self, script):
"""Run a script and log the streamed script output.
Args:
script: string, the file location of an executable script.
"""
process = subprocess.Popen(
script, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
while True:
for line in iter(process.stdout.readline, b''):
self.logger.info(line.decode('utf-8').rstrip('\n'))
if process.poll() is not None:
break
def _GetInstanceId(self):
"""Get the instance ID for this VM.
Returns:
string, the instance ID for the VM.
"""
try:
return str(self.metadata_dict['instance']['id'])
except KeyError:
self.logger.warning('Instance ID was not found in metadata.')
return None
def _GenerateSshKey(self, key_type, key_dest):
"""Generate a new SSH key.
Args:
key_type: string, the type of the SSH key.
key_dest: string, a file location to store the SSH key.
"""
# Create a temporary file to save the created RSA keys.
with tempfile.NamedTemporaryFile(prefix=key_type, delete=True) as temp:
temp_key = temp.name
command = ['ssh-keygen', '-t', key_type, '-f', temp_key, '-N', '', '-q']
try:
self.logger.info('Generating SSH key %s.', key_dest)
subprocess.check_call(command)
except subprocess.CalledProcessError:
self.logger.warning('Could not create SSH key %s.', key_dest)
return
shutil.move(temp_key, key_dest)
shutil.move('%s.pub' % temp_key, '%s.pub' % key_dest)
file_utils.SetPermissions(key_dest, mode=0o600)
file_utils.SetPermissions('%s.pub' % key_dest, mode=0o644)
def _StartSshd(self):
"""Initialize the SSH daemon."""
# Exit as early as possible.
# Instance setup systemd scripts block sshd from starting.
if os.path.exists('/bin/systemctl'):
return
elif (os.path.exists('/etc/init.d/ssh') or
os.path.exists('/etc/init/ssh.conf')):
subprocess.call(['service', 'ssh', 'start'])
subprocess.call(['service', 'ssh', 'reload'])
elif (os.path.exists('/etc/init.d/sshd') or
os.path.exists('/etc/init/sshd.conf')):
subprocess.call(['service', 'sshd', 'start'])
subprocess.call(['service', 'sshd', 'reload'])
def _SetSshHostKeys(self):
"""Regenerates SSH host keys when the VM is restarted with a new IP address.
Booting a VM from an image with a known SSH key allows a number of attacks.
This function will regenerating the host key whenever the IP address
changes. This applies the first time the instance is booted, and each time
the disk is used to boot a new instance.
"""
section = 'Instance'
instance_id = self._GetInstanceId()
if instance_id != self.instance_config.GetOptionString(
section, 'instance_id'):
self.logger.info('Generating SSH host keys for instance %s.', instance_id)
file_regex = re.compile(r'ssh_host_(?P<type>[a-z0-9]*)_key\Z')
key_dir = '/etc/ssh'
key_files = [f for f in os.listdir(key_dir) if file_regex.match(f)]
for key_file in key_files:
key_type = file_regex.match(key_file).group('type')
key_dest = os.path.join(key_dir, key_file)
self._GenerateSshKey(key_type, key_dest)
self._StartSshd()
self.instance_config.SetOption(section, 'instance_id', str(instance_id))
def _GetNumericProjectId(self):
"""Get the numeric project ID.
Returns:
string, the numeric project ID.
"""
try:
return str(self.metadata_dict['project']['numericProjectId'])
except KeyError:
self.logger.warning('Numeric project ID was not found in metadata.')
return None
def _SetupBotoConfig(self):
"""Set the boto config so GSUtil works with provisioned service accounts."""
project_id = self._GetNumericProjectId()
try:
boto_config.BotoConfig(project_id, debug=self.debug)
except (IOError, OSError) as e:
self.logger.warning(str(e))
def main():
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug', action='store_true', dest='debug',
help='print debug output to the console.')
(options, _) = parser.parse_args()
InstanceSetup(debug=bool(options.debug))
if __name__ == '__main__':
main()
|
[
"vipin@kryptonite"
] |
vipin@kryptonite
|
0768dc07b55dd1f9ec3c1e126265bb64af79e753
|
c528d85b2db4e106f0a08ed0c0bb8f9a06c75c27
|
/produccion/migrations/0016_produccionrealizada_cantidad_reproceso.py
|
8e696116ef666a3b2e0438d0b937accc4769a505
|
[] |
no_license
|
alrvivas/CampoApp2
|
446e9eeb5f2b03963616b433340e99e3d481e532
|
8c574173f68887a16fb6c0e728a10ef12dbae4ce
|
refs/heads/master
| 2016-09-10T19:51:50.586769
| 2015-05-19T20:59:49
| 2015-05-19T20:59:49
| 35,906,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('produccion', '0015_auto_20150205_1009'),
]
operations = [
migrations.AddField(
model_name='produccionrealizada',
name='cantidad_reproceso',
field=models.IntegerField(null=True, blank=True),
preserve_default=True,
),
]
|
[
"alr.vivas@gmail.com"
] |
alr.vivas@gmail.com
|
020f38bcdcaa8a734beb4dd1a45e1e2e66bcd782
|
fd25231975acd147e04dc3ed3627c92cb1a4f86c
|
/FlaskAPI/vir_env/lib/python3.7/site-packages/sklearn/linear_model/theil_sen.py
|
3fbea57d5f9babe483c0672e470413c5cb049f49
|
[] |
no_license
|
sumitkutty/Flight-Price-Prediction
|
832a2802a3367e655b46d3b44f073d917abd2320
|
d974a8b75fbcbfa42f11703602af3e45a3f08b3c
|
refs/heads/master
| 2022-12-25T07:13:06.375888
| 2020-10-08T18:46:44
| 2020-10-08T18:46:44
| 302,366,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4180feba49700d901330d36bcb7c90b1782af3a052c08cac446725a84da24bc5
size 580
|
[
"sumitkutty37@gmail.com"
] |
sumitkutty37@gmail.com
|
3f7f38885107eef43aceac023faab24e60716559
|
9d6e747ed7204555199ece2033decff978295a09
|
/Programmers/연습문제/나누어 떨어지는 숫자 배열.py
|
fb1afb8f9e293de68da3844ce7cbf9d97d70dcdf
|
[] |
no_license
|
leejaeyeong/Algorithm
|
5b47ed9aa241990945cbf2451afe7f084984ced5
|
72072d1e0c28e72075fc00db9239a4bd444b68b6
|
refs/heads/master
| 2021-08-08T10:57:07.345943
| 2021-07-11T15:01:59
| 2021-07-11T15:01:59
| 238,156,464
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
def solution(arr, divisor):
    """Return the elements of *arr* divisible by *divisor*, sorted ascending.

    If no element is divisible, return [-1] (problem convention).
    """
    # Replaces the manual index loop + append and the expression-statement
    # conditional (`answer.append(-1) if ... else answer.sort()`) with the
    # idiomatic comprehension form; behavior is unchanged.
    answer = sorted(x for x in arr if x % divisor == 0)
    return answer if answer else [-1]
|
[
"dldustn14@gmail.com"
] |
dldustn14@gmail.com
|
4a5bef3cc293b6ab8ce44bb01c17f1a769623747
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_170/ch118_2020_04_01_01_59_46_302026.py
|
7174e745c9ee56ed8d0b829dcf7a56162f0013bf
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
import math
def reflexao_total_interna(n1, n2, a2):
    """Return True if light hitting the interface at angle *a2* (degrees),
    coming from the medium with refractive index *n2* toward the medium
    with index *n1*, undergoes total internal reflection.

    By Snell's law n1*sin(a1) = n2*sin(a2); total internal reflection
    occurs when the required sin(a1) would exceed 1 (no real a1 exists).
    """
    a2 = a2 * (math.pi/180)  # degrees -> radians
    sin_a1 = (math.sin(a2)) * n2 / n1
    # Bug fix: the original called math.asin(sin_a1) first, which raises
    # ValueError exactly when sin_a1 > 1 — i.e. precisely in the total
    # internal reflection case — making its `return True` branch
    # unreachable.  Test the ratio directly instead.
    return sin_a1 > 1
|
[
"you@example.com"
] |
you@example.com
|
db489fba758e2bb4794dc2d8786107c23b06f986
|
af34b2b44fe1a797a11f27e508d0d2e3c8764027
|
/face/admin.py
|
f54344d632c1c0208da3331d47993c48fb0fad15
|
[] |
no_license
|
CodingSta/askface-with-aws
|
12afa8f7884847bd979a890f96510642018ffe63
|
39ea4ddfbd5360629e7b3a77014adf2d3fadcf6d
|
refs/heads/master
| 2020-05-27T15:35:48.108675
| 2018-08-14T11:46:43
| 2018-08-14T11:46:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
from django.contrib import admin
from django.db.models import Count
from .models import Collection, Person, Face
from .forms import FaceForm
@admin.register(Collection)
class CollectionAdmin(admin.ModelAdmin):
list_display = ['pk', 'name', 'slug']
class FaceInline(admin.TabularInline):
model = Face
form = FaceForm
@admin.register(Person)
class PersonAdmin(admin.ModelAdmin):
    """Admin for Person: shows per-person face counts, edits faces inline,
    and offers a bulk 'indexing' action over the selected people's faces."""
    list_display = ['pk', 'collection', 'name', 'face_count']
    list_display_links = ['name']
    inlines = [FaceInline]
    actions = ['indexing']
    def get_queryset(self, request):
        # Annotate each row with Count('face') so face_count below can read
        # the precomputed face__count instead of issuing a query per row.
        return Person.objects.all().annotate(Count('face'))
    def face_count(self, person):
        # Reads the annotation added in get_queryset.
        return person.face__count
    def indexing(self, request, queryset):
        # Bulk action: re-index every face belonging to the selected people.
        # NOTE(review): face.indexing() semantics are defined on the Face
        # model (not visible here) — presumably a remote indexing call.
        for face in Face.objects.filter(person__in=queryset):
            face.indexing()
        self.message_user(request, 'indexing 완료')
@admin.register(Face)
class FaceAdmin(admin.ModelAdmin):
list_display = ['person', 'photo', 'meta']
|
[
"me@askcompany.kr"
] |
me@askcompany.kr
|
24a44d518db3c183a6059c55278073b59cd615df
|
deff2922412bd0376ef38a34a895990d106b7b66
|
/goodsunit/serializers.py
|
eedf1db3efe76eb7e5f964b7a1f96578f613af56
|
[
"Apache-2.0"
] |
permissive
|
dpd-pub/GreaterWMS
|
56ba10750e8724bc7b39de077e4b4bc43340a94a
|
9eabb1b9b0f5376dcccd89ed86dd76995955a8ec
|
refs/heads/master
| 2023-02-27T18:05:29.384931
| 2021-02-10T02:19:33
| 2021-02-10T02:19:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
from rest_framework import serializers
from .models import ListModel
from userprofile.models import Users
import re
from rest_framework.exceptions import APIException
def data_validate(data):
    """Reject payloads containing 'script' or 'select' (case-insensitive).

    Returns *data* unchanged when clean; raises APIException otherwise.
    """
    text = str(data)
    suspicious = (re.findall(r'script', text, re.IGNORECASE)
                  or re.findall(r'select', text, re.IGNORECASE))
    if suspicious:
        raise APIException({'detail': 'Bad Data can‘not be store'})
    return data
def openid_validate(data):
if Users.objects.filter(openid=data).exists():
return data
else:
raise APIException({'detail': 'User does not exists'})
def appid_validate(data):
if Users.objects.filter(appid=data).exists():
return data
else:
raise APIException({'detail': 'User does not exists'})
class GoodsunitGetSerializer(serializers.ModelSerializer):
goods_unit = serializers.CharField(read_only=True, required=False)
creater = serializers.CharField(read_only=True, required=False)
create_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
update_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M:%S')
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', ]
class GoodsunitPostSerializer(serializers.ModelSerializer):
openid = serializers.CharField(read_only=False, required=False, validators=[openid_validate])
goods_unit = serializers.CharField(read_only=False, required=True, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = ListModel
exclude = ['is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class GoodsunitUpdateSerializer(serializers.ModelSerializer):
goods_unit = serializers.CharField(read_only=False, required=True, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=True, validators=[data_validate])
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
class GoodsunitPartialUpdateSerializer(serializers.ModelSerializer):
goods_unit = serializers.CharField(read_only=False, required=False, validators=[data_validate])
creater = serializers.CharField(read_only=False, required=False, validators=[data_validate])
class Meta:
model = ListModel
exclude = ['openid', 'is_delete', ]
read_only_fields = ['id', 'create_time', 'update_time', ]
|
[
"singosgu@gmail.com"
] |
singosgu@gmail.com
|
8815ed33785797bc95470cbc7b0d27380f4511b6
|
1082cee55e32fa76859666aa011428bf979182ea
|
/pose/configs/top_down/vit/coco/vit_large_patch16_384_coco_256x192.py
|
f371af7b4ab66da7ec5182b8843fce3e2361f2d2
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
cvsch/HRFormer
|
f7a96d8620f87986cf10c74fe4f47d5b7106d732
|
9e6ce958ba502354dff748846d6d98f682f5f9d1
|
refs/heads/main
| 2023-08-20T21:29:51.448485
| 2021-10-19T01:20:02
| 2021-10-19T01:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,684
|
py
|
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=5, create_symlink=False)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='AdamW',
lr=5e-4,
betas=(0.9, 0.999),
weight_decay=0.01,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='TopDown',
pretrained='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth',
backbone=dict(
type='VisionTransformer',
model_name='vit_large_patch16_384',
img_size= [256, 192],
patch_size=16,
in_chans=3,
embed_dim=1024,
depth=24,
num_heads=16,
num_classes=19,
drop_rate=0,
drop_path_rate=0.1,
norm_cfg=norm_cfg,
pos_embed_interp=True,
align_corners=False,
),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=1024,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=2,
num_deconv_filters=(256, 256),
num_deconv_kernels=(4, 4),
norm_cfg=norm_cfg,
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_root = "/path/to/dataset/coco" # Set the data path here
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file=f'{data_root}/dataset/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=256),
test_dataloader=dict(samples_per_gpu=256),
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
|
[
"yhyuan@pku.edu.cn"
] |
yhyuan@pku.edu.cn
|
d8964fa3e67459f90ecc2926d498618a15de084e
|
952dc66c61966f099756cdb6c2d13b40352f63cc
|
/zerver/migrations/0436_realmauthenticationmethods.py
|
0716113e5428c03ea3a99236b30a568e8c822435
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
zulip/zulip
|
5ae6aad35fd9f72996c0a2a9cdd674400966ebf6
|
965a25d91b6ee2db54038f5df855215fa25146b0
|
refs/heads/main
| 2023-08-28T23:43:00.971110
| 2023-08-28T16:47:09
| 2023-08-28T19:33:02
| 43,160,685
| 20,239
| 8,996
|
Apache-2.0
| 2023-09-14T20:57:47
| 2015-09-25T16:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,780
|
py
|
# Generated by Django 4.2 on 2023-04-13 23:45
import django.db.models.deletion
from django.db import migrations, models
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def fill_RealmAuthenticationMethod_data(
    apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
    """Backfill: create one RealmAuthenticationMethod row for each
    authentication method currently enabled on each realm."""
    Realm = apps.get_model("zerver", "Realm")
    RealmAuthenticationMethod = apps.get_model("zerver", "RealmAuthenticationMethod")
    rows_to_create = []
    for realm in Realm.objects.order_by("id"):
        # NOTE(review): .iteritems() does not exist on plain dicts in
        # Python 3 — presumably authentication_methods is a django-bitfield
        # BitHandler, which does provide iteritems(); confirm on the Realm model.
        for key, value in realm.authentication_methods.iteritems():
            if value:
                rows_to_create.append(RealmAuthenticationMethod(name=key, realm_id=realm.id))
    # Single batched insert to keep the migration fast on large deployments.
    RealmAuthenticationMethod.objects.bulk_create(rows_to_create, batch_size=10000)
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0435_scheduledmessage_rendered_content"),
]
operations = [
migrations.CreateModel(
name="RealmAuthenticationMethod",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("name", models.CharField(max_length=80)),
(
"realm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="zerver.realm"
),
),
],
options={
"unique_together": {("realm", "name")},
},
),
migrations.RunPython(fill_RealmAuthenticationMethod_data),
]
|
[
"tabbott@zulip.com"
] |
tabbott@zulip.com
|
2fb88aab69ecd33a8ec7b8ed31e521803c847414
|
fd2de23a704ec408f47c9f2263b604cbd204c0a3
|
/MacrosAndScripts/myPlotStyle.py
|
f4c76ee6871fbaffc287d874bd84f58c5c4415d7
|
[] |
no_license
|
gparida/monoHiggs_postAnalyzer
|
1a71c3eaa1cb11ce40923eb831077709987bd866
|
00fb3e37c5fa6bdd75e7426c3a7bf49534c3eec4
|
refs/heads/master
| 2023-01-19T15:19:09.487955
| 2020-12-03T10:15:11
| 2020-12-03T10:15:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,060
|
py
|
import ROOT
def add_lumi(year, channel_):
    """Build the top-of-plot TPaveText: channel label, year and integrated
    luminosity.  Unrecognized channel names produce an empty pave."""
    lowX = 0.40
    lowY = 0.825
    lumi = ROOT.TPaveText(lowX, lowY + 0.06, lowX + 0.50, lowY + 0.16, "NDC")
    lumi.SetBorderSize(0)
    lumi.SetFillStyle(0)
    lumi.SetTextAlign(32)  # right-aligned, vertically centered
    lumi.SetTextColor(1)
    lumi.SetTextSize(0.05)
    lumi.SetTextFont(42)
    # 2018 luminosity differs; every other year uses the 2017 value.
    lumiProcessed = "59.7" if year == "2018" else "41.52"
    channel_labels = {
        "combined": "4 channels combined",
        "mutau": "#mu#tau_{h}",
        "etau": "e#tau_{h}",
        "tautau": "#tau_{h}#tau_{h}",
        "emu": "e#mu",
    }
    if channel_ in channel_labels:
        lumi.AddText(channel_labels[channel_] + " " + year + " , "
                     + lumiProcessed + " fb^{-1} (13 TeV)")
    return lumi
def add_CMS():
lowX=0.65 #0.21
lowY=0.68
lumi = ROOT.TPaveText(lowX, lowY+0.06, lowX+0.15, lowY+0.16, "NDC")
lumi.SetTextFont(61)
lumi.SetTextSize(0.08)
lumi.SetBorderSize( 0 )
lumi.SetFillStyle( 0 )
lumi.SetTextAlign( 31 )#12
lumi.SetTextColor( 1 )
lumi.AddText("CMS")
return lumi
def add_Preliminary():
lowX=0.65 # 0.21
lowY=0.63
lumi = ROOT.TPaveText(lowX, lowY+0.06, lowX+0.15, lowY+0.16, "NDC")
lumi.SetTextFont(61)
lumi.SetTextFont ( 40 )
lumi.SetTextSize(0.06)
lumi.SetBorderSize( 0 )
lumi.SetFillStyle( 0 )
lumi.SetTextAlign( 31 )#12
lumi.SetTextColor( 1 )
lumi.AddText("Preliminary")
return lumi
def make_legend():
output = ROOT.TLegend(0.85, 0.45, 1.0, 0.75, "", "brNDC")
#output = ROOT.TLegend(0.2, 0.1, 0.47, 0.65, "", "brNDC")
output.SetLineWidth(1)
output.SetLineStyle(1)
output.SetFillStyle(1001) #0
output.SetFillColor(0)
output.SetBorderSize(1)
output.SetTextFont(42)
return output
# declare colors
color_ztt="#ffcc66"
color_zll="#4496c8"
color_tt="#9999cc"
color_ggh="#12cadd"
color_vv="#990099"
color_wjets="#cc6666"
color_jetfake="#f1cde1"
errorStyle=3002
c=ROOT.TCanvas("canvas","",0,0,1300,1200)
pad1 = ROOT.TPad("pad1","pad1",0,0.25,1,1)
pad1.SetFillColor(0)
pad1.SetBorderMode(0)
pad1.SetBorderSize(1)
pad1.SetTickx(1)
pad1.SetTicky(1)
pad1.SetGridx()
pad1.SetLeftMargin(0.15) #0.15
pad1.SetRightMargin(0.15) #0.1
pad1.SetTopMargin(0.122)
pad1.SetBottomMargin(0.025)
pad1.SetFrameFillStyle(0)
pad1.SetFrameLineStyle(0)
pad1.SetFrameLineWidth(1)
pad1.SetFrameBorderMode(0)
pad1.SetFrameBorderSize(1)
categ = ROOT.TPaveText(0.21, 0.5+0.013, 0.43, 0.70+0.155, "NDC")
categ.SetBorderSize( 0 )
categ.SetFillStyle( 0 )
categ.SetTextAlign( 12 )
categ.SetTextSize ( 0.06 )
categ.SetTextColor( 1 )
categ.SetTextFont ( 42 )
pad2 = ROOT.TPad("pad2","pad2",0,0,1,0.25);
pad2.SetTopMargin(0.02);
pad2.SetBottomMargin(0.35);
pad2.SetLeftMargin(0.15);
pad2.SetRightMargin(0.15);
pad2.SetTickx(1)
pad2.SetTicky(1)
pad2.SetFrameLineWidth(1)
#pad2.SetGridx()
pad2.SetGridy()
#pad2.SetLogy()
|
[
"jithin.madhusudanan.sreekala@cern.ch"
] |
jithin.madhusudanan.sreekala@cern.ch
|
8faab94f20ece7853c36eb7e313df20ec3fb9a12
|
eacff46eda2c6b509449979a16002b96d4645d8e
|
/Collections-a-installer/community-general-2.4.0/tests/unit/mock/loader.py
|
907ec9b928942d36e4ffde7f76faa7b0ca28cd86
|
[
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] |
permissive
|
d-amien-b/simple-getwordpress
|
5e6d4d15d5f87124ab591e46b63fec552998fdc3
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
refs/heads/master
| 2023-04-08T22:13:37.347545
| 2021-04-06T09:25:51
| 2021-04-06T09:25:51
| 351,698,069
| 0
| 0
|
MIT
| 2021-03-31T16:16:45
| 2021-03-26T07:30:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError
from ansible.parsing.dataloader import DataLoader
from ansible.module_utils._text import to_bytes, to_text
class DictDataLoader(DataLoader):
def __init__(self, file_mapping=None):
file_mapping = {} if file_mapping is None else file_mapping
assert type(file_mapping) == dict
super(DictDataLoader, self).__init__()
self._file_mapping = file_mapping
self._build_known_directories()
self._vault_secrets = None
def load_from_file(self, path, cache=True, unsafe=False):
path = to_text(path)
if path in self._file_mapping:
return self.load(self._file_mapping[path], path)
return None
# TODO: the real _get_file_contents returns a bytestring, so we actually convert the
# unicode/text it's created with to utf-8
def _get_file_contents(self, path):
path = to_text(path)
if path in self._file_mapping:
return (to_bytes(self._file_mapping[path]), False)
else:
raise AnsibleParserError("file not found: %s" % path)
def path_exists(self, path):
path = to_text(path)
return path in self._file_mapping or path in self._known_directories
def is_file(self, path):
path = to_text(path)
return path in self._file_mapping
def is_directory(self, path):
path = to_text(path)
return path in self._known_directories
def list_directory(self, path):
ret = []
path = to_text(path)
for x in (list(self._file_mapping.keys()) + self._known_directories):
if x.startswith(path):
if os.path.dirname(x) == path:
ret.append(os.path.basename(x))
return ret
def is_executable(self, path):
# FIXME: figure out a way to make paths return true for this
return False
def _add_known_directory(self, directory):
if directory not in self._known_directories:
self._known_directories.append(directory)
def _build_known_directories(self):
self._known_directories = []
for path in self._file_mapping:
dirname = os.path.dirname(path)
while dirname not in ('/', ''):
self._add_known_directory(dirname)
dirname = os.path.dirname(dirname)
def push(self, path, content):
rebuild_dirs = False
if path not in self._file_mapping:
rebuild_dirs = True
self._file_mapping[path] = content
if rebuild_dirs:
self._build_known_directories()
def pop(self, path):
if path in self._file_mapping:
del self._file_mapping[path]
self._build_known_directories()
def clear(self):
self._file_mapping = dict()
self._known_directories = []
def get_basedir(self):
return os.getcwd()
def set_vault_secrets(self, vault_secrets):
self._vault_secrets = vault_secrets
|
[
"test@burdo.fr"
] |
test@burdo.fr
|
eea5106a6a9289aa568acdbe862f369afb82dced
|
ec53949dafa4b6ad675d679b05ed7c83fef2c69a
|
/DataStructuresAndAlgo/Tuples/methodsTuple.py
|
54834f82a3fc7c4bb83d3270f4e2881055b2118c
|
[] |
no_license
|
tpotjj/Python
|
9a5a20a53cd7a6ec14386c1db8ce155e0fc9ab8a
|
ca73c116ada4d05c0c565508163557744c86fc76
|
refs/heads/master
| 2023-07-11T16:37:10.039522
| 2021-08-14T11:17:55
| 2021-08-14T11:17:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
# Demonstration of common tuple operations and built-ins.
myTuple = (1, 2, 3, 4, 5, 2, 2)
secondTuple = (1, 2, 6, 9, 8, 7)
print(myTuple + secondTuple)  # concatenation -> new tuple
print(myTuple * 2)  # repetition
print(1 in myTuple)  # membership test
print(myTuple.count(2))  # occurrences of 2 -> 3
print(len(myTuple))
print(max(myTuple))
print(min(myTuple))
# Converting a list to a tuple.
x = [1, 2, 3, 4]
y = tuple(x)
print(type(y))
|
[
"joris97jansen@gmail.com"
] |
joris97jansen@gmail.com
|
518099f4cfceb4cf5f9f1df05eed811c28c2a387
|
8d293da5176e7734392465599a9b43b15e6a54af
|
/starwars/parts_scrap.py
|
045bb01d1fd7e77bee0e72ee7b75fcd112953e3d
|
[
"MIT"
] |
permissive
|
whaleygeek/mb_deathstar
|
04b5c4e331e4f6f7330d5840e47197e7ede6cbff
|
f756b8b5b45927039c547d0f96f8e31a365b383b
|
refs/heads/master
| 2021-01-11T20:09:37.134425
| 2017-01-22T19:32:42
| 2017-01-22T19:32:42
| 79,052,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,629
|
py
|
#Minecraft Star Wars
#Martin O'Hanlon
#www.stuffaboutcode.com
from deathstar import DeathStar
from starwarscraft import TieFighter, MilleniumFalcon, XWingFighter, XWingFighterDiagonal
from planet import Planet
from trench import Trench
from projectile import XWingMissile
from mcpi.minecraft import Minecraft
from mcpi.minecraft import Vec3
from mcpi import block
from time import sleep
#Main program
#create connection to minecraft
mc = Minecraft.create()
pos = Vec3(0,30,0)
mc.player.setTilePos(pos.x + 25 ,pos.y + 20, pos.z + 25)
#create Alderaan
alderaanPos = pos.clone()
alderaanPos.x += 50
alderaanPos.z += 50
alderaan = Planet(alderaanPos, 10, block.WOOL.id, 3)
#create DeathStar
sleep(15)
deathstarPos = pos.clone()
deathstar = DeathStar(deathstarPos, 15)
sleep(12)
mc.postToChat("Not Alderaan, we are peaceful, we have no weapons.")
#blow up Alderaan
sleep(3)
deathstar.fire(alderaanPos.x, alderaanPos.y, alderaanPos.z, 0.5, 1)
alderaan.destroy(2)
#millenium falcon arrives
sleep(10)
falconPos = pos.clone()
falconPos.z -= 50
falcon = MilleniumFalcon(falconPos)
mc.postToChat("Thats no moon, its a space station")
falcon.fly(pos.x, pos.y, pos.z, 0.5)
#millenium falcon is chased from the death star by tie fighters
sleep(10)
falconFly = falcon.fly(pos.x, pos.y, pos.z + 50, 0.3, True)
mc.postToChat("Sure hope the old man got that tractor beam out of commission, or this is gonna be a real short trip")
#tie fighters take chase
sleep(5)
tie1Pos = pos.clone()
tie1Pos.x -= 5
tie2Pos = pos.clone()
tie2Pos.x += 5
tie1 = TieFighter(tie1Pos)
tie2 = TieFighter(tie2Pos)
tie1Fly = tie1.fly(tie1Pos.x, tie1Pos.y, tie1Pos.z + 50, 0.25, True)
tie2Fly = tie2.fly(tie2Pos.x, tie2Pos.y, tie2Pos.z + 50, 0.25, True)
#wait for falcon and tie fighters to stop
falconFly.join()
falcon.clear()
tie1Fly.join()
tie2Fly.join()
tie1.clear()
tie2.clear()
mc.postToChat("They let us go. It was the only reason for the ease of our escape.")
#create Yavin 4
sleep(10)
yavinPos = pos.clone()
yavinPos.x -= 60
yavinPos.z += 60
yavin = Planet(yavinPos, 10, block.WOOL.id, 13)
#x wing fighter attacks
sleep(5)
xWing1Pos = pos.clone()
xWing1Pos.x -= 50
xWing1Pos.z += 50
xWing1 = XWingFighterDiagonal(xWing1Pos)
xWing1.fly(pos.x - 10, pos.y, pos.z + 10, 0.25)
xWing1.clear()
#fly x wing down the treach
trenchPos = Vec3(50,40,-50)
trench = Trench(trenchPos, 14, 8, 100)
#put the player in the trench
mc.player.setTilePos(57, 41, 49)
sleep(2)
xWing2Pos = Vec3(57, 44, 45)
xWing2 = XWingFighter(xWing2Pos)
xWing2Fly = xWing2.fly(57, 44, -25, 0.25, True)
#fire the missile at the exhaust port
sleep(12)
mc.postToChat("Use the force Luke")
sleep(1)
missile = XWingMissile()
missileFire = missile.fire(xWing2.position.x, xWing2.position.y, xWing2.position.z - 3,
trench.exhaustPortPos.x, trench.exhaustPortPos.y, trench.exhaustPortPos.z,
0.1, True)
#wait for the missile and the xwing to stop
missileFire.join()
xWing2Fly.join()
xWing2.clear()
trench.clear()
#move player back above deathstar
mc.player.setTilePos(pos.x, pos.y + 20, pos.z)
#xwing escapes from deathstar
xWing3Pos = pos.clone()
xWing3Pos.z -= 10
xWing3 = XWingFighter(xWing1Pos)
xWing3Fly = xWing3.fly(pos.x, pos.y, pos.z - 50, 0.25, True)
sleep(3)
#destroy the deathstar
deathstar.destroy()
sleep(5)
mc.postToChat("duh der der duh, der, duh der der duh, der, der der der der der")
sleep(10)
#finish by clearing xwing and yavin
xWing3Fly.join()
xWing3.clear()
yavin.clear()
|
[
"david@thinkingbinaries.com"
] |
david@thinkingbinaries.com
|
72412cdf98c14f9a80622c60fb2992562913cb86
|
6177f542a4db03e9fc29d1f535ca7c3d2b35a751
|
/concert_master/scripts/concert_info
|
4ba0973d65a446ee60396143d0f160641636f7c7
|
[] |
no_license
|
robotics-in-concert/rocon_concert
|
ac5b97d0048f72712bbd5bc0369b6a7dcb1361a7
|
f940eaa3fd612317f2043d6edba10cd309384134
|
refs/heads/devel
| 2020-04-03T21:28:19.846013
| 2017-01-25T16:22:18
| 2017-01-25T16:22:18
| 3,861,812
| 5
| 5
| null | 2017-01-09T08:27:43
| 2012-03-29T02:26:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,312
|
#!/usr/bin/env python
#
# License: BSD
# https://raw.github.com/robotics-in-concert/rocon_concert/license/LICENSE
#
# This is just a copy of rocon_master_info
#
##############################################################################
# Imports
##############################################################################
import argparse
import sys
import rospy
import rocon_master_info
##############################################################################
# Functions
##############################################################################
def parse_arguments():
parser = argparse.ArgumentParser(description='View concert information details.\n\nThis command will defer \
to the rqt plugin if available, otherwise it will simply print details to the console.')
parser.add_argument('-c', '--console', action='store_true', help='force output to the console only')
myargs = rospy.myargv(argv=sys.argv)
return parser.parse_args(args=myargs[1:])
##############################################################################
# Main
##############################################################################
if __name__ == '__main__':
args = parse_arguments()
rocon_master_info.main('concert_info', 'Concert Information', console=args.console)
|
[
"d.stonier@gmail.com"
] |
d.stonier@gmail.com
|
|
da6607ef6fb14226e045c90c0a639ef531fdeae1
|
14453c13d552165cabe72a310f44f7c58eaacad0
|
/driver/examples/zarr_to_nc.py
|
9a9366747a2bbfe10e72f26266170ea288bff912
|
[
"Apache-2.0"
] |
permissive
|
ai2cm/pace
|
76a98ffae3baa92bd3b2ddc422b50dfa50255642
|
c543e8ec478d46d88b48cdd3beaaa1717a95b935
|
refs/heads/main
| 2023-07-06T07:18:11.558315
| 2022-12-22T21:45:34
| 2022-12-22T21:45:34
| 392,106,887
| 27
| 13
|
Apache-2.0
| 2023-07-03T13:47:46
| 2021-08-02T22:05:11
|
Python
|
UTF-8
|
Python
| false
| false
| 472
|
py
|
import argparse
import xarray as xr
import zarr
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Converts zarr directory stores to netcdf"
)
parser.add_argument("zarr_in", type=str, help="path of zarr to convert")
parser.add_argument("netcdf_out", type=str, help="output netcdf")
args = parser.parse_args()
ds: xr.Dataset = xr.open_zarr(store=zarr.DirectoryStore(args.zarr_in))
ds.to_netcdf(args.netcdf_out)
|
[
"noreply@github.com"
] |
ai2cm.noreply@github.com
|
50545660461d4734744ed1fb38ee3b7dbb01c0dc
|
b2dd1a41354907b2a010b33aee11dc5955e9eabd
|
/tests/test_widgets.py
|
e1696200ebbbfcbc2f1cdfbb2250b422a929572b
|
[] |
no_license
|
jeetu7/tw2.jqplugins.jqgrid
|
34359af04f1de20a93f82a838308c85cadf80469
|
86d694bf7ea6276b20006d644dd9b4a96b505991
|
refs/heads/master
| 2020-12-25T04:29:02.775745
| 2012-07-07T14:14:09
| 2012-07-07T14:14:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
from webob import Request
from webob.multidict import NestedMultiDict
from tw2.core.testbase import assert_in_xml, assert_eq_xml, WidgetTest
from nose.tools import raises
from cStringIO import StringIO
from tw2.core import EmptyField, IntValidator, ValidationError
from cgi import FieldStorage
import formencode
import webob
if hasattr(webob, 'NestedMultiDict'):
from webob import NestedMultiDict
else:
from webob.multidict import NestedMultiDict
import tw2.jqplugins.jqgrid.widgets as w
class TestJQGridWidget(WidgetTest):
widget = w.jqGridWidget
attrs = {'id' : 'foo'}
params = {'options' : {
'data': [
{ 'field1' : 'foo', 'field2' : 'foo' } for i in range(2)
],
'datatype': "local",
'colNames':['Field1', 'Field2'],
'colModel':[
{'name':'field1'},
{'name':'field2'},
],
'viewrecords': True,
'rowNum':100,
'rowList':[100,200],
'caption':"Example"
}}
expected = """
<div>
<table id="foo"></table>
<script type="text/javascript">
$(document).ready(
function(){
var opts = {"viewrecords": true, "rowList": [100, 200], "colModel": [{"name": "field1"}, {"name": "field2"}], "caption": "Example", "datatype": "local", "colNames": ["Field1", "Field2"], "data": [{"field2": "foo", "field1": "foo"}, {"field2": "foo", "field1": "foo"}], "rowNum": 100};
var grid = $("#foo");
grid.jqGrid(opts);
if ( 'pager' in opts ) {
opts['pager_selector'] = opts['pager'];
opts['pager'] = $(opts['pager'])
var pager_opts = {}
var prmEdit = {};
var prmAdd = {};
var prmDel = {};
var prmSearch = {};
var prmView = {};
grid.navGrid('#'+opts['pager_selector'], pager_opts,
prmEdit, prmAdd, prmDel, prmSearch, prmView)
;
}
}
);
</script>
</div>"""
|
[
"ralph.bean@gmail.com"
] |
ralph.bean@gmail.com
|
ab81570c377f89bd683f3f79f3b00aa6ebd43962
|
117dceedcb8e93e40428e439fb02d16c58aa4474
|
/作业/0322/code/mywebsite/home/templatetags/demo.py
|
b0192d7d3ce71e59c075ba364cd66ec7fd42018f
|
[] |
no_license
|
qwert19981228/P4
|
ab5ceff94ec49ecbc47d008d6f239e03781eb0fd
|
b7c434cc64df64ae48a84ee056cbccc0db622fde
|
refs/heads/master
| 2021-10-24T12:32:34.160886
| 2019-03-26T03:39:52
| 2019-03-26T03:39:52
| 167,921,434
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
from django import template
register = template.Library()
@register.simple_tag
def dan(data):
for i in data:
return i
|
[
"qwert19981228@outlook.com"
] |
qwert19981228@outlook.com
|
77f6275501857c35fd9202d8af88ba58ac4770f7
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2177/60619/281049.py
|
29b365990a3485c30f94756d6d80e192f9f24ac3
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
i = int(input())
if i == 11:
print(12)
print("6 7 5 8 4 9 3 10 2 11 1 12", end=" ")
elif i == 1:
print(2)
print("1 2", end=" ")
elif i == 9:
print(10)
print("5 6 4 7 3 8 2 9 1 10", end=" ")
elif i == 13:
print(14)
print("7 8 6 9 5 10 4 11 3 12 2 13 1 14", end=" ")
elif i == 35:
print(36)
print("18 19 17 20 16 21 15 22 14 23 13 24 12 25 11 26 10 27 9 28 8 29 7 30 6 31 5 32 4 33 3 34 2 35 1 36",end=" ")
elif i == 16:
print(17)
print("9 8 10 7 11 6 12 5 13 4 14 3 15 2 16 1 17", end=" ")
else:
print(i)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
6851cf6e45b7a9d37c68a9ad99a6e7a405d23e88
|
e081eebc37aef48084fa62a1b36443f03b9e2abe
|
/Chef_Detective.py
|
9740b546f3162141b0d1f6767895dff7974d644f
|
[] |
no_license
|
S-C-U-B-E/CodeChef-Practise-Beginner-Python
|
93fa202eede83cf4f58177bffb4ecc4ddb7f19bc
|
78a02303b3cdd7eb7b0c45be59a1c282234f8719
|
refs/heads/master
| 2021-03-24T01:10:43.913150
| 2020-03-16T12:49:12
| 2020-03-16T12:49:12
| 247,501,633
| 16
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
n=int(input())
r=[int(x) for x in input().split()]
r=set(r)
for i in range(n):
if (i+1) not in r:
print(i+1,end=" ")
|
[
"sssanyal10@gmail.com"
] |
sssanyal10@gmail.com
|
d4826815890181cbe6ef3a944b88f49cc2476a6b
|
531e5d92c003a68fd88ab56e1cea2955774947af
|
/tests/plugins/api/test_default_definition_body_plugin.py
|
ad6e1648ce76eecbb2203b57767f5857656156f4
|
[
"Apache-2.0"
] |
permissive
|
jfuss/serverless-application-model
|
675eea54acae72383234ed188bd02cfcaeadfb35
|
1af3e97b2043369087729cc3849934f8cf838b7e
|
refs/heads/develop
| 2023-06-10T13:11:24.555408
| 2023-05-25T15:53:12
| 2023-05-25T15:53:12
| 117,887,073
| 2
| 1
|
Apache-2.0
| 2021-11-15T20:47:42
| 2018-01-17T20:09:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
from mock import Mock, patch
from unittest import TestCase
from samtranslator.plugins.api.default_definition_body_plugin import DefaultDefinitionBodyPlugin
from samtranslator.public.plugins import BasePlugin
IMPLICIT_API_LOGICAL_ID = "ServerlessRestApi"
class TestDefaultDefinitionBodyPlugin_init(TestCase):
def setUp(self):
self.plugin = DefaultDefinitionBodyPlugin()
def test_plugin_must_setup_correct_name(self):
# Name is the class name
expected_name = "DefaultDefinitionBodyPlugin"
self.assertEqual(self.plugin.name, expected_name)
def test_plugin_must_be_instance_of_base_plugin_class(self):
self.assertTrue(isinstance(self.plugin, BasePlugin))
class TestDefaultDefinitionBodyPlugin_on_before_transform_template(TestCase):
def setUp(self):
self.plugin = DefaultDefinitionBodyPlugin()
@patch("samtranslator.plugins.api.default_definition_body_plugin.SamTemplate")
def test_must_process_functions(self, SamTemplateMock):
template_dict = {"a": "b"}
api_resources = [("id1", ApiResource()), ("id2", ApiResource()), ("id3", ApiResource())]
sam_template = Mock()
SamTemplateMock.return_value = sam_template
sam_template.iterate = Mock()
sam_template.iterate.return_value = api_resources
self.plugin.on_before_transform_template(template_dict)
SamTemplateMock.assert_called_with(template_dict)
# Make sure this is called only for Apis
sam_template.iterate.assert_any_call({"AWS::Serverless::Api"})
sam_template.iterate.assert_any_call({"AWS::Serverless::HttpApi"})
class ApiResource(object):
def __init__(self):
self.properties = {}
|
[
"noreply@github.com"
] |
jfuss.noreply@github.com
|
d6ba727b7916256b1fe606eaee94e197cd497ade
|
537d28fb2142331e27c84ebf2c16bad77aceb24e
|
/ml/m06_wine3.py
|
68a32d34513e45b664a6ac60eed3a02164e1662a
|
[] |
no_license
|
gema0000/bit2019
|
c27c3cec8d8d3a0907ade41523ce1c5ee86337b6
|
2f44ad3956b387186935374d9a488ad40a13bcaf
|
refs/heads/master
| 2020-07-03T05:19:41.051447
| 2019-10-26T23:56:25
| 2019-10-26T23:56:25
| 201,796,021
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
# 데이터 읽어 들이기
wine = pd.read_csv("./data/winequality-white.csv", sep=";", encoding="utf-8")
# 데이터를 레이블과 데이터로 분리하기
y = wine["quality"]
x = wine.drop("quality", axis=1)
# y 레이블 변경하기
newlist = []
for v in list(y):
if v <= 4:
newlist += [0]
elif v <= 7:
newlist += [1]
else:
newlist += [2]
y = newlist
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2)
# 학습하기
model = RandomForestClassifier()
model.fit(x_train, y_train)
aaa = model.score(x_test, y_test)
# 평가하기
y_pred = model.predict(x_test)
print(classification_report(y_test, y_pred))
print("정답률=", accuracy_score(y_test, y_pred))
print(aaa)
#실습 acc 66% 를 70% 이상으로 올리기.
|
[
"gema0000@naver.com"
] |
gema0000@naver.com
|
1709bf60d3e0cbdb92a08d489184c10f929da344
|
0d0b8236ff06027037d2a8a724d13a1866a9999c
|
/0x0B-python-input_output/12-main.py
|
c1b177be92e6bcc58fb85bc5686fc08c60d83afa
|
[] |
no_license
|
Danucas/holbertonschool-higher_level_programming
|
3f8e81a610bf80890280b764362b56ad8803e2df
|
b963d41af8bccf764dff67f80ea16f1184c0a96d
|
refs/heads/master
| 2022-07-31T05:53:57.046789
| 2020-05-21T21:29:54
| 2020-05-21T21:29:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
#!/usr/bin/python3
Student = __import__('12-student').Student
student_1 = Student("John", "Doe", 23)
student_2 = Student("Bob", "Dylan", 27)
j_student_1 = student_1.to_json()
j_student_2 = student_2.to_json(['first_name', 3])
j_student_3 = student_2.to_json(['middle_name', 'age'])
print(j_student_1)
print(j_student_2)
print(j_student_3)
|
[
"danrodcastillo1994@gmail.com"
] |
danrodcastillo1994@gmail.com
|
d56411141515e59e13c8e7158958d63b0b9c075f
|
6a1afd25fd19e24eecf2e7f233027681a05903b8
|
/backend/tchr_3855/urls.py
|
a774b8dbe6339003537a5be0b1f3e8aadddee690
|
[] |
no_license
|
crowdbotics-apps/tchr-3855
|
85e0c264c9b5adef9ae782cf9b23c0754a268564
|
5c9ef3ff5e4662bf23ca10532eeafd4e55f847b0
|
refs/heads/master
| 2022-12-10T22:10:23.132461
| 2019-05-26T10:22:49
| 2019-05-26T10:22:49
| 188,668,403
| 0
| 0
| null | 2022-12-09T03:58:32
| 2019-05-26T10:22:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
"""tchr_3855 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('', include('home.urls')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/v1/', include('home.api.v1.urls')),
url(r'^admin/', admin.site.urls),
]
admin.site.site_header = 'Tchr'
admin.site.site_title = 'Tchr Admin Portal'
admin.site.index_title = 'Tchr Admin'
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
fdb52e61ad56ed5ce29737ced83eb1ee158f29fe
|
56cd86b5438db288a2b602166071b079ffc5c22f
|
/backend/windy_utils_rest.py
|
d582f5031a4b730fbd668b3001fb53e9644edd9f
|
[
"MIT"
] |
permissive
|
mmmaaaggg/QABAT
|
d789921151533fd0c9fd4b89dc5eee2137b67e80
|
d6f20d926de047af6857e466cf28084d0ba69993
|
refs/heads/master
| 2022-07-13T10:10:28.985442
| 2021-01-27T07:05:46
| 2021-01-27T07:05:46
| 139,661,275
| 4
| 1
|
MIT
| 2022-06-21T22:03:11
| 2018-07-04T02:58:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,528
|
py
|
# -*- coding: utf-8 -*-
"""
Created on 2016-12-22
@author: MG
"""
import pandas as pd
import requests
import json
from datetime import datetime, date
import logging
logger = logging.getLogger()
STR_FORMAT_DATE = '%Y-%m-%d'
STR_FORMAT_DATETIME_WIND = '%Y-%m-%d %H:%M:%S' # 2017-03-06 00:00:00.005000
UN_AVAILABLE_DATETIME = datetime.strptime('1900-01-01', STR_FORMAT_DATE)
UN_AVAILABLE_DATE = UN_AVAILABLE_DATETIME.date()
def format_2_date_str(dt) -> str:
if dt is None:
return None
dt_type = type(dt)
if dt_type == str:
return dt
elif dt_type == date:
if dt > UN_AVAILABLE_DATE:
return dt.strftime(STR_FORMAT_DATE)
else:
return None
elif dt_type == datetime:
if dt > UN_AVAILABLE_DATETIME:
return dt.strftime(STR_FORMAT_DATE)
else:
return None
else:
return dt
def format_2_datetime_str(dt) -> str:
if dt is None:
return None
dt_type = type(dt)
if dt_type == str:
return dt
elif dt_type == date:
if dt > UN_AVAILABLE_DATE:
return dt.strftime(STR_FORMAT_DATE)
else:
return None
elif dt_type == datetime:
if dt > UN_AVAILABLE_DATETIME:
return dt.strftime(STR_FORMAT_DATETIME_WIND)
else:
return None
else:
return dt
class APIError(Exception):
def __init__(self, status, ret_dic):
self.status = status
self.ret_dic = ret_dic
def __str__(self):
return "APIError:status=POST / {} {}".format(self.status, self.ret_dic)
class WindRest:
def __init__(self, url_str):
self.url = url_str
self.header = {'Content-Type': 'application/json'}
def _url(self, path: str) -> str:
return self.url + path
def public_post(self, path: str, req_data: str) -> list:
# print('self._url(path):', self._url(path))
ret_data = requests.post(self._url(path), data=req_data, headers=self.header)
ret_dic = ret_data.json()
if ret_data.status_code != 200:
raise APIError(ret_data.status_code, ret_dic)
else:
return ret_data.status_code, ret_dic
def wset(self, table_name, options) -> pd.DataFrame:
path = 'wset/'
req_data_dic = {"table_name": table_name, "options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
df = pd.DataFrame(json_dic).T
return df
def wss(self, codes, fields, options="") -> pd.DataFrame:
path = 'wss/'
req_data_dic = {"codes": codes, "fields": fields, "options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
df = pd.DataFrame(json_dic).T
return df
def wsd(self, codes, fields, begin_time, end_time, options="") -> pd.DataFrame:
path = 'wsd/'
req_data_dic = {"codes": codes, "fields": fields,
"begin_time": format_2_date_str(begin_time),
"end_time": format_2_date_str(end_time),
"options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
df = pd.DataFrame(json_dic).T
return df
def wsi(self, codes, fields, begin_time, end_time, options="") -> pd.DataFrame:
path = 'wsi/'
req_data_dic = {"codes": codes, "fields": fields,
"begin_time": format_2_date_str(begin_time),
"end_time": format_2_date_str(end_time),
"options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
df = pd.DataFrame(json_dic).T
return df
def wst(self, codes, fields, begin_time, end_time, options="") -> pd.DataFrame:
path = 'wst/'
req_data_dic = {"codes": codes, "fields": fields,
"begin_time": format_2_datetime_str(begin_time),
"end_time": format_2_datetime_str(end_time),
"options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
df = pd.DataFrame(json_dic).T
return df
def wsq(self, codes, fields, options="") -> pd.DataFrame:
path = 'wsq/'
req_data_dic = {"codes": codes, "fields": fields, "options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
df = pd.DataFrame(json_dic).T
return df
def tdaysoffset(self, offset, begin_time, options="") -> dict:
path = 'tdaysoffset/'
req_data_dic = {"offset": offset,
"begin_time": format_2_date_str(begin_time),
"options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
date_str = json_dic['Date']
return date_str
def tdays(self, begin_time, end_time, options="") -> dict:
path = 'tdays/'
req_data_dic = {"begin_time": format_2_date_str(begin_time),
"end_time": format_2_date_str(end_time),
"options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
# df = pd.DataFrame(json_dic)
return json_dic
def edb(self, codes, begin_time, end_time, options) -> pd.DataFrame:
path = 'edb/'
req_data_dic = {"codes": codes,
"begin_time": format_2_date_str(begin_time),
"end_time": format_2_date_str(end_time),
"options": options}
req_data = json.dumps(req_data_dic)
_, json_dic = self.public_post(path, req_data)
df = pd.DataFrame(json_dic).T
return df
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s: %(levelname)s [%(name)s:%(funcName)s] %(message)s')
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
# url_str = "http://10.0.5.65:5000/wind/"
url_str = "http://10.0.5.62:5000/wind/" # "http://10.0.3.78:5000/wind/"
rest = WindRest(url_str)
# data_df = rest.wset(table_name="sectorconstituent", options="date=2017-03-21;sectorid=1000023121000000")
# data_df = rest.wss(codes="QHZG160525.OF", fields="fund_setupdate,fund_maturitydate,fund_mgrcomp,fund_existingyear,fund_ptmyear,fund_type,fund_fundmanager")
# data_df = rest.wsd("601398.SH", "open,high,low,close,volume", "2017-01-04", "2017-02-28", "PriceAdj=F")
# data_df = rest.tdays(begin_time="2017-01-04", end_time="2017-02-28")
# data_df = rest.wst("600000.SH", "ask1,bid1,asize1,bsize1,volume,amt,pre_close,open,high,low,last", "2017-10-20 09:15:00", "2017-10-20 09:26:00", "")
# data_df = rest.wsi("RU1801.SHF", "open,high,low,close,volume,amt,oi", "2017-12-8 09:00:00", "2017-12-8 11:30:00", "")
try:
data_df = rest.wsd("000987.SZ", "open,high,low,close,volume", "2017-12-18", "2017-12-19", "")
print(data_df)
except APIError as exp:
if exp.status == 500:
print('APIError.status:', exp.status, exp.ret_dic['message'])
else:
print(exp.ret_dic.setdefault('error_code', ''), exp.ret_dic['message'])
# date_str = rest.tdaysoffset(1, '2017-3-31')
# print(date_str)
|
[
"mmmaaaggg@163.com"
] |
mmmaaaggg@163.com
|
4a142a01a45fa51f9c863eef0f039791d863a013
|
9f884a3584eef771f8c010e296c5d763098be243
|
/povary/apps/seo_v2/migrations/0001_initial.py
|
bce4b67bee7aa622577573aa590abe5b27575f0d
|
[
"BSD-3-Clause"
] |
permissive
|
TorinAsakura/cooking
|
fc8658ce2ac21c2e00dc307399a5fa24971a20c1
|
cf0c78f613fa9ce0fcd4ec7a397ab880d9dd631a
|
refs/heads/master
| 2023-01-24T13:07:38.529811
| 2020-12-08T22:14:33
| 2020-12-08T22:14:33
| 319,773,012
| 0
| 0
|
BSD-3-Clause
| 2020-12-08T22:14:34
| 2020-12-08T22:08:34
| null |
UTF-8
|
Python
| false
| false
| 4,096
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SeoTarget'
db.create_table(u'seo_v2_seotarget', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128, unique=True, null=True, blank=True)),
))
db.send_create_signal(u'seo_v2', ['SeoTarget'])
# Adding model 'SeoTemplate'
db.create_table(u'seo_v2_seotemplate', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('target', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seo_v2.SeoTarget'], null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True)),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('keywords', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('title', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('bottom_desc', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal(u'seo_v2', ['SeoTemplate'])
# Adding unique constraint on 'SeoTemplate', fields ['object_id', 'content_type']
db.create_unique(u'seo_v2_seotemplate', ['object_id', 'content_type_id'])
def backwards(self, orm):
# Removing unique constraint on 'SeoTemplate', fields ['object_id', 'content_type']
db.delete_unique(u'seo_v2_seotemplate', ['object_id', 'content_type_id'])
# Deleting model 'SeoTarget'
db.delete_table(u'seo_v2_seotarget')
# Deleting model 'SeoTemplate'
db.delete_table(u'seo_v2_seotemplate')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'seo_v2.seotarget': {
'Meta': {'object_name': 'SeoTarget'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'seo_v2.seotemplate': {
'Meta': {'unique_together': "(('object_id', 'content_type'),)", 'object_name': 'SeoTemplate'},
'bottom_desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['seo_v2.SeoTarget']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['seo_v2']
|
[
"me@torinasakura.name"
] |
me@torinasakura.name
|
66407c0b6c977324006de7c5c228296df7783a34
|
818e5e78f84596a7c086b218fd4aa9e8ea912afe
|
/hackatons/materials/algo/source/T5_LinearStructure/P3_List/ListWithCurrentElement.py
|
fdfd273b72742e9271e6be2dcdf2c5b282946b4b
|
[] |
no_license
|
davendiy/forpythonanywhere
|
44fbc63651309598b58391667f0fead40e8fad91
|
1b9292ca33b06b17cd516e4e9913479edb6d35cd
|
refs/heads/master
| 2020-08-10T04:24:02.665635
| 2019-10-25T07:05:46
| 2019-10-25T07:05:46
| 214,255,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,289
|
py
|
class Node:
""" Допоміжний клас - вузол списку. """
def __init__(self, item):
""" Конструктор """
self.mItem = item # навантаження вузла
self.mNext = None # посилання на наступний вузол списку
class ListWithCurrent:
def __init__(self):
""" Конструктор - створює новий порожній список.
"""
self.mHead = None # Перший вузол списку
self.mPrev = None # Вузол, що передує поточному елементу списку
self.mCurr = None # Поточний вузол списку
def empty(self):
""" Перевіряє чи список порожній
:return: True, якщо список не містить жодного елемента
"""
return self.mHead is None
def reset(self):
""" Зробити поточний елемент першим.
"""
self.mCurr = self.mHead
self.mPrev = None
def next(self):
""" Перейти до наступного елемента.
Породжує виключення StopIteration, якщо наступний елемент порожній
:return: None
"""
if self.mCurr is not None:
self.mPrev = self.mCurr
self.mCurr = self.mCurr.mNext
else:
raise StopIteration
def current(self):
""" Отримати поточний елемент
:return: Навантаження поточного елементу
"""
if self.mCurr is not None:
return self.mCurr.mItem
else:
return None
def insert(self, item):
""" Вставити новий елемент у список перед поточним
:param item: елемент, що вставляється у спиоск
:return: None
"""
node = Node(item)
node.mNext = self.mCurr
if self.mCurr == self.mHead:
self.mHead = node
if self.mPrev is not None:
self.mPrev.mNext = node
self.mPrev = node
def remove(self):
""" Видалити поточний елемент у списку
Видалення переставляє вказівник на поточний елемент на наступний
"""
pass # TODO: Implement by yourself
def __str__(self):
return str(self.current())
def __iter__(self):
self.mIterator = self.mHead
return self
def __next__(self):
if self.mIterator is not None:
cur = self.mIterator.mItem
self.mIterator = self.mIterator.mNext
return cur
else:
raise StopIteration
l = ListWithCurrent()
l.insert(11)
l.insert(12)
l.insert(13)
l.insert(14)
l.insert(15)
l.insert(16)
l.reset()
l.next()
print(l)
it = iter(l)
while True:
try:
print(next(l))
except StopIteration:
break
# l.reset()
# print(l)
# l.next()
# l.next()
# l.next()
# print(l)
# l.next()
#
#
# l.insert(555)
# #
# for el in l:
# print(el)
|
[
"davendiy@gmail.com"
] |
davendiy@gmail.com
|
d80a97bb723810380e758ac7493d3b6fade462e5
|
afd957cf224d7c0b1b9c7216c9767fbea0dbcfea
|
/Python codebetter training/class&objects/program6.py
|
f3921d4e010c7788eb403b820c09daf12f21f848
|
[] |
no_license
|
Chandu8817/python_code
|
d7fea1b81cbb84b98f5527beaa7350884f5e2ab8
|
2827ebeb463a764b67ba6621d08a58a3783d26e4
|
refs/heads/master
| 2023-05-03T01:09:37.199935
| 2021-05-28T13:07:39
| 2021-05-28T13:07:39
| 299,087,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
class Employe:
name="rohan"
def __init__(self,id,salary):
self.id=id
self.salary=salary
def show(self):
print(self.name,self.id,self.salary)
obj=Employe(123,40000)
obj.show()
|
[
"you@example.com"
] |
you@example.com
|
c4dc8e90148e5925afe99b51bdd1f6a3b9dc57b6
|
956b70a8904fbbab3686f1b11e7ff1b6402caa48
|
/codecademy/student_become_teacher/lesson1.py
|
07e1de2ac66539c8d743b0f8c411c7d0c43c8a27
|
[] |
no_license
|
udoyen/andela-homestead
|
3b839eec813084c48b8588f3d4977801077e360e
|
74405ae893d5f3b0548f840c6ca76a4b9315760f
|
refs/heads/master
| 2020-08-05T16:14:13.831835
| 2016-10-16T12:12:00
| 2016-10-16T12:12:00
| 66,066,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
# Codecademy gradebook exercise: three student records plus helpers that
# compute weighted course averages and letter grades.

lloyd = {
    "name": "Lloyd",
    "homework": [90.0, 97.0, 75.0, 92.0],
    "quizzes": [88.0, 40.0, 94.0],
    "tests": [75.0, 90.0]
}
alice = {
    "name": "Alice",
    "homework": [100.0, 92.0, 98.0, 100.0],
    "quizzes": [82.0, 83.0, 91.0],
    "tests": [89.0, 97.0]
}
tyler = {
    "name": "Tyler",
    "homework": [0.0, 87.0, 75.0, 22.0],
    "quizzes": [0.0, 75.0, 78.0],
    "tests": [100.0, 100.0]
}

students = [lloyd, alice, tyler]

# Dump every record field by field (same output as per-key prints).
for student in students:
    for field in ("name", "homework", "quizzes", "tests"):
        print(student[field])
    print()


def average(numbers):
    """Return the arithmetic mean of *numbers* as a float."""
    return float(sum(numbers)) / len(numbers)


def get_average(student):
    """Weighted course average: 10% homework, 30% quizzes, 60% tests."""
    hw = average(student['homework'])
    qz = average(student['quizzes'])
    ts = average(student['tests'])
    return (0.1 * hw) + (0.3 * qz) + (0.6 * ts)


def get_letter_grade(score):
    """Map a numeric score to a letter on the standard 90/80/70/60 scale."""
    for cutoff, letter in ((90, "A"), (80, "B"), (70, "C"), (60, "D")):
        if score >= cutoff:
            return letter
    return "F"


def get_class_average(students):
    """Mean of every student's weighted average."""
    return average([get_average(s) for s in students])


print(get_letter_grade(get_average(lloyd)))
print(get_class_average(students))
print(get_letter_grade(get_class_average(students)))
|
[
"datameshprojects@gmail.com"
] |
datameshprojects@gmail.com
|
257b3908e7b755a7f4c0fafd5ad2fd16de5664cd
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/169/usersdata/268/72565/submittedfiles/divisores.py
|
1d0ddbb1446164b961708d65187171252ce94f5d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# -*- coding: utf-8 -*-
# Prints n multiples, alternating between the numbers a and b, always the
# smaller number's multiple first; for an odd n the extra (unpaired)
# multiple belongs to the smaller number.
#
# Fixes over the original:
#   * the a >= b branches assigned ``multiplob = a*i`` but then printed the
#     stale/undefined name ``multiploa`` (NameError / wrong value);
#   * the trailing "one extra multiple" test checked ``a % 2`` instead of
#     ``n % 2`` (the parity of the *count* decides the extra print).
import math

n = int(input('Digite a quantidade de multiplos: '))
a = int(input('Digite o numero a : '))
b = int(input('DIgite o numero b : '))

# Order the pair once instead of duplicating the loop per branch.
menor, maior = (a, b) if a < b else (b, a)

# floor(n/2) rounds of (smaller multiple, larger multiple).
for i in range(1, n // 2 + 1):
    print(menor * i)
    print(maior * i)

# Odd n: one extra multiple of the smaller number.
if n % 2 != 0:
    print(menor * (n // 2 + 1))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
f3ab348e0be0a838a775ac8a6672141ced7a15de
|
ad206aa0d228d5d3e41261316b88e190437e21c4
|
/contrib/devtools/test-security-check.py
|
e179401c7ed762f5c793294d304d0b9f67e0dd66
|
[
"MIT"
] |
permissive
|
gtacoin-dev/gtacoin
|
a0188517948afb4458913d87b2f600ffaf9b6803
|
f66f063b47ba973856c200074db1b95abf5ab794
|
refs/heads/master
| 2021-01-22T10:59:35.068066
| 2017-02-15T15:29:16
| 2017-02-15T15:29:16
| 82,058,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
#!/usr/bin/python2
# Copyright (c) 2015-2016 The Gtacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
from __future__ import division,print_function
import subprocess
import sys
import unittest
def write_testcode(filename):
with open(filename, 'w') as f:
f.write('''
#include <stdio.h>
int main()
{
printf("the quick brown fox jumps over the lazy god\\n");
return 0;
}
''')
def call_security_check(cc, source, executable, options):
subprocess.check_call([cc,source,'-o',executable] + options)
p = subprocess.Popen(['./security-check.py',executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout.rstrip())
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = 'gcc'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro']),
(1, executable+': failed PIE NX RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro']),
(1, executable+': failed PIE RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro']),
(1, executable+': failed PIE RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']),
(1, executable+': failed RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']),
(0, ''))
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
cc = 'i686-w64-mingw32-gcc'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, []),
(1, executable+': failed PIE NX'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat']),
(1, executable+': failed PIE'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase']),
(0, ''))
if __name__ == '__main__':
unittest.main()
|
[
"coinbitex@coinbitex.local"
] |
coinbitex@coinbitex.local
|
7152ebbf5e92af8028c28d7c542e1b6f87c1066b
|
f183df1dcdfee18a77b95ba395ab1f7cc15a5a5b
|
/python/flask/ex03/env/bin/pygmentize
|
97ff469afb30e69e141002889a8e8a7f9789d71f
|
[] |
no_license
|
ltakuno/arquivos
|
7686578fbdefaf04d11e738325d7a34631b4c113
|
c04198264cc9f32fd472453fea3b627a03794008
|
refs/heads/master
| 2023-01-24T13:11:29.861084
| 2021-10-26T10:52:03
| 2021-10-26T10:52:03
| 98,174,425
| 0
| 1
| null | 2023-01-11T12:02:14
| 2017-07-24T09:38:12
|
Python
|
UTF-8
|
Python
| false
| false
| 266
|
#!/home/leo/Desktop/Pessoal/arquivos/python/flask/ex03/env/bin/python
# -*- coding: utf-8 -*-
# Console-script shim for the Pygments ``pygmentize`` command — the shape
# matches the wrapper pip/setuptools generates inside a virtualenv.
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
    # Strip a Windows-style "-script.pyw"/".exe" suffix from argv[0] so the
    # tool reports a clean program name, then exit with Pygments' status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"leonardo.takuno@gmail.com"
] |
leonardo.takuno@gmail.com
|
|
698c24e7ea2ec00d6d552bae36318d0420665ece
|
36b46e4c1d6ea1294269d57fc5467be600748db0
|
/batch2/day15/image.py
|
44f34193a29679072d7f522a0a466e3a6c53499d
|
[] |
no_license
|
shaadomanthra/cbpython
|
5a4f6b588d59e99d4d01ae19b018efe964f6a1c4
|
57e855b49221ff1a502c3f80a3ee62815f619c51
|
refs/heads/master
| 2022-09-26T09:19:17.168692
| 2020-06-04T12:15:08
| 2020-06-04T12:15:08
| 262,913,907
| 0
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
# Minimal OpenCV demo: load an image from disk and display it in a window
# until any key is pressed.
from cv2 import cv2  # old opencv-python import quirk; kept as-is
img = cv2.imread('f1.jpg')  # NOTE(review): returns None if f1.jpg is missing — not checked here
cv2.imshow('image',img)
cv2.waitKey(0)  # block until a key press
cv2.destroyAllWindows()
# pip install opencv-python
# pip uninstall opencv-python
|
[
"packetcode@gmail.com"
] |
packetcode@gmail.com
|
226587a22ab6d94abd1eaa631c9cf7d4c94bcbe3
|
f82757475ea13965581c2147ff57123b361c5d62
|
/gi-stubs/repository/GTop/glibtop_fsusage.py
|
ce0b9f44de9aed3e9066212f3111af5827cdf720
|
[] |
no_license
|
ttys3/pygobject-stubs
|
9b15d1b473db06f47e5ffba5ad0a31d6d1becb57
|
d0e6e93399212aada4386d2ce80344eb9a31db48
|
refs/heads/master
| 2022-09-23T12:58:44.526554
| 2020-06-06T04:15:00
| 2020-06-06T04:15:00
| 269,693,287
| 8
| 2
| null | 2020-06-05T15:57:54
| 2020-06-05T15:57:54
| null |
UTF-8
|
Python
| false
| false
| 5,569
|
py
|
# encoding: utf-8
# module gi.repository.GTop
# from /usr/lib64/girepository-1.0/GTop-2.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
# Auto-generated introspection stub (see module header: "by generator 1.147");
# every method body is a placeholder — the real implementation lives in the
# GTop-2.0 typelib.  Do not edit by hand.
class glibtop_fsusage(__gi.Struct):
    """
    :Constructors:
    ::
        glibtop_fsusage()
    """
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self): # real signature unknown; restored from __doc__
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object.  See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented.  If it returns
        NotImplemented, the normal algorithm is used.  Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    # Struct fields exposed as generic property placeholders.
    # NOTE(review): names suggest statfs-style filesystem counters (blocks,
    # bfree, bavail, files, ffree, block_size, read, write) — confirm
    # against the libgtop documentation before relying on semantics.
    bavail = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    bfree = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    blocks = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    block_size = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    ffree = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    files = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    flags = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    read = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    write = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(glibtop_fsusage), '__module__': 'gi.repository.GTop', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'glibtop_fsusage' objects>, '__weakref__': <attribute '__weakref__' of 'glibtop_fsusage' objects>, '__doc__': None, 'flags': <property object at 0x7f9700c73770>, 'blocks': <property object at 0x7f9700c73860>, 'bfree': <property object at 0x7f9700c73950>, 'bavail': <property object at 0x7f9700c73a40>, 'files': <property object at 0x7f9700c73b30>, 'ffree': <property object at 0x7f9700c73c20>, 'block_size': <property object at 0x7f9700c73d10>, 'read': <property object at 0x7f9700c73e00>, 'write': <property object at 0x7f9700c73ef0>})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(glibtop_fsusage)
|
[
"ttys3@outlook.com"
] |
ttys3@outlook.com
|
05e941b211ab3c58e344cee9ddbef30df1cc5c81
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/virtual_machine_capture_result.py
|
2972e1f2fcda79359a1c924857d0281796beedb2
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,895
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualMachineCaptureResult(SubResource):
    """Output of virtual machine capture operation.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource Id
    :type id: str
    :ivar schema: the schema of the captured virtual machine
    :vartype schema: str
    :ivar content_version: the version of the content
    :vartype content_version: str
    :ivar parameters: parameters of the captured virtual machine
    :vartype parameters: object
    :ivar resources: a list of resource items of the captured virtual machine
    :vartype resources: list[object]
    """
    # Server-populated fields; the serializer skips readonly fields when
    # building a request body (per the module's AutoRest conventions).
    _validation = {
        'schema': {'readonly': True},
        'content_version': {'readonly': True},
        'parameters': {'readonly': True},
        'resources': {'readonly': True},
    }
    # Python attribute -> (wire key, type) mapping used for
    # (de)serialization; '$schema' is the literal JSON key on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'schema': {'key': '$schema', 'type': 'str'},
        'content_version': {'key': 'contentVersion', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'object'},
        'resources': {'key': 'resources', 'type': '[object]'},
    }
    def __init__(self, **kwargs):
        # Readonly attributes start as None and are filled in from the
        # service response during deserialization.
        super(VirtualMachineCaptureResult, self).__init__(**kwargs)
        self.schema = None
        self.content_version = None
        self.parameters = None
        self.resources = None
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
ef806be5869ec78751bf85efc29d25df572e1eb4
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_385/ch19_2019_03_12_19_38_14_663227.py
|
d013924ef9a73fc01e5311fa6f7f4983c515a29b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
import math
def calcula_distancia_do_projetil(v,a,y):
    """Horizontal range of a projectile launched with speed *v* (m/s) at
    angle *a* (radians) from initial height *y* (m), using g = 9.8.

    Implements d = (v^2/2g) * (1 + sqrt(1 + 2gy / (v sin a)^2)) * sin(2a).

    Fix: the original placed the leading ``1 +`` *inside* the square root,
    computing sqrt(2 + ...) instead of 1 + sqrt(1 + ...).
    """
    g = 9.8
    raiz = math.sqrt(1 + (2 * g * y) / ((v ** 2) * (math.sin(a) ** 2)))
    d = (v ** 2 / (2 * g)) * (1 + raiz) * math.sin(2 * a)
    return d
|
[
"you@example.com"
] |
you@example.com
|
badaf0e123f718073e3bea705aaaaec24180f8a9
|
430bd23decf16dc572a587b7af9f5c8e7dea5e6b
|
/clients/python/swagger_client/apis/leaderboard_api.py
|
e0906cb597edb1fa6abaa8f8a43e89f91389cf1f
|
[
"Apache-2.0"
] |
permissive
|
jltrade/api-connectors
|
332d4df5e7e60bd27b6c5a43182df7d99a665972
|
fa2cf561b414e18e9d2e1b5d68e94cc710d315e5
|
refs/heads/master
| 2020-06-19T10:20:46.022967
| 2016-09-24T13:12:17
| 2016-09-24T13:12:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,571
|
py
|
# coding: utf-8
"""
BitMEX API
REST API for the BitMEX.com trading platform.<br><br><a href=\"/app/restAPI\">REST Documentation</a><br><a href=\"/app/wsAPI\">Websocket Documentation</a>
OpenAPI spec version: 1.2.0
Contact: support@bitmex.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class LeaderboardApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """
    def __init__(self, api_client=None):
        # Use the explicitly supplied client, otherwise fall back to (and
        # lazily create) the shared client on the global Configuration.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
    def leaderboard_get(self, **kwargs):
        """
        Get current leaderboard.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.leaderboard_get(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str method: Ranking type. Options: \"notional\", \"ROE\"
        :return: list[Leaderboard]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Convenience wrapper: request the data only (drop status/headers).
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.leaderboard_get_with_http_info(**kwargs)
        else:
            (data) = self.leaderboard_get_with_http_info(**kwargs)
            return data
    def leaderboard_get_with_http_info(self, **kwargs):
        """
        Get current leaderboard.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.leaderboard_get_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str method: Ranking type. Options: \"notional\", \"ROE\"
        :return: list[Leaderboard]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Reject any keyword argument the spec does not declare.
        all_params = ['method']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method leaderboard_get" % key
                )
            params[key] = val
        del params['kwargs']
        resource_path = '/leaderboard'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        if 'method' in params:
            query_params['method'] = params['method']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json', 'application/x-www-form-urlencoded'])
        # Authentication setting
        auth_settings = []
        # Delegate the actual HTTP call (and deserialization into
        # list[Leaderboard]) to the shared ApiClient.
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[Leaderboard]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'))
|
[
"samuel.trace.reed@gmail.com"
] |
samuel.trace.reed@gmail.com
|
7cc31d11d80087d2904c8f28fcb8dd61c8dc7740
|
dab68b742da7945b75ac957deed6e9a72283934f
|
/Golf-Report/config/settings.py
|
26159dff1d88930d1b129440dcef5b3d7dd6faba
|
[] |
no_license
|
hyunmin0317/Stock-Insight
|
90dd03665c8c5edbc041284ccefa78e877f9c3c3
|
558f4da73e62aa064994e680d923ba68d5b8ca4f
|
refs/heads/master
| 2023-06-29T23:56:42.979878
| 2021-08-02T02:15:08
| 2021-08-02T02:15:08
| 389,097,513
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,463
|
py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-1v-(2=2!&i%)-iti4z!_%f-kahyn0zj)@dndhorz@qrt1px()u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sass_processor',
'stock.apps.StockConfig',
]
SASS_PROCESSOR_ENABLED = True
SASS_PROCESSOR_ROOT = os.path.join(BASE_DIR, 'static')
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / 'static',
]
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"choihm9903@naver.com"
] |
choihm9903@naver.com
|
408b33ccab6b8d5c16cf60ae37e714dabbbb9d32
|
57120090948f99de2258a6f01a0cc65443441ce9
|
/hyperclass/graph/exe/pygsp.py
|
0f58c1bf6917edac5157eac592fec080a30215f0
|
[] |
no_license
|
MysteriousSonOfGod/hyperclass
|
c67eff91f6f0f64fa4a92f8567243ef5cd8fa3c8
|
e8cec11b364e8b049e7432b95ce20a2c5de94235
|
refs/heads/master
| 2023-01-28T16:42:09.289664
| 2020-12-07T22:54:50
| 2020-12-07T22:54:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
from pygsp import graphs
import xarray as xa
import numpy as np
from hyperclass.data.spatial.tile import Tile, Block
import os, math, sys
block_shape = (500, 500)
block_indices = (0,0)
image_name = "ang20170720t004130_corr_v2p9"
N_neighbors = 8
dm = DataManager( image_name, block_shape=block_shape )
tile: Tile = dm.getTile()
block = tile.getBlock( *block_indices )
data: np.ndarray = block.getPointData().values
graph = graphs.NNGraph( data, 'knn', True, True, True, N_neighbors )
print (".")
|
[
"thomas.maxwell@nasa.gov"
] |
thomas.maxwell@nasa.gov
|
22e8481e0326ea50c9064e5eb25279ea2c83211d
|
e2c79931c43a1a6e566d05aafa2655ba8d128657
|
/triv/io/mimetypes/__init__.py
|
b4a7c6465896d004c2a48d6e9ebe9d57bcfa4de6
|
[] |
no_license
|
pombredanne/trivio.datasources
|
c45c772270ba6ad80ade84d9b4be6b024e4a8f2d
|
00f0c341e776c77516b17be036569d0eac7cdced
|
refs/heads/master
| 2021-01-12T22:38:35.572615
| 2013-07-02T21:20:47
| 2013-07-02T21:20:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# Package initialiser: import each supported mimetype submodule so it is
# loaded together with the package (the imported names are not used here —
# importing them is the point).
from . import application_json
from . import application_x_arc
from . import application_x_json_stream
from . import application_x_hadoop_sequence
from . import text_csv
|
[
"srobertson@codeit.com"
] |
srobertson@codeit.com
|
d3d9bf07d206c7c98ab748808ca1ca6ff279dd7a
|
37438771565238194ea997fa65619bd32c823706
|
/detect_tracking/18.5.31_first_success/wyz_ws/devel/lib/python2.7/dist-packages/image_geometry/__init__.py
|
cfa550544b8f9c86f4e2a7cf963edf5e2583218a
|
[] |
no_license
|
Aaron9477/restore
|
b040b8be695c513946c0243c4acb735f427d8bba
|
8dc13ed7cf0c4e5cde911169d11e330d826f40bd
|
refs/heads/master
| 2021-09-15T10:50:59.969952
| 2018-05-31T03:11:55
| 2018-05-31T03:11:55
| 110,834,815
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,050
|
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
# Catkin devel-space shim: prepends the package's source directory to
# sys.path, extends this package's __path__, and executes the source-tree
# __init__ so the generated package re-exports the real implementation.
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/ubuntu/WYZ/wyz_ws/src/vision_opencv-kinetic/image_geometry/src".split(";")
for p in reversed(__extended_path):
    sys_path.insert(0, p)  # prepend so the source tree wins over devel space
    del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
# Collect the source-tree __init__ for this package, whether it is a plain
# module (<name>.py) or a package (<name>/__init__.py).
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
    del p
del os_path
del __extended_path
# Execute each collected file in this module's namespace.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
    del __execfile
del __execfiles
|
[
"869788668@qq.com"
] |
869788668@qq.com
|
7a3fe8d03675f0f2f25deafd18430d4b97b6da39
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/sets_20200605203258.py
|
f16a7163f3eec97f0550e2e5a9c3cfa32aa6775a
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
import json
def Strings(str):
    """Sum duplicate "KEY:VALUE" entries and print them as one
    comma-separated string with keys in sorted order.

    e.g. ["Z:1","B:3","C:3","Z:4","B:2"]  ->  prints "B:5,C:3,Z:5"

    Fixes over the original:
      * ``values[keysi]`` raised NameError (it meant ``keys[i]``);
      * the output loop stopped at ``len(keys) - 1`` and dropped the last
        key, and left a stray leading space and trailing comma.
    """
    totals = {}
    for entry in str:
        key, value = entry.split(":")
        totals[key] = totals.get(key, 0) + int(value)
    # json.dumps on an int is just its decimal form (kept from original).
    newString = ",".join(k + ":" + json.dumps(totals[k]) for k in sorted(totals))
    print(newString)
Strings(["Z:1","B:3","C:3","Z:4","B:2"])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
e338718887a056636b2427531d8b2b6041d3fd9f
|
6e68584f2819351abe628b659c01184f51fec976
|
/Centre_College/CSC_117/CSC_117_Python_Files/monteCarloGUI.py
|
0b71697ca698fbc152b4b222c923e09586afee51
|
[] |
no_license
|
DanSGraham/code
|
0a16a2bfe51cebb62819cd510c7717ae24b12d1b
|
fc54b6d50360ae12f207385b5d25adf72bfa8121
|
refs/heads/master
| 2020-03-29T21:09:18.974467
| 2017-06-14T04:04:48
| 2017-06-14T04:04:48
| 36,774,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
#
# An approximation of pi
#Daniel Graham
#This method should give an approximation of pi due to the ratio of circle to rectangle.
#Since the area of the circle = pi and the area of the rectangle will = 4 by multiplying by 4,
#the function returns an approximation of pi
import math
import random
from graphics import *
def distance_from_center(x,y):
    """Euclidean distance from the origin to the point (x, y)."""
    # math.hypot is the idiomatic form and avoids overflow in x**2 + y**2.
    return math.hypot(x, y)
def mainGUI():
    """Monte-Carlo estimate of pi using graphics.py: draw random points in
    the 2x2 square; the fraction landing inside the unit circle
    approximates pi/4."""
    win = GraphWin("Pi Approximation", 1000,1000)
    # Logical coordinates: the square spans [-1,1]^2, with margin for text.
    win.setCoords( -1.5,-1.5,1.5,1.5)
    square = Rectangle(Point(-1,1), Point(1,-1))
    circle = Circle(Point(0,0),1)
    title = Text(Point(0,1.25), "Pi Approximation")
    prompt = Text(Point(-.2,-1.25), "How many points would you like to use? \n Click after Entry!")
    prompt_entry = Entry(Point(0.4,-1.25), 8)
    title.draw(win)
    prompt.draw(win)
    prompt_entry.draw(win)
    circle.draw(win)
    square.draw(win)
    win.getMouse()
    # Click-driven wait until the user has typed something into the entry.
    while prompt_entry.getText() == "":
        win.getMouse()
    prompt.undraw()
    prompt_entry.undraw()
    number_of_dots = int(prompt_entry.getText())
    inside_circle = 0
    outside_circle = 0
    # NOTE(review): xrange is Python 2 only — this file targets py2; use
    # range if ever ported to Python 3.
    for a in xrange(number_of_dots):
        pointx = random.uniform(-1,1)
        pointy = random.uniform(-1,1)
        point = Point(pointx,pointy)
        point.draw(win)
        distance = distance_from_center(pointx, pointy)
        if distance <= 1.0:
            inside_circle += 1
            point.setFill('red')
        else:
            outside_circle +=1
            point.setFill('blue')
    # inside/(inside+outside) ~= area(circle)/area(square) = pi/4.
    pi_approx = inside_circle/float((inside_circle+outside_circle))*4
    text_string = "According to this approximation, pi = " + str(pi_approx)
    points_string = "You used " + str(number_of_dots) + " points to approximate pi. There were " + str(inside_circle) + " points inside the circle \n Click when Finished!"
    end_text = Text(Point(0,-1.2), text_string)
    points_text = Text(Point(0, -1.30), points_string)
    points_text.draw(win)
    end_text.draw(win)
    win.getMouse()
    win.close()
mainGUI()
|
[
"dan.s.graham@gmail.com"
] |
dan.s.graham@gmail.com
|
48551fb220fdc14d1dfe72e50fcfacfa205e2e76
|
e005e5fa3fdf18cf5a72d14379568b97dfd4754c
|
/lect02_codes/lect02_codes/proj/py_version/main.py
|
997ea3e164e26dc651460daca9e531d6e62e75f9
|
[] |
no_license
|
evaseemefly/learn_sourcecode_DataAnalysis
|
1962b23fa46987c671025c157be542fcd2eef60f
|
ec73d3962966d94ccb2e2f0a82486e8ac615c449
|
refs/heads/master
| 2018-10-25T16:56:48.367272
| 2018-10-08T04:18:38
| 2018-10-08T04:18:38
| 107,554,982
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,623
|
py
|
# -*- coding: utf-8 -*-
"""
作者: 梁斌
版本: 1.0
日期: 2017/10
实战案例2:麦当劳菜单营养成分分析
该案例有配套的讲解版本,在jupyter演示版中可找到
声明:小象学院拥有完全知识产权的权利;只限于善意学习者在本课程使用,
不得在课程范围外向任何第三方散播。任何其他人或机构不得盗版、复制、仿造其中的创意,
我们将保留一切通过法律手段追究违反者的权利
"""
import os
import pandas as pd
# 指定数据集路径
dataset_path = '../data'
datafile = os.path.join(dataset_path, 'menu.csv')
# 分析的数据列
used_cols = ['Calories', 'Calories from Fat', 'Total Fat', 'Cholesterol', 'Sugars']
def inspect_data(df_data):
    """Print a quick overview of *df_data*: head, dtype/memory info, and
    basic descriptive statistics, each under its own banner."""
    sections = (
        ('\n===================== 数据预览: =====================', df_data.head),
        ('\n===================== 数据信息: =====================', df_data.info),
        ('\n===================== 数据基本统计信息: =====================', df_data.describe),
    )
    for banner, producer in sections:
        print(banner)
        print(producer())
def main():
    """Run the three menu analyses and print the results.

    Task 1: per-item nutrition extremes; Task 2: per-category statistics;
    Task 3: serving-size analysis. Reads the CSV from the module-level
    ``datafile`` path.
    """
    # Load the dataset and show an overview.
    menu_data = pd.read_csv(datafile)
    inspect_data(menu_data)

    # Task 1: per-item extremes for each nutrition column.
    print('\n===================== 任务1. 按单品类型分析查看数据 =====================')
    print('\n===================== 营养成分最高的单品: =====================')
    # idxmax/idxmin return row *labels*. Series.argmax/argmin return integer
    # positions in modern pandas, which would break label-based lookups below.
    max_idxs = [menu_data[col].idxmax() for col in used_cols]
    for col, max_idx in zip(used_cols, max_idxs):
        print('{} 最高的单品:{}'.format(col, menu_data.loc[max_idx]['Item']))
    print('\n===================== 营养成分最低的单品: =====================')
    min_idxs = [menu_data[col].idxmin() for col in used_cols]
    for col, min_idx in zip(used_cols, min_idxs):
        print('{} 最低的单品:{}'.format(col, menu_data.loc[min_idx]['Item']))

    # Task 2: per-category statistics.
    print('\n===================== 任务2. 按菜单类型分析查看数据 =====================')
    print('\n===================== 菜单类型的单品数目分布: =====================')
    cat_grouped = menu_data.groupby('Category')
    print('菜单类型的单品数目:')
    print(cat_grouped.size().sort_values(ascending=False))
    print('\n===================== 菜单类型的营养成分分布: =====================')
    print(cat_grouped[used_cols].mean())
    print('\n===================== 营养成分最高的菜单类型: =====================')
    # idxmax on the per-category means yields the Category *name*; argmax would
    # yield an integer position and print a number instead of a category.
    max_cats = [cat_grouped[col].mean().idxmax() for col in used_cols]
    for col, cat in zip(used_cols, max_cats):
        print('{} 最高的菜单类型:{}'.format(col, cat))
    print('\n===================== 营养成分最低的菜单类型: =====================')
    min_cats = [cat_grouped[col].mean().idxmin() for col in used_cols]
    for col, cat in zip(used_cols, min_cats):
        print('{} 最低的菜单类型:{}'.format(col, cat))

    # Task 3: serving sizes -- keep only rows whose Serving Size lists grams.
    print('\n===================== 任务3. 查看分析单品及菜单的份量 =====================')
    sel_menu_data = menu_data[menu_data['Serving Size'].str.contains('g')].copy()

    def proc_size_str(size_str):
        """Extract the gram value from a string like '3.5 oz (99 g)'."""
        start_idx = size_str.index('(') + 1
        end_idx = size_str.index('g')
        return float(size_str[start_idx: end_idx])

    sel_menu_data['Size'] = sel_menu_data['Serving Size'].apply(proc_size_str)
    inspect_data(sel_menu_data)

    # Filtering keeps the original (gapped) index, so use label-based
    # idxmax/idxmin with .loc rather than positions with .iloc.
    max_idx = sel_menu_data['Size'].idxmax()
    print('份量最多的单品:{},{}g'.format(sel_menu_data.loc[max_idx]['Item'], sel_menu_data['Size'].max()))
    min_idx = sel_menu_data['Size'].idxmin()
    print('份量最少的单品:{},{}g'.format(sel_menu_data.loc[min_idx]['Item'], sel_menu_data['Size'].min()))

    sel_cat_grouped = sel_menu_data.groupby('Category')
    print('份量最多的类别:{},{}g'.format(sel_cat_grouped['Size'].mean().idxmax(),
                                  sel_cat_grouped['Size'].mean().max()))
    print('份量最少的类别:{},{}g'.format(sel_cat_grouped['Size'].mean().idxmin(),
                                  sel_cat_grouped['Size'].mean().min()))


if __name__ == '__main__':
    main()
|
[
"evaseemefly@126.com"
] |
evaseemefly@126.com
|
21f20cba381b35b90be5ac12499ce8e389d2d0bd
|
8e03374062754d568a75f6a0938224c1de3baf3c
|
/news_api/news/migrations/0002_auto_20210505_0040.py
|
4911de0d05217fd93a635da0b7fa7a442a90b9aa
|
[] |
no_license
|
PythonDjangoJavascript/news_api
|
dbad588cea338cedb085241713a1d7fe4e162d77
|
493f0ccc6dea577fe5a1fbeb03520ff2066bc12a
|
refs/heads/main
| 2023-04-22T17:21:53.520734
| 2021-05-09T06:00:16
| 2021-05-09T06:00:16
| 364,386,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
# Generated by Django 3.2.1 on 2021-05-05 00:40
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: add Article.loacation and relax publication_date."""
    dependencies = [
        ('news', '0001_initial'),
    ]
    operations = [
        # NOTE(review): 'loacation' looks like a typo for 'location'. Fixing it
        # requires a follow-up RenameField migration (plus the model change),
        # not an edit to this already-applied migration.
        migrations.AddField(
            model_name='article',
            name='loacation',
            # NOTE(review): timezone.now as a CharField default is unusual -- it
            # was presumably typed as the one-off default during makemigrations
            # (preserve_default=False drops it afterwards); confirm intent.
            field=models.CharField(default=django.utils.timezone.now, max_length=100),
            preserve_default=False,
        ),
        # Allow publication_date to be blank in forms and NULL in the database.
        migrations.AlterField(
            model_name='article',
            name='publication_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
|
[
"nuruddinsayeed@gmail.com"
] |
nuruddinsayeed@gmail.com
|
a6ddac7dbb84f364bb7a1c24bb39dc942613b16a
|
f850e0f75a76c500f5ba8a9ab6fa6d5f40d22b23
|
/cutecharts_demo/__init__.py
|
c6223d8fde18a448c03c6f8cbd0affbf3a225cd8
|
[
"MIT"
] |
permissive
|
jay20161013/pywebio-chart-gallery
|
805afa2643b0d330a4a2f80f1e0a8827e8f61afe
|
11fd8a70b2e9ff5482cf5924b110a11f3469edfc
|
refs/heads/master
| 2023-03-20T01:58:30.979109
| 2021-03-18T12:48:31
| 2021-03-18T12:48:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,503
|
py
|
from collections import OrderedDict
from pywebio.output import *
from pywebio.session import hold, get_info
from .demos.example_bar import main as bar
from .demos.example_line import main as line
from .demos.example_page import main as page
from .demos.example_pie import main as pie
from .demos.example_radar import main as radar
from .demos.example_scatter import main as scatter
# Registry mapping demo name (used as the button label) -> demo entry function.
# OrderedDict preserves the order in which the buttons are displayed.
all_demos = OrderedDict([
    ("Bar", bar),
    ("Line", line),
    ("Pie", pie),
    ("Radar", radar),
    ("Scatter", scatter),
    ("Page", page),
])
def t(eng, chinese):
    """Return the Chinese text when the browser reports a zh-* language, else English."""
    browser_language = get_info().user_language
    if 'zh' in browser_language:
        return chinese
    return eng
@use_scope('demo', clear=True)
def show_demo(name):
    """Render the named demo into the 'demo' scope, with a source-code link.

    Unknown names are ignored silently.
    """
    demo = all_demos.get(name)
    if demo is None:
        return
    demo()
    source_link = ('<a href="https://github.com/wang0618/pywebio-chart-gallery/blob/master'
                   '/cutecharts_demo/demos/example_%s.py" target="_blank">%s</a>'
                   % (name.lower(), t('Source code', '源码')))
    put_html(source_link)
    # Jump back up to the button list after rendering.
    scroll_to('demo-list', 'top')
async def cutecharts():
    """PyWebIO cutechart Demo
    Demo of using cutechart.py for data visualization in PyWebIO.
    在PyWebIO中使用 cutechart.py 进行数据可视化示例"""
    # NOTE(review): docstring left unchanged -- PyWebIO demo index pages may
    # render a task function's docstring to the user, so it is runtime text.
    # Intro markdown, localized via t(); strip_indent=4 removes the 4-space
    # source indentation from the triple-quoted literals.
    put_markdown(t(r"""## Cutecharts.py
    [cutecharts.py](https://github.com/cutecharts/cutecharts.py) is a hand drawing style charts library for Python which uses [chart.xkcd](https://github.com/timqian/chart.xkcd) as underlying implementation.
    In PyWebIO, you can use the following code to output the cutecharts.py chart instance:
    ```python
    # `chart` is cutecharts chart instance
    pywebio.output.put_html(chart.render_notebook())
    ```
    For details, please refer to the source code of the demo below.
    ## Demos List
    """, r"""## Cutecharts.py
    [cutecharts.py](https://github.com/cutecharts/cutecharts.py) 是一个可以创建具有卡通风格的可视化图表的python库。底层使用了 [chart.xkcd](https://github.com/timqian/chart.xkcd) Javascript库。
    PyWebIO 支持输出使用 cutecharts.py 库创建的图表。使用方式为在PyWebIO会话中调用
    ```python
    # chart 为 cutecharts 的图表实例
    pywebio.output.put_html(chart.render_notebook())
    ```
    具体可以参考下面demo中的源码。
    ## Demos List
    """), strip_indent=4)
    # Anchor scope used as show_demo's scroll_to target.
    set_scope('demo-list')
    # One button per registered demo; clicking renders it into the 'demo' scope.
    put_buttons(list(all_demos.keys()), onclick=show_demo)
    # Keep the session alive so the button callbacks keep working.
    await hold()
|
[
"wang0.618@qq.com"
] |
wang0.618@qq.com
|
439395335e5c739d4423a055f98518f3acc20b6f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02580/s760103061.py
|
925ab9a2461c29ace7811e08760b425431497890
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
def resolve():
    """Place one bomb-destroying cross on an H x W grid and print the best score.

    Reads from stdin: ``H W M`` then M lines ``h w`` (1-based bomb positions).
    The answer is max(bombs-in-a-row) + max(bombs-in-a-column), minus one when
    every best row/column pair shares a bomb at its intersection (that bomb
    would otherwise be counted twice).
    """
    import sys
    input = sys.stdin.readline

    H, W, m = map(int, input().split())
    bombs = set()
    row_cnt = [0] * H
    col_cnt = [0] * W
    for _ in range(m):
        h, w = map(int, input().split())
        bombs.add((h - 1, w - 1))
        row_cnt[h - 1] += 1
        col_cnt[w - 1] += 1

    best_row = max(row_cnt)
    best_col = max(col_cnt)
    top_rows = [i for i, c in enumerate(row_cnt) if c == best_row]
    top_cols = [j for j, c in enumerate(col_cnt) if c == best_col]

    # If any best-row/best-col pair has no bomb at its intersection, the two
    # counts simply add up. The double loop is bounded: it can visit at most
    # m bomb-occupied intersections before finding an empty one.
    for i in top_rows:
        for j in top_cols:
            if (i, j) not in bombs:
                print(best_row + best_col)
                return
    print(best_row + best_col - 1)


if __name__ == '__main__':
    resolve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
391d9a14de710453f5a3315da0ab8bf862bcd5f2
|
93e5b82332af9f0d3e203d086e30794fb90a2086
|
/ForKids/chapter11/dark_green_circle.py
|
e339c298cdebe175e39607c47f1ada7d0798a7ae
|
[] |
no_license
|
swell1009/ex
|
cfaae0b5fe917f12416170dce60f7dea8194f368
|
29b274fb51adbdc43af6ebecaec89c97bc58be6f
|
refs/heads/master
| 2020-04-04T10:15:20.578932
| 2018-11-22T06:27:30
| 2018-11-22T06:27:30
| 155,848,187
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
import turtle

t = turtle.Pen()


def mycircle(red, green, blue, radius=50):
    """Draw a filled circle in the given RGB color (components in 0.0-1.0).

    radius was previously hard-coded; it defaults to 50 so existing calls
    behave exactly as before.
    """
    t.color(red, green, blue)
    t.begin_fill()
    t.circle(radius)
    t.end_fill()


# Dark green: no red, half-intensity green, no blue.
mycircle(0, 0.5, 0)
|
[
"swell1009@qq.com"
] |
swell1009@qq.com
|
18e21f4ddcedd079fc47959f47eab0660531d78c
|
d326cd8d4ca98e89b32e6a6bf6ecb26310cebdc1
|
/BioinformaticsStronghold/rna/rna.py
|
adb1292a38f53d25bd1f4e80d516beb0647b67f3
|
[] |
no_license
|
dswisher/rosalind
|
d6af5195cdbe03adb5a19ed60fcbf8c05beac784
|
4519740350e47202f7a45ce70e434f7ee15c6afc
|
refs/heads/master
| 2021-08-09T02:58:17.131164
| 2017-11-12T01:26:26
| 2017-11-12T01:26:26
| 100,122,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
import sys
if len(sys.argv) < 2:
print 'You must specify the name of the file to load.'
sys.exit(1)
s = file(sys.argv[1]).read()
rna = ''
for c in s:
if c == 'T':
rna += 'U'
else:
rna += c
print rna
|
[
"big.swish@gmail.com"
] |
big.swish@gmail.com
|
1c05115db2ff9b5a220b165aa2ded0b8753fb727
|
5c72ec8fb05492ebbb97c2a2a1b24e85c4a2b03e
|
/Algorithms and data structures/2 Data structures/Arrays/Reverse/reverse.py
|
f0e937528bea08b1ba71b9194fbe5c14007a6914
|
[] |
no_license
|
mxmaslin/Shultais-education
|
2b732a9dfd713fcc4c7bd29101fae4e4c2d7ffae
|
b7e82aa34b304a090a1acd16d309f95550c59dca
|
refs/heads/master
| 2020-08-31T21:21:28.801596
| 2019-11-05T16:24:43
| 2019-11-05T16:24:43
| 218,789,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,764
|
py
|
class Array:
    """A fixed-capacity linear array backed by a pre-allocated Python list.

    Slots beyond the filled prefix hold None; ``length`` tracks how many
    slots are in use and ``size`` is the total capacity.
    """

    def __init__(self, size):
        # Allocate the full backing store up front.
        self.data = [None] * size
        # Number of filled slots (the array starts empty).
        self.length = 0
        # Total capacity.
        self.size = size

    def append(self, value):
        """Append *value* after the last filled slot. O(1).

        Raises OverflowError when the array is already at capacity.
        """
        if self.length == self.size:
            raise OverflowError
        self.data[self.length] = value
        self.length += 1

    def reverse(self):
        """Reverse the filled portion of the array in place."""
        filled = self.data[:self.length]
        filled.reverse()
        self.data[:self.length] = filled

    def __str__(self):
        """Format the filled elements like a Python list literal."""
        return "[" + ", ".join(str(item) for item in self.data[:self.length]) + "]"
# Smoke test 1: reverse a full even-length array.
array = Array(4)
array.append(6)
array.append(2)
array.append(1)
array.append(9)
array.reverse()
assert str(array) == '[9, 1, 2, 6]'
# Smoke test 2: reverse an odd-length array (middle element stays put).
array = Array(5)
array.append(6)
array.append(2)
array.append(1)
array.append(9)
array.append(10)
array.reverse()
assert str(array) == '[10, 9, 1, 2, 6]'
|
[
"zapzarap@yandex.ru"
] |
zapzarap@yandex.ru
|
8598a3824baa01b0f2e672df3a660f0fa4b48861
|
40e9169343968444c764b41f5945c6e00ad6ecd6
|
/test_for_calc_test.py
|
115d2e221016755bd9f2368e9c04786f6923ff2c
|
[] |
no_license
|
a-bautista/ci-python
|
62ff4b3a19f9c293f665a4e62a44936c9ac001ce
|
19f9ef9db261897a3ef5977e710ca42ee98de662
|
refs/heads/master
| 2020-04-15T01:37:31.290988
| 2019-01-06T07:15:21
| 2019-01-06T07:15:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import calc_test
class TestCalculator:
    """Pytest-style tests for the calc_test module's arithmetic helpers."""

    def test_addition(self):
        assert calc_test.add(2, 2) == 4

    def test_subtraction(self):
        assert calc_test.subtract(4, 2) == 2
|
[
"alex.bautista.ramos.90@gmail.com"
] |
alex.bautista.ramos.90@gmail.com
|
953c91eff3d40c25d9b77ced31dd89cc264c21d9
|
6982c3c54ee9199d93fb89c61cfdcba15b9b7012
|
/fluentpython/chapter11/demo06.py
|
94b5b1596302e2ffce45b78a46450e090bfe2b27
|
[] |
no_license
|
gzgdouru/python_study
|
a640e1097ebc27d12049ded53fb1af3ba9729bac
|
e24b39e82e39ee5a5e54566781457e18c90a122a
|
refs/heads/master
| 2020-03-29T11:33:13.150869
| 2019-03-08T09:24:29
| 2019-03-08T09:24:29
| 149,858,658
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
'''
多继承和方法解析顺序
'''
class A:
    """Root of the diamond hierarchy; provides ping()."""
    def ping(self):
        print("ping:", self)
class B(A):
    """Left branch of the diamond; adds a lowercase pong()."""
    def pong(self):
        print("pong:", self)
class C(A):
    """Right branch of the diamond; its pong() prints uppercase PONG to show which branch ran."""
    def pong(self):
        print("PONG:", self)
class D(B, C):
    """Tip of the diamond. MRO: D -> B -> C -> A -> object."""
    def ping(self):
        # super() follows the MRO; B and C define no ping, so this is A.ping.
        super().ping()
        print("pong-ping:", self)
    def pingpong(self):
        # Five calls contrasting dynamic lookup, super(), and explicit dispatch:
        self.ping()      # D.ping: A.ping plus the extra "pong-ping" line
        super().ping()   # next ping after D in the MRO -> A.ping
        self.pong()      # first pong in the MRO -> B.pong
        super().pong()   # next pong after D in the MRO -> also B.pong
        C.pong(self)     # explicit class call, bypassing the MRO -> C.pong
if __name__ == "__main__":
d = D()
d.pingpong()
|
[
"18719091650@163.com"
] |
18719091650@163.com
|
6d7d036f9849749bfb43e031add73277c67f841f
|
b2ba670818623f8ab18162382f7394baed97b7cb
|
/test-data/AndroidSlicer/Carnote/DD/17.py
|
32b94fd9d389f16b7824dc82e3080ae2e9a2589b
|
[
"MIT"
] |
permissive
|
hsumyatwin/ESDroid-artifact
|
012c26c40537a79b255da033e7b36d78086b743a
|
bff082c4daeeed62ceda3d715c07643203a0b44b
|
refs/heads/main
| 2023-04-11T19:17:33.711133
| 2022-09-30T13:40:23
| 2022-09-30T13:40:23
| 303,378,286
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
#start monkey test seedNo 0
# Jython monkeyrunner replay script: reproduces a recorded tap sequence in the
# QuickNote app on an attached Android device/emulator. Runs under the Android
# SDK's `monkeyrunner` tool (Jython), not CPython.
import os;
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
# Block until a device is attached, then launch the app's main activity.
device = MonkeyRunner.waitForConnection()
package = 'com.spisoft.quicknote'
activity ='com.spisoft.quicknote.MainActivity'
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(0.5)
MonkeyRunner.sleep(0.5)
# Replay the recorded taps, pausing 0.5s between events so the UI can settle.
# Coordinates are absolute screen pixels from the recording device.
device.touch(945,1127, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(982,153, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(699,932, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(923,1695, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(963,1730, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(62,124, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(165,437, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(432,463, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(660,748, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(467,678, 'DOWN_AND_UP')
|
[
"hsumyatwin@gmail.com"
] |
hsumyatwin@gmail.com
|
da294679cccbfb84c52a9e21d94a08a84ffd77b2
|
723e7aab2dddb92655801bd7cfe1469d08e664c6
|
/reveal_fp7_module/reveal-popularity-prediction/reveal_popularity_prediction/output/wp5_output.py
|
72312b2ee91f439c1e552471e58e9c0c7dd361a5
|
[
"Apache-2.0"
] |
permissive
|
elceespatial/news-popularity-prediction
|
2fdf1186782da7a7604aeffcbb3eeed46214b47e
|
5f66982c659de017665116297bb4fd29ca13f835
|
refs/heads/master
| 2022-02-09T03:27:49.179710
| 2017-12-15T17:08:10
| 2017-12-15T17:08:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,663
|
py
|
# -*- coding: <UTF-8> -*-
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import json
from kombu.utils import uuid
from reveal_popularity_prediction.output.rabbitmq_util import rabbitmq_server_service,\
establish_rabbitmq_connection,\
simple_notification, simpler_notification
def publish_to_wp5(prediction_json,
                   rabbitmq_dict,
                   assessment_id):
    """Serialize the WP5 prediction report and publish it on the given channel.

    rabbitmq_dict must contain: "rabbitmq_uri", "rabbitmq_queue",
    "rabbitmq_exchange", "rabbitmq_routing_key" and an open "channel".
    """
    # NOTE(review): rabbitmq_uri is only used by the commented-out legacy
    # connection path below; the lookup is kept so a missing key fails fast.
    rabbitmq_uri = rabbitmq_dict["rabbitmq_uri"]
    rabbitmq_queue = rabbitmq_dict["rabbitmq_queue"]
    rabbitmq_exchange = rabbitmq_dict["rabbitmq_exchange"]
    rabbitmq_routing_key = rabbitmq_dict["rabbitmq_routing_key"]
    rabbitmq_channel = rabbitmq_dict["channel"]
    # Legacy path (restart broker + fresh connection per message), disabled:
    # rabbitmq_server_service("restart")
    # rabbitmq_connection = establish_rabbitmq_connection(rabbitmq_uri)
    # Make wp5 json report.
    json_report = make_w5_json_report(prediction_json,
                                      assessment_id)
    json_report_string = json.dumps(json_report)
    # print("wp5", json_report_string)
    # simple_notification(rabbitmq_connection, rabbitmq_queue, rabbitmq_exchange, rabbitmq_routing_key, json_report_string)
    simpler_notification(rabbitmq_channel, rabbitmq_queue, rabbitmq_exchange, rabbitmq_routing_key, json_report_string)
def make_w5_json_report(prediction_json,
                        assessment_id):
    """Assemble the WP5 JSON report dict for a single prediction item."""
    latest_snapshot = prediction_json["snapshots"][-1]
    return {
        "certh:tweet_url": form_tweet_url(prediction_json),
        "certh:highly_controversial": is_highly_controversial(prediction_json),
        "certh:item_url": prediction_json["url"],
        # The first timestamp of the newest snapshot is the posting time.
        "certh:time_posted": latest_snapshot["timestamp_list"][0],
        "certh:assessment_timestamp": prediction_json["assessment_timestamp"],
        "certh:assessment_id": assessment_id,
        "certh:platform": prediction_json["platform_name"],
        "certh:current_time_stats": form_current_time_stats(prediction_json),
        "certh:prediction_stats": form_prediction_stats(prediction_json),
    }
def form_tweet_url(item):
    """Build the canonical twitter.com status URL for the item's tweet."""
    # repr() keeps the original formatting of the (possibly very large) id.
    return "https://twitter.com/{0}/status/{1}".format(item["user_screen_name"],
                                                       repr(item["tweet_id"]))
def is_highly_controversial(item):
    """Return True when the predicted controversiality exceeds the 0.1 threshold."""
    return item["predictions"]["controversiality"] > 0.1
def form_features_dict(item):
    """Prefix every feature of the latest snapshot with the 'certh:' namespace."""
    latest_features = item["snapshots"][-1]["features"]
    return {"certh:" + name: value for name, value in latest_features.items()}
def form_current_time_stats(item):
    """Collect the observed (current-time) statistics for the WP5 report.

    Raises RuntimeError for platforms other than YouTube and Reddit.
    """
    stats = dict()
    stats["certh:time_collected"] = item["tweet_timestamp"]
    stats["certh:features"] = form_features_dict(item)
    platform = item["platform_name"]
    if platform == "YouTube":
        user_prefix = "https://www.youtube.com/channel/"
    elif platform == "Reddit":
        user_prefix = "https://www.reddit.com/user/"
    else:
        print("Invalid platform name.")
        raise RuntimeError
    stats["certh:user_set"] = [user_prefix + user_url
                               for user_url in item["snapshots"][-1]["user_set"]]
    targets = item["targets"]
    stats["certh:comment_count"] = targets["comment_count"]
    stats["certh:user_count"] = targets["user_count"]
    stats["certh:upvote_count"] = targets["upvote_count"]
    stats["certh:downvote_count"] = targets["downvote_count"]
    stats["certh:score"] = targets["score"]
    stats["certh:controversiality"] = targets["controversiality"]
    return stats
def form_prediction_stats(item):
    """Collect the predicted statistics and the prediction window for the report."""
    window = item["prediction_window"]
    predictions = item["predictions"]
    return {
        "certh:prediction_window": [window["prediction_lower_timestamp"],
                                    window["prediction_upper_timestamp"]],
        "certh:comment_count_prediction": predictions["comments"],
        "certh:user_count_prediction": predictions["users"],
        "certh:score_prediction": predictions["score"],
        "certh:controversiality_prediction": predictions["controversiality"],
    }
def check_wp5_rabbitmq_connection(wp5_rabbitmq_connection,
                                  wp5_rabbitmq_queue,
                                  wp5_rabbitmq_exchange,
                                  wp5_rabbitmq_routing_key,
                                  rabbitmq_connection,
                                  rabbitmq_queue,
                                  rabbitmq_exchange,
                                  rabbitmq_routing_key,
                                  assessment_id):
    """Derive the WP5 queue/exchange/routing-key names and ensure a connection.

    Returns (connection, queue, exchange, routing_key) for the WP5 channel.
    """
    # NOTE(review): the incoming wp5_rabbitmq_queue/exchange/routing_key values
    # are unconditionally overwritten below, and rabbitmq_queue/exchange/
    # routing_key are never read -- the parameters appear to be kept only for
    # call-site symmetry; confirm before simplifying the signature.
    wp5_rabbitmq_exchange = assessment_id + "_certh_popularity_prediction"
    # uuid() (kombu.utils) gives each consumer an exclusive, unique queue name.
    wp5_rabbitmq_queue = "certh_popularity_prediction.gen-%s" % uuid()
    wp5_rabbitmq_routing_key = "reveal_routing"
    # Fall back to the generic connection when no WP5-specific one exists yet.
    if wp5_rabbitmq_connection is None:
        wp5_rabbitmq_connection = rabbitmq_connection
    return wp5_rabbitmq_connection,\
           wp5_rabbitmq_queue,\
           wp5_rabbitmq_exchange,\
           wp5_rabbitmq_routing_key
|
[
"georgevrizos@gmail.com"
] |
georgevrizos@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.