blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
83060bc4714b82ea8cc3e7bba7f4ce4411fdd011 | bc10a5df6d3f7e59cea6bf364ac614d39d04ac75 | /submodule/template_lib/pytorch/fgsm_tutorial.py | 45008790ddb86792173526de3d373db99817e2af | [
"MIT"
] | permissive | AnonymousGFR/wbgan.pytorch | f65efdb1f5a6678b7344a6e4186f6e78d446d53c | d75cb6599852e901df0136db87520e3314f8ca71 | refs/heads/master | 2023-04-14T06:22:34.542167 | 2019-10-08T14:28:46 | 2019-10-08T14:28:46 | 210,862,958 | 1 | 0 | MIT | 2023-03-24T23:08:30 | 2019-09-25T14:17:04 | Python | UTF-8 | Python | false | false | 5,192 | py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
# Output directory for downloaded MNIST data and results.
outdir = 'results/fgsm'
# Perturbation magnitudes to sweep; epsilon == 0 is the clean-accuracy baseline.
epsilons = [0, .05, .1, .15, .2, .25, .3]
# Pre-trained LeNet checkpoint expected under <outdir>/data.
pretrained_model = f"{outdir}/data/lenet_mnist_model.pth"
# Use the GPU when one is available (checked again below).
use_cuda = True
# LeNet Model definition
class Net(nn.Module):
    """Small LeNet-style CNN for MNIST: (N, 1, 28, 28) in, 10 log-probabilities out.

    The attribute names (conv1, conv2, conv2_drop, fc1, fc2) are the keys of
    the pretrained checkpoint's state_dict and must not be renamed.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Stage 1: conv -> 2x2 max-pool -> relu
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        # Stage 2: conv -> channel dropout -> 2x2 max-pool -> relu
        out = self.conv2_drop(self.conv2(out))
        out = F.relu(F.max_pool2d(out, 2))
        # Flatten: 20 channels * 4 * 4 spatial = 320 features.
        out = out.view(-1, 320)
        out = F.relu(self.fc1(out))
        # Inactive after model.eval() because training=self.training.
        out = F.dropout(out, training=self.training)
        return F.log_softmax(self.fc2(out), dim=1)
# MNIST Test dataset and dataloader declaration.
# batch_size=1 because the attack perturbs one image at a time.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(f'{outdir}/data', train=False, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ])),
    batch_size=1, shuffle=True)
# Define what device we are using
print("CUDA Available: ", torch.cuda.is_available())
device = torch.device(
    "cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
# Initialize the network
model = Net().to(device)
# Load the pretrained model.
# map_location='cpu' lets the checkpoint load even without a GPU; the module
# was already moved to the chosen device above.
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()
# FGSM attack code
def fgsm_attack(image, epsilon, data_grad):
    """Fast Gradient Sign Method step: x' = clamp(x + epsilon * sign(grad), 0, 1).

    Every pixel moves by +/- epsilon along the sign of the loss gradient,
    then the result is clipped back into the valid [0, 1] image range.
    """
    perturbation = epsilon * data_grad.sign()
    adversarial = (image + perturbation).clamp(0, 1)
    return adversarial
def test(model, device, test_loader, epsilon):
    """Attack every correctly-classified test image with FGSM at `epsilon`.

    Returns (final_acc, adv_examples): accuracy under attack, plus up to five
    (initial_prediction, final_prediction, image_array) tuples for plotting.
    Relies on the module-level fgsm_attack helper.
    """
    # Accuracy counter
    correct = 0
    adv_examples = []
    # Loop over all examples in test set (batch_size is 1, so one image each)
    for data, target in test_loader:
        # Send the data and label to the device
        data, target = data.to(device), target.to(device)
        # Set requires_grad attribute of tensor. Important for Attack:
        # we need the gradient of the loss w.r.t. the *input* image.
        data.requires_grad = True
        # Forward pass the data through the model
        output = model(data)
        init_pred = output.max(1, keepdim=True)[
            1]  # get the index of the max log-probability
        # If the initial prediction is wrong, don't bother attacking, just move on
        # (note: the skipped image still counts in the accuracy denominator)
        if init_pred.item() != target.item():
            continue
        # Calculate the loss
        loss = F.nll_loss(output, target)
        # Zero all existing gradients
        model.zero_grad()
        # Calculate gradients of model in backward pass
        loss.backward()
        # Collect the gradient of the loss w.r.t. the input image
        data_grad = data.grad.data
        # Call FGSM Attack
        perturbed_data = fgsm_attack(data, epsilon, data_grad)
        # Re-classify the perturbed image
        output = model(perturbed_data)
        # Check for success
        final_pred = output.max(1, keepdim=True)[
            1]  # get the index of the max log-probability
        if final_pred.item() == target.item():
            correct += 1
            # Special case for saving 0 epsilon examples (clean baseline images)
            if (epsilon == 0) and (len(adv_examples) < 5):
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
        else:
            # Save some adv examples for visualization later
            if len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
    # Calculate final accuracy for this epsilon.
    # len(test_loader) is the number of batches; with batch_size=1 that equals
    # the number of images.
    final_acc = correct / float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct,
                                                             len(test_loader),
                                                             final_acc))
    # Return the accuracy and an adversarial example
    return final_acc, adv_examples
# Per-epsilon results: accuracies[i] and examples[i] correspond to epsilons[i].
accuracies = []
examples = []
# Run test for each epsilon
for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)
# Accuracy-vs-epsilon curve: accuracy drops as the perturbation grows.
plt.figure(figsize=(5, 5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()
# Plot several examples of adversarial samples at each epsilon:
# one row per epsilon, titled "original prediction -> adversarial prediction".
cnt = 0
plt.figure(figsize=(8, 10))
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons), len(examples[0]), cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig, adv, ex = examples[i][j]
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
| [
"you@example.com"
] | you@example.com |
ed8fd8a7e250c616bfaaad5bd06f39a58258aad1 | e6d862a9df10dccfa88856cf16951de8e0eeff2b | /VMS/core/python/setup.py | 145c7ec74b08da59c3c13b1c966f6979da01ad6e | [] | no_license | AllocateSoftware/API-Stubs | c3de123626f831b2bd37aba25050c01746f5e560 | f19d153f8e9a37c7fb1474a63c92f67fc6c8bdf0 | refs/heads/master | 2022-06-01T07:26:53.264948 | 2020-01-09T13:44:41 | 2020-01-09T13:44:41 | 232,816,845 | 0 | 0 | null | 2022-05-20T21:23:09 | 2020-01-09T13:34:35 | C# | UTF-8 | Python | false | false | 1,110 | py | # coding: utf-8
"""
VMS API
## Description API to be impemented by VMS systems for integration into HealthSuite business processes # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@allocatesoftware.com
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "api-server"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="VMS API",
author="API support",
author_email="support@allocatesoftware.com",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "VMS API"],
install_requires=REQUIRES,
packages=find_packages(exclude=["test", "tests"]),
include_package_data=True,
long_description="""\
## Description API to be impemented by VMS systems for integration into HealthSuite business processes # noqa: E501
"""
)
| [
"nigel.magnay@gmail.com"
] | nigel.magnay@gmail.com |
6f55d1a15017e4db87d7d41770a35d0bedbffe61 | 31fd6c73c37b5065221427d8272e92afc5a79a53 | /src/uniform-tilings/coxeter/integer.py | afcbbb6ec064ff1d47a0d7422bfb08a8eff48433 | [
"MIT"
] | permissive | GenerousMan/pywonderland | 2b28d59aea65fc402dcbea8be585c4e30c7fed3b | 28b7611066fde2590ab9f60971248eb47809ec3e | refs/heads/master | 2020-12-28T09:07:27.086833 | 2020-02-04T11:48:08 | 2020-02-04T11:48:08 | 238,259,252 | 1 | 0 | MIT | 2020-02-04T17:04:20 | 2020-02-04T17:04:19 | null | UTF-8 | Python | false | false | 785 | py | """
Helper functions for integer arithmetic.
"""
from collections import defaultdict
def lcm(m, n):
    """Return the least common multiple of m and n (always non-negative).

    By convention lcm(m, 0) == lcm(0, n) == 0.  Negative inputs are allowed;
    the result is their |m|,|n| lcm, matching the original implementation.
    """
    if m * n == 0:
        return 0
    # Idiom: use the stdlib gcd instead of a hand-rolled Euclidean loop.
    from math import gcd
    return abs(m * n) // gcd(abs(m), abs(n))
def decompose(n):
    """Decompose an integer `n` into a product of primes.

    The result is stored in a dict {prime: exponent}.  The sign of `n` is
    ignored, and decompose(1) / decompose(-1) return an empty mapping.
    This function is used for generating cyclotomic polynomials.

    Raises:
        ValueError: if n == 0.  (Zero has no prime factorization; the
        previous version looped forever because 0 % 2 == 0 is always true.)
    """
    n = abs(n)
    if n == 0:
        raise ValueError("0 has no prime factorization")
    primes = defaultdict(int)
    # factor 2
    while n % 2 == 0:
        primes[2] += 1
        n = n // 2
    # odd prime factors up to sqrt of the remaining (odd) cofactor
    for i in range(3, int(n**0.5) + 1, 2):
        while n % i == 0:
            primes[i] += 1
            n = n // i
    # if what is left is itself prime
    if n > 2:
        primes[n] += 1
    return primes
| [
"mathzhaoliang@gmail.com"
] | mathzhaoliang@gmail.com |
f6a8ae1541c9a1aea0034d32f065444a664ebed5 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/55/usersdata/111/22852/submittedfiles/av2_p3_civil.py | 1f8bc8ba880d7c45429f3ca8df20c55e347dfe9a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def peso(a,x,y):
    # NOTE(review): apparent intent -- sum the submatrix at and below/right of
    # position (x, y), minus the element itself.  As written the function does
    # not run; see the inline notes.
    soma=0
    c=0
    d=0
    for i in range(0,a.shape[0],1):
        for j in range(0,a.shape[1],1):
            # NOTE(review): missing ':' (SyntaxError); a[i+1] also reads a whole
            # row and overruns the last index -- probably meant a[i, j].
            if x==a[i+1]
                c=i
            # NOTE(review): same problems as the test above.
            if y==a[j+1]
                d=j
    for i in range(c,a.shape[0]+1,1):
        for j in range(d,a.shape[1]+1,1):
            # NOTE(review): ranges end at shape + 1, one past the last valid
            # index -- this raises IndexError once the syntax is fixed.
            soma=soma+a[i,j]
    return soma-a[c,d]
# NOTE(review): this script is Python 2 (`input` returning evaluated numbers,
# `print` statements) and will not run under Python 3 as-is.
linhas=input('Digite a quantidade de linhas: ')
colunas=input('Digite a quantidade de colunas: ')
a=np.zeros((linhas,colunas))
for i in range(0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=input('Digite os elementos da matriz: ')
n1=input('Digite o valor da posição x: ')
n2=input('Digite o valor da posição y: ')
# NOTE(review): keeps only the element from the final loop iteration;
# probably meant the whole matrix `a`.
matriz=a[i,j]
# NOTE(review): x and y are undefined here -- the positions read above were
# stored in n1 and n2, so this raises NameError; probably peso(a, n1, n2).
resultado=peso(a,x,y)
print matriz
print resultado
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e7433943b06f1d78b9a7fd9a4c54496f2fd4de75 | 6987dfe763caed29613ae255c00821ffb25f44c9 | /utils/ltp/ltp_xml.py | b9c9c1205fdcd7370b8f2fc55893503643bb2607 | [] | no_license | sapnajayavel/FakeReview-Detector | 70a4f9decc7214f796f362b1e53736dd4c3ef71f | ab3ebef3cc0bb6c483c18d39bff1277d2e98c9c0 | refs/heads/master | 2021-01-22T13:30:58.090821 | 2013-06-16T23:42:37 | 2013-06-16T23:42:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/env python2.7
#encoding=utf-8
"""
"""
from xml.dom.minidom import parse,parseString
class ltpXML:
    """Wraps one LTP dependency-parse result (a sentence plus its XML analysis).

    The word surface forms and dependency relations are extracted from the
    XML once at construction time and cached on the instance.
    """

    def __init__(self, sentence, xml_str):
        self.sentence = sentence
        self.xml_str = xml_str
        self.relate_list = self.get_relate_list()
        self.word_list = self.get_word_list()

    def get_clause_relate_path(self):
        """Return every dependency relation joined by '@' (no trailing '@')."""
        path = "".join(rel + "@" for rel in self.relate_list)
        return path.rstrip('@')

    def get_word_list(self):
        """Extract the 'cont' attribute of every <word> element."""
        dom = parseString(self.xml_str)
        return [node.getAttribute('cont')
                for node in dom.getElementsByTagName('word')]

    def get_relate_list(self):
        """Extract the 'relate' attribute of every <word> element."""
        dom = parseString(self.xml_str)
        return [node.getAttribute('relate')
                for node in dom.getElementsByTagName('word')]
| [
"yangxue00.yxmn@gmail.com"
] | yangxue00.yxmn@gmail.com |
a063e399690c0d581577baa5acc8bb0f037038f6 | 698c383b77111d53495268b64d45f39ffcba126d | /homebooks/accounts/utils/make_user_yaml.py | dd16087b243bf1cb875dca3c6c2651217cc629e0 | [] | no_license | noname2048/ab-homebooks-django | 5f42080d17c9e16a68210766bab11cff56ef74dd | 702e2957ea48eb4fdee8d00558d2ded1434e170d | refs/heads/main | 2023-07-07T15:28:36.000023 | 2021-08-10T12:27:46 | 2021-08-10T12:27:46 | 363,648,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import yaml
from faker import Factory
from yaml import load, dump
from pathlib import Path
# Fixture file path, anchored to this script's directory (not the CWD).
# BUG FIX: the original appended the file name to the resolved *file* path
# (Path(__file__).resolve() / "test_users.yaml"), a path that can never
# exist, so the fixture was regenerated on every run and the read branch
# was unreachable.
YAML_PATH = Path(__file__).resolve().parent / "test_users.yaml"

if not YAML_PATH.exists():
    # Generate 100 reproducible fake users.
    fake = Factory.create("en_US")
    fake.seed(1)  # NOTE(review): newer Faker versions use Faker.seed(1) -- confirm installed version
    user_list = [{"username": fake.user_name(), "email": fake.email()} for _ in range(100)]
    try:
        # Prefer the C-accelerated loader/dumper when libyaml is available.
        from yaml import CLoader as Loader, CDumper as Dumper
    except ImportError:
        from yaml import Loader, Dumper
    with open(YAML_PATH, "w") as f:
        dump(user_list, f)
else:
    # BUG FIX: the original opened the file for *writing* ("w"), truncating
    # it before reading, and passed the loader as lowercase `loader=`, which
    # is not a valid keyword for yaml.load (TypeError).
    with open(YAML_PATH, "r") as f:
        user_list = load(f, Loader=yaml.FullLoader)
    print(user_list)
| [
"sungwook.csw@gmail.com"
] | sungwook.csw@gmail.com |
40058b6bf2a6a6dbfac1e5619d0b93b834b79775 | 1bb2a9150de01c618163bbb8f872bdce6f14df4f | /Puzzle/030_set_tap.py | f618728840144e90b9c2fb12dd4b874157cd6e85 | [] | no_license | whyj107/Algorithm | a1c9a49a12a067366bd0f93abf9fa35ebd62102e | aca83908cee49ba638bef906087ab3559b36b146 | refs/heads/master | 2023-04-14T12:59:52.761752 | 2021-05-01T03:53:31 | 2021-05-01T03:53:31 | 240,014,212 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py | # 멀티 탭으로 만든 문어 다리 배선
# 문제
# n=20일 때 몇 가지의 멀티 탭 배치를 생각할 수 있는지 구해 보세요
# (단, 전원 용량은 생각하지 않기로 한다.)
N = 20
def set_tap1(remain):
    """Count the distinct power-strip trees providing `remain` outlets.

    A node is either a single outlet (leaf) or a 2- or 3-socket strip whose
    subtrees are counted as an unordered multiset.  Plain recursion without
    memoization -- see set_tap2 for the memoized variant.

    BUG FIX: the all-three-parts-equal test read `remain - (i - j) == i`; it
    must be `remain - (i + j) == i` (as in set_tap2).  The broken condition
    made the C(f+2, 3) multiset branch unreachable, so triples of three equal
    subtrees fell into the wrong formula (first visible at remain == 9).
    """
    if remain == 1:
        return 1
    cnt = 0
    # 2-socket strips: split into parts i and remain-i with i <= remain-i.
    for i in range(1, remain // 2 + 1):
        if remain - i == i:
            # Two identical halves: "choose 2 with repetition" = C(f+1, 2).
            cnt += set_tap1(i) * (set_tap1(i) + 1) // 2
        else:
            cnt += set_tap1(remain - i) * set_tap1(i)
    # 3-socket strips: split into parts i <= j <= k with i + j + k == remain.
    for i in range(1, remain // 3 + 1):
        for j in range(i, (remain - i) // 2 + 1):
            if (remain - (i + j) == i) and (i == j):
                # All three parts equal: C(f+2, 3) multisets.
                cnt += set_tap1(i) * (set_tap1(i) + 1) * (set_tap1(i) + 2) // 6
            elif remain - (i + j) == i:
                cnt += set_tap1(i) * (set_tap1(i) + 1) * set_tap1(j) // 2
            elif i == j:
                # The two smaller parts are equal.
                cnt += set_tap1(remain - (i + j)) * set_tap1(i) * (set_tap1(i) + 1) // 2
            elif remain - (i + j) == j:
                # The two larger parts are equal.
                cnt += set_tap1(j) * (set_tap1(j) + 1) * set_tap1(i) // 2
            else:
                cnt += set_tap1(remain - (i + j)) * set_tap1(j) * set_tap1(i)
    return cnt


#print(set_tap1(N))
# Memo table shared across calls; 1 outlet has exactly one configuration.
memo = {1: 1}


def set_tap2(remain):
    """Memoized count of power-strip trees providing `remain` outlets.

    Same recurrence as set_tap1: a node is a leaf or a 2-/3-socket strip whose
    subtrees form an unordered multiset; results are cached in `memo`.
    """
    if remain in memo:
        return memo[remain]
    total = 0
    # 2-socket strips: parts a <= b with a + b == remain.
    for a in range(1, remain // 2 + 1):
        b = remain - a
        if b == a:
            fa = set_tap2(a)
            total += fa * (fa + 1) // 2  # identical halves: C(f+1, 2)
        else:
            total += set_tap2(b) * set_tap2(a)
    # 3-socket strips: parts a <= b <= c with a + b + c == remain.
    for a in range(1, remain // 3 + 1):
        for b in range(a, (remain - a) // 2 + 1):
            c = remain - (a + b)
            if c == a and a == b:
                fa = set_tap2(a)
                total += fa * (fa + 1) * (fa + 2) // 6  # all equal: C(f+2, 3)
            elif c == a:
                fa = set_tap2(a)
                total += fa * (fa + 1) * set_tap2(b) // 2
            elif a == b:
                fa = set_tap2(a)
                total += set_tap2(c) * fa * (fa + 1) // 2
            elif c == b:
                fb = set_tap2(b)
                total += fb * (fb + 1) * set_tap2(a) // 2
            else:
                total += set_tap2(c) * set_tap2(b) * set_tap2(a)
    memo[remain] = total
    return total
print(set_tap2(N))
| [
"60024292+whyj107@users.noreply.github.com"
] | 60024292+whyj107@users.noreply.github.com |
174d29bde1166d77b5ea8428c53b1229672719c3 | 3cc2f47de6d78d610a2887f92bfba150b2994888 | /application/utils/helper.py | a55fae66e681c0efa5cc87d0be94b0835edd7f75 | [] | no_license | fengliu222/blogbar | c8a66df586187d0a16063e4536e76d155863fe17 | ff6e7182f000777112101eed12ae9e2ca4298d25 | refs/heads/master | 2021-01-15T08:59:51.478354 | 2014-12-20T08:13:53 | 2014-12-20T08:13:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | # coding: utf-8
def parse_int(integer, default=None):
"""提取整数,若失败则返回default值"""
try:
return int(integer)
except Exception, e:
return default
| [
"hustlzp@qq.com"
] | hustlzp@qq.com |
5dc8c4505c10729f9eba9ecc3c9bdcb476c5dbda | 43e0cfda9c2ac5be1123f50723a79da1dd56195f | /python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py | 12546ea463a84ac9d8defa21eb099c6346b2678d | [
"Apache-2.0"
] | permissive | jiangjiajun/Paddle | 837f5a36e868a3c21006f5f7bb824055edae671f | 9b35f03572867bbca056da93698f36035106c1f3 | refs/heads/develop | 2022-08-23T11:12:04.503753 | 2022-08-11T14:40:07 | 2022-08-11T14:40:07 | 426,936,577 | 0 | 0 | Apache-2.0 | 2022-02-17T03:43:19 | 2021-11-11T09:09:28 | Python | UTF-8 | Python | false | false | 5,489 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from operator import __add__, __sub__, __mul__, __truediv__
import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard
# Binary operators under test; get_actual_res maps each one to the matching
# paddle.incubate.sparse kernel.
op_list = [__add__, __sub__, __mul__, __truediv__]
def get_actual_res(x, y, op):
    """Dispatch the Python operator `op` to its paddle.incubate.sparse kernel.

    Raises ValueError for any operator outside op_list.
    """
    if op == __add__:
        return paddle.incubate.sparse.add(x, y)
    if op == __sub__:
        return paddle.incubate.sparse.subtract(x, y)
    if op == __mul__:
        return paddle.incubate.sparse.multiply(x, y)
    if op == __truediv__:
        return paddle.incubate.sparse.divide(x, y)
    raise ValueError("unsupported op")
class TestSparseElementWiseAPI(unittest.TestCase):
    """
    test paddle.sparse.add, subtract, multiply, divide

    Each check builds the same random integer data twice -- once as dense
    tensors and once converted to a sparse layout -- runs the op forward and
    backward on both, and asserts that values and gradients agree.
    """

    def setUp(self):
        # Retain grads on intermediate tensors so .grad can be compared below.
        paddle.fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
        np.random.seed(2022)
        self.op_list = op_list
        self.csr_shape = [128, 256]
        self.coo_shape = [4, 8, 3, 5]
        self.support_dtypes = ['float32', 'float64', 'int32', 'int64']

    def func_test_csr(self, op):
        # Compare CSR sparse results (values and grads) against dense results.
        for dtype in self.support_dtypes:
            x = np.random.randint(-255, 255, size=self.csr_shape).astype(dtype)
            y = np.random.randint(-255, 255, size=self.csr_shape).astype(dtype)

            dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
            dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False)

            # Separate leaf tensors for the sparse path so gradients do not mix.
            s_dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
            s_dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False)
            csr_x = s_dense_x.to_sparse_csr()
            csr_y = s_dense_y.to_sparse_csr()

            actual_res = get_actual_res(csr_x, csr_y, op)
            # Backward with the output itself as the incoming gradient.
            actual_res.backward(actual_res)

            expect_res = op(dense_x, dense_y)
            expect_res.backward(expect_res)

            self.assertTrue(
                np.allclose(expect_res.numpy(),
                            actual_res.to_dense().numpy(),
                            equal_nan=True))
            # Integer true-division has no gradient kernel; skip grad checks.
            if not (op == __truediv__ and dtype in ['int32', 'int64']):
                self.assertTrue(
                    np.allclose(dense_x.grad.numpy(),
                                csr_x.grad.to_dense().numpy(),
                                equal_nan=True))
                self.assertTrue(
                    np.allclose(dense_y.grad.numpy(),
                                csr_y.grad.to_dense().numpy(),
                                equal_nan=True))

    def func_test_coo(self, op):
        # Same comparison for COO tensors; only the largest sparse_dim is used.
        for sparse_dim in range(len(self.coo_shape) - 1, len(self.coo_shape)):
            for dtype in self.support_dtypes:
                x = np.random.randint(-255, 255,
                                      size=self.coo_shape).astype(dtype)
                y = np.random.randint(-255, 255,
                                      size=self.coo_shape).astype(dtype)

                dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
                dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False)

                s_dense_x = paddle.to_tensor(x,
                                             dtype=dtype,
                                             stop_gradient=False)
                s_dense_y = paddle.to_tensor(y,
                                             dtype=dtype,
                                             stop_gradient=False)
                coo_x = s_dense_x.to_sparse_coo(sparse_dim)
                coo_y = s_dense_y.to_sparse_coo(sparse_dim)

                actual_res = get_actual_res(coo_x, coo_y, op)
                actual_res.backward(actual_res)

                expect_res = op(dense_x, dense_y)
                expect_res.backward(expect_res)

                self.assertTrue(
                    np.allclose(expect_res.numpy(),
                                actual_res.to_dense().numpy(),
                                equal_nan=True))
                self.assertTrue(
                    np.allclose(dense_x.grad.numpy(),
                                coo_x.grad.to_dense().numpy(),
                                equal_nan=True))
                self.assertTrue(
                    np.allclose(dense_y.grad.numpy(),
                                coo_y.grad.to_dense().numpy(),
                                equal_nan=True))

    def test_support_dtypes_csr(self):
        # CPU-only: the kernels under test are exercised on the CPU backend.
        paddle.device.set_device('cpu')
        if paddle.device.get_device() == "cpu":
            for op in op_list:
                self.func_test_csr(op)

    def test_support_dtypes_coo(self):
        paddle.device.set_device('cpu')
        if paddle.device.get_device() == "cpu":
            for op in op_list:
                self.func_test_coo(op)
if __name__ == "__main__":
    # Run the unit tests on the CPU backend.
    paddle.device.set_device('cpu')
    unittest.main()
"noreply@github.com"
] | jiangjiajun.noreply@github.com |
4946b6aece4fd26a815a69b1f0b8b0e7d424b1f3 | 843ca2944c5a92dc3d13d132c34545d3b18a9eb9 | /scripts/cell/interfaces/Avatar/DialogSystem.py | fc3f3e99bb9671019e60d6fc59f6cc818c81ed2c | [] | no_license | m969/wanfadalu_server | 550dac97795bfc15c1d47f0c18bfab9c18fe6a76 | 08f5bec5ed4a17f4f1fcc40a27311aa03906e5ef | refs/heads/master | 2021-07-03T10:21:21.331204 | 2020-08-14T14:56:42 | 2020-08-14T14:56:42 | 138,024,157 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | # -*- coding: utf-8 -*-
import KBEngine
from KBEDebug import *
import GlobalConst
import PyDatas.level_data as level_data
import PyDatas.dialog_config_Table as dialog_config_Table
from DIALOG_ITEM import TDialogItem
from DIALOG_ITEM import TDialogItemList
# Dialog definitions keyed by dialog id (loaded from dialog_config_Table).
dialogDatas = dialog_config_Table.datas

# Maps a dialog type code to a handler-class name.
# NOTE(review): currently unused -- selectDialogItem dispatches on npcType
# instead (see the commented-out eval() lines there).
dialogTypeMap = {
    1: "Arena",
    2: "Store",
    3: "Sect",
    4: "Task",
}
class DialogSystem:
    """NPC dialog component mixed into the cell-side Avatar entity.

    Builds the dialog option list for arena / store / sect NPCs and routes
    the player's selection to the matching subsystem.  Client-exposed methods
    take an `exposed` entity id that must match self.id, which guards against
    one client driving another player's avatar.
    """

    def __init__(self):
        # DEBUG_MSG("DialogSystem:__init__")
        pass

    def onTimer(self, timerHandle, userData):
        pass

    def requestBuyGoods(self, exposed, spaceID, npcName, goodsID):
        # NOTE(review): stub -- validates the caller but performs no purchase yet.
        DEBUG_MSG("DialogSystem:requestBuyGoods")
        if exposed != self.id:
            return

    def giveGoods(self, goodsID):
        DEBUG_MSG("DialogSystem:giveGoods")

    def deleteGoods(self, goodsID):
        """Delete an item from the backpack."""
        DEBUG_MSG("DialogSystem:deleteGoods")

    def deductMoney(self, num):
        # NOTE(review): log tag says "getMoney" but the method name means
        # "deduct money" -- confirm which is intended.
        DEBUG_MSG("getMoney")

    def requestDialog(self, exposed, npcEntityID):
        """Build and send the dialog option list for the given NPC entity."""
        DEBUG_MSG("DialogSystem:requestDialog")
        if exposed != self.id:
            return
        npc = KBEngine.entities.get(npcEntityID)
        if npc is None:
            return
        # Remember the NPC so selectDialogItem can dispatch on its type later.
        self.dialogNpc = npc
        dialogItems = TDialogItemList()
        dialogItems["values"] = []
        dialogItems["npcName"] = ""
        dialogItems["npcDialog"] = ""
        if npc.npcType == GlobalConst.NpcType_Arena:
            # Arena keeper: offer to enter the arena, or decline (id 0).
            dialogItems["npcName"] = "守擂人"
            dialogItems["npcDialog"] = "你要上擂台吗?"
            item = TDialogItem()
            item["id"] = 1001
            item["content"] = "我要上擂台"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            item = TDialogItem()
            item["id"] = 0
            item["content"] = "算了,怂"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            self.client.OnDialogItemsReturn(dialogItems)
        elif npc.npcType == GlobalConst.NpcType_Store:
            # Merchant: offer to open the store, or decline (id 0).
            dialogItems["npcName"] = "商人"
            dialogItems["npcDialog"] = "你要购买道具吗?"
            item = TDialogItem()
            item["id"] = 1002
            item["content"] = "我要购买道具"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            item = TDialogItem()
            item["id"] = 0
            item["content"] = "算了,穷"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            self.client.OnDialogItemsReturn(dialogItems)
        elif npc.npcType == GlobalConst.NpcType_Sect:
            # Sect gatekeeper: offer to join the sect, or decline (id 0).
            dialogItems["npcName"] = "守宗人"
            dialogItems["npcDialog"] = "你要加入云灵宗吗?"
            item = TDialogItem()
            item["id"] = 1002
            item["content"] = "我要加入宗门"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            item = TDialogItem()
            item["id"] = 0
            item["content"] = "算了,流浪挺好"
            dialogItems["values"].append(item)
            # dialogItems[item["id"]] = item
            self.client.OnDialogItemsReturn(dialogItems)

    def selectDialogItem(self, exposed, dialogID):
        """Handle the player's chosen dialog option (id 0 means 'decline')."""
        DEBUG_MSG("DialogSystem:selectDialogItem")
        if exposed != self.id:
            return
        if dialogID == 0:
            return
        # NOTE(review): dialogData/dialogType are looked up but unused; the
        # table-driven dispatch below is commented out in favor of npcType.
        dialogData = dialogDatas[dialogID]
        dialogType = dialogData["type"]
        # dialogScript = eval(dialogTypeMap[dialogType])()
        # dialogScript.execute(self)
        npcType = self.dialogNpc.npcType
        if npcType == GlobalConst.NpcType_Arena:
            self.requestEnterArena(self.id, self.dialogNpc.arenaID)
        elif npcType == GlobalConst.NpcType_Store:
            self.requestPullStorePropList(self.id, self.dialogNpc.id)
        elif npcType == GlobalConst.NpcType_Sect:
            self.base.requestJoinSect(self.dialogNpc.sectID)

    def getTaskInfo(self, npcName):
        DEBUG_MSG("DialogSystem:getTaskInfo")

    def setTaskFinish(self, npcName, taskIndex, watcherIndex):
        DEBUG_MSG("DialogSystem:setTaskFinish")

    def isTaskFinish(self, npcName, taskIndex):
        DEBUG_MSG("DialogSystem:isTaskFinish")

    def giveAward(self, npcName, taskIndex):
        DEBUG_MSG("DialogSystem:giveAward")

    def giveTask(self, npcName, taskIndex):
        DEBUG_MSG("DialogSystem:giveTask")
| [
"969041327@qq.com"
] | 969041327@qq.com |
fc79f953728d1619a0608c81d27284876bdce5b6 | 16f61285e902285ecade358870a2ab5720bfc3b4 | /utilities/make_nsr_database.py | 350eb90bfb64c5d88363c94b7f0d50bbba4e90cf | [] | no_license | naturalis/galaxy-tool-taxonmatcher | 56dbd56264392fb3f2c477ea7aea65a0cd919a19 | 4e9e337c7156d624db60842ed9c7304d909f479d | refs/heads/master | 2022-11-17T18:35:23.176173 | 2022-11-15T10:15:18 | 2022-11-15T10:15:18 | 149,607,894 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | #!/usr/bin/python3
import sqlite3
import csv
# Module-level connection/cursor shared by every function below.
# NOTE(review): the database file name carries no extension ('nsr_taxonmatcher').
db = sqlite3.connect('nsr_taxonmatcher')
cursor = db.cursor()
def make_database():
    """Create the `nsr` table (one row per taxon) via the module-level cursor."""
    cursor.execute('''CREATE TABLE nsr(id INTEGER PRIMARY KEY, source TEXT, taxonID INTEGER, acceptedNameUsageID INTEGER, taxonomicStatus TEXT, species_rank TEXT, genus_rank TEXT, family_rank TEXT, order_rank TEXT, class_rank TEXT, phylum_rank TEXT, kingdom_rank TEXT, metadata TEXT)''')
    db.commit()
def check_unknowns(data):
    """Replace every falsy value in `data` with the placeholder 'unknown <key>'.

    Mutates `data` in place and also returns it for convenience.
    """
    for key in data:
        if not data[str(key)]:
            data[str(key)] = "unknown " + str(key)
    return data
def add_nsr_taxonomy():
    """Load Taxa.txt (NSR CSV export) and insert one `nsr` row per taxon."""
    with open("Taxa.txt", "r", encoding='latin-1') as csv_file:#, encoding='latin-1'
        nsr = csv.reader(csv_file, delimiter=',')
        for line in nsr:
            #line = line.strip().split("\t")
            # Species epithet: the full name with the authorship substring
            # removed.  NOTE(review): assumes column 2 is the full scientific
            # name and column 3 the authorship -- confirm against Taxa.txt.
            species = line[2].replace(line[3].strip(), "")
            print(line)
            print(species)
            try:
                metadata = line[15]+";"+line[16]+";"+line[3]
            except:
                # NOTE(review): bare except -- rows without columns 15/16 get
                # empty metadata, but any other error is hidden too.
                metadata = ""
            data = {"source":"nsr", "taxonID":line[0], "acceptedNameUsageID":line[1], "taxonomicStatus":line[4], "species_rank":species.strip(), "genus_rank":line[11].strip(), "family_rank":line[10], "order_rank":line[9],"class_rank":line[8], "phylum_rank":line[7], "kingdom_rank":line[6], "metadata":metadata}
            cursor.execute('''INSERT INTO nsr(source, taxonID, acceptedNameUsageID, taxonomicStatus, species_rank, genus_rank, family_rank, order_rank, class_rank, phylum_rank, kingdom_rank, metadata)VALUES(:source, :taxonID, :acceptedNameUsageID, :taxonomicStatus, :species_rank, :genus_rank, :family_rank, :order_rank, :class_rank, :phylum_rank, :kingdom_rank, :metadata)''', data)
    # Commit once after the whole import.
    db.commit()
def main():
    """Build the database, import the taxa, then index the species column."""
    make_database()
    add_nsr_taxonomy()
    # Index species_rank because taxon matching queries on it.
    # NOTE(review): the original file's indentation is ambiguous here; the
    # index must be created after the table exists, so it belongs in main().
    cursor.execute("CREATE INDEX index_nsr_species ON nsr (species_rank);")


if __name__ == "__main__":
    main()
| [
"martenhoogeveen@gmail.com"
] | martenhoogeveen@gmail.com |
58a9216257324f092630be979ba310cc8f359ca1 | 149c90e6366d8adf9cbfd9332612736a41e6dc25 | /__manifest__.py | 224e05af8799187aeda5ad199cb0481c624444e3 | [] | no_license | brahim94/formation | 02549ef1fee7d7432987248e87627b22158a12f6 | 1cd96a6ccfac1d61e524a20b84d4c30d395f5a35 | refs/heads/master | 2023-03-06T16:19:15.883896 | 2021-02-25T18:13:16 | 2021-02-25T18:13:16 | 341,509,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 902 | py | # -*- coding: utf-8 -*-
{
'name': "formation",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/13.0/odoo/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'egov_rh_ma'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/formation.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
}
| [
"brahim-ayad@hotmail.com"
] | brahim-ayad@hotmail.com |
cf7283cf94a1136d00a4c9a0df38866b2d50be20 | 6ecff67d6103ddbd787f78c35182722b83b8a37e | /백준/Python/카테고리/우선순위 큐/11286(절대값 힙).py | c06ae059aafbe6c660e2deb3dda5c19f3f852654 | [] | no_license | jsungmin6/Algorithm | 9ef2339aa00921e7df756a8dff569954a008c118 | bc1ea9de9f7ba3f1aa6616ebef8719540d72e0bf | refs/heads/master | 2023-05-27T06:24:16.123307 | 2021-06-11T09:22:21 | 2021-06-11T09:22:21 | 259,299,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | '''
분류
우선순위 큐
근거
절대값 힙, 작은수부터 출력해 배열에서 제거
풀이과정
1.절대값이 가장 작은 값을 출력 하는데 같을경우에는 -를 출력한다.
힙을 튜블로 저장해서 -,+ 여부를 파악하고 순서를 줘야할 것 같다.
2.다른 풀이를 보니 우선순위큐를 두개써서 음수와 양수를 나누어 저장해 깔끔하게 구했다.
시간복잡도
우선순위 큐는 삽입과 삭제가 O(log(N)) 이다 . N번 수행하니
O(Nlog(N))
'''
import heapq
import sys
input = sys.stdin.readline
N=int(input())
heap=[]
for _ in range(N):
data = int(input())
if data == 0:
if not heap:
print(0)
else:
print(heapq.heappop(heap)[1])
else:
heapq.heappush(heap,(abs(data),data))
#우선순위 큐 두개사용
import sys
import heapq
inp=sys.stdin.readline
n=int(inp())
hp=[]
hn=[]
for _ in range(n):
x=int(inp())
if x==0:
if hp and hn:
if hp[0]<hn[0]:
print(heapq.heappop(hp))
else:
print(-heapq.heappop(hn))
elif hp:
print(heapq.heappop(hp))
elif hn:
print(-heapq.heappop(hn))
else:
print(0)
else:
if x>0:
heapq.heappush(hp,x)
else:
heapq.heappush(hn,-x) | [
"jsungmin506@gmail.com"
] | jsungmin506@gmail.com |
9229c9844bad464a609cd06d4a743ce10d40f974 | 1fb1c41a7b916da23f02db059526f93f071c5479 | /django12/src/blog/views.py | 9f847f4e748441c0399614978594313e2c220119 | [] | no_license | jm40108/1 | 1a70d90ad8f7bfb0e4da55960b075a70766482c5 | 2f11ad1751aa4d5df01c00fcd07fd2c68a93a425 | refs/heads/master | 2020-04-14T01:20:18.752824 | 2018-12-30T03:23:12 | 2018-12-30T03:23:12 | 163,557,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | from django.shortcuts import render
from django.views.generic.list import ListView
from .models import *
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView
from .forms import *
from django.http.response import HttpResponseRedirect
from django.urls.base import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
# Generic views:
# implement a view class by subclassing a generic view and overriding its
# attributes/methods.
# Post list page (index).
class Index(ListView):
    template_name = 'blog/index.html'  # path of the template to render
    model = Post  # model class whose objects are listed
    context_object_name = 'post_list'
    paginate_by = 5
# Post detail page (detail).
class Detail(DetailView):
    template_name = 'blog/detail.html'
    model = Post
    context_object_name = 'obj'
# Post creation page (postRegister); requires a logged-in user.
class PostRegister(LoginRequiredMixin,FormView):
    template_name='blog/postregister.html'
    form_class=PostForm
    context_object_name = 'form'

    def form_valid(self, form):
        # Create the Post without saving yet so the author can be attached.
        obj = form.save(commit=False)#obj = Post instance
        obj.author = self.request.user
        obj.save()
        for f in self.request.FILES.getlist('images'):
            # f: one uploaded image; wrap it in a PostImage linked to the post.
            image = PostImage(post = obj, image = f)
            image.save()
        for f in self.request.FILES.getlist('files'):
            # f: one uploaded attachment; wrap it in a PostFile linked to the post.
            file = PostFile(post = obj, file = f)
            file.save()
        return HttpResponseRedirect(reverse('blog:detail', args=(obj.id,)))
# View class implementing the search feature.
class SearchP(FormView):
    template_name = 'blog/searchP.html'
    form_class = SearchForm
    # NOTE(review): FormView does not use context_object_name; unused but harmless.
    context_object_name = 'form'

    # Override form_valid to handle requests that passed validation.
    def form_valid(self, form):
        # Find the Post objects containing the user's text and render them.
        # Extract the text the user typed.
        search_word = form.cleaned_data['search_word']
        # Fetch the Post objects whose headline contains that text.
        post_list = Post.objects.filter(headline__contains=search_word)
        # Render the results page with the matches.
        return render(self.request, self.template_name, {'form':form, 'search_word':search_word, 'postlist':post_list})
"user@DESKTOP-7EQF3M8"
] | user@DESKTOP-7EQF3M8 |
99f41f0320101cd077bdd50a4a2cb1db34c8fa1e | 3940b4a507789e1fbbaffeb200149aee215f655a | /lc/25.ReverseNodesInK-Group.py | 6607facb67916c01b66962b629574b88293be6ce | [] | no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | # 25. Reverse Nodes in k-Group
# Hard
# 4406
# 427
# Add to List
# Share
# Given a linked list, reverse the nodes of a linked list k at a time and return its modified list.
# k is a positive integer and is less than or equal to the length of the linked list. If the number of nodes is not a multiple of k then left-out nodes, in the end, should remain as it is.
# You may not alter the values in the list's nodes, only nodes themselves may be changed.
# Example 1:
# Input: head = [1,2,3,4,5], k = 2
# Output: [2,1,4,3,5]
# Example 2:
# Input: head = [1,2,3,4,5], k = 3
# Output: [3,2,1,4,5]
# Example 3:
# Input: head = [1,2,3,4,5], k = 1
# Output: [1,2,3,4,5]
# Example 4:
# Input: head = [1], k = 1
# Output: [1]
# Constraints:
# The number of nodes in the list is in the range sz.
# 1 <= sz <= 5000
# 0 <= Node.val <= 1000
# 1 <= k <= sz
# Follow-up: Can you solve the problem in O(1) extra memory space?
# This solution works:
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def reverseKGroup(self, head: 'ListNode', k: int) -> 'ListNode':
        """Reverse the list's nodes in groups of k, in place.

        A trailing group shorter than k is left in its original order.
        Runs in O(n) time and O(1) extra space; the previous version
        buffered every node in Python lists (O(n) extra memory), which the
        problem's follow-up explicitly asks to avoid.

        :param head: head of the singly linked list (may be None)
        :param k: group size, 1 <= k <= list length per the constraints
        :return: head of the re-linked list
        """
        if not head or k == 1:
            # Nothing to reverse: empty list or trivial group size.
            return head

        def _group_end(node, count):
            # Advance count-1 links; None means fewer than `count` nodes remain.
            while node and count > 1:
                node = node.next
                count -= 1
            return node

        new_head = head
        group_start = head
        prev_tail = None  # tail of the previously reversed group
        while group_start:
            end = _group_end(group_start, k)
            if end is None:
                # Fewer than k nodes left: keep them as-is. The link from
                # prev_tail was already set while reversing the last group.
                break
            next_group = end.next
            # Reverse [group_start .. end] in place with three pointers.
            prev, cur = next_group, group_start
            while cur is not next_group:
                nxt = cur.next
                cur.next = prev
                prev = cur
                cur = nxt
            # `end` now heads the reversed group; `group_start` is its tail.
            if prev_tail is None:
                new_head = end
            else:
                prev_tail.next = end
            prev_tail = group_start
            group_start = next_group
        return new_head
| [
"akimi.mimi.yano@gmail.com"
] | akimi.mimi.yano@gmail.com |
4e09d800c43d33123b8450392a65415a90949257 | ddc38e9a10e31c122e19d8fe132e9d73ac954c50 | /tests/conftest.py | 4338383926352a7245060ae3a6035ede61b2a92b | [
"MIT"
] | permissive | Tinche/pyrseia | 8734bddc5f0efcc5ceb901fd80a7bd239306577f | 5abf2eda9be06b7417395a50ef676454bbd8f667 | refs/heads/master | 2023-09-03T17:38:05.059968 | 2020-05-10T01:05:20 | 2020-05-10T01:05:20 | 256,872,895 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | from typing import Callable, TypeVar, Type
from pytest import fixture # type: ignore
from pyrseia import server, Server
from .calculator import Calculator
C = TypeVar("C")
@fixture
def calculator_server_creator() -> Callable[[Type[C]], Server[Calculator, C]]:
    """Pytest fixture: returns a factory building a Calculator server.

    The factory binds trivial implementations for ``call_one`` (identity),
    ``add`` and ``multiply`` onto a fresh ``Server`` for the given context
    class and optional middleware list.
    """
    def create_server(
        ctx_cls: Type[C], middleware=[]
    ) -> Server[Calculator, C]:
        # NOTE(review): mutable default `middleware=[]` is safe only while
        # neither this function nor `server()` mutates it -- confirm.
        serv: Server[Calculator, C] = server(
            Calculator, ctx_cls=ctx_cls, middleware=middleware
        )
        @serv.implement(Calculator.call_one)
        async def impl_test_call_one(i: int) -> int:
            # Identity implementation: echoes its argument.
            return i
        @serv.implement(Calculator.add)
        async def add(a: int, b: int) -> int:
            return a + b
        @serv.implement(Calculator.multiply)
        async def multiply(a: int, b: int) -> int:
            return a * b
        return serv
    return create_server
| [
"tinchester@gmail.com"
] | tinchester@gmail.com |
61a0e08eee109f4b7a51ef5accb0820c64fd186e | d2dbc4eeba22f308634d8accde50062739fb50d1 | /sd/algorithms/partitional.py | 004f4e4ff78310c3a207d2e1a97987346704b6a9 | [
"Apache-2.0"
] | permissive | shibaji7/SuperDARN-Clustering | bf2d793a23dfae105bd4ca5bbac09fd64e60182a | d7427ba609fb7f5e50c26f52364e5e9e118bbc31 | refs/heads/master | 2022-12-24T07:08:52.308469 | 2020-10-06T16:46:08 | 2020-10-06T16:46:08 | 267,057,983 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | #!/usr/bin/env python
"""
partitional.py: module is deddicated to run different partitional algorithms.
Partitional clustering algorithms aim to discover the groupings present in the data by optimizing
a specific objective function and iteratively improving the quality of the partitions. These algorithms
generally require certain user parameters to choose the prototype points that represent each cluster.
For this reason they are also called prototype-based clustering algorithms.
- kMeans
- kMediods
- kMedians
- kModes
- fuzzykMeans
- Mean Shift
- Kernel kMeans
"""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import sys
sys.path.append("extra/")
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from kmodes.kmodes import KModes
from kmedians import KMedians
from kmedoids import KMedoids
from fuzzykmeans import FuzzyKMeans
from kernelkmeans import KernelKMeans
class Partition(object):
    """All partitional algorithms are implemented here.

    Wraps several prototype-based clustering backends behind a uniform
    ``__init__`` / ``setup`` / ``run`` interface.
    """

    # Method names this wrapper knows how to build (kept in sync with the
    # factory table in _make_estimator).
    _SUPPORTED = ("kmeans", "kmedoids", "kmodes", "kmedians",
                  "fuzzykmeans", "meanshift", "kernelkmeans")

    def __init__(self, method, data, n_clusters=2, random_state=0):
        """
        Initialize all the parameters.
        method: name of the algorithm (lower case, joined by underscore),
            one of ``Partition._SUPPORTED``
        data: data (2D matrix)
        n_clusters: number of clusters
        random_state: seed for numpy's global RNG
        """
        self.method = method
        self.data = data
        self.n_clusters = n_clusters
        np.random.seed(random_state)
        return

    def _make_estimator(self, keywords):
        """Build the backend estimator for ``self.method`` (None if unknown).

        The factories are lambdas so a backend class is only referenced
        when its method is actually requested.
        """
        factories = {
            "kmeans": lambda: KMeans(n_clusters=self.n_clusters, **keywords),
            "kmedoids": lambda: KMedoids(n_clusters=self.n_clusters, **keywords),
            "kmodes": lambda: KModes(n_clusters=self.n_clusters, init="Huang", **keywords),
            "kmedians": lambda: KMedians(n_clusters=self.n_clusters, **keywords),
            "fuzzykmeans": lambda: FuzzyKMeans(n_clusters=self.n_clusters, **keywords),
            # MeanShift infers the number of clusters itself, so no n_clusters.
            "meanshift": lambda: MeanShift(n_jobs=10, **keywords),
            "kernelkmeans": lambda: KernelKMeans(n_clusters=self.n_clusters, **keywords),
        }
        factory = factories.get(self.method)
        return factory() if factory is not None else None

    def setup(self, keywords={}):
        """
        Setup the algorithm: expose each keyword as an attribute and build
        the estimator as ``self.obj`` (left unset for unknown methods,
        matching the original behavior).
        """
        for key in keywords.keys():
            setattr(self, key, keywords[key])
        estimator = self._make_estimator(keywords)
        if estimator is not None:
            self.obj = estimator
        return

    def run(self):
        """
        Run the model: fit the estimator built by ``setup`` on the data.
        Unknown methods are a silent no-op, as before.
        """
        if self.method in self._SUPPORTED:
            self.obj.fit(self.data)
        return
| [
"shibaji7@vt.edu"
] | shibaji7@vt.edu |
9182b833c33a0c031a4b6abd00195b321488a6ba | 35fa8925e63f2b0f62ef6bfc1ff4e03cf42bd923 | /django_analyses/serializers/analysis.py | 5686c2648bdc0f3b2920d148bc7a04ed4cdb06cb | [
"Apache-2.0"
] | permissive | TheLabbingProject/django_analyses | 9e6f8b9bd2a84e8efe6dda6a15de6a3ecdf48ec1 | 5642579660fd09dde4a23bf02ec98a7ec264bceb | refs/heads/master | 2023-02-26T07:53:53.142552 | 2023-02-17T08:12:17 | 2023-02-17T08:12:17 | 225,623,958 | 1 | 2 | Apache-2.0 | 2023-02-17T08:12:18 | 2019-12-03T13:15:29 | Python | UTF-8 | Python | false | false | 662 | py | from django_analyses.models.analysis import Analysis
from django_analyses.models.category import Category
from rest_framework import serializers
class AnalysisSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer for Analysis: hyperlinked self URL and category link."""
    # Link to this analysis' own detail endpoint.
    url = serializers.HyperlinkedIdentityField(view_name="analyses:analysis-detail")
    # Writable link to the analysis' category.
    category = serializers.HyperlinkedRelatedField(
        view_name="analyses:category-detail", queryset=Category.objects.all(),
    )
    class Meta:
        model = Analysis
        fields = (
            "id",
            "title",
            "description",
            "category",
            "created",
            "modified",
            "url",
        )
| [
"z.baratz@gmail.com"
] | z.baratz@gmail.com |
766e6c727fc7adb01aeabb92bfcb815182f6aae6 | 81f3f88c6b1a687726b75b8f53c2fb29d72df584 | /aispider/crawler/auto_crawler.py | d998aaa9ed30476aae87695ddcafe35746857691 | [
"Apache-2.0"
] | permissive | East196/aispider | 1f256ee1eea991d49d41373f2b4734a65b264b7d | 6b42ff5c4bda89cbd923a86f3aa3fdf4a224dbb9 | refs/heads/master | 2020-04-07T10:14:14.222722 | 2018-04-21T04:55:11 | 2018-04-21T04:55:11 | 124,202,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,020 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
爬取器
"""
from __future__ import print_function
import json
import re
from xml.sax.saxutils import escape
from bs4 import BeautifulSoup
from lxml import etree
from aispider.core.http_request import HttpRequest
from aispider.core.request import get_soup, get_json, get_domain
class AutoCrawler(object):
    """
    Rule-driven crawler: reads tasks from an XML rule document, fetches
    pages and extracts structured objects from them.
    """
    def __init__(self, http_request=None):
        # Fall back to a default HttpRequest when none is supplied.
        self.http_request = HttpRequest() if http_request is None else http_request

    def crawl(self, url, rule=None):
        """
        Fetch ``url`` and extract objects according to ``rule``.
        :param url: page url
        :param rule: XML document (string) describing how to parse the page
        :return: extracted objects
        :rtype: dict
        """
        if rule is None:
            print("rule must be set")
        html = self.http_request.get_html(url)
        return self.extract(html, rule)

    def extract(self, html, rule):
        """
        Extract the wanted objects from raw ``html`` following ``rule``.
        :param html: raw page content
        :param rule: XML rule string
        :return: extracted objects
        :rtype: dict
        """
        doc = html.replace('\n', '')
        doc_data = (doc, {})
        root = etree.XML(rule)
        for child in root:
            print(child.tag, child.attrib['type'])
            doc_data = [sub for sub in get_doc(child, doc_data)]
        return doc_data

    def urls(self, url_template="http://www.baidu.com", **kwargs):
        """
        Expand ``url_template`` once per element of the single list-valued
        keyword argument.

        Bug fix: the list's elements are now substituted under their *own*
        keyword name; previously they were always assigned to ``page``, so
        any other list-valued keyword silently produced wrong urls.
        """
        list_key, list_values = self._get_list_arg(kwargs)
        for list_item in list_values:
            sub_kwargs = dict(kwargs)
            sub_kwargs[list_key] = list_item
            yield url_template.format(**sub_kwargs)

    @staticmethod
    def _get_list_arg(kwargs):
        """Return the first ``(key, value)`` pair whose value is a list.

        Raises ValueError when no keyword argument is a list (previously
        this returned None and callers crashed with an opaque TypeError).
        """
        for key, value in kwargs.items():
            if type(value) is list:
                return key, value
        raise ValueError("one keyword argument must be a list")
def get_doc(root, doc_data):
    """Generator: apply one <parse> rule element to (doc, data) pairs.

    :param root: one <parse> element of the rule XML (lxml element)
    :param doc_data: a (doc, data) tuple -- or a list of such tuples --
        where doc is the input text and data is the output collected so far
    :return: yields (doc, data) tuples consumed by the next rule level
    """
    if root.tag != 'parse':
        return
    if type(doc_data) != list:
        doc_data = [doc_data]
    # TODO when a second element is present, keep passing its data through
    if root.attrib['type'] == 'json':
        print("json")
        print(doc_data)
        for sub_doc, data in doc_data:
            sub_doc_json = json.loads(sub_doc.strip())
            # TODO only a single level is supported; handle nested jsonpath
            jsonpath = root.attrib['jsonpath']
            yield sub_doc_json.get(jsonpath), data
    if root.attrib['type'] == 're':
        print("re")
        print(doc_data)
        for sub_doc, data in doc_data:
            for item in re.findall(root.attrib['restr'], sub_doc):
                print("re :" + str(item))
                if root.attrib['name']:
                    # Record that this named rule matched.
                    data = dict(data, **{root.attrib['name']: "ok"})
                yield item.replace('\n', '').strip(), data
    if root.attrib['type'] == 'soup':
        print("soup")
        print(doc_data)
        for sub_doc, data in doc_data:
            print(sub_doc, data)
            soup = BeautifulSoup(sub_doc, 'lxml')
            cssselector = root.attrib['cssselector']
            print(cssselector)
            if root.attrib['list'] == 'False':
                print("list false")
                a = soup.select_one(cssselector)
                yield from rule(root, a, data)
            if root.attrib['list'] == 'True':
                print("list true")
                for a in soup.select(cssselector):
                    yield from rule(root, a, data)
def rule(root, a, data):
    """Yield extraction results for one matched soup node ``a``.

    When the rule is named, the node's text is stored under that name;
    when the rule has nested <parse> children, their extractions are
    merged in via ``item_extract``.
    """
    if root.attrib['name']:
        data = dict(data, **{root.attrib['name']: a.get_text()})
    if len(root):
        # Nested <parse> children: merge their named extractions.
        print(root)
        items = item_extract(a, root)
        data = dict(data, **items)
        yield data, data
    else:
        yield a.get_text(), data
def item_extract(soup, root):
    """Extract named values from ``soup`` per the <parse> children of ``root``.

    :param soup: soup node of the upper level's HTML fragment
    :param root: the upper level's rule XML element
    :return: dict of values extracted according to the child rules
    """
    items = {}
    for child in root:
        if child.tag != 'parse':
            continue
        if child.attrib['type'] == 'soup':
            sub_cssselector = child.attrib['cssselector']
            print(soup, sub_cssselector)
            if child.get('attr'):
                # Rule asks for an attribute value rather than the node text.
                items[child.attrib['name']] = soup.select_one(sub_cssselector)[child.get('attr')]
            else:
                items[child.attrib['name']] = soup.select_one(sub_cssselector).string
    return items
if __name__ == '__main__':
crawler = AutoCrawler()
for url in crawler.urls(url_template="http://a.b.cn/{type}?page={page}", page=[1, 2, 3], type=1):
print(url)
print(escape("<script>FM.view\((.+?)\);</script>"))
rule = '''
<root>
<parse type='re' restr="%s" name='abc' list='True' />
<parse type='json' jsonpath='jjj' list='False'/>
<parse type='soup' name='aname' cssselector='div' list='True'>
<parse type='soup' name='sub' cssselector='a.sub'/>
<parse type='soup' name='sup' cssselector='a.sup'/>
</parse>
</root>
''' % escape("<script>FM.view\((.+?)\);</script>")
doc = """<script>FM.view(
{"jjj":"<div>1<a class='sub' href='a.html'>111</a><a class='sup' href='a.html'>222</a></div>
<div>1<a class='sub' href='a.html'>111</a><a class='sup' href='a.html'>222</a></div>","xxx":2}
);</script>"""
doc_data = AutoCrawler().extract(doc, rule)
for d in doc_data:
print(1, 1, d)
| [
"2901180515@qq.com"
] | 2901180515@qq.com |
710eb29d865055eab7e561ae6bcc477dc460ff18 | 7410903c6cd5ef35c592af00c934fb21c369cbf2 | /00_Code/01_LeetCode/537_ComplexNumberMultiplication.py | 094cbd3b7c1735c92ca7bc620c1998bdb3a74513 | [
"MIT"
] | permissive | KartikKannapur/Algorithms | f4e4726170599db0622d18e8c06a382e9bce9e77 | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | refs/heads/master | 2020-12-25T18:32:41.086518 | 2020-10-19T02:59:47 | 2020-10-19T02:59:47 | 93,961,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | """
Given two strings representing two complex numbers.
You need to return a string representing their multiplication.
Note i2 = -1 according to the definition.
Example 1:
Input: "1+1i", "1+1i"
Output: "0+2i"
Explanation: (1 + i) * (1 + i) = 1 + i2 + 2 * i = 2i, and you
need convert it to the form of 0+2i.
Example 2:
Input: "1+-1i", "1+-1i"
Output: "0+-2i"
Explanation: (1 - i) * (1 - i) = 1 + i2 - 2 * i = -2i, and you
need convert it to the form of 0+-2i.
Note:
The input strings will not have extra blank.
The input strings will be given in the form of a+bi, where the
integer a and b will both belong to the range of [-100, 100].
And the output should be also in this form.
Your runtime beats 49.77 % of python submissions
"""
class Solution(object):
    def complexNumberMultiply(self, a, b):
        """Multiply two complex numbers given as "re+imi" strings.

        :type a: str
        :type b: str
        :rtype: str
        """
        def parse(num):
            # "3+-2i" -> (3, -2): real part before '+', imaginary part
            # between the '+' and the trailing 'i'.
            real_txt, _, imag_txt = num.partition("+")
            return int(real_txt), int(imag_txt[:-1])

        re1, im1 = parse(a)
        re2, im2 = parse(b)
        # (re1 + im1*i) * (re2 + im2*i), using i^2 = -1.
        real = re1 * re2 - im1 * im2
        imag = re1 * im2 + im1 * re2
        return "{}+{}i".format(real, imag)
| [
"kartikkannapur@gmail.com"
] | kartikkannapur@gmail.com |
db82c1c496fe6537c6a8907b9536571681a9557c | 98879590858368d5c32c389db31b761e479a0ab8 | /wsgiserver/test_server2.py | 65acfc0ccd5cfba74d4dc697f67fa500d68a87c0 | [] | no_license | zhiruchen/get_hands_dirty | 0bbf3719113dcf474baae571ecd55e5c234072a3 | af98a11bbeb8183428fe41cb7c9fa9a2354983e9 | refs/heads/master | 2020-04-17T12:00:44.275247 | 2017-06-24T16:28:43 | 2017-06-24T16:28:43 | 66,988,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | # -*- coding: utf-8 -*-
import re
from cgi import escape
def index(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
return ['''Hello World Application
This is the hello world application:
`continue <hello/>`_
''']
def hello(environ, start_response):
args = environ['myapp.url_args']
if args:
subject = escape(args[0])
else:
subject = 'World'
start_response('200 OK', [('Content-Type', 'text/html')])
return ['''Hello %(subject)s
Hello %(subject)s!
''' % {'subject': subject}]
def not_found(environ, start_response):
start_response('404 NOT FOUND', [('Content-Type', 'text/html')])
return ['NOT FOUND']
# url到函数的映射
urls = [
(r'^$', index),
(r'hello/?$', hello),
(r'hello/(.+)$', hello)
]
def application(environ, start_response):
"""将当前请求的路径分发给不同的函数"""
path = environ.get('PATH_INFO', '').lstrip('/')
for regex, callback in urls:
match = re.search(regex, path)
if match is not None:
environ['myapp.url_args'] = match.groups()
return callback(environ, start_response)
return not_found(environ, start_response)
if __name__ == '__main__':
from wsgiref.simple_server import make_server
srv = make_server('localhost', 8080, application)
srv.serve_forever() | [
"zhiruchen1992@foxmail.com"
] | zhiruchen1992@foxmail.com |
3e3a6e0c7c8a80543715611c08a374cc4e38615b | 114ea96f201cf3de825900fc632b6696433ae240 | /my_selenium/scroll.py | 538bf4ca52bdd1ef18230e06d1660bc08b65d708 | [] | no_license | iorilan/py_tiny_apps_playground | 2662133ec97aee0d495329a355fd22f7fb14ad2e | 70fb7965c1c27419ee83b6e9fd94d27ad47e4040 | refs/heads/master | 2023-02-08T12:06:47.406728 | 2020-12-31T15:08:24 | 2020-12-31T15:08:24 | 282,785,377 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | """
http://the-internet.herokuapp.com/infinite_scroll
"""
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
def go():
    """Open Chrome on the infinite-scroll demo page and scroll forever.

    Scrolls 10 px further every 0.3 s; never returns (infinite loop).
    """
    y=100
    # 'detach' keeps the browser window open after the script exits
    ops = Options()
    ops.add_experimental_option('detach',True)
    browser = webdriver.Chrome(ChromeDriverManager().install(),chrome_options=ops)
    browser.get("http://the-internet.herokuapp.com/infinite_scroll")
    while(True):
        time.sleep(0.3)  # give newly loaded content time to render
        browser.execute_script(f'window.scrollTo(0, {y})')
        y+=10
if __name__ == "__main__":
go() | [
"iorilan@hotmail.com"
] | iorilan@hotmail.com |
3f5d03e017f3cbb1e790acea900ca9157adbf6c2 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/automation/azure-mgmt-automation/azure/mgmt/automation/models/type_field_py3.py | d522b7658ebd52d0c6959e7716a83589d1124bef | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 1,042 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TypeField(Model):
    """Information about a field of a type.

    Auto-generated by AutoRest (see file header): hand edits are lost on
    regeneration.

    :param name: Gets or sets the name of the field.
    :type name: str
    :param type: Gets or sets the type of the field.
    :type type: str
    """
    # msrest (de)serialization map: attribute -> wire key and wire type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None:
        super(TypeField, self).__init__(**kwargs)
        self.name = name
        self.type = type
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
e68925dd548561cc823aaf960ced2ccccccc2fda | 7e96ba20c25c6fb56af6ccd36b3b6d68df6a081c | /Shivi_khanuja/Django/belt_reviewer/apps/reviews/apps.py | 074ef3493d7c37a3c7566d311a3355dc3c20d54f | [] | no_license | CodingDojoDallas/python_september_2017 | 9d8cd74131a809bc6b13b7f465594cf8b1e2fd75 | f9f2f7b39bf9c4fceda3df5dc7424164aa5d5df5 | refs/heads/master | 2021-01-23T08:52:22.899994 | 2017-10-30T17:00:55 | 2017-10-30T17:00:55 | 102,558,291 | 2 | 14 | null | 2018-01-13T05:28:34 | 2017-09-06T03:28:38 | Python | UTF-8 | Python | false | false | 162 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ReviewsConfig(AppConfig):
    """Django application configuration for the `reviews` app."""
    name = 'reviews'
| [
"shivalikhanuja.net@gmail.com"
] | shivalikhanuja.net@gmail.com |
b98467ba8ab928b8fe67d4607f0eaa6093bece31 | 14d1b3b0d2f4b21a62ae11bb30adaafca4aa0a76 | /demo_2.py | 106f3e70eebbcab59aad22dcb7825bc9f53da156 | [] | no_license | bdrummo6/CSPT13_Algorithms_GP | 9d64f83cdf566588b988d4108be655997d3b0dd0 | 907dc29d245ebb88d9ce07cf60249bf1ca862c50 | refs/heads/main | 2022-12-26T17:48:29.633204 | 2020-10-06T02:29:58 | 2020-10-06T02:29:58 | 301,584,067 | 0 | 0 | null | 2020-10-06T01:35:29 | 2020-10-06T01:35:28 | null | UTF-8 | Python | false | false | 4,375 | py | '''Explorer's Dilemna - aka the Knapsack Problem
After spending several days exploring a deserted island out in the Pacific,
you stumble upon a cave full of pirate loot! There are coins, jewels,
paintings, and many other types of valuable objects.
However, as you begin to explore the cave and take stock of what you've
found, you hear something. Turning to look, the cave has started to flood!
You'll need to get to higher ground ASAP.
There IS enough time for you to fill your backpack with some of the items
in the cave. Given that...
- you have 60 seconds until the cave is underwater
- your backpack can hold up to 50 pounds
- you want to maximize the value of the items you retrieve (since you can
only make one trip)
HOW DO YOU DECIDE WHICH ITEMS TO TAKE?
'''
import random
import time
from itertools import combinations
class Item:
    """One piece of loot: a name plus its weight (lbs) and value ($)."""

    def __init__(self, name, weight, value):
        # efficiency (value per pound) starts at 0; the greedy strategy
        # fills it in later.
        self.name, self.weight, self.value = name, weight, value
        self.efficiency = 0

    def __str__(self):
        return "{0}, {1} lbs, ${2}".format(self.name, self.weight, self.value)
small_cave = []
medium_cave = []
large_cave = []
def fill_cave_with_items():
    '''Randomly generates Item objects and
    creates caves of different sizes for testing.

    Bug fix: the name was previously drawn with randint(0, 4), so the last
    five entries of `names` could never be chosen; random.choice draws
    uniformly from the whole list.
    '''
    names = ["painting", "jewel", "coin", "statue", "treasure chest",
             "gold", "silver", "sword", "goblet", "hat"]

    def _random_item():
        # Weight 1-25 lbs, value $1-$100, as in the original demo.
        return Item(random.choice(names),
                    random.randint(1, 25),
                    random.randint(1, 100))

    # Populate the module-level caves used by the demo below.
    small_cave.extend(_random_item() for _ in range(5))
    medium_cave.extend(_random_item() for _ in range(15))
    large_cave.extend(_random_item() for _ in range(25))
def print_results(items, knapsack):
    '''Print out contents of what the algorithm
    calculated should be added to the knapsack.

    NOTE(review): reads the module-level global `start` (assigned by the
    test script below) to report elapsed time -- calling this before
    `start` is set raises NameError.
    '''
    # print(f'\nItems in the cave:')
    # for i in items:
    #     print(i)
    print('\nBest items to put in knapsack: ')
    for item in knapsack:
        print(f'-{item}')
    print(f'\nResult calculated in {time.time()-start:.5f} seconds\n')
    print('\n-------------------------')
def naive_fill_knapsack(sack, items, capacity=50):
    '''
    Put highest value items in knapsack until full.

    Implements the previously-stubbed TODOs: items are considered in
    descending order of value, and each is added to `sack` if it still
    fits within `capacity` (default 50 lbs, per the problem statement).

    :param sack: list the chosen items are appended to (mutated in place)
    :param items: iterable of objects with .weight and .value
    :param capacity: maximum total weight the sack may hold
    :return: the (mutated) `sack`
    '''
    # Account for anything already in the sack.
    remaining = capacity - sum(item.weight for item in sack)
    for item in sorted(items, key=lambda it: it.value, reverse=True):
        if item.weight <= remaining:
            sack.append(item)
            remaining -= item.weight
    return sack
def brute_force_fill_knapsack(sack, items, capacity=50):
    ''' Try every combination to find the best.

    Implements the previously-stubbed TODOs: enumerates every subset of
    `items` (exponential -- fine for the small demo caves, hopeless for
    large ones), keeps the subset of highest total value whose weight fits
    in `capacity`, and appends it to `sack`.

    :param sack: list the winning items are appended to (mutated in place)
    :param items: iterable of objects with .weight and .value
    :param capacity: maximum total weight the sack may hold
    :return: the (mutated) `sack`
    '''
    best_combo, best_value = (), 0
    for size in range(1, len(items) + 1):
        for combo in combinations(items, size):
            if sum(it.weight for it in combo) > capacity:
                continue
            value = sum(it.value for it in combo)
            if value > best_value:
                best_value = value
                best_combo = combo
    sack.extend(best_combo)
    return sack
def greedy_fill_knapsack(sack, items, capacity=50):
    '''Use ratio of [value] / [weight]
    to choose items for knapsack.

    Implements the previously-stubbed TODOs: computes each item's
    efficiency (value per pound), then greedily adds the most efficient
    items that still fit within `capacity`. Fast and usually good, but
    not guaranteed optimal.

    :param sack: list the chosen items are appended to (mutated in place)
    :param items: iterable of objects with .weight, .value and .efficiency
    :param capacity: maximum total weight the sack may hold
    :return: the (mutated) `sack`
    '''
    for item in items:
        item.efficiency = item.value / item.weight
    remaining = capacity - sum(item.weight for item in sack)
    for item in sorted(items, key=lambda it: it.efficiency, reverse=True):
        if item.weight <= remaining:
            sack.append(item)
            remaining -= item.weight
    return sack
# TESTS -
# Below are a series of tests that can be utilized to demonstrate
# the differences between each approach. Timing is included to give
# students an idea of how poorly some approaches scale. However,
# efficiency should also be formalized using Big O notation.
fill_cave_with_items()
knapsack = []
# Test 1 - Naive
print('\nStarting test 1, naive approach...')
items = large_cave
start = time.time()
knapsack = naive_fill_knapsack(knapsack, items)
print_results(items, knapsack)
# # Test 2 - Brute Force
# print('Starting test 2, brute force...')
# items = medium_cave
# start = time.time()
# knapsack = brute_force_fill_knapsack(knapsack, items)
# print_results(items, knapsack)
# Test 3 - Brute Force
# print('Starting test 3, brute force...')
# items = large_cave
# start = time.time()
# knapsack = brute_force_fill_knapsack(knapsack, items)
# print_results(items, knapsack)
# # Test 4 - Greedy
# print('Starting test 4, greedy approach...')
# items = medium_cave
# start = time.time()
# greedy_fill_knapsack(knapsack, items)
# print_results(items, knapsack)
# Test 5 - Greedy
# print('Starting test 5, greedy approach...')
# items = large_cave
# start = time.time()
# greedy_fill_knapsack(knapsack, items)
# print_results(items, knapsack) | [
"tomtarpeydev@gmail.com"
] | tomtarpeydev@gmail.com |
8f06b68aecddec25becf243ccd0383fb3198050f | a4ac55fca45c6d958513af43a1916c66fb25b931 | /pyatool/extras.py | 685dbf8e9c9ea0f853f63f2f2bd2fe94754931ad | [
"MIT"
] | permissive | guolong123/pyatool | d4393ebbfe3160400b0060e52f0d73c457212999 | 3d7e1bad67d990909dc590c1132c0f3811a2eee9 | refs/heads/master | 2020-04-24T02:19:07.642579 | 2019-02-20T05:59:01 | 2019-02-20T05:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,097 | py | import requests
import tempfile
import os
import re
import platform
SYSTEM_TYPE = platform.system()
def hello_world(toolkit=None):
    """
    Smoke-test helper (test only): prints the device id and runs a
    trivial adb command.
    :param toolkit: toolkit bound to one device (provides .device_id / .adb)
    :return: None
    """
    # toolkit contains some tools for development
    # get device id
    device_id = toolkit.device_id
    print(device_id)
    # use adb
    toolkit.adb.run(['shell', 'ps'])
def install_from(url=None, path=None, toolkit=None):
    """
    Install an apk onto the device, from either a URL or a local path.

    Exactly one of ``url`` / ``path`` must be truthy.
    :param url: download url of the apk
    :param path: local filesystem path of the apk
    :param toolkit: device toolkit
    :return: result of the underlying installer
    """
    if bool(url) == bool(path):
        # Both given, or neither given.
        raise TypeError('need url or path for installation, not both or none')
    if url:
        return _install_from_url(url, toolkit)
    return _install_from_path(path, toolkit)
def _install_from_url(url, toolkit=None):
    """
    Download an apk from ``url`` into a temp file and install it via adb.

    :return: False when the download fails, True otherwise.
    """
    resp = requests.get(url)
    if not resp.ok:
        return False
    with tempfile.NamedTemporaryFile('wb+', suffix='.apk', delete=False) as temp:
        temp.write(resp.content)
        temp.close()
    try:
        toolkit.adb.run(['install', '-r', '-d', '-t', temp.name])
    finally:
        # Always delete the temp apk -- the previous version leaked the
        # file whenever the adb install raised.
        os.remove(temp.name)
    return True
def _install_from_path(path, toolkit=None):
    # `adb install -r -d -t`: reinstall, allow downgrade, allow test apks.
    return toolkit.adb.run(['install', '-r', '-d', '-t', path])
def get_current_activity(toolkit=None):
    """
    Return the device's current (top-of-stack) activity name.
    :param toolkit: device toolkit
    :return: `dumpsys activity top` output filtered to its ACTIVITY line
    """
    # TODO if sh has installed in windows, command is same as linux ..
    # filter_name = 'findstr' if SYSTEM_TYPE == 'Windows' else 'grep'
    return toolkit.adb.run(['shell', 'dumpsys', 'activity', 'top', '|', 'grep', 'ACTIVITY'])
def is_installed(package_name, toolkit=None):
    """
    Check whether ``package_name`` is already installed on the device.
    :param package_name: package to look for
    :param toolkit: device toolkit
    :return: True when the package appears in `pm list package` output
    """
    installed_packages = show_package(toolkit)
    return package_name in installed_packages
def show_package(toolkit=None):
    """
    List every package installed on the device.
    :param toolkit: device toolkit
    :return: raw `pm list package` output
    """
    command = ['shell', 'pm', 'list', 'package']
    return toolkit.adb.run(command)
def clean_cache(package_name, toolkit=None):
    """
    Clear the stored data/cache of ``package_name`` (pm clear).
    :param package_name: package whose cache is wiped
    :param toolkit: device toolkit
    :return: output of `pm clear`
    """
    command = ['shell', 'pm', 'clear', package_name]
    return toolkit.adb.run(command)
def uninstall(package_name, toolkit=None, save_data=None):
    """
    Uninstall ``package_name``; with ``save_data`` truthy, keep the app's
    data and cache (adb's ``-k`` flag).
    :param package_name: package to remove
    :param toolkit: device toolkit
    :param save_data: keep data/cache directories when truthy
    :return: output of `adb uninstall`
    """
    command = ['uninstall', package_name]
    if save_data:
        command.insert(1, '-k')
    return toolkit.adb.run(command)
def switch_airplane(status, toolkit=None):
    """
    Toggle airplane mode: write the global setting, then broadcast
    AIRPLANE_MODE so running apps pick the change up.
    :param status: True to enable, False to disable
    :param toolkit: device toolkit
    :return: None
    """
    setting_value, broadcast_value = ('1', 'true') if status else ('0', 'false')
    toolkit.adb.run(["shell", "settings", "put", "global", "airplane_mode_on",
                     setting_value])
    toolkit.adb.run(["shell", "am", "broadcast", "-a",
                     "android.intent.action.AIRPLANE_MODE", "--ez", "state",
                     broadcast_value])
def switch_wifi(status, toolkit=None):
    """
    Enable (True) or disable (False) wifi via ``svc wifi``.
    :param status: True to enable, False to disable
    :param toolkit: device toolkit
    :return: None
    """
    action = {True: 'enable', False: 'disable'}[status]
    toolkit.adb.run(['shell', 'svc', 'wifi', action])
def switch_screen(status, toolkit=None):
    """
    Wake (True, keyevent 224) or sleep (False, keyevent 223) the screen.
    :param status: True to wake, False to sleep
    :param toolkit: device toolkit
    :return: None
    """
    keycode = {True: '224', False: '223'}[status]
    toolkit.adb.run(['shell', 'input', 'keyevent', keycode])
def input_text(content, toolkit=None):
    """
    Type ``content`` on the device (Chinese text is NOT supported).
    # TODO Chinese input could use ADBKeyBoard (https://github.com/senzhk/ADBKeyBoard)
    :param content: text to type (ASCII only)
    :param toolkit: device toolkit
    :return: None
    """
    toolkit.adb.run(['shell', 'input', 'text', content])
def start_activity(package_name, activity_name=None, flag=None, toolkit=None):
    """
    Launch an app (package only) or a specific activity
    (``package/.Activity``) with ``am start``.
    :param package_name: target package
    :param activity_name: optional activity inside the package
    :param flag: optional extra `am start` argument, inserted before the target
    :param toolkit: device toolkit
    :return: output of `am start`
    """
    command = ['shell', 'am', 'start']
    if flag:
        command.append(flag)
    target = package_name if not activity_name else '{}/.{}'.format(package_name, activity_name)
    return toolkit.adb.run(command + [target])
def force_stop(package_name, toolkit=None):
    """
    Force-stop the app identified by ``package_name``.
    :param package_name: package to stop
    :param toolkit: device toolkit
    :return: output of `am force-stop`
    """
    return toolkit.adb.run(['shell', 'am', 'force-stop', package_name])
def _clean_backstage(toolkit=None):
    """
    (Marked as not working) Attempt to kill all background apps/processes.
    :param toolkit: device toolkit
    :return: output of `am kill-all`
    """
    return toolkit.adb.run(['shell', 'am', 'kill-all'])
def send_broadcast(broadcast_name, flag=None, toolkit=None):
    """
    Send a broadcast intent with the given action.

    Bug fix: this previously ran ``am start`` (a copy-paste from
    ``start_activity``), which launches an activity; broadcasts are sent
    with ``am broadcast -a <action>``.
    :param broadcast_name: intent action to broadcast
    :param flag: optional extra `am broadcast` argument
    :param toolkit: device toolkit
    :return: output of `am broadcast`
    """
    base_cmd = ['shell', 'am', 'broadcast']
    if flag:
        base_cmd.append(flag)
    return toolkit.adb.run(base_cmd + ['-a', broadcast_name])
def input_key_event(key_code, toolkit=None):
    """
    Send a single Android key event.
    :param key_code: numeric Android keycode (converted to str here)
    :param toolkit: device toolkit
    :return: output of `input keyevent`
    """
    return toolkit.adb.run(['shell', 'input', 'keyevent', str(key_code)])
def swipe(x1, y1, x2, y2, toolkit=None):
    """
    Swipe from (x1, y1) to (x2, y2) via ``input swipe``.
    :param x1: start x
    :param y1: start y
    :param x2: end x
    :param y2: end y
    :param toolkit: device toolkit
    :return: output of `input swipe`
    """
    coords = [str(c) for c in (x1, y1, x2, y2)]
    return toolkit.adb.run(['shell', 'input', 'swipe'] + coords)
def click(x, y, toolkit=None):
    """
    Tap the screen at (x, y) via ``input tap``.
    :param x: tap x coordinate
    :param y: tap y coordinate
    :param toolkit: device toolkit
    :return: output of `input tap`
    """
    x, y = str(x), str(y)
    return toolkit.adb.run(['shell', 'input', 'tap', x, y])
def get_ip_address(toolkit=None):
    """
    Return the device's wlan0 IPv4 address, parsed from ``ifconfig`` output.
    :param toolkit: device toolkit
    :return: ip address string (IndexError if wlan0 has no address)
    """
    # TODO better design?
    ifconfig_output = toolkit.adb.run(['shell', 'ifconfig', 'wlan0'])
    matches = re.findall(r'inet\s*addr:(.*?)\s', ifconfig_output, re.DOTALL)
    return matches[0]
def set_ime(ime_name, toolkit=None):
    """
    Switch the active input method (list candidates with
    `adb shell ime list -a`).
    :param ime_name: IME id, e.g. com.android.inputmethod.pinyin/.PinyinIME
    :param toolkit: device toolkit
    :return: output of `ime set`
    """
    return toolkit.adb.run(['shell', 'ime', 'set', ime_name])
def pull(src, target, toolkit=None):
    """
    Copy ``src`` from the device to local ``target`` (adb pull).
    :param src: path on the device
    :param target: local destination path
    :param toolkit: device toolkit
    :return: output of `adb pull`
    """
    command = ['pull', src, target]
    return toolkit.adb.run(command)
def push(src, target, toolkit=None):
    """
    Copy local ``src`` onto the device at ``target`` (adb push).
    :param src: local source path
    :param target: destination path on the device
    :param toolkit: device toolkit
    :return: output of `adb push`
    """
    command = ['push', src, target]
    return toolkit.adb.run(command)
def is_connected(toolkit=None):
    """
    Probe the device with a trivial ``echo``; adb raising RuntimeError is
    taken to mean the device is not connected.
    :param toolkit: device toolkit
    :return: True when the probe succeeds, False otherwise
    """
    try:
        toolkit.adb.run(['shell', 'echo', '"hello"'])
        return True
    except RuntimeError:
        return False
def make_dir(target_dir, toolkit=None):
    """
    Create a directory on the device (plain `mkdir`, not `mkdir -p`).
    :param target_dir: path to create on the device
    :param toolkit: device toolkit
    :return: output of `mkdir`
    """
    return toolkit.adb.run(['shell', 'mkdir', target_dir])
def remove_dir(target, toolkit=None):
    """
    Recursively delete ``target`` on the device via `rm -rf` (no prompt,
    no undo).
    :param target: path on the device
    :param toolkit: device toolkit
    :return: output of `rm`
    """
    return toolkit.adb.run(['shell', 'rm', '-rf', target])
__all__ = [
'hello_world',
'install_from',
'show_package',
'get_current_activity',
'is_installed',
'clean_cache',
'uninstall',
'switch_airplane',
'switch_wifi',
'input_text',
'start_activity',
'get_ip_address',
'set_ime',
'push',
'pull',
'send_broadcast',
'force_stop',
'input_key_event',
'swipe',
'click',
'is_connected',
'make_dir',
'remove_dir',
]
| [
"178894043@qq.com"
] | 178894043@qq.com |
9ec0d07f9d5c5478b3c58d1c523384b8f2a6aa8e | 1b3402ff6f4b531d13add47d94fa497666bdd3f1 | /authentication/serializers.py | 0cab463ae50c55a50877768f7d80bf9ebf71a3f8 | [] | no_license | falled10/breaking_brain_api | b4287ed8986986dea5621deb45529c6986af0f32 | b16324f5a0a6d102797944a02c3194d6f86b049e | refs/heads/master | 2023-06-03T07:25:11.862023 | 2021-06-20T16:00:43 | 2021-06-20T16:00:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | from django.utils.encoding import force_text, force_bytes
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.conf import settings
from rest_framework import serializers
from authentication.models import User
from authentication.tokens import TokenGenerator
from breaking_brain_api.tasks import send_email
class SignUpSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'email', 'password')
extra_kwargs = {'username': {'required': True}}
def create(self, validated_data):
user = User(email=validated_data['email'], username=validated_data['username'])
user.set_password(validated_data['password'])
user.save()
token = f'{urlsafe_base64_encode(force_bytes(user.email))}.{TokenGenerator.make_token(user)}'
url = f'{settings.USER_ACTIVATION_URL}?token={token}'
context = {
'url': url,
'email': user.email
}
template = 'notifications/activate_user.html'
send_email.delay(
subject="Activate your ChooseOne account",
template=template,
recipients=[user.email],
context=context
)
return user
class ActivateUserSerializer(serializers.Serializer):
    """Validates an activation token ("<b64(email)>.<token>") and activates
    the matching user account."""

    token = serializers.CharField()

    def validate(self, attrs):
        token = attrs['token']
        # The same error text is raised for every failure mode, so callers
        # cannot tell which part of the token was wrong.
        error_text = f"Provided activation token '{token}' is not valid"
        try:
            # Token layout: "<urlsafe-b64 email>.<signed token>"; split()
            # raises ValueError unless there are exactly two parts.
            email, token = token.split('.')
            email = force_text(urlsafe_base64_decode(email))
        except (TypeError, ValueError):
            raise serializers.ValidationError(error_text)
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            raise serializers.ValidationError(error_text)
        if not TokenGenerator.check_token(user, token):
            raise serializers.ValidationError(error_text)
        attrs['email'] = email
        return attrs

    def activate_user(self):
        # Must be called only after is_valid(): reads validated_data['email'].
        user = User.objects.get(email=self.validated_data['email'])
        user.is_active = True
        user.save()
        return user
| [
"jurakulek@gmail.com"
] | jurakulek@gmail.com |
dc2ed23ce665cf327bd85c3c52419c99ed15d825 | dd0f95e4b20112b3c8ec4ded3e2baa9acaa5f71e | /tests/tests.py | e1669df1959e52168d30d20d954360fad2da7d33 | [] | no_license | scotm/postcode_locator | 1cf6f0e075ff38a16806be82be4cb6b345cf6d73 | e9d6a486dc66cc05bc53157f91a71477abc1f449 | refs/heads/master | 2021-01-21T10:13:23.944029 | 2019-03-26T16:47:21 | 2019-03-26T16:47:21 | 31,371,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,626 | py | from django.contrib.gis.geos import Point
from django.core.urlresolvers import reverse
from django.test import TestCase
from postcode_locator.models import PostcodeMapping
from postcode_locator.tests.factories import PostcodeMappingFactory
class MatchPostcodeTest(TestCase):
    """Tests for PostcodeMapping.match_postcode and the postcode_point view."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_match_postcode(self):
        # Known coordinates registered for the EH6 7HQ postcode.
        point = Point(-3.1627269999999998, 55.9735760000000013)
        p = PostcodeMappingFactory(postcode='EH67HQ', point=point)
        with self.assertRaises(PostcodeMapping.DoesNotExist):
            PostcodeMapping.match_postcode('')
        with self.assertRaises(PostcodeMapping.DoesNotExist):
            PostcodeMapping.match_postcode('AS2SAD')
        # NOTE(review): `unicode` is a Python 2 builtin; this line raises
        # NameError under Python 3 -- switch to str(p) if the suite migrates.
        self.assertEqual(unicode(p), 'EH67HQ')
        self.assertEqual(PostcodeMapping.match_postcode('AS2SAD',raise_exceptions=False), None)
        # Lookups succeed with or without the internal space.
        self.assertEqual(PostcodeMapping.match_postcode('EH6 7HQ').point, point)
        self.assertEqual(PostcodeMapping.match_postcode('EH67HQ').point, point)
        self.assertEqual(PostcodeMapping.match_postcode('EH6 7HQ').point, point)

    def test_postcodemappingfactory(self):
        # Two factory instances must not share the same point.
        p = PostcodeMappingFactory.create()
        q = PostcodeMappingFactory.create()
        self.assertNotEqual(p.point, q.point)

    def test_page(self):
        import json
        p = PostcodeMappingFactory.create()
        response = self.client.get(reverse('postcode_point'), data={'postcode':p.pk})
        data = json.loads(response.content)
        # Compare round-tripped JSON coordinates with 6-decimal tolerance.
        self.assertAlmostEqual(data['point'][0], p.point.x,places=6)
        self.assertAlmostEqual(data['point'][1], p.point.y,places=6)
"scott.scotm@gmail.com"
] | scott.scotm@gmail.com |
da0b1ea947109a1a2d20982c3c045252fa82861c | 9b14cd92abcae4e26df1afc3cc595e1edeffda82 | /ATLASalertsservice/alerts.py | 79452ceab9de5b49e82a4533e4f45dd73f8e03d2 | [] | no_license | ATLAS-Analytics/AnalyticsNotebooks | 8d374363c3a55067614a802c1928743e990bd419 | 6656470cf42c5211891013a9f3a0f9036749abe1 | refs/heads/master | 2020-04-15T15:50:44.881217 | 2018-10-19T18:00:05 | 2018-10-19T18:00:05 | 55,799,185 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | import requests, httplib2, json, time
from oauth2client.service_account import ServiceAccountCredentials
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
import os
from subprocess import Popen, PIPE
class alerts:
    """Alert delivery helpers: append alert rows to a Google spreadsheet and
    send plain-text or HTML notification mail through local sendmail."""

    def __init__(self):
        # Authorize a Google Sheets v4 client from a service-account key file.
        SCOPE = ["https://spreadsheets.google.com/feeds"]
        SECRETS_FILE = "AlertingService-879d85ad058f.json"
        credentials = ServiceAccountCredentials.from_json_keyfile_name(SECRETS_FILE, SCOPE)
        http = credentials.authorize(httplib2.Http())
        discoveryUrl = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
        self.service = discovery.build('sheets', 'v4', http=http, discoveryServiceUrl=discoveryUrl)
        return

    def addAlert(self, test, email, text):
        # Append a [timestamp, email, text] row to the sheet tab named `test`.
        spreadsheetId = '19bS4cxqBEwr_cnCEfAkbaLo9nCLjWnTnhQHsZGK9TYU'
        rangeName = test + '!A1:C1'
        myBody = {u'range': rangeName, u'values': [[time.strftime("%Y/%m/%d %H:%M:%S"), email, text]], u'majorDimension': u'ROWS'}
        cells = self.service.spreadsheets().values().append(spreadsheetId=spreadsheetId, range=rangeName,valueInputOption='RAW', insertDataOption='INSERT_ROWS', body=myBody).execute()
        return

    def sendMail(self, test, to, body):
        # Plain-text alert; `test` becomes the subject line.
        msg = MIMEText(body)
        msg['Subject'] = test
        msg['From'] = 'AAAS@mwt2.org'
        msg['To'] = to
        # NOTE(review): "-r AAAS@mwt2.org" is passed as ONE argv element that
        # contains a space -- confirm sendmail parses it as intended.
        p = Popen(["/usr/sbin/sendmail", "-t", "-oi", "-r AAAS@mwt2.org"], stdin=PIPE)
        print(msg.as_string())
        p.communicate(msg.as_string().encode('utf-8'))

    def send_HTML_mail(self, test, to, body, subtitle="", images=[]):
        # NOTE(review): mutable default `images=[]` -- harmless here because
        # it is only read, but prefer `images=None` plus `images = images or []`.
        msg = MIMEMultipart('related')
        msg['Subject'] = test
        msg['From'] = 'AAAS@mwt2.org'
        msg['To'] = to
        msgAlternative = MIMEMultipart('alternative')
        msg.attach(msgAlternative)
        # Templates are read from the current working directory.
        html = open("index.htm", "r").read()
        image_template = open("image_template.htm", "r").read()
        html = html.replace('TheMainTitle',test)
        html = html.replace('TheSubtitle',subtitle)
        html = html.replace('MyBody', body)
        # Duplicate the per-figure template: figures are split between the
        # two placeholder columns (first column gets the extra odd figure).
        html = html.replace('TheImagePlaceholder1', image_template * int( (len(images)+1) / 2 ) )
        html = html.replace('TheImagePlaceholder2', image_template * int(len(images)/2) )
        for ind,i in enumerate(images):
            #print("Adding image:", i)
            html = html.replace('FigureTitle',i['Title'],2) #appears twice per figure
            html = html.replace('FigureFilename',"cid:image"+str(ind),1)
            html = html.replace('FigureDescription',i['Description'],1)
            link=''
            if 'Link' in i:
                link=i['Link']
            html = html.replace('FigureLink',link,1)
            # Inline the image bytes and reference them via Content-ID.
            img_data = open(i['Filename'], 'rb').read()
            image = MIMEImage(img_data, name=i['Filename'])
            image.add_header('Content-ID', '<image'+str(ind)+'>')
            msg.attach(image)
        # Record the MIME types of both parts - text/plain and text/html.
        part1 = MIMEText(body, 'plain')
        part2 = MIMEText(html, 'html')
        msgAlternative.attach(part1)
        msgAlternative.attach(part2)
        # NOTE(review): same space-embedded "-r ..." argv element as sendMail.
        p = Popen(["/usr/sbin/sendmail", "-t", "-oi", "-r AAAS@mwt2.org"], stdin=PIPE)
        #print(msg.as_string())
        p.communicate(msg.as_string().encode('utf-8'))
| [
"ivukotic@cern.ch"
] | ivukotic@cern.ch |
c143278afc362b186e45a61344e467ce938f73cf | ddd7e91dae17664505ea4f9be675e125337347a2 | /unused/2014/preprocess/preprocess_ndpi.py | e183a7517dc9cfbe7be7907701c28afb1afe6eb4 | [] | no_license | akurnikova/MouseBrainAtlas | 25c4134bae53827167e4b54ba83f215aec9f2d85 | ed1b5858467febdaed0a58a1a742764d214cc38e | refs/heads/master | 2021-07-15T17:17:19.881627 | 2019-02-22T06:00:17 | 2019-02-22T06:00:17 | 103,425,463 | 0 | 0 | null | 2018-04-27T19:08:02 | 2017-09-13T16:45:56 | Jupyter Notebook | UTF-8 | Python | false | false | 5,345 | py | #!/usr/bin/python
def execute_command(cmd):
    """Run *cmd* through the shell and report how the child terminated.

    Follows the error-reporting recipe from the ``subprocess`` docs: a
    negative return code means the child was killed by a signal; an
    OSError while spawning is reported and re-raised so callers can abort.

    :param cmd: shell command line (executed with ``shell=True``).
    :return: None -- the outcome is written to stderr only.
    :raises OSError: if the child process could not be started.
    """
    # BUG FIX: `call` was never imported at module level (NameError), and the
    # original used Python 2 `print >>sys.stderr` statements, which are broken
    # under Python 3.
    from subprocess import call
    try:
        retcode = call(cmd, shell=True)
        if retcode < 0:
            print("Child was terminated by signal", -retcode, file=sys.stderr)
        else:
            print("Child returned", retcode, file=sys.stderr)
    except OSError as e:
        print("Execution failed:", e, file=sys.stderr)
        raise e
import os
import sys
import shutil
import glob
import argparse
sys.path.append('/home/yuncong/morphsnakes')
import morphsnakes
import numpy as np
from matplotlib import pyplot as plt
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from skimage.morphology import remove_small_objects
from skimage.measure import regionprops, label
ndpi_dir = os.environ['GORDON_NDPI_DIR']
temp_dir = os.environ['GORDON_TEMP_DIR']
data_dir = os.environ['GORDON_DATA_DIR']
repo_dir = os.environ['GORDON_REPO_DIR']
result_dir = os.environ['GORDON_RESULT_DIR']
labeling_dir = os.environ['GORDON_LABELING_DIR']
ndpisplit = '/oasis/projects/nsf/csd181/yuncong/ndpisplit'
def foreground_mask_morphsnakes(img):
    """Segment the tissue foreground of a grayscale slide image.

    Runs a morphological geodesic active contour (morphsnakes) seeded with a
    full-frame level set, then keeps the largest connected blob and removes
    tiny speckles.

    :param img: 2-D grayscale image array.
    :return: boolean mask, True on the detected foreground region.
    """
    gI = morphsnakes.gborders(img, alpha=20000, sigma=1)
    mgac = morphsnakes.MorphGAC(gI, smoothing=2, threshold=0.3, balloon=-3)

    # Start from an all-foreground level set with a 3-pixel cleared border,
    # so the contour can only shrink inwards (balloon < 0).
    mgac.levelset = np.ones_like(img)
    mgac.levelset[:3, :] = 0
    mgac.levelset[-3:, :] = 0
    mgac.levelset[:, :3] = 0
    mgac.levelset[:, -3:] = 0

    # BUG FIX: the original referenced an undefined name `msnake` and read
    # `previous_levelset` before ever assigning it -- both raised NameError.
    # Convergence: stop when fewer than 3 pixels changed between iterations
    # (absolute difference; the original signed sum could go negative while
    # the contour was still shrinking).
    num_iters = 1000
    previous_levelset = mgac.levelset.copy()
    for _ in range(num_iters):
        mgac.step()
        if np.sum(np.abs(mgac.levelset - previous_levelset)) < 3:
            break
        previous_levelset = mgac.levelset.copy()

    # Keep only the largest connected component of the final level set.
    # NOTE(review): `neighbors=4` is the legacy skimage API (connectivity=1
    # in modern releases); kept for compatibility with the pinned version.
    blob_labels, n_labels = label(mgac.levelset, neighbors=4, return_num=True)
    blob_props = regionprops(blob_labels + 1)
    largest_blob = np.argmax([p.area for p in blob_props])

    # BUG FIX: `np.bool` was removed from modern NumPy; use the builtin bool.
    mask = np.zeros_like(mgac.levelset, dtype=bool)
    mask[blob_labels == largest_blob] = 1

    # Drop remaining speckles below 40 pixels.
    min_size = 40
    mask = remove_small_objects(mask, min_size=min_size, connectivity=1, in_place=False)
    return mask
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("stack", type=str, help="choose what stack of images to crop and resolution, ex: RS141")
    parser.add_argument("-r","--rotation",type=str,help="how each slice will be rotated",default = None)
    parser.add_argument("-m","--mirror",type=str,help="to mirror horizontal type 'flop', for vertical type 'flip'",default=None)
    args = parser.parse_args()
    stack = args.stack

    stack_temp_dir = os.path.join(temp_dir, stack)
    if not os.path.exists(stack_temp_dir):
        os.makedirs(stack_temp_dir)

    # Split ndpi files
    # Each NDPI slide is exploded into one TIFF per magnification level, then
    # the per-level files are moved into per-resolution temp directories.
    for ndpi_file in os.listdir(os.path.join(ndpi_dir, stack)):
        if not ndpi_file.endswith('ndpi'): continue
        execute_command(ndpisplit + ' ' + ndpi_file)
        for level in ['macro', 'x0.078125', 'x0.3125', 'x1.25', 'x5', 'x20']:
            res_temp_dir = os.path.join(stack_temp_dir, level)
            os.mkdir(res_temp_dir)
            for f in glob.glob('*_%s_z0.tif'%level):
                shutil.move(f, res_temp_dir)
        # NOTE(review): os.mkdir returns None, so map_dir is always None, and
        # '*map*'%level applies %-formatting to a string with no conversion
        # specifier, which raises TypeError at runtime -- likely meant
        # glob.glob('*map*') with shutil.move(f, <map dir>).
        map_dir = os.mkdir(os.path.join(stack_temp_dir, 'map'))
        for f in glob.glob('*map*'%level):
            shutil.move(f, res_temp_dir)

    # Crop sections out of whole-slide images, according to manually produced bounding boxes information
    stack_data_dir = os.path.join(data_dir, stack)
    if not os.path.exists(stack_data_dir):
        os.makedirs(stack_data_dir)

    for resol in ['x0.3125', 'x1.25', 'x5', 'x20']:
        res_data_dir = os.path.join(stack_data_dir, resol)
        if not os.path.exists(res_data_dir):
            os.makedirs(res_data_dir)
        section_ind = 0
        res_temp_dir = os.path.join(stack_temp_dir, resol)
        for slide_im_filename in os.listdir(res_temp_dir):
            _, slide_str, _ = slide_im_filename.split('_')[:3]
            slide_im_path = os.path.join(res_temp_dir, slide_im_filename)
            # NOTE(review): `subprocess` is never imported in this file --
            # this line raises NameError; add `import subprocess` at the top.
            # ImageMagick `identify` output field 2 is "<width>x<height>".
            img_id = subprocess.check_output(["identify", slide_im_path]).split()
            tot_w, tot_h = map(int, img_id[2].split('x'))
            bb_txt = os.path.join(repo_dir, 'preprocessing/bounding_box_data', stack, stack + '_' + slide_str + '.txt')
            # NOTE(review): `split` is undefined here (NameError) -- the
            # intent appears to be str.split on each line of the bbox file.
            # Bounding boxes are stored as fractions of the slide dimensions.
            for x_perc, y_perc, w_perc, h_perc in map(split, open(bb_txt, 'r').readlines()):
                (x,y,w,h) = (int(tot_w * float(x_perc)), int(tot_h * float(y_perc)),
                             int(tot_w * float(w_perc)), int(tot_h * float(h_perc)))
                # ImageMagick geometry string: WxH+X+Y
                geom = str(w) + 'x' + str(h) + '+' + str(x) + '+' + str(y)
                section_ind += 1
                section_im_filename = '_'.join([stack, resol, '%04d' % section_ind]) +'.tif'
                section_im_path = os.path.join(res_data_dir, section_im_filename)
                # Crops the image according to bounding box data
                cmd1 = "convert %s -crop %s %s" % (slide_im_path, geom, section_im_path)
                execute_command(cmd1)
                # Rotates the cropped image if specified
                if args.rotation is not None:
                    cmd2 = "convert %s -page +0+0 -rotate %s %s" % (section_im_path, args.rotation, section_im_path)
                    execute_command(cmd2)
                # Reflects the rotated image if specified
                if args.mirror is not None:
                    cmd3 = "convert %s -%s %s" % (section_im_path, args.mirror, section_im_path)
                    execute_command(cmd3)
                # NOTE(review): Python 2 print statement -- SyntaxError on py3.
                print "Processed %s" % section_im_filename
                # Compute and save the foreground mask next to the section.
                section_im = imread(section_im_path)
                mask = foreground_mask_morphsnakes(section_im)
                mask_filename = '_'.join([stack, resol, '%04d' % section_ind]) +'_mask.png'
                mask_path = os.path.join(res_data_dir, mask_filename)
                imsave(mask_path, mask)
"cyc3700@gmail.com"
] | cyc3700@gmail.com |
77058cba020de410735849d1908ef6510f24db88 | dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c | /company/amazon/graphs/connected_graphs.py | e8044bd1e1dd19d5a95ccc77e3a4796e68f2fa77 | [] | no_license | salujaharkirat/ds-algo | ec22eaae81bdb78f2818248508325a536aedbb7b | 819b5971826d97ec600b92776c5158518c9cbf22 | refs/heads/master | 2023-05-02T17:20:49.425484 | 2021-05-23T07:54:29 | 2021-05-23T07:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | class Solution:
def dfs(self, isConnected):
visited = [0] * len(isConnected)
res = 0
def traverse(visited, i):
for j in range(len(isConnected)):
if isConnected[i][j] == 1 and visited[j] == 0:
visited[j] = 1
traverse(visited, j)
for i in range(len(isConnected)):
if visited[i] == 0:
traverse(visited, i)
res += 1
return res
def findCircleNum(self, isConnected: List[List[int]]) -> int:
return self.dfs(isConnected)
| [
"saluja.harkirat@gmail.com"
] | saluja.harkirat@gmail.com |
13866286b72f821ce21e6968b0f7be80736b3dc1 | 24b2f3f5f49ed19cf7fd3dcd433d6b72806e08cf | /python/sorting_and_searching/0363_Max_Sum_of_Rectangle_No_Larger_Than_K.py | 680321f332bc9186ac9d8af3bffac598b0194d6a | [] | no_license | lizzzcai/leetcode | 97089e4ca8c3c53b5a4a50de899591be415bac37 | 551cd3b4616c16a6562eb7c577ce671b419f0616 | refs/heads/master | 2021-06-23T05:59:56.928042 | 2020-12-07T03:07:58 | 2020-12-07T03:07:58 | 162,840,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,167 | py | '''
27/05/2020
363. Max Sum of Rectangle No Larger Than K - Hard
Tag: Binary Search, Dynamic Programming, Queue
Given a non-empty 2D matrix matrix and an integer k, find the max sum of a rectangle in the matrix such that its sum is no larger than k.
Example:
Input: matrix = [[1,0,1],[0,-2,3]], k = 2
Output: 2
Explanation: Because the sum of rectangle [[0, 1], [-2, 3]] is 2,
and 2 is the max number no larger than k (k = 2).
Note:
The rectangle inside the matrix must have an area > 0.
What if the number of rows is much larger than the number of columns?
'''
from typing import List
import collections
import math
import bisect
# Solution
class Solution1:
    """Max sum of a rectangle no larger than k.

    For every column pair the problem reduces to 1-D: maintain a sorted
    list of running prefix sums and binary-search for the smallest prefix
    that is >= running - k.
    """

    def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:
        # Per-row prefix sums: prefix[r][c] = sum(matrix[r][:c]).
        prefix = []
        for row in matrix:
            acc = [0]
            for value in row:
                acc.append(acc[-1] + value)
            prefix.append(acc)

        best = -math.inf
        width = len(prefix[0])
        for left in range(width):
            for right in range(left + 1, width):
                running = 0
                seen = [0]  # sorted prefix sums of the current column strip
                for row_prefix in prefix:
                    running += row_prefix[right] - row_prefix[left]
                    # Smallest earlier prefix >= running - k yields the
                    # largest strip sum that is still <= k.
                    lo = bisect.bisect_left(seen, running - k)
                    if lo < len(seen):
                        best = max(best, running - seen[lo])
                        if best == k:  # cannot do better -- bail out early
                            return best
                    bisect.insort_left(seen, running)
        return best
class Solution2:
    """Line-for-line duplicate of Solution1 (same algorithm, same code).

    NOTE(review): presumably intended to hold the optimized variant for the
    "rows much larger than columns" follow-up, but no changes were made --
    the two classes are currently interchangeable.
    """
    def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:
        P = []
        for row in matrix:
            p = [0]
            for x in row:
                p.append(p[-1]+x)
            P.append(p)

        res = -math.inf
        for c1 in range(len(P[0])):
            for c2 in range(c1+1, len(P[0])):
                # 2-D prefix sum between c1 and c2
                curr_sum, curr_P = 0, [0]
                for r in range(len(P)):
                    curr_sum += P[r][c2]-P[r][c1]
                    idx = bisect.bisect_left(curr_P, curr_sum - k)
                    if idx < len(curr_P):
                        res = max(res, curr_sum - curr_P[idx])
                        if res == k:
                            return res
                    bisect.insort_left(curr_P, curr_sum)
        return res
# Unit Test
import unittest
class TestCase(unittest.TestCase):
    """Exercises every Solution variant against the known cases.

    Fix: the original loop only ran Solution1, leaving Solution2 untested;
    both implementations are now checked with the same inputs.  The empty
    setUp/tearDown boilerplate was removed.
    """

    def test_testCase(self):
        for Sol in [Solution1(), Solution2()]:
            func = Sol.maxSumSubmatrix
            self.assertEqual(func([[1,0,1],[0,-2,3]], 2), 2)
            self.assertEqual(func([[2,2,-1]], 0), -1)
            self.assertEqual(func([[-9,-6,-1,-7,-6,-5,-4,-7,-6,0],[-4,-9,-4,-7,-7,-4,-4,-6,-6,-6],[-2,-2,-6,-7,-7,0,-1,-1,-8,-2],[-5,-3,-1,-6,-1,-1,-6,-3,-4,-8],[-4,-1,0,-8,0,-9,-8,-7,-2,-4],[0,-3,-1,-7,-2,-5,-5,-5,-8,-7],[-2,0,-8,-2,-9,-2,0,0,-9,-6],[-3,-4,-3,-7,-2,-1,-9,-5,-7,-2],[-8,-3,-2,-8,-9,0,-7,-8,-9,-3],[-7,-4,-3,-3,-3,-1,0,-1,-8,-2]], -321), -323)
if __name__ == '__main__':
unittest.main() | [
"lilcolinn@gmail.com"
] | lilcolinn@gmail.com |
9b06082fd362d65269019bd114f3fabdf83ff4c5 | 740ed147112eddc2581504f5a8c5c4cb4dbe32f6 | /pprof/projects/pprof/sqlite3.py | e407453388a8fe510e87f19c8ee2998d4bd0e9c8 | [] | no_license | CIB/pprof-study | 3765499c8111dfcf6f690ea192b9ce235f1f28c4 | 9d6b995ba21ced3fa39327eff6dc34274e3b3b56 | refs/heads/master | 2021-01-22T01:33:53.452195 | 2016-02-15T14:45:14 | 2016-02-15T14:45:14 | 37,426,320 | 0 | 0 | null | 2015-06-14T19:28:01 | 2015-06-14T19:28:00 | null | UTF-8 | Python | false | false | 2,569 | py | from pprof.projects.pprof.group import PprofGroup
from os import path
from plumbum import local
class SQLite3(PprofGroup):
    """ SQLite3

    pprof benchmark project: builds the SQLite amalgamation as a shared
    library, builds LevelDB's db_bench_sqlite3 against it, and runs that
    benchmark as the project workload.
    """

    NAME = 'sqlite3'
    DOMAIN = 'database'

    # Pinned amalgamation release and its download location.
    src_dir = "sqlite-amalgamation-3080900"
    src_file = src_dir + ".zip"
    src_uri = "http://www.sqlite.org/2015/" + src_file

    def download(self):
        # Fetch and unpack the amalgamation, then clone LevelDB alongside it.
        from pprof.utils.downloader import Wget
        from plumbum.cmd import unzip

        with local.cwd(self.builddir):
            Wget(self.src_uri, self.src_file)
            unzip(self.src_file)
            self.fetch_leveldb()

    def configure(self):
        # Nothing to configure: the amalgamation is compiled directly.
        pass

    def build(self):
        # Compile sqlite3.c into libsqlite3.so with the instrumented clang,
        # then build the LevelDB benchmark that links against it.
        from pprof.utils.compiler import lt_clang
        from pprof.utils.run import run
        with local.cwd(self.builddir):
            sqlite_dir = path.join(self.builddir, self.src_dir)
            clang = lt_clang(self.cflags, self.ldflags,
                             self.compiler_extension)
        with local.cwd(sqlite_dir):
            run(clang["-fPIC", "-I.", "-c", "sqlite3.c"])
            run(clang["-shared", "-Wl,-soname,libsqlite3.so.0", "-o",
                "libsqlite3.so", "sqlite3.o", "-ldl"])
        with local.cwd(self.builddir):
            self.build_leveldb()

    def fetch_leveldb(self):
        src_uri = "https://github.com/google/leveldb"

        with local.cwd(self.builddir):
            from pprof.utils.downloader import Git
            Git(src_uri, "leveldb.src")

    def build_leveldb(self):
        from pprof.utils.compiler import lt_clang, lt_clang_cxx
        from pprof.utils.run import run
        from plumbum.cmd import make

        sqlite_dir = path.join(self.builddir, self.src_dir)
        leveldb_dir = path.join(self.builddir, "leveldb.src")

        # We need to place sqlite3 in front of all other flags.
        self.ldflags = ["-L", sqlite_dir] + self.ldflags
        self.cflags = ["-I", sqlite_dir] + self.cflags
        clang_cxx = lt_clang_cxx(self.cflags, self.ldflags)
        clang = lt_clang(self.cflags, self.ldflags)

        with local.cwd(leveldb_dir):
            with local.env(CXX=str(clang_cxx), CC=str(clang)):
                run(make["clean", "out-static/db_bench_sqlite3"])

    def run_tests(self, experiment):
        # Wrap the benchmark binary with the experiment runner and execute it.
        from pprof.project import wrap
        from pprof.utils.run import run

        leveldb_dir = path.join(self.builddir, "leveldb.src")
        with local.cwd(leveldb_dir):
            sqlite = wrap(
                path.join(leveldb_dir, "out-static", "db_bench_sqlite3"), experiment)
            run(sqlite)
| [
"simbuerg@fim.uni-passau.de"
] | simbuerg@fim.uni-passau.de |
4b2cd183f9317d769c5c7ab8699d92dc79d30b09 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /grow_bert/lowcost/layers/resolution_layer.py | b35bb95842ab66c6af0c41c742b726d6721f1191 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 4,229 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pooling layer to reduce input sequence length."""
import tensorflow as tf
from official.modeling import tf_utils
class MaskPoolLayer(tf.keras.layers.Layer):
  """Max-pools a [batch, seq_len] mask along the sequence axis.

  Position 0 (presumably the [CLS] token -- see `nocls`) and an optional
  tail of `unpooled_len` positions bypass the pooling and are re-attached
  unchanged.
  """

  def __init__(self, pool_size, nocls=True, **kwargs):
    super(MaskPoolLayer, self).__init__(**kwargs)
    self.nocls = nocls          # keep position 0 out of the pooling window
    self.pool_size = pool_size  # sequence-axis downsampling factor
    assert self.pool_size > 0

  def call(self, input_tensor, unpooled_len=0):
    # pool_size == 1 is the identity.
    if self.pool_size == 1:
      return input_tensor
    batch_size, seq_len = tf_utils.get_shape_list(input_tensor, expected_rank=2)
    # reshape tensor in order to use tf.nn.pool
    reshaped_tensor = tf.reshape(input_tensor, [batch_size, seq_len, 1])
    if self.nocls:
      tensor_to_pool = reshaped_tensor[:, 1:, :]
    else:
      tensor_to_pool = reshaped_tensor
    if unpooled_len > 0:
      tensor_to_pool = tensor_to_pool[:, :-unpooled_len, :]
    # Max pooling: a pooled position is set if any covered position was set.
    pooled_tensor = tf.nn.max_pool(
        tensor_to_pool,
        ksize=self.pool_size,
        strides=self.pool_size,
        padding='SAME')
    if self.nocls:
      # Re-attach the untouched first position.
      pooled_tensor = tf.concat([reshaped_tensor[:, 0:1, :], pooled_tensor],
                                axis=1)
    if unpooled_len > 0:
      # Re-attach the untouched tail.
      pooled_tensor = tf.concat(
          [pooled_tensor, reshaped_tensor[:, -unpooled_len:, :]], axis=1)
    pooled_tensor = tf.reshape(pooled_tensor, [batch_size, -1])
    return pooled_tensor
class EmbedPoolLayer(tf.keras.layers.Layer):
  """Pools [batch, seq_len, dim] token embeddings along the sequence axis.

  `pool_name` selects the mode: 'mean' and 'max' exclude position 0
  (presumably the [CLS] embedding -- TODO confirm) from pooling and
  re-attach it; 'concat' groups `pool_size` consecutive embeddings,
  concatenates them and projects back to `hidden_size`.  A `pool_name`
  of None or `pool_size` <= 1 is the identity.
  """

  def __init__(self, hidden_size, pool_size, pool_name=None, **kwargs):
    super(EmbedPoolLayer, self).__init__(**kwargs)
    self.pool_name = pool_name
    self.pool_size = pool_size
    self.hidden_size = hidden_size
    if self.pool_name == 'concat':
      # Projects the pool_size * embed_dim concatenation back to hidden_size.
      self.embedding_projection_dense = tf.keras.layers.Dense(
          self.hidden_size, name='resolution/projection_dense')

  def call(self, input_tensor, unpooled_len=0):
    if self.pool_size <= 1 or self.pool_name is None:
      return input_tensor
    # 'concat' pools the whole head of the sequence; 'mean'/'max' skip
    # position 0.  A trailing `unpooled_len` segment is always excluded.
    if self.pool_name == 'concat':
      if unpooled_len == 0:
        tensor_to_pool = input_tensor
      else:
        tensor_to_pool = input_tensor[:, :-unpooled_len, :]
    else:
      if unpooled_len == 0:
        tensor_to_pool = input_tensor[:, 1:, :]
      else:
        tensor_to_pool = input_tensor[:, 1:-unpooled_len, :]
    if self.pool_name == 'mean':
      pooled_tensor = tf.nn.avg_pool(
          tensor_to_pool,
          ksize=self.pool_size,
          strides=self.pool_size,
          padding='SAME')
      # Re-attach the untouched first position.
      pooled_tensor = tf.concat([input_tensor[:, 0:1, :], pooled_tensor],
                                axis=1)
    elif self.pool_name == 'max':
      pooled_tensor = tf.nn.max_pool(
          tensor_to_pool,
          ksize=self.pool_size,
          strides=self.pool_size,
          padding='SAME')
      # Re-attach the untouched first position.
      pooled_tensor = tf.concat([input_tensor[:, 0:1, :], pooled_tensor],
                                axis=1)
    elif self.pool_name == 'concat':
      # Requires the pooled span to divide evenly into groups of pool_size.
      batch_size, seq_len, embed_dim = tensor_to_pool.shape
      assert seq_len % self.pool_size == 0, (f'seqlen: {seq_len}, poolsize: '
                                             f'{self.pool_size}')
      pooled_len = seq_len // self.pool_size
      pooled_tensor = tf.reshape(
          tensor_to_pool, [batch_size, pooled_len, self.pool_size * embed_dim])
      pooled_tensor = self.embedding_projection_dense(pooled_tensor)
    elif self.pool_name is not None:
      raise NotImplementedError
    if unpooled_len > 0:
      # Re-attach the untouched tail.
      pooled_tensor = tf.concat(
          [pooled_tensor, input_tensor[:, -unpooled_len:, :]], axis=1)
    return pooled_tensor
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
e3444263f858ef21d7e3d42b693a6d4a0113c448 | e122b7aa4f2f0b09962c2a3580b9a440ed1dbcce | /aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/BatchGetEdgeInstanceDriverConfigsRequest.py | fc162a96be2081c218889caa3105e475c54d4f0c | [
"Apache-2.0"
] | permissive | laashub-soa/aliyun-openapi-python-sdk | 0a07805972fb5893f933951960d0f53fc2938df9 | c0d98a5363225fc8ad4aee74e30d10f62403319b | refs/heads/master | 2023-02-28T00:17:05.934082 | 2021-02-05T09:46:35 | 2021-02-05T09:46:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class BatchGetEdgeInstanceDriverConfigsRequest(RpcRequest):
	"""RPC request wrapper for the IoT `BatchGetEdgeInstanceDriverConfigs`
	API (2018-01-20).  Generated-style SDK code: one getter/setter pair
	per query parameter."""

	def __init__(self):
		RpcRequest.__init__(self, 'Iot', '2018-01-20', 'BatchGetEdgeInstanceDriverConfigs','iot')
		self.set_method('POST')
		# Attach region/endpoint routing data when the base class defines it.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_DriverIds(self):
		return self.get_query_params().get('DriverIds')

	def set_DriverIds(self, DriverIdss):
		# Repeated parameter: encoded as DriverIds.1, DriverIds.2, ...
		# (1-based); None entries are skipped.
		for depth1 in range(len(DriverIdss)):
			if DriverIdss[depth1] is not None:
				self.add_query_param('DriverIds.' + str(depth1 + 1) , DriverIdss[depth1])

	def get_IotInstanceId(self):
		return self.get_query_params().get('IotInstanceId')

	def set_IotInstanceId(self,IotInstanceId):
		self.add_query_param('IotInstanceId',IotInstanceId)

	def get_InstanceId(self):
		return self.get_query_params().get('InstanceId')

	def set_InstanceId(self,InstanceId):
		self.add_query_param('InstanceId',InstanceId)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
52020455331440cfe02942db01afd18aed00716a | 717558d6a075163294054bd5aea4ef3234df23ad | /models_all/st_test6.py | 020b56f9d7347dd2d7a7fd204cf82942113dc054 | [
"MIT"
] | permissive | RomeoV/pyomo-MINLP-benchmarking | 1270766397fbc4e57ea1bd0c2285fb7edf64062d | 996d2c8ee1cb9b03fe00c6246f52294337d8b92c | refs/heads/master | 2021-07-11T17:54:25.284712 | 2020-08-13T23:43:14 | 2020-08-13T23:43:14 | 185,664,992 | 8 | 1 | MIT | 2019-05-10T19:07:05 | 2019-05-08T19:09:05 | Python | UTF-8 | Python | false | false | 2,229 | py | # MINLP written by GAMS Convert at 05/15/20 00:51:23
#
# Equation counts
# Total E G L N X C B
# 6 1 0 5 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 11 1 0 10 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 57 47 10 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
# Pyomo reformulation of the GAMS "st_test6" MINLP: minimize a separable
# quadratic objective over ten 0-1 integer variables subject to five linear
# inequality constraints (see the GAMS Convert header above for counts).
model = m = ConcreteModel()

# Ten binary-valued integer decision variables, all starting at 0.
m.i1 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i2 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i4 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i5 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i6 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i7 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i8 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i9 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i10 = Var(within=Integers,bounds=(0,1),initialize=0)

# Separable quadratic objective: 50*ij^2 plus a variable-specific linear term.
m.obj = Objective(expr=50*m.i1*m.i1 + 48*m.i1 + 50*m.i2*m.i2 + 42*m.i2 + 50*m.i3*m.i3 + 48*m.i3 + 50*m.i4*m.i4 + 45*m.i4
                       + 50*m.i5*m.i5 + 44*m.i5 + 50*m.i6*m.i6 + 41*m.i6 + 50*m.i7*m.i7 + 47*m.i7 + 50*m.i8*m.i8 + 42*
                      m.i8 + 50*m.i9*m.i9 + 45*m.i9 + 50*m.i10*m.i10 + 46*m.i10, sense=minimize)

# Five linear inequality constraints (all "<=" form).
m.c1 = Constraint(expr= - 2*m.i1 - 6*m.i2 - m.i3 - 3*m.i5 - 3*m.i6 - 2*m.i7 - 6*m.i8 - 2*m.i9 - 2*m.i10 <= -4)

m.c2 = Constraint(expr= 6*m.i1 - 5*m.i2 + 8*m.i3 - 3*m.i4 + m.i6 + 3*m.i7 + 8*m.i8 + 9*m.i9 - 3*m.i10 <= 22)

m.c3 = Constraint(expr= - 5*m.i1 + 6*m.i2 + 5*m.i3 + 3*m.i4 + 8*m.i5 - 8*m.i6 + 9*m.i7 + 2*m.i8 - 9*m.i10 <= -6)

m.c4 = Constraint(expr= 9*m.i1 + 5*m.i2 - 9*m.i4 + m.i5 - 8*m.i6 + 3*m.i7 - 9*m.i8 - 9*m.i9 - 3*m.i10 <= -23)

m.c5 = Constraint(expr= - 8*m.i1 + 7*m.i2 - 4*m.i3 - 5*m.i4 - 9*m.i5 + m.i6 - 7*m.i7 - m.i8 + 3*m.i9 - 2*m.i10 <= -12)
| [
"bernalde@cmu.edu"
] | bernalde@cmu.edu |
669b4b361e5ae6e31c58326b71d172bdfac6fa46 | 1d0613fb401e92b6861ea3f615561df854603db6 | /KiBuzzard/deps/fonttools/Tests/ttLib/tables/otBase_test.py | ce0416e429bf1d1d925f0deb9a77811759bbcc93 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"OFL-1.1",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | gregdavill/KiBuzzard | 8c84a4339108c9942e1ec0e05e4110bba49fd265 | 88c4129b3fbed2cad718c01e5e2d29204e2f2071 | refs/heads/main | 2023-09-01T19:46:45.146077 | 2023-08-31T11:55:10 | 2023-08-31T11:55:10 | 328,686,533 | 358 | 36 | MIT | 2023-08-31T12:12:45 | 2021-01-11T14:16:42 | Python | UTF-8 | Python | false | false | 3,188 | py | from fontTools.misc.textTools import deHexStr
from fontTools.ttLib.tables.otBase import OTTableReader, OTTableWriter
import unittest
class OTTableReaderTest(unittest.TestCase):
    """Decoding tests for OTTableReader: each case feeds the reader a known
    byte string and checks both the decoded value and the advanced `pos`."""

    def test_readShort(self):
        reader = OTTableReader(deHexStr("CA FE"))
        self.assertEqual(reader.readShort(), -13570)  # 0xCAFE as signed 16-bit
        self.assertEqual(reader.pos, 2)

    def test_readLong(self):
        reader = OTTableReader(deHexStr("CA FE BE EF"))
        self.assertEqual(reader.readLong(), -889274641)  # signed 32-bit
        self.assertEqual(reader.pos, 4)

    def test_readUInt8(self):
        reader = OTTableReader(deHexStr("C3"))
        self.assertEqual(reader.readUInt8(), 0xC3)
        self.assertEqual(reader.pos, 1)

    def test_readUShort(self):
        reader = OTTableReader(deHexStr("CA FE"))
        self.assertEqual(reader.readUShort(), 0xCAFE)
        self.assertEqual(reader.pos, 2)

    def test_readUShortArray(self):
        reader = OTTableReader(deHexStr("DE AD BE EF CA FE"))
        self.assertEqual(list(reader.readUShortArray(3)),
                         [0xDEAD, 0xBEEF, 0xCAFE])
        self.assertEqual(reader.pos, 6)

    def test_readUInt24(self):
        reader = OTTableReader(deHexStr("C3 13 37"))
        self.assertEqual(reader.readUInt24(), 0xC31337)
        self.assertEqual(reader.pos, 3)

    def test_readULong(self):
        reader = OTTableReader(deHexStr("CA FE BE EF"))
        self.assertEqual(reader.readULong(), 0xCAFEBEEF)
        self.assertEqual(reader.pos, 4)

    def test_readTag(self):
        reader = OTTableReader(deHexStr("46 6F 6F 64"))
        self.assertEqual(reader.readTag(), "Food")  # 4-byte ASCII tag
        self.assertEqual(reader.pos, 4)

    def test_readData(self):
        reader = OTTableReader(deHexStr("48 65 6C 6C 6F"))
        self.assertEqual(reader.readData(5), b"Hello")
        self.assertEqual(reader.pos, 5)

    def test_getSubReader(self):
        # The sub-reader starts at offset 2; reads through it do not move
        # the parent reader's position.
        reader = OTTableReader(deHexStr("CAFE F00D"))
        sub = reader.getSubReader(2)
        self.assertEqual(sub.readUShort(), 0xF00D)
        self.assertEqual(reader.readUShort(), 0xCAFE)
class OTTableWriterTest(unittest.TestCase):
    """Encoding tests for OTTableWriter: write a single value and compare
    the accumulated bytes against the expected big-endian hex."""

    def test_writeShort(self):
        writer = OTTableWriter()
        writer.writeShort(-12345)
        self.assertEqual(writer.getData(), deHexStr("CF C7"))

    def test_writeLong(self):
        writer = OTTableWriter()
        writer.writeLong(-12345678)
        self.assertEqual(writer.getData(), deHexStr("FF 43 9E B2"))

    def test_writeUInt8(self):
        writer = OTTableWriter()
        writer.writeUInt8(0xBE)
        self.assertEqual(writer.getData(), deHexStr("BE"))

    def test_writeUShort(self):
        writer = OTTableWriter()
        writer.writeUShort(0xBEEF)
        self.assertEqual(writer.getData(), deHexStr("BE EF"))

    def test_writeUInt24(self):
        writer = OTTableWriter()
        writer.writeUInt24(0xBEEF77)
        self.assertEqual(writer.getData(), deHexStr("BE EF 77"))

    def test_writeULong(self):
        writer = OTTableWriter()
        writer.writeULong(0xBEEFCAFE)
        self.assertEqual(writer.getData(), deHexStr("BE EF CA FE"))
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
| [
"greg.davill@gmail.com"
] | greg.davill@gmail.com |
e66cb54f081ee6a1b66de4c70b0070575006b270 | caf135d264c4c1fdd320b42bf0d019e350938b2d | /04_Algorithms/Leetcode/JZ23 二叉搜索树的后序遍历序列.py | 9fd12c4174b4cea3111d6a7932fcc8b955ad6e21 | [] | no_license | coolxv/DL-Prep | 4243c51103bdc38972b8a7cbe3db4efa93851342 | 3e6565527ee8479e178852fffc4ccd0e44166e48 | refs/heads/master | 2022-12-31T22:42:20.806208 | 2020-10-23T10:19:19 | 2020-10-23T10:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py | # -*- coding:utf-8 -*-
class Solution1:
def VerifySquenceOfBST(self, sequence):
def helper(seq):
if 0 < len(seq) < 2:
return True
elif len(seq) == 0:
return False
else:
pivot = seq[-1]
place = -1
small, big = False, False
for idx, num in enumerate(seq[:-1]):
if num < pivot:
idx += 1
if big:
return False
elif num > pivot:
if place == -1:
place = idx
big = True
else:
return False
return helper(seq[:place]) and helper(seq[place:-1])
return helper(sequence)
# -*- coding:utf-8 -*-
# 根据跟左右分成左右两部分呢,只要右边都大于跟就可以
class Solution:
def VerifySquenceOfBST(self, sequence):
def helper(seq):
if 0 <= len(seq) < 2:
return True
pivot = seq[-1]
place = len(seq) - 1
for idx, num in enumerate(seq[:-1]):
if num > pivot:
place = idx
break
elif num == pivot:
return False
for num in seq[place:-1]:
if num <= pivot:
return False
return helper(seq[:place]) and helper(seq[place:-1])
if not sequence:
return False
return helper(sequence)
sol = Solution()
print(sol.VerifySquenceOfBST([4, 7, 5, 12, 10]))
print(sol.VerifySquenceOfBST([4, 9, 3, 12, 10]))
print(sol.VerifySquenceOfBST([4, 8, 6, 12, 16, 14, 10]))
print(sol.VerifySquenceOfBST([5, 4, 3, 2, 1]))
| [
"1574572981@qq.com"
] | 1574572981@qq.com |
e8fb944f87186041b706d37c50f6956045ae2213 | 7c39cc529a292967d8fc52718a227fccc0da53e4 | /src/crawler/crawler/items.py | 8341771ba3b5981723f73e6ed2a721d274f4581e | [] | no_license | carrie0307/parking-domain-detection | 860b5d1ad5e0e0344c4667e2e91149257a1068a6 | 9ff9f4f2b31c73599d0ac46eeb62634e2a8d940a | refs/heads/master | 2020-04-10T18:03:27.496758 | 2018-12-11T00:28:18 | 2018-12-11T00:28:18 | 161,192,700 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CrawlerItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
'''
oldDomain = scrapy.Field()
newDomain = scrapy.Field()
url_links = scrapy.Field()
'''
label = scrapy.Field()
name = scrapy.Field()
down_link = scrapy.Field()
apk_name = scrapy.Field()
| [
"cst_study@163.com"
] | cst_study@163.com |
6e78a0942b241bb27c202408d2d7248f3628cdbc | 9689ebc06e7c9a5c1b5b19d34dbcf0f5b5b82cb6 | /speech/migrations/0125_auto_20190121_1023.py | b2e25a07b6d803abbd5e98a66ad8e95273c52cfe | [] | no_license | tigrezhito1/Ramas | 94fe57dc4235616522aa50b36f5a655861ecbb9f | fa894fa69f6bf2a645179cadc11fb8809e82700a | refs/heads/master | 2020-05-02T07:03:03.564208 | 2019-03-26T14:55:29 | 2019-03-26T14:55:29 | 177,808,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,863 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-21 10:23
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('speech', '0124_auto_20190121_0906'),
]
operations = [
migrations.AlterField(
model_name='agente',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 421338)),
),
migrations.AlterField(
model_name='api',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 420012)),
),
migrations.AlterField(
model_name='base',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 418687)),
),
migrations.AlterField(
model_name='campania',
name='fecha',
field=models.DateTimeField(db_column='fecha cargada', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 416723)),
),
migrations.AlterField(
model_name='cliente',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 414579)),
),
migrations.AlterField(
model_name='estado',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 416022)),
),
migrations.AlterField(
model_name='supervisor',
name='fecha',
field=models.DateTimeField(db_column='fecha', default=datetime.datetime(2019, 1, 21, 10, 23, 15, 413576)),
),
]
| [
"you@example.com"
] | you@example.com |
877180851854964f8c776d432dcf5da1e3a1f906 | 4a7494457d4ffcc32fd663835ceaa6360488f9e6 | /largest-subarray.py | 8f90cd5e9901bd472435d910a0b9fe416b59f539 | [] | no_license | eliyahuchaim/randomCodeChallenges | c5e35c7afc80f6c2acc482878256bb4710bbbe55 | 780e4404bcad20c6ccaba7a11c52c8d67c96cd59 | refs/heads/master | 2021-05-01T07:18:19.164784 | 2018-02-11T18:21:17 | 2018-02-11T18:21:17 | 121,152,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | array = [1, -1, 5, 3, -7, 4, 5, 6, -100, 4]
def largestSubarray(array):
total = temp = 0
for n in array:
# temp += n if temp + n > 0 else 0
if temp + n < 0:
temp = 0
else:
temp += n
if total < temp: total = temp
return total
# print(largestSubarray(array))
def test(n):
if n < 2:
return n
return n * test(n-1)
print(test(5))
| [
"github email address"
] | github email address |
d217fb6e18d40c8667f11db7401b0d973258cfa1 | 5f2b22d4ffec7fc1a4e40932acac30256f63d812 | /mathematical-modeling/test/canadian_temperature.py | fcb568f1f5270207fcff7d06d953597f12abea66 | [] | no_license | Thpffcj/Python-Learning | 45734dd31e4d8d047eec5c5d26309bc7449bfd0d | 5dacac6d33fcb7c034ecf5be58d02f506fd1d6ad | refs/heads/master | 2023-08-04T21:02:36.984616 | 2021-09-21T01:30:04 | 2021-09-21T01:30:04 | 111,358,872 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,834 | py | # -*- coding: UTF-8 -*-
# Created by thpffcj on 2019/9/20.
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 40)
pd.set_option('display.width', 1000)
'''
[839 rows x 25 columns]
"Date/Time","Year","Month","Mean Max Temp (°C)","Mean Max Temp Flag","Mean Min Temp (°C)","Mean Min Temp Flag","Mean Temp (°C)","Mean Temp Flag","Extr Max Temp (°C)","Extr Max Temp Flag","Extr Min Temp (°C)","Extr Min Temp Flag","Total Rain (mm)","Total Rain Flag","Total Snow (cm)","Total Snow Flag","Total Precip (mm)","Total Precip Flag","Snow Grnd Last Day (cm)","Snow Grnd Last Day Flag","Dir of Max Gust (10's deg)","Dir of Max Gust Flag","Spd of Max Gust (km/h)","Spd of Max Gust Flag"
[366 rows x 27 columns]
"Date/Time","Year","Month","Day","Data Quality","Max Temp (°C)","Max Temp Flag","Min Temp (°C)","Min Temp Flag","Mean Temp (°C)","Mean Temp Flag","Heat Deg Days (°C)","Heat Deg Days Flag","Cool Deg Days (°C)","Cool Deg Days Flag","Total Rain (mm)","Total Rain Flag","Total RaiTotal Snow (cm)","Total Snow Flag","Total Precip (mm)","Total Precip Flag","Snow on Grnd (cm)","Snow on Grnd Flag","Dir of Max Gust (10s deg)","Dir of Max Gust Flag","Spd of Max Gust (km/h)","Spd of Max Gust Flag"
'''
data_1938_2007 = pd.read_csv("卡纳达气候数据/Manitoba/WINNIPEG_1938_2007.csv", skiprows=18)
# for i in range(2009, 2010):
# file_name = "卡纳达气候数据/Manitoba/WINNIPEG_daily_".join(str(i)).join(".csv")
file_name = "卡纳达气候数据/Manitoba/WINNIPEG_daily_2009.csv"
data = pd.read_csv(file_name, skiprows=24)
max_temp_data = data["Max Temp (°C)"]
min_temp_data = data["Min Temp (°C)"]
mean_temp_data = data["Mean Temp (°C)"]
result = pd.DataFrame(columns=["Date/Time", "Year", "Month"
"Mean Max Temp (°C)",
"Mean Min Temp (°C)",
"Mean Temp (°C)",
"Total Rain (mm)",
"Total Snow (cm)",
"Total Precip (mm)"])
# print(data)
for i in range(1, 13):
date_time = ""
# print(date_time)
mean_max_temp = data[data["Month"] == i]["Max Temp (°C)"].dropna().values.mean()
mean_min_temp = data[data["Month"] == i]["Min Temp (°C)"].dropna().values.mean()
mean_temp = data[data["Month"] == i]["Mean Temp (°C)"].dropna().values.mean()
total_rain = data[data["Month"] == i]["Total Rain (mm)"].dropna().values.sum()
total_snow = data[data["Month"] == i]["Total Snow (cm)"].dropna().values.sum()
total_precip = data[data["Month"] == i]["Total Precip (mm)"].dropna().values.sum()
print(result)
# result.to_csv('卡纳达气候数据/Manitoba/2009.csv', sep=',', header=True, index=True)
# month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
#
# location = 0
# for i in range(0, 12):
# max_temp = 0.0
# min_temp = 0.0
# mean_temp = 0.0
# day_not_null = 0.0
#
# # 循环每月
# for j in range(0, month[i]):
# # 最高气温之和
# if not np.isnan(max_temp_data[location]):
# max_temp += float(max_temp_data[location])
# day_not_null += 1
#
# # 最低气温之和
# if not np.isnan(min_temp_data[location]):
# min_temp += float(min_temp_data[location])
# day_not_null += 1
#
# # 平均气温之和
# if not np.isnan(mean_temp_data[location]):
# mean_temp += float(mean_temp_data[location])
# day_not_null += 1
#
# location = location + 1
#
# if day_not_null != 0:
# max_temp = max_temp / day_not_null
# min_temp = min_temp / day_not_null
# mean_temp = mean_temp / day_not_null
# print(i)
# print(max_temp)
# print(min_temp)
# print(mean_temp)
| [
"1441732331@qq.com"
] | 1441732331@qq.com |
5573cd63e7ebaa8b444ea121a417072282d3ba39 | 1fc45a47f0e540941c87b04616f3b4019da9f9a0 | /tests/sentry/api/endpoints/test_project_environments.py | 3279e53b271d1c8c31363bcb8df014286d498391 | [
"BSD-2-Clause"
] | permissive | seukjung/sentry-8.15.0 | febc11864a74a68ddb97b146cc1d2438ef019241 | fd3cab65c64fcbc32817885fa44df65534844793 | refs/heads/master | 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 | BSD-3-Clause | 2022-10-05T18:09:54 | 2018-01-17T12:28:13 | Python | UTF-8 | Python | false | false | 1,174 | py | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Environment
from sentry.testutils import APITestCase
class ProjectEnvironmentsTest(APITestCase):
def test_simple(self):
project = self.create_project()
env1 = Environment.objects.create(
project_id=project.id,
organization_id=project.organization_id,
name='production',
)
env1.add_project(project)
env2 = Environment.objects.create(
project_id=project.id,
organization_id=project.organization_id,
name='staging',
)
env2.add_project(project)
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-environments', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert response.data[0]['name'] == 'production'
assert response.data[1]['name'] == 'staging'
| [
"jeyce@github.com"
] | jeyce@github.com |
43765cb5e4883a166f855945d77778bc48262c7f | 4c489dadcc7d5b59e95eb1991085509372233d53 | /backend/home/migrations/0001_load_initial_data.py | 54607830076275e75394db8f9cbd3dc37bd32255 | [] | no_license | crowdbotics-apps/test-26812 | 1a304728cd5230c7f7a843bb37be5b1f2a65f923 | dedd52fc3ab5cb0d5d49f1dafd93232b246beb87 | refs/heads/master | 2023-05-07T09:40:39.041733 | 2021-05-17T18:41:05 | 2021-05-17T18:41:05 | 368,288,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "test-26812.botics.co"
site_params = {
"name": "test",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
42092263ef3f5e333a75f78220560c9da11a25c8 | 4ca853aeabbd3e33f5c0f28699b3de7206ad83a7 | /ML With python/ASSIGNMENT/p41.py | 657bbf07ebc5b1d9c5524a377bc2fbccd8239179 | [] | no_license | kundan4U/ML-With-Python- | 9fe1ce5049591040521b0809e9cd154158364c72 | 5376693ae3f52721663adc713f6926c0ccccbf75 | refs/heads/main | 2023-06-17T20:27:30.411104 | 2021-07-21T18:47:23 | 2021-07-21T18:47:23 | 388,214,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py |
list=[]
print("plese Enter 10 integer number")
for i in range(0,10):
n=int(input())
list.append(n)
print("Sum of list is :",sum(list))
print("Average of list is :",sum(list)/10) | [
"kc946605@gmail.com"
] | kc946605@gmail.com |
d5a0b82e66988683dc1a7a08141b7edbbd417a8e | 20aadf6ec9fd64d1d6dffff56b05853e0ab26b1f | /l6/L6_pbm10.py | 3ae3245b6a76d7e97b2c8dab28e13dd0e95b16a6 | [] | no_license | feminas-k/MITx---6.00.1x | 9a8e81630be784e5aaa890d811674962c66d56eb | 1ddf24c25220f8b5f78d36e2a3342b6babb40669 | refs/heads/master | 2021-01-19T00:59:57.434511 | 2016-06-13T18:13:17 | 2016-06-13T18:13:17 | 61,058,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | def howMany(aDict):
'''
aDict: A dictionary, where all the values are lists.
returns: int, how many values are in the dictionary.
'''
templist = aDict.values()
result = 0
for elmnt in aDict.values():
result += len(elmnt)
return result
| [
"femi1991@gmail.com"
] | femi1991@gmail.com |
2e8b923f0207d66b8e635ac23e77072882adefa1 | 62261d7fbd2feab54b1b5f9f0fef33fd784873f9 | /src/results/deepfool/merge.py | 3ca3a046015f8a2c7f425357dd4043e353ce924f | [] | no_license | ychnlgy/DeepConsensus-experimental-FROZEN | 50ebfe25b33ce8715fb9f24285969831cef311f2 | 904ae3988fee1df20273e002ba53a49a0d811192 | refs/heads/master | 2020-03-31T06:18:38.029959 | 2018-12-02T05:04:52 | 2018-12-02T05:04:52 | 151,976,523 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | import scipy.misc, os, collections, numpy
ORI = "original.png"
PER = "perturbed.png"
def pair(folder):
files = os.listdir(folder)
sep = collections.defaultdict(dict)
for f in files:
fid, lab = f.split("-")
im = scipy.misc.imread(os.path.join(folder, f))
sep[fid][lab] = im
h = 2*im.shape[0]
w = len(files)//2 * im.shape[1]
if len(im.shape) == 3:
c = im.shape[2]
size = (w, h, c)
out = numpy.zeros((w, h, c))
else:
size = (w, h)
out = numpy.zeros(size)
for i, (k, v) in enumerate(sep.items()):
out[i * im.shape[1]: (i+1)*im.shape[1], :im.shape[0]] = v[ORI]
out[i * im.shape[1]: (i+1)*im.shape[1], im.shape[0]:] = v[PER]
scipy.misc.imsave("merged.png", out)
if __name__ == "__main__":
import sys
pair(sys.argv[1])
| [
"ychnlgy@gmail.com"
] | ychnlgy@gmail.com |
e3063f03197b9586200a1b0b57c839c13178ae36 | 8f7dee55fd89be91f5cc0f3539099d49575fe3a9 | /profile/context_processors.py | a5e74883e9f039415a0d319a2c201278b3bad380 | [] | no_license | qoin-open-source/samen-doen | f9569439a5b715ed08b4a4dadc3e58a89f9e1d70 | 9feadb90fe14d458102ff3b55e60c56e0c8349e4 | refs/heads/master | 2020-05-03T23:59:43.794911 | 2019-04-02T21:15:18 | 2019-04-02T21:15:18 | 178,876,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | from django.conf import settings
#from cms.models import Page
def top_menu_items(request):
"""
Adds list of items to include in top menu (reverse ids from menu system).
"""
return {
'top_menu_items': settings.TOP_MENU_ITEMS
}
def tandc_url(request):
"""Adds terms and conditions url for language."""
# try:
# url = Page.objects.get(
# reverse_id='tandc', publisher_is_draft=False).get_absolute_url()
# except Page.DoesNotExist:
url = ''
return {
'tandc_url': url,
}
| [
"stephen.wolff@qoin.com"
] | stephen.wolff@qoin.com |
7714c67660d779859e7a8c5e203bd5850c7d3fcc | 092dd56a1bf9357466c05d0f5aedf240cec1a27b | /tests/fullscale/eqinfo/TestEqInfoTri.py | 90cb696cfd95f9f891ecb9e9d0875d78fd19a316 | [
"MIT"
] | permissive | rwalkerlewis/pylith | cef02d5543e99a3e778a1c530967e6b5f1d5dcba | c5f872c6afff004a06311d36ac078133a30abd99 | refs/heads/main | 2023-08-24T18:27:30.877550 | 2023-06-21T22:03:01 | 2023-06-21T22:03:01 | 154,047,591 | 0 | 0 | MIT | 2018-10-21T20:05:59 | 2018-10-21T20:05:59 | null | UTF-8 | Python | false | false | 2,571 | py | #!/usr/bin/env nemesis
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2022 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/eqinfo/TestEqInfoTri.py
#
# @brief Test suite for testing pylith_eqinfo with tri fault meshes.
import numpy
from TestEqInfo import TestEqInfo, run_eqinfo
class TestEqInfoTri(TestEqInfo):
"""Test suite for testing pylith_eqinfo with tri3 meshes.
"""
def setUp(self):
"""Setup for test.
"""
run_eqinfo("tri", ["tri.cfg"])
return
def test_stats(self):
"""Check fault stats.
"""
import stats_tri
timestamp = numpy.array([0.0, 1.0], dtype=numpy.float64)
oneE = stats_tri.RuptureStats()
oneE.timestamp = timestamp
oneE.ruparea = numpy.array([1.5 + 2.0, 1.5 + 2.0], dtype=numpy.float64)
slip0 = (0.2**2 + 0.5**2)**0.5
slip1 = (0.5**2 + 0.4**2)**0.5
oneE.potency = numpy.array(
[slip0 * 1.5 + slip1 * 2.0, 0.1 * 1.5 + 0.2 * 2.0], dtype=numpy.float64)
oneE.moment = numpy.array([slip0 * 1.5 * 1.0e+10 + slip1 * 2.0 * 2.0e+10,
0.1 * 1.5 * 1.0e+10 + 0.2 * 2.0 * 2.0e+10], dtype=numpy.float64)
self._check(oneE, stats_tri.one)
twoE = stats_tri.RuptureStats()
twoE.timestamp = timestamp
twoE.ruparea = numpy.array([1.5, 0.0], dtype=numpy.float64)
twoE.potency = numpy.array([0.1 * 1.5, 0.0], dtype=numpy.float64)
twoE.moment = numpy.array(
[0.1 * 1.5 * 1.0e+10, 0.0], dtype=numpy.float64)
self._check(twoE, stats_tri.two)
allE = stats_tri.RuptureStats()
allE.timestamp = timestamp
allE.ruparea = oneE.ruparea + twoE.ruparea
allE.potency = oneE.potency + twoE.potency
allE.moment = oneE.moment + twoE.moment
self._check(allE, stats_tri.all)
return
# ----------------------------------------------------------------------
if __name__ == '__main__':
import unittest
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestEqInfoTri))
unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
| [
"baagaard@usgs.gov"
] | baagaard@usgs.gov |
121e27cfcaef5efab2c56974139a1bafd566952e | edbf8601ae771031ad8ab27b19c2bf450ca7df76 | /19-Remove-Nth-Node-From-End-of-List/RemoveNthNodeFromEndOfList.py3 | 8a93b5636d86f21ee4c093c44be72653cf74c6a3 | [] | no_license | gxwangdi/Leetcode | ec619fba272a29ebf8b8c7f0038aefd747ccf44a | 29c4c703d18c6ff2e16b9f912210399be427c1e8 | refs/heads/master | 2022-07-02T22:08:32.556252 | 2022-06-21T16:58:28 | 2022-06-21T16:58:28 | 54,813,467 | 3 | 2 | null | 2022-06-21T16:58:29 | 2016-03-27T05:02:36 | Java | UTF-8 | Python | false | false | 670 | py3 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
if head==None or n<0 :
return None
dummy = ListNode(0)
dummy.next = head
prev = dummy
target = head
tail = head
i = 0
while i<n and tail!=None:
tail = tail.next
i+=1
while tail!=None:
tail = tail.next
target = target.next
prev = prev.next
prev.next = target.next
return dummy.next
| [
"gxwangdi@gmail.com"
] | gxwangdi@gmail.com |
755afec3b1065969f31399dc2818065df4c4ce1b | e3fe234510d19c120d56f9a2876b7d508d306212 | /tool/add_user_info.py | cf7926ddc05cd92b753d4b9fb6fff028de2b4ba3 | [
"Apache-2.0"
] | permissive | KEVINYZY/python-tutorial | 78b348fb2fa2eb1c8c55d016affb6a9534332997 | ae43536908eb8af56c34865f52a6e8644edc4fa3 | refs/heads/master | 2020-03-30T02:11:03.394073 | 2019-12-03T00:52:10 | 2019-12-03T00:52:10 | 150,617,875 | 0 | 0 | Apache-2.0 | 2018-09-27T16:39:29 | 2018-09-27T16:39:28 | null | UTF-8 | Python | false | false | 863 | py | # -*- coding: utf-8 -*-
# Author: XuMing <shibing624@126.com>
# Data: 17/8/30
# Brief:
import sys
path_user_cdc_client = sys.argv[1]
path_file = sys.argv[2]
path_output = sys.argv[3]
userid_map = {}
with open(path_user_cdc_client, "r")as f:
for line in f:
userid = line.strip().split("\t")[0]
userid = userid.decode("gb18030")
userid_map[userid] = line.strip().decode("gb18030")
content = set()
with open(path_file, "r") as f:
for line in f:
# parts = (line.strip()).decode("utf-8").split("\t")
# userid = parts[0]
userid = (line.strip()).decode("utf8")
if userid in userid_map:
content.add((line.strip()).decode("utf-8") + "\t" + userid_map[userid])
with open(path_output, "w") as f:
for line in content:
f.write((line.strip()).encode("utf-8"))
f.write("\n")
| [
"507153809@qq.com"
] | 507153809@qq.com |
a7cb90a23bc7cad769ce94384e03a03fa159fdf6 | fd8ef75bb06383538cdb21ed2a0ef88e570179b7 | /src/openfermion/resource_estimates/thc/compute_lambda_thc.py | 16fa0ef8d1fcf0fdb71359635f9c43ccda4892ed | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | quantumlib/OpenFermion | d1147383f99573d19005bd0f3e0120e9e9bed04c | 788481753c798a72c5cb3aa9f2aa9da3ce3190b0 | refs/heads/master | 2023-09-04T11:00:32.124157 | 2023-08-24T21:54:30 | 2023-08-24T21:54:30 | 104,403,768 | 1,481 | 406 | Apache-2.0 | 2023-08-24T21:54:31 | 2017-09-21T22:10:28 | Python | UTF-8 | Python | false | false | 2,973 | py | #coverage:ignore
"""
Compute lambdas for THC according to
PRX QUANTUM 2, 030305 (2021) Section II. D.
"""
import numpy as np
from openfermion.resource_estimates.molecule import pyscf_to_cas
def compute_lambda(pyscf_mf,
etaPp: np.ndarray,
MPQ: np.ndarray,
use_eri_thc_for_t=False):
"""
Compute lambda thc
Args:
pyscf_mf - PySCF mean field object
etaPp - leaf tensor for THC that is dim(nthc x norb). The nthc and norb
is inferred from this quantity.
MPQ - central tensor for THC factorization. dim(nthc x nthc)
Returns:
"""
nthc = etaPp.shape[0]
# grab tensors from pyscf_mf object
h1, eri_full, _, _, _ = pyscf_to_cas(pyscf_mf)
# computing Least-squares THC residual
CprP = np.einsum("Pp,Pr->prP", etaPp,
etaPp) # this is einsum('mp,mq->pqm', etaPp, etaPp)
BprQ = np.tensordot(CprP, MPQ, axes=([2], [0]))
Iapprox = np.tensordot(CprP, np.transpose(BprQ), axes=([2], [0]))
deri = eri_full - Iapprox
res = 0.5 * np.sum((deri)**2)
# NOTE: remove in future once we resolve why it was used in the first place.
# NOTE: see T construction for details.
eri_thc = np.einsum("Pp,Pr,Qq,Qs,PQ->prqs",
etaPp,
etaPp,
etaPp,
etaPp,
MPQ,
optimize=True)
# projecting into the THC basis requires each THC factor mu to be nrmlzd.
# we roll the normalization constant into the central tensor zeta
SPQ = etaPp.dot(
etaPp.T) # (nthc x norb) x (norb x nthc) -> (nthc x nthc) metric
cP = np.diag(np.diag(
SPQ)) # grab diagonal elements. equivalent to np.diag(np.diagonal(SPQ))
# no sqrts because we have two normalized THC vectors (index by mu and nu)
# on each side.
MPQ_normalized = cP.dot(MPQ).dot(cP) # get normalized zeta in Eq. 11 & 12
lambda_z = np.sum(np.abs(MPQ_normalized)) * 0.5 # Eq. 13
# NCR: originally Joonho's code add np.einsum('llij->ij', eri_thc)
# NCR: I don't know how much this matters.
if use_eri_thc_for_t:
# use eri_thc for second coulomb contraction. This was in the original
# code which is different than what the paper says.
T = h1 - 0.5 * np.einsum("illj->ij", eri_full) + np.einsum(
"llij->ij", eri_thc) # Eq. 3 + Eq. 18
else:
T = h1 - 0.5 * np.einsum("illj->ij", eri_full) + np.einsum(
"llij->ij", eri_full) # Eq. 3 + Eq. 18
#e, v = np.linalg.eigh(T)
e = np.linalg.eigvalsh(T) # only need eigenvalues
lambda_T = np.sum(
np.abs(e)) # Eq. 19. NOTE: sum over spin orbitals removes 1/2 factor
lambda_tot = lambda_z + lambda_T # Eq. 20
#return nthc, np.sqrt(res), res, lambda_T, lambda_z, lambda_tot
return lambda_tot, nthc, np.sqrt(res), res, lambda_T, lambda_z
| [
"noreply@github.com"
] | quantumlib.noreply@github.com |
308fa8a66e05dc307f31798d5666ddc5f04c04b9 | a6d8465aed280c36fb7129e1fa762535bae19941 | /embroidery365/builder/models.py | 2a66c14eedd56fbd8017ed36c96924943f213b2b | [] | no_license | rahuezo/365digitizing_and_embroidery | c61c53f567e73163a67d3fd568a20551a3681ccd | 41a22b6ff8bd83238219f2d34ce13b5a8ef9bb57 | refs/heads/master | 2020-09-02T11:59:07.702947 | 2017-11-11T02:40:01 | 2017-11-11T02:40:01 | 98,377,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
class BaseItem(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Style(models.Model):
name = models.CharField(max_length=255)
base_item = models.ForeignKey(BaseItem)
def __str__(self):
return self.name
class Size(models.Model):
size = models.CharField(max_length=3)
base_item = models.ForeignKey(BaseItem)
def __str__(self):
return "{0}-{1}".format(self.base_item.name, self.size)
class Placement(models.Model):
position = models.CharField(max_length=255)
base_item = models.ForeignKey(BaseItem)
def __str__(self):
return "{0}-{1}".format(self.base_item.name, self.position)
class Order(models.Model):
customer = models.ForeignKey(User, on_delete=models.CASCADE)
order_base_item = models.CharField(max_length=255)
order_style = models.CharField(max_length=255)
order_logo = models.ImageField(blank=True)
order_item_placement = models.CharField(max_length=255)
order_logo_width = models.DecimalField(max_digits=10, decimal_places=2)
order_logo_height = models.DecimalField(max_digits=10, decimal_places=2)
order_details = models.TextField(blank=True)
total = models.DecimalField(max_digits=10, decimal_places=2)
extra_details = models.TextField(blank=True, null=True)
logo_colors = models.TextField(blank=True, null=True)
created = models.DateField(auto_now_add=True)
def __str__(self):
return "{0} {1} {2}".format(self.customer, self.order_base_item, self.order_style)
| [
"rahuezo@ucdavis.edu"
] | rahuezo@ucdavis.edu |
0c681f2924ffd76c5f9ae78985c972d4343bc44c | 0d2f636592dc12458254d793f342857298c26f12 | /11-1(tag).py | 734ce229ca7758695bc91f180e55aa7f25671b3e | [] | no_license | chenpc1214/test | c6b545dbe13e672f11c58464405e024394fc755b | 8610320686c499be2f5fa36ba9f11935aa6d657b | refs/heads/master | 2022-12-13T22:44:41.256315 | 2020-09-08T16:25:49 | 2020-09-08T16:25:49 | 255,796,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
def absolute(n):
""" 絕對值設計 """
if n < 0:
n = -n
print("絕對值是 ", n)
x = int(input("請輸入數值 = "))
absolute(x)
"""自己做的"""
"""n = input("請輸入數值=")
def absolute(n):
return abs(n)"""
| [
"kkbuger1523@gmail.com"
] | kkbuger1523@gmail.com |
fbd130a61a01bd9a6823abfaf12fe6aaabe8bdfa | 7e80b503e6563f190147e5e6bc2b47d5d8020510 | /xpdan/tests/test_main_pipeline.py | d48ca0a8d228efbdc4760025b3eb6e1f3beab1e4 | [] | no_license | eaculb/xpdAn | a687c15175fdff82a0f2092509faa7c2da288357 | 82e16ba50ddbfb9dcd0dba9b3b181354c329d58a | refs/heads/master | 2021-09-04T06:07:48.173024 | 2017-10-11T14:24:59 | 2017-10-11T14:24:59 | 106,735,663 | 0 | 0 | null | 2017-10-12T19:20:47 | 2017-10-12T19:20:47 | null | UTF-8 | Python | false | false | 1,230 | py | import os
import time
from xpdan.pipelines.main import conf_main_pipeline
def test_main_pipeline(exp_db, fast_tmp_dir, start_uid3):
"""Decider between pipelines"""
source = conf_main_pipeline(exp_db, fast_tmp_dir,
vis=True,
write_to_disk=True,
mask_setting=None,
verbose=True)
# source.visualize('/home/christopher/dev/xpdAn/examples/mystream.png')
t0 = time.time()
for nd in exp_db[-1].documents(fill=True):
source.emit(nd)
t1 = time.time()
print(t1 - t0)
for root, dirs, files in os.walk(fast_tmp_dir):
level = root.replace(fast_tmp_dir, '').count(os.sep)
indent = ' ' * 4 * level
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
assert 'Au' in os.listdir(fast_tmp_dir)
assert 'Au_{:.6}.yml'.format(start_uid3) in os.listdir(
os.path.join(fast_tmp_dir, 'Au'))
for f in ['dark_sub', 'mask', 'iq_q', 'iq_tth', 'pdf']:
assert f in os.listdir(
os.path.join(fast_tmp_dir, 'Au'))
| [
"cjwright4242@gmail.com"
] | cjwright4242@gmail.com |
33ffbc046e4b0ac1d10d366ef83a771449e1ddc9 | 5ba2ea4694d9423bc5435badba93b7b8fedfadd0 | /webapp/common/form_filter.py | b8a0c53c890bb381213adb1227e781f2a1f1a292 | [] | no_license | Digital-Botschafter-und-mehr/mein-stadtarchiv | bdf480d82b366253afd27c697143ad5d727f652f | a9876230edac695710d4ec17b223e065fa61937c | refs/heads/master | 2023-02-05T18:43:13.159174 | 2021-01-01T09:35:46 | 2021-01-01T09:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | # encoding: utf-8
"""
Copyright (c) 2017, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
def json_filter(value):
return json.loads(value)
| [
"mail@ernestoruge.de"
] | mail@ernestoruge.de |
ba73735c7237f4e48b4b1fbd2aa067c357f01d0e | c2be187155aabf59a4c0d3f5065bc26239c0b827 | /get_products.py | f2882901593542808abf671d139350736cc370d0 | [] | no_license | dankCodeNugs/tmtext | 1d6c54f79399bfa5e6f3905c0f72ba0be59d8d0d | 8e2d834775f440def7f57294674b8109b46ee191 | refs/heads/master | 2023-03-16T08:49:38.456929 | 2016-12-20T19:45:54 | 2016-12-20T19:45:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | #!/usr/bin/python
# extract products from a certain output file, that belong to a certain category/department
# and write their respective category ids to a file
# usage: site given as first argument, category as second argument
import json
import codecs
import re
import sys
from pprint import pprint
def get_products(filename, category):
output_all = codecs.open(filename, "r", "utf-8")
products = []
for line in output_all:
# print line
if line.strip():
item = json.loads(line.strip())
if 'department' in item:
if item['department'] == category:
products.append(item['product_name'])
if 'category' in item:
if item['category'] == category:
products.append(item['product_name'])
# close all opened files
output_all.close()
return products
site = sys.argv[1]
category = sys.argv[2]
filename = "sample_output/" + site + "_bestsellers_dept.jl"
prods = get_products(filename, category)
pprint(prods)
| [
"life.long.learner127@outlook.com"
] | life.long.learner127@outlook.com |
5ccb3f3b16bf9a927d6c3b37a551c8127225de2e | 696b9b8963a6b26776849f69263e50860317a37d | /PyPoll/main.py | 61a19cdc1d53b9a47dffc15695d5372dc380ec3e | [] | no_license | neelarka/python-challenge | 64a7099470b2885511b568625c8c0e320801da82 | 9060c30ed9a319807fd5bd8756b3bc36f522bbc8 | refs/heads/master | 2020-03-24T22:00:26.874430 | 2018-08-05T04:23:11 | 2018-08-05T04:23:11 | 143,061,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,350 | py | import os
import csv
from pathlib import Path

# Source data: one ballot per row -> (Voter ID, County, Candidate).
filepath = Path("../../Desktop/election_data.csv")

# Candidates in report order; this is also the tie-break order for the
# winner (the first candidate with the top count wins a tie), matching
# the original if/elif chain.
CANDIDATES = ["Khan", "Correy", "Li", "O'Tooley"]
votes = {name: 0 for name in CANDIDATES}
total_votes = 0

# Single pass over the ballots; the original built three parallel lists
# and re-scanned the candidate list once per candidate.
with open(filepath, newline="", encoding="utf-8") as csvfile:
    rows = csv.reader(csvfile, delimiter=",")
    next(rows, None)  # skip the header row
    for row in rows:
        total_votes += 1
        name = row[2]
        if name in votes:
            votes[name] += 1

# NOTE(review): shares are reported as fractions (e.g. 0.630), not
# percentages -- kept that way to preserve the original report values.
report_lines = ["Election Results", "--------------------",
                "The Total Votes : " + str(total_votes), "--------------------"]
for name in CANDIDATES:
    share = votes[name] / total_votes
    report_lines.append("%s: %.3f (%d)" % (name, share, votes[name]))
report_lines.append("--------------------")
# max() scans CANDIDATES in order, so the first candidate with the top
# vote count wins ties -- same tie-break as the original if/elif chain.
winner = max(CANDIDATES, key=lambda name: votes[name])
report_lines.append(" Winner : " + winner)
report_lines.append("--------------------")

report = "\n".join(report_lines)
print(report)

# Mirror the console report to disk.  Bug fix: the original file output
# omitted Correy's line entirely; every candidate is written here.  The
# 'with' block also guarantees the handle is closed on any error.
with open("Output_PyPoll.txt", "w") as text_file:
    text_file.write(report + "\n")
| [
"you@example.com"
] | you@example.com |
5e08cd42df0284a8dbe8ab6c59c652b94834a5ae | b2d4c5b7738f3b53126d73bcac6165cbb32445eb | /05_数据存储/02-关系型数据库存储/_01_MySQL的存储/_05_删除数据.py | c378a6ab681dba229320434e26d188cf3b8b0411 | [] | no_license | xuelang201201/Python3Spider | 933911abb5056bc7864d3c6bfaf1d7f75ca6ac98 | 0b190f11f74f66058eda6a40a000c5d6076764ea | refs/heads/master | 2022-07-02T11:15:42.032997 | 2020-05-16T13:01:08 | 2020-05-16T13:01:08 | 259,358,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import pymysql
# One-shot maintenance script: delete every student older than 20 from
# the demo 'spiders' database, committing on success.
table = 'students'
condition = 'age > 20'
sql = 'DELETE FROM {table} WHERE {condition}'.format(table=table, condition=condition)

db = pymysql.connect(host='localhost', user='root', password='123456', port=3306, db='spiders')
cursor = db.cursor()
try:
    cursor.execute(sql)
except Exception as reason:
    # Roll back so a failed DELETE leaves no partial transaction state.
    print('Failed: ' + str(reason))
    db.rollback()
else:
    print('Successful')
    db.commit()
db.close()
| [
"xuelang201201@gmail.com"
] | xuelang201201@gmail.com |
903c419d5b5ada8e3f10bf8af828028c8b21c111 | 0a9949a7dbe5f7d70028b22779b3821c62eb6510 | /static/flight_price_analysis/conf/conf.py | 03dee15c55f02670e982b0b01d56b2fd80b699f2 | [] | no_license | 744996162/warehouse | ed34f251addb9438a783945b6eed5eabe18ef5a2 | 3efd299a59a0703a1a092c58a6f7dc2564b92e4d | refs/heads/master | 2020-06-04T22:10:14.727156 | 2015-07-03T09:40:09 | 2015-07-03T09:40:09 | 35,603,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | #coding=utf-8
__author__ = 'zhangc'
import ConfigParser
import os
import sys
# Project root: two directory levels above this module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Test paths (kept for reference):
# conf_path=path+"/conf/db.conf"
# conf_path=path+"/conf/db3.conf"
# Deployment paths (kept for reference):
# conf_path="conf/db3.conf"
# conf_path="db.conf"
conf_path = BASE_DIR+"/conf/db.conf"
class DBConf(object):
    """Lazy singleton around the db.conf INI file; obtain it via getInst()."""
    # Shared instance, created on the first getInst() call.
    _inst=None
    def __init__(self):
        # Parse the INI-style config once per instance.
        self.config=ConfigParser.ConfigParser()
        with open(conf_path,'r') as conf_file:
            # with open(conf_path,'r') as conf_file:
            self.config.readfp(conf_file)
    @staticmethod
    def getInst():
        """Return the shared DBConf, creating and initializing it lazily."""
        if not DBConf._inst:
            # Allocate without running __init__, then initialize explicitly.
            DBConf._inst = object.__new__(DBConf)
            DBConf._inst.__init__()
        return DBConf._inst
    def get_mysql(self, key):
        """Return the value of *key* from the [mysql] section."""
        return self.config.get('mysql', key)
if __name__=="__main__":
    pass
    # Manual smoke test: print one known key from the [mysql] section.
    test=DBConf()
    # print(test.get_mysql("databasegtgj"))
    print(test.get_mysql("databasebi"))
| [
"744996162@qq.com"
] | 744996162@qq.com |
4c67b10133e699217a67331d1ee268eb65d7d2c7 | 34cab614568d4ce3cf28167450d6d2bc2bf7bfbf | /importers/cunia.py | 287229bfda6b4aace79d72da50a213322351c454 | [] | no_license | Halicea/ArmandDictionar | 07949936efd3a55edfa1e7a1e12d1ed8c48c4bdf | a82f77065e03cafa6c6b50c163fa53858ab356b8 | refs/heads/master | 2016-09-05T14:03:41.261258 | 2013-04-29T10:27:27 | 2013-04-29T10:27:27 | 964,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,917 | py | # -*- coding: utf-8 -*-
import codecs
import re
import os
# Target languages whose translations are extracted from each entry.
langs = ['ro', 'en', 'fr']
# Run-wide counters, mutated by parse_line() and reported at the end.
err_count = 0
tr_not_found = {'ro':0, 'en':0, 'fr':0}
with_ref=0
def parse_line(l, index):
    """Split one dictionary line into word records.

    A line may hold several synonym variants separated by u'§'.  For each
    variant a dict is built with the Aromanian headword ('rmn'), a list of
    translations per language in `langs`, an optional cross-reference
    ('referece' -- sic; key kept as-is since downstream code reads it),
    the raw text, and the offset where the definition starts.  Also
    updates the module-level counters.
    """
    global err_count
    global tr_not_found
    global with_ref
    results = []
    # synonyms / similar meanings are separated by the section sign
    words = l.split(u'§')
    for i in range(0, len(words)):
        res = {}
        # NOTE(review): this shadows the 'index' parameter -- the stored
        # value is the character offset of the definition within the word,
        # not the caller's entry counter; presumably intentional, confirm.
        index = 0
        w = words[i].strip()
        if ' ' in w:
            # everything before the first space is the headword
            res['rmn']=w[:w.index(' ')]
            index = w.index(' ')+1
        # find translations of the form {xx:a,b,c}
        for lang in langs:
            key = '{%s:'%lang
            if( key in w):
                lindex = w.index(key)+len(lang)+2
                try:
                    rindex = w.index('}', lindex)
                    res[lang] = w[lindex:rindex].split(',')
                except:
                    # unterminated block: count it and leave this lang out
                    err_count+=1
                    #print w.encode('utf-8', errors='ignore')
            elif not ('vedz' in w):
                # no translation block and no cross-reference either
                tr_not_found[lang] +=1
                res[lang]=[]
        if 'vedz' in w:
            # 'vedz' marks a "see also" cross-reference to another entry
            with_ref+=1
            res['referece']= w[w.index('vedz')+4:]
        res['raw'] = w
        res['index'] = index
        results.append(res)
    return results
# --- Pass 1: read the raw dictionary dump and stitch wrapped lines ---
directory = '/Users/kostamihajlov/Desktop/cunia'
d =[]
merge_count = 0
warn_merge = 0
max_merge =10
current_let = None
line = 0
# the source dump is UTF-16 encoded
lines = tuple(codecs.open(os.path.join(directory,'cunia.txt'),'r', 'utf16'))
lang_matcher = "\{[fr: en: ro:].*\}"
# a stored entry ending in one of these clearly continues on the next line
merge_required = [u'unã cu', u'vedz', u'tu-aestu', '{ro:','{en:','{fr:']
merge_required_count = 0
for k in lines:
    # drop object-replacement characters left over from text extraction
    clean= k.strip().replace(u'\ufffc', '')
    if len(clean)==1:
        # a single-character line marks the start of a new letter section
        current_let = clean
        print 'Starting with letter:%s'%current_let
    elif not clean:
        pass
    elif u"Dictsiunar a Limbãljei Armãneascã" in clean:
        # repeated page header of the dictionary -- skip
        pass
    else:
        merged = False
        if d:
            for k in merge_required:
                if(d[-1].endswith(k)):
                    d[-1] = d[-1]+' '+clean
                    merged = True
                    merge_required_count+=1
                    break;
        if not merged:
            # entries start with the current section letter; anything else
            # is treated as a continuation of the previous entry
            if(clean[0].lower()==current_let.lower()):
                d.append(clean)
                merge_count = 0
            else:
                d[-1] = d[-1]+' '+clean
                merge_count+=1
                #print u'Merging line %s and merge count is %s'%(line, merge_count)
                if merge_count>=max_merge:
                    #print 'Max Merge received on line %s'%line
                    pass
    line+=1
# --- Pass 2: parse the stitched entries into word records ---
wc = 0
final = []
index = 0
for w in d:
    final.extend(parse_line(w, index))
    index+=1
# --- Pass 3: write one UTF-16 output file per starting letter ---
current_letter = None
prev_letter = None
f = None
for w in final:
    try:
        current_letter = w['rmn'][0]
        if current_letter!=prev_letter:
            # new letter: close the previous output file, open the next one
            prev_letter = current_letter
            if f: f.close()
            f = codecs.open(os.path.join(directory, current_letter+'.txt'), 'w', 'utf-16')
        f.write('%s %s en:%s fr:%s ro:%s\n'%(w['index'], w['rmn'], w['en'], w['fr'], w['ro']))
    except:
        # records without a headword or translations are just dumped
        print w
# --- Summary counters collected during the run ---
print 'Regular Merges:', merge_required_count
print 'Total Words', len(final)
print 'Total Lines', len(d)
print 'Errors', err_count
print 'References', with_ref
print 'Without translations', tr_not_found
"costa@halicea.com"
] | costa@halicea.com |
c8dee9b1d52c4575ef88af44d875106e2a851a69 | 1f620140538728b25fd0181e493975534aa0e1fb | /project/basis/admin.py | 29ffeb36206b3b503e22a6c5ab29970a5e48bb91 | [] | no_license | YukiUmetsu/recipe-app-api-python-django | 2a22f63871489cd073d5c312e20fd9fe49eee5a5 | abaf4a0826e840e990781b20aaa5d7f0577c54c5 | refs/heads/master | 2022-11-30T03:11:16.129881 | 2020-03-03T20:04:00 | 2020-03-03T20:04:00 | 244,045,701 | 0 | 0 | null | 2022-11-22T05:21:23 | 2020-02-29T21:41:25 | Python | UTF-8 | Python | false | false | 947 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from basis import models
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based User model."""
    ordering = ['id']
    list_display = ['email', 'name']
    # Field layout for the change (edit) page.
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal Info'), {'fields': ('name',)}),
        (
            _('Permissions'),
            {
                'fields': (
                    'is_active',
                    'is_staff',
                    'is_superuser',
                )
            }
        ),
        (_('Important dates'), {'fields': ('last_login',)}),
    )
    # Field layout for the add page; password entered twice for confirmation.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')
        }),
    )
# Hook the models into the admin site; User gets the customized admin above.
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Tag)
admin.site.register(models.Ingredient)
| [
"yuuki.umetsu@gmail.com"
] | yuuki.umetsu@gmail.com |
f8e645fdf821b4a8a13e40bf4c64379b0006bd1f | 8af71789222675dddd541bafba681143162f4206 | /apps/entidades/admin.py | 0036ab360b549a2fda945f3612c3020591d73f18 | [] | no_license | RubenAlvarenga/nhakanina | b82d23d80e06aaf49693c8fb65a70ee73e130994 | 3e39a522029c9a6cbb455b2e736ce335ebc4bf1d | refs/heads/master | 2021-01-10T15:32:01.550423 | 2016-03-07T17:34:09 | 2016-03-07T17:34:09 | 43,449,047 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 873 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Persona, Alumno
from apps.finanzas.models import PlanPago
class AlumnoAdmin(admin.ModelAdmin):
    """Admin list/search configuration for students (Alumno)."""
    list_display = ('codigo', 'cedula', 'get_full_name')
    search_fields = ['cedula', 'apellido1', 'apellido2', 'nombre1', 'nombre2']
    list_display_links = ('get_full_name',)
class PersonaAdmin(admin.ModelAdmin):
    """Admin list/search configuration for people (Persona)."""
    list_display = ('id', 'cedula', 'get_full_name')
    search_fields = ['cedula', 'apellido1', 'apellido2', 'nombre1', 'nombre2']
    list_display_links = ('get_full_name',)
class PlanPagoInine(admin.TabularInline):
    """Read-only inline of payment-plan rows rendered with a custom template.

    Note: the class name has a typo ('Inine'); kept as-is for backward
    compatibility with any existing references.
    """
    model = PlanPago
    readonly_fields = ('concepto', 'estado', 'vencimiento', 'secuencia', 'monto')
    template = 'catedras/planpago_inline.html'
# Expose people and students in the admin with their custom options.
admin.site.register(Persona, PersonaAdmin)
admin.site.register(Alumno, AlumnoAdmin)
| [
"rubenalvarengan@gmail.com"
] | rubenalvarengan@gmail.com |
c5b809b7fc38a9ff5c2f441e16cef7229f172c5c | 019fd2c29b8239d7b0a3906cfbdddfd440362417 | /automl/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py | d93ca92f8ed2cfa20ca2620e5999b95da2b81014 | [
"Apache-2.0"
] | permissive | tswast/google-cloud-python | 1334d26cdb994293f307d889251d7daef5fcb826 | d897d56bce03d1fda98b79afb08264e51d46c421 | refs/heads/master | 2021-06-10T17:40:06.968584 | 2020-01-11T17:41:29 | 2020-01-11T17:41:29 | 58,775,221 | 1 | 1 | Apache-2.0 | 2019-04-10T17:09:46 | 2016-05-13T22:06:37 | Python | UTF-8 | Python | false | false | 1,173 | py | config = {
    # Per-interface client configuration consumed by the generated
    # GAPIC/gax layer when it builds the PredictionService transport.
    "interfaces": {
        "google.cloud.automl.v1beta1.PredictionService": {
            # gRPC status codes that may be retried, grouped by method class.
            "retry_codes": {
                "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
                "non_idempotent": [],
            },
            # Exponential-backoff retry/timeout parameters (milliseconds).
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 60000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 60000,
                    "total_timeout_millis": 600000,
                }
            },
            # Per-RPC overrides tying each method to the settings above.
            "methods": {
                "Predict": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default",
                },
                "BatchPredict": {
                    "timeout_millis": 20000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default",
                },
            },
        }
    }
}
| [
"noreply@github.com"
] | tswast.noreply@github.com |
3b260f7d947e059cd8f8835dcc76c8a3a680903b | f889bc01147869459c0a516382e7b95221295a7b | /test/test_error_parameters_item.py | f28c7deb40b2328947bfa816d2d8f5b8c4d4729a | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.error_parameters_item import ErrorParametersItem
class TestErrorParametersItem(unittest.TestCase):
    """ ErrorParametersItem unit test stubs (generated by Swagger Codegen). """

    def setUp(self):
        # No fixtures needed yet; the generated stub constructs nothing.
        pass

    def tearDown(self):
        pass

    def testErrorParametersItem(self):
        """
        Test ErrorParametersItem
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.error_parameters_item.ErrorParametersItem()
        pass
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| [
"sander@wildatheart.eu"
] | sander@wildatheart.eu |
36085263fc74873d7a489b295947c34b95ac2da9 | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/misc/editgrid.py | 88352f19f837e68d04a636a2ed4eefcea4deadab | [] | no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 2,557 | py | # -*- coding: utf-8 -*-
import wx
import wx.grid
import numpy as np
class EditGrid(wx.grid.Grid):
    """wx.grid.Grid with clipboard paste and numpy import/export helpers."""

    def __init__(self, *args, **kwargs):
        wx.grid.Grid.__init__(self, *args, **kwargs)
        wx.EVT_KEY_DOWN(self, self.OnKeyDown)

    def OnKeyDown(self, event):
        """Intercept Ctrl+V as paste; pass every other key through."""
        key = event.GetKeyCode()
        # BUG FIX: ControlDown is a method -- the original tested the bound
        # method object (always truthy), so a bare 'V' triggered a paste.
        if event.ControlDown() and key == ord('V'):
            self.OnPaste(event)
        else:
            event.Skip()

    def toarray(self, selection=None):
        """Return grid contents as a float64 array.

        selection: optional (x0, y0, x1, y1) half-open bounds; defaults to
        the contiguous non-empty region anchored at the top-left cell.
        """
        if selection:
            x0, y0, x1, y1 = selection
        else:
            x0, y0, x1, y1 = self._getvalidbounds()
        out = np.zeros([x1-x0, y1-y0], 'd')
        for i in range(x0, x1):
            for j in range(y0, y1):
                # BUG FIX: index relative to the selection origin; the
                # original wrote out[i, j] and overran for x0/y0 > 0.
                out[i-x0, j-y0] = float(self.GetCellValue(i, j))
        return out

    def _getvalidbounds(self):
        """Return (0, 0, rows, cols) of the filled top-left region."""
        x0 = 0
        y0 = 0
        x1 = 0
        y1 = 0
        # BUG FIX: use '<' instead of '<=' so GetCellValue is never asked
        # for a cell one past the last column/row.
        while y1 < self.GetNumberCols() and not self.GetCellValue(0, y1) == '':
            y1 += 1
        while x1 < self.GetNumberRows() and not self.GetCellValue(x1, 0) == '':
            x1 += 1
        return x0, y0, x1, y1

    def setarray(self, data, x0=0, y0=0):
        """Write the 2-D array *data* into the grid starting at (x0, y0)."""
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                self.SetCellValue(i+x0, j+y0, '%s' % data[i, j])

    def tostring(self, selection=None):
        """Serialize the grid (or a selection) as tab-separated text."""
        from cStringIO import StringIO
        sb = StringIO()
        np.savetxt(sb, self.toarray(selection), delimiter='\t')
        return sb.getvalue()

    def setfromstring(self, data, x0=0, y0=0):
        """Parse tab-separated text and write it into the grid at (x0, y0)."""
        from cStringIO import StringIO
        #print repr(data)
        sb = StringIO(data.encode())
        self.setarray(np.loadtxt(sb, delimiter = '\t'), x0, y0)

    def OnPaste(self, event):
        """Paste tab-separated clipboard text into the grid at the origin."""
        cb = wx.TextDataObject()
        wx.TheClipboard.Open()
        wx.TheClipboard.GetData(cb)
        wx.TheClipboard.Close()
        self.setfromstring(cb.GetText())
class EntryGrid(wx.Frame):
    """Top-level frame hosting a 100x5 EditGrid for manual data entry."""
    def __init__(self, parent=None):
        wx.Frame.__init__(self, parent, size=(500, 500))
        self.grid = EditGrid(self)
        self.grid.CreateGrid(100, 5)
    @property
    def data(self):
        # Current grid contents as a float numpy array.
        return self.grid.toarray()
def ShowDataGrid():
    """Create, show and return a new EntryGrid frame."""
    frame = EntryGrid()
    frame.Show()
    return frame
| [
"willi4m@zju.edu.cn"
] | willi4m@zju.edu.cn |
9b6987d7cb66e6ddd8024c55a1385a3fdea8a190 | 1cccad3f2b8cb9872fd47360486f43ed90f57c9b | /config/snippets/models.py | fcd342d2d7995fc13300285bb1aba732f10a9e9a | [] | no_license | moorekwon/rest-api | bf377cd98aa07792fd08bda70ff2621c9ca3bf9b | 5455c021dd2796968cd035f41ad7de44ec4201c4 | refs/heads/master | 2021-01-02T15:04:18.630201 | 2020-03-02T15:55:30 | 2020-03-02T15:55:30 | 239,673,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from django.db import models
# Create your models here.
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
from config.settings import AUTH_USER_MODEL
# All pygments lexers that have at least one short name.
LEXERS = [item for item in get_all_lexers() if item[1]]
# (short_name, display_name) choice pairs for the language field.
LANGUAGE_CHOICES = sorted([(item[1][0], item[0]) for item in LEXERS])
# (style, style) choice pairs for the highlighting style field.
STYLE_CHOICES = sorted([(item, item) for item in get_all_styles()])
class Snippet(models.Model):
    """A highlighted code snippet owned by a user, ordered by creation time."""
    author = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE)
    # db index, option 1 (Field.db_index):
    # created = models.DateTimeField(auto_now_add=True, db_index=True)
    created = models.DateTimeField(auto_now_add=True)
    title = models.CharField(max_length=100, blank=True, default='')
    code = models.TextField()
    linenos = models.BooleanField(default=False)
    language = models.CharField(choices=LANGUAGE_CHOICES, default='python', max_length=100)
    style = models.CharField(choices=STYLE_CHOICES, default='friendly', max_length=100)

    class Meta:
        ordering = ['created']
        # db index, option 2 (Model.Meta.indexes):
        indexes = [
            models.Index(fields=['created'])
        ]
| [
"raccoonhj33@gmail.com"
] | raccoonhj33@gmail.com |
1f2f281bc5d31a4d6b6acc05d5758e652471300c | 2d4ab8e3ea9fd613ec0ae0c1956b68874c9b5f06 | /paip/pipelines/variant_calling/index_alignment.py | 5237bd5c232a9b2a4000d6340f70cca0582b76b0 | [] | no_license | biocodices/paip | 4abd39cbbd372a68592da87177c70c403d5a661d | 040a62c11e5bae306e2de4cc3e0a78772ee580b3 | refs/heads/master | 2021-01-17T20:48:28.642255 | 2019-07-26T14:30:58 | 2019-07-26T14:30:58 | 62,604,413 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | from paip.task_types import SampleTask
from paip.pipelines.variant_calling import MarkDuplicates
from paip.helpers.create_cohort_task import create_cohort_task
class IndexAlignment(SampleTask):
    """
    Takes a BAM and creates its BAI (index) companion.
    """
    REQUIRES = MarkDuplicates
    OUTPUT = "dupmarked_alignment.bai"

    def run(self):
        # Write the index to a temporary path so an interrupted run never
        # leaves a partial .bai; it is renamed into place on success.
        with self.output().temporary_path() as temp_bai:
            program_name = 'picard BuildBamIndex'
            program_options = {
                'input_bam': self.input()['dupmarked_bam'].path,
                'output_bai': temp_bai,
            }
            self.run_program(program_name, program_options)
# Cohort-level task that runs IndexAlignment for every sample.
IndexAlignmentCohort = create_cohort_task(IndexAlignment)
| [
"juanmaberros@gmail.com"
] | juanmaberros@gmail.com |
af319fa3841c5cb66f3ce96349db1ab481a2337d | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/Risk/FK_FKYW_PG_KCB_212.py | 06c0ff949a1a41fb9928d88cc150b43d6ed2101e | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import os
sys.path.append("/home/yhl2/workspace/xtp_test/Autocase_Result/Risk/service")
from order import f
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from QueryStkPriceQty import *
from log import *
from utils import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import SqlData_Transfer
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import clear_data_and_restart_sh
class FK_FKYW_PG_212(xtp_test_case):
    """Risk-control case: default rule25 with rule0=0, exercised via
    Shanghai rights-issue (allotment) buy orders."""

    def setUp(self):
        # Reset the current risk rules and install the set for this case.
        sql_transfer = SqlData_Transfer()
        sql_transfer.delete_cur_risk()
        sql_transfer.insert_cur_risk('FK_FKYW_GPWT_624')
        clear_data_and_restart_sh()
        # Re-login so the trading session picks up the restarted environment.
        Api.trade.Logout()
        time.sleep(2)
        Api.trade.Login()

    def test_FK_FKYW_PG_212(self):
        title = '默认rule25,rule0=0'
        # Expected values for this test case.
        # Possible order states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, cancel
        # pending, cancelled, invalid, cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '未成交',
            'errorID':0,
            'errorMSG': '',
            '是否生成报单':'是',
            '是否是撤废':'否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Order request parameters ------------------------------------------
        wt_reqs = {
            'business_type': Api.const.XTP_BUSINESS_TYPE[
                'XTP_BUSINESS_TYPE_ALLOTMENT'],
            'order_client_id': 4,
            'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
            'ticker': '700001',
            'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
            'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
            'quantity': 2580,
            'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
        }
        # Send 100 orders with client id 4, then one more with client id 1.
        count = 1
        max_count = 101
        filename = 'FK_FKYW_PG_212_order'
        insert_orders_sleep(count, max_count, Api, case_goal, wt_reqs, filename)
        time.sleep(3)
        max_count = 1
        wt_reqs['order_client_id'] = 1
        insert_orders_sleep(count, max_count, Api, case_goal, wt_reqs, filename)
        time.sleep(3)
        file_reorder(filename)
        # Verify the orders correctly triggered the rule25 risk control.
        rule25_check2(filename)
if __name__ == '__main__':
    # Run this risk case directly via unittest's CLI runner.
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
38e4fda8720303f4e1bd2540c6a20bf972c60ed9 | f546248daf7fd64aeff6517e9fea668b459f9b62 | /yatwin/interfaces/onvif/wsdl/wsdl/errors.py | 557ffa417b37c79dad41bfc06a4d843c207004bc | [] | no_license | andre95d/python-yatwin | 2310b6c6b995771cea9ad53f61ad37c7b10d52d0 | 7d370342f34e26e6e66718ae397eb1d81253cd8a | refs/heads/master | 2023-03-16T18:06:17.141826 | 2020-05-12T23:04:53 | 2020-05-12T23:04:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | """
Module that defines the package's error types.

Each error inherits directly from <Exception> with an empty (pass) body,
so it is essentially a renamed <Exception> that callers can catch
individually.
Contains:
<FileDoesNotExist>
<InvalidArgument>
<ParseError>
"""
class FileDoesNotExist(Exception): pass  # empty subclass: lets callers catch missing-file failures specifically
class InvalidArgument(Exception): pass  # empty subclass: lets callers catch bad-argument failures specifically
class ParseError(Exception): pass  # empty subclass: lets callers catch parsing failures specifically
| [
"26026015+tombulled@users.noreply.github.com"
] | 26026015+tombulled@users.noreply.github.com |
77a82b1e7d4bc1d00910f61fcaba9536d4b9e2e4 | 89145800ada60f8d2d1b3200b6f384c1a4f8fff8 | /aparcamientos/migrations/0007_auto_20170513_2045.py | c46410372c612861e5c7097ab767691fd484be12 | [] | no_license | malozanom/X-Serv-Practica-Aparcamientos | 2f8f2cab9b9ca096ab3209d8fa6579aacbdce593 | d6da3af090aef7e8b0d23add7a5ff76f979d0311 | refs/heads/master | 2021-06-24T17:34:40.930085 | 2019-11-04T18:36:38 | 2019-11-04T18:36:38 | 90,887,116 | 0 | 0 | null | 2017-05-10T16:46:30 | 2017-05-10T16:46:30 | null | UTF-8 | Python | false | false | 598 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Aparcamiento.email and Aparcamiento.telefono nullable."""

    dependencies = [
        ('aparcamientos', '0006_auto_20170513_2038'),
    ]

    operations = [
        # Allow NULL so parkings without contact data can be stored.
        migrations.AlterField(
            model_name='aparcamiento',
            name='email',
            field=models.EmailField(max_length=254, null=True),
        ),
        migrations.AlterField(
            model_name='aparcamiento',
            name='telefono',
            field=models.CharField(max_length=30, null=True),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
6e1a191a96a7f8c72db1a5e68631517295612b77 | 81c5323d6a456479d32c9a2a2b9ab61484dd7922 | /otter/assign/r_adapter/tests.py | a1922198413bc3a05d3eb9c88d3baaf1309b1b53 | [
"BSD-3-Clause"
] | permissive | fperez/otter-grader | c488f339fa6db8577c09bbcba22d919519fb3fb5 | 6c9ded064b7c8bbd115d8ab54330f4400a564b17 | refs/heads/master | 2023-07-27T16:33:33.392295 | 2021-08-26T18:12:18 | 2021-08-26T18:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,289 | py | """
ottr test adapters for Otter Assign
"""
import re
import pprint
import yaml
import nbformat
from collections import namedtuple
from ..constants import BEGIN_TEST_CONFIG_REGEX, END_TEST_CONFIG_REGEX, OTTR_TEST_FILE_TEMPLATE, OTTR_TEST_NAME_REGEX, TEST_REGEX
from ..tests import write_test
from ..utils import get_source, lock
# One test case: metadata plus the R code body.  'points' is filled in
# later by gen_test_cell; the messages are shown on pass/fail.
Test = namedtuple('Test', ['name', 'hidden', 'points', 'body', 'success_message', 'failure_message'])
def read_test(cell, question, assignment, rmd=False):
    """
    Returns the contents of a test as a ``(name, hidden, body)`` named tuple

    Args:
        cell (``nbformat.NotebookNode``): a test cell
        question (``dict``): question metadata
        assignment (``otter.assign.assignment.Assignment``): the assignment configurations
        rmd (``bool``, optional): whether the cell is from an Rmd file; if true, the first and last
            lines of ``cell``'s source are trimmed, since they should be backtick delimeters

    Returns:
        ``Test``: test named tuple
    """
    if rmd:
        source = get_source(cell)[1:-1]
    else:
        source = get_source(cell)
    # legacy format: a leading comment containing "hidden" marks the test hidden
    if source[0].lstrip().startswith("#"):
        hidden = bool(re.search(r"hidden", source[0], flags=re.IGNORECASE))
    else:
        hidden = False
    # i is the index of the last metadata line; the test body starts at i + 1
    i = 0 if hidden else -1
    if rmd:
        i = 0
    # preferred format: a YAML block delimited by BEGIN/END TEST CONFIG markers
    if re.match(BEGIN_TEST_CONFIG_REGEX, source[0], flags=re.IGNORECASE):
        for i, line in enumerate(source):
            if re.match(END_TEST_CONFIG_REGEX, line, flags=re.IGNORECASE):
                break
        config = yaml.full_load("\n".join(source[1:i]))
        assert isinstance(config, dict), f"Invalid test config in cell {cell}"
    else:
        config = {}
    # explicit config values override whatever the legacy comment implied
    test_name = config.get("name", None)
    hidden = config.get("hidden", hidden)
    points = config.get("points", None)
    success_message = config.get("success_message", None)
    failure_message = config.get("failure_message", None)
    # for line in lines:
    #     match = re.match(OTTR_TEST_NAME_REGEX, line)
    #     if match:
    #         test_name = match.group(1)
    #         break
    # assert test_name is not None, f"Could not parse test name:\n{cell}"
    # TODO: hook up success_message and failure_message
    # TODO: add parsing for TEST CONFIG blocks
    # points stays None here; per-case points are assigned in gen_test_cell
    return Test(test_name, hidden, None, '\n'.join(source[i+1:]), success_message, failure_message)
def gen_test_cell(question, tests, tests_dict, assignment):
    """
    Parses a list of test named tuples and creates a single test file. Adds this test file as a value
    to ``tests_dict`` with a key corresponding to the test's name, taken from ``question``. Returns
    a code cell that runs the check on this test.

    Args:
        question (``dict``): question metadata
        tests (``list`` of ``Test``): tests to be written
        tests_dict (``dict``): the tests for this assignment
        assignment (``otter.assign.assignment.Assignment``): the assignment configurations

    Returns:
        ``nbformat.NotebookNode``: code cell calling ``ottr::check`` on this test
    """
    cell = nbformat.v4.new_code_cell()
    cell.source = ['. = ottr::check("tests/{}.R")'.format(question['name'])]
    points = question.get('points', len(tests))
    if points is None:
        points = 1
    # a scalar point total is split evenly across the test cases; integer
    # division is used when it divides evenly so the values stay whole
    if isinstance(points, (int, float)):
        if points % len(tests) == 0:
            points = [points // len(tests) for _ in range(len(tests))]
        else:
            points = [points / len(tests) for _ in range(len(tests))]
    assert isinstance(points, list) and len(points) == len(tests), \
        f"Points for question {question['name']} could not be parsed:\n{points}"
    # update point values
    tests = [tc._replace(points=p) for tc, p in zip(tests, points)]
    test = gen_suite(question['name'], tests, points)
    tests_dict[question['name']] = test
    # lock the check cell so students cannot edit it
    lock(cell)
    return cell
def gen_suite(name, tests, points):
    """
    Generates an R-formatted test file for ottr

    Args:
        name (``str``): the test name
        tests (``list`` of ``Test``): the test case named tuples that define this test file
        points (``float`` or ``int`` or ``list`` of ``float`` or ``int``): the points per question
            (unused here; point values are already attached to each test case)

    Returns:
        ``str``: the rendered R test file
    """
    return OTTR_TEST_FILE_TEMPLATE.render(name=name, test_cases=tests)
def remove_hidden_tests_from_dir(nb, test_dir, assignment, use_files=True):
    """
    Rewrites test files in a directory to remove hidden tests

    Args:
        nb (``nbformat.NotebookNode``): the student notebook
        test_dir (``pathlib.Path``): path to test files directory
        assignment (``otter.assign.assignment.Assignment``): the assignment configurations
        use_files (``bool``, optional): ignored for R assignments
    """
    for f in test_dir.iterdir():
        # only rewrite R test files; skip anything else in the directory
        if f.suffix != '.R':
            continue
        with open(f) as f2:
            test = f2.read()
        # drop every ottr::TestCase$new(...) block flagged hidden = TRUE,
        # matching non-greedily up to the closing paren at one-space indent
        test = re.sub(r" ottr::TestCase\$new\(\s*hidden = TRUE[\w\W]+?^ \),?", "", test, flags=re.MULTILINE)
        test = re.sub(r",(\s* \))", r"\1", test, flags=re.MULTILINE) # removes a trailing comma if present
        write_test({}, f, test, use_file=True)
| [
"cpyles@berkeley.edu"
] | cpyles@berkeley.edu |
6ecfa4e76bd00ac931f5a23055997abaa3f3c7c0 | 02952ddf96e7960a3faef74485f4ffc12bcf2973 | /projects/node_failure/grayscott_example.py | 70cc7692d34770f851f5fac30d67789e8b4e26c1 | [
"BSD-2-Clause"
] | permissive | danielru/pySDC | 5decca37e1ecea643fe21dac0f978e3fdaa24ac6 | 558b2b4db3aeb97e6a87e41cd4958a8a948af37a | refs/heads/master | 2020-12-25T10:58:57.215298 | 2017-03-21T06:45:59 | 2017-03-21T06:45:59 | 31,062,846 | 0 | 0 | null | 2015-02-20T11:52:33 | 2015-02-20T11:52:33 | null | UTF-8 | Python | false | false | 5,773 | py | import numpy as np
import projects.node_failure.emulate_hard_faults as ft
from projects.node_failure.allinclusive_classic_nonMPI_hard_faults import allinclusive_classic_nonMPI_hard_faults
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.sweeper_classes.generic_LU import generic_LU
from pySDC.implementations.datatype_classes.fenics_mesh import fenics_mesh
from pySDC.implementations.transfer_classes.TransferFenicsMesh import mesh_to_mesh_fenics
from pySDC.implementations.problem_classes.GrayScott_1D_FEniCS_implicit import fenics_grayscott
# noinspection PyShadowingNames,PyShadowingBuiltins
def main(ft_strategies):
    """
    This routine generates the heatmaps showing the residual for node failures at different steps
    and iterations.

    Args:
        ft_strategies (list of str): fault-tolerance strategies to run (e.g. 'NOFAULT', 'SPREAD',
            'INTERP'); for each strategy the residuals and iteration counts are saved to
            data/PFASST_GRAYSCOTT_stats_hf_<strategy>_P<num_procs>.npz
    """
    num_procs = 32

    # setup parameters "in time"
    t0 = 0
    dt = 2.0
    Tend = 1280.0
    Nsteps = int((Tend - t0) / dt)

    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-07
    level_params['dt'] = dt

    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 50

    # initialize space transfer parameters
    space_transfer_params = dict()
    space_transfer_params['finter'] = True

    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['num_nodes'] = [3]
    sweeper_params['QI'] = 'LU'

    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 20

    # initialize problem parameters (alternative parameter sets kept for reference)
    problem_params = dict()
    # problem_params['Du'] = 1.0
    # problem_params['Dv'] = 0.01
    # problem_params['A'] = 0.01
    # problem_params['B'] = 0.10
    # splitting pulses until steady state
    # problem_params['Du'] = 1.0
    # problem_params['Dv'] = 0.01
    # problem_params['A'] = 0.02
    # problem_params['B'] = 0.079
    # splitting pulses until steady state
    problem_params['Du'] = 1.0
    problem_params['Dv'] = 0.01
    problem_params['A'] = 0.09
    problem_params['B'] = 0.086
    problem_params['t0'] = t0  # ugly, but necessary to set up ProblemClass
    problem_params['c_nvars'] = [256]
    problem_params['family'] = 'CG'
    problem_params['order'] = [4]
    problem_params['refinements'] = [1, 0]

    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_class'] = fenics_grayscott  # pass problem class
    description['problem_params'] = problem_params  # pass problem parameters
    description['dtype_u'] = fenics_mesh  # pass data type for u
    description['dtype_f'] = fenics_mesh  # pass data type for f
    description['sweeper_class'] = generic_LU  # pass sweeper (see part B)
    description['sweeper_params'] = sweeper_params  # pass sweeper parameters
    description['level_params'] = level_params  # pass level parameters
    description['step_params'] = step_params  # pass step parameters
    description['space_transfer_class'] = mesh_to_mesh_fenics  # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass paramters for spatial transfer

    ft.hard_random = 0.03

    controller = allinclusive_classic_nonMPI_hard_faults(num_procs=num_procs,
                                                         controller_params=controller_params,
                                                         description=description)

    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t0)

    for strategy in ft_strategies:

        print('------------------------------------------ working on strategy ', strategy)
        ft.strategy = strategy

        # read in reference data from clean run, will provide reproducable locations for faults
        # BUG FIX: use != instead of "is not" -- identity comparison with a
        # string literal is implementation-dependent and unreliable.
        if strategy != 'NOFAULT':
            reffile = np.load('data/PFASST_GRAYSCOTT_stats_hf_NOFAULT_P16.npz')
            ft.refdata = reffile['hard_stats']

        # call main function to get things done...
        uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)

        # get residuals of the run
        extract_stats = filter_stats(stats, type='residual_post_iteration')

        # find boundaries for x-,y- and c-axis as well as arrays
        maxprocs = 0
        maxiter = 0
        minres = 0
        maxres = -99
        for k, v in extract_stats.items():
            maxprocs = max(maxprocs, getattr(k, 'process'))
            maxiter = max(maxiter, getattr(k, 'iter'))
            minres = min(minres, np.log10(v))
            maxres = max(maxres, np.log10(v))

        # grep residuals and put into array (unset entries stay at -99)
        residual = np.zeros((maxiter, maxprocs + 1))
        residual[:] = -99

        for k, v in extract_stats.items():
            step = getattr(k, 'process')
            niter = getattr(k, 'iter')
            # BUG FIX: compare with != instead of "is not" -- small-int
            # identity is a CPython accident, not guaranteed behavior.
            # (Local renamed so it no longer shadows the builtin 'iter'.)
            if niter != -1:
                residual[niter - 1, step] = np.log10(v)

        # stats magic: get niter (probably redundant with maxiter)
        extract_stats = filter_stats(stats, level=-1, type='niter')
        sortedlist_stats = sort_stats(extract_stats, sortby='process')

        iter_count = np.zeros(Nsteps)
        for item in sortedlist_stats:
            iter_count[item[0]] = item[1]
        print(iter_count)

        np.savez('data/PFASST_GRAYSCOTT_stats_hf_' + ft.strategy + '_P' + str(num_procs), residual=residual,
                 iter_count=iter_count, hard_stats=ft.hard_stats)
if __name__ == "__main__":
    # Other strategies that can be enabled:
    # ft_strategies = ['SPREAD', 'SPREAD_PREDICT', 'INTERP', 'INTERP_PREDICT']
    ft_strategies = ['NOFAULT']
    main(ft_strategies=ft_strategies)
| [
"r.speck@fz-juelich.de"
] | r.speck@fz-juelich.de |
4bc2fe82e33b7056ddfa4db9de4d352059f62de5 | 95aa541b9dcede42e6969947d06a974973dd052a | /ch10_thread_and_subprocess/manpage_server.py | 154595fe1845e23af8d4100d18008086d16f2c64 | [] | no_license | AstinCHOI/book_twisted | a693d5eaf7a826b26671e9e98d167f6808dcc0dc | dc5a5dff1ac432e2fb95670b683628e09d24774e | refs/heads/master | 2016-09-05T16:11:03.361046 | 2015-03-27T07:27:23 | 2015-03-27T07:27:23 | 28,185,272 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import sys
from twisted.internet import protocol, utils, reactor
from twisted.protocols.basic import LineReceiver
from twisted.python import log
class RunCommand(LineReceiver):
    """Line-based protocol: each received line names man pages to fetch."""

    def lineReceived(self, line):
        # One request per line; the whitespace-separated tokens are passed
        # straight to the `man` command.
        log.msg("Man pages requested for: %s" % (line,))
        commands = line.strip().split(" ")
        # getProcessOutput returns a Deferred; errortoo=True folds stderr into
        # the result instead of firing the errback.
        output = utils.getProcessOutput("man", commands, errortoo=True)
        output.addCallback(self.writeSuccessResponse)
    def writeSuccessResponse(self, result):
        # Send the captured output back to the client and close the connection.
        self.transport.write(result)
        self.transport.loseConnection()
class RunCommandFactory(protocol.Factory):
    """Creates one RunCommand protocol instance per incoming connection."""

    def buildProtocol(self, addr):
        return RunCommand()
# Log to stdout and serve man-page requests on TCP port 8000.
log.startLogging(sys.stdout)
reactor.listenTCP(8000, RunCommandFactory())
reactor.run()
| [
"asciineo@gmail.com"
] | asciineo@gmail.com |
808da2cebeee1cf74826f659edf3a3e4224feeb7 | 54f33f6026b6fb71caba620f4bf39ec48ad76422 | /trazabilidad/migrations/0004_auto_20200707_1259.py | 6674d2af94293ccf727b9c3873faf9e43c2fc171 | [] | no_license | geovanniberdugo/lab7 | 7d30458c64bc32e21c390d4836db76bf894609f5 | 8a945d937864961da3fcd1e8c4dbf7575febc58d | refs/heads/main | 2023-02-13T02:03:19.067815 | 2021-01-15T22:24:35 | 2021-01-15T22:24:35 | 330,033,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # Generated by Django 2.2.13 on 2020-07-07 17:59
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: updates the Meta options (custom permissions
    # and verbose names) of the `reporte` model.
    dependencies = [
        ('trazabilidad', '0003_recepcion_responsable_tecnico'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='reporte',
            options={'permissions': [('can_generar_informe', 'Puede generar nuevo informe de resultado'), ('can_see_informe_resultados', 'Puede ver informe de resultado regenerado')], 'verbose_name': 'reporte', 'verbose_name_plural': 'reportes'},
        ),
    ]
| [
"geovanni.berdugo@gmail.com"
] | geovanni.berdugo@gmail.com |
336baca78251949d3080584964342725bd72e442 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/111/usersdata/224/64138/submittedfiles/av2_p3_m2.py | 6a24b72cb7c12e52bc9bb665c8c885fcad335b6c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | # -*- coding: utf-8 -*-
import numpy as np
def L(A):
    """Return a list with the sum of every row of matrix A."""
    rows, cols = A.shape
    return [sum(A[r, c] for c in range(cols)) for r in range(rows)]
def C(A):
    """Return a list with the sum of every column of matrix A.

    Bug fix: the original computed each column sum in `soma2` but never
    stored it, so an empty list was always returned.
    """
    C = []
    for j in range(0, A.shape[1], 1):
        soma2 = 0
        for i in range(0, A.shape[0], 1):
            soma2 = soma2 + A[i, j]
        C.append(soma2)
    return(C)
def TERMO(c):
    """Return the index of the first value occurring exactly once in c.

    Returns None when every value is repeated (or c is empty).
    """
    for idx, value in enumerate(c):
        occurrences = sum(1 for other in c if value == other)
        if occurrences == 1:
            return idx
def numero(Lista, Termo):
    """Return Lista[Termo] minus the last entry at a different index.

    Bug fix: the original read the undefined names `lista` and `Numero`
    (NameError on every call); the intended locals are `Lista` and `numero`.
    """
    for i in range(0, len(Lista), 1):
        if i != Termo:
            numero = Lista[Termo] - Lista[i]
    return (numero)
# Read an n x n matrix, then locate the cell at the crossing of the row and
# column whose sums differ from all others.
n=int(input('Dimensão da matriz A: '))
A=np.zeros((n,n))
for i in range(0,A.shape[0],1):
    for j in range(0,A.shape[1],1):
        A[i,j]=int(input('TERMO: '))
# Bug fix: the helper is named TERMO; calling `Termo` raised a NameError.
L=L(A)
C=C(A)
v=TERMO(L)
w=TERMO(C)
h=A[v,w]
m=h-(numero(L,v))
print('%.d' %m)
print('%.d' %h)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
98981c98e568239d5f45a9956b008fa6e08dddc3 | d2eb7bd335175edd844a3e6c1c633ee0dc2dbb25 | /contests_cf/cgr13/a.py | 17700eeffc72067c1d7b3c7ce4a4409baea8f942 | [
"BSD-2-Clause"
] | permissive | stdiorion/competitive-programming | 5020a12b85f1e691ceb0cacd021606a9dc58b72c | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | refs/heads/main | 2023-03-27T01:13:42.691586 | 2021-03-08T08:05:53 | 2021-03-08T08:05:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | n, Q = map(int, input().split())
a = list(map(int, input().split()))
queries = [tuple(map(int, input().split())) for _ in range(Q)]
# Maintain the number of ones in `a` incrementally instead of re-summing.
sa = sum(a)
for t, q in queries:
    if t == 1:
        # Type-1 query: flip a[q-1], keeping the running count in sync.
        if a[q - 1]:
            sa -= 1
        else:
            sa += 1
        a[q - 1] ^= 1
if t == 2:
print(int(sa >= q)) | [
"itkn1900@gmail.com"
] | itkn1900@gmail.com |
f1e1916d5ee16ab930b58883865ded11391e592b | 9d8e2dd4441c50b443390f76c899ad1f46c42c0e | /hacker_rank/warmup/acm_icpc.py | d89e3c9e0e9eacebe4f54393b2719d36db302d06 | [] | no_license | vikramjit-sidhu/algorithms | 186ec32de471386ce0fd6b469403199a5e3bbc6d | cace332fc8e952db76c19e200cc91ec8485ef14f | refs/heads/master | 2021-01-01T16:20:52.071495 | 2015-08-03T17:42:29 | 2015-08-03T17:42:29 | 29,119,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | """
Hacker Rank - Algorithms Warmup
ACM ICPC team
https://www.hackerrank.com/challenges/acm-icpc-team
"""
def find_max_skillset(n, person_skills):
    """Brute-force O(n^2) scan over all pairs of people.

    Returns (best, count): `best` is the largest number of topics known by
    any pair (popcount of the bitwise OR of their skill masks) and `count`
    is how many pairs reach that maximum.
    """
    best_topics = 0
    pair_count = 0
    total = len(person_skills)
    for first in range(total):
        for second in range(first + 1, total):
            topics = bin(person_skills[first] | person_skills[second]).count('1')
            if topics > best_topics:
                best_topics = topics
                pair_count = 1
            elif topics == best_topics:
                pair_count += 1
    return (best_topics, pair_count)
def main():
    """Read the HackerRank input and print the best topic count and pair count."""
    # First line: number of people n and number of topics m (m is not needed
    # beyond parsing - each bit string already encodes all topics).
    n, m = (int(i) for i in input().strip().split(' '))
    person_skills = []
    for i in range(n):
        # Each subsequent line is a binary string; parse it as a base-2 int.
        person_skills.append(int(input().strip(), 2))
    common_skills, count_skills = find_max_skillset(n, person_skills)
    print(common_skills)
    print(count_skills)
if __name__ == '__main__':
    main()
"vikram.sidhu.007@gmail.com"
] | vikram.sidhu.007@gmail.com |
71b60f4bbb9e39e2541f0232e385b0b1d2e736a1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03150/s166029107.py | 7912d0c3c96249b3eef4d049d6bd02ec4463ca6b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | S = input()
N = len(S)
# Try every way of gluing a prefix S[:i] to a suffix S[j:] (i.e. deleting the
# contiguous span S[i:j]); if any result equals "keyence", answer YES.
for i in range(N):
    for j in range(i,N):
        if S[:i]+S[j:] == "keyence":
            print("YES")
            exit()
print("NO")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a50387cc2103e7bb54d4efc666435bc50c0049b5 | 3c5ae1e6dcd105354971c78ef279420b0b817ffa | /nova-master/nova/version.py | 01b6b47658d7e4e1fb5362ba57ca75420093f641 | [
"Apache-2.0"
] | permissive | Yunzhe-Sun/CentOS-openstack | e9e99360cedd46e9bf7f2f1b2dab22032c6b2aca | e22b8090e7bf748c38448a61793a23ca71ec91d4 | refs/heads/master | 2021-07-15T23:19:15.560323 | 2021-07-07T02:43:01 | 2021-07-07T02:43:01 | 218,758,174 | 0 | 1 | null | 2021-07-07T02:43:02 | 2019-10-31T12:13:49 | Python | UTF-8 | Python | false | false | 2,531 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from nova.openstack.common.gettextutils import _
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
NOVA_PACKAGE = None # OS distro package version suffix
loaded = False
version_info = pbr.version.VersionInfo('nova')
version_string = version_info.version_string
def _load_config():
    """Populate NOVA_VENDOR/NOVA_PRODUCT/NOVA_PACKAGE from the optional
    "release" config file, once per process.

    Bug fix: the original called ``cfg.get("Nova", "vendor")`` (and the
    product/package equivalents) unconditionally *before* the matching
    ``has_option`` guard, so a missing option raised NoOptionError, jumped
    to the except clause, and skipped reading the remaining options.  Only
    the guarded reads are kept.  The local parser is also renamed so it no
    longer shadows the imported ``oslo.config`` ``cfg`` module.
    """
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    import ConfigParser

    from oslo.config import cfg

    from nova.openstack.common import log as logging

    global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
    if loaded:
        return
    loaded = True

    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return
    try:
        parser = ConfigParser.RawConfigParser()
        parser.read(cfgfile)
        # Each value is optional; keep the module-level default when absent.
        if parser.has_option("Nova", "vendor"):
            NOVA_VENDOR = parser.get("Nova", "vendor")
        if parser.has_option("Nova", "product"):
            NOVA_PRODUCT = parser.get("Nova", "product")
        if parser.has_option("Nova", "package"):
            NOVA_PACKAGE = parser.get("Nova", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error(_("Failed to load %(cfgfile)s: %(ex)s"),
                  {'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
    """Return the vendor name, loading the optional release file first."""
    _load_config()
    return NOVA_VENDOR
def product_string():
    """Return the product name, loading the optional release file first."""
    _load_config()
    return NOVA_PRODUCT
def package_string():
    """Return the distro package suffix, or None when not packaged."""
    _load_config()
    return NOVA_PACKAGE
def version_string_with_package():
    """Return the version string, with the package suffix appended when set."""
    if package_string() is None:
        return version_info.version_string()
    else:
        return "%s-%s" % (version_info.version_string(), package_string())
| [
"171194570@qq.com"
] | 171194570@qq.com |
c489de6483130e1719c5238ee2f413453c3d8a12 | 5a281cb78335e06c631181720546f6876005d4e5 | /karbor-1.3.0/karbor/common/opts.py | 141e3d4b1f62c335e989a939c6679f778d7b5c9f | [
"Apache-2.0"
] | permissive | scottwedge/OpenStack-Stein | d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8 | 7077d1f602031dace92916f14e36b124f474de15 | refs/heads/master | 2021-03-22T16:07:19.561504 | 2020-03-15T01:31:10 | 2020-03-15T01:31:10 | 247,380,811 | 0 | 0 | Apache-2.0 | 2020-03-15T01:24:15 | 2020-03-15T01:24:15 | null | UTF-8 | Python | false | false | 3,875 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import karbor.api.common
import karbor.api.v1.protectables
import karbor.api.v1.providers
import karbor.common.config
import karbor.db.api
import karbor.exception
import karbor.service
import karbor.services.operationengine.engine.executors.green_thread_executor as green_thread_executor # noqa
import karbor.services.operationengine.engine.executors.thread_pool_executor as thread_pool_executor # noqa
import karbor.services.operationengine.engine.triggers.timetrigger as time_trigger # noqa
import karbor.services.operationengine.karbor_client
import karbor.services.operationengine.manager
import karbor.services.operationengine.operations.base as base
import karbor.services.protection.clients.cinder
import karbor.services.protection.clients.glance
import karbor.services.protection.clients.manila
import karbor.services.protection.clients.neutron
import karbor.services.protection.clients.nova
import karbor.services.protection.flows.restore
import karbor.services.protection.flows.worker
import karbor.services.protection.manager
import karbor.wsgi.eventlet_server
__all__ = ['list_opts']
_opts = [
('clients_keystone', list(itertools.chain(
karbor.common.config.keystone_client_opts))),
('operationengine', list(itertools.chain(
green_thread_executor.green_thread_executor_opts,
karbor.services.operationengine.manager.trigger_manager_opts))),
('karbor_client', list(itertools.chain(
karbor.common.config.service_client_opts))),
('cinder_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.cinder.cinder_client_opts))),
('glance_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.glance.glance_client_opts))),
('manila_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.manila.manila_client_opts))),
('neutron_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.neutron.neutron_client_opts))),
('nova_client', list(itertools.chain(
karbor.common.config.service_client_opts,
karbor.services.protection.clients.nova.nova_client_opts))),
('DEFAULT', list(itertools.chain(
karbor.common.config.core_opts,
karbor.common.config.debug_opts,
karbor.common.config.global_opts,
karbor.api.common.api_common_opts,
karbor.api.v1.protectables.query_instance_filters_opts,
karbor.api.v1.providers.query_provider_filters_opts,
karbor.api.v1.providers.query_checkpoint_filters_opts,
karbor.db.api.db_opts,
thread_pool_executor.executor_opts,
time_trigger.time_trigger_opts,
base.record_operation_log_executor_opts,
karbor.services.protection.flows.restore.sync_status_opts,
karbor.services.protection.flows.worker.workflow_opts,
karbor.services.protection.manager.protection_manager_opts,
karbor.wsgi.eventlet_server.socket_opts,
karbor.exception.exc_log_opts,
karbor.service.service_opts)))]
def list_opts():
    """Return (group, options) pairs for oslo.config option discovery.

    The option lists are deep-copied so callers cannot mutate the
    module-level registry.
    """
    result = []
    for group, opts in _opts:
        result.append((group, copy.deepcopy(opts)))
    return result
| [
"Wayne Gong@minbgong-winvm.cisco.com"
] | Wayne Gong@minbgong-winvm.cisco.com |
d2a1e18f1dbe66e35809ad3d837e3d80d6088ed7 | 2dfec35bafb4a808771b723ca94c102240b5d8f4 | /align/alignProject/lineWriteExcel.py | d830271cf966eca71653032874f36f2d7fa74475 | [] | no_license | mengguiyouziyi/outsourcing | cde7169d94bcdde63091e92f91d140c72c1d184c | 140172ca792c9808cb69251468a14529b84ea380 | refs/heads/master | 2022-11-13T19:55:54.851329 | 2020-06-18T02:51:25 | 2020-06-18T02:51:25 | 273,120,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | import os
import xlwt
def toExcel(file_align, en_sen, zh_sen, xlsx):
    """Export aligned English/Chinese sentence pairs to an Excel file.

    Reads a 1 <-> 1 alignment file, looks up the matching lines in the
    English and Chinese sentence-per-line files, and writes the pairs side
    by side into an .xlsx workbook (column 0 = English, column 1 = Chinese).

    Fix: the three input files were only closed at the very end, so any
    exception on the way (e.g. a bad line number) leaked the handles; all
    reads now use ``with`` blocks.

    :param file_align: alignment file path (relative to ../file)
    :param en_sen: English sentence file path
    :param zh_sen: Chinese sentence file path
    :param xlsx: output Excel file path
    :return:
    """
    current_path = os.getcwd()
    # Collect aligned line numbers; skip omitted and many-to-many links.
    src_num_list = []
    tgt_num_list = []
    with open(os.path.join(current_path, '../file', file_align), 'r', encoding='utf-8') as align:
        for line in align:
            if 'omitted' in line or ',' in line:
                continue
            src_num, tgt_num = line.strip().split(' <=> ')
            src_num_list.append(src_num)
            tgt_num_list.append(tgt_num)
    # Fetch the referenced sentences (alignment line numbers are 1-based).
    with open(os.path.join(current_path, '../file', en_sen), 'r', encoding='utf-8') as src_sen_file:
        src_sen_list = src_sen_file.readlines()
    with open(os.path.join(current_path, '../file', zh_sen), 'r', encoding='utf-8') as tgt_sen_file:
        tgt_sen_list = tgt_sen_file.readlines()
    src_list = [src_sen_list[int(i) - 1] for i in src_num_list]
    tgt_list = [tgt_sen_list[int(i) - 1] for i in tgt_num_list]
    # Write the sentence pairs into the workbook.
    xlsx_file = os.path.join(current_path, '../file', xlsx)
    workbook = xlwt.Workbook()
    worksheet = workbook.add_sheet('align')
    for row, (en, zh) in enumerate(zip(src_list, tgt_list)):
        worksheet.write(row, 0, en.strip())
        worksheet.write(row, 1, zh.strip())
    workbook.save(xlsx_file)
| [
"775618369@qq.com"
] | 775618369@qq.com |
cbea139f8853a006a1972d5ee7af4fcd57543eaa | 8be5929368bd987caf744c92f234884bf49d3d42 | /lib/app/reportdatasources/loader.py | 10ddd1e2dc402ed2c6599f4203b634578a5d1d95 | [
"BSD-3-Clause"
] | permissive | oleg-soroka-lt/noc | 9b670d67495f414d78c7080ad75f013ab1bf4dfb | c39422743f52bface39b54d5d915dcd621f83856 | refs/heads/master | 2023-08-21T03:06:32.235570 | 2021-10-20T10:22:02 | 2021-10-20T10:22:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | # ----------------------------------------------------------------------
# ReportDataSource Loader
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import logging
import os
# NOC modules
from noc.core.loader.base import BaseLoader
from .base import ReportDataSource
logger = logging.getLogger(__name__)
BASE_PREFIX = os.path.join("lib", "app", "reportdatasources")
class ReportDataSourceLoader(BaseLoader):
    """Loader that discovers ReportDataSource subclasses in this package."""
    name = "reportdatasource"
    # Only subclasses of this base class are considered loadable.
    base_cls = ReportDataSource
    # Package path searched for data-source modules.
    base_path = ("lib", "app", "reportdatasources")
    # Module names that never contain loadable data sources.
    ignored_names = {"base", "loader"}
# Create singleton object
loader = ReportDataSourceLoader()
| [
"aversanta@gmail.com"
] | aversanta@gmail.com |
2b16643e7e21bbfc7d0c383285dbed507442b4ec | 0c66e605e6e4129b09ea14dbb6aa353d18aaa027 | /diventi/accounts/migrations/0230_auto_20190813_0943.py | cad59fc29518cbca8c11ec4722683b2757ed0e2f | [
"Apache-2.0"
] | permissive | flavoi/diventi | 58fbc8c947f387cbcc1ce607878a59a6f2b72313 | c0b1efe2baa3ff816d6ee9a8e86623f297973ded | refs/heads/master | 2023-07-20T09:32:35.897661 | 2023-07-11T19:44:26 | 2023-07-11T19:44:26 | 102,959,477 | 2 | 1 | Apache-2.0 | 2023-02-08T01:03:17 | 2017-09-09T14:10:51 | Python | UTF-8 | Python | false | false | 452 | py | # Generated by Django 2.2.4 on 2019-08-13 07:43
import diventi.accounts.models
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares the default manager of the
    # `diventiuser` model as DiventiUserManager.
    dependencies = [
        ('accounts', '0229_auto_20190812_1402'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='diventiuser',
            managers=[
                ('objects', diventi.accounts.models.DiventiUserManager()),
            ],
        ),
    ]
| [
"flavius476@gmail.com"
] | flavius476@gmail.com |
74fa365c1adbad9b80577e126779020c1164fc07 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_085029.80+325447.4/sdB_SDSSJ_085029.80+325447.4_coadd.py | 499208837b41dc63ff2c0793947849f68567605c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | from gPhoton.gMap import gMap
def main():
    # Single hard-coded gMap call for target sdB_SDSSJ_085029.80+325447.4:
    # builds a stepped (stepsz=30.) NUV count movie cube and a coadded
    # count image over a small sky range around the given position.
    gMap(band="NUV", skypos=[132.624167,32.913167], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_SDSSJ_085029.80+325447.4/sdB_SDSSJ_085029.80+325447.4_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_SDSSJ_085029.80+325447.4/sdB_SDSSJ_085029.80+325447.4_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
f41e8f81b5f3338158a8eb1869dcfb2e36386ba3 | 871b090c3562669c6ff2fc92508419b680443fad | /Booking_Management/edl_management/migrations/0026_auto_20210622_2153.py | b4a828c53fcaeae9de2eabecc4d009fe221f5d14 | [] | no_license | maxcrup007/CLEM | 4ace1a28be7a0463d8f04a04c0e19d991d7cdb70 | 3a1cb35fce426f1826e8764487ad97c631ff59a9 | refs/heads/master | 2023-06-06T00:13:26.663741 | 2021-06-24T17:12:19 | 2021-06-24T17:12:19 | 379,998,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | # Generated by Django 3.0 on 2021-06-22 14:53
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: makes BookEquipmentLetter.project a nullable
    # foreign key to Project with cascading deletes.
    dependencies = [
        ('edl_management', '0025_auto_20210622_1351'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bookequipmentletter',
            name='project',
            field=models.ForeignKey(help_text='โครงการที่ทำอยู่', null=True, on_delete=django.db.models.deletion.CASCADE, to='edl_management.Project'),
        ),
    ]
| [
"36732487+maxcrup007@users.noreply.github.com"
] | 36732487+maxcrup007@users.noreply.github.com |
2f55c363d20f9a90e541fbc60990f6901e6e451d | 87274e9dd7cf48645b3bc6baa5e7572b9fcdcb0b | /ceshi/ceshi/spiders/lagou_spider.py | a471e1cd80f359b586c3576741c0fef3563362f2 | [] | no_license | ixiaoxinxin/xiaoxinxinPAPAPA | 1619652812cd1db4caa84528c814f58e3afa1ce4 | ac97c81f24e725ff5f33160b032e2f99c7f1dfe2 | refs/heads/master | 2021-05-02T01:27:20.706064 | 2017-02-05T14:24:13 | 2017-02-05T14:24:13 | 79,033,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | # -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.contrib.spiders import Rule,CrawlSpider
from scrapy.selector import Selector
class LagouSpider(Spider):
    """Scrapy spider (Python 2) scraping test-engineer job listings from lagou.com."""
    name = "lagou"
    allowed_domains = ["lagou.com"]
    start_urls = [# this attribute name must be spelled exactly, otherwise scrapy finds no URLs and nothing is crawled
        "https://www.lagou.com/zhaopin/ceshigongchengshi/",
        # "https://www.lagou.com/zhaopin/zidonghuaceshi/",
        # "https://www.lagou.com/zhaopin/gongnengceshi/",
        # "https://www.lagou.com/zhaopin/xingnengceshi/",
        # "https://www.lagou.com/zhaopin/ceshikaifa/",
        # "https://www.lagou.com/zhaopin/heiheceshi/",
        # "https://www.lagou.com/zhaopin/shoujiceshi/"
    ]
    def parse(self, response):
        #filename = response.url.split("/")[-2]  # use the last path segment as the file name
        #open(filename,'wb').write(response.body)
        sel = Selector(response)
        sites = sel.xpath('//*[@id="s_position_list"]/ul')
        for site in sites :
            title = site.xpath('//*[@id="s_position_list"]/ul/li').extract()
            #link = site.xpath('a/@href').extract()
            #desc = site.xpath('text()').extract()
            # Convert the scraped Chinese text from unicode escapes to readable form
            # http://blog.csdn.net/cc7756789w/article/details/46049369
            s = str(title).replace('u\'','\'')
            print s.decode("unicode-escape")
            #print title
"295563386@qq.com"
] | 295563386@qq.com |
61d98a99974294a3d48ec28e780ce63457575bf4 | 272a8b0b38e4af5f22dd811040f0ca2b0b111c61 | /exp_scripts/loss_gan_noload3.py | 3d8162cf5e1b818f3e989bbee573ebe95b041078 | [] | no_license | jessemin/GeneGan | 1c1a97b6ab566a7c556ce1452e4c35530b0b626c | 2ad94e842cfaee531d7e13af7472b623bf96de30 | refs/heads/master | 2021-09-13T13:02:33.629138 | 2018-04-30T06:57:13 | 2018-04-30T06:57:13 | 112,046,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import os
# Launch the loss_gan experiment (run 3, no checkpoint loading) from the
# notebooks directory; all hyper-parameters are passed on the command line
# by the os.system call below.
os.chdir('../exp_notebooks')
os.system('python loss_gan.py\
-w=2001\
-save=loss_gan_noload_3\
-sample_num=10000\
-g_weight=0.5\
-mse_weight=0.5\
-g_lr=0.001\
-d_lr=0.001\
--smooth_rate=0.1\
-cuda=1')
| [
"jesikmin@stanford.edu"
] | jesikmin@stanford.edu |
dd881acb19392265ebaf229e8e5e36e1235eb54b | 6527b66fd08d9e7f833973adf421faccd8b765f5 | /yuancloud/addons/hw_escpos/__init__.py | b69f45ed25a74e18525c21c133b53d0f06904456 | [
"MIT"
] | permissive | cash2one/yuancloud | 9a41933514e57167afb70cb5daba7f352673fb4d | 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572 | refs/heads/master | 2021-06-19T22:11:08.260079 | 2017-06-29T06:26:15 | 2017-06-29T06:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | # -*- coding: utf-8 -*-
# Part of YuanCloud. See LICENSE file for full copyright and licensing details.
import controllers
import escpos
| [
"liuganghao@lztogether.com"
] | liuganghao@lztogether.com |
0d8a541cf7e89dc8bfa362e75441297927f3dbdb | 61afe17201589a61c39429602ca11e3fdacf47a9 | /Chapter3/Day23/16.进程间数据的共享_Array.py | c663d6e8677d235e387ba052b83e1d4875630641 | [] | no_license | Liunrestrained/Python- | ec09315c50b395497dd9b0f83219fef6355e9b21 | 6b2cb4ae74c59820c6eabc4b0e98961ef3b941b2 | refs/heads/main | 2023-07-17T14:16:12.084304 | 2021-08-28T14:05:12 | 2021-08-28T14:05:12 | 399,408,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from multiprocessing import Process, Value, Array
def f(data_array):
    """Child-process target: overwrite slot 0 of the shared array with 66."""
    data_array[0] = 66
if __name__ == '__main__':
    arr = Array('i', [11, 22, 33, 44]) # shared array: element type must be int ('i'); the length is fixed by the initializer - no more, no fewer entries
    p = Process(target=f, args=(arr, ))
    p.start()
    p.join()
    # The child's write is visible here through the shared memory.
    print(arr[:]) # [66, 22, 33, 44]
| [
"noreply@github.com"
] | Liunrestrained.noreply@github.com |
e890e6a1b3efbd2ecb9561220b3fdbe2a0c6c70d | 3e78bf8a64318c091837cab3c1288cafc3617310 | /scripts/Script.py | c3c3b28229d28ae56f80278c459c443cc3f2cd66 | [] | no_license | eddiedb6/rate | e6887a6f8324e79fafa0ccd75a280dcc883a18b7 | 9c54bf949348d74fe8c7e4b0d3a1abd56b936821 | refs/heads/master | 2022-03-10T12:04:53.025473 | 2019-11-06T10:15:42 | 2019-11-06T10:15:42 | 105,227,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | import sys
import os
import time
# __file__ will be AFW.py in auto/afw
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], "../../scripts"))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], "../../util"))
sys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], "../.."))
from RateConfig import *
from ScriptRateSearch import *
from ScriptRateFetch import *
from DBUtil import *
from DumpUtil import *
# {
# "date": "2016-09-01",
# "currency": 2,
# "rate": 660
# }
# Rates needing an update come either from the database or from a file,
# depending on the project-level ImportPath setting.
rates = []
if ImportPath == "":
    rates = FetchExchangeRateForUpdateFromDB()
else:
    rates = FetchExchangeRateForUpdateFromFile()
# Open browser
browser = afw.OpenWebBrowser("Browser")
# Fetch each rate from network
for rate in rates:
    # Open page
    if not browser.OpenURL("URLRate"):
        break
    time.sleep(SleepSeconds)
    if not SearchRate(browser, rate["date"], rate["currency"]):
        break
    time.sleep(SleepSeconds)
    # FetchRate yields None when no value could be read; those entries keep
    # no "rate" key and are reported as failures below.
    rateString = FetchRate(browser)
    if rateString is not None:
        rate["rate"] = float(rateString)
browser.Quit()
# Persist the results to the DB or to a file, mirroring the import side.
if ExportPath == "":
    UpdateExchangeRateToDB(rates)
else:
    UpdateExchangeRateToFile(rates)
# Report
failedRates = []
print("")
for rate in rates:
    if "rate" in rate:
        print("Get rate: " + str(rate["rate"]) + ", " + rate["date"] + ", " + str(rate["currency"]))
    else:
        failedRates.append(rate)
print("")
for rate in failedRates:
    print("Failed to get rate: " + rate["date"] + ", " + str(rate["currency"]))
| [
"eddiedb6@gmail.com"
] | eddiedb6@gmail.com |
f610ac4877ded41728b8e65579ba101a3dbe0b45 | 7f31d42f80dd93d6f18baed192125991b3645d66 | /examples/scripts/x86/check_constraint1.py | d96398bde35cf6fda16fd4ffcb7f772c978a822b | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | jmorse/barf-project | e183aba9ab99d0b742c320cba8169704441777ed | 7b336f9747216b44773f01a57bafbe4cbdf3c838 | refs/heads/master | 2021-05-08T07:02:07.175186 | 2017-10-17T15:51:47 | 2017-10-17T15:51:47 | 106,693,149 | 0 | 0 | null | 2017-10-12T12:59:04 | 2017-10-12T12:59:03 | null | UTF-8 | Python | false | false | 2,459 | py | #! /usr/bin/env python
from barf import BARF
if __name__ == "__main__":
    #
    # Open file
    #
    barf = BARF("../../samples/bin/constraint1.x86")
    #
    # Check constraint
    #
    # Disassembly of the function being analyzed:
    # 80483ed: 55          push ebp
    # 80483ee: 89 e5       mov ebp,esp
    # 80483f0: 83 ec 10    sub esp,0x10
    # 80483f3: 8b 45 f8    mov eax,DWORD PTR [ebp-0x8]
    # 80483f6: 8b 55 f4    mov edx,DWORD PTR [ebp-0xc]
    # 80483f9: 01 d0       add eax,edx
    # 80483fb: 83 c0 05    add eax,0x5
    # 80483fe: 89 45 fc    mov DWORD PTR [ebp-0x4],eax
    # 8048401: 8b 45 fc    mov eax,DWORD PTR [ebp-0x4]
    # 8048404: c9          leave
    # 8048405: c3          ret
    # Translate each x86 instruction to REIL and feed it to the analyzer.
    print("[+] Adding instructions to the analyzer...")
    for addr, asm_instr, reil_instrs in barf.translate(start=0x80483ed, end=0x8048401):
        print("0x{0:08x} : {1}".format(addr, asm_instr))
        for reil_instr in reil_instrs:
            barf.code_analyzer.add_instruction(reil_instr)
    print("[+] Adding pre and post conditions to the analyzer...")
    ebp = barf.code_analyzer.get_register_expr("ebp", mode="post")
    # Preconditions: set range for variable a and b
    a = barf.code_analyzer.get_memory_expr(ebp-0x8, 4, mode="pre")
    b = barf.code_analyzer.get_memory_expr(ebp-0xc, 4, mode="pre")
    for constr in [a >= 2, a <= 100, b >= 2, b <= 100]:
        barf.code_analyzer.add_constraint(constr)
    # Postconditions: set desired value for the result
    c = barf.code_analyzer.get_memory_expr(ebp-0x4, 4, mode="post")
    for constr in [c >= 26, c <= 28]:
        barf.code_analyzer.add_constraint(constr)
    # Ask the solver whether some (a, b) satisfies all constraints.
    print("[+] Check for satisfiability...")
    if barf.code_analyzer.check() == 'sat':
        print("[+] Satisfiable! Possible assignments:")
        # Get concrete value for expressions
        a_val = barf.code_analyzer.get_expr_value(a)
        b_val = barf.code_analyzer.get_expr_value(b)
        c_val = barf.code_analyzer.get_expr_value(c)
        # Print values
        print("- a: {0:#010x} ({0})".format(a_val))
        print("- b: {0:#010x} ({0})".format(b_val))
        print("- c: {0:#010x} ({0})".format(c_val))
        # Sanity check mirroring the analyzed code: c = a + b + 5.
        assert a_val + b_val + 5 == c_val
    else:
        print("[-] Unsatisfiable!")
| [
"cnheitman@fundacionsadosky.org.ar"
] | cnheitman@fundacionsadosky.org.ar |
61d05adc137915e86dd216e736831bf0cc5917fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03665/s821294954.py | b2af28f559909e7fcc451a213b7441c4c193ce2c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | def two_int():
N, K = map(int, input().split())
return N,K
def one_int():
    """Read one integer from stdin."""
    return int(input())
def one_str():
    """Read one line from stdin."""
    return input()
def many_int():
    """Read one line of whitespace-separated integers from stdin."""
    return list(map(int, input().split()))
N, P = two_int()
A=many_int()
# Count how many elements of A are even and how many are odd; only these
# two tallies matter for the subset-parity count below.
even = 0
odd = 0
for a in A:
    if a%2==0:
        even+=1
    else:
        odd+=1
import math
def comb(n,r):
    """Binomial coefficient C(n, r) computed from factorials."""
    numerator = math.factorial(n)
    denominator = math.factorial(n - r) * math.factorial(r)
    return numerator // denominator
# Any subset of the even elements leaves the sum's parity unchanged, so all
# 2^even subsets of them are counted.
even_count=0
for i in range(even+1):
    even_count += comb(even, i)
# Count subsets of the odd elements whose size has the parity required by P
# (odd-sized subsets for P==1; even-sized ones, including the empty subset
# added at the end, for P==0).
odd_count=0
if P==1:
    for i in range(1, odd+1, 2):
        odd_count += comb(odd, i)
elif P==0:
    for i in range(2, odd+1, 2):
        odd_count += comb(odd, i)
    odd_count+=1
if odd_count != 0 and even_count != 0:
print(odd_count * even_count)
else:
print(odd_count) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
de13c796eb2ff43e6f5f5a08ccc5e6911a6a6749 | d2cfe3ef86d89d580f2a9670f9fc9b11c3e424b4 | /rvsml/EvaluateRVSML.py | 67a0ebf4c95d06d1ab04f36c3197a780b026bcac | [
"BSD-3-Clause"
] | permissive | ShuheiKuriki/SentEval_by_RVSML | 4e89ccd20fa6446c1656d76bafbbd79199b36e86 | b2cebb65c755bfec8ebb0a0400f0d69a5725bd6f | refs/heads/master | 2022-04-25T22:01:21.612920 | 2020-04-14T19:07:08 | 2020-04-14T19:07:08 | 254,819,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | py | import numpy as np
import time
from rvsml.NNClassifier import *
from rvsml.RVSML_OT_Learning import *
import logging
from array import array
def EvaluateRVSML_dtw(classnum, dim, trainset, trainsetnum, testset, testsetdata, testsetdatanum, testsetlabel, testsetnum, logname, logfile):
    """Train an RVSML metric with DTW alignment and evaluate it by 1-NN.

    Learns a linear transform L on the training sequences, projects both the
    training and test sequences through it, classifies the test set with a
    DTW-based nearest-neighbour classifier, and logs timing/accuracy figures.
    The arguments mirror the project's dataset layout (per-class sequence
    lists plus counts and labels) -- assumed, confirm against the callers.
    Returns None; all results are written to the logger/logfile.
    """
    #rankdim = 58
    CVAL = 1
    # Log both to the console and to `logfile`.
    logger = logging.getLogger(logname)
    logger.setLevel(10)
    sh = logging.StreamHandler()
    logger.addHandler(sh)
    logging.basicConfig(filename=logfile, format="%(message)s", filemode='w')
    # addpath('E:/BING/ActionRecognition/FrameWideFeatures/libsvm-3.20/matlab')
    # Fixed optimisation hyper-parameters bundled into an Options object.
    delta = 1
    lambda1 = 50
    lambda2 = 0.1
    max_iters = 10
    err_limit = 10**(-2)
    class Options:
        def __init__(self, max_iters, err_limit, lambda1, lambda2, delta):
            self.max_iters = max_iters
            self.err_limit = err_limit
            self.lambda1 = lambda1
            self.lambda2 = lambda2
            self.delta = delta
    options = Options(max_iters,err_limit,lambda1,lambda2,delta)
    # testsetlabel = testsetlabel.T
    # for name in matlist:
    #     exec("%s = %s[0]" % (name, name))
    # testsetdatanum = testsetdatanum[0]
    trainset_m = trainset
    # shape=trainset_m[0].shape
    # for c in range(classnum):
    #     for m in range(trainsetnum[c]):
    #         trainset_m[c][0][m] = trainset[c][0][m] - traindatamean
    # testsetdata_m = testsetdata
    # for m in range(testsetdatanum):
    #     testsetdata_m[m] = testsetdata[m] - traindatamean
    ## RVSML-DTW
    logger.info("data load done")
    logger.info("DTW start")
    templatenum = 4
    lambda0 = 0.0005
    # Time the metric-learning step.
    tic = time.time()
    L = RVSML_OT_Learning_dtw(trainset_m, trainsetnum, dim,templatenum,lambda0,options,logname)
    RVSML_dtw_time = time.time() - tic
    logger.info("DTW learning done")
    ## classification with the learned metric
    # Project every training and test sequence through the learned matrix L.
    traindownset = [0]*classnum
    testdownsetdata = [0]*testsetdatanum
    for j in range(classnum):
        traindownset[j] = [0]*trainsetnum[j]
        for m in range(trainsetnum[j]):
            traindownset[j][m] = np.dot(trainset[j][m] ,L)
    for j in range(testsetdatanum):
        testdownsetdata[j] = np.dot(testsetdata[j], L)
    RVSML_dtw_macro, RVSML_dtw_micro, RVSML_dtw_acc, dtw_knn_time, dtw_knn_average_time = NNClassifier_dtw(classnum,traindownset,trainsetnum,testdownsetdata,testsetdatanum,testsetlabel,options,logname)
    RVSML_dtw_acc_1 = RVSML_dtw_acc[0]
    # Report timings and accuracies.
    logger.info('Training time of RVSML instantiated by DTW is {:.4f} \n'.format(RVSML_dtw_time))
    logger.info('Classification using 1 nearest neighbor classifier with DTW distance:\n')
    logger.info('MAP macro is {:.4f}, micro is {:.4f} \n'.format(RVSML_dtw_macro, RVSML_dtw_micro))
    logger.info('dtw_knn_time is {:.4f} \n'.format(dtw_knn_time))
    logger.info('dtw_knn_average_time is {:.4f} \n'.format(dtw_knn_average_time))
    for acc in RVSML_dtw_acc:
        logger.info('Accuracy is {:.4f} \n'.format(acc))
    return
"shukuri.7336.8@gmail.com"
] | shukuri.7336.8@gmail.com |
c62305d533cfdea397699d4dcbbcc47d4d51441f | f63fdd673e5b9881a0395be83e0a6b5ccc7edbdb | /ReedsShepp_planner/drawing_pallet_jack.py | ac856f0c27793f58e4388e1c15825a7020aa8cfb | [] | no_license | karry3775/Motion-Planning-Python | 63ad4bf928c57b9ea6b8c19fceea16dbc2adb783 | 20ed30526bd03380ead8b29357e5bf7eb291ba9b | refs/heads/master | 2023-02-08T18:19:26.644696 | 2020-12-30T22:22:36 | 2020-12-30T22:22:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,109 | py | import matplotlib.pyplot as plt
import numpy as np
import math
def wrapToPi(theta):
    """Normalize an angle in radians to the interval (-pi, pi]."""
    sine = math.sin(theta)
    cosine = math.cos(theta)
    return math.atan2(sine, cosine)
def dpj(x,y,theta,steer):
    """Draw a pallet jack at pose (x, y, theta) with steering angle `steer`.

    All part outlines are defined in the vehicle frame (steer-wheel axis at
    the origin, forks pointing toward -x) as homogeneous points [x, y, 1],
    then mapped into the world frame with 2-D homogeneous transforms.
    Angles are in radians.  Draws onto the current matplotlib axes; does
    not call plt.show().
    """
    # plotting the center point (steer-wheel pivot)
    wd = 1.5  # line width for the body outline
    plt.plot(x,y,'co')
    # Heading arrow (orange) and steering-direction arrow (green).
    plt.arrow(x,y,math.cos(theta),math.sin(theta),width=0.01,color='orange')
    plt.arrow(x,y,math.cos(wrapToPi(theta+steer)),math.sin(wrapToPi(theta+steer)),width=0.01,color='g')
    # wheel cordinates: 0.6 x 0.2 rectangle centred on the steer axis
    A = np.array([-0.3,0.1,1]).T
    B = np.array([0.3,0.1,1]).T
    C = np.array([-0.3,-0.1,1]).T
    D = np.array([0.3,-0.1,1]).T
    # tranform the wheel: rotate by (theta + steer), translate to (x, y)
    T_wheel = np.array([[math.cos(wrapToPi(theta + steer)),-math.sin(wrapToPi(theta + steer)),x],
                        [math.sin(wrapToPi(theta + steer)),math.cos(wrapToPi(theta + steer)),y],
                        [0,0,1]])
    A = np.matmul(T_wheel,A)
    B = np.matmul(T_wheel,B)
    C = np.matmul(T_wheel,C)
    D = np.matmul(T_wheel,D)
    # front cordinates: outline of the tractor head around the wheel
    a = np.array([0.5,-0.5,1]).T
    b = np.array([0.5,0.5,1]).T
    c = np.array([0,1,1]).T
    d = np.array([0,-1,1]).T
    e = np.array([-0.5,1,1]).T
    f = np.array([-0.5,-1,1]).T
    # dotted front: inner dashed detail of the head
    X = np.array([-0.5,0.75,1]).T
    Y = np.array([0,0.75,1]).T
    Z = np.array([0.25,0.5,1]).T
    W = np.array([0.25,-0.5,1]).T
    U = np.array([0,-0.75,1]).T
    V = np.array([-0.5,-0.75,1]).T
    # back support: rear plate and the fork root points
    g = np.array([-1.5,1,1]).T
    h = np.array([-1.5,-1,1]).T
    i = np.array([-1.5,0.75,1]).T
    j = np.array([-1.5,0.25,1]).T
    k = np.array([-1.5,-0.25,1]).T
    l = np.array([-1.5,-0.75,1]).T
    # drawing the pallet_first_ends: straight section ends of the two forks
    m = np.array([-4,0.75,1]).T
    n = np.array([-4,0.25,1]).T
    o = np.array([-4,-0.25,1]).T
    p = np.array([-4,-0.75,1]).T
    # drawing the lasts: tapered fork tips
    q = np.array([-4.5,0.75-0.2,1]).T
    r = np.array([-4.5,0.25+0.2,1]).T
    s = np.array([-4.5,-0.25-0.2,1]).T
    t = np.array([-4.5,-0.75+0.2,1]).T
    # Tranformations: body points rotate by theta only (not steer)
    T = np.array([[math.cos(wrapToPi(theta)),-math.sin(wrapToPi(theta)),x],
                  [math.sin(wrapToPi(theta)),math.cos(wrapToPi(theta)),y],
                  [0,0,1]])
    # front cordinates -> world frame
    a = np.matmul(T,a)
    b = np.matmul(T,b)
    c = np.matmul(T,c)
    d = np.matmul(T,d)
    e = np.matmul(T,e)
    f = np.matmul(T,f)
    # dotted front -> world frame
    X = np.matmul(T,X)
    Y = np.matmul(T,Y)
    Z = np.matmul(T,Z)
    W = np.matmul(T,W)
    U = np.matmul(T,U)
    V = np.matmul(T,V)
    # back support -> world frame
    g = np.matmul(T,g)
    h = np.matmul(T,h)
    i = np.matmul(T,i)
    j = np.matmul(T,j)
    k = np.matmul(T,k)
    l = np.matmul(T,l)
    # drawing the pallet_first_ends -> world frame
    m = np.matmul(T,m)
    n = np.matmul(T,n)
    o = np.matmul(T,o)
    p = np.matmul(T,p)
    # drawing the lasts -> world frame
    q = np.matmul(T,q)
    r = np.matmul(T,r)
    s = np.matmul(T,s)
    t = np.matmul(T,t)
    # Midpoint between the inner fork ends; used for the axle guide line.
    back_center = [(n[0]+o[0])/2,(n[1]+o[1])/2]
    # plotting color: filled polygons for forks (grey) and head (orange)
    plt.fill([r[0],q[0],m[0],i[0],j[0],n[0],r[0]],[r[1],q[1],m[1],i[1],j[1],n[1],r[1]],color='grey')
    plt.fill([s[0],o[0],k[0],l[0],p[0],t[0],s[0]],[s[1],o[1],k[1],l[1],p[1],t[1],s[1]],color='grey')
    plt.fill([g[0],e[0],f[0],h[0],g[0]],[g[1],e[1],f[1],h[1],g[1]],color='orange')
    plt.fill([e[0],c[0],b[0],a[0],d[0],f[0],e[0]],[e[1],c[1],b[1],a[1],d[1],f[1],e[1]],color='orange')
    plt.fill([A[0],B[0],D[0],C[0],A[0]],[A[1],B[1],D[1],C[1],A[1]],color='blue')
    # Black outline of the tractor head.
    plt.plot([a[0],b[0]],[a[1],b[1]],'k',linewidth=wd)
    plt.plot([a[0],d[0]],[a[1],d[1]],'k',linewidth=wd)
    plt.plot([c[0],b[0]],[c[1],b[1]],'k',linewidth=wd)
    plt.plot([c[0],e[0]],[c[1],e[1]],'k',linewidth=wd)
    plt.plot([d[0],f[0]],[d[1],f[1]],'k',linewidth=wd)
    plt.plot([e[0],f[0]],[e[1],f[1]],'k',linewidth=wd)
    # Green dashed inner detail.
    plt.plot([X[0],Y[0]],[X[1],Y[1]],'g--')
    plt.plot([Z[0],Y[0]],[Z[1],Y[1]],'g--')
    plt.plot([Z[0],W[0]],[Z[1],W[1]],'g--')
    plt.plot([U[0],W[0]],[U[1],W[1]],'g--')
    plt.plot([U[0],V[0]],[U[1],V[1]],'g--')
    # Rear plate outline.
    plt.plot([g[0],h[0]],[g[1],h[1]],'k',linewidth=wd)
    plt.plot([g[0],e[0]],[g[1],e[1]],'k',linewidth=wd)
    plt.plot([h[0],f[0]],[h[1],f[1]],'k',linewidth=wd)
    plt.plot([i[0],l[0]],[i[1],l[1]],'k',linewidth=wd)
    # Fork sides.
    plt.plot([m[0],i[0]],[m[1],i[1]],'k',linewidth=wd)
    plt.plot([n[0],j[0]],[n[1],j[1]],'k',linewidth=wd)
    plt.plot([o[0],k[0]],[o[1],k[1]],'k',linewidth=wd)
    plt.plot([p[0],l[0]],[p[1],l[1]],'k',linewidth=wd)
    # Tapered fork tips.
    plt.plot([m[0],q[0]],[m[1],q[1]],'k',linewidth=wd)
    plt.plot([q[0],r[0]],[q[1],r[1]],'k',linewidth=wd)
    plt.plot([n[0],r[0]],[n[1],r[1]],'k',linewidth=wd)
    plt.plot([o[0],s[0]],[o[1],s[1]],'k',linewidth=wd)
    plt.plot([s[0],t[0]],[s[1],t[1]],'k',linewidth=wd)
    plt.plot([p[0],t[0]],[p[1],t[1]],'k',linewidth=wd)
    # Steer-wheel rectangle outline.
    plt.plot([A[0],B[0]],[A[1],B[1]],'k')
    plt.plot([A[0],C[0]],[A[1],C[1]],'k')
    plt.plot([D[0],B[0]],[D[1],B[1]],'k')
    plt.plot([D[0],C[0]],[D[1],C[1]],'k')
    # Guide line from the fork midpoint to the steer pivot.
    plt.plot([back_center[0],x],[back_center[1],y],'o--',linewidth=wd)
    # plt.axes().set_aspect('equal','datalim')
    # plt.show()
# Example pose and steering angle for manual testing.  The draw call is
# left commented out so importing this module has no plotting side effect.
x = 1
y = 2
theta = math.pi/6
steer = math.pi/3
# dpj(x,y,theta,steer)
| [
"kartikprakash3775@gmail.com"
] | kartikprakash3775@gmail.com |
f99707e466fb45f4b828045d6ea2f479b3607a9e | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4145/codes/1785_1593.py | bdc0b42949e4321e6639bacb31776578fbdc14d2 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from numpy import*
# Read a Python-literal list of grades (e.g. "[7.5, 8.0, 9.0]") into a
# numpy array.  NOTE(review): eval() on raw user input is unsafe in
# general; acceptable only for a graded exercise.
v = array(eval(input("vetor de notas: ")))
#v=input("vetor de notas: ")
#vv=arange(v,dtype ==float)
i=1   # current weight
n=0   # unused leftover variable
c=0   # index of the grade being weighted
d=0   # running sum of the weights
# Multiply grade k by weight (k+1) for k = 0..size-2 and accumulate the
# weight total.  NOTE(review): the last grade is never multiplied, so it
# enters the sum with an implicit weight of 1 — confirm this matches the
# exercise's intended weighting scheme.
while(i<size(v)):
    v[c]= v[c]*i
    c=c+1
    d=d+i
    i=i+1
# Weighted mean = sum of weighted grades / sum of weights.
m =sum(v)/d
#print(round(n/d,2))
print(round(m,2))
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
0ef05a08b6f483dac468d83fbee9b86c0391c9de | 28a7feb26470d0d6743454222b74abadf05af449 | /dcase_util/utils/timer.py | 79247276c5e10e9961884ce814bfae551278dea5 | [
"MIT"
] | permissive | DCASE-REPO/dcase_util | de322c900388de66af3398af294e095e28392beb | a2694b0b9ad4592c9c27c935fb92b0e5751b8ab4 | refs/heads/master | 2023-06-08T08:06:23.525652 | 2022-06-09T22:05:38 | 2022-06-09T22:05:38 | 110,171,294 | 137 | 41 | MIT | 2022-06-09T22:05:39 | 2017-11-09T22:03:27 | Python | UTF-8 | Python | false | false | 1,526 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import datetime
import time
class Timer(object):
    """Simple wall-clock stopwatch.

    Usable explicitly via start()/stop()/elapsed() or as a context
    manager (``with Timer(): ...``).
    """

    def __init__(self):
        # _start: time.time() stamp recorded by the last start() call.
        # _elapsed: duration cached by the last stop() call (None while
        # the timer is running).
        self._start = None
        self._elapsed = None

    def start(self):
        """Start (or restart) the timer.

        Returns
        -------
        self
        """
        self._elapsed = None
        self._start = time.time()
        return self

    def stop(self):
        """Stop the timer.

        Returns
        -------
        float
            Seconds elapsed since start() was called.
        """
        self._elapsed = time.time() - self._start
        return self._elapsed

    def elapsed(self):
        """Seconds since the timer was started, without stopping it.

        Returns
        -------
        float
            Seconds since timer was started.
        """
        return time.time() - self._start

    def get_string(self, elapsed=None):
        """Format an elapsed time as ``H:MM:SS[.ffffff]``.

        Parameters
        ----------
        elapsed : float, optional
            Elapsed seconds to format; when None, the time since the
            timer was started is used.

        Returns
        -------
        str
            Human-readable time delta.
        """
        seconds = elapsed if elapsed is not None else time.time() - self._start
        return str(datetime.timedelta(seconds=seconds))

    def __enter__(self):
        self.start()

    def __exit__(self, type, value, traceback):
        self.stop()
| [
"toni.heittola@gmail.com"
] | toni.heittola@gmail.com |
22476f16fbbdba9d4cd70f408ea0265dce33ddf2 | e522dc3b8ae16fb6adf8c679c2fcd61e06979f29 | /example/serial_example_2.py | c0f03b4ff72605d2fc8273faa36f0f388dd3043c | [
"MIT"
] | permissive | amaork/raspi-io | 96e92330555e7700f54633f582efbc7620f8b10b | aaea4532569010a64f3c54036b9db7eb81515d1a | refs/heads/master | 2021-09-17T15:27:43.853195 | 2021-08-27T08:51:24 | 2021-08-27T08:51:24 | 94,192,125 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | #!/usr/bin/env python3.5
from raspi_io import Serial
import raspi_io.utility as utility
if __name__ == "__main__":
    # Demo: connect to the first raspi-io server found on the LAN and
    # read its hardware UART one byte at a time, forever (Ctrl-C to quit).
    cnt = 0  # NOTE(review): never incremented or used — looks like a leftover.
    port = Serial(utility.scan_server()[0], '/dev/ttyS0', 115200, timeout=1)
    while True:
        port.read(1)
| [
"amaork@gmail.com"
] | amaork@gmail.com |
153829a56bfdcb6f0bf9cf971b037d569141c8b2 | a8750439f200e4efc11715df797489f30e9828c6 | /codechef/CFR_383_2_742_A.py | b5af70d5e071990b82d467c9a9062dbe056f81f8 | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py |
# -*- coding: utf-8 -*-
# @Date : 2018-10-02 08:00:37
# @Author : raj lath (oorja.halt@gmail.com)
# @Link : link
# @Version : 1.0.0
from sys import stdin
# Sentinel "infinity" bounds (note: 10e12 is 1e13).  Unused in this
# particular solution; kept from the author's template.
max_val=int(10e12)
min_val=int(-10e12)
# Fast-input helpers reading from sys.stdin (competitive-programming template).
def read_int()  : return int(stdin.readline())
def read_ints() : return [int(x) for x in stdin.readline().split()]
def read_str()  : return input()
def read_strs() : return [x for x in stdin.readline().split()]
# Answer: the last decimal digit of 1378**n, computed with modular
# exponentiation (pow with a third argument) so huge n stays cheap.
print(pow(1378, read_int(), 10))
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
98039b25a467025b1eb7152af7fecb84cafc6724 | 4626631c5e68a13ed4dde041212da39d344d74d9 | /examples/scripts/define-user-roles.py | 8066ecfe60673ba22a2b7e702350628e5d50c9c2 | [
"MIT"
] | permissive | xod442/python-hpOneView | a1482677e3252dabf1e14f9349c119428331089f | b78fb81cba34992bb84ed3814aae04ce05ef913f | refs/heads/master | 2021-01-18T05:53:42.466348 | 2015-08-11T15:59:16 | 2015-08-11T15:59:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,513 | py | #!/usr/bin/env python3
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys
if sys.version_info < (3, 4):
raise Exception("Must use Python 3.4 or later")
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
    """Accept the appliance EULA if it has not been accepted yet.

    Parameters
    ----------
    con : hpov.connection
        Active connection to the HP OneView appliance.

    Any appliance error is printed and swallowed so a EULA hiccup does
    not abort the caller (same best-effort behaviour as before).
    """
    try:
        # Fix: the original called get_eula_status() twice — once outside
        # the try block (unprotected, result discarded) and once inside.
        # One guarded call is both safer and one fewer round-trip.
        if con.get_eula_status() is True:
            print("EULA display needed")
            # 'no' declines sending supportability data while accepting.
            con.set_eula('no')
    except Exception as e:
        print('EXCEPTION:')
        print(e)
def login(con, credential):
    """Log in to the appliance with the given credential dict.

    Parameters
    ----------
    con : hpov.connection
        Connection to the HP OneView appliance.
    credential : dict
        ``{'userName': ..., 'password': ...}``.

    A failed login is reported on stdout rather than raised, matching
    the original best-effort behaviour.
    """
    # Login with given credentials
    try:
        con.login(credential)
    except Exception:
        # Fix: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit still propagate instead of being swallowed.
        print('Login failed')
def getrole(sec):
    """Retrieve every user account from the appliance and pretty-print it."""
    all_users = sec.get_users()
    pprint(all_users)
def setroles(sec, name, roles):
    """Assign multiple roles to user *name* via the security API.

    Valid roles: 'Read only', 'Full', 'Backup administrator',
    'Network administrator', 'Server administrator',
    'Storage administrator'.
    """
    message = 'Set User: ' + name + ' role: ' + roles
    print(message)
    sec.set_user_roles(name, roles)
def setrole(sec, name, role):
    """Assign a single role to user *name* via the security API.

    Valid roles: 'Read only', 'Full', 'Backup administrator',
    'Network administrator', 'Server administrator',
    'Storage administrator'.
    """
    message = 'Set User: ' + name + ' role: ' + role
    print(message)
    sec.set_user_role(name, role)
def main():
    """Parse CLI arguments, connect to the appliance, then either list
    users (-g) or grant the named user (-n) the 'Read only' role."""
    parser = argparse.ArgumentParser(add_help=True,
                        formatter_class=argparse.RawTextHelpFormatter,
                        description='''
    Define User Roles
    Usage: ''')
    parser.add_argument('-a', dest='host', required=True,
                        help='''
    HP OneView Appliance hostname or IP address''')
    parser.add_argument('-u', dest='user', required=False,
                        default='Administrator',
                        help='''
    HP OneView Username''')
    parser.add_argument('-p', dest='passwd', required=True,
                        help='''
    HP OneView Password''')
    parser.add_argument('-c', dest='cert', required=False,
                        help='''
    Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
    parser.add_argument('-y', dest='proxy', required=False,
                        help='''
    Proxy (host:port format''')
    # NOTE(review): args.upass is parsed but never used below — confirm
    # whether password changing was meant to be wired up.
    parser.add_argument('-x', dest='upass', required=False,
                        help='''
    New user password''')
    # -g and -n are mutually exclusive: list users, or act on one user.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-g', dest='getrole', action='store_true',
                       help='''
    Display the users and exit''')
    group.add_argument('-n', dest='name',
                       help='''
    Username to add''')
    args = parser.parse_args()
    credential = {'userName': args.user, 'password': args.passwd}
    con = hpov.connection(args.host)
    # NOTE(review): srv, net and sts are constructed but never used here.
    srv = hpov.servers(con)
    net = hpov.networking(con)
    sec = hpov.security(con)
    sts = hpov.settings(con)
    # Optional connection settings before logging in.
    if args.proxy:
        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
    if args.cert:
        con.set_trusted_ssl_bundle(args.cert)
    login(con, credential)
    acceptEULA(con)
    # -g: just list users and stop.
    if args.getrole:
        getrole(sec)
        sys.exit()
    # -n: grant the hard-coded 'Read only' role to the named user.
    setrole(sec, args.name, 'Read only')
    # setroles(sec, args.name, ['Backup administrator', 'Network administrator'])
if __name__ == '__main__':
    # sys/argparse are imported here (before main() runs) so the module
    # can be imported without them; main()'s return value becomes the
    # process exit status.
    import sys
    import argparse
    sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"troy@debdev.org"
] | troy@debdev.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.