blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33e55bfd1958962944107983ffcdbbc3ca47e3ee
|
3bdd0d5e8596fd33011021a08e0d513305d88e4e
|
/3. Greedy algorithms/D1.py
|
51938b703c09d9e5081d68c45171bb991a2cdcad
|
[] |
no_license
|
Ilia-Abrosimov/Algorithms-and-data-structures
|
5b5b6ebd6a2612a0c9eee2fe875d1c220b74bccc
|
dc3a37a8e53cbd27a3e9459a10fd9f0514d43db0
|
refs/heads/master
| 2023-03-30T22:34:56.724678
| 2021-03-25T18:53:58
| 2021-03-25T18:53:58
| 317,554,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
"""
Реализуйте код алгоритма заполнения рюкзака, рассмотренного в лекции:
Взять наиболее ценный предмет, который поместится в рюкзак. Выбрать
следующий по стоимости товар с учётом того, что для него осталось место в
рюкзаке. Формат ввода В первой строке записано целое число с в диапазоне от
0 до 1000 — вместимость рюкзака. Во второй — число n — количество предметов.
Оно не больше 10000.
В следующих n строках записано по 2 числа, разделенные пробелом: стоимость
предмета и его вес. Оба числа не превосходят 1000
Формат вывода Нужно в строке вывести в отсортированном порядке номера
предметов, которые будут выбраны. Номер предмета - это порядковый номер его
появления во входных данных. (Индексация начинается с нуля)
Пример
Ввод
36
4
25 50
30 40
10 80
2 3
Вывод
3
"""
class Thing:
    """One knapsack item: its value, its weight, and its 0-based
    position in the input (used for the output ordering)."""

    def __init__(self, price, weight, index):
        self.price, self.weight, self.index = price, weight, index
def collect_backpack(capacity, global_list):
    """Greedily fill the knapsack: repeatedly take the most valuable item
    that still fits.

    Sorts ``global_list`` in place (price descending, then lighter weight,
    then input order — same tie-breaking as before) and returns the chosen
    items' input indices in ascending order.
    """
    global_list.sort(key=lambda thing: (-thing.price, thing.weight,
                                        thing.index))
    remaining = capacity
    chosen = []
    for thing in global_list:
        if thing.weight > remaining:
            continue  # too heavy for the space that is left
        remaining -= thing.weight
        chosen.append(thing.index)
    chosen.sort()
    return chosen
if __name__ == "__main__":
    # Input format: capacity, then item count, then one "price weight"
    # pair per line (see the module docstring).
    size = int(input())
    n = int(input())
    array = []
    for i in range(n):
        price, weight = input().split()
        # `i` is the item's 0-based input position, reported in the answer.
        array.append(Thing(int(price), int(weight), i))
    indexs = collect_backpack(size, array)
    # Selected indices, space-separated on one line.
    print(*indexs)
|
[
"abrosimof93@gmail.com"
] |
abrosimof93@gmail.com
|
82d0fbdf3749294c3488152b07e75502c0c56604
|
8611dc0e4afbf8d1d3bf37cd8f65ceaef6b75917
|
/ros/robofest_jtsn/scripts/sign_detect/edge_tests.py
|
679a01813b9d8e26a64e8298daab8b26c8e4d2df
|
[] |
no_license
|
KaiL4eK/robofest_2018
|
c968dc97e393c5a8a80375cac91ded1a6f09a8a9
|
04cd4bccb8895b2a4108783ffa1d551a46737740
|
refs/heads/master
| 2020-07-21T22:43:14.288518
| 2018-05-09T18:51:34
| 2018-05-09T18:51:34
| 206,990,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
# Visual experiment: HOG features and Canny edges side by side on a sample
# image from skimage.
import matplotlib.pyplot as plt
from skimage import feature, color, data, exposure
import numpy as np
from sklearn import neighbors, datasets

# Built-in sample RGB image.
image_clr = data.astronaut()

# Iris is loaded only so its class labels get printed; it is otherwise
# unused in this script.
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
print(np.unique(y))

# NOTE(review): `rgb2grey` and the `visualise=` spelling are the old skimage
# names (later renamed rgb2gray / visualize) — confirm the pinned skimage
# version before upgrading.
image = color.rgb2grey(image_clr)
fd, hog_image = feature.hog(image, orientations=9, pixels_per_cell=(8, 8),
                            cells_per_block=(2, 2), visualise=True, transform_sqrt=True)
print( fd.shape )

# Three panels: input, HOG visualisation, Canny edges.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 4), sharex=True, sharey=True)
edges2 = feature.canny(image, sigma=2)
ax1.axis('off')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Input image')
# NOTE(review): 'box-forced' was removed in matplotlib >= 2.2 — verify the
# matplotlib version this is meant to run on.
ax1.set_adjustable('box-forced')
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 255))
ax2.axis('off')
ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax3.axis('off')
ax3.imshow(edges2, cmap=plt.cm.gray)
ax1.set_adjustable('box-forced')
plt.show()
|
[
"darklexi494@gmail.com"
] |
darklexi494@gmail.com
|
b0fc133ca6da659e3e3be02ba9c19a90d4d55eef
|
e80c9f0c9d4e6e8c1e545977d057188c6aec6d8f
|
/main.py
|
b3d3e5936e8dd3004e7635ed79efd507641f618c
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Somedaywilldo/Keras-progressive_growing_of_gans
|
b50fe0b5efc8b34651552a3bee462bf377421f75
|
684dbc6934ff012e7647da108ede30fb9c450502
|
refs/heads/master
| 2020-03-12T01:54:04.860181
| 2018-04-20T16:32:01
| 2018-04-20T16:32:01
| 127,539,159
| 22
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,512
|
py
|
from __future__ import print_function
import numpy as np
import sys
import os
import argparse
###################################################################
# Variables #
# When launching project or scripts from Visual Studio, #
# input_dir and output_dir are passed as arguments. #
# Users could set them from the project setting page. #
###################################################################
# Populated externally (e.g. by Visual Studio launch arguments, per the
# banner above); left as None until then.
input_dir = None
output_dir = None
log_dir = None
#################################################################################
# Keras configs. #
# Please refer to https://keras.io/backend . #
#################################################################################
import keras
from keras import backend as K
#K.set_floatx('float32')
#String: 'float16', 'float32', or 'float64'.
#K.set_epsilon(1e-05)
#float. Sets the value of the fuzz factor used in numeric expressions.
#K.set_image_data_format('channels_first')
#data_format: string. 'channels_first' or 'channels_last'.
#################################################################################
# Keras imports. #
#################################################################################
from keras.models import Model
from keras.models import Sequential
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import Layer
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Activation
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from train import *
def main():
    """Seed numpy, then dispatch to the training function named in
    ``config.train['func']``, passing the remaining entries as kwargs.

    Exits the process with status 0 when training returns.
    """
    np.random.seed(config.random_seed)
    train_params = config.train  # deliberate alias: config.train is consumed
    target_name = train_params.pop('func')  # pop == lookup + del, as before
    globals()[target_name](**train_params)
    exit(0)
if __name__ == "__main__":
    # CLI entry point: parse the known arguments, push them into the shared
    # `config` module (imported via `from train import *`), then train.
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str,
                        default='datasets',
                        help="Input directory where training dataset and meta data are saved",
                        required=True
                        )
    parser.add_argument("--result_dir", type=str,
                        default='results',
                        help="Input directory where logs and models are saved",
                        required=False
                        )
    parser.add_argument("--resume_dir", type=str,
                        default=None,
                        help="whether resume model and where the model are saved",
                        required=False)
    parser.add_argument("--resume_kimg", type=float,
                        default=0.0,
                        help="previous trained images in thousands",
                        required=False)
    # parse_known_args: ignore extra flags rather than erroring out.
    args, unknown = parser.parse_known_args()
    config.data_dir = args.data_dir
    config.result_dir = args.result_dir
    # argparse always defines these attributes, so the old hasattr() guards
    # were redundant; `is not None` is the idiomatic None test.
    if args.resume_dir is not None:
        config.train.update(resume_network=args.resume_dir)
    if args.resume_kimg is not None:
        config.train.update(resume_kimg=args.resume_kimg)
    #log_dir = output_dir
    main()
|
[
"714439471@qq.com"
] |
714439471@qq.com
|
1032ffa6cd3c1d75dcc025361cc06de377d8140c
|
7ff58c1ace33aeeb9b7a9ca293aceed672f910a4
|
/Experimental/experiments/defense/defense_experiments.py
|
590742292280afeb7d0d76a96fdc6fc1e9a37d94
|
[
"MIT"
] |
permissive
|
mballarin97/DrosophilaNetwork
|
981bd56a9bef771190dc37a89d48ffd0ae26a7b9
|
53136ce68e822c745ea23e8ccd287f00a003b928
|
refs/heads/main
| 2023-02-21T17:38:50.374951
| 2021-01-19T15:09:12
| 2021-01-19T15:09:12
| 314,572,228
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,686
|
py
|
import os
import sys
import matplotlib.pyplot as plt
from collections import defaultdict
sys.path.insert(0, os.getcwd() + '/../../')
from graphs import graph_loader
from defenses import Defense
def plot_results(graph, steps, results, title):
    """Plot one LCC curve per defense method, save the figure as
    ``<cwd>/plots/<title>.pdf``, display it, then clear the figure.

    :param graph: the analysed graph; only ``len(graph)`` (node count) is
        used, to normalise the curves.
    :param steps: number of x-axis points; assumed to match the length of
        each result sequence — TODO confirm against Defense.run_simulation.
    :param results: mapping of method name -> sequence of raw LCC sizes.
    :param title: plot title, reused as the output file name.
    """
    plt.figure(figsize=(6.4, 4.8))
    for method, result in results.items():
        # Normalise raw LCC sizes to a 0-1 fraction of the node count.
        result = [r / len(graph) for r in result]
        plt.plot(list(range(steps)), result, label=method)
    plt.ylim(0, 1)
    plt.ylabel('LCC')
    plt.xlabel('N_rm / N')
    plt.title(title)
    plt.legend()
    save_dir = os.getcwd() + '/plots/'
    os.makedirs(save_dir, exist_ok=True)
    plt.savefig(save_dir + title + '.pdf')
    plt.show()
    plt.clf()
def main():
    """Run every edge-defense strategy against the water network and plot
    the resulting robustness (LCC) curves."""
    graph = graph_loader(graph_type='water', seed=1)
    params = {
        'runs': 10,
        'steps': 30,
        'seed': 1,
        'k_a': 30,
        'attack': 'rb_node',
        # Approximate the attack on ~10% of the nodes (speed/accuracy trade).
        'attack_approx': int(0.1*len(graph)),
        'defense': 'rewire_edge_preferential',  # overwritten in the loop below
        'plot_transition': False,
        'gif_animation': False,
        'edge_style': None,
        'node_style': None,
        'fa_iter': 20
    }
    edge_defenses = ['rewire_edge_random', 'rewire_edge_random_neighbor', 'rewire_edge_preferential_random', 'add_edge_random', 'add_edge_preferential']
    print("Running edge defenses")
    # NOTE(review): defaultdict(str) only works because every key is
    # assigned before use; a plain dict would state the intent better.
    results = defaultdict(str)
    for defense in edge_defenses:
        params['defense'] = defense
        a = Defense(graph, **params)
        results[defense] = a.run_simulation()
    plot_results(graph, params['steps'], results, title='water:edge_defense_runs={},attack={},'.format(params['runs'], params['attack']))


if __name__ == '__main__':
    main()
|
[
"francesco.bianco.5@studenti.unipd.it"
] |
francesco.bianco.5@studenti.unipd.it
|
a20ced181f7e4e3f10adfaa72a54e5d3d8965fdf
|
a29c5402b81fe3026408755f793636ed389d6316
|
/requestz/__init__.py
|
330e82278b13ed9c9b79854cce0f42f1e2703cd8
|
[
"MIT"
] |
permissive
|
Originator2019/requestz
|
66f90b1ec38d89f77c2a5e60e8f385501ca37417
|
6ec81e4bf520ade2ed6537f24dc0a395b3af754b
|
refs/heads/master
| 2023-03-15T15:08:08.525057
| 2020-11-23T12:19:05
| 2020-11-23T12:19:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,074
|
py
|
from requestz.session import Session
def session():
    """Create and return a fresh :class:`Session` instance."""
    new_session = Session()
    return new_session
def request(method, url, params=None, headers=None, cookies=None, data=None, json=None, files=None,
            timeout=None, verify=None, allow_redirects=None, proxies=None, hooks=None, stream=None, cert=None):
    """Send a one-off request on a throwaway session and return the response.

    Bug fix: the context manager needs a Session *instance*; the previous
    ``with Session as session`` entered the class object itself and could
    never work.  (Assumes Session implements __enter__/__exit__, matching
    its use here — TODO confirm in requestz.session.)
    """
    with Session() as session:
        return session.request(method, url, params, headers, cookies, data, json, files,
                               timeout, verify, allow_redirects, proxies, hooks, stream, cert)
def get(url, params=None, headers=None, cookies=None, data=None, json=None, files=None, timeout=None,
        verify=False):
    """Shorthand for a one-off GET request."""
    common = (params, headers, cookies, data, json, files, timeout, verify)
    return request('GET', url, *common)
def post(url, params=None, headers=None, cookies=None, data=None, json=None, files=None, timeout=None,
         verify=False):
    """Shorthand for a one-off POST request."""
    common = (params, headers, cookies, data, json, files, timeout, verify)
    return request('POST', url, *common)
def head(url, params=None, headers=None, cookies=None, data=None, json=None, files=None, timeout=None,
         verify=False):
    """Shorthand for a one-off HEAD request."""
    common = (params, headers, cookies, data, json, files, timeout, verify)
    return request('HEAD', url, *common)
def options(url, params=None, headers=None, cookies=None, data=None, json=None, files=None, timeout=None,
            verify=False):
    """Shorthand for a one-off OPTIONS request."""
    common = (params, headers, cookies, data, json, files, timeout, verify)
    return request('OPTIONS', url, *common)
def put(url, params=None, headers=None, cookies=None, data=None, json=None, files=None, timeout=None,
        verify=False):
    """Shorthand for a one-off PUT request."""
    common = (params, headers, cookies, data, json, files, timeout, verify)
    return request('PUT', url, *common)
def patch(url, params=None, headers=None, cookies=None, data=None, json=None, files=None, timeout=None,
          verify=False):
    """Shorthand for a one-off PATCH request."""
    common = (params, headers, cookies, data, json, files, timeout, verify)
    return request('PATCH', url, *common)
def delete(url, params=None, headers=None, cookies=None, data=None, json=None, files=None, timeout=None,
           verify=False):
    """Shorthand for a one-off DELETE request."""
    common = (params, headers, cookies, data, json, files, timeout, verify)
    return request('DELETE', url, *common)
|
[
"hanzhichao@secoo.com"
] |
hanzhichao@secoo.com
|
e86b50789dcbecb7f6ad62d05e5844b6f2c4183e
|
8c8de9155d2b54bb48f312d1b416a0cc3e2ecf46
|
/saliency_experiments/see_predictions.py
|
6fbca4b88321d0a18ce4ead48878692c8d0d37e8
|
[] |
no_license
|
Emr03/HonoursThesis
|
6d0e80630b7ab25a49f8bf733640c52da0ff1eca
|
61d4bedc9821a15bb38ec95044bb52c014d66dee
|
refs/heads/master
| 2021-03-22T05:17:10.518771
| 2017-12-13T04:35:01
| 2017-12-13T04:35:01
| 105,307,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,891
|
py
|
import tensorflow as tf
from keras.models import model_from_json
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import image as mpimg
from PIL import Image
def weighted_pixelwise_crossentropy(y_true, y_pred):
    """Class-weighted binary cross-entropy, summed over all pixels.

    Positive (salient) pixels are weighted by ``beta`` and negative pixels
    by ``1 - beta``; predictions are clipped away from 0/1 so tf.log stays
    finite.  Returns the (positive) summed loss.

    Uses the TF1-era ``tf.log`` API, matching the rest of this file.
    """
    eps = tf.convert_to_tensor(10e-8, y_pred.dtype.base_dtype)
    # Keep predictions strictly inside (0, 1) before taking logs.
    clipped = tf.clip_by_value(y_pred, eps, 1. - eps)
    beta = 0.6  # weight on the positive (salient) class
    ones = tf.ones_like(y_true)
    positive_term = tf.multiply(y_true * tf.log(clipped), beta)
    negative_term = tf.multiply((ones - y_true) * tf.log(ones - clipped), 1 - beta)
    # Cross-entropy terms are <= 0; negate the sum to get a positive loss.
    return -tf.reduce_sum(positive_term + negative_term)
def predictions():
    """Visualise saliency predictions for 10 random test images.

    Loads the trained VGG16 saliency model (architecture from JSON, weights
    from HDF5), then for each sampled image shows the input, the
    ground-truth mask, and the predicted saliency map, printing per-image
    false-positive and false-negative totals.

    NOTE(review): paths are hard-coded to one machine; parameterise before
    reuse.
    """
    img_path = '/home/elsa/Desktop/Image_Saliency/msra/test/images/images/'
    mask_path = '/home/elsa/Desktop/Image_Saliency/msra/test/masks/masks/'
    rescale = 1./255  # unused here — presumably kept for parity with training; verify
    img_files = [f for f in listdir(img_path) if isfile(join(img_path, f))]
    mask_files = [f for f in listdir(mask_path) if isfile(join(mask_path, f))]
    # Sorting keeps images and masks aligned by filename.
    img_files.sort()
    mask_files.sort()
    # load model from json file
    json_file = open('vgg16_saliency_rescaled.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("low_res_weights/vgg16_saliency_epochs_7.h5")
    print("Loaded model from disk")
    # loaded_model.get_layer('input_1')(shape=None)
    # compile model (custom loss must be supplied when compiling by hand)
    loaded_model.compile(loss=weighted_pixelwise_crossentropy, optimizer='rmsprop')
    # predict on 10 random images
    for n in range(10):
        index = np.random.randint(low=0, high=len(img_files))
        print(index)
        print(img_files[index])
        input_img = Image.open(img_path + img_files[index])
        input_img = input_img.resize((128, 128), Image.BILINEAR)
        input_img = np.asarray(input_img, dtype=np.uint8)
        # Masks are resized to the model's 32x32 output resolution (see the
        # pred shape comment below).
        mask_size = (32, 32)
        plt.imshow(input_img)
        plt.show()
        mask = Image.open(mask_path + mask_files[index])
        mask = mask.resize(mask_size, Image.BILINEAR)
        mask = np.asarray(mask, dtype=np.bool)
        # show ground truth
        plt.imshow(mask)
        plt.show()
        # show prediction
        w, h, c = input_img.shape
        input_img = input_img.reshape(1, w, h, c)  # add the batch dimension
        pred_raw = loaded_model.predict(input_img) * 255
        pred = pred_raw[0,:, :, 0].astype('uint8') # pred has shape (1, 32, 32, 1) to include batch size and number of channels
        plt.imshow(pred)
        plt.show()
        # false positive rate = % of dark pixels falsely labelled as 1
        # detect fpr mask using mask - pred (negative numbers correspond to fpr, positive numbers correspond to fnr
        # NOTE(review): here pred_raw is scaled by 255 while mask is boolean,
        # unlike evaluate() which compares on the raw 0-1 scale — confirm
        # which scale these totals are meant to use.
        fpr_mask = np.clip(pred_raw - mask, 0, 1) # clip the negative values
        fpr = np.sum(fpr_mask)
        print('fpr ', fpr)
        fnr_mask = np.clip(mask - pred_raw, 0, 1)
        fnr = np.sum(fnr_mask)
        print('fnr', fnr)
def evaluate():
    """Estimate average false-positive / false-negative totals over 20
    random test images (no plots; plotting lines are left commented out).

    Loads the same model as predictions() and accumulates FPR/FNR by
    averaging per-image sums.  NOTE(review): paths are hard-coded.
    """
    img_path = '/home/elsa/Desktop/Image_Saliency/msra/test/images/images/'
    mask_path = '/home/elsa/Desktop/Image_Saliency/msra/test/masks/masks/'
    rescale = 1. / 255  # unused here — presumably kept for parity with training; verify
    img_files = [f for f in listdir(img_path) if isfile(join(img_path, f))]
    mask_files = [f for f in listdir(mask_path) if isfile(join(mask_path, f))]
    # Sorting keeps images and masks aligned by filename.
    img_files.sort()
    mask_files.sort()
    # load model from json file
    json_file = open('vgg16_saliency_rescaled.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("low_res_weights/vgg16_saliency_epochs_7.h5")
    print("Loaded model from disk")
    # compile model (custom loss must be supplied when compiling by hand)
    loaded_model.compile(loss=weighted_pixelwise_crossentropy, optimizer='rmsprop')
    fpr = 0
    fnr = 0
    # average over 20 random images (the original comment said 200, but the
    # loop and the /20 divisor below both use 20)
    for n in range(20):
        index = np.random.randint(low=0, high=len(img_files))
        print(index)
        print(img_files[index])
        # NOTE(review): `input` shadows the builtin of the same name inside
        # this function — consider renaming.
        input = Image.open(img_path + img_files[index])
        input = input.resize((128, 128), Image.BILINEAR)
        input = np.asarray(input, dtype=np.uint8)
        # input = np.transpose(input, (1, 0, 2))
        # plt.imshow(input)
        # plt.show()
        mask = Image.open(mask_path + mask_files[index])
        mask = mask.resize((32, 32), Image.BILINEAR)
        mask = np.asarray(mask, dtype=np.bool)
        # show ground truth
        # plt.imshow(mask)
        # plt.show()
        # show prediction
        w, h, c = input.shape
        input = input.reshape(1, w, h, c)  # add the batch dimension
        pred_raw = loaded_model.predict(input)[0, :, :, 0]
        pred = (pred_raw*255).astype('uint8') # pred has shape (1, 32, 32, 1) to include batch size and number of channels
        # plt.imshow(pred)
        # plt.show()
        # false positive rate = % of dark pixels falsely labelled as 1
        # detect fpr mask using mask - pred (negative numbers correspond to fpr, positive numbers correspond to fnr
        fpr_mask = np.clip(pred_raw - mask, 0, 1) # clip the negative values
        fpr += np.sum(fpr_mask)/20
        fnr_mask = np.clip(mask - pred_raw, 0, 1)
        fnr += np.sum(fnr_mask)/20
    print('fpr = ', fpr)
    print('fnr = ', fnr)
if __name__ == "__main__":
    # Show qualitative predictions first, then compute aggregate FPR/FNR.
    predictions()
    evaluate()
|
[
"elsa.riachi@mail.mcgill.ca"
] |
elsa.riachi@mail.mcgill.ca
|
84816e0ab3cc6d482ab9ffcfccb9e7f3bd8f82ff
|
3b2f4fc97d45ab1d651a573f30d1e441c31d0937
|
/mysite/settings.py
|
5ea98a6e913322416b29002ae6feb29ed0af9005
|
[] |
no_license
|
meerim-hub/api_project
|
5e2534c37a98e2e7503263554a78b2bd457b7890
|
662d8bcb249be37a090598846bee9877509189a9
|
refs/heads/master
| 2023-04-25T10:26:47.166634
| 2021-05-13T11:56:45
| 2021-05-13T11:56:45
| 367,033,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,198
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
# NOTE(review): the two imports below appear unused in this file — verify
# before removing (other modules may rely on the side effects of import).
from django.core.mail.backends import smtp
from django.template.backends import django

BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded secret committed to source — move to an
# environment variable before any deployment.
SECRET_KEY = 'django-insecure-ps9oq(r60r=&ef5qr6(j+03i4nkv*g8mopz1o15w&^ql$6&44$'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'drf_yasg',
    # 'creditor',
    'rest_framework',
    'rest_framework.authtoken',
    # 'django_filters',
    #myapps
    'main',
    'account',
    'cart',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# NOTE(review): database credentials are committed in source — load them
# from the environment / a secrets store instead.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'project_database',
        'USER': 'meerim',
        'PASSWORD': '1',
        'HOST': 'localhost',
        'PORT': 5432,
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Bishkek'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# Custom user model lives in the `account` app.
AUTH_USER_MODEL = 'account.MyUser'

# SMTP mail settings.
# NOTE(review): placeholder credentials — supply real values via the
# environment; never commit mail passwords.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'yourmail@gmail.com'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_USE_TLS = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 1
}
|
[
"nobody8138@gmail.com"
] |
nobody8138@gmail.com
|
877ca662b6fa0eb16a8bf0b460592ab61d1300d7
|
c2996718fb3a103f95b5e6bdfa5b041916f19cb5
|
/utils/LayerUtils.py
|
296ad47ee6bb51f78392da3af36871a6307e1df0
|
[] |
no_license
|
chinzolboo/lm2_mgis
|
87fe419451598ed40e49b4737de29245f66f0989
|
6a20bdc6e47ae5f9144fab9a9e85d571b8f7d20b
|
refs/heads/master
| 2023-03-14T10:11:08.510274
| 2021-03-03T12:17:17
| 2021-03-03T12:17:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,592
|
py
|
#!/usr/bin/python
# -*- coding: utf-8
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtSql import QSqlDatabase
from qgis.core import *
from SessionHandler import SessionHandler
from ..model.ClPlanType import *
from ..model import SettingsConstants
from ..model import Constants
from ..model.LM2Exception import LM2Exception
import os
class LayerUtils(object):
@staticmethod
def layer_by_name(layer_name):
    """Return the first loaded map layer with the given name, else None."""
    registry = QgsMapLayerRegistry.instance()
    for _layer_id, candidate in registry.mapLayers().iteritems():
        if candidate.name() == layer_name:
            return candidate
    return None
@staticmethod
def layer_by_data_source(schema_name, table_name):
    """Return the loaded vector layer whose data source matches the given
    schema and table, or None (implicitly) when no layer matches."""
    layers = QgsMapLayerRegistry.instance().mapLayers()
    for id, layer in layers.iteritems():
        # Only vector layers carry the postgres-style data source URI.
        if layer.type() == QgsMapLayer.VectorLayer:
            uri_string = layer.dataProvider().dataSourceUri()
            uri = QgsDataSourceURI(uri_string)
            if uri.table() == table_name:
                if uri.schema() == schema_name:
                    return layer
@staticmethod
def load_temp_table(sql, layer_name):
    """Load `sql` (a query/table source keyed on "gid") as a postgres
    vector layer, register it with addToLegend=False, and return it.

    Connection details come from QSettings; the password from the current
    session.
    """
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    # Empty schema: `sql` itself is used as the data source.
    uri.setDataSource("", sql, "geometry", "", "gid")
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_layer_by_name(layer_name, id, restrictions=""):
    """Load `layer_name` from schema "s<code>" for each comma-separated
    restriction code and add each layer to the map (and legend).

    Bug fixes: the old default ``restrictions=[]`` crashed on
    ``.split(",")`` (lists have no split), and ``vlayer`` was unbound when
    no code was supplied — now returns None in that case.

    :param restrictions: comma-separated restriction codes, e.g. "01, 02".
    :returns: the layer created for the last code, or None.
    """
    vlayer = None
    for restriction in restrictions.split(","):
        restriction = restriction.strip()
        if not restriction:
            continue  # skip empty entries instead of building schema "s"
        uri = QgsDataSourceURI()
        user = QSettings().value(SettingsConstants.USER)
        db = QSettings().value(SettingsConstants.DATABASE_NAME)
        host = QSettings().value(SettingsConstants.HOST)
        port = QSettings().value(SettingsConstants.PORT, "5432")
        pwd = SessionHandler().current_password()
        uri.setConnection(host, port, db, user, pwd)
        uri.setDataSource("s" + restriction, layer_name, "geometry", "", id)
        vlayer = QgsVectorLayer(uri.uri(), "s" + restriction + "_" + layer_name, "postgres")
        QgsMapLayerRegistry.instance().addMapLayer(vlayer)
    return vlayer
@staticmethod
def load_union_layer_by_name(layer_name, id):
    """Load `layer_name` from the "data_soums_union" schema, register it
    with addToLegend=False, and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource("data_soums_union", layer_name, "geometry", "", id)
    # Display name is prefixed with the schema to disambiguate.
    vlayer = QgsVectorLayer(uri.uri(), "data_soums_union" + "_" + layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_ub_data_layer_by_name(layer_name, id):
    """Load `layer_name` from the "data_ub" schema, register it with
    addToLegend=False, and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource("data_ub", layer_name, "geometry", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_layer_by_name_report(layer_name, id, restrictions=""):
    """Load `layer_name` from schema "s<code>" for each comma-separated
    restriction code, registering each layer with addToLegend=False.

    Bug fixes: the old default ``restrictions=[]`` crashed on
    ``.split(",")`` and left ``vlayer`` unbound for empty input — now
    returns None in that case.

    :returns: the layer created for the last code, or None.
    """
    vlayer = None
    for restriction in restrictions.split(","):
        restriction = restriction.strip()
        if not restriction:
            continue
        uri = QgsDataSourceURI()
        user = QSettings().value(SettingsConstants.USER)
        db = QSettings().value(SettingsConstants.DATABASE_NAME)
        host = QSettings().value(SettingsConstants.HOST)
        port = QSettings().value(SettingsConstants.PORT, "5432")
        pwd = SessionHandler().current_password()
        uri.setConnection(host, port, db, user, pwd)
        uri.setDataSource("s" + restriction, layer_name, "geometry", "", id)
        vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
        QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_tmp_layer_by_name(layer_name, id, shema):
    """Load `layer_name` from the given schema (note: param spelled
    `shema` in the original API), register it with addToLegend=False,
    and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource(shema, layer_name, "geometry", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_layer_by_name_equipment(layer_name, id):
    """Load `layer_name` from the "settings" schema, register it with
    addToLegend=False, and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource("settings", layer_name, "geometry", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_layer_base_layer(layer_name, id, schema_name):
    """Load `layer_name` (geometry column "geometry") from `schema_name`,
    register it with addToLegend=False, and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource(schema_name, layer_name, "geometry", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def check_layer_by_name(layer_name):
    """Return True when any loaded map layer carries the given name."""
    loaded = QgsMapLayerRegistry.instance().mapLayers().values()
    return any(candidate.name() == layer_name for candidate in loaded)
@staticmethod
def load_plan_layer_base_layer(layer_name, id, schema_name, geometry_column):
    """Load `layer_name` from `schema_name` using an explicit geometry
    column, register it with addToLegend=False, and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource(schema_name, layer_name, geometry_column, "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_polygon_layer_base_layer(layer_name, id, schema_name):
    """Load `layer_name` via its "polygon_geom" column from `schema_name`,
    register it with addToLegend=False, and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource(schema_name, layer_name, "polygon_geom", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_point_layer_base_layer(layer_name, id, schema_name):
    """Load `layer_name` via its "point_geom" column from `schema_name`,
    register it with addToLegend=False, and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource(schema_name, layer_name, "point_geom", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_line_layer_base_layer(layer_name, id, schema_name):
    """Load `layer_name` via its "line_geom" column from `schema_name`,
    register it with addToLegend=False, and return it."""
    uri = QgsDataSourceURI()
    user = QSettings().value(SettingsConstants.USER)
    db = QSettings().value(SettingsConstants.DATABASE_NAME)
    host = QSettings().value(SettingsConstants.HOST)
    port = QSettings().value(SettingsConstants.PORT, "5432")
    pwd = SessionHandler().current_password()
    uri.setConnection(host, port, db, user, pwd)
    uri.setDataSource(schema_name, layer_name, "line_geom", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_layer_by_name_admin_units(layer_name, id, restrictions=""):
    """Load `layer_name` from each restriction code's schema (the code IS
    the schema name here, no "s" prefix), registering each layer with
    addToLegend=False.

    Bug fixes: the old default ``restrictions=[]`` crashed on
    ``.split(",")`` and left ``vlayer`` unbound for empty input — now
    returns None in that case.

    :returns: the layer created for the last code, or None.
    """
    vlayer = None
    for restriction in restrictions.split(","):
        restriction = restriction.strip()
        if not restriction:
            continue
        uri = QgsDataSourceURI()
        user = QSettings().value(SettingsConstants.USER)
        db = QSettings().value(SettingsConstants.DATABASE_NAME)
        host = QSettings().value(SettingsConstants.HOST)
        port = QSettings().value(SettingsConstants.PORT, "5432")
        pwd = SessionHandler().current_password()
        uri.setConnection(host, port, db, user, pwd)
        uri.setDataSource(restriction, layer_name, "geometry", "", id)
        vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
        QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_layer_by_ca_sec_parcel(layer_name, id, restrictions=""):
    """Load `layer_name` from the fixed "data_landuse" schema as a
    PostGIS vector layer and register it (hidden from the legend).

    :param layer_name: table name in the "data_landuse" schema
    :param id: primary-key column used as the feature id
    :param restrictions: accepted for signature compatibility with the
        sibling loaders; its parsed values were never used here
    """
    # Fix: the old default was a mutable list ([]) on which
    # restrictions.split(",") raised AttributeError whenever the argument
    # was omitted; "" is a safe backward-compatible default.  The old
    # split/strip loop only rebound its loop variable (dead code), so it
    # is dropped.
    settings = QSettings()
    uri = QgsDataSourceURI()
    uri.setConnection(
        settings.value(SettingsConstants.HOST),
        settings.value(SettingsConstants.PORT, "5432"),
        settings.value(SettingsConstants.DATABASE_NAME),
        settings.value(SettingsConstants.USER),
        SessionHandler().current_password(),
    )
    uri.setDataSource("data_landuse", layer_name, "geometry", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def load_layer_by_name_pasture_monitoring(layer_name, id):
    """Load table `layer_name` from the fixed "pasture" schema as a
    PostGIS vector layer and register it (hidden from the legend)."""
    settings = QSettings()
    uri = QgsDataSourceURI()
    uri.setConnection(
        settings.value(SettingsConstants.HOST),
        settings.value(SettingsConstants.PORT, "5432"),
        settings.value(SettingsConstants.DATABASE_NAME),
        settings.value(SettingsConstants.USER),
        SessionHandler().current_password(),
    )
    uri.setDataSource("pasture", layer_name, "geometry", "", id)
    layer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(layer, False)
    return layer
@staticmethod
def load_layer_by_name_set_zones(layer_name, id, restrictions=""):
    """Load `layer_name` from the fixed "settings" schema as a PostGIS
    vector layer and register it (hidden from the legend).

    :param layer_name: table name in the "settings" schema
    :param id: primary-key column used as the feature id
    :param restrictions: accepted for signature compatibility with the
        sibling loaders; its parsed values were never used here
    """
    # Fix: the old default was a mutable list ([]) on which
    # restrictions.split(",") raised AttributeError whenever the argument
    # was omitted; "" is a safe backward-compatible default.  The old
    # split/strip loop only rebound its loop variable (dead code), so it
    # is dropped.
    qgis_settings = QSettings()
    uri = QgsDataSourceURI()
    uri.setConnection(
        qgis_settings.value(SettingsConstants.HOST),
        qgis_settings.value(SettingsConstants.PORT, "5432"),
        qgis_settings.value(SettingsConstants.DATABASE_NAME),
        qgis_settings.value(SettingsConstants.USER),
        SessionHandler().current_password(),
    )
    uri.setDataSource("settings", layer_name, "geometry", "", id)
    vlayer = QgsVectorLayer(uri.uri(), layer_name, "postgres")
    QgsMapLayerRegistry.instance().addMapLayer(vlayer, False)
    return vlayer
@staticmethod
def where(layer, exp):
    """Generator: yield the features of `layer` for which the QGIS
    expression string `exp` evaluates truthy.

    Raises LM2Exception on a parse error and ValueError on an
    evaluation error.
    """
    expression = QgsExpression(exp)
    if expression.hasParserError():
        raise LM2Exception("Error", "Wrong Expression")
    expression.prepare(layer.pendingFields())
    for feat in layer.getFeatures():
        matched = expression.evaluate(feat)
        if expression.hasEvalError():
            raise ValueError(expression.evalErrorString())
        if bool(matched):
            yield feat
@staticmethod
def deselect_all():
    """Clear the feature selection on every vector layer in the project."""
    all_layers = QgsMapLayerRegistry.instance().mapLayers()
    # .iteritems(): this module targets Python 2 (QGIS 2.x API).
    for _layer_id, layer in all_layers.iteritems():
        if layer.type() == QgsMapLayer.VectorLayer:
            layer.removeSelection()
@staticmethod
def refresh_layer():
    """Ensure the standard top-level layer-tree groups (and their
    sub-groups) exist, creating any that are missing."""
    root = QgsProject.instance().layerTreeRoot()

    def _ensure(index, title, collapse=False, children=()):
        # Create group `title` at position `index` only if it is absent,
        # optionally collapsed and pre-populated with child groups.
        if root.findGroup(title) is None:
            grp = root.insertGroup(index, title)
            if collapse:
                grp.setExpanded(False)
            for child in children:
                grp.addGroup(child)

    _ensure(0, u"Мэдээний хяналт", collapse=True, children=(u"Хамгаалалтын зурвас",))
    _ensure(1, u"Тайлан", children=(u"Газрын улсын бүртгэл",))
    _ensure(2, u"ГНСТайлан")
    _ensure(3, u"Кадастр", collapse=True, children=(u"Кадастрын өөрчлөлт",))
    _ensure(4, u"Тусгай хэрэгцээний газар", collapse=True)
    _ensure(5, u"Үнэлгээ, төлбөрийн бүс", collapse=True)
    _ensure(6, u"Хилийн цэс, бүсчлэл", collapse=True)
    _ensure(7, u"CAMA", collapse=True)
    _ensure(8, u"Хаяг", collapse=True,
            children=(u"Хаяг засварлалт", u"Хаягийн нэгж талбар", u"Хаягийн барилга"))
@staticmethod
def refresh_layer1():
    """Ensure the flat set of top-level layer-tree groups exists
    (variant without sub-groups or collapsing)."""
    # Kept from the original: presumably refreshes/validates the DB
    # session as a side effect — TODO confirm.
    session = SessionHandler().session_instance()
    root = QgsProject.instance().layerTreeRoot()
    group_specs = (
        (0, u"Мэдээний хяналт"),
        (2, u"Тайлан"),
        (1, u"ГНСТайлан"),
        (4, u"Кадастр"),
        (3, u"Кадастрын төлөвлөгөө"),
        (6, u"Тусгай хэрэгцээний газар"),
        (2, u"Кадастрын өөрчлөлт"),
        (7, u"Үнэлгээ, төлбөрийн бүс"),
        (8, u"Хилийн цэс, бүсчлэл"),
        (8, u"CAMA"),
        (8, u"Хаяг"),
    )
    for index, title in group_specs:
        if root.findGroup(title) is None:
            root.insertGroup(index, title)
@staticmethod
def refresh_layer_plan():
    # Ensure the land-management-plan ("ГЗБТөлөвлгөө") group and its
    # sub-groups exist in the layer tree.
    # NOTE(review): indentation was reconstructed from a flattened
    # source; confirm the nesting of the `if not mygroup.findGroup(...)`
    # checks under `else` against version control.
    root = QgsProject.instance().layerTreeRoot()
    mygroup = root.findGroup(U"ГЗБТөлөвлгөө")
    if mygroup is None:
        # First run: create the plan group with its "currently worked on" sub-group.
        plan = root.insertGroup(1, u"ГЗБТөлөвлгөө")
        current_root = plan.insertGroup(1, u"Ажиллаж байгаа")
        current_root.setExpanded(False)
    else:
        # Group already exists: add whichever sub-groups are missing.
        if not mygroup.findGroup(U"Ажиллаж байгаа"):
            current_root = mygroup.insertGroup(1, u"Ажиллаж байгаа")
            current_root.setExpanded(False)
        if not mygroup.findGroup(U"Бусад ГЗБТ"):
            other_root = mygroup.insertGroup(2, u"Бусад ГЗБТ")
            other_root.setExpanded(False)
        if not mygroup.findGroup(U"Хилийн цэс"):
            # Border-line group gets four administrative-level sub-groups.
            other_root = mygroup.insertGroup(3, u"Хилийн цэс")
            current_root = other_root.insertGroup(4, u"Улсын хилийн цэс")
            current_root.setExpanded(False)
            current_root = other_root.insertGroup(3, u"Аймгийн хилийн цэс")
            current_root.setExpanded(False)
            current_root = other_root.insertGroup(2, u"Сумын хилийн цэс")
            current_root.setExpanded(False)
            current_root = other_root.insertGroup(1, u"Багийн хилийн зааг")
            current_root.setExpanded(False)
|
[
"aagii_csms@yahoo.com"
] |
aagii_csms@yahoo.com
|
f59441c882cfd878dc3c83e229c177b548adef6f
|
603c76b7a69830bce15926cd0317b740500bd39f
|
/hw2/logistic_test.py
|
7457788f9984a8896c9c75ec42bcb150a4859e2b
|
[] |
no_license
|
sky1456723/ML2018FALL
|
e88acca528f336a5ac6d76fdb4666c2147a08380
|
c5b8675f0905f45326428d7caffd6bd860475147
|
refs/heads/master
| 2020-03-28T19:29:40.411810
| 2019-01-15T03:41:29
| 2019-01-15T03:41:29
| 148,982,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,851
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 14 20:02:45 2018

@author: jimmy

Apply a trained logistic-regression model to the test set and write
"id_<n>,<0|1>" predictions to ./result/logistic/1014_log.csv.
"""
import numpy as np
import pandas as pd

test_x = pd.read_csv('./data/test_x.csv')
w_T = np.load('./parameter/logistic/weight.npy')     # weights; last entry is the bias
mean = np.load('./parameter/logistic/mean.npy')      # per-feature training mean
stddev = np.load('./parameter/logistic/stddev.npy')  # per-feature training std-dev

# One-hot encode the categorical columns exactly as during training.
# PAY_* columns use a fixed category range so train/test column sets match.
_PAY_CATEGORIES = list(range(-2, 9, 1))
_ENCODINGS = [
    ('SEX', 'gender', None),
    ('EDUCATION', 'education', None),
    ('MARRIAGE', 'marriage', None),
    ('PAY_0', 'pay_0', _PAY_CATEGORIES),
    ('PAY_2', 'pay_2', _PAY_CATEGORIES),
    ('PAY_3', 'pay_3', _PAY_CATEGORIES),
    ('PAY_4', 'pay_4', _PAY_CATEGORIES),
    ('PAY_5', 'pay_5', _PAY_CATEGORIES),
    ('PAY_6', 'pay_6', _PAY_CATEGORIES),
]
for _col, _prefix, _cats in _ENCODINGS:
    if _cats is None:
        _dummies = pd.get_dummies(test_x[_col], prefix=_prefix)
    else:
        _dummies = pd.get_dummies(pd.Categorical(test_x[_col], categories=_cats),
                                  prefix=_prefix)
    # Drop the raw column and append its dummies — same column order as
    # the original sequential drop/concat pairs.
    test_x = pd.concat([test_x.drop([_col], axis=1), _dummies], axis=1)

data_dim = test_x.shape[1]

# Fix: the original iterated with `while True:` plus a bare `except:`
# around next(), so ANY error (not just StopIteration) silently ended the
# loop.  A for-loop over iterrows() is equivalent and cannot mask
# unrelated exceptions.  The output file is now closed via `with`.
with open('./result/logistic/1014_log.csv', 'w') as output_file:
    output_file.write('id,value\n')
    for data_id, row in test_x.iterrows():
        data_value = np.reshape(row.values, (data_dim, 1))
        to_divide = data_value - mean
        # Normalize, skipping features whose std-dev is zero (constant columns).
        for i in range(len(to_divide)):
            if stddev[i][0] != 0:
                to_divide[i][0] /= stddev[i][0]
        data_value = np.concatenate((to_divide, [[1]]), axis=0)  # append bias input
        z = np.matmul(np.transpose(w_T), data_value)
        output_P = 1 / (1 + np.exp(-1 * z))                      # sigmoid
        ans = 1 if output_P >= 0.5 else 0
        output_file.write("id_" + str(data_id) + ',' + str(ans) + '\n')
|
[
"b05901009@ntu.edu.tw"
] |
b05901009@ntu.edu.tw
|
177c6a9913af5efb3d97acb588046e22dc90637a
|
a62196ee5894d2536a9bd95a9a9134727e5b08a6
|
/trees.py
|
4d6be421cce4f2cddd1c0950975d82aa996b9834
|
[] |
no_license
|
mazucanti/dict-corrector
|
5af82b5f572c36b4b1daa859bbab3f891958e0e7
|
28859e166d3d83ef9c0a05afc4c14eaf1a5c07cf
|
refs/heads/master
| 2020-09-11T18:28:33.906717
| 2019-11-25T08:10:46
| 2019-11-25T08:10:46
| 222,153,149
| 0
| 0
| null | 2019-11-25T05:41:50
| 2019-11-16T20:06:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,470
|
py
|
import json
import requests
class trie_node():
    """One node of a trie: a letter, its children, and word/usage flags."""

    def __init__(self, letter: str):
        self.letter = letter  # character stored at this node
        self.children = []    # child trie_node objects
        self.word = False     # True if a word ends exactly at this node
        self.count = 0        # how many inserted words re-used this node
def add_node(root, word: str):
    """Insert *word* into the trie rooted at *root*, creating nodes as needed.

    Existing nodes along the path get their usage count incremented; the
    final node is flagged as a complete word.
    """
    node = root
    for letter in word:
        existing = None
        for child in node.children:
            if child.letter == letter:
                existing = child
                break
        if existing is not None:
            existing.count += 1
            node = existing
        else:
            fresh = trie_node(letter)
            node.children.append(fresh)
            node = fresh
    node.word = True
def remote_base_import():
    """Download the per-letter word lists from GitHub and merge them into
    a single dict.  Requires network access."""
    merged = {}
    for letter in "abcdefghijklmnopqrstuvwxyz":
        resp = requests.get('https://raw.githubusercontent.com/mazucanti/words-base/master/base_words/' + letter + '.json')
        merged.update(json.loads(resp.content.decode('utf-8')))
    return merged
def trie_tree():
    """Build and return a trie containing every word from the remote base."""
    root = trie_node('*')
    for word in remote_base_import():
        add_node(root, word)
    return root
def search(root, term: str) -> bool:
    """Return True iff *term* is stored as a complete word in the trie
    rooted at *root* (a trie_node).

    Fix: the original only checked the `found` flag at the TOP of each
    iteration, so when the *last* letter of the term had no matching
    child it still returned the previous node's `word` flag — e.g.
    search for "ax" wrongly returned True whenever "a" was a word.
    """
    node = root
    for letter in term:
        for child in node.children:
            if letter == child.letter:
                node = child
                break
        else:
            # No child matched this letter: term is not in the trie.
            return False
    return node.word
|
[
"daniel.mazucanti.ismart@gmail.com"
] |
daniel.mazucanti.ismart@gmail.com
|
70ece0de49a5a8520a39f9bb1a4d20eb03c74152
|
6ef411540ceb96746f83f0a78a95929e7ca63a8e
|
/.history/ex44e_20190817171813.py
|
ff980324b7fba9418d72fb77495c81479557000b
|
[] |
no_license
|
bkapadia01/python3_hardway
|
4ba5c74a2646f5308602f72a1f21722544945734
|
cc9ab593353c11f1205c1a67197689671d956e17
|
refs/heads/master
| 2020-06-10T23:36:38.783824
| 2019-08-19T02:05:14
| 2019-08-19T02:05:14
| 193,791,525
| 1
| 0
| null | 2019-08-19T02:05:15
| 2019-06-25T22:27:33
|
Python
|
UTF-8
|
Python
| false
| false
| 625
|
py
|
class Other(object):
    """Standalone class whose methods Child delegates to (composition demo)."""

    def override(self):
        # Shadowed by Child.override, which does NOT delegate here.
        print("other override")

    def implicit(self):
        # Reached via Child.implicit's pure delegation.
        print("other implicit")

    def altered(self):
        # Wrapped by Child.altered with before/after messages.
        print("other altered")
class Child(object):
    """Holds an Other instance and forwards/extends its behavior —
    composition instead of inheritance."""

    def __init__(self):
        # The composed delegate.
        self.other = Other()

    def implicit(self):
        # Pure delegation to the composed object.
        self.other.implicit()

    def override(self):
        # Replaces (does not call) the delegate's version.
        print("child overide")

    def altered(self):
        # Wraps the delegate call with extra behavior.
        print("child - before other altered function")
        self.other.altered()
        print("child - after other altered function")
son = Child()
son.implicit()
son.override()
son.altered()
# Fix: the original last line was `son.__init__(self)`, which raised
# NameError because `self` is undefined at module level.  Re-running the
# initializer explicitly is what the line appears to have intended.
son.__init__()
|
[
"Bhavin@Bhavins-MacBook-Pro-2.local"
] |
Bhavin@Bhavins-MacBook-Pro-2.local
|
458e27d060152d28446f71c000c6ea88539ccacf
|
be30ac253e585ec15108c4de0b27830426715c0a
|
/test.py
|
cb210f22cad06c5ded197991f152a06f6c26b873
|
[] |
no_license
|
arun5061/Python
|
04c4e64c8819ca067704dd6e539c28b94af57029
|
88b54b2fad97521e3eb5b68915e1f1c4d4a97438
|
refs/heads/master
| 2020-03-25T23:44:24.732419
| 2018-10-14T06:35:03
| 2018-10-14T06:35:03
| 144,290,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
print("hi")


def test():
    """Print a greeting; demonstrates defining then calling a function."""
    print("hello")


test()
|
[
"arun.nettam@gmail.com"
] |
arun.nettam@gmail.com
|
02875b9c470e1e49cf43158adc4ff740c437c40e
|
58cfffe9024ff37609eeb31a50930ce651fac57e
|
/RetoClase.py
|
a2cb6f77a1ea8ba4f9daecdc2805781a27652bfd
|
[] |
no_license
|
ClaudiaMoraC/Ciclo1_Python
|
5f06019798ff75f744d7e16045903b52cc211083
|
1e744a2cc798fd22bc072133447e5b918d0a813f
|
refs/heads/master
| 2023-06-07T09:45:01.280528
| 2021-07-01T17:47:52
| 2021-07-01T17:47:52
| 382,112,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
# 1. Leer un número entero y determinar si es un número terminado en 4.
# entero=input("Escribe un número entero de 4 digitos: ")
# #print(list(entero)) Lista el numero ingresado
# a=int(entero[3])# toma la ultima posicion del numero de 4 digitos
# if a==4:
# print("El número ",entero," termina en 4")
# else:
# print("El número ",entero," no termina en 4")
# 2. Leer un número entero y determinar si tiene 3 dígitos.
# num=input("Escribe un numero entero ")
# #print(list(num))
# if len(num)==3:#Deterina si la longitud del numero num es igual a 3 cifras
# print("El número",num," es de 3 cifras")
# else:
# print("El número ",num," no es de 3 cifras ")
# 5. Leer un número entero de dos dígitos y determinar si ambos dígitos son pares.
# entero=input("Ingresa un número de 2 cifras: ")
# b=int(entero[0])#Tomar la posicion 1
# c=int(entero[1])#tomar la posicion 2
# if (b % 2 == 0) and (c % 2 == 0):#determinar si b y c son pares
# print("Los dos digitos son pares: ",entero)
# elif (b % 2 == 0) or (c % 2 == 0):
# print("Uno de los numeros es par")
# else:
# print("Los dos digitos son impares: ",entero)
|
[
"claudhiiiamora@gmail.com"
] |
claudhiiiamora@gmail.com
|
2f38b2003a98e69d9c7f7c5073618c6adcbceba3
|
c68be152d2b832053586c21317f688323376b55e
|
/app/busi/szxl/crypto.py
|
e06bab6e027a79263d406e10b8e8837229dd8ab9
|
[] |
no_license
|
arbboter/wits
|
f1c4a39883fdc4288fd8faa22353e63f405582c1
|
3e0b1e70d7c58d9a18ff43635a9d3fe78b396f65
|
refs/heads/master
| 2022-12-15T20:44:17.799380
| 2018-05-17T09:30:35
| 2018-05-17T09:30:35
| 133,790,463
| 0
| 0
| null | 2022-12-08T00:44:30
| 2018-05-17T09:26:37
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
# -*- coding: utf-8 -*-
from app.comm.crypto import Rsa
from app import app_conf
import base64
# 加载密钥
my_rsa_private_key = app_conf.my_rsa_pri_file
my_rsa_public_key = app_conf.my_rsa_pub_file
ser_rsa_private_key = app_conf.ser_rsa_pri_file
ser_rsa_public_key = app_conf.ser_rsa_pub_file
# Encrypt
def rsa_enc(data):
    """RSA-encrypt *data* with the server's public key and return the
    ciphertext base64-encoded (bytes)."""
    cipher = Rsa(pub_file=ser_rsa_public_key).enc_bytes(data)
    return base64.encodebytes(cipher)
# Decrypt
def rsa_dec(data):
    """Base64-decode *data* and RSA-decrypt it with our private key."""
    raw = base64.decodebytes(data)
    return Rsa(pri_file=my_rsa_private_key).dec_bytes(raw)
# Sign
def rsa_sign(data):
    """Sign *data* with our private key; return the signature base64-encoded."""
    signature = Rsa(pri_file=my_rsa_private_key).sign_bytes(data)
    return base64.encodebytes(signature)
# Verify signature
def rsa_sign_verify(data, sig):
    """Verify the base64-encoded signature *sig* over *data* using the
    server's public key; return the verifier's result."""
    return Rsa(pub_file=ser_rsa_public_key).sign_verify(data, base64.decodebytes(sig))
def main():
    """Placeholder entry point — the module is used as a library."""
    pass


if __name__ == '__main__':
    main()
|
[
"linwu2@cmschina.com.cn"
] |
linwu2@cmschina.com.cn
|
4e5fa5c16d43bc73137e466b074d43eddb41abde
|
02ddba8e1fe71de046fcc5194c8427ebc53fd7a5
|
/users/forms.py
|
19d9f1dd7ed4894d78bcd216d417485077653f3f
|
[] |
no_license
|
caglayansert/flask-blog
|
eaa2fb40016dae95bcf216b214cc409215321348
|
799b8cd57408dd84e35094fb802dda873f6aa3cc
|
refs/heads/master
| 2022-12-05T17:31:01.950128
| 2020-08-23T20:19:13
| 2020-08-23T20:19:13
| 289,760,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,240
|
py
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flask_login import current_user
from flaskblog.models import User
class RegistrationForm(FlaskForm):
    """Sign-up form; rejects usernames/emails already in the User table."""
    username = StringField('Username',
                           validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    confirm_password = PasswordField('Confirm Password',
                                     validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Sign Up')

    def validate_username(self, username):
        # WTForms hook: called automatically for the `username` field.
        user = User.query.filter_by(username=username.data).first()
        if user:
            raise ValidationError(
                'That username is taken. Please choose a different one.')

    def validate_email(self, email):
        # WTForms hook: called automatically for the `email` field.
        email = User.query.filter_by(email=email.data).first()
        if email:
            raise ValidationError(
                'That email is taken. Please choose a different one.')
class LoginForm(FlaskForm):
    """Login form: email + password with an optional remember-me flag.

    Fix: the remember-me checkbox label was misspelled ("Remembe Me").
    """
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember = BooleanField('Remember Me')
    submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
    """Profile-edit form; uniqueness is only enforced when the value changed."""
    username = StringField('Username',
                           validators=[DataRequired(), Length(min=2, max=20)])
    email = StringField('Email',
                        validators=[DataRequired(), Email()])
    picture = FileField('Update Profile Picture', validators=[
        FileAllowed(['jpg', 'png'])])
    submit = SubmitField('Update')

    def validate_username(self, username):
        # Only reject a taken username if the user actually changed it.
        if username.data != current_user.username:
            user = User.query.filter_by(username=username.data).first()
            if user:
                raise ValidationError(
                    'That username is taken. Please choose a different one.')

    def validate_email(self, email):
        # Only reject a taken email if the user actually changed it.
        if email.data != current_user.email:
            email = User.query.filter_by(email=email.data).first()
            if email:
                raise ValidationError(
                    'That email is taken. Please choose a different one.')
class RequestResetForm(FlaskForm):
    """Password-reset request form; the email must belong to an account."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    submit = SubmitField('Request Password Reset')

    def validate_email(self, email):
        # WTForms hook: the address must exist before a reset is sent.
        email = User.query.filter_by(email=email.data).first()
        if email is None:
            raise ValidationError(
                'There is no account with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
    """New-password form shown after a valid reset token is presented."""
    password = PasswordField('Password', validators=[DataRequired()])
    confirm_password = PasswordField('Confirm Password', validators=[
        DataRequired(), EqualTo('password')])
    submit = SubmitField('Reset Password')
|
[
"caglayann059@gmail.com"
] |
caglayann059@gmail.com
|
b76ba866a28d3998fa82ee61b887af2a475fcd74
|
41f4285f32bab0f4e89b463d9242bc19093b39a3
|
/mysite/blog/migrations/0001_initial.py
|
5373215dac1c5e924c3a3ee2f8278b35e0e37d4c
|
[] |
no_license
|
VikeshBaid/Django
|
878333dd01035a3c348511ce638083bec78bff1f
|
420fa25c1bb75a6f66ea67a4f293f1f9bb71318e
|
refs/heads/master
| 2020-04-07T22:55:47.502454
| 2018-12-29T12:45:48
| 2018-12-29T12:45:48
| 158,790,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
# Generated by Django 2.1.1 on 2018-09-28 08:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the blog `Post` table.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('slug', models.SlugField(max_length=250, unique_for_date='publish')),
                ('body', models.TextField()),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
                # Author is nulled (not cascaded) when the user is deleted.
                ('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-publish',),
            },
        ),
    ]
|
[
"vikesh.happy2@gmail.com"
] |
vikesh.happy2@gmail.com
|
cf26d73fd4f1709c6ef86687b1dcbfc7ed846b30
|
7fefaf6b1236ccc2ee4052c0204dbb0c5979f2f9
|
/venv/Scripts/pip3.7-script.py
|
04c740514ff934f638e22e6f96f5100d6f2abfac
|
[] |
no_license
|
RoySato/DjangoTest
|
5dd82153c1e36475ccfc82d857b1fc5a00166b54
|
a7b285a09e127156e1737f535c17c4f2e5b7e6cd
|
refs/heads/master
| 2020-07-17T11:41:56.495772
| 2019-09-03T07:19:23
| 2019-09-03T07:19:23
| 184,548,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
#!C:\Users\Ryohei\PycharmProject\SatoNPO\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
# Auto-generated setuptools console-script wrapper for `pip3.7`.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
|
[
"ryoheisato"
] |
ryoheisato
|
0607a1fbfc6bb659d5f7714e81c004c8b0727d30
|
e653de4fc1ecfd03f0a723a944e776cab42ce7b3
|
/open_file.py
|
5610766040e7999a515e3095bb78794fc2d1a211
|
[] |
no_license
|
chadat23/learn_tkinter
|
73630a6e9c45a4dbb64581ecc2de5bd18d99d3fc
|
e7baeaa3329a53f1039d788076bb2a1a4e394d77
|
refs/heads/master
| 2022-04-29T19:05:24.919489
| 2020-01-13T22:01:56
| 2020-01-13T22:01:56
| 233,496,519
| 0
| 0
| null | 2022-03-12T00:15:31
| 2020-01-13T02:40:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
from pathlib import Path
from tkinter import Tk, Label, Button, DISABLED, SUNKEN, Radiobutton, messagebox, Toplevel, \
filedialog
import tkinter
import json
# Base directory for resource lookups (the script runs from its own folder).
path = Path('.')
# Create the main window and set its chrome.
root = Tk()
root.title('Frames!!')
root.iconbitmap(path / 'images' / 'favicon.ico')  # NOTE(review): raises TclError if images/favicon.ico is missing — confirm asset ships with the script
def get_defaults():
    """Return the saved defaults dict, or {} if the file is missing/corrupt.

    Fixes: the original opened the file OUTSIDE the try block, so a
    missing defaults file crashed instead of falling back to {}; the bare
    ``except`` also hid unrelated errors.
    """
    try:
        with open('open_file_defaults.json', 'r') as f:
            return json.load(f)
    except (OSError, ValueError):
        # ValueError covers json.JSONDecodeError (its subclass).
        return {}
# Ask the user to pick a file, starting in the directory remembered from
# the previous run (get_defaults() may return {}; initialdir=None falls
# back to the dialog's default directory).
root.filename = filedialog.askopenfilename(initialdir=get_defaults().get('open_dialog'),
title='Select A File',
filetypes=(('png files', '*.png'), ('all files', '*.*')))
def set_default(property_name: str, val: str):
    """Persist one key/value pair into open_file_defaults.json, keeping
    every other saved default.

    Fixes: the original called ``json.load(f.read())`` — ``json.load``
    takes a file object, not a string — so loading ALWAYS failed and the
    saved defaults were silently reset to {} on every write; a missing
    file also crashed on ``open(..., 'r')``.
    """
    try:
        with open('open_file_defaults.json', 'r') as f:
            data = json.load(f)
    except (OSError, ValueError):
        data = {}
    data[property_name] = val
    with open('open_file_defaults.json', 'w') as f:
        f.write(json.dumps(data, indent=4))
# Remember the chosen file's directory for the next launch, then enter
# the Tk event loop.
set_default('open_dialog', str(Path(root.filename).parents[0]))
root.mainloop()
|
[
"chad.derosier@gmail.com"
] |
chad.derosier@gmail.com
|
0962c5dff2f74b0b788738127b8a74c44f562b9e
|
14e1087b9de7403c1c057e9710cd6959f438e040
|
/timer.py
|
38529e7200d955c9756f8c9d5b28ef7c25d6a58e
|
[] |
no_license
|
chetan-techqware/new_codes
|
d48a7d02a0bc4c861af01ff9c058b605b4b22bfe
|
3257f795d2598b3ee131a37de2693f3138de8f79
|
refs/heads/master
| 2023-06-12T21:32:07.991725
| 2021-06-29T05:28:08
| 2021-06-29T05:28:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
import time
def timer(func):
    """Decorator that prints the wall-clock runtime of each call to *func*
    and returns its result.

    Fix: the original wrapper accepted ``*args``/``**kwargs`` but called
    ``func()`` WITHOUT them, so decorating any function that takes
    arguments raised TypeError.
    """
    def wrapper(*args, **kwargs):
        start = time.time()
        rv = func(*args, **kwargs)
        total = time.time() - start
        print("time : ", total)
        return rv
    return wrapper
@timer
def test():
    """Busy-loop used to demo the timing decorator."""
    for _ in range(10000):
        pass


test()
|
[
"chetan@techqware.com"
] |
chetan@techqware.com
|
0f6a427acee5f6d732cbca14c98e74e867eaea63
|
f215b0f8cc1ef0c6d9c72949fe71edf9f98e9dce
|
/src/kms_encrypt_decrypt.py
|
f62a51223d3822cc561204866ca353901ff99408
|
[] |
no_license
|
parthsarangi/py-scripts
|
e20194504f5e2dfbdcfc99b4f93e83f51f4a8a5a
|
94f93d89a1a0bc9b8fcb200d74a0c5faa49d01e4
|
refs/heads/master
| 2022-07-10T12:37:34.326071
| 2019-08-12T18:34:09
| 2019-08-12T18:34:09
| 196,789,721
| 0
| 0
| null | 2021-03-25T22:46:23
| 2019-07-14T03:33:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,619
|
py
|
import pybase64 as base64
# from google.cloud import kms_v1
# google-cloud-kms
def start_encrypt(params):
    """Base64-encode every byte string under ``params["decrypt"]`` and
    return the encoded values as a list."""
    return [base64.b64encode(item) for item in params["decrypt"]]
def start_decrypt(params):
    """Base64-decode every value under ``params["encrypt"]`` and return
    the decoded byte strings as a list."""
    return [base64.b64decode(item) for item in params["encrypt"]]
def decrypt_symmetric(project_id, location_id, key_ring_id, crypto_key_id,
                      ciphertext):
    """Decrypts input ciphertext using the provided symmetric CryptoKey."""
    # Imported lazily so the module can be used without google-cloud-kms.
    from google.cloud import kms_v1
    # Creates an API client for the KMS API.
    client = kms_v1.KeyManagementServiceClient()
    # The resource name of the CryptoKey.
    name = client.crypto_key_path_path(project_id, location_id, key_ring_id,
                                       crypto_key_id)
    # Use the KMS API to decrypt the data.
    # NOTE(review): `ciphertext` appears to be a list of base64-encoded
    # byte strings (it is first run through start_decrypt) — confirm with
    # callers.
    parameters = {"decrypt": [], "encrypt": ciphertext, "function": "decrypt"}
    ciphertext_resp = start_decrypt(parameters)
    iv_key = []
    for item in ciphertext_resp:
        # print()
        response = client.decrypt(name, item)
        iv_key.append(response.plaintext)
        print(response.plaintext)
    return iv_key
if __name__ == '__main__':
    text_decrypted = [b'hello world']
    # Pre-encoded values (including the IV and key) carried in text_encrypted.
    text_encrypted = [b'Q2lRQWR2MXA5UzJNUVR6Y25MVFNRd0I1M0VJQmhHU1BzNVc0Zk9kbGtJSnRJTkdvalNFU09RRG80YlVYZkQyazlYNjNvV0xvcjFSUTRBeU9sUjI5NUlYM2NGVkVINXlyRm5WZzEydVBzL3FLQkZNY3RHYXVYekVZamc3RzBmbERtZz09',
                      b'Q2lRQWR2MXA5UXV5NDRGWVI0bW1mQ2c3blZXTVZjcG9HbzFBSHN0SDMyN1JtYTBjemVvU1NRRG80YlVYTTlXd2JFTDlBK21pbGpsVlE3M01zZndub1pkTDF0eHByaFRXdW1pT1FjOEoxOXpsVEQ2RE1Vdi8xWHNDTEh2UmFGdjB1Q0daTk9pTmdObGo3aVFINUhIZXM2bz0=',
                      b'c291cmNlX2VxX2VuY3J5cHQ=',
                      b'ZGF0YXBsYXRmb3JtLTEzNjM=',
                      b'cHJvamVjdHMvZGF0YXBsYXRmb3JtLTEzNjMvbG9jYXRpb25zL3VzL2tleVJpbmdzL2FjbS1tbS11cy9jcnlwdG9LZXlzL2JxLWRlY3J5cHRlcg==']
    function = "decrypt"
    parameters = {"decrypt": text_decrypted, "encrypt": text_encrypted, "function": function}
    if parameters["function"] == "encrypt":
        val = start_encrypt(parameters)
    if parameters["function"] == "decrypt":
        val = start_decrypt(parameters)
    for item in val:
        print(item)
    # Pass the iv and key to ciphertext for decoding
    ciphertext = val[0:2]
    response = decrypt_symmetric("dataplatform-1363", "us", "acm-mm-us", "bq-decrypter",
                                 ciphertext)
    print("iv :" + repr(response[0]) + ": \nkey:" + repr(response[1]))
|
[
"parth.sar@ascendcorp.com"
] |
parth.sar@ascendcorp.com
|
8a4e10af87e6dc2de3c55b9f8c875ed514d89320
|
0c204165819daf4e1a311009955174818cb33795
|
/test/functional/rpc_bind.py
|
a8ef0d9d9b476aed7cfcfb4427058b24504606f3
|
[
"MIT"
] |
permissive
|
minblock/shiftcoins
|
06d5f11020f71e0f6519993f17568d8f951b1138
|
03ebbb07ff103894b30fdea3b67cc31e35f64abc
|
refs/heads/master
| 2021-05-22T23:56:31.183853
| 2020-04-05T03:13:46
| 2020-04-05T03:13:46
| 253,145,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,428
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running shiftcoinsd with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.netutil import all_interfaces, addr_to_hex, get_bind_addrs, test_ipv6_local
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url
class RPCBindTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_argument("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_argument("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_argument("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not (self.options.run_ipv4 or self.options.run_nonloopback):
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
    """Exercise rpcbind/rpcallowip on a real (non-loopback) interface."""
    ip = self.non_loopback_ip
    self.log.info("Using interface %s for testing" % ip)
    # bind only to the non-loopback interface
    self.run_bind_test([ip], ip, [ip], [(ip, self.defaultport)])
    # with a matching rpcallowip the request must be accepted ...
    self.run_allowip_test([ip], ip, self.defaultport)
    # ... and with a non-matching rpcallowip it must be rejected with 403
    assert_raises_rpc_error(-342,
                            "non-JSON HTTP response with '403 Forbidden' from server",
                            self.run_allowip_test, ['1.1.1.1'], ip, self.defaultport)
# Script entry point: run the RPC bind/allowip functional test.
if __name__ == '__main__':
    RPCBindTest().main()
|
[
"POSTMASTER@provgn.com"
] |
POSTMASTER@provgn.com
|
b67290080e30bc45b05ee3f015c6778f063a453c
|
2549087f04d43c8c2d4a0817736a1fba9f174d27
|
/apps/accounts/decorators.py
|
efc10a57d3a908e62c436e3865352ab010780c12
|
[
"BSD-3-Clause"
] |
permissive
|
tarvitz/icu
|
a8632d43f4485b684bb686a6a89e15dc4d0eb43d
|
9a7cdac9d26ea224539f68f678b90bf70084374d
|
refs/heads/master
| 2021-01-02T09:15:38.456981
| 2016-09-09T20:29:06
| 2016-09-09T20:29:06
| 6,606,287
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
# -*- coding: utf-8 -*-
from apps.accounts.models import Invite
from apps.core.helpers import get_object_or_None
from django.shortcuts import redirect
from datetime import datetime
def check_invite(sid):
    """View decorator factory: require a valid, unexpired Invite.

    sid: name of the view kwarg that carries the invite's secure id
         (looked up case-insensitively against Invite.sid).

    The wrapped view runs only if an Invite with that sid exists and its
    expire_date has not passed; otherwise the request is redirected to
    the 'core:ufo' page.
    """
    from functools import wraps  # local import: leaves the module import block untouched

    def decorator(func):
        @wraps(func)  # FIX: preserve the wrapped view's name/docstring for introspection
        def wrapper(request, *args, **kwargs):
            now = datetime.now()
            secure_id = kwargs[sid]
            invite = get_object_or_None(
                Invite, sid__iexact=secure_id, expire_date__gte=now
            )
            if not invite:
                return redirect('core:ufo')
            return func(request, *args, **kwargs)
        return wrapper
    return decorator
|
[
"lilfoxster@gmail.com"
] |
lilfoxster@gmail.com
|
ecb0a73ee20955a481b3be1832c1c583e8f7d44f
|
ea3d85d86f28f13c6d60f4f2002b8d1d21a6074a
|
/Zenva/Python/Lists/ListChallanges/AppendSize.py
|
86b836caa9ef8165efb2dc5a84ced86f241e0516
|
[] |
no_license
|
TaylenH/learningProjects
|
934b075d2ec8fa62661e268196f70a67887e71f7
|
27b58c187c9fd93da462d91fd2be22e5ad781f6a
|
refs/heads/master
| 2023-01-13T15:03:03.364520
| 2019-12-03T18:26:15
| 2019-12-03T18:26:15
| 193,642,579
| 0
| 0
| null | 2023-01-11T20:55:38
| 2019-06-25T05:47:25
|
HTML
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
# Create a function called append_size that has one parameter named lst.
# The function should append the size of lst (inclusive) to the end of lst. The function should then return this new list.
# For example, if lst was [23, 42, 108], the function should return [23, 42, 108, 3] because the size of lst was originally 3.
#Write your function here
def append_size(lst):
    """Append the current length of *lst* to it and return the same list.

    E.g. [23, 42, 108] becomes [23, 42, 108, 3].
    """
    size = len(lst)
    lst.append(size)
    return lst
# Uncomment the line below when your function is done
print(append_size([23, 42, 108]))  # expected output: [23, 42, 108, 3]
|
[
"theynel3@gmail.com"
] |
theynel3@gmail.com
|
4272ddda13fd9a53289e55d25b9e911dc38e107f
|
3ddc4a80624a044b9dc9e547b1b8185af617f843
|
/regression_df.py
|
09f63cd23d06c868ac73763670e418a14b952a55
|
[] |
no_license
|
sidpremkumar/MoneyBall-Statistics
|
7227f3bd11dc0ad36cf8c5a05ca8f76552eed9bf
|
047977de76c3d37d0aaeebebda6d55b42e4f480e
|
refs/heads/master
| 2021-01-09T18:18:49.770619
| 2020-03-05T21:25:53
| 2020-03-05T21:25:53
| 242,404,748
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,308
|
py
|
# 3rd Party Modules
import pandas as pd
def main(games_dict, n = 10):
    """
    takes games dictionary and returns single data frame ready for regression
    games_dict: dictionary of games,
    n: running team stats size
    cols_to_add are columns which we add to default
    cols_to_remove are columns which we remove from default
    default columns*: ['pts','a_pts', 'poss', 'a_poss', 'ort', 'drt',
    'efg', 'a_efg', 'ast_pct', 'win', 't1_score', 't2_score', 'game_cover',
    'game_spread', 'game_OU', 't1_ML', 't2_ML']
    * NOTE: all these columns are AVERAGED over n games, except 't1_score', 't2_score', 'game_cover', 'ML', 'a_ML'
    """
    # Accumulates one row per (team, game); both teams contribute their own
    # view of each game, so games end up double counted (see note at the end).
    all_games = pd.DataFrame()
    for team in games_dict.keys():
        # assumes games_dict[team] is a per-team game-log DataFrame with the
        # box-score/betting columns used below -- TODO confirm with the caller
        df = games_dict[team]
        # select columns to roll on
        columns_to_roll = ['fgm', 'fga',
                           'fg3m', 'fg3a', 'ftm', 'fta', 'oreb',
                           'dreb', 'reb', 'ast', 'stl', 'blk', 'tov', 'pf', 'pts',
                           'a_fgm', 'a_fga',
                           'a_fg3m', 'a_fg3a', 'a_ftm', 'a_fta',
                           'a_oreb', 'a_dreb', 'a_reb', 'a_ast', 'a_stl', 'a_blk',
                           'a_tov', 'a_pf', 'a_pts', 'cover', 'win', 'OU']
        # first make new columns where we want to keep important data for a game instead of rolling on it
        new_unrolled_columns = ['t1_score', 't2_score', 'game_cover', 'game_spread', 'game_OU', 't1_ML', 't2_ML']
        corresp_columns = ['pts', 'a_pts', 'cover', 'spread', 'OU', 'ML', 'a_ML']
        df[new_unrolled_columns] = df[corresp_columns]
        # new win indicator column
        df['win'] = df['t1_score'] > df['t2_score']
        # roll data, drop first n games.  rolling(n).sum().shift() makes each
        # row the sum over the PREVIOUS n games (current game excluded by the
        # shift); the first n rows become NaN and are dropped below.
        new_df = df[columns_to_roll].rolling(window=n).sum().shift()
        df[columns_to_roll] = new_df
        df = df.dropna()
        # update percentages & put in advanced stats
        df.loc[:, 'fg3_pct'] = df['fg3m']/df['fg3a']
        df.loc[:, 'a_fg3_pct'] = df['a_fg3m']/df['a_fg3a']
        df.loc[:, 'fg_pct'] = df['fgm']/df['fga']
        df.loc[:, 'a_fg_pct'] = df['a_fgm']/df['a_fga']
        df.loc[:, 'ft_pct'] = df['ftm']/df['fta']
        df.loc[:, 'a_ft_pct'] = df['a_ftm']/df['a_fta']
        # possessions estimate and offensive/defensive ratings
        df['poss'] = df['fga'] + 0.5*df['fta'] - df['oreb'] + df['tov']
        df['a_poss'] = df['a_fga'] + 0.5*df['a_fta'] - df['a_oreb'] + df['a_tov']
        df['ort'] = df['pts']/df['poss']
        df['drt'] = df['a_pts']/df['a_poss']
        # effective field-goal pct; NOTE(review): 'a_efg' mixes own 'fg3m'
        # into the opponent numerator -- looks like a typo for 'a_fg3m', confirm
        df['efg'] = (df['fgm'] + 0.5*df['fg3m'])/df['fga']
        df['a_efg'] = (df['a_fgm'] + 0.5*df['fg3m'])/df['a_fga']
        df['ast_pct'] = df['ast']/df['fgm']
        # averaging certain columns (they currently hold n-game sums)
        columns_to_avg = ['poss', 'cover', 'spread', 'pts', 'a_pts', 'a_poss', 'win', 'OU', 'ML', 'a_ML']
        df[columns_to_avg] = df[columns_to_avg]/n
        # columns used for regression
        columns_for_regression = ['pts', 'a_pts', 'poss', 'a_poss', 'ort', 'drt', 'OU',
                                  'efg', 'a_efg', 'ast_pct', 'win', 't1_score', 't2_score', 'game_spread', 'game_OU', 't1_ML', 't2_ML']
        # necessary columns for identifying game:
        identifiers = ['game_id', 'home']
        # add all games to master dataframe
        # NOTE: games will be double counted
        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
        # collect the frames and pd.concat once if pandas is upgraded.
        all_games = all_games.append(df[identifiers + columns_for_regression])
    return all_games
if __name__ == '__main__':
    # BUG FIX: main() was called with no arguments although games_dict is a
    # required parameter, so running this module directly always raised
    # TypeError.  An empty dict produces an empty regression frame instead.
    main({})
|
[
"anthonytersaakov@gmail.com"
] |
anthonytersaakov@gmail.com
|
db9a73f53ee529f049229e599950929d9e27a7f9
|
3fe820ef703cb594fd0897e8ba87f05e9ab7a381
|
/Q11.py
|
8a7420d1b9de6700b0bbf50b3babbeda5320283a
|
[] |
no_license
|
Sheetal-145/dictionery
|
af67c95debc7414ceeca1cf083319be753e3ef70
|
30e299e466f75fc2b66f366ecf6dd6f0bbcb72e6
|
refs/heads/main
| 2023-07-16T10:13:50.264496
| 2021-08-24T14:41:23
| 2021-08-24T14:41:23
| 399,352,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
# Collect every value of the dictionary into a list, in insertion order.
d = {'a':50, 'b':58,'c': 56,'d':40,'e':100, 'f':20}
a = list(d.values())
|
[
"noreply@github.com"
] |
Sheetal-145.noreply@github.com
|
2a8270faa10bafac32ba056c7ccde6686464b98c
|
078318ecbd76e8af3e3c35aa3cfe3d1d7748fb43
|
/my_modules/rsa.py
|
6f6a39ea010fb6ed82beca07fe5a01416569bca0
|
[] |
no_license
|
NobuyukiInoue/Example_RSA_Work
|
0e466b7b62f98c17560afd92369232147dc429ed
|
24438d23c76f93e9ecc3fe4daeffaacf8abe7d4c
|
refs/heads/master
| 2020-05-16T09:31:09.878534
| 2019-05-07T01:44:13
| 2019-05-07T01:44:13
| 182,951,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,211
|
py
|
# -*- coding: utf-8 -*-
import base64
import math
import random
def calc_p_q(primeNumber_indexMin, primeNumber_Max):
    """Pick two distinct random primes p, q whose product exceeds 2**16.

    primeNumber_indexMin: lowest index into the sieved prime list to draw from.
    primeNumber_Max: upper bound for the prime sieve.
    Returns the tuple (p, q).
    """
    # All primes up to primeNumber_Max.
    prime_list = sieve_of_eratosthenes(primeNumber_Max)
    # Lower bound for the RSA modulus n = p * q.
    lower = 2 ** 16
    while True:
        # Draw two distinct indices in [primeNumber_indexMin, len-1).
        x = random.randrange(primeNumber_indexMin, len(prime_list) - 1, 1)
        while True:
            y = random.randrange(primeNumber_indexMin, len(prime_list) - 1, 1)
            if y != x:
                break
        # BUG FIX: the original compared the *indices* (x * y) against the
        # bound, although per its docstring the product of the primes
        # themselves must exceed it.
        if prime_list[x] * prime_list[y] > lower:
            break
    return prime_list[x], prime_list[y]
def sieve_of_eratosthenes(primeNumber_Max):
    """Return all primes <= primeNumber_Max, in increasing order.

    Rewritten as the classic boolean-array sieve: the original filtered a
    Python list with repeated pop(i) calls (quadratic, and incrementing the
    index right after a pop can skip the element that slid into slot i).
    This version is O(n log log n) and returns [] for primeNumber_Max < 2
    instead of raising ValueError on min() of an empty list.
    """
    if primeNumber_Max < 2:
        return []
    # is_prime[i] stays True while i is still a prime candidate.
    is_prime = [True] * (primeNumber_Max + 1)
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(math.sqrt(primeNumber_Max)) + 1):
        if is_prime[candidate]:
            # Start at candidate**2: smaller multiples were already crossed off.
            for multiple in range(candidate * candidate, primeNumber_Max + 1, candidate):
                is_prime[multiple] = False
    return [num for num, flag in enumerate(is_prime) if flag]
def lcm(p, q):
    """Return the least common multiple of p and q via lcm = p*q / gcd."""
    gcd = math.gcd(p, q)
    return (p * q) // gcd
def generate_keys(p, q):
    """Generate the private and public key pair from two given primes p, q."""
    # NOTE(review): this is an unimplemented exercise skeleton -- the bare
    # string literals below are step-by-step instructions (in Japanese) and
    # every computation line has been stripped, so the function currently
    # returns None.  The commented-out loop near the end sketches how to
    # derive D from E and L.
    """2つの素数(p, q)の積nを求める"""
    #
    """p - 1 と q - 1 の最小公倍数を求める"""
    #
    """公開鍵で使用するeを算出する"""
    #
    #
    #
    #
    """秘密鍵で使用するdを算出する"""
    #
    #
    #
    #
    """
    i = 0
    while True:
        if (i * L + 1) % E == 0:
            D = (i * L + 1) // E
            break
        i += 1
    """
    #
def encrypt_from_text(plain_text, public_key):
    """Encrypt plain_text with the public key (E, N).

    Each character is encrypted independently as pow(ord(ch), E, N) and the
    results are concatenated as fixed-width 8-digit lowercase hex.
    """
    E, N = public_key
    hex_chunks = []
    for char in plain_text:
        cipher = pow(ord(char), E, N)
        hex_chunks.append(format(cipher, "08x"))
    return ''.join(hex_chunks)
def decrypt_to_text(encrypted_text, private_key):
    """Decrypt a hex ciphertext produced by encrypt_from_text.

    The input is split into 8-hex-digit chunks; each chunk is decrypted as
    pow(int(chunk, 16), D, N) and mapped back to a character.
    """
    D, N = private_key
    chunks = [encrypted_text[pos:pos + 8]
              for pos in range(0, len(encrypted_text), 8)]
    return ''.join(chr(pow(int(chunk, 16), D, N)) for chunk in chunks)
def encrypt_from_binary(plain_integers, public_key):
    """Encrypt a sequence of plaintext integers with public_key."""
    # NOTE(review): unimplemented exercise stub -- the instruction string
    # below describes the intended step; the function currently returns None.
    #
    """公開鍵(eと2つの素数の積n)を使って暗号化後の数値を生成する"""
    #
def decrypt_to_binary(encrypted_integers, private_key):
    """Decrypt a sequence of ciphertext integers with private_key."""
    # NOTE(review): unimplemented exercise stub -- the instruction string
    # below describes the intended step; the function currently returns None.
    #
    """秘密鍵(dと2つの素数の積n)を使って、復号後の数値を求める"""
    #
def sanitize(encrypted_text):
    """Round-trip the text through UTF-8 with errors='replace'.

    Characters that cannot be encoded (e.g. lone surrogates) become '?',
    so printing the result can no longer raise UnicodeEncodeError.
    """
    encoded = encrypted_text.encode('utf-8', 'replace')
    return encoded.decode('utf-8')
|
[
"spring555@gmail.com"
] |
spring555@gmail.com
|
4c8d45598b387770e901a3f5edaa9cca8ccc7d5c
|
f661a041434a80a9f81c48f5ebb30e50262c423a
|
/src/bdc/dependencyChecker.py
|
9c2aaebb9d6ce0909c77400a6b505f7a1faf87b3
|
[
"MIT"
] |
permissive
|
adamjenkins1/BootableDiskCreator
|
9e31f1e18ecea9038cae8e6159b7a6eedfad11f4
|
7d9b36b4125df41a03ee027b335d23da80d315c9
|
refs/heads/master
| 2020-03-28T00:15:21.514439
| 2018-12-21T00:50:36
| 2018-12-21T00:50:36
| 147,388,955
| 0
| 0
|
MIT
| 2018-10-30T08:00:52
| 2018-09-04T18:17:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
#!/usr/bin/env python3
"""Contains class to check required dependencies for BootableDiskCreator CLI and GUI
File name: dependencyChecker.py
Author: Adam Jenkins
Date created: 10/28/18
Date last modified: 10/28/18
Python Version: 3.6.5
"""
import sys
import shutil
class DependencyChecker:
    """Collects and reports missing dependencies for BootableDiskCreator."""

    def __init__(self):
        """Set up the dependency lists and an empty error state."""
        self.bashDependencies = ['awk', 'mkfs.fat', 'lsblk', 'mount']
        self.PyQtVersion = '5.11.3'
        self.errors = ''
        self.PyQtInstall = False

    def main(self):
        """Check Python version, PyQt5 and required shell tools; exit on errors."""
        version = sys.version_info
        # Require Python >= 3.5.
        if not (version.major == 3 and version.minor >= 5):
            self.errors += ('Error: found Python {}.{}, Python >= 3.5 required\n'
                            .format(version.major, version.minor))
        # PyQt5 must be importable at all ...
        try:
            import PyQt5
            self.PyQtInstall = True
        except ImportError:
            self.errors += 'Error: missing required dependency: PyQt5\n'
        # ... and a version mismatch is only a warning, not a fatal error.
        if self.PyQtInstall:
            from PyQt5 import Qt
            if Qt.PYQT_VERSION_STR != self.PyQtVersion:
                print('Warning: found PyQt5 {}, this software has only been tested with PyQt5 {}'
                      .format(Qt.PYQT_VERSION_STR, self.PyQtVersion), file=sys.stderr)
        # Every required command-line tool must be on PATH.
        for tool in self.bashDependencies:
            if shutil.which(tool) is None:
                self.errors += 'Error: missing required dependency: {}\n'.format(tool)
        # Exit with the accumulated messages (minus the trailing newline).
        if self.errors:
            sys.exit(self.errors[:-1])
|
[
"adamjenkins1701@gmail.com"
] |
adamjenkins1701@gmail.com
|
6ffe76e67b3a50e6829e496e8cb9ad7b747e36e7
|
a84e22847d000aa3294d53f35bf59d61fba4666c
|
/apps/empresas/admin.py
|
77accf4deba363aee21b1c83f6cf88ebc89023f0
|
[] |
no_license
|
gpxlqa/gestao_rh_django
|
e45ac5f51f1ce702fbb130720eda04ce8f4605c2
|
35bc561603f14fcde6bdef8d9e8418ff26a78338
|
refs/heads/main
| 2023-01-13T05:46:10.273944
| 2020-11-18T23:30:59
| 2020-11-18T23:30:59
| 302,779,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
from django.contrib import admin
from .models import Empresa
# Register your models here.
admin.site.register(Empresa)
|
[
"gabrielpeschl@gmail.com"
] |
gabrielpeschl@gmail.com
|
cd0efe1579923d92cccb3e33d6aa8d379d5696f3
|
5c98bdca28de9bb3c878ec5df139b43a6ac8f470
|
/COT/disks/tests/test_iso.py
|
b9c9ab65fc428b82b113a351e2b00b0caacf8f16
|
[
"MIT"
] |
permissive
|
glennmatthews/cot
|
2af5ca01c51086a314cf3cb2b626c1a5d3c15e92
|
0811b96311881a8293f28f2e300f6bed1b77ee31
|
refs/heads/master
| 2023-08-17T12:56:11.858190
| 2019-12-04T15:42:20
| 2019-12-04T15:42:20
| 21,240,523
| 88
| 24
|
MIT
| 2023-08-08T17:41:56
| 2014-06-26T12:54:34
|
Python
|
UTF-8
|
Python
| false
| false
| 9,013
|
py
|
#!/usr/bin/env python
#
# test_iso.py - Unit test cases for ISO disk representation.
#
# October 2016, Glenn F. Matthews
# Copyright (c) 2014-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Unit test cases for ISO subclass of DiskRepresentation."""
import logging
import os
import mock
from COT.tests import COTTestCase
from COT.disks import ISO
from COT.helpers import (
helpers, HelperError, HelperNotFoundError,
)
logger = logging.getLogger(__name__)
# pylint: disable=protected-access,missing-type-doc,missing-param-doc
class TestISO(COTTestCase):
"""Test cases for ISO class."""
def setUp(self):
"""Test case setup function called automatically before each test."""
super(TestISO, self).setUp()
self.foo_iso = os.path.join(self.temp_dir, "foo.iso")
def tearDown(self):
"""Test case cleanup function called automatically after each test."""
for name in ['mkisofs', 'genisoimage', 'xorriso', 'isoinfo']:
helper = helpers[name]
helper._installed = None
helper._path = None
helper._version = None
super(TestISO, self).tearDown()
def test_representation(self):
"""Representing an existing ISO."""
iso = ISO(self.input_iso)
self.assertEqual(iso.path, self.input_iso)
self.assertEqual(iso.disk_format, 'iso')
self.assertEqual(iso.capacity, str(self.FILE_SIZE['input.iso']))
if helpers['isoinfo']:
self.assertEqual(iso.disk_subformat, "")
self.assertEqual(iso.files,
['iosxr_config.txt', 'iosxr_config_admin.txt'])
self.assertEqual(iso.predicted_drive_type, 'cdrom')
def test_create_with_files(self):
"""Creation of a ISO with specific file contents."""
disk_path = os.path.join(self.temp_dir, "out.iso")
ISO.create_file(disk_path, files=[self.input_ovf])
iso = ISO(disk_path)
if helpers['isoinfo']:
# Our default create format is rockridge
self.assertEqual(iso.disk_subformat, "rockridge")
self.assertEqual(iso.files,
[os.path.basename(self.input_ovf)])
else:
helpers['isoinfo']._installed = True
with mock.patch.object(helpers['isoinfo'], "call",
return_value="Rock Ridge extensions found"):
self.assertEqual(iso.disk_subformat, "rockridge")
with mock.patch.object(helpers['isoinfo'], "call",
return_value="""
Setting input-charset to 'UTF-8' from locale.
/{0}
""".format(os.path.basename(self.input_ovf))):
self.assertEqual(iso.files,
[os.path.basename(self.input_ovf)])
def test_create_with_files_non_rockridge(self):
"""Creation of a non-rock-ridge ISO with specific file contents."""
disk_path = os.path.join(self.temp_dir, "out.iso")
ISO.create_file(disk_path, files=[self.input_ovf], disk_subformat="")
iso = ISO(disk_path)
if helpers['isoinfo']:
self.assertEqual(iso.disk_subformat, "")
self.assertEqual(iso.files,
[os.path.basename(self.input_ovf)])
else:
helpers['isoinfo']._installed = True
with mock.patch.object(helpers['isoinfo'], "call",
return_value="No SUSP/Rock Ridge present"):
self.assertEqual(iso.disk_subformat, "")
with mock.patch.object(helpers['isoinfo'], "call",
return_value="""
Setting input-charset to 'UTF-8' from locale.
/{0};1
""".format(os.path.basename(self.input_ovf).upper())):
self.assertEqual(iso.files,
[os.path.basename(self.input_ovf)])
def test_create_without_files(self):
"""Can't create an empty ISO."""
self.assertRaises(RuntimeError,
ISO.create_file,
path=os.path.join(self.temp_dir, "out.iso"),
capacity="100")
@mock.patch("COT.helpers.mkisofs.MkISOFS.call")
def test_create_with_mkisofs(self, mock_call):
"""Creation of an ISO with mkisofs (default)."""
helpers['mkisofs']._installed = True
ISO.create_file(path=self.foo_iso, files=[self.input_ovf])
mock_call.assert_called_with(
['-output', self.foo_iso, '-full-iso9660-filenames',
'-iso-level', '2', '-allow-lowercase', '-r', self.input_ovf])
@mock.patch("COT.helpers.mkisofs.GenISOImage.call")
def test_create_with_genisoimage(self, mock_call):
"""Creation of an ISO with genisoimage if mkisofs is unavailable."""
helpers['mkisofs']._installed = False
helpers['genisoimage']._installed = True
ISO.create_file(path=self.foo_iso, files=[self.input_ovf])
mock_call.assert_called_with(
['-output', self.foo_iso, '-full-iso9660-filenames',
'-iso-level', '2', '-allow-lowercase', '-r', self.input_ovf])
@mock.patch("COT.helpers.mkisofs.XorrISO.call")
def test_create_with_xorriso(self, mock_call):
"""Creation of an ISO with xorriso as last resort."""
helpers['mkisofs']._installed = False
helpers['genisoimage']._installed = False
helpers['xorriso']._installed = True
ISO.create_file(path=self.foo_iso, files=[self.input_ovf])
mock_call.assert_called_with(
['-as', 'mkisofs', '-output', self.foo_iso,
'-full-iso9660-filenames', '-iso-level', '2', '-allow-lowercase',
'-r', self.input_ovf])
def test_create_no_helpers_available(self):
"""Creation of ISO should fail if no helpers are install[ed|able]."""
helpers['mkisofs']._installed = False
helpers['genisoimage']._installed = False
helpers['xorriso']._installed = False
helpers['apt-get']._installed = False
helpers['brew']._installed = False
helpers['port']._installed = False
helpers['yum']._installed = False
self.assertRaises(HelperNotFoundError,
ISO.create_file,
path=self.foo_iso,
files=[self.input_ovf])
@mock.patch("COT.helpers.mkisofs.MkISOFS.call")
def test_create_with_mkisofs_non_rockridge(self, mock_call):
"""Creation of a non-Rock-Ridge ISO with mkisofs (default)."""
helpers['mkisofs']._installed = True
ISO.create_file(path=self.foo_iso, files=[self.input_ovf],
disk_subformat="")
mock_call.assert_called_with(
['-output', self.foo_iso, '-full-iso9660-filenames',
'-iso-level', '2', '-allow-lowercase', self.input_ovf])
def test_file_is_this_type_nonexistent(self):
"""Call file_is_this_type should fail if file doesn't exist."""
self.assertRaises(HelperError,
ISO.file_is_this_type, "/foo/bar")
def test_file_is_this_type_isoinfo(self):
"""The file_is_this_type API should use isoinfo if available."""
if helpers['isoinfo']:
self.assertTrue(ISO.file_is_this_type(self.input_iso))
self.assertFalse(ISO.file_is_this_type(self.blank_vmdk))
else:
# Fake it til you make it
helpers['isoinfo']._installed = True
with mock.patch.object(helpers['isoinfo'], "call"):
self.assertTrue(ISO.file_is_this_type(self.input_iso))
with mock.patch.object(helpers['isoinfo'], "call",
side_effect=HelperError):
self.assertFalse(ISO.file_is_this_type(self.blank_vmdk))
def test_file_is_this_type_noisoinfo(self):
"""The file_is_this_type API should work if isoinfo isn't available."""
_isoinfo = helpers['isoinfo']
helpers['isoinfo'] = False
try:
self.assertTrue(ISO.file_is_this_type(self.input_iso))
self.assertFalse(ISO.file_is_this_type(self.blank_vmdk))
finally:
helpers['isoinfo'] = _isoinfo
def test_from_other_image_unsupported(self):
"""No support for from_other_image."""
self.assertRaises(NotImplementedError,
ISO.from_other_image,
self.blank_vmdk, self.temp_dir)
|
[
"glenn@e-dad.net"
] |
glenn@e-dad.net
|
ccb77f5627e45aa5a35143c74f5f33113967e24a
|
22f25d369df00ae2bcee850a8ad22754eb6bb04d
|
/code/crispr_mismatches_counts.py
|
ee4878b3640fee9c45f25a3728811f7a800d65cd
|
[
"MIT"
] |
permissive
|
linsalrob/PhageHosts
|
4142a6bf082bc9c597bd061585e604132ec9d7e0
|
d3b156b3c2dbbc0e69bf6464b644c03c1c084f8f
|
refs/heads/master
| 2021-11-11T21:17:54.469304
| 2021-11-07T02:47:21
| 2021-11-07T02:47:21
| 28,326,376
| 19
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,909
|
py
|
"""
Generate a list of NC ids of the phages and their hosts for the CRISPR
searches. We accept two different parameters, the number of mismatches
and the BLAST output file. Note that we expect the blast output file
to incldue qlen slen as the last two columns
"""
import rob
import sys
import os
import re
# Parse command-line arguments: mismatch threshold and BLAST output path.
try:
    mm = int(sys.argv[1])
    bf = sys.argv[2]
except (IndexError, ValueError):  # was a bare except; only bad/missing args are expected
    sys.exit(sys.argv[0] + " <number of mismatches> <crispr blast output file>. You probably want to use crispr.vs.genomes.blastn-nz\n")

# hits[phage_id][host_id] -> set of percent-identity values seen for the pair
hits = {}
with open(bf, 'r') as infile:
    for line in infile:
        p = line.strip().split("\t")
        p[2] = float(p[2])
        for i in range(3, 10):
            p[i] = int(p[i])
        p[10] = float(p[10])
        p[11] = float(p[11])
        p[12] = int(p[12])
        p[13] = int(p[13])
        # mismatches in the sequences is the sum of the difference in
        # length and the mismatches reported by blast
        mismatches = p[4] + (p[13] - p[3])
        if mismatches > mm:
            continue
        # Phage accession is embedded in the query id (column 0).
        m = re.findall(r'NC_\d+', p[0])
        if not m:
            sys.stderr.write("No phage found in " + p[0] + "\n")
            continue
        if len(m) > 1:
            sys.stderr.write("More than one phage found in " + p[0] + "\n")
            continue
        phage = m[0]
        # Host accession is embedded in the subject id (column 1).
        m = re.findall(r'NC_\d+', p[1])
        if not m:
            # BUG FIX: both host error messages printed p[0] (the phage/query
            # field) although the host id being searched is in p[1].
            sys.stderr.write("No host found in " + p[1] + "\n")
            continue
        if len(m) > 1:
            sys.stderr.write("More than one host found in " + p[1] + "\n")
            continue
        host = m[0]
        hits.setdefault(phage, {}).setdefault(host, set()).add(p[2])

# Report phage/host pairs supported by more than one distinct identity value.
for p in hits:
    for h in hits[p]:
        if len(hits[p][h]) > 1:
            print(str(len(hits[p][h])) + " matches from phage " + p +
                  " to host " + h)
|
[
"raedwards@gmail.com"
] |
raedwards@gmail.com
|
1cd105a79dfdc9370646c2d54157fc7ab85ee646
|
7db61dd832bbb7a1da331286f26a7e63e0379477
|
/venv/Scripts/pip-script.py
|
af28fcddb41975b327eb6e898e9b0cefda6e75c9
|
[] |
no_license
|
MrStubbsGCS/triangleGenerator
|
d41101fd939d7f7d43d2474af09e183c85c71825
|
336d9491374ce0bf73d25b70952165565f4cc5d0
|
refs/heads/master
| 2020-06-18T21:27:20.038973
| 2019-07-11T23:11:33
| 2019-07-11T23:11:33
| 196,454,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
#!C:\Users\collin_stubbs\PycharmProjects\triangleGenerator\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# NOTE: auto-generated setuptools entry-point wrapper for this venv's pip;
# do not edit by hand -- it is recreated when the environment is rebuilt.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
|
[
"collin.stubbs@greenwoodcollege.com"
] |
collin.stubbs@greenwoodcollege.com
|
e234b14b17d3d08b23107a846a3ac5a8aab3810b
|
aa35de3242d594915af2dcfe3cc6101a78ca3d87
|
/array_list_tests.py
|
898d3e4b19bed30388286d933a183da0ebe0f495
|
[] |
no_license
|
davyyjones/lab3
|
a003c6f7982d54d231ffed55acf0da17a25cf1b6
|
d3f64dcca22b2bedb7cfce0f0e66a10389af2127
|
refs/heads/master
| 2021-01-19T23:46:54.049031
| 2017-04-27T22:33:09
| 2017-04-27T22:33:09
| 89,025,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
import unittest
class testCase(unittest.TestCase):
    """Unit tests for the array-list lab functions.

    NOTE(review): ListClass, remove, set, get, length, add and empty_list are
    referenced below but never imported -- this file only imports unittest, so
    every test raises NameError as written.  Presumably a star-import of the
    lab's array_list module is missing; confirm its module name.
    """
    def test_remove(self):
        t_listy = ListClass([0, 1, 2, 3], 5, 6)
        # NOTE(review): t_listy is unused here; remove() is invoked with None.
        self.assertRaises(IndexError, remove, None, 2)
    def test_set(self):
        # NOTE(review): `set` here is the lab function, shadowing the builtin.
        t_listy = ListClass([0, 1, 2, 3], 5, 6)
        self.assertEqual([5, 1, 2, 3], set(t_listy, 0, 5))
    def test_get(self):
        t_listy = ListClass([0, 1, 2, 3], 5, 6)
        self.assertEqual(1, get(t_listy, 1))
    def test_length(self):
        t_listy = ListClass([0, 1, 2, 3], 5, 6)
        self.assertEqual(4, length(t_listy))
        #print(length(t_listy))
    def test_add(self):
        # NOTE(review): the assertion is commented out, so this test checks
        # nothing beyond ListClass construction.
        t_listy = ListClass([0,1,2,3], 4, 6)
        # print(add(t_listy, 0, 1))
    def test_empty_list(self):
        self.assertEqual(ListClass([], 0, 0), empty_list())
    def test_List(self):
        self.assertEqual(ListClass(1, None, 1), ListClass(1, None, 1))
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"danydaniel9@yahoo.com"
] |
danydaniel9@yahoo.com
|
f469b74c398f33524d8134b54beb3044ba0d23d4
|
c9b3a2f26314f3d300fcb9b4099ec11a2caa8b35
|
/many_classes/__init__.py
|
426015a1309adc7053abf61d151f78131e5a0730
|
[
"MIT"
] |
permissive
|
LJBD/lib-tango-sample-device
|
a52113c817ea8040f035bda4ce8d3aef66d4a3ab
|
0439b64729e09f27029b87d616d5aebd635346d6
|
refs/heads/master
| 2021-01-02T08:54:47.509656
| 2017-08-09T15:46:49
| 2017-08-09T15:46:49
| 99,095,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
"""
This package contains an example on how to run a Tango Device Server with 2
or more Device Classes.
It includes the said 2 example Device Classes and a module to run them in one
Device Server.
"""
__author__ = "Lukasz Dudek"
__all__ = ["device_one", "device_two", "run_server"]
|
[
"lukasz.j.dudek@uj.edu.pl"
] |
lukasz.j.dudek@uj.edu.pl
|
3ee206e6b317fe4144014725ea4049942832e622
|
c46f2840a2773a09e5f4a03c7d20f22f9516f2e0
|
/dash_interactivity.py
|
bb6b74508e59778cd4721557ded6b9b95588b0e7
|
[] |
no_license
|
guruprasath16/guruprasath16
|
881d0d916043d68ff7e25b35e9ca741dec83f68a
|
7bc2fb53eb1ff70546767494f8f3d5476d2bf2f6
|
refs/heads/main
| 2023-06-02T07:37:44.471092
| 2021-06-17T07:25:52
| 2021-06-17T07:25:52
| 370,590,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,042
|
py
|
import pandas as pd
import plotly.graph_objects as go
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
# Read the airline data into pandas dataframe
# (downloaded over HTTP at import time -- requires network access)
airline_data = pd.read_csv('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/airline_data.csv',
                           encoding = "ISO-8859-1",
                           dtype={'Div1Airport': str, 'Div1TailNum': str,
                                  'Div2Airport': str, 'Div2TailNum': str})
# Dash application and static layout: a page title, a numeric year input
# (id 'input-year', default 2010) and a graph (id 'line-plot') that the
# callback below fills in.
app = dash.Dash(__name__)
app.layout = html.Div(children=[html.H1('Airline Performance Dashboard',
                                        style={'textAlign':'center',
                                               'color':'#503D36',
                                               'font-size':40
                                               }),
                                html.Div(["Input Year", dcc.Input(id='input-year', value='2010', type='number', style={'height':'50px', 'fontSize':35})],
                                         style={'font-size':40}),
                                html.Br(),
                                html.Br(),
                                html.Div(dcc.Graph(id='line-plot'))])
# Callback: re-draws the line plot whenever the year input changes.
@app.callback(Output(component_id='line-plot', component_property='figure'),
              Input(component_id='input-year', component_property='value'))
def get_graph(entered_year):
    """Build the Month-vs-average-arrival-delay line chart for one year."""
    # Keep only the rows for the requested year.
    year_df = airline_data[airline_data['Year'] == int(entered_year)]
    # Average arrival delay per month.
    line_data = year_df.groupby('Month')['ArrDelay'].mean().reset_index()
    trace = go.Scatter(x=line_data['Month'], y=line_data['ArrDelay'],
                       mode='lines', marker=dict(color='green'))
    fig = go.Figure(data=trace)
    fig.update_layout(title='Month vs Average Flight Delay Time',
                      xaxis_title='Month', yaxis_title='ArrDelay')
    return fig
# Run the app (starts the Dash development server) when executed directly.
if __name__ == '__main__':
    app.run_server()
|
[
"noreply@github.com"
] |
guruprasath16.noreply@github.com
|
cc00026499a649377e8479b2603316dbbab51c1e
|
2e7f4d016d9f44db9504927a8ee8071406c215bc
|
/hardest/binary_validator.py
|
417c42e0a3ef901432b9dc94cbc48445451d56ba
|
[
"MIT"
] |
permissive
|
proggga/hardest
|
435e89b9b1e35425df90a0be394cff8a9cebe2f2
|
234cb41115c30a756ee11ed7c5fa41c9979d3303
|
refs/heads/master
| 2020-06-20T03:50:02.789663
| 2017-08-18T10:38:25
| 2017-08-18T10:38:25
| 94,193,250
| 2
| 1
|
MIT
| 2018-02-05T18:27:18
| 2017-06-13T09:07:14
|
Python
|
UTF-8
|
Python
| false
| false
| 574
|
py
|
"""Binary file validator."""
import os
from hardest.interfaces.validator import Validator
class BinaryValidator(Validator): # pylint: disable=R0903,W0232
"""Validate is binary file is valid."""
def validate(self, data):
# type: (object) -> bool
"""Validate is file is ok."""
filename = str(data) # type: str
return (bool(filename) and
not filename.endswith('-config') and
os.path.isfile(filename) and
not os.path.islink(filename) and
os.access(filename, os.X_OK))
|
[
"noreply@github.com"
] |
proggga.noreply@github.com
|
865c18dda277000d31c27f85f8a47905f619d3ef
|
f45cc0049cd6c3a2b25de0e9bbc80c25c113a356
|
/LeetCode/堆(heap)/1383. 最大的团队表现值.py
|
df293e7f6ac3d24e2d5c2ca9511ba0195bf928bd
|
[] |
no_license
|
yiming1012/MyLeetCode
|
4a387d024969bfd1cdccd4f581051a6e4104891a
|
e43ee86c5a8cdb808da09b4b6138e10275abadb5
|
refs/heads/master
| 2023-06-17T06:43:13.854862
| 2021-07-15T08:54:07
| 2021-07-15T08:54:07
| 261,663,876
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
"""
公司有编号为 1 到 n 的 n 个工程师,给你两个数组 speed 和 efficiency ,其中 speed[i] 和 efficiency[i] 分别代表第 i 位工程师的速度和效率。请你返回由最多 k 个工程师组成的 最大团队表现值 ,由于答案可能很大,请你返回结果对 10^9 + 7 取余后的结果。
团队表现值 的定义为:一个团队中「所有工程师速度的和」乘以他们「效率值中的最小值」。
示例 1:
输入:n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 2
输出:60
解释:
我们选择工程师 2(speed=10 且 efficiency=4)和工程师 5(speed=5 且 efficiency=7)。他们的团队表现值为 performance = (10 + 5) * min(4, 7) = 60 。
示例 2:
输入:n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 3
输出:68
解释:
此示例与第一个示例相同,除了 k = 3 。我们可以选择工程师 1 ,工程师 2 和工程师 5 得到最大的团队表现值。表现值为 performance = (2 + 10 + 5) * min(5, 4, 7) = 68 。
示例 3:
输入:n = 6, speed = [2,10,3,1,5,8], efficiency = [5,4,3,9,7,2], k = 4
输出:72
提示:
1 <= n <= 10^5
speed.length == n
efficiency.length == n
1 <= speed[i] <= 10^5
1 <= efficiency[i] <= 10^8
1 <= k <= n
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/maximum-performance-of-a-team
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
import heapq
from typing import List
class Solution:
    def maxPerformance(self, n: int, speed: List[int], efficiency: List[int], k: int) -> int:
        """Maximum of (sum of chosen speeds) * (min chosen efficiency) over
        teams of at most k engineers, modulo 10**9 + 7.

        Sweep engineers from highest to lowest efficiency: the current
        engineer fixes the team's minimum efficiency, while a size-k
        min-heap retains the k largest speeds seen so far.
        """
        MOD = 10 ** 9 + 7
        engineers = sorted(zip(efficiency, speed), reverse=True)
        speed_heap = []   # min-heap of the speeds currently counted
        speed_sum = 0     # running sum of the speeds in the heap
        best = 0
        for eff, spd in engineers:
            heapq.heappush(speed_heap, spd)
            speed_sum += spd
            if len(speed_heap) > k:
                # Evict the slowest engineer to stay within the size limit.
                speed_sum -= heapq.heappop(speed_heap)
            best = max(best, speed_sum * eff)
        return best % MOD
if __name__ == '__main__':
    # Smoke test: first example from the problem statement (expected: 60).
    n = 6
    speed = [2, 10, 3, 1, 5, 8]
    efficiency = [5, 4, 3, 9, 7, 2]
    k = 2
    print(Solution().maxPerformance(n, speed, efficiency, k))
|
[
"1129079384@qq.com"
] |
1129079384@qq.com
|
7d26c40695281ce74574f0af79b99979e5eb6f19
|
2ad47d27f4d7e7616935091ff92641a2606b8d5b
|
/all_news/aviation_news/views1.py
|
ae96e729612d6fc94db9cc92cd69bf22209af7ed
|
[] |
no_license
|
arsh14jain/aviator_news
|
446df9e11cc90f95f3a384d717e184eb24ea2d54
|
8485d9a212a4eb68ecbfbc360cee3dd017a86578
|
refs/heads/master
| 2020-12-06T20:28:20.417458
| 2020-01-08T11:23:06
| 2020-01-08T11:23:06
| 232,546,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
import requests
from django.shortcuts import render, redirect
from bs4 import BeautifulSoup as BSoup
from aviation_news.models import Headline
def scrape(request):
    """Fetch the AIN aircraft-news listing and save each article as a Headline."""
    session = requests.Session()
    url = "https://www.ainonline.com/aviation-news/aircraft"
    # NOTE(review): verify=False disables TLS certificate checking -- confirm
    # this is intentional before shipping.
    content = session.get(url, verify=False).content
    soup = BSoup(content, "html.parser")
    print("Hello")
    # Each listing row holds two anchors (link, title text) and one image.
    for article in soup.find_all('div', {"class": "views-row"}):
        anchors = article.find_all('a')
        image = article.find_all('img')[0]
        headline = Headline()
        headline.title = anchors[1].get_text()
        headline.url = anchors[0]['href']
        headline.image = image['src']
        headline.save()
    return redirect("../")
def news_list(request):
    """Render all saved headlines, newest first."""
    context = {'object_list': Headline.objects.all()[::-1]}
    return render(request, "aviation_news/home.html", context)
|
[
"arshjain99@gmail.com"
] |
arshjain99@gmail.com
|
7260be47229f42e8fd2089f31c6484f8c9f28901
|
90c6262664d013d47e9a3a9194aa7a366d1cabc4
|
/tests/storage/cases/test_KT1KeY3WswmdH38Q862u9tnFC5Qks4ikSuvJ.py
|
dc7295f528e4260b33b58611a0d25225baf6975c
|
[
"MIT"
] |
permissive
|
tqtezos/pytezos
|
3942fdab7aa7851e9ea81350fa360180229ec082
|
a4ac0b022d35d4c9f3062609d8ce09d584b5faa8
|
refs/heads/master
| 2021-07-10T12:24:24.069256
| 2020-04-04T12:46:24
| 2020-04-04T12:46:24
| 227,664,211
| 1
| 0
|
MIT
| 2020-12-30T16:44:56
| 2019-12-12T17:47:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1KeY3WswmdH38Q862u9tnFC5Qks4ikSuvJ(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/carthagenet/KT1KeY3WswmdH38Q862u9tnFC5Qks4ikSuvJ.json')
def test_storage_encoding_KT1KeY3WswmdH38Q862u9tnFC5Qks4ikSuvJ(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1KeY3WswmdH38Q862u9tnFC5Qks4ikSuvJ(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1KeY3WswmdH38Q862u9tnFC5Qks4ikSuvJ(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
[
"mz@baking-bad.org"
] |
mz@baking-bad.org
|
d3ee76e5c591a4c9b831c91e31ba1b7351022ac2
|
1b0265c61140a1faabcfbea5282b256bb59d4dd9
|
/duplicate_remove.py
|
c8046294bdd189f1225abd6dd45599692fd5ed9f
|
[] |
no_license
|
verdantfire/Python-Tutorial-for-Beginners-2019---Programming-with-Mosh
|
63ca417c2a757ab3002b1b694ba1ab33a8551f6c
|
7fe10bde939d881050226772ea75d997c5753c52
|
refs/heads/master
| 2020-05-29T13:42:36.416284
| 2019-03-08T12:05:31
| 2019-03-08T12:05:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
# Source data; note the repeated values that must be collapsed.
# (Renamed from `list`, which shadowed the builtin type.)
numbers = [13, 7, 7, 7, 18, 13, 82, 333, 18, 15, 16, 90, 82, 7]

# dict.fromkeys preserves first-occurrence order and is O(n), unlike the
# original `x not in result` scan which was O(n^2).
unique_values = list(dict.fromkeys(numbers))
print(unique_values)
|
[
"noreply@github.com"
] |
verdantfire.noreply@github.com
|
a40427c6e3ecafaa837fb30cf74a5112298114c9
|
70280955a5382d73e58395eba78c119a400f4ce7
|
/abc/78/2.py
|
0e2305c3a0ec10c37d21b773a5ee96ca15cf0788
|
[] |
no_license
|
cohock13/atcoder
|
a7d0e26a10a4e58690347a2e36839c2f503a79ba
|
d268aa68fc96203eab94d021bd158cf84bdb00bc
|
refs/heads/master
| 2021-01-03T00:41:31.055553
| 2020-10-27T12:28:06
| 2020-10-27T12:28:06
| 239,839,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
# Read three integers X, Y, Z from one stdin line and print (X - Z) // (Y + Z).
# (Competitive-programming one-liner; file path suggests AtCoder ABC078.)
X,Y,Z = map(int,input().split())
print((X-Z)//(Y+Z))
|
[
"callout2690@gmail.com"
] |
callout2690@gmail.com
|
6cf99d039199f484ddbf451e74eb04266475954a
|
5c4d29a2643bf2e8304c0b219b892ee1199d3a09
|
/IVTp/2014/Karamyan/Karamyan 9_8.py
|
337c9b7d5fbdcab725e7674f489252d941e06aca
|
[
"Apache-2.0"
] |
permissive
|
MasBad/pythonintask
|
fc30ef1a1182ca72086396523c93899b08b5978e
|
8553f1eaa481d71d2c7046b0959711b1aa56a9f6
|
refs/heads/master
| 2021-01-24T09:21:02.240724
| 2016-10-26T00:34:28
| 2016-10-26T00:34:28
| 69,497,511
| 1
| 0
| null | 2016-10-24T22:21:23
| 2016-09-28T19:38:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,325
|
py
|
# Task 9, Variant 8.
# Word-guessing game: the computer picks a word and the player must guess
# it.  The computer reports the word length and gives five attempts to ask
# whether a particular letter occurs in the word (answers are only yes/no).
# After that the player must guess the whole word.
# Author: Karamyan N.G., 27.05.2016
import random

# Candidate words (Russian); one is chosen at random each run.
WORDS = ("питон",
         "анаграмма",
         "просто",
         "сложно",
         "ответ",
         "вопрос")
word = random.choice(WORDS)
print ('У тебя всего 5 попыток, чтобы отгадать загаданное слово!')
print ('Ты можешь спрсить, если ли определённая буква в этом слове.')
print ('Да прибудет с тобой удача!')
print ('\nКоличество букв в слове: ', len(word))

tries = 5
letter = ()
# Letter-question phase: every question (hit or miss) costs one attempt.
while tries >= 1:
    letter = str(input('В загаданном слове есть буква: '))
    if letter not in word:
        tries -=1
        print ('\nВы ошиблись, такой буквы в слове нет.')
        print ('Осталось попыток:', tries)
    if letter in word:
        tries -= 1
        print ('\nВы угадали, эта буква есть в слове!')
        print ('Осталось попыток:', tries)

# Sentinel the player types to give up.
GameOver = 'Сдаюсь'
print ('\nА теперь надо отгадать слово!')
print ('Евсли вы не знаете, что это за слово, напишите "Сдаюсь".')
correct = (input('\nЭто слово: '))
# NOTE(review): if the very first guess is correct, the loop body is never
# entered and no "you won" message is printed — looks unintended; confirm.
while correct != word:
    print ('\nПопробуйте еще раз!')
    correct = (input('\nЭто слово: '))
    if correct == word:
        print ('\nДа! Поздравляю! Вы выиграли!')
    if correct == GameOver:
        print ('\nЭхх...')
        break
input ('Жмак')
|
[
"karamyan.nkk@gmail.com"
] |
karamyan.nkk@gmail.com
|
f937574359536b682ac650ab692471ff9c88e0db
|
cd957535983d2e996d3db3c4e3d0f20d2fcb7e9b
|
/bump
|
c0bf9af8a73ee29befa19b5a6ff888620936c793
|
[
"WTFPL"
] |
permissive
|
silky/git-bump
|
092e1e7add37c95b4924699272ece8a8cd258bb6
|
0fb04048ab72c881b566903fcf63607d689d419d
|
refs/heads/master
| 2021-01-18T10:38:46.083556
| 2014-01-03T22:09:05
| 2014-01-03T22:09:05
| 20,876,479
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,466
|
#!/usr/bin/env python
import re, sys
__USAGE__ = \
"""BUMP is a semantic versioning bump script which accepts the following
mutually exclusive arguments:
-m - a "major" version bump equal to +1.0.0
-n - a "minor" version bump equal to +0.1.0
-p - a "patch" version bump equal to +0.0.1
All of these options allow for the -r flag, which indicates that the state
is a RELEASE not a SNAPSHOT. If -r is not specified, then -SNAPSHOT is
appended to the updated version string."""
__INITIAL__ = ['0', '0', '1']
if __name__ == "__main__":
v = []
try:
v = re.split(re.compile("\.|-"),open("VERSION").read()) or __INITIAL__
v = v[0:3]
map(int, v)
except ValueError:
print("failed to parse the existing VERSION file, assuming v 0.0.1")
v = ['0', '0', '1']
except FileNotFoundError:
print("failed to find a VERSION file, assuming v 0.0.0")
v = ['0', '0', '0']
op = ''
try:
op = sys.argv[1]
except:
print(__USAGE__)
sys.exit(-1)
if(op == '-m'):
v = [str(int(v[0])+1), '0', '0']
elif(op == '-n'):
v = [v[0], str(int(v[1])+1), '0']
elif(op == '-p'):
v = [v[0], v[1], str(int(v[2])+1)]
else:
print(__USAGE__)
sys.exit(-1)
v = '.'.join(v)
if "-r" not in sys.argv:
v += "-SNAPSHOT"
v += "\n"
print(v)
open("VERSION",'w').write(v)
sys.exit(0)
|
[
"rmckenzie92@gmail.com"
] |
rmckenzie92@gmail.com
|
|
2668fcd1ee7cd482eaec956800ad9e5415313b72
|
6573401ab6b19604bff5c79e66f77412fdb26b36
|
/giris/giris/post/admin.py
|
d10ebad42221d484bf2833dbbf3848b3c26d99f1
|
[] |
no_license
|
yasar38/Python-Django-Blog-Web-Site
|
d2da49805c02173a2e282c9f31783b9e6271838d
|
a763644adeab13d433f0a9f69095058905390272
|
refs/heads/main
| 2023-07-25T16:30:35.338534
| 2021-09-07T13:36:23
| 2021-09-07T13:36:23
| 403,982,024
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
from django.contrib import admin
from .models import Post
# from post.models import
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'publishing_date', 'slug'] # üstlerinde tarih ve detaylar çıkması postların
list_display_links = ['publishing_date'] #üstlerine geldiğin zaman içine gitme
list_filter = ['publishing_date'] #sağ tarafta tarih filtreleme
search_fields = ['title', 'content'] #arama çubuğu
list_editable = ['title']
class Meta:
model = Post
admin.site.register(Post, PostAdmin)
|
[
"56197358+yasar38@users.noreply.github.com"
] |
56197358+yasar38@users.noreply.github.com
|
2a41970083f8b69ab98c7b9f179a1f3b690ce11c
|
8743912905eb2e1468ec005d6531340c9bb38d00
|
/venv/bin/easy_install-3.7
|
1b9adf076246bfbd1df72306e935e42bd88be7be
|
[] |
no_license
|
Sharanappabadni/robotframeworkdemo
|
687366b92b813810cf2b07d3e6d03039953074ce
|
3491afd3e3b13d3b63bd826b60098d25e83153a3
|
refs/heads/master
| 2020-07-27T03:44:52.393764
| 2019-09-17T10:40:51
| 2019-09-17T10:40:51
| 208,856,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
7
|
#!/Users/sharanbadni/PycharmProjects/RFSelenium/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
# Auto-generated console-script shim: setuptools writes these wrappers into
# the venv's bin directory.  Do not edit by hand — it is regenerated on
# (re)installation of the package.
__requires__ = 'setuptools==40.8.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Normalize argv[0] (strip .exe / -script.py suffixes added on Windows),
    # then delegate to the package's registered console entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
    )
|
[
"sharanbadni@Sharans-MacBook-Pro-3.local"
] |
sharanbadni@Sharans-MacBook-Pro-3.local
|
1486e03f2d9b873d38d171c6e3b51981836b0c77
|
cd87ee7b6fb0c8510db18ae67219a4c130a3d1c5
|
/can2mqtt/bridge.py
|
3635d919178e4dc1b0f9b94f4585a88dd67f81ef
|
[] |
no_license
|
sadziu82/can2mqtt
|
53e64779030621667144887a2d685bcabed78499
|
6422c7ce299646146389988be37847b5622e1657
|
refs/heads/master
| 2021-07-10T16:19:45.096382
| 2020-08-23T20:19:40
| 2020-08-23T20:19:40
| 190,271,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,606
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import can
import sys
import json
from struct import unpack, pack
from can2mqtt.operation import Operation
from can2mqtt.message import Message
from can2mqtt.device import Device
from can2mqtt.node import Node
from can2mqtt.mqtt import Mqtt
from can2mqtt.excp import HomeCanMessageError, HomeCanMessageNotSupported, HomeCanBridgingForbidden
from can2mqtt.home_automation import KeyAction, DigitalOutput, Cover
def can2mqtt(can_frame):
    """ Convert CAN frame into MQTT message

    Dispatches on the Message type decoded from the frame's arbitration id.
    Only STATE and EVENT operations may cross the bridge in this direction.

    Raises:
        HomeCanBridgingForbidden: for any other operation.
        HomeCanMessageNotSupported: for message types without a handler.
    """
    msg = Message.can_decode(can_frame.arbitration_id)
    op = Operation.can_decode(can_frame.arbitration_id)
    if op not in [Operation.STATE, Operation.EVENT]:
        raise HomeCanBridgingForbidden('{} may not be translated from CAN into MQTT'.format(op.name))
    if msg == Message.PING:
        mqtt_msg = _can2mqtt_ping(can_frame)
    elif msg == Message.DATETIME:
        mqtt_msg = _can2mqtt_datetime(can_frame)
    elif msg == Message.KEY:
        mqtt_msg = _can2mqtt_key(can_frame)
    elif msg in [Message.TEMPERATURE, Message.RHUMIDITY,
                 Message.ILLUMINANCE, Message.PRESSURE]:
        mqtt_msg = _can2mqtt_simple_sensor_report(can_frame)
    elif msg == Message.DUST:
        # imported lazily to avoid a hard dependency when DUST is unused
        from can2mqtt.bridge_dust import _can2mqtt_dust
        mqtt_msg = _can2mqtt_dust(can_frame)
    elif msg == Message.DIGITAL_OUTPUT:
        mqtt_msg = _can2mqtt_digital_output(can_frame)
    elif msg == Message.PCA963x:
        from can2mqtt.bridge_pca963x import _can2mqtt_pca963x
        mqtt_msg = _can2mqtt_pca963x(can_frame)
    elif msg == Message.COVER:
        mqtt_msg = _can2mqtt_cover(can_frame)
    else:
        raise HomeCanMessageNotSupported('can message {} type not yet '
                                         'supported'.format(msg.name))
    return mqtt_msg
def _can2mqtt_ping(can_frame):
    """ Parse HomeCan CAN frame containing ping message

    Payload is one little-endian uint16, rendered as hex.  Publishes on
    topic NODE/<node>/PING/<device>/<op>.

    Raises:
        HomeCanMessageNotSupported: if the frame is not a PING message.
    """
    node_id = Node.can_decode(can_frame.arbitration_id)
    device_id = Device.can_decode(can_frame.arbitration_id)
    msg = Message.can_decode(can_frame.arbitration_id)
    op = Operation.can_decode(can_frame.arbitration_id)
    if msg == Message.PING:
        raw_payload, = unpack('<H', can_frame.data)
        payload = '0x{:4X}'.format(raw_payload)
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return Mqtt.message('NODE/{:X}/{}/{:X}/{}'.format(
                        node_id, msg.name, device_id, op.name),
                        payload)
def _can2mqtt_datetime(can_frame):
    """ Parse HomeCan CAN frame containing date/time message

    Payload layout is '<HBBBBBB': year, month, day, hour, minute, second,
    weekday.  Published as a JSON object on NODE/<node>/DATETIME/....

    Raises:
        HomeCanMessageNotSupported: if the frame is not a DATETIME message.
    """
    node_id = Node.can_decode(can_frame.arbitration_id)
    device_id = Device.can_decode(can_frame.arbitration_id)
    msg = Message.can_decode(can_frame.arbitration_id)
    op = Operation.can_decode(can_frame.arbitration_id)
    if msg == Message.DATETIME:
        year, month, day, hour, minute, second, weekday = unpack('<HBBBBBB', can_frame.data)
        payload = json.dumps({"year": year, "month": month, "day": day,
                              "hour": hour, "minute": minute, "second": second,
                              "dayofweek": weekday})
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return Mqtt.message('NODE/{:X}/{}/{:X}/{}'.format(
                        node_id, msg.name, device_id, op.name),
                        payload)
def _can2mqtt_key(can_frame):
    """ Parse HomeCan CAN frame containing key message

    Payload layout is '<BBBB': keycode, KeyAction code, auto-repeat count,
    multi-press count.  Published as JSON on NODE/<node>/KEY/....

    Raises:
        HomeCanMessageNotSupported: if the frame is not a KEY message.
    """
    node_id = Node.can_decode(can_frame.arbitration_id)
    device_id = Device.can_decode(can_frame.arbitration_id)
    msg = Message.can_decode(can_frame.arbitration_id)
    op = Operation.can_decode(can_frame.arbitration_id)
    if msg == Message.KEY:
        keycode, key_action_raw, ar_count, mp_count = unpack('<BBBB', can_frame.data)
        key_action = KeyAction(key_action_raw).name
        payload = json.dumps({"keycode": keycode, "action": key_action,
                              "ar_count": ar_count, "mp_count": mp_count})
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return Mqtt.message('NODE/{:X}/{}/{:X}/{}'.format(
                        node_id, msg.name, device_id, op.name),
                        payload)
def _can2mqtt_simple_sensor_report(can_frame):
    """ Parse HomeCan CAN frame containing simple sensor message

    TEMPERATURE/RHUMIDITY carry one little-endian float (formatted to two
    decimals); ILLUMINANCE/PRESSURE carry one little-endian uint16.

    Raises:
        HomeCanMessageNotSupported: for any other message type.
    """
    node_id = Node.can_decode(can_frame.arbitration_id)
    device_id = Device.can_decode(can_frame.arbitration_id)
    msg = Message.can_decode(can_frame.arbitration_id)
    op = Operation.can_decode(can_frame.arbitration_id)
    if msg in [Message.TEMPERATURE, Message.RHUMIDITY]:
        raw_payload, = unpack('<f', can_frame.data)
        payload = '{:0.2f}'.format(raw_payload)
    elif msg in [Message.ILLUMINANCE, Message.PRESSURE]:
        raw_payload, = unpack('<H', can_frame.data)
        payload = '{:d}'.format(raw_payload)
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return Mqtt.message('NODE/{:X}/{}/{:X}/{}'.format(
                        node_id, msg.name, device_id, op.name),
                        payload)
def _can2mqtt_digital_output(can_frame):
    """ Parse HomeCan CAN frame containing digital output message

    Payload is one byte decoded through the DigitalOutput enum and
    published as JSON {"state": <name>}.

    NOTE(review): unlike the other parsers this returns *bytes*, and the
    reverse direction (_mqtt2can_digital_output) reads key "cmd", not
    "state".  The two directions carry different operations so this may be
    deliberate — confirm against the MQTT consumers.

    Raises:
        HomeCanMessageNotSupported: if the frame is not DIGITAL_OUTPUT.
    """
    node_id = Node.can_decode(can_frame.arbitration_id)
    device_id = Device.can_decode(can_frame.arbitration_id)
    msg = Message.can_decode(can_frame.arbitration_id)
    op = Operation.can_decode(can_frame.arbitration_id)
    if msg == Message.DIGITAL_OUTPUT:
        cmd_raw, = unpack('<B', can_frame.data)
        cmd = DigitalOutput(cmd_raw).name
        payload = bytes(json.dumps({"state": cmd}), 'utf-8')
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return Mqtt.message('NODE/{:X}/{}/{:X}/{}'.format(
                        node_id, msg.name, device_id, op.name),
                        payload)
def _can2mqtt_cover(can_frame):
    """ Parse HomeCan CAN frame containing cover message

    Payload layout is '<BB': Cover enum code and position.  Published as
    JSON {"cmd": <name>, "position": <int>}.

    Raises:
        HomeCanMessageNotSupported: if the frame is not a COVER message.
    """
    node_id = Node.can_decode(can_frame.arbitration_id)
    device_id = Device.can_decode(can_frame.arbitration_id)
    msg = Message.can_decode(can_frame.arbitration_id)
    op = Operation.can_decode(can_frame.arbitration_id)
    if msg == Message.COVER:
        cmd_raw, position, = unpack('<BB', can_frame.data)
        cmd = Cover(cmd_raw).name
        payload = json.dumps({"cmd": cmd, "position": position})
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return Mqtt.message('NODE/{:X}/{}/{:X}/{}'.format(
                        node_id, msg.name, device_id, op.name),
                        payload)
def mqtt2can(mqtt_msg):
    """ Convert MQTT message into CAN frame

    Expected topic shape:
        NODE/<node-hex>/<MSG>/<device-hex>[/<extra>]/<OP>
    Only QUERY, SET and RESET operations may cross in this direction.

    Raises:
        HomeCanMessageError: if the topic does not match the shape or the
            message type is unknown.
        HomeCanBridgingForbidden: for operations other than QUERY/SET/RESET.
        HomeCanMessageNotSupported: for message types without a handler.
    """
    match = re.match(r'^NODE/(?P<node>[0-9a-fA-F]+)/'
                     '(?P<msg>[^/]+)/(?P<dev>[0-9a-fA-F]+)/'
                     '((?P<extra>\S+)/)?(?P<op>[^/]+)$',
                     mqtt_msg.topic)
    if not match:
        raise HomeCanMessageError('bad mqtt message')
    ## base format seems ok, extract parts for further processing
    node = match.group('node')
    dev = match.group('dev')
    try:
        msg = Message.mqtt_decode(match.group('msg'))
    except KeyError:
        raise HomeCanMessageError('wrong mqtt message type')
    op = Operation[match.group('op')]
    if op not in [Operation.QUERY, Operation.SET, Operation.RESET]:
        raise HomeCanBridgingForbidden('{} may not be translated from MQTT into CAN'.format(op.name))
    ## FIXME should we translate all messages back and forth?
    #if op not in [HC_MESSAGE.QUERY, HC_MESSAGE.SET]:
    #    raise HomeCanMessageError('wrong mqtt message type')
    ## calculate CAN extended id
    can_eid = msg | Node.mqtt2can(node) | Device.mqtt2can(dev) | op
    ## prepare frame data based on msg type
    if msg == Message.PING:
        can_frame = _mqtt2can_ping(can_eid, msg, mqtt_msg.payload)
    elif msg == Message.DATETIME:
        can_frame = _mqtt2can_datetime(can_eid, msg, mqtt_msg.payload)
    elif msg == Message.KEY:
        can_frame = _mqtt2can_key(can_eid, msg, mqtt_msg.payload)
    elif msg in [Message.TEMPERATURE, Message.RHUMIDITY,
                 Message.ILLUMINANCE, Message.PRESSURE]:
        can_frame = _mqtt2can_simple_sensor(can_eid, msg, mqtt_msg.payload)
    elif msg in [Message.DUST]:
        # imported lazily to avoid a hard dependency when DUST is unused
        from can2mqtt.bridge_dust import _mqtt2can_dust
        can_frame = _mqtt2can_dust(can_eid, msg, mqtt_msg.payload)
    elif msg == Message.DIGITAL_OUTPUT:
        can_frame = _mqtt2can_digital_output(can_eid, msg, mqtt_msg.payload)
    elif msg == Message.PCA963x:
        from can2mqtt.bridge_pca963x import _mqtt2can_pca963x
        extra = match.group('extra')
        can_frame = _mqtt2can_pca963x(can_eid, msg, extra, mqtt_msg.payload)
    elif msg == Message.COVER:
        can_frame = _mqtt2can_cover(can_eid, msg, mqtt_msg.payload)
    else:
        raise HomeCanMessageNotSupported('mqtt message {} not yet supported'.
                                         format(msg.name))
    return can_frame
def _mqtt2can_ping(can_eid, msg, payload):
    """ Generate HomeCan CAN frame containing ping message

    `payload` is a hex string (e.g. '0x1A2B'); packed as little-endian
    uint16.

    Raises:
        HomeCanMessageNotSupported: if msg is not PING.
    """
    if msg == Message.PING:
        data = pack('<H', int(payload, 0x10))
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return can.Message(arbitration_id=can_eid, data=data)
def _mqtt2can_datetime(can_eid, msg, payload):
    """ Generate HomeCan CAN frame containing date/time message

    `payload` is JSON with keys year/month/day/hour/minute/second/dayofweek,
    packed as '<HBBBBBB' (mirror of _can2mqtt_datetime).

    Raises:
        HomeCanMessageNotSupported: if msg is not DATETIME.
    """
    if msg == Message.DATETIME:
        dt = json.loads(payload)
        data = pack('<HBBBBBB', dt['year'], dt['month'], dt['day'],
                    dt['hour'], dt['minute'], dt['second'],
                    dt['dayofweek'])
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return can.Message(arbitration_id=can_eid, data=data)
def _mqtt2can_key(can_eid, msg, payload):
    """ Generate HomeCan CAN frame containing key message

    `payload` is JSON with keys keycode/action/ar_count/mp_count; the
    action name is mapped back through the KeyAction enum.

    Raises:
        HomeCanMessageNotSupported: if msg is not KEY.
    """
    if msg == Message.KEY:
        key = json.loads(payload)
        data = pack('<BBBB', key['keycode'], KeyAction[key['action']], key['ar_count'], key['mp_count'])
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return can.Message(arbitration_id=can_eid, data=data)
def _mqtt2can_simple_sensor(can_eid, msg, payload):
    """ Generate HomeCan CAN frame containing simple sensor message

    TEMPERATURE/RHUMIDITY pack as little-endian float; ILLUMINANCE/PRESSURE
    as little-endian uint16 (mirror of _can2mqtt_simple_sensor_report).

    Raises:
        HomeCanMessageNotSupported: for any other message type.
    """
    if msg in [Message.TEMPERATURE, Message.RHUMIDITY]:
        data = pack('<f', float(payload))
    elif msg in [Message.ILLUMINANCE, Message.PRESSURE]:
        data = pack('<H', int(payload))
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return can.Message(arbitration_id=can_eid, data=data)
def _mqtt2can_digital_output(can_eid, msg, payload):
    """ Generate HomeCan CAN frame containing digital output message

    `payload` is UTF-8 JSON; the DigitalOutput enum name under key "cmd"
    is packed as a single byte.

    NOTE(review): the CAN->MQTT direction publishes key "state", while this
    reads "cmd" — verify the command-topic producers really send "cmd".

    Raises:
        HomeCanMessageNotSupported: if msg is not DIGITAL_OUTPUT.
    """
    if msg == Message.DIGITAL_OUTPUT:
        js = json.loads(payload.decode('utf-8'))
        data = pack('<B', DigitalOutput[js['cmd']].value)
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return can.Message(arbitration_id=can_eid, data=data)
def _mqtt2can_cover(can_eid, msg, payload):
    """ Generate HomeCan CAN frame containing cover message

    `payload` is UTF-8 JSON with keys "cmd" (Cover enum name) and
    "position"; packed as '<BB' (mirror of _can2mqtt_cover).

    Raises:
        HomeCanMessageNotSupported: if msg is not COVER.
    """
    if msg == Message.COVER:
        js = json.loads(payload.decode('utf-8'))
        data = pack('<BB', Cover[js['cmd']].value, js['position'])
    else:
        raise HomeCanMessageNotSupported('can message {} type not '
                                         'supported by {}'.format(msg.name,
                                         sys._getframe().f_code.co_name))
    return can.Message(arbitration_id=can_eid, data=data)
|
[
"pawel.sadowski@ithaca.pl"
] |
pawel.sadowski@ithaca.pl
|
29d4542fbdb48121fa28c9c41ecc6168653409b7
|
ef6b4973cec48057444fe29e589eef5b7f5d9898
|
/result.py
|
76478b0cdbb459eff405dc3fe827bac102a9bb44
|
[] |
no_license
|
mqf-ython/My_git
|
1894e30770ba39fd80f28672993bb92ba6533d36
|
b466b74820a5fc08b7a78e13b051f9944b45027e
|
refs/heads/master
| 2020-11-28T05:50:10.482655
| 2019-12-23T11:59:34
| 2019-12-23T11:59:34
| 229,721,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21
|
py
|
这是一个py文件
|
[
"mqf842562664@sina.com"
] |
mqf842562664@sina.com
|
fd2e76e82415d22ca41e18768408d78109ce9287
|
5ad1d0a55cd0b9ffb582fba7f85cfcab690b3ff6
|
/tests/test_rottenTomatoesTvShowsBrowser.py
|
4afa7ab6152e29a736667387e0c7c7c0a76754e0
|
[
"MIT"
] |
permissive
|
jaebradley/rotten_tomatoes_cli
|
e4624150be486596fb749a239790c2ece9cdb7a4
|
9cecf3d7028aa238f1fb07e53b702e389d94f973
|
refs/heads/master
| 2022-12-11T03:05:48.541756
| 2019-07-30T14:55:48
| 2019-07-30T14:55:48
| 92,615,694
| 18
| 2
|
MIT
| 2022-12-07T23:57:12
| 2017-05-27T18:08:18
|
Python
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
from unittest import TestCase
from mock import Mock, patch
from data.services import RottenTomatoesTvShowsBrowser
class TestRottenTomatoesTvShowsBrowser(TestCase):
browser = RottenTomatoesTvShowsBrowser()
@patch("rotten_tomatoes_client.RottenTomatoesClient.browse_tv_shows")
def test_browse(self, mocked_client_browse_tv_shows):
results = {"results": "jaebaebae"}
category = "category"
tv_shows = "tv shows"
self.browser.tv_shows_parser.parse = Mock("mock_parse")
self.browser.tv_shows_parser.parse.return_value = tv_shows
mocked_client_browse_tv_shows.return_value = results
outcome = self.browser.browse(category=category)
mocked_client_browse_tv_shows.assert_called_once_with(category=category)
self.browser.tv_shows_parser.parse.assert_called_once_with(tv_show_results="jaebaebae")
self.assertEqual(outcome, tv_shows)
|
[
"jae.b.bradley@gmail.com"
] |
jae.b.bradley@gmail.com
|
bc7b37f363d264ba60828d96ba860e347c24131a
|
9bb16f8fbf9f562f1171a3bbff8318a47113823b
|
/agc002/agc002_c/main.py
|
ba4c1ff6b1f5cc9cef6d0e52c7435d9f5a81cd6f
|
[] |
no_license
|
kyamashiro/atcoder
|
83ab0a880e014c167b6e9fe9457e6972901353fc
|
999a7852b70b0a022a4d64ba40d4048ee4cc0c9c
|
refs/heads/master
| 2022-06-01T03:01:39.143632
| 2022-05-22T05:38:42
| 2022-05-22T05:38:42
| 464,391,209
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
#!/usr/bin/env python3
# from typing import *

# Answer strings expected by the judge.
YES = 'Possible'
NO = 'Impossible'

# def solve(N: int, L: int, a: List[int]) -> Any:
def solve(N, L, a):
    """Solve AGC002-C for N ropes of lengths a with threshold L.

    Unimplemented stub: currently returns None.
    """
    pass  # TODO: edit here
# generated by oj-template v4.8.1 (https://github.com/online-judge-tools/template-generator)
def main():
import sys
tokens = iter(sys.stdin.read().split())
N = int(next(tokens))
L = int(next(tokens))
a = [None for _ in range(N)]
for i in range(N):
a[i] = int(next(tokens))
assert next(tokens, None) is None
ans = solve(N, L, a)
print(ans) # TODO: edit here
if __name__ == '__main__':
main()
|
[
"kyamashiro73@gmail.com"
] |
kyamashiro73@gmail.com
|
a9f5ea7a63bc4d731f11bb06c5adae2e27a6a1c8
|
f7c657bb472029b56c3dc1e7cf8bede5c7414052
|
/cgi-bin/convertStatus.cgi
|
5282399f6ef7bea99befb406d6e4300931c8c789
|
[] |
no_license
|
tofergregg/virtualgrade
|
3210a7544e554827f1494e0c4f2ca6f46e09bb3e
|
76aaa08b69876ea073d15628a8e1abd3ebfdf30b
|
refs/heads/master
| 2021-01-17T11:18:53.247739
| 2016-05-21T18:02:38
| 2016-05-21T18:02:38
| 15,314,995
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
cgi
|
#!python2.7virt/venv/bin/python
# CGI endpoint that tails a conversion log: given a conversion id and the
# number of lines the client has already seen, it returns only the new
# log lines as text/plain (long-poll style progress reporting).
import cgi,sys,os
import cgitb
import subprocess
import uuid
import threading
cgitb.enable()

dataDir = "../data/"
logDir = "log/"

# CGI response header (blank line terminates the headers)
sys.stdout.write("Content-Type: text/plain")
sys.stdout.write("\n")
sys.stdout.write("\n")
sys.stdout.flush()

form = cgi.FieldStorage()
convertId = form['convertId'].value       # which conversion's log to read
linesRead = int(form['linesRead'].value)  # lines the client already has
#print 'linesRead'+str(linesRead)
#print "convertId: " + convertId

try:
    with open(dataDir+logDir+convertId+'.log',"r") as f:
        fullFile = f.readlines()
        # emit only the lines the client has not seen yet
        for line in fullFile[linesRead:]:
            sys.stdout.write(line)
except IOError:
    pass # file doesn't exist yet, but we don't care
|
[
"cgregg@cs.tufts.edu"
] |
cgregg@cs.tufts.edu
|
e143191f39ab16858822f4ae63456d0a9efd03a7
|
f200708b8e5a67074f6c805a736311e9b1637532
|
/part_02_system_programming/part_2_3_concur/day9/test.py
|
939992b2b6b0079e70039b5ac18e2175d75becdf
|
[] |
no_license
|
vivid-ZLL/tedu
|
48b78951eae07f5f0433ba85f7cc4e07cd76011d
|
319daf56d88e92f69ee467e0ccf83c01367ed137
|
refs/heads/master
| 2021-04-23T22:40:11.391574
| 2020-01-25T11:26:56
| 2020-01-25T11:26:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
class A01:
    """Toy base class holding a single instance attribute ``a``."""

    def __init__(self, a="998"):
        self.a = a


class B01(A01):
    """Subclass demonstrating ``super()`` delegation in ``__init__``."""

    def __init__(self):
        super().__init__()

    def f01(self):
        # BUG FIX: the original did `print(super().a)`, which raises
        # AttributeError — super() proxies *class*-level attribute lookup
        # only, and `a` is an instance attribute set by A01.__init__.
        # The attribute lives on `self`.
        print(self.a)


c01 = B01()
print(c01.a)
|
[
"283438692@qq.com"
] |
283438692@qq.com
|
14fcbe7c9e598d53e6d34ada297850aec3f245aa
|
c3f6936220f76c056b5b6349a2adb0079e651d90
|
/tethysapp/earth_engine/app.py
|
f47105ea16e560b01bbf64f16a46d4a992eeb307
|
[
"BSD-3-Clause"
] |
permissive
|
kasitif/tethysapp-earth_engine
|
d4fbe0398e68caf847b9e14b6e2bfd8bb49a8671
|
8d029d12cfd37f8ed45594c85dd49de041c9f180
|
refs/heads/master
| 2022-11-28T12:14:51.946940
| 2020-08-12T09:39:36
| 2020-08-12T09:39:36
| 286,971,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
from tethys_sdk.base import TethysAppBase, url_map_maker
class EarthEngine(TethysAppBase):
"""
Tethys app class for Earth Engine.
"""
name = 'Earth Engine'
index = 'earth_engine:home'
icon = 'earth_engine/images/earth-engine-logo.png'
package = 'earth_engine'
root_url = 'earth-engine'
color = '#524745'
description = ''
tags = ''
enable_feedback = False
feedback_emails = []
def url_maps(self):
"""
Add controllers
"""
UrlMap = url_map_maker(self.root_url)
url_maps = (
UrlMap(
name='home',
url='earth-engine',
controller='earth_engine.controllers.home.home'
),
UrlMap(
name='about',
url='earth-engine/about',
controller='earth_engine.controllers.home.about'
),
UrlMap(
name='viewer',
url='earth-engine/viewer',
controller='earth_engine.controllers.viewer.viewer'
),
UrlMap(
name='get_image_collection',
url='earth-engine/viewer/get-image-collection',
controller='earth_engine.controllers.viewer.get_image_collection'
),
UrlMap(
name='get_time_series_plot',
url='earth-engine/viewer/get-time-series-plot',
controller='earth_engine.controllers.viewer.get_time_series_plot'
),
UrlMap(
name='rest_get_time_series',
url='earth-engine/api/get-time-series',
controller='earth_engine.controllers.rest.get_time_series'
),
)
return url_maps
|
[
"kasitif@gmail.com"
] |
kasitif@gmail.com
|
90dee1592f22498dfffa373aab92d94898758b53
|
787a4bddd35110f96c7a133a9ce394d1b949f8dc
|
/01_Deep_Learning/00_Courses/PyTorch_Udemy/Course_notes/Summarize materials/3_overfitting.py
|
239eadbdba9ee21f57ae58cef42838ebe134f8f2
|
[] |
no_license
|
rzhang0716/Data-Science
|
8310b2e49f417633f88046225abb6df7447d80c5
|
c456d3223d22b494a77d8142a1b0f9b53cec2bab
|
refs/heads/master
| 2023-08-31T15:03:03.953547
| 2023-08-09T18:32:26
| 2023-08-09T18:32:26
| 244,043,432
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,063
|
py
|
# -*- coding: utf-8 -*-
"""3. Overfitting.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1HBtFuLMd-Knfd8_KokRiZ4y7UB91VVdD
"""

# import libraries
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# import dataset (comes with seaborn)
import seaborn as sns
iris = sns.load_dataset('iris')

# convert from pandas dataframe to tensor (first four columns = features)
data = torch.tensor( iris[iris.columns[0:4]].values ).float()

# transform species to number: 0=setosa (implicit), 1=versicolor, 2=virginica
labels = torch.zeros(len(data), dtype=torch.long)
# labels[iris.species=='setosa'] = 0 # don't need!
labels[iris.species=='versicolor'] = 1
labels[iris.species=='virginica'] = 2
"""# 1. Separate data into train and test Manually"""
# (no devset here)
# how many training examples
propTraining = .8 # in proportion, not percent
nTraining = int(len(labels)*propTraining)
# initialize a boolean vector to select data and labels
traintestBool = np.zeros(len(labels),dtype=bool)
# is this the correct way to select samples?
# traintestBool[range(nTraining)] = True
# this is better, but why?
items2use4train = np.random.choice(range(len(labels)),nTraining,replace=False)
traintestBool[items2use4train] = True
traintestBool
# test whether it's balanced
print('Average of full data:')
print( torch.mean(labels.float()) ) # =1 by definition
print(' ')
print('Average of training data:')
print( torch.mean(labels[traintestBool].float()) ) # should be 1...
print(' ')
print('Average of test data:')
print( torch.mean(labels[~traintestBool].float()) ) # should also be 1...
# create the ANN model
# model architecture
ANNiris = nn.Sequential(
nn.Linear(4,64), # input layer
nn.ReLU(), # activation unit
nn.Linear(64,64), # hidden layer
nn.ReLU(), # activation unit
nn.Linear(64,3), # output units
)
# loss function
lossfun = nn.CrossEntropyLoss()
# optimizer
optimizer = torch.optim.SGD(ANNiris.parameters(),lr=.01)
# entire dataset
print( data.shape )
# training set
print( data[traintestBool,:].shape )
# test set
print( data[~traintestBool,:].shape )
"""## Train and test the model"""
# train the model
numepochs = 1000

# initialize losses
losses = torch.zeros(numepochs)
ongoingAcc = []

# loop over epochs (full-batch training on the training subset only)
for epochi in range(numepochs):

    # forward pass
    yHat = ANNiris(data[traintestBool,:])

    # compute accuracy (note: denser than previous code!)
    ongoingAcc.append( 100*torch.mean(
        (torch.argmax(yHat,axis=1) == labels[traintestBool]).float()) )

    # compute loss
    loss = lossfun(yHat,labels[traintestBool])
    losses[epochi] = loss

    # backprop
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# compute train and test accuracies

# final forward pass USING TRAINING DATA
predictions = ANNiris(data[traintestBool,:])
trainacc = 100*torch.mean((torch.argmax(predictions,axis=1) == labels[traintestBool]).float())

# final forward pass USING TEST DATA!
predictions = ANNiris(data[~traintestBool,:])
testacc = 100*torch.mean((torch.argmax(predictions,axis=1) == labels[~traintestBool]).float())

# report accuracies
print('Final TRAIN accuracy: %g%%' %trainacc)
print('Final TEST accuracy: %g%%' %testacc)
"""# 2. Scikit-Learn"""
from sklearn.model_selection import train_test_split
# a function that creates the ANN model
def createANewModel():
    """Build a fresh 4-64-64-3 ReLU classifier with its loss and optimizer.

    Returns:
        (model, loss_fn, opt): an nn.Sequential, a CrossEntropyLoss, and
        an SGD optimizer (lr=.01) bound to the model's parameters.
    """
    model = nn.Sequential(
        nn.Linear(4, 64),   # input layer: 4 iris features
        nn.ReLU(),          # activation unit
        nn.Linear(64, 64),  # hidden layer
        nn.ReLU(),          # activation unit
        nn.Linear(64, 3),   # output units: 3 species logits
    )
    loss_fn = nn.CrossEntropyLoss()
    opt = torch.optim.SGD(model.parameters(), lr=.01)
    return model, loss_fn, opt
# train the model

# global parameter
numepochs = 200

def trainTheModel(trainProp):
    """Train the global ANNiris for `numepochs` epochs.

    Args:
        trainProp: fraction of the data used for training (forwarded to
            train_test_split as train_size).

    Returns:
        (trainAcc, testAcc): per-epoch accuracies in percent.

    NOTE(review): reads globals ANNiris/lossfun/optimizer/data/labels and
    numepochs; it also re-splits the data EVERY epoch, so over training the
    model sees every sample — confirm this is the intended demonstration.
    """
    # initialize losses
    losses = torch.zeros(numepochs)
    trainAcc = []
    testAcc = []

    # loop over epochs
    for epochi in range(numepochs):

        # separate train from test data
        # Note 1: unique split for each epoch!
        # Note 2: here we specify the training size, not the testing size!
        X_train,X_test, y_train,y_test = train_test_split(data,labels, train_size=trainProp)

        # forward pass and loss
        yHat = ANNiris(X_train)
        loss = lossfun(yHat,y_train)

        # backprop
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # compute training accuracy
        trainAcc.append( 100*torch.mean((torch.argmax(yHat,axis=1) == y_train).float()).item() )

        # test accuracy
        predlabels = torch.argmax( ANNiris(X_test),axis=1 )
        testAcc.append( 100*torch.mean((predlabels == y_test).float()).item() )

    # function output
    return trainAcc,testAcc
# Sweep training-set proportions from 20% to 95% and record the per-epoch
# train/test accuracy for each (rows = proportions, cols = epochs).
trainSetSizes = np.linspace(.2,.95,10)

allTrainAcc = np.zeros((len(trainSetSizes),numepochs))
allTestAcc = np.zeros((len(trainSetSizes),numepochs))

for i in range(len(trainSetSizes)):

    # create a model (fresh weights for every proportion)
    ANNiris,lossfun,optimizer = createANewModel()

    # train the model
    trainAcc,testAcc = trainTheModel(trainSetSizes[i])

    # store the results
    allTrainAcc[i,:] = trainAcc
    allTestAcc[i,:] = testAcc

# Heatmaps: accuracy as a function of epoch (x) and training size (y).
fig,ax = plt.subplots(1,2,figsize=(13,5))

ax[0].imshow(allTrainAcc,aspect='auto',
             vmin=50,vmax=90, extent=[0,numepochs,trainSetSizes[-1],trainSetSizes[0]])
ax[0].set_xlabel('Epochs')
ax[0].set_ylabel('Training size proportion')
ax[0].set_title('Training accuracy')

p = ax[1].imshow(allTestAcc,aspect='auto',
             vmin=50,vmax=90, extent=[0,numepochs,trainSetSizes[-1],trainSetSizes[0]])
ax[1].set_xlabel('Epochs')
ax[1].set_ylabel('Training size proportion')
ax[1].set_title('Test accuracy')
fig.colorbar(p,ax=ax[1])

plt.show()
"""# 3. Data Loader"""
from torch.utils.data import DataLoader
# use scikitlearn to split the data
train_data,test_data, train_labels,test_labels = \
train_test_split(data, labels, train_size=.8)
# then convert them into PyTorch Datasets (note: already converted to tensors)
train_data = torch.utils.data.TensorDataset(train_data,train_labels)
test_data = torch.utils.data.TensorDataset(test_data,test_labels)
# finally, translate into dataloader objects
train_loader = DataLoader(train_data,shuffle=True,batch_size=12)
test_loader = DataLoader(test_data,batch_size=test_data.tensors[0].shape[0])
# check sizes of data batches
for X,y in train_loader:
print(X.shape,y.shape)
X,y
# a function that creates the ANN model
def createANewModel():
    """Build a fresh 4-64-64-3 feedforward classifier for the iris data.

    Returns a (model, loss function, optimizer) triple: an nn.Sequential
    network emitting raw logits for the 3 classes, CrossEntropyLoss, and
    SGD over the model's parameters with learning rate 0.01.
    """
    layers = [
        nn.Linear(4, 64),   # input layer: 4 features -> 64 units
        nn.ReLU(),          # activation unit
        nn.Linear(64, 64),  # hidden layer
        nn.ReLU(),          # activation unit
        nn.Linear(64, 3),   # output units (logits for 3 classes)
    ]
    ANNiris = nn.Sequential(*layers)

    lossfun = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(ANNiris.parameters(), lr=.01)
    return ANNiris, lossfun, optimizer
# train the model

# global parameter
numepochs = 500

def trainTheModel():
    """Train the global ANNiris model over mini-batches from train_loader.

    Returns (trainAcc, testAcc): per-epoch accuracies in percent; train
    accuracy is the mean over that epoch's batches, test accuracy uses the
    single full-size batch from test_loader.

    NOTE(review): relies on module-level globals (ANNiris, lossfun,
    optimizer, train_loader, test_loader, numepochs).
    """
    # initialize accuracies as empties (not storing losses here)
    trainAcc = []
    testAcc = []
    # loop over epochs
    for epochi in range(numepochs):
        # loop over training data batches
        batchAcc = []
        for X,y in train_loader:
            # forward pass and loss
            yHat = ANNiris(X)
            loss = lossfun(yHat,y)
            # backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # compute training accuracy just for this batch
            batchAcc.append( 100*torch.mean((torch.argmax(yHat,axis=1) == y).float()).item() )
        # end of batch loop...
        # now that we've trained through the batches, get their average training accuracy
        trainAcc.append( np.mean(batchAcc) )
        # test accuracy
        X,y = next(iter(test_loader)) # extract X,y from test dataloader
        predlabels = torch.argmax( ANNiris(X),axis=1 )
        testAcc.append( 100*torch.mean((predlabels == y).float()).item() )
    # function output
    return trainAcc,testAcc
# create a model
ANNiris,lossfun,optimizer = createANewModel()

# train the model (uses the dataloaders defined above)
trainAcc,testAcc = trainTheModel()

# plot the results: per-epoch train vs. test accuracy
fig = plt.figure(figsize=(10,5))

plt.plot(trainAcc,'ro-')
plt.plot(testAcc,'bs-')
plt.xlabel('Epochs')
plt.ylabel('Accuracy (%)')
plt.legend(['Train','Test'])

# optional zoom-in to final epochs
# plt.xlim([300,500])
# plt.ylim([90,100.5])

plt.show()
"""# 4. Train-Dev-Test"""
### create fake dataset (same as in previous videos)
fakedata = np.tile(np.array([1,2,3,4]),(10,1)) + np.tile(10*np.arange(1,11),(4,1)).T
fakelabels = np.arange(10)>4
print(fakedata), print(' ')
print(fakelabels)
# specify sizes of the partitions
# order is train,devset,test
partitions = [.8,.1,.1]
# split the data (note the third input, and the TMP in the variable name)
train_data,testTMP_data, train_labels,testTMP_labels = \
train_test_split(fakedata, fakelabels, train_size=partitions[0])
# now split the TMP data
split = partitions[1] / np.sum(partitions[1:])
devset_data,test_data, devset_labels,test_labels = \
train_test_split(testTMP_data, testTMP_labels, train_size=split)
# print out the sizes
print('Training data size: ' + str(train_data.shape))
print('Devset data size: ' + str(devset_data.shape))
print('Test data size: ' + str(test_data.shape))
print(' ')
# print out the train/test data
print('Training data: ')
print(train_data)
print(' ')
print('Devset data: ')
print(devset_data)
print(' ')
print('Test data: ')
print(test_data)
# --- same three-way split, done manually with NumPy indexing ---

# partition sizes in proportion
partitions = np.array([.8,.1,.1])
print('Partition proportions:')
print(partitions)
print(' ')

# convert those into integers (cumulative row boundaries into shuffled data)
partitionBnd = np.cumsum(partitions*len(fakelabels)).astype(int)
print('Partition boundaries:')
print(partitionBnd)
print(' ')

# random indices (a random permutation so the partitions are unbiased)
randindices = np.random.permutation(range(len(fakelabels)))
print('Randomized data indices:')
print(randindices)
print(' ')

# select rows for the training data
train_dataN = fakedata[randindices[:partitionBnd[0]],:]
train_labelsN = fakelabels[randindices[:partitionBnd[0]]]

# select rows for the devset data
devset_dataN = fakedata[randindices[partitionBnd[0]:partitionBnd[1]],:]
devset_labelsN = fakelabels[randindices[partitionBnd[0]:partitionBnd[1]]]

# select rows for the test data
test_dataN = fakedata[randindices[partitionBnd[1]:],:]
test_labelsN = fakelabels[randindices[partitionBnd[1]:]]

# print out the sizes
print('Training data size: ' + str(train_dataN.shape))
print('Devset size: ' + str(devset_dataN.shape))
print('Test data size: ' + str(test_dataN.shape))
print(' ')

# print out the train/test data
print('Training data: ')
print(train_dataN)
print(' ')
print('Devset data: ')
print(devset_dataN)
print(' ')
print('Test data: ')
print(test_dataN)
"""# Reference
These are codes learned from the Udemy
COURSE: A deep understanding of deep learning
TEACHER: Mike X Cohen, sincxpress.com
COURSE URL: udemy.com/course/dudl/?couponCode=202109
"""
|
[
"noreply@github.com"
] |
rzhang0716.noreply@github.com
|
110af786a26a7419a6d9df56b0aaf0f906a45537
|
c0b0041ceb7e7a291976ae08932e7ae927c3d95a
|
/mte/exeptions.py
|
00ef517ba985342d2786051ffdd79b663ca0cae1
|
[] |
no_license
|
jibingli/mte
|
07bba299f00f5cff28a09fc1b2d2b7d2fd859a06
|
5d3574aec0d31e6f45545dfc2579b5a653d399d1
|
refs/heads/master
| 2021-05-15T14:28:49.773588
| 2017-11-03T07:18:32
| 2017-11-03T07:18:32
| 107,210,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
# coding: utf-8
# Exception hierarchy for the mte package. Semantics below are inferred from
# class names; the classes are raised/caught by callers outside this module.


class MyBaseError(BaseException):
    # Root of the package hierarchy. NOTE(review): derives from BaseException,
    # so a plain `except Exception` will NOT catch these -- confirm intended.
    pass


class ParamsError(MyBaseError):
    # invalid or missing parameters (per the name)
    pass


class ResponseError(MyBaseError):
    # unexpected response outcome (per the name)
    pass


class ParseResponseError(MyBaseError):
    # failure while parsing a response (per the name)
    pass


class ValidationError(MyBaseError):
    # failed validation of a result (per the name)
    pass


# The *NotFound errors derive from NameError so generic NameError handlers
# also catch them.
class FunctionNotFound(NameError):
    pass


class VariableNotFound(NameError):
    pass


class ApiNotFound(NameError):
    pass


class SuiteNotFound(NameError):
    pass
|
[
"jibing.li@dianrong.com"
] |
jibing.li@dianrong.com
|
63c98a4d42c9af9236971f85199473a55734bed9
|
7c6b05dd2ea67c51d629ef70d3867a3d5ca1cb9a
|
/py/scrape8.py
|
ae87ebf176a016ca0c753e30c5c1ded749f059c9
|
[] |
no_license
|
Nick-Henderson/genre-from-lyrics
|
579fa135f5d859f0c1832d91e1dea68e8f9fadb9
|
efadbb1a2d2c4325b6666f45c818a18b47cba9f8
|
refs/heads/main
| 2023-02-10T12:34:39.300735
| 2021-01-12T20:29:50
| 2021-01-12T20:29:50
| 326,758,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
# Make HTTP requests
import requests
# Scrape data from an HTML document
from bs4 import BeautifulSoup
# I/O
import os
# Search and manipulate strings
import re
import pandas as pd
def scrape_song_lyrics(url):
    """Fetch and clean the lyrics from a Genius song page.

    Returns the lyrics flattened to a single space-joined string, or the
    sentinel string 'not found' when the page cannot be fetched or has no
    'lyrics' div (e.g. a 404 page).
    """
    try:
        page = requests.get(url)
        html = BeautifulSoup(page.text, 'html.parser')
        # find() returns None when the div is missing; .get_text() then
        # raises AttributeError, handled below
        lyrics = html.find('div', class_='lyrics').get_text()
        # remove identifiers like chorus, verse, etc
        lyrics = re.sub(r'[\(\[].*?[\)\]]', '', lyrics)
        # remove empty lines, then flatten to one line
        lyrics = os.linesep.join([s for s in lyrics.splitlines() if s])
        lyrics = lyrics.replace('\n', ' ')
        return lyrics
    except Exception:
        # fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; narrowed so those propagate while network/parse
        # failures still return the sentinel
        return 'not found'
# Build a Genius URL for each song from artist + title, then scrape rows
# 70000-80000 and save their lyrics.
info = pd.read_csv('~/Desktop/genre-from-lyrics/data/final_info.csv')
# URL scheme: https://genius.com/Artist-name-song-title-lyrics with spaces
# dashed and quotes/dots/ampersands normalized
info['url'] = 'https://genius.com/' + (info.artist.str.replace(' ','-').str.capitalize().str.replace("'", "") +
    '-' + info.title.str.replace(' ','-').str.lower().str.replace("'", "") + '-lyrics').str.replace('.','').str.replace('&','and')
# this script handles the 8th shard of the dataset (rows 70000-80000)
short = info.iloc[70000:80000]
short['lyrics'] = short.url.apply(scrape_song_lyrics)
short.to_csv('~/Desktop/genre-from-lyrics/data/lyrics8.csv')
|
[
"henderson.nicholasT@gmail.com"
] |
henderson.nicholasT@gmail.com
|
371ce404788449552f8fd540973c2da2197ceb76
|
888b8541150f498e15457d7c2f62545b6ed1e213
|
/first missing positive integer.py
|
dc89ad3d3191b2be7b143ed0e8e8fbaef7f3cc34
|
[] |
no_license
|
densaiko/coding-challenge
|
fb8d2f4a8524d293136d8752f9f57db7eecb3567
|
8f10cd8781e4747bb4dc9e44213e2b52bd6602d0
|
refs/heads/master
| 2022-08-03T17:00:02.467525
| 2020-06-01T00:11:54
| 2020-06-01T00:11:54
| 261,627,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
# First missing positive integer is one of the easy coding challenge in Leetcode, HackerRank and others.
# Here is the list of number [1, -2, 4, 2, 5, 7]
# You have to find the first missing positive integer in that list.
# The solution is so simple. I consider on the fastest solution based on time and space complexity:
# - Time complexity, I only use one variable to increment the integer. Once I got the first missing number, I stop incrementing
# - Space complexity, I don't new new list, dictionary or others to keep new integer
# Lets code:
class solution():
    def first_missing_positive_integer(self, nums):
        """Return the smallest positive integer not present in nums.

        Fixes two bugs in the original:
        - the loop stopped at len(nums)-1, so e.g. [1, 2, 3] returned None
          instead of 4; the answer always lies in 1..len(nums)+1
        - an empty list returned None instead of 1
        Also uses a set so membership tests are O(1) (O(n) total instead
        of the original O(n^2)).
        """
        seen = set(nums)
        # the answer is at most len(nums)+1 (when nums == [1..len(nums)])
        for i in range(1, len(nums) + 2):
            if i not in seen:
                return i
# demo: prints 3, the first positive integer missing from the list
nums = [1, -2, 4, 2, 5, 7]
print(solution().first_missing_positive_integer(nums))
|
[
"noreply@github.com"
] |
densaiko.noreply@github.com
|
6c0c0e121d69521b40dad3083246cc459574ef82
|
5a405dc212f91a75752d7987e32d5576a4ba71b2
|
/eor/models/sqlalchemy_base.py
|
2e2bf51dae728346af615f6623ab1fb74e45ff1c
|
[] |
no_license
|
pthorn/Eye-Of-Ra
|
58651c1eac9e1912bc68e48aa25153545200a707
|
959a049458de74fb9228acba1f074f3fa56f626f
|
refs/heads/master
| 2020-05-17T00:51:51.914658
| 2016-02-24T11:56:45
| 2016-02-25T11:57:02
| 23,795,644
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
# coding: utf-8
import logging
log = logging.getLogger(__name__)
import sqlalchemy
from sqlalchemy import engine_from_config
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from zope.sqlalchemy import ZopeTransactionExtension
import transaction
from ..config import config
class BaseMixin(object):
    """Convenience query/persistence helpers mixed into every declarative model."""

    @classmethod
    def get_by_id(cls, id):
        """Return the single row whose primary key equals *id* (raises if absent)."""
        query = Session().query(cls).filter(cls.id == id)
        return query.one()

    @classmethod
    def get_count(cls):
        """Return the total number of rows for this model."""
        return Session().query(cls).count()

    def add(self, flush=False):
        """Register this instance with the session; optionally flush immediately."""
        session = Session()
        session.add(self)
        if flush:
            session.flush()

    def delete(self, flush=False):
        """Mark this instance for deletion; optionally flush immediately."""
        session = Session()
        session.delete(self)
        if flush:
            session.flush()

    def expunge(self):
        """Detach this instance from the session without deleting it."""
        Session().expunge(self)
def get_declarative_base():
    """Build the declarative base class, honoring configured extra superclasses.

    config.sqlalchemy_base_superclasses may be empty/None, a single class,
    or a tuple of classes; BaseMixin is always appended as the last base.
    """
    configured = config.sqlalchemy_base_superclasses
    if not configured:
        bases = ()
    else:
        bases = configured if isinstance(configured, tuple) else (configured,)
    bases = bases + (BaseMixin,)
    log.debug('sqlalchemy base superclasses: %s', bases)

    # http://docs.sqlalchemy.org/en/latest/orm/extensions/declarative/mixins.html#augmenting-the-base
    # http://docs.sqlalchemy.org/en/latest/orm/extensions/declarative/api.html#sqlalchemy.ext.declarative.declarative_base
    return declarative_base(cls=bases)
# module-level singletons: the declarative base and the thread-local session
# factory. ZopeTransactionExtension ties the session lifecycle to the
# `transaction` package.
Base = get_declarative_base()
Session = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
def mask_password(url):
    """Return *url* with any password component replaced by '***' for safe logging."""
    import re
    return re.sub(r'//([^:/@]+):[^@]+@', r'//\1:***@', url)


def initialize_sqlalchemy(settings):
    """Create the engine from the `sqlalchemy.*` settings and bind Session/Base.

    Also logs library, driver and server versions for diagnostics.
    """
    def pgsql_version():
        res = Session().connection().execute("select version()").fetchone()[0]
        transaction.abort()  # don't leave the version query's transaction open
        return res

    def psycopg2_version():
        import psycopg2
        return psycopg2.__version__

    engine = engine_from_config(settings, 'sqlalchemy.')
    Session.configure(bind=engine)
    Base.metadata.bind = engine
    # fix: previously logged the raw URL, leaking the DB password into the log
    # (the original code's own TODO flagged this)
    log.info('sqlalchemy version %s configured with url %s' % (sqlalchemy.__version__, mask_password(settings['sqlalchemy.url'])))
    log.info('driver: psycopg2 %s' % psycopg2_version())
    log.info('connected to database: %s', pgsql_version())
|
[
"p.thorn.ru@gmail.com"
] |
p.thorn.ru@gmail.com
|
7223483feb8d9ce151a8390f2c7ab38831abbdff
|
aec4eaad5bcfe98fdef38f57afeadd170370827b
|
/test/test_git_blob_response.py
|
bb48f823ba9654ae306d3baf6cc1c4705f05e77b
|
[] |
no_license
|
mastermanas805/giteapy
|
d2d0810a45ba4ab2c15711eae62cc66698bafbc2
|
001e9b66795a6d34146c8532e9d8e648d5b93e59
|
refs/heads/master
| 2023-06-02T07:33:01.136554
| 2020-01-14T11:53:36
| 2020-01-14T11:53:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import giteapy
from giteapy.models.git_blob_response import GitBlobResponse # noqa: E501
from giteapy.rest import ApiException
class TestGitBlobResponse(unittest.TestCase):
    """Unit-test stubs for the GitBlobResponse model (generated scaffold)."""

    def setUp(self):
        """No fixtures required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testGitBlobResponse(self):
        """Placeholder test for GitBlobResponse construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = giteapy.models.git_blob_response.GitBlobResponse()  # noqa: E501
if __name__ == '__main__':
unittest.main()
|
[
"rajesh.p.hegde@gmail.com"
] |
rajesh.p.hegde@gmail.com
|
fbf9dd1ed06fc32111e4f6654332c0b7ca5c45af
|
e6ef20050f7e4167719a26ac78a2830014ccdc0a
|
/sistema_tesis/urls.py
|
8696cc2c17e597e445bcfe1f2726f67b339217a6
|
[] |
no_license
|
danielmesa20/sistema_dase
|
d4b559c8582edd3d5975ef25106c8800c8577e44
|
0977ef3b7bb5ebe3ac574b42454f6083856ca66d
|
refs/heads/master
| 2020-12-08T02:21:40.673357
| 2020-01-26T02:16:25
| 2020-01-26T02:16:25
| 232,858,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
"""sistema_tesis URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from DASE.views import home
urlpatterns = [
path('', home),
path('admin/', admin.site.urls),
path('DASE/', include('DASE.urls')),
path('GRAPHICS/', include('graphics.urls')),
path('IMPORT/', include('export_import.urls')),
path('AUTH/', include('authentication.urls')),
]
|
[
"daniel.mesa@correo.unimet.edu.ve"
] |
daniel.mesa@correo.unimet.edu.ve
|
926bfb02f0293ab529bb2bd939bcdd2cd936118c
|
06b5d50d92af07dc7c2d9cc24922a2a906f7b88c
|
/church/main/models.py
|
e40ee931c0616fe4fd17e52e097f5659a351b2e3
|
[] |
no_license
|
ShehanHD/Django
|
4fe6d841e38450b028765cc84bbe7b99e65b9387
|
bc855c16acad5d8f1f5a24dc68438749704935fd
|
refs/heads/master
| 2021-05-20T12:57:36.980701
| 2020-04-25T13:03:43
| 2020-04-25T13:03:43
| 252,305,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,877
|
py
|
from django.db import models
from datetime import datetime
from ckeditor.fields import RichTextField
# Create your models here.
class event(models.Model):
    # single-field lookup table; referenced by services and gallery via FK
    Event = models.CharField( max_length=50)

    def __str__(self):
        return self.Event
class services(models.Model):
    """Service record: image, day label, linked event, date, text, active flag."""
    img = models.ImageField(upload_to='pics/services', blank=True, default='pics/about/default.jpg')
    day = models.CharField(max_length=50)
    event = models.ForeignKey(
        event, default=1, verbose_name="Event", on_delete=models.CASCADE)
    # fix: was default=datetime.now() -- the call ran ONCE at import time, so
    # every new row was stamped with the server start time. Passing the
    # callable makes Django evaluate it per-row at save time.
    date = models.DateTimeField("date", default=datetime.now)
    description = RichTextField(max_length=100)
    state = models.BooleanField(default=True)

    class Meta:
        verbose_name_plural = "Services"

    def __str__(self):
        return self.day
class gallery(models.Model):
    """Gallery image with description, main-page flag, and a linked event."""
    img = models.ImageField(upload_to='pics/gallery', default='pics/gallery/default.jpg')
    description = RichTextField()
    onMainPage = models.BooleanField(default=False)
    event = models.ForeignKey(
        event, default=1, verbose_name="Event", on_delete=models.CASCADE)

    class Meta:
        verbose_name_plural = "Galleries"

    def __str__(self):
        # fix: the original returned self.album, but no `album` field exists,
        # so every str()/admin listing raised AttributeError. Use the related
        # event's name instead.
        return str(self.event)
class about(models.Model):
    # "about" entry: image, timeline label, title and rich-text body
    img = models.ImageField(upload_to='pics/about', blank=True, default='pics/about/default.jpg')
    timeLine = models.CharField(max_length=50)
    title = models.CharField(max_length=50)
    description = RichTextField()

    def __str__(self):
        return self.title
class team(models.Model):
    # team member profile with social links
    img = models.ImageField(upload_to='pics/team', blank=True, default='pics/team/default.png')
    name = models.CharField(max_length=50)
    position = models.CharField(max_length=50)
    # NOTE(review): 'twiter' is misspelled, but renaming the field changes the
    # DB column and requires a migration -- kept as-is
    twiter = models.URLField(max_length=256)
    facebook = models.URLField(max_length=256)

    def __str__(self):
        return self.name
|
[
"51677228+ShehanHD@users.noreply.github.com"
] |
51677228+ShehanHD@users.noreply.github.com
|
d3f56ad470fc6b8eee4653ef42415876f7527f1c
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/pose_estimation/Hourglass_for_PyTorch/mmpose-master/docs/conf.py
|
0275ff35cbed20eb780a081e77423b8fec306ee0
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,281
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'MMPose'
copyright = '2020, MMPose Authors'
author = 'MMPose Authors'
# The full version, including alpha/beta/rc tags
version_file = '../mmpose/version.py'
def get_version():
    """Read __version__ from the version file without importing the package.

    Executes the file in an isolated namespace dict. The original used bare
    exec() followed by locals()['__version__'], which depends on CPython's
    handling of exec inside a function scope; an explicit namespace is the
    documented, portable way to retrieve names defined by exec().
    """
    ns = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), ns)
    return ns['__version__']
release = get_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode',
'recommonmark', 'sphinx_markdown_tables'
]
autodoc_mock_imports = ['json_tricks', 'mmpose.version']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
master_doc = 'index'
def builder_inited_handler(app):
    # regenerate merged docs and statistics before Sphinx reads the sources
    subprocess.run(['./merge_docs.sh'])
    subprocess.run(['./stat.py'])
def setup(app):
    # Sphinx extension hook: run the doc-generation scripts once the builder is ready
    app.connect('builder-inited', builder_inited_handler)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
942a568fad26ba3a231acd697bdf6e42c49421a4
|
c0d670be24ffdf6e885d3d17cd4b263792cba134
|
/make_scaled_rms.py
|
eea87f93dde9bb2949e1ac9154f3ab5a7d38b67e
|
[] |
no_license
|
nhurleywalker/sf_spectral_lines
|
5d3ed9f28cba5d64663d053f778b02f22f87b178
|
9ccac2ed73ac757804fa2667f42b56b3495b6a16
|
refs/heads/master
| 2020-07-09T08:03:35.914661
| 2020-04-29T05:21:48
| 2020-04-29T05:21:48
| 203,921,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
#!/usr/bin/env python
from astropy.io import fits
import numpy as np
import sys
print "make_scaled_rms.py <input cube file> <slice number> <spectral rms file>"
infits = sys.argv[1]
inslice = sys.argv[2]
hdu = fits.open(infits)
subset = hdu[0].data[:, int(inslice)-1, 1000:1250, 1000:1250]
rms = np.nanstd(subset)
hdu_spec = fits.open(sys.argv[3])
subset_spec = hdu_spec[0].data[:, 1000:1250, 1000:1250]
rms_spec = np.nanmean(subset_spec)
ratio = rms / rms_spec
hdu_spec[0].data *= ratio
with open('ratios.txt', 'a') as the_file:
the_file.write("{0} {1}\n".format(inslice,ratio))
hdu_spec.writeto(infits.replace(".fits", "_"+inslice+"_rms.fits"))
|
[
"nhw@icrar.org"
] |
nhw@icrar.org
|
726a7d0da4908e19868bd2b398f98e7fe59590ab
|
025b2a1db533b9cda0d0765d23389eecced55c06
|
/models/FCSMNet.py
|
8df6697a90088235a2401f29c1a6a28a66c42a7a
|
[] |
no_license
|
yokosyun/FCSMNet_LR
|
9d9504c0e4a097c65dfb1a318b78e3ff4c81b0a5
|
c48d0011f5ac281ceb80bd7878405ab1b20e72ac
|
refs/heads/main
| 2023-05-15T09:42:21.515550
| 2021-01-26T00:48:52
| 2021-01-26T00:48:52
| 331,175,051
| 0
| 0
| null | 2021-01-26T00:48:53
| 2021-01-20T02:56:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,751
|
py
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from submodule import *
import sys
sys.path.append('../')
from utils.activations_autofn import MishAuto
from utils.selectedNorm import *
Act = nn.ReLU
# Act = SwishAuto
# Act = MishAuto
class FCSMNet(nn.Module):
    """Stereo network regressing left and right disparity maps.

    Pipeline (see forward): shared (siamese) feature extraction on both
    views, channel-wise concatenation, a 3-level encoder/decoder with four
    parallel dilated convolutions at the bottleneck, and a 2-channel head
    whose sigmoid output is scaled to [0, maxdisp].
    """

    def __init__(self, maxdisp):
        super(FCSMNet, self).__init__()
        # maximum disparity; scales the sigmoid output in disparity_regression
        self.maxdisp = maxdisp
        # shared feature extractor (defined in submodule.py)
        self.feature_extraction = feature_extraction()
        self.maxpool = nn.MaxPool2d(2)

        in_channels = 64
        # encoder level 1: 64 -> 128 channels
        self.down1 = nn.Sequential(
            nn.Conv2d(in_channels, in_channels*2, kernel_size=3, padding=1),
            SelectedNorm(in_channels*2),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels*2, in_channels*2, kernel_size=3, padding=1),
            SelectedNorm(in_channels*2),
            nn.ReLU(inplace=True)
        )
        # encoder level 2: 128 -> 256 channels
        self.down2 = nn.Sequential(
            nn.Conv2d(in_channels*2, in_channels*4, kernel_size=3, padding=1),
            SelectedNorm(in_channels*4),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels*4, in_channels*4, kernel_size=3, padding=1),
            SelectedNorm(in_channels*4),
            nn.ReLU(inplace=True)
        )

        # bottleneck: four parallel 3x3 convs with dilations 1/3/5/7 (padding
        # matches the dilation, keeping spatial size); each maps 256 -> 64
        dilation = 1
        pad = 1
        self.bottom_11 = nn.Sequential(
            nn.Conv2d(in_channels*4, in_channels, kernel_size=3, stride=1, padding=dilation if dilation > 1 else pad, dilation = dilation, bias=False),
            SelectedNorm(in_channels),
            nn.ReLU(inplace=True),
        )
        dilation = 3
        self.bottom_12 = nn.Sequential(
            nn.Conv2d(in_channels*4, in_channels, kernel_size=3, stride=1, padding=dilation if dilation > 1 else pad, dilation = dilation, bias=False),
            SelectedNorm(in_channels),
            nn.ReLU(inplace=True),
        )
        dilation = 5
        self.bottom_13 = nn.Sequential(
            nn.Conv2d(in_channels*4, in_channels, kernel_size=3, stride=1, padding=dilation if dilation > 1 else pad, dilation = dilation, bias=False),
            SelectedNorm(in_channels),
            nn.ReLU(inplace=True),
        )
        dilation = 7
        self.bottom_14 = nn.Sequential(
            nn.Conv2d(in_channels*4, in_channels, kernel_size=3, stride=1, padding=dilation if dilation > 1 else pad, dilation = dilation, bias=False),
            SelectedNorm(in_channels),
            nn.ReLU(inplace=True),
        )
        # fuse bottleneck input (256) + four dilated branches (4*64) = 512 ch
        self.bottom_fuse = nn.Sequential(
            nn.Conv2d(in_channels*8, in_channels*4, kernel_size=3, padding=1),
            SelectedNorm(in_channels*4),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels*4, in_channels*4, kernel_size=1, padding=0),
            SelectedNorm(in_channels*4)
        )

        # decoder level 2: upsampled bottleneck (256) + skip from down2 (256)
        self.up2 = nn.Sequential(
            nn.Conv2d(in_channels*8, in_channels*4, kernel_size=3, padding=1),
            SelectedNorm(in_channels*4),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels*4, in_channels*2, kernel_size=3, padding=1),
            SelectedNorm(in_channels*2),
            nn.ReLU(inplace=True)
        )
        # decoder level 1: upsampled up2 (128) + skip from down1 (128)
        self.up1 = nn.Sequential(
            nn.Conv2d(in_channels*4, in_channels*2, kernel_size=3, padding=1),
            SelectedNorm(in_channels*2),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels*2, in_channels, kernel_size=3, padding=1),
            SelectedNorm(in_channels),
            nn.ReLU(inplace=True)
        )

        # head: 2 output channels = (left disparity, right disparity)
        self.classify = nn.Sequential(nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv2d(in_channels, 2, kernel_size=3, padding=1))

    def estimate_disparity(self, cost):
        """Run the encoder/decoder over the concatenated feature volume.

        cost: 64-channel 4D feature map (per down1's input channels); assumes
        H and W are divisible by 4 so the two pool/upsample rounds align --
        TODO confirm. Returns a 64-channel map at the input resolution.
        """
        down1 = self.down1(cost)
        down2 = self.maxpool(down1)      # 1/2 resolution
        down2 = self.down2(down2)
        bottom_1 = self.maxpool(down2)   # 1/4 resolution
        # four parallel dilated branches over the same bottleneck input
        bottom_11 = self.bottom_11(bottom_1)
        bottom_12 = self.bottom_12(bottom_1)
        bottom_13 = self.bottom_13(bottom_1)
        bottom_14 = self.bottom_14(bottom_1)
        bottom_out = torch.cat([bottom_1 ,bottom_11, bottom_12,bottom_13,bottom_14], axis=1)
        bottom_out = self.bottom_fuse(bottom_out)
        # decoder with concatenation skip connections (U-Net style)
        up2 = F.interpolate(bottom_out, size=None, scale_factor=2, mode='bilinear', align_corners=None)
        up2 = torch.cat([up2, down2], axis=1)
        up2 = self.up2(up2)
        up1 = F.interpolate(up2, size=None, scale_factor=2, mode='bilinear', align_corners=None)
        up1 = torch.cat([up1, down1], axis=1)
        up1 = self.up1(up1)
        return up1

    def disparity_regression(self, input, height, width):
        """Map decoded features to two disparity maps resized to (height, width).

        Sigmoid bounds each pixel to (0, 1); multiplying by self.maxdisp
        yields disparities in (0, maxdisp).
        """
        lr_disp = self.classify(input)
        lr_disp = torch.sigmoid(lr_disp)
        lr_disp = lr_disp * self.maxdisp
        left_disp = lr_disp[:,0,:,:]
        right_disp = lr_disp[:,1,:,:]
        # indexing dropped the channel dim (result is 3D); re-add a leading
        # dim so upsample receives 4D input. NOTE(review): unsqueeze(0) makes
        # the batch dim 1 regardless of N -- verify for batch size > 1.
        if left_disp.ndim ==3:
            left_disp = torch.unsqueeze(left_disp,0)
            right_disp = torch.unsqueeze(right_disp,0)
        # NOTE(review): F.upsample is deprecated in favor of F.interpolate
        left_disp = F.upsample(left_disp, [height,width],mode='bilinear')
        right_disp = F.upsample(right_disp, [height,width], mode='bilinear')
        return left_disp,right_disp

    def forward(self, left, right):
        """Predict (left, right) disparity maps for a rectified stereo pair."""
        left_feature = self.feature_extraction(left)
        right_feature = self.feature_extraction(right)
        # concatenate the two views' features along the channel axis
        lr_feature = torch.cat([left_feature, right_feature], axis=1)
        up1 = self.estimate_disparity(lr_feature)
        pred_left,disp_right = self.disparity_regression(up1,left.size()[2],left.size()[3])
        return pred_left,disp_right
|
[
"yoko.syun.1408@gmail.com"
] |
yoko.syun.1408@gmail.com
|
c26e4f66872c3610637b93a586e13eaf42f13c53
|
f2e55a3ee86ece26035755141f34633e8110c27d
|
/misc_test.py
|
21b4be5f81e032451a6e9b10b057737273036959
|
[
"MIT"
] |
permissive
|
seanstappas/qbert-reinforcement-learning
|
2449372f508d4b9e274768321d1906b20809d6c4
|
3d9c8b0821ba6df07d1711c0199a6e876ebc4ad7
|
refs/heads/master
| 2021-03-30T17:53:13.865786
| 2017-11-07T04:54:53
| 2017-11-07T04:54:53
| 107,693,096
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,440
|
py
|
import numpy as np
from csv_utils import save_to_csv, read_from_csv
from pickler import save_to_pickle, load_from_pickle
from plotter import plot_scores
# Two identical 6-row ragged "triangle" tables (row i holds i+1 zeros),
# used as inputs by numpy_multiply() below.
INITIAL_PARAMETERS1 = [
    [0],
    [0, 0],
    [0, 0, 0],
    [0, 0, 0, 0],
    [0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0],
]

INITIAL_PARAMETERS2 = [
    [0],
    [0, 0],
    [0, 0, 0],
    [0, 0, 0, 0],
    [0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0],
]
def numpy_equality():
    """Demo: np.array_equal treats tuples, lists and ndarrays alike."""
    probe = (0, 0, 0)
    print(np.array_equal(probe, [0, 0, 0]))
    print(np.array_equal(probe, np.array([0, 0, 0])))
def numpy_multiply():
    # NOTE(review): the parameter tables are ragged (rows of different
    # lengths); modern NumPy refuses to build a uint8 array from them, so
    # this likely only ran on old NumPy (object arrays) -- confirm intent.
    a = np.array(INITIAL_PARAMETERS1, dtype=np.uint8)
    b = np.array(INITIAL_PARAMETERS2, dtype=np.uint8)
    print(a * b)
def test_dicts_and_lists():
    """Demo: tuples (including nested tuples) are hashable dict keys."""
    mapping = {}
    simple_key = (1, 2, 3)
    mapping[simple_key] = 'a'
    print(mapping[(1, 2, 3)])
    nested_key = (3, 4), (1, 2, 3)
    mapping[nested_key] = 'b'
    print(mapping[(3, 4), (1, 2, 3)])
def test_return_none(param):
if param is 0:
return 'ZERO'
if param is 1:
return 'ONE'
if param is 2:
return None
def test_pickle():
    # round-trip a dict (tuple keys, -inf value) through the local pickler module
    q = {(1, 2): 5, (5, 6): 10, 4: float('-inf')}
    print(q)
    save_to_pickle(q, 'test')
    q2 = load_from_pickle('test')
    print(q2)  # expected to print the same mapping as above
def test_plot():
    # smoke test for plotter.plot_scores with a small score list
    plot_scores([1, 10, 6, 3], 'test_plot')
def test_csv():
    # round-trip a list of ints through the csv_utils save/read helpers
    lst = [4, 0, 19, 300, 20, 1]
    filename = 'test'
    save_to_csv(lst, filename)
    lst2 = read_from_csv(filename)
    print(lst2)  # NOTE(review): values may come back as strings -- verify csv_utils
if __name__ == '__main__':
test_csv()
|
[
"seanstappas@gmail.com"
] |
seanstappas@gmail.com
|
3f4047b169d4bd87b9690e9e7255d65e465edd15
|
a259ef5aa97b04dba9dc300c9f07cd92532db9df
|
/mailing/views.py
|
ebc5a7100199be2e8b38ed42f15711a57fea4414
|
[] |
no_license
|
Pavan-Red-E/ubuntu
|
c73102ecbdce97438660cbc0ccf25e46c6e42f90
|
4d196d91c48dcc7a8991134597c22e8bc6368b97
|
refs/heads/master
| 2022-12-11T08:53:28.359750
| 2020-02-22T11:23:29
| 2020-02-22T11:23:29
| 242,320,880
| 0
| 0
| null | 2022-12-08T03:39:57
| 2020-02-22T10:20:06
|
CSS
|
UTF-8
|
Python
| false
| false
| 462
|
py
|
from django.shortcuts import render
from django.core.mail import send_mail
from django.conf import settings
# Create your views here.
def email(request):
    """Send a registration thank-you email, then redirect.

    Fixes a NameError: the original called redirect() but only render was
    imported at module level.
    """
    from django.shortcuts import redirect  # fix: name was never imported
    subject = 'Thank you for registering to our site'
    message = 'it means a world to us '
    email_from = settings.EMAIL_HOST_USER
    recipient_list = ['samantakmitra98@gmail.com',]
    send_mail( subject, message, email_from, recipient_list )
    # NOTE(review): 'redirect to a new page' is a placeholder -- redirect()
    # needs a real URL, view name, or model to resolve
    return redirect('redirect to a new page')
|
[
"pavanreddykandula56@gmail.com"
] |
pavanreddykandula56@gmail.com
|
468152c3e796a1614a72459477e6f8770924b024
|
fafbdf6c37caf5a0cbb74e0bad1f18dfeb3f1692
|
/ch3/fig3-13.py
|
4b94babf4a2a06c4d09b39e45aa9fa581e18e420
|
[
"MIT"
] |
permissive
|
qwerty239qwe/BEBI-5009
|
e09c9f32e2e6e333160920ac929c7f81e266fbe0
|
6265d2912db5b26aea57d8f09439657fae9b76be
|
refs/heads/master
| 2021-04-06T20:31:11.970801
| 2018-03-15T04:06:01
| 2018-03-15T04:06:01
| 125,300,068
| 0
| 0
| null | 2018-03-15T02:13:57
| 2018-03-15T02:13:57
| null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
"""Figure 3.13: Comparions of GMA and Michaelis-Menten rate laws"""
#%%
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(0, 4)
plt.plot(t, 2 * t / (1 + t), 'k', linewidth=3, label='Michaelis-Menten')
plt.plot(t, t**0.4, 'k--', linewidth=2, label='GMA')
plt.xlabel('Substrate concentration (arbitrary units)')
plt.ylabel('Reaction rate (arbitrary units)')
plt.legend(loc='best')
|
[
"sosiristseng@gmail.com"
] |
sosiristseng@gmail.com
|
eac3d4a782335a1f9e3a436649c4379eb329ed8c
|
2371c3a56bc7daa8b40d3701ca43b714680fdee4
|
/cbtstudy/asgi.py
|
efe5cce2e2936409380b2c6ca01e7530a02513d0
|
[] |
no_license
|
poteto1212/cbtstudy
|
5ba2d0c7e51fba715ae86c9c1fb781ad27cd9e62
|
a5e07c89d9aa40572471403d6193c4298bc372d6
|
refs/heads/master
| 2023-05-18T01:58:48.661487
| 2021-06-10T23:55:08
| 2021-06-10T23:55:08
| 374,130,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
ASGI config for cbtstudy project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cbtstudy.settings')
application = get_asgi_application()
|
[
"you@example.com"
] |
you@example.com
|
477d1c6e00499b43ef2f5bb2ab93e46cd296fbc5
|
5baff1ec4b5db1c1001c150d73b1fe78caf45b34
|
/database_setup.py
|
c99e235fe02f58c28e0047bc84e90497947967c4
|
[] |
no_license
|
judyzaratan/clothing_cat
|
10f0c319e628eca2014e30cb7f8f36d7637449c6
|
2232460fc47e221c6726800b4c11f24fb0304b02
|
refs/heads/master
| 2021-06-24T12:52:15.197338
| 2017-09-07T19:29:34
| 2017-09-07T19:29:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
Base = declarative_base()
class User(Base):
    # registered user; referenced by Category.user_id and Item.user_id
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    name = Column(String(32), nullable=False)
    email = Column(String(32), nullable=False)
    picture = Column(String(150))  # optional (nullable) picture reference
class Category(Base):
    """A category owned by a user, holding a collection of Item rows."""
    __tablename__ = 'category'

    id = Column(Integer, primary_key=True)
    name = Column(String(32), nullable=False)
    user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
    user = relationship(User)
    # Backref gives each Item a `.category` attribute pointing back here.
    items = relationship("Item", backref="category")

    @property
    def serialize(self):
        """Return object data in serialized format"""
        return {
            'id': self.id,
            'name': self.name,
            'Item': [entry.serialize for entry in self.items],
        }
class Item(Base):
    """A single catalog item belonging to one category and one user."""
    __tablename__ = 'item'
    id = Column(Integer, primary_key=True)
    name = Column(String(20), nullable=False)
    description = Column(String)
    category_id = Column(Integer, ForeignKey('category.id'),nullable=False)
    # category = relationship(Category)
    user_id = Column(Integer, ForeignKey('user.id'),nullable=False)
    user = relationship(User)
    @property
    def serialize(self):
        """Return object data in serialized format."""
        return {
            'name': self.name,
            'description': self.description,
            'id': self.id,
            # `self.category` comes from the backref declared on Category.items.
            'category': self.category.name,
            'created_by': self.user.name
        }
# Create (or open) the SQLite database file and materialize all tables above.
engine = create_engine('sqlite:///catalog_with_user.db')
Base.metadata.create_all(engine)
|
[
"mochicat8@gmail.com"
] |
mochicat8@gmail.com
|
df31dd5ada4fedb8ca1246e14d401d81113a57db
|
74675a3ca63eb6cf8f1de989eef5c7b2a892ea4c
|
/xpath爬取百度好听轻音乐.py
|
9c82dfbc7feaed48f16fa656cd58295acea229d6
|
[] |
no_license
|
michael-wy/Reptile
|
19285cd6e814f5ad524ace4b421f5bc83d35ef53
|
b9979c14f3f2fee511c442f48a6115442c717f59
|
refs/heads/master
| 2021-03-11T13:32:01.669819
| 2020-03-11T14:15:48
| 2020-03-11T14:15:48
| 246,532,991
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,857
|
py
|
# Scrape the "hot" light-music playlist on htqyy.com and download every track
# as an mp3.  (Comments translated from Chinese; runtime strings unchanged.)
import requests
from lxml import etree
import re
import time

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36"
}
songID = []    # track sids (server-side ids used in the download URL)
songName = []  # track titles
songNamee = [] # track artists

# Collect sid / title / artist from the first two playlist pages (20 per page).
for i in range(0, 2):
    url = "http://www.htqyy.com/top/musicList/hot?pageIndex=" + str(i) + "&pageSize=20"
    response = requests.get(url, headers=headers)
    #print(response)
    r = response.content.decode()
    #print(r)
    html = etree.HTML(response.content.decode())
    #print(html)
    # track titles
    namelist = html.xpath('//ul/li/span/a[@target="play"]/./@title')
    # track sids
    idlist = html.xpath('//ul/li/span/a[@target="play"]/./@sid')
    # artist names
    nameelist = html.xpath('//ul[@id="musicList"]/li/span[@class="artistName"]/a[@target="_blank"]/./@title')
    #print(pat1)
    #print(pat2)
    #print(pat3)
    songID.extend(idlist)
    songName.extend(namelist)
    songNamee.extend(nameelist)
#print(songID)
#print(songName)
#print(songNamee)

# Download each track to a hard-coded Windows path; the print message means
# "fetching track N".  Sleeps 0.5s between downloads to stay polite.
for i in range(0, len(songID)):
    songurl = "http://f2.htqyy.com/play7/" + str(songID[i]) + "/mp3/7"
    #print(songurl)
    songname = songName[i]
    songnamee = songNamee[i]
    musicc = requests.get(songurl).content
    print("正在抓取第", i + 1, "首", songname)
    with open(r"C:\Users\wy\Desktop\sublime\百度好听轻音乐\music\{},{}.mp3".format(songnamee, songname), "wb") as f:
        f.write(musicc)
    time.sleep(0.5)
# example track download links:
#http://f2.htqyy.com/play7/33/mp3/7
#http://f2.htqyy.com/play7/62/mp3/7
# playlist homepage: http://www.htqyy.com/top/hot
# playlist page 1: http://www.htqyy.com/top/musicList/hot?pageIndex=0&pageSize=20
# playlist page 2: http://www.htqyy.com/top/musicList/hot?pageIndex=1&pageSize=20
# playlist page 3: http://www.htqyy.com/top/musicList/hot?pageIndex=2&pageSize=20
|
[
"254208240@qq.com"
] |
254208240@qq.com
|
4dedc11bbcb4cf48033088f8cb65e49f59faa0ed
|
a27e12f7c1c53fa246b7e806724124266459379a
|
/2020-d9.py
|
4d6245551524d8449d46a7ec6a6219d19c46516e
|
[] |
no_license
|
budidino/AoC-python
|
f5fb2e9f3c1e1c341b123182fdcfefee7a74603f
|
9e10db2dcbb59808614237389ebfb9745ece078b
|
refs/heads/master
| 2021-07-09T15:21:14.680716
| 2020-12-18T23:23:41
| 2020-12-18T23:23:41
| 225,146,155
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 930
|
py
|
INPUT = "2020-d9.txt"
numbers = [int(line.rstrip('\n')) for line in open(INPUT)]
from itertools import combinations
def isValid(numbers, number):
    """Return True when some pair of distinct entries of *numbers* sums to *number*."""
    return any(first + second == number
               for first, second in combinations(numbers, 2))
def findSuspect(numbers, preamble):
    """Return the first entry that is NOT the sum of two of the preceding
    *preamble* entries; return 0 when every entry validates."""
    for pos in range(preamble, len(numbers)):
        window = numbers[pos - preamble:pos]
        candidate = numbers[pos]
        if not isValid(window, candidate):
            return candidate
    return 0
def findWeakness(numbers, suspect):
    """Slide a contiguous window until it sums to *suspect*; return the
    window's minimum plus its maximum."""
    lo = 0
    hi = 1
    running = numbers[lo] + numbers[hi]
    while running != suspect:
        if running < suspect:
            # Window sum too small: grow it on the right.
            hi += 1
            running += numbers[hi]
        else:
            # Window sum too large: shrink it from the left.
            running -= numbers[lo]
            lo += 1
    segment = numbers[lo:hi + 1]
    return min(segment) + max(segment)
# Part 1: first number that is not a sum of two of the previous 25.
suspect = findSuspect(numbers, 25)
print(f"part 1: {suspect}") # 257342611
# Part 2: min+max of the contiguous run summing to the part-1 answer.
weakness = findWeakness(numbers, suspect)
print(f"part 2: {weakness}") # 35602097
|
[
"budidino@gmail.com"
] |
budidino@gmail.com
|
59295628eac2b05f473fb7d9ec72948296bdf797
|
d03c7dda8d053530b0df136a6a43e4d876742386
|
/expedientes/migrations/0001_initial.py
|
381a7c61cbf0390b737abc3f4ff8e7aec452d37a
|
[] |
no_license
|
ConsultarPlus/Modelo_Andres
|
89ee0a59ddef30a3b5cab0e05cf58b183044152d
|
726a78155403de5fc537ae83d6a04125b1a0e4ba
|
refs/heads/master
| 2022-03-15T10:58:24.068449
| 2019-12-14T15:43:00
| 2019-12-14T15:43:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
# Generated by Django 2.2.7 on 2019-11-18 17:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Expedientes`` table."""

    initial = True

    # No prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Expedientes',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('fecha_inicio', models.DateField()),
                ('user', models.CharField(max_length=30)),
                ('observacion', models.CharField(max_length=255)),
            ],
        ),
    ]
|
[
"software@consultar.org"
] |
software@consultar.org
|
d160d661f6c4804357472bf27bc7cb347402704b
|
35f93b1c89e68f776e21a57307c132e8cb5f9131
|
/validator/atompubbase/tests/pretty-test.py
|
b86262844a05de4c3a11668b76b446077be547e8
|
[] |
no_license
|
myblup/atompubbase
|
a0d1cff6fab39d557cf6a56fedfa9f03df6d22e6
|
6b25900bbda2e05592011d75f43e63e401706b67
|
refs/heads/master
| 2020-12-28T22:01:02.993148
| 2013-04-08T13:39:45
| 2013-04-08T13:39:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
from pretty import pretty
try:
from xml.etree.ElementTree import fromstring, tostring
except:
from elementtree.ElementTree import fromstring, tostring
src = """<html:div xmlns:html="http://www.w3.org/1999/xhtml">
<html:p >I took a couple of days off work
and we drove down to Florida to visit family in "The Villages",
a 55+ golf cart community that currently has about 50,000 residents.
</html:p>
<html:p xmlns:html="http://www.w3.org/1999/xhtml">That is not a typo. Check out the <html:a href="http://en.wikipedia.org/wiki/The_Villages">wikipedia</html:a> <html:a href="http://en.wikipedia.org/wiki/The_Villages%2C_Florida">entries</html:a>.
</html:p>
<html:p xmlns:html="http://www.w3.org/1999/xhtml">On Monday we went out to feed the ducks at a nearby pond, but well fed
by everyone else, they weren't interested in our bread. Instead the bread was
attacked from below by the fish in the pond, which wasn't very interesting, that is, until
a local heron came over and started feasting on the fish we'd attracted. There's nothing
like the sight of a still living fish wiggling down the throat of a heron to make
a young boy's day.
</html:p>
<html:table style="width: 194px;" xmlns:html="http://www.w3.org/1999/xhtml"><html:tr><html:td align="center" style="height: 194px;"><html:a href="http://picasaweb.google.com/joe.gregorio/TheVillagesFlorida"><html:img height="160" src="http://lh6.google.com/joe.gregorio/RoK-XGNIkuE/AAAAAAAAAA8/ePqbYyHlxvU/s160-c/TheVillagesFlorida.jpg" style="margin: 1px 0 0 4px;" width="160" /></html:a></html:td></html:tr><html:tr><html:td style="text-align: center; font-family: arial,sans-serif; font-size: 11px;"><html:a href="http://picasaweb.google.com/joe.gregorio/TheVillagesFlorida" style="color: #4D4D4D; font-weight: bold; text-decoration: none;">The Villages, Florida</html:a></html:td></html:tr>
</html:table>
</html:div>"""
print pretty(fromstring(src))
|
[
"strycore@gmail.com"
] |
strycore@gmail.com
|
cf25c6e6015646681869607d42c0638e8d9c3e7b
|
1a166165ab8287d01cbb377a13efdb5eff5dfef0
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/aio/operations/_network_security_groups_operations.py
|
8a70c96f53bcefee3a26a39802d0d9277be36791
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
manoj0806/azure-sdk-for-python
|
7a14b202ff80f528abd068bf50334e91001a9686
|
aab999792db1132232b2f297c76800590a901142
|
refs/heads/master
| 2023-04-19T16:11:31.984930
| 2021-04-29T23:19:49
| 2021-04-29T23:19:49
| 363,025,016
| 1
| 0
|
MIT
| 2021-04-30T04:23:35
| 2021-04-30T04:23:35
| null |
UTF-8
|
Python
| false
| false
| 30,507
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` response hook accepted by every operation.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkSecurityGroupsOperations:
"""NetworkSecurityGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client plus (de)serializers injected by the generated service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        **kwargs
    ) -> None:
        """Send the initial DELETE request of the long-running delete operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all acceptable outcomes for an async ARM delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete returns no body; only invoke the optional custom hook.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        expand: Optional[str] = None,
        **kwargs
    ) -> "_models.NetworkSecurityGroup":
        """Gets the specified network security group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkSecurityGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2017_09_01.models.NetworkSecurityGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # $expand is only sent when the caller asked for it.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        parameters: "_models.NetworkSecurityGroup",
        **kwargs
    ) -> "_models.NetworkSecurityGroup":
        """Send the initial PUT request of the create-or-update long-running operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 = updated existing group, 201 = created new one; payload is identical.
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        parameters: "_models.NetworkSecurityGroup",
        **kwargs
    ) -> AsyncLROPoller["_models.NetworkSecurityGroup"]:
        """Creates or updates a network security group in the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param parameters: Parameters supplied to the create or update network security group
         operation.
        :type parameters: ~azure.mgmt.network.v2017_09_01.models.NetworkSecurityGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either NetworkSecurityGroup or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_09_01.models.NetworkSecurityGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkSecurityGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final response carries the resulting NetworkSecurityGroup body.
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        parameters: "_models.TagsObject",
        **kwargs
    ) -> "_models.NetworkSecurityGroup":
        """Send the initial PATCH request of the update-tags long-running operation."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'}  # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        parameters: "_models.TagsObject",
        **kwargs
    ) -> AsyncLROPoller["_models.NetworkSecurityGroup"]:
        """Updates a network security group tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param parameters: Parameters supplied to update network security group tags.
        :type parameters: ~azure.mgmt.network.v2017_09_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either NetworkSecurityGroup or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_09_01.models.NetworkSecurityGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkSecurityGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final response carries the updated NetworkSecurityGroup body.
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'}  # type: ignore
    def list_all(
        self,
        **kwargs
    ) -> AsyncIterable["_models.NetworkSecurityGroupListResult"]:
        """Gets all network security groups in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_09_01.models.NetworkSecurityGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkSecurityGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-09-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation metadata.
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Continuation link from the server already embeds its query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'}  # type: ignore
def list(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["_models.NetworkSecurityGroupListResult"]:
    """Gets all network security groups in a resource group.

    Pages through the Microsoft.Network networkSecurityGroups collection for
    the given resource group, following the service-provided next link until
    the collection is exhausted.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_09_01.models.NetworkSecurityGroupListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkSecurityGroupListResult"]
    # Map common failure status codes onto azure-core exception types; callers
    # may extend/override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-09-01"
    accept = "application/json, text/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page uses the
        # operation's URL template; later pages use next_link verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation token, items).
        deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; any status other than 200 is raised as an ARM error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'}  # type: ignore
|
[
"noreply@github.com"
] |
manoj0806.noreply@github.com
|
529b47a43c93136a63a93b3c54e0538cc7fc6563
|
6d63708f42e157815a788398dc2d9fb2d05ae2bb
|
/hackernews/hackernews/items.py
|
68d24d8d8311e3a81956ffa7b304ab7234b2ada8
|
[] |
no_license
|
vipulsingh24/Scrapy
|
0a6b22b2552f010f9c243b40348d523f2c6b1799
|
3ec5337448b81a06f532500260c359025cc4ff8d
|
refs/heads/master
| 2021-04-06T19:43:28.035983
| 2018-04-08T14:26:47
| 2018-04-08T14:26:47
| 125,396,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class HackernewsItem(scrapy.Item):
    """Scrapy item for one scraped Hacker News entry.

    Fields:
        link_title: headline text of the submission.
        url: URL the submission links to.
        sentiment: sentiment value filled in downstream.
        text: extracted text content.
    """
    # BUGFIX: the class name contained a stray space ("Hackern ewsItem"),
    # which is a SyntaxError; renamed to HackernewsItem.
    # define the fields for your item here like:
    # name = scrapy.Field()
    link_title = scrapy.Field()
    url = scrapy.Field()
    sentiment = scrapy.Field()
    text = scrapy.Field()
|
[
"letsmailvipul@gmail.com"
] |
letsmailvipul@gmail.com
|
cac4c21a91c4d05fff833cb225997c74a84f67a9
|
4996f8a222d64c9af4dca815ab0055944365c753
|
/2-c9/app.py
|
5913fd04834b8ca4a1a92a5139d65b656035f5a3
|
[] |
no_license
|
harvey1114/shiyanlou
|
ad118e7d36205e6571799ad3b2d2a29ae6d244ad
|
871dd85ca09017324f950310c4f6ba4b6d6f892f
|
refs/heads/master
| 2020-04-01T22:25:36.170274
| 2018-12-05T14:54:04
| 2018-12-05T14:54:04
| 153,707,817
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,860
|
py
|
import os
import json
from flask import Flask, render_template, abort
from flask_sqlalchemy import SQLAlchemy
from pymongo import MongoClient
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD']=True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root@localhost/shiyanlou'
db = SQLAlchemy(app)
client = MongoClient('127.0.0.1',27017)
mgdb = client.shiyanlou
class File(db.Model):
    """Article row stored in MySQL; its tag list lives in MongoDB.

    Tags are stored as ``{'fileid': <File.id>, 'tags': [str, ...]}`` documents
    in the ``tag`` collection of the module-level ``mgdb`` database.
    """
    __tablename__ = 'file'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(80))
    created_time = db.Column(db.DateTime)
    category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
    category = db.relationship('Category')
    content = db.Column(db.Text)

    def __init__(self, title, created_time, category, content):
        self.title = title
        self.created_time = created_time
        self.category = category
        self.content = content

    def __repr__(self):
        return '<File:{}>'.format(self.title)

    def add_tag(self, tag_name):
        """Add *tag_name* to this file's tag document (created on demand);
        duplicates are ignored."""
        tag = mgdb.tag.find_one({'fileid': self.id})
        if tag is None:
            # First tag for this file: create the document.
            mgdb.tag.insert_one({'fileid': self.id, 'tags': [tag_name]})
        elif tag_name not in tag['tags']:
            tag_list = tag['tags']
            tag_list.append(tag_name)
            mgdb.tag.update_one({'fileid': self.id}, {'$set': {'tags': tag_list}})

    def remove_tag(self, tag_name):
        """Remove *tag_name* from this file's tag document, if present.

        BUGFIX: the original read ``tag['tags']`` before checking
        ``tag is not None``, raising TypeError for files that have no tag
        document; the subscript now happens only after the None check.
        """
        tag = mgdb.tag.find_one({'fileid': self.id})
        if tag is not None and tag_name in tag['tags']:
            tag_list = tag['tags']
            tag_list.remove(tag_name)
            mgdb.tag.update_one({'fileid': self.id}, {'$set': {'tags': tag_list}})

    @property
    def tags(self):
        """Tag list for this file; [] when no tag document exists yet
        (the original raised TypeError in that case)."""
        tag = mgdb.tag.find_one({'fileid': self.id})
        return tag['tags'] if tag is not None else []
class Category(db.Model):
    """A named grouping that files belong to (table ``category``)."""
    __tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return f'<Category:{self.name}>'
@app.errorhandler(404)
def not_fount(error):
    # Render the custom 404 page for any unknown route.
    # NOTE(review): name is a typo for "not_found"; harmless because Flask
    # registers the handler via the decorator, but worth renaming eventually.
    return render_template('404.html'), 404
@app.route('/')
def index():
    """Home page: list every file with its title, detail path and tag string."""
    file_list = [
        {
            'title': f.title,
            'path': '/files/' + str(f.id),
            'tags': ','.join(mgdb.tag.find_one({'fileid': f.id})['tags']),
        }
        for f in db.session.query(File).all()
    ]
    return render_template('index.html', file_list=file_list)
@app.route('/files/<file_id>')
def file(file_id):
    """Detail page for one file; responds 404 for unknown or non-numeric ids.

    IMPROVEMENT: the original loaded every File row just to test membership
    and then queried again. Now a single filtered query is issued, and a
    non-numeric id is converted to a 404 explicitly instead of relying on
    string comparison against the full id list.
    """
    try:
        pk = int(file_id)
    except ValueError:
        abort(404)
    ff = db.session.query(File).filter(File.id == pk).first()
    if ff is None:
        abort(404)
    return render_template('file.html', ff=ff)
|
[
"harvey@qq.com"
] |
harvey@qq.com
|
b6e8416bb33d12648ad3efb10387f6473c61cb3e
|
40ce4d7545309ca57f0670a3aa27573d43b18552
|
/com.ppc.Bot/devices/gas/carbon_monoxide.py
|
bba3afce23c227967ee0cb6e2b55f74537325cc8
|
[
"Apache-2.0"
] |
permissive
|
slrobertson1/botlab
|
769dab97cca9ee291f3cccffe214544663d5178e
|
fef6005c57010a30ed8d1d599d15644dd7c870d8
|
refs/heads/master
| 2020-07-28T06:45:37.316094
| 2019-09-18T15:34:08
| 2019-09-18T15:34:08
| 209,341,818
| 0
| 0
|
Apache-2.0
| 2019-09-18T15:23:37
| 2019-09-18T15:23:37
| null |
UTF-8
|
Python
| false
| false
| 1,939
|
py
|
'''
Created on May 6, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from devices.device import Device
class CarbonMonoxideDevice(Device):
    """Carbon Monoxide Sensor (device type 9011).

    (The original docstring said "Entry Sensor" — a copy/paste leftover.)
    """
    # Measurement name whose latest value reflects the sensor's self-test state
    # (see is_testing()).
    MEASUREMENT_NAME_TEST = 'alarmStatus.1'
    # Low battery tag
    LOW_BATTERY_TAG = "lowbattery_aaa"
    # List of Device Types this class is compatible with
    DEVICE_TYPES = [9011]

    def __init__(self, botengine, device_id, device_type, device_description, precache_measurements=True):
        # Delegates entirely to the Device base constructor.
        Device.__init__(self, botengine, device_id, device_type, device_description, precache_measurements=precache_measurements)

    def initialize(self, botengine):
        Device.initialize(self, botengine)

    def get_device_type_name(self):
        """
        :return: the name of this device type in the given language, for example, "Carbon Monoxide Sensor"
        """
        # NOTE: Device type name
        return _("Carbon Monoxide Sensor")

    def get_image_name(self):
        """
        :return: the font icon name of this device type
        """
        return "gas"

    #===========================================================================
    # Attributes
    #===========================================================================
    def is_testing(self, botengine=None):
        """
        :return: True if the carbon monoxide sensor is under test
        """
        # measurements[name] appears to be a list of (value, timestamp)-style
        # entries with the newest first — TODO confirm against the Device base.
        if CarbonMonoxideDevice.MEASUREMENT_NAME_TEST in self.measurements:
            return self.measurements[CarbonMonoxideDevice.MEASUREMENT_NAME_TEST][0][0]
        return False

    def did_change_state(self, botengine=None):
        """
        :return: True if this sensor's test-state parameter was updated just now
        """
        return CarbonMonoxideDevice.MEASUREMENT_NAME_TEST in self.last_updated_params
|
[
"dmoss@peoplepowerco.com"
] |
dmoss@peoplepowerco.com
|
067d22fbbf7aca86bdd1f2062c87f55ae0660c05
|
fccca352b4db6ecdbd76849a3bff52b8756279f0
|
/scraper/scraper/settings.py
|
f5346833ec12802b382a87f90beb80a6859e72d6
|
[] |
no_license
|
dustinmichels/moodle-archiver
|
7e789c65b71faa053dce14da121fe8f08985cb96
|
552144589e8bdd3aede43cc17ba762a82ec8a11b
|
refs/heads/master
| 2020-03-18T19:50:40.418584
| 2018-09-02T20:10:49
| 2018-09-02T20:10:49
| 135,181,335
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for scraper project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from .user_info import USERNAME, PASSWORD
BOT_NAME = 'scraper'
SPIDER_MODULES = ['scraper.spiders']
NEWSPIDER_MODULE = 'scraper.spiders'
# Set Moodle login info
MOODLE_USERNAME = USERNAME
MOODLE_PASSWORD = PASSWORD
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scraper (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scraper.middlewares.ScraperSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scraper.middlewares.ScraperDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scraper.pipelines.ScraperPipeline': 300,
#}
ITEM_PIPELINES = {
'scraper.pipelines.SaveFilesPipeline': 300,
}
FILES_STORE = './output/'
MEDIA_ALLOW_REDIRECTS = True
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# For export
FEED_FORMAT = 'json'
FEED_URI = 'output/moodle.json'
|
[
"dustin7538@gmail.com"
] |
dustin7538@gmail.com
|
8549c2316735ab70b1ad802d82ea6847832e6d13
|
868fd5bb48d9a93a6d31a48b7faabb102705b788
|
/examples/wordcount.py
|
f346c221fbeb028fd25051e5dcf71fcf2a91087a
|
[
"MIT"
] |
permissive
|
toddrme2178/featureflow
|
f2223ea263ad9286f687d1608daf388f2c62dfe7
|
15de54864798f600de75185e05764e64835460a3
|
refs/heads/master
| 2020-04-04T20:30:26.962607
| 2018-06-26T14:20:09
| 2018-06-26T14:20:09
| 156,249,241
| 0
| 0
|
MIT
| 2018-11-05T16:35:54
| 2018-11-05T16:35:53
| null |
UTF-8
|
Python
| false
| false
| 3,452
|
py
|
from __future__ import print_function
import featureflow as ff
import argparse
from collections import Counter
import re
import hashlib
class Tokenizer(ff.Node):
    """
    Tokenize a stream of text into individual, normalized (lowercase)
    words/tokens
    """
    def __init__(self, needs=None):
        super(Tokenizer, self).__init__(needs=needs)
        self._cache = ''  # text received but not yet tokenized
        self._pattern = re.compile('(?P<word>[a-zA-Z]+)\W+')

    def _enqueue(self, data, pusher):
        # Accumulate raw text chunks until _dequeue can find word boundaries.
        self._cache += data

    def _dequeue(self):
        # Only words followed by a non-word character are complete; anything
        # after the last match stays cached for the next chunk.
        matches = list(self._pattern.finditer(self._cache))
        if not matches:
            raise ff.NotEnoughData()
        last_boundary = matches[-1].end()
        self._cache = self._cache[last_boundary:]
        return matches

    def _process(self, data):
        # Lowercase each matched word; yields a lazy map over the match list.
        yield map(lambda x: x.groupdict()['word'].lower(), data)
class WordCount(ff.Aggregator, ff.Node):
    """
    Keep track of token frequency
    """
    def __init__(self, needs=None):
        super(WordCount, self).__init__(needs=needs)
        self._cache = Counter()  # token -> occurrence count

    def _enqueue(self, data, pusher):
        # Counter.update accepts any iterable of tokens (or another mapping).
        self._cache.update(data)
class CheckSum(ff.Aggregator, ff.Node):
    """
    Compute the checksum of a text stream
    """
    def __init__(self, needs=None):
        super(CheckSum, self).__init__(needs=needs)
        self._cache = hashlib.sha256()  # incremental digest of all bytes seen

    def _enqueue(self, data, pusher):
        self._cache.update(data)

    def _process(self, data):
        # Emit the final digest as a hex string once the stream completes.
        yield data.hexdigest()
@ff.simple_in_memory_settings
class Document(ff.BaseModel):
    """
    Define the processing graph needed to extract document-level features,
    whether, and how those features should be persisted.
    """
    # Raw bytes of the source document, read in 128-byte chunks.
    raw = ff.ByteStreamFeature(
        ff.ByteStream,
        chunksize=128,
        store=True)

    # SHA-256 digest of the raw byte stream.
    checksum = ff.JSONFeature(
        CheckSum,
        needs=raw,
        store=True)

    # Lowercased word tokens; intermediate only, never persisted.
    tokens = ff.Feature(
        Tokenizer,
        needs=raw,
        store=False)

    # Per-document token frequencies.
    counts = ff.JSONFeature(
        WordCount,
        needs=tokens,
        store=True)
@ff.simple_in_memory_settings
class Corpus(ff.BaseModel):
    """
    Define the processing graph needed to extract corpus-level features,
    whether, and how those features should be persisted.
    """
    # Lazy stream of every stored document's per-document counts; not persisted.
    docs = ff.Feature(
        lambda doc_cls: (doc.counts for doc in doc_cls),
        store=False)

    # Token frequencies aggregated across the entire corpus.
    total_counts = ff.JSONFeature(
        WordCount,
        needs=docs,
        store=True)
def process_urls(urls):
    """Ingest each URL as a new Document; features are computed on ingest."""
    for url in urls:
        Document.process(raw=url)
def summarize_document(doc):
    """One-line human-readable summary of a single document's features."""
    the_count = doc.counts.get('the', 0)
    return f'doc {doc._id} with checksum {doc.checksum} contains "the" {the_count} times'
def process_corpus(document_cls):
    """Aggregate corpus-level features over every stored document and return
    the resulting Corpus instance."""
    corpus_id = Corpus.process(docs=document_cls)
    return Corpus(corpus_id)
def summarize_corpus(corpus):
    """One-line summary of the aggregate word counts across all documents."""
    the_count = corpus.total_counts.get("the", 0)
    return f'The entire text corpus contains "the" {the_count} times'
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--url',
        help='specify one or more urls of text files to ingest',
        required=True,
        action='append')
    args = parser.parse_args()

    # Ingest every URL, print a per-document summary, then aggregate and
    # summarize the corpus-level counts.
    process_urls(args.url)
    for doc in Document:
        print(summarize_document(doc))
    corpus = process_corpus(Document)
    print(summarize_corpus(corpus))
|
[
"john.vinyard@gmail.com"
] |
john.vinyard@gmail.com
|
9861da30017314ba4235383958ebe1ac1213b676
|
6d03e98f216062984ee9b68118426806ecdc94d2
|
/custom/plugins/git-prompt-custom/gitstatus.py
|
d88e798d71846f1617f11fc59395340faac97b3d
|
[
"MIT"
] |
permissive
|
sgpthomas/oh-my-zsh
|
fc02aa0899fa7e229a25dba0891318468e5bc9df
|
47551d647c094b2475947314679bfefad14c8742
|
refs/heads/master
| 2021-08-27T18:40:58.731450
| 2021-08-04T21:05:29
| 2021-08-04T21:05:29
| 155,802,190
| 0
| 0
|
NOASSERTION
| 2018-11-02T02:16:38
| 2018-11-02T02:16:37
| null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
#!/usr/bin/env python3
"""Emit "<in_git_repo> <branch> <dirty>" for the current directory, for use in
a zsh prompt (e.g. "1 master 0"); printed with no trailing newline."""
import sys
from subprocess import Popen, PIPE

# `git status --porcelain --branch` prints a header line like
# "## master...origin/master [ahead 1]" followed by one line per changed path.
po = Popen(['git', 'status', '--porcelain', '--branch'], stdout=PIPE, stderr=PIPE)
stdout, sterr = po.communicate()

git = "0"        # "1" when inside a git work tree
branch = "none"  # current branch name, or "none" outside a repo
dirty = "0"      # "1" when there are staged/unstaged/untracked changes

if po.returncode == 0:
    git = "1"
    lines = stdout.decode('utf-8').splitlines()
    s = lines[0]
    # Header is "## <branch>" or "## <branch>...<upstream>".
    # BUGFIX: the original split at the first '.' anywhere in the line, which
    # truncated branch names that themselves contain a dot (e.g. "v1.2-fix");
    # split on the literal "..." separator instead.
    branch = s[s.index(' ') + 1:].partition('...')[0]
    if len(lines) > 1:
        dirty = "1"

out = ' '.join([git, branch, dirty])
print(out, end='')
|
[
"sgpthomas@gmail.com"
] |
sgpthomas@gmail.com
|
4703e4c6245617739a5d4e76ba664eb9502ae538
|
5e5d42a75b60dabfbc36330606fc4e6297f5d185
|
/kariera/models.py
|
f2adf74bca33bfaffcb0c009ad742dcc1bd26da8
|
[] |
no_license
|
kintarus/netbox_app
|
24782b0f90568eedbd504b3225b1e64fa1c830b1
|
2d32830b49e04e6def23658922a90d6bca6e0112
|
refs/heads/master
| 2021-03-15T08:43:58.124970
| 2020-03-12T13:34:33
| 2020-03-12T13:34:33
| 246,838,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,002
|
py
|
from django.db import models
from django.utils import timezone
from tinymce.models import HTMLField
from ckeditor.fields import RichTextField
class Miasto(models.Model):
    """A city/location where a job offer is available."""
    nazwa = models.CharField(max_length=200)       # city name
    kod_pocztowy = models.CharField(max_length=6)  # postal code, e.g. "00-001"
    # BUGFIX: was models.Charfield (lowercase 'f'), which raises AttributeError
    # at import time; Django's field class is CharField.
    adres = models.CharField(max_length=200)       # street address
class OfertaPracy(models.Model):
    """A job offer authored by a user, available in one or more cities."""
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    foto = models.FileField(upload_to='media/praca', blank=True, null=True)
    zajawka = models.CharField(max_length=120)    # teaser / short blurb
    text = RichTextField()
    lokalizacja = models.ManyToManyField(Miasto)  # cities the offer applies to
    aktualne = models.BooleanField(default=False, blank=True, null=True)  # still open?
    created_date = models.DateTimeField(default=timezone.now)
    published_date = models.DateTimeField(blank=True, null=True)

    def published(self):
        """Mark the offer as published now and persist it.

        BUGFIX: the original assigned ``timezone.now`` (the function object)
        instead of calling it, so the DateTimeField received a callable
        rather than a datetime.
        """
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
|
[
"kawecki.piotr@gmail.com"
] |
kawecki.piotr@gmail.com
|
d6967a6615adeb9befa7ff6641d173fde31732ee
|
84c5b0d229f0f2ad98ed38c993112af20eabaf7f
|
/oniad_root/models/account_invoice.py
|
c685385135459e8998f5312f5288bcb9543bf826
|
[] |
no_license
|
victoralmau/oniad
|
86ddfd873969cb1d6cb6897d79d0a72118fe12db
|
8e45096c90406923e4c5fc69edcd9697d184040c
|
refs/heads/master
| 2022-12-06T12:43:59.084129
| 2020-04-01T08:23:50
| 2020-04-01T08:23:50
| 284,020,289
| 0
| 0
| null | 2020-07-31T11:24:46
| 2020-07-31T11:24:45
| null |
UTF-8
|
Python
| false
| false
| 4,604
|
py
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models
import logging
_logger = logging.getLogger(__name__)
class AccountInvoice(models.Model):
    """Extends account.invoice with an ONIAD address link and a helper that
    fills an invoice from a batch of ONIAD payments."""
    _inherit = 'account.invoice'

    # Link to the ONIAD address this invoice belongs to.
    oniad_address_id = fields.Many2one(
        comodel_name='oniad.address',
        string='Oniad Address'
    )

    @api.one
    def oniad_root_complete_data(self, product, account_payment_ids):
        """Create one invoice line per payment, reconcile rounding differences
        into the tax lines, validate the invoice (only when the partner has a
        VAT number) and assign the payments' move lines to it.

        :param product: default product for the generated lines, used when a
            payment carries no oniad_product_id of its own
        :param account_payment_ids: account.payment recordset to invoice
        """
        #account.invoice.lines
        payments_total = 0
        for account_payment_id in account_payment_ids:
            #account_invoice_line_vals
            account_invoice_line_vals = {
                'invoice_id': self.id,
                'oniad_transaction_id': account_payment_id.oniad_transaction_id.id,
                'product_id': product.id,  # default: the expense product
                'name': account_payment_id.communication,
                'quantity': 1,
                'price_unit': account_payment_id.amount,
                'account_id': product.property_account_income_id.id,
                'purchase_price': account_payment_id.oniad_purchase_price,
                'currency_id': account_payment_id.currency_id.id
            }
            # Prefer the payment's own product when one is set.
            if account_payment_id.oniad_product_id.id>0:
                account_invoice_line_vals['product_id'] = account_payment_id.oniad_product_id.id
            #create
            account_invoice_line_obj = self.env['account.invoice.line'].sudo().create(account_invoice_line_vals)
            account_invoice_line_obj._onchange_product_id()
            account_invoice_line_obj._onchange_account_id()
            # Convert the tax-inclusive payment amount into a tax-exclusive
            # unit price for the line.
            price_unit = account_payment_id.amount/((account_invoice_line_obj.invoice_line_tax_ids.amount/100)+1)
            payments_total = payments_total + account_payment_id.amount
            #update
            account_invoice_line_obj.update({
                'price_unit': round(price_unit,4),
                'name': account_payment_id.communication,
                'price_subtotal': price_unit,
            })
        #Fix check totals
        self.compute_taxes()
        # Absorb any rounding drift between the payments total and the computed
        # invoice total into the tax lines so both totals match exactly.
        # NOTE(review): the full difference is added to EVERY tax line — looks
        # intended for single-tax invoices; confirm for multi-tax cases.
        if payments_total>self.amount_total:
            amount_rest = payments_total-self.amount_total
            for tax_line_id in self.tax_line_ids:
                tax_line_id.amount = tax_line_id.amount + amount_rest
            #update tax_line_ids
            self.update({'tax_line_ids': self.tax_line_ids})
        elif self.amount_total>payments_total:
            amount_rest = self.amount_total-payments_total
            for tax_line_id in self.tax_line_ids:
                tax_line_id.amount = tax_line_id.amount - amount_rest
            #update tax_line_ids
            self.update({'tax_line_ids': self.tax_line_ids})
        #update payment.invoice
        _logger.info('Factura '+str(self.id)+' actualizada correctamente')
        # Validate and reconcile only when the partner has a VAT number.
        if self.partner_id.vat!=False and self.partner_id.vat!="":
            self.action_invoice_open()
            _logger.info('Factura '+str(self.id)+' validada correctamente')
            self.action_auto_create_message_slack()  # slack.message
            # Assign each payment's receivable/payable move line to the invoice.
            if len(account_payment_ids)>0:
                for account_payment_id in account_payment_ids:
                    if account_payment_id.payment_type=='inbound':
                        for move_line_id in account_payment_id.move_line_ids:
                            if move_line_id.credit>0:
                                _logger.info('Factura '+str(self.id)+' pre-asignar asiento contable '+str(move_line_id.id)+ ' del pago '+str(account_payment_id.id))
                                self.assign_outstanding_credit(move_line_id.id)
                                _logger.info('Factura '+str(self.id)+' asignado asiento contable '+str(move_line_id.id)+ ' del pago '+str(account_payment_id.id))
                    elif account_payment_id.payment_type=='outbound':
                        for move_line_id in account_payment_id.move_line_ids:
                            if move_line_id.debit>0:
                                _logger.info('Factura '+str(self.id)+' pre-asignar asiento contable '+str(move_line_id.id)+ ' del pago '+str(account_payment_id.id))
                                self.assign_outstanding_credit(move_line_id.id)
                                _logger.info('Factura '+str(self.id)+' asignado asiento contable '+str(move_line_id.id)+ ' del pago '+str(account_payment_id.id))
|
[
"informatica@arelux.com"
] |
informatica@arelux.com
|
c25cd342227a2db904b9cc8b7d0eb63c4bced298
|
6d54a7b26d0eb82152a549a6a9dfde656687752c
|
/src/test_driver/openiotsdk/integration-tests/common/fixtures.py
|
08f9b55d6d61dc1f9c51d3d437cad53e5c64c3c6
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
project-chip/connectedhomeip
|
81a123d675cf527773f70047d1ed1c43be5ffe6d
|
ea3970a7f11cd227ac55917edaa835a2a9bc4fc8
|
refs/heads/master
| 2023-09-01T11:43:37.546040
| 2023-09-01T08:01:32
| 2023-09-01T08:01:32
| 244,694,174
| 6,409
| 1,789
|
Apache-2.0
| 2023-09-14T20:56:31
| 2020-03-03T17:05:10
|
C++
|
UTF-8
|
Python
| false
| false
| 4,712
|
py
|
#
# Copyright (c) 2022 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import pathlib
import shutil
import chip.CertificateAuthority
import chip.native
import pytest
from chip import exceptions
from .fvp_device import FvpDevice
from .telnet_connection import TelnetConnection
from .terminal_device import TerminalDevice
log = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def rootDir():
return pathlib.Path(__file__).parents[5].absolute()
@pytest.fixture(scope="session")
def fvp(request):
if request.config.getoption('fvp'):
return request.config.getoption('fvp')
else:
return shutil.which('FVP_Corstone_SSE-300_Ethos-U55')
@pytest.fixture(scope="session")
def fvpConfig(request, rootDir):
if request.config.getoption('fvpConfig'):
return request.config.getoption('fvpConfig')
else:
return os.path.join(rootDir, 'config/openiotsdk/fvp/cs300.conf')
@pytest.fixture(scope="session")
def telnetPort(request):
return request.config.getoption('telnetPort')
@pytest.fixture(scope="session")
def networkInterface(request):
if request.config.getoption('networkInterface'):
return request.config.getoption('networkInterface')
else:
return None
@pytest.fixture(scope="session")
def otaProvider(request, rootDir):
if request.config.getoption('otaProvider'):
return request.config.getoption('otaProvider')
else:
return os.path.join(rootDir, 'out/chip-ota-provider-app')
@pytest.fixture(scope="session")
def softwareVersion(request):
    """Return (version_number, version_string).

    Parsed from the --softwareVersion option formatted "<number>:<string>";
    defaults to ("1", "0.0.1") when the option is absent.
    """
    if request.config.getoption('softwareVersion'):
        version = request.config.getoption('softwareVersion')
        params = version.split(':')
        return (params[0], params[1])
    else:
        return ("1", "0.0.1")
@pytest.fixture(scope="function")
def device(fvp, fvpConfig, binaryPath, telnetPort, networkInterface):
    """Boot the application binary on an FVP model for one test, reachable
    over a local telnet connection; the model is stopped on teardown."""
    connection = TelnetConnection('localhost', telnetPort)
    device = FvpDevice(fvp, fvpConfig, binaryPath, connection, networkInterface, "FVPdev")
    device.start()
    yield device
    device.stop()
@pytest.fixture(scope="session")
def controller(controllerConfig):
    """Session-scoped CHIP device controller.

    Initializes the CHIP stack with persistent storage, loads (or creates) a
    certificate authority and fabric admin, and yields a controller built on
    the first admin. On teardown the controller, CA manager and stack are
    shut down and the persistent storage file is removed.

    NOTE(review): this fixture is a generator, so the ``return None`` branches
    end it without yielding — pytest reports that as a fixture error rather
    than providing None to the test; confirm that is the intended behavior.
    """
    try:
        chip.native.Init()

        chipStack = chip.ChipStack.ChipStack(
            persistentStoragePath=controllerConfig['persistentStoragePath'], enableServerInteractions=False)
        certificateAuthorityManager = chip.CertificateAuthority.CertificateAuthorityManager(
            chipStack, chipStack.GetStorageManager())
        certificateAuthorityManager.LoadAuthoritiesFromStorage()

        # Ensure at least one CA with one fabric admin exists in storage.
        if (len(certificateAuthorityManager.activeCaList) == 0):
            ca = certificateAuthorityManager.NewCertificateAuthority()
            ca.NewFabricAdmin(vendorId=controllerConfig['vendorId'], fabricId=controllerConfig['fabricId'])
        elif (len(certificateAuthorityManager.activeCaList[0].adminList) == 0):
            certificateAuthorityManager.activeCaList[0].NewFabricAdmin(
                vendorId=controllerConfig['vendorId'], fabricId=controllerConfig['fabricId'])

        caList = certificateAuthorityManager.activeCaList

        devCtrl = caList[0].adminList[0].NewController()
    except exceptions.ChipStackException as ex:
        log.error("Controller initialization failed {}".format(ex))
        return None
    except Exception:
        log.error("Controller initialization failed")
        return None

    yield devCtrl

    devCtrl.Shutdown()
    certificateAuthorityManager.Shutdown()
    chipStack.Shutdown()
    os.remove(controllerConfig['persistentStoragePath'])
@pytest.fixture(scope="session")
def ota_provider(otaProvider, otaProviderConfig):
    """Run the chip-ota-provider-app for the whole session; the process is
    stopped and its KVS file removed on teardown."""
    args = [
        '--discriminator', otaProviderConfig['discriminator'],
        '--secured-device-port', otaProviderConfig['port'],
        '-c',
        '--KVS', otaProviderConfig['persistentStoragePath'],
        '--filepath', otaProviderConfig['filePath'],
    ]
    device = TerminalDevice(otaProvider, args, "OTAprovider")
    device.start()
    yield device
    device.stop()
    os.remove(otaProviderConfig['persistentStoragePath'])
|
[
"noreply@github.com"
] |
project-chip.noreply@github.com
|
4eb9117dec685610b20c0f77a560dd71ea30a929
|
3d4fcc74c87e570e33ce723900a2615ba7948469
|
/ProxyPool/run.py
|
11093f3a96915ec1bcab59987c0ee58602d6aa64
|
[
"Apache-2.0"
] |
permissive
|
Scarecrow1024/Python
|
9e8b7f596e53535f80c81ea41cbc5eb2ef46feff
|
308ce3bbd10eb3e47b97ac65926ed37dfbd8b768
|
refs/heads/master
| 2021-01-19T11:16:00.917906
| 2017-11-11T04:39:26
| 2017-11-11T04:39:26
| 87,946,987
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
from proxypool.api import app
from proxypool.schedule import Schedule
def main():
    """Start the proxy-pool scheduler, then serve the HTTP API."""
    s = Schedule()
    s.run()
    # NOTE(review): app.run() blocks; this assumes Schedule.run() returns (or
    # spawns its own workers) before the API is started — confirm.
    app.run()


if __name__ == '__main__':
    main()
|
[
"postmaster@itu74.com"
] |
postmaster@itu74.com
|
8f70b094f28d355731ae9ffec5792ac3562bf27f
|
2bad0c6ccd4f49f4e12c0cabacadf77f5904be71
|
/weixin_test/weixin_config.py
|
76d0c905f43c494a42d99a4432c33e0b8b62d2b5
|
[] |
no_license
|
caozi/FISH-result-check-app
|
6ec24d845d94aebf476bc4739a215881c9514c14
|
a62f5570257a8fa6b8f2e0c53aa2715d5af82f72
|
refs/heads/master
| 2021-01-27T06:11:44.793878
| 2020-06-12T09:46:53
| 2020-06-12T09:46:53
| 243,474,594
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
# weixin configurations
# SECURITY NOTE(review): a live app secret/token committed to source control
# should be rotated and loaded from environment variables or a secrets store
# instead of being hard-coded here.
TOKEN = 'hellowx'  # token used to verify WeChat server callbacks
appID = 'wx6c11f5e4bbd229bd'  # WeChat official-account app id
appsecret = '1605f2bca63385b87ec35daffa2227ea'  # app secret (credential!)
# Message-template ids used when sending template notifications
template_ID = 'xXKmB_vixLBkmwAZ643JLCU4EiCPgqLv1IPHom029W4'
template_ID_FISH = '6l8cvoXLcfTl6I36iXqwdBRvCXQJFzZ6ur871PqR7mw'
|
[
"imcaozi@outlook.com"
] |
imcaozi@outlook.com
|
9879dd710062eb01d93600adb516df15be3de790
|
ccf2730ab05040f7f008717436319b9044290b31
|
/Питон/555.py
|
cf133157cd4a7cf361122439341b8842d6dbe4da
|
[] |
no_license
|
bakyt86/codingbat1
|
9e6725dbb99a848f7bb30c3bb5a126f7a99e4f51
|
b08b373a6fc2f4aa07ffb12e61e23e6a1036347a
|
refs/heads/master
| 2022-12-05T23:38:10.697839
| 2020-08-21T14:19:56
| 2020-08-21T14:19:56
| 284,875,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
def divisors(a):
    """Return all positive divisors of *a* in ascending order."""
    return [i for i in range(1, a + 1) if a % i == 0]


if __name__ == '__main__':
    # IMPROVEMENT: the divisor computation is now a reusable function, and the
    # interactive part is guarded so importing this module does not block on
    # input(); behavior when run as a script is unchanged.
    a = int(input('Enter number '))
    print(divisors(a))
|
[
"noreply@github.com"
] |
bakyt86.noreply@github.com
|
196c0db77a273eae8597887529a7c352a560a6a7
|
a9f7e40e18c935fb004fe813f98e298ded0581af
|
/Google_Code_Jam/GCJ2008/GCJ2008_R1A_A.py
|
53afb6e65b62316a298009f4a927889f5b0acfdc
|
[] |
no_license
|
happa64/AtCoder_Beginner_Contest
|
2eb350f500f4bd65f5491b98cdf002ac9b174165
|
2526e72de9eb19d1e1c634dbd577816bfe39bc10
|
refs/heads/master
| 2023-07-08T15:13:51.449555
| 2021-08-11T14:18:09
| 2021-08-11T14:18:09
| 255,601,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
# https://codingcompetitions.withgoogle.com/codejam/round/00000000004330f6/0000000000432f33
# Minimum Scalar Product
import sys
sys.setrecursionlimit(10 ** 7)
input = sys.stdin.readline
f_inf = float('inf')
mod = 10 ** 9 + 7
def resolve():
    """Minimum scalar product: pair the smallest entries of one vector with
    the largest of the other (ascending vs descending sort), then sum the
    element-wise products. Reads the Code Jam input format from stdin."""
    case_count = int(input())
    for case_no in range(1, case_count + 1):
        n = int(input())
        first = list(map(int, input().split()))
        second = list(map(int, input().split()))
        ascending = sorted(first)[:n]
        descending = sorted(second, reverse=True)[:n]
        answer = sum(a * b for a, b in zip(ascending, descending))
        print("Case #{}: {}".format(case_no, answer))
if __name__ == '__main__':
resolve()
|
[
"happa_iidx@yahoo.co.jp"
] |
happa_iidx@yahoo.co.jp
|
352c29303a5086f912a8c758db6a69aef6fb84b1
|
8743b902674979e4f01f65fc14fb5771a56c4444
|
/backend/src/app/db/session.py
|
9f6871122b26c43b30c67ac3886dd076e0c2158d
|
[] |
no_license
|
rajatgermany/brand_evaluator
|
9218ae71d66c2b1f2035bc32861e96f0ac4f3e16
|
536e443affde566666fba6ff872f7995add162bd
|
refs/heads/master
| 2023-01-08T09:34:54.895024
| 2019-10-17T09:04:33
| 2019-10-17T09:04:33
| 214,797,622
| 4
| 3
| null | 2022-12-26T21:04:23
| 2019-10-13T10:05:06
|
Python
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
from app.core import config
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
# Single process-wide engine; pool_pre_ping checks each pooled connection
# before handing it out, so stale connections are replaced transparently.
engine = create_engine(config.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
# Thread-local session registry (one session per thread/request scope).
db_session = scoped_session(
    sessionmaker(autocommit=False, autoflush=False, bind=engine)
)
# Plain session factory for callers that manage their own session lifetime.
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
|
[
"rajatuiet@gmail.com"
] |
rajatuiet@gmail.com
|
c2f5919ddc92d7619a3588b561bfa4b135d0a60a
|
9947a9eba45f7daf8ec7f96920c21855563f14c2
|
/polls/tests.py
|
cbf975a8fca0601e80f7969558f527870dddf2ec
|
[] |
no_license
|
hmjrun/djangosite
|
f27ad99e826b06cc6d7d3d21c3ec26a264b12119
|
c23fd6b010a01c673fd47d0f431777fbd4b21c95
|
refs/heads/master
| 2020-07-09T05:02:26.274990
| 2017-03-17T09:42:50
| 2017-03-17T09:42:50
| 67,414,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
from django.test import TestCase
# Create your tests here.
import datetime
from django.utils import timezone
from .models import Question
class QuestionModelTests(TestCase):
    """Unit tests for the Question model's date helpers."""

    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() should return False for
        questions whose
        pub_date is in the future.
        """
        # A question dated 30 days in the future (not saved to the DB).
        time = timezone.now() + datetime.timedelta(days=30)
        # Bug fix: the model field is ``pub_date``; the original passed
        # ``pub_data``, which Django rejects with a TypeError before the
        # assertion ever runs.
        future_question = Question(pub_date=time)
        # We then check the output of was_published_recently()
        # - which ought to be False.
        self.assertIs(future_question.was_published_recently(), False)
|
[
"892532819@qq.com"
] |
892532819@qq.com
|
548009f7d5b6a332787587db938c530701e9c277
|
6b0ff78d9adf295d4a57dbd9e489201362729665
|
/communication_test.py
|
ea5df08bfac89b4215ca5240ddce32bd6272bdd9
|
[] |
no_license
|
Neboer/HCRC
|
5873f1866fd08dab3486b066a8f40c17f53baad0
|
dd023d73cda532baecc6998f625ace08daf8bce4
|
refs/heads/master
| 2021-04-23T18:49:08.668817
| 2020-03-30T15:01:25
| 2020-03-30T15:01:25
| 249,971,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 847
|
py
|
import unittest
from communication import *
class CommunicationCase(unittest.TestCase):
    """Integration tests for the server-communication helpers.

    NOTE(review): these tests appear to hit a live server and to rely on
    pre-existing account state ('wsbz' with password '2956136') — confirm
    the environment before running them.
    """
    def test_check_user_from_server(self):
        # Correct credentials are accepted; a wrong password is rejected.
        self.assertTrue(check_user_from_server('wsbz', '2956136'))
        self.assertFalse(check_user_from_server('wsbz', '1234567'))
    def test_add_user_to_server(self):
        self.assertTrue(add_user_to_server('2333', '123456'))
    def test_change_user_password_from_server(self):
        # Old and new password are identical here; this only checks the
        # call itself succeeds.
        self.assertTrue(change_user_password_from_server('wsbz', '123456', '123456'))
    def test_del_user_from_server(self):
        # Deletes the '2333' account (the one test_add_user_to_server creates).
        self.assertTrue(del_user_from_server('2333'))
    def test_get_user_list_from_server(self):
        self.assertTrue(len(get_user_list_from_server()) > 0)
    def test_give_kit(self):
        self.assertTrue(give_kit('wsbz', '萌新'))
if __name__ == '__main__':
    unittest.main()
|
[
"gmyxgmyx@outlook.com"
] |
gmyxgmyx@outlook.com
|
1dc2127be89c07b2de0802f96871402251308198
|
9aca4c98a0d6ecaf263cf1cea03ec87a9c9fd926
|
/HINDI/views.py
|
37499c2edada6b47b3d1496aafb7fd0af3c8c37b
|
[] |
no_license
|
Aman-Kumar-123/Djano-translation
|
cc63150c06c776e6a1af025667ac7d0e61f2145d
|
768dd7a11af138b736328cabe0850a88cb98f609
|
refs/heads/master
| 2020-12-29T05:28:34.573257
| 2020-01-26T14:15:40
| 2020-01-26T14:15:40
| 238,471,202
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
from googletrans import Translator
T=Translator()
def index(request):
    """Render index.html with the Hindi translation of the ``text1`` query
    parameter.

    The placeholder 'OUTPUT TEXT' (used when the parameter is absent) is
    passed through untranslated.
    """
    text = request.GET.get('text1', 'OUTPUT TEXT')
    # Bug fix: the original used ``text is not 'OUTPUT TEXT'``, which compares
    # object identity, not equality; identity of equal string literals is
    # implementation-dependent (CPython emits a SyntaxWarning).  Use ``!=``.
    if text != 'OUTPUT TEXT':
        text = T.translate(text, dest='hi', src='en').text
    # (leftover debug print of the translated text removed)
    params = {'textarea': text}
    return render(request, 'index.html', params)
|
[
"ak1373728@gmail.com"
] |
ak1373728@gmail.com
|
e4bb54ee58dd6c620bd8440a4deb1ed555dff818
|
14816fb4bc3785dc7ca887c27a0b6b219ac494f1
|
/obml-parser
|
f37197473f1b464f3713ab499150fb4f6942e5c6
|
[
"MIT"
] |
permissive
|
vinhjaxt/obml-parser
|
9ba0ec8a744c73e4af4bf39b4870094a2600e0a8
|
59d29516805c858d15f970edf84adb36a48ce393
|
refs/heads/master
| 2020-04-27T21:39:50.074739
| 2019-03-09T15:34:54
| 2019-03-09T15:34:54
| 174,706,902
| 0
| 0
| null | 2019-03-09T14:51:06
| 2019-03-09T14:51:06
| null |
UTF-8
|
Python
| false
| false
| 21,826
|
#!/usr/bin/env python3
# A barebones parser for OBML files used by Opera Mini
#
# (c) 2014–2016 Mantas Mikulėnas <grawity@gmail.com>
# Released under the MIT License
#
# Originally intended to extract original URLs from saved pages, after Opera dropped
# binary compatibilty between minor releases and left me with a bunch of unreadable
# saved pages in v15 — and it does that.
#
# An attempt to document the file format is at:
# https://gist.github.com/grawity/702b31cddcc5a6622724
import base64
import os
import struct
import sys
import urllib.parse
from pprint import pprint
class Parser():
    """Sequential reader for OBML binary files.

    Wraps an open file handle and provides typed read helpers (byte, short,
    3-byte 'medium', length-prefixed blob) plus OBML-specific composites
    (UTF-8 string, base-relative URL, ARGB color, x/y coordinates).
    """
    def __init__(self, path):
        self.path = path
        self.fh = open(path, 'rb')  # handle stays open for the parser's lifetime
        self.url_base = "\0"        # prefix substituted into URLs that start with NUL
        self.last_x = 0             # running absolute position, for relative coords
        self.last_y = 0
    def debug(self, typ, data):
        """Print *data* tagged with *typ* when $DEBUG is set; return it unchanged."""
        if os.environ.get("DEBUG"):
            c_on = "\033[33m" if sys.stdout.isatty() else ""
            c_off = "\033[m" if sys.stdout.isatty() else ""
            print(c_on, "#", typ, repr(data), c_off)
        return data
    # primitives
    def read(self, length):
        """Read exactly *length* bytes; raise IOError on a short read."""
        buf = self.fh.read(length)
        if len(buf) < length:
            raise IOError("Hit EOF after %d/%d bytes" % (len(buf), length))
        return self.debug("raw[%d]" % length, buf)
    def read_byte(self):
        """Read one unsigned byte."""
        length = 1
        buf = self.fh.read(length)
        if len(buf) < length:
            raise IOError("Hit EOF after %d/%d bytes" % (len(buf), length))
        data, = struct.unpack('>B', buf)
        return self.debug("byte", data)
    def read_short(self):
        """Read a big-endian unsigned 16-bit integer."""
        length = 2
        buf = self.fh.read(length)
        if len(buf) < length:
            raise IOError("Hit EOF after %d/%d bytes" % (len(buf), length))
        data, = struct.unpack('>H', buf)
        return self.debug("short", data)
    def read_medium(self):
        """Read a big-endian unsigned 24-bit ('medium') integer."""
        length = 3
        buf = self.fh.read(length)
        if len(buf) < length:
            raise IOError("Hit EOF after %d/%d bytes" % (len(buf), length))
        data_hi, data_lo = struct.unpack('>BH', buf)
        return self.debug("medium", (data_hi << 16) | data_lo)
    def read_blob(self):
        """Read a 16-bit length prefix followed by that many raw bytes."""
        length = self.read_short()
        buf = self.fh.read(length)
        if len(buf) < length:
            raise IOError("Hit EOF after %d/%d bytes" % (len(buf), length))
        return self.debug("chunk[%d]" % length, buf)
    # other data types
    def read_string(self):
        """Read a blob and decode it as UTF-8."""
        buf = self.read_blob()
        buf = buf.decode('utf-8')
        return self.debug("-> str[%d]" % len(buf), buf)
    def read_url(self, base=None):
        """Read a string; a leading NUL means 'relative to *base*' (default url_base)."""
        buf = self.read_string()
        if buf and buf[0] == "\0":
            if not base:
                base = self.url_base
            buf = base + buf[1:]
        return self.debug("-> url[%d]" % len(buf), buf)
    def read_color(self):
        """Read four bytes as an (alpha, red, green, blue) tuple."""
        a = self.read_byte()
        r = self.read_byte()
        g = self.read_byte()
        b = self.read_byte()
        return self.debug("-> color[argb]", (a, r, g, b))
    def read_coords(self, rel_to_abs=False):
        """Read an (x, y) pair: 16-bit x, 24-bit y.

        With rel_to_abs=True the values are deltas added (mod 2^16 / 2^24)
        to the last absolute position, which is updated in place.
        """
        x = self.read_short()
        y = self.read_medium()
        if rel_to_abs:
            self.last_x = x = (self.last_x + x) & 0xFFFF
            self.last_y = y = (self.last_y + y) & 0xFFFFFF
        else:
            # in v15+, all positions are relative and never depend on
            # earlier absolute coordinates (which are only used for sizes)
            pass
        return self.debug("-> coords[%s]" % ("rel" if rel_to_abs else "abs"), (x, y))
def infinity(start=0):
    """Yield consecutive integers forever, beginning at *start*.

    Kept as a generator wrapper for backward compatibility; itertools.count
    provides the same unbounded counter, implemented in C.
    """
    import itertools
    yield from itertools.count(start)
def strhex(buf):
    """Render *buf* as space-separated uppercase hex byte pairs."""
    return " ".join(format(byte, "02X") for byte in buf)
def rgba(argb_tuple):
    """Format an (alpha, r, g, b) tuple as a CSS rgb()/rgba() string.

    Fully opaque (255) and fully transparent (0) alpha both collapse to
    plain rgb(); anything in between emits rgba() with a 3-decimal alpha.
    """
    alpha, red, green, blue = argb_tuple
    if alpha in (0, 255):
        return "rgb(%d, %d, %d)" % (red, green, blue)
    return "rgba(%d, %d, %d, %.3f)" % (red, green, blue, alpha / 255)
def data_url(buf):
    """Encode *buf* as a base64 data: URL, sniffing PNG/JPEG magic bytes.

    Unrecognized content falls back to application/octet-stream.
    """
    if buf.startswith(b"\x89PNG\r\n"):
        mime = "image/png"
    elif buf.startswith(b"\xff\xd8"):
        mime = "image/jpeg"
    else:
        mime = "application/octet-stream"
    payload = urllib.parse.quote(base64.b64encode(buf))
    return "data:%s;base64,%s" % (mime, payload)
def css_imgdata(buf):
    """Wrap the data: URL for *buf* in a CSS url() token."""
    return "url('{0}')".format(data_url(buf))
def parse_file(arg):
    """Parse the OBML file at path *arg*, yielding render items as dicts.

    Yields dicts keyed by "_type": "head", "option_list", "link", "box",
    "input", "text" and "file", in file order.  Raises ValueError on
    structural inconsistencies (unknown chunk tags, section-size mismatches)
    and IOError on truncated reads.  Progress is printed to stdout.
    """
    print("file =", arg)
    f = Parser(arg)
    # --- header ---
    expected_size = f.read_medium()
    version = f.read_byte()
    print("version =", version)
    if version == 16:
        # v16 files carry a fixed dummy header followed by the real one.
        assert(expected_size == 0x02d355)
        expected_size = f.read_medium()
        version = f.read_byte()
        exp_total_bytes = expected_size + 7
    elif version == 15:
        raise ValueError("bad header for version %r" % version)
    else:
        exp_total_bytes = expected_size + 3
    exp_links_bytes = 0
    if version not in {12, 13, 15, 16}:
        raise ValueError("unknown version %r" % version)
    page_size = f.read_coords()
    if version == 16:
        assert(f.read(2) in {b'\x00\x00', b'\xff\xff'})
    else:
        assert(f.read(5) == b'S\x00\x00\xff\xff')
    page_title = f.read_string()
    f.read_blob() # 'C\x10\x10...' on v15, nil elsewhere
    f.url_base = f.read_string()
    page_url = f.read_url()
    yield {"_type": "head",
           "title": page_title,
           "url": page_url,
           "dimensions": page_size}
    if version >= 15:
        f.read(6)
    else:
        f.read_short()
        f.read_medium()
    # metadata section
    # NOTE: the "if None:" arms below never fire; they only let every real
    # case be a uniform "elif".
    while True:
        print("--- metadata [%d] ---" % f.fh.tell())
        type = f.read(1)
        if None:
            pass
        elif type == b"C":
            if version >= 15:
                f.read(23)
            else:
                raise ValueError("unhandled metadata chunk %r/v%r" % (type, version))
        elif type == b"M":
            f.read(2)
            f.read_blob()
        elif type == b"S":
            # "S" both terminates the metadata section and carries the size
            # of the link sub-section that follows.
            exp_links_bytes = f.read_medium()
            break
        else:
            raise ValueError("unknown metadata chunk %r" % type)
    print("section 1 ends at %d" % f.fh.tell())
    # link sub-section
    links_start = f.fh.tell()
    links_end = f.fh.tell() + exp_links_bytes
    while f.fh.tell() < links_end:
        print("--- links [%d] ---" % f.fh.tell())
        type = f.read(1)
        if None:
            pass
        elif type == b"\x00":
            # <option> selections
            f.read(1)
            n = f.read_byte()
            options = []
            for j in range(n):
                opt_val = f.read_string()
                opt_text = f.read_string()
                options.append((opt_val, opt_text))
            yield {"_type": "option_list",
                   "data": options}
        elif type in {b"i", b"L", b"P", b"w", b"W"}:
            # shared 'region' format
            n = f.read_byte()
            boxes = []
            for j in range(n):
                pos = f.read_coords()
                size = f.read_coords()
                boxes.append((pos, size))
            # Field order of URL vs. type string differs per version.
            if version >= 15:
                link_url = f.read_url()
                assert(f.read(2) == b"\x01t")
                link_type = f.read_string()
            elif version == 13:
                link_url = f.read_url()
                f.read(2)
                link_type = f.read_string()
            elif version == 12:
                link_type = f.read_string()
                link_url = f.read_url()
            if type == b"i":
                # Image links: skip tiny (<=16px) hit regions.
                for pos, size in boxes:
                    if size[0] > 16 and size[1] > 16:
                        yield {"_type": "link",
                               "kind": "image",
                               "href": link_url,
                               "type": link_type,
                               "pos": pos,
                               "size": size}
            else:
                # "b:" URLs look like internal bookkeeping links; skipped.
                if not link_url.startswith("b:"):
                    for pos, size in boxes:
                        yield {"_type": "link",
                               "href": link_url,
                               "type": link_type,
                               "pos": pos,
                               "size": size}
        elif type in {b"C", b"I", b"N", b"S"} and version >= 15:
            # shared 'region' format
            n = f.read_byte()
            for j in range(n):
                pos = f.read_coords()
                size = f.read_coords()
                f.read_blob()
                assert(f.read(2) == b"\x01t")
                f.read_blob()
        elif type in {b"S"} and version == 13:
            # shared 'region' format
            n = f.read_byte()
            for j in range(n):
                pos = f.read_coords()
                size = f.read_coords()
                f.read_blob()
                f.read(2)
                f.read_blob()
        elif type == b"C" and version == 12:
            f.read(24)
        elif type in {b"I", b"S"} and version == 12:
            # shared 'region' format
            n = f.read_byte()
            for j in range(n):
                pos = f.read_coords()
                size = f.read_coords()
                f.read_blob()
                f.read_blob()
        else:
            raise ValueError("unknown link chunk %r/v%r" % (type, version))
    print("section 2 ends at %d" % f.fh.tell())
    if f.fh.tell() != links_end:
        raise ValueError("link section ended at %d, expected %d" % (f.fh.tell(), links_end))
    # content section
    content_start = f.fh.tell()
    content_end = exp_total_bytes
    while f.fh.tell() < content_end:
        print("--- content [%d] ---" % f.fh.tell())
        type = f.read(1)
        if None:
            pass
        elif type == b"o":
            if version >= 15:
                # TODO: not sure if actual type or just part of preceding "I"-block
                f.read_blob()
            else:
                raise ValueError("unhandled content chunk %r/v%r" % (type, version))
        elif type == b"B":
            # Filled rectangle.
            if version >= 15:
                pos = f.read_coords(rel_to_abs=True)
                size = f.read_coords()
                color = f.read_color()
            else:
                pos = f.read_coords()
                size = f.read_coords()
                color = f.read_color()
            yield {"_type": "box",
                   "pos": pos,
                   "size": size,
                   "fill": color}
        elif type == b"F":
            # Form field.
            if version >= 15:
                pos = f.read_coords(rel_to_abs=True)
                size = f.read_coords()
                color = f.read_color()
            else:
                pos = f.read_coords()
                size = f.read_coords()
                color = f.read_color()
            field_type = f.read(1)
            f.read(1)
            field_id = f.read_string()
            field_value = f.read_string()
            if version >= 15:
                f.read(5)
            else:
                f.read(3)
            if field_type in {b"c", b"r"}:
                # hack
                pos = (pos[0] - 8, pos[1] - 8)
                size = (size[0] + 8, size[1] + 8)
            yield {"_type": "input",
                   "kind": {
                        b"a": "textarea",
                        b"c": "checkbox",
                        b"r": "radio",
                        b"s": "select",
                        b"x": "text",
                   }.get(field_type),
                   "value": field_value,
                   "pos": pos,
                   "size": size,
                   "color": color}
        elif type == b"I":
            # Image placeholder box; "blob" is an offset into the files
            # sub-section (0 when the file stores no address).
            addr = 0
            if version == 16:
                pos = f.read_coords(rel_to_abs=True)
                size = f.read_coords()
                color = f.read_color()
                addr = f.read_medium()
                f.read(2)
                assert(len(f.read_blob()) == 4)
            elif version == 15:
                pos = f.read_coords(rel_to_abs=True)
                size = f.read_coords()
                color = f.read_color()
                f.read(14)
            else:
                pos = f.read_coords()
                size = f.read_coords()
                color = f.read_color()
                f.read(3)
                addr = f.read_medium()
            yield {"_type": "box",
                   "kind": "image",
                   "pos": pos,
                   "size": size,
                   "fill": color,
                   "blob": addr}
        elif type == b"L":
            f.read(9)
        elif type == b"M":
            f.read(2)
            f.read_blob()
        elif type == b"S":
            # Embedded files (images), addressed by offset from files_start.
            exp_files_bytes = f.read_medium()
            files_start = f.fh.tell()
            files_end = f.fh.tell() + exp_files_bytes
            while f.fh.tell() < files_end:
                addr = f.fh.tell() - files_start
                buf = f.read_blob()
                yield {"_type": "file",
                       "addr": addr,
                       "data": buf}
            print("files started at %d, ends at %d" % (files_start, f.fh.tell()))
            if f.fh.tell() != files_end:
                raise ValueError("content.files section ended at %d, expected %d" % (f.fh.tell(), files_end))
        elif type == b"T":
            # Text run.
            if version == 16:
                pos = f.read_coords(rel_to_abs=True)
                size = f.read_coords()
                color = f.read_color()
                f.read(1)
                font = 4 | (f.read_byte() & 1)
                n = f.read_byte()
                for j in range(n):
                    print("text link", f.read(1), f.read_blob())
                text = f.read_string()
            elif version == 15:
                pos = f.read_coords(rel_to_abs=True)
                size = f.read_coords()
                color = f.read_color()
                font = f.read_byte()
                text = f.read_string()
            else:
                pos = f.read_coords()
                size = f.read_coords()
                color = f.read_color()
                font = f.read_byte()
                text = f.read_string()
            yield {"_type": "text",
                   "text": text,
                   "font": font,
                   "color": color,
                   "pos": pos,
                   "size": size}
        elif type == b"z":
            if version == 16:
                f.read(6)
            else:
                raise ValueError("unhandled content chunk %r/v%r" % (type, version))
        else:
            raise ValueError("unknown content chunk %r/v%r" % (type, version))
    print("section 3 started at %d, ends at %d" % (content_start, f.fh.tell()))
    if f.fh.tell() != content_end:
        raise ValueError("content section ended at %d, expected %d" % (f.fh.tell(), content_end))
# OBML font code -> CSS font-size (low bit of the code is the bold flag and
# is masked off before lookup).
font_sizes = {
    0: "11px", # medium
    2: "12px", # large
    4: "13px", # extra large
    6: "10px", # small
}
# CLI driver: render every OBML file named on the command line to a
# standalone "<name>.html" next to it.
for arg in sys.argv[1:]:
    with open("%s.html" % arg, "w") as fout:
        # Counters for element ids; maps from image-blob address to the
        # div ids that should receive it as a background once the blob
        # appears later in the stream.
        id_alloc = iter(infinity())
        imgboxes = {}
        option_lists = {}
        n_option_lists = iter(infinity())
        n_select_fields = iter(infinity())
        fout.write('<!DOCTYPE html>\n')
        fout.write('<meta charset="utf-8">\n')
        fout.write('<style>\n'
                   'html {\n'
                   '    background: #ddd;\n'
                   '}\n'
                   '.item {\n'
                   '    position: absolute;\n'
                   '}\n'
                   '.body {\n'
                   '    background: white;\n'
                   '    z-index: -200;\n'
                   '}\n'
                   '.box {\n'
                   '    z-index: -100;\n'
                   '}\n'
                   '.img {\n'
                   '    z-index: -50;\n'
                   '}\n'
                   '.link {\n'
                   '    display: block;\n'
                   '    text-decoration: none;\n'
                   '    z-index: 100;\n'
                   '}\n'
                   '.link:hover {\n'
                   '    outline: 1px solid blue;\n'
                   '}\n'
                   '.imglink {\n'
                   '    color: gray;\n'
                   '    z-index: 150;\n'
                   '}\n'
                   '.text, .field {\n'
                   '    font-family: sans-serif;\n'
                   '    font-size: 11px;\n'
                   '    line-height: 1.1;\n'
                   '    white-space: pre;\n'
                   '}\n'
                   '.form {\n'
                   '    border: none;\n'
                   '    padding: none;\n'
                   '}\n'
                   '</style>\n')
        # Translate each parsed item into absolutely-positioned HTML.
        for item in parse_file(arg):
            type = item["_type"]
            pprint(item)
            if type == "head":
                fout.write('<!-- origin: %s -->\n' % item["url"])
                fout.write('<title>%s</title>\n' % item["title"])
                page_w, page_h = item["dimensions"]
                style = [
                    "left: %dpx" % 0,
                    "top: %dpx" % 0,
                    "width: %dpx" % page_w,
                    "height: %dpx" % page_h,
                ]
                style = "; ".join(style)
                fout.write('<div class="item body" style="%s"></div>' % style)
            elif type == "text":
                item_x, item_y = item["pos"]
                item_w, item_h = item["size"]
                # Split the font code into size (even part) and bold bit.
                font_size = item["font"] & ~1
                font_weight = item["font"] & 1
                style = [
                    "font-size: %s" % font_sizes[font_size],
                    "font-weight: %s" % ("bold" if font_weight else "normal"),
                    "color: %s" % rgba(item["color"]),
                    "left: %dpx" % item_x,
                    "top: %dpx" % item_y,
                    "width: %dpx" % item_w,
                    "height: %dpx" % item_h,
                ]
                style = "; ".join(style)
                fout.write('<div class="item text" style="%s">' % style)
                fout.write(item["text"])
                fout.write('</div>\n')
            elif type == "box":
                item_x, item_y = item["pos"]
                item_w, item_h = item["size"]
                style = [
                    "background-color: %s" % rgba(item["fill"]),
                    "left: %dpx" % item_x,
                    "top: %dpx" % item_y,
                    "width: %dpx" % item_w,
                    "height: %dpx" % item_h,
                ]
                style = "; ".join(style)
                if item.get("kind") == "image":
                    # Remember the div id so the later "file" item can patch
                    # in the actual image data via the emitted script.
                    box_id = "imgbox_%d" % next(id_alloc)
                    fout.write('<div class="item img" style="%s" id="%s"></div>\n' % (style, box_id))
                    imgboxes.setdefault(item["blob"], []).append(box_id)
                else:
                    fout.write('<div class="item box" style="%s"></div>\n' % style)
            elif type == "option_list":
                # Option lists arrive before their <select>; pair them up by
                # arrival order.
                list_id = next(n_option_lists)
                option_lists[list_id] = item["data"]
            elif type == "input":
                item_x, item_y = item["pos"]
                item_w, item_h = item["size"]
                style = [
                    "color: %s" % rgba(item["color"]),
                    "left: %dpx" % item_x,
                    "top: %dpx" % item_y,
                    "width: %dpx" % item_w,
                    "height: %dpx" % item_h,
                ]
                style = "; ".join(style)
                if item["kind"] == "textarea":
                    fout.write('<textarea class="item form field" style="%s">%s</textarea>\n' % (style, item["value"]))
                elif item["kind"] == "text":
                    fout.write('<input class="item form field" style="%s" type="text" value="%s">\n' % (style, item["value"]))
                elif item["kind"] in {"checkbox", "radio"}:
                    fout.write('<input class="item form" style="%s" type="%s" value="%s">\n' % (style, item["kind"], item["value"]))
                elif item["kind"] == "select":
                    list_id = next(n_select_fields)
                    fout.write('<select class="item field" style="%s">\n' % style)
                    for opt_id, opt_text in option_lists[list_id]:
                        fout.write('<option>%s</option>\n' % opt_text)
                    fout.write('</select>\n')
            elif type == "link":
                item_x, item_y = item["pos"]
                item_w, item_h = item["size"]
                if item.get("kind") == "image":
                    # Image links render as a small arrow marker, not a box.
                    klass = "link imglink"
                    style = [
                        "left: %dpx" % item_x,
                        "top: %dpx" % item_y,
                    ]
                    text = "↖"
                else:
                    klass = "link"
                    style = [
                        "left: %dpx" % item_x,
                        "top: %dpx" % item_y,
                        "width: %dpx" % item_w,
                        "height: %dpx" % item_h,
                    ]
                    text = ""
                style = "; ".join(style)
                fout.write('<a class="item %s" href="%s" style="%s">%s</a>\n' % (klass, item["href"], style, text))
            elif type == "file":
                # Patch the stored image into every box that referenced it.
                fout.write('<script>\n')
                box_ids = imgboxes.get(item["addr"])
                if box_ids:
                    fout.write('var bg = "%s";\n' % css_imgdata(item["data"]))
                    for box_id in box_ids:
                        fout.write('var div = document.getElementById("%s");\n' % box_id)
                        fout.write('div.style.backgroundImage = bg;\n')
                        fout.write('div.style.backgroundColor = "";\n')
                fout.write('</script>\n')
|
[
"grawity@gmail.com"
] |
grawity@gmail.com
|
|
cbdf192fb8a2fcc0055421712037209c26498bbf
|
5dd93a32e9803cbe29de37434d9bd1209f871b84
|
/majiang2/src/majiang2/table_state_processor/drop_card_processor.py
|
988070dbf1dce95b42ed3dff1a6d43e278470161
|
[] |
no_license
|
cnbcloud/mjserver
|
71e9448478d6b6c04e852fc74968b3b2cb75f51c
|
b5b08a85d49c3bed460255a62dc5201b998d88d4
|
refs/heads/master
| 2021-01-21T17:46:29.073368
| 2017-07-27T09:25:49
| 2017-07-27T09:25:49
| 98,517,509
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,846
|
py
|
# -*- coding=utf-8
'''
Created on 2016年9月23日
@author: zhaol
'''
from majiang2.table_state.state import MTableState
from majiang2.table_state_processor.processor import MProcessor
from freetime.util import log as ftlog
from majiang2.table_state_processor.extend_info import MTableStateExtendInfo
from majiang2.player.player import MPlayer
from majiang2.ai.play_mode import MPlayMode
from majiang2.table_tile.table_tile_factory import MTableTileFactory
from majiang2.table.run_mode import MRunMode
"""
出牌的操作处理
人数:
吃 - 一个人
碰 - 一个人
杠 - 一个人
胡 - 多个人。赛制上,有抢胡/一炮多响等2个玩法
情况:
一个人出牌,可能有人吃,有人碰,有人杠,有人胡
有的人同时可以吃/碰/杠/胡
流程:
1)是最高优先级且唯一,响应此操作,本轮状态重置,结束。
2)是最高优先级但不唯一,只可能是一炮多响,本人胡牌,继续等待其他人胡牌,取消其他状态。
3)不是最高优先级,非和,等待其他人响应结束。选取最高优先级执行。
"""
class MDropCardProcessor(MProcessor):
    """State processor for one player's discard and the other players' responses.

    Each other player may respond with chi / peng / gang / hu (win); the
    actual winner of the round is resolved by response priority.  Default
    handling: on timeout every outstanding response is treated as a pass.
    """
    def __init__(self, count, playMode):
        super(MDropCardProcessor, self).__init__()
        # One record per seat: pending action bitmask, optional extend info
        # (e.g. chi patterns), and a response flag (1 = still waiting).
        self.__processors = [{"state": MTableState.TABLE_STATE_NEXT, "extendInfo": None, "response": 0} for _ in range(count)]
        self.__count = count
        self.__tile = 0
        self.__cur_seat_id = 0
        self.__play_mode = playMode
    @property
    def count(self):
        return self.__count
    def setCount(self, count):
        self.__count = count
    def getTile(self):
        """Return the tile discarded this round."""
        return self.__tile
    @property
    def curSeatId(self):
        """Seat id of the player who discarded this round."""
        return self.__cur_seat_id
    def setCurSeatId(self, seatId):
        self.__cur_seat_id = seatId
    def getSeatsBySchedule(self):
        """Return the other seats, clockwise starting after the discarder."""
        seats = []
        for index in range(self.__count - 1):
            seats.append((self.curSeatId + index + 1) % self.__count)
        return seats
    def getAutoDecideSeatsBySchedule(self, trustTee):
        """Return, in turn order, the trusteeship seats whose action can be auto-decided."""
        seats = []
        for index in range(self.__count - 1):
            nextSeat = (self.curSeatId + index + 1) % self.__count
            if self.isUserAutoDecided(nextSeat, trustTee, self.getStateBySeatId(nextSeat), self.getResponseBySeatId(nextSeat) == 0):
                seats.append(nextSeat)
        if len(seats) > 0:
            ftlog.debug('MDropCardProcessor.getAutoDecideSeatsBySchedule seats:', seats)
        return seats
    def getStateBySeatId(self, seatId):
        """Return the pending action state for *seatId*."""
        return self.__processors[seatId]['state']
    def setStateBySeatId(self, seatId, state):
        """Set one seat's pending action state."""
        self.__processors[seatId]['state'] = state
    def getResponseBySeatId(self, seatId):
        """Return the response flag for *seatId* (1 = awaiting choice, 0 = answered)."""
        return self.__processors[seatId]['response']
    def getState(self):
        """Return the OR of every seat's pending state for this round."""
        state = 0
        for seat in range(self.__count):
            state = state | self.__processors[seat]['state']
        ftlog.debug( 'MDropCardProcessor.getState return:', state )
        return state
    @property
    def processors(self):
        return self.__processors
    def reset(self):
        """Reset all per-round data."""
        self.__processors = [{"state": 0, "extendInfo": None, "response": 0} for _ in range(self.__count)]
        self.__tile = 0
        self.__cur_seat_id = 0
        ftlog.debug('MDropCardProcessor.reset now processors:', self.__processors)
    def resetSeatId(self, seatId):
        """Reset one seat (that player passed on the tile)."""
        self.__processors[seatId] = {"state": 0, "extendInfo": None, "response": 0}
        if self.getState() == 0:
            # Nobody acts on the tile any more: record it among the
            # discarder's table (men) tiles.
            ftlog.debug('MDropCardProcessor.resetSeatId, seatId:', seatId
                        , ' choose cancel, put tile to men tiles:', self.__tile
                        , ' oldSeatId:', self.__cur_seat_id)
            self.tableTileMgr.setMenTileInfo(self.__tile, self.__cur_seat_id)
        ftlog.debug('MDropCardProcessor.resetSeatId now processors:', self.__processors)
        return True
    def resetSeatIdExceptWin(self, seatId):
        """Reset one seat's state, but keep a pending win (hu) if present."""
        if self.getStateBySeatId(seatId) & MTableState.TABLE_STATE_HU:
            self.setStateBySeatId(seatId, MTableState.TABLE_STATE_HU)
        else:
            self.resetSeatId(seatId)
    def initTile(self, tile, curSeat):
        """Record this round's discarded tile and discarder seat, for validation."""
        ftlog.debug( 'MDropCardProcessor.initTile:', tile )
        self.__tile = tile
        self.__cur_seat_id = curSeat
    @property
    def tile(self):
        return self.__tile
    def initProcessor(self, actionID, seatId, state, extendInfo = None, timeOut = 9, response = False):
        """Initialize one seat's processor for this round.

        Parameters
            seatId - seat number
            state  - bitmask of every action this seat may choose from
        """
        ftlog.debug('MDropCardProcessor.initProcessor seatId:', seatId, ' state:', state, ' extentdInfo:', extendInfo)
        if state == MTableState.TABLE_STATE_NEXT:
            return
        self.setActionID(actionID)
        self.__processors[seatId]['state'] = state
        self.__processors[seatId]['extendInfo'] = extendInfo
        # The player has not made a choice yet.
        self.__processors[seatId]['response'] = 1
        if response == True:
            self.__processors[seatId]['response'] = 0
        self.setTimeOut(timeOut)
        ftlog.debug('MDropCardProcessor.initProcessor end:', self.__processors)
    def getExtendResultBySeatId(self,seatId):
        """Return the extend info (e.g. gang-ting data) recorded for *seatId*."""
        extendInfo = self.__processors[seatId]['extendInfo']
        return extendInfo
    def updateDuoHu(self, actionId, seatIds, state):
        """Validate a multi-win update (one discard claimed by several winners)."""
        ftlog.debug('MDropCardProcessor.updateDuoHu actionId:', actionId
                    , ' seatIds:', seatIds
                    , ' state:', state)
        if len(seatIds) <= 1:
            ftlog.debug('MDropCardProcessor.updateDuoHu, only one player win, should not be here...')
            return False
        if actionId != self.actionID:
            ftlog.debug('MDropCardProcessor.updateDuoHu wrong actionId, do not process actionId:', actionId
                        , ' actionIdInProcessor:', self.actionID)
            return False
        if not self.allResponsed():
            ftlog.debug('MDropCardProcessor.updateDuoHu not all user responsed, do not process...')
            return False
        for seat in range(self.count):
            if seat in seatIds:
                continue
            # Any seat outside seatIds that could still win invalidates the update.
            if self.getStateBySeatId(seat) >= MTableState.TABLE_STATE_HU:
                ftlog.debug('MDropCardProcessor.updateDuoHu seatIds:', seatIds
                            , ' but seat:', seat
                            , ' also can win, not right...')
                return False
        return True
    def updateProcessor(self, actionID, seatId, state, tile, pattern = None):
        """Record a player's choice; a state of 0 means the player passed.

        Parameters
            state   - the action finally chosen (must be within the offered set)
            tile    - the tile acted on
            pattern - extra info (e.g. which chi pattern was picked)
        Returns
            True  - this choice has highest priority, execute it now
            False - not highest priority, keep waiting for other players
        """
        if actionID != self.actionID:
            # Not the tile currently being processed (stale / timed-out round).
            ftlog.debug( 'timeout dropcard processor update' )
            return False
        ftlog.debug('MDropCardProcessor.updateProcessor actionID:', actionID
                    , ' seatId:', seatId
                    , ' state:', state
                    , ' tile:', tile
                    , ' pattern:', pattern)
        userState = self.__processors[seatId]['state'] & state
        if userState == MTableState.TABLE_STATE_NEXT:
            return False
        userExtend = self.__processors[seatId]['extendInfo']
        userExtend.updateState(state, pattern)
        self.__processors[seatId]['state'] = state
        # The player has now made a choice.
        self.__processors[seatId]['response'] = 0
        # If this is the highest priority, return True so the table reacts;
        # otherwise keep waiting until everyone has chosen and then pick the
        # highest-priority response.
        ftlog.debug('MDropCardProcessor.updateProcessor end:', self.__processors)
        return self.isBiggestPriority(state, seatId)
    def isBiggestPriority(self, state, seatId):
        """Return True when *state* chosen by *seatId* outranks every other seat."""
        seats = self.getSeatsBySchedule()
        ftlog.debug('MDropCardProcessor.isBiggestPriority state:', state
                    , ' seatId:', seatId
                    , ' seats:', seats)
        # Passed-win case: the discarder himself may win only if nobody else
        # holds a higher-priority pending action.
        if (seatId == self.curSeatId):
            for seat in seats:
                if state < self.getStateBySeatId(seat):
                    ftlog.debug('MDropCardProcessor.isBiggestPriority check PASS_HU return False ...')
                    return False
            ftlog.debug('MDropCardProcessor.isBiggestPriority check PASS_HU return True ...')
            return True
        curIndex = seats.index(seatId)
        for index in range(len(seats)):
            seat = seats[index]
            if seat == seatId:
                continue
            comAction = self.__processors[seat]['state']
            if index < curIndex:
                # An upstream seat with a higher-priority action wins over us.
                if self.isBigger(comAction, state):
                    ftlog.debug('MDropCardProcessor.isBiggestPriority biggest1 curIndex :', curIndex
                                , ' state:', state
                                , ' index:', index
                                , ' state:', state)
                    return False
            else:
                # We must strictly outrank every downstream seat.
                if not self.isBigger(state, comAction):
                    ftlog.debug('MDropCardProcessor.isBiggestPriority biggest2 curIndex :', curIndex
                                , ' state:', state
                                , ' index:', index
                                , ' state:', state)
                    return False
            if comAction & MTableState.TABLE_STATE_HU and state & MTableState.TABLE_STATE_HU and self.tableTileMgr.canDuoHu():
                return False
        ftlog.debug('MDropCardProcessor.isBiggestPriority biggest at last...')
        return True
    def hasAutoDecideAction(self, curSeat, trustTeeSet):
        """Return the seat(s) whose trusteeship (auto-play) action should fire now.

        Algorithm:
        1) find the seats currently under trusteeship;
        2) apply the AI strategy for each of them;
        3) keep the highest-priority trusteeship action — if it is also the
           round's highest priority, execute it;
        4) otherwise wait for the non-trusteeship players to choose.
        """
        ftlog.debug('MDropCardProcessor.hasAutoDecideAction curSeat:', curSeat, ' processors:', self.__processors)
        if self.getState() == MTableState.TABLE_STATE_NEXT:
            return []
        seats = self.getAutoDecideSeatsBySchedule(trustTeeSet)
        ftlog.debug('MDropCardProcessor.hasAutoDecideAction seats:', seats)
        if len(seats) == 0:
            return []
        bestPlayer = None
        bestAction = None
        winSeats = []
        for seat in seats:
            if not bestPlayer:
                bestPlayer = self.players[seat]
                bestAction = self.getMyselfHighPriority(bestPlayer.curSeatId, trustTeeSet)
                ftlog.debug('MDropCardProcessor.hasAutoDecideAction firstSeat:', seat
                            , ' firstAction:', bestAction)
                continue
            secondPlayer = self.players[seat]
            secondAction = self.getMyselfHighPriority(secondPlayer.curSeatId, trustTeeSet)
            ftlog.debug('MDropCardProcessor.hasAutoDecideAction secondSeat:', seat
                        , ' secondAction:', secondAction)
            if secondAction == 0:
                continue
            if bestAction == 0:
                bestPlayer = secondPlayer
                bestAction = secondAction
                continue
            ftlog.debug('MDropCardProcessor.hasAutoDecideAction bestPlayer 1:', bestPlayer.curSeatId
                        , ' secodPlayer',secondPlayer.curSeatId,bestAction, secondAction)
            if self.isBigger(bestAction, secondAction):
                if self.isUserAutoDecided(secondPlayer.curSeatId
                                          , trustTeeSet
                                          , self.getStateBySeatId(secondPlayer.curSeatId)
                                          , self.getResponseBySeatId(secondPlayer.curSeatId) == 0):
                    self.resetSeatId(secondPlayer.curSeatId)
            else:
                # Both can win and the mode allows multi-win: keep both seats.
                if bestAction & MTableState.TABLE_STATE_HU and \
                    secondAction & MTableState.TABLE_STATE_HU and \
                    self.tableTileMgr.canDuoHu():
                    if bestPlayer.curSeatId not in winSeats:
                        winSeats.append(bestPlayer.curSeatId)
                    if secondPlayer.curSeatId not in winSeats:
                        winSeats.append(secondPlayer.curSeatId)
                    ftlog.debug('MDropCardProcessor.hasAutoDecideAction bestPlayer 2:', bestPlayer.curSeatId
                                , ' secodPlayer',secondPlayer.curSeatId,bestAction, secondAction)
                    continue
                rSeat = bestPlayer.curSeatId
                bestAction = secondAction
                bestPlayer = secondPlayer
                if self.isUserAutoDecided(bestPlayer.curSeatId
                                          , trustTeeSet
                                          , self.getStateBySeatId(bestPlayer.curSeatId)
                                          , self.getResponseBySeatId(bestPlayer.curSeatId) == 0):
                    self.resetSeatId(rSeat)
        ftlog.debug('MDropCardProcessor.hasAutoDecideAction bestPlayer 3:', bestPlayer.curSeatId
                    , ' secodPlayer',secondPlayer.curSeatId,bestAction, secondAction,len(winSeats))
        exInfo = self.__processors[bestPlayer.curSeatId]['extendInfo']
        ftlog.debug('MDropCardProcessor.hasAutoDecideAction seatId:', bestPlayer.curSeatId
                    , ' processor:', self.__processors[bestPlayer.curSeatId]
                    , ' extend:', exInfo.extend if exInfo else exInfo)
        if len(winSeats) > 0:
            # A multi-win fires only when every winner is auto-decidable.
            for winSeat in winSeats:
                if not self.isUserAutoDecided(winSeat
                                              , trustTeeSet
                                              , self.getStateBySeatId(winSeat)
                                              , self.getResponseBySeatId(winSeat) == 0):
                    return []
            return winSeats
        elif bestAction > 0:
            if self.isUserAutoDecided(bestPlayer.curSeatId
                                      , trustTeeSet
                                      , self.getStateBySeatId(bestPlayer.curSeatId)
                                      , self.getResponseBySeatId(bestPlayer.curSeatId) == 0):
                return [bestPlayer.curSeatId]
            return []
        else:
            return []
    def isBigger(self, p1, p2):
        """Return True when action bitmask *p1* has priority over *p2*.

        *p2* belongs to a seat reached clockwise from *p1*'s seat, so ties
        favor the earlier (upstream) seat — except when both can win and the
        play mode allows multiple winners.
        """
        ftlog.debug('MDropCardProcessor.isBigger p1:', p1
                    , ' p2:', p2
                    , ' canDuoHu:', self.tableTileMgr.canDuoHu())
        if (p1 & MTableState.TABLE_STATE_GRABTING) and (p2 & MTableState.TABLE_STATE_GRABTING):
            return True
        # Both seats can win: in multi-win mode neither outranks the other,
        # otherwise the upstream seat wins.
        if (p1 & MTableState.TABLE_STATE_HU) and (p2 & MTableState.TABLE_STATE_HU):
            if self.tableTileMgr.canDuoHu():
                return False
            else:
                return True
        if p1 == p2:
            return True
        return p1 > p2
    def getMyselfHighPriority(self, seatId, trustTeeSet):
        """Return this seat's single highest-priority action (auto-decide path).

        For a trusteeship seat, collapses the offered action set to its
        best option (hu > grabbing > gang > peng > chi > drop) and records
        the matching pattern in the seat's extend info.
        """
        if not self.isUserAutoDecided(seatId
                                      , trustTeeSet
                                      , self.getStateBySeatId(seatId)
                                      , self.getResponseBySeatId(seatId) == 0):
            return self.__processors[seatId]['state']
        userState = self.__processors[seatId]['state']
        if userState == 0:
            return userState
        newState = 0
        userExtend = self.__processors[seatId]['extendInfo']
        if userState & MTableState.TABLE_STATE_HU:
            pattern, newState = userExtend.getFirstPattern(MTableState.TABLE_STATE_HU)
            userExtend.updateState(newState, pattern)
        elif userState & MTableState.TABLE_STATE_GRABTING:
            pattern, newState = userExtend.getFirstPattern(MTableState.TABLE_STATE_GRABTING)
            ftlog.debug('getMyselfHighPriority pattern:', pattern, ' newState:', newState)
            userExtend.updateState(newState, pattern)
        elif userState & MTableState.TABLE_STATE_GANG:
            pattern, newState = userExtend.getFirstPattern(MTableState.TABLE_STATE_GANG)
            userExtend.updateState(newState, pattern)
        elif userState & MTableState.TABLE_STATE_PENG:
            pattern, newState = userExtend.getFirstPattern(MTableState.TABLE_STATE_PENG)
            userExtend.updateState(newState, pattern)
        elif userState & MTableState.TABLE_STATE_CHI:
            pattern, newState = userExtend.getFirstPattern(MTableState.TABLE_STATE_CHI)
            userExtend.updateState(newState, pattern)
        else:
            newState = MTableState.TABLE_STATE_DROP
        self.__processors[seatId]['state'] = newState
        ftlog.debug('After getMyselfHighPriority newState:', newState
                    , ' userExtend:', userExtend.extend
                    , ' procesor:', self.processors)
        return newState
    def allResponsed(self):
        """Return True once every seat has responded for this round."""
        response = 0
        for seat in range(self.__count):
            response += self.__processors[seat]['response']
        return 0 == response
def testHighPriority():
    # Exercise getMyselfHighPriority with a peng/pengTing extend payload.
    dp = MDropCardProcessor(4)
    exInfo = MTableStateExtendInfo()
    exInfo.setExtend({'peng': [[18, 18, 18]], 'pengTing': [{'tile': 18, 'ting': [{'winNodes': [{'winTile': 17, 'pattern': [[17, 18, 19], [11, 11]], 'winTileCount': 3}], 'dropTile': 28}], 'pattern': [18, 18, 18]}]})
    dp.initProcessor(10, 0, 28, exInfo, 9)
    dp.getMyselfHighPriority(0, -1)
def testDuoWinsProcessor():
"""
测试一炮多响时,多个和牌的状态更新
状态:
0号位出牌,1号位2号位两个人可和牌,能一炮多响
操作:
2号位先和牌
1号位再和牌
预期:
2号位胡牌结果先不确认
1号位胡牌的和牌结果确认
1/2号位同时胡牌
"""
dp = MDropCardProcessor(4,MPlayMode.LUOSIHU)
player3 = MPlayer('3', 1, 10003, 0)
player3.setSeatId(3)
player2 = MPlayer('2', 1, 10002, 0)
player2.setSeatId(2)
player1 = MPlayer('1', 1, 10001, 0)
player1.setSeatId(1)
player0 = MPlayer('0', 1, 10000, 0)
player0.setSeatId(0)
dp.players.append(player0)
dp.players.append(player1)
dp.players.append(player2)
dp.players.append(player3)
dp.setCurSeatId(0)
tableTileMgr = MTableTileFactory.getTableTileMgr(4, MPlayMode.LUOSIHU, MRunMode.CONSOLE)
dp.setTableTileMgr(tableTileMgr)
exInfoWin1 = MTableStateExtendInfo()
winInfo1 = {}
winInfo1['tile'] = 9
exInfoWin1.appendInfo(MTableState.TABLE_STATE_HU, winInfo1)
dp.initProcessor(19, 1, MTableState.TABLE_STATE_HU, exInfoWin1, 9)
exInfoWin2 = MTableStateExtendInfo()
winInfo2 = {}
winInfo2['tile'] = 9
exInfoWin2.appendInfo(MTableState.TABLE_STATE_HU, winInfo2)
dp.initProcessor(19, 2, MTableState.TABLE_STATE_HU, exInfoWin2, 9)
result2 = dp.updateProcessor(19, 2, MTableState.TABLE_STATE_HU, 9, None)
print 'result2:', result2
print dp.processors
autoSeatId = dp.hasAutoDecideAction(0, -1)
print 'autoSeatId2:', autoSeatId
result1 = dp.updateProcessor(19, 1, MTableState.TABLE_STATE_HU, 9, None)
print 'result1:', result1
autoSeatId = dp.hasAutoDecideAction(0, -1)
print 'autoSeatId1:', autoSeatId
wins = [1, 2]
result3 = dp.updateDuoHu(19, wins, MTableState.TABLE_STATE_HU)
print 'result3:', result3
print dp.processors
def testWinsProcessor():
"""
测试多个和牌的状态更新
状态:
0号位出牌,1号位2号位两个人可和牌,不能一炮多响
操作:
2号位先和牌
1号位再和牌
预期:
2号位胡牌结果先不确认
1号位胡牌的和牌结果确认
"""
dp = MDropCardProcessor(4,MPlayMode.LUOSIHU)
player3 = MPlayer('3', 1, 10003, 0)
player3.setSeatId(3)
player2 = MPlayer('2', 1, 10002, 0)
player2.setSeatId(2)
player1 = MPlayer('1', 1, 10001, 0)
player1.setSeatId(1)
player0 = MPlayer('0', 1, 10000, 0)
player0.setSeatId(0)
dp.players.append(player0)
dp.players.append(player1)
dp.players.append(player2)
dp.players.append(player3)
dp.setCurSeatId(0)
exInfoWin1 = MTableStateExtendInfo()
winInfo1 = {}
winInfo1['tile'] = 9
exInfoWin1.appendInfo(MTableState.TABLE_STATE_HU, winInfo1)
dp.initProcessor(19, 1, MTableState.TABLE_STATE_HU, exInfoWin1, 9)
exInfoWin2 = MTableStateExtendInfo()
winInfo2 = {}
winInfo2['tile'] = 9
exInfoWin2.appendInfo(MTableState.TABLE_STATE_HU, winInfo2)
dp.initProcessor(19, 2, MTableState.TABLE_STATE_HU, exInfoWin2, 9)
result2 = dp.updateProcessor(19, 2, MTableState.TABLE_STATE_HU, 9, None)
print 'result2:', result2
print dp.processors
result1 = dp.updateProcessor(19, 1, MTableState.TABLE_STATE_HU, 9, None)
print 'result1:', result1
print dp.processors
def testLouhuProcessor():
"""
测试漏和牌的状态更新
状态:
0号位出牌,1号位漏胡2号位和牌
操作:
1号位漏和牌
2号位和牌
预期:
1号位漏胡牌结果先不确认
2号位胡牌的和牌结果确认
"""
dp = MDropCardProcessor(4,MPlayMode.LUOSIHU)
player3 = MPlayer('3', 1, 10003, 0)
player3.setSeatId(3)
player2 = MPlayer('2', 1, 10002, 0)
player2.setSeatId(2)
player1 = MPlayer('1', 1, 10001, 0)
player1.setSeatId(1)
player0 = MPlayer('0', 1, 10000, 0)
player0.setSeatId(0)
dp.players.append(player0)
dp.players.append(player1)
dp.players.append(player2)
dp.players.append(player3)
dp.setCurSeatId(0)
exInfoWin1 = MTableStateExtendInfo()
winInfo1 = {}
exInfoWin1.appendInfo(MTableState.TABLE_STATE_PASS_HU, winInfo1)
dp.initProcessor(19, 1, MTableState.TABLE_STATE_PASS_HU, exInfoWin1, 9)
exInfoWin2 = MTableStateExtendInfo()
winInfo2 = {}
winInfo2['tile'] = 9
exInfoWin2.appendInfo(MTableState.TABLE_STATE_HU, winInfo2)
dp.initProcessor(19, 2, MTableState.TABLE_STATE_HU, exInfoWin2, 9)
result1 = dp.updateProcessor(19, 1, MTableState.TABLE_STATE_PASS_HU, 9, None)
print 'result1:', result1
print dp.processors
result2 = dp.updateProcessor(19, 2, MTableState.TABLE_STATE_HU, 9, None)
print 'result2:', result2
print dp.processors
def testChiTingWinsProcessor():
"""
测试多个和牌的状态更新
状态:
0号位出牌,1号位2号位两个人可和牌,不能一炮多响
操作:
2号位先和牌
1号位再和牌
预期:
2号位胡牌结果先不确认
1号位胡牌的和牌结果确认
"""
dp = MDropCardProcessor(4)
player3 = MPlayer('3', 1, 10003, 0)
player3.setSeatId(3)
player2 = MPlayer('2', 1, 10002, 0)
player2.setSeatId(2)
player1 = MPlayer('1', 1, 10001, 0)
player1.setSeatId(1)
player0 = MPlayer('0', 1, 10000, 0)
player0.setSeatId(0)
dp.players.append(player0)
dp.players.append(player1)
dp.players.append(player2)
dp.players.append(player3)
dp.setCurSeatId(1)
exInfoWin0 = MTableStateExtendInfo()
exInfoWin0.appendInfo(MTableState.TABLE_STATE_CHI & MTableState.TABLE_STATE_GRABTING, {"tile":14, "pattern":[12, 13, 14], "ting":{}})
dp.initProcessor(19, 0, MTableState.TABLE_STATE_CHI & MTableState.TABLE_STATE_GRABTING, exInfoWin0, 9)
exInfoWin2 = MTableStateExtendInfo()
exInfoWin2.appendInfo(MTableState.TABLE_STATE_HU, {"tile": 14})
dp.initProcessor(19, 2, MTableState.TABLE_STATE_HU, exInfoWin2, 9)
exInfoChi3 = MTableStateExtendInfo()
exInfoChi3.appendInfo(MTableState.TABLE_STATE_CHI, {"tile":14, " pattern":[12, 13, 14]})
dp.initProcessor(19, 3, MTableState.TABLE_STATE_CHI, exInfoChi3, 9)
result3 = dp.updateProcessor(19, 2, MTableState.TABLE_STATE_CHI, 9, [12, 13, 14])
print 'result3:', result3
print dp.processors
result0 = dp.updateProcessor(19, 0, MTableState.TABLE_STATE_CHI & MTableState.TABLE_STATE_GRABTING, 9, [12, 13, 14])
print 'result0:', result0
dp.resetSeatId(2)
print dp.processors
print dp.hasAutoDecideAction(1, -1)
def testNormalUpdateProcessor():
"""
测试吃碰杠的状态更新
状态:
同时可吃可碰
操作:
吃先同意
碰后取消
预期结果:
自动响应吃
"""
dp = MDropCardProcessor(4)
player3 = MPlayer('3', 1, 10003, 0)
player3.setSeatId(3)
player2 = MPlayer('2', 1, 10002, 0)
player2.setSeatId(2)
player1 = MPlayer('1', 1, 10001, 0)
player1.setSeatId(1)
player0 = MPlayer('0', 1, 10000, 0)
player0.setSeatId(0)
dp.players.append(player0)
dp.players.append(player1)
dp.players.append(player2)
dp.players.append(player3)
dp.setCurSeatId(1)
exInfoChi = MTableStateExtendInfo()
exInfoChi.setExtend({"chi":[[1, 2, 3]]})
dp.initProcessor(19, 2, MTableState.TABLE_STATE_CHI, exInfoChi, 9)
exInfoPeng = MTableStateExtendInfo()
exInfoPeng.setExtend({"peng": [[3, 3, 3]]})
dp.initProcessor(19, 3, MTableState.TABLE_STATE_PENG, exInfoPeng, 9)
dp.updateProcessor(19, 2, MTableState.TABLE_STATE_CHI, 3, [1,2,3])
print dp.processors
dp.resetSeatId(3)
print dp.hasAutoDecideAction(1, -1)
def testBigger():
s1 = 65536
s2 = 4
tableTileMgr = MTableTileFactory.getTableTileMgr(4, MPlayMode.LUOSIHU, MRunMode.CONSOLE)
dp = MDropCardProcessor(4, MPlayMode.LUOSIHU)
dp.setTableTileMgr(tableTileMgr)
print dp.isBigger(s1, s2)
if __name__ == "__main__":
#testHighPriority()
#testNormalUpdateProcessor()
#testWinsProcessor()
# testBigger()
#testChiTingWinsProcessor()
# testLouhuProcessor()
testDuoWinsProcessor()
|
[
"gamedev@jundeMac-mini.local"
] |
gamedev@jundeMac-mini.local
|
d0f0de4e9b7f9434238949bfb24c6dd45a98a526
|
4a418036130cb63caa503719b4162cce9753459b
|
/tests/test_data_dir.py
|
931e02b73c4ecca5c319aedfbeedc7afc69d0def
|
[
"Apache-2.0"
] |
permissive
|
kssteven418/Q-ASR
|
89a7dac24d74556453e7b54b26289fd1466070c4
|
aa1ec2ef78fd7606f8f365dfe3e66691a0e48178
|
refs/heads/qasr
| 2023-08-05T15:43:42.493513
| 2021-10-11T20:06:53
| 2021-10-11T20:06:53
| 353,027,973
| 33
| 1
|
Apache-2.0
| 2021-03-30T17:33:26
| 2021-03-30T14:20:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import exists, join
import pytest
class TestDataDir:
    @pytest.mark.unit
    def test_test_data_dir(self, test_data_dir):
        """Dummy test showing how to use the ``test_data_dir`` fixture."""
        # test_data_dir contains the absolute path to nemo -> tests/.data
        assert exists(test_data_dir)
        assert exists(join(test_data_dir, "test_data.tar.gz"))
|
[
"noreply@github.com"
] |
kssteven418.noreply@github.com
|
4d508511b2d0c80e95d9287cc8c2a5d6ccdf657d
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/leetcode/LeetCode/1014 Best Sightseeing Pair.py
|
8673420a64436ca8315152ef43a71e1fdc5ce278
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
#!/usr/bin/python3
"""
Given an array A of positive integers, A[i] represents the value of the i-th
sightseeing spot, and two sightseeing spots i and j have distance j - i between
them.
The score of a pair (i < j) of sightseeing spots is (A[i] + A[j] + i - j) : the
sum of the values of the sightseeing spots, minus the distance between them.
Return the maximum score of a pair of sightseeing spots.
Example 1:
Input: [8,1,5,2,6]
Output: 11
Explanation: i = 0, j = 2, A[i] + A[j] + i - j = 8 + 5 + 0 - 2 = 11
Note:
2 <= A.length <= 50000
1 <= A[i] <= 1000
"""
from typing import List
class Solution:
    def maxScoreSightseeingPair(self, A: List[int]) -> int:
        """O(n) sweep attributing each pair's score to its right endpoint.

        Keep the best left-hand value seen so far; it loses 1 per step of
        distance, so decay it while scanning.
        """
        best = -float("inf")
        left_best = A[0]
        for value in A[1:]:
            candidate = left_best - 1 + value
            if candidate > best:
                best = candidate
            decayed = left_best - 1
            left_best = decayed if decayed > value else value
        return best

    def maxScoreSightseeingPair_error(self, A: List[int]) -> int:
        """Known-incorrect brute-force attempt (kept for reference).

        Pre-adjusts A[i] to A[i] - i and tracks a top-2, but the adjustment
        has no direction, so the answer can be wrong.
        """
        n = len(A)
        B = [A[i] - i for i in range(n)]
        m1, m2 = None, None
        for i in range(n):
            if m1 is None:
                m1 = i
            elif m2 is None:
                m2 = i
            elif B[i] + (i - m1) > B[m1]:
                m1 = i
            elif B[i] + (i - m2) > B[m2]:
                m2 = i
        return A[m2] + A[m1] - abs(m2 - m1)
if __name__ == "__main__":
assert Solution().maxScoreSightseeingPair([8,1,5,2,6]) == 11
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
4d6b3b1ee0d4efb7f0b25bddc8022cd46207de2b
|
bef3a54931d50d87f2773a2de5491e41b708601b
|
/python-day-1/find_num_in _list_prob16.py
|
1d0bfe19161fa49191bfc138e93f35baa459a1a7
|
[] |
no_license
|
karthikeyansa/python-placements-old
|
efd815db525dd15eb5f384c921b6b079678b7377
|
aa361a7b8e695d34f08ec7d6b067cc57ffdf358d
|
refs/heads/master
| 2020-12-03T18:21:55.994756
| 2020-01-02T17:27:47
| 2020-01-02T17:27:47
| 231,428,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
# Ask the user for an integer and report whether it appears in the fixed list.
l1 = [1, 5, 8, 3]
n = int(input("Enter a number: "))
print("TRUE" if n in l1 else "FALSE")
|
[
"karthikeyansa39@gmail.com"
] |
karthikeyansa39@gmail.com
|
81731503651177321f8bd9a41ec9e116500977b2
|
7d33bef8d46946bd3621a3bbd9607a53231b0b63
|
/DL/linear_regression_01_mpg.py
|
74dd3d28399731e6c2d2bfeec4752aa66c436427
|
[] |
no_license
|
encaion/Python_edu
|
a1d234d6a84f5cbefa4a84a8171e54c8a19a63cb
|
d3cee899955fff12d04648eb6128d9518ef1fcc4
|
refs/heads/master
| 2022-02-01T20:02:50.535883
| 2022-01-09T22:39:53
| 2022-01-09T22:39:53
| 164,076,683
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
df = pd.read_csv("auto_mpg.csv")
df.head(2)
df.isna().sum()
df2 = df.dropna()
df2.head(2)
set(df2["Origin"])
origin = df2.pop("Origin")
df2.head(2) # Origin이 빠짐
origin_dum = pd.get_dummies(origin, prefix = "class")
origin_dum.columns = ["USA", "Europe", "Japan"]
origin_dum.head(2)
df3 = pd.concat([df2, origin_dum], axis = 1)
df3.head(2)
train = df3.sample(frac = 0.8, random_state = 0)
test = df3.drop(train.index) # train 부분을 제외하고 입력
test.head()
train_stats = train.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
train_y = train.pop("MPG")
test_y = test.pop("MPG")
def norm(x):
    """Standardize *x* (z-score) using the training-set mean and std."""
    centered = x - train_stats["mean"]
    return centered / train_stats["std"]
# Normalize features with the training-set statistics.
normed_train_data = norm(train)
normed_test_data = norm(test)

# Two hidden ReLU layers feeding a single linear output (MPG regression).
model = keras.Sequential([layers.Dense(64, activation = "relu",
                                       input_shape = [len(train.columns)]),
                          layers.Dense(64, activation = "relu"),
                          layers.Dense(1)])

model.compile(loss="mse",
              optimizer = tf.keras.optimizers.RMSprop(learning_rate = 0.001),
              metrics=["mae", "mse"])
model.summary()

# BUG FIX: the label variable created above is `train_y`; `train_labels`
# was never defined and raised a NameError here.
history = model.fit(normed_train_data, train_y,
                    epochs=1000, validation_split = 0.2, verbose = 0)

df_hist = pd.DataFrame(history.history)
df_hist["epoch"] = history.epoch
df_hist.tail()
import matplotlib.pyplot as plt
def plot_history(history):
    """Plot train/validation MAE (top) and MSE (bottom) over epochs."""
    hist = pd.DataFrame(history.history)
    hist["epoch"] = history.epoch

    plt.figure(figsize=(8,12))
    # (subplot index, train metric, validation metric, y-axis limit)
    panels = [(1, "mae", "val_mae", 5), (2, "mse", "val_mse", 20)]
    for idx, train_key, val_key, ymax in panels:
        plt.subplot(2, 1, idx)
        plt.xlabel("Epoch")
        plt.plot(hist["epoch"], hist[train_key], label = "Train Error")
        plt.plot(hist["epoch"], hist[val_key], label = "Val Error")
        plt.ylim([0, ymax])
        plt.legend()
    plt.show()
plot_history(history)  # visualize the training curves recorded by model.fit
|
[
"noreply@github.com"
] |
encaion.noreply@github.com
|
787addd50367cdbdb7de9096164e023913e11434
|
8f437c9bd966b5b1badd3587f62699b1e4fdbf9d
|
/patchDeploy.py
|
bd77f3bb1f8a56f3e30b55cf1c51a6dd78b1cd0b
|
[] |
no_license
|
niski84/work
|
4aeec61514521351d5b3703d6dd711e456a5d289
|
8b5be787e0f61ed309f9b701ce1e3357b2b7b344
|
refs/heads/master
| 2021-06-20T03:58:34.718377
| 2017-04-11T02:05:57
| 2017-04-11T02:05:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,547
|
py
|
__author__ = 'Nick'
import subprocess
import time
import os.path
server=''
MacysNavAppWebVersion=''
MacysShopNServeVersion=''
def log():
    """Append a timestamped run marker to /tmp/patchlog.txt."""
    # `with` guarantees the handle is closed even if the write raises
    # (the original left the file open on error).
    with open('/tmp/patchlog.txt', 'a') as f:
        f.write('run at: ' + time.strftime("%I:%M:%S") + '\n')
def copyFile(src, dest):
if os.path.isfile(src):
cmd = "cp "+src+" "+dest
print cmd
subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().rstrip()
def setServer():
cmd = "hostname"
output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().rstrip()
if "nvapp" in output:
return 'navapp'
print server+" detected"
if "esu2v240" in output:
return 'legacy'
print server+" detected"
def getNavAppNodeRoots():
    """Resolve the deployed .war root directories on a navapp node.

    Sets the module-level globals MacysNavAppRoot and ShopNServeRoot by
    listing the installedApps directories and extracting the dotted
    version number embedded in each .war directory name.

    NOTE(review): MacysNavAppWebVersion / MacysShopNServeVersion shadow the
    module-level names of the same spelling (no `global` declaration), so
    the module-level copies stay '' -- confirm that is intended.
    """
    if server == 'navapp':
        global MacysNavAppRoot
        navAppBase = "/usr/WebSphere70/AppServer/profiles/storemacys_mngd/installedApps/storemacys/macys-navapp_cluster1.ear/"
        # Extract the version number from the directory listing.
        cmd = 'ls ' +navAppBase + '| egrep -o "([0-9]{1,}\.)+[0-9]{1,}"'
        MacysNavAppWebVersion = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().rstrip()
        MacysNavAppRoot=navAppBase+"MacysNavAppWeb-"+MacysNavAppWebVersion+".war/"
        print "the root of macysNavappweb is " + MacysNavAppRoot
        global ShopNServeRoot
        SNSbase = "/usr/WebSphere70/AppServer/profiles/storemacys_mngd/installedApps/storemacys/macys-shopapp_cluster1.ear/"
        cmd = 'ls '+SNSbase+' | egrep -o "([0-9]{1,}\.)+[0-9]{1,}"'
        MacysShopNServeVersion = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().rstrip()
        ShopNServeRoot = SNSbase + "MacysShopNServe.war-"+MacysShopNServeVersion+".war/"
        print "the root of ShopNServe is " + ShopNServeRoot
# Detect the host type once at import time and record the run.
server=setServer()
log()
# | egrep -o "([0-9]{1,}\.)+[0-9]{1,}"
# On legacy hosts the .war roots are fixed paths; copy the patched JSPs in place.
if server == 'legacy':
    print 'copying legacy files'
    legacyRoot="/usr/WebSphere70/AppServer/profiles/storemacys_mngd/installedApps/storemacys/"
    copyFile("/tmp/header.jsp", legacyRoot+"macys-store_cluster1.ear/macys.war/web20/global/header/header.jsp")
    copyFile("/tmp/responsive_header.jsp", legacyRoot+"macys-cache_cluster1.ear/macys.war/web20/global/header/responsive_header.jsp")
    copyFile("/tmp/responsive_header.jsp", legacyRoot+"macys-store_cluster1.ear/macys.war/web20/global/header/responsive_header.jsp")
    copyFile("/tmp/responsive_footer.jsp", legacyRoot+"macys-cache_cluster1.ear/macys.war/web20/global/footer/responsive_footer.jsp")
    copyFile("/tmp/responsive_footer.jsp", legacyRoot+"macys-store_cluster1.ear/macys.war/web20/global/footer/responsive_footer.jsp")
# On navapp hosts the .war roots are versioned, so resolve them first.
if server == 'navapp':
    print 'copying navapp files'
    getNavAppNodeRoots()
    copyFile("/tmp/faceted_navbar.jsp", MacysNavAppRoot+"web20/catalog/browse/faceted_navbar.jsp")
    copyFile("/tmp/header.jsp", MacysNavAppRoot+"web20/global/header/header.jsp")
    copyFile("/tmp/responsive_header.jsp", ShopNServeRoot+"/web20/global/header/responsive_header.jsp")
    copyFile("/tmp/responsive_header.jsp", MacysNavAppRoot+"web20/global/header/responsive_header.jsp")
    copyFile("/tmp/responsive_footer.jsp", ShopNServeRoot+"web20/global/footer/responsive_footer.jsp")
    copyFile("/tmp/responsive_footer.jsp", MacysNavAppRoot+"web20/global/footer/responsive_footer.jsp")
    copyFile("/tmp/responsive_base_script.jsp", ShopNServeRoot+"web20/global/tiles/responsive_base_script.jsp")
    copyFile("/tmp/responsive_base_script.jsp", MacysNavAppRoot+"web20/global/tiles/responsive_base_script.jsp")
|
[
"nskitch@griddynamics.com"
] |
nskitch@griddynamics.com
|
93978ead2ff574265ed458d87c10e1ea9e3b2210
|
f8bd2f69e9b57fbdcdc712881294598dcb786a30
|
/utils/__init__.py
|
4b5e53c0ec35958520f1827c62cf7ad442961a4c
|
[] |
no_license
|
gen1998/comp-framingham
|
9db3354c24ac868dc8053a5a894a834345aaa666
|
94e0d2689714ab42877e23b785bb30d45761709d
|
refs/heads/master
| 2023-04-03T21:49:14.812473
| 2021-04-15T15:29:04
| 2021-04-15T15:29:04
| 354,811,541
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
import pandas as pd
def load_datasets(feats):
    """Read the per-feature feather files and concatenate them column-wise.

    Returns (X_train, X_test) built from features/<name>_train.ftr and
    features/<name>_test.ftr for every name in *feats*.
    """
    X_train = pd.concat(
        [pd.read_feather(f'features/{f}_train.ftr') for f in feats],
        axis=1, sort=False)
    X_test = pd.concat(
        [pd.read_feather(f'features/{f}_test.ftr') for f in feats],
        axis=1, sort=False)
    return X_train, X_test
def load_target(target_name):
    """Return the *target_name* column of the training CSV as a Series."""
    frame = pd.read_csv('./data/input/train.csv')
    y_train = frame[target_name]
    print(f"y_train : {len(y_train)}")
    return y_train
|
[
"hamham37337@gmail.com"
] |
hamham37337@gmail.com
|
17ab93a49e789f2cc2b34c5368696f83e65e2b54
|
0bd7c1f7bf6da5ef92b9013e1d913140f0249dfa
|
/cecilia-python/greedy-thinking/PartitionLabels.py
|
f36801ff0611f92a70396f1cff574f0966948557
|
[] |
no_license
|
Cecilia520/algorithmic-learning-leetcode
|
f1fec1fae71c4cf7410122f5ce969e829f451308
|
32941ee052d0985a9569441d314378700ff4d225
|
refs/heads/master
| 2022-05-02T03:00:57.505672
| 2022-03-19T09:51:28
| 2022-03-19T09:51:28
| 229,673,810
| 7
| 1
| null | 2022-03-19T09:34:57
| 2019-12-23T04:04:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : PartitionLabels.py
@Contact : 70904372cecilia@gmail.com
@License : (C)Copyright 2019-2020
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/2/8 15:14 cecilia 1.0 分隔字符串使同种字符出现在一起
问题描述:
字符串S由小写字母组成,要求尽可能地将该字符串进行切分多个片段,同一个字母只会出现在其中一个片段。
返回一个表示每个字符串片段的长度的列表。
示例1:
输入: S = "ababcbacadefegdehijhklij"
输出: [9,7,8]
解释:
划分结果为 "ababcbaca", "defegde", "hijhklij"。
每个字母最多出现在一个片段中。
像 "ababcbacadefegde", "hijhklij" 的划分是错误的,因为划分的片段数较少。
注意:
S的长度在[1, 500]之间。
S只包含小写字母'a'到'z'。
"""
def partitionLabel(S):
    """Partition S into the maximum number of segments such that each
    letter appears in at most one segment.

    Greedy approach:
      1. Record the last index of every character.
      2. Sweep left to right, extending the current segment's end to the
         furthest last-index seen so far.
      3. When the sweep reaches that end, close the segment: record its
         length and its text, then start the next segment.

    Returns a pair (lengths, segments).
    Complexity: O(N) time, O(N) space.
    """
    last_index = {c: i for i, c in enumerate(S)}
    start = 0   # first index of the current segment
    end = 0     # furthest last-index seen inside the current segment
    lengths = []
    segments = []
    for i, c in enumerate(S):
        end = max(end, last_index[c])
        if end == i:
            lengths.append(i - start + 1)
            # BUG FIX: the slice previously stopped at `end`, dropping the
            # final character of every segment; it must include index `end`.
            segments.append(S[start:end + 1])
            start = i + 1
    return lengths, segments
if __name__ == '__main__':
    # Example from the problem statement.
    sample = "ababcbacadefegdehijhklij"
    print(partitionLabel(sample))
|
[
"cc15572018516@163.com"
] |
cc15572018516@163.com
|
b810aa905e16c61cadcf7cdb48eb042d8080ede0
|
53cdfe9275a32920674567da36090dcd0be3e38b
|
/tests/testers/test_global_two_stream_embedding_space_tester.py
|
ab650d79f8d3c3329e7d4e7e8bed03c6ced719d3
|
[
"MIT"
] |
permissive
|
vishalbelsare/pytorch-metric-learning
|
b1ea7e43884c447c480df938605b98149cc4d34b
|
786a8043f145f2954366271e00db1df230c22afa
|
refs/heads/master
| 2023-08-04T16:02:38.943772
| 2021-11-20T17:29:09
| 2021-11-20T17:29:09
| 238,865,329
| 0
| 0
|
MIT
| 2021-11-28T19:57:30
| 2020-02-07T07:20:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
import unittest
import numpy as np
import torch
from pytorch_metric_learning.testers import GlobalTwoStreamEmbeddingSpaceTester
from pytorch_metric_learning.utils import accuracy_calculator
from pytorch_metric_learning.utils import common_functions as c_f
class FakeDataset(torch.utils.data.Dataset):
    """Dataset yielding (anchor, positive, label) triples from parallel sequences."""

    def __init__(self, anchors, positives, labels):
        self.anchors = anchors
        self.positives = positives
        self.labels = labels

    def __len__(self):
        # Length is driven by the anchor sequence.
        return len(self.anchors)

    def __getitem__(self, idx):
        triple = (self.anchors[idx], self.positives[idx], self.labels[idx])
        return triple
class TestGlobalTwoStreamEmbeddingSpaceTester(unittest.TestCase):
    def test_global_two_stream_embedding_space_tester(self):
        """End-to-end check of GlobalTwoStreamEmbeddingSpaceTester on a toy angular dataset."""
        # Stream 1: points on the unit circle at the given angles.
        embedding_angles = [0, 10, 20, 30, 50, 60, 70, 80]
        embeddings1 = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles])
        # Stream 2: near-mirror angles so each anchor has a nearby positive.
        embedding_angles = [81, 71, 61, 31, 51, 21, 11, 1]
        embeddings2 = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles])
        # NOTE(review): 9 labels but only 8 embeddings -- FakeDataset's length
        # comes from the anchors, so the 9th label is never served; confirm intended.
        labels = torch.LongTensor([1, 2, 3, 4, 5, 6, 7, 8, 9])
        dataset_dict = {
            "train": FakeDataset(embeddings1, embeddings2, labels),
        }
        model = c_f.Identity()
        AC = accuracy_calculator.AccuracyCalculator(include=("precision_at_1",))
        tester = GlobalTwoStreamEmbeddingSpaceTester(
            accuracy_calculator=AC, dataloader_num_workers=0
        )
        all_accuracies = tester.test(dataset_dict, 0, model)
        self.assertTrue(
            np.isclose(all_accuracies["train"]["precision_at_1_level0"], 0.25)
        )

    # NOTE(review): decorated @classmethod but takes `self`; unittest's
    # tearDown is normally an instance method -- confirm the decorator is wanted.
    @classmethod
    def tearDown(self):
        torch.cuda.empty_cache()
if __name__ == "__main__":
unittest.main()
|
[
"tkm45@cornell.edu"
] |
tkm45@cornell.edu
|
c41f5ec088bb986fdca5b3ef41e8cdda600a0430
|
9a5e5536ecacbce585617ac991361484e086105e
|
/django/project07/movies/urls.py
|
89a7bb8ae0ee6f9b28b0d11e8ef4aefd33aed871
|
[] |
no_license
|
kingssafy/til
|
8bc938f9fc2525e2efb6025a587ec03536f8bf2a
|
50ecf62465dfa7db57711b1a6130cbaaed90af30
|
refs/heads/master
| 2020-04-16T10:03:50.568961
| 2019-04-23T11:55:34
| 2019-04-23T11:55:34
| 165,488,462
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
from django.urls import path
from . import views
# URL namespace: reverse these routes as 'movies:<name>'.
app_name = 'movies'

# Most specific patterns first; the bare '' index route comes last.
urlpatterns = [
    path('<int:movie_pk>/scores/<int:score_pk>/delete', views.delete_score, name="delete_score"),
    path('<int:movie_pk>/scores/new/', views.create_score, name="create_score"),
    path('<int:movie_pk>/delete/', views.movie_delete, name="movie_delete"),
    path('<int:movie_pk>/edit/', views.edit, name='edit'),
    path('<int:movie_pk>/', views.detail, name="detail"),
    path('', views.index, name="index"),
]
|
[
"inmak@INMAKs-MacBook-Air-2.local"
] |
inmak@INMAKs-MacBook-Air-2.local
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.