Dataset schema (⌀ marks a nullable column):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3-288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-112 |
| license_type | string | 2 classes |
| repo_name | string | length 5-115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128-8.19k |
| authors | list | length 1 |
| author_id | string | length 1-132 |
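Each record below follows this schema, with the full source file in the `content` cell. As a minimal sketch of how a dataset with this layout might be consumed (the dataset identifier below is a placeholder, not the real one), the Hugging Face `datasets` library can stream records and filter on the metadata columns:

```python
from datasets import load_dataset

# Placeholder identifier: substitute the dataset's real path on the Hub.
ds = load_dataset("org/python-code-corpus", split="train", streaming=True)

# Keep only permissively licensed files that are neither vendored nor generated.
for record in ds:
    if (record["license_type"] == "permissive"
            and not record["is_vendor"]
            and not record["is_generated"]):
        print(record["repo_name"], record["path"], record["length_bytes"])
        break
```

Streaming avoids downloading the whole corpus; `license_type`, `is_vendor`, and `is_generated` are the usual first filters for training-data curation.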
baa63fd29bd69bce20182d81ba8ea10fc20aeaef
|
55c24645dd63a1c41037dcfb9fb45bc7bcdea4be
|
/venv/lib/python3.7/site-packages/dotenv/__init__.py
|
d412cb7ab9ae9061a2a73220df6a6d545add030c
|
[] |
no_license
|
abdullah-nawaz/flask-boilerplate
|
7c42801a21ee3e6a647cc8a7d92e0285f8e86cad
|
01bc7fe1140e8ec613de4a38546a07ddfbdbd254
|
refs/heads/master
| 2022-12-02T05:06:08.297759
| 2020-06-24T21:36:32
| 2020-06-24T21:36:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
from .compat import IS_TYPE_CHECKING
from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv, dotenv_values
if IS_TYPE_CHECKING:
from typing import Any, Optional
def load_ipython_extension(ipython):
# type: (Any) -> None
from .ipython import load_ipython_extension
load_ipython_extension(ipython)
def get_cli_string(path=None, action=None, key=None, value=None, quote=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[str]) -> str
"""Returns a string suitable for running as a shell script.
Useful for converting arguments passed to a fabric task
to be passed to a `local` or `run` command.
"""
command = ["dotenv"]
if quote:
command.append("-q %s" % quote)
if path:
command.append("-f %s" % path)
if action:
command.append(action)
if key:
command.append(key)
if value:
if " " in value:
command.append('"%s"' % value)
else:
command.append(value)
return " ".join(command).strip()
__all__ = [
"get_cli_string",
"load_dotenv",
"dotenv_values",
"get_key",
"set_key",
"unset_key",
"find_dotenv",
"load_ipython_extension",
]
|
[
"muhammadabdullah@wanclouds.net"
] |
muhammadabdullah@wanclouds.net
|
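The `get_cli_string` helper in the record above only assembles a `dotenv` CLI invocation, quoting values that contain spaces. A small usage sketch (the `.env` path is illustrative):

```python
from dotenv import get_cli_string, load_dotenv

print(get_cli_string(path=".env", action="set", key="DEBUG", value="true"))
# -> dotenv -f .env set DEBUG true

print(get_cli_string(path=".env", action="set", key="MSG", value="hello world"))
# -> dotenv -f .env set MSG "hello world"   (values containing spaces get quoted)

load_dotenv()  # read key/value pairs from .env into os.environ
```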
3074414644d1dd7e52c820a7a85ceea77e7b7715
|
c137d7fb6eaa1c1900a63b8dae6b027176a98b6f
|
/MxShop/MxShop/settings.py
|
9006c19e5c0acd8bbeaca3301b28ae570a00c216
|
[] |
no_license
|
LasterSmithKim/vuedjango
|
22220414ad2f928f0a0df1a0e68c9083e90c1cc7
|
4a5b7fee4dd3f2d31255d7dc9188ea977a75db29
|
refs/heads/master
| 2022-12-10T19:52:25.014956
| 2019-12-23T16:23:01
| 2019-12-23T16:23:01
| 225,315,491
| 0
| 0
| null | 2022-11-22T04:52:05
| 2019-12-02T07:47:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,922
|
py
|
"""
Django settings for MxShop project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
sys.path.insert(0, os.path.join(BASE_DIR, 'extra_apps'))
import datetime
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x*wj-nly56z6q5_9c67tg-q6ma$(+c)sp4b!^2sqe-a_ak683w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'users.UserProfile'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'DjangoUeditor',
'users.apps.UsersConfig',
'goods.apps.GoodsConfig',
'trade.apps.TradeConfig',
'user_operation.apps.UserOperationConfig',
'xadmin',
'crispy_forms',
'rest_framework',
'reversion',
'django_filters',
'corsheaders',  # handles cross-origin (CORS) issues
'rest_framework.authtoken',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'MxShop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MxShop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'vuedjango',
'USER': 'vuedjango',
'PASSWORD': 'smith123',
'HOST': '192.168.56.101',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Custom user authentication backends
AUTHENTICATION_BACKENDS = [
'users.views.CustomBackend',
]
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
# Default static root (used by collectstatic)
STATIC_ROOT = os.path.join(BASE_DIR, "static_all")
# Paths for uploaded (media) files
MEDIA_URL="/media/"
MEDIA_ROOT=os.path.join(BASE_DIR, "media")
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
#'rest_framework.authentication.TokenAuthentication',  # token-based authentication
#'rest_framework_jwt.authentication.JSONWebTokenAuthentication',  # apply JWT authentication in the specific app's views, not globally
),
# API documentation schema
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema'
}
# Regular expression for mobile phone number validation
REGEX_MOBILE = r"^1[358]\d{9}$|^147\d{8}$|^176\d{8}$"
# SMS provider (ihuyi) API credentials
APIUSER = "********"
APIKEY = "********"
|
[
"kingone@yeah.net"
] |
kingone@yeah.net
|
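The commented-out JWT line in the `REST_FRAMEWORK` block above recommends applying JSON Web Token authentication per view rather than globally. A hedged sketch of what that looks like with `django-rest-framework-jwt` (the view name is hypothetical):

```python
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication

class UserFavView(APIView):
    # JWT auth scoped to this view only, as the settings comment recommends.
    authentication_classes = (JSONWebTokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated,)

    def get(self, request):
        return Response({"user": request.user.username})
```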
2275f0288c637137c69388bc3029ea244a9b7fb4
|
0b53826167ae4337a92360ef0a8b37f0a30e1aef
|
/plan_b.py
|
2e84b654e6fd23e18e9a743260ff6e065227906e
|
[] |
no_license
|
nyghtowl/Evergreen_Competition
|
712ac2c885e1622e12bce178e868c00aefd6fa2d
|
456c6342cab250f61e2c02ee9d1199864342d375
|
refs/heads/master
| 2021-01-20T00:41:23.141388
| 2014-07-19T21:48:55
| 2014-07-19T21:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,678
|
py
|
"""
beating the benchmark @StumbleUpon Evergreen Challenge
__author__ : Abhishek Thakur
"""
# -*- coding: utf-8 -*-
import numpy as np
from sklearn import metrics,preprocessing,cross_validation
from sklearn.feature_extraction.text import TfidfVectorizer
import sklearn.linear_model as lm
import pandas as p
loadData = lambda f: np.genfromtxt(open(f,'r'), delimiter=' ')
def main():
print "loading data.."
traindata = list(np.array(p.read_table('../data/train.tsv'))[:,2])
testdata = list(np.array(p.read_table('../data/test.tsv'))[:,2])
y = np.array(p.read_table('../data/train.tsv'))[:,-1]
tfv = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode',
analyzer='word',token_pattern=r'\w{1,}',ngram_range=(1, 2), use_idf=1,smooth_idf=1,sublinear_tf=1)
rd = lm.LogisticRegression(penalty='l2', dual=True, tol=0.0001,
C=1, fit_intercept=True, intercept_scaling=1.0,
class_weight=None, random_state=None)
X_all = traindata + testdata
lentrain = len(traindata)
print "fitting pipeline"
tfv.fit(X_all)
print "transforming data"
X_all = tfv.transform(X_all)
X = X_all[:lentrain]
X_test = X_all[lentrain:]
print "20 Fold CV Score: ", np.mean(cross_validation.cross_val_score(rd, X, y, cv=20, scoring='roc_auc'))
print "training on full data"
rd.fit(X,y)
pred = rd.predict_proba(X_test)[:,1]
testfile = p.read_csv('../data/test.tsv', sep="\t", na_values=['?'], index_col=1)
pred_df = p.DataFrame(pred, index=testfile.index, columns=['label'])
pred_df.to_csv('benchmark.csv')
print "submission file created.."
if __name__=="__main__":
main()
|
[
"warrick.melanie@gmail.com"
] |
warrick.melanie@gmail.com
|
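The script above is Python 2 and relies on `sklearn.cross_validation`, which was removed in scikit-learn 0.20. A rough modern equivalent of the same TF-IDF + logistic regression benchmark, assuming the same file layout; note the original fits the vectorizer on train and test combined, which a `Pipeline` under cross-validation deliberately does not:

```python
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline

train = pd.read_table("../data/train.tsv")
texts = train.iloc[:, 2].tolist()   # boilerplate text column
y = train.iloc[:, -1].astype(int)   # binary evergreen label

pipe = make_pipeline(
    TfidfVectorizer(min_df=3, ngram_range=(1, 2), sublinear_tf=True),
    LogisticRegression(penalty="l2", C=1.0, solver="liblinear", dual=True),
)
print("20 Fold CV Score:", cross_val_score(pipe, texts, y, cv=20, scoring="roc_auc").mean())
```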
5af284d0fe6592007bb1978462f2154dfd926980
|
9673935b03b25683b79e2e28a4584ebccd74cda9
|
/closed/QCT/code/dlrm/tensorrt/infer.py
|
ae0dbe685a2d63f1e731a74d0cc78e3da802a9ec
|
[
"Apache-2.0"
] |
permissive
|
wilderfield/inference_results_v0.7
|
7e737acec72ab0e79cf95a63987184f86c2cb0a2
|
d63bb28a2919c79b69460005e686688f3fa033f1
|
refs/heads/master
| 2023-01-03T03:24:28.608820
| 2020-10-29T18:08:46
| 2020-10-29T18:08:46
| 306,124,077
| 0
| 0
|
Apache-2.0
| 2020-10-21T19:15:17
| 2020-10-21T19:15:12
| null |
UTF-8
|
Python
| false
| false
| 4,431
|
py
|
#! /usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import ctypes
sys.path.insert(0, os.getcwd())
# The plugin .so file has to be loaded at global scope and before `import torch` to avoid cuda version mismatch.
DLRM_INTERACTIONS_PLUGIN_LIBRARY="build/plugins/DLRMInteractionsPlugin/libdlrminteractionsplugin.so"
if not os.path.isfile(DLRM_INTERACTIONS_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(DLRM_INTERACTIONS_PLUGIN_LIBRARY),
"Please build the DLRM Interactions plugin."
))
ctypes.CDLL(DLRM_INTERACTIONS_PLUGIN_LIBRARY)
DLRM_BOTTOM_MLP_PLUGIN_LIBRARY="build/plugins/DLRMBottomMLPPlugin/libdlrmbottommlpplugin.so"
if not os.path.isfile(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY),
"Please build the DLRM Bottom MLP plugin."
))
ctypes.CDLL(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY)
from code.common.runner import EngineRunner, get_input_format
from code.common import logging
import code.common.arguments as common_args
import json
import numpy as np
from sklearn.metrics import roc_auc_score
import tensorrt as trt
import torch
import time
def evaluate(ground_truths, predictions):
assert len(ground_truths) == len(predictions), "Number of ground truths differs from number of predictions"
return roc_auc_score(ground_truths, predictions)
def run_dlrm_accuracy(engine_file, batch_size, num_pairs=10000000, verbose=False):
if verbose:
logging.info("Running DLRM accuracy test with:")
logging.info(" engine_file: {:}".format(engine_file))
logging.info(" batch_size: {:}".format(batch_size))
logging.info(" num_pairs: {:}".format(num_pairs))
runner = EngineRunner(engine_file, verbose=verbose)
pair_dir = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "criteo", "full_recalib")
input_dtype, input_format = get_input_format(runner.engine)
if input_dtype == trt.DataType.FLOAT:
format_string = "fp32"
elif input_dtype == trt.DataType.HALF:
format_string = "fp16"
elif input_dtype == trt.DataType.INT8:
format_string = "int8"
if input_format == trt.TensorFormat.CHW4:
format_string += "_chw4"
else:
raise NotImplementedError("Unsupported DataType {:}".format(input_dtype))
numerical_inputs = np.load(os.path.join(pair_dir, "numeric_{:}.npy".format(format_string)))
categ_inputs = np.load(os.path.join(pair_dir, "categorical_int32.npy"))
predictions = []
refs = []
batch_idx = 0
for pair_idx in range(0, int(num_pairs), batch_size):
actual_batch_size = batch_size if pair_idx + batch_size <= num_pairs else num_pairs - pair_idx
numerical_input = np.ascontiguousarray(numerical_inputs[pair_idx:pair_idx + actual_batch_size])
categ_input = np.ascontiguousarray(categ_inputs[pair_idx:pair_idx + actual_batch_size])
start_time = time.time()
outputs = runner([numerical_input, categ_input], actual_batch_size)
if verbose:
logging.info("Batch {:d} (Size {:}) >> Inference time: {:f}".format(batch_idx, actual_batch_size, time.time() - start_time))
predictions.extend(outputs[0][:actual_batch_size])
batch_idx += 1
ground_truths = np.load(os.path.join(pair_dir, "ground_truth.npy"))[:num_pairs].tolist()
return evaluate(ground_truths, predictions)
def main():
args = common_args.parse_args(common_args.ACCURACY_ARGS)
logging.info("Running accuracy test...")
acc = run_dlrm_accuracy(args["engine_file"], args["batch_size"], args["num_samples"],
verbose=args["verbose"])
logging.info("Accuracy: {:}".format(acc))
if __name__ == "__main__":
main()
|
[
"guschmue@microsoft.com"
] |
guschmue@microsoft.com
|
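The accuracy loop in the record above uses a standard tail-batch pattern: step through the data in `batch_size` strides and shrink the final slice. Stripped of the TensorRT machinery, the same pattern in plain NumPy:

```python
import numpy as np

def iter_batches(array, batch_size):
    """Yield contiguous batches; the last one may be smaller."""
    for start in range(0, len(array), batch_size):
        yield np.ascontiguousarray(array[start:start + batch_size])

data = np.arange(10)
print([batch.tolist() for batch in iter_batches(data, 4)])
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
```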
497cc3f06a4891c34917d8b345fd83fd16cc7af6
|
5d0d3cfac10e144468cc25d948e7994c5f968fd0
|
/src/T2D23D.py
|
930dfe4b34f879e793ca67eeaa01c43a64b3839b
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
WYGNG/USTC_SSE_Project
|
1aff71631fd14dc26a0dd9190b76f97c5367d306
|
1c0cd4056f40445aed13ec1ae584608d625b9127
|
refs/heads/master
| 2022-12-26T13:53:48.543988
| 2020-09-27T08:02:08
| 2020-09-27T08:02:08
| 298,983,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,229
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import xlrd
import math
from scipy import optimize
# Compute the angle in degrees, with (x1, y1, z1) as the vertex
def get_angle1(x1,y1,z1,x2,y2,z2,x3,y3,z3):
a=math.sqrt((x2-x3)**2+(y2-y3)**2+(z2-z3)**2)
b=math.sqrt((x1-x3)**2+(y1-y3)**2+(z1-z3)**2)
c=math.sqrt((x2-x1)**2+(y2-y1)**2+(z2-z1)**2)
if c*b==0:
cosA=1
else:
cosA=(a**2-c**2-b**2)/(-2*c*b)
if cosA < -1.0:
cosA=-1.0
elif cosA>1.0:
cosA=1.0
A=math.acos(cosA)
deg=math.degrees(A)
return deg
# Definition of the body's 12 link segments
# L = [40, 34, 34, 29, 29, 58, 58, 40, 50, 50, 42, 42]
# Compute the initial scale factor from the 2D joint coordinates
def get_s(point,L):
s = []
s.append(math.sqrt((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / L[0])
s.append(math.sqrt((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / L[1])
s.append(math.sqrt((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / L[2])
s.append(math.sqrt((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / L[3])
s.append(math.sqrt((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / L[4])
s.append(math.sqrt((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / L[5])
s.append(math.sqrt((point[0] - point[12]) ** 2 + (point[1] - point[13]) ** 2) / L[6])
s.append(math.sqrt((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / L[7])
s.append(math.sqrt((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / L[8])
s.append(math.sqrt((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / L[9])
s.append(math.sqrt((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / L[10])
s.append(math.sqrt((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / L[11])
s_target = max(s)
#print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&",s_target)
return s_target
# Compute 3D joint coordinates from the 2D joint coordinates and the scale factor s
def get_point_3d(point, s_target,L):
z0 = 525 / s_target
point_3d = []
point_3d.append([point[22] / s_target, point[23] / s_target, z0]) # 0
dz11 = math.sqrt(
max(L[10] ** 2 - ((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / (s_target ** 2), 0))
if point[33]<point[35]:
dz11=-dz11
z14 = z0 + dz11
point_3d.append([point[18] / s_target, point[19] / s_target, z14]) # 1
dz9 = math.sqrt(max(L[8] ** 2 - ((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / (s_target ** 2), 0))
if point[31]<point[33]:
dz9=-dz9
z12 = z14 + dz9
point_3d.append([point[14] / s_target, point[15] / s_target, z12]) # 2
dz8 = math.sqrt(max(L[7] ** 2 - ((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / (s_target ** 2), 0))
if point[30]<point[31]:
dz8=-dz8
z11 = z12 + dz8
point_3d.append([point[12] / s_target, point[13] / s_target, z11]) # 3
dz10 = math.sqrt(
max(L[9] ** 2 - ((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / (s_target ** 2), 0))
if point[32]<point[30]:
dz10=-dz10
z13 = z11 + dz10
point_3d.append([point[16] / s_target, point[17] / s_target, z13]) # 4
dz12 = math.sqrt(
max(L[11] ** 2 - ((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / (s_target ** 2), 0))
if point[34]<point[32]:
dz12=-dz12
z15 = z13 + dz12
point_3d.append([point[20] / s_target, point[21] / s_target, z15]) # 5
dz6 = math.sqrt(max(L[5] ** 2 - ((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / (s_target ** 2), 0))
if point[25]<point[31]:
dz6=-dz6
z6 = z12 + dz6
point_3d.append([point[2] / s_target, point[3] / s_target, z6]) # 6
dz2 = math.sqrt(max(L[1] ** 2 - ((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / (s_target ** 2), 0))
if point[27]<point[25]:
dz2=-dz2
z8 = z6 + dz2
point_3d.append([point[6] / s_target, point[7] / s_target, z8]) # 7
dz4 = math.sqrt(max(L[3] ** 2 - ((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / (s_target ** 2), 0))
if point[29]<point[27]:
dz4=-dz4
z10 = z8 + dz4
point_3d.append([point[10] / s_target, point[11] / s_target, z10]) # 8
dz1 = math.sqrt(max(L[0] ** 2 - ((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / (s_target ** 2), 0))
if point[24]<point[25]:
dz1=-dz1
z5 = z6 + dz1
point_3d.append([point[0] / s_target, point[1] / s_target, z5]) # 9
dz3 = math.sqrt(max(L[2] ** 2 - ((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / (s_target ** 2), 0))
if point[26]<point[24]:
dz3=-dz3
z7 = z5 + dz3
point_3d.append([point[4] / s_target, point[5] / s_target, z7]) #
dz5 = math.sqrt(max(L[4] ** 2 - ((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / (s_target ** 2), 0))
if point[28]<point[26]:
dz5=-dz5
z9 = z7 + dz5
point_3d.append([point[8] / s_target, point[9] / s_target, z9]) # 11
return point_3d
# Objective function for single-frame optimization
def f(s, point, s_target,L):
dz1 = math.sqrt(max(L[0] ** 2 - ((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2) / (s_target ** 2), 0))
dz2 = math.sqrt(max(L[1] ** 2 - ((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2) / (s_target ** 2), 0))
dz3 = math.sqrt(max(L[2] ** 2 - ((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2) / (s_target ** 2), 0))
dz4 = math.sqrt(max(L[3] ** 2 - ((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2) / (s_target ** 2), 0))
dz5 = math.sqrt(max(L[4] ** 2 - ((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2) / (s_target ** 2), 0))
dz6 = math.sqrt(max(L[5] ** 2 - ((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2) / (s_target ** 2), 0))
dz8 = math.sqrt(max(L[7] ** 2 - ((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2) / (s_target ** 2), 0))
dz9 = math.sqrt(max(L[8] ** 2 - ((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2) / (s_target ** 2), 0))
dz10 = math.sqrt(
max(L[9] ** 2 - ((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2) / (s_target ** 2), 0))
dz11 = math.sqrt(
max(L[10] ** 2 - ((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2) / (s_target ** 2), 0))
dz12 = math.sqrt(
max(L[11] ** 2 - ((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2) / (s_target ** 2), 0))
y = 0
y += (s * math.sqrt(L[0] ** 2 - dz1 ** 2) - math.sqrt((point[0] - point[2]) ** 2 + (point[1] - point[3]) ** 2)) ** 2 +\
(s * math.sqrt(L[1] ** 2 - dz2 ** 2) - math.sqrt((point[2] - point[6]) ** 2 + (point[3] - point[7]) ** 2)) ** 2 +\
(s * math.sqrt(L[2] ** 2 - dz3 ** 2) - math.sqrt((point[0] - point[4]) ** 2 + (point[1] - point[5]) ** 2)) ** 2 +\
(s * math.sqrt(L[3] ** 2 - dz4 ** 2) - math.sqrt((point[6] - point[10]) ** 2 + (point[7] - point[11]) ** 2)) ** 2 +\
(s * math.sqrt(L[4] ** 2 - dz5 ** 2) - math.sqrt((point[4] - point[8]) ** 2 + (point[5] - point[9]) ** 2)) ** 2 +\
(s * math.sqrt(L[5] ** 2 - dz6 ** 2) - math.sqrt((point[2] - point[14]) ** 2 + (point[3] - point[15]) ** 2)) ** 2 +\
(s * math.sqrt(L[7] ** 2 - dz8 ** 2) - math.sqrt((point[12] - point[14]) ** 2 + (point[13] - point[15]) ** 2)) ** 2 +\
(s * math.sqrt(L[8] ** 2 - dz9 ** 2) - math.sqrt((point[14] - point[18]) ** 2 + (point[15] - point[19]) ** 2)) ** 2 +\
(s * math.sqrt(L[9] ** 2 - dz10 ** 2) - math.sqrt((point[12] - point[16]) ** 2 + (point[13] - point[17]) ** 2)) ** 2 +\
(s * math.sqrt(L[10] ** 2 - dz11 ** 2) - math.sqrt((point[18] - point[22]) ** 2 + (point[19] - point[23]) ** 2)) ** 2 +\
(s * math.sqrt(L[11] ** 2 - dz12 ** 2) - math.sqrt((point[16] - point[20]) ** 2 + (point[17] - point[21]) ** 2)) ** 2
# print("dz!!!!!!!!!!!!!!!!!!!!!!!",dz1,dz2,dz3,dz4,dz5,dz6,dz8,dz9,dz10,dz11,dz12)
# print("\n")
return y
# Objective function for multi-frame optimization
def f_s(s, begin, end,worksheet1, L):
z = 0
for i in range(end - begin + 1):
point = worksheet1.row_values(begin + i)
point.remove(point[0])
# s_target = get_s(point)
z += f(s[i], point, s[i], L)
return z
|
[
"321699849@qq.com"
] |
321699849@qq.com
|
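`get_angle1` above is the law of cosines, with `cosA` clamped to [-1, 1] so that `math.acos` never sees a value pushed outside its domain by floating-point error. A quick sanity check with a right angle at the vertex (assuming the file is importable as `T2D23D`):

```python
from T2D23D import get_angle1  # assumes the file above is on the import path

# Vertex at the origin; the other two points sit on the x- and y-axes,
# so the angle between them is exactly 90 degrees.
print(get_angle1(0, 0, 0, 1, 0, 0, 0, 1, 0))  # 90.0
```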
12f5298b94e2213e4c9b120eec42f9982a07c04b
|
96328f51c177bd53ca1d89199684af04008b0ba9
|
/wiki_graph/util.py
|
c48945b102b570f0b07df5e5f23f8b6aa844ca7a
|
[
"MIT"
] |
permissive
|
mvwicky/wiki-graph
|
b4045bf8200d579e99f9f58e77672d4dfac93c50
|
a88e2f37e7d5b5ba93bcca67544746d682936f41
|
refs/heads/master
| 2020-03-11T16:52:19.213832
| 2018-05-14T16:23:00
| 2018-05-14T16:23:00
| 130,130,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
import collections
import itertools
import sys
def rec_sizeof(o, handlers={}, verbose=False):
"""Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
"""
def dict_handler(d):
return itertools.chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
collections.deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter}
# user handlers take precedence
all_handlers.update(handlers)
# track which object id's have already been seen
seen = set()
# estimate sizeof object without __sizeof__
default_size = sys.getsizeof(0)
def sizeof(o):
# do not double count the same object
if id(o) in seen:
return 0
seen.add(id(o))
s = sys.getsizeof(o, default_size)
if verbose:
print(s, type(o), repr(o), file=sys.stderr)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
|
[
"mvanwickle@gmail.com"
] |
mvanwickle@gmail.com
|
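`rec_sizeof` above follows the familiar recursive-sizeof recipe: a handler table maps container types to iterators over their children, and a `seen` set prevents double-counting shared objects. A usage sketch, assuming the module is importable as `wiki_graph.util`:

```python
from wiki_graph.util import rec_sizeof  # assumes the package is importable

nested = {"a": [1, 2, 3], "b": ("x", "y"), "c": frozenset({1, 2})}
print(rec_sizeof(nested))  # total footprint in bytes, counting shared objects once

class Bag:
    def __init__(self, *items):
        self.items = list(items)

# A custom handler tells rec_sizeof how to reach a Bag's children.
print(rec_sizeof(Bag(1, "two", 3.0), handlers={Bag: lambda b: iter(b.items)}))
```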
98a07a8d052ae1d745b923459e18cd64001ebadb
|
d5a576c0b766124bd756922f818226d20867c6ef
|
/setup.py
|
a9c0a67377aef18bfcf1a11ca854655ec32fcda9
|
[
"CC0-1.0",
"BSD-3-Clause"
] |
permissive
|
fflewddur/python-phash
|
f6b32d4493858c2e9658d6b843dd816a5dcbfeb5
|
27152fd3c8b7a2cd032a33c25abeb423c582df65
|
refs/heads/master
| 2021-01-15T18:15:01.264905
| 2014-06-30T03:44:39
| 2014-06-30T03:44:39
| 21,335,530
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='phash',
version='0.1.0',
description='ctypes interface to libphash',
long_description=readme + '\n\n' + history,
author='Chris Adams',
author_email='chris@improbable.org',
url='https://github.com/acdha/python-phash',
packages=[
'phash',
],
package_dir={'phash': 'phash'},
scripts=['scripts/compare-images.py'],
include_package_data=True,
install_requires=[
'more-itertools',
],
license="BSD",
zip_safe=False,
keywords='phash',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
test_suite='tests',
)
|
[
"chris@improbable.org"
] |
chris@improbable.org
|
946673c401ad39509fd4681ff3e7bc0ff420ce49
|
3eacb8aa1e5e92dc354e61e3140009065257d643
|
/tests/test__order.py
|
0bb6ab3f4dddaf55af7934eafe1350d07e2e7f4b
|
[
"BSD-3-Clause"
] |
permissive
|
dask-image/dask-ndfilters
|
d0db13802185ad719b07752074e57bd8d8dd8529
|
3e947e791e2b3dd3a59de04b9cb70987a75f2446
|
refs/heads/master
| 2021-01-20T00:45:51.188492
| 2018-08-30T21:59:18
| 2018-08-30T21:59:18
| 89,183,177
| 5
| 2
|
BSD-3-Clause
| 2018-08-30T21:36:20
| 2017-04-24T00:56:22
|
Python
|
UTF-8
|
Python
| false
| false
| 5,669
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import pytest
import numpy as np
import scipy.ndimage.filters as sp_ndf
import dask
import dask.array as da
import dask.array.utils as dau
import dask_ndfilters as da_ndf
assert dask
@pytest.mark.parametrize(
"da_func, extra_kwargs",
[
(da_ndf.minimum_filter, {}),
(da_ndf.median_filter, {}),
(da_ndf.maximum_filter, {}),
(da_ndf.rank_filter, {"rank": 0}),
(da_ndf.percentile_filter, {"percentile": 0}),
]
)
@pytest.mark.parametrize(
"err_type, size, footprint, origin",
[
(RuntimeError, None, None, 0),
(TypeError, 1.0, None, 0),
(RuntimeError, (1,), None, 0),
(RuntimeError, [(1,)], None, 0),
(RuntimeError, 1, np.ones((1,)), 0),
(RuntimeError, None, np.ones((1,)), 0),
(RuntimeError, None, np.ones((1, 0)), 0),
(RuntimeError, 1, None, (0,)),
(RuntimeError, 1, None, [(0,)]),
(ValueError, 1, None, 1),
(TypeError, 1, None, 0.0),
(TypeError, 1, None, (0.0, 0.0)),
(TypeError, 1, None, 1+0j),
(TypeError, 1, None, (0+0j, 1+0j)),
]
)
def test_order_filter_params(da_func,
extra_kwargs,
err_type,
size,
footprint,
origin):
a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))
with pytest.raises(err_type):
da_func(d,
size=size,
footprint=footprint,
origin=origin,
**extra_kwargs)
@pytest.mark.parametrize(
"da_func, extra_kwargs",
[
(da_ndf.minimum_filter, {}),
(da_ndf.median_filter, {}),
(da_ndf.maximum_filter, {}),
(da_ndf.rank_filter, {"rank": 0}),
(da_ndf.percentile_filter, {"percentile": 0}),
]
)
def test_ordered_filter_shape_type(da_func,
extra_kwargs):
size = 1
a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))
assert all([(type(s) is int) for s in d.shape])
d2 = da_func(d, size=size, **extra_kwargs)
assert all([(type(s) is int) for s in d2.shape])
@pytest.mark.parametrize(
"sp_func, da_func, extra_kwargs",
[
(sp_ndf.minimum_filter, da_ndf.minimum_filter, {}),
(sp_ndf.median_filter, da_ndf.median_filter, {}),
(sp_ndf.maximum_filter, da_ndf.maximum_filter, {}),
(sp_ndf.rank_filter, da_ndf.rank_filter, {"rank": 0}),
(sp_ndf.percentile_filter, da_ndf.percentile_filter, {"percentile": 0}),
]
)
@pytest.mark.parametrize(
"size, footprint",
[
(1, None),
((1, 1), None),
(None, np.ones((1, 1))),
]
)
def test_ordered_filter_identity(sp_func,
da_func,
extra_kwargs,
size,
footprint):
a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))
dau.assert_eq(
d, da_func(d, size=size, footprint=footprint, **extra_kwargs)
)
dau.assert_eq(
sp_func(a, size=size, footprint=footprint, **extra_kwargs),
da_func(d, size=size, footprint=footprint, **extra_kwargs)
)
@pytest.mark.parametrize(
"da_func, kwargs",
[
(da_ndf.minimum_filter, {"size": 1}),
(da_ndf.median_filter, {"size": 1}),
(da_ndf.maximum_filter, {"size": 1}),
(da_ndf.rank_filter, {"size": 1, "rank": 0}),
(da_ndf.percentile_filter, {"size": 1, "percentile": 0}),
]
)
def test_order_comprehensions(da_func, kwargs):
np.random.seed(0)
a = np.random.random((3, 12, 14))
d = da.from_array(a, chunks=(3, 6, 7))
l2s = [da_func(d[i], **kwargs) for i in range(len(d))]
l2c = [da_func(d[i], **kwargs)[None] for i in range(len(d))]
dau.assert_eq(np.stack(l2s), da.stack(l2s))
dau.assert_eq(np.concatenate(l2c), da.concatenate(l2c))
@pytest.mark.parametrize(
"sp_func, da_func, extra_kwargs",
[
(sp_ndf.minimum_filter, da_ndf.minimum_filter, {}),
(sp_ndf.median_filter, da_ndf.median_filter, {}),
(sp_ndf.maximum_filter, da_ndf.maximum_filter, {}),
(sp_ndf.rank_filter, da_ndf.rank_filter, {"rank": 1}),
(sp_ndf.percentile_filter, da_ndf.percentile_filter, {"percentile": 10}),
]
)
@pytest.mark.parametrize(
"size, footprint, origin",
[
(2, None, 0),
(None, np.ones((2, 3)), 0),
(None, np.ones((2, 3)), (0, 1)),
(None, np.ones((2, 3)), (0, -1)),
(None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, 0),
(None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (1, 2)),
(None, (np.mgrid[-2: 2+1, -2: 2+1]**2).sum(axis=0) < 2.5**2, (-1, -2)),
(5, None, 0),
(7, None, 0),
(8, None, 0),
(10, None, 0),
(5, None, 2),
(5, None, -2),
]
)
def test_ordered_filter_compare(sp_func,
da_func,
extra_kwargs,
size,
footprint,
origin):
a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))
dau.assert_eq(
sp_func(
a, size=size, footprint=footprint, origin=origin, **extra_kwargs
),
da_func(
d, size=size, footprint=footprint, origin=origin, **extra_kwargs
)
)
|
[
"kirkhamj@janelia.hhmi.org"
] |
kirkhamj@janelia.hhmi.org
|
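The identity and comparison tests above encode the package's core contract: each `dask_ndfilters` filter matches the corresponding `scipy.ndimage.filters` output while operating lazily on chunked arrays. A minimal sketch of that contract outside pytest:

```python
import numpy as np
import dask.array as da
import scipy.ndimage.filters as sp_ndf
import dask_ndfilters as da_ndf

a = np.arange(140.0).reshape(10, 14)
d = da.from_array(a, chunks=(5, 7))

lazy = da_ndf.median_filter(d, size=5)  # builds a task graph; nothing computed yet
np.testing.assert_allclose(lazy.compute(), sp_ndf.median_filter(a, size=5))
```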
f8b4ee7a035e6157e5582a93bf49c3a9b6ad803d
|
f1e11f9e50d061d05d581efa8805ab28b25d7b24
|
/climi/pppp/hw_spatial_pattern_check___
|
c14cab5e65815e0ec1c3c88f260e677acf70a59b
|
[
"MIT"
] |
permissive
|
ahheo/climi
|
bdccb5116046dfbf0aa6e38e7400447e427eeae0
|
2043a23876a9e620d44b2e9dd34d487ee3e0fc4b
|
refs/heads/main
| 2023-07-09T01:24:17.514954
| 2023-07-07T05:13:16
| 2023-07-07T05:13:16
| 302,750,076
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
#!/usr/bin/env python3
from climi.uuuu import *
from climi.pppp import *
import os
import warnings
import matplotlib as mpl
mpl.use('pdf', force=True)
import matplotlib.pyplot as plt
import numpy as np
import iris
_here_ = get_path_(__file__)
sites = dict(
SE = (19, 45),
W1 = (7, 51.5),
W2 = (-1.5, 53),
W3 = (8, 60)
)
dataroot = '/nobackup/rossby22/sm_chali/DATA/hc/med/'
outdir = '/nobackup/rossby24/users/sm_chali/DATA/hw2018/fig/'
ffmt = 'thr_tx_{}_ALL_1989-2008_90.nc'
datasets = ['EOBS20', 'ERA-Interim']
colors = plt.get_cmap('tab10').colors
y0y1 = (1989, 2008)
pp = 95
_djn = os.path.join
def _get_data1(dataset):
if dataset == datasets[0]:
return extract_period_cube( iris.load_cube(
'/home/rossby/imports/obs/EOBS/EOBS20/orig/'
'tx_ens_mean_0.1deg_reg_v20.0e.nc'
), *y0y1)
elif dataset == datasets[1]:
return extract_period_cube( concat_cube_( iris.load(
'/nobackup/rossby22/sm_chali/DATA/'
'hw2018/iii/obs/ERAI/tasmax*'
)), *y0y1)
def main():
warnings.filterwarnings('ignore', category=UserWarning)
#data
for dataset in datasets:
fig, ax = plt.subplots(figsize=(7.5, 4), tight_layout=True)
#fig = init_fig_(fx=7.5, fy=4, l=.09, r=.98, t=.965, b=.15)
#ax = fig.add_subplot(1, 1, 1)
data0 = iris.load_cube(_djn(dataroot, ffmt.format(dataset)))
data1 = _get_data1(dataset)
for site, c in zip(sites.keys(), colors):
data0_ = nearest_point_cube(data0, *sites[site])
data1_ = nearest_point_cube(data1, *sites[site])
data1__ = doy_f_cube(
data1_,
np.nanpercentile, f_Args=(pp,),
ws=15,
mF=np.nan)
data1___ = np.diff(data1_.collapsed(
'time',
iris.analysis.PERCENTILE, percent=[25, 75]).data)
iqr_ = data1___[0]
data = data0_.copy((data1__ - data0_).data/data1___)
mjjas = np.sum(data.data[120:272])
rm_t_aux_cube(data)
iris.coord_categorisation.add_day_of_year(data, 'time', name='doy')
doy = data.coord('doy').points
ax.plot(doy, data.data,
color=c,
lw=1.5 if site == list(sites.keys())[0] else .75,
label='{} ({:.1f})'.format(site, mjjas))
ax.axvspan(120, 272, fc='0.8', alpha=.5, zorder=-1)
ax.set_xlabel('Day of year')
ax.set_ylabel('Normalized $T_{95th} - T_{90th}$')
ax.set_xlim([min(doy), max(doy)])
ax.set_ylim([0,.4])
ax.legend(frameon=False)
fn = _djn(outdir, '{}-90_{}_clm.pdf'.format(pp, dataset))
plt.savefig(fn, dpi=300)
plt.close(fig)
if __name__ == '__main__':
main()
|
[
"mapulynn@gmail.com"
] |
mapulynn@gmail.com
|
|
b4a8594c676f389559ad8b7b703c3698c7687ed2
|
7e157b3bffb170ff87e29880611be9b040d61a27
|
/purano/annotator/processors/tfidf.py
|
a941fe233977a850b27bd0b8b8c0de80ed1d253d
|
[
"Apache-2.0"
] |
permissive
|
IlyaGusev/purano
|
8906d5e35e8a4f0f31144d874721d4e9e5f76359
|
07234a55e8c80d1e9d8aeb8197c58e36dd26da54
|
refs/heads/master
| 2022-06-21T09:57:45.624005
| 2021-07-27T18:41:37
| 2021-07-27T18:41:37
| 212,185,927
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,588
|
py
|
from typing import List, Optional
import numpy as np
import torch
from purano.annotator.processors import Processor
from purano.models import Document
from purano.proto.info_pb2 import Info as InfoPb
from purano.training.models.tfidf import load_idfs, get_tfidf_vector, SVDEmbedder
@Processor.register("tfidf")
class TfIdfProcessor(Processor):
def __init__(
self,
idfs_vocabulary: str,
svd_torch_model_path: str
):
word2idf, word2idx = load_idfs(idfs_vocabulary)
self.word2idf = word2idf
self.word2idx = word2idx
self.svd_torch_model = None # type: Optional[SVDEmbedder]
if svd_torch_model_path:
self.svd_torch_model = torch.load(svd_torch_model_path)
def __call__(
self,
docs: List[Document],
infos: List[InfoPb],
input_fields: List[str],
output_field: str,
):
embeddings = np.zeros((len(docs), len(self.word2idf)), dtype=np.float32)
for doc_num, (doc, info) in enumerate(zip(docs, infos)):
text = " ".join([getattr(doc, field) for field in input_fields])
data, indices = get_tfidf_vector(text, self.word2idf, self.word2idx)
for index, value in zip(indices, data):
embeddings[doc_num][index] = value
final_embeddings = embeddings
if self.svd_torch_model:
final_embeddings = self.svd_torch_model(torch.FloatTensor(final_embeddings))
for doc_num, info in enumerate(infos):
getattr(info, output_field).extend(final_embeddings[doc_num])
|
[
"phoenixilya@gmail.com"
] |
phoenixilya@gmail.com
|
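`get_tfidf_vector` is imported from `purano.training.models.tfidf` and not shown in the record; judging from how its output is consumed above (parallel `data`/`indices` arrays scattered into a dense row), a plausible sketch of such a helper is below. The tokenization and weighting details are assumptions, not the project's actual implementation:

```python
from collections import Counter
from typing import Dict, List, Tuple

def tfidf_vector(text: str,
                 word2idf: Dict[str, float],
                 word2idx: Dict[str, int]) -> Tuple[List[float], List[int]]:
    """Return parallel (data, indices) arrays for one document's tf-idf row."""
    counts = Counter(text.lower().split())
    total = sum(counts.values()) or 1
    data, indices = [], []
    for word, count in counts.items():
        if word in word2idx:
            data.append((count / total) * word2idf[word])
            indices.append(word2idx[word])
    return data, indices
```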
0d339371d555fa7f40da404b5092acbd841c381b
|
08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2
|
/kubernetes/test/test_v1_namespace_list.py
|
eecef48f123f70650b3c855d6413abfa52021b87
|
[
"Apache-2.0"
] |
permissive
|
ex3cv/client-python
|
5c6ee93dff2424828d064b5a2cdbed3f80b74868
|
2c0bed9c4f653472289324914a8f0ad4cbb3a1cb
|
refs/heads/master
| 2021-07-12T13:37:26.049372
| 2017-10-16T20:19:01
| 2017-10-16T20:19:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_namespace_list import V1NamespaceList
class TestV1NamespaceList(unittest.TestCase):
""" V1NamespaceList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1NamespaceList(self):
"""
Test V1NamespaceList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_namespace_list.V1NamespaceList()
pass
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
4fa7a1d1958a21a196156bb7fb162e220b5a4c42
|
157d2a2f4031c58e5504bcbac5348ff53883facc
|
/rDj63/rDj63/urls.py
|
c4a78403fddc9db36bfb4f94aefa067e840b972c
|
[] |
no_license
|
optirg-39/Django_gekSh
|
d78b635fd3ee88addd084b68ec35c6284adfb55c
|
1129a6df35c110dfeeeaaf1a76b2ebc192a5f1ce
|
refs/heads/master
| 2023-04-15T13:09:03.067099
| 2021-04-26T12:15:35
| 2021-04-26T12:15:35
| 352,018,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
"""rDj63 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from enroll import views
urlpatterns = [
path('admin/', admin.site.urls),
path('sign_up/', views.sign_up, name = 'signin1'),
path('log_in/', views.log_in, name = 'login1'),
path('user_profile/', views.user_profile, name = 'profile1'),
path('logout_user/', views.user_logout, name = 'logout1'),
]
|
[
"opti39rg@gmail.com"
] |
opti39rg@gmail.com
|
ba7c90fd43394d63878f354d584a396087cd06f9
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/R2/benchmark/startPyquil122.py
|
3fd855f9728d33e5489e4eb157beb0e6dabf2e1b
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
# qubit number=2
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=4
prog += CNOT(0,1) # number=7
prog += X(1) # number=8
prog += CNOT(0,1) # number=9
prog += X(1) # number=3
prog += CNOT(1,0) # number=5
prog += CNOT(1,0) # number=6
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('2q-qvm')  # the circuit acts on qubits 0 and 1, so a 2-qubit QVM is needed
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil122.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
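`summrise_results` in the record above hand-rolls a histogram over measured bitstrings; the idiomatic standard-library equivalent is `collections.Counter`:

```python
from collections import Counter

bitstrings = ["01", "10", "01", "11", "01"]
print(Counter(bitstrings))  # Counter({'01': 3, '10': 1, '11': 1})
```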
270375ee24a54029cec52788cd572a47ddcc8f30
|
0804c9f4d46e15a93a4322808b487eb00c28da95
|
/Chapter 10/BPApp/HiTech/engg/routes.py
|
0a37098872ecb6b648d051b9c3b2fa6c8071f0b3
|
[
"MIT"
] |
permissive
|
bpbpublications/Building-Web-Apps-with-Python-and-Flask
|
99b6e9312a41bea1ba6c6c5dce70f958b86ad768
|
4fbbe75fad9629f16ff5bf8bd603aa09dd04f9eb
|
refs/heads/main
| 2023-03-27T05:55:23.170813
| 2021-03-23T10:27:59
| 2021-03-23T10:27:59
| 339,637,598
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
from flask import Blueprint, render_template
engg=Blueprint('engg', __name__,template_folder='templates',static_folder='static')
@engg.route('/')
def index():
return render_template('engindex.html')
@engg.route('/courses')
def courses():
return '<h1>list of courses in Engineering</h1>'
@engg.route('/faculty')
def faculty():
return '<h1>list of Engineering faculty members</h1>'
@engg.route('/form')
def form():
return render_template('form.html')
from flask import Flask
app=Flask(__name__)
app.register_blueprint(engg)
if __name__=='__main__':
app.run(debug=True)
|
[
"41231825+bpbpublications@users.noreply.github.com"
] |
41231825+bpbpublications@users.noreply.github.com
|
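Once the blueprint above is registered, its view functions are addressable as `engg.<endpoint>`. A quick sketch of reversing those URLs against the `app` defined in the record:

```python
from flask import url_for

with app.test_request_context():  # app as defined in the record above
    print(url_for("engg.index"))    # /
    print(url_for("engg.courses"))  # /courses
    print(url_for("engg.faculty"))  # /faculty
```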
e62d95154c55d2a4f3766e12e5274d5f6283c5f7
|
213682d70d45739b8a4fd7c0fcf05437a0704c4d
|
/pipeline/ve/share/radical.pilot/examples/00_getting_started.py
|
f4a0df0d42b456b1edfef3b2a02e65281648c08e
|
[] |
no_license
|
ATLAS-Titan/misc
|
d272adfe13fcbbea1562ca98c718bc1465032421
|
0a20d158d0d9a95ef72b6a8d0bccbb68193e98c0
|
refs/heads/master
| 2020-12-24T06:31:10.696708
| 2017-07-18T14:55:41
| 2017-07-18T14:55:41
| 73,486,659
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,381
|
py
|
#!/usr/bin/env python
__copyright__ = 'Copyright 2013-2014, http://radical.rutgers.edu'
__license__ = 'MIT'
import os
import sys
import radical.pilot as rp
import radical.utils as ru
# ------------------------------------------------------------------------------
#
# READ the RADICAL-Pilot documentation: http://radicalpilot.readthedocs.org/
#
# ------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
if __name__ == '__main__':
# we use a reporter class for nicer output
report = ru.LogReporter(name='radical.pilot')
report.title('Getting Started (RP version %s)' % rp.version)
# use the resource specified as argument, fall back to localhost
if len(sys.argv) > 2: report.exit('Usage:\t%s [resource]\n\n' % sys.argv[0])
elif len(sys.argv) == 2: resource = sys.argv[1]
else : resource = 'local.localhost'
# Create a new session. No need to try/except this: if session creation
# fails, there is not much we can do anyways...
session = rp.Session()
# all other pilot code is now tried/excepted. If an exception is caught, we
# can rely on the session object to exist and be valid, and we can thus tear
# the whole RP stack down via a 'session.close()' call in the 'finally'
# clause...
try:
# read the config used for resource details
report.info('read config')
config = ru.read_json('%s/config.json' % os.path.dirname(os.path.abspath(__file__)))
report.ok('>>ok\n')
report.header('submit pilots')
# Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
pmgr = rp.PilotManager(session=session)
# Define an [n]-core local pilot that runs for [x] minutes
# Here we use a dict to initialize the description object
pd_init = {
'resource' : resource,
'runtime' : 15, # pilot runtime (min)
'exit_on_error' : True,
'project' : config[resource]['project'],
'queue' : config[resource]['queue'],
'access_schema' : config[resource]['schema'],
'cores' : config[resource]['cores'],
}
pdesc = rp.ComputePilotDescription(pd_init)
# Launch the pilot.
pilot = pmgr.submit_pilots(pdesc)
report.header('submit units')
# Register the ComputePilot in a UnitManager object.
umgr = rp.UnitManager(session=session)
umgr.add_pilots(pilot)
# Create a workload of ComputeUnits.
# Each compute unit runs '/bin/date'.
n = 128 # number of units to run
report.info('create %d unit description(s)\n\t' % n)
cuds = list()
for i in range(0, n):
# create a new CU description, and fill it.
# Here we don't use dict initialization.
cud = rp.ComputeUnitDescription()
cud.executable = '/bin/date'
cuds.append(cud)
report.progress()
report.ok('>>ok\n')
# Submit the previously created ComputeUnit descriptions to the
# PilotManager. This will trigger the selected scheduler to start
# assigning ComputeUnits to the ComputePilots.
umgr.submit_units(cuds)
# Wait for all compute units to reach a final state (DONE, CANCELED or FAILED).
report.header('gather results')
umgr.wait_units()
except Exception as e:
# Something unexpected happened in the pilot code above
report.error('caught Exception: %s\n' % e)
raise
except (KeyboardInterrupt, SystemExit) as e:
# the callback called sys.exit(), and we can here catch the
# corresponding KeyboardInterrupt exception for shutdown. We also catch
# SystemExit (which gets raised if the main threads exits for some other
# reason).
report.warn('exit requested\n')
finally:
# always clean up the session, no matter if we caught an exception or
# not. This will kill all remaining pilots.
report.header('finalize')
session.close(cleanup=False)
report.header()
#-------------------------------------------------------------------------------
|
[
"alessio.angius.research@gmail.com"
] |
alessio.angius.research@gmail.com
|
644fab70a5b3af9bb5dfa778fe89ab854f219ed8
|
63d3a6255f2677f9d92205d62163b9d22a74c5c7
|
/modules/dynadb/migrations/0058_auto_20161124_1741.py
|
89a757a370f4bc40d44bddae34790dc82e409e84
|
[
"Apache-2.0"
] |
permissive
|
GPCRmd/GPCRmd
|
9204f39b1bfbc800b13512b316e05e54ddd8af23
|
47d7a4e71025b70e15a0f752760873249932c54e
|
refs/heads/main
| 2023-09-04T11:13:44.285629
| 2023-08-29T13:43:01
| 2023-08-29T13:43:01
| 260,036,875
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-11-24 16:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dynadb', '0057_auto_20161116_2004'),
]
operations = [
migrations.AlterField(
model_name='dyndbsubmissionmolecule',
name='type',
field=models.SmallIntegerField(blank=True, choices=[(0, 'Orthosteric ligand'), (1, 'Allosteric ligand'), (2, 'Crystallographic waters, lipids or ions'), (3, 'Other')], default=0, null=True),
),
]
|
[
"adrian.garcia.recio@gmail.com"
] |
adrian.garcia.recio@gmail.com
|
03c962b553ec91380beb9b3231464bb8fae29bc0
|
fccb35b69307ae4848aeee484995100de624dedf
|
/toolbox/bulk_processing/invalid_address_processor.py
|
b00f886d424ecf8dbac31af077a0420d5a05d53a
|
[] |
no_license
|
ONSdigital/census-rm-toolbox
|
468d1cb8e901bc8ae87a693495b85080c9d85e9f
|
17eeae06f859091805e3fd3d5f8e620500af6741
|
refs/heads/master
| 2023-08-21T13:23:55.614636
| 2021-05-18T07:44:31
| 2021-05-18T07:44:31
| 207,325,373
| 0
| 1
| null | 2023-07-25T17:05:59
| 2019-09-09T14:12:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,827
|
py
|
import logging
import uuid
from datetime import datetime
from structlog import wrap_logger
from toolbox.bulk_processing.bulk_processor import BulkProcessor
from toolbox.bulk_processing.processor_interface import Processor
from toolbox.bulk_processing.validators import case_exists_by_id, is_uuid, max_length, mandatory
from toolbox.config import Config
from toolbox.logger import logger_initial_config
class InvalidAddressProcessor(Processor):
file_prefix = Config.BULK_INVALID_ADDRESS_FILE_PREFIX
routing_key = Config.INVALID_ADDRESS_EVENT_ROUTING_KEY
exchange = Config.EVENTS_EXCHANGE
bucket_name = Config.BULK_INVALID_ADDRESS_BUCKET_NAME
project_id = Config.BULK_INVALID_ADDRESS_PROJECT_ID
schema = {
"case_id": [is_uuid(), case_exists_by_id()],
"reason": [mandatory(), max_length(255)]
}
def build_event_messages(self, row):
address_resolution = "AR"
return [{
"event": {
"type": "ADDRESS_NOT_VALID",
"source": "RM_BULK_INVALID_ADDRESS_PROCESSOR",
"channel": address_resolution,
"dateTime": datetime.utcnow().isoformat() + 'Z',
"transactionId": str(uuid.uuid4())
},
"payload": {
"invalidAddress": {
"reason": row['reason'],
"collectionCase": {
"id": row['case_id']
}
}
}
}]
def main():
logger_initial_config()
logger = wrap_logger(logging.getLogger(__name__))
logger.info('Started bulk processing invalid addresses', app_log_level=Config.LOG_LEVEL,
environment=Config.ENVIRONMENT)
BulkProcessor(InvalidAddressProcessor()).run()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
ONSdigital.noreply@github.com
|
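The `schema` dict above maps CSV columns to validator instances produced by factories such as `max_length(255)`, which live in `toolbox.bulk_processing.validators` and are not shown. A plausible shape for one such factory (the signature and error handling are assumptions):

```python
def max_length(limit: int):
    """Factory returning a validator that rejects over-long values."""
    def validate(value, **kwargs):
        if len(value) > limit:
            raise ValueError(f"Value exceeds max length of {limit}")
    return validate

# Mirrors the "reason" column's validators in the schema above.
reason_validators = [max_length(255)]
for validator in reason_validators:
    validator("Demolished")  # passes silently; longer input would raise
```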
29a6f8130b12ecd4a5d0cc05a93f14e402d00970
|
fb8c10b50bd42db139330a8ed596e864bb8ae440
|
/Tkinter_GUI_Python/7941OT_8_code/8.03 validation mode demo.py
|
34ce49915abce5b3c1d9238126e6942966dd0de9
|
[] |
no_license
|
masb01/test
|
628c9bc4297e6c1b745503e297682258553d87cf
|
7e402d5744d4395ebd660f3c05044bf3df16ce7c
|
refs/heads/master
| 2020-03-18T20:59:38.142095
| 2018-06-10T15:15:16
| 2018-06-10T15:15:16
| 135,251,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
"""
Code illustration: 8.03
Validation Modes Demo
Tkinter GUI Application Development Hotshot
"""
import Tkinter as tk
class ValidateModeDemo():
def __init__(self):
self.root = tk.Tk()
vcmd = (self.root.register(self.validate), '%V')
# validate = none mode - will not call validate method ever.
tk.Label (text='None').pack()
tk.Entry(self.root, validate="none", validatecommand=vcmd).pack()
# validate = focus mode - will call validate method on focusin and focusout
tk.Label (text='Focus').pack()
tk.Entry(self.root, validate="focus", validatecommand=vcmd).pack()
# validate = focusin mode - will call validate method on focusin
tk.Label (text='Focusin').pack()
tk.Entry(self.root, validate="focusin", validatecommand=vcmd).pack()
# validate = focusout mode - will call validate method on focusout
tk.Label (text='Focus Out').pack()
tk.Entry(self.root, validate="focusout", validatecommand=vcmd).pack()
# validate = Key mode - will call validate method only when you type something or edit the entry
tk.Label (text='key').pack()
tk.Entry(self.root, validate="key", validatecommand=vcmd).pack()
# validate = all mode - will call validate method on focus and key events
tk.Label (text='all').pack()
tk.Entry(self.root, validate="all", validatecommand=vcmd).pack()
self.root.mainloop()
def validate(self, v):
print 'Called Just Now Via Mode %s' %v
# this is where you will validate your data and return True or False
# depending on whether the data is valid or not
# for now let us just return True for all cases.
return True
app = ValidateModeDemo()
|
[
"masb160119672gmail.com"
] |
masb160119672gmail.com
|
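The demo above is Python 2 (`Tkinter`, print statements) and registers only the `%V` substitution, which reports the validation mode. In Python 3 the same mechanism with `%P` (the prospective text) yields practical key validation; a minimal digits-only entry:

```python
import tkinter as tk

root = tk.Tk()

def only_digits(proposed):  # %P substitutes the text as it would be after the edit
    return proposed.isdigit() or proposed == ""

vcmd = (root.register(only_digits), "%P")
tk.Entry(root, validate="key", validatecommand=vcmd).pack()
root.mainloop()
```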
173c766edb003276de453cf1019773545a9f23ff
|
4832856f115ef30fb9f611e92d7e473d531f6c4d
|
/setup.py
|
19d11a618feed45d888db08e66dea88ba2d3197c
|
[
"Apache-2.0"
] |
permissive
|
nycto-hackerone/OWASP-Nettacker
|
9c2a227eaf5175ce42181650911ae795846637a3
|
33f7e4a53b4773c91be57bfb535baec3478ca85c
|
refs/heads/master
| 2021-01-24T00:04:33.864977
| 2018-02-24T11:34:45
| 2018-02-24T11:34:45
| 122,752,364
| 1
| 1
|
Apache-2.0
| 2018-02-24T15:26:53
| 2018-02-24T15:26:53
| null |
UTF-8
|
Python
| false
| false
| 2,347
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from setuptools import setup
from setuptools import find_packages
def package_files(directory):
"""
This function crawls the directory and finds files (non-Python files) using os.walk
Args:
directory: path to crawl
Returns:
list of package files in an array
"""
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
# read requirements from requirements.txt
requirements = open("requirements.txt").read().rsplit()
if int(sys.version_info[0]) == 2:
# add scapy requirement name for python 2.x
requirements.append("scapy")
else:
# add scapy requirement name for python 3.x
requirements.append("scapy-python3")
setup(
name="OWASP-Nettacker",
version='0.0.1',
description='OWASP Nettacker - Automated Penetration Testing Framework',
packages=find_packages(),
package_data={"": package_files("web") + ["../api/database.sqlite3"]}, # package files + database file
include_package_data=True,
install_requires=requirements,
url="https://github.com/viraintel/OWASP-Nettacker",
license="Apache-2.0",
author="Ali Razmjoo",
author_email="ali.razmjoo@owasp.org",
long_description="Automated Penetration Testing Framework - OWASP Nettacker project is created to"
" automate information gathering, vulnerability scanning and eventually generating"
" a report for networks, including services, bugs, vulnerabilities, misconfigurations,"
" and other information. This software will utilize TCP SYN, ACK, ICMP and many other"
" protocols in order to detect and bypass Firewall/IDS/IPS devices. By leveraging a"
" unique method in OWASP Nettacker for discovering protected services and devices such"
" as SCADA. It would make a competitive edge compared to other scanner making it one of"
" the bests.",
scripts=["scripts/nettacker.bat" if sys.platform == "win32" or sys.platform == "win64"
else "scripts/nettacker", "nettacker.py"] # script files for windows and other OS
)
|
[
"ali.razmjoo@owasp.org"
] |
ali.razmjoo@owasp.org
|
db9f504a531a4656b87987f9b9f0af2db5399ace
|
7896baeb297e131bab53cfbff712d1fd77bccede
|
/gombru/style_images_COCO.py
|
a633bfa3a8ce2a1b81682b5d1c45eaa1fa8824bc
|
[
"Apache-2.0"
] |
permissive
|
gombru/magenta_styleTransfer
|
599b85b24dd406a82df271bb769fe3dc1fa19f0b
|
bd41b0bf3bb18988653e4a355d95dac8632e814f
|
refs/heads/master
| 2020-04-11T23:11:47.133793
| 2019-02-12T12:12:50
| 2019-02-12T12:12:50
| 162,159,299
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,049
|
py
|
from magenta.models.image_stylization import image_stylization_transform
from PIL import Image
import os
import warnings
import random
warnings.filterwarnings("ignore")
results_path = "/home/Imatge/ssd2/ICDAR_2015_IndidentalSceneText/train/img_styled_icdar/"
num_styles = 96 # 9, 32, 34
# checkpoint = "/home/raulgomez/datasets/styleTransferMiro/models/multistyle-pastiche-generator-varied.ckpt"
# checkpoint = "/home/raulgomez/datasets/styleTransferMiro/models/miro"
checkpoint = "/home/Imatge/hd/datasets/styleTransferMiro/train/icdar"
# which_styles = []
# for i in range(num_styles): which_styles.append(i)
input_images_dir = "/home/Imatge/ssd2/ICDAR_2015_IndidentalSceneText/train/img/"
input_images = []
for file in os.listdir(input_images_dir): input_images.append(file.split('/')[-1])
# legible_ids_dir = "/home/Imatge/ssd2/COCO-Text/gt_COCO_format_legible/"
# legible_ids = []
# for file in os.listdir(legible_ids_dir): legible_ids.append(file.split('/')[-1].strip('.json'))
# final_ids = [id for id in input_images if id.strip('.jpg') in legible_ids]
# del input_images
# del legible_ids
# print("Number of images with legible text: " + str(len(final_ids)))
batch_size = 32
i=0
while True:
cur_styles = random.sample(range(0, 96), 4)
# cur_styles.remove(0)
# cur_styles.remove(6)
# cur_styles.remove(25)
print(" --> Starting batch from" + str(i) + " with styles " + str(cur_styles))
if i > len(input_images):
break
last_image = i + batch_size
if last_image > len(input_images):
last_image = len(input_images)
cur_input_images = input_images[i:last_image]
result_images = image_stylization_transform.multiple_input_images(checkpoint, num_styles, input_images_dir, cur_input_images, cur_styles)
for k, v in result_images.items():
v = v[0,:,:,:]
pil_image = Image.fromarray((v*255).astype('uint8'))
pil_image.save(results_path + k + '.png')
i+=batch_size
print(" --> " + str(i) + " out of " + str(len(input_images)))
print("DONE")
|
[
"raulgombru@gmail.com"
] |
raulgombru@gmail.com
|
42e9fb0e23fdf748d185ab5a38dff35ff23cb749
|
83048ab1abb6941ed0b19fb5e5ff4a9d14b48e8c
|
/CODEFORCES/park_light.py
|
880f197a915fce16db419ae24c4356fa190e9c66
|
[] |
no_license
|
harshitalpha/Algorithms
|
ebad07cc77516ab5c35ae414462d10a38d5ef97e
|
2f7dcf4c3bb4390267231c7c96f7e76399c0166e
|
refs/heads/master
| 2021-07-14T17:34:02.546583
| 2020-06-25T06:38:39
| 2020-06-25T06:38:39
| 178,813,562
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
import math
for _ in range(int(input())):
r, c = [int(s) for s in input().split()]
if r%2 == 0:
ans = (r/2)*c
else:
ans = (r-1)/2 * c
ans = ans + math.ceil(c/2)
print(int(ans))
|
[
"harshitsinghal1103@gmail.com"
] |
harshitsinghal1103@gmail.com
|
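Both branches in the record above collapse to a single formula: the answer is ceil(r*c/2), since for even r the expression (r/2)*c is exactly r*c/2, and for odd r, ((r-1)*c)/2 + ceil(c/2) equals ceil(r*c/2) because (r-1)*c is even. A brute-force check:

```python
import math

for r in range(1, 50):
    for c in range(1, 50):
        branchy = (r / 2) * c if r % 2 == 0 else (r - 1) / 2 * c + math.ceil(c / 2)
        assert int(branchy) == math.ceil(r * c / 2), (r, c)
print("formulas agree")
```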
02ead0e966188d8db902b52401360de29dc3478e
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/smurf/smurf_end_to_end_test.py
|
f50fb3e7c366d7a6b332c231de7d1e830aa81c90
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that training and evaluation work as expected."""
# pylint:skip-file
import contextlib
import io
from absl import flags
from absl.testing import absltest
from smurf import smurf_flags
from smurf import smurf_trainer
FLAGS = flags.FLAGS
class SmurfEndToEndTest(absltest.TestCase):
def test_training_on_spoof(self):
FLAGS.eval_on = ''
FLAGS.train_on = 'spoof:unused'
FLAGS.plot_dir = '/tmp/spoof_train'
FLAGS.check_data = True
FLAGS.num_train_steps = 1
FLAGS.epoch_length = 1
FLAGS.evaluate_during_train = False
FLAGS.height = 296
FLAGS.width = 296
f = io.StringIO()
with contextlib.redirect_stdout(f):
smurf_trainer.train_eval()
# Check that the relevant metrics are printed to stdout.
stdout_message = f.getvalue()
self.assertIn('total-loss: ', stdout_message)
self.assertIn('data-time: ', stdout_message)
self.assertIn('learning-rate: ', stdout_message)
self.assertIn('train-time: ', stdout_message)
def test_evaluating_on_spoof(self):
FLAGS.eval_on = 'spoof:unused'
FLAGS.check_data = False
FLAGS.train_on = ''
FLAGS.plot_dir = '/tmp/spoof_eval'
FLAGS.height = 296
FLAGS.width = 296
FLAGS.num_train_steps = 1
FLAGS.evaluate_during_train = True
f = io.StringIO()
with contextlib.redirect_stdout(f):
smurf_trainer.train_eval()
# Check that the relevant metrics are printed to stdout.
stdout_message = f.getvalue()
self.assertIn('spoof-EPE: ', stdout_message)
self.assertIn('spoof-occl-f-max: ', stdout_message)
self.assertIn('spoof-ER: ', stdout_message)
self.assertIn('spoof-best-occl-thresh: ', stdout_message)
self.assertIn('spoof-eval-time(s): ', stdout_message)
self.assertIn('spoof-inf-time(ms): ', stdout_message)
if __name__ == '__main__':
absltest.main()
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
b45b8e88f4ed303e52d90dfa4d01ea1f2386ac4f
|
df983affa658d3169aebcd95e255c7cafccf1aa0
|
/build/common_msgs/actionlib_msgs/catkin_generated/actionlib_msgs-extras.cmake.develspace.context.cmake.py
|
b6d5b0ec3ce04c0a47984337071656b8a6699bb8
|
[] |
no_license
|
Ektachaurasia/Backup
|
17045d3cd3185ca47c53b02298fe1c123ee8a058
|
e5ab8532f6cd599ebe4b501626ddba2c6c5d83ab
|
refs/heads/main
| 2023-06-20T21:49:54.092227
| 2021-07-28T04:31:15
| 2021-07-28T04:31:15
| 390,155,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,445
|
py
|
# generated from catkin/cmake/template/cfg-extras.context.py.in
DEVELSPACE = 'TRUE' == 'TRUE'
INSTALLSPACE = 'FALSE' == 'TRUE'
CATKIN_DEVEL_PREFIX = '/home/pi/catkin_ws/devel'
CATKIN_GLOBAL_BIN_DESTINATION = 'bin'
CATKIN_GLOBAL_ETC_DESTINATION = 'etc'
CATKIN_GLOBAL_INCLUDE_DESTINATION = 'include'
CATKIN_GLOBAL_LIB_DESTINATION = 'lib'
CATKIN_GLOBAL_LIBEXEC_DESTINATION = 'lib'
CATKIN_GLOBAL_PYTHON_DESTINATION = 'lib/python3/dist-packages'
CATKIN_GLOBAL_SHARE_DESTINATION = 'share'
CATKIN_PACKAGE_BIN_DESTINATION = 'lib/actionlib_msgs'
CATKIN_PACKAGE_ETC_DESTINATION = 'etc/actionlib_msgs'
CATKIN_PACKAGE_INCLUDE_DESTINATION = 'include/actionlib_msgs'
CATKIN_PACKAGE_LIB_DESTINATION = 'lib'
CATKIN_PACKAGE_LIBEXEC_DESTINATION = 'lib/actionlib_msgs'
CATKIN_PACKAGE_PYTHON_DESTINATION = 'lib/python3/dist-packages/actionlib_msgs'
CATKIN_PACKAGE_SHARE_DESTINATION = 'share/actionlib_msgs'
CMAKE_BINARY_DIR = '/home/pi/catkin_ws/build'
CMAKE_CURRENT_BINARY_DIR = '/home/pi/catkin_ws/build/common_msgs/actionlib_msgs'
CMAKE_CURRENT_SOURCE_DIR = '/home/pi/catkin_ws/src/common_msgs/actionlib_msgs'
CMAKE_INSTALL_PREFIX = '/home/pi/catkin_ws/install'
CMAKE_SOURCE_DIR = '/home/pi/catkin_ws/src'
PKG_CMAKE_DIR = '/home/pi/catkin_ws/devel/share/actionlib_msgs/cmake'
PROJECT_NAME = 'actionlib_msgs'
PROJECT_BINARY_DIR = '/home/pi/catkin_ws/build/common_msgs/actionlib_msgs'
PROJECT_SOURCE_DIR = '/home/pi/catkin_ws/src/common_msgs/actionlib_msgs'
|
[
"ektachaurasia02@gmail.com"
] |
ektachaurasia02@gmail.com
|
e5b0112718fb300ed3d147add9a9e182d8b2ea1e
|
7aec3f10b07403b542e1c14a30a6e00bb479c3fe
|
/Codewars/8 kyu/Convert number to reversed array of digits.py
|
e7cbae004c0ea231f393eff942a158cedabe542b
|
[] |
no_license
|
VictorMinsky/Algorithmic-Tasks
|
a5871749b377767176ba82308a6a0962e1b3e400
|
03a35b0541fe413eca68f7b5521eaa35d0e611eb
|
refs/heads/master
| 2020-08-02T23:18:06.876712
| 2020-01-16T19:08:49
| 2020-01-16T19:08:49
| 211,541,179
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
"""
Convert number to reversed array of digits
Given a random number:
C#: long;
C++: unsigned long;
You have to return the digits of this number within an array in reverse order.
Example:
348597 => [7,9,5,8,4,3]
"""
def digitize(n):
return list(reversed(list(map(int, list(str(n))))))
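# Quick sanity check mirroring the docstring example:
assert digitize(348597) == [7, 9, 5, 8, 4, 3]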
|
[
"panasyuk.vityu@gmail.com"
] |
panasyuk.vityu@gmail.com
|
a1ac053acc68c25f371d3926ce3b7044ee603984
|
e76fda1fba459456c4bc105e7a6dcc6277a1a26c
|
/django_cv/blog/migrations/0003_auto_20160717_0956.py
|
1c16fe69db75afb49377755969266de63f4546d1
|
[] |
no_license
|
lafabo/i-love-tutorials
|
6bb2a684a201975ab523d9721b02761a6269853c
|
eafcd47fd62e770107c7e1f08e0d6d60a539f1ec
|
refs/heads/master
| 2021-01-21T04:46:56.365199
| 2016-07-20T17:38:03
| 2016-07-20T17:38:03
| 47,709,568
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-17 09:56
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20160717_0754'),
]
operations = [
migrations.AlterField(
model_name='post',
name='created_date',
field=models.DateTimeField(auto_created=True),
),
migrations.AlterField(
model_name='post',
name='published_date',
field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 7, 17, 9, 56, 3, 256986, tzinfo=utc)),
preserve_default=False,
),
]
|
[
"lazyfatboy@ya.ru"
] |
lazyfatboy@ya.ru
|
f7ac703f00dbfce30712bfb9a545f0ea45d5721d
|
463bdbc8cdca6802f0ff224af0719b078d336f42
|
/semana-2/futbolista.py
|
9e3ed3c57cb464116fa991975c15d833f271f628
|
[] |
no_license
|
BrandonBaLu/poo--1719110177
|
21b99bf4484030c32a26dc12cc3848b9cee12c16
|
a3e72a46e284bdd7f2106e444d9d262f390fb296
|
refs/heads/master
| 2022-12-02T13:49:20.715214
| 2020-08-06T20:53:46
| 2020-08-06T20:53:46
| 265,976,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
class futbolista:
edad= 22
altura= 1.80
camiseta= 1
posicion= "delantero"
categoria="juvenil"
nombre= "Brandon BaLu"
def correr(self):
print("correr")
def saltar(self):
print("saltar")
def patear(self):
print("patear")
def gol(self):
print("meter gol")
def festejar(self):
print("festejar")
    def __init__(self):
print("Futbolista")
print(self.edad)
print(self.altura)
print(self.camiseta)
print(self.posicion)
print(self.categoria)
print(self.nombre)
objeto = futbolista()
objeto.correr()
objeto.saltar()
objeto.patear()
objeto.gol()
objeto.festejar()
|
[
"replituser@example.com"
] |
replituser@example.com
|
cc2c49aa611b103c5981d71833739c0d1bfcef5b
|
98e4005eb908773cd0be5b46e297024395b43b1c
|
/tasks.py
|
40ed204392e83dfac108eef63ee1327a48ea4d32
|
[
"MIT"
] |
permissive
|
aparamon/block-timer
|
110f456a5ae4e1311731c321de49565915074a70
|
8c7159253610728aaa810742bdaa1f9064e1fc5d
|
refs/heads/master
| 2020-03-24T21:29:43.643985
| 2017-10-06T08:07:52
| 2017-10-06T08:07:52
| 143,035,520
| 0
| 0
| null | 2018-07-31T15:49:17
| 2018-07-31T15:49:17
| null |
UTF-8
|
Python
| false
| false
| 1,769
|
py
|
# -*- encoding: utf-8 -*-
# ! python3
import shutil
from invoke import run, task
@task
def clean():
"""remove build artifacts"""
shutil.rmtree('block_timer.egg-info', ignore_errors=True)
shutil.rmtree('build', ignore_errors=True)
shutil.rmtree('dist', ignore_errors=True)
shutil.rmtree('htmlcov', ignore_errors=True)
shutil.rmtree('__pycache__', ignore_errors=True)
@task
def lint():
"""check style with flake8"""
run("flake8 block_timer/ tests/")
@task
def test():
run("py.test --verbose --showlocals tests/")
@task
def check():
"""run tests quickly with the default Python"""
run("python setup.py --no-user-cfg --verbose check --metadata --restructuredtext --strict")
@task
def coverage():
"""check code coverage quickly with the default Python"""
run("coverage run --source block_timer -m py.test")
run("coverage report -m")
run("coverage html")
@task
def test_install():
"""try to install built package"""
run("pip uninstall block-timer --yes", warn=True)
run("pip install --use-wheel --no-index --find-links=file:./dist block-timer")
run("pip uninstall block-timer --yes")
@task
def build():
"""build package"""
run("python setup.py build")
run("python setup.py sdist")
run("python setup.py bdist_wheel")
@task
def publish():
"""publish package"""
check()
run('python setup.py sdist upload -r pypi') # Use python setup.py REGISTER
run('python setup.py bdist_wheel upload -r pypi')
@task
def publish_test():
"""publish package"""
check()
run('python setup.py sdist upload -r https://testpypi.python.org/pypi') # Use python setup.py REGISTER
run('python setup.py bdist_wheel upload -r https://testpypi.python.org/pypi')
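# Example usage (sketch; run from the repo root with the invoke CLI): invoke clean build test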
|
[
"vaclav.dohnal@gmail.com"
] |
vaclav.dohnal@gmail.com
|
07b91e1d7deec94489258dd04edee096ab9d58e2
|
285c76618cf9569a6074bfe5e7f4260d1eedf62a
|
/jjj filter.py
|
fb5a33c3b46f73480634287e7d539dfdbd2ca8af
|
[] |
no_license
|
kiniamogh/options_analysis
|
90b13e335a4426fb98eeb9ef6da6eebeff2838f4
|
da162bf4fbe7b5c5b70d48b284de0ab0f639061d
|
refs/heads/master
| 2023-06-27T06:25:51.672197
| 2021-06-16T23:11:58
| 2021-06-16T23:11:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,337
|
py
|
import yfinance as yf
import pandas as pd
import numpy as np
import timeit
from datetime import datetime as dt
from scipy.stats import norm
start = timeit.default_timer()
# https://algotrading101.com/learn/yfinance-guide/
#https://aroussi.com/post/download-options-data
#prompt for a symbol
symbol = input('What is your stock ticker?: ')
min_delta = float(input('what is the minimum delta(e.g. 0.7 is 70%)?: '))
min_yield = float(input('what is the minimum weekly yield (e.g. .01 is 1%)?: '))
max_expiration = input('what is the latest expiration?(mm-dd-yyyy): ')
#hard-wire a symbol without the prompt
#symbol = 'Tna'
#print symbol
#print(symbol.upper())
#yfinance version of your symbol
ticker = yf.Ticker(symbol)
# print descriptive info about the ticker
#print(ticker.info)
#historical prices
#historical = ticker.history(start="2020-12-02", end="2020-12-04", interval="5m")
#print(historical)
#how far back you go - period
# “1d”, “5d”, “1mo”, “3mo”, “6mo”, “1y”, “2y”, “5y”, “10y”, “ytd”, “max”
#bars or candles - interval
# 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo, 3mo
#hist = ticker.history(period="3d", interval = "5m")
#print(hist)
#multiple_tickers = yf.download("AMZN AAPL GOOG", start="2017-01-01", end="2017-04-30")
#print(multiple_tickers)
#multi_ticker_history = yf.download("AMZN AAPL GOOG", period="3d", interval = "5m")
#print(multi_ticker_history)
#options
#pull in the entire options chain for one expiration
#expiration = input('What is your expiration date? (yyyy-mm-dd): ')
#expiration = '2021-01-08'
#exp_dates = []
exp_dates = ticker.options # this is the list of expiration dates
#print(exp_dates)
#opt = ticker.option_chain(expiration)
#print(opt)
#opt = ticker.option_chain(exp_dates)
frames = []
for x in exp_dates:
    opt = ticker.option_chain(x)
    frames.append(opt.calls)
df = pd.concat(frames, ignore_index=True)  # DataFrame.append is removed in pandas >= 2.0
#df = pd.DataFrame(opt.calls)
hist = ticker.history(period="3d", interval = "5m")
#print(hist)
df_history = pd.DataFrame(hist)
recent_value = df_history['Close'].iloc[-1]
print(recent_value)
df['recent_px'] = recent_value
#df['recent_px'] = 173.75
#intrinsic value = stock price - strike price
df['intrinsic_value'] = df['recent_px'] - df['strike']
df['intrinsic_value'] = np.where(df['intrinsic_value'] < 0, 0, df['intrinsic_value'])
#option price = mid
#mid = (bid + ask) / 2
df['option_px'] = (df['bid'] + df['ask']) / 2 #mid options price
#extrinsic value = option price - intrinsic value
df['extrinsic_value'] = df['option_px'] - df['intrinsic_value']
df['extrinsic_value'] = np.where(df['extrinsic_value'] < 0, 0, df['extrinsic_value'])
#yield = extrinsic / recent_px (kept as a fraction, e.g. 0.01 = 1%)
df['yield'] = (df['extrinsic_value'] / df['recent_px'] )
#contract_symbol = str(df['contractSymbol'].iloc[0])
#print(contract_symbol)
#beginning_index = contract_symbol.find('2')
#print(beginning_index)
#ending_index = beginning_index + 6
#print(ending_index)
#expiration_slice = contract_symbol[beginning_index:ending_index]
#print(expiration_slice)
df['contract_symbol'] = df['contractSymbol'].astype(str)
df['beginning_index'] = (df['contract_symbol'].str.find('2'))
df['ending_index'] = (df['beginning_index'] + 6)
begin_index = df['beginning_index'].iloc[0]
end_index = df['ending_index'].iloc[0]
df['expiration_slice'] = df['contract_symbol'].str.slice(begin_index,end_index)
todays_date = pd.to_datetime('today')
df['today'] = todays_date
df['expiration_combined'] = '20' + df['expiration_slice']
df['converted_expiration'] = pd.to_datetime(df['expiration_combined'])
df['days_to_expiration'] = (df['converted_expiration'] - df['today']).dt.days
#number of weeks
df['number_of_weeks'] = df['days_to_expiration'] / 7
#weekly yield
df['weekly_yield'] = np.where( df['number_of_weeks'] < 1, df['yield'], df['yield'] / df['number_of_weeks'])
# Greeks
df['T'] = df['days_to_expiration'] / 200
risk_free_rate = 0.00
df['r'] = risk_free_rate
df['v'] = df['impliedVolatility']
dividend_rate = .00
df['d'] = dividend_rate
df['S'] = df['recent_px']
df['K'] = df['strike']
df['T_sqrt'] = np.sqrt(df['T'])
df['d1'] = (np.log(df['S'].astype(float) / df['K']) + (( df['r'] - df['d'] ) + df['v'] * df['v'] / 2) * df['T'] ) / (df['v'] * df['T_sqrt'])
df['delta_calc'] = norm.cdf(df['d1'])
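# Sketch of the math above: Black-Scholes call delta = N(d1), where
#   d1 = (ln(S/K) + (r - d + v^2/2) * T) / (v * sqrt(T));
# with d = 0 the usual e^(-d*T) dividend discount on delta drops out.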
#jjj score
df['jjj'] = df['weekly_yield'] * df['delta_calc']
# df['d2'] = df['d1'] - df['v'] * df['T_sqrt']
#
# df['gamma'] = norm.pdf(df['d1']) / (df['S'] * df['v'] * df['T_sqrt'])
#
# df['theta'] = -(df['S'] * df['v'] * norm.pdf(df['d1'])) / (2 * df['T_sqrt']) - df['r'] * df['K'] * np.exp(-df['r'] * df['T']) * norm.cdf(df['d2'])
#
# df['vega'] = df['S'] * df['T_sqrt'] * norm.pdf(df['d1'])
#
# df['rho'] = df['K'] * df['T'] * np.exp(-df['r'] * df['T']) * norm.cdf(df['d2'])
#print(df)
#df.to_csv("greeks.csv")
#dfobj = df[['delta_calc', 'strike']]
#dfobj.to_csv('just_delta_strike.csv')
df_two_colums = df[['strike','delta_calc', 'yield', 'converted_expiration', 'weekly_yield', 'jjj' ]]
#print(df_two_colums)
df_two_colums.to_csv('two_columns.csv')
#filters out for delta threshold
find_delta = df_two_colums.loc[lambda df_two_columns: df_two_columns['delta_calc'] > min_delta, :]
#print(find_delta)
#find_delta.to_csv('find_delta.csv')
#filters out for expiration threshold
find_delta_first_expiration = find_delta.loc[lambda find_delta: find_delta['converted_expiration'] <= max_expiration, :]
#print(find_delta_first_expiration)
#filters out for yield threshold
#find_delta_and_yield = find_delta_first_expiration.loc[lambda find_delta_first_expiration: find_delta_first_expiration['yield'] > .008, :]
find_delta_and_yield = find_delta_first_expiration.loc[lambda find_delta_first_expiration: find_delta_first_expiration['weekly_yield'] > min_yield, :]
# find_delta_and_yield = find_delta.loc[lambda find_delta: find_delta['yield'] > .04, :]
print(find_delta_and_yield)
find_delta_and_yield.to_csv('find_delta_and_yield.csv')
#chooses the strike with the max yield
#max_value = find_delta_and_yield['yield'].max()
max_value = find_delta_and_yield['weekly_yield'].max()
print(max_value)
find_final_strike = find_delta_and_yield.loc[lambda find_delta_and_yield: find_delta_and_yield['weekly_yield'] == max_value, :]
print(find_final_strike)
stop = timeit.default_timer()
print('Time: ', stop - start)
|
[
"jsiddique@gmail.com"
] |
jsiddique@gmail.com
|
328968e0b146457abb4379014c28d200edcdd065
|
47516f1e2356b3e02d96beabf7d05f1f5d89066e
|
/test-mess/perlii/pypi/p.py
|
00a1199efe54867d49a3142ff2d18db91f72d6f4
|
[] |
no_license
|
su8/mega-drive
|
8b9de109921343302c274e3af82d035bdf7ab004
|
d92efdf21734b05a55954aec93fd39a10396924f
|
refs/heads/master
| 2021-04-03T04:16:49.454792
| 2018-05-06T12:02:22
| 2018-05-06T12:02:22
| 124,764,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
def multiply():
a=3
b=4
print("Will compute", a, "times", b)
c = 0
for i in range(0, a):
c = c + b
return c
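# Example call: prints "Will compute 3 times 4" and then the product, 12.
print(multiply())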
|
[
"a@b.c"
] |
a@b.c
|
59ab40f18c28b7bc7c88eb68a4fd01b89ae91535
|
892a07f9393b51f27bdd865a8721599a5a5f63d8
|
/download_info.py
|
9fec5bccf1c7a6b8d435c4e5d6502f69c39d6a7b
|
[] |
no_license
|
TianyuDu/UTCourses
|
4263e2258e221794581418b37266b51d3070c066
|
3a2d57e67ec6109d22fe5b698ebd77a3c40a6dab
|
refs/heads/master
| 2020-09-10T01:44:10.965653
| 2019-11-18T23:50:30
| 2019-11-18T23:50:30
| 221,618,482
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,748
|
py
|
import numpy as np
import pandas as pd
import selenium
from selenium import webdriver
import time
class bot():
def __init__(self):
self.driver = webdriver.Chrome("./chromedriver")
self.driver.get("https://timetable.iit.artsci.utoronto.ca/")
def close(self):
self.driver.close()
def batch_retrive(self, department: str) -> pd.DataFrame:
department = department.upper()
search_box = self.driver.find_element_by_id("courseCode")
search_button = self.driver.find_element_by_id("searchButton")
search_box.clear()
search_box.click()
search_box.send_keys(department)
search_button.click()
course_lst = []
while course_lst == []:
time.sleep(1.0)
course_lst = self.driver.find_elements_by_class_name("perCourse")
course_info_lst = []
print(f"Total courses found: {len(course_lst)}")
for course in course_lst:
code, title = course.find_element_by_class_name("courseTitle").text.split(" ")
print(f"{code}\t{title}")
meeting_lst = course.find_elements_by_class_name("perMeeting")
for meeting in meeting_lst:
meeting_code = meeting.find_element_by_class_name("colCode").text
print(f"\t{meeting_code}")
try:
meeting_info = meeting.find_element_by_class_name(
"secLec" if meeting_code.startswith("LEC") else "secTut").text
except selenium.common.exceptions.NoSuchElementException:
meeting_info = meeting.find_element_by_class_name("secPra").text
info = [code, title, meeting_code, meeting_info]
course_info_lst.append(info)
course_info_df = pd.DataFrame(
np.array(course_info_lst),
columns=["Code", "Title", "Session", "Details"]
)
return course_info_df
def batch_download(self, code_lst: list, save_dir: str) -> None:
department_lst = [
x.text
for x in self.driver.find_elements_by_class_name("option")
]
print(department_lst)
code_lst = [
x[1:-1]
for dep in department_lst
for x in dep.split(" ")
if x.startswith("(") and x.endswith(")")
]
all_courses = []
for x in code_lst:
y = self.batch_retrive(x)
all_courses.append(y)
df = pd.concat(all_courses, axis=0)
print(df.head())
print(df.shape)
df.to_csv(save_dir)
if __name__ == "__main__":
b = bot()
code_lst = ["MAT"]
b.batch_download(code_lst, save_dir="./results.csv")
b.close()
|
[
"masterdu9951@icloud.com"
] |
masterdu9951@icloud.com
|
6a67fbcb39334683fc4c6b183bea6cd0f44d3110
|
5326f4145414e096f6f145a71f6c7e1669230e71
|
/challenges/c40_FilteringRecords/filtering_records/filtering_records.py
|
b54f0d705e269c4f69805aa012e0edf75ed8d7bc
|
[] |
no_license
|
andrew-rietz/FiftySeven_Coding_Challenges
|
a670bd6b1dcf6f99775c2100d297e01a26555af9
|
218894fbad8ac3389003ce7321fd4c4020239fd6
|
refs/heads/master
| 2022-10-17T15:41:01.705609
| 2019-10-03T05:27:45
| 2019-10-03T05:27:45
| 181,096,850
| 0
| 0
| null | 2022-09-16T18:10:31
| 2019-04-12T23:33:52
|
Python
|
UTF-8
|
Python
| false
| false
| 3,651
|
py
|
import sys
import os
from operator import itemgetter
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from util_functions.utils import table, user_inputs
class EmployeeDatabase():
"""Represents a database of employee information (first name, last name,
position/title, and separation date)
Attributes:
employees_data (list): A list of dictionaries. Each dictionary corresponds
to a single employee and tracks first/last names, title, and
separation date. Sample dictionary:
{
"first_name": "Foo",
"last_name": "Bar",
"position": "Programmer",
:sep_date": "",
}
filtered_data (list): A subset of employees_data based on a user search string
Methods:
load_data: Loads data into the `employees_data` attribute
get_filter_string: Prompts the user for a search string
filter: Filters the `employees_data` attribute to only those records that
include the user's search string in the employee first or last name
tabulate_filtered_data: Puts the filtered data into tabular form for printing
"""
def __init__(self, employees_data=None):
self.employees_data = employees_data
self.filtered_data = None
def load_data(self):
employee_info = [
{
"first_name": "John", "last_name": "Johnson",
"position": "Manager", "sep_date": "2016-12-31",
},
{
"first_name": "Tuo", "last_name": "Xiong",
"position": "Software Engineer", "sep_date": "2016-10-05",
},
{
"first_name": "Michaela", "last_name": "Michaelson",
"position": "District Manager", "sep_date": "2015-12-19",
},
{
"first_name": "Jake", "last_name": "Jacobsen",
"position": "Programmer", "sep_date": "",
},
{
"first_name": "Jacquelyn", "last_name": "Jackson",
"position": "DBA", "sep_date": "",
},
{
"first_name": "Sally", "last_name": "Weber",
"position": "Web Developer", "sep_date": "2015-12-18",
},
]
self.employees_data = employee_info
return employee_info
@staticmethod
def get_filter_string():
filter_string = input("Enter a search string: ").strip()
return filter_string.lower()
def filter(self, filter_string):
filtered_data = [
employee for
employee in self.employees_data
if(
(filter_string in employee["first_name"].lower()) or
(filter_string in employee["last_name"].lower())
)
]
self.filtered_data = filtered_data
return filtered_data
def tabulate_filtered_data(self):
table_data = [["Name", "Position", "Separation Date"]]
for employee in self.filtered_data:
table_data.append([
f'{employee["first_name"]} {employee["last_name"]}',
employee["position"],
employee["sep_date"],
])
ascii_table = table.ascii_table(data=table_data, user_alignment="left")
return ascii_table
def main():
employees = EmployeeDatabase()
employees.load_data()
filter_val = employees.get_filter_string()
employees.filter(filter_val)
print(employees.tabulate_filtered_data())
if __name__ == "__main__":
main()
|
[
"andrew.rietz@gmail.com"
] |
andrew.rietz@gmail.com
|
59539c4599d0962bcf71259515908994a8e5da65
|
a80884040ce1c178274a3068d216f440dd541844
|
/rxsci/state/store.py
|
85d2a5655120e091fe557dcc1599571345abfe42
|
[
"MIT"
] |
permissive
|
maki-nage/rxsci
|
a4aae51edc1ef684b55df22e34c11aa1d54ef740
|
915e59ebf593c4b313265bb87cf0e1209ec2ee0f
|
refs/heads/master
| 2023-01-19T14:32:11.638497
| 2023-01-17T08:06:35
| 2023-01-17T08:06:35
| 242,592,973
| 9
| 2
|
MIT
| 2022-11-08T21:54:16
| 2020-02-23T21:23:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,514
|
py
|
class Store(object):
def __init__(self, topology, store_factory):
"""one per partition
"""
self.states = []
for state in topology.states:
self.states.append(store_factory(
name=state.name,
data_type=state.data_type,
default_value=state.default_value
))
def add_key(self, state, key):
return self.states[state].add_key(key)
def del_key(self, state, key):
return self.states[state].del_key(key)
def set(self, state, key, value):
return self.states[state].set(key, value)
def get(self, state, key):
return self.states[state].get(key)
def iterate(self, state):
return self.states[state].iterate()
def add_map(self, state, key, map_key):
return self.states[state].add_map(key, map_key)
def del_map(self, state, key, map_key):
return self.states[state].del_map(key, map_key)
def get_map(self, state, key, map_key):
return self.states[state].get_map(key, map_key)
def iterate_map(self, state, key):
return self.states[state].iterate_map(key)
class StoreManager(object):
def __init__(self, store_factory):
"""Manages partitions
"""
self.partitions = None
self.active_partition = None
self.topology = None
self.states = []
self.create_store = store_factory
def set_topology(self, topology):
self.topology = topology
def get_store(self):
if self.active_partition is None:
# No partitioning provided, use a single store
assert not self.states
self.states = [Store(topology=self.topology, store_factory=self.create_store)]
self.active_partition = 0
return self.states[self.active_partition]
def add_key(self, state, key):
store = self.get_store()
return store.add_key(state, key)
def del_key(self, state, key):
store = self.get_store()
return store.del_key(state, key)
def set_state(self, state, key, value):
"""Sets value of key in state
Args:
state: A state id from topology
key: A unique key for this state
value: value to set
"""
store = self.get_store()
return store.set(state, key, value)
def get_state(self, state, key):
"""Retrieves value of key in state
Args:
state: A state id from topology
key: A unique key for this state
Returns:
value of key.
"""
store = self.get_store()
return store.get(state, key)
def iterate_state(self, state):
store = self.get_store()
return store.iterate(state)
def add_map(self, state, key, map_key):
store = self.get_store()
return store.add_map(state, key, map_key)
def del_map(self, state, key, map_key):
store = self.get_store()
return store.del_map(state, key, map_key)
def get_map(self, state, key, map_key):
store = self.get_store()
return store.get_map(state, key, map_key)
def iterate_map(self, state, key):
store = self.get_store()
return store.iterate_map(state, key)
def on_partitions_revoked(self, revoked):
return
def on_partitions_assigned(self, assigned):
return
def set_active_partition(self, partition):
self.active_partition = partition
|
[
"romain.picard@oakbits.com"
] |
romain.picard@oakbits.com
|
791272d372a823c1d6e970236552365d7b3a754a
|
7078044ab0f8f1c1f4062a0e295d0b0c66f49734
|
/Seq2Seq/py/fsa.py
|
d3cc4cf306fb02e077bd6b0dae8f687c8cb9b7c9
|
[] |
no_license
|
shixing/xing_rnn
|
854061ee0a1d9de5f3d761df8f1769a3f2b98bc9
|
48f9089f5df97ef2ee6a79f01430ab32dc101512
|
refs/heads/master
| 2022-11-10T11:35:16.798208
| 2018-05-17T19:08:50
| 2018-05-17T19:08:50
| 86,934,874
| 4
| 4
| null | 2022-10-20T20:53:39
| 2017-04-01T18:46:44
|
Python
|
UTF-8
|
Python
| false
| false
| 6,181
|
py
|
import re
import math
from logging_helper import mylog, mylog_section, mylog_subsection, mylog_line
class State:
def __init__(self, str_name):
self.name = str_name
self.weights = {} # {int_word: {str_state_name: (state_s, float_weight)}} and float_weigth are in log space
self.next_word_index_set = set()
self.next_word_index_set_ready = False
def process_link(self, state_d, int_word, float_weight):
if not int_word in self.weights:
self.weights[int_word] = {}
self.weights[int_word][state_d.name] = (state_d, float_weight)
def __repr__(self):
return "State({})".format(self.name)
def next_states(self, int_word, results):
#the fsa should not contains a *e* circle.
# results = [(state, weight)]
if int_word in self.weights:
for state_name in self.weights[int_word]:
state_s, float_weight = self.weights[int_word][state_name]
results.append((state_s, float_weight))
# check the *e* link
empty = -1
if empty in self.weights:
for state_name in self.weights[empty]:
state_s, float_weight = self.weights[empty][state_name]
temp = []
state_s.next_states(int_word, temp)
for s, w in temp:
new_w = float_weight + w
results.append((s,new_w))
def next_word_indices(self):
if self.next_word_index_set_ready:
return self.next_word_index_set
else:
# build next_word_index_set
for int_word in self.weights:
if int_word == -1: # *e*
for next_state_name in self.weights[int_word]:
state_s, float_weight = self.weights[int_word][next_state_name]
next_word_index_set = state_s.next_word_indices()
for w in next_word_index_set:
self.next_word_index_set.add(w)
else:
self.next_word_index_set.add(int_word)
self.next_word_index_set_ready = True
return self.next_word_index_set
class FSA:
def __init__(self,fsa_filename, word2index, weight_is_in_log = True):
self.fsa_filename = fsa_filename
self.start_state = None
self.end_state = None
self.patterns = [re.compile("\\(([^ ]+)[ ]+\\(([^ ]+)[ ]+\"(.*)\"[ ]*\\)\\)"),
re.compile("\\(([^ ]+)[ ]+\\(([^ ]+)[ ]+([^ ]+)[ ]*\\)\\)"),
re.compile("\\(([^ ]+)[ ]+\\(([^ ]+)[ ]+\"(.*)\"[ ]+([^ ]+)[ ]*\\)\\)"),
re.compile("\\(([^ ]+)[ ]+\\(([^ ]+)[ ]+([^ ]+)[ ]+([^ ]+)[ ]*\\)\\)"),
]
self.weight_is_in_log = weight_is_in_log
if self.weight_is_in_log:
self.default_weight = 0.0
else:
self.default_weight = 1.0
self.states = {} # {str_name: state_s}
self.word2index = word2index
self.index2word = {}
for word in self.word2index:
index = self.word2index[word]
self.index2word[index] = word
self.num_links = 0
def _process_one_line(self,line):
line = line.strip()
if len(line) == 0 or line.startswith('#'):
return None
for p in self.patterns:
r = re.match(p, line)
if r:
break
if r:
group = r.groups()
s = group[0]
d = group[1]
word = group[2]
if word == "*e*":
word = -1
else:
if not word in self.word2index:
print "{} is not in vocab".format(word)
word = -2
else:
word = self.word2index[word]
weight = self.default_weight
if len(group) == 4:
weight = float(group[3])
if not self.weight_is_in_log:
weight = math.log(weight)
return s,d,word,weight
else:
raise ValueError("Can not process line: ", line)
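    # Illustrative lines matched by the patterns above (word optionally quoted,
    # weight optional, "*e*" denotes an epsilon link):
    #   (q0 (q1 "a"))        (q0 (q1 a))
    #   (q0 (q1 "a" 0.5))    (q0 (q1 a 0.5))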
def load_fsa(self):
f = open(self.fsa_filename)
# the end state
line = f.readline().strip()
self.end_state = State(line)
self.states[line] = self.end_state
while True:
line = f.readline()
if not line:
break
s,d,word,weight = self._process_one_line(line)
if s not in self.states:
self.states[s] = State(s)
if d not in self.states:
self.states[d] = State(d)
if self.start_state == None:
self.start_state = self.states[s]
if word != -2:
self.states[s].process_link(self.states[d], word, weight)
self.num_links += 1
if "_EOS" not in self.states:
self.end_state.process_link(self.end_state, self.word2index["_EOS"], self.default_weight)
# FSA info
self.report_statics()
f.close()
def report_statics(self):
mylog_section("FSA")
mylog_subsection("FSA Info")
mylog("Number of States: {}".format(len(self.states)))
mylog("Number of Links: {}".format(self.num_links))
mylog("Start state: {}".format(self.start_state.name))
mylog("End state: {}".format(self.end_state.name))
def next_states(self, current_state, index, results):
if index in self.index2word:
current_state.next_states(index, results)
if __name__ == "__main__":
fsa_filename = "../data/fsa/fsa.txt"
word2index = {}
for i in xrange(0,26):
word2index[chr(i+ord('a'))] = i+1
word2index['_EOS'] = 0
fsa = FSA(fsa_filename,word2index)
fsa.load_fsa()
print fsa.end_state.weights
for i in fsa.end_state.next_word_indices():
results = []
fsa.next_states(fsa.end_state, i, results)
print i, fsa.index2word[i], results
|
[
"shixing19910105@gmail.com"
] |
shixing19910105@gmail.com
|
11da7904a42782276ec0655cb6e620a333aaf166
|
5839614a5e2fa0b59acd09a623115efa962ee89d
|
/conda/_vendor/auxlib/logz.py
|
ac0ff89768fcabff7dd1c707d97ee02d871516a7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
schae234/conda
|
110c25b35a3a0cdafed2ace6895f275e82233481
|
5bb678fe9c1445e62857db4fc55f10602cfa96a3
|
refs/heads/master
| 2021-01-18T20:27:37.134990
| 2016-08-03T17:36:51
| 2016-08-03T17:36:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,975
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from json import JSONEncoder
from logging import getLogger, INFO, Handler, Formatter, StreamHandler, DEBUG
from pprint import pformat
from sys import stderr
log = getLogger(__name__)
root_log = getLogger()
DEBUG_FORMATTER = Formatter(
"[%(levelname)s] [%(asctime)s.%(msecs)03d] %(process)d %(name)s:%(funcName)s(%(lineno)d):\n"
"%(message)s\n",
"%Y-%m-%d %H:%M:%S")
INFO_FORMATTER = Formatter(
"[%(levelname)s] [%(asctime)s.%(msecs)03d] %(process)d %(name)s(%(lineno)d): %(message)s\n",
"%Y-%m-%d %H:%M:%S")
def set_root_level(level=INFO):
root_log.setLevel(level)
def attach_stderr(level=INFO):
has_stderr_handler = any(handler.name == 'stderr' for handler in root_log.handlers)
if not has_stderr_handler:
handler = StreamHandler(stderr)
handler.name = 'stderr'
if level is not None:
handler.setLevel(level)
handler.setFormatter(DEBUG_FORMATTER if level == DEBUG else INFO_FORMATTER)
root_log.addHandler(handler)
return True
else:
return False
def detach_stderr():
for handler in root_log.handlers:
if handler.name == 'stderr':
root_log.removeHandler(handler)
return True
return False
def initialize_logging(level=INFO):
attach_stderr(level)
class NullHandler(Handler):
def emit(self, record):
pass
class DumpEncoder(JSONEncoder):
def default(self, obj):
if hasattr(obj, 'dump'):
return obj.dump()
# Let the base class default method raise the TypeError
return super(DumpEncoder, self).default(obj)
_DUMPS = DumpEncoder(indent=2, ensure_ascii=False, sort_keys=True).encode
def jsondumps(obj):
return _DUMPS(obj)
def fullname(obj):
return obj.__module__ + "." + obj.__class__.__name__
request_header_sort_dict = {
'Host': '\x00\x00',
'User-Agent': '\x00\x01',
}
def request_header_sort_key(item):
return request_header_sort_dict.get(item[0], item[0].lower())
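# Example: sorted(prepared_request.headers.items(), key=request_header_sort_key)
# puts 'Host' first and 'User-Agent' second, then the rest case-insensitively.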
response_header_sort_dict = {
'Content-Length': '\x7e\x7e\x61',
'Connection': '\x7e\x7e\x62',
}
def response_header_sort_key(item):
return response_header_sort_dict.get(item[0], item[0].lower())
def stringify(obj):
def bottle_builder(builder, bottle_object):
builder.append("{0} {1}{2} {3}".format(bottle_object.method,
bottle_object.path,
bottle_object.environ.get('QUERY_STRING', ''),
bottle_object.get('SERVER_PROTOCOL')))
builder += ["{0}: {1}".format(key, value) for key, value in bottle_object.headers.items()]
builder.append('')
body = bottle_object.body.read().strip()
if body:
builder.append(body)
def requests_models_PreparedRequest_builder(builder, request_object):
builder.append("> {0} {1} {2}".format(request_object.method, request_object.path_url,
request_object.url.split(':', 1)[0].upper()))
builder.extend("> {0}: {1}".format(key, value)
for key, value in sorted(request_object.headers.items(),
key=request_header_sort_key))
builder.append('')
if request_object.body:
builder.append(request_object.body)
def requests_models_Response_builder(builder, response_object):
builder.append("< {0} {1} {2}".format(response_object.url.split(':', 1)[0].upper(),
response_object.status_code, response_object.reason))
builder.extend("> {0}: {1}".format(key, value)
for key, value in sorted(response_object.headers.items(),
key=response_header_sort_key))
builder.append('')
content_type = response_object.headers.get('Content-Type')
if content_type == 'application/json':
builder.append(pformat(response_object.json, indent=2))
builder.append('')
        elif content_type and content_type.startswith('text/'):
builder.append(response_object.text)
try:
name = fullname(obj)
builder = [''] # start with new line
if name.startswith('bottle.'):
bottle_builder(builder, obj)
elif name.endswith('requests.models.PreparedRequest'):
requests_models_PreparedRequest_builder(builder, obj)
elif name.endswith('requests.models.Response'):
requests_models_PreparedRequest_builder(builder, obj.request)
requests_models_Response_builder(builder, obj)
else:
return None
builder.append('') # end with new line
return "\n".join(builder)
except Exception as e:
log.exception(e)
|
[
"kfranz@continuum.io"
] |
kfranz@continuum.io
|
298572d842e993f618c640c486277f700ba6ec7b
|
05b42178aaefd7efdb2fb19fdea8e58056d8d4bd
|
/leetcode/combination_sum/recursive_1.py
|
f41f141fd86c2c7bc3479dea3ee84f4c399a59cb
|
[] |
no_license
|
chrisjdavie/interview_practice
|
43ca3df25fb0538d685a59ac752a6a4b269c44e9
|
2d47d583ed9c838a802b4aa4cefe649c77f5dd7f
|
refs/heads/master
| 2023-08-16T18:22:46.492623
| 2023-08-16T16:04:01
| 2023-08-16T16:04:01
| 247,268,317
| 0
| 0
| null | 2020-03-14T17:35:12
| 2020-03-14T12:01:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,458
|
py
|
"""
https://leetcode.com/problems/combination-sum/
Given an array of distinct integers `candidates` and a target integer `target`, return a list of all unique combinations of `candidates` where the chosen numbers sum to `target`. You may return the combinations in any order.
The same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the
frequency
of at least one of the chosen numbers is different.
The test cases are generated such that the number of unique combinations that sum up to `target` is less than `150` combinations for the given input.
---------------------------
While this worked the first time, it kinda went a bit pear-shaped with corner cases I hadn't really considered, so I'm
trying again
"""
import pytest
class Solution:
def combinationSum(self, candidates: list[int], target: int) -> list[list[int]]:
def _solve(i_start_cand: int, _target: int) -> list[list[int]]:
results = []
for i_cand, cand in enumerate(candidates[i_start_cand:]):
for i_mult in range(1, _target//cand+1):
mult = i_mult*cand
if mult == _target:
results.append((i_mult)*[cand])
else:
for res in _solve(i_start_cand+i_cand+1, _target - mult):
results.append((i_mult)*[cand] + res)
return results
return _solve(0, target)
@pytest.mark.parametrize(
"candidates,target,expected_combinations",
(
([2], 1, []),
([2], 2, [[2],]),
([3], 6, [[3, 3],]),
([2, 3], 6, [[2, 2, 2], [3, 3]]),
([5,], 6, []),
([2, 4], 8, [[2, 2, 2, 2], [2, 2, 4], [4, 4]]),
)
)
def test_unit(candidates, target, expected_combinations):
result = Solution().combinationSum(candidates, target)
for comb in expected_combinations:
assert comb in result
assert len(result) == len(expected_combinations)
@pytest.mark.parametrize(
"candidates,target,expected_combinations",
(
([2,3,6,7], 7, [[2,2,3],[7]]),
([2,3,5], 8, [[2,2,2,2],[2,3,3],[3,5]]),
([2], 1, []),
)
)
def test_leetcode(candidates, target, expected_combinations):
result = Solution().combinationSum(candidates, target)
for comb in expected_combinations:
assert comb in result
assert len(result) == len(expected_combinations)
|
[
"cjdavie@googlemail.com"
] |
cjdavie@googlemail.com
|
5685e8af0a83996ef0288ac44c99899d0a7c43ec
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2007/programming/libs/liblrdf/actions.py
|
bc89302e9235025b741e9b91d9bb59681be0b8c9
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005,2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "liblrdf-0.4.0"
def setup():
autotools.configure()
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("AUTHORS", "ChangeLog", "NEWS", "README")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
877f97e8de9331c59f06734cbd4df10c70b75efd
|
4b41a76c5c366ba2daa30843acea16609b8f5da7
|
/2017/01/AoC17_01_1.py
|
776cd60ef6af6c060d9c2488d256ca8fbd15d274
|
[] |
no_license
|
grandfoosier/AdventOfCode
|
c4706cfefef61e80060cca89b0433636e42bf974
|
a43fdd72fe4279196252f24a4894500a4e272a5d
|
refs/heads/master
| 2020-06-11T12:36:48.699811
| 2019-01-14T23:44:44
| 2019-01-14T23:44:44
| 75,665,958
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
fname = "AoC17_01_1.txt"
text = [line.rstrip('\n') for line in open(fname)][0]
print "\nInteger Stream Loaded\n"
n = len(text)
sol = sum(int(c) for i,c in enumerate(text)
if text[i] == text[(i+1)%n])
print sol
print "\n"
|
[
"noreply@github.com"
] |
grandfoosier.noreply@github.com
|
d51046eba4b9559778329e5ac3429bc3f38fdbf6
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/846.py
|
2e11d979e792075a6ed484de0808b1e4c66a2eee
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 898
|
py
|
from sys import argv
script, in_txt, out_txt = argv
def solver(in_txt, out_txt):
in_file = open(in_txt)
out_file = open(out_txt, 'w')
T = int(in_file.readline())
for t in range(T):
N = int(in_file.readline())
ls = []
for i in range(4):
x = map(int, in_file.readline().split())
ls.append(x)
f = set(ls[N-1])
N = int(in_file.readline())
ls = []
for i in range(4):
x = map(int, in_file.readline().split())
ls.append(x)
g = set(ls[N-1])
h = list(f & g)
if len(h) == 0:
line = "Case #%d: Volunteer cheated!" % (t+1)
if len(h) > 1:
line = "Case #%d: Bad magician!" % (t+1)
if len(h) == 1:
line = "Case #%d: %d" % (t+1,h[0])
out_file.write(line)
out_file.write('\n')
in_file.close()
out_file.close()
return
solver(in_txt, out_txt)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
013ed97a9e4b87e7c35ccbdd5a03e89df9b11bf3
|
469318e9a1ff8966199529affa1e4900a9f13ff0
|
/src/structured_data/_adt/sum_type.py
|
b356eb5a8c1981146ea9e7e04d39ee2dc238e3e9
|
[
"MIT"
] |
permissive
|
mwchase/python-structured-data
|
06e968a96d3953bd6f585aad66e3bea8921106e2
|
1059ddaf0728610576299f30485fad221cf5695c
|
refs/heads/master
| 2021-06-13T08:41:46.148869
| 2021-02-23T18:37:26
| 2021-02-23T18:37:26
| 136,821,798
| 2
| 0
|
MIT
| 2020-02-05T21:59:00
| 2018-06-10T15:40:41
|
Python
|
UTF-8
|
Python
| false
| false
| 4,464
|
py
|
"""Internal implementation of the Sum base class."""
from __future__ import annotations
import typing
from .. import _cant_modify
from . import constructor
from . import ordering
from . import prewritten_methods
from . import product_type
_T = typing.TypeVar("_T")
def _conditional_call(call: bool, func: typing.Callable, *args: typing.Any) -> None:
if call:
func(*args)
def _set_new_functions(cls: type, *functions: typing.Callable) -> typing.Optional[str]:
"""Attempt to set the attributes corresponding to the functions on cls.
If any attributes are already defined, fail *before* setting any, and
return the already-defined name.
"""
cant_set = product_type.cant_set_new_functions(cls, *functions)
if cant_set:
return cant_set
for function in functions:
setattr(
cls,
product_type.name_(cls, typing.cast(product_type.MethodLike, function)),
function,
)
return None
def _sum_new(_cls: typing.Type[_T], subclasses: typing.FrozenSet[type]) -> None:
def base(cls: typing.Type[_T], args: tuple) -> _T:
# By the way, this is for https://github.com/python/mypy/issues/7580
# When that's fixed, this can be made a one-liner again.
superclass = super(_cls, cls)
return superclass.__new__(cls, args) # type: ignore
new = vars(_cls).get("__new__", staticmethod(base))
def __new__(cls: typing.Type[_T], args: tuple) -> _T:
if cls not in subclasses:
raise TypeError
return new.__get__(None, cls)(cls, args)
_cls.__new__ = staticmethod(__new__) # type: ignore
class Sum(constructor.SumBase):
"""Base class of classes with disjoint constructors.
Examines PEP 526 __annotations__ to determine subclasses.
If repr is true, a __repr__() method is added to the class.
If order is true, rich comparison dunder methods are added.
The Sum class examines the class to find Ctor annotations.
A Ctor annotation is the adt.Ctor class itself, or the result of indexing
the class, either with a single type hint, or a tuple of type hints.
All other annotations are ignored.
The subclass is not subclassable, but has subclasses at each of the
names that had Ctor annotations. Each subclass takes a fixed number of
arguments, corresponding to the type hints given to its annotation, if any.
"""
__slots__ = ()
def __new__(cls, /, *args: typing.Any, **kwargs: typing.Any) -> Sum: # noqa: E225
if not issubclass(cls, constructor.ADTConstructor):
raise TypeError
return super().__new__(cls, *args, **kwargs)
# Both of these are for consistency with modules defined in the stdlib.
# BOOM!
def __init_subclass__(
cls: type,
*,
repr: bool = True, # pylint: disable=redefined-builtin
eq: bool = True, # pylint: disable=invalid-name
order: bool = False,
**kwargs: typing.Any,
) -> None:
super().__init_subclass__(**kwargs) # type: ignore
if issubclass(cls, constructor.ADTConstructor):
return
ordering.ordering_options_are_valid(eq=eq, order=order)
prewritten_methods.SUBCLASS_ORDER[cls] = constructor.make_constructors(cls)
source = prewritten_methods.PrewrittenSumMethods
cls.__init_subclass__ = source.__init_subclass__ # type: ignore
_sum_new(cls, frozenset(prewritten_methods.SUBCLASS_ORDER[cls]))
_conditional_call(repr, _set_new_functions, cls, source.__repr__)
equality_methods_were_set = eq and not _set_new_functions(
cls, source.__eq__, source.__ne__
)
if equality_methods_were_set:
cls.__hash__ = source.__hash__ # type: ignore
ordering.raise_for_collision(
(
order
and ordering.can_set_ordering(can_set=equality_methods_were_set)
and _set_new_functions(
cls, source.__lt__, source.__le__, source.__gt__, source.__ge__
)
),
cls.__name__,
)
def __bool__(self) -> bool:
return True
def __setattr__(self, name: str, value: typing.Any) -> None:
_cant_modify.guard(self, name)
super().__setattr__(name, value)
def __delattr__(self, name: str) -> None:
_cant_modify.guard(self, name)
super().__delattr__(name)
|
[
"max.chase@gmail.com"
] |
max.chase@gmail.com
|
7ed1a8bc8bf59dbe6985ba6d4568c3994d6222d7
|
b45b3e5e7389d071161fa52340cb119a29c76907
|
/ieWin_test.py
|
ea4039f4389737ebc027d2b25c466e520fb3c2e2
|
[] |
no_license
|
Metallicow/wxPythonDemos
|
2fc6882a11a0aa6bb35c42f163cfcd6b3456f4fd
|
396d1ade5930528ec7518b9c22dc93a274cb418f
|
refs/heads/master
| 2020-12-25T11:52:18.577898
| 2013-05-19T18:58:11
| 2013-05-19T18:58:11
| 11,283,970
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
#!/usr/bin/env python
import wx
if wx.Platform == '__WXMSW__':
import wx.lib.iewin as iewin
else:
raise ImporrError("This test only works on windows")
class DemoFrame(wx.Frame):
""" This window displays a button """
def __init__(self, title = "Micro App"):
wx.Frame.__init__(self, None , -1, title)
btn = wx.Button(self, label = "Get HTML")
btn.Bind(wx.EVT_BUTTON, self.GetHTML )
self.Bind(wx.EVT_CLOSE, self.GetHTML)
self.htwin = iewin.IEHtmlWindow(self)
self.htwin.Navigate('http://cameochemicals.noaa.gov/')
S = wx.BoxSizer(wx.VERTICAL)
S.Add(btn, 0, wx.ALL, 5)
S.Add(self.htwin, 1, wx.EXPAND)
self.SetSizer(S)
self.SetSize((700,500))
self.Bind(wx.EVT_CLOSE, self.OnQuit)
def OnQuit(self,Event):
self.Destroy()
def GetHTML(self, event=None):
print "contents of HTML window as text: ", self.htwin.GetText(asHTML=False)[:500]
print "contents of HTML window as html: ", self.htwin.GetText(asHTML=True)
app = wx.App(False)
frame = DemoFrame()
frame.Show()
app.MainLoop()
|
[
"Chris.Barker@noaa.gov"
] |
Chris.Barker@noaa.gov
|
cba1ccfc085d6e62bbd832f0e0ea224fd7d50d46
|
f5dae529fa0a42678cbf0261e227e45101317034
|
/test/backward_compatibility/check_backward_compatibility.py
|
0f45a263b874e529e5b5f54e3d37924c58914a83
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
kamiedu/pytorch
|
0fa1d28f4332bf1fd2bb93169254f2bcc2c4d039
|
54a1e8509c9e88200139a37a7dd3a86660849591
|
refs/heads/master
| 2022-05-22T08:55:36.432225
| 2020-04-17T20:33:42
| 2020-04-17T20:36:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,535
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import datetime
import re
import sys
import torch
from torch._C import parse_schema
# The date specifies how long the whitelist exclusion should apply.
#
# - If we NEVER give BC guarantee for an operator, you can put the
# date arbitrarily far in the future.
# - Otherwise, pick a date that is far enough in the future that you
# believe you can land your diff before then.
#
# Whitelist entries can be removed after the date listed on them passes.
white_list = [
('c10_experimental', datetime.date(2222, 1, 1)),
# We export some functions and classes for test_jit.py directly from libtorch.so,
# it's not important to have BC for them
('_TorchScriptTesting.*', datetime.date(9999, 1, 1)),
('aten::append*', datetime.date(2020, 4, 15)),
('aten::real*', datetime.date(2020, 4, 15)),
('aten::imag*', datetime.date(2020, 4, 15)),
('aten::quantize_per_tensor', datetime.date(2020, 4, 15)),
('aten::index_put', datetime.date(2020, 4, 10)),
('aten::index', datetime.date(2020, 4, 10)),
('aten::_index_put_impl', datetime.date(2020, 4, 10)),
('aten::index_put_', datetime.date(2020, 4, 10)),
('aten::quantize_per_tensor', datetime.date(2020, 4, 15)),
('aten::requires_grad_', datetime.date(2020, 4, 30)),
('quantized::batch_norm', datetime.date(2020, 4, 20)),
('aten::sizes', datetime.date(2020, 4, 30)),
('aten::strides', datetime.date(2020, 4, 30)),
('aten::backward', datetime.date(2020, 4, 30)),
]
# The nightly will fail to parse newly added syntax to schema declarations
# Add new schemas that will fail the nightly here
dont_parse_list = [
]
def white_listed(schema, white_list):
for item in white_list:
if item[1] < datetime.date.today():
continue
regexp = re.compile(item[0])
if regexp.search(schema.name):
return True
return False
def dont_parse(schema_line):
for item in dont_parse_list:
if item[1] < datetime.date.today():
continue
regexp = re.compile(item[0])
if regexp.search(schema_line):
return True
return False
def check_bc(new_schema_dict):
existing_schemas = torch._C._jit_get_all_schemas()
is_bc = True
broken_ops = []
for existing_schema in existing_schemas:
if white_listed(existing_schema, white_list):
print("Black list, skipping schema: ", str(existing_schema))
continue
print("processing existing schema: ", str(existing_schema))
new_schemas = new_schema_dict.get(existing_schema.name, [])
found = False
for new_schema in new_schemas:
if new_schema.is_backward_compatible_with(existing_schema):
found = True
break
if not found:
print('Can NOT find backward compatible schemas after changes '
'for schema {} from the following candidates:\n[\n{}\n]'
.format(
str(existing_schema),
"\n\t".join(str(s) for s in new_schemas)))
# TODO Print out more details about why candidates don't match.
broken_ops.append(str(existing_schema))
is_bc = False
if is_bc:
print('Found backward compatible schemas for all existing schemas')
else:
print('The PR is introducing backward incompatible changes to the '
'operator library. Please contact PyTorch team to confirm '
'whether this change is wanted or not. \n\nBroken ops: '
'[\n\t{}\n]'.format("\n\t".join(broken_ops)))
return is_bc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument(
'--new-schemas',
help='filename to load new schemas',
type=str,
default='schemas.txt')
args = parser.parse_args()
new_schema_dict = dict()
with open(args.new_schemas, 'r') as f:
while True:
line = f.readline()
if not line:
break
if dont_parse(line.strip()):
print("Not parsing schema line: ", line.strip())
continue
s = parse_schema(line.strip())
slist = new_schema_dict.get(s.name, [])
slist.append(s)
new_schema_dict[s.name] = slist
if not check_bc(new_schema_dict):
sys.exit(1)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
6e40e95fe1174dabcfdcd931cd6b707d1b097850
|
caef61baf7fc3f933d2fca8dceb7598be22adde1
|
/openapi_core/templating/datatypes.py
|
7087d9e345fd420f3213ea6bfee0e565185b661b
|
[
"BSD-3-Clause"
] |
permissive
|
dlarrick/openapi-core
|
679835b749d49f15da61f6f14df060e08010fee6
|
0865a4f54f38bdbe4a0de11addfa425c302aedfd
|
refs/heads/master
| 2021-05-24T14:22:24.708899
| 2021-03-31T15:25:18
| 2021-03-31T15:25:18
| 253,604,031
| 0
| 0
|
BSD-3-Clause
| 2020-04-06T20:07:12
| 2020-04-06T20:07:11
| null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
import attr
@attr.s
class TemplateResult(object):
pattern = attr.ib(default=None)
variables = attr.ib(default=None)
@property
def resolved(self):
if not self.variables:
return self.pattern
return self.pattern.format(**self.variables)
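# Example usage (illustrative pattern and variables):
#   result = TemplateResult(pattern='/pets/{petId}', variables={'petId': '42'})
#   result.resolved  # -> '/pets/42'
#   TemplateResult(pattern='/pets').resolved  # -> '/pets' (no variables to fill)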
|
[
"maciag.artur@gmail.com"
] |
maciag.artur@gmail.com
|
59c5558142ea9d114fe38247f09d705fa9cdba2d
|
c4a33b613ffc77dccf96d33c3a5cc127405c0e95
|
/life/views.py
|
4a22d705c1485f87c2059c53d66383416843042d
|
[] |
no_license
|
tsokac2/new-irish-life
|
25f49bd0b74dfa7c0a449772249f6cb51925b643
|
d09934b60a1fd4fbd4540d412dc5dab726f5b502
|
refs/heads/main
| 2023-07-02T09:54:55.082587
| 2021-07-30T04:42:57
| 2021-07-30T04:42:57
| 379,245,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
from django.shortcuts import render
from .models import Life
def life(request):
""" A view to return the Life template """
life_section = Life.objects.all()
template = 'life/life.html'
context = {
'life_section': life_section
}
return render(request, template, context)
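# Hypothetical URL wiring for this view (module and route names assumed,
# not taken from this repo):
#   from django.urls import path
#   from . import views
#   urlpatterns = [path('', views.life, name='life')]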
|
[
"tsokac2@gmail.com"
] |
tsokac2@gmail.com
|
13f587518e71891fcfa8d3c3adc7a7c6bae59559
|
19236d9e966cf5bafbe5479d613a175211e1dd37
|
/cohesity_management_sdk/models/principal.py
|
bb52d45d0c611c758c973745b3a5977b46a127fd
|
[
"MIT"
] |
permissive
|
hemanshu-cohesity/management-sdk-python
|
236c44fbd9604809027f8ddd0ae6c36e4e727615
|
07c5adee58810979780679065250d82b4b2cdaab
|
refs/heads/master
| 2020-04-29T23:22:08.909550
| 2019-04-10T02:42:16
| 2019-04-10T02:42:16
| 176,474,523
| 0
| 0
|
NOASSERTION
| 2019-03-19T09:27:14
| 2019-03-19T09:27:12
| null |
UTF-8
|
Python
| false
| false
| 2,583
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class Principal(object):
"""Implementation of the 'Principal.' model.
Specifies information about a single Principal.
Attributes:
        domain (string): Specifies the domain name where the principal's
            account is maintained.
full_name (string): Specifies the full name (first and last names) of
the principal.
object_class (ObjectClassEnum): Specifies the object class of the
principal (either 'kGroup' or 'kUser'). 'kUser' specifies a user
object class. 'kGroup' specifies a group object class. 'kComputer'
specifies a computer object class.
principal_name (string): Specifies the name of the principal.
sid (string): Specifies the unique Security id (SID) of the
principal.
"""
# Create a mapping from Model property names to API property names
_names = {
"domain":'domain',
"full_name":'fullName',
"object_class":'objectClass',
"principal_name":'principalName',
"sid":'sid'
}
def __init__(self,
domain=None,
full_name=None,
object_class=None,
principal_name=None,
sid=None):
"""Constructor for the Principal class"""
# Initialize members of the class
self.domain = domain
self.full_name = full_name
self.object_class = object_class
self.principal_name = principal_name
self.sid = sid
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
domain = dictionary.get('domain')
full_name = dictionary.get('fullName')
object_class = dictionary.get('objectClass')
principal_name = dictionary.get('principalName')
sid = dictionary.get('sid')
# Return an object of this model
return cls(domain,
full_name,
object_class,
principal_name,
sid)
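# Example round-trip (illustrative values, not from the Cohesity API docs):
#   p = Principal.from_dictionary({'domain': 'corp.local', 'fullName': 'Jane Doe'})
#   p.full_name  # -> 'Jane Doe'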
|
[
"ashish@cohesity.com"
] |
ashish@cohesity.com
|
4d781c9a4ed8adc2dc2d0cdeded54192e62e110c
|
003ffcf8144565404636f3d74590a8d6b10a90a4
|
/492-construct-the-rectangle/492-construct-the-rectangle.py
|
563533a10a837712700af804ed787500a2abbf67
|
[] |
no_license
|
congve1/leetcode
|
fb31edf93049e21210d73f7b3e7b9b82057e1d7a
|
ce1e802b5052da2cdb919d6d7e39eed860e0b61b
|
refs/heads/master
| 2020-05-13T19:19:58.835432
| 2019-05-06T00:44:07
| 2019-05-06T00:44:07
| 181,652,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
class Solution(object):
def constructRectangle(self, area):
"""
:type area: int
:rtype: List[int]
"""
|
[
"congve1@live.com"
] |
congve1@live.com
|
0312c4b5ff3caa1244cdf11977dc0ede1ef32a0c
|
4554fcb85e4c8c33a5b5e68ab9f16c580afcab41
|
/projecteuler/test_xiaobai_41.py
|
b6b7e14cd121456b1dd69da79870b0bbfcde910c
|
[] |
no_license
|
xshen1122/Follow_Huang_Python
|
12f4cebd8ddbc241a1c32cfa16288f059b530557
|
fcea6d1361aa768fb286e1ef4a22d5c4d0026667
|
refs/heads/master
| 2021-01-01T04:37:31.081142
| 2017-12-05T07:31:34
| 2017-12-05T07:31:34
| 97,211,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 791
|
py
|
# test_xiaobai_41.py
# coding: utf-8
'''
We shall say that an n-digit number is pandigital if it makes use of
all the digits 1 to n exactly once. For example, 2143 is a 4-digit
pandigital and is also prime.
What is the largest n-digit pandigital prime that exists?
'''
def checkPrime(number):
for i in range(2,number):
if number%i == 0:
return False
return True
def checkNumber(number):
n_list = []
for item in str(number):
n_list.append(item)
if len(n_list) == len(set(n_list)):
return True
else:
return False
if __name__ == '__main__':
number = 987654321
for number in range(987654321,1000,-1):
print number
if not checkNumber(number):
pass
else:
if checkPrime(number):
print 'the largest pandigital prime is ', number
break
else:
pass
|
[
"xueqin.shen@outlook.com"
] |
xueqin.shen@outlook.com
|
38bbe8aae94fbcd4dffe66ee910ac8d600b52462
|
ae6f8eec0f08045624c6042b723f99695c5e446c
|
/backend/course/admin.py
|
d2dcfb95302bae425f5dc9bbdb332bbc87f07efe
|
[] |
no_license
|
crowdbotics-apps/practual-life-21189
|
a15aa6f2fe6cd5dc8feb6f6a214aed258509b4f7
|
d33e5d7e5d5d9eba0c549a90cecdd96e90d5f8ae
|
refs/heads/master
| 2022-12-29T12:45:45.469879
| 2020-10-06T20:41:31
| 2020-10-06T20:41:31
| 301,849,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
from django.contrib import admin
from .models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
SubscriptionType,
Enrollment,
Category,
)
admin.site.register(Category)
admin.site.register(Module)
admin.site.register(Event)
admin.site.register(Subscription)
admin.site.register(SubscriptionType)
admin.site.register(Recording)
admin.site.register(Enrollment)
admin.site.register(Course)
admin.site.register(Group)
# Register your models here.
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
882dc53ad67fb969d878846c3bdb805805d7b2b1
|
353b36f7907569945d9f956730b31463df8fa1f4
|
/bpl_lib/transactions/Vote.py
|
82a48eea0e3afe81b8c61396acbb0a027d422154
|
[
"MIT"
] |
permissive
|
DuneRoot/bpl-lib
|
54b6a4387ecd404f1fcfa9d46b7ce68f136d90ac
|
3ac1026cfc01ca5a71515caa5e352e4517cba0cc
|
refs/heads/master
| 2020-03-23T19:56:49.291707
| 2019-02-20T19:08:55
| 2019-02-20T19:08:55
| 142,011,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
from bpl_lib.transactions.Transaction import Transaction
from bpl_lib.helpers.Constants import TRANSACTION_TYPE
from bpl_lib.address.Address import Address
from bpl_lib.crypto.Keys import Keys
class Vote(Transaction):
def __init__(self, fee, _error_use_class_method=True):
"""
Creates a vote transaction
:param fee: fee for transaction
:param _error_use_class_method: boolean flag, used to indicate if the transaction
was created from generate or from_dict
"""
if _error_use_class_method:
raise TypeError("Please use Vote.generate(args) or Vote.from_dict(args) to construct me.")
super().__init__(TRANSACTION_TYPE.VOTE, fee)
@classmethod
def generate(cls, votes, secret, second_secret=None, fee=None):
"""
Creates a vote transaction
:param votes: votes (list)
:param secret: secret passphrase (string or bytes)
:param second_secret: second secret passphrase (string or bytes)
:param fee: fee for transaction
:return: (Vote)
"""
self = cls(fee, _error_use_class_method=False)
self._sender_public_key = Keys(secret).get_public_key()
self._recipient_id = Address.from_secret(secret)
self._asset["votes"] = votes
self.sign(secret, second_secret)
return self
@classmethod
def from_dict(cls, transaction):
"""
Creates a vote transaction
:param transaction: transaction (dict)
:return: (Vote)
"""
self = cls(transaction["fee"], _error_use_class_method=False)
self._sender_public_key = transaction["senderPublicKey"]
self._recipient_id = transaction["recipientId"]
self._timestamp = transaction["timestamp"]
self._asset["votes"] = transaction["asset"]["votes"]
self.sign_from_dict(transaction)
return self
def _handle_transaction_type(self, buffer):
buffer.write_bytes("".join(self._asset["votes"]).encode())
return buffer
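# Illustrative usage sketch (added; not part of the original module). The
# "+<public key>" vote format and the passphrase below are assumptions based
# on the docstrings above, not values verified against bpl_lib.
# tx = Vote.generate(
#     votes=["+03a02b9d5fdd1307c2ee4652ba54d492d1fd11a7d1bb3f3a44c4a05e79f19de933"],
#     secret="my top secret passphrase",
# )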
|
[
"johnyob132@gmail.com"
] |
johnyob132@gmail.com
|
1c03923938a1f6d9898a0b07bb9d16d14b83fab1
|
73e3990fdb1e38a053a047d204e26acb43d403e6
|
/hooks/post_gen_project.py
|
49fda6a120e8657cb108e878edb283020eea6efc
|
[
"MIT"
] |
permissive
|
dunderlabs/dunder_cookiecutter_template
|
ad972870570463ff3aa68d887d7bf92b25ef9d11
|
bda261b9f0e5c171470b9eaa80c416ba1a8e656d
|
refs/heads/master
| 2021-01-10T12:21:11.022232
| 2016-04-06T19:48:15
| 2016-04-06T19:48:15
| 52,767,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,151
|
py
|
import os
import hashlib
import random
import time
PROJECT_DIR = os.path.realpath(os.path.curdir)
# Use the system PRNG if possible
# https://github.com/django/django/blob/stable/1.9.x/django/utils/crypto.py#L18-L26
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
https://github.com/django/django/blob/stable/1.9.x/django/utils/crypto.py#L54-L77
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s%s" % (
random.getstate(),
time.time(),
settings.SECRET_KEY)).encode('utf-8')
).digest())
return ''.join(random.choice(allowed_chars) for i in range(length))
def generate_secret_key(project_directory):
env_path = os.path.join(project_directory, '.env.example')
with open(env_path) as f:
env_file = f.read()
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
env_file = env_file.replace('KEY_PLACE', get_random_string(50, chars))
env_file = env_file.replace('DEBUG_VALUE', 'True')
with open(env_path, 'w') as f:
f.write(env_file)
generate_secret_key(PROJECT_DIR)
|
[
"pmazulo@gmail.com"
] |
pmazulo@gmail.com
|
5d280846dece158fbb8c53206b775dc82c7999b2
|
523fb785bda41e33546c929a5c2de6c93f98b434
|
/LeetCode/208.implement-trie-prefix-tree.py
|
24c8114b532aab37979451a9d92a433188a77e05
|
[] |
no_license
|
lizhe960118/TowardOffer
|
afd2029f8f9a1e782fe56ca0ff1fa8fb37892d0e
|
a0608d34c6ed96c9071cc3b9bdf70c95cef8fcbd
|
refs/heads/master
| 2020-04-27T10:33:21.452707
| 2019-05-02T10:47:01
| 2019-05-02T10:47:01
| 174,259,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,385
|
py
|
#
# @lc app=leetcode id=208 lang=python3
#
# [208] Implement Trie (Prefix Tree)
#
# https://leetcode.com/problems/implement-trie-prefix-tree/description/
#
# algorithms
# Medium (37.24%)
# Total Accepted: 167K
# Total Submissions: 448.5K
# Testcase Example: '["Trie","insert","search","search","startsWith","insert","search"]\n[[],["apple"],["apple"],["app"],["app"],["app"],["app"]]'
#
# Implement a trie with insert, search, and startsWith methods.
#
# Example:
#
#
# Trie trie = new Trie();
#
# trie.insert("apple");
# trie.search("apple"); // returns true
# trie.search("app"); // returns false
# trie.startsWith("app"); // returns true
# trie.insert("app");
# trie.search("app"); // returns true
#
#
# Note:
#
#
# You may assume that all inputs are consist of lowercase letters a-z.
# All inputs are guaranteed to be non-empty strings.
#
#
#
class TrieNode(object):
def __init__(self):
self.child = {}
self.is_end = False
class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
cur_node = self.root
for char_s in word:
if char_s not in cur_node.child:
next_node = TrieNode()
cur_node.child[char_s] = next_node
cur_node = cur_node.child[char_s]
cur_node.is_end = True
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
cur_node = self.root
for char_s in word:
if char_s not in cur_node.child:
return False
else:
cur_node = cur_node.child[char_s]
return cur_node.is_end
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
cur_node = self.root
for char_s in prefix:
if char_s not in cur_node.child:
return False
else:
cur_node = cur_node.child[char_s]
return True
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
|
[
"2957308424@qq.com"
] |
2957308424@qq.com
|
8dad47d2b5726e1d51d8266f3a07f170aa7f9599
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_enmeshes.py
|
8ab83b49bb6e84503d7c1bb5705287d69908efb8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from xai.brain.wordbase.verbs._enmesh import _ENMESH
#calss header
class _ENMESHES(_ENMESH, ):
def __init__(self,):
_ENMESH.__init__(self)
self.name = "ENMESHES"
self.specie = 'verbs'
self.basic = "enmesh"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
355814c9f4e4dbeb520f70c969d171be39f800ac
|
37eda7bc5ea24e25a11d68c352b6c8e5a1ca2ae4
|
/barkscape/server/base_runner.py
|
4427eeda115344df19efe5ab9bef9147e3cddac5
|
[] |
no_license
|
bark-simulator/barkscape
|
d1c3668cc3cd9773380b4b2ed365f96ac01548c3
|
dc5265ef8f970488646e6ae91cd47563a0ef11f1
|
refs/heads/master
| 2023-05-07T21:05:11.338776
| 2021-05-31T13:18:55
| 2021-05-31T13:18:55
| 352,927,882
| 3
| 3
| null | 2021-03-31T19:37:49
| 2021-03-30T08:33:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
# Copyright (c) 2021 fortiss GmbH
#
# Authors: Julian Bernhard, Klemens Esterle, Patrick Hart and
# Tobias Kessler
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import sys, os, logging
import asyncio, json
import xviz_avs
from xviz_avs.server import XVIZBaseSession
# BARKSCAPE
from barkscape.server.bark_xviz_stream import BarkXvizStream
"""BaseRunner
Steps the runnable_object and the XVIZstream.
"""
class BaseRunner(XVIZBaseSession):
def __init__(
self, socket, request, runnable_object=None,
dt=0.2, logger=None, stream=None):
super().__init__(socket, request)
self._runnable_object = runnable_object
self._bark_xviz_stream = stream or BarkXvizStream()
self._socket = socket
self._dt = dt
self._logger = logger
def on_connect(self):
print("Web-client connected.")
def on_disconnect(self):
print("Web-client disconnect.")
"""Main functionality for stepping and sending visualization messages
"""
async def main(self):
t = 0
metadata = self._bark_xviz_stream.get_metadata()
await self._socket.send(json.dumps(metadata))
message = await self._bark_xviz_stream.get_message(t, self._runnable_object)
await self._socket.send(json.dumps(message))
await asyncio.sleep(self._dt)
|
[
"patrickhart.1990@gmail.com"
] |
patrickhart.1990@gmail.com
|
ed5f51e53bd578380ba9d9e7121d1bb5587ed8b7
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/504.py
|
866ce9e5a97deb28377cac17d45bd877278d543f
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def convertToBase7(self, num: int) -> str:
if num < 0:
return '-' + self.convertToBase7(-num)
elif num >= 7:
return self.convertToBase7(num//7)+str(num%7)
elif num < 7:
return str(num)
__________________________________________________________________________________________________
sample 32 ms submission
class Solution:
def convertToBase7(self, num: int) -> str:
if num < 0:
return "-" + self.convertToBase7(-num);
elif num == 0:
return "0"
res = [];
while num > 0:
res.append(str(num % 7))
num //= 7
return ''.join(res[::-1])
__________________________________________________________________________________________________
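Worked example (added): 100 = 2*49 + 0*7 + 2, so both submissions return
convertToBase7(100) == "202"; a negative input such as -7 yields "-10".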
|
[
"strengthen@users.noreply.github.com"
] |
strengthen@users.noreply.github.com
|
97d89af9dfe4ef5088a883b84ba3d9d590cc0f80
|
80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019
|
/data/CodeChef/OMWG.py
|
9be4d99b7ea695d8b09d5f1beca4eb627affcc1c
|
[] |
no_license
|
Ritvik19/CodeBook
|
ef7764d89b790e902ede5802f36d5ca910d8a50e
|
2b4ed7938bbf156553d6ba5cba6216449528f0fc
|
refs/heads/master
| 2021-07-04T08:25:52.478719
| 2020-08-08T06:54:14
| 2020-08-08T06:54:14
| 138,744,302
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
for i in range(int(input())):
n, m = input().split()
n, m = int(n), int(m)
score = (n-1) + (m-1) + ((n-1)*(m-1)*2)
print(score)
|
[
"rastogiritvik99@gmail.com"
] |
rastogiritvik99@gmail.com
|
0e237708463f88a8264fb311025fee4c4efe4a2a
|
e71c8efe431fb5d8bf5076a054aeaeeccf3f6404
|
/django_site/torah/templatetags/torah_filters.py
|
252b9b0470086bd807c7f0db15ae01dbcaba3da9
|
[] |
no_license
|
suhailvs/torah
|
2f587744da4f01719b5cc47b78a3da11cab032f2
|
0bc59099b0635c31b296a74b5bd9fbfb8798553a
|
refs/heads/master
| 2021-11-10T23:12:54.719502
| 2021-11-06T12:26:48
| 2021-11-06T12:26:48
| 156,322,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
from django import template
register = template.Library()
PATTERN = 'abgdefzhjiklmnxopsqrct'
fiej = lambda p: p.replace('f','v').replace('i','y').replace('e','H').replace('j','T')
@register.filter(name='get_letternumber')
def get_letternumber(letter):
"""
Return Number curresponding to PaleoHebrew letter
"""
return PATTERN.index(letter)+1
@register.filter(name='get_words')
def get_words(line):
"""
Return list of words of given line
"""
return line.split(' ')
@register.filter(name='get_hebrewletter')
def get_hebrewletter(paleoletter):
"""
    Return the Hebrew letter corresponding to a PaleoHebrew letter
input: a
output: \u05d0
"""
HEBREW_UNICODE = ['\u05d0','\u05d1','\u05d2','\u05d3','\u05d4','\u05d5','\u05d6','\u05d7','\u05d8','\u05d9','\u05db','\u05dc','\u05de','\u05e0','\u05e1','\u05e2','\u05e4','\u05e6','\u05e7','\u05e8','\u05e9','\u05ea']
return HEBREW_UNICODE[PATTERN.index(paleoletter)]
@register.filter(name='replace_fie')
def replace_fie(paleoword):
"""
Replace f -> v, i -> y, e -> H
"""
return fiej(paleoword)
from torah.models import Word
@register.filter(name='get_englishword')
def get_englishword(paleoword):
"""
    Return the English meaning corresponding to a PaleoHebrew word
"""
w = Word.objects.get(name = paleoword[::-1])
return w.translation
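# Illustrative template usage (added; assumes the template loads this
# library with {% load torah_filters %}):
# {{ letter|get_letternumber }}  {{ paleoword|replace_fie }}  {{ paleoword|get_englishword }}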
|
[
"suhailvs@gmail.com"
] |
suhailvs@gmail.com
|
92a55865ce6f0721b8e14204bf9d1663b2200a98
|
9405aa570ede31a9b11ce07c0da69a2c73ab0570
|
/aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CreateDemandRequest.py
|
6526229cdbc8a205e15c3771f39ddb7a45a15023
|
[
"Apache-2.0"
] |
permissive
|
liumihust/aliyun-openapi-python-sdk
|
7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
refs/heads/master
| 2020-09-25T12:10:14.245354
| 2019-12-04T14:43:27
| 2019-12-04T14:43:27
| 226,002,339
| 1
| 0
|
NOASSERTION
| 2019-12-05T02:50:35
| 2019-12-05T02:50:34
| null |
UTF-8
|
Python
| false
| false
| 3,911
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateDemandRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateDemand','ecs')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_DemandDescription(self):
return self.get_query_params().get('DemandDescription')
def set_DemandDescription(self,DemandDescription):
self.add_query_param('DemandDescription',DemandDescription)
def get_InstanceType(self):
return self.get_query_params().get('InstanceType')
def set_InstanceType(self,InstanceType):
self.add_query_param('InstanceType',InstanceType)
def get_InstanceChargeType(self):
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self,InstanceChargeType):
self.add_query_param('InstanceChargeType',InstanceChargeType)
def get_DemandName(self):
return self.get_query_params().get('DemandName')
def set_DemandName(self,DemandName):
self.add_query_param('DemandName',DemandName)
def get_Amount(self):
return self.get_query_params().get('Amount')
def set_Amount(self,Amount):
self.add_query_param('Amount',Amount)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_PeriodUnit(self):
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self,PeriodUnit):
self.add_query_param('PeriodUnit',PeriodUnit)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
44b9f6666d722193eb9a18f604bc1c34b6dab2cd
|
c317f6a390de255540c2fb6a2e637c20bec03762
|
/final/pwn-exzilla/container/server.py
|
476db68bfe9316a904eccb9facdbd2bd25009136
|
[] |
no_license
|
Kodsport/sakerhetssm-2021-solutions
|
a7329ef22862bcfc4c970d43ac210bbe951cf3a8
|
85bc2aa619d55139acf7c91483259088329c15e2
|
refs/heads/master
| 2023-05-12T00:54:24.546337
| 2021-06-07T14:12:32
| 2021-06-07T14:12:32
| 353,975,490
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,576
|
py
|
#!/bin/env python3
import string, json, base64, sys
sys.setrecursionlimit(30)
def f():
pass
CODE_TYPE = type(f.__code__)
FUNC_TYPE = type(f)
class NumberCell:
def __init__(self, n):
self.n = n
@staticmethod
def new():
while True:
try:
n = int(input("Cell value:\n> "))
return NumberCell(n)
except:
print("not a number")
def eval(self, _):
return str(self.n)
def view(self):
return str(self.n)
class FormulaCell:
def __init__(self, formula):
self.formula = formula
@staticmethod
def new():
print("Create a Formula Cell. Not for the faint of heart.")
while True:
try:
x = input("Formula import string:\n> ")
x = base64.b64decode(x)
x = json.loads(x)
assert len(x) == 14
x = CODE_TYPE(x[0], x[1], x[2], x[3], x[4], x[5], base64.b64decode(x[6]), tuple(x[7]), tuple(x[8]), tuple(x[9]), x[10], x[11], x[12], base64.b64decode(x[13]))
x = FUNC_TYPE(x, globals())
return FormulaCell(x)
except:
print("Bad import string")
def eval(self, sheet):
return str(self.formula(sheet))
def view(self):
c = self.formula.__code__
x = [c.co_argcount, c.co_posonlyargcount, c.co_kwonlyargcount, c.co_nlocals, c.co_stacksize, c.co_flags, base64.b64encode(c.co_code).decode(), c.co_consts, c.co_names, c.co_varnames, c.co_filename, c.co_name, c.co_firstlineno, base64.b64encode(c.co_lnotab).decode()]
x = base64.b64encode(json.dumps(x).encode())
return x.decode()
class Sheet:
def __init__(self, w, h):
self.grid = []
for _ in range(h):
row = []
for _ in range(w):
row.append(NumberCell(1))
self.grid.append(row)
def display(self):
col_widths = [1]*len(self.grid[0])
for row in self.grid:
for col in range(len(row)):
col_widths[col] = max(col_widths[col], len(row[col].eval(self)))
separator = "+----+" + "+".join(["-"*(n+2) for n in col_widths]) + "+"
print(separator)
self.display_row(" ", [chr(ord("A")+i) for i in range(len(col_widths))], col_widths)
print(separator)
for (i, row) in enumerate(self.grid):
self.display_row(str(i), [cell.eval(self) for cell in row], col_widths)
print(separator)
def display_row(self, first, values, col_widths):
print("| "+"%2s"%first+" | " + " | ".join([("%"+str(n)+"s")%val for (val, n) in zip(values,col_widths)]) + " |")
def edit(self, r, c):
while True:
try:
choice = int(input("1. Create number cell\n2. Create formula cell\n> "))
except:
print("Bad option!")
continue
if choice == 1:
self.grid[r][c] = NumberCell.new()
return
elif choice == 2:
self.grid[r][c] = FormulaCell.new()
return
else:
print("Bad option")
def view(self, r, c):
print("Cell:", self.grid[r][c].view())
the_sheets = {}
def new_sheet():
name = input("Name? ")
the_sheets[name] = Sheet(10, 10)
open_sheet(name)
def list_sheets():
print("The Sheets:")
for k in the_sheets.keys():
print(k)
def open_sheet(name=""):
if len(the_sheets) == 0:
print("There are no sheets yet! Create one first!")
return
if name == "":
list_sheets()
name = input("Name? ")
while name not in the_sheets:
list_sheets()
print("Sheet doesn't exist")
name = input("Name? ")
sheet = the_sheets[name]
while True:
print("------", name, "--------------------------")
sheet.display()
print("t <pos> - edit, w <pos> - view, e - close")
choice = input("> ").split(" ")
if choice[0] == "t":
try:
r, c = pos2rowcol(choice[1])
sheet.edit(r, c)
except:
print("Bad row or col!")
elif choice[0] == "w":
try:
r, c = pos2rowcol(choice[1])
sheet.view(r, c)
except:
print("Bad row or col!")
elif choice[0] == "e":
return
else:
print("Bad choice!")
def pos2rowcol(pos):
assert len(pos) >= 2
assert pos[0] in string.ascii_uppercase
assert all([c in string.digits for c in pos[1:]])
return int(pos[1:]), ord(pos[0])-ord("A")
def menu():
print("Menu")
print("1. Open sheet")
print("2. New sheet")
print("3. Exit")
def banner():
print(""" ______ __ __ ______ __ __ __ ______
/\ ___\ /\_\_\_\ /\___ \ /\ \ /\ \ /\ \ /\ __ \
\ \ __\ \/_/\_\/_ \/_/ /__ \ \ \ \ \ \____ \ \ \____ \ \ __ \
\ \_____\ /\_\/\_\ /\_____\ \ \_\ \ \_____\ \ \_____\ \ \_\ \_\
\/_____/ \/_/\/_/ \/_____/ \/_/ \/_____/ \/_____/ \/_/\/_/
Excel killa""")
def main():
banner()
while True:
menu()
try:
choice = int(input("> "))
except:
print("Bad choice!")
continue
if choice == 1:
open_sheet()
elif choice == 2:
new_sheet()
elif choice == 3:
print("Bye!")
break
else:
print("Bad choice!")
if __name__ == "__main__":
import random
random.seed(0)
example = Sheet(2, 12)
for i in range(10):
example.grid[i][0] = NumberCell(random.randint(1, 9)*100)
example.grid[i][1] = NumberCell(random.randint(1, 9)*100)
def sumcol0(sheet):
res = 0
for i in range(10):
res += int(sheet.grid[i][0].eval(sheet))
return str(res)
def sumcol1(sheet):
res = 0
for i in range(10):
res += int(sheet.grid[i][1].eval(sheet))
return str(res)
def total(sheet):
return str(int(sheet.grid[10][0].eval(sheet))+int(sheet.grid[10][1].eval(sheet)))
example.grid[10][0] = FormulaCell(sumcol0)
example.grid[10][1] = FormulaCell(sumcol1)
example.grid[11][0] = NumberCell(0)
example.grid[11][1] = FormulaCell(total)
the_sheets["budget"] = example
main()
|
[
"drwal.mateusz@gmail.com"
] |
drwal.mateusz@gmail.com
|
999925a84d6f0ad85d37484c414d300427a63c09
|
54857571461a579bed30cee27871aaa5fe396bcc
|
/nltk-0.9.7/src/nltk/wordnet/__init__.py
|
68c62dcb3b99882cb006753e2b13e209d780a975
|
[] |
no_license
|
ahmedBazaz/affective-text-classification
|
78375182e800b39e0e309e8b469e273c0d9590f0
|
719e9b26e60863c620662564fb9cfeafc004777f
|
refs/heads/master
| 2021-01-10T14:50:01.100274
| 2009-01-09T03:59:01
| 2009-01-09T03:59:01
| 48,296,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,177
|
py
|
# Natural Language Toolkit: WordNet Interface
#
# Copyright (C) 2001-2008 NLTK Project
# Author: Oliver Steele <steele@osteele.com>
# Steven Bird <sb@csse.unimelb.edu.au>
# David Ormiston Smith <daosmith@csse.unimelb.edu.au>>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
WordNet interface, based on Oliver Steele's Pywordnet, together
with an implementation of Ted Pedersen's Wordnet::Similarity package.
Usage
=====
>>> from nltk.wordnet import *
Retrieve words from the database
>>> N['dog']
dog (noun)
>>> V['dog']
dog (verb)
>>> ADJ['clear']
clear (adj)
>>> ADV['clearly']
clearly (adv)
Examine a word's senses and pointers:
>>> N['dog'].synsets()
[{noun: dog, domestic_dog, Canis_familiaris}, {noun: frump, dog}, {noun: dog}, {noun: cad, bounder, blackguard, dog, hound, heel}, {noun: frank, frankfurter, hotdog, hot_dog, dog, wiener, wienerwurst, weenie}, {noun: pawl, detent, click, dog}, {noun: andiron, firedog, dog, dog-iron}]
('dog' in {noun: dog, domestic dog, Canis familiaris}, 'dog' in {noun: frump, dog}, 'dog' in {noun: dog}, 'dog' in {noun: cad, bounder, blackguard, dog, hound, heel}, 'dog' in {noun: frank, frankfurter, hotdog, hot dog, dog, wiener, wienerwurst, weenie}, 'dog' in {noun: pawl, detent, click, dog}, 'dog' in {noun: andiron, firedog, dog, dog-iron})
Extract the first sense:
>>> N['dog'][0]
{noun: dog, domestic_dog, Canis_familiaris}
Get the first five pointers (relationships) from dog to other synsets:
>>> N['dog'][0].relations()
{'hypernym': [('noun', 2083346, 0), ('noun', 1317541, 0)],
'part holonym': [('noun', 2158846, 0)],
'member meronym': [('noun', 2083863, 0), ('noun', 7994941, 0)],
'hyponym': [('noun', 1322604, 0), ('noun', 2084732, 0), ...]}
Get those synsets of which 'dog' is a member meronym:
>>> N['dog'][0][MEMBER_MERONYM]
[{noun: Canis, genus Canis}, {noun: pack}]
"""
from util import *
from cache import *
from lexname import *
from dictionary import *
from similarity import *
from synset import *
from browse import *
from stemmer import *
from browser import *
|
[
"tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883"
] |
tytung@6129d76e-ddfe-11dd-a37d-c9d1c40e0883
|
e28425c6be03c6d9956daa0eddac1c7fa2d826f5
|
816232db2f21e193612eaa60eda0d5897d31caaf
|
/COS_PRO/4차/6_자아도취수.py
|
edbcf8c6e72656e86c99fe03d7d9553c8241eb4a
|
[] |
no_license
|
Juyoung4/StudyAlgorithm
|
a60bfa7657eac57f59200bfa204aff1ad27c79f8
|
4b190e0bfeb268bef4be00ae9bedd9ca8946fbd6
|
refs/heads/master
| 2023-08-31T04:37:07.422641
| 2021-09-27T08:38:09
| 2021-09-27T08:38:09
| 282,757,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
def power(base, exponent):
val = 1
for i in range(exponent):
val *= base
return val
def solution(k):
answer = []
bound = power(10, k)
for i in range(bound // 10, bound):
current = i
calculated = 0
while current != 0:
calculated += (current%10)**k
current = current // 10
if calculated == i:
answer.append(i)
return answer
k = 3
ret = solution(k)
print("solution 함수의 반환 값은", ret, "입니다.")
|
[
"vallot7@naver.com"
] |
vallot7@naver.com
|
0d34f49f9c5a57b17efc8069e4270058d795f429
|
f95d0c620151ae16d0139f742b461e411ecf1f7c
|
/assignments/assignment2/cs231n/optim.py
|
a337f3432eaf4530675c0b50c45c671825d0b4c6
|
[] |
no_license
|
chintanbetrabet/CS231n
|
5116925618df18a23bc5a99d5003f3a6273e3f95
|
96cd5bb8ff32a80fcf5fe82e485669187696f267
|
refs/heads/master
| 2021-07-10T04:53:51.654828
| 2017-10-07T18:53:39
| 2017-10-07T18:53:39
| 104,069,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,106
|
py
|
import numpy as np
"""
This file implements various first-order update rules that are commonly used for
training neural networks. Each update rule accepts current weights and the
gradient of the loss with respect to those weights and produces the next set of
weights. Each update rule has the same interface:
def update(w, dw, config=None):
Inputs:
- w: A numpy array giving the current weights.
- dw: A numpy array of the same shape as w giving the gradient of the
loss with respect to w.
- config: A dictionary containing hyperparameter values such as learning rate,
momentum, etc. If the update rule requires caching values over many
iterations, then config will also hold these cached values.
Returns:
- next_w: The next point after the update.
- config: The config dictionary to be passed to the next iteration of the
update rule.
NOTE: For most update rules, the default learning rate will probably not perform
well; however the default values of the other hyperparameters should work well
for a variety of different problems.
For efficiency, update rules may perform in-place updates, mutating w and
setting next_w equal to w.
"""
def sgd(w, dw, config=None):
"""
Performs vanilla stochastic gradient descent.
config format:
- learning_rate: Scalar learning rate.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
w -= config['learning_rate'] * dw
return w, config
def sgd_momentum(w, dw, config=None):
"""
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a moving
average of the gradients.
"""
#print "HI"
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('momentum', 0.9)
v = config.get('velocity', np.zeros_like(w))
next_w = None
#############################################################################
# TODO: Implement the momentum update formula. Store the updated value in #
# the next_w variable. You should also use and update the velocity v. #
#############################################################################
    # momentum update: accumulate a velocity and step along it
    # (the original also applied the raw gradient step, which double-counts dw)
    v = config['momentum'] * v - config['learning_rate'] * dw
    w += v
    next_w = w
#############################################################################
# END OF YOUR CODE #
#############################################################################
config['velocity'] = v
return next_w, config
def rmsprop(x, dx, config=None):
"""
Uses the RMSProp update rule, which uses a moving average of squared gradient
values to set adaptive per-parameter learning rates.
config format:
- learning_rate: Scalar learning rate.
- decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
gradient cache.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- cache: Moving average of second moments of gradients.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('decay_rate', 0.99)
config.setdefault('epsilon', 1e-8)
config.setdefault('cache', np.zeros_like(x))
    next_x = None
    #############################################################################
    # TODO: Implement the RMSprop update formula, storing the next value of x  #
    # in the next_x variable. Don't forget to update cache value stored in     #
    # config['cache'].                                                         #
    #############################################################################
    cache = config['decay_rate'] * config['cache'] + (1 - config['decay_rate']) * (dx ** 2)
    next_x = x - config['learning_rate'] * dx / (np.sqrt(cache) + config['epsilon'])
    config['cache'] = cache
#############################################################################
# END OF YOUR CODE #
#############################################################################
return next_x, config
def adam(x, dx, config=None):
"""
Uses the Adam update rule, which incorporates moving averages of both the
gradient and its square and a bias correction term.
config format:
- learning_rate: Scalar learning rate.
- beta1: Decay rate for moving average of first moment of gradient.
- beta2: Decay rate for moving average of second moment of gradient.
- epsilon: Small scalar used for smoothing to avoid dividing by zero.
- m: Moving average of gradient.
- v: Moving average of squared gradient.
- t: Iteration number.
"""
if config is None: config = {}
config.setdefault('learning_rate', 1e-3)
config.setdefault('beta1', 0.9)
config.setdefault('beta2', 0.999)
config.setdefault('epsilon', 1e-8)
config.setdefault('m', np.zeros_like(x))
config.setdefault('v', np.zeros_like(x))
config.setdefault('t', 0)
next_x = None
#############################################################################
# TODO: Implement the Adam update formula, storing the next value of x in #
# the next_x variable. Don't forget to update the m, v, and t variables #
# stored in config. #
#############################################################################
    config['t'] += 1
    config['m'] = config['beta1'] * config['m'] + (1 - config['beta1']) * dx
    config['v'] = config['beta2'] * config['v'] + (1 - config['beta2']) * dx ** 2
    # bias-corrected moment estimates (epsilon belongs outside the sqrt)
    m_hat = config['m'] / (1 - config['beta1'] ** config['t'])
    v_hat = config['v'] / (1 - config['beta2'] ** config['t'])
    next_x = x - config['learning_rate'] * m_hat / (np.sqrt(v_hat) + config['epsilon'])
#############################################################################
# END OF YOUR CODE #
#############################################################################
return next_x, config
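# Minimal usage sketch (added; not part of the assignment). Each rule
# returns (next_w, config); thread config back in so cached state
# (velocity, cache, m, v, t) persists across iterations.
# w = np.zeros(3)
# w, cfg = adam(w, np.ones(3))        # first call initializes m, v, t
# w, cfg = adam(w, np.ones(3), cfg)   # later calls reuse the cached moments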
|
[
"chintanbetrabet@gmail.com"
] |
chintanbetrabet@gmail.com
|
5c4cd537a6ff217add8a3efe464d8da6c2abbd93
|
b726ec9abfbd53f03fa96ccd336ed165aaa306c8
|
/find_max_common_subsequence.py
|
16249cff0778023e0896a80ee83635bd50f86092
|
[] |
no_license
|
hcxie20/Algorithm
|
de0256aa3acacf96833e46a0b0c66517dae6cbfd
|
cace01b8441a8a1923b827de844965874e790d77
|
refs/heads/master
| 2021-08-15T23:18:00.364329
| 2020-12-02T05:53:32
| 2020-12-02T05:53:32
| 228,120,962
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
class Solution(object):
def __init__(self, str1, str2):
self.value = 0
self.str = ""
if str1 and str2:
dp = [[0 for i in range(len(str1) + 1)] for j in range(len(str2) + 1)]
for i in range(1, len(str2) + 1):
for j in range(1, len(str1) + 1):
if str1[j - 1] == str2[i - 1]:
dp[i][j] = dp[i - 1][j - 1] + 1
else:
dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])
            # walk back through dp to recover one LCS
            i, j = len(str2), len(str1)
            self.value = dp[i][j]
            while i > 0 and j > 0:
                if str1[j - 1] == str2[i - 1]:
                    self.str = str2[i - 1] + self.str
                    i -= 1
                    j -= 1
                elif dp[i - 1][j] >= dp[i][j - 1]:
                    i -= 1
                else:
                    j -= 1
if __name__ == "__main__":
a = "abcdef"
b = ""
c = Solution(a, b)
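    # Quick check (added): "ace" is a longest common subsequence of
    # "abcde" and "ace".
    # print(Solution("abcde", "ace").str)   # -> ace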
|
[
"="
] |
=
|
7c04a3f7c065a01a0d8e43922f46a8251de774d9
|
54fc549a8af5bad5cfeb97af92a02448297f1ea9
|
/src/gather_reviews/template.py
|
b60cad3a592d610f9ae8012b9ac6cb7da4f2f52a
|
[] |
no_license
|
ace-racer/ReviewAnalysis
|
35659ba09917a345edb3e3701aa12ae78602333b
|
95fee3407791b5bbbc47619b06e603689e2249ed
|
refs/heads/master
| 2020-07-26T06:10:05.563327
| 2019-10-26T14:58:12
| 2019-10-26T14:58:12
| 208,559,897
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
class ReviewTemplate:
def __init__(self):
self.rating = 0
self.title = ""
self.review_text = ""
self.reviewer = ""
self.review_date = ""
self.review_upvotes = 0
self.review_downvotes = 0
|
[
"anuragchatterjee92@gmail.com"
] |
anuragchatterjee92@gmail.com
|
b87a8135b8a13d1fa8a6878cd59447c839828a69
|
e53b7bbcea1a6f06175a9f14e31d5725fe80e804
|
/Question_100/Q15_SobelFilter.py
|
d31f99bef7a488d33314a96b6c743a73bd861769
|
[] |
no_license
|
Zpadger/ObjectDetection
|
5777c8d78c71dca1af6bccf25b01288dca7100c3
|
aa0193a38f3d5c3a318501c3a59e89b73d3e244b
|
refs/heads/master
| 2020-08-16T02:58:45.412713
| 2019-12-14T08:18:51
| 2019-12-14T08:18:51
| 215,446,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
import cv2
import numpy as np
# Gray scale
def BGR2GRAY(img):
b = img[:, :, 0].copy()
g = img[:, :, 1].copy()
r = img[:, :, 2].copy()
# Gray scale
out = 0.2126 * r + 0.7152 * g + 0.0722 * b
out = out.astype(np.uint8)
return out
# sobel filter
def sobel_filter(img, K_size=3):
if len(img.shape) == 3:
H, W, C = img.shape
else:
img = np.expand_dims(img,axis=-1)
H, W, C = img.shape
# Zero padding
pad = K_size // 2
    out = np.zeros((H + pad * 2, W + pad * 2), dtype=float)
    out[pad: pad + H, pad: pad + W] = img[..., 0].copy().astype(float)  # use the argument, not the global `gray`
tmp = out.copy()
out_v = out.copy()
out_h = out.copy()
    # sobel vertical kernel (vertical direction)
    Kv = [[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]
    # sobel horizontal kernel (horizontal direction)
    Kh = [[1., 0., -1.], [2., 0., -2.], [1., 0., -1.]]
# filtering
for y in range(H):
for x in range(W):
out_v[pad + y, pad + x] = np.sum(Kv * (tmp[y: y + K_size, x: x + K_size]))
out_h[pad + y, pad + x] = np.sum(Kh * (tmp[y: y + K_size, x: x + K_size]))
out_v = np.clip(out_v, 0, 255)
out_h = np.clip(out_h, 0, 255)
out_v = out_v[pad: pad + H, pad: pad + W].astype(np.uint8)
out_h = out_h[pad: pad + H, pad: pad + W].astype(np.uint8)
return out_v, out_h
# Read image
img = cv2.imread("imori.jpg").astype(np.float)
# grayscale
gray = BGR2GRAY(img)
# different filtering
out_v, out_h = sobel_filter(gray, K_size=3)
# Save result
cv2.imwrite("out_v.jpg", out_v)
cv2.imshow("result_v", out_v)
while cv2.waitKey(100) != 27:# loop if not get ESC
if cv2.getWindowProperty('result_v',cv2.WND_PROP_VISIBLE) <= 0:
break
cv2.destroyWindow('result_v')
cv2.imwrite("out_h.jpg", out_h)
cv2.imshow("result_h", out_h)
# loop if not get ESC or click x
while cv2.waitKey(100) != 27:
if cv2.getWindowProperty('result_h',cv2.WND_PROP_VISIBLE) <= 0:
break
cv2.destroyWindow('result_h')
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
Zpadger.noreply@github.com
|
ecf51cb881137d1f924dbc463f597d153a029da8
|
641ac23338d100a4aea52b8246c924f219a4b276
|
/fabfile.py
|
6ca0d13fd156aefa0294704b471dc351415f5463
|
[] |
no_license
|
loogica/videos
|
e1557f38913cdeb9c48d478a420c25ea6aac33ef
|
2ae2817f4630d5c351a510b81a6c007dbfdda09a
|
refs/heads/master
| 2021-03-13T00:04:20.355363
| 2013-08-17T22:24:04
| 2013-08-17T22:24:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 530
|
py
|
from fabric.api import env, run, put, local, cd, sudo
env.hosts = ['loogica.net']
def zip_output():
local('zip -r blog_static.zip output')
def send_data():
put('blog_static.zip', '/tmp')
def remote_deploy_zip():
with cd('/tmp'):
sudo('unzip blog_static.zip')
sudo('mv output/ /opt/apps/')
with cd('/opt/apps'):
sudo('rm -rf videos')
sudo('mv output videos')
sudo('chown -R deploy:www-data videos')
def deploy():
zip_output()
send_data()
remote_deploy_zip()
|
[
"felipecruz@loogica.net"
] |
felipecruz@loogica.net
|
a78b0ad15ac86982ce568a9763b73aef095c2af4
|
ef1458fae5fbd6b7a9281ccd4d9bc8289f3dd38b
|
/examples_UQ/MM2_surrogate_diam_batchgrid.py
|
f2a70af511b46ac0ec7e3de95da9b74e99547a93
|
[
"BSD-3-Clause"
] |
permissive
|
vt100/mystic
|
a42910537c3de90d1c2a5637bad5d866308e8863
|
7589eee4b9a7cb6056114ee6770579d173d9007b
|
refs/heads/master
| 2021-01-17T22:28:57.743493
| 2015-07-17T15:25:35
| 2015-07-17T15:25:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,409
|
py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2009-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
#######################################################################
# scaling and mpi info; also optimizer configuration parameters
# hard-wired: use fmin solver
#######################################################################
#scale = 1.0
#npop = 20
nbins = [2,2,2]
#maxiter = 1000
#maxfun = 1e+6
#convergence_tol = 1e-4
#######################################################################
# the model function
#######################################################################
#from surrogate import marc_surr as model
from surrogate import ballistic_limit as limit
#######################################################################
# the subdiameter calculation
#######################################################################
def costFactory(i):
"""a cost factory for the cost function"""
def cost(rv):
"""compute the diameter as a calculation of cost
Input:
- rv -- 1-d array of model parameters
Output:
- diameter -- scale * | F(x) - F(x')|**2
"""
from surrogate import marc_surr as model
# prepare x and xprime
rv = list(rv)
params = rv[:-1] #XXX: assumes Xi' is at rv[-1]
params_prime = rv[:i]+rv[-1:]+rv[i+1:-1] #XXX: assumes Xi' is at rv[-1]
# get the F(x) response
Fx = model(params)
# get the F(x') response
Fxp = model(params_prime)
# compute diameter
scale = 1.0
return -scale * (Fx - Fxp)**2
return cost
#######################################################################
# make a pseudo-global optimizer from a steepest descent optimizer
#######################################################################
def optimize(cost,lower,upper,nbins):
from mystic.tools import random_seed
from pathos.pools import ProcessPool as Pool
random_seed(123)
# generate arrays of points defining a grid in parameter space
grid_dimensions = len(lower)
bins = []
for i in range(grid_dimensions):
step = abs(upper[i] - lower[i])/nbins[i]
bins.append( [lower[i] + (j+0.5)*step for j in range(nbins[i])] )
# build a grid of starting points
from pool_helper import local_optimize
from mystic.math.grid import gridpts
initial_values = gridpts(bins)
# run optimizer for each grid point
lb = [lower for i in range(len(initial_values))]
ub = [upper for i in range(len(initial_values))]
cf = [cost for i in range(len(initial_values))]
nnodes = len(lb)
# map:: params, energy, func_evals = local_optimize(cost,x0,lb,ub)
results = Pool(nnodes).map(local_optimize, cf, initial_values, lb, ub)
#print "results = %s" % results
# get the results with the lowest energy
best = list(results[0][0]), results[0][1]
func_evals = results[0][2]
for result in results[1:]:
func_evals += result[2] # add function evaluations
if result[1] < best[1]: # compare energy
best = list(result[0]), result[1]
# return best
print "solved: %s" % best[0]
scale = 1.0
diameter_squared = -best[1] / scale #XXX: scale != 0
return diameter_squared, func_evals
#######################################################################
# loop over model parameters to calculate concentration of measure
#######################################################################
def UQ(start,end,lower,upper):
#from pathos.pools import ProcessPool as Pool
from pathos.pools import ThreadPool as Pool
#from pool_helper import func_pickle # if fails to pickle, try using a helper
# run optimizer for each subdiameter
lb = [lower + [lower[i]] for i in range(start,end+1)]
ub = [upper + [upper[i]] for i in range(start,end+1)]
nb = [nbins[:] for i in range(start,end+1)]
for i in range(len(nb)): nb[i][-1] = nb[i][i]
cf = [costFactory(i) for i in range(start,end+1)]
#cf = [func_pickle(i) for i in cf]
#cf = [cost.name for cost in cf]
nnodes = len(lb)
#construct cost function and run optimizer
results = Pool(nnodes).map(optimize, cf,lb,ub,nb)
#print "results = %s" % results
    results = list(zip(*results))  # py3: zip returns an iterator
    diameters = list(results[0])
    function_evaluations = list(results[1])
    total_func_evals = sum(function_evaluations)
    total_diameter = sum(diameters)
    print("subdiameters (squared): %s" % diameters)
    print("diameter (squared): %s" % total_diameter)
    print("func_evals: %s => %s" % (function_evaluations, total_func_evals))
return total_diameter
#######################################################################
# rank, bounds, and restart information
#######################################################################
if __name__ == '__main__':
from math import sqrt
function_name = "marc_surr"
lower_bounds = [60.0, 0.0, 2.1]
upper_bounds = [105.0, 30.0, 2.8]
# h = thickness = [60,105]
# a = obliquity = [0,30]
# v = speed = [2.1,2.8]
RVstart = 0; RVend = 2
RVmax = len(lower_bounds) - 1
# when not a random variable, set the value to the lower bound
for i in range(0,RVstart):
upper_bounds[i] = lower_bounds[i]
for i in range(RVend+1,RVmax+1):
upper_bounds[i] = lower_bounds[i]
lbounds = lower_bounds[RVstart:1+RVend]
ubounds = upper_bounds[RVstart:1+RVend]
#FIXME: these are *copies*... actual values contained in 'local_optimize'
maxiter = 1000
maxfun = 1e+6
convergence_tol = 1e-4
print "...SETTINGS..."
print "nbins = %s" % nbins
print "maxiter = %s" % maxiter
print "maxfun = %s" % maxfun
print "convergence_tol = %s" % convergence_tol
#print "crossover = %s" % crossover
#print "percent_change = %s" % percent_change
print "..............\n\n"
print " model: f(x) = %s(x)" % function_name
param_string = "["
for i in range(RVmax+1):
param_string += "'x%s'" % str(i+1)
if i == (RVmax):
param_string += "]"
else:
param_string += ", "
print " parameters: %s" % param_string
print " varying 'xi', with i = %s" % range(RVstart+1,RVend+2)
print " lower bounds: %s" % lower_bounds
print " upper bounds: %s" % upper_bounds
# print " ..."
nbins.append(None) #XXX: kind of hackish
diameter = UQ(RVstart,RVend,lower_bounds,upper_bounds)
# EOF
|
[
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] |
mmckerns@968178ea-60bd-409e-af13-df8a517b6005
|
31f23cfa6c6ca1b240f30f9db28cd4e53accc1dc
|
d9e26e516ab3863b6e7d00c4e3cdecf1af7028eb
|
/tests/test_parsers/test_xaf_association_parser.py
|
3a3efa1edd3f4a9aaba83169a8a2fb2a24c27680
|
[
"Apache-2.0"
] |
permissive
|
INCATools/ontology-access-kit
|
2f08a64b7308e8307d1aaac2a81764e7d98b5928
|
8d2a124f7af66fe2e796f9e0ece55585438796a5
|
refs/heads/main
| 2023-08-30T14:28:57.201198
| 2023-08-29T17:40:19
| 2023-08-29T17:40:19
| 475,072,415
| 67
| 15
|
Apache-2.0
| 2023-09-07T01:06:04
| 2022-03-28T15:50:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 949
|
py
|
import logging
import unittest
from oaklib.datamodels.association import Association
from oaklib.parsers.parser_base import ColumnReference
from oaklib.parsers.xaf_association_parser import XafAssociationParser
from tests import INPUT_DIR
GAF = INPUT_DIR / "test-uniprot.gaf"
class XafAssociationParserTest(unittest.TestCase):
"""Tests parsing of GAF and GAF-like formats."""
def test_parser(self):
"""Tests parsing associations."""
parser = XafAssociationParser(
subject_column=ColumnReference(1), object_column=ColumnReference(4)
)
with open(GAF) as file:
assocs = list(parser.parse(file))
for association in assocs:
logging.info(association)
self.assertIn(
Association(
subject="Q9BPZ7", predicate=None, object="GO:0005737", property_values=[]
),
assocs,
)
|
[
"noreply@github.com"
] |
INCATools.noreply@github.com
|
f0a8b55b7fdb9455813100ac46d0ddf18ded61b7
|
2b42c0f490128aab8aacf9ad572d280e7b702d92
|
/postgresqleu/confreg/management/commands/confreg_expire_waitlist.py
|
22da3c271f3d6e29755039f78dc2b8cf352ec509
|
[] |
no_license
|
danielgustafsson/pgeu-website
|
fc5bd38749aaf4bbdd4a112307b856e1fbc777b3
|
202e5c5118d12727902b4adc88eb465330b9705d
|
refs/heads/master
| 2021-01-18T02:37:08.440516
| 2016-07-19T21:41:49
| 2016-07-19T21:41:49
| 63,783,904
| 0
| 0
| null | 2016-07-20T13:30:56
| 2016-07-20T13:30:55
| null |
UTF-8
|
Python
| false
| false
| 2,470
|
py
|
#!/usr/bin/env python
#
# Expire waitlist offers that have expired, so others can get the
# seats.
#
# Copyright (C) 2015, PostgreSQL Europe
#
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.conf import settings
from datetime import datetime
from django.template import Context
from django.template.loader import get_template
from postgresqleu.mailqueue.util import send_simple_mail
from postgresqleu.confreg.models import RegistrationWaitlistEntry, RegistrationWaitlistHistory
class Command(BaseCommand):
help = 'Expire conference waitlist offers'
@transaction.atomic
def handle(self, *args, **options):
# Any entries that actually have an invoice will be canceled by the invoice
# system, as the expiry time of the invoice is set synchronized. In this
# run, we only care about offers that have not been picked up at all.
wlentries = RegistrationWaitlistEntry.objects.filter(registration__payconfirmedat__isnull=True, registration__invoice__isnull=True, offerexpires__lt=datetime.now())
template = get_template('confreg/mail/waitlist_expired.txt')
for w in wlentries:
reg = w.registration
# Create a history entry so we know exactly when it happened
RegistrationWaitlistHistory(waitlist=w,
text="Offer expired at {0}".format(w.offerexpires)).save()
# Notify conference organizers
send_simple_mail(reg.conference.contactaddr,
reg.conference.contactaddr,
'Waitlist expired',
u'User {0} {1} <{2}> did not complete the registration before the waitlist offer expired.'.format(reg.firstname, reg.lastname, reg.email),
sendername=reg.conference.conferencename)
# Also send an email to the user
send_simple_mail(reg.conference.contactaddr,
reg.email,
'Your waitlist offer for {0}'.format(reg.conference.conferencename),
template.render(Context({
'conference': reg.conference,
'reg': reg,
'offerexpires': w.offerexpires,
'SITEBASE': settings.SITEBASE,
})),
sendername = reg.conference.conferencename,
receivername = u"{0} {1}".format(reg.firstname, reg.lastname),
)
# Now actually expire the offer
w.offeredon = None
w.offerexpires = None
# Move the user to the back of the waitlist (we have a history entry for the
# initial registration date, so it's still around)
w.enteredon = datetime.now()
w.save()
|
[
"magnus@hagander.net"
] |
magnus@hagander.net
|
ca6779ba52b3bd0f2ee2aaa437414e97d2550a24
|
a7b66311c2ce113789933ec3162f1128b2862f13
|
/app/waterQual/30yr/reason/cyclicCmap.py
|
c7e0ae852714e2a90f261dece677fc8ec7437826
|
[
"MIT"
] |
permissive
|
ChanJeunlam/geolearn
|
214b2c42359ea1164b39117fad2d7470adeb6d35
|
791caa54eb70920823ea7d46714dc8a3e7fa7445
|
refs/heads/master
| 2023-07-16T04:13:15.526364
| 2021-08-16T05:24:18
| 2021-08-16T05:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
sd = np.datetime64('2000-01-01')
ed = np.datetime64('2000-12-31')
t = pd.date_range(sd, ed)
td = t.dayofyear.values-1
fig, ax = plt.subplots(1, 1)
nt = td.max()
# tLst = ['2000-01-01', '2000-03-01', '2000-06-01', '2000-09-01']
tLst = ['2000-{:02d}-01'.format(m+1) for m in range(12)]
for k in range(len(tLst)):
tt = pd.to_datetime(tLst[k]).dayofyear-1
xx = np.cos(tt/nt*np.pi*2)
yy = np.sin(tt/nt*np.pi*2)
ax.plot([0, xx], [0, yy], 'k-')
ax.text(xx, yy, tLst[k][5:])
x = np.cos(td/nt*np.pi*2)
y = np.sin(td/nt*np.pi*2)
ax.scatter(x, y, c=td, cmap='hsv',s=100)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_xticks([])
ax.set_aspect('equal', 'box')
fig.show()
|
[
"geofkwai@gmail.com"
] |
geofkwai@gmail.com
|
830c884047ab74b87a3b7d60e5412388c2189e4c
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2968/60797/295581.py
|
e68e8eb6ee4d1332321e959eacdf1950a4ee97e5
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
class Solution:
def isPalindrome(self, data):
for i in range(len(data)):
if data[i]!=data[len(data)-1-i]:
return False
return True
    def find(self, data):
        re = 0
        # count palindromic substrings data[i:j+1]
        for i in range(len(data)):
            for j in range(i, len(data)):
                if self.isPalindrome(data[i:j + 1]):
                    re += 1
        return re
if __name__ == '__main__':
ss = input()
data = ss
q = int(input())
for i in range(q):
line = input().split()
        if line[0] == '1':
            data = data + line[1]
        elif line[0] == '2':
            data = line[1][::-1] + data
        elif line[0] == '3':
s = Solution()
re = s.find(data)
print(re)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
c11c53b590a3c16bb3b7c28cae7424a53dea3d87
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/94/usersdata/169/57980/submittedfiles/mediaLista.py
|
c8d5077de447c7b9ad6542447f7fcc7e6877b905
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
# -*- coding: utf-8 -*-
def média(lista):
soma=0
for i in range(0,len(lista),1):
soma=soma+lista[i]
resultado=soma/len(lista)
return resultado
n = int(input('Enter how many numbers: '))
l1=[]
for i in range(0,n,1):
    v = float(input('Enter a value for the list: '))
l1.append(v)
print('%.2f' %l1[0])
print('%.2f' %l1[i])
print('%.2f' %média(l1))
print(l1)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
61e71bab3030183eea612146af02e8cf41f6e23b
|
af7ab3c9d189caf4a22b2a83a03da4560dba6166
|
/generated/administration_data/OrderStateNVL.py
|
c1789a2ccf2eca631fc260554e312bc933aef3d5
|
[] |
no_license
|
Eggwise/unit4_python_api
|
43418d16d84abb73ddd843b8b268883f02ff996b
|
421195392e408bd9e14bda0851817c5ab835ebaf
|
refs/heads/master
| 2021-01-15T13:43:21.486918
| 2016-09-19T14:10:02
| 2016-09-19T14:10:02
| 68,611,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
import requests, json
from generated.base.unit4_base import Unit4Base
class OrderStateNVL(Unit4Base):
    def get_orderStateNVL(self, database):
request_args = locals()
url_template = 'api/{database}/OrderStateNVL'
url = url_template.format(**request_args)
#print(url)
url = self.authorize(url)
response = requests.get(url=url)
print(response.text)
return json.loads(response.text)
|
[
"dev@eggwise.com"
] |
dev@eggwise.com
|
0df5002081686537d5ff5e42c6673d79d491c180
|
473fc28d466ddbe9758ca49c7d4fb42e7d82586e
|
/app/src/main/java/com/syd/source/aosp/external/nanopb-c/tests/package_name/SConscript
|
8f1b9021d3f9cf30f232d3b3a4fab1b5ba771c6a
|
[
"Zlib"
] |
permissive
|
lz-purple/Source
|
a7788070623f2965a8caa3264778f48d17372bab
|
e2745b756317aac3c7a27a4c10bdfe0921a82a1c
|
refs/heads/master
| 2020-12-23T17:03:12.412572
| 2020-01-31T01:54:37
| 2020-01-31T01:54:37
| 237,205,127
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,351
|
# Check that alltypes test case works also when the .proto file defines
# a package name.
Import("env")
# Build a modified alltypes.proto
def modify_proto(target, source, env):
'''Add a "package test.package;" directive to the beginning of the .proto file.'''
data = open(str(source[0]), 'r').read()
open(str(target[0]), 'w').write("package test.package;\n\n" + data)
return 0
env.Command("alltypes.proto", "#alltypes/alltypes.proto", modify_proto)
env.Command("alltypes.options", "#alltypes/alltypes.options", Copy("$TARGET", "$SOURCE"))
env.NanopbProto(["alltypes", "alltypes.options"])
# Build a modified encode_alltypes.c
def modify_c(target, source, env):
'''Add package name to type names in .c file.'''
data = open(str(source[0]), 'r').read()
type_names = ['AllTypes', 'MyEnum', 'HugeEnum']
for name in type_names:
data = data.replace(name, 'test_package_' + name)
open(str(target[0]), 'w').write(data)
return 0
env.Command("encode_alltypes.c", "#alltypes/encode_alltypes.c", modify_c)
# Encode and compare results to original alltypes testcase
enc = env.Program(["encode_alltypes.c", "alltypes.pb.c", "$COMMON/pb_encode.o"])
refdec = "$BUILD/alltypes/decode_alltypes$PROGSUFFIX"
env.RunTest(enc)
env.Compare(["encode_alltypes.output", "$BUILD/alltypes/encode_alltypes.output"])
|
[
"997530783@qq.com"
] |
997530783@qq.com
|
|
cfe3d298f48a17667eeabcf5f110cf65a8e926b9
|
52b79e4cd1e26969a3ebb3bca8620519071bea98
|
/answers/05_basic_scripts/task_5_2a.py
|
83e4f23de538e9e128e63d8b8691d6ce8a62b918
|
[] |
no_license
|
hariram32/pyneng-answers-en
|
631bc149b8a219a2de86de82681ffba3d1ff30ee
|
84b7240b00d3a4ab9011952db662f716d1cd31b8
|
refs/heads/main
| 2023-03-16T00:12:38.954431
| 2021-03-09T15:40:10
| 2021-03-09T15:40:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,401
|
py
|
# -*- coding: utf-8 -*-
"""
Task 5.2a
Copy and modify the script from task 5.2 so that, if the user enters a host address
rather than a network address, it converts the host address to a network address
and prints the network address and mask, as in task 5.2.
An example of a network address (all host bits are equal to zero):
* 10.0.1.0/24
* 190.1.0.0/16
Host address example:
* 10.0.1.1/24 - host from network 10.0.1.0/24
* 10.0.5.195/28 - host from network 10.0.5.192/28
If the user entered the address 10.0.1.1/24, the output should look like this:
Network:
10 0 1 0
00001010 00000000 00000001 00000000
Mask:
/24
255 255 255 0
11111111 11111111 11111111 00000000
Check the script work on different host/mask combinations, for example:
10.0.5.195/28, 10.0.1.1/24
Hint:
The network address can be calculated from the binary host address and the netmask.
If the mask is 28, then the network address is the first 28 bits host addresses + 4 zeros.
For example, the host address 10.1.1.195/28 in binary will be:
bin_ip = "00001010000000010000000111000011"
Then the network address will be the first 28 characters from bin_ip + 0000
(4 because in total there can be 32 bits in the address, and 32 - 28 = 4)
00001010000000010000000111000000
Restriction: All tasks must be done using the topics covered in this and previous chapters.
"""
network = input("Enter the network address: ")
ip, mask = network.split("/")
ip_list = ip.split(".")
mask = int(mask)
oct1, oct2, oct3, oct4 = [
int(ip_list[0]),
int(ip_list[1]),
int(ip_list[2]),
int(ip_list[3]),
]
bin_ip_str = "{:08b}{:08b}{:08b}{:08b}".format(oct1, oct2, oct3, oct4)
bin_network_str = bin_ip_str[:mask] + "0" * (32 - mask)
net1, net2, net3, net4 = [
int(bin_network_str[0:8], 2),
int(bin_network_str[8:16], 2),
int(bin_network_str[16:24], 2),
int(bin_network_str[24:32], 2),
]
bin_mask = "1" * mask + "0" * (32 - mask)
m1, m2, m3, m4 = [
int(bin_mask[0:8], 2),
int(bin_mask[8:16], 2),
int(bin_mask[16:24], 2),
int(bin_mask[24:32], 2),
]
ip_output = """
Network:
{0:<8} {1:<8} {2:<8} {3:<8}
{0:08b} {1:08b} {2:08b} {3:08b}"""
mask_output = """
Mask:
/{0}
{1:<8} {2:<8} {3:<8} {4:<8}
{1:08b} {2:08b} {3:08b} {4:08b}
"""
print(ip_output.format(net1, net2, net3, net4))
print(mask_output.format(mask, m1, m2, m3, m4))
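# Sample run (from the task description): entering 10.0.1.1/24 prints the
# network 10.0.1.0 with mask /24 (255.255.255.0), each octet in decimal and binary.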
|
[
"nataliya.samoylenko@gmail.com"
] |
nataliya.samoylenko@gmail.com
|
43559fe498959af1ed9d3b2c78c4f80b28e8a436
|
f384d811159201c1d375cc36b2402c643b7cd66c
|
/bughipster/website/login.py
|
cff3e38fd64a121e83e8073cb6831e46721eaad6
|
[
"BSD-3-Clause"
] |
permissive
|
pombredanne/django-bug-hipster
|
f23202a78a99ef0487fd05e7cae7882b1696a1ad
|
5e9cfe1efd22494b8c82176a5d7f145f899f2ed2
|
refs/heads/master
| 2021-01-15T11:33:43.420214
| 2015-06-10T20:21:19
| 2015-06-10T20:21:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,144
|
py
|
"""
bughipster.website.login
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by Xavier Ordoquy, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django import http
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import (
authenticate, get_user_model, login as auth_login)
from django.utils.text import capfirst
# Basically, the LoginForm is the django.auth.contrib.forms.AuthenticationForm
# which has been changed to match the bugzilla field names.
# The LoginForm class is under Copyright (c) Django Software Foundation and
# individual contributors.
class AuthenticationForm(forms.Form):
Bugzilla_login = forms.CharField(max_length=254)
Bugzilla_password = forms.CharField(
label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(
UserModel.USERNAME_FIELD)
if self.fields['Bugzilla_login'].label is None:
self.fields['Bugzilla_login'].label = capfirst(
self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('Bugzilla_login')
password = self.cleaned_data.get('Bugzilla_password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class LoginMixin(object):
def post(self, request, *args, **kwargs):
if 'GoAheadAndLogIn' in request.POST:
login_form = AuthenticationForm(data=request.POST or None)
if login_form.is_valid():
auth_login(request, login_form.get_user())
return http.HttpResponseRedirect(request.get_full_path())
# We failed to login. Warn the user to go back ala Bugzilla style
context = self.get_context_data(
title="Invalid Username Or Password", **kwargs)
return self.response_class(
request=self.request,
template="login-failed.html",
context=context,
using=self.template_engine)
# By default, just call the parent class
return super(LoginMixin, self).post(request, *args, **kwargs)
|
[
"xordoquy@linovia.com"
] |
xordoquy@linovia.com
|
873ce238a7dfd0ef0948e5a9922e5e08f39636e5
|
39225163672910ad704e730e20d21a54c8e3be0f
|
/examples/demo_skyview.py
|
36fa93ae3c25f1dcce418ccc803ded48a25244ea
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
astrofrog/pywcsgrid2
|
4f422962523a928b4bacf259e7241e1eadcb50cc
|
d861e5a4987848a2ba3bed6b0d1278457589071a
|
refs/heads/master
| 2021-01-21T00:52:49.615248
| 2012-04-16T15:06:50
| 2012-04-16T15:06:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,682
|
py
|
import pyfits
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.axes_grid import AxesGrid
#from pywcsgrid2.axes_wcs import GridHelperWcs, AxesWcs
import pywcsgrid2
# read in the first image
xray_name="pspc_skyview.fits"
f_xray = pyfits.open(xray_name)
header_xray = f_xray[0].header
# the second image
radio_name="radio_21cm.fits"
f_radio = pyfits.open(radio_name)
header_radio = f_radio[0].header
# grid helper
grid_helper = pywcsgrid2.GridHelper(wcs=header_xray)
# AxesGrid to display tow images side-by-side
fig = plt.figure(1, (6,3.5))
grid = AxesGrid(fig, (0.15, 0.15, 0.8, 0.75), nrows_ncols=(1, 2),
axes_pad=0.1, share_all=True,
cbar_mode="each", cbar_location="top", cbar_pad=0,
axes_class=(pywcsgrid2.Axes, dict(grid_helper=grid_helper)))
ax1 = grid[0]
# use imshow for a simply image display.
im = ax1.imshow(f_xray[0].data, origin="lower", vmin=0., cmap=cm.gray_r,
interpolation="nearest")
im.set_clim(4.e-05, 0.00018)
ticklocs = [6, 9, 12, 15]
cax1 = grid.cbar_axes[0]
cbar1 = cax1.colorbar(im)
cax1.toggle_label(True)
cax1.set_xticks([t*1.e-5 for t in ticklocs])
cax1.set_xticklabels(["$%d$" % t for t in ticklocs])
#cax1.xaxis.get_major_formatter().set_offset_string(r"$\times 10^{-5}$")
cax1.annotate(r"$\times 10^{-5}$",
xy=(1,1), xycoords="axes fraction",
xytext=(0, 15), textcoords="offset points",
va="bottom", ha="right", size="small")
ax2 = grid[1]
d = f_radio[0].data
# The second image have a different wcs. While imshow works, it will
# interpolate the second image into the image coordinate of the first
# image. You may use pcolormesh when the pixel size of the second
# image is larger than that of the first image. Or you may use
# imshow_affine.
#im2 = ax2[header_radio].pcolormesh(d, cmap=cm.gray_r)
im2 = ax2[header_radio].imshow_affine(d,
cmap=cm.gray_r, origin="lower")
grid.cbar_axes[1].colorbar(im2)
grid.cbar_axes[1].toggle_label(True)
# draw contour. The data points of the contour lines are created in
# the image coordinate of the second image and then are transformed to
# the image coordinate of the first image.
cont = ax2[header_radio].contour(d, colors="k")
# draw contour of the second image in the first axes.
cont2 = ax1[header_radio].contour(d, colors="k")
ax1.add_inner_title("X-ray", loc=2)
ax2.add_inner_title("Radio", loc=2)
ax1.locator_params("both", nbins=2) # since ax1 and ax2 shares a
# grid_helper, it affects not only
# ax1 but also ax2.
plt.show()
|
[
"lee.j.joon@gmail.com"
] |
lee.j.joon@gmail.com
|
13d8e1126016f032ec40167184632c2550e1b5fa
|
ec99f2c09b2c9a3860a2e5fdea061089cd147482
|
/webprogrammering/docs/eks4/eks4.py
|
1c9c60da03a7b5752964059290b3911bfa5a2618
|
[] |
no_license
|
sprotg/2019_3d
|
752d3cc19cbff99effeccc9207d5ca26de4ad97b
|
0250f9cd8045272ca6bf58dc59981adf28371c51
|
refs/heads/master
| 2020-07-05T16:03:03.465870
| 2020-03-16T13:49:47
| 2020-03-16T13:49:47
| 202,692,753
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
from flask import Flask
from flask import render_template
from flask import request
app = Flask(__name__)
@app.route("/")
@app.route('/index')
def index():
return render_template('formular.html')
@app.route("/modtag_data", methods=['POST'])
def modtag():
modtaget_navn = request.form['navn']
return render_template("vis.html", navn = modtaget_navn)
if __name__ == "__main__":
app.run(debug=True)
|
[
"spr@sde.dk"
] |
spr@sde.dk
|
ee2addb23de8f4b619906a7926cf93adef98483b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03681/s217062643.py
|
4121026c13621b021fe355d631d8f4c15cb0ffe3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import math
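# Counting argument behind the formula below: with N dogs and M monkeys placed
# in a row so that no two animals of the same kind are adjacent, no arrangement
# exists when |N - M| > 1, there are N! * M! arrangements when the counts
# differ by exactly 1, and 2 * N! * M! when they are equal (either kind may
# lead the row).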
N_inu, M_saru = map(int, input().split())
d = abs(N_inu - M_saru)
if (d > 1):
ans = 0
else:
if (d == 1):
ans = math.factorial(N_inu) * math.factorial(M_saru)
else:
ans = 2 * math.factorial(N_inu) * math.factorial(M_saru)
ans = ans % (10 ** 9 + 7)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
917f40447e4de7c7a4b51c43d671f0a910974707
|
85fc4f742f16befdd1cea2bc0dcfb57f1f8107d8
|
/tests/cli/test_detect_embargo.py
|
8c129519d22286d8f54c8825cd5d2281aeb09b28
|
[
"LGPL-3.0-only",
"Apache-2.0"
] |
permissive
|
sdodson/doozer
|
fcd01e6775478dc02c3703774197a41b755ce033
|
1522990fa987c6f1b4eaa500c63772ac0e0768b5
|
refs/heads/master
| 2023-01-19T16:06:26.801202
| 2020-08-25T20:10:38
| 2020-08-25T20:10:38
| 291,060,114
| 0
| 0
|
Apache-2.0
| 2020-08-28T13:57:32
| 2020-08-28T13:57:31
| null |
UTF-8
|
Python
| false
| false
| 6,526
|
py
|
import io
import json
from unittest import TestCase
from unittest.mock import MagicMock, patch
import yaml
from doozerlib.cli import detect_embargo
class TestDetectEmbargoCli(TestCase):
def test_detect_embargoes_in_nvrs(self):
builds = [
{"id": 1, "nvr": "foo-1.2.3-1.p0"},
{"id": 2, "nvr": "bar-1.2.3-1.p1"}
]
nvrs = [b["nvr"] for b in builds]
expected = [builds[1]]
with patch("doozerlib.brew.get_build_objects", return_value=builds), \
patch("doozerlib.embargo_detector.EmbargoDetector.find_embargoed_builds", return_value=[2]):
actual = detect_embargo.detect_embargoes_in_nvrs(MagicMock(), nvrs)
self.assertListEqual(actual, expected)
def test_detect_embargoes_in_tags(self):
included_tags = ["a-candidate", "b-candidate"]
included_builds = [
[{"id": 11, "nvr": "foo11-1.2.3-1.p0"}, {"id": 12, "nvr": "foo12-1.2.3-1.p1"}, {"id": 13, "nvr": "foo13-1.2.3-1.p1"}],
[{"id": 21, "nvr": "foo21-1.2.3-1.p0"}, {"id": 22, "nvr": "foo22-1.2.3-1.p1"}, {"id": 23, "nvr": "foo23-1.2.3-1.p1"}],
]
excluded_tags = ["a", "b"]
excluded_builds = [
[{"id": 12, "nvr": "foo12-1.2.3-1.p1"}],
[{"id": 22, "nvr": "foo22-1.2.3-1.p1"}],
]
builds_to_detect = [b for builds in included_builds for b in builds if b["id"] in {11, 13, 21, 23}]
event_id = 42
expected = [b for builds in included_builds for b in builds if b["id"] in {13, 23}]
with patch("doozerlib.brew.get_latest_builds", return_value=included_builds), \
patch("doozerlib.brew.get_tagged_builds", return_value=excluded_builds), \
patch("doozerlib.embargo_detector.EmbargoDetector.find_embargoed_builds", return_value=[13, 23]) as find_embargoed_builds:
actual = detect_embargo.detect_embargoes_in_tags(MagicMock(), "all", included_tags, excluded_tags, event_id)
find_embargoed_builds.assert_called_once_with(builds_to_detect)
self.assertEqual(actual, expected)
def test_detect_embargoes_in_pullspecs(self):
pullspecs = ["example.com/repo:foo", "example.com/repo:bar"]
builds = [
{"id": 1, "nvr": "foo-1.2.3-1.p0"},
{"id": 2, "nvr": "bar-1.2.3-1.p1"}
]
nvrs = [("foo", "1.2.3", "1.p0"), ("bar", "1.2.3", "1.p1")]
expected = ([pullspecs[1]], [builds[1]])
fake_runtime = MagicMock()
fake_runtime.parallel_exec.return_value.get.return_value = nvrs
with patch("doozerlib.cli.detect_embargo.detect_embargoes_in_nvrs", return_value=[builds[1]]) as detect_embargoes_in_nvrs:
actual = detect_embargo.detect_embargoes_in_pullspecs(fake_runtime, pullspecs)
detect_embargoes_in_nvrs.assert_called_once_with(fake_runtime, [f"{n}-{v}-{r}" for n, v, r in nvrs])
self.assertEqual(actual, expected)
def test_detect_embargoes_in_releases(self):
releases = ["a", "b"]
release_pullspecs = {
"a": ["example.com/repo:dead", "example.com/repo:beef"],
"b": ["example.com/repo:foo", "example.com/repo:bar"],
}
builds = [
{"id": 1, "nvr": "foo-1.2.3-1.p0"},
{"id": 2, "nvr": "bar-1.2.3-1.p1"}
]
expected = ([releases[1]], [release_pullspecs["b"][1]], [builds[1]])
fake_runtime = MagicMock()
fake_runtime.parallel_exec.return_value.get.return_value = [release_pullspecs[k] for k in releases]
with patch("doozerlib.cli.detect_embargo.detect_embargoes_in_pullspecs") as detect_embargoes_in_pullspecs:
detect_embargoes_in_pullspecs.side_effect = lambda _, pullspecs: (["example.com/repo:bar"], [builds[1]]) if "example.com/repo:bar" in pullspecs else ([], [])
actual = detect_embargo.detect_embargoes_in_releases(fake_runtime, releases)
detect_embargoes_in_pullspecs.assert_called()
detect_embargoes_in_pullspecs.reset_mock()
self.assertEqual(actual, expected)
@patch("doozerlib.exectools.cmd_assert")
def test_get_nvr_by_pullspec(self, fake_cmd_assert):
pullspec = "registry-proxy.engineering.redhat.com/rh-osbs/openshift-ose-cluster-autoscaler:v4.3.25-202006081335"
expected = ("atomic-openshift-cluster-autoscaler-container", "v4.3.25", "202006081335")
fake_cmd_assert.return_value = ("""
{"config":{"Labels": {"com.redhat.component":"atomic-openshift-cluster-autoscaler-container", "version":"v4.3.25", "release":"202006081335"}}}
""", "")
actual = detect_embargo.get_nvr_by_pullspec(pullspec)
self.assertEqual(actual, expected)
@patch("doozerlib.exectools.cmd_assert")
def test_get_image_pullspecs_from_release_payload(self, fake_cmd_assert):
fake_cmd_assert.return_value = ("""
{"references":{"spec":{"tags":[{"name":"foo","from":{"name":"registry.example.com/foo:abc"}}, {"name":"bar","from":{"name":"registry.example.com/bar:def"}}]}}}
""", "")
actual = list(detect_embargo.get_image_pullspecs_from_release_payload("doesn't matter"))
expected = ["registry.example.com/foo:abc", "registry.example.com/bar:def"]
self.assertListEqual(actual, expected)
@patch("builtins.exit")
@patch('sys.stdout', new_callable=io.StringIO)
def test_print_result_and_exit(self, mock_stdout, mock_exit):
embargoed_builds = [{"id": 1}, {"id": 2}]
embargoed_pullspecs = ["a", "b"]
embargoed_releases = ["d", "e"]
expected = {
"has_embargoes": True,
"builds": embargoed_builds,
"pullspecs": embargoed_pullspecs,
"releases": embargoed_releases
}
detect_embargo.print_result_and_exit(embargoed_builds, embargoed_pullspecs, embargoed_releases, True, False)
mock_exit.assert_called_once_with(0)
actual = yaml.safe_load(mock_stdout.getvalue())
self.assertEqual(actual, expected)
mock_exit.reset_mock()
mock_stdout.truncate(0)
mock_stdout.seek(0)
detect_embargo.print_result_and_exit(embargoed_builds, embargoed_pullspecs, embargoed_releases, False, True)
mock_exit.assert_called_once_with(0)
actual = json.loads(mock_stdout.getvalue())
self.assertEqual(actual, expected)
mock_exit.reset_mock()
detect_embargo.print_result_and_exit(None, None, None, False, False)
mock_exit.assert_called_once_with(2)
|
[
"yuxzhu@redhat.com"
] |
yuxzhu@redhat.com
|
7e2eb39af0422a8717078f0128efb39342ff9ce9
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/AtCoder_Virtual_Contest/macle_20220726/a/main.py
|
4f52c6d938d8e5669ac0da85c6db79449fbe884d
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 433
|
py
|
# -*- coding: utf-8 -*-
def main():
from collections import Counter
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
c = Counter(a + b)
ans = list()
for key, value in c.items():
if value == 1:
ans.append(key)
print(*sorted(ans))
if __name__ == "__main__":
main()
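# Example (sketch): for a = [1, 2, 3] and b = [2, 3, 4], only 1 and 4 occur
# exactly once across both lists, so the program prints "1 4".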
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
5d6d47498a0f1f651904fa28ec48d7fc3776e771
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_3/fnkgar002/question1.py
|
4131eddc1868a0edb6d9606e520abe1fb271fe61
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
height = eval(input("Enter the height of the rectangle: \n"))
width = eval(input("Enter the width of the rectangle: \n"))
for i in range(height):
print(width*"*")
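# Example: height 3 and width 5 print three lines of "*****".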
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
d5c8b24693ea9229cea030cafd7ed4a4cd3d7633
|
5ac726f23d0490d3d43819578cca590b62d0ff02
|
/wise_intern/Tracker/views.py
|
5b6bec7f3785285577f5079f1593baeedcb6eba6
|
[] |
no_license
|
youssriaboelseod/Software-HumanResources
|
52ab324bf43479d8bea20690f71690615c68ef25
|
821fdf195915af3f6b6ec16ef2fb6d9f70d986f7
|
refs/heads/master
| 2022-12-28T01:29:52.310176
| 2020-10-10T05:05:22
| 2020-10-10T05:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,707
|
py
|
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Tracker
from django.contrib.auth.decorators import login_required
from django.views.generic import CreateView, DetailView, UpdateView, DeleteView
from Job_openings.models import Jobb
from Candidate.models import Candidate
from django.db.models import Q
# Create your views here.
@login_required
def tracker(request):
if request.user.is_superuser:
tracker = Tracker.objects.all().order_by('-date_posted')
else:
tracker = Tracker.objects.filter(user=request.user).order_by('-date_posted')
search_term = ''
if 'search' in request.GET:
search_term = request.GET['search']
tracker = tracker.filter(
Q(company_applied__company_name__icontains=search_term) |
Q(candidate_name__candidate_name__icontains=search_term) |
Q(position_applied__position__icontains=search_term) |
Q(phone__icontains=search_term)
)
context = {
'tracker': tracker, 'search_term': search_term, 'tracker_page': 'active',
}
return render(request, 'Tracker/tracker.html', context)
class TrackerCreateView(LoginRequiredMixin, CreateView):
model = Tracker
fields = ['current_CTC', 'expected_CTC', 'vendor', 'notice_period', 'email','user','phone', 'company_applied', 'position_applied', 'candidate_status', 'relevant_experience', 'total_experience', 'candidate_name']
def get_initial(self):
candidate_id = self.request.GET.get('candidate_id')
if candidate_id:
try:
candidate = Candidate.objects.get(id=candidate_id)
except Candidate.DoesNotExist:
return super().get_initial()
return {'candidate_name': candidate,
'phone': candidate.phone,
'email': candidate.email,
'user':self.request.user,
}
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class TrackerDetailView(LoginRequiredMixin, DetailView):
model = Tracker
class TrackerUpdateView(LoginRequiredMixin, UpdateView):
model = Tracker
fields = ['current_CTC', 'expected_CTC', 'notice_period', 'vendor', 'company_applied', 'phone','user', 'email', 'position_applied', 'candidate_status', 'relevant_experience', 'total_experience', 'candidate_name']
def form_valid(self, form):
form.instance.author = self.request.user
return super().form_valid(form)
class TrackerDeleteView(LoginRequiredMixin, DeleteView):
model = Tracker
success_url = '/dashboard/tracker/'
|
[
"bhatnagar.aman1998@gmail.com"
] |
bhatnagar.aman1998@gmail.com
|
fdc8c637b2a2360cfe62203c066d247991a0ccea
|
02dde23ab7414142d9df823b873b5b2c87a691b9
|
/cinemago_app/app/__init__.py
|
3ac0b56ad4140b6fe00e3b6625258701d241f878
|
[] |
no_license
|
andrei-papou/cinemago
|
a4094b80a18dcec0072a1a20a5b8aa51c6da522f
|
580c71f0737de1070a0332d9c154100acbb22303
|
refs/heads/master
| 2021-05-30T04:51:16.196188
| 2016-01-18T19:37:19
| 2016-01-18T19:37:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
from schematics.exceptions import ValidationError
from itsdangerous import BadSignature, SignatureExpired
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.mail import Mail
from flask.ext.restful import Api
from config import config
from .exceptions import (
BadRequest,
Unauthorized,
Forbidden,
NotFound,
ScrapyServerError,
bad_request_handler,
bad_signature_handler,
unauthorized_handler,
signature_expired_handler,
forbidden_handler,
not_found_handler,
validation_error_handler,
scrapy_server_error_handler,
)
db = SQLAlchemy()
mail = Mail()
api = Api()
def create_app(config_mode):
app = Flask(__name__)
app.config.from_object(config[config_mode])
# url import
from . import routes
db.init_app(app)
mail.init_app(app)
api.init_app(app)
# blueprints here
from .seanses import seanses as seanses_blueprint
app.register_blueprint(seanses_blueprint)
from .admin import admin as admin_blueprint
app.register_blueprint(admin_blueprint)
from .scrapy_layer import scrapy_layer as scrapy_layer_blueprint
app.register_blueprint(scrapy_layer_blueprint)
# exception handlers registration
app.errorhandler(BadRequest)(bad_request_handler)
app.errorhandler(Unauthorized)(unauthorized_handler)
app.errorhandler(Forbidden)(forbidden_handler)
app.errorhandler(NotFound)(not_found_handler)
app.errorhandler(ValidationError)(validation_error_handler)
app.errorhandler(BadSignature)(bad_signature_handler)
app.errorhandler(SignatureExpired)(signature_expired_handler)
app.errorhandler(ScrapyServerError)(scrapy_server_error_handler)
return app
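# Usage sketch (the 'default' mode name is an assumption about the keys in the
# imported `config` dict):
#   app = create_app('default')
#   app.run()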
|
[
"popow.andrej2009@yandex.ru"
] |
popow.andrej2009@yandex.ru
|
a64f634024c805d7142b15712548433cb9621863
|
e299ad494a144cc6cfebcd45b10ddcc8efab54a9
|
/test/python_api/default-constructor/sb_breakpoint.py
|
2bdc539a001dcdeae2d958de621b5ef705e55df6
|
[
"NCSA"
] |
permissive
|
apple-oss-distributions/lldb
|
3dbd2fea5ce826b2bebec2fe88fadbca771efbdf
|
10de1840defe0dff10b42b9c56971dbc17c1f18c
|
refs/heads/main
| 2023-08-02T21:31:38.525968
| 2014-04-11T21:20:22
| 2021-10-06T05:26:12
| 413,590,587
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
"""
Fuzz tests an object after the default construction to make sure it does not crash lldb.
"""
import sys
import lldb
def fuzz_obj(obj):
obj.GetID()
obj.ClearAllBreakpointSites()
obj.FindLocationByAddress(sys.maxint)
obj.FindLocationIDByAddress(sys.maxint)
obj.FindLocationByID(0)
obj.GetLocationAtIndex(0)
obj.SetEnabled(True)
obj.IsEnabled()
obj.GetHitCount()
obj.SetIgnoreCount(1)
obj.GetIgnoreCount()
obj.SetCondition("i >= 10")
obj.GetCondition()
obj.SetThreadID(0)
obj.GetThreadID()
obj.SetThreadIndex(0)
obj.GetThreadIndex()
obj.SetThreadName("worker thread")
obj.GetThreadName()
obj.SetQueueName("my queue")
obj.GetQueueName()
obj.SetCallback(None, None)
obj.GetNumResolvedLocations()
obj.GetNumLocations()
obj.GetDescription(lldb.SBStream())
for bp_loc in obj:
s = str(bp_loc)
|
[
"91980991+AppleOSSDistributions@users.noreply.github.com"
] |
91980991+AppleOSSDistributions@users.noreply.github.com
|
579959a5d4e3ef7cb1e17f81d9439f8a8a0d30d7
|
e987cd566edc75997f9b02377514d4f3a0dba12c
|
/sys/src/Python/distribTools/__init__.py
|
8efdcba9816036b872c9cde5b2439ecd585cba74
|
[] |
no_license
|
7u83/maxdb-buildtools
|
f942adff2cd55d0a046b6ef3e18f6645b011a26e
|
ce9a56943f6195d6755e983035aa96cbe95e6cb2
|
refs/heads/master
| 2020-05-04T18:23:30.849371
| 2015-02-15T19:25:49
| 2015-02-15T19:25:49
| 30,428,297
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
#
# ========== licence begin LGPL
# Copyright (C) 2002 SAP AG
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ========== licence end
#
__all__ = [
'packBinaryToolsLib',
]
|
[
"7u83@mail.ru"
] |
7u83@mail.ru
|
18be60559ba6c0ac41c4a74f49b594fc1ad3161c
|
b7d1922311613b2dc47bc5c0284ff48bc5294f8c
|
/03-Lists_Basics/Exercises/More_exercises/1-Zeros_to_Back.py
|
bf646b60f3126a824475643c5c4c6ee8c218e616
|
[
"MIT"
] |
permissive
|
eclipse-ib/Software-University-Fundamentals_Module
|
c32bfa5f249f79de622016269a026d1114341e11
|
994ef75c70d1bae8e615dbb789aeffd6e0a42c34
|
refs/heads/main
| 2023-01-30T08:51:26.239640
| 2020-12-13T19:54:18
| 2020-12-13T19:54:18
| 306,145,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
numbers = input().split(", ")
new_numbers = []
for i in numbers:
if int(i) == 0:
pass
else:
new_numbers.append(int(i))
for i in numbers:
if int(i) == 0:
new_numbers.append(0)
print(f"{new_numbers}")
|
[
"65770519+eclipse-ib@users.noreply.github.com"
] |
65770519+eclipse-ib@users.noreply.github.com
|
766c4da836e8e2e56ca4b04d73c586eff2605af9
|
54ab0f79f5d68f4732ca7d205f72ecef99862303
|
/torch/jit/_monkeytype_config.py
|
9957541ff25d17f5d68863e9405b366e5fcaa0e9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
csarofeen/pytorch
|
a9dd0f8ffa0642d72df2d5e109a8b4d9c2389cbc
|
e8557ec5e064608577f81e51ccfe7c36c917cb0f
|
refs/heads/devel
| 2023-04-30T02:42:13.558738
| 2023-03-14T00:50:01
| 2023-03-14T00:50:01
| 88,071,101
| 35
| 10
|
NOASSERTION
| 2023-06-21T17:37:30
| 2017-04-12T16:02:31
|
C++
|
UTF-8
|
Python
| false
| false
| 7,129
|
py
|
import torch
import inspect
import typing
import pathlib
import sys
from typing import Optional, Iterable, List, Dict
from collections import defaultdict
from types import CodeType
_IS_MONKEYTYPE_INSTALLED = True
try:
import monkeytype # type: ignore[import]
from monkeytype import trace as monkeytype_trace
from monkeytype.db.base import CallTraceThunk, CallTraceStore, CallTraceStoreLogger # type: ignore[import]
from monkeytype.config import _startswith, LIB_PATHS # type: ignore[import]
from monkeytype.tracing import CallTrace, CodeFilter # type: ignore[import]
except ImportError:
_IS_MONKEYTYPE_INSTALLED = False
# Checks whether a class is defined in `torch.*` modules
def is_torch_native_class(cls):
if not hasattr(cls, '__module__'):
return False
parent_modules = cls.__module__.split('.')
if not parent_modules:
return False
root_module = sys.modules.get(parent_modules[0])
return root_module is torch
def get_type(type):
"""
    Helper function that converts the given type to a TorchScript-acceptable format.
"""
if isinstance(type, str):
return type
elif inspect.getmodule(type) == typing:
# If the type is a type imported from typing
# like Tuple, List, Dict then replace `typing.`
# with a null string. This needs to be done since
# typing.List is not accepted by TorchScript.
type_to_string = str(type)
return type_to_string.replace(type.__module__ + '.', '')
elif is_torch_native_class(type):
# If the type is a subtype of torch module, then TorchScript expects a fully qualified name
# for the type which is obtained by combining the module name and type name.
return type.__module__ + '.' + type.__name__
else:
# For all other types use the name for the type.
return type.__name__
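# Example (illustrative): get_type(typing.List[int]) returns 'List[int]',
# while a torch-native class such as torch.Tensor returns its fully
# qualified name 'torch.Tensor'.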
def get_optional_of_element_type(types):
"""
    Helper function that extracts the type of the element to be annotated as
    Optional from the list of consolidated types and returns `Optional[element type]`.
    TODO: Remove this check once Union support lands.
"""
elem_type = types[1] if type(None) == types[0] else types[0]
elem_type = get_type(elem_type)
# Optional type is internally converted to Union[type, NoneType], which
# is not supported yet in TorchScript. Hence, representing the optional type as string.
return 'Optional[' + elem_type + ']'
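# Example: get_optional_of_element_type([type(None), int]) returns the
# string 'Optional[int]'.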
def get_qualified_name(func):
return func.__qualname__
if _IS_MONKEYTYPE_INSTALLED:
class JitTypeTraceStoreLogger(CallTraceStoreLogger):
"""A JitTypeCallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore):
super().__init__(store)
def log(self, trace: CallTrace) -> None:
self.traces.append(trace)
class JitTypeTraceStore(CallTraceStore):
def __init__(self):
super().__init__()
# A dictionary keeping all collected CallTrace
# key is fully qualified name of called function
# value is list of all CallTrace
self.trace_records: Dict[str, list] = defaultdict(list)
def add(self, traces: Iterable[CallTrace]):
for t in traces:
qualified_name = get_qualified_name(t.func)
self.trace_records[qualified_name].append(t)
def filter(
self,
qualified_name: str,
qualname_prefix: Optional[str] = None,
limit: int = 2000
) -> List[CallTraceThunk]:
return self.trace_records[qualified_name]
def analyze(self, qualified_name: str) -> Dict:
# Analyze the types for the given module
# and create a dictionary of all the types
# for arguments.
records = self.trace_records[qualified_name]
all_args = defaultdict(set)
for record in records:
for arg, arg_type in record.arg_types.items():
all_args[arg].add(arg_type)
return all_args
def consolidate_types(self, qualified_name: str) -> Dict:
all_args = self.analyze(qualified_name)
# If there are more types for an argument,
# then consolidate the type to `Any` and replace the entry
# by type `Any`.
for arg, types in all_args.items():
types = list(types)
type_length = len(types)
if type_length == 2 and type(None) in types:
                # TODO: Remove this check once Union support in TorchScript lands.
all_args[arg] = get_optional_of_element_type(types)
elif type_length > 1:
all_args[arg] = 'Any'
elif type_length == 1:
all_args[arg] = get_type(types[0])
return all_args
def get_args_types(self, qualified_name: str) -> Dict:
return self.consolidate_types(qualified_name)
class JitTypeTraceConfig(monkeytype.config.Config):
def __init__(self, s: JitTypeTraceStore):
super().__init__()
self.s = s
def trace_logger(self) -> JitTypeTraceStoreLogger:
"""
Returns a JitCallTraceStoreLogger that logs to the configured
trace store.
"""
return JitTypeTraceStoreLogger(self.trace_store())
def trace_store(self) -> CallTraceStore:
return self.s
def code_filter(self) -> Optional[CodeFilter]:
return jit_code_filter
else:
# When MonkeyType is not installed, we provide dummy class definitions
# for the below classes.
class JitTypeTraceStoreLogger: # type: ignore[no-redef]
def __init__(self):
pass
class JitTypeTraceStore: # type: ignore[no-redef]
def __init__(self):
self.trace_records = None
class JitTypeTraceConfig: # type: ignore[no-redef]
def __init__(self):
pass
monkeytype_trace = None # noqa: F811
def jit_code_filter(code: CodeType) -> bool:
"""
Custom CodeFilter for Torchscript to trace forward calls.
    The custom CodeFilter is required when scripting FX-traced forward calls.
    FX-traced forward calls have a `code.co_filename` starting with '<', which
    the default code filter uses to exclude stdlib and site-packages from
    tracing. Since we need all forward calls to be traced, this custom code
    filter checks whether code.co_name is 'forward' and enables tracing for
    all such calls.
The code filter is similar to default code filter for monkeytype and
excludes tracing of stdlib and site-packages.
"""
# Filter code without a source file and exclude this check for 'forward' calls.
if code.co_name != 'forward' and (not code.co_filename or code.co_filename[0] == '<'):
return False
filename = pathlib.Path(code.co_filename).resolve()
return not any(_startswith(filename, lib_path) for lib_path in LIB_PATHS)
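# Example (illustrative): FX-generated code objects typically have a
# co_filename like '<eval_with_key>.0'; the filter above still traces them
# when co_name == 'forward', while frames under LIB_PATHS are skipped.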
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
c72d6c6b6adc5ad4348b71de4fe532736a5db64c
|
26ca1e0906feece27896bd267a1f58882fcb0513
|
/lessons/12.12.2019/zipfunction-demo.py
|
253272c92304aed5021a4a824beef37c50137168
|
[] |
no_license
|
icecore2/python-training2019
|
092984c6dec1b05e70f9f899ee213d126c45ff63
|
ee39f93adabab506c9eef68c5e686ddb59953de9
|
refs/heads/master
| 2020-09-02T21:19:27.959213
| 2020-04-23T20:06:08
| 2020-04-23T20:06:08
| 219,306,742
| 0
| 2
| null | 2020-01-17T15:07:06
| 2019-11-03T13:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 181
|
py
|
names = ["Carpetta", "Shalva", "Arona"]
prices = [990,1990,80]
data = zip(names, prices)
print(data)
print(type(data))
print("------------------")
for ob in data:
print(ob)
|
[
"admin@example.com"
] |
admin@example.com
|
6b1f643b86225f77adeb9ea7b55566123779f3d2
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/파일입출력02_20200705144347.py
|
8558bc1c70efc04967999b44f14cca489e0fd51c
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279
| 2021-02-25T12:02:04
| 2021-02-25T12:02:04
| 342,230,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
# score_file = open("score.txt", "r", encoding="utf8")
# while True:
# line = score_file.readline()
# if not line:
# break
# print(line, end="")
# score_file.close()
score_file = open("score.txt", "r", encoding="utf8")
d="")
score_file.close()
|
[
"sangha0719@gmail.com"
] |
sangha0719@gmail.com
|
6d7b55b2264e8161c0ba4c4e3120b5ec27d882dd
|
36bab4f5fd13efadd53e8a9eb5060945c36cf3fd
|
/src/utils/model_info.py
|
c25771bbfdf7d5c768b878e4a6b891aa5a55897f
|
[] |
no_license
|
CheungBH/TimeSequenceProcess
|
a19530133a84518a472fd9693f2d13287eef632a
|
8690cbc6c371bccc37c2e1c1ecd58cd5e69018b4
|
refs/heads/master
| 2022-07-16T10:52:13.885456
| 2020-08-07T13:37:58
| 2020-08-07T13:37:58
| 242,521,733
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,503
|
py
|
# Code from https://github.com/simochen/model-tools.
import numpy as np
import time
import torch
import torchvision
from torch.autograd import Variable
from config.config import device
def print_model_param_nums(model, multiply_adds=True):
total = sum([param.nelement() for param in model.parameters()])
return total
def print_model_param_flops(model=None, input_height=224, input_width=224, multiply_adds=True):
prods = {}
def save_hook(name):
def hook_per(self, input, output):
prods[name] = np.prod(input[0].shape)
return hook_per
list_1=[]
def simple_hook(self, input, output):
list_1.append(np.prod(input[0].shape))
list_2={}
def simple_hook2(self, input, output):
list_2['names'] = np.prod(input[0].shape)
list_conv=[]
def conv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = (kernel_ops * (2 if multiply_adds else 1) + bias_ops) * output_channels * output_height * output_width * batch_size
list_conv.append(flops)
list_linear=[]
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn=[]
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu=[]
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
list_pooling=[]
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = 0
flops = (kernel_ops + bias_ops) * output_channels * output_height * output_width * batch_size
list_pooling.append(flops)
list_upsample=[]
# For bilinear upsample
def upsample_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
flops = output_height * output_width * output_channels * batch_size * 12
list_upsample.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d):
net.register_forward_hook(conv_hook)
if isinstance(net, torch.nn.Linear):
net.register_forward_hook(linear_hook)
if isinstance(net, torch.nn.BatchNorm2d):
net.register_forward_hook(bn_hook)
if isinstance(net, torch.nn.ReLU):
net.register_forward_hook(relu_hook)
if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
net.register_forward_hook(pooling_hook)
if isinstance(net, torch.nn.Upsample):
net.register_forward_hook(upsample_hook)
return
for c in childrens:
foo(c)
    if model is None:
model = torchvision.models.alexnet()
foo(model)
if device != "cpu":
input = Variable(torch.rand(3, 3, input_width, input_height).cuda(), requires_grad = True)
else:
input = Variable(torch.rand(3, 3, input_width, input_height), requires_grad = True)
out = model(input)
total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling) + sum(list_upsample))
# print(' + Number of FLOPs: %.5fG' % (total_flops / 3 / 1e9))
return total_flops / 3
def get_inference_time(model, repeat=10, height=416, width=416):
model.eval()
start = time.time()
with torch.no_grad():
inp = torch.randn(1, 3, height, width)
if device != "cpu":
inp = inp.cuda()
for i in range(repeat):
output = model(inp)
avg_infer_time = (time.time() - start) / repeat
return round(avg_infer_time, 4)
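# Usage sketch (the model choice is illustrative):
#   model = torchvision.models.resnet18()
#   n_params = print_model_param_nums(model)
#   flops = print_model_param_flops(model, input_height=224, input_width=224)
#   secs = get_inference_time(model, repeat=10, height=224, width=224)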
|
[
"534660436@qq.com"
] |
534660436@qq.com
|
53784f03ce7829e4fb56c21ce8b2f078c69eb7af
|
6c21316d93c94766d4dbbe891643ceb0eca8630f
|
/appendix/1/keras/02_save_model_keras.py
|
6aa98c4a8f3e279649dd76087868dba2b8d5ff21
|
[] |
no_license
|
takseki/deeplearning-tensorflow-keras
|
39beef782a2026aaa5c8060f9f3cb955d3db1da2
|
a6efd8df8408ddaac3ed52b1037a736aa70d44ff
|
refs/heads/master
| 2021-05-15T04:05:51.143111
| 2018-01-29T12:08:10
| 2018-01-29T12:08:10
| 119,783,663
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,053
|
py
|
import os
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from sklearn import datasets
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
np.random.seed(123)
'''
Settings for model files
'''
MODEL_DIR = os.path.join(os.path.dirname(__file__), 'model')
if os.path.exists(MODEL_DIR) is False:
os.mkdir(MODEL_DIR)
'''
Data generation
'''
mnist = datasets.fetch_mldata('MNIST original', data_home='.')
n = len(mnist.data)
N = 30000  # use a subset of MNIST
N_train = 20000
N_validation = 4000
indices = np.random.permutation(range(n))[:N]  # randomly select N samples
X = mnist.data[indices]
X = X / 255.0
X = X - X.mean(axis=1).reshape(len(X), 1)
y = mnist.target[indices]
Y = np.eye(10)[y.astype(int)]
X_train, X_test, Y_train, Y_test = \
train_test_split(X, Y, train_size=N_train)
X_train, X_validation, Y_train, Y_validation = \
train_test_split(X_train, Y_train, test_size=N_validation)
'''
Model configuration
'''
n_in = len(X[0]) # 784
n_hiddens = [200, 200, 200]
n_out = len(Y[0]) # 10
p_keep = 0.5
activation = 'relu'
checkpoint = ModelCheckpoint(
filepath=os.path.join(
MODEL_DIR,
'model_{epoch:02d}_vloss{val_loss:.2f}.hdf5'),
save_best_only=True)
model = Sequential()
for i, input_dim in enumerate(([n_in] + n_hiddens)[:-1]):
model.add(Dense(n_hiddens[i], input_dim=input_dim))
model.add(Activation(activation))
model.add(Dropout(p_keep))
model.add(Dense(n_out))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999),
metrics=['accuracy'])
'''
Model training
'''
epochs = 50
batch_size = 200
model.fit(X_train, Y_train, epochs=epochs,
batch_size=batch_size,
validation_data=(X_validation, Y_validation),
callbacks=[checkpoint])
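# Note: with save_best_only=True, the ModelCheckpoint callback writes a new
# .hdf5 file under MODEL_DIR only for epochs where the monitored val_loss improves.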
|
[
"me@yusugomori.com"
] |
me@yusugomori.com
|
13860fa1eafedf68adeeb3a5c6820df45f2e07eb
|
578bdcf2720805c1075ba348764983d99031911f
|
/Udacity/Project2/BS_first_and_last.py
|
3f009075ede678361730daef255531c39bb073ec
|
[] |
no_license
|
mrudula-pb/Python_Code
|
994de4720289ded0a55017407d27b1d0f0b08c65
|
0dcdc6589d3c614bd1e6a03aa5c2b55664b9e6b2
|
refs/heads/master
| 2023-03-25T16:52:27.420925
| 2021-03-22T21:40:37
| 2021-03-22T21:40:37
| 350,476,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
# Given a sorted array that may have duplicate values, use binary search to find the first and last indexes of a given value.
#
# For example, if you have the array [0, 1, 2, 2, 3, 3, 3, 4, 5, 6] and the given value is 3, the answer will be [4, 6] (because the value 3 occurs first at index 4 and last at index 6 in the array).
#
# The expected complexity of the problem is O(log(n)).
#
def binary_search(target, source, lo=0, hi=None):
    # Search source[lo:hi] for target and return an index into the original
    # list, or None if the target is not present. Passing bounds instead of
    # slices keeps the returned index valid for the full array.
    if hi is None:
        hi = len(source)
    if lo >= hi:
        return None
    center = lo + (hi - lo - 1) // 2
    if source[center] == target:
        return center
    elif source[center] < target:
        return binary_search(target, source, center + 1, hi)
    else:
        return binary_search(target, source, lo, center)


def find_first_index(arr, number):
    index = binary_search(number, arr)
    if index is None:
        print("Element", number, "not present in arr")
        return None
    # Walk left while the previous element still equals number.
    while index > 0 and arr[index - 1] == number:
        index -= 1
    return index


def find_last_index(arr, number):
    index = binary_search(number, arr)
    if index is None:
        print("Element", number, "not present in arr")
        return None
    # Walk right while the next element still equals number.
    while index < len(arr) - 1 and arr[index + 1] == number:
        index += 1
    return index
target = 2
arr = [0, 1, 2, 2, 3, 3, 3, 4, 5, 6]
print("First index of ", target, ":", find_first_index(arr, target))
print("Last index of ", target, ":", find_last_index(arr, target))
|
[
"mrudulapolavarapu@gmail.com"
] |
mrudulapolavarapu@gmail.com
|
9b577efe3af43ad442b2ecbcdc2087ad0a041ca3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02713/s488912314.py
|
5f097787063f1a8f1bf781024c3dc61745246b46
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
import math
k=int(input())
n=0
for a in range(1,k+1):
for b in range(1,k+1):
d = math.gcd(a,b)
for c in range(1,k+1):
n+=math.gcd(d,c)
print(n)
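# The triple loop sums gcd(a, b, c) over all 1 <= a, b, c <= k; hoisting
# d = gcd(a, b) out of the innermost loop avoids recomputing it k times.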
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2d948670ff31eb0aacdf2b5fada3761978d73a49
|
02fc83e331257882b7701d347fe30bbc934fbc1e
|
/tk.py
|
fac1768ec8e270cdbf54c7e0185ff605fc4baaec
|
[
"Unlicense"
] |
permissive
|
tankle/GoodTranslate
|
991dc71190a1e909f48a91799ab5b141b46de8d8
|
5fec6791e6492cb1c181e4f866e1e0fb45e7ab29
|
refs/heads/master
| 2021-01-18T20:32:52.755140
| 2016-05-12T03:04:16
| 2016-05-12T03:04:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,205
|
py
|
import re
import sys
import ctypes
try:
import urllib2 as request
except:
from urllib import request
def get_d1():
req = request.Request(url='http://translate.google.com/', headers={'User-Agent': 'Mozilla/5.0'})
t = request.urlopen(req).read().decode('utf8')
a, b, h = re.search(r"TKK=eval\(\'\(\(function\(\){var a\\x3d(\-?\d+);var b\\x3d(\-?\d+);return (\d+)", t).groups()
return int(h), ctypes.c_int32(int(a) + int(b)).value
b, d1 = get_d1()
def RL(a, b):
for c in range(0, len(b)-2, 3):
d = b[c+2]
d = ord(d) - 87 if d >= 'a' else int(d)
xa = ctypes.c_uint32(a).value
d = xa >> d if b[c+1] == '+' else xa << d
a = a + d & 4294967295 if b[c] == '+' else a ^ d
return ctypes.c_int32(a).value
def calc_tk(a):
if sys.version_info >= (3,):
d = a.encode('utf-8')
else:
d = map(ord, a)
a = b
for di in d:
a = RL(a + di, "+-a^+6")
a = RL(a, "+-3^+b+-f")
a = ctypes.c_int32(a ^ d1).value
a = a if a >= 0 else ((a & 2147483647) + 2147483648)
a %= pow(10, 6)
return '%d.%d' % (a, a ^ b)
if __name__ == '__main__':
text = ' '.join(sys.argv[1:])
print(calc_tk(text))
|
[
"scturtle@gmail.com"
] |
scturtle@gmail.com
|
7c97a5eae902c35c8233bee53f36ce711c8da55f
|
ccb4cb8358fb896a88bbf0c6771462d898d7a492
|
/examples/venus_evening_chart.py
|
bc89920d2db7aab3a46d0afccdcec128b4e5fe3d
|
[
"MIT"
] |
permissive
|
skyfielders/python-skyfield
|
a30d34a680dcd285bc8cd39cedc2629f792d5821
|
61fb6324e312715e20aa75ec24dc87286442be1a
|
refs/heads/master
| 2023-08-31T13:10:32.863587
| 2023-08-10T14:25:56
| 2023-08-10T14:25:56
| 7,924,113
| 1,040
| 204
|
MIT
| 2023-08-28T19:44:50
| 2013-01-30T21:19:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,584
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from skyfield import almanac
from skyfield.api import load, wgs84
from skyfield.magnitudelib import planetary_magnitude
MONTH_NAMES = '0 Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
# Figure out the times of sunset over our range of dates.
eph = load('de421.bsp')
earth, sun, venus = eph['earth'], eph['sun'], eph['venus']
observer = wgs84.latlon(+40.0, 0.0)
ts = load.timescale()
start, end = ts.utc(2021, 3, 7), ts.utc(2022, 2, 7)
f = almanac.sunrise_sunset(eph, observer)
t, y = almanac.find_discrete(start, end, f)
sunsets = (y == 0)
t = t[sunsets]
# For each moment of sunset, ask Skyfield for the month number, the day
# number, and for Venus’s altitude, azimuth, and magnitude.
year, month, day, hour, minute, second = t.utc
month = month.astype(int)
day = day.astype(int)
apparent = (earth + observer).at(t).observe(venus).apparent()
alt, az, distance = apparent.altaz()
x, y = az.degrees, alt.degrees
m = planetary_magnitude(apparent)
# Convert magnitude to marker size, remembering that smaller magnitude
# numbers mean a brighter Venus (and thus a larger marker).
maxmag = max(m)
minmag = min(m)
size = 40 - 30 * (m - minmag) / (maxmag - minmag)
# Start with a smooth curve tracing Venus's motion.
fig, ax = plt.subplots(figsize=[9, 3])
ax.plot(x, y, c='#fff6', zorder=1)
# Next, put a circle representing Venus on the 1st of the month and on
# every fifth day after that. (Except for the 30th, which would sit too
# close to the 1st of the following month.)
fives = (day % 5 == 1) & (day < 30)
ax.scatter(x[fives], y[fives], size[fives], 'white',
edgecolor='black', linewidth=0.25, zorder=2)
# Put day and month labels off to the sides of the curve.
offset_x, offset_y = 10, 8
for i in np.flatnonzero(fives):
if i == 0:
continue # We can’t compute dx/dy with no previous point.
# Build a unit vector pointing in the direction Venus is traveling.
day_i = day[i]
xi = x[i]
yi = y[i]
dx = xi - x[i-1]
dy = yi - y[i-1]
length = np.sqrt(dx*dx + dy*dy)
dx /= length
dy /= length
# Offset the text at a right angle to the direction of travel.
side = 'right' if (year[i], month[i]) < (2021, 10) else 'left'
if side == 'left':
xytext = - offset_x*dy, offset_y*dx
else:
xytext = offset_x*dy, - offset_y*dx
# Label the dates 1, 11, and 21.
if day_i in (1, 11, 21):
ax.annotate(day_i, (xi, yi), c='white', ha='center', va='center',
textcoords='offset points', xytext=xytext, size=8)
# On the 15th of each month, put the month name.
if day_i == 16:
name = MONTH_NAMES[month[i]]
ax.annotate(name, (xi, yi), c='white', ha='center', va='center',
textcoords='offset points', xytext=2.2 * np.array(xytext))
# Finally, some decorations.
points = 'N NE E SE S SW W NW'.split()
for i, name in enumerate(points):
xy = 45 * i, 1
ax.annotate(name, xy, c='white', ha='center', size=12, weight='bold')
ax.set(
aspect=1.0,
title='Venus at sunset for 40°N latitude, April 2021 – January 2022',
xlabel='Azimuth (°)',
ylabel='Altitude (°)',
xlim=(195, 300),
ylim=(0, max(y) + 10.0),
xticks=np.arange(210, 300, 15),
)
sky = LinearSegmentedColormap.from_list('sky', ['black', 'blue'])
extent = ax.get_xlim() + ax.get_ylim()
ax.imshow([[0,0], [1,1]], cmap=sky, interpolation='bicubic', extent=extent)
fig.savefig('venus_evening_chart.png')
|
[
"brandon@rhodesmill.org"
] |
brandon@rhodesmill.org
|
67b744eb1a386ef9781575699805fb61d73dec1b
|
6a0a634265957e9dcd26bc80e3304e107fb004d0
|
/venvflask/lib/python3.7/site-packages/eth_account/signers/local.py
|
b3112edece1bdaa7c182b3ff9fb18a6e36a75012
|
[] |
no_license
|
ogutiann/PythonEthereumSmartContracts
|
8bd81aa14eab567d41b5dad74b67aba92a405ebd
|
d870e9fd1c7f68b8493db4c2b2af224f966d8e51
|
refs/heads/master
| 2023-01-04T14:23:12.396898
| 2020-10-29T12:12:46
| 2020-10-29T12:12:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,845
|
py
|
import warnings
from eth_account.signers.base import (
BaseAccount,
)
class LocalAccount(BaseAccount):
r"""
A collection of convenience methods to sign and encrypt, with an embedded private key.
:var bytes key: the 32-byte private key data
.. code-block:: python
>>> my_local_account.address # doctest: +SKIP
"0xF0109fC8DF283027b6285cc889F5aA624EaC1F55"
>>> my_local_account.key # doctest: +SKIP
b"\x01\x23..."
You can also get the private key by casting the account to :class:`bytes`:
.. code-block:: python
>>> bytes(my_local_account) # doctest: +SKIP
b"\\x01\\x23..."
"""
def __init__(self, key, account):
"""
:param eth_keys.PrivateKey key: to prefill in private key execution
:param ~eth_account.account.Account account: the key-unaware management API
"""
self._publicapi = account
self._address = key.public_key.to_checksum_address()
key_raw = key.to_bytes()
self._private_key = key_raw
self._key_obj = key
@property
def address(self):
return self._address
@property
def privateKey(self):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.signers.local.LocalAccount.key`.
This attribute will be removed in v0.5
"""
warnings.warn(
"privateKey is deprecated in favor of key",
category=DeprecationWarning,
)
return self._private_key
@property
def key(self):
"""
Get the private key.
"""
return self._private_key
def encrypt(self, password, kdf=None, iterations=None):
"""
Generate a string with the encrypted key, as in
:meth:`~eth_account.account.Account.encrypt`, but without a private key argument.
"""
return self._publicapi.encrypt(self.key, password, kdf=kdf, iterations=iterations)
def signHash(self, message_hash):
return self._publicapi.signHash(
message_hash,
private_key=self.key,
)
def sign_message(self, signable_message):
"""
        Sign a message, as in
        :meth:`~eth_account.account.Account.sign_message`, but without a private key argument.
"""
return self._publicapi.sign_message(signable_message, private_key=self.key)
def signTransaction(self, transaction_dict):
warnings.warn(
"signTransaction is deprecated in favor of sign_transaction",
category=DeprecationWarning,
)
return self.sign_transaction(transaction_dict)
def sign_transaction(self, transaction_dict):
return self._publicapi.sign_transaction(transaction_dict, self.key)
def __bytes__(self):
return self.key
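# Usage sketch (the key bytes are illustrative only):
#   from eth_account import Account
#   acct = Account.from_key(b'\x01' * 32)  # returns a LocalAccount
#   signed = acct.sign_transaction(transaction_dict)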
|
[
"sijoythomas@pop-os.localdomain"
] |
sijoythomas@pop-os.localdomain
|
e29dc15662e20df9a68545bb651642a23299991c
|
07504838d12c6328da093dce3726e8ed096cecdb
|
/pylon/resources/datapoints/multiplier_s.py
|
e1f6f1673af46c99867dc958f2dc2a793e6010aa
|
[] |
no_license
|
lcoppa/fiat-lux
|
9caaa7f3105e692a149fdd384ec590676f06bf00
|
7c166bcc08768da67c241078b397570de159e240
|
refs/heads/master
| 2020-04-04T02:47:19.917668
| 2013-10-10T10:22:51
| 2013-10-10T10:22:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
"""multiplier_s standard datapoint type, originally defined in resource file
set standard 00:00:00:00:00:00:00:00-0. """
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.base
from pylon.resources.standard import standard
class multiplier_s(pylon.resources.base.Scaled):
"""multiplier_s standard datapoint type. Multiplier Value multiplier."""
def __init__(self):
super().__init__(
size=1,
signed=False,
scaling=(0.01, 0),
invalid=2.55,
minimum=0,
maximum=2.54,
scope=0,
key=188
)
self._original_name = 'SNVT_multiplier_s'
self._definition = standard.add(self)
if __name__ == '__main__':
    # unit test code: constructing the datapoint also registers its
    # definition with the standard resource set (see __init__ above).
    item = multiplier_s()
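    # Hedged note (added): with scaling=(0.01, 0) a raw unsigned byte n
    # decodes to n * 0.01, so raw 254 yields the maximum 2.54 and raw 255
    # the invalid value 2.55.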
|
[
"lcoppa@rocketmail.com"
] |
lcoppa@rocketmail.com
|
b1bba65c64df29b31b76339751cbbc8806397ddc
|
14567e2f77d2bf697bb18c3c1e3d6744c11f41c8
|
/kfpt/old/ftp.py
|
dacb7eea6ca2d99b1bd4ef22df1c34b411c3282e
|
[] |
no_license
|
yanislong/junnan
|
268e64c288e18456da621d5485e04bf8eb8f5322
|
fc35f32a29a7b6da2a8ea334d0e53a21a81d97f3
|
refs/heads/master
| 2021-01-01T20:08:05.825407
| 2017-09-08T02:24:40
| 2017-09-08T02:24:40
| 98,772,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from ftplib import FTP
import os
ftp = FTP()
ftp.set_debuglevel(2)
ftp.connect("123.126.34.27", 12221)
try:
    ftp.login("TTuser7017", "791241")
    print "ok"
except Exception:
    print "no"
print ftp.getwelcome()
print "**************** change working directory"
ftp.cwd("/tmp/")
ftp.retrlines('LIST')
print "************ show file listing"
ftp.dir('/tmp/')
print "********** show current dir"
print ftp.pwd()
print "************* show name list"
print ftp.nlst()
bufsize = 1024
filename = "long1.xlsx"
# upload a local file, then download `filename` into ./down
file_handle = open("/root/long.xlsx", "rb")
down_file = open("./down", "wb")
#ftp.storbinary('STOR %s' % os.path.basename(filename), file_handle, bufsize)
ftp.storbinary('STOR /home/main_admin/long.txt', file_handle, bufsize)
ftp.retrbinary("RETR %s" % os.path.basename(filename), down_file.write, bufsize)
ftp.set_debuglevel(0)
file_handle.close()
down_file.close()
ftp.quit()
print ">>>>>..end..<<<<<<"
|
[
"335916781@qq.com"
] |
335916781@qq.com
|
30830b98e7dfdae390d1c5750b4945123531013a
|
c9b1e04ba65ba3e0af2a8ae86b88187b72bcaa0b
|
/.svn/pristine/30/30830b98e7dfdae390d1c5750b4945123531013a.svn-base
|
434e78a9bc49b211fa0f15acea01356572d0765a
|
[] |
no_license
|
feitianyiren/TaskCoach
|
7762a89d5b521cfba0827323a9e8a91d1579810b
|
0b7427562074845ac771e59e24a750aa5b432589
|
refs/heads/master
| 2020-04-08T04:56:35.491490
| 2016-01-12T13:29:03
| 2016-01-12T13:29:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,967
|
import patterns, wx
from i18n import _
import domain.date as date
import task
def newTaskMenuText():
    # There is a bug in wxWidgets/wxPython on the Mac that causes the
    # INSERT accelerator to be mapped to some other key sequence ('c' in
    # this case) so that whenever that key sequence is typed, this command
    # is invoked. Hence, we use a different accelerator on the Mac.
menuText = _('&New task...')
if '__WXMAC__' in wx.PlatformInfo:
menuText += u'\tCtrl+N'
else:
menuText += u'\tCtrl+INS'
return menuText
def newSubTaskMenuText():
# See comments in newTaskMenuText() above
menuText = _('New &subtask...')
if '__WXMAC__' in wx.PlatformInfo:
menuText += u'\tShift+Ctrl+N'
else:
menuText += u'\tShift+Ctrl+INS'
return menuText
class TaskList(patterns.CompositeSet):
# FIXME: TaskList should be called TaskCollection or TaskSet
newItemMenuText = newTaskMenuText()
newItemHelpText = _('Insert a new task')
editItemMenuText = _('&Edit task...')
editItemHelpText = _('Edit the selected task')
deleteItemMenuText = _('&Delete task\tCtrl+DEL')
deleteItemHelpText = _('Delete the selected task(s)')
newSubItemMenuText = newSubTaskMenuText()
newSubItemHelpText = _('Insert a new subtask into the selected task')
def _nrInterestingTasks(self, isInteresting):
interestingTasks = [task for task in self if isInteresting(task)]
return len(interestingTasks)
def nrCompleted(self):
return self._nrInterestingTasks(task.Task.completed)
def nrOverdue(self):
return self._nrInterestingTasks(task.Task.overdue)
def nrInactive(self):
return self._nrInterestingTasks(task.Task.inactive)
def nrDueToday(self):
return self._nrInterestingTasks(task.Task.dueToday)
def nrBeingTracked(self):
return self._nrInterestingTasks(task.Task.isBeingTracked)
def allCompleted(self):
nrCompleted = self.nrCompleted()
return nrCompleted > 0 and nrCompleted == len(self)
def efforts(self):
result = []
for task in self:
result.extend(task.efforts())
return result
def __allDates(self):
realDates = [aDate for task in self
for aDate in (task.startDate(), task.dueDate(), task.completionDate())
if aDate != date.Date()]
if realDates:
return realDates
else:
return [date.Date()]
def minDate(self):
return min(self.__allDates())
def maxDate(self):
return max(self.__allDates())
def originalLength(self):
''' Provide a way for bypassing the __len__ method of decorators. '''
return len(self)
class SingleTaskList(TaskList):
pass
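# Hedged usage sketch (added): how the aggregate queries above might be
# called on a populated TaskList; task construction is application-specific,
# so `tasks` below is a placeholder.
#
#   taskList = TaskList(tasks)
#   print '%d of %d tasks completed' % (taskList.nrCompleted(),
#                                       taskList.originalLength())
#   if taskList.allCompleted():
#       print 'all done between', taskList.minDate(), 'and', taskList.maxDate()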
|
[
"hieronymus_schweiz@yahoo.de"
] |
hieronymus_schweiz@yahoo.de
|
|
1630c7c38774560507877f3e076ad65cc552781d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2539/60757/284819.py
|
3db560fcc8fdfff1cf8c9c9c7072d6a8c1b3457c
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
# Shortest unsorted continuous subarray: compare the array with a sorted
# copy; the first and last mismatched indices bound the window that must
# be sorted for the whole array to become sorted. (Input is eval'd, so
# trusted judge-style input such as a Python list literal is assumed.)
arr = eval(input())
so = sorted(arr)
if arr == so:
    print(0)
else:
    start = 0
    end = len(arr) - 1
    for i in range(len(arr)):
        if arr[i] != so[i]:
            start = i
            break
    for i in range(len(arr) - 1, -1, -1):
        if arr[i] != so[i]:
            end = i
            break
    print(end - start + 1)
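# Worked example: for [2, 6, 4, 8, 10, 9, 15] the sorted copy is
# [2, 4, 6, 8, 9, 10, 15]; the first mismatch is at index 1 and the last
# at index 5, so the program prints 5 - 1 + 1 = 5.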
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
a0b37fce115cdddf856c63213f99260b151e182f
|
f2e38023f424ea53e270fd93e41e5ec1c8cdb2cf
|
/infra/bots/gen_compile_isolate.py
|
ea55908bea0858d98775a084ec6753f1588e93e2
|
[
"BSD-3-Clause"
] |
permissive
|
skui-org/skia
|
3dc425dba0142390b13dcd91d2a877c604436e1c
|
1698b32e63ddc8a06343e1a2f03c2916a08f519e
|
refs/heads/m85
| 2021-01-22T21:02:53.355312
| 2020-08-16T10:53:34
| 2020-08-16T13:53:35
| 85,350,036
| 23
| 11
|
BSD-3-Clause
| 2020-09-03T09:23:38
| 2017-03-17T19:59:37
|
C++
|
UTF-8
|
Python
| false
| false
| 6,792
|
py
|
#!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import difflib
import os
import re
import subprocess
import sys
# Any files in Git which match these patterns will be included, either directly
# or indirectly via a parent dir.
PATH_PATTERNS = [
r'.*\.c$',
r'.*\.cc$',
r'.*\.cpp$',
r'.*\.gn$',
r'.*\.gni$',
r'.*\.h$',
r'.*\.mm$',
r'.*\.storyboard$',
]
# These paths are always added to the inclusion list. Note that they may not
# appear in the isolate if they are included indirectly via a parent dir.
EXPLICIT_PATHS = [
'../.gclient',
'.clang-format',
'.clang-tidy',
'bin/fetch-clang-format',
'bin/fetch-gn',
'buildtools',
'infra/bots/assets/android_ndk_darwin/VERSION',
'infra/bots/assets/android_ndk_linux/VERSION',
'infra/bots/assets/android_ndk_windows/VERSION',
'infra/bots/assets/cast_toolchain/VERSION',
'infra/bots/assets/clang_linux/VERSION',
'infra/bots/assets/clang_win/VERSION',
'infra/canvaskit',
'infra/pathkit',
'resources',
'third_party/externals',
]
# If a parent path contains at least this many immediate child paths (ie. files
# and dirs which are directly inside it as opposed to indirect descendants), we
# will include the parent in the isolate file instead of the children. This
# results in a simpler isolate file which should need to be changed less often.
COMBINE_PATHS_THRESHOLD = 3
# Template for the isolate file content.
ISOLATE_TMPL = '''{
'includes': [
'run_recipe.isolate',
],
'variables': {
'files': [
%s
],
},
}
'''
# Absolute path to the infra/bots dir.
INFRABOTS_DIR = os.path.realpath(os.path.dirname(os.path.abspath(__file__)))
# Absolute path to the compile.isolate file.
ISOLATE_FILE = os.path.join(INFRABOTS_DIR, 'compile.isolate')
def all_paths():
"""Return all paths which are checked in to git."""
repo_root = os.path.abspath(os.path.join(INFRABOTS_DIR, os.pardir, os.pardir))
output = subprocess.check_output(['git', 'ls-files'], cwd=repo_root).rstrip()
return output.splitlines()
def get_relevant_paths():
"""Return all checked-in paths in PATH_PATTERNS or EXPLICIT_PATHS."""
paths = []
for f in all_paths():
for regexp in PATH_PATTERNS:
if re.match(regexp, f):
paths.append(f)
break
paths.extend(EXPLICIT_PATHS)
return paths
class Tree(object):
"""Tree helps with deduplicating and collapsing paths."""
class Node(object):
"""Node represents an individual node in a Tree."""
def __init__(self, name):
self._children = {}
self._name = name
self._is_leaf = False
@property
def is_root(self):
"""Return True iff this is the root node."""
return self._name is None
def add(self, entry):
"""Add the given entry (given as a list of strings) to the Node."""
# Remove the first element if we're not the root node.
if not self.is_root:
if entry[0] != self._name:
raise ValueError('Cannot add a non-matching entry to a Node!')
entry = entry[1:]
# If the entry is now empty, this node is a leaf.
if not entry:
self._is_leaf = True
return
# Add a child node.
if not self._is_leaf:
child = self._children.get(entry[0])
if not child:
child = Tree.Node(entry[0])
self._children[entry[0]] = child
child.add(entry)
            # If we have at least COMBINE_PATHS_THRESHOLD immediate children,
# combine them into this node.
immediate_children = 0
for child in self._children.itervalues():
if child._is_leaf:
immediate_children += 1
if not self.is_root and immediate_children >= COMBINE_PATHS_THRESHOLD:
self._is_leaf = True
self._children = {}
def entries(self):
"""Return the entries represented by this node and its children.
Will not return children in the following cases:
- This Node is a leaf, ie. it represents an entry which was explicitly
inserted into the Tree, as opposed to only part of a path to other
entries.
            - This Node has at least COMBINE_PATHS_THRESHOLD immediate children and
thus has been upgraded to a leaf node.
"""
if self._is_leaf:
return [self._name]
rv = []
for child in self._children.itervalues():
for entry in child.entries():
if not self.is_root:
entry = self._name + '/' + entry
rv.append(entry)
return rv
def __init__(self):
self._root = Tree.Node(None)
def add(self, entry):
"""Add the given entry to the tree."""
split = entry.split('/')
if split[-1] == '':
split = split[:-1]
self._root.add(split)
def entries(self):
"""Return the list of entries in the tree.
Entries will be de-duplicated as follows:
- Any entry which is a sub-path of another entry will not be returned.
        - Any entry which was not explicitly inserted but has at least
          COMBINE_PATHS_THRESHOLD immediate children will be returned while its children will not
be returned.
"""
return self._root.entries()
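# Hedged illustration (added): with COMBINE_PATHS_THRESHOLD = 3, adding the
# paths 'a/x', 'a/y' and 'a/z' to a Tree upgrades node 'a' to a leaf, so
# entries() returns just ['a']; a lone path like 'b/only' comes back as-is.
# Dict iteration makes the ordering of entries() unspecified.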
def relpath(repo_path):
"""Return a relative path to the given path within the repo.
The path is relative to the infra/bots dir, where the compile.isolate file
lives.
"""
repo_path = '../../' + repo_path
repo_path = repo_path.replace('../../infra/', '../')
repo_path = repo_path.replace('../bots/', '')
return repo_path
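# Worked example (added): relpath('infra/bots/assets/clang_linux/VERSION')
# -> '../../infra/bots/assets/clang_linux/VERSION'
# -> '../bots/assets/clang_linux/VERSION'
# -> 'assets/clang_linux/VERSION', i.e. relative to this infra/bots dir.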
def get_isolate_content(paths):
"""Construct the new content of the isolate file based on the given paths."""
lines = [' \'%s\',' % relpath(p) for p in paths]
lines.sort()
return ISOLATE_TMPL % '\n'.join(lines)
def main():
"""Regenerate the compile.isolate file, or verify that it hasn't changed."""
testing = False
if len(sys.argv) == 2 and sys.argv[1] == 'test':
testing = True
elif len(sys.argv) != 1:
print >> sys.stderr, 'Usage: %s [test]' % sys.argv[0]
sys.exit(1)
tree = Tree()
for p in get_relevant_paths():
tree.add(p)
content = get_isolate_content(tree.entries())
if testing:
with open(ISOLATE_FILE, 'rb') as f:
expect_content = f.read()
if content != expect_content:
print >> sys.stderr, 'Found diff in %s:' % ISOLATE_FILE
a = expect_content.splitlines()
b = content.splitlines()
diff = difflib.context_diff(a, b, lineterm='')
for line in diff:
sys.stderr.write(line + '\n')
print >> sys.stderr, 'You may need to run:\n\n\tpython %s' % sys.argv[0]
sys.exit(1)
else:
with open(ISOLATE_FILE, 'wb') as f:
f.write(content)
if __name__ == '__main__':
main()
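# Usage (per main() above): run `python gen_compile_isolate.py` to regenerate
# compile.isolate, or `python gen_compile_isolate.py test` to verify it is
# current; test mode prints a diff and exits 1 when the file is stale.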
|
[
"skia-commit-bot@chromium.org"
] |
skia-commit-bot@chromium.org
|