blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
07fadb59835412186c216ed94671aaf72d7c9b8e | e8b291e05b200832b2f3e1a94ab66c225883cc98 | /authentication/serializers.py | f791f7b68bca3c452d3326955e4c4c4f61d04c45 | [] | no_license | Ramesh7128/emailwatch | ffaa22cd712dede2b68b10c3cfd58675a2c9e379 | bf150d1587c7874019f753da5d19dfd2636fb540 | refs/heads/master | 2023-01-09T10:23:15.354834 | 2019-04-24T11:40:40 | 2019-04-24T11:40:40 | 180,546,082 | 0 | 0 | null | 2023-01-04T15:08:03 | 2019-04-10T09:16:03 | JavaScript | UTF-8 | Python | false | false | 3,505 | py | from rest_framework import serializers
from authentication.models import User
from django.contrib.auth import authenticate
class SocialRegisterationLoginSerializer(serializers.Serializer):
"""
Seraializer for reqisteration/login a new user.
"""
access_token = serializers.CharField(
allow_blank=False,
trim_whitespace=True,
write_only=True
)
refresh_token = serializers.CharField(
allow_blank=False,
trim_whitespace=True,
write_only=True
)
email = serializers.CharField()
token = serializers.CharField(allow_blank=True, read_only=True)
username = serializers.CharField(allow_blank=True, read_only=True)
def create(self, validated_data):
# validate the token sent with the email sent.
print(validated_data, 'inside serializer')
return User.objects.creat_social_user(**validated_data)
class RegisterationSerializer(serializers.ModelSerializer):
"""
Serializer for registeration request and create a new user.
"""
password = serializers.CharField(
max_length=200,
min_length=8,
write_only=True
)
token = serializers.CharField(max_length=200, read_only=True)
class Meta:
model = User
fields = ['email', 'username', 'password', 'token']
def create(self, validated_data):
# use the create user method we wrote earlier to create a new_user.
return User.objects.create_user(**validated_data)
class LoginSerializer(serializers.Serializer):
email = serializers.CharField(max_length=255)
username = serializers.CharField(max_length=255, read_only=True)
password = serializers.CharField(max_length=128, write_only=True)
token = serializers.CharField(max_length=255, read_only=True)
def validate(self, data):
# The 'validate' method is where we make sure that the user.
# the validate method is where we make sure that the user is a valid user.
email = data.get('email', None)
password = data.get('password', None)
if email is None:
raise serializers.ValidationError(
'Email field is required to log in')
if password is None:
raise serializers.ValidationError(
'password field is required to log in')
user = authenticate(username=email, password=password)
if user is None:
raise serializers.ValidationError(
'User credentials not matching')
if not user.is_active:
raise serializers.ValidationError('User has been deactivated')
return {
'email': user.email,
'username': user.username,
'token': user.token
}
class UserSerializer(serializers.ModelSerializer):
"""
Handles serialization and deserialization of user objects.
"""
password = serializers.CharField(
max_length=128, min_length=8, write_only=True)
class Meta:
model = User
fields = ['username', 'email', 'password']
# read_only_fields = ('token',)
def update(self, instance, validated_data):
"""
performs an update on User instance.
"""
password = validated_data.pop('password', None)
for (key, value) in validated_data.items():
setattr(instance, key, value)
if password is not None:
instance.set_password(password)
instance.save()
return instance
| [
"ramesh7128@gmail.com"
] | ramesh7128@gmail.com |
6838f5d83e2e305daf8e7e1d283c3de9d6b2e773 | 69698589b1962608cb0fa6099aeafd8eb263ddc4 | /降维/PCA降维/利用PCA对半导体制造数据降维.py | 955d949e1f9fee3aaf7eb119ffd801bcb70b0f37 | [] | no_license | haibiyu/Machine-Learning-Action | de7e18aeb31c48340566d1ab4550d4864094b409 | 3ff16b6881393c1230f5c06dba632651e5d444e0 | refs/heads/master | 2022-07-01T13:47:15.923282 | 2020-05-11T09:49:44 | 2020-05-11T09:49:44 | 259,905,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,464 | py | # !/usr/bin/env python
# -*-coding:utf-8 -*-
"""
# File : 利用PCA对半导体制造数据降维.py
# Time :2020/3/14 21:45
# Author :haibiyu
# version :python 3.6
# Description:
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.sans-serif'] = [u'SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False
def load_data_set(file_name,delim='\t'):
"""
获取数据
:param file_name: 文件路径
:param delim: 分隔符
:return: 返回处理后的数据
"""
fr = open(file_name)
string_arr = [line.strip().split(delim) for line in fr.readlines()]
data_arr = [np.array(line).astype(float) for line in string_arr]
return np.mat(data_arr)
def replace_nan_with_mean():
"""
将数据中NaN值替换为平均值
"""
data_mat = load_data_set('./半导体制造数据/secom.data',' ')
numFeat = data_mat.shape[1]
for i in range(numFeat):
# 计算所有非NaN的平均值
mean_val = np.mean(data_mat[np.nonzero(~np.isnan(data_mat[:, i].A))[0], i])
# 将所有NaN值设置为平均值
data_mat[np.nonzero(np.isnan(data_mat[:,i].A))[0], i] = mean_val
return data_mat
def pca(data_mat, variance_ratio=0.99):
"""
利用PCA对数据进行降维,获取降维后的数据和重构后的数据
:param data_mat: 原始数据,m*n的矩阵
:param top_k_feat: 需要降到的维度数
:return:
"""
mean_vals = np.mean(data_mat, axis=0)
mean_removed = (data_mat - mean_vals) # 去均值化
cov_mat = np.cov(mean_removed, rowvar=0) # 计算协方差矩阵 n*n
# 通常用奇异值分解SVD 代替 特征值分解eig
U, S, V = np.linalg.svd(cov_mat) # 获得SVD后的 U(n*n)、S(n*n)、V(n*n),特征值S已降序排列
# 获取保留方差99%的最小维度数top_k_feat
top_k_feat = get_top_k_feat(S, variance_ratio)
print("降维后保留方差{}的最小维度数为:{}".format(variance_ratio,top_k_feat))
plot_top_variance_ratio(S, top_k_feat)
red_vects = U[:, :top_k_feat] # 取前top_k_feat列的特征向量
red_data_mat = mean_removed * red_vects # 将原始数据转换到降维后的空间上
recon_mat = red_data_mat * red_vects.T + mean_vals # 重构原始数据
return red_data_mat, recon_mat
def get_top_k_feat(eig_values,variance_ratio=0.99):
"""
根据variance_ratio确定保留的特征数
:param eig_values: 特征值,从大到小排序
:param variance_ratio: 主成分的方差和所占的最小比例阈值
:return:
"""
sum_S = float(np.sum(eig_values))
curr_S = 0
for i in range(len(eig_values)):
curr_S += float(eig_values[i])
if curr_S / sum_S >= variance_ratio:
return i + 1
def plot_top_variance_ratio(eigvalues,k):
"""
绘制前k个主成分占总方差的百分比
:param eigvalues:特征值
:param k:降维后的维度数目
"""
plt.plot(np.arange(1, k+1), eigvalues[:k] / np.sum(eigvalues) * 100,'o-')
plt.xlabel("主成分数目")
plt.ylabel("方差的百分比")
plt.xlim(0, k)
plt.ylim(0,)
plt.title("前{}个主成分占总方差的百分比".format(k))
plt.show()
if __name__ == '__main__':
# 对数据进行处理
data_mat = replace_nan_with_mean()
# 获取降维后的数据和重构后的数据
red_data_mat, recon_mat = pca(data_mat,0.99)
| [
"haibiyu@163.com"
] | haibiyu@163.com |
45911d2107f8a058ae9e21da641dce20f9ebbfc4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03478/s738324430.py | 8d0f54df40a8cd3400f6e4a4451aea2c0ae3acda | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | def calc_sum_digits(n):
sumdigit = 0
while n > 0:
sumdigit += n % 10
n //= 10
return sumdigit
N, A, B = map(int, input().split())
result = 0
for n in range(1, N+1):
if A <= calc_sum_digits(n) <= B:
result += n
print(result) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6c9ffa106c3d482bebc5f45b5ab810671a452d45 | 70fec09ceb625608d561937955c285c0c39f6d95 | /tomodachi/helpers/logging.py | 6f70f688795efb29e251f11cf8b1df6ce6a5f8f4 | [
"MIT"
] | permissive | kalaspuff/tomodachi | b285e2c73696d14e3c84a479745e00824fba7190 | deca849ec2b4cdc3d27f06e9ce0056fac0146a1a | refs/heads/master | 2023-08-31T00:32:12.042486 | 2023-08-21T13:02:24 | 2023-08-21T13:02:24 | 62,165,703 | 191 | 28 | MIT | 2023-09-11T23:32:51 | 2016-06-28T18:43:51 | Python | UTF-8 | Python | false | false | 1,787 | py | from typing import Any
from tomodachi import context, logging
def log(service: Any, *args: Any, **kwargs: Any) -> None:
name: str = context("service.logger") or ""
level = None
message = None
if len(args) == 1:
message = args[0]
if len(args) == 2:
if type(args[0]) is int:
level = args[0]
elif type(args[0]) is str and str(args[0]).upper() in (
"NOTSET",
"DEBUG",
"INFO",
"WARN",
"WARNING",
"ERROR",
"FATAL",
"CRITICAL",
):
level = getattr(logging, str(args[0]).upper())
else:
name = args[0]
message = args[1]
if len(args) == 3:
name = args[0]
level = int(args[1]) if type(args[1]) is int else getattr(logging, str(args[1]).upper())
message = args[2]
if "level" in kwargs:
level = 0
level_ = kwargs.pop("level", 0)
if type(level_) is int:
level = int(level_)
else:
level = int(getattr(logging, str(level_).upper()))
if "lvl" in kwargs:
level = 0
level_ = kwargs.pop("lvl", 0)
if type(level_) is int:
level = int(level_)
else:
level = int(getattr(logging, str(level_).upper()))
if "name" in kwargs:
name = kwargs.pop("name", None) or ""
if not message and "message" in kwargs:
message = kwargs.pop("message", None)
if not message and "msg" in kwargs:
message = kwargs.pop("msg", None)
if not level:
level = logging.INFO
if not name:
name = context("service.logger")
if not message:
message = ""
logging.getLogger(name or None).log(level, message, **kwargs)
| [
"hello@carloscar.com"
] | hello@carloscar.com |
2d3beeff7f88a9ebdfa69239ba98dea009416491 | 49c174fa2363461bbefd07af08f2d62b2d12b591 | /robots/LoCoBot/locobot_calibration/scripts/artag_camera.py | 84c6517cf003f248e2a81c48df3d658a9cf2e2d7 | [
"MIT"
] | permissive | Improbable-AI/pyrobot | 452f68ca503fb4aff247d6166cff8914471fa9d8 | 326b49057421ae7d5feefdca93b580846aaef730 | refs/heads/master | 2022-07-07T20:58:23.358744 | 2022-04-01T22:02:39 | 2022-04-01T22:02:39 | 221,547,610 | 1 | 2 | MIT | 2022-04-04T20:24:45 | 2019-11-13T20:41:11 | Python | UTF-8 | Python | false | false | 895 | py | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import threading
import rospy
import copy
from ar_track_alvar_msgs.msg import AlvarMarkers
ROSTOPIC_AR_POSE_MARKER = '/ar_pose_marker'
class ARTagCamera(object):
def __init__(self, configs):
self.ar_tag_pose = None
self.ar_tag_lock = threading.RLock()
rospy.Subscriber(
ROSTOPIC_AR_POSE_MARKER,
AlvarMarkers,
self.alvar_callback)
def alvar_callback(self, msg):
self.ar_tag_lock.acquire()
self.ar_tag_pose = msg
self.ar_tag_lock.release()
def get_ar_tag_pose(self):
self.ar_tag_lock.acquire()
ar_tag_pose = copy.deepcopy(self.ar_tag_pose)
self.ar_tag_lock.release()
return ar_tag_pose
| [
"kalyan051993@gmail.com"
] | kalyan051993@gmail.com |
29c085f9787e37d6f79717df659b17a72d8ec18d | 13b5372316dd8a47c7dfe9abf43839f4bc61ba9d | /mysite/settings.py | ec1a2a6092dbf30e5285c63b399ab1e61cf1bc62 | [] | no_license | YaCpotato/Django-RFID-register-API | 6b7db5a07ca0ac182645ac47436aed37006e7ac3 | a36e680ccdf8f80d5d9c21c5ab6d5cae0547c74a | refs/heads/master | 2020-08-05T18:00:56.197117 | 2019-10-07T16:34:04 | 2019-10-07T16:34:04 | 212,646,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,221 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p-$50yl%^e%k4uf^01+2z4l^q2kmud8++8kvohc*n6e82!(_07'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com','localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'logsys',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ja'
TIME_ZONE = 'Asia/Tokyo'
USE_TZ = True
AUTH_USER_MODEL = "logsys.User"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"yasshisshy@gmail.com"
] | yasshisshy@gmail.com |
1703fd234063fecf90463ba8e557062e7b98db89 | a560ad8d5f523b720b47f0be27c2fdba232a3a4b | /src/configuration/config_p.py | f44293b7d749ac53b0ed151d187db84a655dd68f | [
"MIT"
] | permissive | vollov/py-lab | a27cb422e5a4ac44d4364c89e98202207cd2a1d5 | 0a1a3c93c5decaa5246fab981bcc2563cc42c6d0 | refs/heads/master | 2021-06-01T13:25:51.829046 | 2021-01-23T16:39:08 | 2021-01-23T16:39:08 | 33,277,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | import ConfigParser,ast,os
class configp():
def __init__(self):
current_directory = os.path.dirname(os.path.abspath(__file__))
self._config_file_path = os.path.join(current_directory, 'conf.ini')
self.config = ConfigParser.ConfigParser()
self.config.read(self._config_file_path)
def get(self, section,option):
return self.config.get(section, option)
def test():
con = configp()
print con.get('My Section','foodir')
if __name__=='__main__':test() | [
"dike.zhang@gmail.com"
] | dike.zhang@gmail.com |
9db0cafb8a56ca93e6c9a4097abf460afe3f71e3 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/authorization/v20150701/_inputs.py | 3caabfd0cd01b9038f1146d2ea779d3cf6007169 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 3,337 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'PermissionArgs',
'RoleAssignmentPropertiesArgs',
]
@pulumi.input_type
class PermissionArgs:
def __init__(__self__, *,
actions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
not_actions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Role definition permissions.
:param pulumi.Input[Sequence[pulumi.Input[str]]] actions: Allowed actions.
:param pulumi.Input[Sequence[pulumi.Input[str]]] not_actions: Denied actions.
"""
if actions is not None:
pulumi.set(__self__, "actions", actions)
if not_actions is not None:
pulumi.set(__self__, "not_actions", not_actions)
@property
@pulumi.getter
def actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Allowed actions.
"""
return pulumi.get(self, "actions")
@actions.setter
def actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "actions", value)
@property
@pulumi.getter(name="notActions")
def not_actions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Denied actions.
"""
return pulumi.get(self, "not_actions")
@not_actions.setter
def not_actions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "not_actions", value)
@pulumi.input_type
class RoleAssignmentPropertiesArgs:
def __init__(__self__, *,
principal_id: pulumi.Input[str],
role_definition_id: pulumi.Input[str]):
"""
Role assignment properties.
:param pulumi.Input[str] principal_id: The principal ID assigned to the role. This maps to the ID inside the Active Directory. It can point to a user, service principal, or security group.
:param pulumi.Input[str] role_definition_id: The role definition ID used in the role assignment.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "role_definition_id", role_definition_id)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> pulumi.Input[str]:
"""
The principal ID assigned to the role. This maps to the ID inside the Active Directory. It can point to a user, service principal, or security group.
"""
return pulumi.get(self, "principal_id")
@principal_id.setter
def principal_id(self, value: pulumi.Input[str]):
pulumi.set(self, "principal_id", value)
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> pulumi.Input[str]:
"""
The role definition ID used in the role assignment.
"""
return pulumi.get(self, "role_definition_id")
@role_definition_id.setter
def role_definition_id(self, value: pulumi.Input[str]):
pulumi.set(self, "role_definition_id", value)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
92248a35db9e68515e4e085e21c1b7010262f4c0 | 7daab7f2e91d62ba0383fa050f3dea1dc9752975 | /iniciante/1066_pares_impares_positivos_e_negativos.py | 8b84148a9a7da0955c6dade7d456691e2be198db | [] | no_license | luandadantas/URI-Python | 97ccdaa3835b2d2fa403f148969ca7e893d3f119 | 2cb67f39725b20e6fcbbeaf27d04c4ba05dba665 | refs/heads/master | 2022-12-04T02:51:14.374361 | 2020-08-14T17:59:58 | 2020-08-14T17:59:58 | 255,736,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | valor1 = int(input())
valor2 = int(input())
valor3 = int(input())
valor4 = int(input())
valor5 = int(input())
positivos = 0
negativos = 0
pares = 0
impares = 0
for valor in [valor1, valor2, valor3, valor4, valor5]:
if valor > 0:
positivos += 1
if valor < 0:
negativos += 1
if valor % 2 == 0:
pares += 1
if valor % 2 == 1:
impares += 1
print("{} valor(es) par(es)".format(pares))
print("{} valor(es) impar(es)".format(impares))
print("{} valor(es) positivo(s)".format(positivos))
print("{} valor(es) negativo(s)".format(negativos)) | [
"ludanttas@gmail.com"
] | ludanttas@gmail.com |
862d8a3ca39f798075f14ad549a35b883b69cf4e | fd1a6a8c27f3f7d91a1fa4a4914181f8ae0fd795 | /易中标js破解(中低)/yibiaoparse.py | 4b09382fa0e618bbb5b3f816d47e55860309f0a7 | [] | no_license | heyanglin/js- | 078fdaa7892dbe8a94d2965e3fd700c205e7a8ee | 8c0b36b2df9c942f0c590c21e6696ab75de2a3a0 | refs/heads/master | 2020-07-05T14:24:56.565195 | 2019-12-19T04:26:51 | 2019-12-19T04:26:51 | 202,672,163 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:Administrator
# datetime:2019/6/19 10:57
# software: PyCharm
import execjs
import requests
import json
import os,io
os.environ["EXECJS_RUNTIME"] = "Node"
#生成js运行文件
with open('2.js','r') as f:
jss = f.read()
# print(jss)
fun = execjs.compile(jss)
page= """{"typeNum":0,"limit":20,"start":16,"title":""}"""
# 得到salt,data的值
salt = fun.call('r')
data = fun.call('o',page,salt)
post_data={
'salt':salt,
'data':data,
}
print(post_data)
#开始请求
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
# 'Authorization': 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyTm8iOiJ0ZXN0X3BlcnNvbjIiLCJleHAiOjE1NjI0MDY1NTEsImlhdCI6MTU2MjMyMDE1MX0.AF9mJnJjMOhoIIiXXOXHhrlGuH0T6cgF4EwUan6W49s',
'Origin': 'http://192.168.1.33:8888',
'Referer': 'http://192.168.1.33:8888/',
}
url = 'http://www.ebidwin.cn/ow/bidInfo/bidResultList'
resp = requests.post(url,headers=headers,data=post_data)
# print(resp)
jsons = json.loads(resp.text)
res_salt = jsons['data']['salt']
res_data = jsons['data']['data']
#解密
result = fun.call('a',res_data,res_salt)
# print(requests.utils.unquote(result))
print(result)
| [
"someone@someplace.com"
] | someone@someplace.com |
3a8a6742d44c4a2169d12e211ea01c4f92cca229 | b805f0f0eed9c93ff564a719cb18de438a8572ee | /src/products/admin.py | f9f9c6aaa871a922d2ae3b74863216b0c40574a6 | [
"MIT"
] | permissive | pratikbarjatya/ecommerce | 1a3caae355d4dd06b4044fcce2d3fb48eae76034 | e18ba50c33c4b8f96c57785027f30b396104c47c | refs/heads/master | 2021-01-20T01:43:50.564162 | 2015-10-25T17:31:28 | 2015-10-25T17:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | from django.contrib import admin
# Register your models here.
from .models import Product,Variation,ProductImage,Category,ProductFeatured
class ProductImageInline(admin.TabularInline):
model=ProductImage
extra=0
class VariationInline(admin.TabularInline):
model=Variation
extra=0
class ProductAdmin(admin.ModelAdmin):
list_display=['__str__','price']
inlines=[VariationInline,ProductImageInline,]
class Meta:
model=Product
admin.site.register(Product,ProductAdmin)
admin.site.register(Variation)
admin.site.register(ProductImage)
admin.site.register(Category)
admin.site.register(ProductFeatured) | [
"abhijit.bangera@hotmail.com"
] | abhijit.bangera@hotmail.com |
769e7d2055b1f12f280a29e7ebdd5b927ab628f7 | 99ed69aafb483b126f13fb8f0f5b31ad42e9829d | /pictures/urls.py | 19bb0699b4eb77e54dc18cb184126955f3e0af81 | [
"MIT"
] | permissive | Jackson-coder-arch/Instagram-pics | 3aba5da42a7cf8486651f91410364d1eafbcb722 | 9b4332d9f3144c4f655a0bf1313f0c4ef9481c4f | refs/heads/master | 2023-03-28T12:17:24.767827 | 2021-04-03T23:15:18 | 2021-04-03T23:15:18 | 351,892,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from django.urls import path,re_path
from . import views
urlpatterns = [
path('',views.home, name = 'home'),
path('NewPost/',views.NewPost, name ='NewPost'),
path('profile/',views.profile,name ='profile')
] | [
"jacksonikonya@gmail.com"
] | jacksonikonya@gmail.com |
205155f6e5a48bf727a20d8fc592fec3365e0554 | 3be42b83a15d022f5863c96ec26e21bac0f7c27e | /spinoffs/oryx/oryx/experimental/__init__.py | 2364b4ee11a23f4fefa3f7fd65193a83401b1011 | [
"Apache-2.0"
] | permissive | ogrisel/probability | 846f5c13cddee5cf167b215e651b7479003f15d2 | 8f67456798615f9bf60ced2ce6db5d3dba3515fe | refs/heads/master | 2022-11-09T10:53:23.000918 | 2020-07-01T23:16:03 | 2020-07-01T23:17:25 | 276,580,359 | 2 | 1 | Apache-2.0 | 2020-07-02T07:37:58 | 2020-07-02T07:37:57 | null | UTF-8 | Python | false | false | 852 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Module for experimental Oryx libraries."""
from oryx.experimental import mcmc
from oryx.experimental import nn
from oryx.experimental import optimizers
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
4fa176b40ebe223d94a0b8fd292d2a84b55ae1d8 | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/grr/grr/core/grr_response_core/lib/parsers/cron_file_parser_test.py | f2c76e65b62e96e7ef20d6f26c6135a88e905aed | [
"Apache-2.0",
"MIT"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 1,306 | py | #!/usr/bin/env python
"""Tests for grr.parsers.cron_file_parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl import app
from grr_response_core.lib.parsers import cron_file_parser
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr.test_lib import test_lib
class TestCronTabParsing(test_lib.GRRBaseTest):
"""Test parsing of cron files."""
def testCronTabParser(self):
"""Ensure we can extract jobs from a crontab file."""
parser = cron_file_parser.CronTabParser()
results = []
path = os.path.join(self.base_path, "parser_test", "crontab")
plist_file = open(path, "rb")
pathspec = rdf_paths.PathSpec.OS(path=path)
results.extend(list(parser.ParseFile(None, pathspec, plist_file)))
self.assertLen(results, 1)
for result in results:
self.assertEqual(result.jobs[0].minute, "1")
self.assertEqual(result.jobs[0].hour, "2")
self.assertEqual(result.jobs[0].dayofmonth, "3")
self.assertEqual(result.jobs[0].month, "4")
self.assertEqual(result.jobs[0].dayofweek, "5")
self.assertEqual(result.jobs[0].command, "/usr/bin/echo \"test\"")
def main(args):
test_lib.main(args)
if __name__ == "__main__":
app.run(main)
| [
"a.songer@protonmail.com"
] | a.songer@protonmail.com |
6eb47e6751955fabbe37ab1d26ad336f8bf3ec86 | 97def1949bca845f40a2fb99fe7496e698f51764 | /examples/doc/samples/comparisons/cutstock/cutstock_lpsolve.py | a0a1c6de6dfd1616b7b64c59d7e7a5bf8f49ec6f | [
"BSD-3-Clause"
] | permissive | flexciton/pyomo | e009e5d300d27d943408a1ee5e0e1770d772a7fe | 817bebc9c10f527263b2b8402fb1c038f1b37cf1 | refs/heads/master | 2023-03-03T08:56:22.922613 | 2022-01-18T15:22:57 | 2022-01-18T15:22:57 | 241,679,253 | 1 | 1 | NOASSERTION | 2022-04-11T16:48:48 | 2020-02-19T17:24:37 | Python | UTF-8 | Python | false | false | 2,072 | py | from lpsolve55 import *
from cutstock_util import*
# Reading in Data using the cutstock_util
cutcount = getCutCount()
patcount = getPatCount()
Cuts = getCuts()
Patterns = getPatterns()
PriceSheet = getPriceSheetData()
SheetsAvail = getSheetsAvail()
CutDemand = getCutDemand()
CutsInPattern = getCutsInPattern()
########################################
varcount = cutcount + patcount + 1 + 1
PatCountStart = 2
# Objective Coeff Array
ObjCoeff = range(varcount)
for i in range(varcount):
if i == 0:
ObjCoeff[i] = PriceSheet
else:
ObjCoeff[i] = 0
#Arrays for constraints
TotCostB = range(varcount)
for i in TotCostB:
TotCostB[i] = 0
TotCostB[0] = -PriceSheet
TotCostB[1] = 1
RawAvailB = range(varcount)
for i in RawAvailB:
RawAvailB[i] = 0
RawAvailB[0] = 1
SheetsB = range(varcount)
for i in SheetsB:
SheetsB[i] = 0
SheetsB[0] = 1
for i in range(patcount):
SheetsB[i+PatCountStart] = -1
CutReqB = [[0 for col in range(varcount)] for row in range(cutcount)]
for i in range(cutcount):
for j in range(patcount):
CutReqB[i][j+PatCountStart] = CutsInPattern[i][j]
CutReqB[i][patcount+PatCountStart+i] = -1
###################################################
lp = lpsolve('make_lp', 0, varcount)
ret = lpsolve('set_lp_name', lp, 'CutStock')
lpsolve('set_verbose', 'CutStock', IMPORTANT)
#Define Objective
ret = lpsolve('set_obj_fn', 'CutStock', ObjCoeff)
#Define Constraints
ret = lpsolve('add_constraint', 'CutStock', TotCostB, EQ, 0)
ret = lpsolve('add_constraint', 'CutStock', RawAvailB, LE, SheetsAvail)
ret = lpsolve('add_constraint', 'CutStock', SheetsB, EQ, 0)
for i in range(cutcount):
ret = lpsolve('add_constraint', 'CutStock', CutReqB[i], EQ, CutDemand[i])
lpsolve('solve', 'CutStock')
#ret = lpsolve('write_lp', 'CutStock', 'cutstock.lp')
lpsolve('solve', 'CutStock')
statuscode = lpsolve('get_status', 'CutStock')
print lpsolve('get_statustext', 'CutStock', statuscode)
print lpsolve('get_objective', 'CutStock')
print lpsolve('get_variables', 'CutStock')[0]
| [
"jsiirola@users.noreply.github.com"
] | jsiirola@users.noreply.github.com |
73f4edde57e72fa7e63e4a92f229752ca1fc8510 | 2ebdbbf06978fd60f47933cfffd37a5a5460ee31 | /Sect-A/source/sect07_class/s722_init_class.py | a5713295190ad89f453cb5131794fdb540b71b11 | [] | no_license | lukejskim/sba19-seoulit | f55dd6279d44a7a235a9fa6c008c7f65045c9d0c | 7652c2b718cb8f7efaeca7c2bdf7a5e699bccbce | refs/heads/master | 2020-07-02T22:21:20.393202 | 2019-09-19T03:03:48 | 2019-09-19T03:03:48 | 201,684,313 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # 클래스 초기화 함수, __init__() 재정의
class MyClass:
def __init__(self, name): # 초기화 함수 재정의
self.name = name
def sayHello(self):
hello = "Hello, " + self.name + "\t It's Good day !"
print(hello)
# 객체 생성, 인스턴스화
# myClass = MyClass()
myClass = MyClass('채영')
myClass.sayHello()
| [
"bluenine52@gmail.com"
] | bluenine52@gmail.com |
0be0ac7c19336cdca2defea65d64f98699597172 | d5cc0c9f8d94e9d020b3e50c0a125d2041dd3baa | /AttendifySite(Flask)/env/lib/python3.6/site-packages/turicreate/data_structures/sarray_builder.py | 001e211365f0220b1925c0e5a6334dd1fb301ca3 | [
"MIT"
] | permissive | arnavgup/Attendify_iOS | be896579de4560cff36a4b163384d0eeabbb7dd9 | c2efc3273a7b99c09d918567718ac87d7f0179d8 | refs/heads/master | 2022-10-31T13:16:11.081902 | 2018-12-09T00:11:42 | 2018-12-09T00:11:42 | 158,432,022 | 3 | 2 | MIT | 2022-10-10T10:53:53 | 2018-11-20T18:10:16 | Swift | UTF-8 | Python | false | false | 4,372 | py | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
'''
An interface for creating an SArray over time.
'''
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from ..cython.cy_sarray_builder import UnitySArrayBuilderProxy
from .sarray import SArray
class SArrayBuilder(object):
    """
    An interface to incrementally build an SArray element by element.
    Once closed, the SArray cannot be "reopened" using this interface.

    Parameters
    ----------
    dtype : type
        The type the resulting SArray will be. If None, the resulting SArray
        will take on the type of the first non-None value it receives.
    num_segments : int, optional
        Number of segments that can be written in parallel.
    history_size : int, optional
        The number of elements to be cached as history. Caches the last
        `history_size` elements added with `append` or `append_multiple`.

    Returns
    -------
    out : SArrayBuilder

    Examples
    --------
    >>> from turicreate import SArrayBuilder
    >>> sb = SArrayBuilder(int)
    >>> sb.append(1)
    >>> sb.append_multiple([2,3])
    >>> sb.close()
    dtype: int
    Rows: 3
    [1, 2, 3]
    """

    def __init__(self, dtype, num_segments=1, history_size=10):
        self._builder = UnitySArrayBuilderProxy()
        if dtype is None:
            # None means "infer the type from the first non-None value".
            dtype = type(None)
        self._builder.init(num_segments, history_size, dtype)
        # Number of elements buffered per call into the native builder.
        self._block_size = 1024

    def append(self, data, segment=0):
        """
        Append a single element to an SArray.
        Throws a RuntimeError if the type of `data` is incompatible with
        the type of the SArray.

        Parameters
        ----------
        data : any SArray-supported type
            A data element to add to the SArray.
        segment : int
            The segment to write this element. Each segment is numbered
            sequentially, starting with 0. Any value in segment 1 will be after
            any value in segment 0, and the order of elements in each segment is
            preserved as they are added.
        """
        self._builder.append(data, segment)

    def append_multiple(self, data, segment=0):
        """
        Append multiple elements to an SArray.
        Throws a RuntimeError if the type of any element is incompatible with
        the type of the SArray.

        Parameters
        ----------
        data : iterable of any SArray-supported type
            An iterable of data elements to add to the SArray.
        segment : int
            The segment to write these elements. Each segment is numbered
            sequentially, starting with 0. Any value in segment 1 will be after
            any value in segment 0, and the order of elements in each segment is
            preserved as they are added.
        """
        if not hasattr(data, '__iter__'):
            raise TypeError("append_multiple must be passed an iterable object")
        tmp_list = []
        # Buffer elements and flush in blocks of self._block_size to limit the
        # number of calls into the native builder.
        for i in data:
            tmp_list.append(i)
            if len(tmp_list) >= self._block_size:
                self._builder.append_multiple(tmp_list, segment)
                tmp_list = []
        # Flush any remaining buffered elements.
        if len(tmp_list) > 0:
            self._builder.append_multiple(tmp_list, segment)

    def get_type(self):
        """
        The type the result SArray will be if `close` is called.
        """
        return self._builder.get_type()

    def read_history(self, num=10, segment=0):
        """
        Outputs the last `num` elements that were appended either by `append` or
        `append_multiple`.

        Returns
        -------
        out : list
        """
        if num < 0:
            # Negative requests are clamped to "no history".
            num = 0
        if segment < 0:
            raise TypeError("segment must be >= 0")
        return self._builder.read_history(num, segment)

    def close(self):
        """
        Creates an SArray from all values that were appended to the
        SArrayBuilder. No function that appends data may be called after this
        is called.

        Returns
        -------
        out : SArray
        """
        return SArray(_proxy=self._builder.close())
| [
"gyao@andrew.cmu.edu"
] | gyao@andrew.cmu.edu |
a7906bf87088132f1a08cef90dff1ea329cfa1ed | c849b2d67cb4906d8ba8ea45aa7fce2170bbb46f | /sources/Tosafot Yom Tov/tosafot_yom_tov_on_tahorot.py | c8e933c838128950a497e2bc67e91ec41efe6186 | [] | no_license | BenjaminKozuch/Sefaria-Data | 8d7452ab7efc95f09ca44e658ee8df1ab2ca84a3 | f154d79ed20f907aff8880c684536c22f970a8a5 | refs/heads/master | 2020-05-29T11:04:35.975262 | 2016-04-05T03:00:26 | 2016-04-05T03:00:26 | 54,200,952 | 0 | 0 | null | 2016-03-18T12:53:54 | 2016-03-18T12:53:54 | null | UTF-8 | Python | false | false | 1,840 | py | # -*- coding: utf-8 -*-
import urllib
import urllib2
from urllib2 import URLError, HTTPError
import json
import pdb
import os
import sys
p = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, p)
os.environ['DJANGO_SETTINGS_MODULE'] = "sefaria.settings"
from local_settings import *
sys.path.insert(0, SEFARIA_PROJECT_PATH)
from sefaria.model import *
def post_index(index):
    # POST a raw index document to the Sefaria API.  NOTE: Python 2 only
    # (urllib2 and print statements below).
    url = SEFARIA_SERVER+'api/v2/raw/index/'+index["title"].replace(" ", "_")
    indexJSON = json.dumps(index)
    values = {
        'json': indexJSON,
        'apikey': API_KEY
    }
    # The API expects a form-encoded body containing the JSON payload.
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    try:
        response = urllib2.urlopen(req)
        print response.read()
    except HTTPError, e:
        print 'Error code: ', e.code
# Build the schema tree for "Tosafot Yom Tov on Mishnah Tahorot".
root = SchemaNode()
root.add_title("Tosafot Yom Tov on Mishnah Tahorot", "en", primary=True)
root.add_title(u"תוספות יום טוב על משנה טהרות", "he", primary=True)
root.key = "tosafot_yom_tov_tahorot"

# (english name, hebrew name, has-introduction flag)
sections = [("Tahorot", u"טהרות", 1)]
for sec in sections:
    if sec[2] == 1:
        # Optional introduction node, one flat list of paragraphs.
        intro_node = JaggedArrayNode()
        intro_node.add_title(sec[0]+", Introduction", "en", primary=True)
        intro_node.add_title(sec[1]+u", הקדמה", "he", primary=True)
        intro_node.key = 'intro'+sec[0]
        intro_node.sectionNames = ["Paragraph"]
        intro_node.depth = 1
        intro_node.addressTypes = ["Integer"]
        root.append(intro_node)

# Default node: the commentary body, addressed Perek -> Mishnah -> Comment.
# NOTE(review): original indentation was lost; with a single section, placing
# this inside or after the loop is behaviorally identical.
main_node = JaggedArrayNode()
main_node.default = True
main_node.key = "default"
main_node.sectionNames = ["Perek", "Mishnah", "Comment"]
main_node.depth = 3
main_node.addressTypes = ["Integer", "Integer", "Integer"]
root.append(main_node)

root.validate()

index = {
    "title": "Tosafot Yom Tov on Mishnah Tahorot",
    "categories": ["Commentary2", "Mishnah", "Tosafot Yom Tov"],
    "schema": root.serialize()
}
post_index(index)
| [
"skaplan@brandeis.edu"
] | skaplan@brandeis.edu |
3927ef4df50ddce90f329d071647ae915d3e27ae | 78f3fe4a148c86ce9b80411a3433a49ccfdc02dd | /2017/07/millennial-vote-20170731/graphic_config.py | 94b1323068174ac14ed698e7140ffa728e464d7e | [] | no_license | nprapps/graphics-archive | 54cfc4d4d670aca4d71839d70f23a8bf645c692f | fe92cd061730496cb95c9df8fa624505c3b291f8 | refs/heads/master | 2023-03-04T11:35:36.413216 | 2023-02-26T23:26:48 | 2023-02-26T23:26:48 | 22,472,848 | 16 | 7 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python
import base_filters

# Google Sheet that drives this graphic's copy text.
COPY_GOOGLE_DOC_KEY = '17ZezeFdg7cYvdo04pV89-AcyMkSSsM_V27_4R2ykne4'
USE_ASSETS = False

# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300

JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
| [
"ahurt@npr.org"
] | ahurt@npr.org |
ea9de5a0f9467ec173e776c67c0e3726fbc4e972 | 725ce8167897de0ffd42b97b7aefff43686b0d33 | /barbados/caches/tablescan.py | cf8c65e77e36b7438733656b6ae9e607decfbee9 | [] | no_license | vinceblake/barbados | a510424f82d77066b9b6fa0e1d4641cbbeb5f138 | 28b6c691e5c8150f51b8ee57a99239232b1417ef | refs/heads/master | 2023-02-15T07:54:47.381960 | 2021-01-13T02:51:49 | 2021-01-13T02:51:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | import json
from barbados.caches import Caches
from barbados.caches.base import CacheBase
from barbados.services.registry import Registry
from barbados.services.cache import Cache
from barbados.serializers import ObjectSerializer
from barbados.models.cocktailmodel import CocktailModel
from barbados.models.inventorymodel import InventoryModel
from barbados.models.ingredientmodel import IngredientModel
from barbados.models.menumodel import MenuModel
from barbados.factories.cocktailfactory import CocktailFactory
from barbados.factories.inventoryfactory import InventoryFactory
from barbados.factories.ingredientfactory import IngredientFactory
from barbados.factories.menufactory import MenuFactory
class TableScanCache(CacheBase):
    """Base cache that serializes an entire table's objects into the cache.

    Subclasses supply ``cache_key``, ``model_class`` and ``factory_class`` as
    plain class attributes, shadowing the abstract properties below.
    """

    @property
    def cache_key(self):
        raise NotImplementedError

    @property
    def model_class(self):
        raise NotImplementedError

    @property
    def factory_class(self):
        raise NotImplementedError

    @classmethod
    def populate(cls):
        """
        Populate the cache with its expected value(s).
        :return: None
        """
        connection = Registry.get_database_connection()
        with connection.get_session() as session:
            serialized = [
                ObjectSerializer.serialize(obj, 'dict')
                for obj in cls.factory_class.produce_all_objs(session=session)
            ]
            Cache.set(cls.cache_key, json.dumps(serialized))
class CocktailScanCache(TableScanCache):
    # Full-table scan cache for cocktail records.
    cache_key = 'cocktail_scan_cache'
    model_class = CocktailModel
    factory_class = CocktailFactory
class IngredientScanCache(TableScanCache):
    # Full-table scan cache for ingredient records.
    cache_key = 'ingredient_scan_cache'
    model_class = IngredientModel
    factory_class = IngredientFactory
class MenuScanCache(TableScanCache):
    # Full-table scan cache for menu records.
    cache_key = 'menu_scan_cache'
    model_class = MenuModel
    factory_class = MenuFactory
class InventoryScanCache(TableScanCache):
    # Full-table scan cache for inventory records.
    cache_key = 'inventory_scan_cache'
    model_class = InventoryModel
    factory_class = InventoryFactory
# Register every scan cache with the global registry so each can be looked up
# and repopulated by name.
Caches.register_cache(CocktailScanCache)
Caches.register_cache(IngredientScanCache)
Caches.register_cache(MenuScanCache)
Caches.register_cache(InventoryScanCache)
| [
"grant@grantcohoe.com"
] | grant@grantcohoe.com |
2aef79885420e7847ba448d8f8a082511c15a162 | 4c852fab792606580acb3f3a61b7f86ae25930b0 | /Python/MIT-CompThinking/MITx600.1x/ProblemSets/theof/printing out all available letters.py | dd1d06b519f64693fb90a40e66cecf0d0e339e61 | [] | no_license | hmchen47/Programming | a9767a78a35c0844a1366391f48b205ff1588591 | 9637e586eee5c3c751c96bfc5bc1d098ea5b331c | refs/heads/master | 2022-05-01T01:57:46.573136 | 2021-08-09T04:29:40 | 2021-08-09T04:29:40 | 118,053,509 | 2 | 1 | null | 2021-09-20T19:54:02 | 2018-01-19T00:06:04 | Python | UTF-8 | Python | false | false | 650 | py | #!/usr/bin/env python
# _*_ coding = UTF-8 _*_
def getAvailableLetters(lettersGuessed):
    '''
    lettersGuessed: list, what letters have been guessed so far
    returns: string, comprised of letters that represents what letters have not
    yet been guessed.
    '''
    import string
    # Walk the full alphabet in order and keep everything not yet guessed.
    # This preserves alphabetical order and is naturally robust to duplicate
    # guesses, replacing the original nested remove-duplicates helper.
    return ''.join(c for c in string.ascii_lowercase if c not in lettersGuessed)
"h.m.chen@ieee.org"
] | h.m.chen@ieee.org |
335a6acaca425ac8950b4095a444908a724f8a8f | 07d73cb816ad1d35c7a96935ed8d4c9fad9e03da | /{{cookiecutter.project_name}}/bin/update | 849d75d4da1ad611ee60f500ae7f193f7a510c82 | [
"MIT",
"Unlicense"
] | permissive | mkell43/template-python | a11eb8294d916567e5d50ff34abe2e5eab943931 | 3efcb0a5837cfe5b53c5a7761732df916ed64bd7 | refs/heads/main | 2023-07-16T02:56:55.203305 | 2021-08-18T18:58:35 | 2021-08-18T18:58:35 | 398,657,388 | 0 | 0 | Unlicense | 2021-08-21T21:13:27 | 2021-08-21T21:13:26 | null | UTF-8 | Python | false | false | 2,051 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import importlib
import tempfile
import shutil
import subprocess
import sys
CWD = os.getcwd()
TMP = tempfile.gettempdir()

# NOTE: the {{ cookiecutter.* }} markers below are Jinja2 placeholders; this
# script is itself part of a cookiecutter template and becomes valid Python
# only after the template is rendered.
CONFIG = {
    "full_name": "{{ cookiecutter.full_name }}",
    "email": "{{ cookiecutter.email }}",
    "github_username": "{{ cookiecutter.github_username }}",
    "github_repo": "{{ cookiecutter.github_repo }}",
    "default_branch": "{{ cookiecutter.default_branch }}",
    "project_name": "{{ cookiecutter.project_name }}",
    "package_name": "{{ cookiecutter.package_name }}",
    "project_short_description": "{{ cookiecutter.project_short_description }}",
    "python_major_version": {{ cookiecutter.python_major_version }},
    "python_minor_version": {{ cookiecutter.python_minor_version }},
}
def install(package='cookiecutter'):
    """Ensure *package* is importable, pip-installing it on demand."""
    try:
        importlib.import_module(package)
        return
    except ImportError:
        pass
    # Not importable yet: install with the same interpreter's pip.
    print("Installing cookiecutter")
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
def run():
    """Regenerate the template project inside the system temp directory."""
    print("Generating project")
    from cookiecutter.main import cookiecutter
    # Generate under TMP so copy() can later pick the files up from there.
    os.chdir(TMP)
    template_url = 'https://github.com/jacebrowning/template-python.git'
    cookiecutter(
        template_url,
        no_input=True,
        overwrite_if_exists=True,
        extra_context=CONFIG,
    )
def copy():
    """Copy refreshed support files from the generated project into CWD."""
    filenames = [
        '.coveragerc',
        '.gitattributes',
        '.gitignore',
        '.isort.cfg',
        '.mypy.ini',
        '.pydocstyle.ini',
        '.pylint.ini',
        '.scrutinizer.yml',
        '.verchew.ini',
        'CONTRIBUTING.md',
        'Makefile',
        os.path.join('bin', 'checksum'),
        os.path.join('bin', 'open'),
        os.path.join('bin', 'update'),
        os.path.join('bin', 'verchew'),
        'pytest.ini',
        'scent.py',
    ]
    generated_root = os.path.join(TMP, CONFIG['project_name'])
    for filename in filenames:
        print("Updating " + filename)
        shutil.copy(os.path.join(generated_root, filename),
                    os.path.join(CWD, filename))
if __name__ == '__main__':
    # Bootstrap: ensure cookiecutter is available, regenerate the template
    # project, then copy the refreshed files into this project.
    install()
    run()
    copy()
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com | |
065fb3d6006b67322a915670b180ffe5efba5694 | db9463b7271c5a88f473390f393fbc5f00b6f46b | /visbrain/utils/tests/test_others.py | 37a60c9de6af5f5218c065bc0f3957d04540ca77 | [
"BSD-3-Clause"
] | permissive | lassemadsen/visbrain | 6fbee27acaa46741899782a2ba347f6050275411 | be096aa8a7058c329e7120d0bdb45d3c9eb8be42 | refs/heads/master | 2022-11-08T05:00:21.857939 | 2022-10-25T10:33:36 | 2022-10-25T10:33:36 | 191,604,064 | 0 | 0 | NOASSERTION | 2019-06-12T16:05:22 | 2019-06-12T16:05:21 | null | UTF-8 | Python | false | false | 566 | py | """Test functions in others.py."""
from visbrain.utils.others import (get_dsf, set_if_not_none)
class TestOthers(object):
    """Unit tests for the helpers in others.py."""

    def test_get_dsf(self):
        """get_dsf returns (downsampling factor, resulting sampling rate)."""
        assert get_dsf(100, 1000.) == (10, 100.)
        assert get_dsf(100, None) == (1, 100.)

    def test_set_if_not_none(self):
        """set_if_not_none only replaces the value when allowed and not None."""
        original = 5.
        assert set_if_not_none(original, None) == 5.
        assert set_if_not_none(original, 10., False) == 5.
        assert set_if_not_none(original, 10.) == 10.
| [
"e.combrisson@gmail.com"
] | e.combrisson@gmail.com |
fa1812af10cf0f984d450fedbc15640cf16be484 | 1f51c4e89a71ea3fcc2cc921613aacc19e078b69 | /14_Introduction to Importing Data in Python-(part-1)/02_Importing data from other file types/13_loading-mat-files.py | 79ee9f57f5e58d78452f6efc7cc604fe998745c0 | [
"MIT"
] | permissive | CodeHemP/CAREER-TRACK-Data-Scientist-with-Python | 871bafbd21c4e754beba31505965572dd8457adc | 13ebb10cf9083343056d5b782957241de1d595f9 | refs/heads/main | 2023-03-26T08:43:37.054410 | 2021-03-22T15:08:12 | 2021-03-22T15:08:12 | 471,015,287 | 1 | 0 | MIT | 2022-03-17T13:52:32 | 2022-03-17T13:52:31 | null | UTF-8 | Python | false | false | 775 | py | '''
13 - Loading .mat file
In this exercise, you'll figure out how to load a MATLAB file using scipy.io.loadmat()
and you'll discover what Python datatype it yields.
The file 'albeck_gene_expression.mat' is in your working directory. This file contains
gene expression data from the Albeck Lab at UC Davis. You can find the data and some
great documentation here.
Instructions:
- Import the package scipy.io.
- Load the file 'albeck_gene_expression.mat' into the variable mat; do so using the
function scipy.io.loadmat().
- Use the function type() to print the datatype of mat to the IPython shell.
'''
# Import package
import scipy.io

# Load MATLAB file: mat (loadmat returns a dict of variable name -> value)
mat = scipy.io.loadmat('albeck_gene_expression.mat')

# Print the datatype of mat (a dict)
print(type(mat))
| [
"ifaizymohd@gmail.com"
] | ifaizymohd@gmail.com |
702ae2e137a34f2c9eac6d52110d38a6f60ade83 | 5f2b22d4ffec7fc1a4e40932acac30256f63d812 | /analysis-of-legal-documents/project/process/cnews_loader_withoutSeqLens.py | bdc9b80dfb3188f3ceee15a1a35c7055208b4ff7 | [] | no_license | Thpffcj/Python-Learning | 45734dd31e4d8d047eec5c5d26309bc7449bfd0d | 5dacac6d33fcb7c034ecf5be58d02f506fd1d6ad | refs/heads/master | 2023-08-04T21:02:36.984616 | 2021-09-21T01:30:04 | 2021-09-21T01:30:04 | 111,358,872 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,648 | py | # coding: utf-8
import sys
from collections import Counter
import numpy as np
import tensorflow.contrib.keras as kr
# Detect the interpreter's major version once; the helpers below consult this
# flag to decide whether byte/unicode conversions are needed.
if sys.version_info[0] > 2:
    is_py3 = True
else:
    # Python 2 only: force the process-wide default encoding to UTF-8.
    reload(sys)
    sys.setdefaultencoding("utf-8")
    is_py3 = False
def native_word(word, encoding='utf-8'):
    """Return *word* in the running interpreter's native string form.

    Under Python 2 the word is byte-encoded (useful when reusing a model
    trained under Python 3); under Python 3 it is returned unchanged.
    """
    if is_py3:
        return word
    return word.encode(encoding)
def native_content(content):
    """Decode *content* to unicode under Python 2; pass through on Python 3."""
    return content if is_py3 else content.decode('utf-8')
def open_file(filename, mode='r'):
    """Open *filename* portably across Python 2 and 3.

    mode: 'r' or 'w' for read or write.  Python 3 opens the file as UTF-8
    text and ignores decode errors; Python 2's open() takes no encoding
    argument, so the file is opened as-is there.
    """
    if not is_py3:
        return open(filename, mode)
    return open(filename, mode, encoding='utf-8', errors='ignore')
def read_file(filename):
    """Read a tab-separated "label<TAB>text" file into parallel lists.

    Returns (contents, labels); each content entry is the text exploded into
    a list of characters.  Lines that fail to parse are skipped on purpose.
    """
    contents = []
    labels = []
    with open_file(filename) as fp:
        for line in fp:
            try:
                label, content = line.strip().split('\t')
                if content:
                    contents.append(list(native_content(content)))
                    labels.append(native_content(label))
            except:
                # best-effort parsing: ignore malformed lines
                pass
    return contents, labels
def build_vocab(train_dir, vocab_dir, vocab_size=5000):
    """Build a character vocabulary from the training set and store it.

    The vocabulary holds the (vocab_size - 1) most frequent characters, with
    a leading '<PAD>' token used later to pad texts to a common length.
    """
    data_train, _ = read_file(train_dir)
    counter = Counter()
    for content in data_train:
        # Fold each document's characters into one frequency table.
        counter.update(content)
    # Leave one slot for the '<PAD>' entry prepended below.
    count_pairs = counter.most_common(vocab_size - 1)
    words, _ = list(zip(*count_pairs))
    words = ['<PAD>'] + list(words)
    open_file(vocab_dir, mode='w').write('\n'.join(words) + '\n')
def read_vocab(vocab_dir):
    """Read the vocabulary file and map each word to its line index."""
    with open_file(vocab_dir) as fp:
        # Under Python 2 every entry is converted to unicode.
        words = [native_content(line.strip()) for line in fp.readlines()]
    word_to_id = {word: idx for idx, word in enumerate(words)}
    return words, word_to_id
def read_category():
    """Return the fixed news-category list and a category -> id mapping."""
    categories = ['体育', '财经', '房产', '家居', '教育', '科技', '时尚', '时政', '游戏', '娱乐']
    categories = [native_content(x) for x in categories]
    cat_to_id = {cat: idx for idx, cat in enumerate(categories)}
    return categories, cat_to_id
def to_words(content, words):
    """Convert a sequence of ids back into the text they represent."""
    pieces = [words[idx] for idx in content]
    return ''.join(pieces)
def process_file(filename, word_to_id, cat_to_id, max_length=600):
    """Convert a labelled text file into padded id arrays.

    Returns (x_pad, y_pad): character-id sequences padded to max_length and
    labels one-hot encoded over len(cat_to_id) classes.
    """
    contents, labels = read_file(filename)
    data_id = []
    label_id = []
    for content, label in zip(contents, labels):
        # Out-of-vocabulary characters are silently dropped.
        data_id.append([word_to_id[ch] for ch in content if ch in word_to_id])
        label_id.append(cat_to_id[label])
    # keras pads (or truncates) every sequence to the same fixed length.
    x_pad = kr.preprocessing.sequence.pad_sequences(data_id, max_length)
    y_pad = kr.utils.to_categorical(label_id, num_classes=len(cat_to_id))
    return x_pad, y_pad
def batch_iter(x1, x2, y, batch_size):
    """Yield shuffled (x1, x2, y) mini-batches.

    NOTE(review): num_batch is computed as int((len - 1) / batch_size) - 1,
    which drops the trailing batches; preserved as-is to keep behaviour
    identical to the original.
    """
    data_len = len(x1)
    num_batch = int((data_len - 1) / batch_size) - 1
    # A single shared permutation keeps the three arrays aligned row-for-row.
    indices = np.random.permutation(np.arange(data_len))
    x1_mixed = x1[indices]
    x2_mixed = x2[indices]
    y_mixed = y[indices]
    for batch in range(num_batch):
        lo = batch * batch_size
        hi = min(lo + batch_size, data_len)
        yield x1_mixed[lo:hi], x2_mixed[lo:hi], y_mixed[lo:hi]
| [
"1441732331@qq.com"
] | 1441732331@qq.com |
2f2d6c104a4727cc6ace6c9678d9dc45d95d1401 | eafc5b935d0f086dffbcfe1516ba05ab6ce18540 | /source/w3c/demo_mysql_show_databases.py | 30ea6b4eae940c951db24e9548276bc018c5d252 | [] | no_license | YaooXu/Software_test | 1df4195da7dab6f05862afe458c10b1bee1dcaf8 | 936dda4de0a1bcf6cfc87d5148f6219b625a99fe | refs/heads/master | 2020-11-28T04:09:58.724233 | 2020-01-19T13:50:38 | 2020-01-19T13:50:38 | 229,699,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py |
import mysql.connector

# Connect to a local MySQL server; credentials here are placeholders.
mydb = mysql.connector.connect(
    host="localhost",
    user="myusername",
    passwd="mypassword"
)

mycursor = mydb.cursor()
# List every database on the server; the cursor yields one-element tuples.
mycursor.execute("SHOW DATABASES")
for x in mycursor:
    print(x)
| [
"yuanruize@sina.com"
] | yuanruize@sina.com |
a5af8ae75f4ece2c26d59e219306eb61266a2f0c | 39b0d9c6df77671f540c619aff170441f953202a | /PYTHON LIBRARY/SUB_3/pathlib_symlink_to.py | 48a4dcc8bef3c8cf48f9e42a1e86301f9be8596e | [] | no_license | yeboahd24/Python201 | e7d65333f343d9978efff6bf86ce0447d3a40d70 | 484e66a52d4e706b8478473347732e23998c93c5 | refs/heads/main | 2023-02-06T10:24:25.429718 | 2020-12-26T01:08:04 | 2020-12-26T01:08:04 | 306,487,550 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # Use symlink_to() to create a symbolic link. The link will be named based on the path’s
# value and will refer to the name given as an argument to symlink_to().
import pathlib

p = pathlib.Path('example_link')
# Create a symbolic link named 'example_link' pointing at 'index.txt'.
p.symlink_to('index.txt')

print(p)
# resolve() follows the link, so this prints the target file's name.
print(p.resolve().name)
"noreply@github.com"
] | yeboahd24.noreply@github.com |
2f84648acb1917f62b8824fdb5590a908a1fca86 | 12e956d80079f2808aae687b2cfbe1384deb35e2 | /api/views.py | 1cc372f8d8cfb23489216446a98295069a7bedc7 | [] | no_license | khushal111/Patient_Doctor_App | e141a16c09fd400b5abd5b849ebe02073b6082b9 | 5964c242d06023cbe34b9d5d0a16c6228ed5b734 | refs/heads/master | 2022-06-29T03:14:26.743811 | 2019-11-07T17:45:24 | 2019-11-07T17:45:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from django.shortcuts import render
from .models import Prescription
from .serializers import PrescriptionSerializer
from rest_framework import viewsets
class PrescriptionViewSet(viewsets.ModelViewSet):
    # Standard CRUD REST endpoints for Prescription records.
    serializer_class = PrescriptionSerializer
    queryset = Prescription.objects.all()
| [
"kylemaa95@gmail.com"
] | kylemaa95@gmail.com |
de182fe89d8bc26cd4f9eb12b282489dc1e77349 | 150464efa69db3abf328ef8cd912e8e248c633e6 | /_4.python/__code/Python自學聖經(第二版)/ch33/filewrite/filewrite.py | 9bc15311d12904f928cb141311e56326bb751810 | [] | no_license | bunshue/vcs | 2d194906b7e8c077f813b02f2edc70c4b197ab2b | d9a994e3afbb9ea84cc01284934c39860fea1061 | refs/heads/master | 2023-08-23T22:53:08.303457 | 2023-08-23T13:02:34 | 2023-08-23T13:02:34 | 127,182,360 | 6 | 3 | null | 2023-05-22T21:33:09 | 2018-03-28T18:33:23 | C# | UTF-8 | Python | false | false | 713 | py | import os, sys
def base_path(path):
    """Resolve *path* relative to the application's resource directory.

    When running as a frozen (PyInstaller) bundle, resources live in the
    temporary extraction directory (sys._MEIPASS); otherwise they sit next
    to this source file.
    """
    if getattr(sys, 'frozen', None):
        root = sys._MEIPASS
    else:
        root = os.path.dirname(__file__)
    return os.path.join(root, path)
tmp=base_path("")  # resolve the temp/resource directory
cwd=os.getcwd()  # get the current working directory
file1="file1.txt"
file2=os.path.join(tmp,"file2.txt")
file3=os.path.join(cwd,"file3.txt")
f1=open(file1,'w')  # write into the working directory
f1.write("file1 txt")
f1.close()
print(file1,"寫入成功!")
f2=open(file2,'w')  # write into the tmp directory
f2.write("file2 txt")
f2.close()
print(file2,"寫入成功!")
f3=open(file3,'w')  # write into the cwd directory
f3.write("file3 txt")
f3.close()
print(file3,"寫入成功!")
key=input("按任意鍵結束!")
"david@insighteyes.com"
] | david@insighteyes.com |
de09319ec0e8ac2dc77f7838a59b05565e829784 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/defaultRangeUtils.py | 26a406861d88271fddf15189e16a4244d7860242 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 300 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\common\modules\nice\client\_nastyspace\defaultRangeUtils.py
from eve.client.script.ui.util.defaultRangeUtils import FetchRangeSetting
from eve.client.script.ui.util.defaultRangeUtils import UpdateRangeSetting
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
fb2d0a433f46dfef15d4ed2718491bf49c53bc61 | 6a609bc67d6a271c1bd26885ce90b3332995143c | /exercises/math/fast_power.py | 5d59cee28aa49b1fcc93768bb25df40530f28039 | [] | no_license | nahgnaw/data-structure | 1c38b3f7e4953462c5c46310b53912a6e3bced9b | 18ed31a3edf20a3e5a0b7a0b56acca5b98939693 | refs/heads/master | 2020-04-05T18:33:46.321909 | 2016-07-29T21:14:12 | 2016-07-29T21:14:12 | 44,650,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | # -*- coding: utf-8 -*-
"""
Calculate the a^n % b where a, b and n are all 32bit integers.
Example
For 2^31 % 3 = 2
For 100^1000 % 1000 = 0
Challenge
O(logn)
"""
class Solution:
    """
    @param a, b, n: 32bit integers
    @return: An integer
    """
    def fastPower(self, a, b, n):
        """Compute a^n % b in O(log n) by repeated squaring.

        Returns -1 for negative exponents (unsupported by this problem).
        """
        if n == 1:
            return a % b
        elif n == 0:
            return 1 % b
        elif n < 0:
            return -1
        # (x * y) % p == ((x % p) * (y % p)) % p, so square the half-power
        # modulo b.  Floor division (//) is required so the recursion also
        # terminates on Python 3, where `/` would yield a float and the
        # base cases n == 1 / n == 0 would never be reached.
        result = self.fastPower(a, b, n // 2)
        result = (result * result) % b
        if n % 2 == 1:
            result = (result * a) % b
        return result
| [
"wanghan15@gmail.com"
] | wanghan15@gmail.com |
6fde1ae6e4c647c4fde009202a3e94db05805c30 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_interlinking.py | affa9fb8d987c586a7f8143611a47511eb6e2d67 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py |
from xai.brain.wordbase.verbs._interlink import _INTERLINK
#calss header
class _INTERLINKING(_INTERLINK):
    """Inflected form "interlinking" of the base verb "interlink"."""

    def __init__(self):
        # Initialise the base form first, then record this inflection.
        _INTERLINK.__init__(self)
        self.name = "INTERLINKING"
        self.basic = "interlink"
        self.specie = 'verbs'
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
233e04fcf6506d8a3ffcee5406e8061482bbf178 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/python/lxml.py | e2170e0115cfd92004cf74afa4ff88b7961d1972 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | from .base import PipBaseRecipe
class LxmlRecipe(PipBaseRecipe):
    """Build recipe for the lxml Python package."""

    def __init__(self, *args, **kwargs):
        super(LxmlRecipe, self).__init__(*args, **kwargs)
        self.name = 'lxml'
        # Pinned below 4.x because apache-airflow requires lxml < 4.
        self.version = '3.8.0'
        self.sha256 = ('736f72be15caad8116891eb6aa4a078b'
                       '590d231fdc63818c40c21624ac71db96')
| [
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
56929c26fc1cf02790a98401f43424fa08ca800e | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_06_01/aio/_container_service_client.py | e859946dcaf202e06e448c829e00e8209b572eb0 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 4,898 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ..._serialization import Deserializer, Serializer
from ._configuration import ContainerServiceClientConfiguration
from .operations import AgentPoolsOperations, ManagedClustersOperations, Operations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerServiceClient:  # pylint: disable=client-accepts-api-version-keyword
    """The Container Service Client.

    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.containerservice.v2019_06_01.aio.operations.Operations
    :ivar managed_clusters: ManagedClustersOperations operations
    :vartype managed_clusters:
     azure.mgmt.containerservice.v2019_06_01.aio.operations.ManagedClustersOperations
    :ivar agent_pools: AgentPoolsOperations operations
    :vartype agent_pools:
     azure.mgmt.containerservice.v2019_06_01.aio.operations.AgentPoolsOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2019-06-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    # NOTE: AutoRest-generated client — hand edits beyond comments will be
    # lost on regeneration (see file header).
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = ContainerServiceClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Wire up (de)serialization over every generated model class.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # Operation groups share the same pipeline client and serializers.
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.managed_clusters = ManagedClustersOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is never mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        # Release the underlying pipeline/transport resources.
        await self._client.close()

    async def __aenter__(self) -> "ContainerServiceClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | test-repo-billy.noreply@github.com |
68a1455380858827d9d8af0bc4cc0ed58d3db25b | 0bd7c1f7bf6da5ef92b9013e1d913140f0249dfa | /cecilia-python/greedy-thinking/MaxProfit-Ⅱ.py | fa987195663b384497d5d5818948131fd456811e | [] | no_license | Cecilia520/algorithmic-learning-leetcode | f1fec1fae71c4cf7410122f5ce969e829f451308 | 32941ee052d0985a9569441d314378700ff4d225 | refs/heads/master | 2022-05-02T03:00:57.505672 | 2022-03-19T09:51:28 | 2022-03-19T09:51:28 | 229,673,810 | 7 | 1 | null | 2022-03-19T09:34:57 | 2019-12-23T04:04:04 | Python | UTF-8 | Python | false | false | 2,586 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : MaxProfit-Ⅱ.py
@Contact : 70904372cecilia@gmail.com
@License : (C)Copyright 2019-2020
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2020/2/5 12:48 cecilia 1.0 买卖股票的最佳时机Ⅱ(简单)
问题描述:
给定一个数组,它的第 i 个元素是一支给定股票第 i 天的价格。
设计一个算法来计算你所能获取的最大利润。你可以尽可能地完成更多的交易(多次买卖一支股票)。
注意:你不能同时参与多笔交易(你必须在再次购买前出售掉之前的股票)。
示例1:
输入: [7,1,5,3,6,4]
输出: 7
解释: 在第 2 天(股票价格 = 1)的时候买入,在第 3 天(股票价格 = 5)的时候卖出, 这笔交易所能获得利润 = 5-1 = 4 。
随后,在第 4 天(股票价格 = 3)的时候买入,在第 5 天(股票价格 = 6)的时候卖出, 这笔交易所能获得利润 = 6-3 = 3
示例2:
输入: [1,2,3,4,5]
输出: 4
解释: 在第 1 天(股票价格 = 1)的时候买入,在第 5 天 (股票价格 = 5)的时候卖出, 这笔交易所能获得利润 = 5-1 = 4 。
注意你不能在第 1 天和第 2 天接连购买股票,之后再将它们卖出。
因为这样属于同时参与了多笔交易,你必须在再次购买前出售掉之前的股票。
示例3:
输入: [7,6,4,3,1]
输出: 0
解释: 在这种情况下, 没有交易完成, 所以最大利润为 0。
"""
def maxPrice(prices) -> int:
    """Best profit from unlimited buy/sell transactions (LeetCode 122).

    Greedy observation: because a sale and a purchase may happen on the same
    day, the optimal total profit is simply the sum of every positive
    day-over-day price increase.

    :param prices: daily stock prices (possibly empty)
    :return: the maximum total profit (0 when no profitable trade exists)

    Complexity: O(n) time, O(1) extra space.
    """
    # zip pairs each day with the next one; negative moves contribute nothing.
    return sum(
        max(today - yesterday, 0)
        for yesterday, today in zip(prices, prices[1:])
    )
if __name__ == '__main__':
    # Demo: the first example from the problem statement (expected output: 7).
    prices = [7, 1, 5, 3, 6, 4]
    print(maxPrice(prices))
| [
"cc15572018516@163.com"
] | cc15572018516@163.com |
b201ee6feb1416bd3cbda627e72d8e88d5850c0c | d4dda2e2992ca16b8fe628e417f8a4243af0ed4a | /step13_offdiagonalLHEstudy/getsmearing.py | 5cfca6ec5e81bf48a0275a1aed1d0eb2a722cf79 | [] | no_license | hroskes/anomalouscouplings | 01f46c0d38f5332c58538b0bdea373704cf06fcc | 391eb7fbd52d8605b09ca2e461b1789e019b1da0 | refs/heads/production | 2021-11-24T22:37:48.932830 | 2021-10-29T18:38:54 | 2021-10-29T18:38:54 | 60,651,233 | 0 | 2 | null | 2017-01-24T14:20:56 | 2016-06-07T22:37:23 | Python | UTF-8 | Python | false | false | 7,019 | py | #!/usr/bin/env python
import os
import ROOT
from helperstuff import config
from helperstuff.samples import Sample
from helperstuff.utilities import cache, mkdir_p, tfiles, tlvfromptetaphim
# Cached TF1 factory so repeated fit-function construction reuses objects.
TF1 = cache(ROOT.TF1)
# Reference sample: "0+" VBF production (hypothesis label comes from the
# samples helper) for the combination production campaign.
s = Sample("VBF", "0+", config.productionforcombine)
f = tfiles[s.withdiscriminantsfile()]
t = f.candTree
# Residual histograms, one per (pairing, kinematic variable):
#   lhereco  = reco minus LHE, lhegen = gen minus LHE, genreco = reco minus gen
# Jets only get the LHE-vs-reco comparison; leptons get all three pairings.
hlherecojetpt = ROOT.TH1F("hlherecojetpt", "", 100, -100, 100)
hlherecojeteta = ROOT.TH1F("hlherecojeteta", "", 100, -1, 1)
hlherecojetphi = ROOT.TH1F("hlherecojetphi", "", 100, -1, 1)
hlhegenelectronpt = ROOT.TH1F("hlhegenelectronpt", "", 100, -10, 10)
hlhegenelectroneta = ROOT.TH1F("hlhegenelectroneta", "", 100, -.2, .2)
hlhegenelectronphi = ROOT.TH1F("hlhegenelectronphi", "", 100, -.2, .2)
hgenrecoelectronpt = ROOT.TH1F("hgenrecoelectronpt", "", 100, -10, 10)
hgenrecoelectroneta = ROOT.TH1F("hgenrecoelectroneta", "", 100, -.2, .2)
hgenrecoelectronphi = ROOT.TH1F("hgenrecoelectronphi", "", 100, -.2, .2)
hlherecoelectronpt = ROOT.TH1F("hlherecoelectronpt", "", 100, -10, 10)
hlherecoelectroneta = ROOT.TH1F("hlherecoelectroneta", "", 100, -.2, .2)
hlherecoelectronphi = ROOT.TH1F("hlherecoelectronphi", "", 100, -.2, .2)
hlhegenmuonpt = ROOT.TH1F("hlhegenmuonpt", "", 100, -10, 10)
hlhegenmuoneta = ROOT.TH1F("hlhegenmuoneta", "", 100, -.2, .2)
hlhegenmuonphi = ROOT.TH1F("hlhegenmuonphi", "", 100, -.2, .2)
hgenrecomuonpt = ROOT.TH1F("hgenrecomuonpt", "", 100, -10, 10)
hgenrecomuoneta = ROOT.TH1F("hgenrecomuoneta", "", 100, -.2, .2)
hgenrecomuonphi = ROOT.TH1F("hgenrecomuonphi", "", 100, -.2, .2)
hlherecomuonpt = ROOT.TH1F("hlherecomuonpt", "", 100, -10, 10)
hlherecomuoneta = ROOT.TH1F("hlherecomuoneta", "", 100, -.2, .2)
hlherecomuonphi = ROOT.TH1F("hlherecomuonphi", "", 100, -.2, .2)
# All histograms collected so the fit/save loop at the bottom can iterate once.
hists = [
    hlherecojetpt, hlherecojeteta, hlherecojetphi,
    hlhegenelectronpt, hlhegenelectroneta, hlhegenelectronphi,
    hgenrecoelectronpt, hgenrecoelectroneta, hgenrecoelectronphi,
    hlherecoelectronpt, hlherecoelectroneta, hlherecoelectronphi,
    hlhegenmuonpt, hlhegenmuoneta, hlhegenmuonphi,
    hgenrecomuonpt, hgenrecomuoneta, hgenrecomuonphi,
    hlherecomuonpt, hlherecomuoneta, hlherecomuonphi,
]
length = t.GetEntries()
for i, entry in enumerate(t, start=1):
jets = []
LHEjets = []
electrons = []
genelectrons = []
LHEelectrons = []
muons = []
genmuons = []
LHEmuons = []
for pt, eta, phi, m, id in zip(t.LHEDaughterPt, t.LHEDaughterEta, t.LHEDaughterPhi, t.LHEDaughterMass, t.LHEDaughterId):
if abs(id) == 11:
LHEelectrons.append(tlvfromptetaphim(pt, eta, phi, m))
elif abs(id) == 13:
LHEmuons.append(tlvfromptetaphim(pt, eta, phi, m))
for pt, eta, phi, m, id in zip(t.LHEAssociatedParticlePt, t.LHEAssociatedParticleEta, t.LHEAssociatedParticlePhi, t.LHEAssociatedParticleMass, t.LHEAssociatedParticleId):
if 1 <= abs(id) <= 6 or id == 21:
LHEjets.append(tlvfromptetaphim(pt, eta, phi, m))
for pt, eta, phi, id in zip(*[[getattr(t, "GenLep{}{}".format(j, var)) for j in range(1, 5)] for var in ("Pt", "Eta", "Phi", "Id")]):
m = 0
if abs(id) == 11:
genelectrons.append(tlvfromptetaphim(pt, eta, phi, m))
elif abs(id) == 13:
genmuons.append(tlvfromptetaphim(pt, eta, phi, m))
for pt, eta, phi, id in zip(t.LepPt, t.LepEta, t.LepPhi, t.LepLepId):
if abs(id) == 11:
electrons.append(tlvfromptetaphim(pt, eta, phi, m))
elif abs(id) == 13:
muons.append(tlvfromptetaphim(pt, eta, phi, m))
for pt, eta, phi, mass in zip(t.JetPt, t.JetEta, t.JetPhi, t.JetMass):
jets.append(tlvfromptetaphim(pt, eta, phi, 0))
for lhejet in LHEjets:
if not jets: continue
recojet = min(jets, key=lambda jet: jet.DeltaR(lhejet))
if lhejet != min(LHEjets, key=lambda jet: jet.DeltaR(recojet)): continue
hlherecojetpt.Fill(recojet.Pt() - lhejet.Pt())
hlherecojeteta.Fill(recojet.Eta() - lhejet.Eta())
hlherecojetphi.Fill(recojet.Phi() - lhejet.Phi())
for lheelectron in LHEelectrons:
recoelectron = min(electrons, key=lambda electron: electron.DeltaR(lheelectron))
if lheelectron != min(LHEelectrons, key=lambda electron: electron.DeltaR(recoelectron)): continue
hlherecoelectronpt.Fill(recoelectron.Pt() - lheelectron.Pt())
hlherecoelectroneta.Fill(recoelectron.Eta() - lheelectron.Eta())
hlherecoelectronphi.Fill(recoelectron.Phi() - lheelectron.Phi())
for genelectron in genelectrons:
recoelectron = min(electrons, key=lambda electron: electron.DeltaR(genelectron))
if genelectron != min(genelectrons, key=lambda electron: electron.DeltaR(recoelectron)): continue
hgenrecoelectronpt.Fill(recoelectron.Pt() - genelectron.Pt())
hgenrecoelectroneta.Fill(recoelectron.Eta() - genelectron.Eta())
hgenrecoelectronphi.Fill(recoelectron.Phi() - genelectron.Phi())
for lheelectron in LHEelectrons:
genelectron = min(genelectrons, key=lambda electron: electron.DeltaR(lheelectron))
if lheelectron != min(LHEelectrons, key=lambda electron: electron.DeltaR(genelectron)): continue
hlhegenelectronpt.Fill(genelectron.Pt() - lheelectron.Pt())
hlhegenelectroneta.Fill(genelectron.Eta() - lheelectron.Eta())
hlhegenelectronphi.Fill(genelectron.Phi() - lheelectron.Phi())
for lhemuon in LHEmuons:
recomuon = min(muons, key=lambda muon: muon.DeltaR(lhemuon))
if lhemuon != min(LHEmuons, key=lambda muon: muon.DeltaR(recomuon)): continue
hlherecomuonpt.Fill(recomuon.Pt() - lhemuon.Pt())
hlherecomuoneta.Fill(recomuon.Eta() - lhemuon.Eta())
hlherecomuonphi.Fill(recomuon.Phi() - lhemuon.Phi())
for genmuon in genmuons:
recomuon = min(muons, key=lambda muon: muon.DeltaR(genmuon))
if genmuon != min(genmuons, key=lambda muon: muon.DeltaR(recomuon)): continue
hgenrecomuonpt.Fill(recomuon.Pt() - genmuon.Pt())
hgenrecomuoneta.Fill(recomuon.Eta() - genmuon.Eta())
hgenrecomuonphi.Fill(recomuon.Phi() - genmuon.Phi())
for lhemuon in LHEmuons:
genmuon = min(genmuons, key=lambda muon: muon.DeltaR(lhemuon))
if lhemuon != min(LHEmuons, key=lambda muon: muon.DeltaR(genmuon)): continue
hlhegenmuonpt.Fill(genmuon.Pt() - lhemuon.Pt())
hlhegenmuoneta.Fill(genmuon.Eta() - lhemuon.Eta())
hlhegenmuonphi.Fill(genmuon.Phi() - lhemuon.Phi())
if i % 1000 == 0 or i == length:
print i, "/", length
c = ROOT.TCanvas()
saveasdir = os.path.join(config.plotsbasedir, "offdiagonalLHEstudy", "resolution")
mkdir_p(saveasdir)
for h in hists:
    # PERF/BUGFIX: draw and fit each histogram once, outside the format loop.
    # Previously the Gaussian fit was redone for every output extension (4x),
    # producing identical results at four times the cost.
    h.Draw()
    # Single-Gaussian fit over the full histogram range, seeded with the
    # histogram's own statistics.
    f = TF1("f"+h.GetName(), "gaus(0)", h.GetXaxis().GetXmin(), h.GetXaxis().GetXmax())
    f.SetParameters(h.GetEntries(), h.GetMean(), h.GetRMS())
    h.Fit(f)
    for ext in "png eps root pdf".split():
        c.SaveAs(os.path.join(saveasdir, h.GetName()+"."+ext))
| [
"jroskes1@jhu.edu"
] | jroskes1@jhu.edu |
7d12a67de1a5c944cff5b801c157aed83ebebbf3 | 800af8d10309c2c0bb1a55f9aeaa501475cab559 | /status_app/receivers.py | 8b537798311225c59a12b6a13146381478e9c59d | [
"Apache-2.0"
] | permissive | vegitron/status-app | 2ca0862fb7daf4b05adcc4dac6bf743008746abd | 2bafcaf94aa50e443d6521de204da7e27b4d8ac7 | refs/heads/master | 2020-04-13T19:55:26.941839 | 2014-04-10T23:28:23 | 2014-04-10T23:28:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from status_app.signals import request_signal
from status_app.dispatch import dispatch
from status_app.models import RawEvent
from django.dispatch import receiver
from datetime import datetime
import socket
def request_receiver(sender, status_code, path_info, request_time, **kwargs):
    """Record a pass/fail event and a response-time event for one request.

    A status code in [200, 400) counts as a pass; failures also record the
    requested path so the offending URL can be inspected later.
    """
    host = socket.gethostname()
    succeeded = 200 <= status_code < 400
    failed_path = '' if succeeded else path_info
    dispatch('application_response', RawEvent.PASS_FAIL, datetime.now(), succeeded, failed_path, host)
    dispatch('application_response_time', RawEvent.INTERVAL, datetime.now(), request_time, '', host)
def get_signal():
    """Return the signal that request_receiver is meant to be connected to."""
    return request_signal
| [
"pmichaud@uw.edu"
] | pmichaud@uw.edu |
cafc8b76a370cd5d56727b8016d1b128aa1559a9 | 71bec5b969aa3c9f40f839cff24ac598d8f7fd28 | /DomeOne/DomeTornadoQuery.py | 3501946537a1c1f6035212e30a0e6ce64071b759 | [] | no_license | dong-c-git/TornadoProjectDome | 6c3ba2f69c333c30bbdf723dd750ce0118436a09 | 4124fe36f409a87b4615eafd9ba59dabc21a12f6 | refs/heads/master | 2020-08-17T20:11:57.250887 | 2019-11-01T04:46:22 | 2019-11-01T04:46:22 | 215,706,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,864 | py | #coding:utf-8
import tornado.web
import tornado.ioloop
import tornado.options
from tornado.web import MissingArgumentError
import tornado.httpserver
# Demo of Tornado's request-data accessor methods (translated from Chinese).
# Register a --port command-line option (parsed in the __main__ block below).
tornado.options.define("port",default=8090,type=int,help="need runserver give port")
class IndexHandler(tornado.web.RequestHandler):
    """Index-page handler demonstrating every request-argument accessor."""
    # def get(self):
    # #     self.write("hello this is tornado server")
    def post(self):
        """Echo back the value of key "a"/"b"/"c" as seen by each accessor."""
        # Query-string-only accessors.
        query_arg = self.get_query_argument("a")
        query_args = self.get_query_arguments("a")
        # Body-only accessors.
        body_arg = self.get_body_argument("a")
        body_args = self.get_body_arguments("a",strip=False)
        # Combined accessors (query string + body).
        arg = self.get_argument("a")
        # BUGFIX: was `self.get_argumens("a")` -- a typo for get_arguments()
        # that raised AttributeError at request time.
        args = self.get_arguments("a")
        # A default is returned when the key ("b") is absent.
        default_arg = self.get_argument("b","itcast")
        default_args = self.get_arguments("b")
        try:
            # The singular accessor raises when a key ("c") is missing...
            missing_arg = self.get_argument("c")
        except MissingArgumentError as e:
            missing_arg = "we catched the MissingArgumentError"
            print(e)
        # ...while the plural accessor returns an empty list instead.
        missing_args = self.get_arguments("c")
        rep = "query_arg:%s<br/>" % query_arg
        rep += "query_args:%s<br/>" % query_args
        rep += "body_arg:%s<br/>" % body_arg
        rep += "body_args:%s<br/>" % body_args
        rep += "arg:%s<br/>" % arg
        rep += "args:%s<br/>" % args
        rep += "default_arg:%s<br/>" % default_arg
        rep += "default_args:%s<br/>" % default_args
        rep += "missing_arg:%s<br/>" % missing_arg
        rep += "missing_args:%s<br/>" % missing_args
        self.write(rep)
if __name__ == '__main__':
    # Parse --port (and the other defined options) from the command line.
    tornado.options.parse_command_line()
    app = tornado.web.Application([(r"/",IndexHandler),])
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.bind(tornado.options.options.port)
    # start(0) lets Tornado pick the number of worker subprocesses -- see the
    # tornado.httpserver documentation for the exact forking behaviour.
    http_server.start(0)
    tornado.ioloop.IOLoop.current().start()
| [
"dc111000@hotmail.com"
] | dc111000@hotmail.com |
5acc3e2a04a63b176b35628dea94a3af18ae9626 | c04fb2e9ee96987b308cd60defb179f800486858 | /_unittests/ut_documentation/test_notebook_cheat_sheet_html.py | c287e6ab4a557832a54e5343e74a39d6c73816e3 | [
"MIT"
] | permissive | sdpython/ensae_projects | ca30304e04c938b1d79abef5e54dac0dc531421f | 36033021726144e66fd420cc902f32187a650b18 | refs/heads/master | 2023-02-08T01:32:47.892757 | 2023-02-02T00:18:00 | 2023-02-02T00:18:00 | 45,864,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | # -*- coding: utf-8 -*-
"""
@brief test log(time=32s)
"""
import sys
import os
import unittest
import shutil
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
from pyquickhelper.pycode import fix_tkinter_issues_virtualenv
from pyquickhelper.ipythonhelper import execute_notebook_list_finalize_ut
from ensae_projects.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_notebook
import ensae_projects
class TestNotebookCheatSheetHtml(unittest.TestCase):
    """Runs the HTML cheat-sheet notebooks end to end and fails on any error."""
    def test_notebook_cheatsheet_html(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        if is_travis_or_appveyor() == "appveyor":
            # connectivity issue
            return
        fix_tkinter_issues_virtualenv()
        temp = get_temp_folder(__file__, "temp_cheat_sheet_html")
        keepnote = ls_notebooks("cheat_sheets")
        self.assertTrue(len(keepnote) > 0)
        # Drop entries whose path contains "chsh_files" (presumably ancillary
        # files rather than runnable notebooks -- confirm).
        keepnote = [_ for _ in keepnote if "chsh_files" not in _]
        if len(keepnote) > 0:
            # Copy geo_data.zip into the working folder, presumably so the
            # notebooks can read it via a relative path.
            fold = os.path.dirname(keepnote[0])
            copy = [os.path.join(fold, "geo_data.zip")]
            for c in copy:
                shutil.copy(c, temp)
        # Execute only notebooks whose name contains "html" (and not "deviner").
        res = execute_notebooks(temp, keepnote,
                                lambda i, n: "deviner" not in n and "html" in n,
                                fLOG=fLOG,
                                clean_function=clean_function_notebook)
        execute_notebook_list_finalize_ut(
            res, fLOG=fLOG, dump=ensae_projects)
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| [
"xavier.dupre@gmail.com"
] | xavier.dupre@gmail.com |
fb4b6effdfe059fec31fd68302a5b80865c22312 | 879df09d88e0a0db13c05ae9be9f4561197ac06e | /settings.py | 4a824c3c11b61dc139c9698b85d3ca041d5cf29b | [
"MIT"
] | permissive | prateeknagpal/crypto-trader | 240b6515ce31c1c67635d1284a96db2818989bab | 7f2db4dbc66f080f9e72399031c081093aefc1ba | refs/heads/master | 2021-09-01T22:20:53.025411 | 2017-12-28T22:11:28 | 2017-12-28T22:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | import os
from decimal import Decimal

# --- Gemini API endpoints ---------------------------------------------------
API_VERSION = 1
API_URL = 'https://api.gemini.com'
API_WS_URL = 'wss://api.gemini.com'
STARTING_NONCE = 800  # must always increase incrementally
SYMBOL = 'ethusd'  # currency pair to trade
POLL_DELAY = 30  # runloop interval in seconds
MAX_ACTIVE_ORDERS = 3  # maximum number of active orders to track
# --- Trading thresholds -----------------------------------------------------
# BUGFIX: these Decimals are now built from strings. Decimal(0.01) inherits
# the binary-float representation error (0.01000000000000000020816...),
# defeating the point of using Decimal for money values.
USD_MIN_ORDER_AMT = Decimal("1.00")  # min amount to use when making new orders
USD_MAX_ORDER_AMT = Decimal("5.00")  # max amount to use when making new orders
MAX_GAIN_RATIO = Decimal("0.01")  # maximum percentage gains before selling the order
MAX_LOSS_RATIO = Decimal("-0.006")  # maximum percentage losses before selling the order
OVERPAY_RATIO = Decimal("0.005")  # percentage to pay over current price in order to guarantee orders closing quickly
USD_MAX_NET_GAINS = 100  # total maximum USD gains before quitting the program
USD_MAX_NET_LOSS = -20  # total maximum USD losses before quitting the program
DATA_DIR = './data'  # where to store the state and logs (was an f-string with no placeholders)
try:
    # NOTE(review): Python >= 3.6 ships a stdlib module also named `secrets`;
    # if the local secrets.py is missing, the stdlib module is imported
    # silently and this ImportError branch never fires. Consider renaming the
    # local module.
    from secrets import *  # copy and edit secrets_default.py to secrets.py
except ImportError:
    print('Copy secrets_default.py to secrets.py to add your API credentials')
    raise SystemExit(1)
| [
"git@nicksweeting.com"
] | git@nicksweeting.com |
33a9c0319e08a8b7003a880482162b1368ca8458 | 37d4af0a33d47d6b264acb769a276a500871ab90 | /Python_Code_Beginner/07_语法进阶/hm_03_全局变量.py | c698dde858327048a610c338650901b68ed452d8 | [] | no_license | yzjbryant/YZJ_MIX_Code | 86fe3c8a265c69e5493f70b9753491c0f97d75d4 | ab3a7b8731730a714f013f59bbf551d3d50c3b33 | refs/heads/master | 2022-11-09T01:07:48.944817 | 2020-07-01T09:17:01 | 2020-07-01T09:17:01 | 271,439,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | #全局变量
# A module-level (global) variable, read by both demo functions below.
num = 10
def demo1():
    """Reading a global inside a function needs no special declaration."""
    print("demo1 ==> {:d}".format(num))
def demo2():
    """Same as demo1: both functions see the single shared global."""
    print("demo2 ==> {:d}".format(num))
demo1()
demo2() | [
"yinzhijian2018@163.com"
] | yinzhijian2018@163.com |
30c8cb418a4fa444872b6d5bcdc2a135eb4ac4d2 | 0c466d2632b4c85f2cf0312e94ee826867039bc6 | /tests/conftest.py | f1394cf45d937fa5d0a868eaedadc268640de372 | [
"Apache-2.0"
] | permissive | rst0git/pytest-salt-factories | 6c917c9749b6eed92dcdc5cac411d9db83608d7f | d614c15700327e0d03a7464f4076523b93357857 | refs/heads/master | 2022-11-24T20:41:03.138928 | 2020-07-29T17:24:07 | 2020-07-29T17:24:07 | 282,475,745 | 0 | 0 | null | 2020-07-25T15:52:59 | 2020-07-25T15:52:58 | null | UTF-8 | Python | false | false | 2,316 | py | import functools
import logging
import os
import stat
import tempfile
import textwrap
import pytest
import salt.version
# Module-level logger for the temp-file helpers below.
log = logging.getLogger(__name__)
# Enable pytest's "pytester" plugin so tests can run nested pytest sessions.
pytest_plugins = ["pytester"]
def pytest_report_header():
    """Add the installed Salt version to the pytest session header."""
    version = salt.version.__version__
    return "salt-version: %s" % (version,)
class Tempfiles:
    """
    Class which generates temporary files and cleans them up when the
    requesting test finishes (cleanup runs via pytest finalizers).
    """
    def __init__(self, request):
        # `request` is the pytest fixture-request object; addfinalizer() on it
        # is used below to schedule file deletion.
        self.request = request
    def makepyfile(self, contents, prefix=None, executable=False):
        """
        Creates a python file and returns its path
        """
        # delete=False: the file must outlive this call; removal is handled by
        # the finalizer registered further down.
        tfile = tempfile.NamedTemporaryFile("w", prefix=prefix or "tmp", suffix=".py", delete=False)
        contents = textwrap.dedent(contents.lstrip("\n")).strip()
        tfile.write(contents)
        tfile.close()
        if executable is True:
            # Add the owner-executable bit so the script can be run directly.
            st = os.stat(tfile.name)
            os.chmod(tfile.name, st.st_mode | stat.S_IEXEC)
        self.request.addfinalizer(functools.partial(self._delete_temp_file, tfile.name))
        with open(tfile.name) as rfh:
            log.debug(
                "Created python file with contents:\n>>>>> %s >>>>>\n%s\n<<<<< %s <<<<<\n",
                tfile.name,
                rfh.read(),
                tfile.name,
            )
        return tfile.name
    def makeslsfile(self, contents, name=None):
        """
        Creates an sls file and returns its path
        """
        if name is None:
            # NOTE(review): only the generated name is used; the open handle
            # from NamedTemporaryFile is never explicitly closed here.
            tfile = tempfile.NamedTemporaryFile("w", suffix=".sls", delete=False)
            name = tfile.name
        with open(name, "w") as wfh:
            contents = textwrap.dedent(contents.lstrip("\n")).strip()
            wfh.write(contents)
        self.request.addfinalizer(functools.partial(self._delete_temp_file, name))
        with open(name) as rfh:
            log.debug(
                "Created SLS file with contents:\n>>>>> %s >>>>>\n%s\n<<<<< %s <<<<<\n",
                name,
                rfh.read(),
                name,
            )
        return name
    def _delete_temp_file(self, fpath):
        """
        Cleanup the temporary path
        """
        if os.path.exists(fpath):
            os.unlink(fpath)
@pytest.fixture
def tempfiles(request):
    """
    Temporary files fixture: yields a Tempfiles helper bound to the current
    test's request object, so created files are removed when the test ends.
    """
    return Tempfiles(request)
| [
"pedro@algarvio.me"
] | pedro@algarvio.me |
9db38d413747215cf959508e754da4cbf30e1ba7 | de9b8b7192a0a81e9249823bb2b86f0b7e452863 | /.history/classes/Handler_20171107110755.py | c6aa301d0a2f1cbd3fc1178b1fa74d6704968c24 | [
"MIT"
] | permissive | reecebenson/uwe-dadsa-tennis-a | f5eaeb1b96d4e61f29279514e68eeea8ad6533db | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | refs/heads/master | 2023-07-08T16:13:23.963348 | 2017-11-30T12:07:01 | 2017-11-30T12:07:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,270 | py | # DADSA - Assignment 1
# Reece Benson
import json
import random
from math import ceil, floor
from classes import Player
from classes import Season
from classes import Tournament
from classes import Round
from classes import Match
class Handler():
    """Loads seasons, players and prize money from ./data JSON files.

    NOTE(review): the attributes below are *class-level* defaults; `seasons`
    is a mutable dict shared by every Handler instance.
    """
    # Define the variables we will be using
    app = None
    prize_money = None
    player_count = None
    seasons = { }
    def __init__(self, _app):
        if(_app.debug):
            print("[LOAD]: Loaded Handler!")
        # Define our Application within this Handler class
        self.app = _app
    # Used to load all data into memory
    def load(self):
        # This function will create our seasons and implement the genders & players
        self.load_seasons()
        self.load_players()
        self.load_prize_money()
    #TODO: Implement load_seasons()
    # Used to load seasons into memory
    def load_seasons(self):
        with open('./data/seasons.json') as tData:
            data = json.load(tData)
            for season in data:
                # If the season does not yet exist, create it
                if(not season in self.get_seasons()):
                    self.seasons.update({ season: Season.Season(self.app, season, data[season]) })
    # Generate our rounds from our player list from scratch
    def generate_rounds(self):
        # Write our new data to memory
        for seasonId in self.get_seasons():
            season = self.get_season(seasonId)
            players = season.players()
            # Generate our rounds
            for gender in players:
                # Default Values
                round_cap = 3
                # Do we have a Round Cap overrider for this gender?
                if(gender + "_cap" in season.settings()):
                    round_cap = season.settings()[gender + "_cap"]
                # Create our first round
                _round_one = Round.Round(self.app, gender, "round_1")
                _round_one.set_cap(round_cap)
                # Create our first round data: random pairings with random
                # scores, the winner reaching round_cap.
                rand_players = random.sample(players[gender], len(players[gender]))
                for i in range(len(rand_players) // 2):
                    # Grab our versus players
                    p_one = rand_players[i * 2]
                    p_two = rand_players[(i * 2) + 1]
                    # Generate some scores
                    p_one_score = random.randint(0, round_cap - 1)
                    p_two_score = random.randint(0, round_cap - 1)
                    # Make a random player the winner
                    who = random.randint(0, 1)
                    if(who == 0): p_one_score = round_cap
                    else: p_two_score = round_cap
                    # Append our random data as a Match
                    #round_data[gender].append({ p_one.name(): p_one_score, p_two.name(): p_two_score })
                    #round_data[round_name][gender].append(Match.Match(round_name, p_one, p_two, p_one_score, p_two_score))
                    _round_one.add_match(Match.Match(_round_one, p_one, p_two, p_one_score, p_two_score))
                # Append our first round to our season
                season.add_round(gender, _round_one)
                # Items in Round
                print("{0} has {1} matches".format(_round_one.name(), _round_one.match_count()))
                # Get the winners from each round
                # NOTE(review): rounds 2..round_count are created but never
                # populated or attached to the season -- work in progress.
                for r in range(2, season.settings()['round_count'] + 1):
                    # Define variables
                    round_name = "round_"+str(r)
                    # Define our Round
                    _round = Round.Round(self.app, gender, round_name)
                    # Items in Round
                    print("{0} has {1} matches".format(_round.name(), _round.match_count()))
                # NOTE(review): this break means only the first gender of each
                # season is processed -- confirm whether intentional.
                break
        # Debug
        if(self.app.debug):
            print("[LOAD]: Generated {1} rounds for season: '{0}'".format(season.name(), season.settings()['round_count']))
    # End of generate_rounds()
    # Used to load prize money
    def load_prize_money(self):
        with open('./data/rankingPoints.json') as tData:
            data = json.load(tData)
            # Fallback on a non-existent occurrence
            if(self.player_count == None):
                self.player_count = 100
            # NOTE(review): initialised to a dict here, but immediately
            # replaced by the list comprehension below.
            if(self.prize_money == None):
                self.prize_money = { }
            # Set the prize money to the actual rank and points received
            # (each key repeated once per rank entry under it).
            self.prize_money = [ pts for pts in data for rank in data[pts] ]
            # We want to set the prize money for all indexes possible via the player
            self.prize_money += [ 0 ] * ( self.player_count - len(self.prize_money))
    # Used to load players from all seasons into memory
    def load_players(self):
        # Set our player (in gender) count
        self.player_count = 0
        with open('./data/players.json') as tData:
            data = json.load(tData)
            # Players are classed within Seasons
            for season in data:
                # If the season does not yet exist, ignore this input
                if(not season in self.get_seasons()):
                    continue
                # Players are then stored within Gender classifications
                for gender in data[season]:
                    if(not gender in self.get_season(season).players()):
                        self.get_season(season).players()[gender] = [ ]
                    # Append our player in the season, within the gender
                    for player in data[season][gender]:
                        #TODO: Change to using Player class
                        self.get_season(season).add_player(player, gender)
                        # Update our player count (largest gender list seen)
                        if(len(self.get_season(season).players()[gender]) > self.player_count):
                            self.player_count = len(self.get_season(season).players()[gender])
    def get_seasons(self):
        # Return the season-id -> Season mapping.
        return self.seasons
    def get_season(self, season_id):
        # Return the Season for season_id, or None when it is unknown.
        if(season_id in self.seasons):
            return self.seasons[season_id]
        else:
            return None
"business@reecebenson.me"
] | business@reecebenson.me |
1dfa2fdb959acac2846a946ecd49380c5954d3c5 | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /2226.py | 52c964524b018bed57f7213a24f68afd26aaddff | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | def toCountSystem(x, y):
def toCountSystem(x, y):
    """Return non-negative integer x written in base y (2 <= y <= 36).

    Improvements over the original:
    - x == 0 now yields '0' instead of the empty string;
    - digit values 10..35 are rendered as letters 'A'..'Z', so bases above
      ten no longer produce ambiguous multi-character "digits".
    """
    digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if x == 0:
        return '0'
    out = []
    while x:
        out.append(digits[x % y])
        x //= y
    # Remainders come out least-significant first, so reverse at the end.
    return ''.join(reversed(out))
print(toCountSystem(9**9 + 3**21 - 7, 3).count('0')) | [
"a926788@gmail.com"
] | a926788@gmail.com |
7e08d310b39198d59087ba94ec915a777c101447 | a2fae6522c0526e81032d700e750dbc4b55e308b | /twemoir/lib/adminfields/widgets.py | 05702f365ff10dd8bbd84727dcc1eb164d28b411 | [] | no_license | fish2000/django-twemoir | e895039e4ecd0a01baa9e35002fe0e00e20f6a4f | 8caa7e5319055f54e0d89457780605994622e8d9 | refs/heads/master | 2020-06-05T13:16:47.036385 | 2014-01-21T02:42:30 | 2014-01-21T02:42:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | py | import simplejson
from django.forms import Widget
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.forms.widgets import flatatt
class JsonPairInputs(Widget):
    """
    :author: Huy Nguyen
    A widget that displays JSON Key Value Pairs as a list of text input box pairs

    NOTE(review): this module is Python 2 era Django code (dict.has_key,
    force_unicode); porting notes are inlined below.

    Usage (in forms.py)::
        jsonfield = forms.CharField(
            label="Example JSON Key Value Field",
            required = False,
            widget = JsonPairInputs(
                val_attrs={'size':35},
                key_attrs={'class':'large'}
            )
        )
    """
    def __init__(self, key_attrs={}, val_attrs={}, *args, **kwargs):
        """
        :param key_attrs: HTML attributes applied to the 1st input box
        :param val_attrs: HTML attributes applied to the 2nd input box
        """
        # NOTE(review): mutable default arguments are shared across instances
        # that rely on the defaults; harmless while only read, but fragile.
        self.key_attrs = key_attrs
        self.val_attrs = val_attrs
        Widget.__init__(self, *args, **kwargs)
    def render(self, name, value, attrs=None):
        """
        Renders this widget into an HTML string

        :param name: Name of the field
        :type name: str
        :param value: A json string of a two-tuple list automatically passed in by django
        :type value: str
        :param attrs: automatically passed in by django (unused in this function)
        :type attrs: dict
        """
        # NOTE(review): `value is ''` relies on string interning; equality
        # (`== ''`) would be the robust comparison.
        if value is None or value is '':
            value = '{}'
        if type(value) == type({}):
            twotuple = value.items()
        else:
            twotuple = simplejson.loads(force_unicode(value))
        ret = []
        if value and len(value) > 0:
            # Emit one pair of <input> boxes per (key, value) tuple.
            for k,v in twotuple:
                ctx = {'key':k,
                       'value':v,
                       'fieldname':name,
                       'key_attrs': flatatt(self.key_attrs),
                       'val_attrs': flatatt(self.val_attrs) }
                ret.append('<input type="text" name="json_key[%(fieldname)s]" value="%(key)s" %(key_attrs)s> <input type="text" name="json_value[%(fieldname)s]" value="%(value)s" %(val_attrs)s><br />' % ctx)
        return mark_safe("".join(ret))
    def value_from_datadict(self, data, files, name):
        """
        Returns the simplejson representation of the key-value pairs
        sent in the POST parameters

        :param data: request.POST or request.GET parameters
        :type data: dict
        :param files: request.FILES
        :type files: list
        :param name: The name of the field associated with this widget
        :type name: str
        """
        # NOTE(review): dict.has_key() is Python 2 only; `key in data` is the
        # modern spelling.
        if data.has_key('json_key[%s]' % name) and data.has_key('json_value[%s]' % name):
            keys = data.getlist("json_key[%s]" % name)
            values = data.getlist("json_value[%s]" % name)
            # Keep only pairs whose key is non-empty.
            twotuple = []
            for key, value in zip(keys, values):
                if len(key) > 0:
                    twotuple += [(key,value)]
            jsontext = simplejson.dumps(twotuple)
        # NOTE(review): if the expected keys are absent from `data`, jsontext
        # is never assigned and this raises UnboundLocalError - latent bug.
        return jsontext
| [
"fish2000@gmail.com"
] | fish2000@gmail.com |
86b9aa05550020a91166857df0221582f99bafbf | 590126fdbce9d0f92d6c49722c1a953b06e7a4d5 | /aat/exchange/exchange.py | 5d6d4313b8668c578f16f27f38b09ffa8ad0522a | [
"Apache-2.0"
] | permissive | galdamour/aat | 666fda492f0d13e5658d4f778fdbfdc4cdc321de | 458cb1ac33878a76bd9bf844e8362a5a0a9ec291 | refs/heads/master | 2023-01-11T05:32:42.086921 | 2020-11-16T16:40:54 | 2020-11-16T16:40:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,541 | py | from abc import abstractmethod
from .base.market_data import _MarketData
from .base.order_entry import _OrderEntry
_EXCHANGES = {}  # registry: exchange name -> Exchange subclass (see registerExchange)
class Exchange(_MarketData, _OrderEntry):
    '''Generic representation of an exchange. There are two primary functionalities of an exchange.

    Market Data Source:
        exchanges can stream data to the engine

    Order Entry Sink:
        exchanges can be queried for data, or send data
    '''
    def __init__(self, exchange):
        # `exchange` identifies this instance; its exact type is defined by
        # the callers (not visible here - confirm at construction sites).
        self._exchange = exchange
    def exchange(self):
        # Accessor for the identifier passed to the constructor.
        return self._exchange
    @staticmethod
    def registerExchange(exchange_name, clazz):
        # Register a concrete Exchange implementation under `exchange_name`.
        _EXCHANGES[exchange_name] = clazz
    @staticmethod
    def exchanges(exchange=None):
        # With an argument: return that registered class, raising on unknown
        # names; without one: return the list of registered names.
        if exchange:
            if exchange not in _EXCHANGES:
                raise Exception(f'Unknown exchange type: {exchange}')
            return _EXCHANGES[exchange]
        return list(_EXCHANGES.keys())
    @abstractmethod
    async def connect(self):
        '''connect to exchange. should be asynchronous.

        For OrderEntry-only, can just return None
        '''
    async def lookup(self, instrument):
        '''lookup an instrument on the exchange'''
        # Default implementation: nothing found.
        return []
    # ****************** #
    # Inherited methods #
    # From _MarketData
    #
    # async def tick(self):
    # def instruments(self):
    # def subscribe(self, instrument):
    # From _OrderEntry
    #
    # async def newOrder(self, order: Order):
    # def accounts(self) -> List:
    # ************************** #
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
5691bf63eeb65a0f6313e9b740c2eb8a3b14b60c | 401c6b56c22c762a88a46ce70a35a8d19f0fb863 | /Libs/Oryx.Web.Core.WebInstance/OryxWeb/wwwroot/Chat/chat-master/tests/test_semantic.py | ab8a21d8e90522154aa7516e53f8881e038a7dcf | [
"MIT"
] | permissive | OryxLib/Oryx.FastAdmin | 5f42993e3d7a1a61439a9efd2ee5889bbba6875c | b798d534baf3a07c8bff72e1a80faba119296cb6 | refs/heads/master | 2022-12-10T19:58:33.381271 | 2020-04-15T15:46:46 | 2020-04-15T15:46:46 | 255,535,071 | 3 | 2 | null | 2022-12-08T10:09:40 | 2020-04-14T07:03:15 | C# | UTF-8 | Python | false | false | 2,513 | py | # -*- coding: utf-8 -*-
import sys
# Make the repository root importable when running this file from tests/.
sys.path.append("../")
from unittest import TestCase, main
from chat.semantic import synonym_cut, similarity, similarity2, build_semantic_matrix
from chat.mytools import time_me
class TestMe(TestCase):
    """Manual checks for the semantic-similarity helpers.

    The methods print similarity scores for Chinese sentence pairs rather
    than asserting thresholds, so results are inspected by eye.
    """
    def setUp(self):
        # No shared fixtures needed; present for unittest completeness.
        pass
    @time_me()
    def test_similarity(self):
        # Chinese sentence pairs ranging from identical, through paraphrases,
        # to unrelated sentences.
        data = [
            ("黄克功", "王怀安"),
            ("黄克功", "黄克功"),
            ("宋朝的历史", "明朝的历史"),
            ("电脑", "打印机"),
            ("怎么了?,。。。。", "怎么了?..,#$"),
            ("我喜欢你", "你喜欢我"),
            ("我要取票", "我要取票"),
            ("存钱", "取钱"),
            ("镇店之宝", "有什么镇店之宝"),
            ("中国", "中华人民共和国"),
            ("喧闹的大街上人山人海", "热闹的街道上人来人往"),
            ("专心致志", "全神贯注"),
            ("爷爷爱吃土豆", "祖父喜欢吃马铃薯"),
            ("联想电脑多少钱", "联想笔记本价格"),
            ("今天天气怎么样", "我想去上海"),
            ("今天天气怎么样", "今天开心吗"),
            ("怎么花呗不能支付", "花呗付款不了怎么回事"),
            ("蚂蚁借呗的额度为什么会下降", "为什么借呗额度被降低了,没有不良记录"),
            ("蚂蚁借呗的额度为什么会下降", "为什么借呗额度被降低了"),
            ("花呗自动还款需要手续费ma", "花呗自动还款还要收手续费吗"),
            ("花呗怎么付款不鸟了", "帮忙看一下我花呗怎么用不了"),
            ("花呗被冻结怎么恢复", "花呗被封了怎么解除"),
            ("我借呗能不能开通", "如何开启借呗"),
            ("使用花呗已付款,订单显示没有付款", "花呗扣款了美团订单显示未付款")
        ]
        for s1, s2 in data:
            # 'wf' selects a particular cut mode of synonym_cut; its exact
            # semantics are defined in chat.semantic.
            sv1 = synonym_cut(s1, 'wf')
            sv2 = synonym_cut(s2, 'wf')
            print(s1, 'VS', s2)
            print(sv1, 'VS', sv2)
            # Compare the cut-vector-based and the raw-string-based scores.
            print("similarity1: ", similarity(sv1, sv2))
            print('similarity2: ', similarity2(s1, s2), '\n')
    def test_build_semantic_matrix(self):
        # Two near-paraphrases; prints the matrix and its shape (presumably a
        # numpy array, given .shape - confirm in chat.semantic).
        matrix = build_semantic_matrix("为什么我的银行卡已经绑定了,花呗要求我还要绑银行卡", "为什么我的银行卡绑定了,花呗还是要求我绑定银行卡")
        print(matrix, matrix.shape)
if __name__ == '__main__':
    # Run the unittest runner when executed directly.
    main()
| [
"407815932@qq.com"
] | 407815932@qq.com |
a042776fb0942df15cc3e28901ae082c4d7fe0a3 | 2218e1da5cb944e4509f8641ca051de137645c5e | /LeetCode practice/Top 100/77.combine.py | ab72990344fa03dc58b3f19fe2588873033fee6f | [] | no_license | Hegemony/Python-Practice | 9e76ebb414433e51c2074602fb0a871891647839 | b68ea41688e9e305635c63fdc43402e2b6fe6524 | refs/heads/main | 2023-05-05T14:00:59.921803 | 2021-06-01T15:38:30 | 2021-06-01T15:38:30 | 301,602,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | class Solution:
def combine(self, n: int, k: int):
nums = [i + 1 for i in range(n)]
res = []
def dfs(path, result):
if len(path) == k:
res.append(list(path))
return
for i in range(len(result)):
path.append(result[i])
print(path)
dfs(path, result[i + 1:])
path.pop()
dfs([], nums)
return res
# Demo: print all 2-element combinations of {1, 2, 3, 4}.
print(Solution().combine(4, 2))
| [
"noreply@github.com"
] | Hegemony.noreply@github.com |
9d71d7dc901c16a0974b3b44d71e98018617359f | dfd0797c88aec7b02866d3c559cb1bc64ce87b44 | /Chapter 9 - Classes/9-11 Imported_Admin.py | e4c7baedb57267181d77835059dec5bdc4cefd2c | [] | no_license | 8BitJustin/2020-Python-Crash-Course | d97f9b79c7a1e1c88c9bc2b035b0e98b2ef23025 | 1f078d7fa62e2b07f8d6c01f85e60baed8293779 | refs/heads/master | 2020-12-20T00:19:44.173143 | 2020-06-14T18:42:08 | 2020-06-14T18:42:08 | 235,893,110 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | """
Start with your work from Exercise 9-8. Store the classes User, Privileges,
and Admin in one module. Create a separate file, make an Admin instance,
and call show_privileges() to show that everything is working correctly
"""
# Import only the Admin class from the user_module module.
from user_module import Admin

# Create an Admin instance with two arguments (presumably first and last
# name - see the Admin definition in user_module).
superuser = Admin('justin', 'olson')
# Call the describe_admin() method defined on the Admin class.
superuser.describe_admin()
# Reach the Privileges instance stored on the Admin's `privileges`
# attribute and call its show_privileges() method.
superuser.privileges.show_privileges()
"""
From top to bottom:
superuser variable was created using the imported Admin class. Then
superuser used the describe_admin() within the Admin class. Finally,
the superuser accessed the show_privileges() method within the Privileges
class by accessing self.privileges (privileges) within Admin class. This is
tied to Privileges(), which is a backdoor so-to-speak with accessing the
Privileges class, which wasn't actually imported into this file.
""" | [
"j.olson.digital@gmail.com"
] | j.olson.digital@gmail.com |
217f1a826831df2df46f299e04687a86a0071b73 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/138/usersdata/224/52701/submittedfiles/volumeTV.py | 4330c4185b1d9894c3e8f74c13da563d89f1168c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # -*- coding: utf-8 -*-
# Student exercise (pt-BR prompts): reads an initial TV volume and a number
# of volume changes, then accumulates the entered values toward a cap of 100.
# NOTE(review): the control flow looks suspect (the while loop can only run
# on early iterations, and each pass adds an extra +1) -- preserved as-is.
v=int(input('Volume inicial: '))
t=int(input('Numero de trocas: '))
soma=0
f=0
for i in range(1,t+1,1):
    x=float(input('Digite o novo valor: '))
    # Keep adding x (plus 1 per pass) while total volume stays below 100.
    while soma+v<100:
        soma=soma+x
        soma=soma+1
    # If the cap was exceeded, reset the accumulator to 100+x -- TODO confirm
    # this is the intended clamping behaviour.
    if (soma+v)>100:
        soma=100+x
    f=soma+v
print(f)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c573390d969c45e825f56c0c47303210f5f54069 | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/pip/_internal/utils/compat.py | bdbe668e8be325a31f213285c178e2079c2ca3fd | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e268be7334f2b33e4e72eabee4ae81bc84828a2ebf6a0c8dc2404f36d2a061f3
size 9596
| [
"github@cuba12345"
] | github@cuba12345 |
ed99964064e075f5b68ac99ec5f52a6602c7edfa | 4c733e36833100685e6fae445a98676182275145 | /inctax.py | bbb25914d755ba6065f9d44cc36b7159063ba1fd | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | GoFroggyRun/taxcalc-ghost | 36d29d81024f18b4232be88f84a51e25e4b6844a | a03affdc6f6a064b2f607ec9f2f7de667b5d9858 | refs/heads/master | 2021-01-10T17:32:56.752078 | 2016-03-21T17:22:42 | 2016-03-21T17:22:42 | 52,900,916 | 0 | 1 | null | 2016-03-21T17:22:42 | 2016-03-01T18:45:12 | Python | UTF-8 | Python | false | false | 6,143 | py | """
INCome TAX input-output capabilities for Tax-Calculator.
"""
# CODING-STYLE CHECKS:
# pep8 --ignore=E402 inctax.py
# pylint --disable=locally-disabled inctax.py
# (when importing numpy, add "--extension-pkg-whitelist=numpy" pylint option)
import argparse
import sys
from taxcalc import IncomeTaxIO
def main():
    """
    Contains command-line interface to the Tax-Calculator IncomeTaxIO class.

    Builds the argparse command-line interface, optionally prints the
    INPUT/OUTPUT variable definitions and exits, and otherwise runs the
    federal income tax calculations, writing results to an output file.
    Returns 0 (the process exit code) on success.
    """
    # parse command-line arguments:
    parser = argparse.ArgumentParser(
        prog='python inctax.py',
        description=('Writes to a file the federal income tax OUTPUT for the '
                     'tax filing units specified in the INPUT file with the '
                     'OUTPUT computed from the INPUT for the TAXYEAR using '
                     'the Tax-Calculator. '
                     'The INPUT file is a CSV-formatted file that contains '
                     'variable names that are a subset of the '
                     'Records.VALID_READ_VARS set. The OUTPUT file is in '
                     'Internet-TAXSIM format. The OUTPUT filename is the '
                     'INPUT filename (excluding the .csv suffix or '
                     '.gz suffix, or both) followed by '
                     'a string equal to "-YY" (where the YY is the last two '
                     'digits in the TAXYEAR) and all that is followed by a '
                     'trailing string. The trailing string is ".out-inctax" '
                     'if no --reform option is specified; otherwise the '
                     'trailing string is ".out-inctax-REFORM" (excluding any '
                     '".json" ending to the REFORM filename). The OUTPUT '
                     'file contains the first 28 Internet-TAXSIM output '
                     'variables. Use --iohelp flag for more information. '
                     'For details on the Internet-TAXSIM version 9.3 '
                     'OUTPUT format, go to '
                     'http://users.nber.org/~taxsim/taxsim-calc9/'))
    parser.add_argument('--iohelp',
                        help=('optional flag to show INPUT and OUTPUT '
                              'variable definitions and exit without trying '
                              'to read the INPUT file, so INPUT and TAXYEAR '
                              'can be any meaningless pair of character (as '
                              'long as the second character is a digit) '
                              '(e.g., "i 0" or "x 1" or ". 9")'),
                        default=False,
                        action="store_true")
    parser.add_argument('--reform',
                        help=('REFORM is name of optional file that contains '
                              'tax reform provisions; the provisions are '
                              'specified using JSON that may include '
                              '//-comments. No REFORM filename implies use '
                              'of current-law policy.'),
                        default=None)
    parser.add_argument('--blowup',
                        help=('optional flag that triggers the default '
                              'imputation and blowup (or aging) logic built '
                              'into the Tax-Calculator that will age the '
                              'INPUT data from Records.PUF_YEAR to TAXYEAR. '
                              'No --blowup option implies INPUT data are '
                              'considered raw data that are not aged or '
                              'adjusted in any way.'),
                        default=False,
                        action="store_true")
    parser.add_argument('--weights',
                        help=('optional flag that causes OUTPUT to have an '
                              'additional variable [29] containing the s006 '
                              'sample weight, which will be aged if the '
                              '--blowup option is used'),
                        default=False,
                        action="store_true")
    parser.add_argument('--records',
                        help=('optional flag that causes the output file to '
                              'be a CSV-formatted file containing for each '
                              'INPUT filing unit the TAXYEAR values of each '
                              'variable in the Records.VALID_READ_VARS set. '
                              'If the --records option is specified, the '
                              'output file name will be the same as if the '
                              'option was not specified, except that the '
                              '".out-inctax" part is replaced by ".records"'),
                        default=False,
                        action="store_true")
    parser.add_argument('INPUT',
                        help=('INPUT is name of required CSV file that '
                              'contains a subset of variables included in '
                              'the Records.VALID_READ_VARS set. '
                              'INPUT must end in ".csv".'))
    parser.add_argument('TAXYEAR',
                        help=('TAXYEAR is calendar year for which federal '
                              'income taxes are computed (e.g., 2013).'),
                        type=int)
    args = parser.parse_args()
    # optionally show INPUT and OUTPUT variable definitions and exit
    if args.iohelp:
        IncomeTaxIO.show_iovar_definitions()
        return 0
    # instantiate IncometaxIO object and do federal income tax calculations
    inctax = IncomeTaxIO(input_data=args.INPUT,
                         tax_year=args.TAXYEAR,
                         policy_reform=args.reform,
                         blowup_input_data=args.blowup,
                         output_records=args.records)
    if args.records:
        # dump the aged/processed records instead of tax results
        inctax.output_records(writing_output_file=True)
    else:
        inctax.calculate(writing_output_file=True,
                         output_weights=args.weights)
    # return no-error exit code
    return 0
# end of main function code
if __name__ == '__main__':
    sys.exit(main())
| [
"martin.holmer@gmail.com"
] | martin.holmer@gmail.com |
be375352f73f8ca216f943d0e938d1f7f484b7e3 | c93fc506c39e002ae67bc380a365d1f33d5ac386 | /supervised_learning/models/pin_position/edge_predict/train_with_data_fnn.py | 61ca43cde4e9374082d9f4be9430fd88878cc4b3 | [] | no_license | quantumiracle/store3 | 9f1f2d5c7103b3ded5e556854e111701e8104ccb | 8b553c657c4efa6391547913831be5756a09924a | refs/heads/master | 2020-07-25T02:09:17.528485 | 2019-09-15T21:48:31 | 2019-09-15T21:48:31 | 208,126,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,891 | py | """
pure vector observation based learning: position of tactip and target
task: tactip following the cylinder to reach the ball target
use 382 pins
"""
import tensorflow as tf
# from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Flatten
import numpy as np
import matplotlib.pyplot as plt
import gym, threading, queue
from gym_unity.envs import UnityEnv
import argparse
from PIL import Image
from deform_visualize import plot_list_new, plot_list_new_sim2
import pickle
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
parser.add_argument('--rotat_test', dest='rotat_test', action='store_true', default=False)
args = parser.parse_args()
class Classifier(object):
    """Fully-connected regression network (TensorFlow 1.x graph mode).

    Builds a 3-hidden-layer MLP (500 units each, ReLU) that maps a
    flattened observation vector to ``label_dim`` outputs squashed into
    [-2, 2] via 2*tanh, trained with Adam on mean-squared error.
    """
    def __init__(self, obs_dim, label_dim, ini_lr=1e-3):
        # obs_dim: length of the flattened input vector
        # label_dim: number of regression targets
        # ini_lr: default learning rate used when none is fed at train time
        self.hidden_dim=500
        self.sess = tf.Session()
        self.label = tf.placeholder(tf.float32, [None, label_dim], 'label')
        self.obs = tf.placeholder(tf.float32, [None, obs_dim], 'obs')
        self.lr = tf.placeholder_with_default(ini_lr, shape=(), name='lr')
        self.training = tf.placeholder_with_default(False, shape=(), name='training') # BN signal
        l1 = tf.layers.dense(self.obs, self.hidden_dim, tf.nn.relu)
        l2 = tf.layers.dense(l1, self.hidden_dim, tf.nn.relu)
        l3 = tf.layers.dense(l2, self.hidden_dim, tf.nn.relu)
        self.predict = 2*tf.layers.dense(l3, label_dim, activation=tf.nn.tanh) # predict position and rotation
        self.loss = tf.reduce_mean(tf.square(self.predict-self.label)) # pos
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
        self.train_op = self.optimizer.minimize(self.loss)
        self.sess.run(tf.global_variables_initializer())
    def train(self, batch_s, batch_label, lr, decay):
        """Run one optimization step; returns the batch loss.

        NOTE(review): ``decay`` is currently unused (the decay logic is
        commented out below); the learning rate is simply fed via ``lr``.
        """
        # self.optimizer.learning_rate = lr
        # if decay:
        #     self.train_op = self.optimizer.minimize(self.loss)
        loss,_=self.sess.run([self.loss, self.train_op], {self.training: True, self.obs: batch_s, self.label: batch_label, self.lr: lr})
        # if decay:
        #     print(self.optimizer._lr)
        return loss
    def predict_one_value(self, s):
        """Predict for a single observation vector (adds a batch axis)."""
        s = s[np.newaxis, :]
        predict = self.sess.run(self.predict, {self.obs: s})
        return predict
    def predict_value(self, s):
        """Predict for a batch of observation vectors."""
        predict = self.sess.run(self.predict, {self.obs: s})
        return predict
    def save(self, path):
        # Checkpoint all graph variables to `path`.
        saver = tf.train.Saver()
        saver.save(self.sess, path)
    def load(self, path):
        # Restore a checkpoint previously written by save().
        saver=tf.train.Saver()
        saver.restore(self.sess, path)
def state_process(s):
    """Split a raw observation into ``([label], flattened coordinates)``.

    The first entry of ``s`` is the object label.  The remaining entries
    are read with the original strided pattern (x values at 1::3, z
    values at 3::3), rescaled by a fixed calibration factor, and
    interleaved into a flat (x, z, x, z, ...) array.
    """
    scale = 0.5674
    xs = np.asarray(s[1::3]) / scale
    zs = np.asarray(s[3::3]) / scale
    # Column-stack then flatten so x and z coordinates alternate.
    flattened = np.transpose([xs, zs]).reshape(-1)
    return [s[0]], flattened
def Predict(input, model_path = './model/class_obj'):
    """Restore a trained Classifier checkpoint and run one prediction.

    ``input`` is a single flattened observation (182 values); returns the
    network output for that observation.  A fresh TF graph/session is
    built on every call, so this is expensive -- intended for one-off use.
    NOTE(review): ``input`` shadows the builtin, and the hard-coded dims
    must match the checkpoint being restored -- confirm before reuse.
    """
    obs_dim = 182 # total 280: 0 object index, 1-3 rotation value, 4-6 average contact point position, 7-279 pins positions
    state_dim = 1 # 2 position
    lr=2e-2
    classifier = Classifier(obs_dim, state_dim, lr)
    classifier.load(model_path)
    predict = classifier.predict_one_value(input)
    return predict
if __name__ == '__main__':
    model_path = './model/comparison/random0.2/class_obj'
    training_episodes = 80000
    input_dim = 182 # total 280: 0 object index, 1-3 rotation value, 4-6 average contact point position, 7-279 pins positions
    output_dim = 1
    lr=5e-4
    decay=0 # decay signal of lr
    classifier = Classifier(input_dim, output_dim, lr)
    # Training mode: load the pickled dataset, add small Gaussian noise to
    # the inputs, and fit the classifier, periodically saving checkpoints
    # and a log-scale loss curve.
    if args.train:
        data_file=open('compare_data/raw_data02.pickle', "rb")
        raw_data=pickle.load(data_file)
        data=[]
        label=[]
        for i in range(len(raw_data)):
            s=raw_data[i]
            label_i, data_i=state_process(s)
            ''' add noise '''
            data_i=data_i+np.random.normal(0, 1e-2, data_i.shape)
            data.append(data_i)
            label.append(label_i)
        loss_list=[]
        # classifier.load(model_path)
        for eps in range(training_episodes):
            # Halve the learning rate every 40000 episodes.
            if eps%40000==0 and eps>1:
                lr *=0.5
                decay=1
            else:
                decay=0
            loss = classifier.train(data, label, lr, decay)
            # Exponential moving average of the loss for a smoother curve.
            if eps==0:
                loss_list.append(loss)
            else:
                loss_list.append(0.9*loss_list[-1]+0.1*loss)
            print('Eps: {}, Loss: {}'.format(eps, loss))
            if eps % 100 ==0:
                plt.yscale('log')
                plt.plot(np.arange(len(loss_list)), loss_list)
                plt.savefig('classify_trainwithdataobj2.png')
                classifier.save(model_path)
        np.savetxt('trainwithdata.txt', np.array(loss_list)[:, np.newaxis], fmt='%.4f', newline=', ')
        round_loss_list=list(np.around(np.array(loss_list),4))
        print(round_loss_list)
    # test with testing dataset, all at once
    if args.test:
        test_data_file=open('data/raw_data.pickle', "rb")
        raw_data=pickle.load(test_data_file)
        data=[]
        label=[]
        classifier.load(model_path)
        # Evaluate the first 80 samples, printing label vs. prediction.
        for i in range(80):
            s=raw_data[i]
            label_i, data_i=state_process(s)
            print(label_i)
            data.append(data_i)
            label.append(label_i)
            predict = classifier.predict_one_value(data_i)[0]
            print(predict)
            xy=data_i.reshape(-1,2)
            # plot_list_new_sim2(xy,i,predict, label_i)
            print(i)
| [
"1402434478@qq.com"
] | 1402434478@qq.com |
2a08847d1a4c5afc6f9242526a20c18f304652f7 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoEgamma/EgammaHFProducers/python/hfClusterShapes_cfi.py | 284de7823cb9863138c4bfc4645b9b29b1e096fd | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 731 | py | import FWCore.ParameterSet.Config as cms
# HFEMClusterShape producer
# Configuration for the HFEMClusterProducer EDProducer, which builds
# electromagnetic clusters from HF RecHits ("hfreco").
# NOTE(review): the thresholds and flags below are detector-tuning constants
# taken as-is from the original configuration -- confirm against the
# RecoEgamma/EgammaHFProducers documentation before changing any of them.
hfEMClusters = cms.EDProducer("HFEMClusterProducer",
                              hits = cms.InputTag("hfreco"),
                              minTowerEnergy = cms.double(4.0),
                              seedThresholdET = cms.double(5.0),
                              maximumSL = cms.double(98),
                              maximumRenergy = cms.double(50),
                              usePMTFlag = cms.bool(True),
                              forcePulseFlagMC=cms.bool(False),
                              usePulseFlag = cms.bool(True),
                              correctionType = cms.int32(1)
                              )
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
d14ebc07ab2006ac2b7441d8b84c1ddcf1c557a9 | edb88981aa1420af7e074068ed7818b9d904a3dd | /trunk/minds/util/patterns_tester.py | b1f5e484e945b74cdc21aec4745075e1fc83562b | [] | no_license | BackupTheBerlios/mindretrieve-svn | 101c0f1dfc25d20d5f828b6fd0d43301b773af4e | 463745fcf1c1d5b1f6c201c30bcc339c99b437ed | refs/heads/master | 2021-01-22T13:57:31.225772 | 2006-04-28T04:24:43 | 2006-04-28T04:24:43 | 40,801,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,352 | py | """
Look for a series of patterns in a file object.
This is used as a helper to unit testing.
"""
import re
from StringIO import StringIO
import sys
import unittest
# this is a debugging aid. Can we make it easier to activate?
def _debug_mismatch(data, i, pattern):
    # Debug helper (Python 2 print syntax): shows the text surrounding
    # position ``i`` with a '<$>' cursor and the pattern that failed to
    # match.  Only invoked from the commented-out calls in checkPatterns().
    # The trailing ``assert pattern`` just guards against an empty pattern.
    left = max(0,i-10)
    print >>sys.stderr, data[left:i] + '<$>' + data[i:i+20]
    print >>sys.stderr, 'Pattern not matched: ', pattern
    assert pattern
def checkStrings(data, patterns, no_pattern=None):
    """Verify that every string in ``patterns`` appears in ``data`` in order.

    Each string must be found at or after the end of the previous match.
    If ``no_pattern`` is given, it must NOT occur after the final match.
    Returns None when all checks pass, otherwise the offending pattern.
    """
    pos = 0
    for wanted in patterns:
        found_at = data.find(wanted, pos)
        if found_at < 0:
            return wanted
        pos = found_at + len(wanted)
    if no_pattern:
        if data.find(no_pattern, pos) != -1:
            return no_pattern
    return None
def checkPatterns(data, patterns, no_pattern=None):
    """Regex analogue of checkStrings(): match patterns in order.

    Each pattern is compiled case-insensitively and must match at or
    after the end of the previous match.  If ``no_pattern`` is given, it
    must NOT match after the final required pattern.  Returns None when
    everything matches, otherwise the offending pattern.
    Note: the whole re pattern must appear within a line.
    """
    pos = 0
    for pattern in patterns:
        match = re.compile(pattern, re.IGNORECASE).search(data, pos)
        if match is None:
            return pattern
        pos = match.end()
    if no_pattern and re.compile(no_pattern, re.IGNORECASE).search(data, pos):
        return no_pattern
    return None
def showFile(fp, label, maxchars=1024):
    """Render up to ``maxchars`` of a buffered file as a labelled banner.

    Rewinds ``fp`` (e.g. a StringIO), reads at most ``maxchars``
    characters, and appends '...' when more data remains.
    """
    fp.seek(0)
    snippet = fp.read(maxchars)
    truncated = '...' if fp.read(1) else ''
    header_pad = '-' * (70 - len(label))
    return """
--%s%s
%s%s
--^end-----------------------------------------------------------------
""" % (label, header_pad, snippet, truncated)
# ----------------------------------------------------------------------
# Test the tester
SAMPLE_FILE = """
<html>
<head>
<title>Home</title>
<link rel="stylesheet" href="/main.css" type="text/css">
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
</html>
"""
# todo: give a little explanation on each test
class TestPatternTester(unittest.TestCase):
    """Unit tests for checkPatterns()."""

    def test00(self):
        # No patterns required -> vacuously satisfied.
        self.assertIsNone(checkPatterns('', []))

    def test01(self):
        # A pattern can never match empty data; it is reported back.
        self.assertEqual(checkPatterns('', ['X']), 'X')

    def test10(self):
        # Data present but nothing required -> still satisfied.
        self.assertIsNone(checkPatterns('xyz', []))

    def testCheckedOK(self):
        # All three patterns appear in order ('.text.css.' is a regex).
        self.assertIsNone(
            checkPatterns(SAMPLE_FILE, ['html', '.text.css.', '</html>']))

    def testCheckedRe(self):
        # A wildcard regex spans the whole <title> element.
        self.assertIsNone(
            checkPatterns(SAMPLE_FILE,
                          ['html', '<title>.*</title>', '</html>']))

    def testOrderWrong(self):
        # '.text.css.' occurs before </html>, so searching for it after
        # </html> must fail and report it.
        self.assertEqual(
            checkPatterns(SAMPLE_FILE, ['html', r'\</html\>', '.text.css.']),
            '.text.css.')

    def testNoPatternGood(self):
        # Nothing (not even '<') may follow the final </html>.
        self.assertIsNone(
            checkPatterns(SAMPLE_FILE, ['html', '.text.css.', '</html>'],
                          '<'))

    def testNoPatternBad(self):
        # '<' does occur after </head>, so it is reported.
        self.assertEqual(
            checkPatterns(SAMPLE_FILE, ['html', '.text.css.', '</head>'],
                          '<'),
            '<')
class TestCheckStrings(unittest.TestCase):
    """Unit tests for checkStrings()."""

    def test00(self):
        # No strings required -> vacuously satisfied.
        self.assertIsNone(checkStrings('', []))

    def test01(self):
        # A string can never be found in empty data; it is reported back.
        self.assertEqual(checkStrings('', ['X']), 'X')

    def test10(self):
        # Data present but nothing required -> still satisfied.
        self.assertIsNone(checkStrings('xyz', []))

    def testCheckedOK(self):
        # All three literals appear in order in the sample document.
        self.assertIsNone(
            checkStrings(SAMPLE_FILE, ['html', 'text/css', '</html>']))

    def testOrderWrong(self):
        # 'text/css' occurs before </html>, so looking for it afterwards
        # must fail and report it.
        self.assertEqual(
            checkStrings(SAMPLE_FILE, ['html', '</html>', 'text/css']),
            'text/css')

    def testNoPatternGood(self):
        # Nothing (not even '<') may follow the final </html>.
        self.assertIsNone(
            checkStrings(SAMPLE_FILE, ['html', 'text/css', '</html>'],
                         '<'))

    def testNoPatternBad(self):
        # '<' does occur after </head>, so it is reported.
        self.assertEqual(
            checkStrings(SAMPLE_FILE, ['html', 'text/css', '</head>'],
                         '<'),
            '<')
if __name__ == '__main__':
unittest.main() | [
"tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990"
] | tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990 |
53192f430b8060cf4c1bfb05a71a5d1b8e0f0bee | 570ca07ec6266c875dc736a3d8c4b4ddc61579fd | /todo/views.py | fd8a700e8d825928ff8bdb4cdf3696834f14d36c | [
"MIT"
] | permissive | Wilo/to-do-list | 85024a59a0f8192a419297e66a69b0e31df45b43 | 185c0025a3b8a1e44adb1842c7e49af5062507a3 | refs/heads/master | 2021-01-17T05:42:56.683726 | 2014-08-02T20:47:15 | 2014-08-02T20:47:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from forms import *
from models import *
# Create your views here.
def list(request):
    """Render all Todo items on the index page.

    NOTE(review): the name shadows the builtin ``list``; kept because the
    URL configuration and ``add`` reference it by this name.
    """
    todo = Todo.objects.all()
    return render(request, 'index.html', {'list': todo})
def add(request):
    """Create a new Todo item.

    GET renders an empty form; POST validates and saves it.  After a
    successful save the client is redirected (Post/Redirect/Get) to the
    list view -- matching ``edit`` and ``delete`` -- instead of the
    original ``return list(request)``, which re-rendered the list under
    the POST request and caused duplicate submissions on refresh.
    An invalid POST falls through and re-renders the bound form with its
    validation errors.
    """
    if request.method == "POST":
        form = TodoForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('lista'))
    else:
        form = TodoForm()
    return render(request, 'add.html', {'form': form, 'add': True})
def edit(request, **kwargs):
    """Edit the Todo item identified by the ``pk`` keyword argument.

    GET renders the form pre-filled from the instance; a valid POST saves
    it and redirects (Post/Redirect/Get) to the list view.
    NOTE(review): ``Todo.objects.get`` raises DoesNotExist (HTTP 500) for
    an unknown pk -- consider get_object_or_404.
    """
    pk = kwargs.get('pk')
    todo = Todo.objects.get(id=pk)
    if request.method == "POST":
        form = TodoForm(request.POST, instance=todo)
        if form.is_valid():
            form.save()
            # NOTE(review): this reassignment is dead code -- the redirect
            # on the next line returns before ``form`` is used again.
            form = TodoForm()
            return HttpResponseRedirect(reverse('lista'))
    else:
        form = TodoForm(instance=todo)
    return render(request, 'add.html', {'form': form, 'add': False, 'id': pk})
def delete(request, **kwargs):
    """Delete the Todo item identified by the ``pk`` keyword argument.

    GET renders a confirmation page; POST performs the deletion and
    redirects to the list view.
    NOTE(review): like ``edit``, an unknown pk raises DoesNotExist --
    consider get_object_or_404.
    """
    pk = kwargs.get('pk')
    todo = Todo.objects.get(id=pk)
    if request.method == "POST":
        todo.delete()
        return HttpResponseRedirect(reverse('lista'))
    return render(request, 'delete.html', {'todo': todo})
"leonardoorozcop@gmail.com"
] | leonardoorozcop@gmail.com |
cd9ccb5e2390a834796c340271af293d646cd570 | 8ad2e97aed97d581487f2b604c10264a52022253 | /people/items.py | 9d711c1252cdace2b3ace584b15b574a7be895a3 | [] | no_license | SimeonYS/people | 9aca784793a7ae7762b788878ff1fcb7ee511ba5 | a24b19c1282cb57f8b0dab9424e957a0d71d2bff | refs/heads/main | 2023-03-26T20:02:59.275628 | 2021-03-23T14:09:54 | 2021-03-23T14:09:54 | 350,736,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | import scrapy
class PeopleItem(scrapy.Item):
    """Scrapy item for one scraped article: title, body text, date, URL."""
    title = scrapy.Field()
    content = scrapy.Field()
    date = scrapy.Field()
    link = scrapy.Field()
| [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
18d3f0a11f1b03da2aa4b0647595191a42d5c7ea | b0f2c47881f39ceb5a989b9638483f7439bfb5cf | /Problem85.py | e1438a737933a65d31f6e5622f7f1748dc8b5611 | [] | no_license | chrisvail/Project_Euler | 9ba264c8ec9d158b33ec677811e59d1e0e52fef2 | 41623c27b3e1344f9d8ebdfac4df297d0666cc07 | refs/heads/main | 2023-02-13T20:26:42.752780 | 2021-01-15T16:38:27 | 2021-01-15T16:38:27 | 329,964,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | from itertools import count
def main():
    """Project Euler 85: find the grid with closest to two million rectangles.

    An n x m grid contains T(n) * T(m) axis-aligned rectangles, where
    T(k) = k*(k+1)/2 is the k-th triangular number -- exactly what the
    original nested double summation computed in O(n*m) per grid.  Using
    the closed form keeps the scan order (and therefore the reported
    best grid) identical while making each candidate O(1).

    Prints the best (n, m) and the grid area n*m, then returns 0.
    """
    target = 2000000
    # best = [distance from target, n, m] for the closest grid seen so far.
    best = [target, 0, 0]
    for n in count(1):
        tri_n = n * (n + 1) // 2
        for m in range(n + 1):
            total = tri_n * (m * (m + 1) // 2)
            if abs(total - target) < best[0]:
                best = [abs(total - target), n, m]
        # Stop once some grid is within 2 rectangles of the target.
        if best[0] <= 2:
            print("Within 2")
            print("Closest answer is where:\n\tn = {}\n\tm = {} \nTherefore the answer is: {}".format(best[1], best[2], best[1] * best[2]))
            return 0
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | chrisvail.noreply@github.com |
1f3646b5f84c6d011827e3285f63bde47ff349cf | e41651d8f9b5d260b800136672c70cb85c3b80ff | /Notification_System/temboo/Library/Google/Spreadsheets/UpdateWorksheet.py | eaffb1484ed80159e3bb8320808a69eac7555076 | [] | no_license | shriswissfed/GPS-tracking-system | 43e667fe3d00aa8e65e86d50a4f776fcb06e8c5c | 1c5e90a483386bd2e5c5f48f7c5b306cd5f17965 | refs/heads/master | 2020-05-23T03:06:46.484473 | 2018-10-03T08:50:00 | 2018-10-03T08:50:00 | 55,578,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,673 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# UpdateWorksheet
# Updates existing worksheet metadata such as: Title, Row Count, and Column Count.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateWorksheet(Choreography):
    # Auto-generated Temboo binding: updates Google Spreadsheets worksheet
    # metadata (title, row count, column count) via the Temboo endpoint
    # path passed to the superclass below.
    def __init__(self, temboo_session):
        """
        Create a new instance of the UpdateWorksheet Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(UpdateWorksheet, self).__init__(temboo_session, '/Library/Google/Spreadsheets/UpdateWorksheet')
    def new_input_set(self):
        # Factory for the input container used when executing the Choreo.
        return UpdateWorksheetInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the typed ResultSet.
        return UpdateWorksheetResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Wrap an asynchronous execution handle in the typed Execution.
        return UpdateWorksheetChoreographyExecution(session, exec_id, path)
class UpdateWorksheetInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the UpdateWorksheet
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    Auto-generated code: each setter stores one named input; the type and
    requirement notes in the docstrings come from the Temboo metadata.
    """
    def set_AccessToken(self, value):
        """((optional, string) OAuth Access Token; required for OAuth unless ClientID, ClientSecret, and RefreshToken are provided.)"""
        super(UpdateWorksheetInputSet, self)._set_input('AccessToken', value)
    def set_ClientID(self, value):
        """((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)"""
        super(UpdateWorksheetInputSet, self)._set_input('ClientID', value)
    def set_ClientSecret(self, value):
        """((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)"""
        super(UpdateWorksheetInputSet, self)._set_input('ClientSecret', value)
    def set_ColumnCount(self, value):
        """((required, integer) The number of columns to set for the worksheet.)"""
        super(UpdateWorksheetInputSet, self)._set_input('ColumnCount', value)
    def set_Password(self, value):
        """((optional, password) Deprecated (retained for backward compatibility only).)"""
        super(UpdateWorksheetInputSet, self)._set_input('Password', value)
    def set_RefreshToken(self, value):
        """((conditional, string) OAuth Refresh Token used to generate a new Access Token when the original is expired. Required unless providing a valid AccessToken.)"""
        super(UpdateWorksheetInputSet, self)._set_input('RefreshToken', value)
    def set_ResponseFormat(self, value):
        """((optional, string) The response format: xml (the default) or json.)"""
        super(UpdateWorksheetInputSet, self)._set_input('ResponseFormat', value)
    def set_RowCount(self, value):
        """((required, integer) The number of rows to set for the worksheet.)"""
        super(UpdateWorksheetInputSet, self)._set_input('RowCount', value)
    def set_SpreadsheetKey(self, value):
        """((required, string) The unique key of the spreadsheet containing the worksheet to update.)"""
        super(UpdateWorksheetInputSet, self)._set_input('SpreadsheetKey', value)
    def set_Title(self, value):
        """((required, string) The new title of the worksheet.)"""
        super(UpdateWorksheetInputSet, self)._set_input('Title', value)
    def set_Username(self, value):
        """((required, string) Deprecated (retained for backward compatibility only).)"""
        super(UpdateWorksheetInputSet, self)._set_input('Username', value)
    def set_WorksheetId(self, value):
        """((required, string) The unique ID of the worksheet to update.)"""
        super(UpdateWorksheetInputSet, self)._set_input('WorksheetId', value)
class UpdateWorksheetResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the UpdateWorksheet Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter shadows the builtin ``str``; kept
        # for compatibility with the generated Temboo API.
        return json.loads(str)
    def get_NewAccessToken(self):
        """((string) Contains a new AccessToken when the RefreshToken is provided.)"""
        return self._output.get('NewAccessToken', None)
    def get_Response(self):
        """(Response from Google.)"""
        return self._output.get('Response', None)
def _make_result_set(self, response, path):
return UpdateWorksheetResultSet(response, path)
| [
"shriswissfed@gmail.com"
] | shriswissfed@gmail.com |
52e2db735ddcef06b0f7ee93f8f2486af36d3b04 | 48517a9b7ec7b0f0bf0a03291b7d1e3def751c0a | /Choose Your Own Colors/corner_to_corner_5.py | 39a6a805f3f67ed65503e30b22b7b83f71056fc7 | [
"MIT"
] | permissive | Breakfast-for-Pigeons/Unicorn-HAT | 1ae033bf11c05b9cc739b1eacfc77665506e0bc8 | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | refs/heads/master | 2021-06-06T12:22:48.162031 | 2020-10-22T17:31:51 | 2020-10-22T17:31:51 | 74,648,524 | 1 | 0 | null | 2018-10-02T17:37:31 | 2016-11-24T07:28:23 | Python | UTF-8 | Python | false | false | 2,468 | py | #!/usr/bin/python3
"""
Corner to Corner 5 - Choose Your Own Color
Selects a color and then sends it to one of four functions.
Can move a square from the lower left corner to the upper right corner.
Can move a square from the upper right corner to the lower left corner.
Can move a square from the lower right corner to the upper left corner.
Can move a square from the upper left corner to the lower right corner.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from corner_to_corner_1_v2 import corner_to_corner_1_v2
from corner_to_corner_2_v2 import corner_to_corner_2_v2
from corner_to_corner_3_v2 import corner_to_corner_3_v2
from corner_to_corner_4_v2 import corner_to_corner_4_v2
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
########################################################################
# Import Variables #
########################################################################
from bfp_unicornhat import C1
from bfp_unicornhat import C2
from bfp_unicornhat import C3
from bfp_unicornhat import C4
from bfp_unicornhat import C5
from bfp_unicornhat import C6
from bfp_unicornhat import C7
from bfp_unicornhat import C8
########################################################################
# Functions #
########################################################################
def corner_to_corner_5():
    """
    Moves a square from one corner to the opposite corner.

    Clears the grid, then runs the four corner-to-corner traversals
    twice, cycling through colours C1..C8 in the original order.
    """
    # Blank the display before starting the sequence.
    corner_to_corner_4_v2((0, 0, 0))
    sequence = (
        (corner_to_corner_1_v2, C1),
        (corner_to_corner_3_v2, C2),
        (corner_to_corner_2_v2, C3),
        (corner_to_corner_4_v2, C4),
        (corner_to_corner_1_v2, C5),
        (corner_to_corner_3_v2, C6),
        (corner_to_corner_2_v2, C7),
        (corner_to_corner_4_v2, C8),
    )
    for traversal, colour in sequence:
        traversal(colour)
if __name__ == '__main__':
    try:
        # STEP01: Print header
        print_header()
        # STEP02: Print instructions in white text
        print("\033[1;37;40mPress Ctrl-C to stop the program.")
        # STEP03: Run the corner-to-corner light sequence.
        corner_to_corner_5()
        # STEP04: Exit the program.
        stop()
    except KeyboardInterrupt:
        # Ctrl-C is the documented way to quit: shut the display down cleanly.
        stop()
| [
"noreply@github.com"
] | Breakfast-for-Pigeons.noreply@github.com |
87201465d144f08dc791178512de0d903d59f97a | c263b3eae99bcad488b6e334e906a30b3a29ba78 | /boolean_parser/actions/clause.py | 4db123ffd38fe680892632e76662f638c21fce34 | [
"BSD-3-Clause"
] | permissive | havok2063/boolean_parser | 675953df535f83b76cea29b324084105c1b1d1ca | cbc3c5b74695da838418aaa3dd3ad08f413ec4a7 | refs/heads/main | 2023-01-13T01:49:34.237899 | 2022-12-01T15:00:28 | 2022-12-01T15:00:28 | 170,555,761 | 18 | 3 | BSD-3-Clause | 2023-01-03T10:37:50 | 2019-02-13T18:12:26 | Python | UTF-8 | Python | false | false | 5,908 | py | # !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: clause.py
# Project: actions
# Author: Brian Cherinka
# Created: Sunday, 17th February 2019 12:52:31 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2019 Brian Cherinka
# Last Modified: Sunday, 17th February 2019 12:53:16 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
#
# Parsing Action classes
#
class BaseAction(object):
    ''' Base object representing a clause action

    An action to perform after parsing a string clause.  Actions are
    attached to clauses with ``setParseAction`` on a given ``pyparsing``
    element; :py:meth:`pyparsing.ParserElement.setParseAction` accepts a
    function or class to be applied to the element during parsing
    (passing a list attaches several).  This class extracts the parsed
    data from the ``pyparsing`` element and makes it accessible as named
    attributes.

    Attributes:
        name (str): the name of the extracted parameter
        base (str): the base name of the extracted parameter, if any
        fullname (str): the full name as base + name
        data (dict): the extracted parsed parameters from the clause
        parsed_clause (pyparsing.ParseResults): the original parse result
        input_clause (str): the original input clause element
    '''
    def __init__(self, data):
        # ``data`` is the ParseResults handed to the parse action; the
        # first group holds this clause's named tokens.
        self.parsed_clause = data
        self.data = data[0].asDict()
        # parse the basic parameter name
        self._parse_parameter_name()
    def _parse_parameter_name(self):
        ''' parse the parameter name into a base + name '''
        # At most one dot is allowed, splitting "base.name"; a bare name
        # leaves ``base`` as None.
        name = self.data.get('parameter', None)
        assert name.count(
            '.') <= 1, f'parameter {name} cannot have more than one . '
        if '.' in name:
            self.base, self.name = name.split('.', 1)
        else:
            self.base = None
            self.name = name
    @property
    def fullname(self):
        ''' The full parameter name, including any base '''
        return f'{self.base}.{self.name}' if self.base else self.name
class Word(BaseAction):
''' Class action for handling word clauses
This action performs a basic word parse. The basic word
is assigned as the ``name`` attribute. Example word clauses:
"alpha" or "alpha and beta or not charlie".
'''
def __init__(self, data):
super(Word, self).__init__(data)
def __repr__(self):
return f'{self.name}'
@property
def input_clause(self):
''' Original input clause as a string '''
return f'{self.fullname}'
class Condition(BaseAction):
''' Class action for handling conditional clauses
This action performs a basic conditional parse. The syntax for a
conditional expressions is defined as "parameter operand value" or
for "between" conditions, "parameter between value and value2". The parameter name,
operand, and parameter value is assigned as the ``name``, ``operator``, and
``value`` attribute, respectively. Example conditional clauses:
"x > 5" or "x > 5 and y < 3". When using a "between" condition, e.g.
"x between 3 and 5", an additional ``value2`` attribute is assigned the second
parameter value. For bitwise operands of '&' and '|', the value can also accept a negation
prefix, e.g. "x & ~256", which evaluates to "x & -257".
Allowed operands for conditionals are:
'>', '>=, '<', '<=', '==', '=', '!=', '&', '|'
In addition to the Base Attributes, the ``Condition`` action provides
additional attributes containing the parsed condition parameters.
Attributes:
operator: str
The operand used in the condition
value: str
The parameter value in the condition
value2: str
Optional second value, assigned when a "between" condition is used.
'''
def __init__(self, data):
super(Condition, self).__init__(data)
# extract the conditional operator and value
self.operator = self.data.get('operator', None)
self._extract_values()
def __repr__(self):
more = 'and' + self.value2 if hasattr(self, 'value2') else ''
return self.name + self.operator + self.value + more
@property
def input_clause(self):
''' Original input clause as a string '''
if self.operator == 'between':
return f'{self.fullname} {self.operator} {self.value} and {self.value2}'
else:
return f'{self.fullname} {self.operator} {self.value}'
def _extract_values(self):
''' Extract the value or values from the condition '''
self.value = self.data.get('value', None)
if not self.value:
if self.operator == 'between':
self.value = self._check_bitwise_value(self.data.get('value1'))
self.value2 = self._check_bitwise_value(
self.data.get('value2'))
self.value = self._check_bitwise_value(self.value)
def _check_bitwise_value(self, value):
''' Check if value has a bitwise ~ in it
Removes any bitwise ~ found in a value for a condition.
If the operand is a bitwise & or |, convert the ~value to its
integer appropriate. E.g. ~64 -> -65.
Parameters:
value: str
A string numerical value
Returns:
The str value or value converted to the proper bitwise negative
'''
if '~' in value:
value = value.replace('~', '')
if self.operator in ['&', '|']:
value = str(-1 * (int(value)) - 1)
return value
| [
"havok2063@hotmail.com"
] | havok2063@hotmail.com |
6ebb3533a73b107738006b3db204498153ef1cba | 20d1b971f58b0a6ab30f2682b773e7280ac77bc2 | /loops/170820, Lists/lr_dvumernie_spiski/8/8.1'divine_k_string.py | e624544d8e58779b0d5434ca7c3d94fed73fb580 | [] | no_license | M2401/P1 | 1a39ba384030a1483c77d86db933d11c30e45abf | c08f3ba1e07678b7b20836465175d51c89a43078 | refs/heads/master | 2023-03-03T21:34:31.601512 | 2021-02-06T17:44:54 | 2021-02-06T17:44:54 | 312,105,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | #1. Задана матрица порядка n и число к. Разделить элементы k-й строки на диагональный элемент,
# расположенный в этой строке.
import random
def pr(a, n):
for i in range(n):
for j in range(n):
print(a[i][j], end='\t')
print()
print()
def main():
n = int(input('Введите порядок матрицы n '))
a = []
for i in range(n):
b = []
for j in range(n):
b.append(random.randint(1, 10))
a.append(b)
pr(a, n)
#почему неправильно работает цикл: for j in range(n):
#a[k][j] = a[k][j]//a[k][k]??
k = int(input('введите номер строки '))
p = a[k][k]
j = 0
while j < n:
a[k][j] = a[k][j]//p
j += 1
for i in range(n):
for j in range(n):
print(a[i][j], end='\t')
print()
print()
main() | [
"flussooo@gmail.com"
] | flussooo@gmail.com |
e3bcf84be53aaddcc116ab28636370c0e2ff9d75 | 4d5a91c312e9d633f73098bcc42ba9386893bd86 | /pajbot/modules/basic/dbmanage.py | f9b062683151352d7e5933d207371752d4897328 | [
"MIT"
] | permissive | leecopland/bullbot | ffc45062d802695fe2486f26643c1d1b9429e19c | 52e463293097b58084afb4f9f1d85b0656a67d44 | refs/heads/master | 2022-12-10T14:02:28.113368 | 2021-03-25T05:04:28 | 2021-03-25T05:04:28 | 172,211,287 | 1 | 0 | MIT | 2022-09-16T18:28:35 | 2019-02-23T12:23:22 | Python | UTF-8 | Python | false | false | 1,594 | py | import logging
import pajbot.models
from pajbot.modules import BaseModule
from pajbot.modules import ModuleType
from pajbot.modules.basic import BasicCommandsModule
log = logging.getLogger(__name__)
class DBManageModule(BaseModule):
ID = __name__.split('.')[-1]
NAME = 'DB Managing commands'
ENABLED_DEFAULT = True
DESCRIPTION = '!reload/!commit'
CATEGORY = 'Feature'
PARENT_MODULE = BasicCommandsModule
MODULE_TYPE = ModuleType.TYPE_ALWAYS_ENABLED
def reload(self, **options):
message = options['message']
bot = options['bot']
source = options['source']
bot.whisper(source.username, 'Reloading things from DB...')
if message and message in bot.reloadable:
bot.reloadable[message].reload()
else:
bot.reload_all()
def commit(self, **options):
message = options['message']
bot = options['bot']
source = options['source']
bot.whisper(source.username, 'Committing cached things to db...')
if message and message in bot.commitable:
bot.commitable[message].commit()
else:
bot.commit_all()
def load_commands(self, **options):
self.commands['reload'] = pajbot.models.command.Command.raw_command(self.reload,
level=1000,
description='Reload a bunch of data from the database')
self.commands['commit'] = pajbot.models.command.Command.raw_command(self.commit,
level=1000,
description='Commit data from the bot to the database')
| [
"pajlada@bithack.se"
] | pajlada@bithack.se |
57ac4bddf8a6c8e345fc31c97d40daa542272bd8 | bf535fdf7418b8092d6721d4e66e61f8c9dd4929 | /tasks/task_12.py | be64338587693c511cef05d0f510958040a6a902 | [
"MIT"
] | permissive | AlexRogalskiy/python | edb7808d48f4f8b8b4e4311678fb7364c7b54aeb | 78a38746de51688dc118ba921da08b920fe4caf2 | refs/heads/master | 2021-06-29T03:14:23.472651 | 2018-06-26T05:36:02 | 2018-06-26T05:36:02 | 97,952,461 | 0 | 0 | MIT | 2020-07-23T09:19:20 | 2017-07-21T13:52:00 | Python | UTF-8 | Python | false | false | 238 | py | import itertools
flatten = lambda x: list(itertools.chain.from_iterable(x))
s = [['"', 'An', 'investment'], ['in'], ['knowledge'], ['pays'], ['the', 'best'], ['interest."', '--'], ['Benjamin'], ['Franklin']]
print(' '.join(flatten(s)))
| [
"alexander.rogalsky@yandex.ru"
] | alexander.rogalsky@yandex.ru |
e1e7db7fb239a768eeb45685f63fcfd5a37115fd | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2899/50263/235338.py | 15ff2da2c947377ae171b13bfdf0500d85a905d3 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | num = eval(input())
if num == 1:
print("true")
elif num == 4:
print("true")
elif num == 16:
print("true")
elif num == 5:
print("false")
else:
print(num) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
29dd50bcff3f11ac5d166c8318908bb812620709 | 97ca8019389d6da727ee31b4ae42a520c21ccd64 | /Remove Element.py | 89155750ec5d87d07bcbc2c3d8b9e3333a1ffb41 | [] | no_license | AngleMAXIN/LeetCode_Problems | db51ae2e9f7b81d1e581bfee8f9949b1dbf27642 | 58c0190e718956d6960e2a1ea363d0a2e8d76e06 | refs/heads/master | 2021-06-14T00:38:44.337383 | 2019-11-22T15:13:20 | 2019-11-22T15:13:20 | 113,146,794 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | #/usr/bin/env python
# -*- python: utf-8 -*-
# problem:
# Given nums = [3,2,2,3], val = 3,
# Your function should return length = 2, with the first two elements of nums being 2.
class Solution:
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
if not nums:
return 0
i = 0
while i < len(nums): #循环条件是i始终都在nums的长度内
if nums[i] == val:
del nums[i] #del掉一个元素后,nums的长度就会减一
else:
i = i + 1
return len(nums)
# 思路:
# 首先判断列表是否为空,如果是,则返回0,
# 如果不是,从零开始以此遍历列表的元素,
# 遇到与val相等的值,就把它del掉,注意,此时
# 的i不能加1,否则会错过前面的元素,应该是如果没
# 有遇到与val相等的值再加1
| [
"1678190746@qq.com"
] | 1678190746@qq.com |
206aec11e8d8d7adff9fbb61ae155763fb665704 | bc441bb06b8948288f110af63feda4e798f30225 | /flowable_sdk/model/collector_center/target_range_pb2.pyi | acf831ae8dbcd27511dec805a8061bef8572a5a8 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,179 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from flowable_sdk.model.collector_center.cmdb_host_search_pb2 import (
CmdbHostSearch as flowable_sdk___model___collector_center___cmdb_host_search_pb2___CmdbHostSearch,
)
from flowable_sdk.model.collector_center.cmdb_host_strategy_pb2 import (
CmdbHostStrategy as flowable_sdk___model___collector_center___cmdb_host_strategy_pb2___CmdbHostStrategy,
)
from flowable_sdk.model.collector_center.cmdb_relation_search_pb2 import (
CmdbRelationSearch as flowable_sdk___model___collector_center___cmdb_relation_search_pb2___CmdbRelationSearch,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class TargetRange(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
type = ... # type: typing___Text
@property
def cmdbRelationSearch(self) -> flowable_sdk___model___collector_center___cmdb_relation_search_pb2___CmdbRelationSearch: ...
@property
def cmdbHostSearch(self) -> flowable_sdk___model___collector_center___cmdb_host_search_pb2___CmdbHostSearch: ...
@property
def cmdbHostStrategy(self) -> flowable_sdk___model___collector_center___cmdb_host_strategy_pb2___CmdbHostStrategy: ...
def __init__(self,
*,
type : typing___Optional[typing___Text] = None,
cmdbRelationSearch : typing___Optional[flowable_sdk___model___collector_center___cmdb_relation_search_pb2___CmdbRelationSearch] = None,
cmdbHostSearch : typing___Optional[flowable_sdk___model___collector_center___cmdb_host_search_pb2___CmdbHostSearch] = None,
cmdbHostStrategy : typing___Optional[flowable_sdk___model___collector_center___cmdb_host_strategy_pb2___CmdbHostStrategy] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> TargetRange: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> TargetRange: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"cmdbHostSearch",b"cmdbHostSearch",u"cmdbHostStrategy",b"cmdbHostStrategy",u"cmdbRelationSearch",b"cmdbRelationSearch"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"cmdbHostSearch",b"cmdbHostSearch",u"cmdbHostStrategy",b"cmdbHostStrategy",u"cmdbRelationSearch",b"cmdbRelationSearch",u"type",b"type"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
89d977f1c10c4e2b4f3e188c6752cd828a68f39f | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/others/CenterMask2/models/centermask2/centermask/layers/iou_loss.py | 2368211598cd9535144ecba1f6491e5c97949dd6 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,677 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import torch
from torch import nn
class IOULoss(nn.Module):
def __init__(self, loc_loss_type='iou'):
super(IOULoss, self).__init__()
self.loc_loss_type = loc_loss_type
def forward(self, pred, target, weight=None,pos_mask=None):
pred_left = pred[:, 0]
pred_top = pred[:, 1]
pred_right = pred[:, 2]
pred_bottom = pred[:, 3]
target_left = target[:, 0]
target_top = target[:, 1]
target_right = target[:, 2]
target_bottom = target[:, 3]
target_aera = (target_left + target_right) * \
(target_top + target_bottom)
pred_aera = (pred_left + pred_right) * \
(pred_top + pred_bottom)
w_intersect = torch.min(pred_left, target_left) + \
torch.min(pred_right, target_right)
h_intersect = torch.min(pred_bottom, target_bottom) + \
torch.min(pred_top, target_top)
g_w_intersect = torch.max(pred_left, target_left) + \
torch.max(pred_right, target_right)
g_h_intersect = torch.max(pred_bottom, target_bottom) + \
torch.max(pred_top, target_top)
ac_uion = g_w_intersect * g_h_intersect
# add
if pos_mask is not None:
ac_uion = ac_uion + 1
ac_uion = ac_uion - pos_mask
area_intersect = w_intersect * h_intersect
area_union = target_aera + pred_aera - area_intersect
ious = (area_intersect + 1.0) / (area_union + 1.0)
gious = ious - (ac_uion - area_union) / ac_uion
if self.loc_loss_type == 'iou':
losses = -torch.log(ious)
elif self.loc_loss_type == 'linear_iou':
losses = 1 - ious
elif self.loc_loss_type == 'giou':
losses = 1 - gious
else:
raise NotImplementedError
if weight is not None:
return (losses * weight).sum()
else:
return losses.sum()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
2010af1393a38b6eeb38db3684d5fe1954404bc0 | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L82/82-77_MD_NVT_rerun/set_2.py | 587208a34f0653593d706645a6b17a656119210c | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L82/MD_NVT_rerun/ti_one-step/82_77/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_2.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_2.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
a8ec2943d15f417b5213574b8a7ae8aa115369b5 | f2978751f45a0e88a9761f6da4f66e0c6610bd9d | /hardPython/ex18.py | 23fc87c289dd639f00ab2099c76807db795bb09b | [] | no_license | mchenyuxiang/HardPython | c489dbd52b8e5c4fe71da824297f309529f237a7 | 1ab22e753d4e44d17cf203d2f325371c9ef4443d | refs/heads/master | 2021-01-12T12:27:49.067010 | 2017-09-07T04:42:56 | 2017-09-07T04:42:56 | 72,502,316 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | #--coding:utf-8--
def print_two(*args):
arg1,arg2 = args
print "arg1:%r,arg2:%r"%(arg1,arg2)
def print_two_again(arg1,arg2):
print "arg1:%r,arg2:%r"%(arg1,arg2)
def print_one(arg1):
print "arg1:%r"%arg1
def print_none():
print "I got nothin'."
print_two("Zed","Shaw")
print_two_again("Zed","Shaw")
print_one("First!")
print_none()
| [
"="
] | = |
9484fe6fdcd3e9324a6d4e8c49055758749ac739 | 435723c2128a8a125ebc0bd4fdd57b2e438174a0 | /tests/dust/screens/test_calzetti.py | 79521e13c7b4ac3ee9912ad4414f589fa6e5a4d0 | [] | no_license | galacticusorg/analysis-python | 824e7a0311329531e42eb06fc99298cf371ec75f | 09e03f8d25ab6711b4e2783454acca1422e7bc59 | refs/heads/master | 2022-03-10T18:39:03.766749 | 2022-03-03T14:49:25 | 2022-03-03T14:49:25 | 203,855,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #! /usr/bin/env python
import os,sys
import unittest
import numpy as np
from galacticus.dust.screens.manager import ScreenLaw
from galacticus.dust.screens.calzetti import Calzetti
from galacticus import rcParams
class TestCalzetti(unittest.TestCase):
def test_Calzetti(self):
rcParams.update("dustCalzetti","Rv",4.06)
DUST = Calzetti()
self.assertEqual(DUST.attrs["Rv"],4.06)
self.assertIsNotNone(DUST.curve)
wavelengths = np.array([0.01,0.12,1.0,2.2,5.0])
self.assertTrue(type(DUST.curve(wavelengths)),np.ndarray)
[self.assertTrue(type(DUST.curve(w)),float) for w in wavelengths]
return
if __name__ == "__main__":
unittest.main()
| [
"alex.i.merson@gmail.com"
] | alex.i.merson@gmail.com |
a9d79218e3a19e563dfc0d7fe2c48ed14e8ec8ef | 7e35f686eaa2acff06291457af4fd6680e2738c1 | /基础题目/已知三角形的两边长及其夹角,求第三边长.py | 536fa48e07707bb805f3991db25b73db83f33f85 | [] | no_license | cassieeric/Python-Exercises_Interview_questions | 5ba68296cbf777ac7bb9aeda57ee7a04856f613a | 1934e5ce82d77747d52229522dd1515a61dc80e2 | refs/heads/master | 2021-07-04T03:05:10.271998 | 2020-08-15T01:19:37 | 2020-08-15T01:19:37 | 150,816,386 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import math
a = int(input('请输入三角形的一条边长:'))
b = int(input('请输入三角形的另一条边长:'))
C = int(input('请输入三角形的两条边长的夹角度数:'))
# c = math.sqrt(a*a + b*b - 2*a*b*math.cos(C*math.pi/180))
c = math.sqrt(a**2 + b**2 - 2*a*b*math.cos(C*math.pi/180))
print(c)
| [
"noreply@github.com"
] | cassieeric.noreply@github.com |
0a5c4e84014d2a61d3b50297b4dcf5da4b196d9e | 9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d | /python/uline/uline/uline/handlers/app/bank/inlet/chain_batch_active.py | 815af06316321503854022567df4e0acf1ca2c17 | [] | no_license | apollowesley/Demo | f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8 | 471c4af95d3a7222d6933afc571a8e52e8fe4aee | refs/heads/master | 2021-02-15T04:01:51.590697 | 2018-01-29T01:44:29 | 2018-01-29T01:44:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,731 | py | # -*- coding: utf-8 -*-
'''
BEGIN
ajax请求 激活商户支付方式和状态
必选参数:
可选参数:
成功:
{"code": 200, "msg": "成功"}
失败:
{"code": 406, "msg": "失败"}
END
'''
import json
from tornado.httpclient import AsyncHTTPClient
from tornado.web import asynchronous, authenticated
from tornado import gen
from uline.handlers.baseHandlers import BkAdminHandler
from uline.public import common
from form import ActivatedBatchChainInfo
from uline.public import log
from uline.public.constants import ACTIVATED_STATUS, PAYMENT
from uline.settings import env, MESSAGE_URL
import tcelery
from uline.public.permit import check_permission
tcelery.setup_nonblocking_producer()
from uline.settings import CHAIN_LOGIN_URL
class ActivatedBatchChainInletStatusHandler(BkAdminHandler):
@authenticated
@check_permission
def prepare(self):
form = ActivatedBatchChainInfo(self)
self.f_rsp = common.f_rsp(code=406, msg='fail')
if not form.validate():
self.finish(self.f_rsp)
self.dt_id = form.dt_id.data
self.create_at = self.update_at = common.timestamp_now()
self.activated_status = 2
@asynchronous
@gen.coroutine
def get(self):
self.rsp = common.scc_rsp(code=200, msg='success')
with self.db.get_db() as cur:
try:
payment_types = yield self.get_unactivated_payment_type(cur)
dt_unactivated_payment_type = yield self.get_dt_unactivated_payment_type(cur)
if dt_unactivated_payment_type:
payment_name = '、'.join(PAYMENT[str(payment_type[0])]
for payment_type in dt_unactivated_payment_type if payment_type in payment_types)
if payment_name:
msg = u'渠道商{}的费率未激活'.format(payment_name)
rsp = common.f_rsp(code=407, msg=msg)
self.write(rsp)
self.finish()
return
self.email = yield self.get_email(cur)
yield self.activated_dt_payment(cur)
yield self.activated_dt_inlet(cur)
yield self.add_activated_dt_info(cur, payment_types)
self.dt_info = yield self.get_dt_info(cur)
except Exception as err:
log.exception.info(err)
cur.connection.rollback()
self.rsp = common.f_rsp(code=406, msg='fail')
count = yield self.is_send_email()
# 如果激活邮件发给渠道商则设置为待发送
if not count and self.dt_info[3] == 2:
self.save_activated_dt_email_info('ready', 3)
if not count and self.dt_info[3] == 1:
addition_info = u'(浦发银行厦门分行O2O平台合作伙伴)' if env == 'SPD_PROD' else ''
http_client = AsyncHTTPClient()
data = {
'env': env,
'reciver': self.dt_info[0],
'title': u'uline连锁商户激活信息',
'body': u"""
{1},您好:
以下帐号重要信息请注意保密:
优畅技术文档:http://docs.uline.cc
连锁商户编号:{2}
登录帐号:{0}
初始登录密码:开户时填写的联系手机号 (登录后要求修改初始密码)
登陆地址:{3}
温馨提示:
请妥善保管您的账号及密码,为安全起见,新申请的账号,首次登录后请立即修改管理员密码.
广州优畅信息技术有限公司{4}
客服电话:4008047555""".format(str(self.dt_info[2]) + ".mr", self.dt_info[1], self.dt_info[2], CHAIN_LOGIN_URL, addition_info)
}
url = MESSAGE_URL + '/v1/email'
response = yield http_client.fetch(url, body=json.dumps(data), method='POST')
if response.body == '1':
self.save_activated_dt_email_info('fail', 1)
else:
self.save_activated_dt_email_info('success', 2)
self.write(self.rsp)
@gen.coroutine
def get_unactivated_payment_type(self, cursor):
query = """select payment_type from dt_payment where dt_id=%(dt_id)s and activated_status=1;"""
cursor.execute(query, {"dt_id": self.dt_id})
ret = cursor.fetchall()
raise gen.Return(ret)
@gen.coroutine
def activated_dt_payment(self, cursor):
query = """update dt_payment set
activated_status=%(activated_status)s, update_at=%(update_at)s
where dt_id=%(dt_id)s and activated_status=1;"""
cursor.execute(query, {
"activated_status": self.activated_status,
"dt_id": self.dt_id,
"update_at": self.update_at
})
@gen.coroutine
def activated_dt_inlet(self, cursor):
query = """update dt_inlet_info set
activated_status=%(activated_status)s, update_at=%(update_at)s
where dt_id=%(dt_id)s"""
cursor.execute(query, {
"activated_status": self.activated_status,
"dt_id": self.dt_id,
"update_at": self.update_at
})
@gen.coroutine
def add_activated_dt_info(self, cursor, payment_types):
activated_user = yield self.get_bk_email(cursor)
for _, payment_type in enumerate(payment_types):
query = """insert into
activated_dt_info (dt_id, payment_type, comment, activated_user, activated_status, create_at) values(%s, %s,%s, %s, %s, %s)"""
cursor.execute(query, (self.dt_id, payment_type,
ACTIVATED_STATUS[str(self.activated_status)
], activated_user, self.activated_status,
self.create_at))
@gen.coroutine
def get_bk_email(self, cursor):
query = """select email from bk_user where bk_id=%s"""
cursor.execute(query, (self.current_user,))
ret = cursor.fetchone()
raise gen.Return(ret[0])
@gen.coroutine
def get_email(self, cursor):
query = """select email from dt_user where dt_id=%s"""
cursor.execute(query, (self.dt_id,))
ret = cursor.fetchone()
raise gen.Return(ret[0])
@gen.coroutine
def is_send_email(self):
query = """select count(1) from activated_dt_email_info where dt_id=%s and status=2"""
ret = self.db.selectSQL(query, (self.dt_id,))
raise gen.Return(ret[0])
@gen.coroutine
def get_dt_info(self, cursor):
query = """select
dt_inlet_info.email,
dt_inlet_info.dt_name,
dt_user.dt_id,
dt_inlet_info.activate_email_tag
from
dt_user
inner join dt_inlet_info on dt_inlet_info.dt_id=dt_user.dt_id
where dt_user.dt_id=%s"""
cursor.execute(query, (self.dt_id,))
ret = cursor.fetchone()
raise gen.Return(ret)
@gen.coroutine
def save_activated_dt_email_info(self, comment, status):
query = """insert into
activated_dt_email_info (dt_id,email,comment,status,create_at)
values (%s, %s, %s, %s, %s)"""
self.db.executeSQL(query, (self.dt_id, self.email, comment, status, self.create_at))
@gen.coroutine
def get_dt_unactivated_payment_type(self, cursor):
query = """SELECT payment_type FROM dt_payment
WHERE dt_id=(SELECT parent_id FROM dt_inlet_info WHERE dt_id=%s) and activated_status=1;"""
cursor.execute(query, (self.dt_id, ))
ret = cursor.fetchall()
raise gen.Return(ret if ret else '')
| [
"36821277@qq.com"
] | 36821277@qq.com |
9963aa430500b53625a4d9c20ddb4cc59760d221 | b8faf65ea23a2d8b119b9522a0aa182e9f51d8b1 | /vmraid/patches/v11_0/create_contact_for_user.py | db127c67728578fb09622c876e2f999ba7017b8d | [
"MIT"
] | permissive | vmraid/vmraid | a52868c57b1999a8d648441eb9cd05815204345d | 3c2e2a952003ba7ea2cf13673b9e79e127f4166e | refs/heads/main | 2022-07-29T18:59:28.585133 | 2022-04-22T08:02:52 | 2022-04-22T08:02:52 | 372,473,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import re
import vmraid
from vmraid.core.doctype.user.user import create_contact
def execute():
"""Create Contact for each User if not present"""
vmraid.reload_doc("integrations", "doctype", "google_contacts")
vmraid.reload_doc("contacts", "doctype", "contact")
vmraid.reload_doc("core", "doctype", "dynamic_link")
contact_meta = vmraid.get_meta("Contact")
if contact_meta.has_field("phone_nos") and contact_meta.has_field("email_ids"):
vmraid.reload_doc("contacts", "doctype", "contact_phone")
vmraid.reload_doc("contacts", "doctype", "contact_email")
users = vmraid.get_all("User", filters={"name": ("not in", "Administrator, Guest")}, fields=["*"])
for user in users:
if vmraid.db.exists("Contact", {"email_id": user.email}) or vmraid.db.exists(
"Contact Email", {"email_id": user.email}
):
continue
if user.first_name:
user.first_name = re.sub("[<>]+", "", vmraid.safe_decode(user.first_name))
if user.last_name:
user.last_name = re.sub("[<>]+", "", vmraid.safe_decode(user.last_name))
create_contact(user, ignore_links=True, ignore_mandatory=True)
| [
"sowrisurya@outlook.com"
] | sowrisurya@outlook.com |
bac7a1ad408b67c33cd3445a1697388b22649542 | db0e8aa3a92a30c9b1cc8da03725e951ff64f3f1 | /lenv/lib/python3.6/site-packages/django/contrib/sites/requests.py | 233d8409457a3ba44b5279fbcbbfb684e47d7e3a | [
"BSD-3-Clause"
] | permissive | shrey-c/DataLeakageDjango | ffeef61caa347520747fc70cf3f7f8b84a9610cf | a827c5a09e5501921f9fb97b656755671238dd63 | refs/heads/master | 2022-11-30T03:30:12.313025 | 2020-07-12T06:47:44 | 2020-07-12T06:47:44 | 242,569,637 | 6 | 1 | BSD-3-Clause | 2022-11-22T05:20:22 | 2020-02-23T18:33:04 | Python | UTF-8 | Python | false | false | 788 | py | from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RequestSite(object):
"""
A class that shares the primary interface of Site (i.e., it has
``domain`` and ``name`` attributes) but gets its data from a Django
HttpRequest object rather than from a database.
The save() and delete() methods raise NotImplementedError.
"""
def __init__(self, request):
self.domain = self.name = request.get_host()
def __str__(self):
return self.domain
def save(self, force_insert=False, force_update=False):
raise NotImplementedError('RequestSite cannot be saved.')
def delete(self):
raise NotImplementedError('RequestSite cannot be deleted.')
| [
"shreyansh.chheda@gmail.com"
] | shreyansh.chheda@gmail.com |
8bd1c9457bbd2f09555ef5cfa3d565624a9c733b | 0362023a283a492733336dbe899714236b9a06ef | /SEGUNDO/M3-POOpython/ejercicios/Alex Catalan BUENOejercicio_control_errores/Llista.py | a022f3c5a3d4da8707bdf6f17ecf9a045afa675d | [] | no_license | alexcatmu/CFGS_DAM | 205b8bcc6d09f8351894c5f70e1a354ff25c17a3 | 1a4384dee8833b5d8034fdf0909a0774cbe5b1c0 | refs/heads/master | 2020-04-07T18:27:15.225638 | 2019-12-17T19:34:39 | 2019-12-17T19:34:39 | 158,610,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | class Llista:
def __init__(self, num = 0):
if(not(isinstance(num,int))):
raise ValueError("Parametro erroneo, tipo de dato no valido", type(self))
elif(num < 0):
raise NotImplementedError("Parametro erroneo, debe ser un numero positivo", type(self))
self.array_nums = []
#num debe ser numerico
for i in range(num):
self.array_nums.append(None)
def elemento_posicion(self,element, position):
try:
self.array_nums[position] = element
except IndexError:
raise IndexError("Fuera de rango", type(self))
except ValueError:
raise ValueError("Valor no valido",type(self))
except TypeError:
raise TypeError("Debe ser de tipo entero", type(self))
except Exception as e:#Es de ValueError cuando introducimos una letra
print("Detectat error "+ str(e))
prueba = Llista(4)
print(prueba.array_nums)
prueba.elemento_posicion(4,1)
print(prueba.array_nums) | [
"alex.catalan.catalan@gmail.com"
] | alex.catalan.catalan@gmail.com |
b4ada9f63387a5be7a94668e4a18c2a727699ac6 | 96fab383fd53d404eb8fbcc5fb4fe814ffd82661 | /fastlink/fastlink/apps.py | 242681808afb7418a7df19b64329fdc8040bc0c0 | [] | no_license | dobestan/fastlink | 41809245f0cc25aecf690ea9db6ee7eec75ee227 | 1e4f25bf112a4007e6afe90530d80c88b43dda71 | refs/heads/master | 2021-01-10T13:01:22.268285 | 2016-02-04T16:57:01 | 2016-02-04T16:57:01 | 51,088,310 | 0 | 1 | null | 2016-02-04T16:57:01 | 2016-02-04T16:20:13 | Python | UTF-8 | Python | false | false | 174 | py | from django.apps import AppConfig
class FastlinkAppConfig(AppConfig):
name = 'fastlink'
def ready(self):
from .signals.post_save import post_save_resource
| [
"dobestan@gmail.com"
] | dobestan@gmail.com |
df1b6f0a5174307023b0b2d15db49f8e13ea1e1c | 085ce75a507df6e755cabb7a65c4a2a8c98762ba | /dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/zlib.py | 23b2d0fd3442cd37ac55b116c740129aefbca602 | [] | no_license | Arhzi/habr-docker-article | d44302db1fe157d81fe0818e762e82218f50e31f | 6fb094860b612e307beadaeb22981aa0ee64e964 | refs/heads/master | 2021-01-23T20:41:47.398025 | 2015-12-10T08:56:33 | 2015-12-10T08:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,927 | py | # encoding: utf-8
# module zlib
# from /usr/local/lib/python2.7/lib-dynload/zlib.so
# by generator 1.137
"""
The functions in this module allow compression and decompression using the
zlib library, which is based on GNU zip.
adler32(string[, start]) -- Compute an Adler-32 checksum.
compress(string[, level]) -- Compress string, with compression level in 0-9.
compressobj([level]) -- Return a compressor object.
crc32(string[, start]) -- Compute a CRC-32 checksum.
decompress(string,[wbits],[bufsize]) -- Decompresses a compressed string.
decompressobj([wbits]) -- Return a decompressor object.
'wbits' is window buffer size.
Compressor objects support compress() and flush() methods; decompressor
objects support decompress() and flush().
"""
# no imports
# Variables with simple values
DEFLATED = 8
DEF_MEM_LEVEL = 8
MAX_WBITS = 15
ZLIB_VERSION = '1.2.8'
Z_BEST_COMPRESSION = 9
Z_BEST_SPEED = 1
Z_DEFAULT_COMPRESSION = -1
Z_DEFAULT_STRATEGY = 0
Z_FILTERED = 1
Z_FINISH = 4
Z_FULL_FLUSH = 3
Z_HUFFMAN_ONLY = 2
Z_NO_FLUSH = 0
Z_SYNC_FLUSH = 2
__version__ = '1.0'
# functions
def adler32(string, start=None): # real signature unknown; restored from __doc__
"""
adler32(string[, start]) -- Compute an Adler-32 checksum of string.
An optional starting value can be specified. The returned checksum is
a signed integer.
"""
pass
def compress(string, level=None): # real signature unknown; restored from __doc__
"""
compress(string[, level]) -- Returned compressed string.
Optional arg level is the compression level, in 0-9.
"""
pass
def compressobj(level=None): # real signature unknown; restored from __doc__
"""
compressobj([level]) -- Return a compressor object.
Optional arg level is the compression level, in 0-9.
"""
pass
def crc32(string, start=None): # real signature unknown; restored from __doc__
"""
crc32(string[, start]) -- Compute a CRC-32 checksum of string.
An optional starting value can be specified. The returned checksum is
a signed integer.
"""
pass
def decompress(string, wbits=None, bufsize=None): # real signature unknown; restored from __doc__
"""
decompress(string[, wbits[, bufsize]]) -- Return decompressed string.
Optional arg wbits is the window buffer size. Optional arg bufsize is
the initial output buffer size.
"""
pass
def decompressobj(wbits=None): # real signature unknown; restored from __doc__
"""
decompressobj([wbits]) -- Return a decompressor object.
Optional arg wbits is the window buffer size.
"""
pass
# classes
class error(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| [
"sirnikolasd@yandex.ru"
] | sirnikolasd@yandex.ru |
16bde3d556f038d4d58862121c5017b380526751 | 44dbb043e52f00c9a797b1bea8f1df50dd621842 | /nntplib-example-3.py | 0c6beb82e255947dd47abacd9b5402d47a9aa76d | [] | no_license | peterdocter/standardmodels | 140c238d3bef31db59641087e3f3d5413d4baba1 | 7addc313c16b416d0970461998885833614570ad | refs/heads/master | 2020-12-30T16:59:30.489486 | 2016-12-13T06:32:03 | 2016-12-13T06:32:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | import nntplib
import string, random
import StringIO, rfc822
SERVER = "news.spam.egg"
GROUP = "comp.lang.python"
# connect to server
server = nntplib.NNTP(SERVER)
resp, count, first, last, name = server.group(GROUP)
for i in range(10):
try:
id = random.randint(int(first), int(last))
resp, id, message_id, text = server.article(str(id))
except (nntplib.error_temp, nntplib.error_perm):
pass # no such message (maybe it was deleted?)
else:
break # found a message!
else:
raise SystemExit
text = string.join(text, "\n")
file = StringIO.StringIO(text)
message = rfc822.Message(file)
for k, v in message.items():
print k, "=", v
print message.fp.read() | [
"415074476@qq.com"
] | 415074476@qq.com |
8529eaf059a12424bb2811930ff8608b45b8abcf | 09c39de5aad7b283cfac2f09a2b93e43086846d2 | /Unit 03 Conditionals and Control Flow/02 PygLatin/PygLatin PART2/9-Move it on back.py | 1cac4dbb960f695de75cbd184784c5864d08e7b4 | [
"MIT"
] | permissive | lpython2006e/python-samples | b4e84080259faf75b41fb2fd4fb9d2fbc9f857aa | b94ba67ce0d7798ecf796dadae206aa75da58301 | refs/heads/master | 2023-01-21T13:16:13.295163 | 2020-11-29T11:01:50 | 2020-11-29T11:01:50 | 278,653,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | pyg = 'ay'
original = raw_input('Enter a word:')
if len(original) > 0 and original.isalpha():
word = original.lower()
first = word[0]
new_word = word + first + pyg
print(pyg)
else:
print('empty')
| [
"lent@hivetech.vn"
] | lent@hivetech.vn |
daa6063529c244841de6f2f0caaed32db43d8119 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/40/usersdata/130/24581/submittedfiles/funcoes.py | 4052f0d988cac6a87cf753291e3a58713709e011 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #ARQUIVO COM SUAS FUNCOES
def absoluto(x):
if x<0:
x=x*(-1)
resultado=x
return resultado
def pi(x):
So=3
a=2
for i in range(1,x+1,1):
if (a//2)%2==0:
So=So-4/(a*(a+1)*(a+2))
else:
So=So+4/(a*(a+1)*(a+2))
a=a+2
pi=So
return pi
def cosseno(x):
S=1
i=2
while True:
x=i
M=1
for i in range(1,x+1,1):
M=M*i
if((i//2)%2)==0:
S=S+((x**i)/M)
else:
S=S-((x**i)/M)
i=i+2
if absoluto(S)>=e:
break
def aurea(x):
A=2*x
return A | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
86239dd8825f9994ac7cc262b1f6c1887f9b5f4b | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/dmlc_minpy/minpy-master/examples/nn/cnn_customop.py | 9a3a2c812faee140c621d87942c0897b9d004406 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 4,212 | py | """Convolution Neural Network example with both MinPy ndarray and MXNet symbol."""
import sys
import argparse
import minpy
import minpy.numpy as np
import numpy
import mxnet as mx
from minpy.nn.io import NDArrayIter
# Can also use MXNet IO here
# from mxnet.io import NDArrayIter
from minpy.core import Function
from minpy.nn import layers
from minpy.nn.model import ModelBase
from minpy.nn.solver import Solver
from examples.utils.data_utils import get_CIFAR10_data
from minpy.primitive import customop
# Please uncomment following if you have GPU-enabled MXNet installed.
# from minpy.context import set_context, gpu
# set_context(gpu(0)) # set the global context as gpu(0)
batch_size = 128
input_size = (3, 32, 32)
flattened_input_size = 3 * 32 * 32
hidden_size = 512
num_classes = 10
@customop('numpy')
def my_softmax(x, y):
probs = numpy.exp(x - numpy.max(x, axis=1, keepdims=True))
probs /= numpy.sum(probs, axis=1, keepdims=True)
N = x.shape[0]
loss = -numpy.sum(numpy.log(probs[numpy.arange(N), y])) / N
return loss
def my_softmax_grad(ans, x, y):
def grad(g):
N = x.shape[0]
probs = numpy.exp(x - numpy.max(x, axis=1, keepdims=True))
probs /= numpy.sum(probs, axis=1, keepdims=True)
probs[numpy.arange(N), y] -= 1
probs /= N
return probs
return grad
my_softmax.def_grad(my_softmax_grad)
class ConvolutionNet(ModelBase):
def __init__(self):
super(ConvolutionNet, self).__init__()
# Define symbols that using convolution and max pooling to extract better features
# from input image.
net = mx.sym.Variable(name='X')
net = mx.sym.Convolution(
data=net, name='conv', kernel=(7, 7), num_filter=32)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.Pooling(
data=net,
name='pool',
pool_type='max',
kernel=(2, 2),
stride=(2, 2))
net = mx.sym.Flatten(data=net)
# Create forward function and add parameters to this model.
self.conv = Function(
net, input_shapes={'X': (batch_size, ) + input_size}, name='conv')
self.add_params(self.conv.get_params())
# Define ndarray parameters used for classification part.
output_shape = self.conv.get_one_output_shape()
conv_out_size = output_shape[1]
self.add_param(name='w1', shape=(conv_out_size, hidden_size)) \
.add_param(name='b1', shape=(hidden_size,)) \
.add_param(name='w2', shape=(hidden_size, num_classes)) \
.add_param(name='b2', shape=(num_classes,))
def forward(self, X, mode):
out = self.conv(X=X, **self.params)
out = layers.affine(out, self.params['w1'], self.params['b1'])
out = layers.relu(out)
out = layers.affine(out, self.params['w2'], self.params['b2'])
return out
def loss(self, predict, y):
return my_softmax(predict, y)
def main(args):
# Create model.
model = ConvolutionNet()
# Create data iterators for training and testing sets.
data = get_CIFAR10_data(args.data_dir)
train_dataiter = NDArrayIter(
data=data['X_train'],
label=data['y_train'],
batch_size=batch_size,
shuffle=True)
test_dataiter = NDArrayIter(
data=data['X_test'],
label=data['y_test'],
batch_size=batch_size,
shuffle=False)
# Create solver.
solver = Solver(
model,
train_dataiter,
test_dataiter,
num_epochs=10,
init_rule='gaussian',
init_config={'stdvar': 0.001},
update_rule='sgd_momentum',
optim_config={'learning_rate': 1e-3,
'momentum': 0.9},
verbose=True,
print_every=20)
# Initialize model parameters.
solver.init()
# Train!
solver.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Multi-layer perceptron example using minpy operators")
parser.add_argument(
'--data_dir',
type=str,
required=True,
help='Directory that contains cifar10 data')
main(parser.parse_args())
| [
"659338505@qq.com"
] | 659338505@qq.com |
4a5d4da532f4802dd20815dfac7206ad889904b8 | dd8227454b817ccf2ceb24b3dfd4260d4ded7a72 | /scripts/item/consume_2435043.py | 13ed0760599dc002e3b64452a101b17cdadd7c63 | [
"MIT"
] | permissive | Snewmy/swordie | 0dd3c17808b064c2cb2bd9576b51daf01ae5d686 | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | refs/heads/master | 2023-06-30T21:14:05.225798 | 2021-07-06T14:32:39 | 2021-07-06T14:32:39 | 389,497,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | # Heroes Phantom Damage Skin
success = sm.addDamageSkin(2435043)
if success:
sm.chat("The Heroes Phantom Damage Skin has been added to your account's damage skin collection.")
| [
"vcalheirosdoc@gmail.com"
] | vcalheirosdoc@gmail.com |
2e8e809c480deb7a245b236f0b6ba0a2c56aee2b | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/operations/__init__.py | 00fccdc398f92af40bdf0644db345b88ab2f66a4 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 613 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._anomaly_detector_client_operations import AnomalyDetectorClientOperationsMixin
__all__ = [
'AnomalyDetectorClientOperationsMixin',
]
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
7dc56d8754b8e16411207641759855115ea4c5e8 | 1b7947f81b2a82d3ff377c39e7228f1aaf743277 | /day2/urllib_demo2/music163.py | 77092b703cf3519ae89e972c387e284ff06fd398 | [] | no_license | gaohj/python1902crawer | 92fbaeb8151bd8a71f3c87a1566f6dcded3ef745 | aa0dce86ba50db12fb0262a62ccc6a9ab60ad0c2 | refs/heads/master | 2020-08-14T20:08:47.775547 | 2019-10-25T06:55:21 | 2019-10-25T06:55:21 | 215,226,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py |
import urllib.request
import urllib.parse
import json
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
}
#post接口
url = "https://music.163.com/weapi/v1/resource/comments/A_PL_0_2783240?csrf_token="
data = {
"params":"JKDq4LtELoh0m6USC17tdjp/BdF7vswdDOMZs7F+LHW3GOVspWTYYszzBIKCdEcWn2Q2eG1UHmhbYVwrSNwrGg4ljF2MvPTnpXDHRdvHw2nu1bt/uYCa1gEhMGQENYuBUwfYG/lLSYROzcPgyoIeGgfz0ioUviVXJPehwweNGsk8Awo5KLnpXvYfsAbjtrZB0yRWtFluWojJpHIoDquyClYfaSRLEb1WL4vNAPuA8BI=",
"encSecKey":"bb8a4561f8d79aca80d57b8f9d21576dfb866548feadf33a8f4c4bb884f18cc2e8b0d7fe81d18bdd565024b56e2e546ea75246c90bf6305c06fc1617fce4bfba10b7ef39e2fd50aacdad303ea615aff20af49c11a6a382d33516536b790a74dc4a02ff76178ea548a435cbe8c81b39e88cea9afb4b18aa57293d4cfc56c503f5",
}
data = urllib.parse.urlencode(data).encode()
#str -> bytes encode
#bytes -> str decode
req = urllib.request.Request(url=url,headers=headers,data=data)
response = urllib.request.urlopen(req)
content = response.read().decode()
data_dict = json.loads(content)
hotComments = data_dict['hotComments']
for hotComment in hotComments:
nickname = hotComment['user']['nickname']
content = hotComment['content']
print(nickname,":",content)
| [
"gaohj@126.com"
] | gaohj@126.com |
34018ff3a28fba4a4eeedc2a0596b54872e6bdda | 44f96b540afca6447535cdc5ab15ee34cc2f591b | /FirstProj/ShoppingApp/models.py | 74bc3117f80263ad4832cae5bd87cb68d973b456 | [] | no_license | Shreya549/FirstDjangoApp | 087ddf30d8d73f451da473fa12fad7633b4b4142 | 2ad5d98d0c49d9b82fc7a431c8df111f3f6a2353 | refs/heads/master | 2022-04-10T07:43:23.344458 | 2020-03-28T13:10:33 | 2020-03-28T13:10:33 | 250,259,665 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from django.db import models
# Create your models here.
class Product(models.Model):
name = models.CharField(max_length = 100)
price = models.IntegerField()
desc = models.TextField()
img = models.ImageField(upload_to = 'pics')
| [
"shreya.chatterjee2018@vitstudent.ac.in"
] | shreya.chatterjee2018@vitstudent.ac.in |
a01ef31f02f164ca0ed8d5220c4573c32ed3678b | 2eba5ec3f7462ed9f8f8e858c258f73bfeda6f6a | /Test1/윤성우_nene_ver3.py | fd464614fe59fceb309b05e2a354899b90dd1fbc | [] | no_license | sungwooman91/Test | d16e93e40a74054c860f9f7fdd2bb70f0189fc43 | 59b22517bc2c6e60b9d0604b878d79a1624530da | refs/heads/master | 2021-09-06T09:55:45.740088 | 2018-02-05T07:55:57 | 2018-02-05T07:55:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | import urllib.request
import os
from pandas import DataFrame
import xml.etree.ElementTree as ET
import time
print("START")
result = []
dir_name = "V3_BigData"
dir_nene = "Nene_data"
dir_delimiter = "\\"
file_name = "nene"
count = "nene_count.txt"
csv = '.csv'
result_limit = 3
time_table = time.strftime("%c",time.localtime(time.time()))
time_table = time_table.replace("/","_")
time_table = time_table.replace(":","_")
def make_dir(number):
os.mkdir(dir_name + dir_delimiter + dir_nene + str(number))
return None
def make_nene(index_number,file_number):
dir_totalname = dir_name + dir_delimiter + dir_nene + str(index_number) + dir_delimiter + file_name + '_' + str(file_number) + csv
nene_table.to_csv(dir_totalname, encoding="cp949", mode='w', index=True)
return None
response = urllib.request.urlopen('http://nenechicken.com/subpage/where_list.asp?target_step2=%s&proc_type=step1&target_step1=%s'%(urllib.parse.quote('전체'),urllib.parse.quote('전체')))
xml = response.read().decode('UTF-8')
root = ET.fromstring(xml)
for element in root.findall('item'):
store_name = element.findtext('aname1')
store_sido = element.findtext('aname2')
store_gungu = element.findtext('aname3')
store_address = element.findtext('aname5')
result.append([store_name]+[store_sido]+[store_gungu]+[store_address])
nene_table = DataFrame(result,columns=('store','sido','gungu','store_address'))
try:
os.mkdir(dir_name)
except:pass
try:
with open(dir_name + dir_delimiter + count, 'r') as file:
file_number = file.readline()
file_number = int(file_number)
index_num = int(file_number/result_limit)
if file_number % result_limit != 0:
index_num += 1
if file_number % result_limit == 1:
make_dir(index_num)
make_nene(index_num, time_table)
file_number +=1
with open(dir_name + dir_delimiter + count, 'w') as file:
file.write(str(file_number))
except FileNotFoundError:
with open(dir_name + dir_delimiter + count, 'w') as file:
file.write("2")
make_dir(1)
make_nene(1,time_table)
print("END!!!") | [
"you@example.com"
] | you@example.com |
ff0f35ac3c3c28da4438125b3998d1c2ce5d5da5 | ba80ca143ba35fd481730786a27ebdb1f88ce835 | /algorithm/oop/vending_machine/money/money.py | 98038d955a7311a82085d5b84c181fa32bd45995 | [] | no_license | uiandwe/TIL | c541020b65adc53578aeb1c3ba4c6770b3b2e8b3 | 186544469374dd0279099c6c6aa7555ee23e42fe | refs/heads/master | 2022-02-15T08:33:07.270573 | 2022-01-01T15:22:54 | 2022-01-01T15:22:54 | 63,420,931 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # -*- coding: utf-8 -*-
class Money:
VALID_MONEY = 'money의 타입은 int 형 입니다.'
def __init__(self, money):
self.valid_money(money)
self._money = money
def valid_money(self, money):
assert type(money) == int, Money.VALID_MONEY
@property
def money(self):
return self._money
def __str__(self):
return "{}".format(self.money)
def __ge__(self, other):
return self._money >= other._money
def __sub__(self, other):
return self._money - other._money
| [
"noreply@github.com"
] | uiandwe.noreply@github.com |
aed762f6d9dea07279af3b949111d028114c4e4d | 172f8b508c89b47376bd41cfc167cabf9ba94cb8 | /dropthesoap/service.py | 4976d7b871415ed9e151bb0fd100573a2dba0401 | [
"MIT"
] | permissive | d3sbar/dropthesoap | 1ee3d8ba744d5756001a339793cbeb2bc9666f1a | 73fc9a9aa725e5bc568c7896fd0d9a2af858a352 | refs/heads/master | 2020-05-24T04:22:25.074507 | 2019-05-16T19:46:01 | 2019-05-16T19:46:01 | 187,091,092 | 0 | 0 | null | 2019-05-16T19:42:54 | 2019-05-16T19:42:54 | null | UTF-8 | Python | false | false | 6,931 | py | from .schema import xs, wsdl, soap
from .schema.model import Namespace, get_root, etree
class Request(object):
def __init__(self, transport_request, envelope):
self.transport_request = transport_request
self.envelope = envelope
self.header = None
class Method(object):
def __init__(self, func, request, response):
self.func = func
self.request = request
self.response = response
self.need_context = False
self.header = None
def __call__(self, ctx, request):
if self.header:
if ctx.envelope.Header:
ctx.header = self.header.from_node(ctx.envelope.Header._any[0])
else:
ctx.header = None
args = [ctx] if self.need_context else []
if self.request._unpack_params:
for name in self.request._params:
args.append(getattr(request, name))
else:
args.append(request)
return self.response.normalize(self.func(*args))
class Fault(Exception):
def __init__(self, code, message):
self.code = code
Exception.__init__(self, message)
def make_message_element(name, obj):
if isinstance(obj, xs.element):
return obj
else:
return xs.element(name, obj)
class Service(object):
def __init__(self, name, tns, additional_schemas=None):
self.name = name
self.methods = {}
self.req2method = {}
self.schema = xs.schema(Namespace(tns))
self.additional_schemas = additional_schemas or []
def expose(self, request=None, response=None):
if (callable(request)
and not isinstance(request, (xs.Type, xs.element))
and type(request) is not type):
decorated_func = request
request = None
else:
decorated_func = None
def inner(func):
name = func.__name__
req_name = name + 'Request'
if request is None:
defaults = func.__defaults__
if defaults:
names = func.__code__.co_varnames[:func.__code__.co_argcount][-len(defaults):]
else:
names = []
defaults = []
celements = [xs.element(n, t) for n, t in zip(names, defaults)]
request_elem = xs.element(req_name)(xs.cts(*celements))
request_elem._params = names
request_elem._unpack_params = True
else:
request_elem = make_message_element(req_name, request)
req_name = request_elem.name
request_elem._unpack_params = False
self.schema(request_elem)
resp_name = name + 'Response'
if response is None:
response_elem = self.schema[resp_name]
else:
response_elem = make_message_element(resp_name, response)
self.schema(response_elem)
method = Method(func, request_elem, response_elem)
self.methods[name] = method
self.req2method[req_name] = method
return func
return inner(decorated_func) if decorated_func else inner
def wraps(self, original_func):
name = original_func.__name__
def inner(func):
self.methods[name].func = func
self.methods[name].need_context = True
func.__name__ = name
return func
return inner
def header(self, header):
def inner(func):
rheader = header
if isinstance(rheader, basestring):
rheader = self.schema[rheader]
self.methods[func.__name__].header = rheader
return func
return inner
def get_wsdl(self, url):
defs = wsdl.definitions.instance()
defs.types = wsdl.types.instance(
_any=map(get_root, [self.schema] + self.additional_schemas))
messages = defs.message = []
port = wsdl.portType.instance(name='%sPortType' % self.name)
operations = port.operation = []
defs.portType = [port]
binding = wsdl.binding.instance(
name='%sBinding' % self.name, type='tns:%sPortType' % self.name,
binding = wsdl.soap_binding.instance(transport='http://schemas.xmlsoap.org/soap/http', style='document'))
defs.binding = [binding]
boperations = binding.operation = []
for name, method in self.methods.iteritems():
req_name = method.request.name
resp_name = method.response.name
messages.append(wsdl.message.instance(name=req_name,
part=wsdl.part.instance(name='parameters', element='tns:%s' % req_name)))
messages.append(wsdl.message.instance(name=resp_name,
part=wsdl.part.instance(name='parameters', element='tns:%s' % resp_name)))
operations.append(wsdl.operation.instance(name=name,
input=wsdl.input.instance(message='tns:%s' % req_name),
output=wsdl.output.instance(message='tns:%s' % resp_name)))
binput = wsdl.input.instance(body=wsdl.soap_body.instance(use='literal'))
if method.header:
binput.header = wsdl.soap_header.instance(
use='literal', message='tns:%s' % method.header.name, part=method.header.name)
boperations.append(wsdl.operation.instance(
name=name,
operation=wsdl.soap_operation.instance(soapAction=name),
input=binput,
output=wsdl.output.instance(body=wsdl.soap_body.instance(use='literal'))))
for header in set(r.header for r in self.methods.itervalues() if r.header):
messages.append(wsdl.message.instance(name=header.name,
part=wsdl.part.instance(name=header.name, element='tns:%s' % header.name)))
defs.service = [wsdl.service.instance(
name=self.name,
port=wsdl.port.instance(
name='%sPort' % self.name,
binding='tns:%sBinding' % self.name,
address=wsdl.soap_address.instance(location=url))
)]
tree = get_root(defs)
tree.attrib['targetNamespace'] = self.schema.targetNamespace.namespace
tree.attrib['xmlns:tns'] = self.schema.targetNamespace.namespace
return etree.tostring(tree)
def call(self, transport_request, xml):
try:
envelope = soap.schema.fromstring(xml)
request = self.schema.from_node(envelope.Body._any[0])
ctx = Request(transport_request, envelope)
method = self.req2method[request.tag]
response = method(ctx, request)
except Fault as e:
response = soap.Fault.instance(faultcode=e.code, faultstring=e.message)
return response
| [
"bobrov@vl.ru"
] | bobrov@vl.ru |
1e4ae96ba11ca841546ece2806bf73b10ec15673 | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/live_20210419192449.py | f23ac4d6aa23ef684980fb5dfdd0b471fdb2e457 | [] | no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,586 | py | import os
import cv2
import sys
import numpy as np
from models import SimpleModel
from preprocess import Datasets
import hyperparameters as hp
import tensorflow as tf
from skimage.transform import resize
from PIL import Image, ImageFont, ImageDraw
from scipy.spatial import distance as dist
from imutils import face_utils
from imutils.video import VideoStream
import fastai
import fastai.vision
import imutils
import argparse
import time
import dlib
def createPixelArray(arr):
array = np.array(arr, dtype=np.uint8)
array = array.reshape((48, 48, 1))
img = array / 255.
return img
weights_str = "/Users/Natalie/Desktop/cs1430/CV-final-project/code/checkpoints/simple_model/041321-113618/your.weights.e015-acc0.6121.h5"
os.chdir(sys.path[0])
model = tf.keras.models.load_model(weights_str)
#model = create_model()
model(tf.keras.Input(shape=(hp.img_size, hp.img_size, 3)))
model.load_weights(weights_str, by_name=False)
# model.compile(
# optimizer=model.optimizer,
# loss=model.loss_fn,
# metrics=["sparse_categorical_accuracy"],
# )
#face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
vs = VideoStream(src=0).start()
start = time.perf_counter()
data = []
time_value = 0
out = cv2.VideoWriter(
"liveoutput.avi", cv2.VideoWriter_fourcc("M", "J", "P", "G"), 10, (450, 253)
)
while True:
frame = vs.read()
frame = imutils.resize(frame, width=450)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(48, 48))
for coords in face_coord:
X, Y, w, h = coords
H, W, _ = frame.shape
X_1, X_2 = (max(0, X - int(w)), min(X + int(1.3 * w), W))
Y_1, Y_2 = (max(0, Y - int(0.1 * h)), min(Y + int(1.3 * h), H))
img_cp = gray[Y_1:Y_1+48, X_1:X_1+48].copy()
img_mod = createPixelArray(img_cp)
prediction = model.predict(img_mod)
prediction = np.argmax(prediction)
cv2.rectangle(
img=frame,
pt1=(X_1, Y_1),
pt2=(X_2, Y_2),
color=(128, 128, 0),
thickness=2,
)
cv2.putText(
frame,
str(prediction),
(10, frame.shape[0] - 25),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(225, 255, 255),
2,)
cv2.imshow("frame", frame)
out.write(frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
vs.stop()
out.release()
cv2.destroyAllWindows()
| [
"natalie_rshaidat@brown.edu"
] | natalie_rshaidat@brown.edu |
d8fab852c381fe6affdd605d97dda1f39af63a9f | cf7fed790b733b9a21ec6c65970e9346dba103f5 | /pyqt/getting_started/pyqt_thread.py | 9596b7a25da1a0f3c83e1b6191afc1df4f46539d | [
"MIT"
] | permissive | CospanDesign/python | a582050993efc1e6267683e38dd4665952ec6d40 | a3d81971621d8deed2f1fc738dce0e6eec0db3a7 | refs/heads/master | 2022-06-20T15:01:26.210331 | 2022-05-29T01:13:04 | 2022-05-29T01:13:04 | 43,620,126 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | #! /usr/bin/python
import sys
import time
from PyQt4 import QtCore
from PyQt4 import QtGui
class MyApp(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setGeometry(300, 300, 280, 600)
self.setWindowTitle('threads')
self.layout = QtGui.QVBoxLayout(self)
self.testButton = QtGui.QPushButton("test")
self.connect(self.testButton, QtCore.SIGNAL("released()"), self.test)
self.listwidget = QtGui.QListWidget(self)
self.layout.addWidget(self.testButton)
self.layout.addWidget(self.listwidget)
self.threadPool = []
def add(self, text):
""" Add item to list widget """
print "Add: " + text
self.listwidget.addItem(text)
self.listwidget.sortItems()
def addBatch(self,text="test",iters=6,delay=0.3):
""" Add several items to list widget """
for i in range(iters):
time.sleep(delay) # artificial time delay
self.add(text+" "+str(i))
def addBatch2(self,text="test",iters=6,delay=0.3):
for i in range(iters):
time.sleep(delay) # artificial time delay
self.emit( QtCore.SIGNAL('add(QString)'), text+" "+str(i) )
def test(self):
self.listwidget.clear()
# adding in main application: locks ui
#self.addBatch("_non_thread",iters=6,delay=0.3)
# adding by emitting signal in different thread
self.threadPool.append( WorkThread() )
self.connect( self.threadPool[len(self.threadPool)-1], QtCore.SIGNAL("update(QString)"), self.add )
self.threadPool[len(self.threadPool)-1].start()
# generic thread using signal
self.threadPool.append( GenericThread(self.addBatch2,"from generic thread using signal ",delay=0.3) )
self.disconnect( self, QtCore.SIGNAL("add(QString)"), self.add )
self.connect( self, QtCore.SIGNAL("add(QString)"), self.add )
self.threadPool[len(self.threadPool)-1].start()
class WorkThread(QtCore.QThread):
def __init__(self):
QtCore.QThread.__init__(self)
def __del__(self):
self.wait()
def run(self):
for i in range(6):
time.sleep(0.3) # artificial time delay
self.emit( QtCore.SIGNAL('update(QString)'), "from work thread " + str(i) )
return
class GenericThread(QtCore.QThread):
def __init__(self, function, *args, **kwargs):
QtCore.QThread.__init__(self)
self.function = function
self.args = args
self.kwargs = kwargs
def __del__(self):
self.wait()
def run(self):
self.function(*self.args,**self.kwargs)
return
if __name__ == "__main__":
# run
app = QtGui.QApplication(sys.argv)
test = MyApp()
test.show()
app.exec_()
| [
"cospan@gmail.com"
] | cospan@gmail.com |
9ce3ee0c55905c85b6ffaeba4bdb6394819a76cc | d2479998a965eb43372920effeaf32c9c500603e | /docs/scripts/uiexample.py | 30f2db9a2d9e74bb996bc94f040a45590c8a941a | [
"BSD-2-Clause"
] | permissive | cy-fir/flexx | 0f246e0c4a5e6d4b29946c8fb0f73790fa35d07f | 343de1b1549975a365962274f264a48e56d2305e | refs/heads/master | 2021-01-18T09:40:11.624129 | 2016-06-04T23:07:37 | 2016-06-04T23:07:37 | 60,951,284 | 1 | 0 | null | 2016-06-12T06:39:50 | 2016-06-12T06:39:50 | null | UTF-8 | Python | false | false | 5,612 | py | """
Small sphinx extension to show a UI example + result
"""
import os
import sys
import hashlib
import warnings
import subprocess
import importlib.util
from sphinx.util.compat import Directive
from docutils import nodes
from flexx import app
# Directory layout: this script lives in docs/scripts/; generated example
# pages are written under <docs>/_build/html/ui/examples/.
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(THIS_DIR))
HTML_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '_build', 'html'))
# Ensure the output directories exist (import-time side effect; assumes
# HTML_DIR itself already exists -- TODO confirm Sphinx creates it first).
if not os.path.isdir(HTML_DIR + '/ui'):
    os.mkdir(HTML_DIR + '/ui')
if not os.path.isdir(HTML_DIR + '/ui/examples'):
    os.mkdir(HTML_DIR + '/ui/examples')
# Template used to wrap bare snippet code into a minimal flexx app; snippet
# lines get appended inside init() (hence the trailing open indentation).
SIMPLE_CODE_T = """
from flexx import app, ui
class App(ui.Widget):
    def init(self):
""" # mind the indentation
class uiexample(nodes.raw):
    # Custom docutils node (a nodes.raw subclass) that carries the raw HTML
    # produced for a UI example; it is emitted into the doctree as-is.
    pass
def create_ui_example(filename, to_root, height=300):
    """ Given a filename, export the containing app to HTML, return
    generated HTML. Needs to be done via filename, not direct code, so
    that PyScript can obtain source.

    Parameters:
        filename: absolute path of the example's .py file.
        to_root: relative path prefix from the embedding page to the docs root.
        height: iframe height in pixels for the embedding HTML.
    """
    code = open(filename, 'rb').read().decode()
    fname = os.path.split(filename)[1]
    # Exported page lives under <HTML_DIR>/ui/examples/<name>.html.
    filename_parts = 'ui', 'examples', fname[:-3] + '.html'
    filename_abs = os.path.join(HTML_DIR, *filename_parts)
    filename_rel = to_root + '/' + '/'.join(filename_parts)

    # Import the example as a throwaway module so its widget class is available.
    try:
        spec = importlib.util.spec_from_file_location("example", filename)
        m = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(m)
    except Exception as err:
        # On import failure, write the error message as the page content so
        # the docs still build; the iframe gets a small fixed height (60 px).
        err_text = str(err)
        msg = 'Example not generated. <pre>%s</pre>' % err_text
        if os.environ.get('READTHEDOCS', False):
            msg = 'This example is not build on read-the-docs. <pre>%s</pre>' % err_text
        open(filename_abs, 'wt', encoding='utf-8').write(msg)
        warnings.warn('Could not import ui example: %s' % err_text)
        return get_html(filename_rel, 60)

    # Determine which class in the module is the app widget: well-known names
    # first, then an explicit "# doc-export: Name" marker on the first line.
    line1 = code.splitlines()[0]
    class_name = None
    if 'class App' in code:
        class_name = 'App'
    elif 'class MyApp' in code:
        class_name = 'MyApp'
    elif 'class Example' in code:
        class_name = 'Example'
    elif line1.startswith('# doc-export:'):
        class_name = line1.split(':', 1)[1].strip()
    #
    if class_name:
        assert class_name.isidentifier()
    else:
        msg = 'Could not determine app widget class in:<pre>%s</pre>' % code
        warnings.warn(msg)
        open(filename_abs, 'wt', encoding='utf-8').write(msg)
        return get_html(filename_rel, height)

    # Export the widget to a standalone HTML page via flexx.
    try:
        app.export(m.__dict__[class_name], filename_abs, False)
    except Exception as err:
        # Write the failure into the page, then abort the build loudly.
        err_text = str(err)
        msg = 'Example not generated. <pre>%s</pre>' % err_text
        open(filename_abs, 'wt', encoding='utf-8').write(msg.replace('\\n', '<br />'))
        raise RuntimeError('Could not export ui example: %s\n%s' % (err_text, code))
        #print('Could not create ui example: %s\n%s' % (err_text, code) )
    return get_html(filename_rel, height)
def get_html(filename_rel, height):
    """ Get the html to embed the given page into another page using an iframe.

    ``filename_rel`` is the page's path relative to the embedding page;
    ``height`` is the wrapper div's height in pixels.
    """
    # Inline styles: the "open in new tab" link, the resizable wrapper div,
    # and the iframe filling that div.
    link_style = 'font-size:small; float:right;'
    box_style = ('width: 500px; height: %dpx; align: center; resize:both; '
                 'overflow: hidden; box-shadow: 5px 5px 5px #777;' % height)
    frame_style = 'width: 100%; height: 100%; border: 2px solid #094;'

    # Show the app in an iframe, wrapped in a resizable div.
    parts = [
        "<a target='new' href='%s' style='%s'>open in new tab</a>" % (filename_rel, link_style),
        "<div style='%s'>" % box_style,
        "<iframe src='%s' style='%s'>iframe not supported</iframe>" % (filename_rel, frame_style),
        "</div>",
    ]
    return ''.join(parts)
def visit_uiexample_html(self, node):
    """Sphinx HTML visitor: persist the example code to a hashed .py file,
    export it via create_ui_example, and append the embedding HTML."""
    global should_export_flexx_deps  # NOTE(review): never assigned here; looks vestigial — confirm
    # Fix for rtd: nodes restored from a pickled doctree may lack .code.
    if not hasattr(node, 'code'):
        return
    # Get code
    code = ori_code = node.code.strip() + '\n'
    # Is this a simple example? If it has no imports, wrap it in the
    # boilerplate App template.
    # NOTE(review): the join string below appears to have lost its original
    # indentation in transit — it should indent lines to match SIMPLE_CODE_T.
    if 'import' not in code:
        code = SIMPLE_CODE_T + '\n '.join([line for line in code.splitlines()])
    # Get id and filename: content hash keys the generated files, so identical
    # examples across pages share one export.
    this_id = hashlib.md5(code.encode('utf-8')).hexdigest()
    fname = 'example%s.html' % this_id  # NOTE(review): computed but unused
    filename_py = os.path.join(HTML_DIR, 'ui', 'examples', 'example%s.py' % this_id)
    # Write Python file
    with open(filename_py, 'wb') as f:
        f.write(code.encode())
    # Get html file
    html = create_ui_example(filename_py, '..', node.height)
    self.body.append(html + '<br />')
def depart_uiexample_html(self, node):
    # Nothing to close: visit_uiexample_html emits complete, self-contained HTML.
    pass
class UIExampleDirective(Directive):
    """The ``.. uiexample::`` directive: renders the example's source as a
    highlighted code block followed by a live, embedded iframe.

    An optional integer on the first content line sets the iframe height in
    pixels (default 300) and is stripped from the displayed code.
    """
    has_content = True

    def run(self):
        source = '\n'.join(self.content)
        # First line may be a bare integer giving the iframe height.
        try:
            height = int(self.content[0])
        except Exception:
            height = 300
        else:
            source = source.split('\n', 1)[1].strip()
        # Highlighted source listing.
        listing = nodes.literal_block(source, source)
        listing['language'] = 'python'
        listing['linenos'] = False
        # Placeholder node carrying what the HTML visitor needs.
        example_node = uiexample('')
        example_node.code = source
        example_node.height = height
        return [listing, example_node]
def setup(Sphynx):
    """Sphinx extension entry point: register the uiexample node (with its
    HTML visitors) and the ``uiexample`` directive."""
    #Sphynx.add_javascript('js-image-slider.js')
    #Sphynx.add_stylesheet('js-image-slider.css')
    Sphynx.add_node(uiexample, html=(visit_uiexample_html, depart_uiexample_html))
    Sphynx.add_directive('uiexample', UIExampleDirective)
| [
"almar.klein@gmail.com"
] | almar.klein@gmail.com |
8ecc268008bf76d6edc3ac8b32ae86a39041e1b4 | 6ad7325b4c04dad9e7552882f53a6cb146474652 | /crypto_analyzer/crypto/models.py | 5f6b7218b7985d6f1899c232a9f304a3a0eb1429 | [] | no_license | abhaystoic/cryptoguru | fa04c55e437dd41a64843b35a8d60d398511eb23 | 96562e881b40ee77a4704b40400caba9084bbfca | refs/heads/master | 2020-03-11T10:26:30.950772 | 2018-10-14T13:23:10 | 2018-10-14T13:23:10 | 129,942,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,650 | py | # -*- coding: utf-8 -*-
"""
TODO: Remove 'blank=True, null=True' wherever it doesn't make sense.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models
MAX_CHAR_LEN = 1024
MAX_CHAR_LEN_URL = 2000
MAX_CHAR_LEN_BIG = 10000
# Create your models here.
class Cryptos(models.Model):
    """Market snapshot for a cryptocurrency (global market data)."""
    name = models.CharField(max_length=MAX_CHAR_LEN)
    # Identifier used by the upstream price API — presumably coinmarketcap-style; confirm
    coin_id = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    symbol = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    international_price_inr = models.FloatField(blank=True, null=True)
    rank = models.IntegerField(blank=True, null=True)
    price_usd = models.FloatField(blank=True, null=True)
    price_btc = models.FloatField(blank=True, null=True)
    volume_usd_24h = models.FloatField(blank=True, null=True)
    market_cap_usd = models.FloatField(blank=True, null=True)
    available_supply = models.FloatField(blank=True, null=True)
    total_supply = models.FloatField(blank=True, null=True)
    max_supply = models.FloatField(blank=True, null=True)
    percent_change_1h = models.FloatField(blank=True, null=True)
    percent_change_24h = models.FloatField(blank=True, null=True)
    percent_change_7d = models.FloatField(blank=True, null=True)
    # Timestamp string as reported by the data source, kept verbatim.
    data_last_updated = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    volume_inr_24h = models.FloatField(blank=True, null=True)
    market_cap_inr = models.FloatField(blank=True, null=True)
    # Row creation time (auto_now_add: set once on insert).
    last_updated = models.DateTimeField(auto_now_add=True, blank=True, null=True)

    def __str__(self):
        return self.name
class IndianCryptos(models.Model):
    """Same schema as Cryptos; a separate table — presumably restricted to
    coins traded on Indian exchanges (verify against ingestion code).
    NOTE(review): duplicates Cryptos field-for-field; an abstract base model
    would remove the duplication without changing the tables."""
    name = models.CharField(max_length=MAX_CHAR_LEN)
    coin_id = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    symbol = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    international_price_inr = models.FloatField(blank=True, null=True)
    rank = models.IntegerField(blank=True, null=True)
    price_usd = models.FloatField(blank=True, null=True)
    price_btc = models.FloatField(blank=True, null=True)
    volume_usd_24h = models.FloatField(blank=True, null=True)
    market_cap_usd = models.FloatField(blank=True, null=True)
    available_supply = models.FloatField(blank=True, null=True)
    total_supply = models.FloatField(blank=True, null=True)
    max_supply = models.FloatField(blank=True, null=True)
    percent_change_1h = models.FloatField(blank=True, null=True)
    percent_change_24h = models.FloatField(blank=True, null=True)
    percent_change_7d = models.FloatField(blank=True, null=True)
    data_last_updated = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    volume_inr_24h = models.FloatField(blank=True, null=True)
    market_cap_inr = models.FloatField(blank=True, null=True)
    last_updated = models.DateTimeField(auto_now_add=True, blank=True, null=True)

    def __str__(self):
        return self.name
class Exchanges(models.Model):
    """Latest price quote (buy/sell/last/average, volume) from an exchange."""
    name = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    exchange_id = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    buy = models.FloatField(blank=True, null=True)
    sell = models.FloatField(blank=True, null=True)
    last = models.FloatField(blank=True, null=True)
    average = models.FloatField(blank=True, null=True)
    currency_symbol = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    volume = models.FloatField(blank=True, null=True)
    international_price_dollar = models.FloatField(blank=True, null=True)
    international_price_inr = models.FloatField(blank=True, null=True)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)

    def __str__(self):
        return self.name
class ExchangesInfo(models.Model):
    """Descriptive/static details for an exchange (URLs, status flags)."""
    name = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    exchange_id = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    url = models.URLField(blank=True, null=True, max_length=MAX_CHAR_LEN_URL)
    website = models.URLField(blank=True, null=True, max_length=MAX_CHAR_LEN_URL)
    active = models.NullBooleanField(default=False)
    # Presumably: whether bitcoin.org lists this exchange — TODO confirm.
    bitcoin_org_recognized = models.NullBooleanField(default=False)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)

    def __str__(self):
        return self.name
class Message(models.Model):
    """A contact-form message from a site visitor."""
    # NOTE(review): Django ignores max_length on TextField at the DB level
    # (it is only enforced in forms); a CharField may have been intended.
    first_name = models.TextField(max_length=MAX_CHAR_LEN_URL)
    last_name = models.TextField(blank=True, null=True)
    email = models.EmailField(max_length=MAX_CHAR_LEN, blank=True, null=True)
    message = models.TextField()
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)

    def __str__(self):
        return ' '.join([self.first_name, self.last_name])
class FAQ(models.Model):
    """A frequently-asked question entry (title + answer content)."""
    title = models.TextField(blank=True, null=True)
    content = models.TextField(blank=True, null=True)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)

    def __str__(self):
        return self.title
class User(models.Model):
    """A site user record created from a social login (OAuth) provider.
    NOTE(review): this does not extend django.contrib.auth's user model."""
    email = models.EmailField(unique=True)
    full_name = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    # OAuth provider name (e.g. 'google') — presumably; confirm against login flow.
    provider = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    # Provider-issued auth token; large max_length to accommodate JWTs.
    token = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN_BIG)
    image_url = models.URLField(blank=True, null=True, max_length=MAX_CHAR_LEN_URL)
    uid = models.CharField(blank=True, null=True, max_length=MAX_CHAR_LEN)
    preferred_currency_code = models.CharField(max_length=MAX_CHAR_LEN, default='INR')
    enable_notification = models.BooleanField(default=True)
    last_updated = models.DateTimeField(blank=True, null=True, auto_now_add=True)

    def __str__(self):
        return self.email
| [
"iabhaygupta90@gmail.com"
] | iabhaygupta90@gmail.com |
6de0cdbb4029d1b8614623b599b9b04bb8352527 | 0be27c0a583d3a8edd5d136c091e74a3df51b526 | /no.of ai_greater_thn_aj.py | 2f9cb826b3435d34e935bb0c845cd199ceb9184a | [] | no_license | ssangitha/guvicode | 3d38942f5d5e27a7978e070e14be07a5269b01fe | ea960fb056cfe577eec81e83841929e41a31f72e | refs/heads/master | 2020-04-15T05:01:00.226391 | 2019-09-06T10:08:23 | 2019-09-06T10:08:23 | 164,405,935 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | n=int(input())
values = list(map(int, input().split()))
# Count index pairs (i, j) with i < j whose values are strictly increasing.
count = sum(
    1
    for i in range(n - 1)
    for j in range(i + 1, n)
    if values[i] < values[j]
)
print(count)
| [
"noreply@github.com"
] | ssangitha.noreply@github.com |
d4b30fe8753b69022b4ced22642564adad27249e | 100802fd56febbe28e11d45802e0ad661a9b98c4 | /Community/migrations/0011_auto_20170814_1452.py | 925f4afb0c0e91599fc192516ab064e71147f391 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ByrdOfAFeather/AlphaTrion | efc4cfcfa246adde5a0ce84eb5e295c0c61722f5 | 90b00b8f4e4c7fe3c495a5ded14b47c3210119ea | refs/heads/master | 2021-01-22T07:35:43.403968 | 2017-12-03T15:50:41 | 2017-12-03T15:50:41 | 102,306,857 | 0 | 2 | null | 2017-11-12T18:32:35 | 2017-09-04T01:39:45 | Python | UTF-8 | Python | false | false | 771 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-14 18:52
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.4; do not hand-edit applied migrations.

    dependencies = [
        ('Community', '0010_auto_20170813_1831'),
    ]

    operations = [
        migrations.AlterField(
            model_name='communityinst',
            name='date',
            # NOTE(review): the concrete date was baked in at makemigrations
            # time — the model likely called date.today() instead of passing
            # it as a callable. Harmless here, but worth confirming upstream.
            field=models.DateField(default=datetime.date(2017, 8, 14)),
        ),
        migrations.AlterField(
            model_name='communitypacingratings',
            name='rating',
            field=models.CharField(choices=[('v', 'Very Good'), ('g', 'Good'), ('d', 'Decent'), ('b', 'Bad'), ('h', 'Very Bad')], default='d', max_length=1),
        ),
    ]
| [
"matthew_a_byrd@outlook.com"
] | matthew_a_byrd@outlook.com |
6a85cca1f26260ec941c33e52aa6b830c28f2b58 | f942f82fb1b9c2eb0c4cf03ca2254f4207fd08d2 | /Website/migrations/0010_mainpage_map.py | b8735d8c31c533b9ff5088df895a6f34ada18e94 | [] | no_license | mahdy-world/Fatoma-Restaurant | 2b6aec149c20d5526d5d7a505479cc29c811d666 | a500397741e72d0cf28dbb8f64c914144835d6c2 | refs/heads/master | 2023-06-27T19:27:35.606292 | 2021-07-31T13:53:18 | 2021-07-31T13:53:18 | 391,366,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # Generated by Django 3.2.3 on 2021-07-15 08:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.3; adds an optional Google-Maps-link field
    # ("map") to the MainPage model.

    dependencies = [
        ('Website', '0009_auto_20210713_1517'),
    ]

    operations = [
        migrations.AddField(
            model_name='mainpage',
            name='map',
            field=models.CharField(blank=True, max_length=200, null=True, verbose_name='لينك الخريطة'),
        ),
    ]
"salmazidan1997@gmail.com"
] | salmazidan1997@gmail.com |
cd3fc40018659f5a71bc693d5d4872929557c09f | c0caed81b5b3e1498cbca4c1627513c456908e38 | /src/python/bindings/app/pyrosetta_toolkit/window_modules/scripting/rosetta_scripts.py | 4128e2e9c3bbbdd354f557ba92578a27d4418321 | [
"LicenseRef-scancode-other-permissive"
] | permissive | malaifa/source | 5b34ac0a4e7777265b291fc824da8837ecc3ee84 | fc0af245885de0fb82e0a1144422796a6674aeae | refs/heads/master | 2021-01-19T22:10:22.942155 | 2017-04-19T14:13:07 | 2017-04-19T14:13:07 | 88,761,668 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 753 | py |
#!/usr/bin/python
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
## @file /GUIs/pyrosetta_toolkit/window_modules/scripting/rosetta_scripts.py
## @brief Rosetta script creator, if I/we can figure out a general way to parse all the information available...
## @author Jared Adolf-Bryfogle (jadolfbr@gmail.com)
class Rosetta_Script_Creator:
def __init__(self):
| [
"malaifa@yahoo.com"
] | malaifa@yahoo.com |
84bc6efbd7aee35a2b7b690cc31e5af6c753d0c3 | 6ef22466e1649ebae37dd19cba29a53b2f020317 | /imu9dof/scripts/imu9250.py | a51cf2ea32df15f18e0e58a31fcde0233430ad3f | [] | no_license | rishabhdevyadav/ROS_IMU_Filter | 7272cb7a950762c13e12b9bbf5f3cf6939cfea4d | f17438101c8e2cfc645fd89d79c76d5e7e6d5d40 | refs/heads/master | 2021-03-21T18:49:16.242474 | 2020-05-25T05:09:01 | 2020-05-25T05:09:01 | 247,321,664 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,106 | py | #!/usr/bin/python
from __future__ import division
# coding: utf-8
import rospy
import tf
from sensor_msgs.msg import Imu, MagneticField
from std_msgs.msg import Float64
from geometry_msgs.msg import Vector3
import MPU9250
from math import radians
MAG_HARD_BIAS = (113.90899999999999, -40.54, -16.3515)
MAG_SOFT_BIAS = (0.9602207761635727, 0.9829804630346844, 1.0624072704609615)
#MAG_HARD_BIAS = (132.1605, -30.133499999999998, -23.2225)
#MAG_SOFT_BIAS = (1.0291878517105106, 0.9204656212387662, 1.061623641331525)
G_TO_MS2 = 9.8
mpu9250 = MPU9250.MPU9250()
# shutdown ROS on interrupt
def shutdown():
    """rospy shutdown hook: log the shutdown, then pause one second."""
    rospy.loginfo("Shutting Down Ploting")
    rospy.sleep(1)
try:
    rospy.init_node('IMU_Plotting', anonymous=True)
    rospy.on_shutdown(shutdown)
    rate = rospy.Rate(7)  # publish at ~7 Hz

    imu_pub = rospy.Publisher('imu/data_raw', Imu, queue_size=10)
    mag_pub = rospy.Publisher('imu/mag', MagneticField, queue_size=10)
    imu_msg = Imu()
    mag_msg = MagneticField()
    rospy.loginfo("IMU STARTED")

    # Main loop: read the MPU9250, convert units, publish raw IMU + mag data.
    # (`while True and ...` simplified: the `True and` was redundant.)
    while not rospy.is_shutdown():
        try:
            m9a = mpu9250.readAccel()
            m9g = mpu9250.readGyro()
            mag = mpu9250.readMagnet()

            # Accel: g -> m/s^2; gyro: deg/s -> rad/s (ROS REP 103 units).
            m9a = [G_TO_MS2 * x for x in m9a]
            m9g = [radians(x) for x in m9g]
            # Magnetometer calibration: subtract hard-iron bias, scale by
            # soft-iron correction, per axis.
            mx, my, mz = ((mag[x] - MAG_HARD_BIAS[x]) * MAG_SOFT_BIAS[x] for x in range(3))

            # Fill mag msg
            mag_msg.header.stamp = rospy.get_rostime()
            mag_msg.magnetic_field.x = mx
            mag_msg.magnetic_field.y = my
            mag_msg.magnetic_field.z = mz

            # Identity quaternion: orientation estimation is left to a
            # downstream filter (this node publishes raw data only).
            q0 = 1.0  # W
            q1 = 0.0  # X
            q2 = 0.0  # Y
            q3 = 0.0  # Z

            # Fill imu message
            imu_msg.header.stamp = rospy.get_rostime()
            imu_msg.header.frame_id = 'imu_raw'
            imu_msg.orientation.x = q1
            imu_msg.orientation.y = q2
            imu_msg.orientation.z = q3
            imu_msg.orientation.w = q0
            # BUGFIX: the 3x3 row-major covariance diagonal lives at indices
            # 0, 4 and 8; the original wrote index 0 three times, leaving
            # indices 4 and 8 zeroed (matches the gyro/accel blocks below).
            imu_msg.orientation_covariance[0] = 1e6
            imu_msg.orientation_covariance[4] = 1e6
            imu_msg.orientation_covariance[8] = 0.1

            imu_msg.angular_velocity.x = m9g[0]
            imu_msg.angular_velocity.y = m9g[1]
            imu_msg.angular_velocity.z = m9g[2]
            imu_msg.angular_velocity_covariance[0] = 1e6
            imu_msg.angular_velocity_covariance[4] = 1e6
            imu_msg.angular_velocity_covariance[8] = 0.1

            imu_msg.linear_acceleration.x = m9a[0]
            imu_msg.linear_acceleration.y = m9a[1]
            imu_msg.linear_acceleration.z = m9a[2]
            imu_msg.linear_acceleration_covariance[0] = 1e6
            imu_msg.linear_acceleration_covariance[4] = 1e6
            imu_msg.linear_acceleration_covariance[8] = 0.1

            imu_pub.publish(imu_msg)
            mag_pub.publish(mag_msg)
            rate.sleep()
        except KeyboardInterrupt:
            break
except rospy.ROSInterruptException:
    rospy.logwarn("ROS_NODE_ENDED")
except Exception as e:
    # BUGFIX: rospy loggers apply %-formatting when extra args are given;
    # the original 'IMU NODE EXCEPTION: ' had no placeholder for `e` and
    # would itself raise inside this handler.
    rospy.logerr('IMU NODE EXCEPTION: %s', e)
| [
"rishabhdevyadav95@gmail.com"
] | rishabhdevyadav95@gmail.com |
e55af56ffc42aa26714400df063eb385adc9fb93 | fb5c5d50d87a6861393d31911b9fae39bdc3cc62 | /Scripts/sims4communitylib/events/build_buy/events/build_buy_enter.py | fb2938792871539d80fe2bd229abeb49d918e76f | [
"CC-BY-4.0"
] | permissive | ColonolNutty/Sims4CommunityLibrary | ee26126375f2f59e5567b72f6eb4fe9737a61df3 | 58e7beb30b9c818b294d35abd2436a0192cd3e82 | refs/heads/master | 2023-08-31T06:04:09.223005 | 2023-08-22T19:57:42 | 2023-08-22T19:57:42 | 205,197,959 | 183 | 38 | null | 2023-05-28T16:17:53 | 2019-08-29T15:48:35 | Python | UTF-8 | Python | false | false | 1,721 | py | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from sims4communitylib.events.event_handling.common_event import CommonEvent
from zone import Zone
class S4CLBuildBuyEnterEvent(CommonEvent):
    """S4CLBuildBuyEnterEvent(zone)

    An event that occurs upon entering Build/Buy on a lot.

    :Example usage:

    .. highlight:: python
    .. code-block:: python

        from sims4communitylib.events.event_handling.common_event_registry import CommonEventRegistry
        from sims4communitylib.modinfo import ModInfo

        class ExampleEventListener:

            # In order to listen to an event, your function must match these criteria:
            # - The function is static (staticmethod).
            # - The first and only required argument has the name "event_data".
            # - The first and only required argument has the Type Hint for the event you are listening for.
            # - The argument passed to "handle_events" is the name of your Mod.
            @staticmethod
            @CommonEventRegistry.handle_events(ModInfo.get_identity().name)
            def handle_event(event_data: S4CLBuildBuyEnterEvent):
                pass

    :param zone: The zone the player has entered Build/Buy on.
    :type zone: Zone
    """
    def __init__(self, zone: Zone) -> None:
        # Captured at event construction; exposed read-only via the `zone` property.
        self._zone = zone

    @property
    def zone(self) -> Zone:
        """The zone the event occurred on.

        :return: The zone the event occurred on.
        :rtype: Zone
        """
        return self._zone
| [
"ColonolNutty@hotmail.com"
] | ColonolNutty@hotmail.com |
5970528efd5dd4f0d36f8fbf562458f334165056 | 97543ae8e1ad7bf3d17dd87171aaac04f6737b5f | /test/bibliopixel/control/extractor_test.py | a44234bd03bee8f4ce8e5ce43b9c72bd0307a31b | [
"MIT"
] | permissive | dr-aryone/BiblioPixel | a3c630bf1cd5db2b014b86775d283c61565a193e | fd97e6c651a4bbcade64733847f4eec8f7704b7c | refs/heads/master | 2020-05-27T16:19:15.043592 | 2019-03-23T08:52:37 | 2019-03-25T11:10:39 | 188,698,414 | 2 | 1 | MIT | 2019-05-26T15:12:38 | 2019-05-26T15:12:37 | null | UTF-8 | Python | false | false | 1,912 | py | import collections, fractions, unittest
from bibliopixel.control import extractor
KEYS_BY_TYPE = {
'note_on': ('channel', 'type', 'note', 'velocity'),
'control_change': ('channel', 'type', 'control', 'value'),
'pitchwheel': ('channel', 'type', 'pitch'),
}
NORMALIZERS = {
'pitch': lambda x: fractions.Fraction(x - 8192) / 8192,
'value': lambda x: fractions.Fraction(x) / 127,
'velocity': lambda x: fractions.Fraction(x) / 127,
}
C3 = {'type': 'note_on', 'note': 32, 'channel': 1, 'velocity': 96}
C3_OFF = {'type': 'note_off', 'note': 32, 'channel': 1, 'velocity': 0, 'x': 47}
BC = {'type': 'control_change', 'channel': 2, 'control': 2, 'value': 10}
BC3 = {'type': 'control_change', 'channel': 3, 'control': 2, 'value': 128}
MOD = {'type': 'control_change', 'channel': 2, 'control': 1, 'value': 128}
PB = {'type': 'control_change', 'channel': 2, 'control': 1, 'value': 128}
OTHER = {'type': 'other', 'channel': 32, 'thing': 'stuff'}
class ExtractorTest(unittest.TestCase):
    """Tests for extractor.Extractor: extracting and normalizing MIDI-style
    message fields according to KEYS_BY_TYPE and NORMALIZERS."""

    def run_test(self, msg, expected, **kwds):
        # Build an Extractor with the shared tables plus per-test options and
        # compare its extraction of `msg` against the expected ordered pairs.
        md = extractor.Extractor(
            keys_by_type=KEYS_BY_TYPE,
            normalizers=NORMALIZERS, **kwds)
        # An empty `expected` stays falsy (empty OrderedDict passes through).
        expected = expected and collections.OrderedDict(expected)
        self.assertEqual(md.extract(msg), expected)

    def test_one(self):
        expected = [
            ('channel', 1),
            ('type', 'note_on'),
            ('note', 32),
            ('velocity', fractions.Fraction(96) / 127)]
        self.run_test(C3, expected)
        # Omitting 'channel' drops just the leading pair.
        self.run_test(C3, expected[1:], omit='channel')

    def test_accept(self):
        # Only messages matching every accept constraint yield any fields;
        # all others extract to an empty mapping.
        accept = {'channel': 2, 'type': 'control_change', 'control': 2}
        for msg in C3, C3_OFF, BC3, MOD, OTHER:
            self.run_test(msg, collections.OrderedDict(), accept=accept)
        self.run_test(
            BC,
            [('value', fractions.Fraction(10) / 127)],
            accept=accept)
| [
"tom@swirly.com"
] | tom@swirly.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.