blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6976ae2c9bf1b5aab381348924e7be61e3d86826 | aa28417be8935d6fa369fcb526174f9e1e30479a | /playstation/practice/字符串变形.py | 5e62f23e5ed85d838abb9634296be22909aa5c37 | [] | no_license | YYN117/Demo | d6fca95ed8a1a433ef06f1f3fc2e768414e863cb | 40690040a7422fd5d8f03a0d68f20f1be5d4a836 | refs/heads/master | 2020-04-14T12:29:30.129709 | 2019-01-02T13:31:10 | 2019-01-02T13:31:10 | 163,841,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # -*- coding:utf-8 -*-
def trans(s, n):
    """Reverse the word order of *s* and swap the case of every letter.

    s: input sentence, words separated by single spaces
    n: length of s (kept for interface compatibility; unused)

    Prints the transformed sentence instead of returning it.
    """
    words = s.split(' ')
    words.reverse()
    # str.swapcase swaps upper<->lower in one step; unlike the original
    # ord()-range version it also keeps non-letter characters (digits,
    # punctuation) instead of silently dropping them.
    result = ' '.join(word.swapcase() for word in words)
    print(result)
trans("This is a sample",16)
| [
"41251061+YYN117@users.noreply.github.com"
] | 41251061+YYN117@users.noreply.github.com |
c3e2b659072f60e2a9c3b9710ef26d0bc548581f | 81a069a740a557e7b89ad03a33ec306f5ea5b293 | /cristianoronaldoyopmailcom_223/settings.py | 9d9ce5cab0ee854b478d3b331a3f7610bdfc262f | [] | no_license | payush/cristianoronaldoyopmailcom-223 | 0c1113b5417ab0f51c9796c7f158a7d3c38827be | f6b26672613a880e9638070cf616c7a40fc803ad | refs/heads/master | 2020-03-23T12:27:37.229193 | 2018-07-19T09:57:58 | 2018-07-19T09:57:58 | 141,560,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,157 | py | """
Django settings for cristianoronaldoyopmailcom_223 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '97rbb@^rz7pd#xa_je*qqytx55e=eg$2$ev1zf8ihak4s797-9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cristianoronaldoyopmailcom_223.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cristianoronaldoyopmailcom_223.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# ---- Deployment overrides (applied after the generated defaults above) ----
# django-environ pulls settings such as DATABASE_URL from the environment.
import environ
env = environ.Env()
# NOTE(review): '*' accepts any Host header; narrow this when the app is
# exposed directly rather than behind a trusted proxy.
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# WhiteNoise lets the app serve its own collected static files.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# Overrides the sqlite3 DATABASES block defined earlier: the connection is
# now taken from the DATABASE_URL environment variable.
DATABASES = {
    'default': env.db()
}
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
# Static files are collected into staticfiles/ and served compressed.
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Project apps plus third-party apps, appended to the generated INSTALLED_APPS.
LOCAL_APPS = [
    'home',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
# Email-based login: no username, email required, no verification mail sent.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
| [
"ayushpuroheet@gmail.com"
] | ayushpuroheet@gmail.com |
5bd988b720a123e3d2023d60f781fc45f6f0bd9e | d5a786c47e171b8e0ce1634d28b4f13be5bedb32 | /blog/views.py | 987eb902f61f82096ae2a964ad9e8e65773a7f55 | [] | no_license | RaphaelfsOliveira/djeven | 9b48728e026572a74273c32b7b6cb09821b3e6fb | 689b3c91617bbbe147122d029ec0906b99da1e66 | refs/heads/master | 2021-01-09T06:17:21.335289 | 2017-04-26T20:09:51 | 2017-04-26T20:09:51 | 80,952,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post
# Create your views here.
def post_list(request):
    """Render every post whose publish date has passed, ordered by date.

    NOTE(review): ascending order shows the oldest posts first — confirm this
    is intended ('-published_date' would give newest-first).
    """
    posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
    """Render a single post looked up by primary key; 404 if it is absent."""
    post = get_object_or_404(Post, pk=pk)
    return render(request, 'blog/post_detail.html', {'post': post})
def home(request):
    """Render the static landing page."""
    return render(request, 'blog/home.html')
| [
"raphaelbrf@gmail.com"
] | raphaelbrf@gmail.com |
cf0ab34e186cb551aefd62312852dd5ccd9505fc | 73f7cc0e71bfd38d3bfe97367324f1e7a5d8b451 | /engine_code/gapi/modules/auth/text_xml.py | 1434494619269625c21fba9be8e04088f6e542ee | [] | no_license | cash2one/my-test | ccc0ae860f936262a601c1b579d3c85196b562f9 | 8bd23f5963f4dc7398b7670e28768a3533bd5d14 | refs/heads/master | 2021-01-18T03:20:30.889045 | 2017-01-19T02:52:02 | 2017-01-19T02:52:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,456 | py | #!/usr/bin/python
# -*- coding=utf-8 -*-
# author : wklken@yeah.net
# date: 2012-05-25
# version: 0.1
import sys
import os
from xml.etree.ElementTree import ElementTree,Element
def read_xml(in_path):
	"""Parse the XML file at *in_path*.

	in_path: path of the xml file
	return:  the parsed ElementTree
	"""
	parsed = ElementTree()
	parsed.parse(in_path)
	return parsed
def write_xml(tree, out_path):
	"""Serialize *tree* to the file at *out_path*, UTF-8 encoded.

	tree:     the xml tree to write out
	out_path: destination file path
	"""
	tree.write(out_path, encoding="utf-8")
def if_match(node, kv_map):
	"""Return True when *node* carries every attribute of *kv_map* with an
	equal value (an empty kv_map therefore always matches).

	node:   the element to inspect
	kv_map: required attribute name -> value pairs
	"""
	return all(node.get(name) == value for name, value in kv_map.items())
#---------------search -----
def find_nodes(tree, path):
	"""Return every node of *tree* matching *path* (ElementTree findall syntax).

	tree: an ElementTree to search
	path: node path to match
	"""
	matches = tree.findall(path)
	return matches
def get_node_by_keyvalue(nodelist, kv_map):
	"""Keep only the nodes whose attributes match every entry of *kv_map*.

	nodelist: candidate nodes
	kv_map:   required attribute name -> value pairs
	return:   the matching nodes, in their original order
	"""
	return [candidate for candidate in nodelist if if_match(candidate, kv_map)]
#---------------change -----
def change_node_properties(nodelist, kv_map, is_delete=False):
	"""Apply the attributes of *kv_map* to every node in *nodelist*.

	With is_delete=False each attribute is set/added; with is_delete=True each
	named attribute is removed when present.
	"""
	for node in nodelist:
		for name, value in kv_map.items():
			if is_delete:
				# pop() tolerates absent keys, like the original guarded del.
				node.attrib.pop(name, None)
			else:
				node.set(name, value)
def change_node_text(nodelist, text, is_add=False, is_delete=False):
	"""Replace the text of each node in *nodelist* with *text*.

	is_add=True appends to the existing text instead; is_delete=True clears
	the text (is_add takes precedence when both flags are set).
	"""
	for node in nodelist:
		if is_add:
			node.text = node.text + text
		elif is_delete:
			node.text = ""
		else:
			node.text = text
def create_node(tag, property_map, content, tailnum=None):
	"""Build a fresh Element.

	tag:          element tag
	property_map: attribute name -> value map
	content:      text placed inside the element
	tailnum:      optional tail text placed after the closing tag
	return:       the new Element
	"""
	fresh = Element(tag, property_map)
	fresh.text = content
	fresh.tail = tailnum
	return fresh
def add_child_node(nodelist, element):
	"""Append *element* as a child of every node in *nodelist*.

	Note: the very same Element object is appended to each parent.
	"""
	for parent in nodelist:
		parent.append(element)
def del_node_by_tagkeyvalue(nodelist, tag, kv_map):
	"""Remove matching children from every parent node.

	nodelist: parent nodes to scan
	tag:      tag the child must have
	kv_map:   attributes (name -> value) the child must all carry
	"""
	for parent_node in nodelist:
		# Snapshot the children first: we mutate parent_node while scanning.
		# list(parent_node) replaces getchildren(), which was removed in
		# Python 3.9 (it also returned a copy, so behavior is unchanged).
		children = list(parent_node)
		for child in children:
			if child.tag == tag and if_match(child, kv_map):
				parent_node.remove(child)
def change_dict(str_argv, dst_dict, str_len):
	"""Copy alternating key/value pairs from an argv-style list into *dst_dict*.

	str_argv: [program, key1, val1, key2, val2, ...]
	dst_dict: dict updated in place
	str_len:  len(str_argv); must be odd so every key has a value
	"""
	for i in range(1, str_len, 2):
		# Bug fix: read the value from str_argv itself; the original indexed
		# the global sys.argv here, which broke any caller passing a list
		# other than sys.argv.
		dst_dict[str_argv[i]] = str_argv[i + 1]
def change_str(src_data, dst_dict, str_len):
	"""Parse a 'key value key value ...' string into *dst_dict*.

	src_data: single-space separated alternating keys and values
	dst_dict: dict updated in place
	str_len:  pairing loop bound; tokens 0,2,4,... below str_len become keys
	"""
	# str.split(' ') is exactly the original hand-rolled single-space scanner
	# (consecutive spaces still yield empty tokens).
	tokens = src_data.split(' ')
	for i in range(0, str_len, 2):
		dst_dict[tokens[i]] = tokens[i + 1]
def xml_return(ret,buf):
	"""Write status *ret* and message *buf* into ./out3.xml.

	Loads the template at /gms/conf/return_val.xml, sets (or creates) the
	<network> node with text *ret* and its <information> child with text
	*buf*, then writes the result to ./out3.xml.
	"""
	tree = read_xml("/gms/conf/return_val.xml")
	root = tree.getroot()
	nod = find_nodes(tree, "network")
	if nod == []:
		# No <network> node yet: create it directly under the root.
		b=create_node("network", {}, ret)
		root.append(b)
	else:
		change_node_text(nod, ret)
	nod2 = find_nodes(tree, "network")
	nod_infor = find_nodes(tree, "network/information")
	if nod_infor == []:
		# No <information> child yet: create it under <network>.
		tion=create_node("information", {}, buf)
		add_child_node(nod2,tion)
	else:
		change_node_text(nod_infor, buf)
	write_xml(tree, "./out3.xml")
#if __name__ == "__main__":
#tmp_dict={}
#if len(sys.argv) > 2 :
# change_dict(sys.argv,tmp_dict,len(sys.argv))
#else:
# change_str(sys.argv[1],tmp_dict,len(sys.argv[1]))
#cmd_ip="ifconfig eth0"+tmp_dict['ip']+" netmask "+tmp_dict['netmask']+" gateway "+tmp_dict['gateway']
#cmd_dns="nameserver "+tmp_dict["dns"]+">"+"/etc/resolv.conf"
#cmd_dns1="nameserver "+tmp_dict["dns1"]+">>"+"/etc/resolv.conf"
#print cmd_ip
#if os.system(cmd_ip) != 0:
# return -1
#if os.system(cmd_dns) != 0:
# return -2
#if os.system(cmd_dns1) != 0:
# return -3
#1. 读取xml文件
#tree = read_xml("/gms/conf/test.xml")
#2. 属性修改
#A. 找到父节点
#nodes = find_nodes(tree, "network")
#nod = find_nodes(tree, "network/ip")
#if nod == []:
# b=create_node("ip", {}, "192.168.0.2")
# add_child_node(nodes,b)
#else:
# change_node_text(nod, "1.1.1.1")
#B. 通过属性准确定位子节点
#result_nodes = get_node_by_keyvalue(nodes, )
#C. 修改节点属性
#change_node_properties(result_nodes, {"age": "1"})
#D. 删除节点属性
#change_node_properties(result_nodes, {"value":""}, True)
#3. 节点修改
#A.新建节点
#a = create_node("person", {"age":"15","money":"200000"}, "this is the firest content")
#B.插入到父节点之下
#add_child_node(result_nodes, a)
#4. 删除节点
#定位父节点
#del_parent_nodes = find_nodes(tree, "processers/services/service")
#准确定位子节点并删除之
#target_del_node = del_node_by_tagkeyvalue(del_parent_nodes, "chain", {"sequency" : "chain1"})
#5. 修改节点文本
#定位节点
#text_nodes = get_node_by_keyvalue(find_nodes(tree, "processers/services/service/chain"), {"sequency":"chain3"})
#change_node_text(text_nodes, "new text")
#6. 输出到结果文件
#write_xml(tree, "./out1.xml")
| [
"zhizhi1908@yeahh.net"
] | zhizhi1908@yeahh.net |
1fd3a2b4c611dfa98d2db9ba170171832ca778b9 | 7355c7a5fb5f636b07598d4b4018491b435b553c | /tfx/types/standard_artifacts.py | 3dc2bf6db3dda9323c32b5efe52d9cef9a70bd89 | [
"Apache-2.0"
] | permissive | DevenLu/tfx | 4a3ce025594ad006d37f9c4c69f08d8d49f09e8f | 1b99e7f33017bcd0e49a5a4ae1dc13440da35d3e | refs/heads/master | 2020-07-08T13:23:54.033534 | 2019-08-21T22:07:54 | 2019-08-21T22:08:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A set of standard TFX Artifact types."""
from tfx.types import artifact
# Each standard artifact type below only pins TYPE_NAME, the string identifier
# for the artifact type (values keep the legacy '*Path' names).
class Examples(artifact.Artifact):
  TYPE_NAME = 'ExamplesPath'
class ExternalArtifact(artifact.Artifact):
  TYPE_NAME = 'ExternalPath'
class ExampleStatistics(artifact.Artifact):
  TYPE_NAME = 'ExampleStatisticsPath'
class ExampleAnomalies(artifact.Artifact):
  TYPE_NAME = 'ExampleValidationPath'
class Model(artifact.Artifact):
  TYPE_NAME = 'ModelExportPath'
class ModelBlessing(artifact.Artifact):
  TYPE_NAME = 'ModelBlessingPath'
class ModelEvaluation(artifact.Artifact):
  TYPE_NAME = 'ModelEvalPath'
class PushedModel(artifact.Artifact):
  TYPE_NAME = 'ModelPushPath'
class Schema(artifact.Artifact):
  TYPE_NAME = 'SchemaPath'
class TransformGraph(artifact.Artifact):
  TYPE_NAME = 'TransformPath'
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
353bf95f32bbe15d426b90b4624987d5ebe0dcab | fa38f67a6f5296ba64de8d771492f9db230cf5ed | /beatspread.py | 6f758e79570b8c1438f107d9eb05417058cedc2a | [] | no_license | traffaillac/traf-kattis | 5ebc2b0411c9f27da5d9080c269ad0add227a79c | 1b6bfdad48f3fab31902d85ed48b1bd0a8f44d0f | refs/heads/master | 2023-08-16T21:44:56.885553 | 2023-08-12T15:04:43 | 2023-08-12T15:04:43 | 236,475,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | for _ in range(int(input())):
s,d = (int(i) for i in input().split())
a,b = (s+d)//2, (s-d)//2
print(f'{a} {b}' if b>=0 and s&1==d&1 else 'impossible')
| [
"traf@kth.se"
] | traf@kth.se |
6829b76481bf86e1a23ec83e3e05484de249d009 | bb150497a05203a718fb3630941231be9e3b6a32 | /models/PaddleScience/tests/test_api/test_FCNet.py | 76b10aae7b71bcaadf357e383b365d8317bbf919 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 7,160 | py | """
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import numpy as np
import paddlescience as psci
import pytest
import paddle
from apibase import APIBase
from apibase import randtool
np.random.seed(22)
paddle.seed(22)
paddle.disable_static()
psci.config.set_dtype('float64')
def cal_FCNet(ins,
num_ins,
num_outs,
num_layers,
hidden_size,
activation='tanh'):
"""
calculate FCNet api
"""
net = psci.network.FCNet(
num_ins=num_ins,
num_outs=num_outs,
num_layers=num_layers,
hidden_size=hidden_size,
activation=activation)
for i in range(num_layers):
net._weights[i] = paddle.ones_like(net._weights[i])
res = net.nn_func(ins)
return res
def cal_with_np(ins,
num_ins,
num_outs,
num_layers,
hidden_size,
activation='tanh'):
"""
calculate with numpy
"""
w = []
for i in range(num_layers):
if i == 0:
lsize = num_ins
rsize = hidden_size
elif i == (num_layers - 1):
lsize = hidden_size
rsize = num_outs
else:
lsize = hidden_size
rsize = hidden_size
w.append(np.ones((lsize, rsize)))
u = ins
for i in range(num_layers - 1):
u = np.matmul(u, w[i])
if activation == 'tanh':
u = np.tanh(u)
elif activation == 'sigmoid':
u = 1 / (1 + np.exp(-u))
u = np.matmul(u, w[-1])
return u
class TestFCNet(APIBase):
"""
test flatten
"""
def hook(self):
"""
implement
"""
self.types = [np.float64]
# self.debug = True
# enable check grad
self.static = False
obj = TestFCNet(cal_FCNet)
@pytest.mark.api_network_FCNet
def test_FCNet0():
"""
default
"""
xy_data = np.array([[0.1, 0.5]])
u = cal_with_np(xy_data, 2, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=2,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet1():
"""
xy shape (9, 2)
"""
xy_data = randtool("float", 0, 10, (9, 2))
u = cal_with_np(xy_data, 2, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=2,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet2():
"""
xy shape (9, 3)
"""
xy_data = randtool("float", 0, 1, (9, 3))
u = cal_with_np(xy_data, 3, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=3,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet3():
"""
xy shape (9, 4)
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 1, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet4():
"""
xy shape (9, 4)
num_outs: 2
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 2, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=2,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet5():
"""
xy shape (9, 4)
num_outs: 3
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 2, 1)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet6():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 2, 20)
obj.delta = 1e-5
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=2,
hidden_size=20)
@pytest.mark.api_network_FCNet
def test_FCNet7():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
num_layers: 5
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 5, 20)
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=5,
hidden_size=20)
@pytest.mark.api_network_FCNet
def test_FCNet8():
"""
xy shape (9, 4)
num_outs: 3
hidden_size: 20
num_layers: 5
activation='sigmoid'
"""
xy_data = randtool("float", 0, 1, (9, 4))
u = cal_with_np(xy_data, 4, 3, 5, 20, activation='sigmoid')
obj.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=5,
hidden_size=20)
paddle.enable_static()
def static_fcnet(ins,
num_ins,
num_outs,
num_layers,
hidden_size,
activation='tanh'):
net = psci.network.FCNet(
num_ins, num_outs, num_layers, hidden_size, activation=activation)
net.make_network()
for i in range(num_layers):
net._weights[i] = paddle.ones_like(net._weights[i])
return net.nn_func(ins)
class TestFCNet(APIBase):
"""
test flatten
"""
def hook(self):
"""
implement
"""
self.types = [np.float64]
# self.debug = True
# enable check grad
self.dygraph = False
self.static = True
self.enable_backward = False
obj1 = TestFCNet(static_fcnet)
@pytest.mark.api_network_FCNet
def test_FCNet9():
"""
static
default
"""
xy_data = np.array([[0.1, 0.5]])
u = cal_with_np(xy_data, 2, 1, 2, 1)
obj1.run(res=u,
ins=xy_data,
num_ins=2,
num_outs=1,
num_layers=2,
hidden_size=1)
@pytest.mark.api_network_FCNet
def test_FCNet10():
"""
static
xy shape (9, 4)
num_outs: 3
hidden_size: 20
num_layers: 5
activation='sigmoid'
"""
# xy_data = randtool("float", 0, 1, (9, 4))
xy_data = np.array([[0.1, 0.5, 0.2, 0.4]])
u = cal_with_np(xy_data, 4, 3, 5, 20, activation='sigmoid')
obj1.run(res=u,
ins=xy_data,
num_ins=4,
num_outs=3,
num_layers=5,
hidden_size=20,
activation='sigmoid')
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
05d3daed5c13842cea79650ff0c744df26f7e996 | 3adbf4c196ce225f6bbf41d77d17fe312e5d4620 | /flexx/__main__.py | ccff891543f31cf807265ab42e5a2635d70df8cb | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | drorhilman/flexx | 9c818576aec1888092ab9819e2269d18b26f326b | 8de99132d0fa25b0fea81ed8ac7ff8f8f9e95661 | refs/heads/master | 2020-03-24T10:15:44.048180 | 2018-07-05T13:26:31 | 2018-07-05T13:26:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,326 | py | """
Flexx has a command line interface to perform some simple tasks.
Invoke it via ``python -m flexx``. Additional command line arguments
can be provided to configure Flexx, see
:func:`configuring flexx <flexx.config>`.
.. code-block:: none
"""
import sys
ALIASES = {'-h': 'help', '--help': 'help',
'--version': 'version',
}
class CLI:
    """ Command line interface class. Commands are simply defined as methods.
    """
    def __init__(self, args=None):
        """Dispatch to the ``cmd_<name>`` method named by args[0].

        args=None builds an inert instance (used to render the help text);
        an empty args list falls back to the 'help' command.
        """
        if args is None:
            return
        command = args[0] if args else 'help'
        # Map -h/--help/--version onto their command names.
        command = ALIASES.get(command, command)
        if command not in self.get_command_names():
            raise RuntimeError('Invalid command %r' % command)
        func = getattr(self, 'cmd_' + command)
        func(*args[1:])
    def get_command_names(self):
        """Return the sorted names of all commands (methods prefixed 'cmd_')."""
        commands = [d[4:] for d in dir(self) if d.startswith('cmd_')]
        commands.sort()
        return commands
    def get_global_help(self):
        """Build the overall usage text: one summary line per command."""
        lines = []
        lines.append('Flexx command line interface')
        lines.append(' python -m flexx <command> [args]')
        lines.append('')
        for command in self.get_command_names():
            doc = getattr(self, 'cmd_' + command).__doc__
            if doc:
                # The first docstring line doubles as the command summary.
                summary = doc.strip().splitlines()[0]
                lines.append('%s %s' % (command.ljust(15), summary))
        return '\n'.join(lines)
    def cmd_help(self, command=None):
        """ show information on how to use this command.
        """
        if command:
            if command not in self.get_command_names():
                raise RuntimeError('Invalid command %r' % command)
            doc = getattr(self, 'cmd_' + command).__doc__
            if doc:
                lines = doc.strip().splitlines()
                # Strip the 8-space docstring indent from continuation lines.
                doc = '\n'.join([lines[0]] + [line[8:] for line in lines[1:]])
                print('%s - %s' % (command, doc))
            else:
                print('%s - no docs' % command)
        else:
            print(self.get_global_help())
    def cmd_version(self):
        """ print the version number
        """
        # NOTE(review): 'sys' is already imported at module level; this local
        # import is redundant.
        import sys
        try:
            import flexx
        except ImportError:
            # Running from a source checkout: make the cwd importable.
            sys.path.insert(0, '.')
            import flexx
        print(flexx.__version__)
    def cmd_info(self, port=None):
        """ show info on flexx server process corresponding to given port,
        e.g. flexx info 8080
        The kind of info that is provided is not standardized/documented yet.
        """
        if port is None:
            return self.cmd_help('info')
        port = int(port)
        try:
            print(http_fetch('http://localhost:%i/flexx/cmd/info' % port))
        except FetchError:
            print('There appears to be no local server at port %i' % port)
    def cmd_stop(self, port=None):
        """ stop the flexx server process corresponding to the given port.
        """
        if port is None:
            return self.cmd_help('stop')
        port = int(port)
        try:
            print(http_fetch('http://localhost:%i/flexx/cmd/stop' % port))
            print('stopped server at %i' % port)
        except FetchError:
            print('There appears to be no local server at port %i' % port)
    def cmd_log(self, port=None, level='info'):
        """ Start listening to log messages from a server process - STUB
        flexx log port level
        """
        # NOTE(review): 'level' is accepted but unused — this command is a stub.
        if port is None:
            return self.cmd_help('log')
        print('not yet implemented')
        #print(http_fetch('http://localhost:%i/flexx/cmd/log' % int(port)))
class FetchError(Exception):
    """Raised by http_fetch when the HTTP request fails."""
    pass
def http_fetch(url):
    """ Perform an HTTP request.

    Returns the response body decoded as text; raises FetchError when the
    fetch fails for any reason.
    """
    from tornado.httpclient import HTTPClient
    http_client = HTTPClient()
    try:
        response = http_client.fetch(url)
    except Exception as err:
        # Normalize every tornado/socket failure into our own FetchError.
        raise FetchError('http fetch failed: %s' % str(err))
    finally:
        # Release the client whether or not the fetch succeeded.
        http_client.close()
    return response.body.decode()
# Prepare docss
_cli_docs = CLI().get_global_help().splitlines()
__doc__ += '\n'.join([' ' + line for line in _cli_docs])
def main():
    """Run the command line interface on the process arguments."""
    # Main entry point (see setup.py)
    CLI(sys.argv[1:])
if __name__ == '__main__':
main()
| [
"almar.klein@gmail.com"
] | almar.klein@gmail.com |
f1c1843044b9c187c5c7ffae3a14625d3b7e6f86 | 796613525c40a241b0f88ceb761838a5bca311e1 | /biasTF/BIAS_V2/src/MoreTransferFunctions.py | a243b13f8131edb9f58a22317611f42685ef40cc | [] | no_license | UAEDF/vbfHbb | 377e956a2d002eacd2090a4abbaa6bffb141454e | ecd5bfefa3db8d2c8283e306d68da42de44f7e39 | refs/heads/master | 2020-04-22T16:54:48.622168 | 2015-12-26T16:07:44 | 2015-12-26T16:07:44 | 12,751,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,460 | py | #!/usr/bin/env python
import ROOT
from ROOT import *
import sys,re,os
from optparse import OptionParser
####################################################################################################
def parser():
	"""Build the (currently option-less) command-line parser."""
	return OptionParser()
####################################################################################################
def printWToText(w):
	"""Capture the console output of w.Print() and return it as a string."""
	# Redirect the process-level stdout fd into a temp file: ROOT's Print()
	# writes at the C level, so plain Python-side redirection would miss it.
	old = os.dup( sys.stdout.fileno() )
	# NOTE(review): file() is Python 2 only (this script also uses py2 print).
	out = file('stdouterr.txt','w')
	os.dup2( out.fileno(), sys.stdout.fileno() )
	w.Print()
	# Restore the original stdout fd.
	# NOTE(review): 'old' is never os.close()d — small fd leak per call.
	os.dup2( old, sys.stdout.fileno() )
	out.close()
	#
	out = file('stdouterr.txt','r')
	text = out.read()
	out.close()
	#
	os.remove('stdouterr.txt')
	return text
####################################################################################################
def getObject(w, nam):
	"""Fetch the object called *nam* from the workspace *w*."""
	return w.obj(nam)
####################################################################################################
def line(nam, fun, x1, x2):
	"""Create a dashed, dark-violet TF1 guide line over [x1, x2]."""
	guide = TF1(nam, fun, x1, x2)
	guide.SetLineColor(kViolet + 3)
	guide.SetLineStyle(kDashed)
	return guide
####################################################################################################
def legend(a, b, c, d):
	"""Create a transparent, borderless TLegend spanning corners (a,b)-(c,d)."""
	box = TLegend(a, b, c, d)
	box.SetFillColor(0)
	box.SetFillStyle(0)
	box.SetBorderSize(0)
	box.SetTextFont(62)
	box.SetTextColor(kBlack)
	box.SetTextSize(0.045)
	return box
####################################################################################################
def pave(a, b, c, d):
	"""Create a transparent TPaveText in NDC coordinates with left-aligned,
	dark-violet text."""
	box = TPaveText(a, b, c, d, "NDC")
	box.SetFillColor(0)
	box.SetFillStyle(0)
	box.SetBorderSize(0)
	box.SetTextFont(62)
	box.SetTextColor(kViolet + 3)
	box.SetTextSize(0.045)
	box.SetTextAlign(11)
	return box
####################################################################################################
def main():
	"""For each 'Alt*.root' workspace given on the command line, histogram every
	qcd_model pdf, plot it and its ratio to the reference category (CAT0 for
	NOM, CAT4 for PRK) with the POL1 transfer functions overlaid, and save the
	canvases under plots/."""
	mp = parser()
	opts,args = mp.parse_args()
	gROOT.SetBatch(1)
	gROOT.ProcessLineSync(".x ../../common/styleCMSSara.C")
	archive = {}
	cplain = TCanvas("cplain","cplain",3600,1500)
	cplain.Divide(4,2)
	cratio = TCanvas("cratio","cratio",3600,1500)
	cratio.Divide(4,2)
	cplains = TCanvas("cplains","cplains",2400,1000)
	cplains.Divide(4,2)
	cratios = TCanvas("cratios","cratios",2400,1000)
	cratios.Divide(4,2)
	ftransfer = TFile.Open('transferFunctions.root','read')
	tran = {}
	# Load the POL1 transfer functions per category; the reference categories
	# (0 and 4) have no uncertainty band, hence the None second entry.
	for i in range(7):
		if not (i==0 or i==4): tran[i] = [ftransfer.Get("fitRatio_sel%s_CAT%d_POL1"%('NOM' if i<4 else 'PRK',i)).Clone("trans_CAT%d"%i),ftransfer.Get("gUnc_sel%s_CAT%d_POL1"%('NOM' if i<4 else 'PRK',i)).Clone("trans_CAT%d"%i)]
		else: tran[i] = [ftransfer.Get("fitRatio_sel%s_CAT%d_POL1"%('NOM' if i<4 else 'PRK',i)).Clone("trans_CAT%d"%i),None]
		tran[i][0].SetLineColor(kGreen+3)
		tran[i][0].SetLineStyle(kSolid)
		if not tran[i][1]==None:
			tran[i][1].SetFillColor(kGray+1)
			tran[i][1].SetFillStyle(3454)
	for fname in args:
		fopen = TFile.Open(fname,'read')
		w = fopen.Get("w")
		print fname
		# The alternative fit-function name is encoded in the file name.
		alt = re.search('.*Alt([A-Za-z0-9_]*).root',fname).group(1)
		# Scrape the workspace printout for qcd_model pdf names/categories.
		text = printWToText(w)
		for Line in text.split('\n'):
			if '::qcd_model' in Line:
				typ = re.search('(.*)::.*',Line).group(1)
				nam = re.search('.*::(.*)\[.*',Line).group(1)
				cat = re.search('.*CAT([0-9]*).*',nam).group(1)
				obj = getObject(w,nam)
				th1 = obj.createHistogram("mbbReg_CAT%d"%int(cat),240)
				th1.SetName("h"+nam)
				#print alt, cat, nam, '(%s)'%typ, obj, th1
				archive[(alt,cat)] = {}
				archive[(alt,cat)]['alt'] = alt
				archive[(alt,cat)]['cat'] = cat
				archive[(alt,cat)]['typ'] = typ
				archive[(alt,cat)]['nam'] = nam
				archive[(alt,cat)]['obj'] = obj
				archive[(alt,cat)]['th1'] = th1
				# Ratio of this category to its reference (CAT0 or CAT4);
				# relies on the reference having been processed first.
				rat = th1.Clone("r"+nam)
				rat.Divide(archive[(alt,cat)]['th1'],archive[(alt,'0' if int(cat)<4 else '4')]['th1'])
				rat.GetYaxis().SetRangeUser(0.92,1.08)
				pav = pave(0.6,0.7,0.9,0.9)
				pav.AddText('Function: %s'%alt)
				lin = line("lin","1.",th1.GetXaxis().GetXmin(),th1.GetXaxis().GetXmax())
				archive[(alt,cat)]['rat'] = rat
				cplain.cd(int(cat)+1)
				th1.Draw()
				#for ibin in range(th1.GetNbinsX()):
				#	print th1.GetBinContent(ibin), th1.GetBinError(ibin)
				#print
				pav.Draw()
				cratio.cd(int(cat)+1)
				archive[(alt,cat)]['pav'] = pav
				archive[(alt,cat)]['lin'] = lin
				rat.Draw("axis")
				if not (int(cat)==0 or int(cat)==4): tran[int(cat)][1].Draw("E3")
				tran[int(cat)][0].Draw("same")
				rat.Draw("same")
				pav.Draw("same")
				lin.Draw("same")
				gPad.Update()
				pav.SetY1NDC(pav.GetY2NDC()-len(pav.GetListOfLines())*0.055)
				leg = legend(0.6,0.5,0.9,pav.GetY1NDC()-0.02)
				leg.AddEntry(rat,"CAT%d / CAT%d"%(int(cat),0 if int(cat)<4 else 4),"L")
				leg.AddEntry(tran[int(cat)][0],"TF POL1","L")
				leg.Draw()
				gPad.Update()
				leg.SetY1NDC(leg.GetY2NDC()-leg.GetNRows()*0.055)
				archive[(alt,cat)]['leg'] = leg
				cplains.cd(int(cat)+1)
				th1.Draw()
				pav.Draw()
				cratios.cd(int(cat)+1)
				rat.Draw()
				tran[int(cat)][0].Draw("same")
				if not (int(cat)==0 or int(cat)==4): tran[int(cat)][1].Draw("sameE3")
				pav.Draw()
				lin.Draw("same")
				leg.Draw()
		if not os.path.exists('plots'): os.makedirs('plots')
		cplain.SaveAs("plots/c_%s_plain.pdf"%alt)
		cratio.SaveAs("plots/c_%s_ratio.pdf"%alt)
		cplains.SaveAs("plots/c_%s_plain.png"%alt)
		cratios.SaveAs("plots/c_%s_ratio.png"%alt)
		fopen.Close()
	ftransfer.Close()
	cplain.Close()
	cratio.Close()
####################################################################################################
if __name__=='__main__':
main()
| [
"sara.alderweireldt@cern.ch"
] | sara.alderweireldt@cern.ch |
7c9e1c0a5c012818be68148a3a2adfb9fe3cdd8f | 43a1e9c15132398433ef1bd941e49eb0372136e6 | /day21/class_test.py | 1ef6ff0a6a57edd645b641af0ca7dd32e4a6df21 | [] | no_license | dlatnrud/pyworks | 3eaf253f7e9cf74e6504770885e4a63fd1c4e293 | 745ae5c6a85015800d049176b7d5aeb0df0f000a | refs/heads/master | 2023-08-12T16:14:50.936403 | 2021-10-15T00:48:04 | 2021-10-15T00:48:04 | 402,286,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py |
from libs.myclass import Car, Student
s1 = Student("콩쥐", 3)
print(s1)
s1.learn()
s2 = Student("팥쥐", 2)
print(s2)
car1 = Car("소나타", "흰색", 2500)
car2 = Car("BMW", "black", 3000)
print("\t 모델명 \t색상 \t배기량")
print("차량1 " + car1.model + '\t' + car1.color + '\t' + str(car1.cc))
print("차량2 " + car2.model + '\t ' + car2.color + '\t' + str(car2.cc))
| [
"dlatnrud2268@naver.com"
] | dlatnrud2268@naver.com |
aa2bde45f02c21dde8c35da4febe185068b1d850 | 172189e030da9b1cd55877ba8e76ed3ad7ab8e2a | /venv/Scripts/pip3-script.py | b8d0f92f006d806cd6fd661c6200993d17351521 | [] | no_license | class-yoo/practice02 | 8f3d44de85d2d39d5979840f0a86029bb925c995 | cc6ee1f472de7f0e84e17566ab629e6ea2871b39 | refs/heads/master | 2022-01-31T05:11:18.175308 | 2019-06-13T10:31:12 | 2019-06-13T10:31:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | #!D:\cafe24\dowork\pycharmProjects\practice02\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"mynameisyjh@gmail.com"
] | mynameisyjh@gmail.com |
dcc20f5683f3d92aa30cd10bbd9d1b271ee391ce | c380659f6a79eee18c2ea41ec2cff8b55d725243 | /src/pyAHP/where.py | 77a23578a1feab2cb7fb007809940bc0c440ad11 | [] | no_license | ai-se/softgoals | 49b0c7f8fa010697c339831bf0561f54f0e10910 | 41e9b467811c7a491aeedcc88d76910a83fe5c50 | refs/heads/master | 2021-01-17T00:11:04.123534 | 2017-06-04T02:56:11 | 2017-06-04T02:56:11 | 41,162,015 | 1 | 4 | null | 2015-12-01T03:45:40 | 2015-08-21T15:03:25 | Python | UTF-8 | Python | false | false | 5,731 | py | from __future__ import print_function, division
import sys,os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
from utilities.lib import *
__author__ = 'panzer'
def default_settings():
return O(
min_size = 8,
max_depth = 10,
prefix = "|.. "
)
class Row(O):
"""
Row Of a Binary Tree Node
"""
def __init__(self, decisions):
O.__init__(self)
self.decisions = decisions
self.meta = None
self.normalized = None
class TreeNode(O):
"""
Node of a binary Tree
"""
id_counter = 0
def __init__(self, rows, parent, level):
"""
:param parent: Node's parent
:param level: Level of a node. Starts from 0
:return:
"""
O.__init__(self)
self.id = TreeNode.id_counter
self._parent = parent
self.level = level
self.kids = None
self._rows = rows
TreeNode.id_counter += 1
def add_kid(self, kid):
"""
Add a child to the node
:param kid:
:return:
"""
if self.kids is None:
self.kids = []
self.kids.append(kid)
def get_rows(self):
return self._rows
class Where(O):
"""
Fastmap based clusterer
"""
def __init__(self, rows, **settings):
"""
:param rows: Rows to be clustered
:param settings:
:return:
"""
O.__init__(self)
self.rows = rows
self.limits = self.set_limits()
self.settings = default_settings().update(**settings)
def set_limits(self):
"""
Assign max and min values based on all the data
:return:
"""
maxs = [-sys.maxint]*len(self.rows[0].decisions)
mins = [sys.maxint]*len(self.rows[0].decisions)
for row in self.rows:
for i, decision in enumerate(row.decisions):
if decision > maxs[i]: maxs[i] = decision
if decision < mins[i]: mins[i] = decision
return O(maxs = maxs, mins = mins)
def too_deep(self, level):
"""
Check if the tree is too deep
:param level:
:return:
"""
return level > self.settings.max_depth
def too_few(self, rows):
"""
Check if a cluster contains the minimal rows
:param rows:
:return:
"""
return len(rows) < self.settings.min_size
def get_furthest(self, row, rows):
"""
Get furthest row from a set of rows wrt a current row
:param row:
:param rows:
:return:
"""
furthest, dist = None, 0
for one in rows:
if row.id == one.id: continue
tmp = self.euclidean(row, one)
if tmp > dist:
furthest, dist = one, dist
return furthest
def euclidean(self, one, two):
"""
Compute Euclidean distance
:param one:
:param two:
:return:
"""
one_normalized = self.normalize(one)
two_normalized = self.normalize(two)
dist = 0
for one_i, two_i in zip(one_normalized, two_normalized):
dist += (one_i - two_i) ** 2
return dist
def normalize(self, one):
"""
Normalize row
:param one:
:return:
"""
if one.normalized is None:
normalized = []
for i, decision in enumerate(one.decisions):
if self.limits.mins[i] == self.limits.maxs[i]:
value = 0
else:
value = (decision - self.limits.mins[i]) / (self.limits.maxs[i] - self.limits.mins[i])
normalized.append(value)
one.normalized = normalized
return one.normalized
def get_furthest2(self, rows):
"""
Get furthest extreme rows from a list of rows
:param rows:
:return:
"""
east, west, dist = None, None, -1
for i in range(len(rows)-1):
for j in range(i+1, len(rows)):
temp_dist = self.euclidean(rows[i], rows[j])
if temp_dist > dist:
east, west, dist = rows[i], rows[j], temp_dist
return east, west
def fastmap(self, node):
"""
Fastmap projection
:param node:
:return:
"""
def second(iterable): return iterable[1]
rows = shuffle(node.get_rows())
east, west = self.get_furthest2(rows)
c = self.euclidean(east, west)
lst = []
for one in rows:
a = self.euclidean(one, west)
b = self.euclidean(one, east)
if c == 0:
x = 0
else:
x = (a**2 + c**2 - b**2)/(2*c)
lst += [(x, one)]
lst = sorted(lst)
mid = len(lst)//2
wests = map(second, lst[:mid])
easts = map(second, lst[mid:])
west = wests[0]
east = easts[-1]
return wests, west, easts, east
def show(self, rows, node, level, has_kids = True):
"""
Print Node
:param rows:
:param node:
:param level:
:param has_kids:
:return:
"""
if not has_kids:
print(self.settings.prefix*level, len(rows), ' ; ', node.id)
else:
print(self.settings.prefix*level, len(rows))
def cluster(self, rows = None, level = 0, parent = None, verbose = False):
"""
Cluster rows
:param rows:
:param level:
:param parent:
:param verbose:
:return:
"""
if rows is None:
rows = self.rows
node = TreeNode(rows, parent, level)
if not self.too_deep(level) and not self.too_few(rows):
if verbose: self.show(rows, node, level, has_kids=True)
wests, west, easts, east = self.fastmap(node)
node.west, node.east = west, east
node.add_kid(self.cluster(wests, level=level+1, parent=node, verbose=verbose))
node.add_kid(self.cluster(easts, level=level+1, parent=node, verbose=verbose))
else:
if verbose: self.show(rows, node, level, has_kids=False)
east, west = self.get_furthest2(rows)
node.west, node.east = west, east
return node
def get_leaves(self, node):
leaves = []
if node.kids:
for kid in node.kids:
leaves += self.get_leaves(kid)
else:
leaves = [node]
return leaves
| [
"george.meg91@gmail.com"
] | george.meg91@gmail.com |
7669a41a8804ee7b4055f88380962bbbc771ea49 | 2daa10000d265cd039ee4489d5ade35837e48bb0 | /log/tasks/post_schedule.py | 1061173a98933287b3c1d908047a2a98453d201c | [] | no_license | mohsenamoon1160417237/invites | ef2d23e6e21965b99f0861efa9f2c36a5ead131e | eacef16787f8bfecfe10e5ab9500116419aa4643 | refs/heads/master | 2023-08-22T11:35:09.071118 | 2021-10-24T12:15:03 | 2021-10-24T12:15:03 | 420,020,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | from celery import shared_task
from celery.utils.log import get_task_logger
from log.models.PostLog import PostLog
from django.shortcuts import get_object_or_404
logger = get_task_logger(__name__)
@shared_task
def post_schedule(post_id):
post = get_object_or_404(PostLog , id=post_id)
post.status = PostLog.PUBLISH
post.save()
logger.info("the post saved as publish!") | [
"dramatic225@gmail.com"
] | dramatic225@gmail.com |
0c14fbbc574d2ff198fe9688adc63b8361eee419 | 908ad8a65600996b263bb53dd3054e742c533dab | /akshare/stock/stock_info.py | 5999c616aa544102b154cd70c65aef64b35bc27c | [
"MIT"
] | permissive | pangyouzhen/akshare | 47c7d9e944ac197d3df5cce81eb33da5feccd518 | 5050cda92624c642d70a196d93a343e53a12fe17 | refs/heads/master | 2023-05-09T00:27:26.011181 | 2021-05-30T07:58:59 | 2021-05-30T07:58:59 | 371,892,903 | 0 | 0 | MIT | 2021-05-29T10:07:23 | 2021-05-29T05:57:53 | null | UTF-8 | Python | false | false | 8,743 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/12/28 16:31
Desc: 股票基本信息
"""
import json
from io import BytesIO
import pandas as pd
import requests
def stock_info_sz_name_code(indicator: str = "B股列表") -> pd.DataFrame:
"""
深圳证券交易所-股票列表
http://www.szse.cn/market/product/stock/list/index.html
:param indicator: choice of {"A股列表", "B股列表", "CDR列表", "AB股列表"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
"""
url = "http://www.szse.cn/api/report/ShowReport"
indicator_map = {"A股列表": "tab1", "B股列表": "tab2", "CDR列表": "tab3", "AB股列表": "tab4"}
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1110",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content), engine="xlrd")
if len(temp_df) > 10:
temp_df["A股代码"] = temp_df["A股代码"].astype(str).str.split('.', expand=True).iloc[:, 0].str.zfill(6).str.replace("000nan", "")
return temp_df
else:
return temp_df
def stock_info_sh_name_code(indicator: str = "主板A股") -> pd.DataFrame:
"""
上海证券交易所-股票列表
http://www.sse.com.cn/assortment/stock/list/share/
:param indicator: choice of {"主板A股": "1", "主板B股": "2", "科创板": "8"}
:type indicator: str
:return: 指定 indicator 的数据
:rtype: pandas.DataFrame
"""
indicator_map = {"主板A股": "1", "主板B股": "2", "科创板": "8"}
url = "http://query.sse.com.cn/security/stock/getStockListData.do"
headers = {
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/assortment/stock/list/share/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"jsonCallBack": "jsonpCallback66942",
"isPagination": "true",
"stockCode": "",
"csrcCode": "",
"areaName": "",
"stockType": indicator_map[indicator],
"pageHelp.cacheSize": "1",
"pageHelp.beginPage": "1",
"pageHelp.pageSize": "2000",
"pageHelp.pageNo": "1",
"pageHelp.endPage": "11",
"_": "1589881387934",
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{"):-1])
temp_df = pd.DataFrame(json_data["result"])
return temp_df
def stock_info_sh_delist(indicator: str = "暂停上市公司"):
"""
上海证券交易所-暂停上市公司-终止上市公司
http://www.sse.com.cn/assortment/stock/list/firstissue/
:param indicator: choice of {"终止上市公司": "5", "暂停上市公司": "4"}
:type indicator: str
:return: 暂停上市公司 or 终止上市公司 的数据
:rtype: pandas.DataFrame
"""
indicator_map = {"终止上市公司": "5", "暂停上市公司": "4"}
url = "http://query.sse.com.cn/security/stock/getStockListData2.do"
headers = {
"Host": "query.sse.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.sse.com.cn/assortment/stock/list/share/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
}
params = {
"jsonCallBack": "jsonpCallback66942",
"isPagination": "true",
"stockCode": "",
"csrcCode": "",
"areaName": "",
"stockType": indicator_map[indicator],
"pageHelp.cacheSize": "1",
"pageHelp.beginPage": "1",
"pageHelp.pageSize": "2000",
"pageHelp.pageNo": "1",
"pageHelp.endPage": "11",
"_": "1589881387934",
}
r = requests.get(url, params=params, headers=headers)
text_data = r.text
json_data = json.loads(text_data[text_data.find("{"):-1])
temp_df = pd.DataFrame(json_data["result"])
return temp_df
def stock_info_sz_delist(indicator: str = "暂停上市公司") -> pd.DataFrame:
"""
深证证券交易所-暂停上市公司-终止上市公司
http://www.szse.cn/market/stock/suspend/index.html
:param indicator: choice of {"暂停上市公司", "终止上市公司"}
:type indicator: str
:return: 暂停上市公司 or 终止上市公司 的数据
:rtype: pandas.DataFrame
"""
indicator_map = {"暂停上市公司": "tab1", "终止上市公司": "tab2"}
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "1793_ssgs",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content), engine="xlrd")
temp_df["证券代码"] = temp_df["证券代码"].astype("str").str.zfill(6)
return temp_df
def stock_info_sz_change_name(indicator: str = "全称变更") -> pd.DataFrame:
"""
深证证券交易所-更名公司
http://www.szse.cn/market/companys/changename/index.html
:param indicator: choice of {"全称变更": "tab1", "简称变更": "tab2"}
:type indicator: str
:return: 全称变更 or 简称变更 的数据
:rtype: pandas.DataFrame
"""
indicator_map = {"全称变更": "tab1", "简称变更": "tab2"}
url = "http://www.szse.cn/api/report/ShowReport"
params = {
"SHOWTYPE": "xlsx",
"CATALOGID": "SSGSGMXX",
"TABKEY": indicator_map[indicator],
"random": "0.6935816432433362",
}
r = requests.get(url, params=params)
temp_df = pd.read_excel(BytesIO(r.content), engine="xlrd")
temp_df["证券代码"] = temp_df["证券代码"].astype("str").str.zfill(6)
return temp_df
def stock_info_change_name(stock: str = "688588") -> pd.DataFrame:
"""
新浪财经-股票曾用名
http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/300378.phtml
:param stock: 股票代码
:type stock: str
:return: 股票曾用名列表
:rtype: list
"""
url = f"http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/{stock}.phtml"
r = requests.get(url)
temp_df = pd.read_html(r.text)[3].iloc[:, :2]
temp_df.dropna(inplace=True)
temp_df.columns = ["item", "value"]
temp_df["item"] = temp_df["item"].str.split(":", expand=True)[0]
try:
name_list = temp_df[temp_df["item"] == "证券简称更名历史"].value.tolist()[0].split(" ")
return name_list
except:
return None
def stock_info_a_code_name() -> pd.DataFrame:
"""
沪深 A 股列表
:return: 沪深 A 股数据
:rtype: pandas.DataFrame
"""
big_df = pd.DataFrame()
stock_sh = stock_info_sh_name_code(indicator="主板A股")
stock_sh = stock_sh[["SECURITY_CODE_A", "SECURITY_ABBR_A"]]
stock_sh.columns = ["公司代码", "公司简称"]
stock_sz = stock_info_sz_name_code(indicator="A股列表")
stock_sz["A股代码"] = stock_sz["A股代码"].astype(str).str.zfill(6)
big_df = big_df.append(stock_sz[["A股代码", "A股简称"]], ignore_index=True)
big_df.columns = ["公司代码", "公司简称"]
stock_kcb = stock_info_sh_name_code(indicator="科创板")
stock_kcb = stock_kcb[["SECURITY_CODE_A", "SECURITY_ABBR_A"]]
stock_kcb.columns = ["公司代码", "公司简称"]
big_df = big_df.append(stock_sh, ignore_index=True)
big_df = big_df.append(stock_kcb, ignore_index=True)
big_df.columns = ["code", "name"]
return big_df
if __name__ == '__main__':
stock_info_sz_df = stock_info_sz_name_code(indicator="A股列表")
print(stock_info_sz_df)
stock_info_sz_df = stock_info_sz_name_code(indicator="B股列表")
print(stock_info_sz_df)
stock_info_sz_df = stock_info_sz_name_code(indicator="AB股列表")
print(stock_info_sz_df)
stock_info_sz_df = stock_info_sz_name_code(indicator="CDR列表")
print(stock_info_sz_df)
stock_info_sh_delist_df = stock_info_sh_delist(indicator="终止上市公司")
print(stock_info_sh_delist_df)
stock_info_sz_delist_df = stock_info_sz_delist(indicator="终止上市公司")
print(stock_info_sz_delist_df)
stock_info_sz_change_name_df = stock_info_sz_change_name(indicator="全称变更")
print(stock_info_sz_change_name_df)
stock_info_change_name_list = stock_info_change_name(stock="000503")
print(stock_info_change_name_list)
stock_info_a_code_name_df = stock_info_a_code_name()
print(stock_info_a_code_name_df)
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
b3889823658d4ea8723d6e2206876dad2817f7e7 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/CommonScripts/Scripts/DomainReputation/DomainReputation.py | 63cc309711c1842d76d42b7efd6895d62bd3645f | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 975 | py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def domain_reputation():
results = demisto.executeCommand('domain', {'domain': demisto.get(demisto.args(), 'domain')})
for item in results:
if isError(item):
if is_offset_error(item): # call to is_offset_error is a temporary fix to ignore offset 1 error
results.remove(item)
else:
item['Contents'] = item['Brand'] + ' returned an error.\n' + str(item['Contents'])
demisto.results(results)
def is_offset_error(item) -> bool:
'''error msg: 'Offset: 1' will not be displayed to Users
This method is temporary and will be removed
once XSUP-18208 issue is fixed.'''
if item['Contents'] and 'Offset' in item['Contents']:
return True
return False
def main():
domain_reputation()
if __name__ in ('__main__', '__builtin__', 'builtins'): # pragma: no cover
main()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
f21bc36aca61bad4889e5e3463d4efea8fa18d04 | 23fb7793e9d94e56714b618faacc4e85db8d74f9 | /explore/transform_angles.py | 7c1065a9ff9c1163f802df1697413f0c57cb4d89 | [
"BSD-3-Clause"
] | permissive | SasView/sasmodels | b4b6432c083deeaf77a96d352afbf10c696f4527 | 00fd0242007be2023cf7b4887b33da6247a6adcc | refs/heads/master | 2023-08-30T23:58:16.030202 | 2023-08-15T13:47:45 | 2023-08-15T13:47:45 | 30,761,174 | 17 | 31 | BSD-3-Clause | 2023-09-12T13:27:39 | 2015-02-13T15:04:20 | Python | UTF-8 | Python | false | false | 2,048 | py | #!/usr/bin/env python
"""
Small application to change theta, phi and psi from SasView 3.x models to the
new angle definition in SasView 4.x and above.
Usage: python explore/transform_angles.py theta phi psi
"""
from __future__ import print_function, division
import sys
import numpy as np
from numpy import pi, cos, sin, sqrt, exp, degrees, radians
from scipy.optimize import fmin
# Definition of rotation matrices comes from wikipedia:
# https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations
def Rx(angle):
"""Construct a matrix to rotate points about *x* by *angle* degrees."""
a = radians(angle)
R = [[1, 0, 0],
[0, +cos(a), -sin(a)],
[0, +sin(a), +cos(a)]]
return np.array(R)
def Ry(angle):
"""Construct a matrix to rotate points about *y* by *angle* degrees."""
a = radians(angle)
R = [[+cos(a), 0, +sin(a)],
[0, 1, 0],
[-sin(a), 0, +cos(a)]]
return np.array(R)
def Rz(angle):
"""Construct a matrix to rotate points about *z* by *angle* degrees."""
a = radians(angle)
R = [[+cos(a), -sin(a), 0],
[+sin(a), +cos(a), 0],
[0, 0, 1]]
return np.array(R)
def transform_angles(theta, phi, psi, qx=0.1, qy=0.1):
Rold = Rz(-psi)@Rx(theta)@Ry(-(90 - phi))
cost = lambda p: np.linalg.norm(Rz(-p[2])@Ry(-p[0])@Rz(-p[1]) - Rold)
result = fmin(cost, (theta, phi, psi))
theta_p, phi_p, psi_p = result
Rnew = Rz(-psi_p)@Ry(-theta_p)@Rz(-phi_p)
print("old: theta, phi, psi =", ", ".join(str(v) for v in (theta, phi, psi)))
print("new: theta, phi, psi =", ", ".join(str(v) for v in result))
try:
point = np.array([qx, qy, [0]*len(qx)])
except TypeError:
point = np.array([[qx],[qy],[0]])
for p in point.T:
print("q abc old for", p, (Rold@p.T).T)
print("q abc new for", p, (Rnew@p.T).T)
if __name__ == "__main__":
theta, phi, psi = (float(v) for v in sys.argv[1:])
#transform_angles(theta, phi, psi)
transform_angles(theta, phi, psi, qx=-0.017, qy=0.035)
| [
"pkienzle@nist.gov"
] | pkienzle@nist.gov |
3d44f956b37985fb6c1fff55f9c60a82d9c0bde3 | ed1e81a2325d310de7961274a06bfe6cdb7993d0 | /basic-python/xmlcreation.py | f448a461e86acb8db27c96bd5449e8453674cf27 | [] | no_license | fahimkhan/python | ce573298adf30ca8426b74f3ab275ab7f8047a91 | 1733ad39cf214362c8a76f8996740715888d2101 | refs/heads/master | 2021-01-15T15:50:27.323739 | 2016-08-24T11:02:56 | 2016-08-24T11:02:56 | 20,254,607 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | #!/usr/bin/python
import xml.etree.cElementTree as ET
root = ET.Element("root")
doc = ET.SubElement(root, "doc")
field1 = ET.SubElement(doc, "field1")
field1.set("name", "blah")
field1.text = "some value1"
field2 = ET.SubElement(doc, "field2")
field2.set("name", "asdfasd")
field2.text = "some vlaue2"
tree = ET.ElementTree(root)
tree.write("filename.xml")
| [
"fahim.elex@gmail.com"
] | fahim.elex@gmail.com |
ef82b63e0e7dbbd1085825847ec183ac0f11b914 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano2615.py | b9cfb7d4b3e6f3a41db23aa2782f28b088a22c4b | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/270000/E2E949DF-C719-1B48-80C3-156011763C93.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest2615.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
bfae3709907e8c2bfdecf0b16044bc79c317d929 | abad82a1f487c5ff2fb6a84059a665aa178275cb | /Codewars/8kyu/multiply-the-number/Python/test.py | 65310c35d9c4b0fb2c4cc05b73d6733837d2db71 | [
"MIT"
] | permissive | RevansChen/online-judge | 8ae55f136739a54f9c9640a967ec931425379507 | ad1b07fee7bd3c49418becccda904e17505f3018 | refs/heads/master | 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # Python - 2.7.6
Test.describe('Basic Tests')
Test.assert_equals(multiply(10), 250)
Test.assert_equals(multiply(5), 25)
Test.assert_equals(multiply(200), 25000)
Test.assert_equals(multiply(0), 0)
Test.assert_equals(multiply(-2), -10)
| [
"d79523@hotmail.com"
] | d79523@hotmail.com |
dbc68db512ec1e4767fb5aa260cf368e4c11642e | 29d09c634ffdd8cab13631d62bc6e3ad00df49bf | /Algorithm/swexpert/1216_회문2.py | f7bb26f33e8ca6571a0314bcc3dffae1b790dea9 | [] | no_license | kim-taewoo/TIL_PUBLIC | f1d32c3b4f46344c1c99f02e95cc6d2a888a0374 | ae86b542f8b1805b5dd103576d6538e3b1f5b9f4 | refs/heads/master | 2021-09-12T04:22:52.219301 | 2021-08-28T16:14:11 | 2021-08-28T16:14:11 | 237,408,159 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | T = 10
def chk_palindrome(list_to_chk, length):
for i in range(length//2):
if list_to_chk[i] != list_to_chk[-1-i]:
return False
return True
for _ in range(1, T+1):
t = int(input())
a = [list(input()) for _ in range(100)]
found = False
for l in range(100, 0, -1): # 가장 긴 100부터 1칸씩 내려가며 검사
for r in range(100):
if found: break
for s in range(100-l+1):
if found: break
chk_list = a[r][s:s+l] # 가로(각 행) 검사
chk_list2 = [a[x][r] for x in range(s,s+l)] # 세로(각 열) 검사
if chk_palindrome(chk_list, l) or chk_palindrome(chk_list2, l):
found = True
if found: break
print("#{} {}".format(t, l)) | [
"acoustic0419@gmail.com"
] | acoustic0419@gmail.com |
08506cafbe766926973725265dc18f740b64100d | a9ca402cc2a0757831d355781f388443067bae76 | /swagger_server/controllers/bsdf_material_controller.py | d4c0836bc326159fa12808677a6507173f728866 | [] | no_license | AntoineDao/LadyBugToolsAPIServer | 23a21fbc0a492df35923e33d096be5151adb7f52 | 2c13c96ed3e7c9e44f22875f80c2dd7c10cb2727 | refs/heads/master | 2020-03-26T18:23:51.461620 | 2018-09-10T22:40:47 | 2018-09-10T22:40:47 | 145,210,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | import connexion
import six
from swagger_server.models.bsdf_material_schema import BSDFMaterialSchema # noqa: E501
from swagger_server.models.error_model_schema import ErrorModelSchema # noqa: E501
from swagger_server.models.succesfully_created_schema import SuccesfullyCreatedSchema # noqa: E501
from swagger_server import util
def material_bsdf_post(bsdf_material): # noqa: E501
"""Create a new bsdf material object
Adds a new bsdf material object to the database # noqa: E501
:param bsdf_material: a bsdf material object
:type bsdf_material: dict | bytes
:rtype: SuccesfullyCreatedSchema
"""
if connexion.request.is_json:
bsdf_material = BSDFMaterialSchema.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def material_bsdf_uuid_put(uuid, bsdf_material): # noqa: E501
"""Modify an existing bsdf material file
Modifies any parameter (except uuid) of a material file by completely replacing the definition file. A finer grain method can be set up later. # noqa: E501
:param uuid: The unique identifier of the material.
:type uuid: str
:param bsdf_material: a bsdf material object
:type bsdf_material: dict | bytes
:rtype: None
"""
if connexion.request.is_json:
bsdf_material = BSDFMaterialSchema.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
| [
"antoinedao1@gmail.com"
] | antoinedao1@gmail.com |
351fe2da8b0c829785ddfedfdeb245bb586d9f7b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/invariance-equivariance_ID2466_for_PyTorch/eval_fewshot.py | 9c4cbe8bfed5b4736a93c1af7c925be9f0e067f0 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,704 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
from __future__ import print_function
import argparse
import socket
import time
import os
import mkl
import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from models import model_pool
from models.util import create_model
from dataset.mini_imagenet import MetaImageNet
from dataset.tiered_imagenet import MetaTieredImageNet
from dataset.cifar import MetaCIFAR100
from dataset.transform_cfg import transforms_test_options, transforms_list
from eval.meta_eval import meta_test, meta_test_tune
from eval.cls_eval import validate, embedding
from dataloader import get_dataloaders
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
mkl.set_num_threads(2)
def parse_option():
parser = argparse.ArgumentParser('argument for training')
# load pretrained model
parser.add_argument('--model', type=str, default='resnet12', choices=model_pool)
parser.add_argument('--model_path', type=str, default="", help='absolute path to .pth model')
# parser.add_argument('--model_path', type=str, default="/raid/data/IncrementLearn/imagenet/neurips20/model/maml_miniimagenet_test_5shot_step_5_5ways_5shots/pretrain_maml_miniimagenet_test_5shot_step_5_5ways_5shots.pt", help='absolute path to .pth model')
# dataset
parser.add_argument('--dataset', type=str, default='miniImageNet', choices=['miniImageNet', 'tieredImageNet',
'CIFAR-FS', 'FC100', "toy"])
parser.add_argument('--transform', type=str, default='A', choices=transforms_list)
# specify data_root
parser.add_argument('--data_root', type=str, default='/raid/data/IncrementLearn/imagenet/Datasets/MiniImagenet/', help='path to data root')
parser.add_argument('--simclr', type=bool, default=False, help='use simple contrastive learning representation')
# meta setting
parser.add_argument('--n_test_runs', type=int, default=600, metavar='N',
help='Number of test runs')
parser.add_argument('--n_ways', type=int, default=5, metavar='N',
help='Number of classes for doing each classification run')
parser.add_argument('--n_shots', type=int, default=1, metavar='N',
help='Number of shots in test')
parser.add_argument('--n_queries', type=int, default=15, metavar='N',
help='Number of query in test')
parser.add_argument('--n_aug_support_samples', default=5, type=int,
help='The number of augmented samples for each meta test sample')
parser.add_argument('--num_workers', type=int, default=3, metavar='N',
help='Number of workers for dataloader')
parser.add_argument('--test_batch_size', type=int, default=1, metavar='test_batch_size',
help='Size of test batch)')
parser.add_argument('--batch_size', type=int, default=64, help='batch_size')
opt = parser.parse_args()
if opt.dataset == 'CIFAR-FS' or opt.dataset == 'FC100':
opt.transform = 'D'
if 'trainval' in opt.model_path:
opt.use_trainval = True
else:
opt.use_trainval = False
# set the path according to the environment
if not opt.data_root:
opt.data_root = './data/{}'.format(opt.dataset)
else:
if(opt.dataset=="toy"):
opt.data_root = '{}/{}'.format(opt.data_root, "CIFAR-FS")
else:
opt.data_root = '{}/{}'.format(opt.data_root, opt.dataset)
opt.data_aug = True
return opt
def main():
opt = parse_option()
opt.n_test_runs = 600
train_loader, val_loader, meta_testloader, meta_valloader, n_cls, _ = get_dataloaders(opt)
# load model
model = create_model(opt.model, n_cls, opt.dataset)
ckpt = torch.load(opt.model_path)["model"]
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in ckpt.items():
name = k.replace("module.","")
new_state_dict[name]=v
model.load_state_dict(new_state_dict)
# model.load_state_dict(ckpt["model"])
if torch.npu.is_available():
model = model.npu()
cudnn.benchmark = True
start = time.time()
test_acc, test_std = meta_test(model, meta_testloader)
test_time = time.time() - start
print('test_acc: {:.4f}, test_std: {:.4f}, time: {:.1f}'.format(test_acc, test_std, test_time))
start = time.time()
test_acc_feat, test_std_feat = meta_test(model, meta_testloader, use_logit=False)
test_time = time.time() - start
print('test_acc_feat: {:.4f}, test_std: {:.4f}, time: {:.1f}'.format(test_acc_feat, test_std_feat, test_time))
if __name__ == '__main__':
main()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
309fc39980c3f32d8daa830f99857d0155d49716 | c6b9b9f2fbc6c62e7a86b02718954661af3c564f | /configs/_base_/schedules/schedule_s_short.py | dea71cb530411533af0eec5170b5d1105c0c0d92 | [
"Apache-2.0"
] | permissive | open-mmlab/mmflow | a90ff072805ac79cbc0b277baded1e74d25cccf0 | 9fb1d2f1bb3de641ddcba0dd355064b6ed9419f4 | refs/heads/master | 2023-05-22T05:19:48.986601 | 2023-01-10T16:05:18 | 2023-01-10T16:05:18 | 428,493,460 | 808 | 110 | Apache-2.0 | 2023-09-05T13:19:38 | 2021-11-16T02:42:41 | Python | UTF-8 | Python | false | false | 413 | py | # optimizer
optimizer = dict(
type='Adam', lr=0.0001, weight_decay=0.0004, betas=(0.9, 0.999))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step', by_epoch=False, gamma=0.5, step=[300000, 400000, 500000])
runner = dict(type='IterBasedRunner', max_iters=600000)
checkpoint_config = dict(by_epoch=False, interval=50000)
evaluation = dict(interval=50000, metric='EPE')
| [
"meowzheng@outlook.com"
] | meowzheng@outlook.com |
b9c2eafabcc422185d25520e77910dd66ca153e6 | 425db5a849281d333e68c26a26678e7c8ce11b66 | /LeetCodeSolutions/LeetCode_1249.py | b201911e1129c8f2db7ad7a3446d8cf269ba10af | [
"MIT"
] | permissive | lih627/python-algorithm-templates | e8092b327a02506086414df41bbfb2af5d6b06dc | a61fd583e33a769b44ab758990625d3381793768 | refs/heads/master | 2021-07-23T17:10:43.814639 | 2021-01-21T17:14:55 | 2021-01-21T17:14:55 | 238,456,498 | 29 | 8 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
stack = []
res = [''] * len(s)
for idx, val in enumerate(s):
if val == '(':
stack.append([idx, '('])
res[idx] = '('
elif val == ')':
if stack:
stack.pop()
res[idx] = ')'
else:
res[idx] = val
for tmp in stack:
res[tmp[0]] = ''
return ''.join(res)
| [
"lih627@outlook.com"
] | lih627@outlook.com |
bcddc785198dd4dfe6ed7c983ffc98e275103776 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/allergies/76c89c05add142a5bedef7b724ee84dd.py | 230885176ab163f21562dbcc0189ce3e469cd325 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 887 | py | class Allergies():
def __init__(self, id):
list = []
if id > 255:
self.id = id % 256
else:
self.id = id
# Map to binary list, probably
self.allergies_match = [int(x) for x in bin(self.id)[2:]][::-1]
self.allergies_list = [
"eggs", "peanuts", "shellfish", "strawberries",
"tomatoes", "chocolate", "pollen", "cats"
]
# Using function because it's what worked.
self.list = self.list_Gen()
def list_Gen(self):
ret_list = []
for x in xrange(len(self.allergies_match)):
# print(x)
if self.allergies_match[x] == 1:
ret_list.append(self.allergies_list[x])
return ret_list
# list = list()
def is_allergic_to(self, item):
return item in self.list_Gen()
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
5c87d0b227b33ef6578fd3ac68063dd2ed9d815b | d638929e5b699e80c6af8e675b6695e622ddc51b | /alarm/alarm.py | f95c2a463cad2bd9d80da1f1bece6af3aaf009dd | [
"MIT"
] | permissive | hobojoe1848/pybites-alarm | 51636dbd53ef7777953450b9b672dd11cc1384b1 | 40d5ef42846840ef2140f04db2b9b73a259ed12e | refs/heads/main | 2023-08-19T18:14:32.403388 | 2021-10-31T10:07:16 | 2021-10-31T10:07:16 | 423,051,580 | 0 | 0 | MIT | 2021-10-31T04:23:02 | 2021-10-31T04:23:01 | null | UTF-8 | Python | false | false | 1,206 | py | from pathlib import Path
import time
from typing import Optional
from pydub import AudioSegment
from pydub.playback import _play_with_simpleaudio
def countdown_and_play_alarm(
seconds: int,
alarm_file: str,
display_timer: bool = False,
timeout: Optional[int] = None,
) -> None:
"""Countdown N seconds then play an alarm file"""
while seconds:
mins, secs = divmod(seconds, 60)
if display_timer:
print(f"{mins:02}:{secs:02}", end="\r")
time.sleep(1)
seconds -= 1
if display_timer:
print("00:00", end="\r")
play_alarm_file(alarm_file, timeout)
def play_alarm_file(alarm_file: str, timeout: Optional[int] = None) -> None:
"""
Looking at pydub/playback.py simpleaudio has the ability to stop the song
"""
file_type = Path(alarm_file).suffix.lstrip(".")
song = AudioSegment.from_file(alarm_file, file_type)
# I know, should not use "internal" functions, but this was the only way
# to stop the song after a number of seconds
playback = _play_with_simpleaudio(song)
if isinstance(timeout, int):
time.sleep(timeout)
playback.stop()
else:
playback.wait_done()
| [
"bobbelderbos@gmail.com"
] | bobbelderbos@gmail.com |
831e19fb1affdcc0a44354a8d57c591877ad3f8c | 53dfe70337a2923ec7872ab911a0b85cf233a708 | /dtree.py | eb2fccab07ffa58c612f5f6abf255930076a8a8a | [] | no_license | rcaseyatbat/CS155Kaggle | 268334214bb3e635133414cc59673da12007f7be | 5d50125995312dc42732edd730ab94012cbb36ce | refs/heads/master | 2021-01-25T08:59:56.228301 | 2015-02-18T21:35:19 | 2015-02-18T21:35:19 | 30,485,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | import sys
# I need this because my python installation is weird..
sys.path.append('/usr/local/lib/python2.7/site-packages')
from sklearn import tree
import csv
import numpy as np
import matplotlib.pyplot as plt
# NOTE: Decrease if you want to do some cross validation.
# (just changed to 4000 to train the final model, after selected leaf
# parameter via cross valiation)
NUM_TRAININGS = 4000
fin_name = 'kaggle_train_wc.csv'
fout_name = 'kaggle_test_wc.csv'
with open(fin_name, 'r') as fin:
next(fin)
data = np.array(list(csv.reader(fin))).astype(int)
X_train = data[:NUM_TRAININGS, 1:-1]
Y_train = data[:NUM_TRAININGS, -1]
# these will be empty unless you do some cross validation
X_test = data[NUM_TRAININGS:, 1:-1]
Y_test = data[NUM_TRAININGS:, -1]
# grab the real test data
with open(fout_name, 'r') as fout:
next(fout)
data = np.array(list(csv.reader(fout))).astype(int)
X_testFile = data[:, 1:]
#Y_testFile = data[:, -1] # Note: theres no Y predictions for the real test data :)
# Used for cross validation to select parameters
def get_error(G, Y):
error = 0
for i in range(len(G)):
if G[i] != Y[i]:
error += 1
return 1.0 * error / len(G)
#min_samples_leafs = [i for i in range(1, 25)]
# NOTE: Just decided 12 here from looking at graphs during cross validation.
# Change back to previous line if you want to see the range
min_samples_leafs = [12]
test_errors = []
train_errors = []
for min_samples_leaf in min_samples_leafs:
# initialize the tree model
clf = tree.DecisionTreeClassifier(criterion='gini',
min_samples_leaf=min_samples_leaf)
# train the model
clf = clf.fit(X_train, Y_train)
# make prediction
G_train = clf.predict(X_train)
G_test = clf.predict(X_test)
G_testFile = clf.predict(X_testFile)
print G_testFile
# compute error
# NOTE: Uncomment if doing gross val
#train_error = get_error(G_train, Y_train)
#train_errors.append(train_error)
#test_error = get_error(G_test, Y_test)
#test_errors.append(test_error)
f = open('predictions.csv','w')
f.write('Id,Prediction\n')
for (i, e) in enumerate(G_testFile):
#print i, e
f.write('%d,%d\n' % (i+1, e))
| [
"="
] | = |
190ab7fce3bf18f63578fa2eb65d119e36c79aae | 01d46b81fd351f157f896d99451610e0ebf467e7 | /rjgoptionssite/oldflasky/flasky-09SEP/controllers/download_controller.py | 769a20639ea0565207852b6451761d890f20f5dd | [] | no_license | hfwebbed/Stock-Option-Analytics | d30e389d48f92a327af5d04fbb182245b1e3dcde | 1049f2cd543bced34a9a3c50505b5c8e120ffcea | refs/heads/master | 2023-08-03T04:52:48.975821 | 2022-03-15T19:07:25 | 2022-03-15T19:07:25 | 193,752,461 | 29 | 8 | null | 2023-07-22T09:17:04 | 2019-06-25T17:20:25 | Python | UTF-8 | Python | false | false | 1,200 | py |
from flask import send_file
import shutil
import openpyxl
from openpyxl import load_workbook
import time
class DownloadController:
def __init__(self,parameterService,tickerRateService):
self.parameterService = parameterService
self.tickerRateService = tickerRateService
pass
def dispatch(self, request):
tickers, from_date, till_date = self.parameterService.init_params(1500)
tickers = "goog"
ticker_data = self.tickerRateService.get_rate(tickers, from_date, till_date)
dest_file = 'static/excel/excel_dummy2.xlsm'
shutil.copy('static/excel/excel_dummy1.xlsm', dest_file)
wb = load_workbook(filename=dest_file)
ws = wb["Summary"]
ws["b4"] = tickers
ws["b5"] = from_date
ws["b6"] = till_date
ws["d4"] = ticker_data.iloc[0]['Close']
#ws["d4"] = ticker_data[0]["Close"]
wb.save(dest_file)
print(time.time())
result = send_file(dest_file,
mimetype='text/csv',
attachment_filename='dummy.xlsm',
as_attachment=True)
print(time.time())
return result
| [
"30417960+hfwebbed@users.noreply.github.com"
] | 30417960+hfwebbed@users.noreply.github.com |
a9ca55a19c0e1c55bbe0e7079fa7a63ab9e5208c | 5ba2ea4694d9423bc5435badba93b7b8fedfadd0 | /webapp/data_import/faust_stadtarchiv/DataImportFaustStadtarchivWorker.py | ac7737bf40552d794b0a8ca29ce5d458cca12081 | [] | no_license | Digital-Botschafter-und-mehr/mein-stadtarchiv | bdf480d82b366253afd27c697143ad5d727f652f | a9876230edac695710d4ec17b223e065fa61937c | refs/heads/master | 2023-02-05T18:43:13.159174 | 2021-01-01T09:35:46 | 2021-01-01T09:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,877 | py | # encoding: utf-8
"""
Copyright (c) 2017, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from lxml import etree
from ..DataImportWorker import DataImportWorker
from .FaustStadtarchivCategory import save_category, get_category
from .FaustStadtarchivDocument import save_document
class DataImportFaustStadtarchivWorker(DataImportWorker):
identifier = 'faust-stadtarchiv'
def is_valid(self):
if self.xml is None:
return False
if self.xml.tag != 'Stadtarchiv':
return False
if not len(self.xml):
return False
if self.xml[0].tag != 'Findbuch':
return False
return True
def save_base_data(self):
categories = {}
datasets = self.xml.findall('./Findbuch')
for dataset in datasets:
primary = self.get_field(dataset, './/Bestand')
if not primary:
continue
if primary not in categories.keys():
categories[primary] = []
secondary = self.get_field(dataset, './/Klassifikation')
if not secondary:
continue
if secondary in categories[primary]:
continue
categories[primary].append(secondary)
for primary_raw, secondaries in categories.items():
primary = save_category(self._parent, primary_raw)
for secondary in secondaries:
save_category(primary, secondary)
def save_data(self):
categories = {}
datasets = self.xml.findall('./Findbuch')
for dataset in datasets:
primary_title = self.get_field(dataset, './/Bestand')
if not primary_title:
continue
if primary_title not in categories.keys():
categories[primary_title] = {
'parent': get_category(self._parent, primary_title),
'children': {}
}
secondary = self.get_field(dataset, './/Klassifikation')
if not secondary:
continue
if secondary in categories[primary_title]['children'].keys():
continue
categories[primary_title]['children'][secondary] = get_category(categories[primary_title]['parent'], secondary)
for dataset in datasets:
save_document(categories, dataset)
@property
def data(self):
if not self._data:
self.file.seek(0)
self._data = self.file.read()
self._data = self._data.decode(encoding='ISO-8859-1')
self._data = self._data.replace('<?xml version="1.0" encoding="ISO-8859-1"?>', '')
return self._data
@property
def xml(self):
if self._xml is None:
try:
parser = etree.XMLParser(encoding='ISO-8859-1')
self._xml = etree.fromstring(self.data, parser=parser)
self.nsmap = self._xml.nsmap
if not self.nsmap:
return self._xml
self.nsmap['ns'] = self.nsmap[None]
del self.nsmap[None]
except etree.XMLSyntaxError:
return
except ValueError:
return
return self._xml
def get_field(self, data, path):
result = data.find(path)
if result is None:
return
if not result.text:
return
return result.text
| [
"mail@ernestoruge.de"
] | mail@ernestoruge.de |
77c97608f89f50599a28a23bff835d368f149a12 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/eventhub/azure-eventhub/azure/eventhub/aio/_eventprocessor/in_memory_checkpoint_store.py | 22ef721c0ee08b96a6f06b1267650094a1962f37 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 1,664 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------
from typing import Dict, Any, Iterable, Optional, Union
from azure.eventhub._eventprocessor.in_memory_checkpoint_store import InMemoryCheckpointStore as CheckPointStoreImpl
from .checkpoint_store import CheckpointStore
class InMemoryCheckpointStore(CheckpointStore):
def __init__(self):
self._checkpoint_store_impl = CheckPointStoreImpl()
async def list_ownership(
self, fully_qualified_namespace: str, eventhub_name: str, consumer_group: str, **kwargs: Any
) -> Iterable[Dict[str, Any]]:
return self._checkpoint_store_impl.list_ownership(fully_qualified_namespace, eventhub_name, consumer_group)
async def claim_ownership(
self, ownership_list: Iterable[Dict[str, Any]], **kwargs: Any
) -> Iterable[Dict[str, Any]]:
return self._checkpoint_store_impl.claim_ownership(ownership_list)
async def update_checkpoint(
self, checkpoint: Dict[str, Optional[Union[str, int]]], **kwargs: Any
) -> None:
self._checkpoint_store_impl.update_checkpoint(checkpoint)
async def list_checkpoints(
self, fully_qualified_namespace: str, eventhub_name: str, consumer_group: str, **kwargs: Any
) -> Iterable[Dict[str, Any]]:
return self._checkpoint_store_impl.list_checkpoints(fully_qualified_namespace, eventhub_name, consumer_group)
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
c6b1ec9abb66fcae482e064c75ae93ff5eabb333 | 10d5ce0b34806bd82715d544703e1cf1add4a146 | /TrafficGenerator/support/SSL_TLS_Support.py | 5ded90f2d52d6922cbd3fd4ad91ea306ba3c97d8 | [] | no_license | szabgab/ScapyTrafficGenerator3 | 17c05e4ca4c9dda0013b90eac328e2ff5d098c2f | 53c81b0796d436a1ec64b0ea46173d98d4bc1fa7 | refs/heads/main | 2023-03-12T02:24:23.410164 | 2020-12-22T08:11:55 | 2020-12-22T08:11:55 | 323,560,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,896 | py | from scapy.all import *
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from Scapy_Control import *
class SSL_TSL_Supprt():
def __init__(self):
self.defaultCipher="RSA_WITH_AES_128_CBC_SHA"
self.sshcipher=65664
def simple_clientHello(self,
**kwargs):
version= kwargs.get('tlsrecord_version') or "TLS_1_0"
if "ssl" in version.lower():
print 'ssl type'
clienthello = SSLv2ClientHello(version=version,
#cipher_suites= ['RSA_WITH_AES_128_CBC_SHA']
)
clientrecord = SSLv2Record(content_type='client_hello')
return SSL(records = [clientrecord/clienthello])
else:
print 'tls type'
#TLSExtension(type="supported_groups", length=0x8)/TLSExtEllipticCurves(length=0x6, elliptic_curves=['secp256r1', 'secp384r1', 'secp521r1'])).show()
tlsclienthello = TLSClientHello()
tlshandshake = TLSHandshake(type= 'client_hello')
tlsrecord = TLSRecord(content_type="handshake",
version= kwargs.get('tlsrecord_version') or "TLS_1_0")
return SSL(records = [tlsrecord/tlshandshake/tlsclienthello] )
def simple_serverHello(self,
**kwargs):
version= kwargs.get('tlsrecord_version') or "TLS_1_0"
if "ssl" in version.lower():
print 'ssl type'
serverhello = SSLv2ClientHello(version=version)
return SSL(records = [SSLv2Record(content_type='server_hello')/SSLv2ClientHello(version=version)/Raw(load=RamdomRawData(400))])
else:
#TLSExtension(type="supported_groups", length=0x8)/TLSExtEllipticCurves(length=0x6, elliptic_curves=['secp256r1', 'secp384r1', 'secp521r1'])).show()
tlsserverhello = TLSServerHello(cipher_suite=self.defaultCipher)
tlshandshake = TLSHandshake(type= 'server_hello')
tlsrecord = TLSRecord(content_type="handshake",
version= kwargs.get('tlsrecord_version') or "TLS_1_0")
return SSL(records = [tlsrecord/tlshandshake/tlsserverhello] )
def simple_server_certificate(self,
publiccertlen=141,
signaturelen=257,
subject=None,
issuer=None,
**kwargs):
version= kwargs.get('tlsrecord_version') or "TLS_1_0"
if not subject:
subject = 'nathan.s.super.awesome.server.1.0.com'
if not issuer:
issuer = 'Nathan Is Super'
#random value pupblic key
randompubliccert=RamdomRawData(publiccertlen)
#random value signature
randomsignature=RamdomRawData(signaturelen)
certificate = TLSCertificate(data=X509Cert(signature=ASN1_BIT_STRING(randomsignature),
pubkey=ASN1_BIT_STRING(randompubliccert),
#issuer=[X509RDN(oid=ASN1_OID('.2.5.4.3'), value=ASN1_PRINTABLE_STRING('DigiCert SHA2 High Assurance Server CA'))],
subject=[X509RDN(oid=ASN1_OID('.2.5.4.3'), value=ASN1_PRINTABLE_STRING(subject))],
issuer=[X509RDN(oid=ASN1_OID('.2.5.4.3'), value=ASN1_PRINTABLE_STRING(issuer))],
#subject=[X509RDN(oid=ASN1_OID('.2.5.4.3'), value=ASN1_PRINTABLE_STRING('nathan.s.super.awesome.server.1.0.com'))],
),
)
certificatelist = TLSCertificateList(certificates=[certificate])
certificatehandshake = TLSHandshake(type='certificate')
record = TLSRecord(version= version,
content_type="handshake")
return SSL(records=[record/certificatehandshake/certificatelist])
def simple_server_hello_done(self,
**kwargs):
version= kwargs.get('tlsrecord_version') or "TLS_1_0"
tlshandshake = TLSHandshake(type= 'server_hello_done')
tlsrecord = TLSRecord(content_type="handshake",
version=version)
return SSL(records = [tlsrecord/tlshandshake] )
def simple_ClientKeyExchange(self,
exchangelen=130,
**kwargs):
version= kwargs.get('tlsrecord_version') or "TLS_1_0"
if "ssl" in version.lower():
print 'ssl record version=', version
return SSL(records = SSLv2Record(content_type="client_master_key")/SSLv2ClientMasterKey(key_argument=RamdomRawData(8)))
else:
record = TLSRecord(content_type="handshake",
version= version)
tlshandshake = TLSHandshake(type= 'client_key_exchange')
return SSL(records = [record/tlshandshake/TLSClientKeyExchange()/Raw(load=RamdomRawData(exchangelen))])
def simple_Client_ChangeCipherSpec(self,
**kwargs):
version= kwargs.get('tlsrecord_version') or "TLS_1_0"
record = TLSRecord(content_type="change_cipher_spec",
version= version)
cipherSpec = TLSChangeCipherSpec()
return SSL(records = [record/cipherSpec])
def simple_Server_ChangeCipherSpec(self,
specmessagelen=21,
**kwargs):
version= kwargs.get('tlsrecord_version') or "TLS_1_0"
record = TLSRecord(content_type="change_cipher_spec",
version= version)
cipherSpec = TLSChangeCipherSpec(message=RamdomRawData(specmessagelen))
return SSL(records = [record/cipherSpec])
def encrypted_data(self,
encryptlen=40):
return SSL(records = [TLSRecord(content_type=0)/TLSCiphertext(data=RamdomRawData(encryptlen))])
def Finished(self,
finisheddatalen=12,
#rawlen=16,
**kwargs):
version= kwargs.get('tlsrecord_version') or "TLS_1_0"
record = TLSRecord(content_type="handshake",
version= version)
return SSL(records = [record/TLSHandshake(type="finished")/TLSFinished(data=RamdomRawData(finisheddatalen))])#/TLSHandshake(type=247)/Raw(load=RamdomRawData(rawlen))])
if __name__=="__main__":
pcap = "/home/nathanhoisington/test.pcap"
SSLSUP = SSL_TSL_Supprt()
packetstart = Ether()/IP(src="1.2.3.4", dst='4.3.2.1',flags="DF")/TCP(sport=12345, dport=443, flags="PA", ack=1111, seq=3222)
packetend = SSLSUP.simple_clientHello()
packet=packetstart/packetend
packet.show2()
#packet = SSLSUP.simple_serverHello()
#packet = SSLSUP.simple_server_certificate()
#packet = SSLSUP.simple_server_hello_done()
#packet = SSLSUP.simple_ClientKeyExchange()
#packet = SSLSUP.simple_Client_ChangeCipherSpec()
#packet = SSLSUP.Finished()
#packet = SSLSUP.simple_Server_ChangeCipherSpec()
#packet = SSLSUP.Finished()
#print ''
#packet.show()
#print ''
#print 'show 2'
#print ''
#packet.show2()
#print ''
wrpcap(pcap,packet)
#print ''
#print 'after writing'
#print ''
#print ''
#rdpcap(pcap)[0].show2()
'''
for scapy
y = rdpcap('testing/ts-test/Tools/TrafficGenerator/Pcaps/tls2.pcap')
clienthello=3[3]
serverhello = y[5]
cert = y[7]
serverhellodone = y[9]
clientkeyExchange = y[11]
clientchangecipherspec = y[13]
clientfinished = y[15]
serverchangecipherspec=y[17]
serverfinished=y[19]
''' | [
"gabor@szabgab.com"
] | gabor@szabgab.com |
d8904f842a18029786a44e9787a3ea3d4e287b8b | c9ad6ad969de505b3c8471c6f46dfd782a0fb498 | /0x11-python-network_1/2-post_email.py | f9456c986de8e9178377c07526c55742ec51eb58 | [] | no_license | enterpreneur369/holbertonschool-higher_level_programming | 002fd5a19b40c8b1db06b34c4344e307f24c17ac | dd7d3f14bf3bacb41e2116d732ced78998a4afcc | refs/heads/master | 2022-06-20T00:57:27.736122 | 2020-05-06T14:26:10 | 2020-05-06T14:26:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | #!/usr/bin/python3
""" Module 2-post_email
Python script that send a POST request
"""
import urllib.request
import sys
if __name__ == "__main__":
try:
url = sys.argv[1]
email = sys.argv[2]
values = {"email": email}
data = urllib.parse.urlencode(values)
data = data.encode("ascii")
req = urllib.request.Request(url, data)
with urllib.request.urlopen(req) as r:
html = r.read()
print("{}".format(html.decode("UTF-8")))
except IndexError:
pass
| [
"jose.calderon@holbertonschool.com"
] | jose.calderon@holbertonschool.com |
c8ea297268457b9ea391fff1005c0915bf107e5e | 9141823df0c7f40a405c5ed5d3a7ec5596ff5ad6 | /apps/login/urls.py | aacd3592e2a7fd23d16940f74f6dca6eb4d851b7 | [] | no_license | jqchang/dojo_secrets | 9ea70527e396a5205b2e7b19360e99a614e151b1 | e1d84d1cee201cbdde4b065ed50702c9caee7595 | refs/heads/master | 2021-01-21T06:42:41.697539 | 2017-02-23T21:18:44 | 2017-02-23T21:18:44 | 82,870,690 | 0 | 0 | null | 2017-02-23T21:13:19 | 2017-02-23T01:33:26 | Python | UTF-8 | Python | false | false | 397 | py | from django.conf.urls import url, include
from . import views
# from django.contrib import admin
urlpatterns = [
url(r'^$', views.index, name='login_index'),
# url(r'^success$', views.success, name='login_success'),
url(r'^process$', views.process, name='login_process'),
url(r'^login$', views.login, name='login_login'),
url(r'^logout$', views.logout, name='login_logout')
]
| [
"jqchang@gmail.com"
] | jqchang@gmail.com |
c1ebf23dd23f02933f1422f1713e8966feb7c239 | d972579395ced64fea4d40ec946c4aa053ef2c1b | /api/models.py | 9d38f73f74631163abb6bdba76a4baf3babb1b59 | [] | no_license | ziaurjoy/Serializer-and-ajax | 7a0e117e36e87b8889eb270a7c3c78b3f75f670e | 395a7802229badc139f9b4a6d5fbae563e093276 | refs/heads/master | 2022-06-09T09:32:57.054391 | 2020-05-03T15:26:34 | 2020-05-03T15:26:34 | 260,957,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | from django.db import models
# Create your models here.
class Task(models.Model):
title = models.CharField(max_length=50)
complited = models.BooleanField(default=False,blank=True,null=True)
def __str__(self):
return self.title | [
"ziaurjoy802@gmail.com"
] | ziaurjoy802@gmail.com |
c2304a67a1780051792c3fc974a55cd4a567394d | caf6ae544fce3b332b40a03462c0646a32c913e1 | /merchant/python/test/test_new_invoice.py | d5860b0d8d6e26a30956c2c4527a926aa0978c06 | [
"Apache-2.0"
] | permissive | coinsecure/plugins | 827eb0ce03a6a23b4819a618ee47600161bec1c7 | ad6f08881020c268b530d5242d9deed8d2ec84de | refs/heads/master | 2020-05-30T07:17:56.255709 | 2016-11-27T22:22:23 | 2016-11-27T22:22:23 | 63,496,663 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | # coding: utf-8
"""
coinMerchant Api Documentation
To generate an API key, please visit <a href='https://pay.coinsecure.in/payment-tools/api' target='_new' class='homeapi'>https://pay.coinsecure.in/payment-tools/api</a>.<br>Guidelines for use can be accessed at <a href='https://pay.coinsecure.in/api/guidelines'>https://pay.coinsecure.in/api/guidelines</a>.
OpenAPI spec version: 1.0B
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.new_invoice import NewInvoice
class TestNewInvoice(unittest.TestCase):
""" NewInvoice unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testNewInvoice(self):
"""
Test NewInvoice
"""
model = swagger_client.models.new_invoice.NewInvoice()
if __name__ == '__main__':
unittest.main() | [
"vivek0@users.noreply.github.com"
] | vivek0@users.noreply.github.com |
285827778cb5d7d41286c78da3a8c7d7e1a18d6e | 45e376ae66b78b17788b1d3575b334b2cb1d0b1c | /tests/terraform/checks/resource/aws/test_APIGatewayMethodSettingsCacheEnabled.py | 949fe13423e8a9120e84913042e47da6a765b876 | [
"Apache-2.0"
] | permissive | bridgecrewio/checkov | aeb8febed2ed90e61d5755f8f9d80b125362644d | e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d | refs/heads/main | 2023-08-31T06:57:21.990147 | 2023-08-30T23:01:47 | 2023-08-30T23:01:47 | 224,386,599 | 5,929 | 1,056 | Apache-2.0 | 2023-09-14T20:10:23 | 2019-11-27T08:55:14 | Python | UTF-8 | Python | false | false | 1,354 | py | import os
import unittest
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.aws.APIGatewayMethodSettingsCacheEnabled import check
from checkov.terraform.runner import Runner
class TestAPIGatewayMethodSettingsCacheEnabled(unittest.TestCase):
    """Integration test for the APIGatewayMethodSettingsCacheEnabled check."""

    def test(self):
        """Run the check against the example resources and verify the pass/fail split."""
        runner = Runner()
        current_dir = os.path.dirname(os.path.realpath(__file__))
        test_files_dir = current_dir + "/example_APIGatewayMethodSettingsCacheEnabled"
        report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
        summary = report.get_summary()

        passing_resources = {
            "aws_api_gateway_method_settings.pass",
        }
        failing_resources = {
            "aws_api_gateway_method_settings.fail",
        }

        # Set comprehensions replace the redundant set([listcomp]) construction.
        passed_check_resources = {c.resource for c in report.passed_checks}
        failed_check_resources = {c.resource for c in report.failed_checks}

        self.assertEqual(summary["passed"], 1)
        self.assertEqual(summary["failed"], 1)
        self.assertEqual(summary["skipped"], 0)
        self.assertEqual(summary["parsing_errors"], 0)

        self.assertEqual(passing_resources, passed_check_resources)
        self.assertEqual(failing_resources, failed_check_resources)
if __name__ == "__main__":
    # Run this test module directly.
    unittest.main()
| [
"noreply@github.com"
] | bridgecrewio.noreply@github.com |
3039965ef509beb90baae8e5c128e86ed06be81f | ca7f34b5a105984ff3f3f4c794a3a4b95ab35abc | /iterm2_tools/shell_integration.py | 9676d0dae14f99722ef3110e7b84f0bbc5ba446c | [
"MIT"
] | permissive | Carreau/iterm2-tools | d6b0fa016759ace1315e6e708b389eb235a7dda8 | 3d42811b1c411f3a11b5550476fae78efa305164 | refs/heads/master | 2020-04-05T19:22:34.873301 | 2016-06-01T21:30:47 | 2016-06-01T21:30:47 | 60,203,359 | 0 | 0 | null | 2016-07-19T17:23:52 | 2016-06-01T19:02:26 | Python | UTF-8 | Python | false | false | 6,279 | py | """
Shell integration
See https://groups.google.com/d/msg/iterm2-discuss/URKCBtS0228/rs5Ive4PCAAJ
for documentation on the sequences,
https://github.com/gnachman/iterm2-website/tree/master/source/misc for example
implementations, and https://iterm2.com/shell_integration.html for a list of
what this lets you do in iTerm2.
Usage
=====
Say you have a basic REPL like::
input> run-command
command output
where ``input>`` is the prompt, ``run-command`` is the command typed by the user,
and ``command output`` is the output of ``run-command``. The basic REPL (in Python
3), would be::
while True:
before_prompt()
print("input> ", end='')
after_prompt()
command = input()
before_output()
return_val = run_command(command)
after_output(return_val)
(here ``return_val`` should be in the range 0-255).
Note that it is recommended to use the functions (like ``before_prompt()``) or the
context managers (like ``with Prompt()``) rather than the variables (like
``BEFORE_PROMPT``) directly. These print the codes directly to stdout, avoiding
potential issues with character counting.
It may be preferable to use the context managers rather than the functions,
in which case, the REPL would be::
while True:
with Prompt():
print("input> ", end='')
command = input() # raw_input() in Python 2
with Output() as o:
return_val = run_command(command)
o.set_command_status(return_val)
However, in many cases, it is impossible to run functions before and after the
prompt, e.g., when the prompt text is passed to ``(raw_)input()`` directly. In
that case, you should use the codes directly, wrapped with
``readline_invisible()``, like::
while True:
        command = input(
            readline_invisible(BEFORE_PROMPT) +
            "input> " +
            readline_invisible(AFTER_PROMPT)
        )  # raw_input() in Python 2
with Output() as o:
return_val = run_command(command)
o.set_command_status(return_val)
Using ``readline_invisible()`` is important as it tells readline to not count the
codes as visible text. Without this, readline's editing and history commands
will truncate text.
Notes about iTerm2:
- iTerm2 assumes that the prompt sequences will be presented in a reasonable
way. Using the context managers should prevent most issues.
- The text that comes after the prompt before the first newline is read as a
command. If there is no command, or the command is just whitespace, the
output is effectively ignored (the same as if two before/after prompt
sequences were performed without any output sequence).
- iTerm2 does not support capturing multiline commands, although the output
won't include any part of the command if ``before_output()`` is used
correctly.
- iTerm2 expects there to be nothing between ``AFTER_OUTPUT`` and
``BEFORE_PROMPT``, except possibly more shell sequences. At the time of this
writing, iTerm2's "Select Output of Last Command" actually selects the text
between ``BEFORE_OUTPUT`` and ``BEFORE_PROMPT``, not ``BEFORE_OUTPUT`` and
``AFTER_OUTPUT`` as one would expect.
- Multiline prompts are supported just fine, although the arrow will always be
presented on the first line. It is not recommended to attempt to change this
by not including part of the prompt between the prompt sequences (see the
previous bullet point).
"""
from __future__ import print_function, division, absolute_import
import sys
from contextlib import contextmanager
# The "FinalTerm" shell sequences (OSC 133) that bracket the prompt, the
# typed command, and its output.
BEFORE_PROMPT = '\033]133;A\a'
AFTER_PROMPT = '\033]133;B\a'
BEFORE_OUTPUT = '\033]133;C\a'
AFTER_OUTPUT = '\033]133;D;{command_status}\a' # command_status is the command status, 0-255

# iTerm2 specific sequences (OSC 1337). All optional.
SET_USER_VAR = '\033]1337;SetUserVar={user_var_key}={user_var_value}\a'
# The current shell integration version is 1. We don't use this as an outdated
# shell integration version would only prompt the user to upgrade the
# integration that comes with iTerm2.
SHELL_INTEGRATION_VERSION = '\033]1337;ShellIntegrationVersion={shell_integration_version}\a'

# REMOTE_HOST and CURRENT_DIR are best echoed right after AFTER_OUTPUT.
# remote_host_hostname should be the fully qualified hostname. Integrations
# should allow users to set remote_host_hostname in case DNS is slow.
REMOTE_HOST = '\033]1337;RemoteHost={remote_host_username}@{remote_host_hostname}\a'
CURRENT_DIR = '\033]1337;CurrentDir={current_dir}\a'
def readline_invisible(code):
    """
    Mark *code* as zero-width for readline.

    Readline counts every character when measuring the prompt; wrapping the
    escape sequence in \\001/\\002 tells it the enclosed text takes no screen
    space, so line editing and history navigation stay aligned.
    """
    return "\001{0}\002".format(code)
def before_prompt():
    """Write the FinalTerm marker that precedes the prompt."""
    sys.stdout.write(BEFORE_PROMPT)


def after_prompt():
    """Write the FinalTerm marker that follows the prompt."""
    sys.stdout.write(AFTER_PROMPT)


def before_output():
    """Write the FinalTerm marker that precedes the command output."""
    sys.stdout.write(BEFORE_OUTPUT)


def after_output(command_status):
    """
    Write the FinalTerm marker that follows the command output.

    ``command_status`` must be in the range 0-255; anything else raises
    ``ValueError``.
    """
    if command_status in range(256):
        sys.stdout.write(AFTER_OUTPUT.format(command_status=command_status))
    else:
        raise ValueError("command_status must be an integer in the range 0-255")
@contextmanager
def Prompt():
    """
    iTerm2 shell integration prompt context manager.

    Emits the before-prompt marker on entry and the after-prompt marker on
    (normal) exit.  Use like::

        with Prompt():
            print("Prompt:", end='')
    """
    sys.stdout.write(BEFORE_PROMPT)
    yield
    sys.stdout.write(AFTER_PROMPT)
class Output(object):
    """
    iTerm2 shell integration output context manager.

    Emits the before-output marker on entry and, on exit, the after-output
    marker carrying the recorded command status (0-255, default 0)::

        with Output() as o:
            print("output")
            o.set_command_status(status)
    """

    def __init__(self):
        self.command_status = 0

    def set_command_status(self, status):
        """Record the exit status to report when the block closes."""
        self.command_status = status

    def __enter__(self):
        before_output()
        return self

    def __exit__(self, exc_kind, exc_val, exc_tb):
        after_output(self.command_status)
| [
"asmeurer@gmail.com"
] | asmeurer@gmail.com |
a077ee542a3cdeb2e8d4aa5dfae685ee2dd37922 | 180dc578d12fff056fce1ef8bd1ba5c227f82afc | /official/legacy/detection/modeling/architecture/nn_ops.py | c8e2f5b534a79a7dba8ff321417f77b8d8a47cf7 | [
"Apache-2.0"
] | permissive | jianzhnie/models | 6cb96c873d7d251db17afac7144c4dbb84d4f1d6 | d3507b550a3ade40cade60a79eb5b8978b56c7ae | refs/heads/master | 2023-07-12T05:08:23.314636 | 2023-06-27T07:54:20 | 2023-06-27T07:54:20 | 281,858,258 | 2 | 0 | Apache-2.0 | 2022-03-27T12:53:44 | 2020-07-23T05:22:33 | Python | UTF-8 | Python | false | false | 3,853 | py | # Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural network operations commonly shared by the architectures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
class NormActivation(tf.keras.layers.Layer):
    """Combined Normalization and Activation layers."""

    def __init__(self,
                 momentum=0.997,
                 epsilon=1e-4,
                 trainable=True,
                 init_zero=False,
                 use_activation=True,
                 activation='relu',
                 fused=True,
                 name=None):
        """A class to construct layers for a batch normalization followed by a ReLU.

        Args:
          momentum: momentum for the moving average.
          epsilon: small float added to variance to avoid dividing by zero.
          trainable: `bool`, if True also add variables to the graph collection
            GraphKeys.TRAINABLE_VARIABLES. If False, freeze batch normalization
            layer.
          init_zero: `bool` if True, initializes scale parameter of batch
            normalization with 0. If False, initialize it with 1.
          use_activation: `bool`, whether to add the optional activation layer after
            the batch normalization layer.
          activation: 'string', the type of the activation layer. Currently support
            `relu` and `swish`.
          fused: `bool` fused option in batch normalziation.
          name: `str` name for the operation.
        """
        super(NormActivation, self).__init__(trainable=trainable)
        # Zero-initialized gamma makes the BN branch start as an identity-zero
        # mapping (commonly used on residual branches); otherwise standard ones.
        if init_zero:
            gamma_initializer = tf.keras.initializers.Zeros()
        else:
            gamma_initializer = tf.keras.initializers.Ones()
        self._normalization_op = tf.keras.layers.BatchNormalization(
            momentum=momentum,
            epsilon=epsilon,
            center=True,
            scale=True,
            trainable=trainable,
            fused=fused,
            gamma_initializer=gamma_initializer,
            name=name)
        self._use_activation = use_activation
        # Only 'relu' and 'swish' are supported; anything else fails fast.
        if activation == 'relu':
            self._activation_op = tf.nn.relu
        elif activation == 'swish':
            self._activation_op = tf.nn.swish
        else:
            raise ValueError('Unsupported activation `{}`.'.format(activation))

    def __call__(self, inputs, is_training=None):
        """Builds the normalization layer followed by an optional activation layer.

        Args:
          inputs: `Tensor` of shape `[batch, channels, ...]`.
          is_training: `boolean`, if True if model is in training mode.

        Returns:
          A normalized `Tensor` with the same `data_format`.
        """
        # We will need to keep training=None by default, so that it can be inherit
        # from keras.Model.training
        # NOTE(review): this branch only coerces a truthy is_training to True when
        # the layer is trainable; None/False pass through unchanged so Keras can
        # substitute the model-level training flag -- confirm this is intended.
        if is_training and self.trainable:
            is_training = True
        inputs = self._normalization_op(inputs, training=is_training)
        if self._use_activation:
            inputs = self._activation_op(inputs)
        return inputs
def norm_activation_builder(momentum=0.997,
                            epsilon=1e-4,
                            trainable=True,
                            activation='relu',
                            **kwargs):
    """Return a NormActivation factory with these defaults pre-bound.

    Any extra keyword arguments are forwarded to NormActivation unchanged.
    """
    bound = dict(momentum=momentum,
                 epsilon=epsilon,
                 trainable=trainable,
                 activation=activation)
    bound.update(kwargs)
    return functools.partial(NormActivation, **bound)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
c29f4258256a0299f7c1ce8d83dab9055e20dd92 | 2b2b5e2a28038b8e2dea5bbec0f833cabfa0c256 | /eland/ml/pytorch/_pytorch_model.py | de1b550656bf9fcbea7b056e77b763c3bdce3cbc | [
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] | permissive | elastic/eland | 09b321d500c31abb04673a17bc9ea32f13d3358e | 95864a9ace67337b863ebeb65ded808cf5ba03b3 | refs/heads/main | 2023-09-01T18:13:38.645147 | 2023-08-31T09:34:36 | 2023-08-31T09:34:36 | 191,316,757 | 524 | 95 | Apache-2.0 | 2023-09-14T19:31:16 | 2019-06-11T07:24:06 | Python | UTF-8 | Python | false | false | 5,662 | py | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import json
import math
import os
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from tqdm.auto import tqdm # type: ignore
from eland.common import ensure_es_client
from eland.ml.pytorch.nlp_ml_model import NlpTrainedModelConfig
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from elasticsearch._sync.client.utils import _quote
DEFAULT_CHUNK_SIZE = 4 * 1024 * 1024 # 4MB
DEFAULT_TIMEOUT = "60s"
class PyTorchModel:
    """
    A PyTorch model managed by Elasticsearch.

    These models must be trained outside of Elasticsearch, conform to the
    support tokenization and inference interfaces, and exported as their
    TorchScript representations.
    """

    def __init__(
        self,
        es_client: Union[str, List[str], Tuple[str, ...], "Elasticsearch"],
        model_id: str,
    ):
        # es_client may be host strings or a live Elasticsearch instance;
        # ensure_es_client normalizes either form.
        self._client: Elasticsearch = ensure_es_client(es_client)
        self.model_id = model_id

    def put_config(
        self, path: Optional[str] = None, config: Optional[NlpTrainedModelConfig] = None
    ) -> None:
        # Upload the trained-model configuration, from a JSON file at `path`
        # or from an in-memory config object -- exactly one must be given.
        if path is not None and config is not None:
            raise ValueError("Only include path or config. Not both")
        if path is not None:
            with open(path) as f:
                config_map = json.load(f)
        elif config is not None:
            config_map = config.to_dict()
        else:
            raise ValueError("Must provide path or config")
        self._client.ml.put_trained_model(model_id=self.model_id, **config_map)

    def put_vocab(self, path: str) -> None:
        # Upload the vocabulary JSON through the raw _ml vocabulary endpoint
        # (no typed client method exists for it).
        with open(path) as f:
            vocab = json.load(f)
        self._client.perform_request(
            method="PUT",
            path=f"/_ml/trained_models/{self.model_id}/vocabulary",
            headers={"accept": "application/json", "content-type": "application/json"},
            body=vocab,
        )

    def put_model(self, model_path: str, chunk_size: int = DEFAULT_CHUNK_SIZE) -> None:
        # Upload the TorchScript definition as base64-encoded parts of at most
        # chunk_size raw bytes each, with a progress bar over the part count.
        model_size = os.stat(model_path).st_size
        total_parts = math.ceil(model_size / chunk_size)

        def model_file_chunk_generator() -> Iterable[str]:
            # Stream the file so the whole model never has to fit in memory.
            with open(model_path, "rb") as f:
                while True:
                    data = f.read(chunk_size)
                    if not data:
                        break
                    yield base64.b64encode(data).decode()

        for i, data in tqdm(
            enumerate(model_file_chunk_generator()), unit=" parts", total=total_parts
        ):
            self._client.ml.put_trained_model_definition_part(
                model_id=self.model_id,
                part=i,
                total_definition_length=model_size,
                total_parts=total_parts,
                definition=data,
            )

    def import_model(
        self,
        *,
        model_path: str,
        config_path: Optional[str],
        vocab_path: str,
        config: Optional[NlpTrainedModelConfig] = None,
        chunk_size: int = DEFAULT_CHUNK_SIZE,
    ) -> None:
        # Convenience wrapper: upload config, then the model definition,
        # then the vocabulary.
        self.put_config(path=config_path, config=config)
        self.put_model(model_path, chunk_size)
        self.put_vocab(vocab_path)

    def infer(
        self,
        docs: List[Mapping[str, str]],
        timeout: str = DEFAULT_TIMEOUT,
    ) -> Any:
        # Run inference on `docs` via the raw _infer endpoint and return the
        # deserialized response.
        if docs is None:
            raise ValueError("Empty value passed for parameter 'docs'")

        __body: Dict[str, Any] = {}
        __body["docs"] = docs
        __path = f"/_ml/trained_models/{_quote(self.model_id)}/_infer"
        __query: Dict[str, Any] = {}
        __query["timeout"] = timeout
        __headers = {"accept": "application/json", "content-type": "application/json"}

        return self._client.options(request_timeout=60).perform_request(
            "POST", __path, params=__query, headers=__headers, body=__body
        )

    def start(self, timeout: str = DEFAULT_TIMEOUT) -> None:
        # Deploy the model and block until it reports the "started" state.
        self._client.options(request_timeout=60).ml.start_trained_model_deployment(
            model_id=self.model_id, timeout=timeout, wait_for="started"
        )

    def stop(self) -> None:
        # Undeploy the model.
        self._client.ml.stop_trained_model_deployment(model_id=self.model_id)

    def delete(self) -> None:
        # Remove the trained model; a 404 (already gone) is ignored.
        self._client.options(ignore_status=404).ml.delete_trained_model(
            model_id=self.model_id
        )

    @classmethod
    def list(
        cls, es_client: Union[str, List[str], Tuple[str, ...], "Elasticsearch"]
    ) -> Set[str]:
        # Return the ids of every PyTorch trained model in the cluster.
        client = ensure_es_client(es_client)
        resp = client.ml.get_trained_models(model_id="*", allow_no_match=True)
        return set(
            [
                model["model_id"]
                for model in resp["trained_model_configs"]
                if model["model_type"] == "pytorch"
            ]
        )
| [
"noreply@github.com"
] | elastic.noreply@github.com |
1ba36e754a18fa91d89c43dbe3bf65dfd2bef5d8 | 95b37927e64e2901e664cc958ff01927734081fc | /ethereumetl/mappers/receipt_log_mapper.py | 0712b1469f387b381e854450096902201d0a30c5 | [
"MIT"
] | permissive | farooqarahim/ethereum-etl | 335d5ea74fcd4e62960ee035d31e320445fd8bf2 | ef462ba2c413088931e46638d8b8b1391a469f5d | refs/heads/master | 2020-03-23T22:01:18.156250 | 2018-07-23T16:58:44 | 2018-07-23T16:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,337 | py | # MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ethereumetl.domain.receipt_log import EthReceiptLog
from ethereumetl.utils import hex_to_dec
class EthReceiptLogMapper(object):
    """Converts receipt logs between JSON-RPC dicts, web3 dicts and
    EthReceiptLog objects, and flattens them for export."""

    def json_dict_to_receipt_log(self, json_dict):
        """Build an EthReceiptLog from a JSON-RPC log dict (hex-encoded numbers)."""
        receipt_log = EthReceiptLog()
        receipt_log.log_index = hex_to_dec(json_dict.get('logIndex', None))
        receipt_log.transaction_hash = json_dict.get('transactionHash', None)
        receipt_log.transaction_index = hex_to_dec(json_dict.get('transactionIndex', None))
        receipt_log.block_hash = json_dict.get('blockHash', None)
        receipt_log.block_number = hex_to_dec(json_dict.get('blockNumber', None))
        receipt_log.address = json_dict.get('address', None)
        receipt_log.data = json_dict.get('data', None)
        receipt_log.topics = json_dict.get('topics', None)
        return receipt_log

    def web3_dict_to_receipt_log(self, dict):
        """Build an EthReceiptLog from a web3.py log dict (binary hashes).

        Note: the parameter name shadows the builtin ``dict``; it is kept for
        backward compatibility with keyword callers.
        """
        receipt_log = EthReceiptLog()
        receipt_log.log_index = dict.get('logIndex', None)
        transaction_hash = dict.get('transactionHash', None)
        if transaction_hash is not None:
            transaction_hash = transaction_hash.hex()
        receipt_log.transaction_hash = transaction_hash
        # Bug fix: transaction_index was never populated here, unlike the JSON
        # mapper above, which left receipt_log_to_dict output incomplete for
        # logs coming from web3.
        receipt_log.transaction_index = dict.get('transactionIndex', None)
        block_hash = dict.get('blockHash', None)
        if block_hash is not None:
            block_hash = block_hash.hex()
        receipt_log.block_hash = block_hash
        receipt_log.block_number = dict.get('blockNumber', None)
        receipt_log.address = dict.get('address', None)
        receipt_log.data = dict.get('data', None)
        if 'topics' in dict:
            receipt_log.topics = [topic.hex() for topic in dict['topics']]
        return receipt_log

    def receipt_log_to_dict(self, receipt_log):
        """Flatten an EthReceiptLog into a row-friendly dict; topics are
        joined with '|'."""
        return {
            'type': 'log',
            'log_index': receipt_log.log_index,
            'log_transaction_hash': receipt_log.transaction_hash,
            'log_transaction_index': receipt_log.transaction_index,
            'log_block_hash': receipt_log.block_hash,
            'log_block_number': receipt_log.block_number,
            'log_address': receipt_log.address,
            'log_data': receipt_log.data,
            'log_topics': '|'.join(receipt_log.topics)
        }
| [
"medvedev1088@gmail.com"
] | medvedev1088@gmail.com |
3260047056822f3f7f151764bf6c76b00d2c5a54 | 6eb59488a043d78e5758922ee0136103d4fd419f | /tests/test_surround_delete.py | b0e2509fe5fc9e617ade38961f57ba8eba6bf5a1 | [
"MIT"
] | permissive | SublimeSix/plugin-surround | e038e3bf246900f454facc3ad765cc31d1d0732e | eba4fd9af4f4f686f94796a4d6cfe53b94f3e1d2 | refs/heads/master | 2020-03-19T17:30:53.230178 | 2018-07-14T20:36:41 | 2018-07-14T20:38:10 | 136,763,619 | 3 | 0 | MIT | 2018-06-24T20:45:49 | 2018-06-09T22:56:59 | Python | UTF-8 | Python | false | false | 2,025 | py | import os
import unittest
import sublime
from sublime import Region as R
from User.six.tests import ViewTest
from Six.lib.command_state import CommandState
from Six.lib.constants import Mode
from Six.lib.errors import AbortCommandError
from Six.lib.yank_registers import EditOperation
from User.six.surround import find_in_line
from User.six.surround import BRACKETS
class Test__six_surround_delete(ViewTest):
    """End-to-end tests for the _six_surround_delete command against a live view."""

    def testCanReplace(self):
        # Seed the buffer and park the caret inside the middle word ("bbb").
        self.view.run_command("append", { "characters": "aaa bbb ccc" })
        self.view.sel().clear()
        self.view.sel().add(R(5))

        # Cycle through every bracket pair: surround "bbb" with the previous
        # pair, delete it, then move on to the next pair.
        old = "'"
        for new, brackets in BRACKETS.items():
            # with self.subTest(bracket=new): # Not supported in Python 3.3
            old_a, old_b = BRACKETS[old]
            new_a, new_b = brackets
            # Insert the closing delimiter after "bbb", then the opening one
            # before it (insertion order keeps offsets stable).
            self.view.sel().clear()
            self.view.sel().add(R(7))
            self.view.run_command("insert", { "characters": old_b })
            self.view.sel().clear()
            self.view.sel().add(R(4))
            self.view.run_command("insert", { "characters": old_a })

            self.assertEquals(self.view.substr(4), old_a)
            self.assertEquals(self.view.substr(8), old_b)

            self.view.run_command("_six_surround_delete", { "old": old })

            # After deletion the word and trailing space are back in place.
            self.assertEquals(self.view.substr(4), "b")
            self.assertEquals(self.view.substr(7), " ")
            old = new

    def testCanUndoInOneStep(self):
        # Deleting a surround pair must be a single undo unit: one "undo"
        # restores both delimiters.
        self.view.run_command("append", { "characters": "aaa 'bbb' ccc" })
        self.view.sel().clear()
        self.view.sel().add(R(5))

        self.assertEquals(self.view.substr(4), "'")
        self.assertEquals(self.view.substr(8), "'")

        self.view.run_command("_six_surround_delete", { "old": "'" })

        self.assertEquals(self.view.substr(4), 'b')
        self.assertEquals(self.view.substr(7), ' ')

        self.view.run_command("undo")

        self.assertEquals(self.view.substr(4), "'")
        self.assertEquals(self.view.substr(8), "'")
| [
"guillermo.lopez@outlook.com"
] | guillermo.lopez@outlook.com |
db164c4acb91643dac552db8d6754de1e2163630 | 7df7642c30f0cd09db47c42abe2738a00d8c9562 | /hearthstone/stringsfile.py | d72f60096e67c53bc2058e4ea64427e1caec8165 | [
"MIT"
] | permissive | mshirinyan/python-hearthstone | 601887c49385f041acd0c98c23170269b29ff5f5 | 3855e9565d45f0a5677fffe2f88cbe160cc6c7e1 | refs/heads/master | 2021-09-07T12:33:05.479242 | 2018-02-14T12:33:20 | 2018-02-14T16:05:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """
Hearthstone Strings file
File format: TSV. Lines starting with `#` are ignored.
Key is always `TAG`
"""
import csv
from pkg_resources import resource_filename
_cache = {}
def load(fp):
    """Parse a Hearthstone strings TSV from *fp*.

    Lines beginning with ``#`` are skipped.  Returns a mapping of each row's
    ``TAG`` to a dict of its remaining non-empty columns.
    """
    content_lines = (line for line in fp if not line.startswith("#"))
    result = {}
    for row in csv.DictReader(content_lines, delimiter="\t"):
        entry = {column: value for column, value in row.items() if value}
        tag = entry.pop("TAG")
        result[tag] = entry
    return result
def load_globalstrings(locale="enUS"):
    """Load (and memoize) the GLOBAL.txt strings file for *locale*."""
    key = "Strings/%s/GLOBAL.txt" % (locale)
    if key in _cache:
        return _cache[key]
    full_path = resource_filename("hearthstone", key)
    with open(full_path, "r") as f:
        _cache[key] = load(f)
    return _cache[key]
if __name__ == "__main__":
    import json
    import sys

    # CLI: dump each strings file given as an argument to stdout as JSON.
    for path in sys.argv[1:]:
        with open(path, "r") as f:
            print(json.dumps(load(f)))
| [
"jerome@leclan.ch"
] | jerome@leclan.ch |
7517ade199886b75515bbcbb06d3d8a2b2e6f48c | 5e04d2979dd28a78fdd9e17136d7ce85dc247576 | /B/mar10_fileio.py | 984a7efad6220bac645caa03f841774632616813 | [] | no_license | ptyork/ptyork-au-aist2120-20sp | a821c0fe8b52eafbb15205b2f4bdacdae415ccd9 | 1cb928c59b5efe8cde26185bf781293c599e9823 | refs/heads/master | 2020-12-14T00:28:24.766261 | 2020-08-01T20:42:05 | 2020-08-01T20:42:05 | 234,577,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | import sys
print(sys.argv)
# exit()
#f = open('kennedy.txt')
#f = open('emails.txt')
if len(sys.argv) != 2:
print('ERROR: give me a file name, dang it!!')
exit()
filename = sys.argv[1] # [0] is always the name of the script...others are arguments
f = open(filename)
lines = f.readlines()
# print(lines)
# exit()
f.close()
linenum = 0
for line in lines:
linenum += 1
line = line.rstrip()
print(f"{linenum:3}: {line}")
#print(line, end='')
| [
"paul@yorkfamily.com"
] | paul@yorkfamily.com |
4428d1bfb6506986315772492de1c8636cf30025 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2021_01_01/operations/__init__.py | b30d1928382064e02bd6a6a8d0e62c67b220a026 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 2,517 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._app_service_certificate_orders_operations import AppServiceCertificateOrdersOperations
from ._certificate_orders_diagnostics_operations import CertificateOrdersDiagnosticsOperations
from ._certificate_registration_provider_operations import CertificateRegistrationProviderOperations
from ._domains_operations import DomainsOperations
from ._top_level_domains_operations import TopLevelDomainsOperations
from ._domain_registration_provider_operations import DomainRegistrationProviderOperations
from ._app_service_environments_operations import AppServiceEnvironmentsOperations
from ._app_service_plans_operations import AppServicePlansOperations
from ._certificates_operations import CertificatesOperations
from ._deleted_web_apps_operations import DeletedWebAppsOperations
from ._diagnostics_operations import DiagnosticsOperations
from ._global_model_operations import GlobalOperations
from ._provider_operations import ProviderOperations
from ._recommendations_operations import RecommendationsOperations
from ._resource_health_metadata_operations import ResourceHealthMetadataOperations
from ._web_site_management_client_operations import WebSiteManagementClientOperationsMixin
from ._static_sites_operations import StaticSitesOperations
from ._web_apps_operations import WebAppsOperations
from ._kube_environments_operations import KubeEnvironmentsOperations
# Public API of this operations package; mirrors the imports above.
__all__ = [
    'AppServiceCertificateOrdersOperations',
    'CertificateOrdersDiagnosticsOperations',
    'CertificateRegistrationProviderOperations',
    'DomainsOperations',
    'TopLevelDomainsOperations',
    'DomainRegistrationProviderOperations',
    'AppServiceEnvironmentsOperations',
    'AppServicePlansOperations',
    'CertificatesOperations',
    'DeletedWebAppsOperations',
    'DiagnosticsOperations',
    'GlobalOperations',
    'ProviderOperations',
    'RecommendationsOperations',
    'ResourceHealthMetadataOperations',
    'WebSiteManagementClientOperationsMixin',
    'StaticSitesOperations',
    'WebAppsOperations',
    'KubeEnvironmentsOperations',
]
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
4b89aa25d517a40a3ecfeefcfed52951b89750b7 | 36feed24f91d0c9ab07b81208cbc195bdbac2d63 | /algorithms/047.Permutations_II/Permutations_II.py | dc5ba7b5b30b875d0797b2653075b1cdeda82cf6 | [] | no_license | borisnorm/leetcode-1 | da8ef87219d18c674f74721df1a8159bd856e1d7 | 6200c8704614e918c8bfa5357c648dd1b4f7eb74 | refs/heads/master | 2021-01-15T09:18:58.403345 | 2016-02-26T12:31:41 | 2016-02-26T12:31:41 | 63,475,809 | 1 | 0 | null | 2016-07-16T09:31:10 | 2016-07-16T09:31:07 | null | UTF-8 | Python | false | false | 739 | py | # Time: O(n!)
# Space: O(n)
class Solution:
    # @param num, a list of integer
    # @return a list of lists of integers
    def permuteUnique(self, nums):
        """Return all unique permutations of nums (which may contain duplicates).

        Fixes in this revision:
        - `range` replaces the Python-2-only `xrange` (works on both 2 and 3);
        - the accumulator no longer shadows the builtin `next`;
        - duplicate candidates are rejected via a set of tuple keys in O(1)
          instead of an O(k) list-membership scan.  First occurrences are
          kept, so the output order matches the original implementation.
        """
        solutions = [[]]
        for num in nums:
            seen = set()
            extended = []
            for solution in solutions:
                for i in range(len(solution) + 1):
                    candidate = solution[:i] + [num] + solution[i:]
                    key = tuple(candidate)
                    if key not in seen:
                        seen.add(key)
                        extended.append(candidate)
            solutions = extended
        return solutions
if __name__ == "__main__":
    # Parenthesized print is valid in both Python 2 (a single parenthesized
    # expression) and Python 3 (a function call); the bare py2 print
    # statements were a SyntaxError under Python 3.
    print(Solution().permuteUnique([1, 1, 2]))
    print(Solution().permuteUnique([1, -1, 1, 2, -1, 2, 2, -1]))
| [
"1012351692@qq.com"
] | 1012351692@qq.com |
4a9657ba76659ff9b7d54328426931dd9ba6a668 | 80c8d4e84f2ea188a375ff920a4adbd9edaed3a1 | /scikit-learn/getstart.py | a1ec7488efb339202a33afeaac8479175ed84d74 | [
"MIT"
] | permissive | Birkid/penter | 3a4b67801d366db15ca887c31f545c8cda2b0766 | 0200f40c9d01a84c758ddcb6a9c84871d6f628c0 | refs/heads/master | 2023-08-22T14:05:43.106499 | 2021-10-20T07:10:10 | 2021-10-20T07:10:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,818 | py | from sklearn import datasets #从sklearn包中加载数据集模块
from sklearn import svm
import pickle
#iris = datasets.load_iris() #加载鸢尾花数据集
from sklearn.model_selection import GridSearchCV,learning_curve
from sklearn.tree import DecisionTreeClassifier
digits = datasets.load_digits()  # Load the digit-image dataset; each raw sample is an (8 x 8) image, digits.images[0]
"""
对于digits数据集,digits.data可以访问得到用来对数字进行分类的特征:
digits.target 就是数字数据集各样例对应的真实数字值。也就是我们的程序要学习的。
"""
# Algorithm / model selection
clf = svm.SVC(gamma=0.001, C=100.)
# Training
clf.fit(digits.data[:-1], digits.target[:-1])
# partial_fit
# Generally used when the training set is too large to load into memory at
# once: split it into chunks and call partial_fit repeatedly to learn step by
# step -- very convenient.
# Prediction: ask the trained estimator which digit the last image (held out
# of training) represents.
print(clf.predict(digits.data[-1:]))
print(digits.target[-1])
# Model persistence
s = pickle.dumps(clf)
clf2 = pickle.loads(s)
print(clf2.predict(digits.data[-1:]))
# https://joblib.readthedocs.io/en/latest/persistence.html
# from joblib import dump, load
# dump(clf, 'filename.joblib')
# clf3 = load('filename.joblib')
# print(clf3.predict(digits.data[-1:]))
# Exercise
iris = datasets.load_iris()
clf_iris = svm.SVC()
clf_iris.fit(iris.data[:-1], iris.target[:-1])
print(clf_iris.predict(iris.data[-1:]))
print(iris.target[-1])
# Parameter tuning 1: learning curve (drawback: parameters cannot be discarded)
train_sizes, train_scores, test_scores = learning_curve(clf, iris.data,iris.target, cv=10, n_jobs=1, train_sizes=[0.1,0.325,0.55,0.775,1])
"""
1、estimator:用于预测的模型
2、X:预测的特征数据
3、y:预测结果
4、train_sizes:训练样本相对的或绝对的数字,这些量的样本将会生成learning curve,当其为[0.1, 0.325, 0.55, 0.775, 1. ]时代表使用10%训练集训练,32.5%训练集训练,55%训练集训练,77.5%训练集训练100%训练集训练时的分数。
5、cv:交叉验证生成器或可迭代的次数
6、scoring:调用的方法 https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
# 学习曲线模块
from sklearn.model_selection import learning_curve
# 导入digits数据集
from sklearn.datasets import load_digits
# 支持向量机
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
digits = load_digits()
X = digits.data
y = digits.target
# neg_mean_squared_error代表求均值平方差
train_sizes, train_loss, test_loss = learning_curve(
SVC(gamma=0.01), X, y, cv=10, scoring='neg_mean_squared_error',
train_sizes=np.linspace(.1, 1.0, 5))
# loss值为负数,需要取反
train_loss_mean = -np.mean(train_loss, axis=1)
test_loss_mean = -np.mean(test_loss, axis=1)
# 设置样式与label
plt.plot(train_sizes, train_loss_mean, 'o-', color="r",
label="Training")
plt.plot(train_sizes, test_loss_mean, 'o-', color="g",
label="Cross-validation")
plt.xlabel("Training examples")
plt.ylabel("Loss")
# 显示图例
plt.legend(loc="best")
plt.show()
"""
# 参数调优2:网格搜索(缺点:不能舍弃参数)
# parameters = {'splitter':('best','random')
# ,'criterion':("gini","entropy")
# ,"max_depth":[*range(1,10)]
# ,'min_samples_leaf':[*range(1,50,5)]
# ,'min_impurity_decrease':[*np.linspace(0,0.5,20)]
# }
#
# clf = DecisionTreeClassifier(random_state=25)
# GS = GridSearchCV(clf, parameters, cv=10)
# GS.fit(Xtrain,Ytrain)
#
# GS.best_params_
#
# GS.best_score_
# 交叉验证
# from sklearn.datasets import load_boston
# from sklearn.model_selection import cross_val_score
# from sklearn.tree import DecisionTreeRegressor
# boston = load_boston()
# regressor = DecisionTreeRegressor(random_state=0)
# cross_val_score(regressor, boston.data, boston.target, cv=10,
# scoring = "neg_mean_squared_error")
"""
Transform(): Method using these calculated parameters apply the transformation to a particular dataset.
解释:在Fit的基础上,进行标准化,降维,归一化等操作(看具体用的是哪个工具,如PCA,StandardScaler等)。
Fit_transform(): joins the fit() and transform() method for transformation of dataset.
解释:fit_transform是fit和transform的组合,既包括了训练又包含了转换。
transform()和fit_transform()二者的功能都是对数据进行某种统一处理(比如标准化~N(0,1),将数据缩放(映射)到某个固定区间,归一化,正则化等)
fit_transform(trainData)对部分数据先拟合fit,找到该part的整体指标,如均值、方差、最大值最小值等等(根据具体转换的目的),然后对该trainData进行转换transform,从而实现数据的标准化、归一化等等。
根据对之前部分trainData进行fit的整体指标,对剩余的数据(testData)使用同样的均值、方差、最大最小值等指标进行转换transform(testData),从而保证train、test处理方式相同。所以,一般都是这么用:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit_tranform(X_train)
sc.tranform(X_test)
1. 必须先用fit_transform(trainData),之后再transform(testData)
2. 如果直接transform(testData),程序会报错
3. 如果fit_transfrom(trainData)后,使用fit_transform(testData)而不transform(testData),虽然也能归一化,但是两个结果不是在同一个“标准”下的,具有明显差异。(一定要避免这种情况)
"""
# 数据预处理 https://zhuanlan.zhihu.com/p/38160930
| [
"350840291@qq.com"
] | 350840291@qq.com |
0c16543c22bf0a5523d861f24fc2de0d4fb253c8 | f038216be109882668ccd89b71efe0127d845bfb | /LeetCode/min_stack.py | 9dca4b9380e50d1dbf1198cffd736275f183daad | [] | no_license | kunalt4/ProblemSolvingDSandAlgo | 84b29a7eb2f73ea3b0450ed4b0707bc2d031c00d | 6a796dd1a778049418d47bc3b94b82c7a2680d26 | refs/heads/master | 2021-08-16T23:05:39.452968 | 2020-09-16T00:02:06 | 2020-09-16T00:02:06 | 221,677,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | class MinStack:
def __init__(self):
    """
    initialize your data structure here.
    """
    # Stack of (value, running_minimum) pairs: each entry remembers the
    # minimum of everything at or below it, so getMin is O(1).
    self.queue = []
def push(self, x: int) -> None:
    """Push x, storing alongside it the minimum of the stack so far."""
    running_min = self.getMin()
    if running_min is None or x < running_min:
        running_min = x
    self.queue.append((x, running_min))
def pop(self) -> None:
    """Discard the top (value, min) entry."""
    del self.queue[-1]
def top(self) -> int:
    """Return the value on top of the stack (None when empty)."""
    return self.queue[-1][0] if self.queue else None
def getMin(self) -> int:
    """Return the current minimum of the stack (None when empty)."""
    if not self.queue:
        return None
    return self.queue[-1][1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
| [
"noreply@github.com"
] | kunalt4.noreply@github.com |
ce6243ebd2da16359d4d0e2c1cf4296bce11b1eb | c049d678830eb37879589a866b39f8e72186a742 | /upcfcardsearch/c313.py | 99e6cce0e9eef77b67d3c2b5f3dec098d7f84f7a | [
"MIT"
] | permissive | ProfessorSean/Kasutamaiza | 682bec415397ba90e30ab1c31caa6b2e76f1df68 | 7a69a69258f67bbb88bebbac6da4e6e1434947e6 | refs/heads/main | 2023-07-28T06:54:44.797222 | 2021-09-08T22:22:44 | 2021-09-08T22:22:44 | 357,771,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | import discord
from discord.ext import commands
from discord.utils import get
class c313(commands.Cog, name="c313"):
    """Discord cog that replies with the card-lookup embed for custom card c313."""

    def __init__(self, bot: commands.Bot):
        # Keep a reference to the bot for cog lifetime.
        self.bot = bot

    @commands.command(name='Sakeira_Angel_of_Radiance', aliases=['c313'])
    async def example_embed(self, ctx):
        """Build and send the embed describing 'Sakeira, Angel of Radiance'."""
        embed = discord.Embed(title='Sakeira, Angel of Radiance')
        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2361296.jpg')
        # Card attributes, one embed field each.
        embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
        embed.add_field(name='Type (Attribute)', value='Fairy/Xyz/Effect (LIGHT)', inline=False)
        embed.add_field(name='Rank (ATK/DEF)', value='0 (50/50)', inline=False)
        embed.add_field(name='Monster Effect', value='3 monsters Special Summoned from the Extra Deck with the same Level/Rank/Link Rating\n(This card\'s original Rank is always treated as 1.)\nAt the start of the Damage Step, if this card battles a monster: Destroy that monster. Once per turn (Quick Effect): You can detach 1 material from this card, then target 1 face-up monster on the field; it gains 3000 ATK/DEF, but its effects are negated.', inline=False)
        embed.set_footer(text='Set Code: ANCF')
        await ctx.send(embed=embed)
def setup(bot: commands.Bot):
    # discord.py extension entry point: register this cog when the
    # extension is loaded.
    bot.add_cog(c313(bot))
"professorsean3@gmail.com"
] | professorsean3@gmail.com |
97ee36d34266878ce39e0966a92bc7b4a28296ef | 6ea94d75b6e48952c1df2bda719a886f638ed479 | /build/catkin_generated/order_packages.py | 739b395ae277adbe22aa0c27c87e69448faf3ecb | [] | no_license | lievech/ork_ws | 634e26355503c69b76df7fca41402ea43c228f49 | e828846b962974a038be08a5ce39601b692d4045 | refs/heads/master | 2020-08-02T20:19:43.109158 | 2019-09-28T11:56:56 | 2019-09-28T11:56:56 | 211,493,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # generated from catkin/cmake/template/order_packages.context.py.in
# Root of the catkin workspace's source space.
source_root_dir = "/home/lhn/ork_ws/src"
# Package filters are empty in this generated context -> no whitelist or
# blacklist restrictions apply.
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
# Chained ROS/catkin workspaces that this workspace overlays.
underlay_workspaces = "/home/lhn/catkin_ws/devel;/home/lhn/dev/catkin_ws/install;/home/lhn/dev/catkin_ws/devel;/opt/ros/kinetic".split(';') if "/home/lhn/catkin_ws/devel;/home/lhn/dev/catkin_ws/install;/home/lhn/dev/catkin_ws/devel;/opt/ros/kinetic" != "" else []
| [
"2328187416@qq.com"
] | 2328187416@qq.com |
6be89daa6031f02d723df31d1d37085327e40bca | 7aec3f10b07403b542e1c14a30a6e00bb479c3fe | /Codewars/7 kyu/The highest profit wins!.py | 5e228294dea1a2500dbb9f73de763563d9840210 | [] | no_license | VictorMinsky/Algorithmic-Tasks | a5871749b377767176ba82308a6a0962e1b3e400 | 03a35b0541fe413eca68f7b5521eaa35d0e611eb | refs/heads/master | 2020-08-02T23:18:06.876712 | 2020-01-16T19:08:49 | 2020-01-16T19:08:49 | 211,541,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | """
Story
Ben has a very simple idea to make some profit: he buys something and sells it again. Of course, this wouldn't give him any profit at all if he was simply to buy and sell it at the same price. Instead, he's going to buy it for the lowest possible price and sell it at the highest.
Task
Write a function that returns both the minimum and maximum number of the given list/array.
Examples
min_max([1,2,3,4,5]) == [1,5]
min_max([2334454,5]) == [5, 2334454]
min_max([1]) == [1, 1]
Remarks
All arrays or lists will always have at least one element, so you don't need to check the length. Also, your function will always get an array or a list, you don't have to check for null, undefined or similar.
"""
def min_max(lst):
    """Return ``[smallest, largest]`` element of the non-empty list *lst*."""
    lo = hi = lst[0]
    for value in lst[1:]:
        if value < lo:
            lo = value
        elif value > hi:
            hi = value
    return [lo, hi]
| [
"panasyuk.vityu@gmail.com"
] | panasyuk.vityu@gmail.com |
d34712b924f654bbb796cbbac888511c65eded0f | 572ce2b8a9c687f302ea4953dd9bd978470d0c4b | /sqldocker/catalog/migrations/0001_initial.py | 5af04b2cede3bcf06af327b114a5f6c6cfa07f56 | [] | no_license | fainaszar/pythonPrograms | 5f539c8b80deb5d57e6aa984b0325389cf3b6f51 | 03f6c8b540981332e6f940308c7407a5038faac9 | refs/heads/master | 2021-09-07T18:10:43.603405 | 2018-02-27T05:27:37 | 2018-02-27T05:27:37 | 106,532,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,915 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-14 08:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the catalog app: Author, Book, BookInstance, Genre.

    NOTE: generated migration -- field options (including the typos inside
    help_text strings) are preserved verbatim; editing them here would make
    the migration diverge from its recorded state.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('date_of_birth', models.DateField(blank=True, null=True)),
                ('date_of_death', models.DateField(blank=True, null=True, verbose_name='Died')),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('summary', models.TextField(help_text='Enter a breif descripiton of the book', max_length=10000)),
                ('isbn', models.CharField(help_text='13 character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>', max_length=13, verbose_name='ISBN')),
                # SET_NULL keeps the book row when its author is deleted.
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Author')),
            ],
        ),
        migrations.CreateModel(
            name='BookInstance',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, help_text='Unique ID for this paticular book accross whole library', primary_key=True, serialize=False)),
                ('imprint', models.CharField(max_length=200)),
                ('due_back', models.DateField(blank=True, null=True)),
                ('status', models.CharField(blank=True, choices=[('m', 'Maintenance'), ('o', 'On loan'), ('a', 'Available'), ('r', 'Reserved')], default='m', help_text='Book Availability', max_length=1)),
                ('book', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Book')),
            ],
            options={
                'ordering': ['due_back'],
            },
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Enter a book genre(eg Science Fiction, French Poetry etc)', max_length=200)),
            ],
        ),
        migrations.AddField(
            model_name='book',
            name='genre',
            field=models.ManyToManyField(help_text='Select a genre for this book', to='catalog.Genre'),
        ),
    ]
| [
"fainaszar@gmail.com"
] | fainaszar@gmail.com |
b66f6fc5300779c6da72a45041d8f78a306152a0 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /2nx4JCytABfczdYGt_16.py | 65994004d9fdac0a92d175f39874b8cfce3ba52e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | """
In this challenge, you must build a function that inflects an infinitive
regular Italian verb of the first conjugation form to the present tense,
including the personal subjective pronoun.
All first conjugation Italian verbs share the same suffix: **ARE**. The first
thing to do is separate the verb root from the suffix.
* Root of "programmare" ( _to code_ ) = "programm".
* Root of "giocare" ( _to play_ ) = "gioc".
For each subjective pronoun the root is combined with a new suffix: see table
below (pronouns are numbered for coding ease, in real grammar they are grouped
in singular and plural, both from first to third):
#| Pronoun| Suffix
---|---|---
1| Io ( _I_ )| o
2| Tu ( _You_ )| i
3| Egli ( _He_ )| a
4| Noi ( _We_ )| iamo
5| Voi ( _You_ )| ate
6| Essi ( _They_ )| ano
* Present tense of verb "parlare" ( _to speak_ ) for third pronoun:
* Pronoun ("Egli") + Root ("parl") + Suffix ("a") = "Egli parla".
* Present tense of verb "lavorare" ( _to work_ ) for fourth pronoun:
* Pronoun ("Noi") + Root ("lavor") + Suffix ("iamo") = "Noi lavoriamo".
There are two exceptions for present tense inflection:
* If root ends with " **c** " or " **g** " the second and fourth pronoun suffixes add a " **h** " at the start:
* "Attaccare" ( _to attack_ ) = "Tu attacchi" (instead of _"Tu attacci"_ )
* "Legare" ( _to tie_ ) = "Noi leghiamo" (instead of _"Noi legiamo"_ )
* If root ends with " **i** " the second and fourth pronoun suffixes lose the starting " **i** " (so that second pronoun suffix disappears):
* "Inviare" ( _to send_ ) = "Noi inviamo" (instead of _"Noi inviiamo"_ )
* "Tagliare" ( _to cut_ ) = "Tu tagli" (instead of _"Tu taglii"_ )
* "Mangiare" ( _to eat_ ) = "Noi mangiamo" (instead of _"Noi mangiiamo"_ )
* "Cacciare" ( _to hunt_ ) = "Tu cacci" (instead of _"Tu caccii"_ )
Given a string `verb` being the infinitive form of the first conjugation
Italian regular verb, and an integer `pronoun` being the subjective personal
pronoun, implement a function that returns the inflected form as a string.
### Examples
conjugate("programmare", 5) ➞ "Voi programmate"
conjugate("iniziare", 2) ➞ "Tu inizi"
conjugate("mancare", 4) ➞ "Noi manchiamo"
### Notes
* In the returned string, pronouns must be capitalized and verbs must be in lowercase, separated by a space between them.
* Curious fact: first conjugation (verbs ending in "are") is also called "the living conjugation", because every new verb that enters the Italian dictionary is assigned to this category as a new regular verb; it often happens for verbs "borrowed" from English and for informatical neologisms: _chattare_ , _twittare_ , _postare_ , _spammare_... will _edabittare_ be the next?
"""
def conjugate(verb, pronoun):
d = {1:['Io', 'o'],
2:['Tu', 'i'],
3:['Egli', 'a'],
4:['Noi', 'iamo'],
5:['Voi', 'ate'],
6:['Essi', 'ano']}
root = verb[:-3]
pro, suff = d[pronoun]
if root[-1] in ('c', 'g') and pronoun in (2, 4):
root = root + 'h'
if root[-1] == 'i' and pronoun in (2, 4):
suff = suff[1:]
return pro + ' ' + root + suff
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b9c29aff989b8cc73cf841b5c389bf6883295914 | f4335e8e7d3010506f570167bbba18156d3a4674 | /stubs/django/conf/locale/ko/formats.pyi | 581d846279c7333b7f12804a4726ab9dc0515996 | [] | no_license | rtpg/typehangar | 133686ea45ad6187b768290aeebda9cbcae25586 | 790d057497c4791a38f9e3e009b07935b4a12f45 | refs/heads/master | 2021-01-19T04:49:17.940793 | 2017-01-16T13:54:14 | 2017-01-16T13:54:14 | 69,260,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | pyi | # Stubs for django.conf.locale.ko.formats (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any
# Format constants for the Korean locale, declared with PEP 526 variable
# annotations instead of the legacy "# type:" comments emitted by older
# stubgen versions. Same names, same types -- type-checker-visible API is
# unchanged.
DATE_FORMAT: str = ...
TIME_FORMAT: str = ...
DATETIME_FORMAT: str = ...
YEAR_MONTH_FORMAT: str = ...
MONTH_DAY_FORMAT: str = ...
SHORT_DATE_FORMAT: str = ...
SHORT_DATETIME_FORMAT: str = ...
DATE_INPUT_FORMATS: Any = ...
TIME_INPUT_FORMATS: Any = ...
DATETIME_INPUT_FORMATS: Any = ...
DECIMAL_SEPARATOR: str = ...
THOUSAND_SEPARATOR: str = ...
NUMBER_GROUPING: int = ...
| [
"raphael@rtpg.co"
] | raphael@rtpg.co |
b2cf969038c12fc06c64cc60d9d81294d425db03 | c68d238ac786a42c4dd47d4ab5820709aa4dcdb3 | /ExFin/users/migrations/0002_auto_20180326_0034.py | e344fcc2077b49f263dd413e0ee55312c480dc74 | [] | no_license | tenebranum/ExFin | b78d2a9651d5b9e8fb0fae3adccc48f7897221d2 | 7ac7b7a0be00537a6a600721009f4a28eb90c3ab | refs/heads/master | 2022-12-14T21:17:02.334600 | 2022-09-21T10:33:27 | 2022-09-21T10:33:27 | 139,338,729 | 0 | 0 | null | 2022-12-08T00:59:15 | 2018-07-01T15:07:52 | Python | UTF-8 | Python | false | false | 456 | py | # Generated by Django 2.0.2 on 2018-03-25 21:34
from django.db import migrations
class Migration(migrations.Migration):
    """Set human-readable admin labels (verbose names) on the Profile model."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='profile',
            # Russian for "Additional information" (singular and plural).
            options={'verbose_name': 'Дополнительная информация', 'verbose_name_plural': 'Дополнительная информация'},
        ),
    ]
"vetal969696@gmail.com"
] | vetal969696@gmail.com |
1861d39623e7386994c000de1bf394dddee1eeed | 2745f49a3205c0ae14346cb1d4115f0e50a9b52e | /app/users/adapters.py | c7ce2735de6da811ca245051e27d1667cb0100d1 | [] | no_license | caleffa/lomanegra-cursos-ministerio | 0430777f7f23e422c0a3aa48ad41c71b20c18bec | c92cf6d70c2cf9c2a7cfd39e88f852e222d21528 | refs/heads/master | 2023-07-03T06:04:40.293469 | 2021-08-09T23:55:14 | 2021-08-09T23:55:14 | 394,474,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | from typing import Any
from allauth.account.adapter import DefaultAccountAdapter, get_current_site
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from django.conf import settings
from django.http import HttpRequest
from django.shortcuts import resolve_url
from encuestas.models import Encuesta
class AccountAdapter(DefaultAccountAdapter):
    """allauth account adapter with project-specific signup and redirect rules."""

    def is_open_for_signup(self, request: HttpRequest):
        # Registration can be toggled via settings.ACCOUNT_ALLOW_REGISTRATION
        # (defaults to open).
        return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)

    def send_confirmation_mail(self, request, emailconfirmation, signup):
        # Copy of the upstream allauth implementation, with ``request`` added
        # to the template context.
        current_site = get_current_site(request)
        activate_url = self.get_email_confirmation_url(
            request,
            emailconfirmation)
        ctx = {
            "user": emailconfirmation.email_address.user,
            "activate_url": activate_url,
            "current_site": current_site,
            "key": emailconfirmation.key,
            "request": request,
        }
        if signup:
            email_template = 'account/email/email_confirmation_signup'
        else:
            email_template = 'account/email/email_confirmation'
        self.send_mail(email_template,
                       emailconfirmation.email_address.email,
                       ctx)

    def get_login_redirect_url(self, request):
        # Redirect to the first pending ("snoozed") survey, if any; otherwise
        # fall back to the default allauth behaviour.
        encuestas_pendientes = Encuesta.objects.snoozed(request.user)
        if encuestas_pendientes:
            return resolve_url('encuestas:encuesta', encuesta=encuestas_pendientes.first().pk)
        return super().get_login_redirect_url(request)
class SocialAccountAdapter(DefaultSocialAccountAdapter):
    """Social-login counterpart; honours the same registration toggle."""

    def is_open_for_signup(self, request: HttpRequest, sociallogin: Any):
        return getattr(settings, "ACCOUNT_ALLOW_REGISTRATION", True)
| [
"lcaleffa@americavirtualsa.com"
] | lcaleffa@americavirtualsa.com |
bdd2d5e5b6e0af6e8bdedaddca15e291e15aa69b | e1dd6d9dccb822d472b7f4f9e8446dd9202eb5a1 | /sdk/test/test_scheduling_v1beta1_api.py | df9ee2f3b9b235097a92ca1fddfda040c1a0286e | [] | no_license | swiftdiaries/argo_client | 8af73e8df6a28f9ea5f938b5894ab8b7825e4cc2 | b93758a22d890cb33cbd81934042cfc3c12169c7 | refs/heads/master | 2020-05-17T12:11:57.556216 | 2019-07-24T23:23:33 | 2019-07-24T23:23:33 | 183,701,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,251 | py | # coding: utf-8
"""
Argo API Client
Generated python client for the Argo Workflows # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo.sdk
from api.scheduling_v1beta1_api import SchedulingV1beta1Api # noqa: E501
from argo.sdk.rest import ApiException
class TestSchedulingV1beta1Api(unittest.TestCase):
    """SchedulingV1beta1Api unit test stubs (one per generated endpoint)."""

    def setUp(self):
        # Bug fix: the original read
        # ``api.scheduling_v1beta1_api.SchedulingV1beta1Api()`` but no module
        # named ``api`` is bound by the imports (only the class itself is
        # imported), so every test raised NameError in setUp. Instantiate the
        # imported class directly.
        self.api = SchedulingV1beta1Api()

    def tearDown(self):
        pass

    def test_create_scheduling_v1beta1_priority_class(self):
        """Test case for create_scheduling_v1beta1_priority_class
        """
        pass

    def test_delete_scheduling_v1beta1_collection_priority_class(self):
        """Test case for delete_scheduling_v1beta1_collection_priority_class
        """
        pass

    def test_delete_scheduling_v1beta1_priority_class(self):
        """Test case for delete_scheduling_v1beta1_priority_class
        """
        pass

    def test_get_scheduling_v1beta1_api_resources(self):
        """Test case for get_scheduling_v1beta1_api_resources
        """
        pass

    def test_list_scheduling_v1beta1_priority_class(self):
        """Test case for list_scheduling_v1beta1_priority_class
        """
        pass

    def test_patch_scheduling_v1beta1_priority_class(self):
        """Test case for patch_scheduling_v1beta1_priority_class
        """
        pass

    def test_read_scheduling_v1beta1_priority_class(self):
        """Test case for read_scheduling_v1beta1_priority_class
        """
        pass

    def test_replace_scheduling_v1beta1_priority_class(self):
        """Test case for replace_scheduling_v1beta1_priority_class
        """
        pass

    def test_watch_scheduling_v1beta1_priority_class(self):
        """Test case for watch_scheduling_v1beta1_priority_class
        """
        pass

    def test_watch_scheduling_v1beta1_priority_class_list(self):
        """Test case for watch_scheduling_v1beta1_priority_class_list
        """
        pass


if __name__ == '__main__':
    unittest.main()
| [
"adhita94@gmail.com"
] | adhita94@gmail.com |
451ec70484000fda302a338852acf332709ecca6 | 1bad7d2b7fc920ecf2789755ed7f44b039d4134d | /ABC/138/D-1.py | 2543595f30ef39026a28d3c80847bb010a317fa7 | [] | no_license | kanekyo1234/AtCoder_solve | ce95caafd31f7c953c0fc699f0f4897dddd7a159 | e5ea7b080b72a2a2fd3fcb826cd10c4ab2e2720e | refs/heads/master | 2023-04-01T04:01:15.885945 | 2021-04-06T04:03:31 | 2021-04-06T04:03:31 | 266,151,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | from collections import deque
from collections import deque

# ABC138 D: accumulate counter increments down a rooted tree (root = node 1).
n, q = map(int, input().split())
edges = [list(map(int, input().split())) for _ in range(n - 1)]
queries = [list(map(int, input().split())) for _ in range(q)]

# Apply each increment to its target node first.
ans = [0] * n
for p, x in queries:
    ans[p - 1] += x

# Adjacency list (1-based node labels stored as-is).
adjacency = [[] for _ in range(n)]
for a, b in edges:
    adjacency[a - 1].append(b)
    adjacency[b - 1].append(a)

# BFS from the root, pushing each node's accumulated value onto its children.
# Bug fixes vs. the original: the stray debug prints (``print(ans)`` and the
# per-iteration ``print(deq)``) corrupted the judged output and are removed;
# nodes are marked visited on enqueue so none can be queued twice.
queue = deque([1])
seen = {1}
while queue:
    now = queue.popleft()
    for nxt in adjacency[now - 1]:
        if nxt not in seen:
            seen.add(nxt)
            queue.append(nxt)
            ans[nxt - 1] += ans[now - 1]
print(*ans)
| [
"kanekyohunter.0314@softbank.ne.jp"
] | kanekyohunter.0314@softbank.ne.jp |
892c213e53ab6f5e683ffc239c9749f4aedcd193 | e6d08b66d50a93137126f24f8a5bc7118fa32375 | /TM1py/Services/ChoreService.py | b3c461073283ef42f817a4957fb9a773aedd3a8a | [
"MIT"
] | permissive | BigFriendly/tm1py | bfb000f8299c335beca494859ed0ec0d2f54ade1 | 03210d672cc3797025b8de80c42037e1e11f369f | refs/heads/master | 2021-02-26T12:38:03.081027 | 2020-02-05T00:52:35 | 2020-02-05T21:26:53 | 245,526,018 | 0 | 0 | MIT | 2020-03-06T22:13:07 | 2020-03-06T22:13:06 | null | UTF-8 | Python | false | false | 7,472 | py | # -*- coding: utf-8 -*-
import functools
import json
from TM1py.Objects import Chore, ChoreTask
from TM1py.Services.ObjectService import ObjectService
def deactivate_activate(func):
    """ Higher Order function to handle activation and deactivation of chores before updating them

    The wrapped function has the shape ``func(self, chore)``. If the chore is
    currently active on the server it is deactivated first; after ``func``
    runs (successfully or not), the chore is re-activated whenever the new
    chore definition is flagged active.

    :param func: update function ``func(self, chore)`` to wrap
    :return: wrapped function
    """

    @functools.wraps(func)
    def wrapper(self, chore):
        # Deactivate first if the chore is currently active on the server.
        # (Deactivation is decided from the server-side state; reactivation
        # below is decided from the *new* chore definition.)
        if self.get(chore.name).active:
            self.deactivate(chore.name)
        try:
            # Perform the actual update. The original wrapped this in a
            # no-op ``except Exception as e: raise e``; a bare try/finally
            # propagates exceptions identically with less noise.
            return func(self, chore)
        finally:
            # Re-activate when the new definition wants the chore active,
            # even if the update itself raised.
            if chore.active:
                self.activate(chore.name)

    return wrapper
class ChoreService(ObjectService):
    """ Service to handle Object Updates for TM1 Chores
    """

    def __init__(self, rest):
        super().__init__(rest)

    def get(self, chore_name):
        """ Get a chore from the TM1 Server

        :param chore_name: name of the chore
        :return: instance of TM1py.Chore
        """
        request = "/api/v1/Chores('{}')?$expand=Tasks($expand=*,Process($select=Name),Chore($select=Name))" \
            .format(chore_name)
        response = self._rest.GET(request)
        return Chore.from_dict(response.json())

    def get_all(self):
        """ get a List of all Chores

        :return: List of TM1py.Chore
        """
        request = "/api/v1/Chores?$expand=Tasks($expand=*,Process($select=Name),Chore($select=Name))"
        response = self._rest.GET(request)
        return [Chore.from_dict(chore_as_dict) for chore_as_dict in response.json()['value']]

    def get_all_names(self):
        """ get a List of the names of all Chores

        :return: List of str
        """
        request = "/api/v1/Chores?$select=Name"
        response = self._rest.GET(request)
        return [chore['Name'] for chore in response.json()['value']]

    def create(self, chore):
        """ create chore in TM1

        :param chore: instance of TM1py.Chore
        :return: response
        """
        request = "/api/v1/Chores"
        response = self._rest.POST(request, chore.body)
        # Chores are created inactive; activate explicitly when requested.
        if chore.active:
            self.activate(chore.name)
        return response

    def delete(self, chore_name):
        """ delete chore in TM1

        :param chore_name: name of the chore
        :return: response
        """
        request = "/api/v1/Chores('{}')".format(chore_name)
        response = self._rest.DELETE(request)
        return response

    def exists(self, chore_name):
        """ Check if Chore exists

        :param chore_name: name of the chore
        :return: bool
        """
        request = "/api/v1/Chores('{}')".format(chore_name)
        return self._exists(request)

    @deactivate_activate
    def update(self, chore):
        """ update chore on TM1 Server
        does not update: DST Sensitivity!

        :param chore: instance of TM1py.Chore
        :return: response
        """
        # Update StartTime, ExecutionMode, Frequency
        request = "/api/v1/Chores('{}')".format(chore.name)
        # Remove Tasks from Body. Tasks to be managed individually below.
        chore_dict_without_tasks = chore.body_as_dict
        chore_dict_without_tasks.pop("Tasks")
        self._rest.PATCH(request, json.dumps(chore_dict_without_tasks))
        # Reconcile the new task list against the existing one: update
        # changed tasks in place, append extras, then delete leftovers.
        task_old_count = self._get_tasks_count(chore.name)
        for i, task_new in enumerate(chore.tasks):
            if i >= task_old_count:
                self._add_task(chore.name, task_new)
            else:
                task_old = self._get_task(chore.name, i)
                if task_new != task_old:
                    self._update_task(chore.name, task_new)
        # NOTE(review): relies on the loop variable ``i`` surviving the loop;
        # raises NameError if ``chore.tasks`` is empty while old tasks exist,
        # and deletes position i+1 repeatedly (positions shift as tasks are
        # removed) -- TODO confirm intended.
        for j in range(i + 1, task_old_count):
            self._delete_task(chore.name, i + 1)

    def activate(self, chore_name):
        """ activate chore on TM1 Server

        :param chore_name: name of the chore
        :return: response
        """
        request = "/api/v1/Chores('{}')/tm1.Activate".format(chore_name)
        return self._rest.POST(request, '')

    def deactivate(self, chore_name):
        """ deactivate chore on TM1 Server

        :param chore_name: name of the chore
        :return: response
        """
        request = "/api/v1/Chores('{}')/tm1.Deactivate".format(chore_name)
        return self._rest.POST(request, '')

    def set_local_start_time(self, chore_name, date_time):
        """ Makes Server crash if chore is activate (10.2.2 FP6) :)

        :param chore_name: name of the chore
        :param date_time: datetime with the desired local start date/time
        :return: response
        """
        request = "/api/v1/Chores('{}')/tm1.SetServerLocalStartTime".format(chore_name)
        # function for 3 to '03' (zero-pad time components to two digits)
        fill = lambda t: str(t).zfill(2)
        data = {
            "StartDate": "{}-{}-{}".format(date_time.year, date_time.month, date_time.day),
            "StartTime": "{}:{}:{}".format(fill(date_time.hour), fill(date_time.minute), fill(date_time.second))
        }
        return self._rest.POST(request, json.dumps(data))

    def execute_chore(self, chore_name):
        """ Ask TM1 Server to execute a chore

        :param chore_name: String, name of the chore to be executed
        :return: the response
        """
        return self._rest.POST("/api/v1/Chores('" + chore_name + "')/tm1.Execute", '')

    def _get_tasks_count(self, chore_name):
        """ Query Chore tasks count on TM1 Server

        :param chore_name: name of Chore to count tasks
        :return: int
        """
        request = "/api/v1/Chores('{}')/Tasks/$count".format(chore_name)
        response = self._rest.GET(request)
        return int(response.text)

    def _get_task(self, chore_name, step):
        """ Get task from chore

        :param chore_name: name of the chore
        :param step: integer position of the task within the chore
        :return: instance of TM1py.ChoreTask
        """
        request = "/api/v1/Chores('{}')/Tasks({})?$expand=*,Process($select=Name),Chore($select=Name)" \
            .format(chore_name, step)
        response = self._rest.GET(request)
        return ChoreTask.from_dict(response.json())

    def _delete_task(self, chore_name, step):
        """ Delete task from chore

        :param chore_name: name of the chore
        :param step: integer position of the task within the chore
        :return: response
        """
        request = "/api/v1/Chores('{}')/Tasks({})".format(chore_name, step)
        response = self._rest.DELETE(request)
        return response

    def _add_task(self, chore_name, chore_task):
        """ Create Chore task on TM1 Server

        :param chore_name: name of Chore to update
        :param chore_task: instance of TM1py.ChoreTask
        :return: response
        """
        # Tasks can only be added while the chore is inactive; deactivate
        # first and restore the previous activation state afterwards.
        chore = self.get(chore_name)
        if chore.active:
            self.deactivate(chore_name)
        try:
            request = "/api/v1/Chores('{}')/Tasks".format(chore_name)
            response = self._rest.POST(request, chore_task.body)
        except Exception as e:
            # NOTE(review): this re-raise is a no-op wrapper around the
            # finally block below; kept as-is for behavioural fidelity.
            raise e
        finally:
            if chore.active:
                self.activate(chore_name)
        return response

    def _update_task(self, chore_name, chore_task):
        """ update a chore task

        :param chore_name: name of the Chore
        :param chore_task: instance TM1py.ChoreTask
        :return: response
        """
        request = "/api/v1/Chores('{}')/Tasks({})".format(chore_name, chore_task.step)
        return self._rest.PATCH(request, chore_task.body)
"MariusWirtz2@gmail.com"
] | MariusWirtz2@gmail.com |
bfe22330785926fc4d2c6cf528c9842dbfcbed22 | d21071464bef4f3fd51e554f280418d06975a77e | /leetcode/43. Multiply Strings.py | 747f0a44adaf0dd54d658f7622d6a2399503bed4 | [] | no_license | DeshErBojhaa/sports_programming | ec106dcc24e96231d447cdcac494d76a94868b2d | 96e086d4ee6169c0f83fff3819f38f32b8f17c98 | refs/heads/master | 2021-06-13T19:43:40.782021 | 2021-03-27T14:21:49 | 2021-03-27T14:21:49 | 164,201,394 | 1 | 0 | null | 2019-08-27T22:21:26 | 2019-01-05T09:39:41 | C++ | UTF-8 | Python | false | false | 1,373 | py | # 43. Multiply Strings
class Solution:
def multiply(self, num1: str, num2: str) -> str:
if num1 == '0' or num2 == '0':
return '0'
def str_sum(a, b):
if len(a) < len(b):
a, b = b, a
ans, carry = [], 0
b = '0' * (len(a) - len(b)) + b
for x, y in zip(reversed(a), reversed(b)):
add = int(x) + int(y) + carry
ans.append(add % 10)
carry = int(add > 9)
if carry:
ans.append(1)
return ''.join(reversed([str(x) for x in ans]))
if len(num1) < len(num2):
num1, num2 = num2, num1
num1, num2 = num1[::-1], num2[::-1]
ans = '0'
carry = 0
for i in range(len(num2)):
x = int(num2[i])
carry, tmp_ans = 0, []
for j in range(len(num1)):
sm = x * int(num1[j]) + carry
tmp_ans.append(sm%10)
carry = sm // 10
if carry:
tmp_ans.append(carry)
tmp_ans = tmp_ans[::-1]
for j in range(i):
tmp_ans.append(0)
ans = str_sum(ans, ''.join(map(str,tmp_ans)))
return ans
| [
"noreply@github.com"
] | DeshErBojhaa.noreply@github.com |
e855c443e9701c74c9c931a05f911ad23be542d4 | 182d36353a6e33dc1f27f2dc7c0ae95577941dca | /python大数据分析基础及实战/pandas_data_clean.py | 319213a7a6809af5f975e40bdc4f0d7f12be8953 | [] | no_license | tp-yan/PythonScript | d0da587162b1f621ed6852be758705690a6c9dce | 497c933217019046aca0d4258b174a13965348a7 | refs/heads/master | 2020-09-02T02:49:20.305732 | 2019-12-01T06:54:19 | 2019-12-01T06:54:19 | 219,115,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,629 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 13:27:15 2019
pandas数据处理: 1.数据清洗
处理缺失数据以及清除无意义的信息,如删除无关数据、重复数据,平滑噪声数据,处理缺失、异常值等
@author: tangpeng
"""
from pandas import Series,DataFrame, read_excel
data_source_path = r'C:\Users\tangpeng\Documents\my_data_source\big_data_source'
print("================数据清洗================")
# (1)处理重复数据
df = DataFrame({
'age':Series([26,34,27,34,88,21,27]),
'name':Series(['Tom','Lee','Jon','Lee','James','Curry','Curry'])
})
print(df,'\n')
print(df.duplicated()) # 默认检查所有列(即所有列的值都相同才算是重复行),将后面重复的行标记为True(即第一次出现的行不计为重复行),返回Series
print('\n')
# subset:只检查部分列的重复值
print(df.duplicated(subset='name')) # 只检查name这列,只要这列的值相同就被视为重复行,不管其他列的值
# keep=False:所有重复行都标记为True,包括第一行。keep='first'(默认)/'last':除了第一/最后一行外其他行都标记为True
print(df.duplicated(subset='age',keep=False)) # 只检查name这列,只要这列的值相同就被视为重复行,不管其他列的值
# 删除重复行,只保留一行
print(df.drop_duplicates())
print(df.drop_duplicates(['name'])) # 只检查 name 列
# (2)处理缺失值
# ①识别缺失数据
# Pandas使用NaN表示浮点和非浮点数组里的缺失数据,使用.isnull() .notnull():判断是否缺失
filename = r'\rz.xlsx'
df = read_excel(data_source_path+filename,sheet_name='Sheet2')
print(df)
print(df.isnull())
print(df.notnull())
# ②处理缺失数据
# 处理方式:数据补齐、删除对应行、不处理
# 1.删除对应行:dropna
newDf = df.dropna() # 删除包含NaN的行
print(newDf)
print(len(newDf)) # 返回行数
print(newDf.columns) # 含列名的Index
newDf = df.dropna(how='all') # 只有当所有列全为空时,该行才删除
print(newDf)
print(df.dropna(axis=1)) # 按列丢弃
print(df.dropna(how='all',axis=1)) # 按列丢弃
# 2.数据补齐:fillna
print(df.fillna('?'))
df.at[0,'数分'] = None
print(df.fillna(method='pad')) # 使用该列的前一个值填充,若该行没有前一行,则仍然为NaN
print(df.fillna(method='bfill')) # 使用该列的后一个值填充,若该行没有后一行,则仍然为NaN
# 使用平均值或其他统计量代替NaN
print(df.fillna(df.mean())) # 使用该列的平均数替代
print(df.fillna(df.mean()['高代':'解几'])) # 用其他列('解几')均值替代指定列('高代')的NaN
# 不同列填充不同值
print(df.fillna({'数分':100,'高代':0})) # 没有列出的列不变
# strip()、lstrip()、rstrip():清除字符型数据首尾指定的字符(默认空白符)
df2 = DataFrame({
'age':Series([26,34,27,34,88,21,27]),
'name':Series([' Tom','Lee ',' Jon',' Lee','James ','Curry ',' Curryy'])
})
print(df2['name'])
print(type(df2['name'])) # <class 'pandas.core.series.Series'>
print(type(df2['name'][0])) # <class 'str'>
print('+++++++++++++++++++++')
print(df2['name'].str) # Series的属性,StringMethods类的实例,str:包含了很多处理字符类型的函数
print(type(df2['name'].str)) # <class 'pandas.core.strings.StringMethods'>
print('+++++++++++++++++++++')
print(df2['name'].str.strip())
print(df2['name'].str.lstrip('L')) # 去除左边L开头的字符
print(df2['name'].str.rstrip('y')) # 去除右边y结尾的字符
'''
2.数据抽取
'''
| [
"tp1084165470@gmail.com"
] | tp1084165470@gmail.com |
3af0aa51c68aff6d586fb8fffd88f501e710c456 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/parties/InviteVisual.py | 4634f5f246c01da12300794e50bed844fd0c9bac | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 6,527 | py | from datetime import datetime
import calendar
from direct.gui.DirectGui import DirectFrame, DirectLabel
from toontown.toonbase import TTLocalizer
from direct.showbase import PythonUtil
from direct.fsm.FSM import FSM
from toontown.parties import PartyGlobals
from toontown.parties import PartyUtils
from toontown.toonbase.ToontownGlobals import VALENTINES_DAY
class InviteVisual(DirectFrame):
    """On-screen party invitation card.

    Shows a themed background image with three text areas -- host name,
    activity list, and date/time -- and recolors/restyles them whenever
    the invitation theme changes.
    """
    notify = directNotify.newCategory('InviteVisual')

    def __init__(self, parent):
        DirectFrame.__init__(self, parent=parent)
        self.gui = loader.loadModel('phase_5.5/models/parties/partyInviteGUI')
        # Maps theme id -> (background image node, localized theme name, text color RGBA).
        self.inviteThemesIdToInfo = {PartyGlobals.InviteTheme.Birthday: (self.gui.find('**/birthdayPage'), TTLocalizer.PartyPlannerBirthdayTheme,
          (0.0, 0.0, 0.0, 1.0)),
           PartyGlobals.InviteTheme.GenericMale: (
         self.gui.find('**/genericMalePage'), TTLocalizer.PartyPlannerGenericMaleTheme,
         (0.7, 0.7, 0.0, 1.0)),
           PartyGlobals.InviteTheme.GenericFemale: (
         self.gui.find('**/genericFemalePage'), TTLocalizer.PartyPlannerGenericFemaleTheme,
         (0.0, 1.0, 0.5, 1.0)),
           PartyGlobals.InviteTheme.Racing: (
         self.gui.find('**/racingPage'), TTLocalizer.PartyPlannerRacingTheme,
         (0.0, 0.0, 0.0, 1.0)),
           PartyGlobals.InviteTheme.Valentoons: (
         self.gui.find('**/valentinePage1'), TTLocalizer.PartyPlannerValentoonsTheme,
         (0.0, 0.0, 0.0, 1.0)),
           PartyGlobals.InviteTheme.VictoryParty: (
         self.gui.find('**/victoryPartyPage'), TTLocalizer.PartyPlannerVictoryPartyTheme,
         (0.0, 0.0, 0.0, 1.0)),
           PartyGlobals.InviteTheme.Winter: (
         self.gui.find('**/winterPartyPage1'), TTLocalizer.PartyPlannerWinterPartyTheme,
         (1.0, 1.0, 1.0, 1.0))}
        # Background starts on theme 0; labels use placeholder text ('.') until
        # updateInvitation() fills them in.
        self.inviteThemeBackground = DirectFrame(parent=self, image=self.inviteThemesIdToInfo[0][0], relief=None)
        self.whosePartyLabel = DirectLabel(parent=self, relief=None, pos=self.gui.find('**/who_locator').getPos(), text='.', text_scale=0.067, textMayChange=True)
        self.activityTextLabel = DirectLabel(parent=self, relief=None, text='.\n.\n.\n.', pos=self.gui.find('**/what_locator').getPos(), text_scale=TTLocalizer.IVactivityTextLabel, textMayChange=True)
        self.whenTextLabel = DirectLabel(parent=self, relief=None, text='.\n.\n.', pos=self.gui.find('**/when_locator').getPos(), text_scale=TTLocalizer.IVwhenTextLabel, textMayChange=True)
        # When True, the "no friends" localization variants are used below.
        self.noFriends = False
        return

    def setNoFriends(self, noFriends):
        # Switch between the normal and "no friends" sentence templates.
        self.noFriends = noFriends
        self.inviteThemeBackground.show()

    def updateInvitation(self, hostsName, partyInfo):
        """Fill in all three text areas from a party's info and apply its theme."""
        self.partyInfo = partyInfo
        hostsName = TTLocalizer.GetPossesive(hostsName)
        self.whosePartyLabel['text'] = TTLocalizer.PartyPlannerInvitationWhoseSentence % hostsName
        if self.partyInfo.isPrivate:
            publicPrivateText = TTLocalizer.PartyPlannerPrivate.lower()
        else:
            publicPrivateText = TTLocalizer.PartyPlannerPublic.lower()
        activities = self.getActivitiesFormattedCorrectly()
        if self.noFriends:
            self.activityTextLabel['text'] = TTLocalizer.PartyPlannerInvitationThemeWhatSentenceNoFriends % (publicPrivateText, activities)
        else:
            self.activityTextLabel['text'] = TTLocalizer.PartyPlannerInvitationThemeWhatSentence % (publicPrivateText, activities)
        if self.noFriends:
            self.whenTextLabel['text'] = TTLocalizer.PartyPlannerInvitationWhenSentenceNoFriends % (PartyUtils.formatDate(self.partyInfo.startTime.year, self.partyInfo.startTime.month, self.partyInfo.startTime.day), PartyUtils.formatTime(self.partyInfo.startTime.hour, self.partyInfo.startTime.minute))
        else:
            self.whenTextLabel['text'] = TTLocalizer.PartyPlannerInvitationWhenSentence % (PartyUtils.formatDate(self.partyInfo.startTime.year, self.partyInfo.startTime.month, self.partyInfo.startTime.day), PartyUtils.formatTime(self.partyInfo.startTime.hour, self.partyInfo.startTime.minute))
        self.changeTheme(partyInfo.inviteTheme)

    def getActivitiesFormattedCorrectly(self):
        """Return the localized, de-duplicated activity list as display text.

        Multiple activities are joined with commas and a localized
        conjunction before the last entry, then line-wrapped.
        """
        activitiesString = ''
        activityList = []
        for activity in self.partyInfo.activityList:
            text = TTLocalizer.PartyActivityNameDict[activity.activityId]['invite']
            if text not in activityList:
                activityList.append(text)
        if len(activityList) == 1:
            return '\n' + TTLocalizer.PartyPlannerInvitationThemeWhatActivitiesBeginning + activityList[0]
        conjunction = TTLocalizer.PartyActivityConjunction
        for activity in activityList:
            activitiesString = '%s, %s' % (activitiesString, activity)
        # Drop the leading ', ' and replace the final comma with the conjunction.
        activitiesString = activitiesString[2:]
        activitiesString = activitiesString[:activitiesString.rfind(',')] + conjunction + activitiesString[activitiesString.rfind(',') + 1:]
        activitiesString = TTLocalizer.PartyPlannerInvitationThemeWhatActivitiesBeginning + activitiesString
        return self.insertCarriageReturn(activitiesString)

    def insertCarriageReturn(self, stringLeft, stringDone=''):
        # Recursively wrap the text by inserting '\n' at the first space found
        # near the desired line width.
        desiredNumberOfCharactersInLine = 42
        if len(stringLeft) < desiredNumberOfCharactersInLine:
            return stringDone + '\n' + stringLeft
        for i in xrange(desiredNumberOfCharactersInLine - 6, len(stringLeft)):
            if stringLeft[i] == ' ':
                return self.insertCarriageReturn(stringLeft[i:], stringDone + '\n' + stringLeft[:i])
        return stringDone + '\n' + stringLeft

    def changeTheme(self, newTheme):
        # Swap the background image and recolor all three labels for the theme.
        self.inviteThemeBackground['image'] = self.inviteThemesIdToInfo[newTheme][0]
        self.whosePartyLabel['text_fg'] = self.inviteThemesIdToInfo[newTheme][2]
        self.activityTextLabel['text_fg'] = self.inviteThemesIdToInfo[newTheme][2]
        self.whenTextLabel['text_fg'] = self.inviteThemesIdToInfo[newTheme][2]

    def close(self):
        self.destroy()
del self | [
"s0mberdemise@protonmail.com"
] | s0mberdemise@protonmail.com |
2ec4319a8b318185cc3a485ae30115f3a6f43c4c | 36add5afc63ec09d63b8a877c29c17391938ee5c | /.history/utils_20201113150643.py | b1ea8873e12bf771558a629a51c7e7e9797d58a3 | [] | no_license | E-STAT/sentiment_api | e84eb04a9f21c7368ca20bdb97436ffea9f65f25 | bd9ee0d78d9eac8b6448b96c2560611a64f7b79d | refs/heads/master | 2023-01-12T13:06:14.654883 | 2020-11-20T11:30:22 | 2020-11-20T11:30:22 | 314,534,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | import re
import string
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import TweetTokenizer
def process_tweet(tweet):
    """Clean, tokenize, and stem a raw tweet.

    Input:
        tweet: a string containing a tweet
    Output:
        tweets_clean: a list of words containing the processed tweet
    """
    stemmer = PorterStemmer()
    stopwords_english = stopwords.words('english')

    # Strip artifacts that carry no sentiment signal.
    tweet = re.sub(r'\$\w*', '', tweet)                 # stock tickers like $GE
    tweet = re.sub(r'^RT[\s]+', '', tweet)              # old-style "RT" retweet marker
    tweet = re.sub(r'https?:\/\/.*[\r\n]*', '', tweet)  # hyperlinks
    tweet = re.sub(r'#', '', tweet)                     # keep the hashtag word, drop '#'

    tokenizer = TweetTokenizer(preserve_case=False, strip_handles=True,
                               reduce_len=True)

    # Keep stems of tokens that are neither stopwords nor punctuation.
    return [
        stemmer.stem(token)
        for token in tokenizer.tokenize(tweet)
        if token not in stopwords_english and token not in string.punctuation
    ]
def build_freqs(tweets, ys):
    """Build frequencies.

    Input:
        tweets: a list of tweets
        ys: an m x 1 array with the sentiment label of each tweet
            (either 0 or 1)
    Output:
        freqs: a dictionary mapping each (word, sentiment) pair to its
            frequency
    """
    # Squeeze to a flat list so zip can pair each label with its tweet
    # (a no-op when ys is already one-dimensional).
    labels = np.squeeze(ys).tolist()

    freqs = {}
    for label, tweet in zip(labels, tweets):
        for word in process_tweet(tweet):
            pair = (word, label)
            freqs[pair] = freqs.get(pair, 0) + 1
    return freqs
| [
"owojori.tolulope@gmail.com"
] | owojori.tolulope@gmail.com |
a4899302ed7e52ae6969f56638557bd17b85fe82 | 0c1cf007f9d5d00ceefaf7be57e3f81c1c49fb11 | /lightning_asr/model/convolution.py | a50fc9319b9718886add9479c46efc446f3e0523 | [
"MIT"
] | permissive | sooftware/lightning-asr | f345f34dce132a6ccdb393b74c1f9bf0e1ccaac8 | 3b4d8222fad15c90a8c9b44ecacd67f309b34124 | refs/heads/main | 2023-04-30T17:46:21.737471 | 2021-05-19T11:56:33 | 2021-05-19T11:56:33 | 357,467,261 | 16 | 5 | MIT | 2021-05-12T14:22:05 | 2021-04-13T07:46:44 | Python | UTF-8 | Python | false | false | 7,518 | py | # MIT License
#
# Copyright (c) 2021 Soohwan Kim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
from torch import Tensor
from typing import Tuple
from lightning_asr.model.activation import Swish, GLU
from lightning_asr.model.modules import LayerNorm, Transpose
class DepthwiseConv1d(nn.Module):
    """1-D depthwise convolution.

    Wraps ``nn.Conv1d`` with ``groups=in_channels`` so that every input
    channel is convolved with its own bank of
    ``out_channels // in_channels`` filters (the literature's
    "depthwise" convolution).

    Args:
        in_channels (int): number of channels in the input
        out_channels (int): number of channels produced; must be a
            multiple of ``in_channels``
        kernel_size (int): size of the convolving kernel
        stride (int, optional): stride of the convolution (default: 1)
        padding (int, optional): zero-padding on both sides (default: 0)
        bias (bool, optional): add a learnable bias (default: False)

    Shape:
        input  -- (batch, in_channels, time)
        output -- (batch, out_channels, time')
    """

    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            kernel_size: int,
            stride: int = 1,
            padding: int = 0,
            bias: bool = False,
    ) -> None:
        super().__init__()
        assert out_channels % in_channels == 0, "out_channels should be constant multiple of in_channels"
        self.conv = nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            groups=in_channels,
            bias=bias,
        )

    def forward(self, inputs: Tensor) -> Tensor:
        # The grouped Conv1d does all the work.
        return self.conv(inputs)
class PointwiseConv1d(nn.Module):
    """1-D pointwise convolution (kernel size fixed at 1).

    A 1x1 convolution, commonly used to grow or shrink the channel
    dimension without touching the time axis.

    Args:
        in_channels (int): number of channels in the input
        out_channels (int): number of channels produced
        stride (int, optional): stride of the convolution (default: 1)
        padding (int, optional): zero-padding on both sides (default: 0)
        bias (bool, optional): add a learnable bias (default: True)

    Shape:
        input  -- (batch, in_channels, time)
        output -- (batch, out_channels, time')
    """

    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            stride: int = 1,
            padding: int = 0,
            bias: bool = True,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=1,
            stride=stride,
            padding=padding,
            bias=bias,
        )

    def forward(self, inputs: Tensor) -> Tensor:
        return self.conv(inputs)
class ConformerConvModule(nn.Module):
    """Conformer convolution module.

    Pipeline: layer norm -> pointwise conv + GLU (the gated expansion) ->
    depthwise conv -> batch norm -> Swish -> pointwise conv -> dropout.
    Input arrives channels-last and is transposed internally; the output
    is transposed back to (batch, time, dim).

    Args:
        in_channels (int): number of channels in the input
        kernel_size (int, optional): depthwise kernel size; must be odd
            for 'SAME' padding (default: 31)
        expansion_factor (int, optional): channel expansion before the
            GLU; only 2 is supported (default: 2)
        dropout_p (float, optional): dropout probability (default: 0.1)

    Shape:
        input  -- (batch, time, dim)
        output -- (batch, time, dim)
    """

    def __init__(
            self,
            in_channels: int,
            kernel_size: int = 31,
            expansion_factor: int = 2,
            dropout_p: float = 0.1,
    ) -> None:
        super().__init__()
        assert (kernel_size - 1) % 2 == 0, "kernel_size should be a odd number for 'SAME' padding"
        assert expansion_factor == 2, "Currently, Only Supports expansion_factor 2"
        layers = [
            LayerNorm(in_channels),
            Transpose(shape=(1, 2)),
            PointwiseConv1d(in_channels, in_channels * expansion_factor, stride=1, padding=0, bias=True),
            GLU(dim=1),
            DepthwiseConv1d(in_channels, in_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2),
            nn.BatchNorm1d(in_channels),
            Swish(),
            PointwiseConv1d(in_channels, in_channels, stride=1, padding=0, bias=True),
            nn.Dropout(p=dropout_p),
        ]
        self.sequential = nn.Sequential(*layers)

    def forward(self, inputs: Tensor) -> Tensor:
        outputs = self.sequential(inputs)
        # Back to channels-last: (batch, dim, time) -> (batch, time, dim).
        return outputs.transpose(1, 2)
class Conv2dSubampling(nn.Module):
    """Convolutional 2-D subsampling to roughly 1/4 of the input length.

    Two kernel-3, stride-2 convolutions shrink both the time and the
    feature axes; the channel and feature axes are then flattened into a
    single feature dimension.

    Args:
        in_channels (int): channels of the 2-D input; must match the
            singleton channel created by ``unsqueeze(1)`` in ``forward``
            (i.e. 1)
        out_channels (int): channels produced by both convolutions

    Inputs:
        inputs (batch, time, dim): input feature sequences
        input_lengths (batch,): valid length of each sequence

    Returns:
        outputs (batch, time', out_channels * dim'): subsampled features
        output_lengths (batch,): per-sequence lengths after subsampling,
            computed as ``(length >> 2) - 1``
    """

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.sequential = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2),
            nn.ReLU(),
        )

    def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[Tensor, Tensor]:
        # (batch, time, dim) -> (batch, 1, time, dim) so Conv2d sees one channel.
        outputs = self.sequential(inputs.unsqueeze(1))
        batch_size, channels, subsampled_time, subsampled_dim = outputs.size()
        # Fold channels and frequency back into one feature dimension.
        outputs = outputs.transpose(1, 2).contiguous()
        outputs = outputs.view(batch_size, subsampled_time, channels * subsampled_dim)
        # Same arithmetic as before: floor-divide the lengths by 4, minus one.
        output_lengths = (input_lengths >> 2) - 1
        return outputs, output_lengths
| [
"sooftware@Soohwanui-MacBookPro.local"
] | sooftware@Soohwanui-MacBookPro.local |
70e6484e664647d51041b07444f98e59bc804062 | 4e44c4bbe274b0a8ccca274f29c4140dfad16d5e | /Push2_MIDI_Scripts/decompiled 10.1.2b5 scripts/Push2/master_track.py | 6945fed63946ff4e9f53dc50c482a386c8650f83 | [] | no_license | intergalacticfm/Push2_MIDI_Scripts | b48841e46b7a322f2673259d1b4131d2216f7db6 | a074e2337b2e5d2e5d2128777dd1424f35580ae1 | refs/heads/master | 2021-06-24T15:54:28.660376 | 2020-10-27T11:53:57 | 2020-10-27T11:53:57 | 137,673,221 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | # uncompyle6 version 3.0.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.13 (default, Jan 19 2017, 14:48:08)
# [GCC 6.3.0 20170118]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\master_track.py
# Compiled at: 2018-11-27 11:59:27
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listens
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import ToggleButtonControl
class MasterTrackComponent(Component):
    """Toggles the current selection between the master track and the
    previously selected item of a tracks provider (decompiled Push2 script)."""
    # Framework-managed toggle control; its state mirrors whether the master
    # track is currently selected (see _update_button_state).
    toggle_button = ToggleButtonControl()

    def __init__(self, tracks_provider=None, *a, **k):
        assert tracks_provider is not None
        super(MasterTrackComponent, self).__init__(*a, **k)
        self._tracks_provider = tracks_provider
        # Subscribe the listener below to the provider's 'selected_item' events.
        self.__on_selected_item_changed.subject = self._tracks_provider
        # Remembered so toggling off the master track can restore the old selection.
        self._previous_selection = self._tracks_provider.selected_item
        self._update_button_state()
        return

    @listens('selected_item')
    def __on_selected_item_changed(self, *a):
        self._update_button_state()
        # Only remember non-master selections: the master track itself is never
        # the "previous" selection to return to.
        if not self._is_on_master():
            self._previous_selection = self._tracks_provider.selected_item

    def _update_button_state(self):
        # Mirror the selection state onto the button LED/state.
        self.toggle_button.is_toggled = self._is_on_master()

    @toggle_button.toggled
    def toggle_button(self, toggled, button):
        # Toggled on: jump to the master track; toggled off: restore the
        # remembered selection and resync the button.
        if toggled:
            self._previous_selection = self._tracks_provider.selected_item
            self._tracks_provider.selected_item = self.song.master_track
        else:
            self._tracks_provider.selected_item = self._previous_selection
            self._update_button_state()
def _is_on_master(self):
return self._tracks_provider.selected_item == self.song.master_track | [
"ratsnake.cbs@gmail.com"
] | ratsnake.cbs@gmail.com |
7cfef3ad9a45a8220295e0ff7f9630081978c9af | a8d8d9343b9cccd03245946cce2b07d247177e63 | /Jupyter/work/bitbank/modules/scheduler/scheduler.py | e98d5ef7b44c584145c84a724211d9fed23c294e | [] | no_license | yamaguchi-milkcocholate/milkcocholate | 27dad24c6636e98948199dbfac0d5b39d6807529 | c8b013344472459b386890cacf4a39b39e9bb5a7 | refs/heads/master | 2020-03-28T16:04:45.734261 | 2019-04-06T04:52:15 | 2019-04-06T04:52:15 | 148,657,236 | 0 | 1 | null | 2019-04-06T04:52:16 | 2018-09-13T15:17:46 | Python | UTF-8 | Python | false | false | 1,692 | py | import sched
import datetime
import time
class Scheduler:
    """Drives a runner's ``processing()`` method at a fixed interval.

    The interval is given indirectly: ``second`` is the datetime exactly
    one period after ``start``, so the gap between the two defines the
    step between ticks.
    """

    def __init__(self, runner, start, end, second):
        """
        :param runner: object exposing a ``processing()`` method
        :param start: (Y, M, D, h, m, s) tuple -- first scheduled tick
        :param end: (Y, M, D, h, m, s) tuple -- last possible tick (inclusive)
        :param second: (Y, M, D, h, m, s) tuple -- start plus one interval
        """
        self.runner = runner
        self.start = datetime.datetime(*start[:6])
        self.end = datetime.datetime(*end[:6])
        self.second = datetime.datetime(*second[:6])
        self.scheduler = sched.scheduler(time.time, time.sleep)

    def __call__(self):
        """Run the whole schedule, then hand the runner back to the caller."""
        self.schedule()
        print('end of schedule')
        return self.runner

    def processing(self, *args):
        """Per-tick callback; extra args (the tick's datetime) are ignored."""
        self.runner.processing()

    def schedule(self):
        """Queue every tick from start to end (inclusive) and run them all."""
        print('start ', self.start)
        print('second', self.second)
        print('end   ', self.end)
        print()
        tick = int(time.mktime(self.start.timetuple()))
        step = int(time.mktime(self.second.timetuple()) - tick)
        last = int(time.mktime(self.end.timetuple()))
        while tick <= last:
            self.scheduler.enterabs(
                tick, 1, self.processing,
                argument=(datetime.datetime.fromtimestamp(tick),))
            tick += step
        self.scheduler.run()
| [
"zuuuubo.tetsu@outlook.jp"
] | zuuuubo.tetsu@outlook.jp |
35ab6be71b35fa4128942fbd689562ea1203dcb3 | dd2147a468dea361d0cc86eef516106771b3f486 | /FlatTreeProducer/test/crabConfig_TT_DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8.py | 5355f6e5c5c6d01c7b08f6eb30eeda16385949e9 | [] | no_license | cirkovic/FlatTree | 2fe264d6d91ace3e09e0d9c648e7f2f61ad6150a | 6103cfc07a3fcf9fd3c8720e24b15b55e109af36 | refs/heads/master | 2020-07-30T02:05:29.234034 | 2016-12-07T09:34:52 | 2016-12-07T09:34:52 | 73,637,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# CRAB3 job configuration: run the FlatTree producer over a DYJetsToLL
# MINIAODSIM dataset, one input file per job.
config = config()  # instantiate the default CRAB configuration object

#config.General.requestName = 'FCNC_MC_analysis_TTbar_Hct_1'
config.General.workArea = 'crab_projects'
#config.General.transferOutputs = True
#config.General.transferLogs = True

config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runFlatTreeMINIAOD_cfg.py'  # CMSSW config executed per job
config.JobType.inputFiles = ['conf.xml']  # shipped alongside the pset

config.Data.inputDataset = '/DYJetsToLL_M-10to50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/RunIIFall15MiniAODv1-PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/MINIAODSIM'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1  # one dataset file per job
#config.Data.totalUnits = 100
#config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
#config.Data.publication = True
#config.Data.outputDatasetTag = 'CRAB3_tutorial_May2015_MC_analysis'

#config.Site.storageSite = 'T2_US_Nebraska'
config.Site.storageSite = 'T2_HU_Budapest'  # where job output is staged
| [
"predrag.cirkovic@cern.ch"
] | predrag.cirkovic@cern.ch |
579b1c0adfccd115f17b6c8ca30c0a740f1f152c | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/communication/azure-communication-networktraversal/samples/network_traversal_samples.py | e0a658cbd18786cc9af061a881171e1587388e80 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 3,611 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: network_traversal_samples.py
DESCRIPTION:
These samples demonstrate creating a user, issuing a token, revoking a token and deleting a user.
USAGE:
python network_traversal_samples.py
Set the environment variables with your own values before running the sample:
1) COMMUNICATION_SAMPLES_CONNECTION_STRING - the connection string in your ACS resource
2) AZURE_CLIENT_ID - the client ID of your active directory application
3) AZURE_CLIENT_SECRET - the secret of your active directory application
4) AZURE_TENANT_ID - the tenant ID of your active directory application
"""
import os
from azure.communication.networktraversal._shared.utils import parse_connection_str
class CommunicationRelayClientSamples(object):
    """Sample flows for the Azure Communication Services network-traversal
    (relay) API, with and without an identity-issued user."""

    def __init__(self):
        # All inputs come from the environment; when the three AAD values are
        # missing, the samples fall back to connection-string authentication.
        self.connection_string = os.getenv('COMMUNICATION_SAMPLES_CONNECTION_STRING')
        self.client_id = os.getenv('AZURE_CLIENT_ID')
        self.client_secret = os.getenv('AZURE_CLIENT_SECRET')
        self.tenant_id = os.getenv('AZURE_TENANT_ID')

    def get_relay_config(self):
        """Create a Communication user, then fetch its relay configuration."""
        from azure.communication.networktraversal import (
            CommunicationRelayClient
        )
        from azure.communication.identity import (
            CommunicationIdentityClient
        )

        use_aad = None not in (self.client_id, self.client_secret, self.tenant_id)
        if use_aad:
            from azure.identity import DefaultAzureCredential
            endpoint, _ = parse_connection_str(self.connection_string)
            identity_client = CommunicationIdentityClient(endpoint, DefaultAzureCredential())
            relay_client = CommunicationRelayClient(endpoint, DefaultAzureCredential())
        else:
            identity_client = CommunicationIdentityClient.from_connection_string(self.connection_string)
            relay_client = CommunicationRelayClient.from_connection_string(self.connection_string)

        print("Creating new user")
        user = identity_client.create_user()
        print("User created with id:" + user.properties.get('id'))

        print("Getting relay configuration")
        relay_configuration = relay_client.get_relay_configuration(user)

        for ice_server in relay_configuration.ice_servers:
            print("Icer server:")
            print(ice_server)

    def get_relay_config_no_identity(self):
        """Fetch a relay configuration without supplying a user identity."""
        from azure.communication.networktraversal import (
            CommunicationRelayClient
        )

        use_aad = None not in (self.client_id, self.client_secret, self.tenant_id)
        if use_aad:
            from azure.identity import DefaultAzureCredential
            endpoint, _ = parse_connection_str(self.connection_string)
            relay_client = CommunicationRelayClient(endpoint, DefaultAzureCredential())
        else:
            relay_client = CommunicationRelayClient.from_connection_string(self.connection_string)

        print("Getting relay configuration")
        relay_configuration = relay_client.get_relay_configuration()

        for ice_server in relay_configuration.ice_servers:
            print("Icer server:")
            print(ice_server)
if __name__ == '__main__':
    # Exercise both sample flows back to back when run as a script.
    sample = CommunicationRelayClientSamples()
    sample.get_relay_config()
    sample.get_relay_config_no_identity()
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
ea82efb595ff46fca54727748c1b999323c90b93 | a07fd8aca2d69ade2e388054dd2c1c9991232185 | /tests/test_tutorial/test_extra_models/test_tutorial005_py39.py | 7278e93c36ae40070c1e1c9c204a6b9fe699ffdc | [
"MIT"
] | permissive | vitalik/fastapi | 76b71bbbade19f12484c73dcbdca426197cc2db6 | 0276f5fd3aafb38dcbb430177a4685aeb58e5c69 | refs/heads/master | 2023-08-01T06:56:06.053824 | 2023-07-25T20:46:02 | 2023-07-25T20:46:02 | 315,668,229 | 1 | 0 | MIT | 2020-11-24T15:07:16 | 2020-11-24T15:07:15 | null | UTF-8 | Python | false | false | 1,668 | py | import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py39
@pytest.fixture(name="client")
def get_client():
    """Build a TestClient around the tutorial app under test."""
    # Imported inside the fixture rather than at module level.
    # NOTE(review): presumably so collection does not import the py39-only
    # tutorial module on older interpreters (tests carry @needs_py39) -- confirm.
    from docs_src.extra_models.tutorial005_py39 import app

    client = TestClient(app)
    return client
@needs_py39
def test_get_items(client: TestClient):
    """The endpoint returns the fixed keyword-weight mapping as JSON."""
    response = client.get("/keyword-weights/")
    assert response.status_code == 200, response.text
    assert response.json() == {"foo": 2.3, "bar": 3.4}
@needs_py39
def test_openapi_schema(client: TestClient):
    """The generated OpenAPI document models the dict[str, float] response
    via an object schema with numeric additionalProperties."""
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == {
        "openapi": "3.1.0",
        "info": {"title": "FastAPI", "version": "0.1.0"},
        "paths": {
            "/keyword-weights/": {
                "get": {
                    "responses": {
                        "200": {
                            "description": "Successful Response",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "title": "Response Read Keyword Weights Keyword Weights Get",
                                        "type": "object",
                                        "additionalProperties": {"type": "number"},
                                    }
                                }
                            },
                        }
                    },
                    "summary": "Read Keyword Weights",
                    "operationId": "read_keyword_weights_keyword_weights__get",
                }
            }
        },
    }
| [
"noreply@github.com"
] | vitalik.noreply@github.com |
31a29ed36747fc61bbeb4a01851ced2c621d027f | 0d65e96ce358b7a6827734f6a5598f8a7ecf75e8 | /klokah/補充教材句型篇解析.py | cade41c9545c7c4ac6eb047e4cc7e3d86f0b49cd | [] | no_license | Taiwanese-Corpus/klokah_data_extract | 68f26cb8e851a6ea2e05995d02a7e4e01e4481b3 | 25cd44b68075b7650a8ec10c1c38eb16b3ca113d | refs/heads/master | 2021-01-18T12:45:46.420151 | 2015-11-04T13:03:32 | 2015-11-04T13:03:32 | 34,839,122 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,474 | py | from bs4 import BeautifulSoup
from os.path import dirname, join, abspath
class 補充教材句型篇解析:
    """Parser for the klokah supplementary-material "sentence pattern" XML.

    Walks dialectView.xml for the dialect list, then the junior/senior
    classView.xml indexes, and finally each per-dialect exercise file,
    yielding one normalized dict per exercise item.
    """
    # Repository root: the parent of this module's directory.
    專案目錄 = join(dirname(abspath(__file__)), '..')

    def 解析全部檔案(self):
        # Yield every exercise item of every dialect, each tagged with the
        # Chinese language name ('languageCh') and dialect name ('dialectCh').
        with open(join(self.專案目錄, '資料', 'dialectView.xml')) as 檔案:
            for 方言 in BeautifulSoup(檔案.read(), 'xml').find_all('item'):
                語言名 = 方言.find('languageCh').get_text(strip=True)
                方言編號 = 方言.find('dialectId').get_text(strip=True)
                方言名 = 方言.find('dialectCh').get_text(strip=True)
                for 一筆資料 in self.解析一個方言檔案(方言編號):
                    一筆資料['languageCh'] = 語言名
                    一筆資料['dialectCh'] = 方言名
                    yield 一筆資料

    def 解析一個方言檔案(self, 方言編號):
        # Yield all items of one dialect across the junior and senior sets;
        # classView.xml lists the class ids naming the per-class XML files.
        for 級 in ['junior', 'senior']:
            with open(join(self.專案目錄, '資料', '補充教材', 級, 'classView.xml')) as 檔案:
                for 檔案標仔 in BeautifulSoup(檔案.read(), 'xml').find_all('classId'):
                    for 一筆資料 in self.解析一個句型篇檔案(級, 方言編號, 檔案標仔.get_text(strip=True)):
                        yield 一筆資料

    def 解析一個句型篇檔案(self, 級, 方言編號, 檔案編號):
        # Parse one <level>/<dialect>/<class>.xml file: each <item> becomes a
        # dict of tag name -> stripped text, then is normalized per its typeId.
        資料陣列 = []
        with open(join(self.專案目錄, '資料', '補充教材', 級, str(方言編號), str(檔案編號) + '.xml')) as 檔案:
            for 方言 in BeautifulSoup(檔案.read(), 'xml').find_all('item'):
                一筆資料 = {}
                for 資料內容 in 方言.find_all(True):
                    一筆資料[資料內容.name] = 資料內容.get_text(strip=True)
                資料陣列.append(self._資料欄位正規化(一筆資料))
        return 資料陣列

    def _資料欄位正規化(self, 資料):
        # Dispatch on the item's exercise type ('typeId'); each normalizer
        # adds a '資料' key holding (indigenous text, Chinese text) pairs.
        正規化函式 = {
            '1': self._一基本詞彙,   # basic vocabulary
            '2': self._二生活百句,   # everyday sentences
            '3': self._三看圖識字,   # picture word recognition
            '4': self._四選擇題一,   # multiple choice I
            '5': self._五選擇題二,   # multiple choice II
            '6': self._六配合題,     # matching
            '7': self._七選擇題三,   # multiple choice III
            '8': self._八唸唸看,     # oral reading
            '9': self._九簡短對話,   # short dialogue
            '10': self._十看圖說話,  # picture talk
        }
        正規化函式[資料['typeId']](資料)
        return 資料

    def _一基本詞彙(self, 資料):
        # Single vocabulary word: one (indigenous, Chinese) pair.
        資料['資料'] = [(資料['wordAb'], 資料['wordCh'])]

    def _二生活百句(self, 資料):
        self._傳欄位名正規化(
            [
                ('sentenceAAb', 'sentenceACh'),
                ('sentenceBAb', 'sentenceBCh'),
                ('sentenceCAb', 'sentenceCCh'),
            ],
            資料
        )

    def _三看圖識字(self, 資料):
        資料['資料'] = [(資料['recognizeAb'], 資料['recognizeCh'])]

    def _四選擇題一(self, 資料):
        self._傳欄位名正規化(
            [
                ('choiceOneAAb', 'choiceOneACh'),
                ('choiceOneBAb', 'choiceOneBCh'),
                ('choiceOneCAb', 'choiceOneCCh'),
            ],
            資料
        )

    def _傳欄位名正規化(self, 欄位對照, 資料):
        # Shared helper: for each (indigenous field, Chinese field) pair keep
        # the values only when the indigenous text is non-empty.
        資料陣列 = []
        for 族欄位, 華欄位 in 欄位對照:
            if 資料[族欄位]:
                資料陣列.append((資料[族欄位], 資料[華欄位]))
        資料['資料'] = 資料陣列

    def _五選擇題二(self, 資料):
        self._傳欄位名正規化(
            [
                ('choiceTwoAAb', 'choiceTwoACh'),
                ('choiceTwoBAb', 'choiceTwoBCh'),
                ('choiceTwoCAb', 'choiceTwoCCh'),
            ],
            資料
        )

    def _六配合題(self, 資料):
        self._傳欄位名正規化(
            [
                ('matchAAbA', 'matchAChA'),
                ('matchAAbB', 'matchAChB'),
                ('matchBAbA', 'matchBChA'),
                ('matchBAbB', 'matchBChB'),
                ('matchCAbA', 'matchCChA'),
                ('matchCAbB', 'matchCChB'),
                ('matchDAbA', 'matchDChA'),
                ('matchDAbB', 'matchDChB'),
                ('matchEAbA', 'matchEChA'),
                ('matchEAbB', 'matchEChB'),
            ],
            資料
        )

    def _七選擇題三(self, 資料):
        資料['資料'] = [(資料['choiceThreeAb'], 資料['choiceThreeCh'])]

    def _八唸唸看(self, 資料):
        self._傳欄位名正規化(
            [
                ('oralReadingAAb', 'oralReadingACh'),
                ('oralReadingBAb', 'oralReadingBCh'),
                ('oralReadingCAb', 'oralReadingCCh'),
                ('oralReadingDAb', 'oralReadingDCh'),
                ('oralReadingEAb', 'oralReadingECh'),
            ],
            資料
        )

    def _九簡短對話(self, 資料):
        self._傳欄位名正規化(
            [
                ('dialogueAAb', 'dialogueACh'),
                ('dialogueBAb', 'dialogueBCh'),
                ('dialogueCAb', 'dialogueCCh'),
                ('dialogueDAb', 'dialogueDCh'),
                ('dialogueEAb', 'dialogueECh'),
            ],
            資料
        )

    def _十看圖說話(self, 資料):
        self._傳欄位名正規化(
            [
                ('pictureTalkAb', 'pictureTalkCh'),
            ],
            資料
        )
| [
"ihcaoe@gmail.com"
] | ihcaoe@gmail.com |
960b6662c8bb4ab84cb3afa154fccf1b85150481 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/benanne_kaggle-ndsb/kaggle-ndsb-master/configurations/bagging_20_cp8.py | 076d49cce4675181859ea0dde853b7ff7e22974b | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 5,727 | py | import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import dihedral_fast
import tmp_dnn
import tta
# Train/validation split used by this bagged model.
validation_split_path = "splits/bagging_split_20.pkl"

# One patch size per input branch (the plain view and the 45-degree-rotated
# view built by tf2 below).
patch_sizes = [(95, 95), (95, 95)]

# Random data-augmentation ranges applied during training.
augmentation_params = {
    'zoom_range': (1 / 1.6, 1.6),
    'rotation_range': (0, 360),
    'shear_range': (-20, 20),
    'translation_range': (-10, 10),
    'do_flip': True,
    'allow_stretch': 1.3,
}

batch_size = 128 // 8  # per-update minibatch size
chunk_size = 32768 // 8  # examples loaded per chunk
num_chunks_train = 840  # total number of training chunks

momentum = 0.9
# Step schedule; keys are presumably chunk indices (0..num_chunks_train) -- confirm.
learning_rate_schedule = {
    0: 0.003,
    700: 0.0003,
    800: 0.00003,
}

validate_every = 20  # chunks between validation passes
save_every = 20  # chunks between checkpoint saves
def tf1(img):
    # Downscale so the longer image side maps to 85 px, then rescale into the
    # first patch size (95x95).
    ds_factor = np.maximum(img.shape[0], img.shape[1]) / 85.0
    return data.build_rescale_transform(ds_factor, img.shape, patch_sizes[0])
def tf2(img):
    # Same rescale as tf1, composed with a 45-degree rotation about the image
    # centre (feeds the second input branch of the multiscale network).
    tf = tf1(img)
    tf_center, tf_uncenter = data.build_center_uncenter_transforms(img.shape)
    tf_rot = data.build_augmentation_transform(rotation=45)
    tf_rot = tf_uncenter + tf_rot + tf_center
    return tf + tf_rot
scale_factors = [tf1, tf2]
augmentation_transforms_test = tta.build_quasirandom_transforms(35, **{
'zoom_range': (1 / 1.4, 1.4),
'rotation_range': (0, 360),
'shear_range': (-10, 10),
'translation_range': (-8, 8),
'do_flip': True,
'allow_stretch': 1.2,
})
data_loader = load.ZmuvMultiscaleDataLoader(scale_factors=scale_factors, num_chunks_train=num_chunks_train,
patch_sizes=patch_sizes, chunk_size=chunk_size, augmentation_params=augmentation_params,
augmentation_transforms_test=augmentation_transforms_test, validation_split_path=validation_split_path)
# Conv2DLayer = nn.layers.cuda_convnet.Conv2DCCLayer
# MaxPool2DLayer = nn.layers.cuda_convnet.MaxPool2DCCLayer
Conv2DLayer = tmp_dnn.Conv2DDNNLayer
MaxPool2DLayer = tmp_dnn.MaxPool2DDNNLayer
def build_model():
    """Build the two-input (0 and 45 degree) cyclically-pooled convnet.

    Returns ([input_layer, input_layer_45], output_layer).  The two views are
    stacked along the batch axis so the conv stack is shared between them.
    """
    l0 = nn.layers.InputLayer((batch_size, 1, patch_sizes[0][0], patch_sizes[0][1]))
    l0_45 = nn.layers.InputLayer((batch_size, 1, patch_sizes[1][0], patch_sizes[1][1]))
    l0_both = nn.layers.concat([l0, l0_45], axis=0) # stack both
    l0c = dihedral.CyclicSliceLayer(l0_both)
    # Conv block 1: 32 -> 16 filters, pool, then roll cyclic feature maps.
    l1a = Conv2DLayer(l0c, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l1b = Conv2DLayer(l1a, num_filters=16, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l1 = MaxPool2DLayer(l1b, ds=(3, 3), strides=(2, 2))
    l1r = dihedral_fast.CyclicConvRollLayer(l1)
    # Conv block 2: 64 -> 32 filters.
    l2a = Conv2DLayer(l1r, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l2b = Conv2DLayer(l2a, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l2 = MaxPool2DLayer(l2b, ds=(3, 3), strides=(2, 2))
    l2r = dihedral_fast.CyclicConvRollLayer(l2)
    # Conv block 3: 128 -> 128 -> 64 filters.
    l3a = Conv2DLayer(l2r, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l3b = Conv2DLayer(l3a, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l3c = Conv2DLayer(l3b, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l3 = MaxPool2DLayer(l3c, ds=(3, 3), strides=(2, 2))
    l3r = dihedral_fast.CyclicConvRollLayer(l3)
    # Conv block 4: 256 -> 256 -> 128 filters.
    l4a = Conv2DLayer(l3r, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l4b = Conv2DLayer(l4a, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l4c = Conv2DLayer(l4b, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
    l4 = MaxPool2DLayer(l4c, ds=(3, 3), strides=(2, 2))
    l4r = dihedral_fast.CyclicConvRollLayer(l4)
    l4f = nn.layers.flatten(l4r)
    # Dense head: dropout -> dense -> maxout (feature pool), with 8-way pooling
    # over the cyclic/rotated copies, then a second maxout dense layer.
    l5 = nn.layers.DenseLayer(nn.layers.dropout(l4f, p=0.5), num_units=1024, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
    l5fp = nn.layers.FeaturePoolLayer(l5, ds=2)
    l5m = dihedral.DihedralPoolLayer(l5fp, pool_function=nn_plankton.rms) # reusing the dihedral pool layer here for 8-way cyclic pooling. Ew!
    l6 = nn.layers.DenseLayer(nn.layers.dropout(l5m, p=0.5), num_units=1024, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu)
    l6fp = nn.layers.FeaturePoolLayer(l6, ds=2)
    l7 = nn.layers.DenseLayer(nn.layers.dropout(l6fp, p=0.5), num_units=data.num_classes, nonlinearity=T.nnet.softmax, W=nn_plankton.Orthogonal(1.0))
    return [l0, l0_45], l7
def build_objective(l_ins, l_out):
    """Log-loss objective with L2 weight decay on all non-bias parameters."""
    lambda_reg = 0.0005
    params = nn.layers.get_all_non_bias_params(l_out)
    # L2 penalty over every weight matrix/filter (biases excluded).
    reg_term = sum(T.sum(p**2) for p in params)
    def loss(y, t):
        return nn_plankton.log_loss(y, t) + lambda_reg * reg_term
    return nn.objectives.Objective(l_out, loss_function=loss)
| [
"659338505@qq.com"
] | 659338505@qq.com |
3307b14e93f64351ac32c094b1588ce301c3bf9c | f0b549be6b291d98c20efc8a7b6322ae556f0068 | /data_structures/tree/binary_search_tree/binary_search_tree.py | 195990630439a798f46f1de4c5072e9efba16155 | [] | no_license | ehdgua01/Algorithms | 3607871d35521172e5f94c5dccb3b4e9e008fe61 | 107173ddf91f3588f10adbe294b64d680675a9ee | refs/heads/master | 2022-03-16T10:47:34.986441 | 2022-03-03T14:59:19 | 2022-03-03T15:28:44 | 249,157,085 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,148 | py | """
대용량의 데이터에 적합하지 않은 알고리즘이지만,
이진 탐색 트리 자료 구조를 학습하기 위한 알고리즘입니다.
"""
class Node(object):
    """A single binary-search-tree node: a payload plus child/parent links."""

    def __init__(self, value) -> None:
        self.value = value   # payload used for ordering
        self.left = None     # left child (values <= this one)
        self.right = None    # right child (values > this one)
        self.parent = None   # back-link, maintained by the tree
class BinarySearchTree(object):
def __init__(self) -> None:
self.root = None
def get_min(self, collection: Node, /) -> Node:
if collection.left:
return self.get_min(collection.left)
else:
return collection
def get_max(self, collection: Node, /) -> Node:
if collection.right:
return self.get_max(collection.right)
else:
return collection
def find_index(self, target: Node, /, collection=None):
if self.is_empty or collection is None:
return None
else:
if collection.value < target.value:
if collection.right:
collection = collection.right
else:
return collection
else:
if collection.left:
collection = collection.left
else:
return collection
return self.find_index(target, collection=collection)
def insert(self, node: Node, /) -> None:
if self.is_empty:
self.root = node
else:
index = self.find_index(node, collection=self.root)
node.parent = index
if index.value < node.value:
index.right = node
else:
index.left = node
def search(self, target, /, collection=None):
if self.is_empty or collection is None:
return None
else:
if collection.value == target:
return collection
elif collection.value < target:
return self.search(target, collection=collection.right)
else:
return self.search(target, collection=collection.left)
def remove(self, target, /):
if self.is_empty:
return None
collection = self.search(target, collection=self.root)
if collection is None:
return None
else:
self.__remove(collection, collection.parent)
def __remove(self, collection: Node, parent, /):
temp = None
if collection.right and collection.left:
temp = self.get_min(collection.right)
self.__remove(temp, temp.parent)
temp.left = collection.left
temp.right = collection.right
elif collection.right:
temp = collection.right
elif collection.left:
temp = collection.left
if temp:
temp.parent = parent
if parent:
is_left = parent.left == collection
if is_left:
parent.left = temp
else:
parent.right = temp
else:
self.root = temp
@property
def is_empty(self):
return self.root is None
| [
"ehdgua01@naver.com"
] | ehdgua01@naver.com |
bf650208e6b6746d1222cef1a8020c6fc0507a04 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/exclamations/_mans.py | 9abf726d7a0faffa49e8a16610dd007e86804acd | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.exclamations._man import _MAN
#calss header
class _MANS(_MAN, ):
    """Word entry for the plural form "MANS"; behaviour comes from _MAN."""

    def __init__(self):
        # Initialise the base word entry, then override the identifying fields.
        _MAN.__init__(self)
        self.name = "MANS"
        self.specie = 'exclamations'
        self.basic = "man"
        self.jsondata = {}
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
dae6a8f58e1e7f55370b2c531273fc77c51f3f32 | a62c437ed0beca4bb32cd085c7ba7bad80ce2022 | /urls.py | 1528960e812fc75085bdf164dade44bdc4fba14c | [
"MIT"
] | permissive | Lvxingpai/viae-gateway | d23303324e533bbe85f6209d3ca0eb67c9f5b07f | 5d88c3f0c7d1edd3e42da6bed6b866374ff7977b | refs/heads/master | 2021-01-10T16:58:05.079719 | 2016-01-15T06:23:01 | 2016-01-15T06:23:01 | 49,177,568 | 1 | 0 | null | 2016-01-10T08:50:08 | 2016-01-07T03:09:39 | Python | UTF-8 | Python | false | false | 254 | py | from django.conf.urls import url
from app.views import tasks, pong
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Routes: a health-check endpoint and the task listing endpoint.
# NOTE(review): the 'ping' pattern has no leading '^', so any URL whose path
# ends in "ping" or "ping/" will match — confirm this is intentional.
urlpatterns = [
    url(r'ping/?$', pong),
    url(r'^tasks/?$', tasks)
]
| [
"haizi.zh@gmail.com"
] | haizi.zh@gmail.com |
d9c5c7a4043db90471483a4129edf0208f509295 | c97a3396b9a574a8b43240a3a9d139be5d8dd204 | /config/setting.py | 2749adf6382f19add77cf0b560943e49549760ff | [] | no_license | cs4224485/ATM | 524f69335b8d0ca3cf910b9af36737370ab23d6c | c6ce9be03b55390f20f2bc763ade3fe8998dec9e | refs/heads/master | 2020-03-27T06:23:08.788653 | 2018-08-26T02:11:14 | 2018-08-26T02:11:14 | 146,101,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # Author: harry.cai
# DATE: 2018/1/31
import os
import logging
# Project root directory (two levels above this config module).
BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Flat-file "databases" for user accounts, admin accounts and logs.
USER_DB_PATH = os.path.join(BASEDIR, 'account', 'userdb')
ADMIN_DB_PATH = os.path.join(BASEDIR, 'account', 'admindb')
LOGGER_DB_PATH = os.path.join(BASEDIR, 'log', 'logdb')
# Log categories (maps a category name to its log file/logger name)
LogType = {
    'access': 'access_log',
    'transaction': 'transaction_log'
}
# Log levels per handler
LogLevel = {
    'global': logging.DEBUG,
    'console': logging.WARNING,
    'file': logging.INFO
}
# Transaction types: how each operation moves money and its interest rate
TransAction = {
    'transfer': {'method': 'plus_reduce', 'interest': 0},
    'repay': {'method': 'plus', 'interest': 0},
    'withdraw': {'method': 'reduce', 'interest': 0.05},
    'consume': {'method': 'reduce', 'interest': 0}
}
| [
"414804000@qq.com"
] | 414804000@qq.com |
dae6836cf32d21b82c2ab6ec8088998e119643f4 | 60ec1bf5342eca3d97629dcdf974f7731d7be12b | /streamblocks/migrations/0002_indexedparagraph_height.py | 99cb5c1e1beb6fc4e73c255c435ba02a862c3105 | [
"BSD-2-Clause"
] | permissive | andywar65/rpnew_base | 8eef1b71562a00889d170b1668faa487a753cb05 | 9281cb16783313a1cd23b1394f2bad485ac1b33d | refs/heads/master | 2020-09-07T15:06:23.205802 | 2020-03-09T17:24:13 | 2020-03-09T17:24:13 | 220,818,439 | 1 | 0 | BSD-2-Clause | 2020-02-16T12:30:04 | 2019-11-10T16:38:52 | Python | UTF-8 | Python | false | false | 468 | py | # Generated by Django 3.0.2 on 2020-02-07 15:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('streamblocks', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='indexedparagraph',
name='height',
field=models.CharField(choices=[('4', 'Medio'), ('5', 'Piccolo'), ('6', 'Molto piccolo')], default='4', max_length=1),
),
]
| [
"andy.war1965@gmail.com"
] | andy.war1965@gmail.com |
90dcd5b53232078c0c9160884ae5f2822bd1bd20 | 5241641cba4a6cf3b87284b72dcc5b6e70504f32 | /events/views.py | 842acbc90dfabe8acfe9851c847e7b8e158243a9 | [] | no_license | sdnnet3/coocooclub | a11505b2559b199164f2d881fa37a65cf9767aac | 5b1708194386048f62aa8222ef619f854758c556 | refs/heads/master | 2020-06-11T15:37:01.437796 | 2019-08-26T05:37:48 | 2019-08-26T05:37:48 | 194,009,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | from django.http import HttpResponse
from django.shortcuts import render
from . models import event
def eventPage(request):
eventList = event.objects.order_by('-date')
context = {'eventList':eventList}
return render(request, 'events/twocolumn1.html', context) | [
"clayton.hutton@gmail.com"
] | clayton.hutton@gmail.com |
54f5da8cad0ea0623c6b009e440ad3adf8dcbe11 | 1577e1cf4e89584a125cffb855ca50a9654c6d55 | /pyobjc/pyobjc/pyobjc-framework-Cocoa-2.5.1/PyObjCTest/test_nspathutilties.py | 081c53662d09d0758abb0d4e48365801798ec651 | [
"MIT"
] | permissive | apple-open-source/macos | a4188b5c2ef113d90281d03cd1b14e5ee52ebffb | 2d2b15f13487673de33297e49f00ef94af743a9a | refs/heads/master | 2023-08-01T11:03:26.870408 | 2023-03-27T00:00:00 | 2023-03-27T00:00:00 | 180,595,052 | 124 | 24 | null | 2022-12-27T14:54:09 | 2019-04-10T14:06:23 | null | UTF-8 | Python | false | false | 3,966 | py | from PyObjCTools.TestSupport import *
from objc import *
from Foundation import *
try:
unicode
except NameError:
unicode = str
class TestNSPathUtilities(TestCase):
    """Exercises the NSPathUtilities bindings: search-path functions, user/home
    directory helpers, the directory/domain constants, and selected NSString
    path-method metadata."""
    def testSearchPaths(self):
        self.assert_(
            NSSearchPathForDirectoriesInDomains( NSAllLibrariesDirectory, NSAllDomainsMask, NO ),
            "NSSearchPathForDirectoriesInDomains() failed to return anything." )
        self.assertArgIsBOOL(NSSearchPathForDirectoriesInDomains, 2)
    def testTrue(self):
        # Any truthy expandTilde argument should yield absolute ('/') paths.
        for boolVal in (1, 1==1, YES, -1):
            self.assert_(
                NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,NSUserDomainMask, boolVal)[0][0] == '/', boolVal)
    def testFalse(self):
        # Any falsy expandTilde argument should yield unexpanded ('~') paths.
        for boolVal in (0, 1!=1, NO):
            self.assert_(
                NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,NSUserDomainMask, boolVal)[0][0] != '/', boolVal)
    def testFunctions(self):
        # Each helper should return a text path/name.
        s = NSUserName()
        self.assertIsInstance(s, unicode)
        s = NSFullUserName()
        self.assertIsInstance(s, unicode)
        s = NSHomeDirectory()
        self.assertIsInstance(s, unicode)
        s = NSHomeDirectoryForUser('root')
        self.assertIsInstance(s, unicode)
        s = NSTemporaryDirectory()
        self.assertIsInstance(s, unicode)
        s = NSOpenStepRootDirectory()
        self.assertIsInstance(s, unicode)
    def testConstants(self):
        self.assertEqual(NSApplicationDirectory, 1)
        self.assertEqual(NSDemoApplicationDirectory, 2)
        self.assertEqual(NSDeveloperApplicationDirectory, 3)
        self.assertEqual(NSAdminApplicationDirectory, 4)
        self.assertEqual(NSLibraryDirectory, 5)
        self.assertEqual(NSDeveloperDirectory, 6)
        self.assertEqual(NSUserDirectory, 7)
        self.assertEqual(NSDocumentationDirectory, 8)
        self.assertEqual(NSDocumentDirectory, 9)
        self.assertEqual(NSCoreServiceDirectory, 10)
        self.assertEqual(NSDesktopDirectory, 12)
        self.assertEqual(NSCachesDirectory, 13)
        self.assertEqual(NSApplicationSupportDirectory, 14)
        self.assertEqual(NSDownloadsDirectory, 15)
        self.assertEqual(NSAllApplicationsDirectory, 100)
        self.assertEqual(NSAllLibrariesDirectory, 101)
        self.assertEqual(NSUserDomainMask, 1)
        self.assertEqual(NSLocalDomainMask, 2)
        self.assertEqual(NSNetworkDomainMask, 4)
        self.assertEqual(NSSystemDomainMask, 8)
        self.assertEqual(NSAllDomainsMask, 0x0ffff)
    @min_os_level('10.6')
    def testConstants10_6(self):
        self.assertEqual(NSAutosavedInformationDirectory, 11)
        self.assertEqual(NSInputMethodsDirectory, 16)
        self.assertEqual(NSMoviesDirectory, 17)
        self.assertEqual(NSMusicDirectory, 18)
        self.assertEqual(NSPicturesDirectory, 19)
        self.assertEqual(NSPrinterDescriptionDirectory, 20)
        self.assertEqual(NSSharedPublicDirectory, 21)
        self.assertEqual(NSPreferencePanesDirectory, 22)
        self.assertEqual(NSItemReplacementDirectory, 99)
    @min_os_level('10.8')
    def testConstants10_8(self):
        self.assertEqual(NSApplicationScriptsDirectory, 23)
        self.assertEqual(NSTrashDirectory, 102)
    def testMethods(self):
        # Verify bridged metadata on NSString path-handling methods.
        self.assertResultIsBOOL(NSString.isAbsolutePath)
        self.assertArgIsOut(NSString.completePathIntoString_caseSensitive_matchesIntoArray_filterTypes_, 0)
        self.assertArgIsBOOL(NSString.completePathIntoString_caseSensitive_matchesIntoArray_filterTypes_, 1)
        self.assertArgIsOut(NSString.completePathIntoString_caseSensitive_matchesIntoArray_filterTypes_, 2)
        self.assertResultIsBOOL(NSString.getFileSystemRepresentation_maxLength_)
        self.assertArgHasType(NSString.getFileSystemRepresentation_maxLength_, 0, b'o^' + objc._C_CHAR_AS_TEXT)
        self.assertArgSizeInArg(NSString.getFileSystemRepresentation_maxLength_, 0, 1)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    main( )
| [
"opensource@apple.com"
] | opensource@apple.com |
85886a94f7c1a38d4d18359f4ddc35d5a4e21590 | 95368a0ed3e5d50ff3b8a435ecab9e8332772ec0 | /fluent_utils/softdeps/comments.py | fda3da49895771ec2e1e48311a8e0c9e3f9f9262 | [
"Apache-2.0"
] | permissive | seroy/django-fluent-utils | 7ed4a850f5651d12f68b55b4588d1d5f631bc67d | dfd4b65a27830876dd71f9d7a20a51c889a0468b | refs/heads/master | 2021-05-10T10:24:45.711558 | 2017-11-21T10:14:27 | 2017-11-21T10:15:47 | 118,381,508 | 0 | 0 | null | 2018-01-21T23:00:58 | 2018-01-21T23:00:58 | null | UTF-8 | Python | false | false | 7,390 | py | """
Optional integration with django-contrib-comments
This avoids loading django_comments or django.contrib.comments unless it's installed.
All functions even work without having the app installed,
and return stub or dummy values so all code works as expected.
"""
import django
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.db import models
from django.dispatch import Signal
from django.utils.translation import ugettext_lazy as _
from fluent_utils.django_compat import is_installed
__all__ = (
'django_comments', # Main module
'signals', # Signals module
'get_model', # Get the comment model
'get_form', # Get the comment form
'get_public_comments_for_model', # Get publicly visible comments
'get_comments_are_open', # Utility to check if comments are open for a model.
'get_comments_are_moderated', # Utility to check if comments are moderated for a model.
'CommentModel', # Points to the comments model.
'CommentModerator', # Base class for all custom comment moderators
'CommentsRelation', # Generic relation back to the comments.
'CommentsMixin', # Model mixin for comments
'IS_INSTALLED',
)
django_comments = None
moderator = None
CommentModerator = None
get_model = None
IS_INSTALLED = False
if is_installed('django.contrib.comments'):
# Django 1.7 and below
from django.contrib import comments as django_comments
from django.contrib.comments import get_model, get_form, signals
from django.contrib.comments.moderation import moderator, CommentModerator
IS_INSTALLED = True
elif is_installed('django_comments'):
# as of Django 1.8, this is a separate app.
import django_comments
from django_comments import get_model, get_form, signals
from django_comments.moderation import moderator, CommentModerator
IS_INSTALLED = True
else:
def get_model():
return CommentManagerStub
def get_form():
raise NotImplementedError("No stub for comments.get_form() is implemented!")
class SignalsStub(object):
comment_will_be_posted = Signal(providing_args=["comment", "request"])
comment_was_posted = Signal(providing_args=["comment", "request"])
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
signals = SignalsStub()
def get_public_comments_for_model(model):
    """
    Get visible comments for the model.

    When no comments app is installed this returns an (always empty)
    stub queryset; otherwise only comments that are public and not
    removed are returned.
    """
    if not IS_INSTALLED:
        # No local comments, return empty queryset.
        # The project might be using DISQUS or Facebook comments instead.
        return CommentModelStub.objects.none()
    else:
        return CommentModel.objects.for_model(model).filter(is_public=True, is_removed=False)
def get_comments_are_open(instance):
    """
    Check if comments are open for the instance
    """
    if not IS_INSTALLED:
        # Without a comments app, nothing can be posted.
        return False
    try:
        # Get the moderator which is installed for this model.
        mod = moderator._registry[instance.__class__]
    except KeyError:
        # No moderator = no restrictions
        return True
    # Check the 'enable_field', 'auto_close_field' and 'close_after',
    # by reusing the basic Django policies.
    return CommentModerator.allow(mod, None, instance, None)
def get_comments_are_moderated(instance):
    """
    Check if comments are moderated for the instance
    """
    if not IS_INSTALLED:
        # Without a comments app there is nothing to moderate.
        return False
    try:
        # Get the moderator which is installed for this model.
        mod = moderator._registry[instance.__class__]
    except KeyError:
        # No moderator = no moderation
        return False
    # Check the 'auto_moderate_field', 'moderate_after',
    # by reusing the basic Django policies.
    return CommentModerator.moderate(mod, None, instance, None)
# Can't use EmptyQueryset stub in Django 1.6 anymore,
# using this model to build a queryset instead.
class CommentManagerStub(models.Manager):
    """Manager for the stub model: every query returns an empty queryset."""
    # Tell Django that related fields also need to use this manager:
    # This makes sure that deleting a User won't cause any SQL queries
    # on a non-existent django_comments_stub table.
    use_for_related_fields = True
    def get_queryset(self):
        return super(CommentManagerStub, self).get_queryset().none()
    # Django < 1.7 used the old get_query_set() spelling.
    if django.VERSION < (1, 7):
        def get_query_set(self):
            return super(CommentManagerStub, self).get_query_set().none()
    # Mirror the real comment manager's API with empty results.
    def in_moderation(self):
        return self.none()
    def for_model(self):
        return self.none()
class CommentModelStub(models.Model):
    """
    Stub model that :func:`get_model` returns if *django.contrib.comments* is not installed.
    """
    class Meta:
        managed = False  # never create/migrate a table for this stub
        app_label = 'django_comments'
        db_table = "django_comments_stub"
    objects = CommentManagerStub()
    # add fields so ORM queries won't cause any issues.
    # The field set mirrors the real django_comments Comment model.
    content_type = models.ForeignKey(ContentType)
    object_pk = models.TextField()
    content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")
    site = models.ForeignKey(Site)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="%(class)s_comments")
    user_name = models.CharField(max_length=50, blank=True)
    user_email = models.EmailField(blank=True)
    user_url = models.URLField(blank=True)
    comment = models.TextField(max_length=3000)
    submit_date = models.DateTimeField(default=None)
    ip_address = models.GenericIPAddressField(unpack_ipv4=True, blank=True, null=True)
    is_public = models.BooleanField(default=True)
    is_removed = models.BooleanField(default=False)
# Resolve the concrete comment model once (real model or the stub above).
CommentModel = get_model()
if IS_INSTALLED:
    # Real generic relation to the comments table, keyed on 'object_pk'.
    class CommentRelation(GenericRelation):
        def __init__(self, to=CommentModel, **kwargs):
            kwargs.setdefault('object_id_field', 'object_pk')
            super(CommentRelation, self).__init__(to, **kwargs)
else:
    # Fallback: a no-op field that exposes an empty queryset attribute so
    # templates and code can still reference the relation safely.
    class CommentRelation(models.Field):
        def __init__(self, *args, **kwargs):
            pass
        def contribute_to_class(self, cls, name, virtual_only=False):
            setattr(cls, name, CommentModelStub.objects.none())
class CommentsMixin(models.Model):
    """
    Mixin for adding comments support to a model.
    """
    # Per-object switch; also consulted by the comments_are_open property.
    enable_comments = models.BooleanField(_("Enable comments"), default=True)
    # Reverse relation to the comments model.
    # This is a stub when django.contrib.comments is not installed, so templates don't break.
    # This avoids importing django.contrib.comments models when the app is not used.
    all_comments = CommentRelation(verbose_name=_("Comments"))
    class Meta:
        abstract = True
    # Properties
    comments = property(get_public_comments_for_model, doc="Return the visible comments.")
    comments_are_moderated = property(get_comments_are_moderated, doc="Check if comments are moderated")
    @property
    def comments_are_open(self):
        """
        Check if comments are open
        """
        # The per-object flag short-circuits the moderator policy check.
        if not self.enable_comments:
            return False
        return get_comments_are_open(self)
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
0a1a20b8bc8d9ad824d050a5ba78fdd7a944c3b1 | 8454441f899c3beb9fcea26cffc2f4c3cf75ff6a | /common/code/snippets/parasites/tweetable-polyglot-png-main/pack.py | cd7f50bd6f7027a29ee8897d091d8db24a8d38ad | [
"MIT"
] | permissive | nevesnunes/env | 4a837e8fcf4a6a597992103e0a0c3d0db93e1c78 | f2cd7d884d46275a2fcb206eeeac5a8e176b12af | refs/heads/master | 2023-08-22T15:49:35.897161 | 2023-08-15T13:51:08 | 2023-08-15T13:51:08 | 199,400,869 | 9 | 6 | MIT | 2023-06-22T10:59:51 | 2019-07-29T07:24:47 | Python | UTF-8 | Python | false | false | 1,941 | py | import zlib
import sys
PNG_MAGIC = b"\x89PNG\r\n\x1a\n"
# Validate the command line before touching any files.
if len(sys.argv) != 4:
    print(f"USAGE: {sys.argv[0]} cover.png content.bin output.png")
    # BUGFIX: previously execution fell through after printing usage and
    # crashed later with an IndexError; exit with a non-zero status instead.
    sys.exit(1)
# this function is gross
def fixup_zip(data, start_offset):
    """Rebase every absolute offset inside a ZIP archive by *start_offset*.

    Mutates *data* (a bytearray) in place: the end-of-central-directory
    record's "start of central directory" field and each central-directory
    entry's local-header offset are shifted so the archive remains valid
    when embedded at *start_offset* inside a larger file.
    """
    eocd = data.rindex(b"PK\x05\x06")
    # Total number of central-directory entries (EOCD offset 10, 2 bytes).
    entry_total = int.from_bytes(data[eocd + 10:eocd + 10 + 2], "little")
    cd_field = slice(eocd + 16, eocd + 16 + 4)
    cd_pos = int.from_bytes(data[cd_field], "little")
    data[cd_field] = (cd_pos + start_offset).to_bytes(4, "little")
    # Walk the central directory (starting from the *original* offset) and
    # shift each entry's "relative offset of local header" field.
    for _ in range(entry_total):
        cd_pos = data.index(b"PK\x01\x02", cd_pos)
        lho_field = slice(cd_pos + 42, cd_pos + 42 + 4)
        local_off = int.from_bytes(data[lho_field], "little")
        data[lho_field] = (local_off + start_offset).to_bytes(4, "little")
        cd_pos += 1
# Open the cover PNG, the payload to embed, and the output file.
png_in = open(sys.argv[1], "rb")
content_in = open(sys.argv[2], "rb")
png_out = open(sys.argv[3], "wb")
# Copy the PNG signature through unchanged.
png_header = png_in.read(len(PNG_MAGIC))
assert(png_header == PNG_MAGIC)
png_out.write(png_header)
# Stream chunks: each chunk is length(4) + type(4) + body + crc(4).
while True:
    chunk_len = int.from_bytes(png_in.read(4), "big")
    chunk_type = png_in.read(4)
    chunk_body = png_in.read(chunk_len)
    chunk_csum = int.from_bytes(png_in.read(4), "big")
    if chunk_type == b"IDAT":
        # Append the payload to the first IDAT chunk (content_in.read()
        # returns b"" on later IDATs, so only the first one grows).
        start_offset = png_in.tell()-4
        content_dat = bytearray(content_in.read())
        print("Embedded file starts at offset", hex(start_offset))
        if sys.argv[2].endswith(".zip"):
            print("Fixing up zip offsets...")
            fixup_zip(content_dat, start_offset)
        # Extend the chunk and recompute the CRC over the appended bytes.
        chunk_len += len(content_dat)
        chunk_body += content_dat
        chunk_csum = zlib.crc32(content_dat, chunk_csum)
    png_out.write(chunk_len.to_bytes(4, "big"))
    png_out.write(chunk_type)
    png_out.write(chunk_body)
    png_out.write(chunk_csum.to_bytes(4, "big"))
    if chunk_type == b"IEND":
        break
png_in.close()
content_in.close()
png_out.close()
| [
"9061071+nevesnunes@users.noreply.github.com"
] | 9061071+nevesnunes@users.noreply.github.com |
// Translate *text* from sourceLanguage to targetLanguage via the Microsoft
// Translator Text API v3.  NOTE: this is JavaScript despite the .py filename.
export async function getWebTranslation(text, sourceLanguage, targetLanguage) {
    let https = require ('https');
    let host = 'api.cognitive.microsofttranslator.com';
    let path = '/translate?api-version=3.0';
    let params = '&from=' + sourceLanguage + '&to=' + targetLanguage;
    let content = JSON.stringify ([{'Text' : text}]);
    // Accumulates the response body and logs the parsed JSON.
    // NOTE(review): the return values inside these event callbacks are
    // discarded, so the translation result never reaches the caller.
    let response_handler = function (response) {
        let body = '';
        response.on ('data', function (d) {
            body += d;
        });
        response.on ('end', function () {
            let json = JSON.parse(body)
            console.log(json);
            return json
        });
        response.on ('error', function (e) {
            return {Error: + e.message};
        });
    };
    // Random v4-style GUID used as the client trace id.
    let get_guid = function () {
        return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
            var r = Math.random() * 16 | 0, v = c == 'x' ? r : (r & 0x3 | 0x8);
            // NOTE(review): subscriptionKey is declared inside this callback,
            // so the reference in the request headers below is out of scope —
            // confirm; this looks like a bug.  SECURITY: this is a hard-coded
            // API credential committed to source; it should be rotated and
            // supplied via configuration instead.
            var subscriptionKey = '7b54eb7f629e60ccdcc0afe930ad2dc9';
            return v.toString(16);
        });
    }
    let Translate = async function (content) {
        let request_params = {
            method : 'POST',
            hostname : host,
            path : path + params,
            headers : {
                'Content-Type' : 'application/json',
                // NOTE(review): this header name looks like a hash artifact;
                // the Translator API expects 'Ocp-Apim-Subscription-Key'.
                '4b6fe6c509421e55748a9ad8a94dabad' : subscriptionKey,
                'X-ClientTraceId' : get_guid (),
            }
        };
        let req = await https.request (request_params, response_handler);
        req.write (content);
        req.end();
    }
    return await Translate (content);
}
| [
"soric.matko@gmail.com"
] | soric.matko@gmail.com |
4d81faf8a6f057dae590eb378f38613b1f2d8f3a | 6e17999700d87263f3b2d146fc8b0502b31094cc | /setup.py | 86bed86eabe2291fdf92ca55990832adca2ef179 | [] | no_license | libargutxi/collective.newsticker | 9c85f75de24ad5be578c485b18f48d832b3ba402 | 11e596a5379608b920e20a1f231e6e29722457c4 | refs/heads/master | 2020-12-25T11:52:16.705745 | 2012-12-11T08:07:21 | 2012-12-11T08:07:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,938 | py | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
version = '1.0rc2.dev0'
long_description = open("README.txt").read() + "\n" + \
open(os.path.join("docs", "INSTALL.txt")).read() + "\n" + \
open(os.path.join("docs", "CREDITS.txt")).read() + "\n" + \
open(os.path.join("docs", "HISTORY.txt")).read()
setup(name='collective.newsticker',
version=version,
description="News ticker inspired by the one on the BBC News website.",
long_description=long_description,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Plone",
"Framework :: Plone :: 4.1",
# "Framework :: Plone :: 4.2", # FIXME
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Topic :: Office/Business :: News/Diary",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='plone jquery newsticker',
author='Héctor Velarde',
author_email='hector.velarde@gmail.com',
url='https://github.com/collective/collective.newsticker',
license='GPL',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'five.grok>=1.2.0',
'zope.schema>=3.8.0', # required to use IContextAwareDefaultFactory
],
extras_require={
'test': ['plone.app.testing'],
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
| [
"hector.velarde@gmail.com"
] | hector.velarde@gmail.com |
40c3139932cc04676b0b8dc6ab3baa716e931bc9 | 4e8cab639ddfa3e791b5b3a08aa491fb92c1ecaa | /Python_PostgresSQL/Python Refresher/errors_in_python.py | 7db3aaa46306a070397b8a7f319c0b86d4ef62ca | [] | no_license | LesediSekakatlela/SQL_projects | 49b91bebdf6f9b1176c40c3752232ab8d3d091dd | 9c78fc027dd137ef96446ea0946343293f3be007 | refs/heads/main | 2023-07-13T02:41:41.261558 | 2021-08-20T09:03:23 | 2021-08-20T09:03:23 | 386,646,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | def divide(dividend, divisor):
if divisor == 0:
raise ZeroDivisionError("Divisor cannot be 0.")
return dividend / divisor
# Sample data: each student record carries a name and a list of grades.
students = [
    {"name": "Bob", "grades": [75, 90]},
    {"name": "Rolf", "grades": [50]},
    {"name": "Jen", "grades": [100, 90]},
]

# BUGFIX: corrected the "Welcom" typo in the user-facing greeting.
print("Welcome to the average grade program.")
try:
    for student in students:
        name = student["name"]
        grades = student["grades"]
        # divide() raises ZeroDivisionError for a student with no grades.
        average = divide(sum(grades), len(grades))
        print(f"{name} averaged {average}.")
except ZeroDivisionError:
    print(f"ERROR: {name} has no grades!")
else:
    # Runs only when no student triggered the error above.
    print("-- All student averages calculated --")
finally:
    print("-- End of student average calculation --")
| [
"leseditumelo32@gmail.com"
] | leseditumelo32@gmail.com |
class Solution(object):
    def addDigits(self, num):
        """
        Repeatedly add the decimal digits of num until one digit remains.

        :type num: int
        :rtype: int

        Uses the closed-form digital root instead of looping over the string
        representation, making this O(1): for num > 0 the digital root is
        1 + (num - 1) % 9, and 0 maps to 0.
        """
        if num == 0:
            return 0
        return 1 + (num - 1) % 9
| [
"747848783@qq.com"
] | 747848783@qq.com |
ea712da6c3c5368cbe62fe07cdf80b5d4dfe2388 | 9c894d56f153156b82bc4bbde2db09fb04ec58cf | /17/mc/ExoDiBosonResonances/EDBRTreeMaker/test/c23000.py | ec854653b2df327b7979e936336071e57cb3f4fb | [] | no_license | gqlcms/run2_ntuple | 023bb97238980e3d4e7b8c112bc11e63658f1844 | 196c90facf042a64fddfef1e1c69681ccb9ab71c | refs/heads/master | 2020-08-04T09:01:43.466814 | 2019-10-01T11:40:36 | 2019-10-01T11:40:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the WkkToWRadionToWWW M=3000 sample.
config = Configuration()
config.section_("General")
config.General.requestName = 'c2_3000'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName='Analysis'
config.JobType.sendExternalFolder=True# = 'Analysis'
# Jet-energy-correction text files shipped to the worker nodes.
config.JobType.inputFiles = ['Fall17_17Nov2017_V8_MC_L1FastJet_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK4PFPuppi.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK4PFPuppi.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M3000-R0-06-TuneCUEP8M1_13TeV-madgraph/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
# This string is used to construct the output dataset name
name='WWW'
steam_dir='chench'
config.Data.outLFNDirBase='/store/user/chench/'#='/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/'+steam_dir+'/'+name+'/'
#config.Data.outLFNDirBase='/store/user/chench/'#='/eos/uscms/store/user/jingli/chench/'
config.Data.publication = False
config.Data.outputDatasetTag = 'c2_3000'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"c.chen@cern.ch"
] | c.chen@cern.ch |
3c30e065e142dc6f48ba905cc61fc78f98dfea69 | 5f4d82c3a6b89b75da63893b77892f9e252b7b06 | /first_year/combinatorial_algorithms/Labs/first/reverse_order/sorter_binary_insertions.py | 22984bc44d908dfb7fa01398d68f5be183655f44 | [] | no_license | jackiejohn/ifmo | 180813cbde45e3e4842452c9a57b5d54bbd207ce | c5ad17de8bfc6baa3c6166220849c564e1071e4b | refs/heads/master | 2021-06-02T06:58:47.726339 | 2017-12-28T16:46:19 | 2017-12-28T16:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | import time
f = open('data1024.txt')
j=0
k = []
numberofelements = int(f.readline())
while j<numberofelements:
i = int(f.readline())
k.append(i)
j=j+1
tit1=time.time()
for i in range(1,len(k)):
if k[i-1]>k[i]:
left = 0
right = i - 1
while True:
mid = (left + right) // 2
if k[mid]>k[i]:
right = mid - 1
else:
left = mid + 1
if left > right:
break
key = k[i]
for j in reversed(range(left+1,i+1)):
k[j] = k[j-1]
k[left] = key
tit2=time.time()
print(tit2-tit1)
| [
"zeionara@gmail.com"
] | zeionara@gmail.com |
0152c1fa815851d72ad325f7a22d2e29930e2d13 | 545bdb267ecc33ead36fadbbb94b0b9584a0d281 | /train_test/model.py | cd9663d35505c065df0fc99bcf241339af590da5 | [] | no_license | Sardhendu/DeepFaceRecognition | 20fb19fccd330505953a7a8796152caff224e8ae | b360a6df8c11f6ddcb5fd58aa55e2b70bb1df23d | refs/heads/master | 2021-09-10T15:08:39.868773 | 2018-03-28T08:28:12 | 2018-03-28T08:28:12 | 112,471,536 | 21 | 7 | null | null | null | null | UTF-8 | Python | false | false | 3,848 | py | from __future__ import division, print_function, absolute_import
from nn.loss import loss
from nn.network import *
from config import myNet, vars
import tensorflow as tf
def trainModel_FT(imgShape, params, init_wght_type='random'):
inpTensor = tf.placeholder(dtype=tf.float32, shape=[None, imgShape[0], imgShape[1], imgShape[2]])
logging.info('SHAPE: inpTensor %s', str(inpTensor.shape))
# Pad the input to make of actual size
X = tf.pad(inpTensor, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]])
X = conv1(X, params)
X = conv2(X, params)
X = conv3(X, params)
X = inception3a(X, params, trainable=False)
X = inception3b(X, params, trainable=False)
X = inception3c(X, params, trainable=False)
X = inception4a(X, params, trainable=False)
X = inception4e(X, params, trainable=False)
if init_wght_type == 'pretrained':
logging.info(
'Initializing the last layer weights with inception pre-trained weight but the parameters are '
'trainable')
X = inception5a(X, params, trainable=True)
X = inception5b(X, params, trainable=True)
X = fullyConnected(X, params, trainable=True)
elif init_wght_type == 'random':
logging.info('Initializing the last layer weights with random values and the parameter is trainable')
X = inception5a_FT(X)
X = inception5b_FT(X)
X = fullyConnected_FT(X, [736, 128])
else:
raise ValueError('Provide a valid weight initialization type')
return dict(inpTensor=inpTensor, embeddings=X)
def getEmbeddings(imgShape, params):
inpTensor = tf.placeholder(dtype=tf.float32, shape=[None, imgShape[0], imgShape[1], imgShape[2]])
logging.info('GET EMBEDDINGS: SHAPE: inpTensor %s', str(inpTensor.shape))
# Pad the input to make of actual size
X = tf.pad(inpTensor, paddings=[[0, 0], [3, 3], [3, 3], [0, 0]])
X = conv1(X, params)
X = conv2(X, params)
X = conv3(X, params)
X = inception3a(X, params, trainable=False)
X = inception3b(X, params, trainable=False)
X = inception3c(X, params, trainable=False)
X = inception4a(X, params, trainable=False)
X = inception4e(X, params, trainable=False)
X = inception5a(X, params, trainable=False)
X = inception5b(X, params, trainable=False)
X = fullyConnected(X, params, trainable=False)
return dict(inpTensor=inpTensor, embeddings=X)
def trainEmbeddings(weightDict, init_wght_type):
logging.info('INITIALIZING THE NETWORK !! ...............................')
with tf.name_scope("learning_rate"):
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(myNet['learning_rate'],
global_step * vars['batchSize'], # Used for decay computation
vars['trainSize'], # Decay steps
myNet['learning_rate_decay_rate'], # Decay rate
staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
embeddingDict = trainModel_FT(myNet['image_shape'], params=weightDict,
init_wght_type=init_wght_type)
embeddingDict['triplet_loss'] = loss(embeddingDict['embeddings'])
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(
embeddingDict['triplet_loss'], global_step=global_step
)
embeddingDict['optimizer'] = optimizer
embeddingDict['learning_rate'] = learning_rate
return embeddingDict
def summaryBuilder(sess, outFilePath):
mergedSummary = tf.summary.merge_all()
writer = tf.summary.FileWriter(outFilePath)
writer.add_graph(sess.graph)
return mergedSummary, writer
| [
"sardhendumishra@gmail.com"
] | sardhendumishra@gmail.com |
a00d423fc4ebad8852831d27ef7fe2ef797459ae | cad9c13ad5864317d7687b44f39db42a402f36f0 | /venv/Scripts/soup-script.py | 0e1a7ceadb25db4f357a25515879b4e87932b898 | [] | no_license | handaeho/lab_python | 12b686eb0d57358509f2d0cd607064deced5b25d | da068ea62682ffa70c7d23dde4ef132c49a81364 | refs/heads/master | 2020-11-26T08:22:27.656109 | 2020-04-13T02:28:47 | 2020-04-13T02:28:47 | 229,013,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | #!C:\dev\lab-python\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'soup==0.1.0','console_scripts','soup'
__requires__ = 'soup==0.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('soup==0.1.0', 'console_scripts', 'soup')()
)
| [
"mrdh94@naver.com"
] | mrdh94@naver.com |
16b8749ac5d03d7fa63239a1514a756a3a9d7c18 | 727e50c524c229bc7736a757fbc51cc5939b7e10 | /peering/migrations/0034_auto_20190308_1954.py | 03ed8a1d6237d1de1c86a9282d59370a63321db8 | [
"Apache-2.0"
] | permissive | netravnen/peering-manager | 71fbe1801fe6e063ac1b4375cdb9fe3c8c3feee5 | c2a5149b3cb197291e0c9c10040738ce5fb29f02 | refs/heads/main | 2023-08-17T02:56:43.799975 | 2023-07-04T18:23:15 | 2023-07-04T18:23:15 | 149,284,135 | 0 | 0 | Apache-2.0 | 2023-09-11T08:18:27 | 2018-09-18T12:24:28 | Python | UTF-8 | Python | false | false | 711 | py | # Generated by Django 2.1.7 on 2019-03-08 18:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("peering", "0033_router_encrypt_passwords")]
operations = [
migrations.AlterModelOptions(
name="routingpolicy",
options={
"ordering": ["-weight", "name"],
"verbose_name_plural": "routing policies",
},
),
migrations.AddField(
model_name="routingpolicy",
name="weight",
field=models.PositiveSmallIntegerField(
default=0, help_text="The higher the number, the higher the priority"
),
),
]
| [
"guillaume@mazoyer.eu"
] | guillaume@mazoyer.eu |
2f478ff41ee28ca4a2b766dd50ae38fe69fbc4a1 | b9a23d1947f5f6328ca13c7e652499173f64da47 | /s_081/s_081_plotter.pyde | 9d43ff8402374529d9c685ad205a498e69d768ab | [] | no_license | berinhard/sketches | 96414a14ec40ca1281dcd8b2fec2c50db1d76e9a | f0e4be211397f205bcc6bd2c8b053b920a26bb62 | refs/heads/master | 2021-06-09T07:49:59.220785 | 2020-12-08T04:14:55 | 2020-12-08T04:23:43 | 137,092,663 | 41 | 15 | null | 2021-03-20T00:41:39 | 2018-06-12T15:34:49 | JavaScript | UTF-8 | Python | false | false | 2,108 | pyde | # Author: Berin
# Sketches repo: https://github.com/berinhard/sketches
from random import choice
from save_frames import save_video_frames
add_library('svg')
WHITE = color(235, 235, 235)
WHITE_WITH_ALPHA = color(235, 235, 235, 70)
BLACK = color(27, 27, 27)
RED = color(181, 32, 10, 7)
GOLDEN = color(218, 185, 32, 7)
GREEN = color(32, 181, 10, 7)
CYAN = color(20, 255, 255, 7)
PURPLE = color(255, 20, 255, 7)
DISTANCES = [20 * (i + 1) for i in range(15)]
ANGLES = [45, 135, 225, 315]
class SplitableLine(object):
def __init__(self, start_pos, angle=None, walking_distance=None):
self.start_pos = start_pos
self.walking_distance = walking_distance or choice(DISTANCES)
self.angle = angle or radians(choice(ANGLES))
self.end_pos = None
def split(self):
x = self.start_pos.x + cos(self.angle) * self.walking_distance
y = self.start_pos.y + sin(self.angle) * self.walking_distance
self.end_pos = PVector(x, y)
lerp_index = choice(range(1, 10)) / 10.0
pos = PVector.lerp(self.start_pos, self.end_pos, lerp_index)
return SplitableLine(pos, self.angle + HALF_PI)
def display(self):
stroke(0)
line(self.start_pos.x, self.start_pos.y, self.end_pos.x, self.end_pos.y)
splitable_lines = [
SplitableLine(PVector(200, 200), walking_distance=DISTANCES[-1]),
SplitableLine(PVector(600, 200), walking_distance=DISTANCES[-1]),
SplitableLine(PVector(200, 600), walking_distance=DISTANCES[-1]),
SplitableLine(PVector(600, 600), walking_distance=DISTANCES[-1]),
]
def setup():
global walker
size(800, 800)
#background(BLACK)
strokeWeight(1)
#frameRate(24)
stroke(0)
def draw():
global splitable_lines
beginRecord(SVG, 's_081.svg')
for i in range(1000):
new_lines = []
for s_line in splitable_lines:
new_lines.append(s_line.split())
s_line.display()
splitable_lines = new_lines
print frameCount
noLoop()
endRecord()
def keyPressed():
if key == 's':
saveFrame("#########.png") | [
"bernardoxhc@gmail.com"
] | bernardoxhc@gmail.com |
526c71bd6687a28464391300348158e387bdff04 | 9d91b256f737b90d397d7a9306ba0c5874027de1 | /tests/duration/test_add_sub.py | 92d97c70022b77b875b60c210a8b234536f30aa2 | [
"MIT"
] | permissive | devcode1981/pendulum | bde8e60526048c346fa4d420bf10fa338310efe1 | af128be06f6b42f8127ba906e418961396919ea7 | refs/heads/master | 2023-04-07T08:07:46.284600 | 2018-11-25T03:09:34 | 2018-11-25T03:09:34 | 158,993,326 | 1 | 0 | MIT | 2023-04-04T01:06:21 | 2018-11-25T03:10:25 | Python | UTF-8 | Python | false | false | 1,162 | py | import pendulum
from datetime import timedelta
from ..conftest import assert_duration
def test_add_interval():
p1 = pendulum.duration(days=23, seconds=32)
p2 = pendulum.duration(days=12, seconds=30)
p = p1 + p2
assert_duration(p, 0, 0, 5, 0, 0, 1, 2)
def test_add_timedelta():
p1 = pendulum.duration(days=23, seconds=32)
p2 = timedelta(days=12, seconds=30)
p = p1 + p2
assert_duration(p, 0, 0, 5, 0, 0, 1, 2)
def test_add_unsupported():
p = pendulum.duration(days=23, seconds=32)
assert NotImplemented == p.__add__(5)
def test_sub_interval():
p1 = pendulum.duration(days=23, seconds=32)
p2 = pendulum.duration(days=12, seconds=28)
p = p1 - p2
assert_duration(p, 0, 0, 1, 4, 0, 0, 4)
def test_sub_timedelta():
p1 = pendulum.duration(days=23, seconds=32)
p2 = timedelta(days=12, seconds=28)
p = p1 - p2
assert_duration(p, 0, 0, 1, 4, 0, 0, 4)
def test_sub_unsupported():
p = pendulum.duration(days=23, seconds=32)
assert NotImplemented == p.__sub__(5)
def test_neg():
p = pendulum.duration(days=23, seconds=32)
assert_duration(-p, 0, 0, -3, -2, 0, 0, -32)
| [
"sebastien@eustace.io"
] | sebastien@eustace.io |
af34211344ee131cb660ec7830500c7c4adce6fb | fee1f9ec7be6049a27396ca24fb12287d36f66af | /19100101/echojce/d6_exercise_stats_word.py | 5829b67c7d96bc59958eca82d0447526895c4da3 | [] | no_license | zhoujie454650/selfteaching-python-camp | 4a85c7a792157af84c1ecfc3468c1401f946a48a | 5bb6a0c35adb3e26fee0ac68f29e12ac11a13710 | refs/heads/master | 2020-05-01T09:49:06.986010 | 2019-05-14T12:32:50 | 2019-05-14T12:32:50 | 177,408,611 | 0 | 0 | null | 2019-03-24T11:59:04 | 2019-03-24T11:59:04 | null | UTF-8 | Python | false | false | 3,128 | py | # this is d6 excercise for defining functions
# date : 2019.3.23
# author by : qiming
# 示例字符串
string1 = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambxiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
Python是一种计算机程序设计语言。是一种动态的、面向对象的脚本语言,最初被设计用于编写自动化脚本(shell),随着版本的不断更新和语言新功能的添加,越来越多被用于独立的、大型项目的开发。
'''
import collections
import re
def stats_text_en(string_en):
''' 统计英文词频
第一步:过滤英文字符,并将string拆分为list。
第二步:清理*-等标点符号。
第三步:使用collections库中的Counter函数进行词频统计并输出统计结果。
'''
result = re.sub("[^A-Za-z]", " ", string_en.strip())
newList = result.split( )
i=0
for i in range(0,len(newList)):
newList[i]=newList[i].strip('*-,.?!')
if newList[i]==' ':
newList[i].remove(' ')
else:
i=i+1
print('英文单词词频统计结果: ',collections.Counter(newList),'\n')
def stats_text_cn(string_cn):
''' 统计中文汉字字频
第一步:过滤汉字字符,并定义频率统计函数 stats()。
第二步:清除文本中的标点字符,将非标点字符组成新列表 new_list。
第三步:遍历列表,将字符同上一次循环中频率统计结果作为形参传给统计函数stats()。
第四步:统计函数在上一次统计结果基础上得出本次统计结果,赋值给newDict。
第五步:new_list遍历结束,输出倒序排列的统计结果。
'''
result1 = re.findall(u'[\u4e00-\u9fff]+', string_cn)
newString = ''.join(result1)
def stats(orgString, newDict) :
d = newDict
for m in orgString :
d[m] = d.get(m, 0) + 1
return d
new_list = []
for char in newString :
cn = char.strip('-*、。,:?!……')
new_list.append(cn)
words = dict()
for n in range(0,len(new_list)) :
words = stats(new_list[n],words)
newWords = sorted(words.items(), key=lambda item: item[1], reverse=True)
print('中文汉字字频统计结果: ',dict(newWords))
# 调用函数
stats_text_en(string1)
stats_text_cn(string1)
| [
"6396023+realcaiying@users.noreply.github.com"
] | 6396023+realcaiying@users.noreply.github.com |
a70f46a6bea169b6595b64c976d186f17c1cc171 | b47abf1a1e7daf4320c2c3a35d963ac6f7663702 | /mvpa/atlases/__init__.py | f09b72cb2af45c503c104bdd62bf243a69891440 | [
"MIT"
] | permissive | gorlins/PyMVPA | d9690399b24ae7d760735b4aa858e08912c9235d | 2a8fcaa57457c8994455144e9e69494d167204c4 | refs/heads/master | 2021-01-16T18:08:43.289333 | 2009-09-05T15:06:35 | 2009-09-05T15:06:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Import helper for PyMVPA anatomical atlases
Module Organization
===================
mvpa.atlases module contains support for various atlases
.. packagetree::
:style: UML
:group Base Implementations: base
:group Atlases from FSL: fsl
:group Helpers: warehouse transformation
"""
__docformat__ = 'restructuredtext'
if __debug__:
from mvpa.base import debug
debug('INIT', 'mvpa.atlases')
from mvpa.atlases.base import LabelsAtlas, ReferencesAtlas, XMLAtlasException
from mvpa.atlases.fsl import FSLProbabilisticAtlas
from mvpa.atlases.warehouse import Atlas, KNOWN_ATLASES, KNOWN_ATLAS_FAMILIES
if __debug__:
debug('INIT', 'mvpa.atlases end')
| [
"debian@onerussian.com"
] | debian@onerussian.com |
d545c9d0153fe73fb3024225c493f6309795b2bb | 11c036911cf893325199d9e9a91a11cd1dca7c90 | /bst_iterator/solution.py | b4cc675a147cbbf30c4d9a1c0e14e5e271338b95 | [] | no_license | arpiagar/HackerEarth | 34f817f69e94d88657c1d8991a55aca302cdc890 | 4a94f1b11a353ab6b2837a1ac77bfbd7c91f91d2 | refs/heads/master | 2021-07-18T14:23:05.124943 | 2021-02-09T21:58:12 | 2021-02-09T21:58:12 | 19,204,412 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,192 | py | #https://leetcode.com/problems/binary-search-tree-iterator/submissions/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# 7 3
class BSTIterator:
def __init__(self, root: TreeNode):
self.node_list = [root]
self.node_visited = {root: 0}
def next(self) -> int:
"""
@return the next smallest number
"""
return self.add_to_list_and_map().val
def add_to_list_and_map(self):
node = self.node_list[0]
temp_node = node
if self.node_visited[temp_node] == 0:
self.node_visited[temp_node] = 1
if temp_node.left:
while temp_node.left !=None:
self.node_list.append(temp_node.left)
self.node_visited[temp_node.left] = 1
temp_node=temp_node.left
self.node_visited[temp_node] = 2
return temp_node
else:
self.node_visited[temp_node] = 1
return self.add_to_list_and_map()
elif self.node_visited[node] == 1:
self.node_visited[node] = 2
return node
else:
self.node_list = self.node_list[1:]
if node.right == None:
return self.add_to_list_and_map()
else:
self.node_list.append(node.right)
self.node_visited[node.right]=0
return self.add_to_list_and_map()
def hasNext(self) -> bool:
"""
@return whether we have a next smallest number
"""
if self.node_list:
print(len(self.node_list),self.node_list[0].left,self.node_list[0].right, self.node_list[0].val, self.node_visited[self.node_list[0]])
if len(self.node_list) == 1 and self.node_visited[self.node_list[0]]==2 and not self.node_list[0].right:
return False
return True
else:
return False
# Your BSTIterator object will be instantiated and called as such:
# obj = BSTIterator(root)
# param_1 = obj.next()
# param_2 = obj.hasNext()
| [
"arpit.agarwal@booking.com"
] | arpit.agarwal@booking.com |
91f420a5007fb80dea0f2198aa6fae2d6e6c238f | c5b7e98aa295b3bd0596e7fca1028e1e9bbba122 | /ARK.py | c4bf5b8be6a55dd47b4be6078c2115d417f9f47f | [] | no_license | sunomon/100at6low10 | a3439462fd8c2e92eb0b94e634cdf7c2c92f93e3 | d062161e542fe6d6168204f5a45ae6da62b6f589 | refs/heads/main | 2023-06-11T18:31:11.533914 | 2021-07-06T09:07:46 | 2021-07-06T09:07:46 | 382,562,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,377 | py | import time
import pyupbit
import datetime
import schedule
from fbprophet import Prophet
access = "PgXnWWPxxv88s7z2PSnz4aoqaYL0gxkRxReK0WDK"
secret = "wgCfiEmQVH76s9sblwFKQsOKOp91t2ic3XAHuNsK"
def get_target1_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target1_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target1_price
def get_target2_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target2_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target2_price
def get_target3_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target3_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target3_price
def get_target4_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target4_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target4_price
def get_target5_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target5_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target5_price
def get_target6_price(ticker, k):
df = pyupbit.get_ohlcv(ticker, interval="day", count=2)
target6_price = df.iloc[0]['close'] + (df.iloc[0]['high'] - df.iloc[0]['low']) * k
return target6_price
def get_start_time(ticker):
df = pyupbit.get_ohlcv(ticker, interval="day", count=1)
start_time = df.index[0]
return start_time
def get_balance(ticker):
balances = upbit.get_balances()
for b in balances:
if b['currency'] == ticker:
if b['balance'] is not None:
return float(b['balance'])
else:
return 0
return 0
def get_current_price(ticker):
return pyupbit.get_orderbook(tickers=ticker)[0]["orderbook_units"][0]["ask_price"]
predicted_close_price = 0
def predict_price(ticker):
global predicted_close_price
df = pyupbit.get_ohlcv(ticker, interval="minute60")
df = df.reset_index()
df['ds'] = df['index']
df['y'] = df['close']
data = df[['ds','y']]
model = Prophet()
model.fit(data)
future = model.make_future_dataframe(periods=24, freq='H')
forecast = model.predict(future)
closeDf = forecast[forecast['ds'] == forecast.iloc[-1]['ds'].replace(hour=9)]
if len(closeDf) == 0:
closeDf = forecast[forecast['ds'] == data.iloc[-1]['ds'].replace(hour=9)]
closeValue = closeDf['yhat'].values[0]
predicted_close_price = closeValue
predict_price("KRW-ARK")
schedule.every().hour.do(lambda: predict_price("KRW-ARK"))
upbit = pyupbit.Upbit(access, secret)
print("autotrade start")
while True:
try:
now = datetime.datetime.now()
start_time = get_start_time("KRW-ARK")
middle1_time = start_time + datetime.timedelta(hours=3)
middle2_time = start_time + datetime.timedelta(hours=9)
middle3_time = start_time + datetime.timedelta(hours=15)
end_time = start_time + datetime.timedelta(days=1)
schedule.run_pending()
if start_time < now < end_time - datetime.timedelta(hours=1):
target1_price = get_target1_price("KRW-ARK", 0.1)
target2_price = get_target2_price("KRW-ARK", 0.2)
target3_price = get_target3_price("KRW-ARK", 0.3)
target4_price = get_target4_price("KRW-ARK", 0.4)
target5_price = get_target5_price("KRW-ARK", 0.5)
target6_price = get_target6_price("KRW-ARK", 0.6)
current_price = get_current_price("KRW-ARK")
krw = get_balance("KRW")
ark = get_balance("ARK")
if target1_price <= current_price < target1_price*1.02 and target1_price*1.1 <= predicted_close_price:
if krw >= 1000000 and ark < 10000/(target1_price*1.02):
upbit.buy_market_order("KRW-ARK", 1000000)
if 5000 < krw < 1000000 and ark < 10000/(target1_price*1.02):
upbit.buy_market_order("KRW-ARK", krw*0.9995)
if target2_price <= current_price < target2_price*1.02 and target2_price*1.15 <= predicted_close_price:
if krw >= 1000000 and ark < 10000/(target2_price*1.02):
upbit.buy_market_order("KRW-ARK", 1000000)
if 5000 < krw < 1000000 and ark < 10000/(target2_price*1.02):
upbit.buy_market_order("KRW-ARK", krw*0.9995)
if target3_price <= current_price < target3_price*1.02 and target3_price*1.2 <= predicted_close_price:
if krw >= 1000000 and ark < 10000/(target3_price*1.02):
upbit.buy_market_order("KRW-ARK", 1000000)
if 5000 < krw < 1000000 and ark < 10000/(target3_price*1.02):
upbit.buy_market_order("KRW-ARK", krw*0.9995)
if target4_price <= current_price < target4_price*1.02 and target4_price*1.25 <= predicted_close_price:
if krw >= 1000000 and ark < 10000/(target4_price*1.02):
upbit.buy_market_order("KRW-ARK", 1000000)
if 5000 < krw < 1000000 and ark < 10000/(target4_price*1.02):
upbit.buy_market_order("KRW-ARK", krw*0.9995)
if target5_price <= current_price < target5_price*1.02 and target5_price*1.3 <= predicted_close_price:
if krw >= 1000000 and ark < 10000/(target5_price*1.02):
upbit.buy_market_order("KRW-ARK", 1000000)
if 5000 < krw < 1000000 and ark < 10000/(target5_price*1.02):
upbit.buy_market_order("KRW-ARK", krw*0.9995)
if target6_price <= current_price < target6_price*1.02 and target6_price*1.35 <= predicted_close_price:
if krw >= 1000000 and ark < 10000/(target6_price*1.02):
upbit.buy_market_order("KRW-ARK", 1000000)
if 5000 < krw < 1000000 and ark < 10000/(target6_price*1.02):
upbit.buy_market_order("KRW-ARK", krw*0.9995)
if ark > 1000000*1.001*1.2/current_price:
upbit.sell_market_order("KRW-ARK", ark*0.9995)
elif middle1_time < now < middle2_time:
ark = get_balance("ARK")
current_price = get_current_price("KRW-ARK")
if ark > 1000000*1.001*1.1/current_price:
upbit.sell_market_order("KRW-ARK", ark*0.9995)
elif middle2_time < now < middle3_time:
ark = get_balance("ARK")
current_price = get_current_price("KRW-ARK")
if ark > 1000000*1.001*1.05/current_price:
upbit.sell_market_order("KRW-ARK", ark*0.9995)
elif middle3_time < now < end_time - datetime.timedelta(hours=1):
ark = get_balance("ARK")
current_price = get_current_price("KRW-ARK")
if ark > 1000000*1.001*1.03/current_price or current_price > predicted_close_price:
upbit.sell_market_order("KRW-ARK", ark*0.9995)
else:
ark = get_balance("ARK")
current_price = get_current_price("KRW-ARK")
if ark > 1000000*1.001/current_price:
upbit.sell_market_order("KRW-ARK", ark*0.9995)
time.sleep(1)
except Exception as e:
print(e)
time.sleep(1)
| [
"noreply@github.com"
] | sunomon.noreply@github.com |
2c43299ecc34ec23afb270c90846c746c8306059 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02761/s462447361.py | 69384e889176bfda05dabb08fc5eb2678077220e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | def resolve():
N, M = list(map(int, input().split()))
SC = [list(map(int, input().split())) for _ in range(M)]
value = [None for _ in range(N)]
for s, c in SC:
if not (value[s-1] is None or value[s-1] == c):
print(-1)
return
value[s-1] = c
for i in range(N):
if value[i] is None:
if i == 0:
if N > 1:
value[i] = 1
else:
value[i] = 0
else:
value[i] = 0
if N > 1 and value[0] == 0:
print(-1)
else:
print("".join(map(str, value)))
if '__main__' == __name__:
resolve() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2aa898f0fc19d777a0f1a0ab64f2ad7965b9298b | bcbc5fbdaf73146c1473f925d8d3303ef9d1256f | /tests/logic_adapter_tests/test_data_cache.py | 007497cde3d393ad5e93ca65c3daac80d9cfd547 | [
"BSD-3-Clause"
] | permissive | korymath/ChatterBot | b1a3b2700d4eefbbc5a3460e174dd9d539131902 | b517e696e016b6c2fae4b5326029b16d45ee6471 | refs/heads/master | 2021-01-15T09:27:41.970135 | 2016-04-08T05:21:35 | 2016-04-08T05:21:35 | 55,752,170 | 1 | 0 | null | 2016-04-08T05:18:31 | 2016-04-08T05:18:31 | null | UTF-8 | Python | false | false | 2,270 | py | from unittest import TestCase
from chatterbot import ChatBot
from chatterbot.adapters.logic import LogicAdapter
from chatterbot.conversation import Statement
import os
class DummyMutatorLogicAdapter(LogicAdapter):
"""
This is a dummy class designed to modify a
the resulting statement before it is returned.
"""
def process(self, statement):
statement.add_extra_data("pos_tags", "NN")
self.context.storage.update(statement)
return 1, statement
class DataCachingTests(TestCase):
def setUp(self):
self.test_data_directory = 'test_data'
self.test_database_name = self.random_string() + ".db"
if not os.path.exists(self.test_data_directory):
os.makedirs(self.test_data_directory)
database_path = os.path.join(
self.test_data_directory,
self.test_database_name
)
self.chatbot = ChatBot(
"Test Bot",
io_adapter="chatterbot.adapters.io.NoOutputAdapter",
logic_adapter="tests.logic_adapter_tests.test_data_cache.DummyMutatorLogicAdapter",
database=database_path
)
self.chatbot.train([
"Hello",
"How are you?"
])
def random_string(self, start=0, end=9000):
"""
Generate a string based on a random number.
"""
from random import randint
return str(randint(start, end))
def remove_data(self):
import shutil
if os.path.exists(self.test_data_directory):
shutil.rmtree(self.test_data_directory)
def tearDown(self):
"""
Remove the test database.
"""
self.chatbot.storage.drop()
self.remove_data()
def test_additional_attributes_saved(self):
"""
Test that an additional data attribute can be added to the statement
and that this attribute is saved.
"""
response = self.chatbot.get_response("Hello")
found_statement = self.chatbot.storage.find("Hello")
self.assertIsNotNone(found_statement)
self.assertIn("pos_tags", found_statement.serialize())
self.assertEqual(
"NN",
found_statement.serialize()["pos_tags"]
)
| [
"gunthercx@gmail.com"
] | gunthercx@gmail.com |
709e78c2b8bc7044a4039b9309ee131eb6a4c2bf | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/2561.py | 5c5cf379a6e3f42b99e7e7d7cedcccfa7f4e7302 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 848 | py | def getint ():
return int(raw_input())
def printCase(c, s):
print "Case #" + str(c) + ": " + str(s)
def intersection (list1, list2):
first = set (list1)
second = set (list2)
return list(first.intersection(second))
def getPossibleCards (rows1, choice1, rows2, choice2):
firstpos = rows1[(choice1 - 1) * 4 : (choice1) * 4];
return intersection(firstpos, rows2[(choice2 - 1) * 4 : (choice2) * 4]);
for i in range(getint()):
a1 = getint();
rows1 = raw_input() + " " + raw_input() + " " + raw_input() + " " + raw_input()
a2 = getint();
rows2 = raw_input() + " " + raw_input() + " " + raw_input() + " " + raw_input()
pcards = getPossibleCards(rows1.split(" "), a1, rows2.split(" "), a2)
if len(pcards) == 0:
printCase(i+1,"Volunteer cheated!")
elif len(pcards) == 1:
printCase(i+1,pcards[0])
else:
printCase(i+1,"Bad magician!") | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
0f8f8c2a132dc4b5f32f59e3caeec2ca41fa62fd | 48894ae68f0234e263d325470178d67ab313c73e | /pm/pmwriter/utils.py | 3d645751e738fe862b42f2f2f4e85fd3d1f828ce | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 2,347 | py | ## -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## Various utilities
##----------------------------------------------------------------------
## Copyright (C) 2007-2014 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import cPickle as pickle
HAS_CPICKLE = True
except:
import pickle
HAS_CPICKLE = False
## Safe unpickler
##
## SafeUnpickler refuses to instantiate anything outside the PICKLE_SAFE
## whitelist, which blocks the classic pickle arbitrary-code-execution
## vector.  Two variants are required: cPickle.Unpickler cannot be
## subclassed, so its class-resolution hook is installed through the
## ``find_global`` attribute instead of overriding ``find_class``.
import sys  # FIX: sys.modules is used below but sys was never imported
if HAS_CPICKLE:
    class SafeUnpickler(object):
        # module name -> set of class names that may be unpickled
        PICKLE_SAFE = {
            "copy_reg": set(["_reconstructor"]),
            "__builtin__": set(["object"]),
        }

        @classmethod
        def find_class(cls, module, name):
            """Resolve ``module.name`` only when whitelisted, else raise
            pickle.UnpicklingError."""
            if not module in cls.PICKLE_SAFE:
                raise pickle.UnpicklingError(
                    "Attempting to unpickle unsafe module %s" % module)
            __import__(module)
            mod = sys.modules[module]
            if not name in cls.PICKLE_SAFE[module]:
                raise pickle.UnpicklingError(
                    "Attempting to unpickle unsafe class %s" % name)
            return getattr(mod, name)

        @classmethod
        def loads(cls, pickle_string):
            """Unpickle *pickle_string*, restricted to whitelisted classes."""
            pickle_obj = pickle.Unpickler(StringIO(pickle_string))
            # cPickle exposes the resolution hook as a plain attribute.
            pickle_obj.find_global = cls.find_class
            return pickle_obj.load()
else:
    class SafeUnpickler(pickle.Unpickler):
        # module name -> set of class names that may be unpickled
        PICKLE_SAFE = {
            "copy_reg": set(["_reconstructor"]),
            "__builtin__": set(["object"]),
        }

        def find_class(self, module, name):
            """Resolve ``module.name`` only when whitelisted, else raise
            pickle.UnpicklingError."""
            if not module in self.PICKLE_SAFE:
                raise pickle.UnpicklingError(
                    "Attempting to unpickle unsafe module %s" % module)
            __import__(module)
            mod = sys.modules[module]
            if not name in self.PICKLE_SAFE[module]:
                raise pickle.UnpicklingError(
                    "Attempting to unpickle unsafe class %s" % name)
            return getattr(mod, name)

        @classmethod
        def loads(cls, pickle_string):
            """Unpickle *pickle_string*, restricted to whitelisted classes."""
            return cls(StringIO(pickle_string)).load()
def get_unpickler(insecure=False):
    """Return an unpickler implementation.

    The raw ``pickle`` module when *insecure* is true (caller trusts the
    data), otherwise the whitelisting ``SafeUnpickler``.
    """
    return pickle if insecure else SafeUnpickler
| [
"dv@nocproject.org"
] | dv@nocproject.org |
e5981c44bf76ad309ffd21b778e24f0e1ed246a9 | ec00584ab288267a7cf46c5cd4f76bbec1c70a6b | /interview-preparation/main_.py | 278fa19f1768df79f357dc82cd5c91b575a8ce5a | [] | no_license | rahuldbhadange/Python | b4cc806ff23953389c9507f43d817b3815260e19 | 7e162117f1acc12537c7eeb36d6983d804122ff3 | refs/heads/master | 2021-06-23T05:04:20.053777 | 2020-01-28T10:34:28 | 2020-01-28T10:34:28 | 217,307,612 | 0 | 0 | null | 2021-06-10T22:44:11 | 2019-10-24T13:35:42 | Python | UTF-8 | Python | false | false | 7,787 | py | #####################################
# Breadth First Search / Flood fill
# Davis MT
# 28.01.2018
#####################################
import turtle # import turtle library
import time
import sys
from collections import deque
# Screen setup: a black 1300x700 window that hosts the maze animation.
wn = turtle.Screen() # define the turtle screen
wn.bgcolor("black") # set the background colour
wn.title("A BFS Maze Solving Program")
wn.setup(1300,700) # setup the dimensions of the working window
class Maze(turtle.Turtle):
    """White square sprite used to stamp the maze walls."""

    def __init__(self):
        turtle.Turtle.__init__(self)
        # Stamp-only sprite: lift the pen and disable animation delay.
        self.penup()
        self.speed(0)
        self.shape("square")
        self.color("white")
class Green(turtle.Turtle):
    """Green square sprite that marks explored cells and the finish."""

    def __init__(self):
        turtle.Turtle.__init__(self)
        # Stamp-only sprite: lift the pen and disable animation delay.
        self.penup()
        self.speed(0)
        self.shape("square")
        self.color("green")
class Blue(turtle.Turtle):
    """Blue square sprite (used by the commented-out frontier display)."""

    def __init__(self):
        turtle.Turtle.__init__(self)
        # Stamp-only sprite: lift the pen and disable animation delay.
        self.penup()
        self.speed(0)
        self.shape("square")
        self.color("blue")
class Red(turtle.Turtle):
    """Red square sprite that marks the maze start position."""

    def __init__(self):
        turtle.Turtle.__init__(self)
        # Stamp-only sprite: lift the pen and disable animation delay.
        self.penup()
        self.speed(0)
        self.shape("square")
        self.color("red")
class Yellow(turtle.Turtle):
    """Yellow square sprite that draws the final solved route."""

    def __init__(self):
        turtle.Turtle.__init__(self)
        # Stamp-only sprite: lift the pen and disable animation delay.
        self.penup()
        self.speed(0)
        self.shape("square")
        self.color("yellow")
# grid = [
# "+++++++++++++++",
# "+s+ + +e+",
# "+ +++++ +++ + +",
# "+ + + + +",
# "+ + +++ + + +",
# "+ + + + + + +",
# "+ + + + + +",
# "+++++ + + + +",
# "+ + + +",
# "+++++++++++++++",
# ]
# grid = [
# "+++++++++",
# "+ ++s++++",
# "+ ++ ++++",
# "+ ++ ++++",
# "+ ++++",
# "++++ ++++",
# "++++ ++++",
# "+ e+",
# "+++++++++",
# ]
# grid = [
# "+++++++++++++++",
# "+ +",
# "+ +",
# "+ +",
# "+ e +",
# "+ +",
# "+ +",
# "+ +",
# "+ s +",
# "+++++++++++++++",
# ]
# Maze layout, one string per row: '+' = wall, ' ' = open path,
# 's' = start cell, 'e' = exit cell.  Each character maps to a 24-px
# square on screen in setup_maze().
grid = [
"+++++++++++++++++++++++++++++++++++++++++++++++++++",
"+ + +",
"+ ++++++++++ +++++++++++++ +++++++ ++++++++++++",
"+s + + ++ +",
"+ +++++++ +++++++++++++ +++++++++++++++++++++ +",
"+ + + + + + +++ +",
"+ + + + + + ++++ + + +++++++++++++ +++ +",
"+ + + + + + + + + + + +",
"+ + ++++ + ++++++++++ + + ++++ + + ++ +",
"+ + + + + + + + ++ ++",
"+ ++++ + +++++++ ++++++++ +++++++++++++ ++ ++",
"+ + + + + ++ +",
"++++ + ++++++++++ +++++++++++ ++++++++++ +++ +",
"+ + + + + + + +++ +",
"+ + ++++ +++++++++++++ + ++++ + + + ++ +",
"+ + + + + + + + + + ++ ++",
"+ + + +++++++ ++++ + + + ++++++++++ ++ ++",
"+ + + + ++ ++",
"+ ++++++ + + + + +++ +++ ++",
"+ ++++++ ++++++ +++++++++ ++ ++ ++++++++++ ++",
"+ + + +++ + +++++++++ ++ +++++++ + ++",
"+ ++++ ++++ +++ + +++ +++ ++ ++ ++ ++ + ++",
"+ ++++ + + +++ +++ ++ ++++++++ ++ ++ ++ ++",
"+ ++ +++++++e+++ ++ ++ +++++++",
"+++++++++++++++++++++++++++++++++++++++++++++++++++",
]
def setup_maze(grid):
    """Draw *grid* on screen and record walls, path cells and endpoints.

    '+' cells are stamped as white wall squares, ' ' and 'e' are collected
    as walkable path cells, 'e' marks the exit (stamped purple) and 's'
    places the red start sprite.  Fills the module-level walls/path lists
    and the start_x/start_y/end_x/end_y globals.
    """
    global start_x, start_y, end_x, end_y
    for row_idx, row in enumerate(grid):
        for col_idx, cell in enumerate(row):
            # Map grid coordinates to screen pixels: 24 px per cell,
            # origin offset so the maze is centred in the window.
            sx = -588 + (col_idx * 24)
            sy = 288 - (row_idx * 24)
            if cell == "+":
                maze.goto(sx, sy)
                maze.stamp()
                walls.append((sx, sy))
            if cell in (" ", "e"):
                path.append((sx, sy))
            if cell == "e":
                # Stamp the exit purple, then restore the sprite colour.
                green.color("purple")
                green.goto(sx, sy)
                end_x, end_y = sx, sy
                green.stamp()
                green.color("green")
            if cell == "s":
                start_x, start_y = sx, sy
                red.goto(sx, sy)
def endProgram():
    # Wait for a mouse click on the window, then terminate the process.
    # NOTE(review): defined but never called in this file.
    wn.exitonclick()
    sys.exit()
def search(x, y):
    """Breadth-first search over the maze path cells starting at (x, y).

    Pops cells FIFO from the global ``frontier`` deque, records a
    cell -> predecessor link in the global ``solution`` dict for every
    newly reached neighbour (so backRoute() can walk the route backwards),
    and stamps each explored cell green.  Only cells present in the global
    ``path`` list are walkable; ``visited`` prevents re-queueing.
    """
    frontier.append((x, y))
    solution[x, y] = x, y  # the start cell is its own predecessor
    while len(frontier) > 0:  # exit when the frontier queue is empty
        time.sleep(0)  # no-op pacing hook; raise to slow the animation
        x, y = frontier.popleft()
        # Probe the four neighbours in the same order as the original
        # hand-unrolled checks: left, down, right, up (24 px per cell).
        # (The unrolled branches were identical; a stray debug
        # print(solution) in the loop has been removed.)
        for dx, dy in ((-24, 0), (0, -24), (24, 0), (0, 24)):
            cell = (x + dx, y + dy)
            if cell in path and cell not in visited:
                solution[cell] = x, y  # backtracking link to current cell
                frontier.append(cell)
                visited.add(cell)
        # Mark the cell we just expanded.
        green.goto(x, y)
        green.stamp()
def backRoute(x, y):
    # Walk the predecessor chain recorded in the global ``solution`` dict
    # from the end cell (x, y) back to the start, stamping a yellow square
    # on every cell of the final route.
    yellow.goto(x, y)
    yellow.stamp()
    while (x, y) != (start_x, start_y): # stop loop when current cells == start cell
        yellow.goto(solution[x, y]) # move the yellow sprite to the key value of solution ()
        yellow.stamp()
        x, y = solution[x, y] # "key value" now becomes the new key
# set up classes: one stamp-only turtle sprite per role
maze = Maze()
red = Red()
blue = Blue()
green = Green()
yellow = Yellow()
# setup lists / collections used by the BFS
walls = []            # wall cell coordinates (drawn, not searched)
path = []             # walkable cell coordinates
visited = set()       # cells already queued, to avoid re-expansion
frontier = deque()    # FIFO queue of cells to expand (BFS order)
solution = {} # solution dictionary: cell -> predecessor cell
# main program starts here ####
setup_maze(grid)
search(start_x,start_y)
backRoute(end_x, end_y)
wn.exitonclick()
| [
"46024570+rahuldbhadange@users.noreply.github.com"
] | 46024570+rahuldbhadange@users.noreply.github.com |
4827d7f523d7fa49654a251a821cafa30ee7bb77 | 820acfd783cc9752c51e2899b87994bb3044a895 | /tests/commands/source/test_xml.py | 930330c424cae5be04b493044dbc4ede77ddc33e | [] | no_license | trimailov/spinta | ea495a9d4ebfbd8c13c644691824edf967c90091 | c242ea8013a048a5cfd2bfc4dd8687d37bfc3ef7 | refs/heads/master | 2020-05-03T21:56:25.040555 | 2019-04-09T11:19:43 | 2019-04-09T11:19:43 | 178,834,328 | 0 | 0 | null | 2019-04-01T09:54:27 | 2019-04-01T09:54:27 | null | UTF-8 | Python | false | false | 1,009 | py | import operator
import pathlib
from responses import GET
from spinta.utils.itertools import consume
def test_xml(store, responses):
    """Pull XML data through a mocked HTTP endpoint and verify the result.

    Uses the ``responses`` fixture to serve the repository's data/data.xml
    fixture file, then checks that the 'xml' source yields 8 records and
    that the first two 'tenure' rows (sorted by id) match exactly.
    """
    # Serve the local XML fixture at the URL the 'xml' source is
    # configured to fetch (configuration lives outside this file).
    responses.add(
        GET, 'http://example.com/data.xml',
        status=200, content_type='application/xml; charset=utf-8',
        body=(pathlib.Path(__file__).parents[2] / 'data/data.xml').read_bytes(),
        stream=True,
    )
    # consume() drains the pull generator and returns the record count.
    assert consume(store.pull('xml')) == 8
    # Sort by id for a deterministic order, compare the first two records.
    assert sorted(store.getall('tenure', {'source': 'xml'}), key=operator.itemgetter('id'))[:2] == [
        {
            'type': 'tenure/:source/xml',
            'id': '11a0764da48b674ce0c09982e7c43002b510d5b5',
            'title': '1996–2000 metų kadencija',
            'since': '1996-11-25',
            'until': '2000-10-18',
        },
        {
            'type': 'tenure/:source/xml',
            'id': '1cc7ac9d26603972f6c471a284ff37b9868854d9',
            'title': '2016–2020 metų kadencija',
            'since': '2016-11-14',
            'until': '',
        },
    ]
| [
"sirexas@gmail.com"
] | sirexas@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.