blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2611064c93404e81e4be7e68a57d13a328ad1024 | 948d3b8c03e2fecc4f852cd8b4120e1b3378bfaf | /API/PYTHON/django/blogTest/blogTest/settings.py | e3f303742bd651ae7b04c05744ae3c35b65c17f0 | [] | no_license | ezhuo/ezhuo.github.io | e370abb4bfbbfcc5750a5f9fafa2b995bb1d7d48 | 977f3ecdd5dee4eb0f10a42572aaecb335145313 | refs/heads/master | 2021-05-05T20:13:35.446537 | 2019-01-26T08:39:26 | 2019-01-26T08:39:26 | 115,300,126 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | """
Django settings for blogTest project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wua53do$*nz_0wof1gyk(1)=^+$9*_puhmz#s!e54*hpddki#('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'news'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogTest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blogTest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'django-blog',
'USER': 'admin',
'PASSWORD': 'dxinfo*dxinfo',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"hi371@qq.com"
] | hi371@qq.com |
9f38297ffcb415afd27671f80d18b3c3ccc487db | cb57a9ea4622b94207d12ea90eab9dd5b13e9e29 | /lc/python/1768_merge_strings_alternately.py | 32222174bc34d1567b034641491b8b2e157d8c7a | [] | no_license | boknowswiki/mytraning | b59585e1e255a7a47c2b28bf2e591aef4af2f09a | 5e2f6ceacf5dec8260ce87e9a5f4e28e86ceba7a | refs/heads/master | 2023-08-16T03:28:51.881848 | 2023-08-10T04:28:54 | 2023-08-10T04:28:54 | 124,834,433 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | # string and two pointers
# time O(max(m,n))
# space O(1)
class Solution:
    """First take: zip over the common prefix, then append the leftovers."""

    def mergeAlternately(self, word1: str, word2: str) -> str:
        """Interleave word1 and word2 one character at a time; the surplus
        of the longer word is appended unchanged at the end."""
        merged = []
        # zip stops at the shorter word, so each pair is safe to emit.
        for a, b in zip(word1, word2):
            merged.append(a)
            merged.append(b)
        # Whichever word is longer contributes its unpaired tail.
        common = min(len(word1), len(word2))
        merged.append(word1[common:])
        merged.append(word2[common:])
        return "".join(merged)
class Solution(object):
    """Second take: single index with one-character slices.

    Out-of-range slices yield empty strings, so no bounds checks are needed.
    """

    def mergeAlternately(self, word1, word2):
        """Return word1 and word2 interleaved character by character."""
        pieces = []
        # Walk far enough to exhaust the longer of the two words.
        for idx in range(max(len(word1), len(word2))):
            pieces.extend(word1[idx:idx + 1])
            pieces.extend(word2[idx:idx + 1])
        return "".join(pieces)
class Solution(object):
    """Third take: one while loop with an explicit bounds check per word."""

    def mergeAlternately(self, word1, word2):
        """Return word1 and word2 interleaved character by character."""
        out = []
        pos = 0
        # Keep going while either word still has characters left.
        while pos < len(word1) or pos < len(word2):
            if pos < len(word1):
                out.append(word1[pos])
            if pos < len(word2):
                out.append(word2[pos])
            pos += 1
        return "".join(out)
| [
"noreply@github.com"
] | noreply@github.com |
4018b0516499f17330c42c31ae7da61a6d32fc32 | 32ecfb8792c3ddbf44263e69d5ab4432f4072a8c | /rootfs_wifi/root/lwm2m.py | 5aafd5aa2fae6cbc7a6b05c66835afa4de422ca6 | [] | no_license | scw-92/151_wifi_rootfs | 314ac7f22fa06e3d348143b26025178870c19f6f | 7575fb33d6623cd2fbee56366671a710770f7db6 | refs/heads/master | 2020-04-17T07:24:20.660142 | 2019-01-18T08:16:06 | 2019-01-18T08:16:06 | 166,367,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,238 | py | # -*- coding: utf-8 -*-
import serial
import os
from time import sleep
class IotLwm2m(object):
    """Attach an IoT endpoint to the OneNET platform over LwM2M.

    Drives an NB-IoT module through a serial port using the MIPL AT command
    set: boot/registration, then answering the platform's downlink
    observe / discover / read requests and result notifications.
    """
    def __init__(self, name="aplex",serial_name = "/dev/ttyUSB0" ):
        self.name = name # company name
        self.Power_on_init_list = ["at+miplver?","AT+MIPLCREATE","AT+MIPLADDOBJ=0,3303,2,11,6,1","AT+MIPLADDOBJ=0,3306,1,1,5,0","AT+MIPLOPEN=0,3600,30"] # boot sequence: version query, instance create, add objects 3303/3306, open a 3600s registration
        self.ack_read_list = [] # message ids of read requests sent down by the OneNet platform
        self.ack_write_list = [] # message ids of write requests sent down by the OneNet platform
        self.ack_execture_list = [] # message ids of execute requests sent down by the OneNet platform
        self.ack_look_list = [] # message ids of observe requests sent down by the OneNet platform
        self.serial_name = serial_name # device node of the serial port to the module
        self.serial = "" # pyserial handle; assigned by setup_serial()
    def __str__(self):
        # Human-readable description (runtime string kept in original Chinese).
        return "%s 公司的提供的iot通过lwm2m协议接入Onenet的方法" % (self.name, )
    def power_iot(self):
        """Power-cycle the module by pulsing GPIO 19 twice, then wait for boot."""
        #os.system('echo 19 > /sys/class/gpio/export')
        #os.system('echo out > /sys/class/gpio/gpio19/direction')
        os.system('echo 1 > /sys/class/gpio/gpio19/value')
        os.system('sleep 1')
        os.system('echo 0 > /sys/class/gpio/gpio19/value')
        sleep (2)
        os.system('echo 1 > /sys/class/gpio/gpio19/value')
        os.system('sleep 1')
        os.system('echo 0 > /sys/class/gpio/gpio19/value')
        sleep(8)
    def setup_serial(self,speed = 9600,readtimeout = 1):
        """Open the serial link to the module and report whether it succeeded.

        :param speed: baud rate
        :param readtimeout: read timeout in seconds
        """
        self.serial = serial.Serial(self.serial_name, speed,timeout = readtimeout)
        if self.serial.isOpen():
            print("open success")
        else:
            print("open failed")
    def auto_connect(self):
        """Send each power-on AT command in turn, echoing the module's replies."""
        # NOTE(review): the loop variable shadows the builtin `list`.
        for list in self.Power_on_init_list:
            cmd_iot = list + "\r\n"
            self.serial.write(cmd_iot.encode())
            data = self.serial.read_all().decode()
            print (data)
            sleep (1)
    def ack_iot(self): # answer one downlink message from the OneNet platform
        """Read whatever the module buffered and answer the platform request
        it contains (observe / discover / read, plus write/execute/observe
        result notifications). Unknown payloads are just printed."""
        sleep(0.5)
        data = self.serial.read_all().decode()
        send_data = ""
        #print("recv:"+data)
        # ack_data[1] is the message id of the current request from the cloud
        if "+MIPLOBSERVE:" in data: # platform asks whether this instance exists
            ack_data = data.split(',')
            #print(ack_data)
            send_data = "AT+MIPLOBSERVERSP=0,%s,1\r\n" % (ack_data[1],)
            self.serial.write(send_data.encode())
            sleep(0.5)
        elif "+MIPLDISCOVER:" in data: # platform asks for this object type's resources
            ack_data = data.split(',')
            # example payload:
            #'+MIPLDISCOVER: 0',
            #'61245',
            #'3303\r\n'
            ack_data[2] = ack_data[2][0:4]
            #print(ack_data)
            # The resource layout of each object type is hard-coded here from the
            # object documentation; object 3303 is the worked example.
            if ack_data[2] == "3303":
                send_data = 'AT+MIPLDISCOVERRSP=0,%s,1,34,"5700;5701;5601;5602;5603;5604;5605"\r\n' % (ack_data[1],)
            elif ack_data[2] == "3306":
                send_data = 'AT+MIPLDISCOVERRSP=0,%s,1,24,"5850;5851;5852;5853;5750"\r\n' % (ack_data[1],)
            #print(send_data)
            self.serial.write(send_data.encode())
            sleep(0.5)
        elif "+MIPLREAD:" in data: # platform requests a resource's current value
            print("recv:"+data)
            ack_data = data.split(',')
            # example payload:
            #recv:+MIPLREAD: 0,
            #4932,
            #3303,
            #0,
            #5700
            print(ack_data)
            ack_data[4] = ack_data[4][0:4]
            print (ack_data[2],ack_data[4])
            # Answer with canned demo values, per object id / resource id.
            if ack_data[2] == "3303" :
                if ack_data[4] == "5700" :
                    send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,20.123,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
                elif ack_data[4] == "5701" :
                    send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,1,5,aplex,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
                elif ack_data[4] == "5601" :
                    send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,20.135,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
                elif ack_data[4] == "5602" :
                    send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,80.123,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
                elif ack_data[4] == "5603" :
                    send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,44.55,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
                elif ack_data[4] == "5604" :
                    send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,4,4,55.66,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
                elif ack_data[4] == "5605" :
                    send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,2,3,zwd,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
            if ack_data[2] == "3306" :
                if ack_data[4] == "5853" :
                    send_data = 'AT+MIPLREADRSP=0,%s,1,%s,%s,%s,1,5,aplex,0,0\r\n' % (ack_data[1],ack_data[2],ack_data[3],ack_data[4])
            self.serial.write(send_data.encode())
            sleep(0.5)
        elif "+MIPLWRITERSP:" in data: # result notification for a write operation
            print("recv:"+data)
            ack_data = data.split(',')
            send_data = 'AT+MIPLREADRSP=0,%s,1,3303,0,5700,4,4,20.123,0,0' % (ack_data[1],)
            print(send_data)
            self.serial.write(send_data.encode())
        elif "+MIPLEXECUTERSP:" in data: # result notification for an execute operation
            print("recv:"+data)
            ack_data = data.split(',')
            send_data = 'AT+MIPLREADRSP=0,%s,1,3303,0,5700,4,4,20.123,0,0' % (ack_data[1],)
            print(send_data)
            self.serial.write(send_data.encode())
        elif "+MIPLOBSERVERSP:" in data: # notification whether the observe command was valid
            print("recv:"+data)
            ack_data = data.split(',')
            send_data = 'AT+MIPLREADRSP=0,%s,1,3303,0,5700,4,4,20.123,0,0' % (ack_data[1],)
            print(send_data)
            self.serial.write(send_data.encode())
        else:
            print(data)
    # Report the object's internal structure to the cloud platform
    # (dead code preserved below in the original string literal).
    '''
    data = serial.read_all().decode()
    print(observe_list[0])
    send_data = "AT+MIPLNOTIFY=0,%s,3303,0,5700,4,2,34,0,0,0\r\n" % (observe_list[0],)
    print(send_data)
    serial.write(send_data.encode())
    data = serial.read_all().decode()
    '''
if __name__ == '__main__':
    # Demo driver: power the module, open the serial port, run the boot
    # AT-command sequence, then service platform requests forever.
    iot_lwm2m = IotLwm2m()
    iot_lwm2m.power_iot()
    iot_lwm2m.setup_serial()
    iot_lwm2m.auto_connect()
    while True:
        iot_lwm2m.ack_iot()
"1142344150@qq.com"
] | 1142344150@qq.com |
754f3df17792c7911d0f110efed7a7832bb5de48 | f4b2d9a0de1f7a26a8fd5afe25446e62dfa0fdb5 | /Python/base_algorithm/base_sum.py | b3db43265b69011967ccd5ef53c5613268a1b43e | [] | no_license | Alexanderklau/LeetCode | e675425cca0b4e2e6f94d8c1ce6df92bbec32ac7 | 6090fa602ab29aef40d41661e473058eaaec490d | refs/heads/master | 2021-06-23T17:41:53.309882 | 2020-12-01T14:36:00 | 2020-12-01T14:36:00 | 148,267,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | # coding: utf-8
__author__ = "lau.wenbo"
"""
高斯解法
"""
def sum_of_n(n):
the_sum = 0
for i in range(1, n+1):
the_sum = the_sum + i
return the_sum
print(sum_of_n(100)) | [
"429095816@qq.com"
] | 429095816@qq.com |
a5e90c758d9db85ca4fb26d6193b20b07ffc150c | b75c24fe09dfcf2ab544f4209e282c6bd43b0a23 | /salalql/salalql/schema.py | 744ee165d584c34b787666cbc4beef802bb45bde | [] | no_license | Majdi-evet/GraphQL | 8786f5bd00f74f68ac5d7ab083dae0594eaff40f | 936c18bde356238be3094dc8fc0d0e2e018cb657 | refs/heads/master | 2021-06-16T23:16:08.418710 | 2019-09-10T12:22:39 | 2019-09-10T12:22:39 | 207,552,320 | 0 | 0 | null | 2021-06-09T18:25:07 | 2019-09-10T12:23:18 | Python | UTF-8 | Python | false | false | 273 | py | import graphene
import salgql.schema
class Query(salgql.schema.Query, graphene.ObjectType):
    # Root query type: inherits all query fields from the salgql app schema.
    pass
# NOTE(review): this query-only schema is immediately overwritten by the
# assignment below — confirm whether this first assignment can be removed.
schema = graphene.Schema(query=Query)
class Mutation(salgql.schema.Mutation, graphene.ObjectType):
    # Root mutation type: inherits all mutations from the salgql app schema.
    pass
# The project-wide schema actually exported by this module.
schema = graphene.Schema(query=Query, mutation=Mutation)
"majdi.mohammad.git@gmail.com"
] | majdi.mohammad.git@gmail.com |
a671c11a8cb33a56a991823ae0f4142848d9c74a | 003718246d9bf247b76242ce419adf430f9da3f6 | /VMtranslator/CodeWriter.py | 43152f7c3f48137d04f2e145fee0d9b1a968a199 | [] | no_license | talashaked/Nand | 029a3ecca29df2e2aeb49b62bca5fc84f09288d7 | 1b7d37deb024939e27be86617b942f55169ec743 | refs/heads/main | 2023-03-26T06:30:10.265729 | 2021-03-24T10:45:03 | 2021-03-24T10:45:03 | 351,038,856 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,751 | py | import os
class CodeWriter:
    """Translates parsed VM commands into Hack assembly written to a .asm file
    (nand2tetris VM translator back end)."""
    def __init__(self, ofileStr):
        # Output stream for the generated Hack assembly.
        self.ofile = open(ofileStr,'w')
        # Dispatch order matters: writeArithmetic indexes into this list.
        self.ArithmeticCommand = ['add', 'sub', 'neg', 'eq', 'gt', 'lt', 'and', 'or', 'not']
        self.count =0 ## for the labels in the arithmetic functions
        self.countCall = 0  # makes each call's return label unique
        self.curFuncName = ""  # VM function currently being translated (scopes labels)
    def setFileName(self, fileName):
        """
        writes the current filename (as a comment) in the output file and
        remembers its basename for naming static variables
        :param fileName: path of the .vm file being translated
        :return: None
        """
        self.ofile.write("//"+fileName+"\n")
        self.cur_fileName = os.path.basename(fileName).split(".")[0]
    def writeArithmetic(self, s):
        """
        writes the matching arithmetic command in the output.
        eq/gt/lt first branch on the operands' signs so the subtraction used
        for the comparison cannot overflow; -1 is pushed for true, 0 for false.
        :param s: the arithmetic command given
        :return: None
        """
        count = self.count
        # add: pop two values, push their sum
        if s == self.ArithmeticCommand[0]:
            self.ofile.write("@SP\nA=M\nA=A-1\nD=M\nA=A-1\nM=D+M\n@SP\nM=M-1\n")
        # sub: pop y then x, push x - y
        elif s == self.ArithmeticCommand[1]:
            self.ofile.write("@SP\nA=M\nA=A-1\nD=M\nA=A-1\nM=M-D\n@SP\nM=M-1\n")
        # neg: arithmetic negation of the stack top, in place
        elif s==self.ArithmeticCommand[2]:
            self.ofile.write("@SP\nA=M\nA=A-1\nM=-M\n")
        # eq
        elif s==self.ArithmeticCommand[3]:
            self.ofile.write("@SP\nA=M-1\nA=A-1\nD=M\n@FIRSTNONNEG"+str(count)+"\nD;JGE\n@FIRSTNEG"+str(count)+"\n0;JMP\n(FIRSTNONNEG"+str(count)+")\n"
                             "@SP\nA=M-1\nD=M\n@SAMESIGN"+str(count)+"\nD;JGE\n@SECONDNEGFIRSTNONNEG"+str(count)+"\n0;JMP\n(FIRSTNEG"+str(count)+")\n@SP\n"
                             "A=M-1\nD=M\n@SECONDNONNEGFIRSTNEG"+str(count)+"\nD;JGE\n@SAMESIGN"+str(count)+"\n0;JMP\n(SAMESIGN"+str(count)+")\n@SP\nA=M-1\n"
                             "D=M\nA=A-1\nD=M-D\n@TEMP\nM=-1\n@FINISH"+str(count)+"\nD;JEQ\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n"
                             "(SECONDNEGFIRSTNONNEG"+str(count)+")\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n(SECONDNONNEGFIRSTNEG"+str(count)+")\n@TEMP\nM=0\n"
                             "@FINISH"+str(count)+"\n0;JMP\n(FINISH"+str(count)+")\n@TEMP\nD=M\n@SP\nA=M\nA=A-1\nA=A-1\nM=D\n@SP\nM=M-1\n")
        # gt
        elif s==self.ArithmeticCommand[4]:
            self.ofile.write("@SP\nA=M-1\nA=A-1\nD=M\n@FIRSTNONNEG"+str(count)+"\nD;JGE\n@FIRSTNEG"+str(count)+"\n0;JMP\n(FIRSTNONNEG"+str(count)+")\n"
                             "@SP\nA=M-1\nD=M\n@SAMESIGN"+str(count)+"\nD;JGE\n@SECONDNEGFIRSTNONNEG"+str(count)+"\n0;JMP\n(FIRSTNEG"+str(count)+")\n@SP\n"
                             "A=M-1\nD=M\n@SECONDNONNEGFIRSTNEG"+str(count)+"\nD;JGE\n@SAMESIGN"+str(count)+"\n0;JMP\n(SAMESIGN"+str(count)+")\n@SP\nA=M-1\n"
                             "D=M\nA=A-1\nD=M-D\n@TEMP\nM=-1\n@FINISH"+str(count)+"\nD;JGT\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n"
                             "(SECONDNEGFIRSTNONNEG"+str(count)+")\n@TEMP\nM=-1\n@FINISH"+str(count)+"\n0;JMP\n(SECONDNONNEGFIRSTNEG"+str(count)+")\n@TEMP\nM=0\n"
                             "@FINISH"+str(count)+"\n0;JMP\n(FINISH"+str(count)+")\n@TEMP\nD=M\n@SP\nA=M\nA=A-1\nA=A-1\nM=D\n@SP\nM=M-1\n")
        # lt
        elif s == self.ArithmeticCommand[5]:
            self.ofile.write("@SP\nA=M-1\nA=A-1\nD=M\n@FIRSTNONNEG"+str(count)+"\nD;JGE\n@FIRSTNEG"+str(count)+"\n0;JMP\n(FIRSTNONNEG"+str(count)+")\n"
                             "@SP\nA=M-1\nD=M\n@SAMESIGN"+str(count)+"\nD;JGE\n@SECONDNEGFIRSTNONNEG"+str(count)+"\n0;JMP\n(FIRSTNEG"+str(count)+")\n@SP\n"
                             "A=M-1\nD=M\n@SECONDNONNEGFIRSTNEG"+str(count)+"\nD;JGE\n@SAMESIGN"+str(count)+"\n0;JMP\n(SAMESIGN"+str(count)+")\n@SP\nA=M-1\n"
                             "D=M\nA=A-1\nD=M-D\n@TEMP\nM=-1\n@FINISH"+str(count)+"\nD;JLT\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n"
                             "(SECONDNEGFIRSTNONNEG"+str(count)+")\n@TEMP\nM=0\n@FINISH"+str(count)+"\n0;JMP\n(SECONDNONNEGFIRSTNEG"+str(count)+")\n@TEMP\nM=-1\n"
                             "@FINISH"+str(count)+"\n0;JMP\n(FINISH"+str(count)+")\n@TEMP\nD=M\n@SP\nA=M\nA=A-1\nA=A-1\nM=D\n@SP\nM=M-1\n")
        # and: bitwise AND of the two top values
        elif s==self.ArithmeticCommand[6]:
            self.ofile.write("@SP\nA=M\nA=A-1\nD=M\nA=A-1\nM=D&M\n@SP\nM=M-1\n")
        # or: bitwise OR of the two top values
        elif s ==self.ArithmeticCommand[7]:
            self.ofile.write("@SP\nA=M\nA=A-1\nD=M\nA=A-1\nM=D|M\n@SP\nM=M-1\n")
        # not: bitwise negation of the stack top, in place
        elif s==self.ArithmeticCommand[8]:
            self.ofile.write("@SP\nA=M\nA=A-1\nM=!M\n")
        # bump the counter so the next comparison gets fresh label names
        self.count+=1
    def WritePushPop(self,command, segment, index):
        """
        if the command is push or pop, this function is called and writes the code
        :param command: 'C_PUSH' or 'C_POP'
        :param segment: which memory segment to read from / write to
        :param index: which index of the segment
        :return: None
        """
        if command == 'C_PUSH':
            self.push(segment,index)
        elif command == 'C_POP' and segment != 'const':
            self.pop(segment, index)
    def pop(self,segment, index):
        """
        writes the pop command to the output file: store the stack top into
        segment[index] and decrement SP
        :param segment: target segment
        :param index: which index of the segment
        :return: None
        """
        if segment =='static':
            self.ofile.write("@SP\nA=M-1\nD=M\n@" + str(self.cur_fileName)+"."+str(index)+"\nM=D\n@SP\nM=M-1\n")
        elif segment == 'local':
            self.ofile.write("@"+str(index)+"\nD=A\n@LCL\nD=M+D\n@TMP\nM=D\n@SP\nA=M-1\nD=M\n@TMP\nA=M\nM=D\n@SP\nM=M-1\n")
        elif segment == 'argument':
            self.ofile.write("@"+str(index)+"\nD=A\n@ARG\nD=M+D\n@TMP\nM=D\n@SP\nA=M-1\nD=M\n@TMP\nA=M\nM=D\n@SP\nM=M-1\n")
        elif segment == 'this':
            self.ofile.write("@"+str(index)+"\nD=A\n@THIS\nD=M+D\n@TMP\nM=D\n@SP\nA=M-1\nD=M\n@TMP\nA=M\nM=D\n@SP\nM=M-1\n")
        elif segment == 'that':
            self.ofile.write("@"+str(index)+"\nD=A\n@THAT\nD=M+D\n@TMP\nM=D\n@SP\nA=M-1\nD=M\n@TMP\nA=M\nM=D\n@SP\nM=M-1\n")
        elif segment == 'temp':
            mem = int(index) + 5  # temp segment lives at RAM[5..12]
            # NOTE(review): "@+" emits e.g. "@+5", which is not valid Hack
            # syntax — push() below uses a plain "@"; confirm and fix.
            self.ofile.write("@SP\nA=M-1\nD=M\n@+" + str(mem) + "\nM=D\n@SP\nM=M-1\n")
        elif segment == 'pointer':
            mem = int(index) + 3  # pointer segment maps to THIS/THAT at RAM[3..4]
            # NOTE(review): same "@+" issue as the temp branch above.
            self.ofile.write("@SP\nA=M-1\nD=M\n@+" + str(mem) + "\nM=D\n@SP\nM=M-1\n")
    def push(self,segment, index):
        """
        writes the push command to the output file: load segment[index] onto
        the stack top and increment SP
        :param segment: source segment
        :param index: which index of the segment
        :return: None
        """
        if segment == 'static':
            self.ofile.write("@" + str(self.cur_fileName)+"."+str(index)+"\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'constant':
            self.ofile.write("@" + str(index) + "\nD=A\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'local':
            self.ofile.write("@" + str(index) + "\nD=A\n@LCL\nA=M+D\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'argument':
            self.ofile.write("@" + str(index) + "\nD=A\n@ARG\nA=M+D\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'this':
            self.ofile.write("@" + str(index) + "\nD=A\n@THIS\nA=M+D\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'that':
            self.ofile.write("@" + str(index) + "\nD=A\n@THAT\nA=M+D\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'temp':
            mem = int(index) + 5  # temp segment lives at RAM[5..12]
            self.ofile.write("@" + str(mem) + "\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        elif segment == 'pointer':
            mem = int(index) + 3  # pointer segment maps to THIS/THAT at RAM[3..4]
            self.ofile.write("@" + str(mem) + "\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
    def writeGoto(self, label):
        """
        writes the goto command (label is scoped by the current function name)
        :param label: the label to jump to
        :return: None
        """
        self.ofile.write("@" +self.curFuncName+"$"+label + "\n0;JMP\n")
    def writeIf(self,label):
        """
        writes the if-goto command: pop the stack, jump when the value is non-zero
        :param label: the label to jump to
        :return: None
        """
        self.ofile.write("@SP\nM=M-1\nA=M\nD=M\n@" +self.curFuncName+"$"+label + "\nD;JNE\n")
    def writeLabel(self, label):
        """
        writes the label command
        :param label: the label itself
        :return: None
        """
        self.ofile.write("("+self.curFuncName+"$"+label +")\n")
    def writeInit(self):
        """
        writes the bootstrap code: SP = 256, then call Sys.init
        :return: None
        """
        self.ofile.write("@256\nD=A\n@SP\nM=D\n")
        self.writeCall("Sys.init","0")
    def writeCall(self, functionName, numArgs):
        """
        writes the call command to the output file
        :param functionName: the func name
        :param numArgs: number of args the function expects to get
        :return: None
        """
        # push the return address, then save the caller's LCL/ARG/THIS/THAT
        self.ofile.write("@return$"+functionName+"."+str(self.countCall)+"\nD=A\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        self.ofile.write("@LCL\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        self.ofile.write("@ARG\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        self.ofile.write("@THIS\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        self.ofile.write("@THAT\nD=M\n@SP\nA=M\nM=D\n@SP\nM=M+1\n")
        # reposition ARG (= SP - numArgs - 5) and LCL (= SP) for the callee
        i = int(numArgs)+5
        self.ofile.write("@"+str(i)+"\nD=A\n@SP\nD=M-D\n@ARG\nM=D\n")
        self.ofile.write("@SP\nD=M\n@LCL\nM=D\n")
        # jump to the callee, then place the unique return label
        self.ofile.write("@"+functionName+"\n0;JMP\n")
        self.ofile.write("(return$"+functionName+"."+str(self.countCall)+")\n")
        self.countCall += 1
    def writeReturn(self):
        """
        writes the commands that should be written while 'return' is invoked
        :return: None
        """
        # FRAME = LCL; RET = *(FRAME - 5)
        self.ofile.write("@LCL\nD=M\n@FRAME\nM=D\n")
        self.ofile.write("@5\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@RET\nM=D\n")
        # *ARG = pop(); SP = ARG + 1
        self.ofile.write("@SP\nM=M-1\n@SP\nA=M\nD=M\n@ARG\nA=M\nM=D\n")
        self.ofile.write("@ARG\nD=M+1\n@SP\nM=D\n")
        # restore THAT/THIS/ARG/LCL from the caller's frame, then jump to RET
        self.ofile.write("@1\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@THAT\nM=D\n")
        self.ofile.write("@2\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@THIS\nM=D\n")
        self.ofile.write("@3\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@ARG\nM=D\n")
        self.ofile.write("@4\nD=A\n@FRAME\nD=M-D\nA=D\nD=M\n@LCL\nM=D\n")
        self.ofile.write("@RET\nA=M\n0;JMP\n")
    def writeFunction(self, f, k):
        """
        writes the function command when a function label is shown
        :param f: the function name
        :param k: number of parameters the function expects to get
        :return: None
        """
        self.ofile.write("(" + f+ ")\n")
        self.curFuncName=f
        # initialize the function's k local variables to 0
        for i in range(int(k)):
            self.push('constant', 0)
    def close(self):
        """
        closes the main output file
        :return: None
        """
        self.ofile.close()
| [
"noreply@github.com"
] | noreply@github.com |
d9363243e94bbffc5dd60c4a6cb2b671408379da | 0b8f70df4468a24e3ab18b642d47772fbb0d5f10 | /Lista1/l1q26.py | cc6a05e918d9f3b9b91a5ed429402ebbe1e44592 | [] | no_license | lucasebs/EstruturaDeDados2017.1 | e2ab55e6dfd86d7da8b196e2bd1e22f08c2af737 | 9a8f8a0cd2a881db2705e6484efd7837d0f3e947 | refs/heads/master | 2020-12-13T12:53:28.453845 | 2017-10-02T15:53:11 | 2017-10-02T15:53:11 | 95,600,142 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from l1q25 import Pessoa
# Read two people from stdin and print them back out.
# NOTE(review): `raw_input` makes this Python 2 code, yet `int(input(...))`
# on the next lines would eval the typed text under Python 2 — confirm.
pessoas = [None]*2
for i in range(0, len(pessoas)):
    # Prompt for each field of the person (prompts kept in Portuguese).
    pNome = raw_input("Primeiro Nome: ")
    sNome = raw_input("Sobrenome: ")
    cpf = int(input("CPF: "))
    anoNasc = int(input("Ano de Nascimento: "))
    # Build the Pessoa object field by field and store it in the list.
    pessoaTemp = Pessoa()
    pessoaTemp.primeiroNome = pNome
    pessoaTemp.sobrenome = sNome
    pessoaTemp.cpf = cpf
    pessoaTemp.anoNascimento = anoNasc
    pessoas[i] = pessoaTemp
# Print every person with a numbered header.
for i in range(0, len(pessoas)):
    print(' ---- ' + str(i+1) + ' Pessoa' + ' ---- ')
    print(pessoas[i])
| [
"noreply@github.com"
] | noreply@github.com |
89bfad9927cab9ec96b3795aa8887564a390caf1 | 6234d711a6352c694bb69946ff673e4829ab6916 | /feelings/groups/views/company.py | 91f38b17afdc2b90f2c6a890c1149c612963a773 | [
"MIT"
] | permissive | treehouse/livestream-django-feelings | c816beb4557d52d5aafb5f11a40f5e6a0c0f6ba5 | a246e456bb28f736cfb670486a1534e2d18efc78 | refs/heads/master | 2021-01-13T12:51:04.730505 | 2019-02-21T15:25:38 | 2019-02-21T15:25:38 | 78,469,589 | 32 | 24 | null | 2017-02-23T22:10:10 | 2017-01-09T21:14:02 | Python | UTF-8 | Python | false | false | 3,596 | py | from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.views import generic
from braces.views import SetHeadlineMixin
from .. import forms
from .. import models
class Create(LoginRequiredMixin, SetHeadlineMixin, generic.CreateView):
    """Create a new company owned by the logged-in user."""

    template_name = 'companies/form.html'
    form_class = forms.CompanyForm
    headline = 'Create Company'
    success_url = reverse_lazy('users:dashboard')

    def form_valid(self, form):
        """Stamp the creator on the company, save it, then enroll them as a member."""
        form.instance.created_by = self.request.user
        result = super().form_valid(form)
        self.object.members.add(self.request.user)
        return result
class Update(LoginRequiredMixin, SetHeadlineMixin, generic.UpdateView):
    """Edit an existing company the requesting user belongs to."""

    template_name = 'companies/form.html'
    form_class = forms.CompanyForm

    def get_queryset(self):
        """Restrict edits to companies the requesting user is a member of."""
        return self.request.user.companies.all()

    def get_headline(self):
        """Dynamic page headline naming the company being edited."""
        return f'Edit {self.object.name}'

    def get_success_url(self):
        """Send the user back to the company's detail page after saving."""
        return reverse(
            'groups:companies:detail',
            kwargs={'slug': self.object.slug},
        )
class Detail(LoginRequiredMixin, generic.FormView):
    """Company detail page that doubles as an invite form.

    Renders the company plus a CompanyInviteForm; a valid submission creates
    a pending CompanyInvite from the current user to the invitee.
    """
    form_class = forms.CompanyInviteForm
    template_name = 'companies/detail.html'
    def get_success_url(self):
        # get_object() is called for its side effect of caching self.object;
        # its return value is unused here.
        self.get_object()
        return reverse('groups:companies:detail', kwargs={
            'slug': self.object.slug})
    def get_queryset(self):
        # Only companies the requesting user belongs to.
        return self.request.user.companies.all()
    def get_object(self):
        # Fetch by slug within the user's companies and cache on self.object.
        # NOTE(review): an unknown slug raises Company.DoesNotExist (a 500),
        # not a 404 — confirm whether that is intended.
        self.object = self.request.user.companies.get(
            slug=self.kwargs.get('slug')
        )
        return self.object
    def get_context_data(self, **kwargs):
        # Expose the company to the template as `object`.
        context = super().get_context_data(**kwargs)
        context['object'] = self.get_object()
        return context
    def form_valid(self, form):
        # Record a pending invite; `form.invitee` is presumably resolved by
        # forms.CompanyInviteForm during validation — confirm there.
        response = super().form_valid(form)
        models.CompanyInvite.objects.create(
            from_user=self.request.user,
            to_user=form.invitee,
            company=self.get_object()
        )
        return response
class Leave(LoginRequiredMixin, SetHeadlineMixin, generic.FormView):
    """Confirmation page that lets a member leave a company they did not create."""

    form_class = forms.LeaveForm
    template_name = 'companies/form.html'
    success_url = reverse_lazy('users:dashboard')

    def get_object(self):
        """Cache the company on ``self.object``.

        Raises Http404 when the user is not a member of the company or is its
        creator (creators cannot leave their own company).
        """
        # BUG FIX: `Http404` was raised here without ever being imported, so
        # this path crashed with NameError instead of returning a 404.
        # Imported locally to keep the module's top-level imports unchanged.
        from django.http import Http404
        try:
            self.object = self.request.user.companies.filter(
                slug=self.kwargs.get('slug'),
            ).exclude(created_by=self.request.user).get()
        except models.Company.DoesNotExist:
            raise Http404

    def get_headline(self):
        """Ask the user to confirm leaving the named company."""
        self.get_object()
        return f'Leave {self.object}?'

    def form_valid(self, form):
        """Remove the user from the company's membership on confirmation."""
        self.get_object()
        self.object.members.remove(self.request.user)
        return super().form_valid(form)
class Invites(LoginRequiredMixin, generic.ListView):
    """List the pending company invites addressed to the current user."""

    template_name = 'companies/invites.html'
    model = models.CompanyInvite

    def get_queryset(self):
        """Only invites sent to this user that are still pending (status 0)."""
        return self.request.user.companyinvite_received.filter(status=0)
class InviteResponse(LoginRequiredMixin, generic.RedirectView):
    """Record an accept/reject answer to a pending invite, then redirect
    back to the invite list."""

    url = reverse_lazy('groups:companies:invites')

    def get(self, request, *args, **kwargs):
        """Resolve the pending invite addressed to this user and save the answer."""
        pending = get_object_or_404(
            models.CompanyInvite,
            status=0,
            uuid=kwargs.get('code'),
            to_user=request.user,
        )
        # URL kwarg 'response' == 'accept' means join (1); anything else rejects (2).
        pending.status = 1 if kwargs.get('response') == 'accept' else 2
        pending.save()
        return super().get(request, *args, **kwargs)
| [
"kenneth@gigantuan.net"
] | kenneth@gigantuan.net |
e750c6d345b925cbc4af78c7184243b8a8c6fdb0 | e881401a3c7b258b491eb5cb4f1f0ea8ba7e9f84 | /Part 2 - Regression/Section 6 - Polynomial Regression/Regresion polinomica.py | da2751c306a568535caec7f17178db44fa893344 | [] | no_license | kronenflex/Machine-Learning-Personal-Projects | 77a0b54e9329e2b6b4296f2375a1ec9687106167 | 281bbf8f772839250b2cd5756f750d15bf5a78f9 | refs/heads/master | 2022-11-26T04:55:19.163276 | 2020-08-04T02:12:16 | 2020-08-04T02:12:16 | 279,182,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 20:41:13 2020
@author: DiegoIgnacioPavezOla
"""
#Regresion polinomica
import pandas as pd
import numpy as np
import matplotlib as plt
#importar el data set
dataset = pd.read_csv('Position_Salaries.csv')
# elegir las variables
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2:3].values
# Dividir el dataset no se realiza por la poca cantidad de data
# Visualizar los resultados de entrenamientos
import matplotlib.pyplot as plt1
plt1.scatter(X,y)
#titulo
plt1.title("Lugares de trabajo")
plt1.xlabel("Posicion")
plt1.ylabel("Sueldo $")
plt1.show()
#ajustar la regresion lineal con el dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X,y)
#Ajustar la regresion polinomica con el dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
#Visualizacion de los modelos linear
plt.scatter(X, y, color = "red")
plt.plot(X, lin_reg.predict(X), color = "blue")
plt.title("Modelo de regresion lineal")
plt.xlabel("Posicion del empleado")
plt.ylabel("Sueldo en $")
plt.show
#Visualizacion de los modelos polinomicos
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color = "red")
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color = "blue")
plt.title("Modelo de regresion polinomica")
plt.xlabel("Posicion del empleado")
plt.ylabel("Sueldo en $")
plt.show
#Prediccion de nuestros modelos
lin_reg.predict([[6.5]])
lin_reg_2.predict(poly_reg.fit_transform([[6.5]]))
| [
"diego.pavezo@usach.cl"
] | diego.pavezo@usach.cl |
a374eddfa244d01cfba90dd3490a58a45e1a0b6a | efef2e4202b5f4355647c11e180cdf91779b2026 | /tests/testScriptSingleTimeseries.py | 7d92ad259810d3bf5ed5ce7deb60c31ba7bb19ab | [] | no_license | adityan619/tslib | 78eb86f0b0bb1d88bfb3d16b50d21a91488e2113 | 693f18305cad76cced370c5f7ccd5983dd922ab3 | refs/heads/master | 2020-04-19T10:05:48.867301 | 2018-10-10T02:38:12 | 2018-10-10T02:38:12 | 168,130,123 | 1 | 0 | null | 2019-01-29T09:46:04 | 2019-01-29T09:46:01 | null | UTF-8 | Python | false | false | 8,144 | py | #############################################################
#
# Single-Dimensional Time Series Imputation and Forecasting
#
# You need to ensure that this script is called from
# the tslib/ parent directory or tslib/tests/ directory:
#
# 1. python tests/testScriptSingleTimeseries.py
# 2. python testScriptSingleTimeseries.py
#
#############################################################
import sys, os
sys.path.append("../..")
sys.path.append("..")
sys.path.append(os.getcwd())
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import copy
from tslib.src.data import generateHarmonics as gH
from tslib.src.data import generateTrend as gT
import tslib.src.data.generateARMA as gA
from tslib.src.models.tsSVDModel import SVDModel
from tslib.src.models.tsALSModel import ALSModel
import tslib.src.tsUtils as tsUtils
def armaDataTest(timeSteps):
    """Generate an ARMA(3, 2) series of length `timeSteps` via tslib's generator.

    Returns (observedArray, meanArray): the noisy realization and its
    noise-free conditional mean; the per-step error array is discarded.
    """
    arLags = [0.4, 0.3, 0.2]  # autoregressive coefficients
    maLags = [0.5, 0.1]  # moving-average coefficients
    startingArray = np.zeros(np.max([len(arLags), len(maLags)])) # start with all 0's
    noiseMean = 0.0
    noiseSD = 1.0
    (observedArray, meanArray, errorArray) = gA.generate(arLags, maLags, startingArray, timeSteps, noiseMean, noiseSD)
    return (observedArray, meanArray)
def trendDataTest(timeSteps):
    """Generate a deterministic trend series of length `timeSteps`.

    Sums a linear trend and a log trend; a negative-exponential trend is
    computed but not included (see note below).
    """
    dampening = 2.0*float(1.0/timeSteps)
    power = 0.35
    displacement = -2.5
    f1 = gT.linearTrendFn
    data = gT.generate(f1, power=power, displacement=displacement, timeSteps=timeSteps)
    f2 = gT.logTrendFn
    data += gT.generate(f2, dampening=dampening, displacement=displacement, timeSteps=timeSteps)
    f3 = gT.negExpTrendFn
    # NOTE(review): t3 is generated but never added to `data` — presumably
    # disabled on purpose (like the plotting below), but confirm.
    t3 = gT.generate(f3, dampening=dampening, displacement=displacement, timeSteps=timeSteps)
    #plt.plot(t2)
    #plt.show()
    return data
def harmonicDataTest(timeSteps):
    """Generate a sum-of-sinusoids series of length `timeSteps`.

    Two sine components (periods 26 and 30) plus one cosine component
    (period 16), produced by tslib's harmonics generator.
    """
    sineCoeffs = [-2.0, 3.0]
    sinePeriods = [26.0, 30.0]
    cosineCoeffs = [-2.5]
    cosinePeriods = [16.0]
    data = gH.generate(sineCoeffs, sinePeriods, cosineCoeffs, cosinePeriods, timeSteps)
    #plt.plot(data)
    #plt.show()
    return data
# test for a single time series imputation and forecasting
def testSingleTS():
    """End-to-end test: impute and forecast a single synthetic time series.

    Builds harmonics + trend + ARMA data, hides part of the training data,
    fits SVD and ALS models, forecasts the held-out tail, and plots results.
    """
    print("------------------- Test # 1 (Single TS). ------------------------")
    p = 0.7            # probability that a training entry stays observed
    N = 50             # rows of the page matrix (window length)
    M = 400            # columns of the page matrix
    timeSteps = N*M
    # train/test split
    trainProp = 0.9
    M1 = int(trainProp * M)
    M2 = M - M1
    trainPoints = N*M1
    testPoints = N*M2
    print("Generating data...")
    harmonicsTS = harmonicDataTest(timeSteps)
    trendTS = trendDataTest(timeSteps)
    (armaTS, armaMeanTS) = armaDataTest(timeSteps)
    meanTS = harmonicsTS + trendTS + armaMeanTS
    combinedTS = harmonicsTS + trendTS + armaTS
    #normalize the values to all lie within [-1, 1] -- helps with RMSE comparisons
    # can use the tsUtils.unnormalize() function to convert everything back to the original range at the end, if needed
    max1 = np.nanmax(combinedTS)
    min1 = np.nanmin(combinedTS)
    max2 = np.nanmax(meanTS)
    min2 = np.nanmin(meanTS)
    # NOTE(review): `max` and `min` shadow the Python builtins inside this
    # function; harmless here but worth renaming.
    max = np.max([max1, max2])
    min = np.min([min1, min2])
    combinedTS = tsUtils.normalize(combinedTS, max, min)
    meanTS = tsUtils.normalize(meanTS, max, min)
    # produce timestamps
    timestamps = np.arange('2017-09-10 20:30:00', timeSteps, dtype='datetime64[1m]') # arbitrary start date
    # split the data
    trainDataMaster = combinedTS[0:trainPoints] # need this as the true realized values for comparisons later
    meanTrainData = meanTS[0:trainPoints] # this is only needed for various statistical comparisons later
    # randomly hide training data: choose between randomly hiding entries or randomly hiding consecutive entries
    (trainData, pObservation) = tsUtils.randomlyHideValues(copy.deepcopy(trainDataMaster), p)
    # now further hide consecutive entries for a very small fraction of entries in the eventual training matrix
    (trainData, pObservation) = tsUtils.randomlyHideConsecutiveEntries(copy.deepcopy(trainData), 0.9, int(M1 * 0.25), M1)
    # interpolating Nans with linear interpolation
    #trainData = tsUtils.nanInterpolateHelper(trainData)
    # test data and hidden truth
    testData = combinedTS[-1*testPoints: ]
    meanTestData = meanTS[-1*testPoints: ] # this is only needed for various statistical comparisons
    # time stamps
    trainTimestamps = timestamps[0:trainPoints]
    testTimestamps = timestamps[-1*testPoints: ]
    # once we have interpolated, pObservation should be set back to 1.0
    pObservation = 1.0
    # create pandas df
    key1 = 't1'
    trainMasterDF = pd.DataFrame(index=trainTimestamps, data={key1: trainDataMaster}) # needed for reference later
    trainDF = pd.DataFrame(index=trainTimestamps, data={key1: trainData})
    meanTrainDF = pd.DataFrame(index=trainTimestamps, data={key1: meanTrainData})
    testDF = pd.DataFrame(index=testTimestamps, data={key1: testData})
    meanTestDF = pd.DataFrame(index=testTimestamps, data={key1: meanTestData})
    # train the model
    print("Training the model (imputing)...")
    print('SVD')
    nbrSingValuesToKeep = 5
    mod = SVDModel(key1, nbrSingValuesToKeep, N, M1, probObservation=pObservation, svdMethod='numpy', otherSeriesKeysArray=[], includePastDataOnly=True)
    mod.fit(trainDF)
    imputedDf = mod.denoisedDF()
    print(" RMSE (training imputation vs mean) = %f" %tsUtils.rmse(meanTrainDF[key1].values, imputedDf[key1].values))
    print(" RMSE (training imputation vs obs) = %f" %tsUtils.rmse(trainMasterDF[key1].values, imputedDf[key1].values))
    print('ALS')
    # uncomment below to run the ALS algorithm ; comment out the above line
    # NOTE(review): both SVD and ALS actually run; `mod` is rebound to the ALS
    # model here, so all forecasting below uses ALS.
    mod = ALSModel(key1, nbrSingValuesToKeep, N, M1, probObservation=pObservation, otherSeriesKeysArray=[], includePastDataOnly=True)
    mod.fit(trainDF)
    # imputed + denoised data
    imputedDf = mod.denoisedDF()
    print(" RMSE (training imputation vs mean) = %f" %tsUtils.rmse(meanTrainDF[key1].values, imputedDf[key1].values))
    print(" RMSE (training imputation vs obs) = %f" %tsUtils.rmse(trainMasterDF[key1].values, imputedDf[key1].values))
    print("Forecasting (#points = %d)..." %len(testDF))
    # test data is used for point-predictions
    forecastArray = []
    for i in range(0, len(testDF)):
        pastPoints = np.zeros(N-1) # need an N-1 length vector of past point
        j = 0
        if (i < N - 1): # the first prediction uses the end of the training data
            while (j < N - 1 - i):
                pastPoints[j] = trainMasterDF[key1].values[len(trainDF) - (N - 1 - i) + j]
                j += 1
        if (j < N - 1): # use the new test data
            pastPoints[j:] = testDF[key1].values[i - (N - 1) + j:i]
        keyToSeriesDFNew = pd.DataFrame(data={key1: pastPoints})
        prediction = mod.predict(pd.DataFrame(data={}), keyToSeriesDFNew, bypassChecks=False)
        forecastArray.append(prediction)
    print(" RMSE (prediction vs mean) = %f" %tsUtils.rmse(meanTestDF[key1].values, forecastArray))
    print(" RMSE (prediction vs obs) = %f" %tsUtils.rmse(testDF[key1].values, forecastArray))
    print("Plotting...")
    plt.plot(np.concatenate((trainMasterDF[key1].values, testDF[key1].values), axis=0), color='gray', label='Observed')
    plt.plot(np.concatenate((meanTrainDF[key1].values, meanTestDF[key1].values), axis=0), color='red', label='True Means')
    plt.plot(np.concatenate((imputedDf[key1].values, forecastArray), axis=0), color='blue', label='Forecasts')
    plt.axvline(x=len(trainDF), linewidth=1, color='black', label='Training End')
    legend = plt.legend(loc='upper left', shadow=True)
    plt.title('Single Time Series (ARMA + Periodic + Trend) - $p = %.2f$' %p)
    plt.show()
def main():
    """Entry point: run the testing scripts wrapped in banner output."""
    edge = "*******************************************************"
    print(edge)
    print(edge)
    print("********** Running the Testing Scripts. ***************")
    testSingleTS()
    print("********** Testing Scripts Done. **********************")
    print(edge)
    print(edge)
# Run the test suite when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"jehangiramjad@gmail.com"
] | jehangiramjad@gmail.com |
234615d0dfa6ec1b4bb50bbc470a76d507001e80 | 58be8fc8996b98b624fb9784527b2dc588d4587c | /pybamm/models/submodels/active_material/stress_driven_active_material.py | 61fbe41ec0392883bec8138e4988b5b026f60706 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | gwhite09/PyBaMM | b9f7b6b06bb37b6819e306356f5b8e90df8affff | 033ad6384582a3e5d29ad48eeaa7fe92b98e2a29 | refs/heads/main | 2023-08-22T19:49:26.112089 | 2021-09-17T17:02:34 | 2021-09-17T17:02:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,401 | py | #
# Class for varying active material volume fraction, driven by stress
#
import pybamm
from .base_active_material import BaseModel
class StressDriven(BaseModel):
    """Submodel for varying active material volume fraction, driven by stress, from
    [1]_ and [2]_.

    Parameters
    ----------
    param : parameter class
        The parameters to use for this submodel
    domain : str
        The domain of the model either 'Negative' or 'Positive'
    options : dict
        Additional options to pass to the model
    x_average : bool
        Whether to use x-averaged variables (SPM, SPMe, etc) or full variables (DFN)

    **Extends:** :class:`pybamm.active_material.BaseModel`

    References
    ----------
    .. [1] Ai, W., Kraft, L., Sturm, J., Jossen, A., & Wu, B. (2019). Electrochemical
           Thermal-Mechanical Modelling of Stress Inhomogeneity in Lithium-Ion Pouch
           Cells. Journal of The Electrochemical Society, 167(1), 013512.
    .. [2] Reniers, J. M., Mulder, G., & Howey, D. A. (2019). Review and performance
           comparison of mechanical-chemical degradation models for lithium-ion
           batteries. Journal of The Electrochemical Society, 166(14), A3189.
    """
    def __init__(self, param, domain, options, x_average):
        super().__init__(param, domain, options=options)
        pybamm.citations.register("Reniers2019")
        self.x_average = x_average

    def get_fundamental_variables(self):
        # The state variable is the active material volume fraction, either
        # x-averaged (broadcast over the electrode) or fully resolved.
        domain = self.domain.lower() + " electrode"
        if self.x_average is True:
            eps_solid_xav = pybamm.Variable(
                "X-averaged " + domain + " active material volume fraction",
                domain="current collector",
            )
            eps_solid = pybamm.PrimaryBroadcast(eps_solid_xav, domain)
        else:
            eps_solid = pybamm.Variable(
                self.domain + " electrode active material volume fraction",
                domain=domain,
                auxiliary_domains={"secondary": "current collector"},
            )
        variables = self._get_standard_active_material_variables(eps_solid)
        return variables

    def get_coupled_variables(self, variables):
        # obtain the rate of loss of active materials (LAM) by stress
        # This is loss of active material model by mechanical effects
        if self.x_average is True:
            stress_t_surf = variables[
                "X-averaged "
                + self.domain.lower()
                + " particle surface tangential stress"
            ]
            stress_r_surf = variables[
                "X-averaged " + self.domain.lower() + " particle surface radial stress"
            ]
        else:
            stress_t_surf = variables[
                self.domain + " particle surface tangential stress"
            ]
            stress_r_surf = variables[self.domain + " particle surface radial stress"]
        # electrode-specific LAM parameters
        if self.domain == "Negative":
            beta_LAM = self.param.beta_LAM_n
            stress_critical = self.param.stress_critical_n
            m_LAM = self.param.m_LAM_n
        else:
            beta_LAM = self.param.beta_LAM_p
            stress_critical = self.param.stress_critical_p
            m_LAM = self.param.m_LAM_p
        # hydrostatic (mean) surface stress from radial and tangential parts
        stress_h_surf = (stress_r_surf + 2 * stress_t_surf) / 3
        # compressive stress make no contribution
        stress_h_surf *= stress_h_surf > 0
        # assuming the minimum hydrostatic stress is zero for full cycles
        stress_h_surf_min = stress_h_surf * 0
        # power-law LAM rate; negative because active material is lost
        j_stress_LAM = (
            -(beta_LAM / self.param.t0_cr)
            * ((stress_h_surf - stress_h_surf_min) / stress_critical) ** m_LAM
        )
        deps_solid_dt = j_stress_LAM
        variables.update(
            self._get_standard_active_material_change_variables(deps_solid_dt)
        )
        return variables

    def set_rhs(self, variables):
        # d(eps_solid)/dt equals the stress-driven LAM rate computed above
        Domain = self.domain + " electrode"
        if self.x_average is True:
            eps_solid = variables[
                "X-averaged " + Domain.lower() + " active material volume fraction"
            ]
            deps_solid_dt = variables[
                "X-averaged "
                + Domain.lower()
                + " active material volume fraction change"
            ]
        else:
            eps_solid = variables[Domain + " active material volume fraction"]
            deps_solid_dt = variables[
                Domain + " active material volume fraction change"
            ]
        self.rhs = {eps_solid: deps_solid_dt}

    def set_initial_conditions(self, variables):
        # initial volume fraction comes from the parameter set, per electrode
        if self.domain == "Negative":
            x_n = pybamm.standard_spatial_vars.x_n
            eps_solid_init = self.param.epsilon_s_n(x_n)
        elif self.domain == "Positive":
            x_p = pybamm.standard_spatial_vars.x_p
            eps_solid_init = self.param.epsilon_s_p(x_p)
        if self.x_average is True:
            eps_solid_xav = variables[
                "X-averaged "
                + self.domain.lower()
                + " electrode active material volume fraction"
            ]
            self.initial_conditions = {eps_solid_xav: pybamm.x_average(eps_solid_init)}
        else:
            eps_solid = variables[
                self.domain + " electrode active material volume fraction"
            ]
            self.initial_conditions = {eps_solid: eps_solid_init}
| [
"valentinsulzer@hotmail.com"
] | valentinsulzer@hotmail.com |
741609ccba647bdbc3c39638902316fcde154de0 | b069ee3fdc74a37c515935644ec36c133f2342c6 | /Baekjoon_Bronze/2562.py | 5cfe082af7f1c988778d693d5f5dc6abd0fcee04 | [] | no_license | gparkkii/Baekjoon_Challenge | 097a06df95d4b4c76058d8bfc48c71679d392dad | bd09081b21c60f825d52b795674cf3350bd91954 | refs/heads/master | 2023-01-12T15:59:42.364616 | 2020-11-11T09:23:04 | 2020-11-11T09:23:04 | 303,383,379 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | #9개의 서로 다른 자연수가 주어질 때, 이들 중 최댓값을 찾고 그 최댓값이 몇 번째 수인지를 구하는 프로그램을 작성하시오.
#예를 들어, 서로 다른 9개의 자연수 "3, 29, 38, 12, 57, 74, 40, 85, 61" 이 주어지면, 이들 중 최댓값은 85이고, 이 값은 8번째 수이다.
# 입력 : 첫째 줄부터 아홉 번째 줄까지 한 줄에 하나의 자연수가 주어진다. 주어지는 자연수는 100 보다 작다.
# 출력 : 첫째 줄에 최댓값을 출력하고, 둘째 줄에 최댓값이 몇 번째 수인지를 출력한다.
# Read nine natural numbers, then report the maximum and its 1-based position.
values = [int(input()) for _ in range(9)]
largest = max(values)
print(largest)
print(values.index(largest) + 1)
"jyp933@gmail.com"
] | jyp933@gmail.com |
05ccf79be44a26f4b837408790aa157ee894c2d0 | 3a68dab676a753545f8fcfd83c6a684d2b52f237 | /models/cnn.py | 23e1887a1703c0deff7eb612dccd0cbe16fbada0 | [] | no_license | dongxy1014/part2-pytorch | ea517bc894bdf4e7dbdf4dce9225b41740193b2f | a3460f6a1978d6c5b1f9ab23b5ff8085ba4edb7b | refs/heads/main | 2023-08-24T00:50:31.513105 | 2021-09-28T07:59:06 | 2021-09-28T07:59:06 | 411,155,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | """
Vanilla CNN model. (c) 2021 Georgia Tech
Copyright 2021, Georgia Institute of Technology (Georgia Tech)
Atlanta, Georgia 30332
All Rights Reserved
Template code for CS 7643 Deep Learning
Georgia Tech asserts copyright ownership of this template and all derivative
works, including solutions to the projects assigned in this course. Students
and other users of this template code are advised not to share it with others
or to make it available on publicly viewable websites including repositories
such as Github, Bitbucket, and Gitlab. This copyright statement should
not be removed or edited.
Sharing solutions with current or future students of CS 7643 Deep Learning is
prohibited and subject to being investigated as a GT honor code violation.
-----do not edit anything above this line---
"""
import torch
import torch.nn as nn
class VanillaCNN(nn.Module):
    """A minimal CNN for 32x32 RGB inputs (e.g. CIFAR-10).

    Architecture: Conv(3 -> 32, 7x7, stride 1, pad 0) -> ReLU ->
    MaxPool(2x2, stride 2) -> Linear(5408 -> 10).
    """

    def __init__(self):
        super(VanillaCNN, self).__init__()
        # 32x32 input -> conv 7x7/s1/p0 -> 26x26 -> pool 2x2/s2 -> 13x13,
        # so the flattened feature size is 32 * 13 * 13 = 5408.
        self.cnn_layers = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=7, stride=1, padding=0),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(5408, 10),
        )

    def forward(self, x):
        """Map a (batch, 3, 32, 32) tensor to (batch, 10) class scores."""
        features = self.cnn_layers(x)
        flat = torch.flatten(features, 1)
        return self.linear_layers(flat)
| [
"xdong77@gatech.edu"
] | xdong77@gatech.edu |
754cb3332d03cd2aaadbafc442d09e0ca759ffc1 | 2dbaf907dc625f79a70cae0e1e2b5b7d575c82fe | /BookTicketApp/migrations/0001_initial.py | df03751b7d27267032a023e757721e6980e86044 | [] | no_license | rominbusa/myTMS | 5277ac20a716587ab7c551858ad597abc60aa176 | f7e2d9c47dcdd92bbc841254a23dbd1681011fcd | refs/heads/master | 2020-03-08T15:43:50.497400 | 2018-04-06T09:35:12 | 2018-04-06T09:35:12 | 128,219,697 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | # Generated by Django 2.0.2 on 2018-04-06 03:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header); creates the
    # feedback, PackageDetails and TMSBooking tables. Avoid hand-editing --
    # make schema changes via new migrations instead.

    initial = True

    dependencies = [
        # TMSUser foreign keys below require SignupApp's initial migration.
        ('SignupApp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='feedback',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('feedback', models.TextField(max_length=200)),
                ('tmsuser', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='SignupApp.TMSUser')),
            ],
        ),
        migrations.CreateModel(
            name='PackageDetails',
            fields=[
                # package name doubles as the primary key
                ('pname', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('amount', models.CharField(max_length=5)),
            ],
        ),
        migrations.CreateModel(
            name='TMSBooking',
            fields=[
                ('booking_id', models.CharField(max_length=6, primary_key=True, serialize=False)),
                ('source', models.CharField(max_length=20)),
                ('destination', models.CharField(max_length=20)),
                ('departure_date', models.DateField()),
                ('no_of_person', models.PositiveIntegerField(default=0)),
                ('amount', models.PositiveIntegerField()),
                ('package', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='BookTicketApp.PackageDetails')),
                ('tmsuser', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='SignupApp.TMSUser')),
            ],
        ),
    ]
| [
"rominbusa0@gmail.com"
] | rominbusa0@gmail.com |
412a015cd3a9f1493754a63bfb89444017dbccf6 | 588bd797932ebb4745e394955ee4ccdf35cda7ed | /beans/__init__.py | 64cd5eba99cc4673b35e0be13da3d3dc0c4c6f3c | [] | no_license | Bobo1553/ais_crude_oil_extract | fa6f2f762fca26e208f2a69bd03801c7a8724804 | cc78d19c8754d35080a2bed737cea59fc405ee15 | refs/heads/main | 2023-04-27T02:02:08.980771 | 2021-05-09T09:37:41 | 2021-05-09T09:37:41 | 349,096,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | # -*- encoding: utf -*-
"""
Create on 2020/10/25 16:01
@author: Xiao Yijia
""" | [
"646851868@qq.com"
] | 646851868@qq.com |
9b6642bb5a5146592391c89f3af0816da42b6a2a | c5e6e7aacb62fb236e421f77dd1bedaa27d7a555 | /test/test_nodes_node_internal_ip_address.py | 4514364bdbcbe521ab1c38e06cad49e5d0fdf383 | [
"MIT"
] | permissive | tenortim/isilon_sdk_python | 4a8ff70f46d31d797d201023ea3150e36faf4f3f | 8cc40a294c04feddf6b4eddb1d710d20dc6b4c87 | refs/heads/v8.2.1 | 2021-05-21T23:50:42.669164 | 2020-01-07T10:31:43 | 2020-01-07T10:31:43 | 252,866,640 | 0 | 0 | null | 2020-04-03T23:44:27 | 2020-04-03T23:44:18 | null | UTF-8 | Python | false | false | 996 | py | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_1
from isi_sdk_8_2_1.models.nodes_node_internal_ip_address import NodesNodeInternalIpAddress # noqa: E501
from isi_sdk_8_2_1.rest import ApiException
class TestNodesNodeInternalIpAddress(unittest.TestCase):
    """Unit-test stubs for the NodesNodeInternalIpAddress model.

    The body is a placeholder generated alongside the SDK; it performs no
    assertions yet.
    """

    def setUp(self):
        # No fixtures required for the stub.
        pass

    def tearDown(self):
        # Nothing to clean up for the stub.
        pass

    def testNodesNodeInternalIpAddress(self):
        """Placeholder test for NodesNodeInternalIpAddress."""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_2_1.models.nodes_node_internal_ip_address.NodesNodeInternalIpAddress()  # noqa: E501
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"nalinkanwar@users.noreply.github.com"
] | nalinkanwar@users.noreply.github.com |
57869410b2cbb065c033061ca98fbdb7a1a23ac8 | e0034258aa9d279edf2a1d61a9cb7734f3b785d8 | /data_gui.py | eb26a8f11eaec80e24ea1fb20eb13ed70b8a4aef | [] | no_license | bernduwiesner/GenLotteryQt5 | b9aaaaa6f5bbdc2ad500e53178bd5e0fab1b96ff | 35a25b9b53f1543f5d66f6d05710965186934ece | refs/heads/master | 2020-06-24T16:23:49.233158 | 2019-07-26T12:44:09 | 2019-07-26T12:44:09 | 199,014,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,611 | py | from PyQt5.QtWidgets import (QDesktopWidget,
QDialog,
QLabel,
QLayout,
QVBoxLayout,
QPushButton,
)
from PyQt5.QtCore import Qt
from common import ResultsData
class ResultsWindow(QDialog):
    """Modal dialog that lists generated or stored lottery results.

    Builds one label per result line plus an OK button, centres itself on
    the screen and shows immediately on construction.
    """
    def __init__(self, parent, results):
        # results: ResultsData instance to render (see init_ui)
        QDialog.__init__(self, parent)
        self.base_layout = QVBoxLayout(self)
        self.base_layout.sizeConstraint = QLayout.SetDefaultConstraint
        self.base_layout.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        self.init_ui(results)

    def init_ui(self, results: ResultsData):
        """Create the controls on the frame
        :param results: the data to display
        :return:
        """
        def unwrap(some: str) -> str:
            """Remove characters "[". "]" and "'" from a string
            :param some: str the string to process
            :return:
            """
            # translate with None values deletes the mapped characters
            return some.translate({ord(i): None for i in "[]'"})
        row: int = 0
        action: str = "Generated " if results.generated else "Stored "
        label: str = action + results.lottery_type_name + " Lottery numbers"
        type_lbl = QLabel(label)
        # NOTE(review): QVBoxLayout.addWidget's second positional argument is
        # a stretch factor, not a row index -- `row`/`line + 1` are passed as
        # stretch here; confirm the intended layout.
        self.base_layout.addWidget(type_lbl, row)
        row += 1
        for line in range(results.get_data_length()):
            data_item = unwrap(results.get_data_item(line))
            label = QLabel(f"Line {line + 1:02d}: " + data_item)
            self.base_layout.addWidget(label, line + 1)
        row += results.get_data_length()
        ok_btn = QPushButton("OK", self)
        ok_btn.resize(ok_btn.sizeHint())
        ok_btn.clicked.connect(self.close_window)
        self.base_layout.addWidget(ok_btn, row)
        # MainWindow
        self.setLayout(self.base_layout)
        # zero geometry: the layout's size hint determines the real size
        win_x, win_y, win_width, win_height = (0, 0, 0, 0)
        self.setGeometry(win_x, win_y, win_width, win_height)
        self.setWindowTitle("Generated Results")
        self.centre()
        self.show()

    def centre(self) -> None:
        """Centre the window on the screen
        :return: None
        """
        geometry = self.frameGeometry()
        centre = QDesktopWidget().availableGeometry().center()
        geometry.moveCenter(centre)
        self.move(geometry.topLeft())

    def close_window(self, event) -> None:
        """Close the dialog (slot for the OK button)
        :param event: not used
        :return: None
        """
        self.close()
| [
"bernduwiesner@yahoo.co.uk"
] | bernduwiesner@yahoo.co.uk |
d2244420117df6de6ae154e421a909ba1ed92d7f | 2132a3c68624e545858e343bb11768e69cf84e28 | /battlesystem2.py | 22dec3fc23dea9d40b3e6ecba18a16001836ef26 | [] | no_license | tovoispovo/Text-based-RPG-combat-system | cbbf6678ade4e8663a993616fc8d739c647bb2e6 | 5614312727c7faa69240b7b7fb79770ece0b8a6f | refs/heads/master | 2022-01-02T08:41:08.299705 | 2018-02-20T05:10:51 | 2018-02-20T05:10:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,709 | py | import random
class Dice:
    """Random dice rolls used by the combat system."""

    def d10(self):
        """Return a roll in the range 0..9 inclusive."""
        return random.randint(0, 9)

    def d20(self):
        """Return a roll in the range 1..20 inclusive."""
        return random.randint(1, 20)
class Player:
    """A combatant (the player or an enemy) in the battle system.

    Note: the instance attribute ``heal`` holds the *amount* of HP restored
    per heal (and would shadow a method of the same name), so the healing
    action is exposed as :meth:`apply_heal`.
    """

    def __init__(self, name, health, attack, heal, maxhealth, healed, enemy):
        """Store the combatant's stats.

        Previously the constructor ignored its arguments and hard-coded the
        player's stats, forcing callers to reassign attributes after
        construction; it now honours the values passed in (backward
        compatible, since existing callers pass the values they want).

        :param name: display name
        :param health: current hit points
        :param attack: base attack damage (a d20 roll is added per hit)
        :param heal: hit points restored by a single heal
        :param maxhealth: hit-point cap
        :param healed: whether the once-per-combat heal has been used
        :param enemy: True for computer-controlled combatants
        """
        self.name = name
        self.health = health
        self.attack = attack
        self.heal = heal
        self.maxhealth = maxhealth
        self.healed = healed
        self.enemy = enemy

    def apply_heal(self):
        """Restore ``self.heal`` HP, capped at ``self.maxhealth``.

        Marks the combatant as having healed only if healing occurred, and
        returns the (possibly unchanged) current health.
        """
        if self.health < self.maxhealth:
            self.health = min(self.health + self.heal, self.maxhealth)
            self.healed = True
        return self.health
# --- One-time game setup: read the player's name and build all combatants ---
name = input("What is your charaters name? ")
roll = Dice()
player = Player(
    name,
    100,
    15,
    50,
    100,
    False,
    False
)
goblinone = Player(
    'Trogd0r',
    25,
    20,
    10,
    25,
    False,
    True
)
# Stats are assigned explicitly after construction so each goblin holds the
# intended values regardless of what the constructor does with its arguments.
goblinone.health = 25
goblinone.attack = 20
goblinone.maxhealth = 25
goblintwo = Player(
    'Termy Nator',
    50,
    5,
    10,
    50,
    False,
    True
)
goblintwo.health = 50
goblintwo.attack = 5
goblintwo.maxhealth = 50
goblinthree = Player(
    'St3v3 J0bzzz',
    75,
    10,
    10,
    75,
    False,
    True
)
goblinthree.health = 75
goblinthree.attack = 10
goblinthree.maxhealth = 75
goblinfour = Player(
    'Cassy Nova',
    100,
    8,
    50,
    100,
    False,
    True
)
goblinfour.health = 100
goblinfour.attack = 8
goblinfour.maxhealth = 100
#Greets player
print("Nice to meet you, " + player.name + ".")
print("4 wild goblins appear to take the sanctity of your holes away from you!!!")
print("IT'S TIME TO RUMBLE, MOTHERCLUCKER!")
# Combat-loop control flags
combatactive = True
healcriteria = True
#Start of the combat loop
# Main combat loop: each iteration is one turn (optional heal, player
# attack, then every surviving goblin attacks back).
while combatactive == True:
    print("Current HP: " + str(player.health) + "/" + str(player.maxhealth))
    # Once-per-combat heal prompt (skipped after the player has healed).
    while player.healed == False and healcriteria == True:
        heal = input("Would yee like to heal? Caution: may only be used once during combat. y/n :")
        if heal == "y":
            health = player.health
            # NOTE(review): the next line only evaluates the `heal` attribute
            # and discards it -- no health is actually restored; the printed
            # total is computed for display only and never stored.
            player.heal
            player.healed = True
            print(player.name + " healed for " + str(player.heal) + "HP for a total of " + str(player.health + player.heal) + "HP.")
        elif heal == "n":
            player.healed = False
            break
        else:
            print("Invalid syntax. y/n only.")
    healcriteria = True
    #Prints monsters to attack
    if goblinone.health > 0:
        print("(1) for " + goblinone.name + " " +str(goblinone.health) + "/" + str(goblinone.maxhealth) + "HP")
    else:
        print(goblinone.name + " is dead.")
    if goblintwo.health > 0:
        print("(2) for " + goblintwo.name + " " +str(goblintwo.health) + "/" + str(goblintwo.maxhealth) + "HP")
    else:
        print(goblintwo.name + " is dead.")
    if goblinthree.health > 0:
        print("(3) for " + goblinthree.name + " " +str(goblinthree.health) + "/" + str(goblinthree.maxhealth) + "HP")
    else:
        print(goblinthree.name + " is dead.")
    if goblinfour.health > 0:
        print("(4) for " + goblinfour.name + " " +str(goblinfour.health) + "/" + str(goblinfour.maxhealth) + "HP")
    else:
        print(goblinfour.name + " is dead.")
    #Player attack sequence
    attack = input("Which foul beast shall yee attack? ")
    attackable = ['1','2','3','4']
    attackcomplete = False
    # NOTE(review): `attack` is read once, before this loop; an invalid entry
    # appears to repeat the "invalid syntax" message forever because the
    # input is never re-prompted -- confirm and re-read inside the loop.
    while attackcomplete == False:
        if attack == '1' and goblinone.health > 0:
            smash = player.attack + roll.d20()
            goblinone.health = goblinone.health - smash
            print("Youve attacked " + goblinone.name + " for " + str(smash) + "HP.")
            attackcomplete = True
            break
        elif attack == '1' and goblinone.health <= 0:
            print("You've attacked a corpse!")
            attackcomplete = True
            break
        if attack == '2' and goblintwo.health > 0:
            smash = player.attack + roll.d20()
            goblintwo.health = goblintwo.health - smash
            print("Youve attacked " + goblintwo.name + " for " + str(smash) + "HP.")
            attackcomplete = True
            break
        elif attack == '2' and goblintwo.health <= 0:
            print("You've attacked a corpse!")
            attackcomplete = True
            break
        if attack == '3' and goblinthree.health > 0:
            smash = player.attack + roll.d20()
            goblinthree.health = goblinthree.health - smash
            print("Youve attacked " + goblinthree.name + " for " + str(smash) + "HP.")
            attackcomplete = True
            break
        elif attack == '3' and goblinthree.health <= 0:
            print("You've attacked a corpse!")
            attackcomplete = True
            break
        if attack == '4' and goblinfour.health > 0:
            smash = player.attack + roll.d20()
            goblinfour.health = goblinfour.health - smash
            print("Youve attacked " + goblinfour.name + " for " + str(smash) + "HP.")
            attackcomplete = True
            break
        elif attack == '4' and goblinfour.health <= 0:
            print("You've attacked a corpse!")
            attackcomplete = True
            break
        if attack not in attackable:
            print("invalid syntax. Please type 1, 2, 3 or 4.")
            attackcomplete = False
        attackcomplete = False
    # Goblin attack sequence: each living goblin hits on a d20 roll > 10.
    if goblinone.health > 0:
        print(goblinone.name + " attacks!")
        x = roll.d20()
        if x > 10:
            print(goblinone.name + " hits for " + str(goblinone.attack) + " damage.")
            player.health = player.health - goblinone.attack
        else:
            print(goblinone.name + " misses.")
    if goblintwo.health > 0:
        print(goblintwo.name + " attacks!")
        x = roll.d20()
        if x > 10:
            print(goblintwo.name + " hits for " + str(goblintwo.attack) + " damage.")
            player.health = player.health - goblintwo.attack
        else:
            print(goblintwo.name + " misses.")
    if goblinthree.health > 0:
        print(goblinthree.name + " attacks!")
        x = roll.d20()
        if x > 10:
            print(goblinthree.name + " hits for " + str(goblinthree.attack) + " damage.")
            player.health = player.health - goblinthree.attack
        else:
            print(goblinthree.name + " misses.")
    if goblinfour.health > 0:
        print(goblinfour.name + " attacks!")
        x = roll.d20()
        if x > 10:
            print(goblinfour.name + " hits for " + str(goblinfour.attack) + " damage.")
            player.health = player.health - goblinfour.attack
        else:
            print(goblinfour.name + " misses.")
    #Sets combat active to false if all monsters are dead
    if goblinone.health <= 0 and goblintwo.health <= 0 and goblinthree.health <= 0 and goblinfour.health <= 0:
        combatactive = False
        print("YOU BEAT THE GOBLINS, FAM!")
    else:
        combatactive = True
    #Sets combat active to false if player is dead
    if player.health <= 0:
        print("Oh heck, you freakin died")
        combatactive = False
| [
"noreply@github.com"
] | noreply@github.com |
f14308e3fd66781d5cbdd827da378221a727e027 | bccbb5244947574c63992dc812b5ef44519ec161 | /tests/test_command_runner.py | fcb536ca809e16f5103fd66573f5e2e7dd3eeea3 | [] | no_license | hal1932/pysvn | d4fab12dbb07838d947292146ca49e9a31119deb | a579744543765b574655377a2e1ada5be961e8d8 | refs/heads/master | 2020-03-14T06:35:46.835307 | 2018-05-01T16:17:10 | 2018-05-01T16:17:10 | 131,487,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | # coding: utf-8
from __future__ import print_function, unicode_literals
import unittest as ut
import xml.etree.ElementTree as et
from svn.command_runner import CommandRunner
class TestCommandRunner(ut.TestCase):
    """Integration tests for CommandRunner against a local svn checkout.

    The working-copy path is machine-specific, so the live test is skipped
    by default.
    """

    def setUp(self):
        # Point the runner at a local checkout of Subversion trunk.
        self.__runner = CommandRunner()
        self.__runner.current_directory = 'C:/Users/yuta/Desktop/subversion/trunk'

    def tearDown(self):
        # CommandRunner holds no resources that need releasing.
        pass

    @ut.skip
    def test_run(self):
        """`svn info --xml` should succeed and describe the working copy."""
        result, out, err = self.__runner.run('info', ['--xml'])
        self.assertEqual(result, 0)
        self.assertEqual(err, '')

        info = et.fromstring(out)
        self.assertEqual(info.tag, 'info')
        entry = info.find('entry')
        self.assertEqual(entry.find('url').text,
                         'https://svn.apache.org/repos/asf/subversion/trunk')
        self.assertEqual(entry.find('wc-info/wcroot-abspath').text,
                         'C:/Users/yuta/Desktop/subversion/trunk')
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    ut.main()
| [
"yu.arai.19@gmail.com"
] | yu.arai.19@gmail.com |
34ee73eb008e29b9a16e36a650b80a2252b3d80b | f0579ea5564f6ce72b1c61570f253df907e54f71 | /webspider.py | 8fab6e75cfcbf4c6e0e8cc5a8352b4fbb1630240 | [] | no_license | EatCodeCat/github | 8d88b9ca80eeb309a79a3b31ca05b72fe8c8310f | b8ce8b1e5774d53fe62f905cef53d818d5e0e2ca | refs/heads/master | 2020-05-31T00:54:06.290052 | 2017-05-08T17:26:14 | 2017-05-08T17:26:14 | 7,825,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # coding=utf-8
__author__ = 'think'
import requests
class WebSpider:
    """Minimal HTTP fetcher that carries browser-like default headers."""

    def __init__(self, headers):
        """Build the header set: caller-supplied `headers` override defaults."""
        defaults = {
            'accept-encoding': 'gzip, deflate, sdch, br',
            'accept-language': 'zh-CN,zh;q=0.8,en;q=0.6,ja;q=0.4,nb;q=0.2,sk;q=0.2,zh-TW;q=0.2',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                          ' Chrome/57.0.2987.98 Safari/537.36'
        }
        self.headers = dict(defaults, **headers)

    def get_text(self, url):
        """GET `url` with the current headers and return the response body."""
        return requests.get(url, headers=self.headers).text

    def set_headers(self, item):
        """Merge `item` into the current headers (new keys win)."""
        self.headers = dict(self.headers, **item)
| [
"17387575@qq.com"
] | 17387575@qq.com |
d4def9555dab98c62b9164fd9cc1759780f1f230 | dfe151428bc948f80cb8254fa7b8828b884cb4d1 | /nxGraphAlgorithms/Functions/__pycache__/bool_functions.py | 0fd7c2f7b83c85a6586a6a6408db1387ac38ed39 | [] | no_license | Phi51/nxGraphAlgorithms | a892cda7bdb054880c89ed6fc2ea6ceb56385b02 | eb0f7daa4079d4b9c8b7bcd5da2bc07046dfa76b | refs/heads/master | 2020-05-17T05:06:42.339746 | 2019-05-07T23:38:45 | 2019-05-07T23:38:45 | 183,524,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | from global_properties import V
from local_properties import neighbors
def is_independent (G, S):
for v in S:
N = neighbors(G, v)
if list(set(N) & set(S)) !=[]:
return False
return True
def is_dominating(G, S):
S_complement = list(set(V(G))-set(S))
for v in S_complement:
N = neighbors(G, v)
if list(set(N) & set(S)) == []:
return False
return True
def is_clique (G, S):
for v in S:
N = neighbors(G, v)
if list(set(N) & set(S)) !=[]:
return False
return True
def is_matching(G, m):
for edge1 in m:
v, w = edge1
for edge2 in m:
if edge2 != edge1:
if v in edge1 or w in edge1:
return False
return True | [
"noreply@github.com"
] | noreply@github.com |
e95fae2b71d041eff7090fe472700f65339ffa56 | 3b7474148c07df7f4755106a3d0ada9b2de5efdc | /training/c31_pattern_design/e04_callback.py | b608b4cea7865efac231c64f8fa6e7dd59efcde1 | [] | no_license | juancsosap/pythontraining | 7f67466846138f32d55361d64de81e74a946b484 | 1441d6fc9544042bc404d5c7efffd119fce33aa7 | refs/heads/master | 2021-08-26T05:37:15.851025 | 2021-08-11T22:35:23 | 2021-08-11T22:35:23 | 129,974,006 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | class Provider:
def get(self, path, cbf):
with open(path) as file:
text = file.read()
result = self.analyze(text)
cbf(result)
def word_count(self, text):
split_text = text.lower().split(' ')
words = len(set(split_text))
count = len(split_text)
return (words, count)
def char_count(self, text):
characters = len(set(text))
count = len(text)
return (characters, count)
def analyze(self, text):
word_info = self.word_count(text)
char_info = self.char_count(text)
return (word_info, char_info)
class Requester:
def make(self, path):
p = Provider()
p.get(path, self.done)
def done(self, result):
print(result)
if __name__ == "__main__":
basedir = __file__[:__file__.rfind('/')+1]
r = Requester()
r.make(basedir + 'data.txt') | [
"juan.c.sosa.p@gmail.com"
] | juan.c.sosa.p@gmail.com |
8a14e9d9f3f99b3ada59c746306ae6b9972fac4f | 85a4004de49ad0dce506792f50e4ee842bd3dfbb | /src/main/python/apache/aurora/executor/common/status_checker.py | d491ef909b171e4fee5d9e52deb04d9106e2cea4 | [
"MIT",
"Apache-2.0"
] | permissive | aurora-scheduler/client | 042e5858e8decf7898cf92551a34dadf97d49dc4 | 67644c82a90f1b0eb80c419a25677a7de034749a | refs/heads/master | 2021-01-07T18:14:49.637481 | 2020-05-07T23:55:05 | 2020-05-07T23:55:05 | 241,779,671 | 0 | 2 | Apache-2.0 | 2020-06-16T01:47:03 | 2020-02-20T02:59:47 | Python | UTF-8 | Python | false | false | 5,070 | py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod, abstractproperty
from mesos.interface.mesos_pb2 import TaskState
from twitter.common import log
from twitter.common.lang import Interface
from twitter.common.metrics import Observable
class StatusResult(object):
  """
  Encapsulates a reason for failure and a status value from mesos.interface.mesos_pb2.TaskStatus.

  As mesos 0.20.0 uses protobuf 2.5.0, see the EnumTypeWrapper[1] docs for more information.

  [1] https://code.google.com/p/protobuf/source/browse/tags/2.5.0/
      python/google/protobuf/internal/enum_type_wrapper.py
  """

  def __init__(self, reason, status):
    """
    :param reason: Human-readable explanation for the proposed status.
    :param status: A TaskState enum value; validated eagerly so that an unknown
        state fails fast at construction time instead of at report time.
    """
    self._reason = reason
    if status not in TaskState.values():
      raise ValueError('Unknown task state: %r' % status)
    self._status = status

  @property
  def reason(self):
    return self._reason

  @property
  def status(self):
    return self._status

  def __repr__(self):
    return '%s(%r, status=%r)' % (
        self.__class__.__name__,
        self._reason,
        TaskState.Name(self._status))

  def __eq__(self, other):
    # Return NotImplemented (not False) for foreign types so Python can try the
    # reflected comparison; the previous implementation returned False outright.
    if not isinstance(other, StatusResult):
      return NotImplemented
    return self._status == other._status and self._reason == other._reason

  def __ne__(self, other):
    result = self.__eq__(other)
    return result if result is NotImplemented else not result

  def __hash__(self):
    # Defining __eq__ without __hash__ makes instances unhashable under
    # Python 3 (and breaks hash consistency under Python 2); hash on the same
    # fields __eq__ compares.
    return hash((self._reason, self._status))
class StatusChecker(Observable, Interface):
  """Pluggable status-checker interface used by the Aurora Executor."""

  @abstractproperty
  def status(self):
    """None during normal operation; a StatusResult when proposing a status change."""

  def name(self):
    """Human-readable checker name; defaults to the class name. Subclassable."""
    return type(self).__name__

  def start(self):
    """Hook invoked once the task has been started."""

  def stop(self):
    """Hook invoked once a non-None status has been reported."""
class StatusCheckerProvider(Interface):
  """Factory interface that builds a status checker for an assigned task."""

  @abstractmethod
  def from_assigned_task(self, assigned_task, sandbox):
    """
    Build a checker for one task.

    :param assigned_task: The AssignedTask to monitor.
    :type assigned_task: AssignedTask
    :param sandbox: Sandbox of the task corresponding to this status check.
    :type sandbox: DirectorySandbox
    :return: Instance of a HealthChecker.
    """
class Healthy(StatusChecker):
  """Trivial checker that never proposes a status change."""

  @property
  def status(self):
    return None
class ChainedStatusChecker(StatusChecker):
  """Aggregates several StatusCheckers into one, resolving their statuses by priority."""

  def __init__(self, status_checkers):
    # :param status_checkers: iterable of StatusChecker instances, consulted in order.
    self._status_checkers = status_checkers
    self._status = None  # Cached aggregate; sticky once a terminal state is reached.
    if not all(isinstance(h_i, StatusChecker) for h_i in status_checkers):
      raise TypeError('ChainedStatusChecker must take an iterable of StatusCheckers.')
    super(ChainedStatusChecker, self).__init__()

  @property
  def status(self):
    """
    Return status that is computed from the statuses of the StatusCheckers. The computed status
    is based on the priority given below (in increasing order of priority).

        None -> healthy (lowest-priority)
        TASK_RUNNING -> healthy and running
        TASK_STARTING -> healthy but still in starting
        Otherwise -> unhealthy (highest-priority)
    """
    if not self._in_terminal_state():
      cur_status = None
      for status_checker in self._status_checkers:
        status_result = status_checker.status
        if status_result is not None:
          if not isinstance(status_result, StatusResult):
            raise TypeError('StatusChecker returned something other than a StatusResult: got %s' %
                type(status_result))
          if status_result.status == TaskState.Value('TASK_STARTING'):
            # TASK_STARTING overrides other statuses
            cur_status = status_result
          elif status_result.status == TaskState.Value('TASK_RUNNING'):
            if cur_status is None or cur_status == TaskState.Value('TASK_RUNNING'):
              # TASK_RUNNING needs consensus (None is also included)
              cur_status = status_result
          else:
            # Any other status leads to a terminal state: cache it, stop polling
            # further checkers, and report it from now on.
            log.info('%s reported %s', status_checker.__class__.__name__, status_result)
            self._status = status_result
            return self._status
      self._status = cur_status
    return self._status

  def _in_terminal_state(self):
    # Terminal once some checker reported anything other than RUNNING/STARTING;
    # after that the aggregate is frozen and the checkers are no longer polled.
    return (self._status is not None and
        self._status.status != TaskState.Value('TASK_RUNNING') and
        self._status.status != TaskState.Value('TASK_STARTING'))

  def start(self):
    # Fan the lifecycle hook out to every wrapped checker.
    for status_checker in self._status_checkers:
      status_checker.start()

  def stop(self):
    # Fan the lifecycle hook out to every wrapped checker.
    for status_checker in self._status_checkers:
      status_checker.stop()
| [
"commit@ridv.xyz"
] | commit@ridv.xyz |
c66cffd0f099505fe925881a7201e8e6fdd53225 | e906902a1993bc23034bd4e492d8eda3fe10ef7f | /final/platforms.py | dd4093f850c93be1af6d81ad08995bb99a09ee30 | [] | no_license | bkwmt/PBC109-1RunningGame | 007922d39e10a1037370ce249ce011828408528f | 9e23147772dd62d1267b8e78d77182173614c89a | refs/heads/main | 2023-08-11T08:00:59.554673 | 2021-09-08T12:45:33 | 2021-09-08T12:45:33 | 319,727,708 | 0 | 0 | null | 2020-12-27T17:53:35 | 2020-12-08T18:34:39 | Python | UTF-8 | Python | false | false | 6,201 | py | import random
import pygame as pg
from settings import *
class Ground(pg.sprite.Sprite):
    """Static floor strip of size (w, h) placed at (x, y)."""

    def __init__(self, x, y, w, h):
        pg.sprite.Sprite.__init__(self)
        surface = pg.Surface((w, h))  # the floor is a plain filled rectangle
        surface.fill(GOLDENROD)
        self.image = surface
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
class Hole(pg.sprite.Sprite):
    """A gap in the ground that scrolls left and wraps back in from the right.

    NOTE: the disabled bidirectional (Direction) scrolling variant that was
    commented out here has been removed; only leftward scrolling is supported.
    """

    def __init__(self):
        pg.sprite.Sprite.__init__(self)
        # The hole is slightly lower than the ground so the player can fall in.
        self.image = pg.Surface((160, GHEIGHT))
        self.image.fill(DARKSLATEBLUE)
        self.rect = self.image.get_rect()
        self.rect.left = 4 * WIDTH + 50
        self.rect.top = HEIGHT - GHEIGHT

    def update(self):
        # Scroll left until fully off-screen.
        if self.rect.right > -80:
            self.rect.right -= PSPEED
        # BUGFIX: this was `== -80`, which never fires when PSPEED does not
        # divide the travelled distance exactly, leaving the hole stuck
        # off-screen forever. Once past the threshold, recycle on the right.
        if self.rect.right <= -80:
            self.rect.left = 2 * WIDTH + 50
class Holeedge(pg.sprite.Sprite):
    """Margin strip around the hole that compensates for the visual hit-box
    offset (without it the player appears to fall before touching the hole).

    NOTE: the disabled bidirectional (Direction) scrolling variant that was
    commented out here has been removed; only leftward scrolling is supported.
    """

    def __init__(self):
        pg.sprite.Sprite.__init__(self)
        self.image = pg.Surface((260, GHEIGHT))
        # Temporarily the same color as the hole; adjust to match later.
        self.image.fill(DARKSLATEBLUE)
        self.rect = self.image.get_rect()
        self.rect.left = 4 * WIDTH
        self.rect.top = HEIGHT - GHEIGHT

    def update(self):
        # Scroll left until fully off-screen.
        if self.rect.right > -30:
            self.rect.right -= PSPEED
        # BUGFIX: this was `== -30`, which never fires when PSPEED does not
        # divide the travelled distance exactly, leaving the edge stuck
        # off-screen forever. Once past the threshold, recycle on the right.
        if self.rect.right <= -30:
            self.rect.left = 2 * WIDTH
class Platform(pg.sprite.Sprite):
    """A scrolling platform of size (w, h) placed at (x, y)."""

    def __init__(self, x, y, w, h):
        pg.sprite.Sprite.__init__(self)
        self.image = pg.Surface((w, h))
        self.image.fill(CHOCOLATE)
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)

    def update(self):
        """Scroll left until just past the screen edge."""
        if self.rect.right > -30:
            self.rect.right -= PSPEED
class Highplatform1(pg.sprite.Sprite):
    """High platform with randomized height and width, scrolling left at 4x speed."""

    def __init__(self):
        pg.sprite.Sprite.__init__(self)
        self.x = 3 * WIDTH
        self.y = 170 - random.randint(-15, 20)
        self.w = PW * 1.2 + (PW / random.randint(5, 10))
        self.h = THICK
        self.image = pg.Surface((self.w, self.h))
        self.image.fill(ROSYBROWN)
        self.rect = self.image.get_rect()
        self.rect.topleft = (self.x, self.y)

    def update(self):
        """Move left; once off-screen, wrap around to the right edge."""
        if self.rect.right > 0:
            self.rect.right -= 4 * PSPEED
        if self.rect.right <= 0:
            self.rect.left = WIDTH
class Highplatform2(pg.sprite.Sprite):
    """Second high platform variant (narrower jitter), scrolling left at 4x speed."""

    def __init__(self):
        pg.sprite.Sprite.__init__(self)
        self.x = 3.5 * WIDTH
        self.y = 170 - random.randint(-5, 5)
        self.w = PW * 1.2 + (PW / random.randint(15, 35))
        self.h = THICK
        self.image = pg.Surface((self.w, self.h))
        self.image.fill(OLIVE)
        self.rect = self.image.get_rect()
        self.rect.topleft = (self.x, self.y)

    def update(self):
        """Move left; once off-screen, wrap around to the right edge."""
        if self.rect.right > 0:
            self.rect.right -= 4 * PSPEED
        if self.rect.right <= 0:
            self.rect.left = WIDTH
class Midplatform1(pg.sprite.Sprite):
    """Mid-height platform that scrolls RIGHT at 3x speed, wrapping from the left."""

    def __init__(self):
        pg.sprite.Sprite.__init__(self)
        self.x = -2 * WIDTH
        self.y = HH + random.randint(-25, 5)
        self.w = PW * 1.5 + (PW / random.randint(5, 15))
        self.h = THICK
        self.image = pg.Surface((self.w, self.h))
        self.image.fill(TAN)
        self.rect = self.image.get_rect()
        self.rect.topleft = (self.x, self.y)

    def update(self):
        """Move right; once past the screen, wrap around to the left edge."""
        if self.rect.left < WIDTH:
            self.rect.left += 3 * PSPEED
        if self.rect.left >= WIDTH:
            self.rect.right = 0
class Midplatform2(pg.sprite.Sprite):
    """Second mid-height platform variant, scrolling RIGHT at 3x speed."""

    def __init__(self):
        pg.sprite.Sprite.__init__(self)
        self.x = -2.5 * WIDTH
        self.y = HH + random.randint(-25, 5)
        self.w = PW * 1.5 + (PW / random.randint(5, 15))
        self.h = THICK
        self.image = pg.Surface((self.w, self.h))
        self.image.fill(NAVY)
        self.rect = self.image.get_rect()
        self.rect.topleft = (self.x, self.y)

    def update(self):
        """Move right; once past the screen, wrap around to the left edge."""
        if self.rect.left < WIDTH:
            self.rect.left += 3 * PSPEED
        if self.rect.left >= WIDTH:
            self.rect.right = 0
class Lowplatform1(pg.sprite.Sprite):
    """Low platform just above the ground, scrolling left at 2x speed."""

    def __init__(self):
        pg.sprite.Sprite.__init__(self)
        self.x = 1.5 * WIDTH
        self.y = HEIGHT - GHEIGHT - random.randint(135, 145)
        self.w = HW / 2
        self.h = THICK
        self.image = pg.Surface((self.w, self.h))
        self.image.fill(LIGHTBLUE)
        self.rect = self.image.get_rect()
        self.rect.topleft = (self.x, self.y)

    def update(self):
        """Move left; once off-screen, wrap around to the right edge."""
        if self.rect.right > 0:
            self.rect.right -= 2 * PSPEED
        if self.rect.right <= 0:
            self.rect.left = WIDTH
class Lowplatform2(pg.sprite.Sprite):
    """Second low platform variant, scrolling left at 2x speed."""

    def __init__(self):
        pg.sprite.Sprite.__init__(self)
        self.x = 2 * WIDTH
        self.y = HEIGHT - GHEIGHT - random.randint(135, 145)
        self.w = HW / 2
        self.h = THICK
        self.image = pg.Surface((self.w, self.h))
        self.image.fill(INDIGO)
        self.rect = self.image.get_rect()
        self.rect.topleft = (self.x, self.y)

    def update(self):
        """Move left; once off-screen, wrap around to the right edge."""
        if self.rect.right > 0:
            self.rect.right -= 2 * PSPEED
        if self.rect.right <= 0:
            self.rect.left = WIDTH
| [
"75352803+bkwmt@users.noreply.github.com"
] | 75352803+bkwmt@users.noreply.github.com |
7f64617c1f9ffa09fcadfbe29ce329539eae983a | 0f074e5adef64fa16e88dc2499e76f08b4c33c02 | /matplotlib/ipython and pylab/03 改变线条颜色和粗细.py | 5536a6d7b75055db76aeaa13f86025196647c11b | [] | no_license | guozhenjiang/Python | 0ac39adaf72df0bfee51795fabcfd959a69b1862 | 44b07bd767f3f2a947331111ab920200ac2412c6 | refs/heads/master | 2021-05-19T16:54:40.725132 | 2020-11-19T16:26:26 | 2020-11-19T16:27:11 | 252,035,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | # 导入 matplotlib 的所有内容(nympy 可以用 np 这个名字来使用)
from pylab import *
# Create the figure (8 x 6 points originally, now 10 x 6) at 80 dots per inch
# figure(figsize=(8,6), dpi=80)
figure(figsize=(10,6), dpi=80)
# Create a new 1 x 1 subplot grid; draw into its first (and only) cell
subplot(1,1,1)
X = np.linspace(-np.pi, np.pi, 256,endpoint=True)
C,S = np.cos(X), np.sin(X)
# Draw the cosine curve as a blue, continuous, 2.5-pixel-wide line
# plot(X, C, color="blue", linewidth=1.0, linestyle="-")
plot(X, C, color="blue", linewidth=2.5, linestyle="-")
# Draw the sine curve as a red, continuous, 2.5-pixel-wide line
# plot(X, S, color="green", linewidth=1.0, linestyle="-")
plot(X, S, color="red", linewidth=2.5, linestyle="-")
# Set the x-axis limits
xlim(-4.0,4.0)
# Set the x-axis tick marks
xticks(np.linspace(-4,4,9,endpoint=True))
# Set the y-axis limits
ylim(-1.0,1.0)
# Set the y-axis tick marks
yticks(np.linspace(-1,1,5,endpoint=True))
# Save the figure at 72 dpi
# savefig("exercice_2.png",dpi=72)
# Display the figure on screen
show()
"guo_zhen_jiang@163.com"
] | guo_zhen_jiang@163.com |
ab027e68d8e9f198dc5982b75f821c4069c2f8e4 | 878ad1a51e764be295372d53ee779e52b2d3bc08 | /dataset.py | fa29de7e15234e9f0ff5b70c53364ca19a2a0894 | [] | no_license | thomasopsomer/Plume | cc991ca18e8ef86a36c1a06dcd6eebfb7b565ca7 | 357649a2dc89886ad685981449b2cf02ba4d378e | refs/heads/master | 2021-03-27T06:29:01.880538 | 2017-02-21T14:20:32 | 2017-02-21T14:20:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,234 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import pandas as pd
import numpy as np
from math import floor
cols = {
"temporal": [
u'precipintensity',
u'precipprobability',
u'temperature',
u'cloudcover',
u'pressure',
u'windbearingcos',
u'windbearingsin',
u'windspeed',
],
"static": [
u'hlres_50',
u'hlres_300',
u'hlres_500',
u'hlres_100',
u'hlres_1000',
#
u'hldres_50',
u'hldres_100',
u'hldres_500',
u'hldres_1000',
#
u'route_100',
u'route_300',
u'route_500',
u'route_1000',
#
u'green_5000',
u'natural_5000',
u'port_5000',
u'industry_1000',
u'roadinvdist',
],
}
# station by zone for train / dev
zone_station_dev = [
(0.0, 16.0),
(1.0, 18.0),
(3.0, 25.0),
(4.0, 23.0),
(5.0, 5.0),
]
zone_station_train = [
(0.0, 17.0),
(0.0, 20.0),
(1.0, 1.0),
(1.0, 22.0),
(2.0, 26.0),
(2.0, 28.0),
(3.0, 6.0),
(3.0, 9.0),
(4.0, 4.0),
(4.0, 10.0),
(5.0, 8.0),
(5.0, 11.0)
]
daytime_0 = 72.0
# split per pollutant
POLLUTANT = ['NO2', 'PM10', 'PM2_5']
ZONES = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
def get_count_by_station_by_zone(df):
    """Return {'ID': {(zone_id, station_id): row_count, ...}} for *df*."""
    counts = df.groupby(["zone_id", "station_id"])[["ID"]].count()
    return counts.to_dict()
def get_zone_station(df):
    """Map each zone_id in *df* to the list of station_ids observed in it."""
    zone_map = defaultdict(list)
    for zone, station in df.groupby(["zone_id", "station_id"]).groups.keys():
        zone_map[zone].append(station)
    return zone_map
def add_block(df):
    """Add a 'block' column formatted as '<pollutant>-<zone>-<station>' in place."""
    zone_part = df.zone_id.map(lambda z: "-%s" % int(z))
    station_part = df.station_id.map(lambda s: "-%s" % int(s))
    df["block"] = df.pollutant.map(str) + zone_part + station_part
def split_pollutant_dataset(df, pm=False):
    """
    Split *df* into per-pollutant frames, adding the 'block' column if missing.

    Returns (NO2, PM) when pm=True (PM10 and PM2_5 combined), otherwise
    (NO2, PM10, PM2_5).
    """
    if "block" not in df:
        add_block(df)
    no2 = df[df.pollutant == "NO2"]
    if pm:
        pm_all = df[(df.pollutant == "PM10") | (df.pollutant == "PM2_5")]
        return no2, pm_all
    return no2, df[df.pollutant == "PM10"], df[df.pollutant == "PM2_5"]
def split_train_dev(df, zone_station_train=None,
                    zone_station_dev=None,
                    seed=42):
    """
    Split a single-pollutant frame into train/dev frames with disjoint stations,
    so the dev set only contains stations the model never trained on.

    :param df: frame for ONE pollutant, carrying the 'block' column created by
        split_pollutant_dataset.
    :param zone_station_train: list of (zone_id, station_id) pairs for train;
        when None, stations are sampled (one dev station per zone).
    :param zone_station_dev: list of (zone_id, station_id) pairs for dev.
    :param seed: NOTE(review): currently unused — the np.random.seed call was
        disabled upstream, so the random sampling path is not reproducible.
    :return: (df_train, df_dev)
    """
    # Name of the (single) pollutant present in this frame.
    poll = df.pollutant.unique()[0]

    if zone_station_train is None or zone_station_dev is None:
        # Sample one station per zone for dev; the rest go to train.
        zone_station_train = []
        zone_station_dev = []
        d = get_poll_zone_station(df)
        for zone, stations in d[poll].items():
            i = np.random.randint(len(stations))
            # BUGFIX: removed the stray Python-2-only `print i` debug statement
            # here — it was a SyntaxError under Python 3 and spammed stdout.
            zone_station_dev.append((zone, stations.pop(i)))
            zone_station_train.extend([(zone, s) for s in stations])

    # Filter rows via the 'block' column created by split_pollutant_dataset.
    train_blocks = ["%s-%i-%i" % (poll, z, s) for z, s in zone_station_train]
    dev_blocks = ["%s-%i-%i" % (poll, z, s) for z, s in zone_station_dev]
    df_train = df[df.block.apply(lambda x: x in train_blocks)]
    df_dev = df[df.block.apply(lambda x: x in dev_blocks)]
    return df_train, df_dev
def get_poll_zone_station(df):
    """Return a nested mapping pollutant -> zone_id -> [station_id, ...]."""
    nested = defaultdict(lambda: defaultdict(list))
    keys = df.groupby(["pollutant", "zone_id", "station_id"]).groups.keys()
    for poll, zone, station in keys:
        nested[poll][zone].append(station)
    return nested
def add_hours_day(df):
    """Add 'hour_of_day' (0-23) in place, measured from the dataset's first daytime."""
    df["hour_of_day"] = (df.daytime - daytime_0) % 24
def add_day_of_week(df):
    """Add 'day_of_week' (0-6) in place, counted from the dataset's first daytime."""
    df["day_of_week"] = ((df.daytime - daytime_0) // 24) % 7
def add_avg_temporal_per_zone(df):
    """Return *df* joined with per-(zone, daytime) means of the temporal columns ('_avg' suffix)."""
    zone_means = df.groupby(["zone_id", "daytime"]).mean()[cols["temporal"]]
    joined = df.merge(zone_means, left_on=["zone_id", "daytime"], right_index=True,
                      suffixes=("", "_avg"), copy=False)
    return joined
def preprocess_dataset(df):
    """Add time-derived columns and recode is_calmday to +/-1; returns *df*."""
    add_hours_day(df)
    add_day_of_week(df)
    # Recode the boolean calm-day flag to {1, -1}.
    df.is_calmday = df.is_calmday.map(lambda calm: 1 if calm else -1)
    return df
def get_Y(Y_df, X_df):
    """Restrict *Y_df* to the index labels that are present in *X_df*."""
    keep = Y_df.index.isin(X_df.index)
    return Y_df.loc[keep]
if __name__ == '__main__':
    """ """
    # data path
    X_train_path = "/Users/thomasopsomer/data/plume-data/X_train.csv"
    X_test_path = "/Users/thomasopsomer/data/plume-data/X_test.csv"
    Y_train_path = "/Users/thomasopsomer/data/plume-data/Y_train.csv"
    # load all dataset
    df = pd.read_csv(X_train_path)
    df = preprocess_dataset(df)
    # split for each pollutant
    NO2_df, PM10_df, PM25_df = split_pollutant_dataset(df)
    # split in train / dev for each pollutant
    # NOTE(review): split_train_dev returns a (train, dev) TUPLE, so every name
    # below binds the full tuple and each *_train / *_dev pair is identical.
    # Presumably the intent was `x_train, x_dev = split_train_dev(...)` — confirm.
    NO2_train = split_train_dev(NO2_df, zone_station_train, zone_station_dev)
    NO2_dev = split_train_dev(NO2_df, zone_station_train, zone_station_dev)
    PM10_train = split_train_dev(PM10_df, zone_station_train, zone_station_dev)
    PM10_dev = split_train_dev(PM10_df, zone_station_train, zone_station_dev)
    PM25_train = split_train_dev(PM25_df, zone_station_train, zone_station_dev)
    PM25_dev = split_train_dev(PM25_df, zone_station_train, zone_station_dev)
| [
"thomasopsomer.enpc@gmail.com"
] | thomasopsomer.enpc@gmail.com |
6ff8cf46f9afbcf4558f4fc7c0f57921fcc8d9d4 | 68577bb693fe01cddce56da36a43702c6bdedc07 | /Programming/python/threads/events.001.py | a7ef176267b372f3242e604881eb1b4acfb8801b | [] | no_license | ceccopierangiolieugenio/scripts | 480ab9b94c135d47c4d7c916e35df537cfabbed3 | fe0eca7d76733e204c1c702e03b9ccc11ee421fd | refs/heads/master | 2023-03-31T16:57:37.064553 | 2023-03-26T13:21:36 | 2023-03-26T13:21:36 | 99,695,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | # Example from:
# https://www.bogotobogo.com/python/Multithread/python_multithreading_Event_Objects_between_Threads.php
import threading
import time
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
def wait_for_event(e):
    """Block indefinitely until event *e* is set, logging before and after."""
    logging.debug('wait_for_event starting')
    was_set = e.wait()
    logging.debug('event set: %s', was_set)
def wait_for_event_timeout(e, t):
    """Poll event *e* with a *t*-second timeout until it is set, logging each attempt."""
    # BUGFIX: Event.isSet() was a deprecated camelCase alias and was removed in
    # Python 3.12; is_set() is the supported spelling on all Python 3 versions.
    while not e.is_set():
        logging.debug('wait_for_event_timeout starting')
        event_is_set = e.wait(t)
        logging.debug('event set: %s', event_is_set)
        if event_is_set:
            logging.debug('processing event')
        else:
            # Timed out without the event firing; do other work and retry.
            logging.debug('doing other things')
if __name__ == '__main__':
e = threading.Event()
t1 = threading.Thread(name='blocking',
target=wait_for_event,
args=(e,))
t1.start()
t2 = threading.Thread(name='non-blocking',
target=wait_for_event_timeout,
args=(e, 2))
t2.start()
logging.debug('Waiting before calling Event.set()')
time.sleep(3)
e.set()
logging.debug('Event is set') | [
"ceccopierangiolieugenio@googlemail.com"
] | ceccopierangiolieugenio@googlemail.com |
8a698f42edefb1e3b2656c09e16d2aab030f6ad5 | 8ab9184b9cfbfe4a8b8ecbc777b6cfc6df3c95fd | /wrangle_mall.py | 4746ff1ca8e6f37e7c105e9506d4e55a4cb8b2bf | [] | no_license | mariamnaqvi/clustering-exercises | 14265493cae83751abff45a62a1f6f018e3e98f0 | 3dfed7c22be3486606266091d741f249ca1cfdcf | refs/heads/main | 2023-06-06T10:48:59.785524 | 2021-06-21T21:10:33 | 2021-06-21T21:10:33 | 377,687,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,767 | py | import pandas as pd
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
import scipy.stats as stats
# use get_db_url function to connect to the codeup db
from env import get_db_url
from sklearn.model_selection import train_test_split
# Acquire Data
def get_mallcustomer_data():
    """Load the mall_customers.customers table from the Codeup DB, indexed by customer_id."""
    customers = pd.read_sql('SELECT * FROM customers;', get_db_url('mall_customers'))
    return customers.set_index('customer_id')
# Prepare Data
def split_data(df, seed=123):
    """
    Split *df* into train/validate/test frames using the given random seed.

    Test is 20% of the original data; the remaining 80% is split 70/30 into
    train (56% overall = 0.7 * 0.8) and validate (24% overall = 0.3 * 0.8).
    """
    remainder, test = train_test_split(df, train_size=0.8, random_state=seed)
    train, validate = train_test_split(remainder, train_size=0.7, random_state=seed)
    return train, validate, test
# One hot encoding
def one_hot_enocde(df):
'''
This function takes in a dataframe and one hot encodes the gender column. It also renames the one hot encoded column, concats dummy dataframe to the
original dataframe and drops the original column. It then returns the dataframe.
'''
dummy_df=pd.get_dummies(df['gender'], dummy_na=False,
drop_first=True)
# rename columns that have been one hot encoded
dummy_df = dummy_df.rename(columns={'Male': 'is_male'})
# join dummy df to original df
df = pd.concat([df, dummy_df], axis=1)
# drop encoded column
df = df.drop(['gender'], axis=1)
return df
# Scaling
def scale_data(X_train, X_validate, X_test):
    """
    Min-max scale the three feature splits. The scaler is fit on X_train only,
    then applied to all three splits; the scaled frames keep the original
    indexes and column names.
    """
    scaler = MinMaxScaler()
    # Fit on train only; transform is applied to every split.
    scaler.fit(X_train)

    def _as_frame(X):
        # transform() returns a bare ndarray; restore index and columns.
        return pd.DataFrame(scaler.transform(X), index=X.index, columns=X.columns)

    return _as_frame(X_train), _as_frame(X_validate), _as_frame(X_test)
| [
"naqvi.mariam01@gmail.com"
] | naqvi.mariam01@gmail.com |
bc24c290276bfbed509b530f38753792aafca2b5 | 0ce1cb643e73a8421fe4c980b269ef0661132792 | /marksmol.py | 42094978c76c298657c6ab9f57ee570346dbbc70 | [] | no_license | lukakostic/marksmol | e757dab00938acaed5b2f7b29cabfe9e6370bb2b | 34f39a549fd7a7f6b7512de90cf03ebd75a514db | refs/heads/master | 2020-05-17T11:55:19.458363 | 2019-04-29T19:39:34 | 2019-04-29T19:39:34 | 183,698,002 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,540 | py | import sys
import os
import pathlib
def parse(t,path,debug = False):
os.chdir(path)
#Current strings
code = ''
word = ''
line = ''
#State trackers
inQuotes = 0
inComment = 0 #not bool so it can have comments in comments
escapeNext = False
#Indentation/Parent tracking
indentation = 0
tagStack = []
rootIndentation = -1
#Functions/Templates stuff
inFunc = False
funcFirst = True
textNext = False
funcs = []
t = t.replace(' '*4, '\t').replace("\r\n",'\n') #Replace 4 spaces with tabs, replace \r\n with \n
class marksmolFunc:
def __init__(self,name,variables,text):
self.name = name
self.variables = variables
self.text = text
#DEBUG
def dprint(txt):
nonlocal debug
if debug:
print(txt)
#Get second to last tag (pop it)
def getPrevTag():
nonlocal tagStack
tag = tagStack.pop()
if len(tagStack) is not 0:
retTag = tagStack.pop()
tagStack.append(tag)
return retTag
return tag
def clearFn():
nonlocal funcs,inFunc,textNext,funcFirst
funcs.pop()
inFunc = False
textNext = False
funcFirst = True
return
def funcEnd(l):
nonlocal code, word, tagStack, line, indentation, inFunc, funcFirst, funcs, textNext, t
fnInd = -1
if funcs[-1].name == '$include':
textToPaste = pathlib.Path(funcs[-1].variables[0][1:]).read_text().replace('\r\n','\n')
textToPaste = '\t'*indentation + textToPaste.replace('\n','\n'+'\t'*indentation) #indent
t = t[:l+1] + '\n' + textToPaste + t[l+1:]
clearFn()
elif funcs[-1].name == '$includeText':
textToPaste = pathlib.Path(funcs[-1].variables[0][1:]).read_text().replace('\r\n','\n')
textToPaste = '\t'*indentation + '`' + textToPaste.replace('\n','\n'+'\t'*indentation) + '`' #indent
t = t[:l+1] + '\n' + textToPaste + t[l+1:]
clearFn()
elif funcs[-1].name == '$strip':
textToPaste = funcs[-1].variables[0].replace('\r\n','\n')
textToPaste = textToPaste.rstrip('`').lstrip('`').rstrip('\`').lstrip('\`')
textToPaste = '\t'*indentation + textToPaste.replace('\n','\n'+'\t'*indentation) #indent
t = t[:l+1] + '\n' + textToPaste + t[l+1:]
clearFn()
elif funcs[-1].name == '$str':
textToPaste = funcs[-1].variables[0].replace('\r\n','\n')
textToPaste = textToPaste.rstrip('`').lstrip('`').rstrip('\`').lstrip('\`')
textToPaste = '\t'*indentation + textToPaste.replace('\n','\n'+'\t'*indentation) #indent
t = t[:l+1] + '\n' + '`' + textToPaste + '`' + t[l+1:]
clearFn()
else:
for x in range(len(funcs)):
#dprint('search ['+str(x)+'] name: |' + funcs[x].name + '| vs |' + funcs[-1].name + '|')
if funcs[x].name == funcs[-1].name:
fnInd = x
break
dprint('fnInd : ' + str(fnInd))
if fnInd is (len(funcs)-1) or fnInd is -1:
#Is new
dprint('fn is new')
textNext = True
else:
#Do function stuff
dprint('fn is old')
textToPaste = funcs[fnInd].text.replace('\r\n','\n')
textToPaste = '\t'*indentation + textToPaste.replace('\n','\n'+'\t'*indentation) #indent
#dprint('FUNCTION TEXT:\n' + textToPaste)
#replace var names with var values
for i in range(len(funcs[-1].variables)):
#dprint(funcs[fnInd].variables[i] + ' replaced w: ' + funcs[-1].variables[i][1:])
textToPaste = textToPaste.replace(funcs[fnInd].variables[i],funcs[-1].variables[i][1:])
t = t[:l+1] + '\n' + textToPaste + t[l+1:]
#dprint('\n\n\nFUNCTION TEXT REPLACED:\n' + textToPaste + '\n\n')
dprint('\n\nFUNCTION T RESULT:\n' + t + '\n\n')
clearFn()
def endWord():
nonlocal code, word, tagStack, line, indentation, inFunc, funcFirst, funcs, textNext
if word is not '' and word.isspace() is False and indentation is not -2:
if not inFunc:
dprint('nw='+word)
lineEmpty = line.isspace() or line is ''
if lineEmpty:
tagStack.append(word)
dprint(str(indentation)+'tag+'+word)
if not lineEmpty:
line = line.rstrip(' ')
line += ' '
elif not word.startswith('`'):
line += '<'
line += word
else:
dprint('inFunc : ' + word)
if funcFirst:
dprint('funcName')
funcs.append(marksmolFunc(word,[],''))
funcFirst = False
else:
if textNext:
dprint('funcText')
funcs[-1].text = word[1:]
inFunc = False
textNext = False
funcFirst = True
else:
dprint('funcVar')
funcs[-1].variables.append(word)
word = ''
def endLine():
nonlocal code, word, tagStack, indentation, rootIndentation, line, inFunc
if inFunc:
return
if line.isspace() or line is '':
indentation = 0
return
dprint(str(rootIndentation)+'nl'+str(indentation))
ptag = ''
if len(tagStack) > 0:
ptag = tagStack[-1]
dprint('ptag:'+ptag)
if not ptag.startswith('`'):
line += '>'
dprint('>ptag:'+ptag)
if rootIndentation >= indentation:
tag = getPrevTag()
dprint('\tempty:'+tag)
if tag.startswith('`') is False:
code = code.rstrip('\n') + '</'+tag+'>\n' #if you want sameline
#code = code.rstrip('\n') + '\t'*rootIndentation+'</'+empt+'>\n' #if you want newline
while rootIndentation > indentation:
tag = getPrevTag()
dprint('\t\tNOT EMPTY:'+tag)
rootIndentation -= 1
if tag.startswith('`') is False:
code += '\t'*rootIndentation+'</' + tag + '>\n'
if line.startswith('`'):
line = line[1:]
code += '\t' * indentation + line + '\n'
line = ''
if indentation > rootIndentation:
rootIndentation = indentation
indentation = 0
dprint("\n\n Parsing in: " + path)
l = 0
while l < len(t):
#dprint('l:'+t[l])
if escapeNext:
word += t[l]
escapeNext = False
elif inComment > 0:
if t[l] is '}':
inComment -= 1
dprint('}')
elif t[l] is '{':
inComment += 1
dprint('{')
elif inQuotes > 0:
if t[l] is '"' and inQuotes is 1:
word+='"'
endWord()
inQuotes = 0
elif t[l] is "'" and inQuotes is 2:
word+="'"
endWord()
inQuotes = 0
elif t[l] is '`' and inQuotes is 3:
#word+='`'
endWord()
inQuotes = 0
textNext = False
elif t[l] is '\n' and inQuotes is 4:
#word+='`'
#endWord()
#endLine()
#t = t[:l+1] + word + t[l+1:]
endWord()
#word = ''
endLine()
inQuotes = 0
elif t[l] is "\\":
escapeNext = True
else:
word += t[l]
else:
if t[l] is '\t' and not inFunc:
endWord()
indentation += 1
elif t[l] is '\n':
endWord()
endLine()
elif t[l] is '$' and word is '':
inFunc = True
word = '$'
dprint('$')
elif t[l] is ',' and inFunc:
endWord()
elif t[l] is ':' and inFunc:
endWord()
dprint(':')
funcEnd(l)
elif t[l] is ';':
endWord()
preInd = indentation
endLine() #keep indentation
indentation = preInd
elif t[l] is '<':
endWord()
preInd = indentation
endLine() #keep indentation + 1
indentation = preInd+1
elif t[l] is '>':
endWord()
preInd = indentation
endLine() #keep indentation
indentation = preInd
inQuotes = 4
word = '`'
elif t[l] is '=':
endWord()
line += '='
elif t[l] is '#':
endWord()
line = line.rstrip(' ') + ' id='
elif t[l] is '.':
endWord()
line = line.rstrip(' ') + ' class='
elif t[l] is ' ':
endWord()
elif t[l] is '{':
inComment += 1
dprint('{')
elif t[l] is '"':
word='"'
inQuotes = 1
elif t[l] is "'":
word="'"
inQuotes = 2
elif t[l] is '`':
endWord()
word = '`'
inQuotes = 3
elif t[l] is '\\' and inQuotes is not 4:
escapeNext = True
else:
word += t[l]
l += 1
endWord()
endLine()
indentation = 0
tagStack.append('`')
line = '`'
endLine()
return code
def main():
    """Command-line entry point: convert every .ms file under a folder to .html."""
    folder = ''
    rec = ''
    debug = 'y'  # debug output defaults to ON unless --debug n is passed
    # Crude flag parsing: each flag consumes the following argv entry as its value.
    # NOTE(review): a flag passed as the last argument raises IndexError — confirm intended.
    for x in range(len(sys.argv)):
        if sys.argv[x] == '--folder':
            folder = sys.argv[x+1]
        if sys.argv[x] == '--rec':
            rec = sys.argv[x+1]
        if sys.argv[x] == '--debug':
            debug = sys.argv[x+1]
    # Fall back to interactive prompts for anything not supplied on the command line.
    if folder == '':
        folder = input("Folder/Root:")
    if rec == '':
        rec = input("Recursive (y/n):")
    # Only the first character matters. NOTE(review): empty input raises IndexError here.
    rec = rec[0].lower()=="y"
    debug = debug[0].lower()=="y"
    files = []
    if rec is False:
        # Non-recursive: only .ms files directly inside the folder.
        files = [os.path.join(folder, f) for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)) and f.endswith('.ms')]
    else:
        # Recursive: walk the whole tree collecting .ms files.
        for (dirpath, dirnames, filenames) in os.walk(folder):
            files.extend([os.path.join(dirpath, f) for f in filenames if f.endswith('.ms')])
    # Transpile each source file and write the result next to it as .html.
    # NOTE(review): consider `with open(...)` so the handle is closed on error too.
    for f in files:
        txt = pathlib.Path(f).read_text()
        file = open(os.path.splitext(f)[0]+'.html','w')
        file.write(parse(txt,folder,debug))
        print(file.name)
        file.close()
if __name__ == '__main__':
main() | [
"41348897+lukakostic@users.noreply.github.com"
] | 41348897+lukakostic@users.noreply.github.com |
ee4b744b380f5efdea8e61700fb39461f20c0ec9 | 38aecb10c177a6458df8eebad3d8dc7a631a8c11 | /tv-script-generation/dlnd_tv_script_generation-predict.py | dfec9d139788e12e05cef249d4bb6a11c1cf75a7 | [
"MIT"
] | permissive | laozhuang727/udacity-deep-learning | d81f28c39393ab54ca37b86f378b5f5a523bf338 | f7bee830ecbcbccbef40d5091928380fc314e22b | refs/heads/master | 2021-01-21T16:11:08.314545 | 2017-05-20T11:48:51 | 2017-05-20T11:48:51 | 85,368,112 | 0 | 0 | null | 2017-03-23T02:06:21 | 2017-03-18T01:39:30 | Jupyter Notebook | UTF-8 | Python | false | false | 4,567 | py | # coding: utf-8
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
# ## Implement Generate Functions
# ### Get Tensors
# Get tensors from `loaded_graph` using the function
# [`get_tensor_by_name()`](https://www.tensorflow.org/api_docs/python/tf/Graph#get_tensor_by_name).
# Get the tensors using the following names:
# - "input:0"
# - "initial_state:0"
# - "final_state:0"
# - "probs:0"
#
# Return the tensors in the following tuple `(InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)`
# In[ ]:
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
    tensor_names = ("input:0", "initial_state:0", "final_state:0", "probs:0")
    return tuple(loaded_graph.get_tensor_by_name(name) for name in tensor_names)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
# ### Choose Word
# Implement the `pick_word()` function to select the next word
# using `probabilities`.
# In[ ]:
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
# TODO: Implement Function
index = np.argmax(probabilities)
return int_to_vocab[index]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
# ## Generate TV Script
# This will generate the TV script for you. Set `gen_length` to the length of TV script you want to generate.
# In[ ]:
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length - 1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
# # The TV Script is Nonsensical
# It's ok if the TV script doesn't make any sense. We trained on less than a megabyte of text. In order to get good results, you'll have to use a smaller vocabulary or get more data. Luckly there's more data! As we mentioned in the begging of this project, this is a subset of [another dataset](https://www.kaggle.com/wcukierski/the-simpsons-by-the-data). We didn't have you train on all the data, because that would take too long. However, you are free to train your neural network on all the data. After you complete the project, of course.
# # Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
| [
"laozhuang727@hotmail.com"
] | laozhuang727@hotmail.com |
75518d3e48ac30e892078c31e389903bc2d1a912 | d91906a0ace8979685b3dcd0d8ce91c7e9df4c58 | /db/admin.py | 9496142568cd9f039498fc51d0d6047c0659f624 | [] | no_license | charikova/SWP_project | 1f75a5219cf6b569308bdde5fc1b604914f0358a | 8bbdeeb57e5d576a1de303021c4df24351026aae | refs/heads/master | 2020-05-03T21:03:31.291044 | 2019-03-31T12:55:20 | 2019-03-31T12:55:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,463 | py | from django.contrib import admin
from django.contrib.auth.models import Group
from .models import Faculty, Auditorium, AdditionalProperties, Course, Preferences
from django.urls import path
from django.http import HttpResponseRedirect
from django.core.mail import EmailMessage, send_mail
from django_version import settings
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from string import Template
class MailAdmin(admin.ModelAdmin):
change_list_template = 'admin/db/db_changelist.html'
def get_urls(self):
urls = super().get_urls()
custom_urls = [
path('send_mails/', self.send_mails)
]
return custom_urls + urls
def send_mails(self, request):
t = Template('https://docs.google.com/forms/d/e/1FAIpQLScm7Akef32OptYwfi-D-Bg06TvYBxZgM-W-pArwwFR4JLZdYw/viewform?entry.592583197=$name&entry.966480905=$surname')
for faculty in Faculty.objects.all():
send_mail('Schedule creation', 'Hello!\nPlease fill the form for creating good schedule for you\n'
+ t.substitute(name=faculty.name, surname=faculty.surname), settings.EMAIL_HOST_USER,
[faculty.email], fail_silently=False)
self.message_user(request, 'Emails was sent successfully!')
return HttpResponseRedirect('../')
class PreferencesAdmin(admin.ModelAdmin):
change_list_template = 'admin/db/db_preferences.html'
def get_urls(self):
urls = super().get_urls()
custom_urls = [
path('refresh/', self.refresh)
]
return custom_urls + urls
def refresh(self, request):
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('./Trygoogle-50a92384d71a.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open('Answers').sheet1
records = wks.get_all_records()
records.pop(0)
print(records)
for r in records:
for key, value in r.items():
print(key, value)
return HttpResponseRedirect('../')
admin.site.site_header = 'Automatically generated scheduling algorithm'
admin.site.register(Faculty, MailAdmin)
admin.site.register(Preferences, PreferencesAdmin)
admin.site.register(Course)
admin.site.register(Auditorium)
admin.site.register(AdditionalProperties)
| [
"nnigmat@gmail.com"
] | nnigmat@gmail.com |
ba59c94ed8fcead956fc3cac427eacf397080203 | fe977ee36551016186c72b18fe3dd3e252bca66a | /AMCProjectAPI0.3/api/migrations/0003_auto_20200914_1946.py | 3c86aa6e4000c806cc7d37be021bdfbb5c3ea2fd | [] | no_license | ConnorDetlefsen/AMCProject_Django | 1cb4bc360f26bed1b1599b11bd91871d077b0ffa | 4c00eaaac95cbf2b1b4a6f890e06031e7bd51304 | refs/heads/master | 2022-12-19T12:18:15.174340 | 2020-09-29T05:41:56 | 2020-09-29T05:41:56 | 295,041,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Generated by Django 3.1.1 on 2020-09-14 19:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_question_questiontype'),
]
operations = [
migrations.AlterField(
model_name='answerlist2',
name='answer1',
field=models.TextField(max_length=364, null=True),
),
]
| [
"Detlefsen@chapman.edu"
] | Detlefsen@chapman.edu |
a4e14eb7af4471dc7ffa08f96007adf70020137c | 6bce0864840badbbd2016b1b2818b016347038e6 | /Drawing_dora.py | 8e89eb96398ebfaddd0af4e18230d75b9615e0a6 | [] | no_license | Emorznice/Louplus | e80327bdd0ce7eddb65054d337e8f84cd02a0108 | 2586898644c86df3b92faf5207a73df2e4a100d6 | refs/heads/master | 2023-08-30T13:24:10.755288 | 2021-11-03T10:55:12 | 2021-11-03T10:55:12 | 423,103,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,014 | py | from turtle import *
# 设置窗口大小
setup(500, 500)
# 设置画笔
speed(10)
shape('turtle')
colormode(255)
# 绘制圆
def drawRound(size, filled):
pendown()
if filled == True:
begin_fill()
setheading(180)
circle(size, 360)
if filled == True:
end_fill()
# 绘制矩形
def drawRect(length, width, filled):
setheading(0)
pendown()
if filled == True:
begin_fill()
forward(length)
right(90)
forward(width)
right(90)
forward(length)
right(90)
forward(width)
if filled == True:
end_fill()
# 绘制头部
def head():
# 绘制大圆
color('blue', 'blue')
penup()
goto(0, 100)
drawRound(75, True)
# 绘制小圆
color('white', 'white')
penup()
goto(0, 72)
drawRound(60, True)
# 绘制眼睛
def eyes():
# 左眼框
color('black', 'white')
penup()
goto(-15, 80)
drawRound(17, True)
# 右眼框
color('black', 'white')
penup()
goto(19, 80)
drawRound(17, True)
# 左眼珠
color('black', 'black')
penup()
goto(-8, 70)
drawRound(6, True)
color('white', 'white')
penup()
goto(-8, 66)
drawRound(2, True)
# 右眼珠
color('black', 'black')
penup()
goto(12, 70)
drawRound(6, True)
color('white', 'white')
penup()
goto(12, 66)
drawRound(2, True)
def nose():
color('red', 'red')
penup()
goto(0, 40)
drawRound(7, True)
def mouth():
color('black', 'black')
penup()
goto(-30, -20)
pendown()
setheading(-27)
circle(70, 55)
penup()
goto(0, 26)
pendown()
goto(0, -25)
# 胡子
def whisker():
color('black', 'black')
penup()
goto(10, 5)
pendown()
goto(-40, 5)
penup()
goto(10, 5)
pendown()
goto(40, 5)
penup()
goto(-10, 15)
pendown()
goto(-40, 20)
penup()
goto(10, 15)
pendown()
goto(40, 20)
penup()
goto(-10, -5)
pendown()
goto(-40, -10)
penup()
goto(10, -5)
pendown()
goto(40, -10)
def body():
# 蓝色身体
color('blue', 'blue')
penup()
goto(-50, -40)
drawRect(100, 80, True)
# 白色肚子
color('white', 'white')
penup()
goto(0, -30)
drawRound(40, True)
# 红色丝带
color('red', 'red')
penup()
goto(-60, -35)
drawRect(120, 10, True)
# 白色腿
color('white', 'white')
penup()
goto(15, -127)
pendown()
setheading(90)
begin_fill()
circle(14, 180)
end_fill()
def feet():
# 左脚
color('black', 'white')
penup()
goto(-30, -110)
drawRound(20, True)
# 右脚
color('black', 'white')
penup()
goto(30, -110)
drawRound(20, True)
def arms():
# 左胳膊
color('blue', 'blue')
penup()
begin_fill()
goto(-51, -50)
pendown()
goto(-51, -75)
left(70)
goto(-76, -85)
left(70)
goto(-86, -70)
left(70)
goto(-51, -50)
end_fill()
# 右胳膊
color('blue', 'blue')
penup()
begin_fill()
goto(49, -50)
pendown()
goto(49, -75)
left(70)
goto(74, -85)
left(70)
goto(84, -70)
left(70)
goto(49, -50)
end_fill()
def hands():
# 左手
color('black', 'white')
penup()
goto(-90, -71)
drawRound(15, True)
# 右手
color('black', 'white')
penup()
goto(90, -71)
drawRound(15, True)
def bell():
# 铜铃
color('yellow', 'yellow')
penup()
goto(0, -41)
drawRound(8, True)
# 花纹
color('black', 'black')
penup()
goto(-10, -47)
drawRect(20, 4, False)
# 金属丸
color('black', 'black')
penup()
goto(0, -53)
drawRound(2, True)
def package():
color('black', 'black')
penup()
goto(-25, -70)
pendown()
setheading(-90)
circle(25, 180)
goto(-25, -70)
hideturtle()
head()
eyes()
nose()
mouth()
whisker()
body()
feet()
arms()
hands()
bell()
package()
done()
| [
"2856987204@qq.com"
] | 2856987204@qq.com |
8527a82984c2cd8a19d450dc69773a45da4c0b51 | 79bc9a420df5c706b2ae06f4b75bf2bd2ba9646e | /emission/net/ext_service/push/query/trip_metrics.py | bad51c6afb3acaf63183dd918f4250efd1da085d | [
"BSD-3-Clause"
] | permissive | Andrew-Tan/e-mission-server | 7022786a13b4be87be62cfc2cc6d82543d063e5d | 91d59bee86e63d803e401f10f4b6a2502effedda | refs/heads/master | 2021-01-16T18:25:17.860723 | 2017-11-21T19:24:40 | 2017-11-21T19:24:40 | 100,073,534 | 0 | 0 | BSD-3-Clause | 2018-05-05T18:26:36 | 2017-08-11T22:13:44 | Jupyter Notebook | UTF-8 | Python | false | false | 4,279 | py | # Input spec sample at
# emission/net/ext_service/push/sample.specs/trip_metrics.query.sample sample
# finds all users who have at least one day in Feb 2017 with no more than 10
# walk sections and a walk distance of at least 1km during the evening commute
# hours
# Input: query spec
# Output: list of uuids
#
import logging
import numpy as np
import emission.core.wrapper.motionactivity as ecwm
import emission.net.api.metrics as enam
import emission.storage.decorations.local_date_queries as esdl
import emission.storage.decorations.location_queries as esdlq
import emission.storage.decorations.user_queries as esdu
import emission.storage.timeseries.geoquery as estg
import emission.storage.timeseries.timequery as estt
import emission.storage.timeseries.tcquery as esttc
import emission.storage.decorations.analysis_timeseries_queries as esda
def get_metric_list(checks):
metric_list = [e["metric"] for e in checks]
logging.debug("Returning %s" % metric_list)
return metric_list
def compare_value(threshold, summed_value):
if '$gt' in threshold:
return summed_value > threshold['$gt']
if '$gte' in threshold:
return summed_value >= threshold['$gte']
if '$lt' in threshold:
return summed_value < threshold['$lt']
if '$lte' in threshold:
return summed_value <= threshold['$lte']
return False
def matches_check(check, msts):
# We know that the metric in the check matches the result because that's the
# way that the metrics API works. So we just need to check mode versus threshold
# entry looks like this (for count)
# ModeStatTimeSummary({'fmt_time': '2017-01-20T00:00:00+00:00',
# 'nUsers': 1,
# 'UNKNOWN': 1,
# 'ts': 1484870400,
# 'AIR_OR_HSR': 2,
# 'local_dt': LocalDate(...)})
mode_list = check['modes']
summed_value = 0
for mode in mode_list:
summed_value = summed_value + msts.get(mode, 0)
return compare_value(check["threshold"], summed_value)
def is_matched_user(user_id, spec):
metric_list = get_metric_list(spec["checks"])
time_type = spec['time_type']
if 'from_local_date' in spec and 'to_local_date' in spec:
freq_metrics = enam.summarize_by_local_date(user_id,
spec["from_local_date"], spec["to_local_date"],
spec["freq"], metric_list, include_aggregate=False)
elif 'start_time' in spec and 'end_time' in spec:
freq_metrics = enam.summarize_by_timestamp(user_id,
spec["start_time"], spec["end_time"],
spec["freq"], metric_list, include_aggregate=False)
else:
# If no start and end times are specified, we assume that this is a
# timestamp query because we can come up with a reasonable start and end
# time for timestamps but not for local_dates, which are basically a filter.
# so if we run this on the first of a month, for example, we won't find
# anything, which seems bogus and not what people would expect
assert time_type == "timestamp", "time_type = %s, expected timestamp" % time_type
freq_metrics = enam.summarize_by_timestamp(user_id,
0, time.time(), spec["freq"], metric_list, include_aggregate=False)
assert(freq_metrics is not None)
assert('user_metrics' in freq_metrics)
curr_user_metrics = freq_metrics['user_metrics']
checks = spec['checks']
check_results = np.zeros(len(checks))
for i, check in enumerate(checks):
curr_metric_result = curr_user_metrics[i]
# curr_freq_result is a list of ModeStatTimeSummary objects, one for each
# grouped time interval in the range
# e.g. for daily, 2017-01-19, 2017-01-20, 2017-01-21, 2017-01-22, 2017-01-23, ....
for msts in curr_metric_result:
# We defined our check as being true if it is true for _any_ grouped time
# period in the range. So as long as we find a match for that check, we are
# good!
if matches_check(check, msts):
check_results[i] = True
logging.info("For user_id %s, check result array = %s, all? %s" % (user_id, check_results, np.all(check_results)))
return np.all(check_results)
def query(spec):
sel_uuids = esdu.get_all_uuids()
matched_uuid_list = [uuid for uuid in sel_uuids if is_matched_user(uuid, spec)]
logging.info("matched matched_uuid_list of length = %s = %s" %
(len(matched_uuid_list), matched_uuid_list))
return matched_uuid_list
| [
"shankari@eecs.berkeley.edu"
] | shankari@eecs.berkeley.edu |
1ee42e0fa0fd0e830473f4079c9058dd6869c849 | 7ab85ba79a6553659f0b324ecebb4bb39f8a8a1c | /shallow copy.py | 827e08006334256c38c0ceb955c5a8fd2ff5b596 | [] | no_license | subinmun1997/my_python | b75db77a035fa8f531d9872bf33a1818a002206a | 634acc948e7758f5d26084536c506e7da45cd53c | refs/heads/master | 2022-12-28T21:11:40.173378 | 2020-10-16T08:02:18 | 2020-10-16T08:02:18 | 292,875,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | r1=['John',('man','USA'),[175,23]]
r2=list(r1)
print(r1 is r2)
print(r1[0] is r2[0])
print(r1[1] is r2[1])
print(r1[2] is r2[2]) | [
"qzxy812@gmail.com"
] | qzxy812@gmail.com |
a29f1f6b1182042af906093e312c851c26cba225 | 354bc81b676cbf73e6ed883f24d11d600f1321d3 | /odoo/my_env/bin/rst2html.py | 365b7980e819f45da70d5acfdf0b54f29438ece4 | [] | no_license | anand-ichh/Odoo | 866bf09c99065d7821740735573be23a872e759f | c596eda039a2d6c2cec29195b28cfc155c95deed | refs/heads/main | 2023-06-14T07:15:14.866217 | 2021-07-12T10:44:21 | 2021-07-12T10:44:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | #!/opt/odoo/my_env/bin/python3.8
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| [
"root@oddoerpservernm@ichhapurti.com"
] | root@oddoerpservernm@ichhapurti.com |
52e48652c1ede98d55d64ed3b5c70975a9d90a48 | a7d7685f10c1f67e511bafb2240d50f9a8445c35 | /openmrsapi/tests/test_user.py | 8278343e5eff4302547f59368e2ca3e8d873702f | [
"MIT"
] | permissive | isears/openmrsapi | f8aeb4783da93fbcc40195deb16b6adf4c4c5162 | 50e5329b1ecc74f8e4d94f71e4b0e0207ac705d6 | refs/heads/master | 2020-03-10T22:51:29.689390 | 2018-04-22T15:45:59 | 2018-04-22T15:46:07 | 129,628,010 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | import unittest
import openmrsapi
class TestUser(unittest.TestCase):
def setUp(self):
self.created_user_uuids = list()
def tearDown(self):
for uuid in self.created_user_uuids:
openmrsapi.user.remove(uuid)
def test_create(self):
new_user = openmrsapi.user.add(
'openmrsapi-createtest',
'Openmrsapi123',
first_name='Openmrsapi',
last_name='CreateTest',
role='Organizational: Doctor',
gender='M'
)
assert 'uuid' in new_user
self.created_user_uuids.append(new_user['uuid'])
def test_delete(self):
new_user = openmrsapi.user.add(
'openmrsapi-deletetest',
'Openmrsapi123',
first_name='Openmrsapi',
last_name='DeleteTest',
role='Organizational: Doctor',
gender='M'
)
res = openmrsapi.user.remove(new_user['uuid'])
assert len(res) == 0 | [
"isaac.j.sears@gmail.com"
] | isaac.j.sears@gmail.com |
9d617e8e56b480d3f2c9796faf890e935037a64c | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/network/v20180701/get_network_watcher.py | a1c1900c872cb4134a105e35685ba9cbea6c876c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 4,324 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetNetworkWatcherResult',
'AwaitableGetNetworkWatcherResult',
'get_network_watcher',
]
@pulumi.output_type
class GetNetworkWatcherResult:
"""
Network watcher in a resource group.
"""
def __init__(__self__, etag=None, location=None, name=None, provisioning_state=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetNetworkWatcherResult(GetNetworkWatcherResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkWatcherResult(
etag=self.etag,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type)
def get_network_watcher(network_watcher_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkWatcherResult:
"""
Use this data source to access information about an existing resource.
:param str network_watcher_name: The name of the network watcher.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['networkWatcherName'] = network_watcher_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20180701:getNetworkWatcher', __args__, opts=opts, typ=GetNetworkWatcherResult).value
return AwaitableGetNetworkWatcherResult(
etag=__ret__.etag,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
306efc4d66d57b80b9a10c625c04f08557d7f834 | 8adcfe7485ea04bc1f83cac7d92bb51b97582f64 | /ALGORITHM/210531/프로그래머스 타겟 넘버.py | c31258c8edf6e3e7fa3b78f33fb5b9e9aed6108b | [] | no_license | NoJeong/TIL | fdceb6efc5d2d56f8dd2e27271ea0faacfe336ae | c79c34b84f025aa40cd3a8e28fd0898bcb40b608 | refs/heads/master | 2023-06-24T22:18:50.665917 | 2021-07-23T06:21:21 | 2021-07-23T06:21:21 | 280,307,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | import sys
sys.stdin = open('프로그래머스 타겟 넘버.txt')
def solution(numbers, target):
answer = 0
def dfs(numbers, target, index):
nonlocal answer
if index < len(numbers):
numbers[index] *= 1
dfs(numbers, target, index + 1)
numbers[index] *= -1
dfs(numbers, target, index + 1)
elif sum(numbers) == target:
answer += 1
dfs(numbers, target, 0)
return answer
a = list(map(int,input().split()))
b = int(input())
solution(a,b)
| [
"op032@naver.com"
] | op032@naver.com |
95cc3c807b14ed682f8a1c433a4b589994a6caed | f34a61d20773c705270d379782c64c36c574cfe4 | /mrcnn/model.py | 85d35fec18e1013c0b20aa4fabdbd85d1da40be8 | [
"MIT"
] | permissive | MarMarhoun/Cascade-MRCNN | b2d4544bd9cf3922baca5a4d82fbabc660c0dba8 | 56fc8611b1623cda4f061d101f508184964be86e | refs/heads/master | 2023-08-28T13:15:33.794261 | 2021-10-03T19:02:15 | 2021-10-03T19:02:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135,905 | py | """
Mask R-CNN
The main Mask R-CNN model implementation.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import random
import datetime
import re
import math
import logging
from collections import OrderedDict
import multiprocessing
import numpy as np
import tensorflow as tf
import keras
import keras.backend as K
import keras.layers as KL
import keras.engine as KE
import keras.models as KM
from mrcnn import utils
# Requires TensorFlow 1.3+ and Keras 2.0.8+.
from distutils.version import LooseVersion
assert LooseVersion(tf.__version__) >= LooseVersion("1.3")
assert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')
############################################################
# Utility Functions
############################################################
def log(text, array=None):
    """Print a text message, optionally followed by a summary of a Numpy
    array: its shape, min/max values, and dtype.

    text: message to print (left-justified to 25 chars when an array is given).
    array: optional Numpy array whose stats are appended to the message.
    """
    if array is None:
        print(text)
        return
    # Build the stats line piece by piece, then emit it in one print call.
    parts = [text.ljust(25)]
    parts.append("shape: {:20} ".format(str(array.shape)))
    if array.size:
        parts.append("min: {:10.5f} max: {:10.5f}".format(array.min(), array.max()))
    else:
        # Empty array: keep column alignment but show no numbers.
        parts.append("min: {:10} max: {:10}".format("", ""))
    parts.append(" {}".format(array.dtype))
    print("".join(parts))
class BatchNorm(KL.BatchNormalization):
    """Extends the Keras BatchNormalization class to allow a central place
    to make changes if needed.

    Batch normalization has a negative effect on training if batches are small
    so this layer is often frozen (via setting in Config class) and functions
    as linear layer.
    """
    def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when making inferences
        """
        # Use an explicit class reference instead of super(self.__class__, self):
        # the latter causes infinite recursion if BatchNorm is ever subclassed,
        # because self.__class__ would then resolve to the subclass.
        return super(BatchNorm, self).call(inputs, training=training)
def compute_backbone_shapes(config, image_shape):
    """Compute the spatial (height, width) of each backbone stage's output.

    config: model configuration; BACKBONE may be a callable (custom backbone)
        or one of the supported ResNet names, and BACKBONE_STRIDES lists the
        downsampling factor of each feature map stage.
    image_shape: [height, width, ...] of the input image.

    Returns:
        [N, (height, width)] array, one row per backbone stage.
    """
    # A callable backbone supplies its own shape computation.
    if callable(config.BACKBONE):
        return config.COMPUTE_BACKBONE_SHAPE(image_shape)

    # Currently supports ResNet only
    assert config.BACKBONE in ["resnet50", "resnet101"]
    height, width = image_shape[0], image_shape[1]
    stage_shapes = []
    for stride in config.BACKBONE_STRIDES:
        # Each stage downsamples by its stride; round up for partial cells.
        stage_shapes.append([int(math.ceil(height / stride)),
                             int(math.ceil(width / stride))])
    return np.array(stage_shapes)
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """Build a ResNet identity block: three convolutions with a residual
    connection that adds the unmodified input (no conv on the shortcut).

    input_tensor: input tensor
    kernel_size: kernel size of the middle conv layer (default 3)
    filters: list of three ints, the filter counts of the conv layers
    stage: integer stage label, used in layer names
    block: block label ('a', 'b', ...), used in layer names
    use_bias: Boolean. To use or not use a bias in conv layers.
    train_bn: Boolean. Train or freeze Batch Norm layers
    """
    filters1, filters2, filters3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # 1x1 reduce
    y = KL.Conv2D(filters1, (1, 1), name=conv_prefix + '2a',
                  use_bias=use_bias)(input_tensor)
    y = BatchNorm(name=bn_prefix + '2a')(y, training=train_bn)
    y = KL.Activation('relu')(y)

    # kxk conv
    y = KL.Conv2D(filters2, (kernel_size, kernel_size), padding='same',
                  name=conv_prefix + '2b', use_bias=use_bias)(y)
    y = BatchNorm(name=bn_prefix + '2b')(y, training=train_bn)
    y = KL.Activation('relu')(y)

    # 1x1 expand
    y = KL.Conv2D(filters3, (1, 1), name=conv_prefix + '2c',
                  use_bias=use_bias)(y)
    y = BatchNorm(name=bn_prefix + '2c')(y, training=train_bn)

    # Residual add with the untouched input, then final activation.
    y = KL.Add()([y, input_tensor])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """Build a ResNet conv block: three convolutions with a residual
    connection that passes the input through a 1x1 conv shortcut.

    input_tensor: input tensor
    kernel_size: kernel size of the middle conv layer (default 3)
    filters: list of three ints, the filter counts of the conv layers
    stage: integer stage label, used in layer names
    block: block label ('a', 'b', ...), used in layer names
    strides: stride of the first conv and the shortcut conv; from stage 3
        onward the default (2, 2) downsamples the feature map.
    use_bias: Boolean. To use or not use a bias in conv layers.
    train_bn: Boolean. Train or freeze Batch Norm layers
    """
    filters1, filters2, filters3 = filters
    conv_prefix = 'res' + str(stage) + block + '_branch'
    bn_prefix = 'bn' + str(stage) + block + '_branch'

    # 1x1 reduce (strided: this is where the block downsamples)
    y = KL.Conv2D(filters1, (1, 1), strides=strides,
                  name=conv_prefix + '2a', use_bias=use_bias)(input_tensor)
    y = BatchNorm(name=bn_prefix + '2a')(y, training=train_bn)
    y = KL.Activation('relu')(y)

    # kxk conv
    y = KL.Conv2D(filters2, (kernel_size, kernel_size), padding='same',
                  name=conv_prefix + '2b', use_bias=use_bias)(y)
    y = BatchNorm(name=bn_prefix + '2b')(y, training=train_bn)
    y = KL.Activation('relu')(y)

    # 1x1 expand
    y = KL.Conv2D(filters3, (1, 1), name=conv_prefix +
                  '2c', use_bias=use_bias)(y)
    y = BatchNorm(name=bn_prefix + '2c')(y, training=train_bn)

    # Shortcut path: 1x1 conv with the same stride so shapes match.
    shortcut = KL.Conv2D(filters3, (1, 1), strides=strides,
                         name=conv_prefix + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_prefix + '1')(shortcut, training=train_bn)

    y = KL.Add()([y, shortcut])
    return KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph and return its stage outputs [C1..C5].

    input_image: input image tensor
    architecture: Can be resnet50 or resnet101
    stage5: Boolean. If False, stage5 of the network is not created
        and C5 is returned as None.
    train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1: 7x7 strided conv + max pool.
    feat = KL.ZeroPadding2D((3, 3))(input_image)
    feat = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(feat)
    feat = BatchNorm(name='bn_conv1')(feat, training=train_bn)
    feat = KL.Activation('relu')(feat)
    C1 = feat = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(feat)
    # Stage 2: no downsampling (strides (1, 1) on the conv block).
    feat = conv_block(feat, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    for blk in ('b', 'c'):
        feat = identity_block(feat, 3, [64, 64, 256], stage=2, block=blk, train_bn=train_bn)
    C2 = feat
    # Stage 3
    feat = conv_block(feat, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    for blk in ('b', 'c', 'd'):
        feat = identity_block(feat, 3, [128, 128, 512], stage=3, block=blk, train_bn=train_bn)
    C3 = feat
    # Stage 4: depth is the only difference between resnet50 and resnet101.
    feat = conv_block(feat, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        # Block labels continue alphabetically from 'b' (chr(98)).
        feat = identity_block(feat, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = feat
    # Stage 5 (optional)
    if stage5:
        feat = conv_block(feat, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        feat = identity_block(feat, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = feat = identity_block(feat, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
############################################################
# Proposal Layer
############################################################
def apply_box_deltas_graph(boxes, deltas):
    """Apply the given refinement deltas to the given boxes.

    boxes: [N, (y1, x1, y2, x2)] boxes to update
    deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply

    Returns the refined boxes as [N, (y1, x1, y2, x2)].
    """
    # Convert corners to center/size representation.
    h = boxes[:, 2] - boxes[:, 0]
    w = boxes[:, 3] - boxes[:, 1]
    cy = boxes[:, 0] + 0.5 * h
    cx = boxes[:, 1] + 0.5 * w
    # Shift the center by a fraction of the size; scale the size
    # by the exponentiated log-deltas.
    cy = cy + deltas[:, 0] * h
    cx = cx + deltas[:, 1] * w
    h = h * tf.exp(deltas[:, 2])
    w = w * tf.exp(deltas[:, 3])
    # Convert back to corner representation.
    ny1 = cy - 0.5 * h
    nx1 = cx - 0.5 * w
    ny2 = ny1 + h
    nx2 = nx1 + w
    return tf.stack([ny1, nx1, ny2, nx2], axis=1, name="apply_box_deltas_out")
def clip_boxes_graph(boxes, window):
    """Clip boxes to lie inside the given window.

    boxes: [N, (y1, x1, y2, x2)]
    window: [4] in the form y1, x1, y2, x2
    """
    wy1, wx1, wy2, wx2 = tf.split(window, 4)
    y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)

    def _clamp(value, lo, hi):
        # Clamp each coordinate into [lo, hi].
        return tf.maximum(tf.minimum(value, hi), lo)

    clipped = tf.concat([_clamp(y1, wy1, wy2),
                         _clamp(x1, wx1, wx2),
                         _clamp(y2, wy1, wy2),
                         _clamp(x2, wx1, wx2)],
                        axis=1, name="clipped_boxes")
    clipped.set_shape((clipped.shape[0], 4))
    return clipped
class ProposalLayer(KE.Layer):
    """Receives anchor scores and selects a subset to pass as proposals
    to the second stage. Filtering is done based on anchor scores and
    non-max suppression to remove overlaps. It also applies bounding
    box refinement deltas to anchors.
    Inputs:
        rpn_probs: [batch, num_anchors, (bg prob, fg prob)]
        rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]
        anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates
    Returns:
        Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]
    """
    def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):
        """proposal_count: max number of proposals returned; the output is
        zero-padded up to this count if NMS yields fewer boxes.
        nms_threshold: IoU threshold for non-max suppression.
        config: model config. call() reads RPN_BBOX_STD_DEV, PRE_NMS_LIMIT
        and IMAGES_PER_GPU from it.
        """
        super(ProposalLayer, self).__init__(**kwargs)
        self.config = config
        self.proposal_count = proposal_count
        self.nms_threshold = nms_threshold
    def call(self, inputs):
        # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]
        scores = inputs[0][:, :, 1]
        # Box deltas [batch, num_rois, 4]
        deltas = inputs[1]
        # Undo the std-dev normalization applied when the RPN targets were built.
        deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])
        # Anchors
        anchors = inputs[2]
        # Improve performance by trimming to top anchors by score
        # and doing the rest on the smaller subset.
        pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])
        ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,
                         name="top_anchors").indices
        # batch_slice applies the per-image gather to each item in the batch.
        scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),
                                   self.config.IMAGES_PER_GPU)
        pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),
                                    self.config.IMAGES_PER_GPU,
                                    names=["pre_nms_anchors"])
        # Apply deltas to anchors to get refined anchors.
        # [batch, N, (y1, x1, y2, x2)]
        boxes = utils.batch_slice([pre_nms_anchors, deltas],
                                  lambda x, y: apply_box_deltas_graph(x, y),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors"])
        # Clip to image boundaries. Since we're in normalized coordinates,
        # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]
        window = np.array([0, 0, 1, 1], dtype=np.float32)
        boxes = utils.batch_slice(boxes,
                                  lambda x: clip_boxes_graph(x, window),
                                  self.config.IMAGES_PER_GPU,
                                  names=["refined_anchors_clipped"])
        # Filter out small boxes
        # According to Xinlei Chen's paper, this reduces detection accuracy
        # for small objects, so we're skipping it.
        # Non-max suppression
        def nms(boxes, scores):
            # Per-image NMS; always returns exactly proposal_count boxes
            # (zero-padded) so the batch stacks to a fixed shape.
            indices = tf.image.non_max_suppression(
                boxes, scores, self.proposal_count,
                self.nms_threshold, name="rpn_non_max_suppression")
            proposals = tf.gather(boxes, indices)
            # Pad if needed
            padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)
            proposals = tf.pad(proposals, [(0, padding), (0, 0)])
            return proposals
        proposals = utils.batch_slice([boxes, scores], nms,
                                      self.config.IMAGES_PER_GPU)
        return proposals
    def compute_output_shape(self, input_shape):
        # [batch, proposal_count, (y1, x1, y2, x2)]
        return (None, self.proposal_count, 4)
############################################################
# ROIAlign Layer
############################################################
def log2_graph(x):
    """Compute log base 2 of x. TF doesn't have a native implementation."""
    # Change of base: log2(x) = ln(x) / ln(2).
    ln2 = tf.log(2.0)
    return tf.log(x) / ln2
class PyramidROIAlign(KE.Layer):
    """Implements ROI Pooling on multiple levels of the feature pyramid.
    Params:
    - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]
    Inputs:
    - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized
             coordinates. Possibly padded with zeros if not enough
             boxes to fill the array.
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - feature_maps: List of feature maps from different levels of the pyramid.
                    Each is [batch, height, width, channels]
    Output:
    Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].
    The width and height are those specific in the pool_shape in the layer
    constructor.
    """
    def __init__(self, pool_shape, **kwargs):
        """pool_shape: [pool_height, pool_width] of the pooled output."""
        super(PyramidROIAlign, self).__init__(**kwargs)
        self.pool_shape = tuple(pool_shape)
    def call(self, inputs):
        # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords
        boxes = inputs[0]
        # Image meta
        # Holds details about the image. See compose_image_meta()
        image_meta = inputs[1]
        # Feature Maps. List of feature maps from different level of the
        # feature pyramid. Each is [batch, height, width, channels]
        feature_maps = inputs[2:]
        # Assign each ROI to a level in the pyramid based on the ROI area.
        y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)
        h = y2 - y1
        w = x2 - x1
        # Use shape of first image. Images in a batch must have the same size.
        image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]
        # Equation 1 in the Feature Pyramid Networks paper. Account for
        # the fact that our coordinates are normalized here.
        # e.g. a 224x224 ROI (in pixels) maps to P4
        image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)
        roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))
        # Clamp the level into [2, 5] (P2..P5).
        roi_level = tf.minimum(5, tf.maximum(
            2, 4 + tf.cast(tf.round(roi_level), tf.int32)))
        roi_level = tf.squeeze(roi_level, 2)
        # Loop through levels and apply ROI pooling to each. P2 to P5.
        pooled = []
        box_to_level = []
        for i, level in enumerate(range(2, 6)):
            # Indices (batch, box) of ROIs assigned to this pyramid level.
            ix = tf.where(tf.equal(roi_level, level))
            level_boxes = tf.gather_nd(boxes, ix)
            # Box indices for crop_and_resize.
            box_indices = tf.cast(ix[:, 0], tf.int32)
            # Keep track of which box is mapped to which level
            box_to_level.append(ix)
            # Stop gradient propogation to ROI proposals
            level_boxes = tf.stop_gradient(level_boxes)
            box_indices = tf.stop_gradient(box_indices)
            # Crop and Resize
            # From Mask R-CNN paper: "We sample four regular locations, so
            # that we can evaluate either max or average pooling. In fact,
            # interpolating only a single value at each bin center (without
            # pooling) is nearly as effective."
            #
            # Here we use the simplified approach of a single value per bin,
            # which is how it's done in tf.crop_and_resize()
            # Result: [batch * num_boxes, pool_height, pool_width, channels]
            pooled.append(tf.image.crop_and_resize(
                feature_maps[i], level_boxes, box_indices, self.pool_shape,
                method="bilinear"))
        # Pack pooled features into one tensor
        pooled = tf.concat(pooled, axis=0)
        # Pack box_to_level mapping into one array and add another
        # column representing the order of pooled boxes
        box_to_level = tf.concat(box_to_level, axis=0)
        box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)
        box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],
                                 axis=1)
        # Rearrange pooled features to match the order of the original boxes
        # Sort box_to_level by batch then box index
        # TF doesn't have a way to sort by two columns, so merge them and sort.
        # NOTE: assumes fewer than 100000 boxes per image so the merged key
        # sorts by batch index first, box index second.
        sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]
        # top_k sorts descending; reverse the indices to get ascending order.
        ix = tf.nn.top_k(sorting_tensor, k=tf.shape(
            box_to_level)[0]).indices[::-1]
        ix = tf.gather(box_to_level[:, 2], ix)
        pooled = tf.gather(pooled, ix)
        # Re-add the batch dimension
        shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)
        pooled = tf.reshape(pooled, shape)
        return pooled
    def compute_output_shape(self, input_shape):
        # [batch, num_boxes] + pool_shape + [channels of the feature maps]
        return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )
############################################################
# Detection Target Layer
############################################################
def overlaps_graph(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.
    boxes1, boxes2: [N, (y1, x1, y2, x2)].
    Returns overlaps matrix of shape [len(boxes1), len(boxes2)].
    """
    n1 = tf.shape(boxes1)[0]
    n2 = tf.shape(boxes2)[0]
    # Pair every box in boxes1 with every box in boxes2 without loops.
    # TF has no np.repeat() equivalent, so emulate it with tile + reshape.
    a = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),
                           [1, 1, n2]), [-1, 4])
    b = tf.tile(boxes2, [n1, 1])
    # Split into coordinates.
    a_y1, a_x1, a_y2, a_x2 = tf.split(a, 4, axis=1)
    b_y1, b_x1, b_y2, b_x2 = tf.split(b, 4, axis=1)
    # Intersection rectangle; side lengths clamp to zero when disjoint.
    iy1 = tf.maximum(a_y1, b_y1)
    ix1 = tf.maximum(a_x1, b_x1)
    iy2 = tf.minimum(a_y2, b_y2)
    ix2 = tf.minimum(a_x2, b_x2)
    intersection = tf.maximum(ix2 - ix1, 0) * tf.maximum(iy2 - iy1, 0)
    # Union = area(a) + area(b) - intersection.
    area_a = (a_y2 - a_y1) * (a_x2 - a_x1)
    area_b = (b_y2 - b_y1) * (b_x2 - b_x1)
    union = area_a + area_b - intersection
    # IoU, reshaped back to the pairwise [boxes1, boxes2] matrix.
    iou = intersection / union
    return tf.reshape(iou, [n1, n2])
def detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, gt_masks2, config):
    """Generates detection targets for one image. Subsamples proposals and
    generates target class IDs, bounding box deltas, and masks for each.
    Inputs:
    proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [MAX_GT_INSTANCES] int class IDs
    gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.
    gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.
    gt_masks2: [height, width, MAX_GT_INSTANCES] second set of GT masks
               (for the cascaded mask head); processed in lockstep with gt_masks.
    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.
    deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]
    masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox
           boundaries and resized to neural network output size.
    masks2: [TRAIN_ROIS_PER_IMAGE, height, width]. Same as masks, built
            from gt_masks2.
    Note: Returned arrays might be zero padded if not enough target ROIs.
    """
    # Assertions
    asserts = [
        tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],
                  name="roi_assertion"),
    ]
    with tf.control_dependencies(asserts):
        proposals = tf.identity(proposals)
    # Remove zero padding
    proposals, _ = trim_zeros_graph(proposals, name="trim_proposals")
    gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name="trim_gt_boxes")
    gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,
                                   name="trim_gt_class_ids")
    # Keep only the mask channels of the non-padded GT instances.
    gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2, name="trim_gt_masks")
    gt_masks2 = tf.gather(gt_masks2, tf.where(non_zeros)[:, 0], axis=2, name="trim_gt_masks2")
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = tf.where(gt_class_ids < 0)[:, 0]
    non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]
    crowd_boxes = tf.gather(gt_boxes, crowd_ix)
    gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)
    gt_boxes = tf.gather(gt_boxes, non_crowd_ix)
    gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)
    gt_masks2 = tf.gather(gt_masks2, non_crowd_ix, axis=2)
    # Compute overlaps matrix [proposals, gt_boxes]
    overlaps = overlaps_graph(proposals, gt_boxes)
    # Compute overlaps with crowd boxes [proposals, crowd_boxes]
    crowd_overlaps = overlaps_graph(proposals, crowd_boxes)
    crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)
    # Proposals that barely touch any crowd box are safe to use as negatives.
    no_crowd_bool = (crowd_iou_max < 0.001)
    # Determine positive and negative ROIs
    roi_iou_max = tf.reduce_max(overlaps, axis=1)
    # 1. Positive ROIs are those with >= 0.5 IoU with a GT box
    positive_roi_bool = (roi_iou_max >= 0.5)
    positive_indices = tf.where(positive_roi_bool)[:, 0]
    # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.
    negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]
    # Subsample ROIs. Aim for 33% positive
    # Positive ROIs
    positive_count = int(config.TRAIN_ROIS_PER_IMAGE *
                         config.ROI_POSITIVE_RATIO)
    positive_indices = tf.random_shuffle(positive_indices)[:positive_count]
    positive_count = tf.shape(positive_indices)[0]
    # Negative ROIs. Add enough to maintain positive:negative ratio.
    r = 1.0 / config.ROI_POSITIVE_RATIO
    negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
    negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
    # Gather selected ROIs
    positive_rois = tf.gather(proposals, positive_indices)
    negative_rois = tf.gather(proposals, negative_indices)
    # Assign positive ROIs to GT boxes.
    positive_overlaps = tf.gather(overlaps, positive_indices)
    # Guard against zero GT boxes: argmax on an empty axis is invalid.
    roi_gt_box_assignment = tf.cond(
        tf.greater(tf.shape(positive_overlaps)[1], 0),
        true_fn = lambda: tf.argmax(positive_overlaps, axis=1),
        false_fn = lambda: tf.cast(tf.constant([]),tf.int64)
    )
    roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
    roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)
    # Compute bbox refinement for positive ROIs
    deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
    deltas /= config.BBOX_STD_DEV
    # Assign positive ROIs to GT masks
    # Permute masks to [N, height, width, 1]
    transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
    transposed_masks2 = tf.expand_dims(tf.transpose(gt_masks2, [2, 0, 1]), -1)
    # Pick the right mask for each ROI
    roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
    roi_masks2 = tf.gather(transposed_masks2, roi_gt_box_assignment)
    # Compute mask targets
    boxes = positive_rois
    if config.USE_MINI_MASK:
        # Transform ROI coordinates from normalized image space
        # to normalized mini-mask space.
        y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
        gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
        gt_h = gt_y2 - gt_y1
        gt_w = gt_x2 - gt_x1
        y1 = (y1 - gt_y1) / gt_h
        x1 = (x1 - gt_x1) / gt_w
        y2 = (y2 - gt_y1) / gt_h
        x2 = (x2 - gt_x1) / gt_w
        boxes = tf.concat([y1, x1, y2, x2], 1)
    box_ids = tf.range(0, tf.shape(roi_masks)[0])
    # Crop each ROI out of its mask and resize to the network's mask shape.
    masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes, box_ids, config.MASK_SHAPE)
    masks2 = tf.image.crop_and_resize(tf.cast(roi_masks2, tf.float32), boxes, box_ids, config.MASK_SHAPE)
    # Remove the extra dimension from masks.
    masks = tf.squeeze(masks, axis=3)
    masks2 = tf.squeeze(masks2, axis=3)
    # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
    # binary cross entropy loss.
    masks = tf.round(masks)
    masks2 = tf.round(masks2)
    # Append negative ROIs and pad bbox deltas and masks that
    # are not used for negative ROIs with zeros.
    rois = tf.concat([positive_rois, negative_rois], axis=0)
    N = tf.shape(negative_rois)[0]
    P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
    rois = tf.pad(rois, [(0, P), (0, 0)])
    roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
    roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])
    deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
    masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
    masks2 = tf.pad(masks2, [[0, N + P], (0, 0), (0, 0)])
    return rois, roi_gt_class_ids, deltas, masks, masks2
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and generates target box refinement, class_ids,
    and masks for each.
    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
               be zero padded if there are not enough proposals.
    gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
              coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
    gt_masks2: [batch, height, width, MAX_GT_INSTANCES] second set of GT
               masks (for the cascaded mask head).
    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
          coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
                 Masks cropped to bbox boundaries and resized to neural
                 network output size.
    target_mask2: [batch, TRAIN_ROIS_PER_IMAGE, height, width]
                  Same as target_mask, built from gt_masks2.
    Note: Returned arrays might be zero padded if not enough target ROIs.
    """
    def __init__(self, config, **kwargs):
        """config: model config; call() reads IMAGES_PER_GPU and passes the
        whole config to detection_targets_graph()."""
        super(DetectionTargetLayer, self).__init__(**kwargs)
        self.config = config
    def call(self, inputs):
        proposals = inputs[0]
        gt_class_ids = inputs[1]
        gt_boxes = inputs[2]
        gt_masks = inputs[3]
        gt_masks2 = inputs[4]
        # Slice the batch and run a graph for each slice
        # TODO: Rename target_bbox to target_deltas for clarity
        names = ["rois", "target_class_ids", "target_bbox", "target_mask", "target_mask2"]
        outputs = utils.batch_slice(
            [proposals, gt_class_ids, gt_boxes, gt_masks, gt_masks2],
            lambda w, x, y, z, z2: detection_targets_graph(
                w, x, y, z, z2, self.config),
            self.config.IMAGES_PER_GPU, names=names)
        return outputs
    def compute_output_shape(self, input_shape):
        return [
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # rois
            (None, self.config.TRAIN_ROIS_PER_IMAGE),  # class_ids
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # deltas
            (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0], self.config.MASK_SHAPE[1]),  # masks
            (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0], self.config.MASK_SHAPE[1])  # masks
        ]
    def compute_mask(self, inputs, mask=None):
        # No Keras masking is propagated through this layer.
        return [None, None, None, None, None]
############################################################
# Detection Layer
############################################################
def refine_detections_graph(rois, probs, deltas, window, config):
    """Refine classified proposals and filter overlaps and return final
    detections.
    Inputs:
        rois: [N, (y1, x1, y2, x2)] in normalized coordinates
        probs: [N, num_classes]. Class probabilities.
        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
                bounding box deltas.
        window: (y1, x1, y2, x2) in normalized coordinates. The part of the image
            that contains the image excluding the padding.
    Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where
        coordinates are normalized.
    """
    # Class IDs per ROI
    class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)
    # Class probability of the top class of each ROI
    indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)
    class_scores = tf.gather_nd(probs, indices)
    # Class-specific bounding box deltas
    deltas_specific = tf.gather_nd(deltas, indices)
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)
    # TODO: Filter out boxes with zero area
    # Filter out background boxes
    keep = tf.where(class_ids > 0)[:, 0]
    # Filter out low confidence boxes
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        # Intersect the two index sets (both expanded to rank 2 as
        # tf.sets operates on the last axis).
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = tf.sparse_tensor_to_dense(keep)[0]
    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids, keep)
    pre_nms_scores = tf.gather(class_scores, keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]
    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
                tf.gather(pre_nms_rois, ixs),
                tf.gather(pre_nms_scores, ixs),
                max_output_size=config.DETECTION_MAX_INSTANCES,
                iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices
        # NMS returned positions within ixs; map back to indices into the
        # original ROI list via keep.
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep
    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(nms_keep, 0))
    keep = tf.sparse_tensor_to_dense(keep)[0]
    # Keep top detections
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores, keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)
    # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]
    # Coordinates are normalized.
    detections = tf.concat([
        tf.gather(refined_rois, keep),
        tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],
        tf.gather(class_scores, keep)[..., tf.newaxis]
        ], axis=1)
    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    return detections
class DetectionLayer(KE.Layer):
    """Takes classified proposal boxes and their bounding box deltas and
    returns the final detection boxes.
    Returns:
    [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where
    coordinates are normalized.
    """
    def __init__(self, config=None, **kwargs):
        """config: model config; call() reads IMAGES_PER_GPU, BATCH_SIZE and
        DETECTION_MAX_INSTANCES from it."""
        super(DetectionLayer, self).__init__(**kwargs)
        self.config = config
    def call(self, inputs):
        rois = inputs[0]
        mrcnn_class = inputs[1]
        mrcnn_bbox = inputs[2]
        image_meta = inputs[3]
        # Get windows of images in normalized coordinates. Windows are the area
        # in the image that excludes the padding.
        # Use the shape of the first image in the batch to normalize the window
        # because we know that all images get resized to the same size.
        m = parse_image_meta_graph(image_meta)
        image_shape = m['image_shape'][0]
        window = norm_boxes_graph(m['window'], image_shape[:2])
        # Run detection refinement graph on each item in the batch
        # (lambda args map positionally: x=rois, y=mrcnn_class,
        # w=mrcnn_bbox, z=window).
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
            self.config.IMAGES_PER_GPU)
        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in
        # normalized coordinates
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])
    def compute_output_shape(self, input_shape):
        return (None, self.config.DETECTION_MAX_INSTANCES, 6)
############################################################
# Region Proposal Network (RPN)
############################################################
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of Region Proposal Network.
    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    Returns:
        rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
        rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
        rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
                  applied to anchors.
    """
    # TODO: check if stride of 2 causes alignment issues if the feature map
    #       is not even.
    # 3x3 convolutional trunk shared by both RPN heads.
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # --- Objectness head ---
    # Raw anchor scores: [batch, height, width, anchors per location * 2].
    class_raw = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                          activation='linear', name='rpn_class_raw')(shared)
    # Flatten the spatial dims to get [batch, anchors, 2].
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(class_raw)
    # BG/FG probabilities via softmax on the last dimension.
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # --- Box refinement head ---
    # Raw deltas: [batch, H, W, anchors per location * 4].
    bbox_raw = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                         activation='linear', name='rpn_bbox_pred')(shared)
    # Flatten the spatial dims to get [batch, anchors, 4].
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(bbox_raw)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Builds a Keras model of the Region Proposal Network.
    It wraps the RPN graph so it can be used multiple times with shared
    weights.
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
                   every pixel in the feature map), or 2 (every other pixel).
    depth: Depth of the backbone feature map.
    Returns a Keras Model object. The model outputs, when called, are:
    rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)
    rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.
    rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be
              applied to anchors.
    """
    # Symbolic input for one backbone feature map of the given depth.
    feature_map_input = KL.Input(shape=[None, None, depth],
                                 name="input_rpn_feature_map")
    # Wrapping the graph in a Model lets the same weights be applied to
    # every pyramid level.
    rpn_outputs = rpn_graph(feature_map_input, anchors_per_location, anchor_stride)
    return KM.Model([feature_map_input], rpn_outputs, name="rpn_model")
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True,
                         fc_layers_size=1024):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.
    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
    fc_layers_size: Size of the 2 FC layers
    Returns:
        logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)
        probs: [batch, num_rois, NUM_CLASSES] classifier probabilities
        bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROIAlign pooled features: [batch, num_rois, pool_size, pool_size, channels]
    pooled = PyramidROIAlign([pool_size, pool_size],
                             name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # First "FC" layer: a pool_size x pool_size valid conv collapses the
    # spatial dims to 1x1, equivalent to a fully connected layer.
    y = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(pooled)
    y = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(y, training=train_bn)
    y = KL.Activation('relu')(y)
    # Second "FC" layer expressed as a 1x1 conv.
    y = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
                           name="mrcnn_class_conv2")(y)
    y = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(y, training=train_bn)
    y = KL.Activation('relu')(y)
    # Drop the two singleton spatial axes: [batch, num_rois, fc_layers_size]
    shared = KL.Lambda(lambda t: K.squeeze(K.squeeze(t, 3), 2),
                       name="pool_squeeze")(y)
    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)
    # BBox head: one set of deltas per class, first flattened...
    # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]
    bbox_flat = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                                   name='mrcnn_bbox_fc')(shared)
    # ...then reshaped to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]
    flat_shape = K.int_shape(bbox_flat)
    mrcnn_bbox = KL.Reshape((flat_shape[1], num_classes, 4), name="mrcnn_bbox")(bbox_flat)
    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
def build_fpn_mask_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True):
    """Builds the computation graph of the Primary mask head of Feature Pyramid Network.
    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
    Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
    """
    # ROIAlign pooled features:
    # [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_mask")([rois, image_meta] + feature_maps)
    # Four identical conv -> BN -> ReLU stages. Layer names are generated
    # to match the original checkpoints (mrcnn_mask_conv1..4, bn1..4).
    for stage in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv{}".format(stage))(x)
        x = KL.TimeDistributed(BatchNorm(),
                               name='mrcnn_mask_bn{}'.format(stage))(x, training=train_bn)
        x = KL.Activation('relu')(x)
    # 2x upsample, then a per-class sigmoid mask prediction.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x
def build_fpn_mask2_graph(rois, feature_maps, image_meta,
                          pool_size, train_bn=True):
    """Builds the computation graph of the Cascaded mask head of Feature Pyramid Network.
    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    train_bn: Boolean. Train or freeze Batch Norm layers
    Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, 1]
    """
    # ROIAlign pooled features:
    # [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_mask2")([rois, image_meta] + feature_maps)
    # Four identical conv -> BN -> ReLU stages. Layer names are generated
    # to match the original checkpoints (mrcnn_mask2_conv1..4, bn1..4).
    for stage in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask2_conv{}".format(stage))(x)
        x = KL.TimeDistributed(BatchNorm(),
                               name='mrcnn_mask2_bn{}'.format(stage))(x, training=train_bn)
        x = KL.Activation('relu')(x)
    # 2x upsample, then a single-channel (class-agnostic) sigmoid mask.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask2_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(1, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask2")(x)
    return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss (quadratic below an error of 1, linear above).
    y_true and y_pred are typically: [N, 4], but could be any shape.
    """
    abs_err = K.abs(y_true - y_pred)
    # 1.0 where the error falls in the quadratic zone, 0.0 otherwise.
    in_quadratic = K.cast(K.less(abs_err, 1.0), "float32")
    quadratic = 0.5 * abs_err**2
    linear = abs_err - 0.5
    return in_quadratic * quadratic + (1 - in_quadratic) * linear
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.

    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Drop the trailing singleton dimension: [batch, anchors].
    rpn_match = tf.squeeze(rpn_match, -1)
    # Map the -1/+1 match values to 0/1 class labels (BG/FG).
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Neutral anchors (match == 0) carry no gradient; keep only the rest.
    contributing_ix = tf.where(K.not_equal(rpn_match, 0))
    logits = tf.gather_nd(rpn_class_logits, contributing_ix)
    labels = tf.gather_nd(anchor_class, contributing_ix)
    # Sparse cross entropy over the contributing anchors only.
    loss = K.sparse_categorical_crossentropy(target=labels,
                                             output=logits,
                                             from_logits=True)
    # Guard against the empty-selection case.
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unsed bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Only positive anchors contribute; negative/neutral (-1 or 0) do not.
    rpn_match = K.squeeze(rpn_match, -1)
    positive_ix = tf.where(K.equal(rpn_match, 1))
    pred_deltas = tf.gather_nd(rpn_bbox, positive_ix)
    # Trim the zero-padded targets down to the same count per batch item.
    positive_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_deltas = batch_pack_graph(target_bbox, positive_counts,
                                     config.IMAGES_PER_GPU)
    loss = smooth_l1_loss(target_deltas, pred_deltas)
    # Zero loss when there are no positive anchors at all.
    return K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # Keras may trace this with float32 targets during model building;
    # force an integer dtype for the sparse cross entropy below.
    target_class_ids = tf.cast(target_class_ids, 'int64')
    # Predicted class of each ROI, used to mask out inactive classes.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)
    # Per-ROI cross entropy.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)
    # Zero out losses for predictions of classes that are not active in
    # this image, then average over the contributing predictions only.
    loss = loss * pred_active
    return tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Flatten the batch/ROI dimensions for simpler indexing.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    num_classes = K.int_shape(pred_bbox)[2]
    pred_bbox = K.reshape(pred_bbox, (-1, num_classes, 4))
    # Only positive ROIs contribute, and only the prediction for each
    # ROI's ground-truth class.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    gather_ix = tf.stack([positive_ix, positive_class_ids], axis=1)
    # Select the contributing deltas (ground truth and prediction).
    y_true = tf.gather(target_bbox, positive_ix)
    y_pred = tf.gather_nd(pred_bbox, gather_ix)
    # Smooth-L1 over the selected deltas; 0 when there are no positives.
    loss = K.switch(tf.size(y_true) > 0,
                    smooth_l1_loss(y_true=y_true, y_pred=y_pred),
                    tf.constant(0.0))
    return K.mean(loss)
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
        with values from 0 to 1.
    """
    # Merge batch and ROI dimensions into one.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    true_shape = tf.shape(target_masks)
    target_masks = K.reshape(target_masks,
                             (-1, true_shape[2], true_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(pred_masks,
                           (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Reorder to [N, num_classes, height, width] so a (roi, class) pair
    # indexes one predicted mask.
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
    # Only positive ROIs contribute, each with its ground-truth class mask.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    gather_ix = tf.stack([positive_ix, positive_class_ids], axis=1)
    # Select the contributing masks (ground truth and prediction).
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, gather_ix)
    # Binary cross entropy; 0 when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    return K.mean(loss)
def mrcnn_mask2_loss_graph(target_masks, target_class_ids, pred_masks):
    """Weighted pixel-wise cross-entropy loss for the second masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
        with values from 0 to 1.
    num_classes = 1
    """
    # Merge batch and ROI dimensions into one.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    true_shape = tf.shape(target_masks)
    target_masks = K.reshape(target_masks,
                             (-1, true_shape[2], true_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(pred_masks,
                           (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Reorder to [N, num_classes, height, width] so a (roi, class) pair
    # indexes one predicted mask.
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
    # Only positive ROIs contribute, each with its ground-truth class mask.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    gather_ix = tf.stack([positive_ix, positive_class_ids], axis=1)
    # Select the contributing masks; each of y_true/y_pred is [_, H, W].
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, gather_ix)
    # Weighted pixel-wise cross entropy (foreground pixels are weighted
    # more heavily than background); 0 when there are no positive ROIs.
    loss = K.switch(tf.size(y_true) > 0,
                    weighted_pixelwise_crossentropy(y_true=y_true, y_pred=y_pred),
                    tf.constant(0.0))
    return loss
def weighted_pixelwise_crossentropy(y_true, y_pred, w_pos=1.2, w_neg=0.8):
    """Class-weighted binary cross-entropy, averaged over all elements.

    y_true: tensor of 0/1 ground truth values (float).
    y_pred: tensor of predicted probabilities in [0, 1], same shape.
    w_pos: weight applied to the foreground (y_true == 1) term.
           Default 1.2 preserves the previous hard-coded behavior.
    w_neg: weight applied to the background (y_true == 0) term.
           Default 0.8 preserves the previous hard-coded behavior.

    Returns a scalar loss tensor.
    """
    # Clip predictions away from exactly 0/1 so the logs stay finite.
    # (1e-7 is the same value the previous `10e-8` literal spelled out.)
    epsilon = tf.cast(1e-7, y_pred.dtype.base_dtype)
    y_pred = tf.clip_by_value(y_pred, epsilon, 1. - epsilon)
    # Foreground and background log-likelihood terms.
    pos_term = tf.multiply(y_true, tf.log(y_pred))
    neg_term = tf.multiply(tf.subtract(tf.ones_like(y_true), y_true),
                           tf.log(tf.subtract(tf.ones_like(y_pred), y_pred)))
    # Weighted negative log-likelihood, averaged over every element.
    return tf.negative(tf.reduce_mean(
        tf.add(tf.multiply(pos_term, w_pos), tf.multiply(neg_term, w_neg))))
############################################################
# Data Generator
############################################################
def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, masks, bounding boxes).

    dataset: a Dataset object providing load_image()/load_mask().
    config: the model config object.
    image_id: integer id of the image within `dataset`.
    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically,
        224x224 and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.
    Returns:
    image: [height, width, 3]
    image_meta: image details vector. See compose_image_meta().
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    mask2: [height, width, instance_count]. Second set of ground truth
        masks with the same layout as `mask`.
    """
    # Load image and mask
    # NOTE(review): this dataset variant returns two mask stacks per image;
    # both are resized/augmented identically below.
    image = dataset.load_image(image_id)
    mask, mask2, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    # Apply the same scale/padding/crop transform to both mask stacks so
    # they stay aligned with the resized image.
    mask = utils.resize_mask(mask, scale, padding, crop)
    mask2 = utils.resize_mask(mask2, scale, padding, crop)
    # Random horizontal flips.
    # TODO: will be removed in a future update in favor of augmentation
    if augment:
        logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)
            mask2 = np.fliplr(mask2)
    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug
        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]
        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS
        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support np.bool
        mask = det.augment_image(mask.astype(np.uint8), hooks=imgaug.HooksImages(activator=hook))
        mask2 = det.augment_image(mask2.astype(np.uint8), hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change (mask2 has the same shape as mask
        # by construction, so comparing it to mask_shape is valid).
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        assert mask2.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool
        mask = mask.astype(np.bool)
        mask2 = mask2.astype(np.bool)
    # Note that some boxes might be all zeros if the corresponding mask got cropped out.
    # and here is to filter them out
    # NOTE(review): instances are kept/dropped based on the *first* mask
    # stack only; mask2 is filtered with the same indices to stay aligned.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    mask2 = mask2[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)
    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1
    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        # Both mini-mask stacks are cropped with the bboxes derived from mask.
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)
        mask2 = utils.minimize_mask(bbox, mask2, config.MINI_MASK_SHAPE)
    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)
    return image, image_meta, class_ids, bbox, mask, mask2
def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, gt_mask2, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.
    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.
    gt_mask2: [height, width, instance count] Second set of ground truth
              masks, same layout as gt_masks.
    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class specific masks cropped
           to bbox boundaries and resized to neural network output size.
    masks2: same layout as masks, built from gt_mask2.
    """
    # FIX: the body below refers to the last mask argument as `gt_masks2`,
    # which previously raised a NameError whenever this function ran. Bind
    # the alias up front; the signature is kept for backward compatibility.
    gt_masks2 = gt_mask2
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)
    assert gt_masks2.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks2.dtype)
    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.
    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]
    gt_masks2 = gt_masks2[:, :, instance_ids]
    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])
    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)
    # Assign each ROI to the GT box it overlaps most.
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]
    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]
    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]
    bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
    # Subsample ROIs. Aim for 33% foreground.
    # FG
    fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)
    if fg_ids.shape[0] > fg_roi_count:
        keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)
    else:
        keep_fg_ids = fg_ids
    # BG
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]
    if bg_ids.shape[0] > remaining:
        keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
    else:
        keep_bg_ids = bg_ids
    # Combine indices of ROIs to keep
    keep = np.concatenate([keep_fg_ids, keep_bg_ids])
    # Need more?
    remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]
    if remaining > 0:
        # Looks like we don't have enough samples to maintain the desired
        # balance. Reduce requirements and fill in the rest. This is
        # likely different from the Mask RCNN paper.
        # There is a small chance we have neither fg nor bg samples.
        if keep.shape[0] == 0:
            # Pick bg regions with easier IoU threshold
            bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]
            assert bg_ids.shape[0] >= remaining
            keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)
            assert keep_bg_ids.shape[0] == remaining
            keep = np.concatenate([keep, keep_bg_ids])
        else:
            # Fill the rest with repeated bg rois.
            keep_extra_ids = np.random.choice(
                keep_bg_ids, remaining, replace=True)
            keep = np.concatenate([keep, keep_extra_ids])
    assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \
        "keep doesn't match ROI batch size {}, {}".format(
            keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)
    # Reset the gt boxes assigned to BG ROIs.
    rpn_roi_gt_boxes[keep_bg_ids, :] = 0
    rpn_roi_gt_class_ids[keep_bg_ids] = 0
    # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.
    rois = rpn_rois[keep]
    roi_gt_boxes = rpn_roi_gt_boxes[keep]
    roi_gt_class_ids = rpn_roi_gt_class_ids[keep]
    roi_gt_assignment = rpn_roi_iou_argmax[keep]
    # Class-aware bbox deltas. [y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV
    # Generate class-specific target masks for both mask heads.
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES), dtype=np.float32)
    masks2 = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES), dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]
        class_mask2 = gt_masks2[:, :, gt_id]
        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            placeholder2 = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            placeholder2[gt_y1:gt_y2, gt_x1:gt_x2] = np.round(utils.resize(class_mask2, (gt_h, gt_w))).astype(bool)
            # Place the mini batch in the placeholder
            class_mask = placeholder
            class_mask2 = placeholder2
        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        m2 = class_mask2[y1:y2, x1:x2]
        mask = utils.resize(m, config.MASK_SHAPE)
        mask2 = utils.resize(m2, config.MASK_SHAPE)
        masks[i, :, :, class_id] = mask
        masks2[i, :, :, class_id] = mask2
    return rois, roi_gt_class_ids, bboxes, masks, masks2
def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    image_shape: [height, width, depth] of the image (unused directly here).
    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs. Negative IDs mark
        COCO "crowd" boxes, which are excluded from training below.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]
    config: the model config object.
    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
        Only the first `sum(rpn_match == 1)` rows are filled; the rest
        stay zero padding.

    NOTE: uses np.random for subsampling, so results depend on the
    global NumPy RNG state.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))
    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        # Anchors that barely touch any crowd box may still be negatives.
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)
    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)
    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    # matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    # If multiple anchors have the same IoU match all of them
    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1
    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]
        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w
        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1
    return rpn_match, rpn_bbox
def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs (unused here).
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.
    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.

    NOTE: uses np.random, so results depend on the global NumPy RNG state.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)
    # Generate random ROIs around GT boxes (90% of count)
    rois_per_box = int(0.9 * count / gt_boxes.shape[0])
    for i in range(gt_boxes.shape[0]):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
        h = gt_y2 - gt_y1
        w = gt_x2 - gt_x1
        # random boundaries: sample within a window 3x the box size,
        # clipped to the image bounds.
        r_y1 = max(gt_y1 - h, 0)
        r_y2 = min(gt_y2 + h, image_shape[0])
        r_x1 = max(gt_x1 - w, 0)
        r_x2 = min(gt_x2 + w, image_shape[1])
        # To avoid generating boxes with zero area, we generate double what
        # we need and filter out the extra. If we get fewer valid boxes
        # than we need, we loop and try again.
        # NOTE(review): if the sampling window is degenerate (e.g. a 1-pixel
        # wide GT box at the image edge) this loop could retry many times —
        # presumably inputs avoid that; worth confirming.
        while True:
            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
            # Filter out zero area boxes
            threshold = 1
            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                        threshold][:rois_per_box]
            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                        threshold][:rois_per_box]
            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                break
        # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
        # into x1, y1, x2, y2 order
        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
        box_rois = np.hstack([y1, x1, y2, x2])
        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois
    # Generate random ROIs anywhere in the image (10% of count)
    remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break
    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into x1, y1, x2, y2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois
def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False,
                   no_augmentation_sources=None):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
    no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is string that identifies a dataset and is
        defined in the Dataset class.
    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differs depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)
    - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs
    - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]
    - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width
                are those of the image unless use_mini_mask is True, in which
                case they are defined in MINI_MASK_SHAPE.
    - gt_masks2: [batch, height, width, MAX_GT_INSTANCES]. Second set of
                 ground truth masks, same layout as gt_masks.
    outputs list: Usually empty in regular training. But if detection_targets
        is True then the outputs list contains target class_ids, bbox deltas,
        and masks.
    """
    b = 0  # batch item index
    image_index = -1
    image_ids = np.copy(dataset.image_ids)
    error_count = 0
    no_augmentation_sources = no_augmentation_sources or []
    # Anchors
    # [anchor_count, (y1, x1, y2, x2)]
    backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)
    anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,
                                             config.RPN_ANCHOR_RATIOS,
                                             backbone_shapes,
                                             config.BACKBONE_STRIDES,
                                             config.RPN_ANCHOR_STRIDE)
    # Keras requires a generator to run indefinitely.
    while True:
        try:
            # Increment index to pick next image. Shuffle if at the start of an epoch.
            image_index = (image_index + 1) % len(image_ids)
            if shuffle and image_index == 0:
                np.random.shuffle(image_ids)
            # Get GT bounding boxes and masks for image.
            image_id = image_ids[image_index]
            # If the image source is not to be augmented pass None as augmentation
            if dataset.image_info[image_id]['source'] in no_augmentation_sources:
                image, image_meta, gt_class_ids, gt_boxes, gt_masks, gt_masks2 = \
                    load_image_gt(dataset, config, image_id, augment=augment,
                                  augmentation=None,
                                  use_mini_mask=config.USE_MINI_MASK)
            else:
                image, image_meta, gt_class_ids, gt_boxes, gt_masks, gt_masks2 = \
                    load_image_gt(dataset, config, image_id, augment=augment,
                                  augmentation=augmentation,
                                  use_mini_mask=config.USE_MINI_MASK)
            # Skip images that have no instances. This can happen in cases
            # where we train on a subset of classes and the image doesn't
            # have any of the classes we care about.
            if not np.any(gt_class_ids > 0):
                continue
            # RPN Targets
            rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,
                                                    gt_class_ids, gt_boxes, config)
            # Mask R-CNN Targets
            if random_rois:
                rpn_rois = generate_random_rois(
                    image.shape, random_rois, gt_class_ids, gt_boxes)
                if detection_targets:
                    # FIX: previously passed the undefined name `gt_mask2`
                    # (the local variable is `gt_masks2`), which raised a
                    # NameError whenever detection targets were requested.
                    rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask, mrcnn_mask2 =\
                        build_detection_targets(
                            rpn_rois, gt_class_ids, gt_boxes, gt_masks, gt_masks2, config)
            # Init batch arrays on the first item of each batch.
            if b == 0:
                batch_image_meta = np.zeros(
                    (batch_size,) + image_meta.shape, dtype=image_meta.dtype)
                batch_rpn_match = np.zeros(
                    [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)
                batch_rpn_bbox = np.zeros(
                    [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)
                batch_images = np.zeros(
                    (batch_size,) + image.shape, dtype=np.float32)
                batch_gt_class_ids = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)
                batch_gt_boxes = np.zeros(
                    (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)
                batch_gt_masks = np.zeros(
                    (batch_size, gt_masks.shape[0], gt_masks.shape[1],
                     config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)
                batch_gt_masks2 = np.zeros(
                    (batch_size, gt_masks2.shape[0], gt_masks2.shape[1],
                     config.MAX_GT_INSTANCES), dtype=gt_masks2.dtype)
                if random_rois:
                    batch_rpn_rois = np.zeros(
                        (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)
                    if detection_targets:
                        batch_rois = np.zeros(
                            (batch_size,) + rois.shape, dtype=rois.dtype)
                        batch_mrcnn_class_ids = np.zeros(
                            (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)
                        batch_mrcnn_bbox = np.zeros(
                            (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)
                        batch_mrcnn_mask = np.zeros(
                            (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)
                        batch_mrcnn_mask2 = np.zeros(
                            (batch_size,) + mrcnn_mask2.shape, dtype=mrcnn_mask2.dtype)
            # If more instances than fits in the array, sub-sample from them.
            if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:
                ids = np.random.choice(
                    np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)
                gt_class_ids = gt_class_ids[ids]
                gt_boxes = gt_boxes[ids]
                gt_masks = gt_masks[:, :, ids]
                gt_masks2 = gt_masks2[:, :, ids]
            # Add to batch
            batch_image_meta[b] = image_meta
            batch_rpn_match[b] = rpn_match[:, np.newaxis]
            batch_rpn_bbox[b] = rpn_bbox
            batch_images[b] = mold_image(image.astype(np.float32), config)
            batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids
            batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes
            batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks
            batch_gt_masks2[b, :, :, :gt_masks2.shape[-1]] = gt_masks2
            if random_rois:
                batch_rpn_rois[b] = rpn_rois
                if detection_targets:
                    batch_rois[b] = rois
                    batch_mrcnn_class_ids[b] = mrcnn_class_ids
                    batch_mrcnn_bbox[b] = mrcnn_bbox
                    batch_mrcnn_mask[b] = mrcnn_mask
                    batch_mrcnn_mask2[b] = mrcnn_mask2
            b += 1
            # Batch full?
            if b >= batch_size:
                inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,
                          batch_gt_class_ids, batch_gt_boxes, batch_gt_masks, batch_gt_masks2]
                outputs = []
                if random_rois:
                    inputs.extend([batch_rpn_rois])
                    if detection_targets:
                        inputs.extend([batch_rois])
                        # Keras requires that output and targets have the same number of dimensions
                        batch_mrcnn_class_ids = np.expand_dims(
                            batch_mrcnn_class_ids, -1)
                        outputs.extend(
                            [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask, batch_mrcnn_mask2])
                yield inputs, outputs
                # start a new batch
                b = 0
        except (GeneratorExit, KeyboardInterrupt):
            raise
        except Exception:
            # Log it and skip the image. `except Exception` (not bare
            # `except:`) so SystemExit and friends still propagate.
            logging.exception("Error processing image {}".format(
                dataset.image_info[image_id]))
            error_count += 1
            if error_count > 5:
                raise
############################################################
# MaskRCNN Class
############################################################
class MaskRCNN():
"""Encapsulates the Mask RCNN model functionality.
The actual Keras model is in the keras_model property.
"""
def __init__(self, mode, config, model_dir):
"""
mode: Either "training" or "inference"
config: A Sub-class of the Config class
model_dir: Directory to save training logs and trained weights
"""
assert mode in ['training', 'inference']
self.mode = mode
self.config = config
self.model_dir = model_dir
self.set_log_dir()
self.keras_model = self.build(mode=mode, config=config)
    def build(self, mode, config):
        """Build the Mask R-CNN architecture, including the extra cascaded
        mask branch (mrcnn_mask2) added in this fork.

        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        config: A Sub-class of the Config class (image shape, backbone,
            head sizes, etc.).

        Returns a Keras Model; wrapped in ParallelModel when GPU_COUNT > 1.
        """
        assert mode in ['training', 'inference']
        # Image size must be dividable by 2 multiple times
        # (the FPN downsamples 6 times, so both sides must be multiples of 64).
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be dividable by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling."
                            "For example, use 256, 320, 384, 448, 512, ... etc. ")
        # Inputs
        input_image = KL.Input(
            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)
            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            # Two parallel mask inputs: the second feeds the cascaded branch.
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
                input_gt_masks2 = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks2", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
                input_gt_masks2 = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks2", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")
        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Don't create the thead (stage 5), so we pick the 4th item in the list.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)
        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]
        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
        else:
            anchors = input_anchors
        # RPN Model
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [KL.Concatenate(axis=1, name=n)(list(o))
                   for o, n in zip(outputs, output_names)]
        rpn_class_logits, rpn_class, rpn_bbox = outputs
        # Generate proposals
        # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
        # and zero padded.
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training"\
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(
            proposal_count=proposal_count,
            nms_threshold=config.RPN_NMS_THRESHOLD,
            name="ROI",
            config=config)([rpn_class, rpn_bbox, anchors])
        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset the image
            # came from.
            active_class_ids = KL.Lambda(
                lambda x: parse_image_meta_graph(x)["active_class_ids"]
            )(input_image_meta)
            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use ROIs provided as an input.
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi", dtype=np.int32)
                # Normalize coordinates
                target_rois = KL.Lambda(lambda x: norm_boxes_graph(
                    x, K.shape(input_image)[1:3]))(input_rois)
            else:
                target_rois = rpn_rois
            # Generate detection targets
            # Subsamples proposals and generates target outputs for training
            # Note that proposal class IDs, gt_boxes, and gt_masks are zero
            # padded. Equally, returned rois and targets are zero padded.
            rois, target_class_ids, target_bbox, target_mask, target_mask2 =\
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks, input_gt_masks2])
            # Network Heads
            # TODO: verify that this handles zero padded ROIs
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
            mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)
            # Added Cascaded Mask Branch
            mrcnn_mask2 = build_fpn_mask2_graph(rois, mrcnn_feature_maps,
                                                input_image_meta,
                                                config.MASK_POOL_SIZE,
                                                train_bn=config.TRAIN_BN)
            # TODO: clean up (use tf.identify if necessary)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)
            # Losses
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name="rpn_class_loss")(
                [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name="rpn_bbox_loss")(
                [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])
            # Loss Corresponding to the newly created Cascaded Branch
            mask_loss2 = KL.Lambda(lambda x: mrcnn_mask2_loss_graph(*x), name="mrcnn_mask2_loss")(
                [target_mask2, target_class_ids, mrcnn_mask2])
            # Model
            inputs = [input_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks, input_gt_masks2]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask, mrcnn_mask2,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss, mask_loss2]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)
            # Detections
            # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
            # normalized coordinates
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])
            # Create masks for detections
            detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
            mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,
                                              input_image_meta,
                                              config.MASK_POOL_SIZE,
                                              config.NUM_CLASSES,
                                              train_bn=config.TRAIN_BN)
            # Added Cascaded Mask Branch
            mrcnn_mask2 = build_fpn_mask2_graph(detection_boxes, mrcnn_feature_maps,
                                                input_image_meta,
                                                config.MASK_POOL_SIZE,
                                                train_bn=config.TRAIN_BN)
            # NOTE: inference model emits 8 outputs; consumers must unpack all
            # of them (see detect()).
            model = KM.Model([input_image, input_image_meta, input_anchors],
                             [detections, mrcnn_class, mrcnn_bbox,
                                 mrcnn_mask, mrcnn_mask2, rpn_rois, rpn_class, rpn_bbox],
                             name='mask_rcnn')
        # Add multi-GPU support.
        if config.GPU_COUNT > 1:
            from mrcnn.parallel_model import ParallelModel
            model = ParallelModel(model, config.GPU_COUNT)
        return model
def find_last(self):
"""Finds the last checkpoint file of the last trained model in the
model directory.
Returns:
The path of the last checkpoint file
"""
# Get directory names. Each directory corresponds to a model
dir_names = next(os.walk(self.model_dir))[1]
key = self.config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
# Pick last directory
dir_name = os.path.join(self.model_dir, dir_names[-1])
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
import errno
raise FileNotFoundError(
errno.ENOENT, "Could not find weight files in {}".format(dir_name))
checkpoint = os.path.join(dir_name, checkpoints[-1])
return checkpoint
    def load_weights(self, filepath, by_name=False, exclude=None):
        """Modified version of the corresponding Keras function with
        the addition of multi-GPU support and the ability to exclude
        some layers from loading.

        filepath: path to an HDF5 weights file.
        by_name: load weights by layer name instead of topological order.
        exclude: list of layer names to exclude (implies by_name=True).
        """
        import h5py
        # Conditional import to support versions of Keras before 2.2
        # TODO: remove in about 6 months (end of 2018)
        try:
            from keras.engine import saving
        except ImportError:
            # Keras before 2.2 used the 'topology' namespace.
            from keras.engine import topology as saving
        if exclude:
            by_name = True
        if h5py is None:
            raise ImportError('`load_weights` requires h5py.')
        f = h5py.File(filepath, mode='r')
        # Full-model HDF5 saves nest the weights under 'model_weights';
        # weights-only saves have 'layer_names' at the top level.
        if 'layer_names' not in f.attrs and 'model_weights' in f:
            f = f['model_weights']
        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        keras_model = self.keras_model
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers
        # Exclude some layers
        if exclude:
            layers = filter(lambda l: l.name not in exclude, layers)
        if by_name:
            saving.load_weights_from_hdf5_group_by_name(f, layers)
        else:
            saving.load_weights_from_hdf5_group(f, layers)
        # f may have been rebound to an h5py Group above, which has no close().
        if hasattr(f, 'close'):
            f.close()
        # Update the log directory
        self.set_log_dir(filepath)
def get_imagenet_weights(self):
"""Downloads ImageNet trained weights from Keras.
Returns path to weights file.
"""
from keras.utils.data_utils import get_file
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\
'releases/download/v0.2/'\
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
return weights_path
    def compile(self, learning_rate, momentum):
        """Gets the model ready for training. Adds losses, regularization, and
        metrics. Then calls the Keras compile() function.

        learning_rate: SGD learning rate.
        momentum: SGD momentum.
        """
        # Optimizer object
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
        # Add Losses
        # First, clear previously set losses to avoid duplication
        # NOTE(review): this pokes Keras-private attributes and is tied to
        # the Keras version this code targets.
        self.keras_model._losses = []
        self.keras_model._per_input_losses = {}
        # Six loss layers: the standard five plus the cascaded mask branch.
        loss_names = [
            "rpn_class_loss", "rpn_bbox_loss",
            "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss", "mrcnn_mask2_loss"]
        for name in loss_names:
            layer = self.keras_model.get_layer(name)
            if layer.output in self.keras_model.losses:
                continue
            # Weight each loss by the per-loss factor from the config.
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.add_loss(loss)
        # Add L2 Regularization
        # Skip gamma and beta weights of batch normalization layers.
        reg_losses = [
            keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
            for w in self.keras_model.trainable_weights
            if 'gamma' not in w.name and 'beta' not in w.name]
        self.keras_model.add_loss(tf.add_n(reg_losses))
        # Compile
        # The losses were added via add_loss above, so pass None per output.
        self.keras_model.compile(
            optimizer=optimizer,
            loss=[None] * len(self.keras_model.outputs))
        # Add metrics for losses
        for name in loss_names:
            if name in self.keras_model.metrics_names:
                continue
            layer = self.keras_model.get_layer(name)
            self.keras_model.metrics_names.append(name)
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.metrics_tensors.append(loss)
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
"""Sets model layers as trainable if their names match
the given regular expression.
"""
# Print message on the first call (but not on recursive calls)
if verbose > 0 and keras_model is None:
log("Selecting layers to train")
keras_model = keras_model or self.keras_model
# In multi-GPU training, we wrap the model. Get layers
# of the inner model because they have the weights.
layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
else keras_model.layers
for layer in layers:
# Is the layer a model?
if layer.__class__.__name__ == 'Model':
print("In model: ", layer.name)
self.set_trainable(
layer_regex, keras_model=layer, indent=indent + 4)
continue
if not layer.weights:
continue
# Is it trainable?
trainable = bool(re.fullmatch(layer_regex, layer.name))
# Update layer. If layer is a container, update inner layer.
if layer.__class__.__name__ == 'TimeDistributed':
layer.layer.trainable = trainable
else:
layer.trainable = trainable
# Print trainable layer names
if trainable and verbose > 0:
log("{}{:20} ({})".format(" " * indent, layer.name,
layer.__class__.__name__))
def set_log_dir(self, model_path=None):
"""Sets the model log directory and epoch counter.
model_path: If None, or a format different from what this code uses
then set a new log directory and start epochs from 0. Otherwise,
extract the log directory and the epoch counter from the file
name.
"""
# Set date and epoch counter as if starting a new model
self.epoch = 0
now = datetime.datetime.now()
# If we have a model path with date and epochs use them
if model_path:
# Continue from we left of. Get epoch and date from the file name
# A sample model path might look like:
# \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
# /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
m = re.match(regex, model_path)
if m:
now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
int(m.group(4)), int(m.group(5)))
# Epoch number in file is 1-based, and in Keras code it's 0-based.
# So, adjust for that then increment by one to start from the next epoch
self.epoch = int(m.group(6)) - 1 + 1
print('Re-starting from epoch %d' % self.epoch)
# Directory for training logs
self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
self.config.NAME.lower(), now))
# Path to save after each epoch. Include placeholders that get filled by Keras.
self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
self.config.NAME.lower()))
self.checkpoint_path = self.checkpoint_path.replace(
"*epoch*", "{epoch:04d}")
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
"""Train the model.
train_dataset, val_dataset: Training and validation Dataset objects.
learning_rate: The learning rate to train with
epochs: Number of training epochs. Note that previous training epochs
are considered to be done alreay, so this actually determines
the epochs to train in total rather than in this particaular
call.
layers: Allows selecting wich layers to train. It can be:
- A regular expression to match layer names to train
- One of these predefined values:
heads: The RPN, classifier and mask heads of the network
all: All the layers
3+: Train Resnet stage 3 and up
4+: Train Resnet stage 4 and up
5+: Train Resnet stage 5 and up
augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
flips images right/left 50% of the time. You can pass complex
augmentations as well. This augmentation applies 50% of the
time, and when it does it flips images right/left half the time
and adds a Gaussian blur with a random sigma in range 0 to 5.
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
])
custom_callbacks: Optional. Add custom callbacks to be called
with the keras fit_generator method. Must be list of type keras.callbacks.
no_augmentation_sources: Optional. List of sources to exclude for
augmentation. A source is string that identifies a dataset and is
defined in the Dataset class.
"""
assert self.mode == "training", "Create model in training mode."
# Pre-defined layer regular expressions
layer_regex = {
# all layers but the backbone
"heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# From a specific Resnet stage and up
"3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
"5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
# All layers
"all": ".*",
}
if layers in layer_regex.keys():
layers = layer_regex[layers]
# Data generators
train_generator = data_generator(train_dataset, self.config, shuffle=True,
augmentation=augmentation,
batch_size=self.config.BATCH_SIZE,
no_augmentation_sources=no_augmentation_sources)
val_generator = data_generator(val_dataset, self.config, shuffle=True,
batch_size=self.config.BATCH_SIZE)
# Create log_dir if it does not exist
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# Callbacks
callbacks = [
keras.callbacks.TensorBoard(log_dir=self.log_dir,
histogram_freq=0, write_graph=True, write_images=False),
keras.callbacks.ModelCheckpoint(self.checkpoint_path,
verbose=0, save_weights_only=True),
]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# Train
log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
log("Checkpoint Path: {}".format(self.checkpoint_path))
self.set_trainable(layers)
self.compile(learning_rate, self.config.LEARNING_MOMENTUM)
# Work-around for Windows: Keras fails on Windows when using
# multiprocessing workers. See discussion here:
# https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
if os.name is 'nt':
workers = 0
else:
workers = multiprocessing.cpu_count()
self.keras_model.fit_generator(
train_generator,
initial_epoch=self.epoch,
epochs=epochs,
steps_per_epoch=self.config.STEPS_PER_EPOCH,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=self.config.VALIDATION_STEPS,
max_queue_size=100,
workers=workers,
use_multiprocessing=True,
)
self.epoch = max(self.epoch, epochs)
def mold_inputs(self, images):
"""Takes a list of images and modifies them to the format expected
as an input to the neural network.
images: List of image matrices [height,width,depth]. Images can have
different sizes.
Returns 3 Numpy matrices:
molded_images: [N, h, w, 3]. Images resized and normalized.
image_metas: [N, length of meta data]. Details about each image.
windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
original image (padding excluded).
"""
molded_images = []
image_metas = []
windows = []
for image in images:
# Resize image
# TODO: move resizing to mold_image()
molded_image, window, scale, padding, crop = utils.resize_image(
image,
min_dim=self.config.IMAGE_MIN_DIM,
min_scale=self.config.IMAGE_MIN_SCALE,
max_dim=self.config.IMAGE_MAX_DIM,
mode=self.config.IMAGE_RESIZE_MODE)
molded_image = mold_image(molded_image, self.config)
# Build image_meta
image_meta = compose_image_meta(
0, image.shape, molded_image.shape, window, scale,
np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
# Append
molded_images.append(molded_image)
windows.append(window)
image_metas.append(image_meta)
# Pack into arrays
molded_images = np.stack(molded_images)
image_metas = np.stack(image_metas)
windows = np.stack(windows)
return molded_images, image_metas, windows
    def unmold_detections(self, detections, mrcnn_mask, mrcnn_mask2, original_image_shape,
                          image_shape, window):
        """Reformats the detections of one image from the format of the neural
        network output to a format suitable for use in the rest of the
        application.

        detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates
        mrcnn_mask: [N, height, width, num_classes]
        mrcnn_mask2: cascaded-branch masks, indexed per class like mrcnn_mask
            (assumed same [N, height, width, num_classes] layout — TODO confirm
            against build_fpn_mask2_graph)
        original_image_shape: [H, W, C] Original image shape before resizing
        image_shape: [H, W, C] Shape of the image after resizing and padding
        window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real
                image is excluding the padding.

        Returns:
        boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels
        class_ids: [N] Integer class IDs for each bounding box
        scores: [N] Float probability scores of the class_id
        masks: [height, width, num_instances] Instance masks
        masks2: [height, width, num_instances] Instance masks (cascaded branch)
        """
        # How many detections do we have?
        # Detections array is padded with zeros. Find the first class_id == 0.
        zero_ix = np.where(detections[:, 4] == 0)[0]
        N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
        # Extract boxes, class_ids, scores, and class-specific masks
        boxes = detections[:N, :4]
        class_ids = detections[:N, 4].astype(np.int32)
        scores = detections[:N, 5]
        # Pick each instance's mask channel for its predicted class.
        masks = mrcnn_mask[np.arange(N), :, :, class_ids]
        masks2 = mrcnn_mask2[np.arange(N), :, :, class_ids]
        # Translate normalized coordinates in the resized image to pixel
        # coordinates in the original image before resizing
        window = utils.norm_boxes(window, image_shape[:2])
        wy1, wx1, wy2, wx2 = window
        shift = np.array([wy1, wx1, wy1, wx1])
        wh = wy2 - wy1  # window height
        ww = wx2 - wx1  # window width
        scale = np.array([wh, ww, wh, ww])
        # Convert boxes to normalized coordinates on the window
        boxes = np.divide(boxes - shift, scale)
        # Convert boxes to pixel coordinates on the original image
        boxes = utils.denorm_boxes(boxes, original_image_shape[:2])
        # Filter out detections with zero area. Happens in early training when
        # network weights are still random
        exclude_ix = np.where(
            (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
        if exclude_ix.shape[0] > 0:
            boxes = np.delete(boxes, exclude_ix, axis=0)
            class_ids = np.delete(class_ids, exclude_ix, axis=0)
            scores = np.delete(scores, exclude_ix, axis=0)
            masks = np.delete(masks, exclude_ix, axis=0)
            masks2 = np.delete(masks2, exclude_ix, axis=0)
            N = class_ids.shape[0]
        # Resize masks to original image size and set boundary threshold.
        full_masks = []
        full_masks2 = []
        for i in range(N):
            # Convert neural network mask to full size mask
            full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)
            full_mask2 = utils.unmold_mask(masks2[i], boxes[i], original_image_shape)
            full_masks.append(full_mask)
            full_masks2.append(full_mask2)
        # Stack into [H, W, N]; empty [H, W, 0] arrays when nothing survived.
        full_masks = np.stack(full_masks, axis=-1) if full_masks else np.empty(original_image_shape[:2] + (0,))
        full_masks2 = np.stack(full_masks2, axis=-1) if full_masks2 else np.empty(original_image_shape[:2] + (0,))
        return boxes, class_ids, scores, full_masks, full_masks2
def detect(self, images, verbose=0):
"""Runs the detection pipeline.
images: List of images, potentially of different sizes.
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(
images) == self.config.BATCH_SIZE, "len(images) must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(images)))
for image in images:
log("image", image)
# Mold inputs to format expected by the neural network
molded_images, image_metas, windows = self.mold_inputs(images)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape,\
"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes."
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, mrcnn_mask2, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(images):
final_rois, final_class_ids, final_scores, final_masks, final_masks2 =\
self.unmold_detections(detections[i], mrcnn_mask[i], mrcnn_mask2[i],
image.shape, molded_images[i].shape,
windows[i])
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
"masks2": final_masks2,
})
return results
def detect_molded(self, molded_images, image_metas, verbose=0):
"""Runs the detection pipeline, but expect inputs that are
molded already. Used mostly for debugging and inspecting
the model.
molded_images: List of images loaded using load_image_gt()
image_metas: image meta data, also returned by load_image_gt()
Returns a list of dicts, one dict per image. The dict contains:
rois: [N, (y1, x1, y2, x2)] detection bounding boxes
class_ids: [N] int class IDs
scores: [N] float probability scores for the class IDs
masks: [H, W, N] instance binary masks
"""
assert self.mode == "inference", "Create model in inference mode."
assert len(molded_images) == self.config.BATCH_SIZE,\
"Number of images must be equal to BATCH_SIZE"
if verbose:
log("Processing {} images".format(len(molded_images)))
for image in molded_images:
log("image", image)
# Validate image sizes
# All images in a batch MUST be of the same size
image_shape = molded_images[0].shape
for g in molded_images[1:]:
assert g.shape == image_shape, "Images must have the same size"
# Anchors
anchors = self.get_anchors(image_shape)
# Duplicate across the batch dimension because Keras requires it
# TODO: can this be optimized to avoid duplicating the anchors?
anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
if verbose:
log("molded_images", molded_images)
log("image_metas", image_metas)
log("anchors", anchors)
# Run object detection
detections, _, _, mrcnn_mask, _, _, _ =\
self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)
# Process detections
results = []
for i, image in enumerate(molded_images):
window = [0, 0, image.shape[0], image.shape[1]]
final_rois, final_class_ids, final_scores, final_masks =\
self.unmold_detections(detections[i], mrcnn_mask[i],
image.shape, molded_images[i].shape,
window)
results.append({
"rois": final_rois,
"class_ids": final_class_ids,
"scores": final_scores,
"masks": final_masks,
})
return results
def get_anchors(self, image_shape):
"""Returns anchor pyramid for the given image size."""
backbone_shapes = compute_backbone_shapes(self.config, image_shape)
# Cache anchors and reuse if image shape is the same
if not hasattr(self, "_anchor_cache"):
self._anchor_cache = {}
if not tuple(image_shape) in self._anchor_cache:
# Generate Anchors
a = utils.generate_pyramid_anchors(
self.config.RPN_ANCHOR_SCALES,
self.config.RPN_ANCHOR_RATIOS,
backbone_shapes,
self.config.BACKBONE_STRIDES,
self.config.RPN_ANCHOR_STRIDE)
# Keep a copy of the latest anchors in pixel coordinates because
# it's used in inspect_model notebooks.
# TODO: Remove this after the notebook are refactored to not use it
self.anchors = a
# Normalize coordinates
self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])
return self._anchor_cache[tuple(image_shape)]
def ancestor(self, tensor, name, checked=None):
"""Finds the ancestor of a TF tensor in the computation graph.
tensor: TensorFlow symbolic tensor.
name: Name of ancestor tensor to find
checked: For internal use. A list of tensors that were already
searched to avoid loops in traversing the graph.
"""
checked = checked if checked is not None else []
# Put a limit on how deep we go to avoid very long loops
if len(checked) > 500:
return None
# Convert name to a regex and allow matching a number prefix
# because Keras adds them automatically
if isinstance(name, str):
name = re.compile(name.replace("/", r"(\_\d+)*/"))
parents = tensor.op.inputs
for p in parents:
if p in checked:
continue
if bool(re.fullmatch(name, p.name)):
return p
checked.append(p)
a = self.ancestor(p, name, checked)
if a is not None:
return a
return None
def find_trainable_layer(self, layer):
"""If a layer is encapsulated by another layer, this function
digs through the encapsulation and returns the layer that holds
the weights.
"""
if layer.__class__.__name__ == 'TimeDistributed':
return self.find_trainable_layer(layer.layer)
return layer
def get_trainable_layers(self):
"""Returns a list of layers that have weights."""
layers = []
# Loop through all layers
for l in self.keras_model.layers:
# If layer is a wrapper, find inner trainable layer
l = self.find_trainable_layer(l)
# Include layer if it has weights
if l.get_weights():
layers.append(l)
return layers
    def run_graph(self, images, outputs, image_metas=None):
        """Runs a sub-set of the computation graph that computes the given
        outputs.
        image_metas: If provided, the images are assumed to be already
        molded (i.e. resized, padded, and normalized)
        outputs: List of tuples (name, tensor) to compute. The tensors are
        symbolic TensorFlow tensors and the names are for easy tracking.
        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model
        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None
        # Build a Keras function to run parts of the computation graph
        inputs = model.inputs
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        # NOTE(review): `inputs` is extended with the learning phase above but
        # K.function() is built from `model.inputs`, so `inputs` is unused —
        # confirm whether `kf = K.function(inputs, ...)` was intended.
        kf = K.function(model.inputs, list(outputs.values()))
        # Prepare inputs: mold raw images unless the caller already did so.
        if image_metas is None:
            molded_images, image_metas, _ = self.mold_inputs(images)
        else:
            molded_images = images
        image_shape = molded_images[0].shape
        # Anchors (in normalized coordinates, cached per image shape)
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        model_in = [molded_images, image_metas, anchors]
        # Run inference (append learning phase 0 = "test mode" when needed)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)
        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np
############################################################
# Data Formatting
############################################################
def compose_image_meta(image_id, original_image_shape, image_shape,
                       window, scale, active_class_ids):
    """Pack the attributes of one image into a single 1D array.

    image_id: int ID of the image (useful for debugging).
    original_image_shape: [H, W, C] before resizing or padding.
    image_shape: [H, W, C] after resizing and padding.
    window: (y1, x1, y2, x2) in pixels — the region of the padded image
        that contains the real image.
    scale: scaling factor applied to the original image (float32).
    active_class_ids: list of class_ids available in the source dataset.

    Returns a 1D numpy array of length 12 + num_classes, laid out as:
    [image_id(1), orig_shape(3), shape(3), window(4), scale(1), ids(N)].
    """
    parts = (
        [image_id],                   # size=1
        list(original_image_shape),   # size=3
        list(image_shape),            # size=3
        list(window),                 # size=4, (y1, x1, y2, x2) in image coordinates
        [scale],                      # size=1
        list(active_class_ids),       # size=num_classes
    )
    return np.array([value for part in parts for value in part])
def parse_image_meta(meta):
    """Split a batch of packed image-meta arrays into named components.

    See compose_image_meta() for the layout.
    meta: [batch, meta length] where meta length depends on NUM_CLASSES.

    Returns a dict of numpy arrays (int32 except "scale", which is float32).
    """
    return {
        "image_id": meta[:, 0].astype(np.int32),
        "original_image_shape": meta[:, 1:4].astype(np.int32),
        "image_shape": meta[:, 4:7].astype(np.int32),
        # (y1, x1, y2, x2) window of the real image, in pixels
        "window": meta[:, 7:11].astype(np.int32),
        "scale": meta[:, 11].astype(np.float32),
        "active_class_ids": meta[:, 12:].astype(np.int32),
    }
def parse_image_meta_graph(meta):
    """Split a packed image-meta tensor into named component tensors.

    Graph-mode counterpart of parse_image_meta(): the slices are returned
    as-is, with no dtype casts. See compose_image_meta() for the layout.
    meta: [batch, meta length] where meta length depends on NUM_CLASSES.

    Returns a dict of the parsed tensors.
    """
    parsed = {}
    parsed["image_id"] = meta[:, 0]
    parsed["original_image_shape"] = meta[:, 1:4]
    parsed["image_shape"] = meta[:, 4:7]
    # (y1, x1, y2, x2) window of the real image, in pixels
    parsed["window"] = meta[:, 7:11]
    parsed["scale"] = meta[:, 11]
    parsed["active_class_ids"] = meta[:, 12:]
    return parsed
def mold_image(images, config):
    """Normalize an RGB image (or batch of images) for the network.

    Converts to float32 and subtracts the dataset mean pixel
    (config.MEAN_PIXEL). Expects channels in RGB order.
    """
    as_float = images.astype(np.float32)
    return as_float - config.MEAN_PIXEL
def unmold_image(normalized_images, config):
    """Invert mold_image(): add back the mean pixel and return uint8."""
    restored = normalized_images + config.MEAN_PIXEL
    return restored.astype(np.uint8)
############################################################
# Miscellenous Graph Functions
############################################################
def trim_zeros_graph(boxes, name='trim_zeros'):
    """Drop all-zero rows (padding) from a [N, 4] box tensor.

    boxes: [N, 4] matrix of boxes, possibly padded with zero rows.

    Returns:
        trimmed: the non-zero rows of `boxes`.
        non_zeros: [N] boolean mask marking the rows that were kept.
    """
    # A row is "real" iff the sum of absolute coordinates is non-zero.
    row_sums = tf.reduce_sum(tf.abs(boxes), axis=1)
    non_zeros = tf.cast(row_sums, tf.bool)
    trimmed = tf.boolean_mask(boxes, non_zeros, name=name)
    return trimmed, non_zeros
def batch_pack_graph(x, counts, num_rows):
    """Concatenate a variable number of values from each row of x.

    For each of the first num_rows rows, takes the first counts[i] values
    and concatenates all of them into one flat tensor.
    """
    picked = [x[row, :counts[row]] for row in range(num_rows)]
    return tf.concat(picked, axis=0)
def norm_boxes_graph(boxes, shape):
    """Convert boxes from pixel coordinates to normalized coordinates.

    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates.
    shape: [..., (height, width)] in pixels.

    Note: in pixel coordinates (y2, x2) is outside the box, but in
    normalized coordinates it is inside the box.

    Returns [..., (y1, x1, y2, x2)] in normalized coordinates.
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Divisor is the last valid pixel index along each axis (extent - 1).
    span = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    # Shift the bottom-right corner inward by one pixel before scaling.
    offset = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - offset, span)
def denorm_boxes_graph(boxes, shape):
    """Convert boxes from normalized coordinates to pixel coordinates.

    boxes: [..., (y1, x1, y2, x2)] in normalized coordinates.
    shape: [..., (height, width)] in pixels.

    Note: in pixel coordinates (y2, x2) is outside the box, but in
    normalized coordinates it is inside the box.

    Returns [..., (y1, x1, y2, x2)] in pixel coordinates (int32).
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Multiplier is the last valid pixel index along each axis (extent - 1).
    span = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    # Shift the bottom-right corner back outside the box after scaling.
    offset = tf.constant([0., 0., 1., 1.])
    return tf.cast(tf.round(tf.multiply(boxes, span) + offset), tf.int32)
| [
"kanishgarg428@gmail.com"
] | kanishgarg428@gmail.com |
a5200b8cedea8db38dacd15c3aed4a169279d957 | f300750089a04502dcb6257ee0bd840637f7bc81 | /深度学习入门练习/matplotlib练习/imread.py | 2b0642e1c8740bc1315ef27aa248d39a8e992b32 | [] | no_license | flymysql/flypython | 84ecedc3a70a38f2ac926ca94cf211dbfa10a243 | 065da9a5fb0dce5793d757dbdebdc491c1b8e7e1 | refs/heads/master | 2020-04-05T17:58:51.100923 | 2019-06-03T07:16:36 | 2019-06-03T07:16:36 | 157,083,722 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | import matplotlib.pyplot as plt
from matplotlib.image import imread
# Minimal matplotlib demo: load an image from disk and display it.
img = imread('test.png') # read the image into an array
plt.imshow(img)
plt.show()
"flyphp@outlook.com"
] | flyphp@outlook.com |
880021a654f4a378b35d60a2a77a416c09380f13 | 9bdcef7dcac5b20ee065f6fc2d3d1e4bf9505b8b | /src/no_pml.py | c7d5937bd29ab717304af07a5ac43f2045149159 | [] | no_license | gernot-ohner/fdtd | 2242f493ba4f59a87dad1032e451b2e2607cf510 | b927c124bf0dd8d5d00c5d63c60239afcfcfff2b | refs/heads/master | 2023-08-28T08:20:39.050319 | 2021-10-23T15:57:02 | 2021-10-23T15:57:02 | 420,452,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,763 | py | import scipy.constants as const
def calculate_constants(dx, dy, dt):
    """
    Calculates the update-equation coefficients for a vacuum
    (permittivity epsilon_0, permeability mu_0).
    :param dx: float, spatial step in x
    :param dy: float, spatial step in y
    :param dt: float, time step
    :return: List[4 floats] -- [cex, cey, chzx, chzy]
    """
    eps = const.epsilon_0
    mu = const.mu_0
    return [
        dt / (eps * dy),  # cex
        dt / (eps * dx),  # cey
        dt / (mu * dx),   # chzx
        dt / (mu * dy),   # chzy
    ]
def evolution(nt, fields, constants, history, sourcepoint, source):
    """
    Run the 2D FDTD update (vectorized, no PML) for nt time steps.

    A source given by the function "source" drives hz at "sourcepoint".
    ex and ey are updated in place; hz is stored into "history" at every
    step, and "history" is returned.
    :param nt: int, number of time steps
    :param fields: List[3 np.ndarrays] -- (ex, ey, hz)
    :param constants: List[4 floats] -- (cex, cey, chzx, chzy)
    :param history: np.ndarray of size (nx, ny, nt)
    :param sourcepoint: Tuple(int, int), index into hz
    :param source: function of the time-step index
    :return: history
    """
    ex, ey, hz = fields
    cex, cey, chzx, chzy = constants
    for step in range(nt):
        # H-field update from the curl of E, then hard source injection.
        hz = hz - chzx * (ey[1:, :] - ey[:-1, :]) + chzy * (ex[:, 1:] - ex[:, :-1])
        hz[sourcepoint] = source(step)
        # E-field updates from the curl of H (interior nodes only).
        ex[:, 1:-1] += cex * (hz[:, 1:] - hz[:, :-1])
        ey[1:-1, :] -= cey * (hz[1:, :] - hz[:-1, :])
        history[:, :, step] = hz
    return history
def loop_evolution(nx, ny, nt, fields, constants, history, sourcepoint, source):
    """
    Calculate the behavior of the situation determined by the "constants" with a loop implementation.
    A source determined by the function "source" excites the field at point "sourcepoint".
    The value of "hz" at each timestep is stored in "history" which is then returned.
    :param nx: int
    :param ny: int
    :param nt: int
    :param fields: List[3 np.ndarrays]
    :param constants: List[4 floats]
    :param history: np.ndarray of size (nx, ny, nt)
    :param sourcepoint: Tuple(float, float)
    :param source: function
    :return:
    """
    # Expected shapes (from the indexing below): ex is (nx, ny+1),
    # ey is (nx+1, ny), hz is (nx, ny) -- TODO confirm with callers.
    ex, ey, hz = fields
    cex, cey, chx, chy = constants
    for t in range(nt):
        # E-field updates from H (interior nodes only).
        # NOTE(review): this updates ex columns 1..ny-2, while the
        # vectorized evolution() updates ex[:, 1:-1] = columns 1..ny-1
        # (ex has ny+1 columns there) -- the two implementations differ
        # at the boundary; confirm which is intended.
        for i in range(nx):
            for j in range(1, ny - 1):
                ex[i, j] = ex[i, j] + cex * (hz[i, j] - hz[i, j - 1])
        for i in range(1, nx - 1):
            for j in range(ny):
                ey[i, j] = ey[i, j] - cey * (hz[i, j] - hz[i - 1, j])
        # H-field update from the curl of E.
        for i in range(nx):
            for j in range(ny):
                hz[i, j] = hz[i, j] + chy * (ex[i, j + 1] - ex[i, j]) - chx * (ey[i + 1, j] - ey[i, j])
        # Hard source injection (applied after the H update, whereas
        # evolution() applies it before the E updates).
        hz[sourcepoint] = source(t)
        # Record this step's hz field.
        for i in range(nx):
            for j in range(ny):
                history[i,j,t] = hz[i,j]
    return history
| [
"gernot.ohner@gmail.com"
] | gernot.ohner@gmail.com |
853cd9f91bd66aa8cb7c3785f5159eb5d757c4cd | 2da8347ee485eba61c72c5394a839c16bcbce835 | /src/__init__.py | 3201c1d604ee4134fd689df62ed5b517770ad24c | [] | no_license | svox1/PiconsTestConverter | 0abd288bf04515e7fe367bfa8a068603987a5e29 | f8ded7b74bede4cda57369ffbd391d5646daf061 | refs/heads/master | 2020-06-04T13:20:52.592127 | 2014-01-07T01:15:51 | 2014-01-07T01:15:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | # -*- coding: UTF-8 -*-
#######################################################################
#
# PiconsTestConverter
# Copyright (C) 2013 by svox
#
# In case of reuse of this source code please do not remove this copyright.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For more information on the GNU General Public License see:
# <http://www.gnu.org/licenses/>.
#
#######################################################################
| [
"mail@sebastian-wichmann.de"
] | mail@sebastian-wichmann.de |
b4c0472ccadd94cd2d5b8635aa3af2ec2da7fb48 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/476/usersdata/321/110683/submittedfiles/Av2_Parte3.py | 871c02d88b4454cc7f87fe1b0a0f024a5aa1caa1 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # -*- coding: utf-8 -*-
# Read the sizes of the two lists from the user.
qa= int(input('Quantidade de elementos de a: '))
qb= int(input('Quantidade de elementos de b: '))
a= []
b= []
# Fill both lists with user-supplied integers.
for i in range(qa):
    a.append(int(input('Digite o valor%d de a: ' % i)))
for i in range(qb):
    b.append(int(input('Digite o valor%d de b: ' % i)))
soma= 0
# NOTE(review): at this point `i` is the last index of the second loop,
# and the loop body never changes a[i], b[i] or i -- so if a[i] == b[i]
# this loop never terminates, and it only ever inspects one pair of
# elements. The intent was probably to count positions where a and b
# agree; confirm with the author before fixing.
while a[i] == b[i]:
    soma+=1
print(soma)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f4faeed009587fb8b26970859dabc3f70ff065e1 | 077aa7e969f2359b986ae7d70904510165ddc5fc | /.venv/lib/python3.6/dist-packages/aws_cdk/aws_ec2/_jsii/__init__.py | 14560b1b6206f580fd32e3b90c122f1d257d4fd1 | [] | no_license | lilitiny/pipe | 8fd483815d899d460a184a74a3b44b66669e833f | 5c840a3e9c51ca13338153a1245757cbaf76f7b2 | refs/heads/master | 2023-01-30T23:14:47.094481 | 2020-12-10T14:47:23 | 2020-12-10T14:47:23 | 320,216,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import aws_cdk.assets._jsii
import aws_cdk.aws_cloudwatch._jsii
import aws_cdk.aws_iam._jsii
import aws_cdk.aws_kms._jsii
import aws_cdk.aws_logs._jsii
import aws_cdk.aws_s3._jsii
import aws_cdk.aws_s3_assets._jsii
import aws_cdk.aws_ssm._jsii
import aws_cdk.cloud_assembly_schema._jsii
import aws_cdk.core._jsii
import aws_cdk.cx_api._jsii
import aws_cdk.region_info._jsii
import constructs._jsii
# Load the bundled JS assembly that backs this Python binding.
# This module appears to be auto-generated by jsii -- avoid hand edits.
__jsii_assembly__ = jsii.JSIIAssembly.load(
    "@aws-cdk/aws-ec2", "1.77.0", __name__[0:-6], "aws-ec2@1.77.0.jsii.tgz"
)
# Public API of this private _jsii package.
__all__ = [
    "__jsii_assembly__",
]
# Make the loaded types visible to downstream jsii consumers.
publication.publish()
| [
"ec2-user@ip-172-31-28-125.ec2.internal"
] | ec2-user@ip-172-31-28-125.ec2.internal |
f926b65b43588da453f042c7a401954e4f3e86e5 | 7ce2dd7b4b0671c8b051855e4bf1f6e85e8f7567 | /2018202147/src/scripts/Project3/TrafficsignClf.py | 65bd7db5b7eed8a524ef7e054fa7cd8a8a87055b | [] | no_license | weimingtom/ai20projects | b63286df9c5d72cb6ed2c188a2d58e80f82e4ca9 | ca6f993cfa569250b3116921f4b481d01de36197 | refs/heads/master | 2023-02-08T04:59:19.719235 | 2020-12-30T00:22:36 | 2020-12-30T00:22:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | import os
from torchvision import transforms,models
import torch.nn as nn
import torch
import PIL
# Classifier used for prediction (traffic-sign recognition).
class TrafficsignClf():
    # Constructor.
    # Args:
    #     model_path: **absolute** path of the saved model weights
    def __init__(self, model_path):
        # There are 8 sign classes.
        self.classes = ['speed_limit_30', 'speed_limit_40',
                        'go_straight', 'turn_left', 'turn_right',
                        'turn_around', 'slow', 'stop']
        self.model = models.resnet18()
        num_ftrs = self.model.fc.in_features
        # Replace the final layer: 9 outputs (8 signs + one extra class;
        # predict() below treats index 8 as "no traffic sign").
        self.model.fc = nn.Linear(num_ftrs, 9)
        # Load the trained weights.
        self.model.load_state_dict(torch.load(model_path))
        self.model.eval()
    # Predict.
    # Args:
    #     img_path: **absolute** path of the image to classify
    def predict(self, img_path):
        img = PIL.Image.open(img_path).convert('RGB')
        # Standard ImageNet-style preprocessing (resize, crop, normalize).
        test_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        img = test_transform(img)
        batch_t = torch.unsqueeze(img, 0)
        output = self.model(batch_t)
        # `_` is the max logit, `predict` the winning class index.
        _, predict = torch.max(output, 1)
        # Return the class label and class name.
        # If the top logit is low (< 3) the model is not confident, so
        # report class 8 / "N/A" (no traffic sign).
        if int(_) < 3:
            return 8, "N/A"
        return int(predict), self.classes[int(predict)]
if __name__ == '__main__':
    clf = TrafficsignClf(os.getcwd() + '/model_resave.pth')
    # Grab one frame from the camera.
    # NOTE(review): cv2 is referenced but never imported in this file;
    # `import cv2` must be added at the top for this script path to run.
    cap = cv2.VideoCapture(1)
    ret, frame = cap.read()
    if ret:
        # Temporarily store the captured frame here.
        name = os.getcwd() + '/frame.jpg'
        cv2.imwrite(name, frame)
        # Bug fix: the previous code called `self.tfsClf.predict(name)`,
        # but `self` does not exist at module level; use the local `clf`.
        _, sign = clf.predict(name)
        print(sign)
        # The image can be deleted once the prediction is done.
        os.remove(name)
        #cv2.imshow('frame', frame)
    cap.release()
"cuiguanyu2000@126.com"
] | cuiguanyu2000@126.com |
49a3ea622e52e19166cdf0f8105d210aa6a70f4b | f342108e7ca3d6eb094e7e243e25ceeaa24c335c | /main.py | 8300a32468e2edfe310e9a734f6c16213e8fbe63 | [] | no_license | Telekurysh/Looking-for-films | 80ef4bb3982fe90e3786e384cc66c3b3d09b0cc3 | 3fb914097f2d57d5c174021f29c593d0f65e9183 | refs/heads/master | 2023-07-19T19:03:40.765172 | 2020-01-07T14:09:47 | 2020-01-07T14:09:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem
from UI import Ui
import sqlite3
class MyWidget(QMainWindow, Ui):
    """Main window: searches the local `films.db` SQLite database using
    the filter fields and shows the matching rows in the table widget."""
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        # One connection for the lifetime of the window.
        self.con = sqlite3.connect("films.db")
        self.pushButton.clicked.connect(self.select)
    def select(self):
        """Run the search and fill the table with the results.

        Fixes over the previous version:
        * user input is passed as bound parameters instead of being
          interpolated into the SQL string (prevents SQL injection and
          breakage on quotes);
        * an empty result set no longer raises IndexError — column
          count/headers now come from cursor.description.
        """
        filters = []
        params = []
        # Only non-empty fields constrain the query. SQLite applies the
        # column's affinity to bound values, so numeric columns still
        # match text input like "2000".
        for column, value in (("title", self.title.text()),
                              ("duration", self.duration.text()),
                              ("year", self.year.text())):
            if value:
                filters.append("{} = ?".format(column))
                params.append(value)
        req = "SELECT * FROM Films"
        if filters:
            req += " WHERE " + " AND ".join(filters)
        cur = self.con.cursor()
        result = cur.execute(req, params).fetchall()
        self.tableWidget.setRowCount(len(result))
        self.tableWidget.setColumnCount(len(cur.description))
        self.tableWidget.setHorizontalHeaderLabels([d[0] for d in cur.description])
        for i, row in enumerate(result):
            for j, val in enumerate(row):
                self.tableWidget.setItem(i, j, QTableWidgetItem(str(val)))
# Create the Qt application, show the main window, and hand control to
# the Qt event loop; exit the process with the loop's return code.
app = QApplication(sys.argv)
ex = MyWidget()
ex.show()
sys.exit(app.exec_())
| [
"noreply@github.com"
] | noreply@github.com |
cd7945760a4924d927021ded1d1b0d4d2a70b9a4 | 390565b36da4e69a08a69f1d59b7d58a3ffd2fdf | /python/baseline/reader.py | e0d40319da1df29dfc0b91e4cd315de3788011b3 | [
"Apache-2.0"
] | permissive | demoninpiano/baseline | 3a87afc831bbdef56f25652e2e90205dee33a290 | e3e541a241f673813cc4628de90411d1ecee5e3f | refs/heads/master | 2021-07-08T02:10:42.430721 | 2017-10-05T14:34:15 | 2017-10-05T14:34:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,106 | py | import baseline.data
import numpy as np
from collections import Counter
import re
import codecs
from baseline.utils import import_user_module, revlut
import os
def num_lines(filename):
    """Count the number of lines in a UTF-8 text file."""
    with codecs.open(filename, encoding='utf-8', mode='r') as f:
        return sum(1 for _ in f)
def _build_vocab_for_col(col, files):
vocab = Counter()
vocab['<GO>'] = 1
vocab['<EOS>'] = 1
for file in files:
if file is None:
continue
with codecs.open(file, encoding='utf-8', mode='r') as f:
for line in f:
cols = re.split("\t", line)
text = re.split("\s", cols[col])
for w in text:
w = w.strip()
vocab[w] += 1
return vocab
class ParallelCorpusReader(object):
    """Base class for parallel (source/target) corpus readers.

    Subclasses implement build_vocabs() and load_examples(); load() wraps
    the examples in a Seq2SeqDataFeed.
    """
    def __init__(self,
                 max_sentence_length=1000,
                 vec_alloc=np.zeros,
                 src_vec_trans=None,
                 trim=False):
        # Allocator for the padded index vectors (np.zeros by default).
        self.vec_alloc = vec_alloc
        # Optional transform applied to source vectors by the data feed.
        self.src_vec_trans = src_vec_trans
        self.max_sentence_length = max_sentence_length
        self.trim = trim
    def build_vocabs(self, files):
        """Build source/target vocabularies. Subclasses override."""
        pass
    def load_examples(self, tsfile, vocab1, vocab2):
        """Read a file into Seq2SeqExamples. Subclasses override."""
        pass
    def load(self, tsfile, vocab1, vocab2, batchsz, shuffle=False):
        """Read `tsfile` and return a batched Seq2SeqDataFeed over it."""
        examples = self.load_examples(tsfile, vocab1, vocab2)
        feed = baseline.data.Seq2SeqDataFeed(
            examples, batchsz, shuffle=shuffle,
            src_vec_trans=self.src_vec_trans,
            vec_alloc=self.vec_alloc, trim=self.trim)
        return feed
class TSVParallelCorpusReader(ParallelCorpusReader):
    """Parallel corpus reader for tab-separated files: one line per pair,
    with the source sentence in column `src_col_num` and the target in
    `dst_col_num`."""
    def __init__(self,
                 max_sentence_length=1000,
                 vec_alloc=np.zeros,
                 src_vec_trans=None,
                 trim=False, src_col_num=0, dst_col_num=1):
        super(TSVParallelCorpusReader, self).__init__(max_sentence_length, vec_alloc, src_vec_trans, trim)
        self.src_col_num = src_col_num
        self.dst_col_num = dst_col_num
    def build_vocabs(self, files):
        # Separate vocabularies for the source and target columns.
        src_vocab = _build_vocab_for_col(self.src_col_num, files)
        dst_vocab = _build_vocab_for_col(self.dst_col_num, files)
        return src_vocab, dst_vocab
    def load_examples(self, tsfile, vocab1, vocab2):
        """Read the TSV file into padded (src, tgt, src_len, tgt_len)
        tuples, truncated to max_sentence_length, and wrap them in
        Seq2SeqExamples. Target vectors are framed as <GO> ... <EOS>."""
        GO = vocab2['<GO>']
        EOS = vocab2['<EOS>']
        mxlen = self.max_sentence_length
        ts = []
        with codecs.open(tsfile, encoding='utf-8', mode='r') as f:
            for line in f:
                splits = re.split("\t", line.strip())
                src = re.split("\s+", splits[0])
                dst = re.split("\s+", splits[1])
                srcl = self.vec_alloc(mxlen, dtype=np.int)
                tgtl = self.vec_alloc(mxlen, dtype=np.int)
                src_len = len(src)
                tgt_len = len(dst) + 2 # <GO>,...,<EOS>
                # end2 = number of target tokens that fit after truncation.
                end1 = min(src_len, mxlen)
                end2 = min(tgt_len, mxlen)-2
                tgtl[0] = GO
                src_len = end1
                tgt_len = end2+2
                for j in range(end1):
                    srcl[j] = vocab1[src[j]]
                # Target tokens occupy positions 1..end2.
                for j in range(end2):
                    tgtl[j + 1] = vocab2[dst[j]]
                # NOTE(review): this writes EOS at index end2, overwriting
                # the last target token written above; tgtl[end2 + 1] = EOS
                # looks intended (tgt_len counts end2 + 2 slots). Confirm
                # against downstream consumers before changing.
                tgtl[end2] = EOS
                ts.append((srcl, tgtl, src_len, tgt_len))
        return baseline.data.Seq2SeqExamples(ts)
class MultiFileParallelCorpusReader(ParallelCorpusReader):
    """Parallel corpus reader for suffix-paired files: the source and
    target sentences live in two aligned files `<base><src_suffix>` and
    `<base><dst_suffix>`, one sentence per line."""
    def __init__(self, src_suffix, dst_suffix,
                 max_sentence_length=1000,
                 vec_alloc=np.zeros,
                 src_vec_trans=None,
                 trim=False):
        super(MultiFileParallelCorpusReader, self).__init__(max_sentence_length, vec_alloc, src_vec_trans, trim)
        # Normalize the suffixes so they always start with a dot.
        self.src_suffix = src_suffix
        self.dst_suffix = dst_suffix
        if not src_suffix.startswith('.'):
            self.src_suffix = '.' + self.src_suffix
        if not dst_suffix.startswith('.'):
            self.dst_suffix = '.' + self.dst_suffix
    def build_vocabs(self, files):
        # A single shared vocabulary is used for both sides.
        src_vocab = _build_vocab_for_col(0, files)
        dst_vocab = src_vocab
        return src_vocab, dst_vocab
    def load_examples(self, tsfile, vocab1, vocab2):
        """Read the aligned source/target files into padded
        (src, tgt, src_len, tgt_len) tuples wrapped in Seq2SeqExamples.
        Target vectors are framed as <GO> ... <EOS>."""
        # NOTE(review): PAD is never used below — leftover?
        PAD = vocab1['<PAD>']
        GO = vocab2['<GO>']
        EOS = vocab2['<EOS>']
        mxlen = self.max_sentence_length
        ts = []
        with codecs.open(tsfile + self.src_suffix, encoding='utf-8', mode='r') as fsrc:
            with codecs.open(tsfile + self.dst_suffix, encoding='utf-8', mode='r') as fdst:
                for src, dst in zip(fsrc, fdst):
                    src = re.split("\s+", src.strip())
                    dst = re.split("\s+", dst.strip())
                    srcl = self.vec_alloc(mxlen, dtype=np.int)
                    tgtl = self.vec_alloc(mxlen, dtype=np.int)
                    src_len = len(src)
                    tgt_len = len(dst) + 2
                    end1 = min(src_len, mxlen)
                    end2 = min(tgt_len, mxlen)-2
                    tgtl[0] = GO
                    src_len = end1
                    tgt_len = end2+2
                    for j in range(end1):
                        srcl[j] = vocab1[src[j]]
                    # Target tokens occupy positions 1..end2.
                    for j in range(end2):
                        tgtl[j + 1] = vocab2[dst[j]]
                    # NOTE(review): same suspected off-by-one as in
                    # TSVParallelCorpusReader — EOS overwrites the last
                    # target token; tgtl[end2 + 1] = EOS looks intended.
                    tgtl[end2] = EOS
                    ts.append((srcl, tgtl, src_len, tgt_len))
        return baseline.data.Seq2SeqExamples(ts)
def create_parallel_corpus_reader(mxlen, alloc_fn, trim, src_vec_trans, **kwargs):
    """Factory for parallel corpus readers.

    kwargs['reader_type'] selects the implementation: 'default' pairs two
    suffix-matched files (requires kwargs['pair_suffix']), 'tsv' reads a
    tab-separated file, and any other value loads a user reader module.
    """
    reader_type = kwargs.get('reader_type', 'default')
    if reader_type == 'default':
        print('Reading parallel file corpus')
        pair_suffix = kwargs.get('pair_suffix')
        return MultiFileParallelCorpusReader(pair_suffix[0], pair_suffix[1],
                                             mxlen, alloc_fn,
                                             src_vec_trans, trim)
    if reader_type == 'tsv':
        print('Reading tab-separated corpus')
        return TSVParallelCorpusReader(mxlen, alloc_fn, src_vec_trans, trim)
    mod = import_user_module("reader", reader_type)
    return mod.create_parallel_corpus_reader(mxlen, alloc_fn,
                                             src_vec_trans, trim, **kwargs)
def identity_trans_fn(x):
    """Identity transform: return the input unchanged."""
    return x
class CONLLSeqReader(object):
    """Reader for CONLL-style tagging files: one `token ... label` row per
    line, sentences separated by blank lines. Builds word/char vocabs and
    produces padded word/char/tag tensors for sequence labeling."""
    # Emoticons that would otherwise be unrepresentable after cleanup;
    # web_cleanup() maps them all to ';)'.
    UNREP_EMOTICONS = (
        ':)',
        ':(((',
        ':D',
        '=)',
        ':-)',
        '=(',
        '(=',
        '=[[',
    )
    def __init__(self, max_sentence_length=-1, max_word_length=-1, word_trans_fn=None,
                 vec_alloc=np.zeros, vec_shape=np.shape, trim=False):
        # Word normalizer applied before vocab lookup (identity if None).
        self.cleanup_fn = identity_trans_fn if word_trans_fn is None else word_trans_fn
        # Non-positive limits mean "derive from the data in build_vocab()".
        self.max_sentence_length = max_sentence_length
        self.max_word_length = max_word_length
        self.vec_alloc = vec_alloc
        self.vec_shape = vec_shape
        self.trim = trim
        # Label index seeded with the 3 reserved tags.
        self.label2index = {"<PAD>": 0, "<GO>": 1, "<EOS>": 2}
    @staticmethod
    def web_cleanup(word):
        # Collapse URLs, mentions, hashtags, quotes and rare emoticons to
        # canonical placeholder tokens.
        if word.startswith('http'): return 'URL'
        if word.startswith('@'): return '@@@@'
        if word.startswith('#'): return '####'
        if word == '"': return ','
        if word in CONLLSeqReader.UNREP_EMOTICONS: return ';)'
        if word == '<3': return '<3'
        return word
    def build_vocab(self, files):
        """Scan the files, count word (cleaned) and character frequencies,
        and finalize max_sentence_length / max_word_length from the data
        (capped by any positive limits given to __init__).
        Returns (char_counter, word_counter)."""
        vocab_word = Counter()
        vocab_ch = Counter()
        maxw = 0
        maxs = 0
        for file in files:
            if file is None:
                continue
            sl = 0
            with codecs.open(file, encoding='utf-8', mode='r') as f:
                for line in f:
                    line = line.strip()
                    if line == '':
                        # Blank line terminates a sentence.
                        maxs = max(maxs, sl)
                        sl = 0
                    else:
                        states = re.split("\s", line)
                        sl += 1
                        w = states[0]
                        vocab_word[self.cleanup_fn(w)] += 1
                        maxw = max(maxw, len(w))
                        for k in w:
                            vocab_ch[k] += 1
        self.max_word_length = min(maxw, self.max_word_length) if self.max_word_length > 0 else maxw
        self.max_sentence_length = min(maxs, self.max_sentence_length) if self.max_sentence_length > 0 else maxs
        print('Max sentence length %d' % self.max_sentence_length)
        print('Max word length %d' % self.max_word_length)
        return vocab_ch, vocab_word
    @staticmethod
    def read_lines(tsfile):
        """Parse a CONLL file into parallel lists of token sequences and
        label sequences (first column = token, last column = label)."""
        txts = []
        lbls = []
        txt = []
        lbl = []
        with codecs.open(tsfile, encoding='utf-8', mode='r') as f:
            for line in f:
                states = re.split("\s", line.strip())
                if len(states) > 1:
                    txt.append(states[0])
                    lbl.append(states[-1])
                else:
                    # Blank (or single-field) line ends the sentence.
                    txts.append(txt)
                    lbls.append(lbl)
                    txt = []
                    lbl = []
        return txts, lbls
    def load(self, filename, words_vocab, chars_vocab, batchsz, shuffle=False):
        """Vectorize the file into padded (word, char, tag, length, index)
        tuples and return (SeqWordCharLabelDataFeed, raw token lists).
        New labels are added to self.label2index as they are seen."""
        ts = []
        idx = 2 # GO=0, START=1, EOS=2
        mxlen = self.max_sentence_length
        maxw = self.max_word_length
        txts, lbls = CONLLSeqReader.read_lines(filename)
        for i in range(len(txts)):
            # NOTE(review): np.int is deprecated (removed in NumPy 1.24);
            # these should become plain `int` or np.int_.
            xs_ch = self.vec_alloc((mxlen, maxw), dtype=np.int)
            xs = self.vec_alloc((mxlen), dtype=np.int)
            ys = self.vec_alloc((mxlen), dtype=np.int)
            lv = lbls[i]
            v = txts[i]
            length = mxlen
            for j in range(mxlen):
                if j == len(v):
                    length = j
                    break
                w = v[j]
                nch = min(len(w), maxw)
                label = lv[j]
                # Unseen labels get the next index (first new one is 3).
                if label not in self.label2index:
                    idx += 1
                    self.label2index[label] = idx
                ys[j] = self.label2index[label]
                # NOTE(review): .get() with no default returns None for
                # OOV words, which would fail on assignment into an int
                # array — confirm the vocab is guaranteed to be complete.
                xs[j] = words_vocab.get(self.cleanup_fn(w))
                for k in range(nch):
                    xs_ch[j, k] = chars_vocab.get(w[k], 0)
            ts.append((xs, xs_ch, ys, length, i))
        examples = baseline.data.SeqWordCharTagExamples(ts)
        return baseline.data.SeqWordCharLabelDataFeed(examples, batchsz=batchsz, shuffle=shuffle, vec_alloc=self.vec_alloc, vec_shape=self.vec_shape), txts
def create_seq_pred_reader(mxlen, mxwlen, word_trans_fn, vec_alloc, vec_shape, trim, **kwargs):
    """Factory for sequence-prediction (tagging) readers.

    kwargs['reader_type'] selects the implementation: 'default' gives the
    CONLL reader; any other value loads a user reader module.
    """
    reader_type = kwargs.get('reader_type', 'default')
    if reader_type != 'default':
        mod = import_user_module("reader", reader_type)
        return mod.create_seq_pred_reader(mxlen, mxwlen, word_trans_fn,
                                          vec_alloc, vec_shape, trim, **kwargs)
    print('Reading CONLL sequence file corpus')
    return CONLLSeqReader(mxlen, mxwlen, word_trans_fn,
                          vec_alloc, vec_shape, trim)
class SeqLabelReader(object):
    """Abstract interface for classification (sequence -> label) readers."""
    def __init__(self):
        pass
    def build_vocab(self, files, **kwargs):
        """Build the vocabulary and label set. Subclasses override."""
        pass
    def load(self, filename, index, batchsz, **kwargs):
        """Load a batched dataset feed. Subclasses override."""
        pass
class TSVSeqLabelReader(SeqLabelReader):
    """Classification reader for files with `label<ws>sentence` lines.
    Cleans/tokenizes the text, discovers labels in file order, and
    produces fixed-length padded index vectors."""
    # Contraction/punctuation rewrites applied by do_clean().
    REPLACE = { "'s": " 's ",
                "'ve": " 've ",
                "n't": " n't ",
                "'re": " 're ",
                "'d": " 'd ",
                "'ll": " 'll ",
                ",": " , ",
                "!": " ! ",
                }
    def __init__(self, mxlen=1000, mxfiltsz=0, clean_fn=None, vec_alloc=np.zeros, src_vec_trans=None):
        super(TSVSeqLabelReader, self).__init__()
        self.vocab = None
        # label -> index, filled in file order by build_vocab()/load().
        self.label2index = {}
        # Per-word normalizer (identity if None).
        self.clean_fn = clean_fn
        self.mxlen = mxlen
        # Max conv filter size; tokens are offset by half of it so a
        # convolution sees zero padding on both sides.
        self.mxfiltsz = mxfiltsz
        self.vec_alloc = vec_alloc
        if self.clean_fn is None:
            self.clean_fn = lambda x: x
        self.src_vec_trans = src_vec_trans
    @staticmethod
    def splits(text):
        # Whitespace-tokenize, dropping empty strings.
        return list(filter(lambda s: len(s) != 0, re.split('\s+', text)))
    @staticmethod
    def do_clean(l):
        # Lowercase, strip non-alphanumeric/punctuation chars, then apply
        # the REPLACE contraction rewrites.
        l = l.lower()
        l = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", l)
        for k, v in TSVSeqLabelReader.REPLACE.items():
            l = l.replace(k, v)
        return l.strip()
    @staticmethod
    def label_and_sentence(line, clean_fn):
        # First whitespace-delimited field is the label; the rest is the
        # sentence, cleaned word by word and re-joined with single spaces.
        label_text = re.split('[\t\s]+', line)
        label = label_text[0]
        text = label_text[1:]
        text = ' '.join(list(filter(lambda s: len(s) != 0, [clean_fn(w) for w in text])))
        return label, text
    def build_vocab(self, files, **kwargs):
        """Take a directory (as a string), or an array of files and build a vocabulary
        Take in a directory or an array of individual files (as a list). If the argument is
        a string, it may be a directory, in which case, all files in the directory will be loaded
        to form a vocabulary.
        :param files: Either a directory (str), or an array of individual files
        :return: (word Counter, label list)
        """
        label_idx = len(self.label2index)
        # NOTE(review): `type(files) == str` — isinstance() would be the
        # idiomatic check here.
        if type(files) == str:
            if os.path.isdir(files):
                base = files
                files = filter(os.path.isfile, [os.path.join(base, x) for x in os.listdir(base)])
            else:
                files = [files]
        vocab = Counter()
        for file in files:
            if file is None:
                continue
            with codecs.open(file, encoding='utf-8', mode='r') as f:
                for line in f:
                    label, text = TSVSeqLabelReader.label_and_sentence(line, self.clean_fn)
                    # Labels are numbered in first-seen order.
                    if label not in self.label2index:
                        self.label2index[label] = label_idx
                        label_idx += 1
                    for w in TSVSeqLabelReader.splits(text):
                        vocab[w] += 1
        return vocab, self.get_labels()
    def get_labels(self):
        """Return labels as a list ordered by their assigned indices."""
        labels = [''] * len(self.label2index)
        for label, index in self.label2index.items():
            labels[index] = label
        return labels
    def load(self, filename, index, batchsz, **kwargs):
        """Vectorize `filename` into (x, y) pairs and wrap them in a
        SeqLabelDataFeed. `index` maps words to ids; OOV words map to
        <PAD>. Labels must already be in self.label2index."""
        PAD = index['<PAD>']
        shuffle = kwargs.get('shuffle', False)
        # Tokens are shifted right by halffiltsz and truncated so that a
        # conv filter of mxfiltsz always sees zero padding at both ends.
        halffiltsz = self.mxfiltsz // 2
        nozplen = self.mxlen - 2*halffiltsz
        examples = []
        with codecs.open(filename, encoding='utf-8', mode='r') as f:
            for offset, line in enumerate(f):
                label, text = TSVSeqLabelReader.label_and_sentence(line, self.clean_fn)
                y = self.label2index[label]
                toks = TSVSeqLabelReader.splits(text)
                mx = min(len(toks), nozplen)
                toks = toks[:mx]
                x = self.vec_alloc(self.mxlen, dtype=int)
                for j in range(len(toks)):
                    w = toks[j]
                    key = index.get(w, PAD)
                    x[j+halffiltsz] = key
                examples.append((x, y))
        return baseline.data.SeqLabelDataFeed(baseline.data.SeqLabelExamples(examples),
                                              batchsz=batchsz, shuffle=shuffle, vec_alloc=self.vec_alloc, src_vec_trans=self.src_vec_trans)
def create_pred_reader(mxlen, zeropadding, clean_fn, vec_alloc, src_vec_trans, **kwargs):
    """Factory for classification readers.

    kwargs['reader_type'] selects the implementation: 'default' gives the
    TSV label reader; any other value loads a user reader module.
    """
    reader_type = kwargs.get('reader_type', 'default')
    if reader_type != 'default':
        mod = import_user_module("reader", reader_type)
        return mod.create_pred_reader(mxlen, zeropadding, clean_fn, vec_alloc, src_vec_trans, **kwargs)
    return TSVSeqLabelReader(mxlen, zeropadding, clean_fn, vec_alloc, src_vec_trans)
class PTBSeqReader(object):
    """PTB-style language-model reader: every line is a sentence, and an
    <EOS> token is appended to each. Produces word and character index
    streams for truncated BPTT."""
    def __init__(self, max_word_length, nbptt):
        # Upper bound on word length (finalized against the data in
        # build_vocab); nbptt is the BPTT unroll length.
        self.max_word_length = max_word_length
        self.nbptt = nbptt
    def build_vocab(self, files):
        """Count word and character frequencies over the files (None
        entries skipped), appending <EOS> per line, and record each
        file's total word count.
        Returns (char_counter, word_counter, num_words_in_files)."""
        vocab_word = Counter()
        vocab_ch = Counter()
        maxw = 0
        num_words_in_files = []
        for file in files:
            if file is None:
                continue
            with codecs.open(file, encoding='utf-8', mode='r') as f:
                num_words = 0
                for line in f:
                    sentence = line.split() + ['<EOS>']
                    num_words += len(sentence)
                    for w in sentence:
                        vocab_word[w] += 1
                        maxw = max(maxw, len(w))
                        for k in w:
                            vocab_ch[k] += 1
                num_words_in_files.append(num_words)
        self.max_word_length = min(maxw, self.max_word_length)
        print('Max word length %d' % self.max_word_length)
        return vocab_ch, vocab_word, num_words_in_files
    def load(self, filename, words_vocab, chars_vocab, num_words, batchsz, vec_alloc=np.zeros):
        """Vectorize `filename` into a flat word-id array x of length
        num_words and a char-id matrix xch, then wrap them in a
        SeqWordCharDataFeed. `num_words` must match the count returned
        by build_vocab for this file."""
        xch = vec_alloc((num_words, self.max_word_length), np.int)
        x = vec_alloc((num_words), np.int)
        i = 0
        with codecs.open(filename, encoding='utf-8', mode='r') as f:
            for line in f:
                sentence = line.split() + ['<EOS>']
                # NOTE(review): this increment mutates the local copy of
                # the `num_words` parameter but is never read afterwards
                # (the arrays were already sized above) — leftover code?
                num_words += len(sentence)
                for w in sentence:
                    # NOTE(review): .get() with no default returns None
                    # for OOV words — confirm the vocab is complete.
                    x[i] = words_vocab.get(w)
                    nch = min(len(w), self.max_word_length)
                    for k in range(nch):
                        xch[i, k] = chars_vocab.get(w[k], 0)
                    i += 1
        return baseline.data.SeqWordCharDataFeed(x, xch, self.nbptt, batchsz, self.max_word_length)
def create_lm_reader(max_word_length, nbptt, **kwargs):
    """Factory for language-model readers.

    kwargs['reader_type'] selects the implementation: 'default' gives the
    PTB reader; any other value loads a user reader module.
    """
    reader_type = kwargs.get('reader_type', 'default')
    if reader_type != 'default':
        mod = import_user_module("reader", reader_type)
        return mod.create_lm_reader(max_word_length, nbptt, **kwargs)
    return PTBSeqReader(max_word_length, nbptt)
| [
"dpressel@gmail.com"
] | dpressel@gmail.com |
474b42b6750eb01cac86bb98978ade8ef0d017fb | 32cb456abbc05673ab8f31caf437ebc75bfa7c6f | /follow_alicate.py | b0ccd52383245c3243343a75e26b512fadf1e751 | [] | no_license | rodrimaia/follow-alicate | d915126bbb027957a28eedd6eaec74cc7bb0e4b2 | 8bcbf726a73da6b8f3ffe3bef5a59ebab03bdcbd | refs/heads/master | 2021-05-29T05:27:13.742671 | 2015-05-29T16:50:22 | 2015-05-29T16:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py |
def main():
    """Entry point: print a short greeting."""
    # Use the print() function: the old `print 'ei'` statement form is a
    # SyntaxError on Python 3, while print('ei') works on both 2 and 3.
    print('ei')

if __name__ == "__main__":
    main()
| [
"rodrigo.maia.pereira@gmail.com"
] | rodrigo.maia.pereira@gmail.com |
cce05380b5ad5dd37f8144e502887aaf138de2ed | 5d3ac45872e312ad99c7d316665c66d28663fbfd | /loader.py | 98760d21d05c36e6cb7a6cd310a440457af64766 | [] | no_license | hueich/7-wonders | 3a26b9b7dfab6470d404a10d42ca64fb0bb75e05 | 9f62bc4f41ee5074df7f199596cdfdbb24eade7e | refs/heads/master | 2021-01-15T19:45:22.408161 | 2015-02-09T02:56:36 | 2015-02-09T03:11:23 | 11,115,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,013 | py | import json
import bonus as bonus_lib
import card as card_lib
import constants
import enum
import exception
import wonder as wonder_lib
def loadAssets(fp):
    """Parse the asset JSON in *fp* into card and wonder objects."""
    raw = json.load(fp)
    return {
        constants.CARDS_KEY: _parseCards(raw[constants.CARDS_KEY]),
        constants.WONDERS_KEY: _parseWonders(raw[constants.WONDERS_KEY]),
    }
def _parseCards(cards):
    """Expand every raw card entry into card objects (one per variant)."""
    parsed = []
    for info in cards:
        _parseCard(info, parsed)
    return parsed
def _parseCard(card_info, cards_list):
    """Append one card object per min-player variant of *card_info*."""
    for min_players in _getMinPlayersList(card_info):
        kind = _parseEnum(card_info['type'], enum.CardType, 'card.type')
        title = str(card_info['name'])
        if not title:
            raise exception.ParseError('card.name')
        age = _parseEnum(card_info['age'], enum.Age, 'card.age')
        bonus = _parseBonus(card_info['bonus'])
        cost = _parseCost(card_info)
        parents = _getStringListOrNone(card_info, 'parents')
        children = _getStringListOrNone(card_info, 'children')
        card_cls = _getCardClassFromString(kind)
        cards_list.append(
            card_cls(title, age, min_players, bonus, cost, parents, children))
def _getMinPlayersList(card_info):
    """Return the card's min-player variants; [-1] when none are declared."""
    try:
        raw = card_info['min_players']
    except KeyError:
        return [-1]
    if isinstance(raw, list):
        return [int(p) for p in raw]
    if isinstance(raw, int):
        return [raw]
    raise exception.ParseError('card.min_players')
def _getCardClassFromString(card_type):
    """Look up the card class for a CardType value; ParseError if unknown."""
    dispatch = {
        enum.CardType.BASIC_RES: card_lib.BasicResourceCard,
        enum.CardType.ADV_RES: card_lib.AdvResourceCard,
        enum.CardType.SCIENCE: card_lib.ScienceCard,
        enum.CardType.MILITARY: card_lib.MilitaryCard,
        enum.CardType.CIVIL: card_lib.CivilCard,
        enum.CardType.COMMERCE: card_lib.CommerceCard,
        enum.CardType.GUILD: card_lib.GuildCard,
    }
    if card_type not in dispatch:
        raise exception.ParseError('card.type')
    return dispatch[card_type]
def _parseBonus(bonus_info):
    """Construct the bonus object described by *bonus_info*.

    Dispatches on bonus_info['type'] (validated against enum.BonusType);
    each branch reads exactly the fields its bonus class needs.
    Raises exception.ParseError for an unrecognised type.
    """
    bonus_type = _parseEnum(bonus_info['type'], enum.BonusType, 'bonus.type')
    bonus = None
    if bonus_type == enum.BonusType.POINT:
        points = int(bonus_info['points'])
        bonus = bonus_lib.PointBonus(points)
    elif bonus_type == enum.BonusType.RESOURCE:
        resources = _parseResources(bonus_info['resources'])
        bonus = bonus_lib.ResourceBonus(resources)
    elif bonus_type == enum.BonusType.COIN:
        coins = int(bonus_info['coins'])
        bonus = bonus_lib.CoinBonus(coins)
    elif bonus_type == enum.BonusType.SCIENCE:
        science = _parseEnum(bonus_info['science'], enum.Science, 'bonus.science')
        bonus = bonus_lib.ScienceBonus(science)
    elif bonus_type == enum.BonusType.MILITARY:
        shields = int(bonus_info['shields'])
        bonus = bonus_lib.MilitaryBonus(shields)
    elif bonus_type == enum.BonusType.TRADING:
        resources = _parseResources(bonus_info['resources'])
        relations = _parseRelations(bonus_info['relations'])
        cost = _getIntOrNone(bonus_info, 'cost')  # optional field
        bonus = bonus_lib.TradingBonus(resources=resources, relations=relations, cost=cost)
    elif bonus_type == enum.BonusType.CARD_COUNT:
        relations = _parseRelations(bonus_info['relations'])
        card_type = _getCardClassFromString(bonus_info['card_type'])
        points_per_card = _getIntOrNone(bonus_info, 'points_per_card')
        coins_per_card = _getIntOrNone(bonus_info, 'coins_per_card')
        bonus = bonus_lib.CardCountBonus(relations=relations, card_type=card_type, points_per_card=points_per_card, coins_per_card=coins_per_card)
    elif bonus_type == enum.BonusType.WONDER_COUNT:
        relations = _parseRelations(bonus_info['relations'])
        points_per_stage = _getIntOrNone(bonus_info, 'points_per_stage')
        coins_per_stage = _getIntOrNone(bonus_info, 'coins_per_stage')
        bonus = bonus_lib.WonderCountBonus(relations=relations, points_per_stage=points_per_stage, coins_per_stage=coins_per_stage)
    elif bonus_type == enum.BonusType.DEFEAT_COUNT:
        relations = _parseRelations(bonus_info['relations'])
        points_per_defeat = _getIntOrNone(bonus_info, 'points_per_defeat')
        bonus = bonus_lib.DefeatCountBonus(relations=relations, points_per_defeat=points_per_defeat)
    else:
        raise exception.ParseError('bonus.type')
    return bonus
def _getIntOrNone(dct, key):
    """Return int(dct[key]) or None when *key* is absent."""
    if key not in dct:
        return None
    return int(dct[key])
def _getStringListOrNone(dct, key):
    """Return dct[key] coerced to a list of str, or None when absent."""
    if key not in dct:
        return None
    return [str(item) for item in dct[key]]
def _parseEnum(value, enum_type, field=''):
    """Validate *value* against enum_type.values; ParseError otherwise."""
    if value not in enum_type.values:
        raise exception.ParseError(field)
    return value
def _parseCost(info):
    """Read the optional 'cost' mapping {resource: amount} from *info*."""
    cost_info = info.get('cost', {})
    cost = {}
    for res, amount in cost_info.items():
        if res not in enum.Resource.values:
            raise exception.ParseError('cost')
        cost[res] = int(amount)
    return cost
def _parseResources(resources):
    """Normalize a resource spec: inner lists become tuples (choices)."""
    if not isinstance(resources, list):
        raise exception.ParseError('resources', msg='Field must be a list.')
    normalized = []
    for res in resources:
        normalized.append(tuple(res) if isinstance(res, list) else res)
    return normalized
def _parseRelations(relations):
    """Validate each relation token against enum.Relation."""
    validated = []
    for rel in relations:
        validated.append(_parseEnum(rel, enum.Relation, 'relations'))
    return validated
def _parseWonders(wonders):
    """Parse every raw wonder entry into a Wonder object."""
    return [_parseWonder(info) for info in wonders]
def _parseWonder(wonder_info):
    """Build a Wonder from its raw dict; ParseError on a missing name."""
    title = str(wonder_info['name'])
    if not title:
        raise exception.ParseError('wonder.name')
    resource = _parseEnum(wonder_info['resource'], enum.Resource, 'wonder.resource')
    stages = [_parseStage(s) for s in wonder_info['stages']]
    return wonder_lib.Wonder(name=title, resource=resource, stages=stages)
def _parseStage(stage_info):
    """Build a wonder stage (cost + bonus) from its raw dict."""
    return wonder_lib.Stage(cost=_parseCost(stage_info),
                            bonus=_parseBonus(stage_info['bonus']))
| [
"addison.luh@gmail.com"
] | addison.luh@gmail.com |
963115c00a45d5b8816066abe1c66e770a21c361 | 2b41d89e57023b759192ba78901646fe22e5692a | /Google IT Automation with Python/Google - Using Python to Interact with the Operating System/email_test.py | 51e661f119e16d4ae33568f226ece0516c2e8a31 | [
"Apache-2.0"
] | permissive | avillanm/Courses- | 0fd9a925a3603b7815b23842ab0cefa2df3c21bd | 88a8fc9c1abe22e3dd2989e6cb97a8f229a521b9 | refs/heads/master | 2022-12-13T10:58:37.241800 | 2020-09-11T05:22:59 | 2020-09-11T05:22:59 | 295,307,529 | 1 | 0 | Apache-2.0 | 2020-09-14T04:53:36 | 2020-09-14T04:53:35 | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python3
import unittest
from emails import find_email
class EmailsTest(unittest.TestCase):
    """Unit tests for emails.find_email."""

    def test_basic(self):
        case = [None, "Bree", "Campbell"]
        self.assertEqual(find_email(case), "breee@abc.edu")

    def test_one_name(self):
        case = [None, "John"]
        self.assertEqual(find_email(case), "Missing parameters")

    def test_two_name(self):
        case = [None, "Roy", "Cooper"]
        self.assertEqual(find_email(case), "No email address found")


if __name__ == '__main__':
    unittest.main()
"noreply@github.com"
] | noreply@github.com |
0961a55413c0854c2148a4c91bfb17bbb9891d86 | 3122ac39f1ce0a882b48293a77195476299c2a3b | /clients/python-flask/generated/openapi_server/models/pipeline_run_node.py | f205f70d61ad88141b09f4ad9bdc2cfd5a55b15f | [
"MIT"
] | permissive | miao1007/swaggy-jenkins | 4e6fe28470eda2428cbc584dcd365a21caa606ef | af79438c120dd47702b50d51c42548b4db7fd109 | refs/heads/master | 2020-08-30T16:50:27.474383 | 2019-04-10T13:47:17 | 2019-04-10T13:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,765 | py | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.pipeline_run_nodeedges import PipelineRunNodeedges # noqa: F401,E501
from openapi_server import util
class PipelineRunNode(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, _class: str=None, display_name: str=None, duration_in_millis: int=None, edges: List[PipelineRunNodeedges]=None, id: str=None, result: str=None, start_time: str=None, state: str=None):  # noqa: E501
        """PipelineRunNode - a model defined in OpenAPI

        :param _class: The _class of this PipelineRunNode.  # noqa: E501
        :type _class: str
        :param display_name: The display_name of this PipelineRunNode.  # noqa: E501
        :type display_name: str
        :param duration_in_millis: The duration_in_millis of this PipelineRunNode.  # noqa: E501
        :type duration_in_millis: int
        :param edges: The edges of this PipelineRunNode.  # noqa: E501
        :type edges: List[PipelineRunNodeedges]
        :param id: The id of this PipelineRunNode.  # noqa: E501
        :type id: str
        :param result: The result of this PipelineRunNode.  # noqa: E501
        :type result: str
        :param start_time: The start_time of this PipelineRunNode.  # noqa: E501
        :type start_time: str
        :param state: The state of this PipelineRunNode.  # noqa: E501
        :type state: str
        """
        # Maps attribute name -> Python type, used by the deserializer.
        self.openapi_types = {
            '_class': str,
            'display_name': str,
            'duration_in_millis': int,
            'edges': List[PipelineRunNodeedges],
            'id': str,
            'result': str,
            'start_time': str,
            'state': str
        }

        # Maps attribute name -> JSON key in the wire format.
        self.attribute_map = {
            '_class': '_class',
            'display_name': 'displayName',
            'duration_in_millis': 'durationInMillis',
            'edges': 'edges',
            'id': 'id',
            'result': 'result',
            'start_time': 'startTime',
            'state': 'state'
        }

        # NOTE: "self.__class" is name-mangled to _PipelineRunNode__class;
        # the _class property below uses the same mangled attribute, so the
        # pair is consistent.
        self.__class = _class
        self._display_name = display_name
        self._duration_in_millis = duration_in_millis
        self._edges = edges
        self._id = id
        self._result = result
        self._start_time = start_time
        self._state = state

    @classmethod
    def from_dict(cls, dikt) -> 'PipelineRunNode':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The PipelineRunNode of this PipelineRunNode.  # noqa: E501
        :rtype: PipelineRunNode
        """
        return util.deserialize_model(dikt, cls)

    @property
    def _class(self) -> str:
        """Gets the _class of this PipelineRunNode.


        :return: The _class of this PipelineRunNode.
        :rtype: str
        """
        return self.__class

    @_class.setter
    def _class(self, _class: str):
        """Sets the _class of this PipelineRunNode.


        :param _class: The _class of this PipelineRunNode.
        :type _class: str
        """

        self.__class = _class

    @property
    def display_name(self) -> str:
        """Gets the display_name of this PipelineRunNode.


        :return: The display_name of this PipelineRunNode.
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name: str):
        """Sets the display_name of this PipelineRunNode.


        :param display_name: The display_name of this PipelineRunNode.
        :type display_name: str
        """

        self._display_name = display_name

    @property
    def duration_in_millis(self) -> int:
        """Gets the duration_in_millis of this PipelineRunNode.


        :return: The duration_in_millis of this PipelineRunNode.
        :rtype: int
        """
        return self._duration_in_millis

    @duration_in_millis.setter
    def duration_in_millis(self, duration_in_millis: int):
        """Sets the duration_in_millis of this PipelineRunNode.


        :param duration_in_millis: The duration_in_millis of this PipelineRunNode.
        :type duration_in_millis: int
        """

        self._duration_in_millis = duration_in_millis

    @property
    def edges(self) -> List[PipelineRunNodeedges]:
        """Gets the edges of this PipelineRunNode.


        :return: The edges of this PipelineRunNode.
        :rtype: List[PipelineRunNodeedges]
        """
        return self._edges

    @edges.setter
    def edges(self, edges: List[PipelineRunNodeedges]):
        """Sets the edges of this PipelineRunNode.


        :param edges: The edges of this PipelineRunNode.
        :type edges: List[PipelineRunNodeedges]
        """

        self._edges = edges

    @property
    def id(self) -> str:
        """Gets the id of this PipelineRunNode.


        :return: The id of this PipelineRunNode.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id: str):
        """Sets the id of this PipelineRunNode.


        :param id: The id of this PipelineRunNode.
        :type id: str
        """

        self._id = id

    @property
    def result(self) -> str:
        """Gets the result of this PipelineRunNode.


        :return: The result of this PipelineRunNode.
        :rtype: str
        """
        return self._result

    @result.setter
    def result(self, result: str):
        """Sets the result of this PipelineRunNode.


        :param result: The result of this PipelineRunNode.
        :type result: str
        """

        self._result = result

    @property
    def start_time(self) -> str:
        """Gets the start_time of this PipelineRunNode.


        :return: The start_time of this PipelineRunNode.
        :rtype: str
        """
        return self._start_time

    @start_time.setter
    def start_time(self, start_time: str):
        """Sets the start_time of this PipelineRunNode.


        :param start_time: The start_time of this PipelineRunNode.
        :type start_time: str
        """

        self._start_time = start_time

    @property
    def state(self) -> str:
        """Gets the state of this PipelineRunNode.


        :return: The state of this PipelineRunNode.
        :rtype: str
        """
        return self._state

    @state.setter
    def state(self, state: str):
        """Sets the state of this PipelineRunNode.


        :param state: The state of this PipelineRunNode.
        :type state: str
        """

        self._state = state
| [
"cliffano@gmail.com"
] | cliffano@gmail.com |
ccdd1734fc9702b0ed8d1756a44062042bd3ab70 | ed346f01861dc28a4c5ccb065ae1e1ea6131fa4d | /file_samples/pract13_ex3.py | c9851c77854f4b771da6122967607fcb68266a4e | [] | no_license | lucabianco78/QCBsciprolab | 69122534138ad32466c674b298b22421566f9817 | 9afc46563559acf5b75f3a8e48ad90e1b921bc1a | refs/heads/master | 2022-07-27T23:30:05.964541 | 2020-09-11T18:43:33 | 2020-09-11T18:43:33 | 148,633,966 | 0 | 0 | null | 2022-07-06T19:53:17 | 2018-09-13T12:25:39 | Jupyter Notebook | UTF-8 | Python | false | false | 1,842 | py | import random
import unittest
def sortCSV(mystr):
tmp = mystr.split(",")
tmp.sort(reverse=True)
return ",".join(tmp)
class Testing(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(Testing, self).__init__(*args, **kwargs)
#create a random string
self.alphabet = "abcdefghkjilmnopqrstuvwyz"
self.data = ""
#create 15 random strings
for i in range(15):
word = ""
#each of them has a random length up to 20
j = random.randint(1,20)
for ind in range(j):
#pick up to 20 random letters
t = random.randint(1,len(self.alphabet)-1)
word += self.alphabet[t]
if(len(self.data) == 0):
self.data = word
else:
self.data += "," + word
def test_reslen(self):
self.assertTrue(len(self.data) == len(sortCSV(self.data)))
def test_elcount(self):
res = sortCSV(self.data).split(",")
self.assertTrue(len(self.data.split(",")) == len(res))
def test_elsorting(self):
res = sortCSV(self.data).split(",")
for ind in range(len(res)-1):
self.assertTrue(res[ind]> res[ind+1])
def test_empty(self):
self.assertEqual(sortCSV(""),"")
def test_onlyOne(self):
j = random.randint(1,20)
word = ""
for ind in range(j):
#pick up to 20 random letters
t = random.randint(0,len(self.alphabet)-1)
word += self.alphabet[t]
self.assertEqual(sortCSV(word), word)
if __name__ == "__main__":
mystr = "book,tree,final,example,testing,zed,all,hair,lady,figure,tap,spring,test,fin,tail"
print("Original:\n{}".format(mystr))
print("Sorted:\n{}".format(sortCSV(mystr)))
unittest.main()
| [
"luca.bianco@fmach.it"
] | luca.bianco@fmach.it |
6dc0a1d0e1425a823db217876fa1cdd142790f91 | e684304acc713dd5a0f47947bbf42dbea49dd1ec | /run.py | 5adab088518aa4a43504dd44028ceee9cb922456 | [
"MIT"
] | permissive | pranavaddepalli/filterbot | b5025cf734dabfabc0981052e8b2b0b4f1d044b2 | d6a4b0f4ac16b54eb63b1140f0ddc29a7da6d3e5 | refs/heads/master | 2022-12-07T16:25:28.512476 | 2020-08-31T17:15:21 | 2020-08-31T17:15:21 | 291,506,775 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,467 | py | import discord
import os
client = discord.Client()

# Mutable module-level state shared by the event handlers below.
banned = []             # lowercase words whose messages get deleted
custom_responses = {}   # discord user -> list of words to send whenever they post
custom_replyto = {}     # trigger text -> reply text
@client.event
async def on_ready():
    # Fired once the gateway connection is established and the bot is logged in.
    print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
    """Handle every incoming message: dispatch on the t$ command prefix,
    otherwise apply the banned-word filter and custom responses/replies."""
    global banned
    global custom_responses
    global custom_replyto

    # Never react to the bot's own messages (avoids feedback loops).
    if message.author == client.user:
        return

    if message.content.startswith('t$currentversion'):
        await message.channel.send('version' + '15')
        return

    if message.content.startswith('t$addfilter'):
        banned_words = message.content.lower().split()[1:]
        banned += banned_words
        print(banned)
        await message.channel.send('The following was added to the banned words list: ' + str(banned))
        return

    if message.content.startswith('t$bannedlist'):
        await message.channel.send('You can\'t say these words: ' + str(banned))
        return

    if message.content.startswith('t$clearbannedlist'):
        banned = []
        await message.channel.send('Banned list cleared')
        return

    if message.content.startswith('t$filteroff'):
        word = message.content.lower().split()[1:]
        # NOTE(review): raises IndexError with no argument and ValueError if
        # the word is not on the list — confirm desired behavior.
        banned.remove(word[0])
        # Bug fix: *word* is a list; "'Removed ' + word" raised TypeError, so
        # this confirmation was never sent.
        await message.channel.send('Removed ' + word[0] + ' from the banned list')
        return

    if message.content.startswith('t$clearcustomresponses'):
        custom_responses = {}
        await message.channel.send('Custom responses cleared')
        return

    if message.content.startswith('t$respondto'):
        respondto_user = message.mentions[0]
        respondto_content = message.content.split()[2:]
        custom_responses[respondto_user] = respondto_content
        await message.channel.send('added to custom responses')
        return

    if message.content.startswith('t$replyto'):
        replyto_text = message.content.split()[1:]
        await message.channel.send('What do you want me to reply with?')

        def replyto_check(m):
            return m.channel == message.channel and m.author == message.author

        replyto_content = await client.wait_for('message', check=replyto_check)
        # Bug fix: triggers are matched against message.content.lower() below,
        # so the key must be stored lowercased or mixed-case triggers never fire.
        custom_replyto[" ".join(replyto_text).lower()] = replyto_content.content
        await message.channel.send("Done! Added to the custom replies list")
        return

    if message.content.startswith('t$customreplies'):
        await message.channel.send('Current custom replies: ' + str(custom_replyto))
        return

    if message.content.startswith('t$customresponses'):
        await message.channel.send('Current custom responses: ' + str(custom_responses))
        return

    if message.content.startswith('t$clearcustomreplies'):
        custom_replyto = {}
        await message.channel.send('Custom replies cleared')
        return

    # evaluate messages
    for banned_word in banned:
        if banned_word in message.content.lower():
            await message.delete()
            await message.channel.send('You can\'t say that here! I deleted your message.')

    for user in custom_responses:
        if message.author == user:
            await message.channel.send(" ".join(custom_responses[user]))

    for text in custom_replyto.keys():
        if text in message.content.lower():
            await message.channel.send(custom_replyto[text])
# run with environment variable token
client.run(os.environ['TOKEN'])  # blocking call; raises KeyError if TOKEN is unset
"pranav.addepalli@gmail.com"
] | pranav.addepalli@gmail.com |
e9c8a8e78424da203b64fe5bca78bb96704607eb | b30175445668d7a0be6b867c781ce7e6e64e8f51 | /code/tfspkl_main.py | 8c14293de3aa0c5275267377a062535dbccd9431 | [] | no_license | PrincetonCompMemLab/247-pickling | 266eb7f7bd8aa6f314e07174cdd92d41298ba637 | 20c513ff13ab1813d985f8ac2c90d9a6e4d0222f | refs/heads/main | 2023-04-02T06:35:40.923881 | 2021-04-14T05:35:15 | 2021-04-14T05:35:15 | 357,971,138 | 0 | 0 | null | 2021-04-14T16:26:24 | 2021-04-14T16:26:24 | null | UTF-8 | Python | false | false | 10,553 | py | '''
Filename: /scratch/gpfs/hgazula/247-project/tfs_pickling.py
Path: /scratch/gpfs/hgazula/247-project
Created Date: Tuesday, December 1st 2020, 8:19:27 pm
Author: Harshvardhan Gazula
Description: Contains code to pickle 247 data
Copyright (c) 2020 Your Company
'''
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
from tfspkl_build_matrices import build_design_matrices
from tfspkl_config import build_config
from tfspkl_parser import arg_parser
from utils import main_timer
def save_pickle(args, item, file_name):
    """Serialize *item* to args.PKL_DIR/<file_name>.pkl (extension added
    unless already present)."""
    if not file_name.endswith('.pkl'):
        file_name += '.pkl'
    target = os.path.join(args.PKL_DIR, file_name)
    with open(target, 'wb') as fh:
        pickle.dump(item, fh)
    return
def find_switch_points(array):
    """Indices where consecutive entries of *array* differ (speaker turns)."""
    changed = array[:-1] != array[1:]
    return np.nonzero(changed)[0] + 1
def get_sentence_length(section):
    """Signal length of a section: last word's offset minus first word's onset.

    Relies on column positions: column 1 = onset, column 2 = offset.
    """
    first_word_onset = section.iloc[0, 1]
    last_word_offset = section.iloc[-1, 2]
    return last_word_offset - first_word_onset
def append_sentence(args, section):
    """Attach a 'sentence' column: the joined words for tfs projects,
    None otherwise."""
    joined = ' '.join(section['word']) if args.project_id == 'tfs' else None
    section['sentence'] = joined
    return section
def append_sentence_length(section):
    """Attach the section's signal length as 'sentence_signal_length'."""
    section['sentence_signal_length'] = get_sentence_length(section)
    return section
def append_num_words(section):
    """Attach the section's word count as 'num_words'."""
    section['num_words'] = len(section)
    return section
def append_sentence_idx(section, idx):
    """Attach the 1-based sentence index (*idx* is 0-based)."""
    section['sentence_idx'] = idx + 1
    return section
def convert_labels_to_df(labels):
    """Wrap raw label tuples in a DataFrame with canonical column names."""
    columns = ['word', 'onset', 'offset', 'accuracy', 'speaker']
    return pd.DataFrame(labels, columns=columns)
def split_convo_to_sections(conversation):
    """Split a conversation's labels into contiguous same-speaker sections."""
    convo_df = convert_labels_to_df(conversation)
    switch_idx = find_switch_points(convo_df.speaker.values)
    return np.split(convo_df, switch_idx, axis=0)
def process_sections(args, section_list):
    """Annotate each same-speaker section and concatenate them back together."""
    annotated = []
    for idx, section in enumerate(section_list):
        section = append_sentence_length(section)
        section = append_sentence(args, section)
        section = append_num_words(section)
        section = append_sentence_idx(section, idx)
        annotated.append(section)
    return pd.concat(annotated, ignore_index=True)
def create_sentence(args, conversation):
    """Split a conversation at speaker turns and annotate every section
    with sentence-level columns.

    Args:
        args: parsed command-line namespace
        conversation: raw label tuples for one conversation

    Returns:
        DataFrame with one row per word plus sentence annotations.
    """
    sections = split_convo_to_sections(conversation)
    return process_sections(args, sections)
def word_stemming(conversation, ps):
    """Attach 'stemmed_word' by applying the stemmer *ps* to every word."""
    conversation['stemmed_word'] = conversation['word'].map(ps.stem)
    return conversation
def shift_onsets(conversation, shift):
    """Add the stitched-signal offset *shift* to onset/offset columns."""
    for col in ('onset', 'offset'):
        conversation['adjusted_' + col] = conversation[col] + shift
    return conversation
def add_sentence_index(conversation, length):
    """Shift this conversation's 1-based sentence indices by *length* and
    return (conversation, new running total).

    Bug fix: the running total must accumulate across conversations
    (previous total + this conversation's sentence count).  The old code
    replaced it with only the current conversation's count, so sentence
    indices collided from the third conversation onward.
    """
    conversation['sentence_idx'] += length
    length += conversation['sentence_idx'].nunique()
    return conversation, length
def add_conversation_id(conversation, conv_id):
    """Tag every row with its 1-based conversation id."""
    conversation['conversation_id'] = conv_id
    return conversation
def add_conversation_name(args, conversation, name):
    """Tag rows with the conversation directory's basename (tfs only;
    None for other projects)."""
    value = os.path.basename(name) if args.project_id == 'tfs' else None
    conversation['conversation_name'] = value
    return conversation
def process_labels(args, trimmed_stitch_index, labels, conversations):
    """Adjust label onsets to account for stitched signal length.
    Also perform sentence construction on the labels.

    Args:
        trimmed_stitch_index (list): stitch indices of trimmed signal
        labels (list): of tuples (word, speaker, onset, offset, accuracy)

    Returns:
        DataFrame: labels
    """
    # WARNING: mutates the caller's trimmed_stitch_index in place —
    # prepends 0 and drops the last entry so each element becomes the
    # signal start of the corresponding conversation.
    trimmed_stitch_index.insert(0, 0)
    trimmed_stitch_index.pop(-1)

    new_labels = []
    len_to_add = 0
    # conv_id is 1-based; each conversation's onsets are shifted by its
    # start sample in the stitched signal.
    for conv_id, (conversation_name, start, sub_list) in enumerate(
            zip(conversations, trimmed_stitch_index, labels), 1):
        sub_list = create_sentence(args, sub_list)
        sub_list = shift_onsets(sub_list, start)
        sub_list = add_conversation_id(sub_list, conv_id)
        sub_list = add_conversation_name(args, sub_list, conversation_name)
        sub_list, len_to_add = add_sentence_index(sub_list, len_to_add)
        new_labels.append(sub_list)

    return pd.concat(new_labels, ignore_index=True)
def inclass_word_freq(df):
    """Per-word frequency within each production phase."""
    per_phase = df.groupby(['word', 'production'])['word']
    df['word_freq_phase'] = per_phase.transform('count')
    return df
def total_word_freq(df):
    """Overall per-word frequency across the whole dataframe."""
    df['word_freq_overall'] = df.groupby('word')['word'].transform('count')
    return df
def create_production_flag(df):
    """Flag rows produced by the subject: 1 for Speaker1, else 0."""
    df['production'] = df['speaker'].eq('Speaker1').astype(int)
    return df
def filter_on_freq(args, df):
    """Keep only words that occur at least args.vocab_min_freq times."""
    frequent = df.groupby('word').filter(
        lambda grp: len(grp) >= args.vocab_min_freq)
    return frequent.reset_index(drop=True)
def stratify_split(df, split_str=None):
    """Return the five test folds of a (Stratified)KFold over df.word."""
    if split_str is None:
        splitter = KFold(n_splits=5, shuffle=True, random_state=0)
    elif split_str == 'stratify':
        splitter = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
    else:
        raise Exception('wrong string')
    # Keep only the test indices of each split.
    return [test_idx for _, test_idx in splitter.split(df, df.word)]
def create_folds(args, df, split_str=None):
    """create new columns in the df with the folds labeled

    Args:
        args (namespace): namespace object with input arguments (unused here)
        df (DataFrame): labels
    """
    fold_column_names = ['fold' + str(i) for i in range(5)]
    folds = stratify_split(df, split_str=split_str)

    # Go through each fold, and split
    for i, fold_col in enumerate(fold_column_names):
        # Shift the number of folds for this iteration
        # [0 1 2 3 4] -> [1 2 3 4 0] -> [2 3 4 0 1]
        #                       ^ dev fold
        #                 ^ test fold
        #                 | - | <- train folds
        # NOTE(review): np.roll over a list of unequal-length index arrays
        # builds an object array — deprecated for ragged input in newer
        # NumPy; confirm the installed version tolerates it.
        folds_ixs = np.roll(folds, i)
        *_, dev_ixs, test_ixs = folds_ixs

        df[fold_col] = 'train'
        df.loc[dev_ixs, fold_col] = 'dev'
        df.loc[test_ixs, fold_col] = 'test'

    return df
def create_labels_pickles(args,
                          stitch_index,
                          labels,
                          convo_labels_size,
                          convs,
                          label_str=None):
    """Annotate labels (folds, frequencies, production flag) and pickle them.

    Writes '<subject>_<label_str>_labels.pkl'; when args.vocab_min_freq is
    set, additionally writes a frequency-filtered, stratified variant
    '<subject>_<label_str>_labels_MWF_<freq>.pkl'.
    """
    labels_df = process_labels(args, stitch_index, labels, convs)
    labels_df = create_production_flag(labels_df)
    labels_df = inclass_word_freq(labels_df)
    labels_df = total_word_freq(labels_df)
    labels_df = create_folds(args, labels_df)

    labels_dict = dict(labels=labels_df.to_dict('records'),
                       convo_label_size=convo_labels_size)
    pkl_name = '_'.join([args.subject, label_str, 'labels'])
    save_pickle(args, labels_dict, pkl_name)

    if args.vocab_min_freq:
        # Rare words are dropped first so the stratified split has enough
        # samples of every remaining word in each fold.
        labels_df = filter_on_freq(args, labels_df)
        labels_df = create_folds(args, labels_df, 'stratify')

        label_folds = labels_df.to_dict('records')
        pkl_name = '_'.join(
            [args.subject, label_str, 'labels_MWF',
             str(args.vocab_min_freq)])
        save_pickle(args, label_folds, pkl_name)
@main_timer
def main():
    """Build design matrices for one subject and pickle every artifact:
    full/trimmed/binned signals, their stitch indices, electrode maps,
    and full/trimmed label sets."""
    args = arg_parser()
    args = build_config(args)

    (full_signal, full_stitch_index, trimmed_signal, trimmed_stitch_index,
     binned_signal, bin_stitch_index, full_labels, trimmed_labels,
     convo_full_examples_size, convo_trimmed_examples_size, electrodes,
     electrode_names, conversations,
     subject_id) = build_design_matrices(vars(args), delimiter=" ")

    # Create pickle with full signal
    full_signal_dict = dict(full_signal=full_signal,
                            full_stitch_index=full_stitch_index,
                            electrode_ids=electrodes,
                            electrode_names=electrode_names,
                            subject=subject_id)
    save_pickle(args, full_signal_dict, args.subject + '_full_signal')

    # Create pickle with full stitch index
    save_pickle(args, full_stitch_index, args.subject + '_full_stitch_index')

    # Create pickle with electrode maps
    electrode_map = dict(subject=subject_id,
                         electrode_id=electrodes,
                         electrode_name=electrode_names)
    save_pickle(args, electrode_map, args.subject + '_electrode_names')

    # Create pickle with trimmed signal
    trimmed_signal_dict = dict(trimmed_signal=trimmed_signal,
                               trimmed_stitch_index=trimmed_stitch_index,
                               electrode_ids=electrodes,
                               electrode_names=electrode_names,
                               subject=subject_id)
    save_pickle(args, trimmed_signal_dict, args.subject + '_trimmed_signal')

    # Create pickle with full stitch index
    save_pickle(args, trimmed_stitch_index,
                args.subject + '_trimmed_stitch_index')

    # Create pickle with binned signal
    binned_signal_dict = dict(binned_signal=binned_signal,
                              bin_stitch_index=bin_stitch_index,
                              electrode_ids=electrodes,
                              electrode_names=electrode_names,
                              subject=subject_id)
    save_pickle(args, binned_signal_dict, args.subject + '_binned_signal')

    # Create pickle with full stitch index
    save_pickle(args, bin_stitch_index, args.subject + '_bin_stitch_index')

    # Create pickle with trimmed labels
    # NOTE: process_labels (called inside) mutates the stitch-index lists
    # in place, so these calls must come after the signal pickles above.
    create_labels_pickles(args, trimmed_stitch_index, trimmed_labels,
                          convo_trimmed_examples_size, conversations,
                          'trimmed')
    create_labels_pickles(args, full_stitch_index, full_labels,
                          convo_full_examples_size, conversations, 'full')

    return


if __name__ == "__main__":
    main()
| [
"hvgazula@umich.edu"
] | hvgazula@umich.edu |
c11be51346e8d98da85297b74e11d3f6223ea0f5 | 1d673ccf96a80bf9e8474b92332d91a916c9ea90 | /PyZMQCommunicator/MissionDesigner - 180522.py | 586926d49d054f3c0e09a86212496a349a57e08e | [] | no_license | Seunghwan17/ZMQCommunicator | 6309c0dc7c4c2f7b4fd9e0a61e1265ba3ff6a56f | 65686436435cacfb868b09275be0e0c18a0aeee8 | refs/heads/master | 2020-03-23T04:33:32.332919 | 2018-07-16T05:16:32 | 2018-07-16T05:16:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,888 | py | from dronekit import Command, CommandSequence
from DroneCommander import *
from pymavlink import *
def load_mission(drones, homePosition, fileName):
    """Parse a mission script and upload the resulting command list to each drone.

    Script lines are whitespace-separated: 't <id> <alt>' (takeoff),
    'w <id> <x> <y> <alt>' (waypoint in local metres, converted to GPS
    around homePosition), 'l <id>' (land).
    """
    # with-statement guarantees the file handle is closed even on a parse error
    # (the old code leaked it if any line raised).
    with open(fileName, 'r') as missionFile:
        lines = missionFile.readlines()

    for drone in drones:
        clear_mission(drone)

    count = 1  # doubles as the speed argument for each successive waypoint
    for line in lines:
        missionCommand = line.split()
        droneID = int(missionCommand[1])
        drone = drones[droneID]
        if missionCommand[0] == 't':
            altitude = int(missionCommand[2])
            add_takeoff(drone, altitude)
        if missionCommand[0] == 'w':
            altitude = float(missionCommand[4])
            gps = local_to_gps(float(missionCommand[2]), float(missionCommand[3]),
                               homePosition[0], homePosition[1])
            latitude = gps[0]
            longitude = gps[1]
            add_change_speed(drone, count)
            add_waypoint(drone, latitude, longitude, altitude, distance=0.5)
            add_loiter(drone, 10)
            count = count + 1
        if missionCommand[0] == 'l':
            add_land(drone)

    for drone in drones:
        upload(drone)
def upload(drone):
    """Push the staged command list to the vehicle and wait for the ack."""
    drone.commands.upload()
    drone.commands.wait_ready()  # block until the vehicle confirms receipt
    print('upload complete')
def add_takeoff(drone, altitude):
    """Stage a NAV_TAKEOFF command climbing to *altitude* metres."""
    cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                  mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, 0, 0, 0, 0, 0, 0, 0, 0, altitude)
    print('takeoff added')
    drone.commands.add(cmd)
def add_land(drone):
    """Stage a NAV_LAND command at the current position."""
    cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                  mavutil.mavlink.MAV_CMD_NAV_LAND, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    print('land added')
    drone.commands.add(cmd)
def add_waypoint(drone, latitude, longitude, altitude, delay=0.0, distance=1.0):
    """Stage a NAV_WAYPOINT at the given GPS fix (acceptance radius *distance*,
    hold for *delay* seconds)."""
    target = LocationGlobalRelative(latitude, longitude, altitude)
    cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                  mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, delay, distance, 0, 0,
                  target.lat, target.lon, target.alt)
    print('waypoint added')
    drone.commands.add(cmd)
def add_spline_waypoint(drone, latitude, longitude, altitude, delay=0):
    """Stage a NAV_SPLINE_WAYPOINT at the given GPS fix."""
    target = LocationGlobalRelative(latitude, longitude, altitude)
    cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                  mavutil.mavlink.MAV_CMD_NAV_SPLINE_WAYPOINT, 0, 0, delay, 0, 0, 0,
                  target.lat, target.lon, target.alt)
    drone.commands.add(cmd)
def add_change_speed(drone, speed):
    """Stage a DO_CHANGE_SPEED command (groundspeed, in m/s)."""
    cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                  mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED, 0, 0, 1, speed, -1, 0, 0, 0, 0)
    drone.commands.add(cmd)
def add_loiter(drone, time=0):
    """Stage a loiter: indefinite when time == 0, else LOITER_TIME seconds."""
    if time:
        cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                      mavutil.mavlink.MAV_CMD_NAV_LOITER_TIME, 0, 0, time, 0, 0, 0, 0, 0, 0)
    else:
        cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                      mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    drone.commands.add(cmd)
def add_guided_enable(drone):
    """Stage a NAV_GUIDED_ENABLE command (param1=1 hands control to guided mode)."""
    cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                  mavutil.mavlink.MAV_CMD_NAV_GUIDED_ENABLE, 0, 0, 1, 0, 0, 0, 0, 0, 0)
    drone.commands.add(cmd)
def add_condition_distance(drone, distance):
    """Stage a CONDITION_DISTANCE command (delay next item until within *distance*)."""
    cmd = Command(0, 0, 0, mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT,
                  mavutil.mavlink.MAV_CMD_CONDITION_DISTANCE, 0, 0, distance, 0, 0, 0, 0, 0, 0)
    drone.commands.add(cmd)
def clear_mission(drone):
    """Drop every staged command from the drone's mission."""
    commands = drone.commands
    commands.clear()
"dehan2@naver.com"
] | dehan2@naver.com |
a31c47466a63ebe9f2458043468b38fdab092b6d | 825b45bc5c8402abb0041bf7e33fc1c92948cd4e | /ecommerce_app/ecommerce/store/migrations/0003_auto_20201025_1739.py | 975ec0eea86750d49b413d51e7617e7f6801d3c9 | [] | no_license | wambuic/Ecommerce_Website | 8d2cbf4448c9576614faf22410be6196a6ed8e57 | 08cab454e01f544b0a70b1b4b6cedf3bd2067ef0 | refs/heads/master | 2023-01-01T07:14:24.056172 | 2020-10-26T20:42:13 | 2020-10-26T20:42:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # Generated by Django 3.1.1 on 2020-10-25 14:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: narrow Product.price to a
    DecimalField(max_digits=7, decimal_places=1).

    Do not edit by hand beyond documentation; Django regenerates files
    like this from model changes.
    """

    dependencies = [
        ('store', '0002_product_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.DecimalField(decimal_places=1, max_digits=7),
        ),
    ]
| [
"consolatawambuimacharia@gmail.com"
] | consolatawambuimacharia@gmail.com |
0457440b4e3f996aaa557313efda6c7f2d6e1a76 | 069ce71ee1ca85988ebf5bc179bcafbbd3d04f7f | /golib/views.py | 70f41659f6d03852008364558b13e70346ea68e7 | [] | no_license | 9gix/golib | 21a1376b553a83b743c68f418f82a488c9964c1a | fbcfe0a9c5e0523c7b2e85f46cb0d18a4ac85db5 | refs/heads/master | 2021-03-12T19:57:59.971214 | 2012-11-04T17:22:59 | 2012-11-04T17:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | from django.shortcuts import redirect, render
from django.core.urlresolvers import reverse
def index(request):
    """Landing page: authenticated users are sent straight to the book
    catalog; everyone else sees the public index template."""
    if not request.user.is_authenticated():
        return render(request, 'index.html', {})
    return redirect(reverse('catalog:book_list'))
| [
"yeo.eugene.oey@gmail.com"
] | yeo.eugene.oey@gmail.com |
aab890378fb0a5a2fca1102a0b1220f988bed7af | 03cab35935538a7049e7c9006c56d1bc6a339142 | /Recursion/Problems/permutation.py | e5f50184e73e667058341fc4dcb189f2684149bf | [
"MIT"
] | permissive | kannan5/Algorithms-And-DataStructures | a9cc8695f8f848f30c223275f51024a379dcb732 | 5d962bc123ecc51899894faac02214e296cc55d8 | refs/heads/master | 2023-03-13T15:29:11.702298 | 2021-03-10T08:38:13 | 2021-03-10T08:38:13 | 284,735,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py |
def Permutation(input_list, partial, used):
    """Print every distinct permutation of input_list, one per line.

    Args:
        input_list: elements to permute.  Sort it first if it may contain
            duplicates - the duplicate-skip rule below assumes equal
            elements are adjacent.
        partial: working prefix; pass [] at the top-level call.
        used: per-index "taken" flags; pass [False] * len(input_list).
    """
    size = len(input_list)
    if len(partial) == size:
        print(partial)
        return
    for i in range(size):
        if used[i]:
            continue
        # Skip a duplicate whose identical left neighbour is unused at this
        # depth (classic distinct-permutations pruning).  The i > 0 guard
        # fixes the original wraparound bug where input_list[-1]/used[-1]
        # was consulted for i == 0 - e.g. [1, 1] printed nothing at all.
        if i > 0 and input_list[i] == input_list[i - 1] and not used[i - 1]:
            continue
        used[i] = True
        partial.append(input_list[i])
        Permutation(input_list, partial, used)
        used[i] = False
        partial.pop()
if __name__ == '__main__':
    # Demo run: print all permutations of a 4-element list.
    Permutation([1, 2, 3, 4], [], [False] * 4)
| [
"kannanhlr@gmail.com"
] | kannanhlr@gmail.com |
0741736e6535eab3dc9df61cad2e63d918311716 | e961e47a7d835b41a1a3e579d2feb96cc9504e92 | /demo/GetUrl.py | 03bd87da189ed7af035976b2f3e8e0e240b634a9 | [] | no_license | asw5757257/Py- | 917ab77aea080029a7daf1467df4829ac3d0dff1 | 7720040d120c3bac1d194d7aaf7cb211ace56367 | refs/heads/master | 2020-07-23T08:20:55.682206 | 2019-09-10T08:01:26 | 2019-09-10T08:01:26 | 207,497,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py |
url = "https://movie.douban.com/top250?start=%d&filter="


def Get_Data_Url(p):
    """Build the Douban Top-250 page URL for offset p, print it, and return it."""
    page_url = url % p
    print(page_url)
    return page_url
if __name__ == '__main__':
    # Walk the Top-250 listing 25 entries per page (offsets 0, 25, ..., 225).
    for offset in range(0, 249, 25):
        Get_Data_Url(offset)
| [
"207656645@qq.com"
] | 207656645@qq.com |
df7859b3968e2e07fe6d573c3c0175bb0d06485b | 72dbf8366cf17b6a81ab37e72af667726e3f2661 | /store/migrations/0016_auto_20201104_1719.py | 31c0e9e8bf2783c9b201a665dd614b048aa7b44d | [] | no_license | Rayhun/Django_E-Commerce_website | 3aef732ffa0a41509be95ced3c33b845233903a7 | 1a5f7e31f942914256e49ba7da1f7367a799f097 | refs/heads/main | 2023-05-23T18:18:27.875328 | 2021-04-30T19:29:06 | 2021-04-30T19:29:06 | 306,414,778 | 3 | 1 | null | 2021-04-30T19:28:58 | 2020-10-22T17:41:57 | CSS | UTF-8 | Python | false | false | 505 | py | # Generated by Django 3.1.1 on 2020-11-04 11:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Customer.gender an optional choice
    field (MALE/FEMALE/OTHERS, default 'MALE', nullable/blank).

    Do not edit by hand beyond documentation; Django regenerates files
    like this from model changes.
    """

    dependencies = [
        ('store', '0015_remove_product_product_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customer',
            name='gender',
            field=models.CharField(blank=True, choices=[('MALE', 'Male'), ('FEMALE', 'Female'), ('OTHERS', 'Others')], default='MALE', max_length=6, null=True),
        ),
    ]
| [
"rayhunkhan27@gmail.com"
] | rayhunkhan27@gmail.com |
2a9b120d2b44a4f7fdfcd4a45e7c52f4354d5c9b | 87aef9e41fe73d69ca5ccc9e28a4fac0c13457c6 | /stmpemail.py | 7ac47afefba2ba80054d503783d4163de1a60bea | [] | no_license | BillKiller/Newpos | be0a83b84ff055d31c96f5ec73f9da2ef82dbd71 | 9912e11bd63bc3f59a0ca1167bfead427d696568 | refs/heads/master | 2020-07-17T21:38:48.770133 | 2017-06-19T15:32:02 | 2017-06-19T15:32:02 | 94,324,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,507 | py | #coding:utf-8 #强制使用utf-8编码格式
import smtplib #加载smtplib模块
from email.mime.text import MIMEText
from email.utils import formataddr
# NOTE(review): sender and recipient are the same placeholder address - confirm before use.
my_sender='###747441355@163.com'    # sender mailbox account (kept in a variable for easier maintenance)
my_user='###747441355@163.com'      # recipient mailbox account (kept in a variable for easier maintenance)
def mail():
    """Send one fixed test email via the 163.com SMTP server.

    Returns True on success, False on any failure (best-effort: all
    exceptions are swallowed into the boolean result).
    """
    ret=True
    try:
        msg=MIMEText('填写邮件内容','plain','utf-8')  # plain-text body (placeholder string)
        msg['From']=formataddr(["发件人邮箱昵称",my_sender])  # (sender display name, sender address)
        msg['To']=formataddr(["收件人邮箱昵称",my_user])  # (recipient display name, recipient address)
        msg['Subject']="主题"  # mail subject line
        server=smtplib.SMTP("smtp.163.com",25)  # the sender account's SMTP server, port 25
        server.login(my_sender,"##########")  # (sender account, password placeholder)
        server.sendmail(my_sender,[my_user,],msg.as_string())  # (from, to-list, serialized message)
        server.quit()  # close the SMTP connection
    except Exception:  # any failure above is reported as False to the caller
        ret=False
    return ret
# Send the test message once at run time and report the outcome.
ret=mail()
if ret:
    print("ok")  # sent successfully; the mail should arrive shortly
else:
    # BUG FIX: the failure message used to read "filed" - corrected to "failed".
    print("failed")
| [
"747441355@qq.com"
] | 747441355@qq.com |
936baa9a603ebaf11d6c5adc98fecc3cf562f6cc | 952abfc855d0fca89200f1e428aac9a87f1d3295 | /tf114/tf09_mv2.py | 6e8e7a3f054efe4bf5163d9aaf09c665b95a2f75 | [] | no_license | TaeYeon-kim-ai/STUDY_1.py | 7570b4510bf8d9791447efe3a97a9668a1cabe06 | e14392c706b7e51e40f1ac68555e26558e25b38f | refs/heads/master | 2023-06-03T09:04:13.498591 | 2021-06-21T17:10:47 | 2021-06-21T17:10:47 | 329,834,933 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | #과제
# Assignment notes (translated from Korean):
# dimension   shape example
# scalar      1
# vector      [1,2,3]
# matrix      [[1,2],[2,3]]
# tensor      [[[1,2],[1,2,3]]]
# For x * W the two operand sizes must line up:
# x = (5, 3)
# W = (3, 1) - the second dim could be 2, 3, 4, ...; b must be addable to x*W.
# (5, 3) x (3, 1) = (5, 1)  # matmul works whenever left cols match right rows
# (3, 2) x (2, 3) = (3, 3)
# [practice] build it yourself
# verbose output shows step, cost and hypothesis // epochs = 2001, printed periodically
import tensorflow as tf
tf.set_random_seed(66)  # fix the RNG so the random weight/bias init is reproducible
x_data = [[73, 51, 65],
          [92, 98, 40],
          [89, 31, 33],
          [99, 33, 100],
          [17, 66, 79]] # (5, 3) matrix of input features
y_data = [[152],
          [185],
          [180],
          [205],
          [142]] # (5, 1) matrix of regression targets
x = tf.placeholder(tf.float32, shape = [None, 3])
y = tf.placeholder(tf.float32, shape = [None, 1])
# Rows stay free (None); w's shape must make the shape of x*w equal y's shape.
w = tf.Variable(tf.random_normal([3, 1]), name = 'weight')
b = tf.Variable(tf.random_normal([1]), name = 'bias') # a single bias value
#hypothesis = x * w + b
hypothesis = tf.matmul(x, w) + b # matmul is matrix multiplication
cost = tf.reduce_mean(tf.square(hypothesis - y))  # mean squared error
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 71e-6) # cost : 294.25824
#optimizer = tf.train.AdamOptimizer(learning_rate = 0.1) #cost : 176.789 [experiment]
train = optimizer.minimize(cost)
sess = tf.compat.v1.Session()
sess.run(tf.global_variables_initializer())
# Train for 2001 steps, logging cost and predictions every 20 steps.
for step in range(2001) :
    cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
        feed_dict = {x : x_data, y : y_data})
    if step % 20 == 0 :
        print("step : ", step, "\n", "cost : ", cost_val, "\n", hy_val)
sess.close()
# NOTE(review): the block below is dead code kept as a module-level string
# literal (cost-vs-weight plotting experiment); left byte-for-byte intact.
'''
import matplotlib.pyplot as plt
w_history = []
cost_history = []
with tf.compat.v1.Session() as sess :
    #sess.run(tf.compat.v1.global_variables_initializer()) #안해줘도 돌아감
    #텐서플로의 실질적인 w에 tf.valiable설정은 안해줘서 파이썬의 veriable로 취급하는 듯
    for i in range(-30, 50) : #-30 ~ 50
        curr_w = i * 0.1 #i*0.1단위로 증가 -3,
        curr_cost = sess.run(cost, feed_dict={w : curr_w})
        w_history.append(curr_w)
        cost_history.append(curr_cost)
print("=========================================")
print("W : ", w_history)
print("=========================================")
print("cost : ", cost_history)
print("=========================================")
plt.plot(w_history, cost_history)
plt.show()
'''
| [
"noreply@github.com"
] | noreply@github.com |
45dcf488356f131c81b6360b7d7b82addfcb891f | 27ae4f742aeb8300d29414983b9c708c3868a154 | /get_coupon_to_mysql.py | 56f10a620be7eba4d323255101ecc60be8c8dcb4 | [] | no_license | Jun10546027/python_get_coupon | 6f545b7ec2cb5d1b61811e1b9c7c00cf93b8d954 | 651a127fcef25e30c36d808171981183d5515c43 | refs/heads/master | 2020-05-19T21:06:40.238929 | 2019-05-19T14:20:53 | 2019-05-19T14:20:53 | 185,216,137 | 0 | 0 | null | null | null | null | BIG5 | Python | false | false | 7,342 | py | import requests
from bs4 import BeautifulSoup
import os
from urllib.request import urlretrieve
import pandas as pd
#資料庫存檔
import mysql.connector
from insert_mysql import prevent_duplicate
#--------這行只是要輸入密碼----------------
from password import My_password
# Establish the database connection.
mydb = mysql.connector.connect(
    user='root',
    passwd=My_password,
    host='localhost',
    database='mangerdb',
)
mycursor = mydb.cursor()
mycursor.execute('use mangerdb')
url = "https://www.4freeapp.com/"
respond = requests.get(url, headers={'User-agent': 'Mozilla/5.0'})
html = BeautifulSoup(respond.text)  # NOTE(review): no parser argument; bs4 will warn and guess one
main = html.find_all("div", class_="caption")
# Prepare empty DataFrames for the scraped records.
df = pd.DataFrame(columns=['id', 'title', 'class', 'content'])
df1 = pd.DataFrame(columns=['title', 'adress'])  # NOTE(review): 'adress' misspelling kept - it is the CSV column name
# Accumulators for one article's content and tag strings.
add_String = ''
add_Tag = ''
# URL of the newest article already scraped; the date loop below stops when it meets it again.
today_judge = ''
def judge(x):
    """Return True when the file name/extension string looks like an image.

    The test mirrors the original behaviour exactly: any occurrence of
    "jpg" or "png" anywhere in x counts, not just a trailing extension.
    """
    return "jpg" in x or "png" in x
# Scrape every article linked from the front page.
for _ in main:
    index_title = _.find("a").string # contains a leading '\n'
    index_url = _.find("a").get("href")
    # The raw site title embeds '\n' (and a bare '\' would act as an escape), so split it off first.
    Index_Title = index_title.split('\n')[1]
    # judge the url
    today_judge = index_url  # remember the newest URL; the date loop below uses it as a stop marker
    index_respond = requests.get(index_url, headers={'User-agent': 'Mozilla/5.0'})
    index_html = BeautifulSoup(index_respond.text)
    index_main = index_html.find_all("a", attrs={'style': 'margin-left: 1em; margin-right: 1em;'})
    # Article body text.
    index_content = index_html.find_all("span", attrs={'style': 'font-size: large;'})
    # Article tags.
    index_tag = index_html.find_all("div", class_="widget-tags")
    # Collect the tag list for the MySQL 'class' column.
    for text in index_tag:
        print(Index_Title)
        text_a = text.find_all("a")
        for str in text_a:  # NOTE(review): loop variable shadows the builtin str
            # Object category.
            class_tab = str.string
            web_tag = class_tab.split("\n")[1]
            add_Tag = add_Tag + web_tag + "/"
    # Collect body text for the MySQL 'content' column.
    for text in index_content:
        if text.string is not None:
            # add_text.append(text.string.splitlines())
            # splitlines strips the embedded '\n'
            add_String = add_String + text.string.splitlines()[0] + "\ "
    # Derive the MySQL record ID from the download link.
    for i in index_main:
        downloadURL = i.get("href")
        ds = downloadURL.split("/")
        ## the 6th path segment is used as the record ID
        coupon_id = ds[5]
        # Insert the record into the database (de-duplicated by insert_mysql helper).
        prevent_duplicate(coupon_id, Index_Title.splitlines()[-1], add_Tag, add_String)
        # Also store it in the DataFrame.
        s = pd.Series([coupon_id, Index_Title.splitlines()[-1], add_Tag, add_String],
                      index=['id', 'title', 'class', 'content'])
        df = df.append(s, ignore_index=True)
    # Reset the accumulators for the next article.
    add_String = ''
    add_Tag = ''
    # Download the images.
    for i in index_main:
        # One folder per article; split on '/' in case the title contains one.
        dname = "C:/Users/Jun/Desktop/coupon/" + Index_Title.split("/")[0] + "/"
        if not os.path.exists(dname):
            os.mkdir(dname)
        downloadURL = i.get("href")
        print(downloadURL)
        ds = downloadURL.split("/")
        filetype = ds[-1].split(".")[-1]
        judgeURL = filetype
        # NOTE(review): a non-empty extension string is always truthy here -
        # judge(filetype) was probably intended (as used in the loop below).
        if judgeURL:
            fpath = dname + ds[5] + "." + filetype
            urlretrieve(downloadURL, fpath)
            s1 = pd.Series([Index_Title.splitlines()[-1], fpath], index=['title', 'adress'])
            df1 = df1.append(s1, ignore_index=True)
print("判斷標準", today_judge)
import datetime
# Walk backwards one day at a time from today to 2019-05-11, scraping each archive page.
now = datetime.datetime.now().strftime("%Y-%m-%d")
now = now.split("-")
begin = datetime.date(int(now[0]), int(now[1]), int(now[2]))
end = datetime.date(2019, 5, 11)
d = begin
delta = datetime.timedelta(days=1)
while d >= end:
    print(d.strftime("%Y-%m-%d"))
    url = "https://www.4freeapp.com/search?updated-max=" + d.strftime(
        "%Y-%m-%d") + "T10%3A53%3A00%2B08%3A00&max-results=7#PageNo=2"
    respond = requests.get(url, headers={'User-agent': 'Mozilla/5.0'})
    html = BeautifulSoup(respond.text)
    main = html.find_all("div", class_="caption")
    # Article body text.
    # NOTE(review): index_html here is stale (left over from the previous
    # article) - these two lookups probably meant to use html instead.
    index_content = index_html.find_all("span", attrs={'style': 'font-size: large;'})
    # Article tags.
    index_tag = index_html.find_all("div", class_="widget-tags")
    for times in reversed(main):
        index_title = times.find("a").string # contains a leading '\n'
        index_url = times.find("a").get("href")
        Index_Title = index_title.split('\n')[1] ## strip the '\n' out of the title
        if today_judge == index_url:
            # Reached an article already scraped: move the stop marker to the
            # page's last entry and skip the rest of this page.
            today_judge = main[-1].find("a").get("href")
            break
        print("頁尾", main[-1].find("a").get("href"))
        print("判斷標準", today_judge)
        print(Index_Title)
        index_respond = requests.get(index_url, headers={'User-agent': 'Mozilla/5.0'})
        index_html = BeautifulSoup(index_respond.text)
        index_main = index_html.find_all("a", attrs={'style': 'margin-left: 1em; margin-right: 1em;'})
        # Article body text.
        index_content = index_html.find_all("span", attrs={'style': 'font-size: large;'})
        # Save title and content for the CSV.
        for text in index_content:
            if text.string is not None:
                # add_text.append(text.string.splitlines())
                # splitlines strips the embedded '\n'
                add_String = add_String + text.string.splitlines()[0] + "\ "
        # Collect the tag list for the MySQL 'class' column.
        for text in index_tag:
            print(Index_Title)
            text_a = text.find_all("a")
            for str in text_a:  # NOTE(review): loop variable shadows the builtin str
                # Object category.
                class_tab = str.string
                web_tag = class_tab.split("\n")[1]
                add_Tag = add_Tag + web_tag + "/"
        # Derive the MySQL record ID from the download link.
        for i in index_main:
            downloadURL = i.get("href")
            ds = downloadURL.split("/")
            ## the 6th path segment is the record ID
            coupon_id = ds[5]
            print(coupon_id)
            # Insert the record into the database.
            prevent_duplicate(coupon_id, Index_Title.splitlines()[-1], add_Tag, add_String)
            # Also store it in the DataFrame.
            s = pd.Series([coupon_id, Index_Title.splitlines()[-1], add_Tag, add_String],
                          index=['id', 'title', 'class', 'content'])
            df = df.append(s, ignore_index=True)
        # Reset the accumulators.
        add_String = ''
        add_Tag = ''
        ## Save the images.
        for i in index_main:
            print(Index_Title)
            # One folder per article; split on '/' in case the title contains one.
            dname = "C:/Users/Jun/Desktop/coupon/" + Index_Title.split("/")[0] + "/"
            if not os.path.exists(dname):
                os.mkdir(dname)
            downloadURL = i.get("href")
            print(downloadURL)
            ds = downloadURL.split("/")
            filetype = ds[-1].split(".")[-1]
            if judge(filetype):
                fpath = dname + ds[5] + "." + filetype
                urlretrieve(downloadURL, fpath)
                s1 = pd.Series([Index_Title.splitlines()[-1], fpath], index=['title', 'adress'])
                df1 = df1.append(s1, ignore_index=True)
    d -= delta
print(df)
print(df1)
df.to_csv("title_content.csv", encoding='utf-8', index=False)
df1.to_csv("title_address.csv", encoding='utf-8', index=False) | [
"10546027@ntub.edu.tw"
] | 10546027@ntub.edu.tw |
75af37c7035fa42e49638ffc2f8b9d925f49ea7e | ee00ebe5e71c36b05fbff993b19e9723b963313f | /35_inserted_position.py | f5142c9a3ab2c24b65c81f2721f1dd7ad04a16e3 | [] | no_license | 26XINXIN/leetcode | f365560d93604a28abf399707b333f3c11f924ec | 78ed11f34fd03e9a188c9c6cb352e883016d05d9 | refs/heads/master | 2021-06-28T16:31:45.103879 | 2020-09-19T20:33:55 | 2020-09-19T20:33:55 | 144,975,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | class Solution:
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
ans = self.binary_search(nums, 0, len(nums)-1, target)
if ans == len(nums)-1 and target > nums[-1]:
ans += 1
return ans
def binary_search(self, nums, l, r, target):
if l == r:
return l
mid = (l + r) // 2
if nums[mid] == target:
return mid
elif nums[mid] < target:
return self.binary_search(nums, mid + 1, r, target)
else:
return self.binary_search(nums, l, mid, target)
| [
"yangxin.nlp@bytedance.com"
] | yangxin.nlp@bytedance.com |
843da81a8ffb5a42c03e04f0275721a829f06491 | e35f24e01a7f8888e30bb7fb6b758b03c56f9266 | /py/goiot/dataserver/views.py | def522155add1b9dea5d4ff0e6b2bcce74013516 | [] | no_license | huayunfly/repo | 7256a554c06f7c2d17823640a451a9a760ea78d3 | 6f5bc720ca425ae725cd2968e56ebb50cddbfd51 | refs/heads/master | 2020-04-06T06:35:02.952400 | 2018-09-05T06:58:01 | 2018-09-05T06:58:01 | 51,144,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | # -*- coding: utf-8 -*-
"""
@summary: View definition
@author: Yun Hua, yun_hua@yashentech.com
@date: 2017.01.04
"""
from datetime import datetime
from django.shortcuts import render
from django.http import HttpRequest
from django.http import JsonResponse
from dataserver.models import DataSource
DA_Q_KEY = 'key'
def home(request):
    """Renders the home page."""
    assert isinstance(request, HttpRequest)
    context = {
        'title': 'Home Page',
        'year': datetime.now().year,
    }
    return render(request, 'dataserver/index.html', context)
def da_api(request):
    """Data-access API endpoint.

    AJAX GET with ?key=<id> returns {'data': [values...]} for the matching
    DataSource rows as JSON; an AJAX request without a key gets a null
    payload; non-AJAX requests get the HTML data-access page.
    """
    assert isinstance(request, HttpRequest)
    if request.is_ajax():
        key = request.GET.get(DA_Q_KEY)
        if key is not None:
            return JsonResponse(
                {'data': [d.dvalue for d in DataSource.objects.filter(id=key)]}
            )
        else:
            # BUG FIX: the original passed the SET {'data', 'null'}, which
            # JsonResponse cannot serialize; a dict was clearly intended.
            return JsonResponse({'data': 'null'})
    else:
        return render(
            request,
            'dataserver/da.html',
            {
                'title': '数据访问',
                'year': datetime.now().year,
            }
        )
| [
"cuiguoxia@sohu.com"
] | cuiguoxia@sohu.com |
d407e6efe97070be75014a8bc45c966906c9cd14 | e707164df1aa8edb5d276179538bd1eb1805f759 | /CODE/fedora_application/env/lib/python2.7/site-packages/fedmsg/config.py | bcb6258cc6c1c0570c5a144728cc39fb87d6c893 | [] | no_license | beckastar/cleaner_markov | af5816c14c94a8cb7924728179470e7db9ed2bc0 | a6de3fd87db77c0d80789cbce0ff409c222b4e67 | refs/heads/master | 2021-01-02T22:52:08.989862 | 2013-11-10T04:51:04 | 2013-11-10T04:51:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,936 | py | # This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
""" :mod:`fedmsg.config` handles loading, processing and validation of
all configuration.
The configuration values used at runtime are determined by checking in
the following order
- Built-in defaults
- Config file (/etc/fedmsg-config.py)
- Command line arguments
For example, if a config value does not appear in either the config file or on
the command line, then the built-in default is used. If a value appears in
both the config file and as a command line argument, then the command line
value is used.
You can print the runtime configuration to the terminal by using the
``fedmsg-config`` command implemented by
:func:`fedmsg.commands.config.config`.
"""
import argparse
import collections
import copy
import os
import sys
import textwrap
import warnings
from fedmsg.encoding import pretty_dumps
# Environments a fedmsg deployment may run in; message topics embed one of these.
VALID_ENVIRONMENTS = ['dev', 'stg', 'prod']

# Built-in defaults: the lowest-precedence configuration source (see load_config).
defaults = dict(
    topic_prefix="org.fedoraproject",
    environment="dev",
    io_threads=1,
    post_init_sleep=0.5,
    timeout=2,
    print_config=False,
    high_water_mark=0,  # zero means no limit
    zmq_linger=1000,  # Wait one second before timing out on fedmsg-relay
    active=False,  # generally only true for fedmsg-logger
    persistent_store=None,  # an object.  See the fedmsg.replay module.
    # dictConfig-style logging setup used by fedmsg commands.
    logging=dict(
        version=1,
        formatters=dict(
            bare={
                "datefmt": "%Y-%m-%d %H:%M:%S",
                "format": "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
            },
        ),
        handlers=dict(
            console={
                "class": "logging.StreamHandler",
                "formatter": "bare",
                "level": "INFO",
                "stream": "ext://sys.stdout",
            }
        ),
        loggers=dict(
            fedmsg={
                "level": "INFO",
                "propagate": False,
                "handlers": ["console"],
            },
            moksha={
                "level": "INFO",
                "propagate": False,
                "handlers": ["console"],
            },
        ),
    ),
)

# Memoized result of load_config; cleared via invalidate_cache=True.
__cache = {}
def load_config(extra_args=None,
                doc=None,
                filenames=None,
                invalidate_cache=False,
                fedmsg_command=False):
    """ Setup a runtime config dict by integrating the following sources
    (ordered by precedence):
      - defaults
      - config file
      - command line arguments
    If the ``fedmsg_command`` argument is False, no command line arguments are
    checked.
    """
    # The assembled config is memoized module-wide; repeat calls are cheap.
    global __cache
    if invalidate_cache:
        __cache = {}
    if __cache:
        return __cache
    # Coerce defaults if arguments are not supplied.
    extra_args = extra_args or []
    doc = doc or ""
    config = copy.deepcopy(defaults)
    config.update(_process_config_file(filenames=filenames))
    # This is optional (and defaults to false) so that only 'fedmsg-*' commands
    # are required to provide these arguments.
    # For instance, the moksha-hub command takes a '-v' argument and internally
    # makes calls to fedmsg.  We don't want to impose all of fedmsg's CLI
    # option constraints on programs that use fedmsg, so we make it optional.
    if fedmsg_command:
        config.update(_process_arguments(extra_args, doc, config))
    # If the user specified a config file on the command line, then start over
    # but read in that file instead.
    if not filenames and config.get('config_filename', None):
        return load_config(extra_args, doc,
                           filenames=[config['config_filename']])
    # Just a little debug option.  :)
    # NOTE: Python 2 print statement - this module is Python 2 only.
    if config['print_config']:
        print pretty_dumps(config)
        sys.exit(0)
    if config['environment'] not in VALID_ENVIRONMENTS:
        raise ValueError("%r not one of %r" % (
            config['environment'], VALID_ENVIRONMENTS))
    if 'endpoints' not in config:
        raise ValueError("No config value 'endpoints' found.")
    if not isinstance(config['endpoints'], dict):
        raise ValueError("The 'endpoint' config value must be a dict.")
    # Optionally resolve SRV DNS records into concrete tcp:// endpoint URLs.
    if 'srv_endpoints' in config and len(config['srv_endpoints']) > 0:
        from dns.resolver import query, NXDOMAIN, Timeout, NoNameservers
        for e in config['srv_endpoints']:
            urls = []
            try:
                records = query('_fedmsg._tcp.{0}'.format(e), 'SRV')
            except NXDOMAIN:
                warnings.warn("There is no appropriate SRV records " +
                              "for {0}".format(e))
                continue
            except Timeout:
                warnings.warn("The DNS query for the SRV records of" +
                              " {0} timed out.".format(e))
                continue
            except NoNameservers:
                warnings.warn("No name server is available, please " +
                              "check the configuration")
                break
            for rec in records:
                urls.append('tcp://{hostname}:{port}'.format(
                    hostname=rec.target.to_text(),
                    port=rec.port
                ))
            config['endpoints'][e] = urls
    if 'topic_prefix_re' not in config:
        # Turn "org.fedoraproject" into "org\.fedoraproject\.(dev|stg|prod)"
        config['topic_prefix_re'] = config['topic_prefix'].replace('.', '\.')\
            + '\.(%s)' % '|'.join(VALID_ENVIRONMENTS)
    __cache = config
    return config
def build_parser(declared_args, doc, config=None, prog=None):
    """ Return the global :class:`argparse.ArgumentParser` used by all fedmsg
    commands.
    Extra arguments can be supplied with the `declared_args` argument.
    """
    # Defaults for each CLI flag come from the (possibly file-derived) config.
    config = config or copy.deepcopy(defaults)
    prog = prog or sys.argv[0]
    parser = argparse.ArgumentParser(
        description=textwrap.dedent(doc),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        prog=prog,
    )
    parser.add_argument(
        '--io-threads',
        dest='io_threads',
        type=int,
        help="Number of io threads for 0mq to use",
        default=config['io_threads'],
    )
    parser.add_argument(
        '--topic-prefix',
        dest='topic_prefix',
        type=str,
        help="Prefix for the topic of each message sent.",
        default=config['topic_prefix'],
    )
    parser.add_argument(
        '--post-init-sleep',
        dest='post_init_sleep',
        type=float,
        help="Number of seconds to sleep after initializing.",
        default=config['post_init_sleep'],
    )
    parser.add_argument(
        '--config-filename',
        dest='config_filename',
        help="Config file to use.",
        default=None,
    )
    parser.add_argument(
        '--print-config',
        dest='print_config',
        help='Simply print out the configuration and exit.  No action taken.',
        default=False,
        action='store_true',
    )
    parser.add_argument(
        '--timeout',
        dest='timeout',
        help="Timeout in seconds for any blocking zmq operations.",
        type=float,
        default=config['timeout'],
    )
    parser.add_argument(
        '--high-water-mark',
        dest='high_water_mark',
        help="Limit on the number of messages in the queue before blocking.",
        type=int,
        default=config['high_water_mark'],
    )
    parser.add_argument(
        '--linger',
        dest='zmq_linger',
        help="Number of milliseconds to wait before timing out connections.",
        type=int,
        default=config['zmq_linger'],
    )
    for args, kwargs in declared_args:
        # Replace the hard-coded extra_args default with the config file value
        # (if it exists)
        if all([k in kwargs for k in ['dest', 'default']]):
            kwargs['default'] = config.get(
                kwargs['dest'], kwargs['default'])
        # Having slurped smart defaults from the config file, add the CLI arg.
        parser.add_argument(*args, **kwargs)
    return parser
def _process_arguments(declared_args, doc, config):
    # Parse sys.argv with the shared fedmsg parser and return the resulting
    # namespace as a plain dict (the highest-precedence config source).
    parser = build_parser(declared_args, doc, config)
    args = parser.parse_args()
    return dict(args._get_kwargs())
def _gather_configs_in(directory):
""" Return list of fully qualified python filenames in the given dir """
try:
return [
os.path.join(directory, fname)
for fname in os.listdir(directory)
if fname.endswith('.py')
]
except OSError:
return []
def _recursive_update(d1, d2):
""" Little helper function that does what d1.update(d2) does,
but works nice and recursively with dicts of dicts of dicts.
It's not necessarily very efficient.
"""
for k in set(d1).intersection(d2):
if isinstance(d1[k], dict) and isinstance(d2[k], dict):
d1[k] = _recursive_update(d1[k], d2[k])
else:
d1[k] = d2[k]
for k in set(d2).difference(d1):
d1[k] = d2[k]
return d1
def _process_config_file(filenames=None):
    # Merge config dicts from explicit filenames, or from the conventional
    # locations plus every *.py under the fedmsg.d directories.
    filenames = filenames or []
    # Validate that these files are really files
    for fname in filenames:
        if not os.path.isfile(fname):
            raise ValueError("%r is not a file." % fname)
    # If nothing specified, look in the default locations
    if not filenames:
        filenames = [
            '/etc/fedmsg-config.py',
            os.path.expanduser('~/.fedmsg-config.py'),
            os.getcwd() + '/fedmsg-config.py',
        ]
        filenames = sum(map(_gather_configs_in, [
            "/etc/fedmsg.d/",
            os.path.expanduser('~/.fedmsg.d/'),
            os.getcwd() + '/fedmsg.d/',
        ]), []) + filenames
    # Each .ini file should really be a python module that
    # builds a config dict.
    config = {}
    for fname in filenames:
        if os.path.isfile(fname):
            variables = {}
            try:
                # NOTE(review): execfile runs arbitrary Python from the config
                # path (Python 2 only); config files are trusted by design here.
                execfile(fname, variables)
                config = _recursive_update(config, variables['config'])
            except IOError as e:
                warnings.warn(str(e))
    return config
| [
"rebecca.robbins.et@gmail.com"
] | rebecca.robbins.et@gmail.com |
7119dff524629f1ceea380a082865772286b6886 | 60ab0c50a338ae5f444bbb1210f5f840419851f3 | /zqz_tmp/devel/lib/python2.7/dist-packages/bsplines/__init__.py | 2d40ede40c4a5a744f71dec9598bc33fc1baf069 | [] | no_license | zhang-quanzhe/NvidiaNX_SLAM | df9789a230bf5b9ebcb888a8db8bd8fdba498b08 | c7a7cf8c460a7fe53c0e90d9fb699263681033fc | refs/heads/master | 2023-07-09T16:37:41.249915 | 2021-08-21T07:59:55 | 2021-08-21T07:59:55 | 398,501,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from pkgutil import extend_path
from sys import path as sys_path
# Source-space python directory baked in by catkin at configure time.
__extended_path = '/home/nvidia/zqz_tmp/src/kalibr/aslam_nonparametric_estimation/bsplines_python/python'.split(';')
for p in reversed(__extended_path):
    # Prepend so source-space packages win over installed ones.
    sys_path.insert(0, p)
    del p
del sys_path
# Extend this package across the additional search paths.
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
    # Prefer a sibling <name>.py module; fall back to <name>/__init__.py.
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
del p
del os_path
del __extended_path
# Execute the real package __init__ code in this module's namespace.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
del __execfile
del __execfiles
| [
"zhumingjie0206@163.com"
] | zhumingjie0206@163.com |
d0db1cd6164cae8da09acb05bc8dac99df5d4ccf | 3daea618967bb99858472837390d7d8cbc32807e | /modules/student.py | 03b54200697fd812bd67e1c97aa86d327e255bd2 | [
"LicenseRef-scancode-public-domain"
] | permissive | ergaurav2/placement-portal | f360559816c7378422131c1d8cf5eb24157f180f | 87b84828ca7ef18b4380eb14853a863dcfa1a935 | refs/heads/master | 2016-09-05T23:31:17.249989 | 2014-03-11T17:11:43 | 2014-03-11T17:11:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,181 | py | #!/usr/bin/env python
# coding: utf8
from gluon import *
class Student:
    """Data holder for a student's basic profile, backed by the
    student_basicdetails table (web2py DAL; Python 2 module)."""
    rollno=""
    firstname=""
    lastname=""
    birth_date=""
    gender=""
    emailid=""
    pwd=""
    course=""
    mobile=""
    peradd=""  # presumably permanent address - confirm against schema
    curradd=""  # presumably current address - confirm against schema
    ext_emailid=""
    fathername=""
    # NOTE(review): this first createbasicstudent is dead code - it is
    # shadowed by the second definition of the same name just below.
    def createbasicstudent(self,obj):
        self.rollno=obj.rollno
        self.firstname=obj.firstname
        self.lastname=obj.lastname
        self.gender=obj.gender
        self.emailid=obj.emailid
        self.pwd=obj.pwd
        self.course=obj.course
        self.mobile=obj.mobile
    # Populate all profile fields (including password) from a form/row object.
    def createbasicstudent(self,obj):
        self.rollno=obj.rollno
        self.firstname=obj.firstname
        self.lastname=obj.lastname
        self.birth_date=obj.birth_date
        # NOTE(review): "gendeyr" is a typo - self.gender is never set here.
        self.gendeyr=obj.gender
        self.emailid=obj.emailid
        self.pwd=obj.pwd
        self.course=obj.course
        self.mobile=obj.mobile
        self.peradd=obj.peradd
        self.curradd=obj.curradd
        self.ext_emailid=obj.ext_emailid
        self.fathername=obj.fathername
    # Same as above but without the password field.
    def createstudent(self,obj):
        self.rollno=obj.rollno
        self.firstname=obj.firstname
        self.lastname=obj.lastname
        self.birth_date=obj.birth_date
        self.gender=obj.gender
        self.emailid=obj.emailid
        self.course=obj.course
        self.mobile=obj.mobile
        self.peradd=obj.peradd
        self.curradd=obj.curradd
        self.ext_emailid=obj.ext_emailid
        self.fathername=obj.fathername
    # Load this object's fields from the first student_basicdetails row
    # matching emailid.  NOTE(review): crashes (obj is None) if no row matches.
    def getstudentbasicdetails(self,emailid,db):
        print "getstudentbasicdetails", emailid
        obj=db(db.student_basicdetails.emailid==emailid).select().first()
        print obj
        self.rollno=obj.rollno
        self.firstname=obj.firstname
        self.lastname=obj.lastname
        self.birth_date=obj.birth_date
        self.gender=obj.gender
        self.emailid=obj.emailid
        self.course=obj.course
        self.mobile=obj.mobile
        self.peradd=obj.peradd
        self.curradd=obj.curradd
        self.ext_emailid=obj.ext_emailid
        self.fathername=obj.fathername
    # NOTE(review): references self.birth/self.program/self.dept, none of
    # which are ever assigned on this class (attrs are birth_date/course) -
    # this will raise AttributeError at runtime; verify intended fields.
    def insertstudent(self,db):
        db.student_basicdetails.insert(rollno=self.rollno,firstname=self.firstname,lastname=self.lastname,birth=self.birth,
        gender=self.gender,emailid=self.emailid,pwd=self.pwd,program=self.program,dept=self.dept,mobile=self.mobile)
    # NOTE(review): same concern as insertstudent, plus self.passout and
    # self.marks are also undefined on this class.
    def updatestudent(self,emailid,db):
        obj=db(db.student_basicdetails.emailid==emailid).select().first()
        obj.update_record(rollno=self.rollno,firstname=self.firstname,
        lastname=self.lastname,birth=self.birth,gender=self.gender,emailid=self.emailid,
        program=self.program,dept=self.dept,mobile=self.mobile,
        peradd=self.peradd,curradd=self.curradd,passout=self.passout,marks=self.marks,ext_emailid=self.ext_emailid,fathername=self.fathername)
class Academicdetails:
    """Data holder for a student's academic history (high school,
    intermediate, UG, PG board/marks/pass-out year), backed by the
    student_academicdetails table (web2py DAL; Python 2 module)."""
    rollno=""
    emailid=""
    high_board=""
    high_marks=""
    high_passout=""
    inter_board=""
    inter_marks=""
    inter_passout=""
    ug_board=""
    ug_marks=""
    ug_passout=""
    pg_board=""
    pg_marks=""
    pg_passout=""
    # Populate all academic fields from a form/row object.
    def createacademicdetails(self,obj):
        self.rollno=obj.rollno
        self.emailid=obj.emailid
        self.high_board=obj.high_board
        self.high_marks=obj.high_marks
        self.high_passout=obj.high_passout
        self.inter_board=obj.inter_board
        self.inter_marks=obj.inter_marks
        self.inter_passout=obj.inter_passout
        self.ug_board=obj.ug_board
        self.ug_marks=obj.ug_marks
        self.ug_passout=obj.ug_passout
        self.pg_board=obj.pg_board
        self.pg_marks=obj.pg_marks
        self.pg_passout=obj.pg_passout
    # Load this object's fields from the first student_academicdetails row
    # matching emailid.  NOTE(review): crashes (obj is None) if no row matches.
    def getacademicdetails(self,emailid,db):
        obj=db(db.student_academicdetails.emailid==emailid).select().first()
        print "getacademicdetails "
        print emailid
        print obj
        self.rollno=obj.rollno
        self.emailid=obj.emailid
        self.high_board=obj.high_board
        self.high_marks=obj.high_marks
        self.high_passout=obj.high_passout
        self.inter_board=obj.inter_board
        self.inter_marks=obj.inter_marks
        self.inter_passout=obj.inter_passout
        self.ug_board=obj.ug_board
        self.ug_marks=obj.ug_marks
        self.ug_passout=obj.ug_passout
        self.pg_board=obj.pg_board
        self.pg_marks=obj.pg_marks
        self.pg_passout=obj.pg_passout
    # Create a skeleton row keyed by roll number and email only.
    def insertacademicdetails(self,emailid,rollno,db):
        db.student_academicdetails.insert(rollno=rollno,emailid=emailid)
    # Write every academic field of this object back to the matching row.
    def updateacademicdetails(self,emailid,db):
        row=db(db.student_academicdetails.emailid==emailid).select().first()
        row.update_record(high_board=self.high_board,high_marks=self.high_marks,high_passout=self.high_passout,
        inter_board=self.inter_board,inter_marks=self.inter_marks,
        inter_passout=self.inter_passout,ug_board=self.ug_board,ug_marks=self.ug_marks,ug_passout=self.ug_passout,
        pg_board=self.pg_board,pg_marks=self.pg_marks,pg_passout=self.pg_passout)
| [
"ergaurav2@gmail.com"
] | ergaurav2@gmail.com |
d1dd82562e95e8748af9c0c79c4d56582c205ee7 | 612858007a347433f85c62ef0d82818be37a39b7 | /fileupload/migrations/0002_auto_20160720_0937.py | 792fc8e3033fd5feae2da495c85689ff815db646 | [] | no_license | zidan9011/python_git | ea3ccae2c9e147924437f5cdf916b4d4dc12a601 | 26925086003f03c7cd6dc3ef8cc694e546aed62a | refs/heads/master | 2020-05-29T16:09:00.227910 | 2016-12-13T06:54:35 | 2016-12-13T06:54:35 | 60,998,588 | 0 | 2 | null | 2016-12-13T06:54:36 | 2016-06-13T01:33:35 | JavaScript | UTF-8 | Python | false | false | 31,086 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated Django migration (dated 2016-07-20) for `fileupload`.

    Rewrites metadata (verbose_name labels and `choices` lists, stored as
    UTF-8 byte strings) on several `report_detail` fields and adds a
    composite uniqueness constraint.  Machine-generated — do not hand-edit
    the operation payloads below.
    """
    dependencies = [
        ('fileupload', '0001_initial'),
    ]
    # Each AlterField below replaces a field's declared metadata; the large
    # byte-string literals are the Chinese labels/choice captions emitted by
    # `makemigrations` under Python 2.
    operations = [
        migrations.AlterField(
            model_name='report_detail',
            name='CRType',
            field=models.CharField(max_length=32, verbose_name=b'\xe5\x8f\x98\xe6\x9b\xb4\xe7\xb1\xbb\xe5\x9e\x8b', choices=[(b'zc', b'\xe6\xad\xa3\xe5\xb8\xb8'), (b'jj', b'\xe7\xb4\xa7\xe6\x80\xa5'), (b'lx', b'\xe4\xbe\x8b\xe8\xa1\x8c'), (b'kj', b'\xe5\xbf\xab\xe6\x8d\xb7'), (b'NA', b'NA')]),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='Main_SysName',
            field=models.CharField(max_length=128, verbose_name=b'\xe4\xb8\xbb\xe7\xb3\xbb\xe7\xbb\x9f\xe5\x90\x8d\xe7\xa7\xb0', choices=[(b'b01', b'\xe6\x9c\xac\xe5\xb8\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f'), (b'b02', b'\xe6\x9c\xac\xe5\xb8\x81\xe4\xba\xa4\xe6\x98\x93\xe7\x9b\xb4\xe9\x80\x9a\xe5\xbc\x8f\xe5\xa4\x84\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88\xe6\x9c\xac\xe5\xb8\x81CSTP\xef\xbc\x89'), (b'b03', b'\xe6\x9c\xac\xe5\xb8\x81\xe5\xb8\x82\xe5\x9c\xba\xe7\x9b\x91\xe6\xb5\x8b\xe7\xb3\xbb\xe7\xbb\x9f'), (b'b04', b'\xe6\xa0\x87\xe5\x87\x86\xe5\x8c\x96\xe5\xa4\x96\xe6\xb1\x87\xe4\xba\xa7\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88C-Swap\xef\xbc\x89'), (b'b05', b'\xe6\xa0\x87\xe5\x87\x86\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe5\x8f\x8a\xe8\xa1\x8d\xe7\x94\x9f\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88C-Trade\xef\xbc\x89'), (b'b06', b'\xe6\xa0\x87\xe5\x87\x86\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe5\x8f\x8a\xe8\xa1\x8d\xe7\x94\x9f\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88X-Repo\xef\xbc\x89'), (b'b07', b'\xe6\xa0\x87\xe5\x87\x86\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe5\x8f\x8a\xe8\xa1\x8d\xe7\x94\x9f\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88X-Swap\xef\xbc\x89'), (b'b08', b'\xe6\xa0\x87\xe5\x87\x86\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe5\x8f\x8a\xe8\xa1\x8d\xe7\x94\x9f\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88X-Bond\xef\xbc\x89'), (b'b09', b'\xe4\xbf\x9d\xe8\xaf\x81\xe9\x87\x91\xe7\xb3\xbb\xe7\xbb\x9f'), (b'c01', b'\xe8\xb4\xa2\xe5\x8a\xa1\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'c02', b'\xe8\xb4\xa2\xe6\x94\xbf\xe9\x83\xa8\xe5\x81\x9a\xe5\xb8\x82\xe6\x94\xaf\xe6\x8c\x81\xe7\xb3\xbb\xe7\xbb\x9f'), (b'c03', b'\xe6\xb5\x8b\xe8\xaf\x95\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b'd01', 
b'\xe5\xa4\xa7\xe9\xa2\x9d\xe5\xad\x98\xe5\x8d\x95\xe5\x8f\x91\xe8\xa1\x8c\xe5\xa4\x87\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CDIS\xef\xbc\x89'), (b'd02', b'\xe5\xa4\xa7\xe9\xa2\x9d\xe5\xad\x98\xe5\x8d\x95\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f'), (b'd03', b'\xe5\xa4\xa7\xe5\xb1\x8f\xe5\xb1\x95\xe7\xa4\xba\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88ITS\xef\xbc\x89'), (b'd04', b'\xe8\xb4\xb7\xe6\xac\xbe\xe8\xbd\xac\xe8\xae\xa9\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88LTS\xef\xbc\x89'), (b'd05', b'\xe6\xa1\xa3\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f'), (b'd06', b'\xe7\x9f\xad\xe4\xbf\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'f01', b'\xe9\xa3\x8e\xe9\x99\xa9\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'g01', b'\xe5\x9b\xbd\xe9\x99\x85\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe4\xba\xa4\xe6\x98\x93\xe5\xb9\xb3\xe5\x8f\xb0\xef\xbc\x88CNYFAST\xef\xbc\x89'), (b'h01', b'\xe5\x91\xbc\xe5\x8f\xab\xe4\xb8\xad\xe5\xbf\x83\xe7\xb3\xbb\xe7\xbb\x9f'), (b'h02', b'\xe4\xba\x92\xe8\x81\x94\xe7\xbd\x91\xe9\x82\xae\xe7\xae\xb1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'h03', b'\xe4\xbc\x9a\xe5\x91\x98\xe6\x9c\xba\xe6\x9e\x84\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CIM\xef\xbc\x89'), (b'h04', b'\xe8\xb4\xa7\xe5\xb8\x81\xe5\x8f\x8a\xe5\x80\xba\xe5\x8a\xa1\xe5\xb7\xa5\xe5\x85\xb7\xe5\x8f\x91\xe8\xa1\x8c\xe7\xb3\xbb\xe7\xbb\x9f'), (b'i01', b'IMIX\xe5\xba\x94\xe6\x80\xa5\xe5\xb7\xa5\xe5\x85\xb7'), (b'j01', b'\xe5\x8d\xb3\xe6\x97\xb6\xe9\x80\x9a\xe8\xae\xaf\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CM\xef\xbc\x89'), (b'j02', b'\xe8\xae\xa1\xe8\xb4\xb9\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'j03', b'\xe6\x8a\x80\xe6\x9c\xaf\xe5\x9c\xba\xe5\x8a\xa1\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'j04', b'\xe4\xba\xa4\xe6\x98\x93\xe5\x90\x8e\xe5\xa4\x84\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'k01', b'\xe5\xbc\x80\xe5\x8f\x91\xe8\xbf\x87\xe7\xa8\x8b\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'k02', b'KGR\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'k03', 
b'Kondor\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'k04', b'KTP\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'l01', b'\xe9\x9b\xb6\xe5\x94\xae\xe5\xb8\x82\xe5\x9c\xba\xe4\xba\x92\xe8\x81\x94\xe7\xbd\x91\xe4\xba\xa4\xe6\x98\x93\xe6\x9c\x8d\xe5\x8a\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'q01', b'\xe5\x85\xa8\xe5\x9b\xbd\xe9\x93\xb6\xe8\xa1\x8c\xe9\x97\xb4\xe5\x80\xba\xe5\x88\xb8\xe5\xb8\x82\xe5\x9c\xba\xe5\x87\x86\xe5\x85\xa5\xe5\xa4\x87\xe6\xa1\x88\xe4\xbf\xa1\xe6\x81\xaf\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r01', b'RMDS\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r02', b'\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe7\x94\xb5\xe5\xad\x90\xe5\xa4\x87\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r03', b'\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe8\xb7\xa8\xe5\xa2\x83\xe4\xba\xa4\xe6\x98\x93\xe4\xbf\xa1\xe6\x81\xaf\xe7\xbb\x9f\xe8\xae\xa1\xe7\x9b\x91\xe6\xb5\x8b\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r04', b'\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe5\x88\xa9\xe7\x8e\x87\xe7\x9b\x91\xe6\xb5\x8b\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r05', b'\xe4\xba\xba\xe6\xb0\x91\xe9\x93\xb6\xe8\xa1\x8c\xe5\x88\x86\xe6\x94\xaf\xe8\xa1\x8c\xe8\xb4\xa7\xe5\xb8\x81\xe5\xb8\x82\xe5\x9c\xba\xe4\xbf\xa1\xe6\x81\xaf\xe6\x9c\x8d\xe5\x8a\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r06', b'\xe8\xbd\xaf\xe4\xbb\xb6\xe9\x85\x8d\xe7\xbd\xae\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b'r07', b'\xe8\xbd\xaf\xe4\xbb\xb6\xe7\xbc\xba\xe9\x99\xb7\xe5\x8f\x8a\xe5\x8f\x98\xe6\x9b\xb4\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b's01', b'SWIFT\xe6\x8a\xa5\xe6\x96\x87\xe7\xae\xa1\xe7\x90\x86\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b's02', b'SWIFT\xe4\xba\xa4\xe6\x98\x93\xe4\xb8\xad\xe5\xbf\x83\xe6\x8e\xa5\xe5\x85\xa5\xe7\xb3\xbb\xe7\xbb\x9f'), (b's03', b'\xe4\xb8\x8a\xe6\xb5\xb7\xe9\x93\xb6\xe8\xa1\x8c\xe9\x97\xb4\xe5\x90\x8c\xe4\xb8\x9a\xe6\x8b\x86\xe6\x94\xbe\xe5\x88\xa9\xe7\x8e\x87\xe7\xb3\xbb\xe7\xbb\x9f'), (b's04', 
b'\xe4\xb8\x8a\xe6\xb5\xb7\xe6\x80\xbb\xe9\x83\xa8\xe9\xbb\x84\xe9\x87\x91\xe5\xb8\x82\xe5\x9c\xba\xe7\x9b\x91\xe6\xb5\x8b\xe5\x88\x86\xe6\x9e\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b's05', b'\xe4\xb8\x8a\xe6\xb5\xb7\xe6\x80\xbb\xe9\x83\xa8\xe9\x87\x91\xe8\x9e\x8d\xe5\xb8\x82\xe5\x9c\xba\xe7\x9b\x91\xe6\xb5\x8b\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b's06', b'\xe5\xae\x9e\xe6\x97\xb6\xe6\xb6\x88\xe6\x81\xaf\xe4\xbc\xa0\xe8\xbe\x93\xe4\xb8\xad\xe9\x97\xb4\xe4\xbb\xb6\xef\xbc\x88IMIX\xef\xbc\x89'), (b's07', b'\xe5\xb8\x82\xe5\x9c\xba\xe5\x9f\xba\xe5\x87\x86\xe5\x8f\x82\xe8\x80\x83\xe7\xb3\xbb\xe7\xbb\x9f'), (b's08', b'\xe5\xb8\x82\xe5\x9c\xba\xe6\x95\xb0\xe6\x8d\xae\xe6\x8a\xa5\xe8\xa1\xa8\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CMDR\xef\xbc\x89'), (b's09', b'\xe5\xb8\x82\xe5\x9c\xba\xe6\x95\xb0\xe6\x8d\xae\xe5\xa4\x84\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88MDP\xef\xbc\x89'), (b's10', b'\xe5\xb8\x82\xe5\x9c\xba\xe6\x95\xb0\xe6\x8d\xae\xe5\x8f\x91\xe5\xb8\x83\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CMDS\xef\xbc\x89'), (b's11', b'\xe6\x95\xb0\xe6\x8d\xae\xe4\xbb\x93\xe5\xba\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CDW\xef\xbc\x89'), (b's12', b'shibor'), (b's13', b'\xe6\x95\xb0\xe6\x8d\xae\xe9\x87\x87\xe9\x9b\x86\xe5\xb9\xb3\xe5\x8f\xb0'), (b's14', b'\xe6\x95\xb0\xe6\x8d\xae\xe4\xbc\xa0\xe8\xbe\x93\xe4\xb8\xad\xe9\x97\xb4\xe4\xbb\xb6\xef\xbc\x88ETL\xef\xbc\x89'), (b't01', b'tivoli\xe7\x9b\x91\xe6\x8e\xa7\xe7\xb3\xbb\xe7\xbb\x9f'), (b't02', b'\xe5\x9b\xbe\xe4\xb9\xa6\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w01', b'\xe5\xa4\x96\xe6\xb1\x87\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w02', b'\xe5\xa4\x96\xe6\xb1\x87\xe4\xba\xa4\xe6\x98\x93\xe7\x9b\xb4\xe9\x80\x9a\xe5\xbc\x8f\xe5\xa4\x84\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w03', b'\xe5\xa4\x96\xe6\xb1\x87\xe9\x99\x90\xe9\xa2\x9d\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w04', b'\xe5\xa4\x96\xe6\xb1\x87\xe5\xb8\x90\xe5\x8a\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w05', 
b'\xe7\xbd\x91\xe7\xbb\x9c\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w06', b'\xe5\xbe\xae\xe5\xae\xa2\xe6\x9c\x8d\xe4\xba\x8c\xe6\x9c\x9f'), (b'w07', b'\xe6\x96\x87\xe4\xbb\xb6\xe4\xbc\xa0\xe8\xbe\x93\xe4\xb8\xad\xe9\x97\xb4\xe4\xbb\xb6\xef\xbc\x88FTS\xef\xbc\x89'), (b'x01', b'X\xe8\xa1\x8c\xe5\x9c\xba\xe5\x8a\xa1\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x02', b'X\xe8\xa1\x8c\xe4\xba\xa4\xe6\x98\x93\xe6\x95\xb0\xe6\x8d\xae\xe4\xbc\xa0\xe8\xbe\x93\xe6\x8e\xa5\xe5\x8f\xa3\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x03', b'X\xe8\xa1\x8c\xe7\xab\x9e\xe4\xbb\xb7\xe4\xba\xa4\xe6\x98\x93\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x04', b'X\xe8\xa1\x8c\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe8\xb5\x84\xe9\x87\x91\xe4\xb8\x9a\xe5\x8a\xa1\xe5\x90\x8e\xe5\x8f\xb0\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x05', b'X\xe8\xa1\x8c\xe7\xbb\x9f\xe8\xae\xa1\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x06', b'X\xe8\xa1\x8c\xe4\xbf\xa1\xe6\x81\xaf\xe4\xba\xa4\xe6\xb5\x81\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x07', b'X\xe8\xa1\x8c\xe8\xaf\xa2\xe4\xbb\xb7\xe4\xba\xa4\xe6\x98\x93\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x08', b'X\xe8\xa1\x8c\xe5\xb8\x90\xe5\x8a\xa1\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x09', b'X\xe8\xa1\x8c\xe6\x8b\x9b\xe6\xa0\x87\xe4\xba\xa4\xe6\x98\x93\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x10', b'X\xe8\xa1\x8c\xe6\x94\xaf\xe4\xbb\x98\xe9\x94\x80\xe8\xb4\xa6\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x11', b'X\xe8\xa1\x8c\xe4\xb8\xad\xe5\x8f\xb0\xe6\x8a\xa5\xe8\xa1\xa8\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x12', b'\xe6\x96\xb0\xe8\xb4\xa7\xe5\xb8\x81\xe7\xbd\x91'), (b'x13', b'\xe6\x96\xb0\xe7\xa5\xa8\xe6\x8d\xae\xe7\xbd\x91'), (b'x14', b'\xe9\xa1\xb9\xe7\x9b\xae\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b'x15', b'\xe9\x9c\x80\xe6\xb1\x82\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b'y01', 
b'\xe5\xa4\xae\xe8\xa1\x8c\xe5\xa4\xa7\xe9\xa2\x9d\xe6\x94\xaf\xe4\xbb\x98\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CNAPS\xef\xbc\x89\xe4\xba\xa4\xe6\x98\x93\xe4\xb8\xad\xe5\xbf\x83\xe6\x8e\xa5\xe5\x85\xa5\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y02', b'\xe9\x93\xb6\xe8\xa1\x8c\xe9\x97\xb4\xe5\xb8\x82\xe5\x9c\xba\xe4\xba\x92\xe8\x81\x94\xe7\xbd\x91\xe4\xba\xa4\xe6\x98\x93\xe6\x9c\x8d\xe5\x8a\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y03', b'\xe5\xba\x94\xe7\x94\xa8\xe9\x9b\x86\xe4\xb8\xad\xe7\x9b\x91\xe6\x8e\xa7\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y04', b'\xe5\xba\x94\xe7\x94\xa8\xe6\x8c\x87\xe6\xa0\x87\xe9\x87\x87\xe9\x9b\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y05', b'\xe7\x94\xa8\xe6\x88\xb7\xe7\xbb\x9f\xe4\xb8\x80\xe8\xae\xa4\xe8\xaf\x81\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y06', b'\xe9\xa2\x84\xe7\xae\x97\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y07', b'\xe5\x85\x83\xe6\x95\xb0\xe6\x8d\xae\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y08', b'\xe8\xbf\x9c\xe7\xa8\x8b\xe5\x9f\xb9\xe8\xae\xad\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y09', b'\xe8\xbf\x90\xe7\xbb\xb4\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z01', b'\xe5\xa2\x9e\xe5\x80\xbc\xe6\x9c\x8d\xe5\x8a\xa1\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe5\x89\x8d\xe4\xb8\xad\xe5\x8f\xb0\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z02', b'\xe5\xa2\x9e\xe5\x80\xbc\xe6\x9c\x8d\xe5\x8a\xa1\xe5\xa4\x96\xe6\xb1\x87\xe8\xb5\x84\xe9\x87\x91\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z03', b'\xe5\xa2\x9e\xe5\x80\xbc\xe6\x9c\x8d\xe5\x8a\xa1\xe5\xa4\x96\xe6\xb1\x87\xe5\x81\x9a\xe5\xb8\x82\xe5\x95\x86\xe6\x8a\xa5\xe4\xbb\xb7\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z04', b'\xe5\xa2\x9e\xe5\x80\xbc\xe6\x9c\x8d\xe5\x8a\xa1\xe7\x9b\xb4\xe9\x80\x9a\xe5\xbc\x8f\xe5\xa4\x84\xe7\x90\x86\xe6\x9c\x8d\xe5\x8a\xa1'), (b'z05', b'\xe5\x80\xba\xe5\x88\xb8\xe5\x9f\xba\xe7\xa1\x80\xe4\xbf\xa1\xe6\x81\xaf\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88BDS\xef\xbc\x89'), (b'z06', 
b'\xe5\x80\xba\xe5\x88\xb8\xe5\xb8\x82\xe5\x9c\xba\xe4\xba\xa4\xe6\x98\x93\xe4\xbf\xa1\xe6\x81\xaf\xe5\xa4\x87\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z07', b'\xe8\xaf\x81\xe4\xb9\xa6\xe6\x8e\x88\xe6\x9d\x83\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CA\xef\xbc\x89'), (b'z08', b'\xe6\x94\xaf\xe6\x8c\x81\xe5\x9b\xbd\xe4\xba\xa7\xe5\xaf\x86\xe7\xa0\x81\xe7\xae\x97\xe6\xb3\x95\xe6\x95\xb0\xe5\xad\x97\xe8\xaf\x81\xe4\xb9\xa6\xe8\xae\xa4\xe8\xaf\x81\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z09', b'\xe4\xb8\xad\xe5\x9b\xbd\xe8\xb4\xa7\xe5\xb8\x81\xe7\xbd\x91'), (b'z10', b'\xe4\xb8\xad\xe5\x9b\xbd\xe8\xb4\xa7\xe5\xb8\x81\xe7\xbd\x91\xe7\xa7\xbb\xe5\x8a\xa8\xe7\x89\x88'), (b'z11', b'\xe4\xb8\xad\xe5\x9b\xbd\xe7\xa5\xa8\xe6\x8d\xae\xe7\xbd\x91'), (b'z12', b'\xe4\xb8\xad\xe9\x97\xb4\xe4\xbb\xb7\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z13', b'\xe4\xb8\xad\xe5\xbf\x83\xe5\x8a\x9e\xe5\x85\xac\xe8\x87\xaa\xe5\x8a\xa8\xe5\x8c\x96\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z14', b'\xe4\xb8\xad\xe5\xbf\x83\xe5\x9c\xba\xe5\x8a\xa1\xe5\xbe\xae\xe4\xbf\xa1\xe6\x9c\x8d\xe5\x8a\xa1\xe6\x94\xaf\xe6\x8c\x81\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z15', b'\xe4\xb8\xad\xe5\xbf\x83\xe5\xa4\xa7\xe9\xa2\x9d\xe6\x94\xaf\xe4\xbb\x98\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CNAPS\xef\xbc\x89\xe4\xba\xa4\xe6\x98\x93\xe4\xb8\xad\xe5\xbf\x83\xe6\x8e\xa5\xe5\x85\xa5\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z16', b'\xe8\xb5\x84\xe4\xba\xa7\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z17', b'\xe8\x87\xaa\xe5\x8a\xa8\xe5\x8c\x96\xe9\x83\xa8\xe7\xbd\xb2\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z18', b'\xe4\xb8\xad\xe5\xbf\x83\xe4\xba\xa4\xe6\x98\x93\xe4\xbf\xa1\xe6\x81\xaf\xe5\xa4\x87\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CTRS\xef\xbc\x89')]),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='ManpowerInput',
            field=models.CharField(max_length=32, verbose_name=b'\xe4\xba\xba\xe5\x8a\x9b\xe6\x8a\x95\xe5\x85\xa5\xe6\x83\x85\xe5\x86\xb5', choices=[(b'rljz', b'\xe4\xba\xba\xe5\x8a\x9b\xe7\xb4\xa7\xe5\xbc\xa0'), (b'rlcz', b'\xe4\xba\xba\xe5\x8a\x9b\xe5\x85\x85\xe8\xb6\xb3'), (b'rlbz', b'\xe4\xba\xba\xe5\x8a\x9b\xe4\xb8\x8d\xe8\xb6\xb3'), (b'NA', b'NA')]),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='OverallSchedule',
            field=models.CharField(max_length=32, verbose_name=b'\xe9\xa1\xb9\xe7\x9b\xae\xe6\x95\xb4\xe4\xbd\x93\xe8\xbf\x9b\xe5\xba\xa6', choices=[(b'zc', b'\xe6\xad\xa3\xe5\xb8\xb8'), (b'yq', b'\xe5\xbb\xb6\xe6\x9c\x9f'), (b'zt', b'\xe6\x9a\x82\xe5\x81\x9c'), (b'zf', b'\xe4\xbd\x9c\xe5\xba\x9f'), (b'NA', b'NA')]),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='PerformanceTest',
            field=models.CharField(max_length=32, verbose_name=b'\xe6\x80\xa7\xe8\x83\xbd\xe6\xb5\x8b\xe8\xaf\x95', choices=[(b'y', b'\xe6\x9c\x89'), (b'n', b'\xe6\x97\xa0')]),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='ProjectStage',
            field=models.CharField(max_length=32, verbose_name=b'\xe7\x9b\xae\xe5\x89\x8d\xe9\xa1\xb9\xe7\x9b\xae\xe9\x98\xb6\xe6\xae\xb5', choices=[(b'cszb', b'\xe6\xb5\x8b\xe8\xaf\x95\xe5\x87\x86\xe5\xa4\x87'), (b'uat1cs', b'UAT1\xe6\xb5\x8b\xe8\xaf\x95'), (b'uat1wc', b'UAT1\xe5\xae\x8c\xe6\x88\x90'), (b'yslc', b'\xe9\xaa\x8c\xe6\x94\xb6\xe6\xb5\x81\xe7\xa8\x8b'), (b'yslc', b'\xe9\xaa\x8c\xe6\x94\xb6\xe6\xb5\x81\xe7\xa8\x8b'), (b'yscs', b'\xe9\xaa\x8c\xe6\x94\xb6\xe6\xb5\x8b\xe8\xaf\x95'), (b'mnlc', b'\xe6\xa8\xa1\xe6\x8b\x9f\xe6\xb5\x81\xe7\xa8\x8b'), (b'mncs', b'\xe6\xa8\xa1\xe6\x8b\x9f\xe6\xb5\x8b\xe8\xaf\x95'), (b'mnwc', b'\xe6\xa8\xa1\xe6\x8b\x9f\xe5\xae\x8c\xe6\x88\x90'), (b'ysx', b'\xe5\xb7\xb2\xe4\xb8\x8a\xe7\xba\xbf'), (b'NA', b'NA')]),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='Reason',
            field=models.TextField(max_length=2048, verbose_name=b'\xe5\x8e\x9f\xe5\x9b\xa0\xe8\xaf\xb4\xe6\x98\x8e', blank=True),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='SystemName',
            field=models.CharField(max_length=128, verbose_name=b'\xe7\xb3\xbb\xe7\xbb\x9f\xe5\x90\x8d\xe7\xa7\xb0', choices=[(b'b01', b'\xe6\x9c\xac\xe5\xb8\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f'), (b'b02', b'\xe6\x9c\xac\xe5\xb8\x81\xe4\xba\xa4\xe6\x98\x93\xe7\x9b\xb4\xe9\x80\x9a\xe5\xbc\x8f\xe5\xa4\x84\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88\xe6\x9c\xac\xe5\xb8\x81CSTP\xef\xbc\x89'), (b'b03', b'\xe6\x9c\xac\xe5\xb8\x81\xe5\xb8\x82\xe5\x9c\xba\xe7\x9b\x91\xe6\xb5\x8b\xe7\xb3\xbb\xe7\xbb\x9f'), (b'b04', b'\xe6\xa0\x87\xe5\x87\x86\xe5\x8c\x96\xe5\xa4\x96\xe6\xb1\x87\xe4\xba\xa7\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88C-Swap\xef\xbc\x89'), (b'b05', b'\xe6\xa0\x87\xe5\x87\x86\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe5\x8f\x8a\xe8\xa1\x8d\xe7\x94\x9f\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88C-Trade\xef\xbc\x89'), (b'b06', b'\xe6\xa0\x87\xe5\x87\x86\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe5\x8f\x8a\xe8\xa1\x8d\xe7\x94\x9f\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88X-Repo\xef\xbc\x89'), (b'b07', b'\xe6\xa0\x87\xe5\x87\x86\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe5\x8f\x8a\xe8\xa1\x8d\xe7\x94\x9f\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88X-Swap\xef\xbc\x89'), (b'b08', b'\xe6\xa0\x87\xe5\x87\x86\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe5\x8f\x8a\xe8\xa1\x8d\xe7\x94\x9f\xe5\x93\x81\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88X-Bond\xef\xbc\x89'), (b'b09', b'\xe4\xbf\x9d\xe8\xaf\x81\xe9\x87\x91\xe7\xb3\xbb\xe7\xbb\x9f'), (b'c01', b'\xe8\xb4\xa2\xe5\x8a\xa1\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'c02', b'\xe8\xb4\xa2\xe6\x94\xbf\xe9\x83\xa8\xe5\x81\x9a\xe5\xb8\x82\xe6\x94\xaf\xe6\x8c\x81\xe7\xb3\xbb\xe7\xbb\x9f'), (b'c03', b'\xe6\xb5\x8b\xe8\xaf\x95\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b'd01', 
b'\xe5\xa4\xa7\xe9\xa2\x9d\xe5\xad\x98\xe5\x8d\x95\xe5\x8f\x91\xe8\xa1\x8c\xe5\xa4\x87\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CDIS\xef\xbc\x89'), (b'd02', b'\xe5\xa4\xa7\xe9\xa2\x9d\xe5\xad\x98\xe5\x8d\x95\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f'), (b'd03', b'\xe5\xa4\xa7\xe5\xb1\x8f\xe5\xb1\x95\xe7\xa4\xba\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88ITS\xef\xbc\x89'), (b'd04', b'\xe8\xb4\xb7\xe6\xac\xbe\xe8\xbd\xac\xe8\xae\xa9\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88LTS\xef\xbc\x89'), (b'd05', b'\xe6\xa1\xa3\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f'), (b'd06', b'\xe7\x9f\xad\xe4\xbf\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'f01', b'\xe9\xa3\x8e\xe9\x99\xa9\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'g01', b'\xe5\x9b\xbd\xe9\x99\x85\xe9\x87\x91\xe8\x9e\x8d\xe8\xb5\x84\xe4\xba\xa7\xe4\xba\xa4\xe6\x98\x93\xe5\xb9\xb3\xe5\x8f\xb0\xef\xbc\x88CNYFAST\xef\xbc\x89'), (b'h01', b'\xe5\x91\xbc\xe5\x8f\xab\xe4\xb8\xad\xe5\xbf\x83\xe7\xb3\xbb\xe7\xbb\x9f'), (b'h02', b'\xe4\xba\x92\xe8\x81\x94\xe7\xbd\x91\xe9\x82\xae\xe7\xae\xb1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'h03', b'\xe4\xbc\x9a\xe5\x91\x98\xe6\x9c\xba\xe6\x9e\x84\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CIM\xef\xbc\x89'), (b'h04', b'\xe8\xb4\xa7\xe5\xb8\x81\xe5\x8f\x8a\xe5\x80\xba\xe5\x8a\xa1\xe5\xb7\xa5\xe5\x85\xb7\xe5\x8f\x91\xe8\xa1\x8c\xe7\xb3\xbb\xe7\xbb\x9f'), (b'i01', b'IMIX\xe5\xba\x94\xe6\x80\xa5\xe5\xb7\xa5\xe5\x85\xb7'), (b'j01', b'\xe5\x8d\xb3\xe6\x97\xb6\xe9\x80\x9a\xe8\xae\xaf\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CM\xef\xbc\x89'), (b'j02', b'\xe8\xae\xa1\xe8\xb4\xb9\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'j03', b'\xe6\x8a\x80\xe6\x9c\xaf\xe5\x9c\xba\xe5\x8a\xa1\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'j04', b'\xe4\xba\xa4\xe6\x98\x93\xe5\x90\x8e\xe5\xa4\x84\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'k01', b'\xe5\xbc\x80\xe5\x8f\x91\xe8\xbf\x87\xe7\xa8\x8b\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'k02', b'KGR\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'k03', 
b'Kondor\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'k04', b'KTP\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'l01', b'\xe9\x9b\xb6\xe5\x94\xae\xe5\xb8\x82\xe5\x9c\xba\xe4\xba\x92\xe8\x81\x94\xe7\xbd\x91\xe4\xba\xa4\xe6\x98\x93\xe6\x9c\x8d\xe5\x8a\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'q01', b'\xe5\x85\xa8\xe5\x9b\xbd\xe9\x93\xb6\xe8\xa1\x8c\xe9\x97\xb4\xe5\x80\xba\xe5\x88\xb8\xe5\xb8\x82\xe5\x9c\xba\xe5\x87\x86\xe5\x85\xa5\xe5\xa4\x87\xe6\xa1\x88\xe4\xbf\xa1\xe6\x81\xaf\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r01', b'RMDS\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r02', b'\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe7\x94\xb5\xe5\xad\x90\xe5\xa4\x87\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r03', b'\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe8\xb7\xa8\xe5\xa2\x83\xe4\xba\xa4\xe6\x98\x93\xe4\xbf\xa1\xe6\x81\xaf\xe7\xbb\x9f\xe8\xae\xa1\xe7\x9b\x91\xe6\xb5\x8b\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r04', b'\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe5\x88\xa9\xe7\x8e\x87\xe7\x9b\x91\xe6\xb5\x8b\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r05', b'\xe4\xba\xba\xe6\xb0\x91\xe9\x93\xb6\xe8\xa1\x8c\xe5\x88\x86\xe6\x94\xaf\xe8\xa1\x8c\xe8\xb4\xa7\xe5\xb8\x81\xe5\xb8\x82\xe5\x9c\xba\xe4\xbf\xa1\xe6\x81\xaf\xe6\x9c\x8d\xe5\x8a\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'r06', b'\xe8\xbd\xaf\xe4\xbb\xb6\xe9\x85\x8d\xe7\xbd\xae\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b'r07', b'\xe8\xbd\xaf\xe4\xbb\xb6\xe7\xbc\xba\xe9\x99\xb7\xe5\x8f\x8a\xe5\x8f\x98\xe6\x9b\xb4\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b's01', b'SWIFT\xe6\x8a\xa5\xe6\x96\x87\xe7\xae\xa1\xe7\x90\x86\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b's02', b'SWIFT\xe4\xba\xa4\xe6\x98\x93\xe4\xb8\xad\xe5\xbf\x83\xe6\x8e\xa5\xe5\x85\xa5\xe7\xb3\xbb\xe7\xbb\x9f'), (b's03', b'\xe4\xb8\x8a\xe6\xb5\xb7\xe9\x93\xb6\xe8\xa1\x8c\xe9\x97\xb4\xe5\x90\x8c\xe4\xb8\x9a\xe6\x8b\x86\xe6\x94\xbe\xe5\x88\xa9\xe7\x8e\x87\xe7\xb3\xbb\xe7\xbb\x9f'), (b's04', 
b'\xe4\xb8\x8a\xe6\xb5\xb7\xe6\x80\xbb\xe9\x83\xa8\xe9\xbb\x84\xe9\x87\x91\xe5\xb8\x82\xe5\x9c\xba\xe7\x9b\x91\xe6\xb5\x8b\xe5\x88\x86\xe6\x9e\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b's05', b'\xe4\xb8\x8a\xe6\xb5\xb7\xe6\x80\xbb\xe9\x83\xa8\xe9\x87\x91\xe8\x9e\x8d\xe5\xb8\x82\xe5\x9c\xba\xe7\x9b\x91\xe6\xb5\x8b\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b's06', b'\xe5\xae\x9e\xe6\x97\xb6\xe6\xb6\x88\xe6\x81\xaf\xe4\xbc\xa0\xe8\xbe\x93\xe4\xb8\xad\xe9\x97\xb4\xe4\xbb\xb6\xef\xbc\x88IMIX\xef\xbc\x89'), (b's07', b'\xe5\xb8\x82\xe5\x9c\xba\xe5\x9f\xba\xe5\x87\x86\xe5\x8f\x82\xe8\x80\x83\xe7\xb3\xbb\xe7\xbb\x9f'), (b's08', b'\xe5\xb8\x82\xe5\x9c\xba\xe6\x95\xb0\xe6\x8d\xae\xe6\x8a\xa5\xe8\xa1\xa8\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CMDR\xef\xbc\x89'), (b's09', b'\xe5\xb8\x82\xe5\x9c\xba\xe6\x95\xb0\xe6\x8d\xae\xe5\xa4\x84\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88MDP\xef\xbc\x89'), (b's10', b'\xe5\xb8\x82\xe5\x9c\xba\xe6\x95\xb0\xe6\x8d\xae\xe5\x8f\x91\xe5\xb8\x83\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CMDS\xef\xbc\x89'), (b's11', b'\xe6\x95\xb0\xe6\x8d\xae\xe4\xbb\x93\xe5\xba\x93\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CDW\xef\xbc\x89'), (b's12', b'shibor'), (b's13', b'\xe6\x95\xb0\xe6\x8d\xae\xe9\x87\x87\xe9\x9b\x86\xe5\xb9\xb3\xe5\x8f\xb0'), (b's14', b'\xe6\x95\xb0\xe6\x8d\xae\xe4\xbc\xa0\xe8\xbe\x93\xe4\xb8\xad\xe9\x97\xb4\xe4\xbb\xb6\xef\xbc\x88ETL\xef\xbc\x89'), (b't01', b'tivoli\xe7\x9b\x91\xe6\x8e\xa7\xe7\xb3\xbb\xe7\xbb\x9f'), (b't02', b'\xe5\x9b\xbe\xe4\xb9\xa6\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w01', b'\xe5\xa4\x96\xe6\xb1\x87\xe4\xba\xa4\xe6\x98\x93\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w02', b'\xe5\xa4\x96\xe6\xb1\x87\xe4\xba\xa4\xe6\x98\x93\xe7\x9b\xb4\xe9\x80\x9a\xe5\xbc\x8f\xe5\xa4\x84\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w03', b'\xe5\xa4\x96\xe6\xb1\x87\xe9\x99\x90\xe9\xa2\x9d\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w04', b'\xe5\xa4\x96\xe6\xb1\x87\xe5\xb8\x90\xe5\x8a\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w05', 
b'\xe7\xbd\x91\xe7\xbb\x9c\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'w06', b'\xe5\xbe\xae\xe5\xae\xa2\xe6\x9c\x8d\xe4\xba\x8c\xe6\x9c\x9f'), (b'w07', b'\xe6\x96\x87\xe4\xbb\xb6\xe4\xbc\xa0\xe8\xbe\x93\xe4\xb8\xad\xe9\x97\xb4\xe4\xbb\xb6\xef\xbc\x88FTS\xef\xbc\x89'), (b'x01', b'X\xe8\xa1\x8c\xe5\x9c\xba\xe5\x8a\xa1\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x02', b'X\xe8\xa1\x8c\xe4\xba\xa4\xe6\x98\x93\xe6\x95\xb0\xe6\x8d\xae\xe4\xbc\xa0\xe8\xbe\x93\xe6\x8e\xa5\xe5\x8f\xa3\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x03', b'X\xe8\xa1\x8c\xe7\xab\x9e\xe4\xbb\xb7\xe4\xba\xa4\xe6\x98\x93\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x04', b'X\xe8\xa1\x8c\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe8\xb5\x84\xe9\x87\x91\xe4\xb8\x9a\xe5\x8a\xa1\xe5\x90\x8e\xe5\x8f\xb0\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x05', b'X\xe8\xa1\x8c\xe7\xbb\x9f\xe8\xae\xa1\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x06', b'X\xe8\xa1\x8c\xe4\xbf\xa1\xe6\x81\xaf\xe4\xba\xa4\xe6\xb5\x81\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x07', b'X\xe8\xa1\x8c\xe8\xaf\xa2\xe4\xbb\xb7\xe4\xba\xa4\xe6\x98\x93\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x08', b'X\xe8\xa1\x8c\xe5\xb8\x90\xe5\x8a\xa1\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x09', b'X\xe8\xa1\x8c\xe6\x8b\x9b\xe6\xa0\x87\xe4\xba\xa4\xe6\x98\x93\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x10', b'X\xe8\xa1\x8c\xe6\x94\xaf\xe4\xbb\x98\xe9\x94\x80\xe8\xb4\xa6\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x11', b'X\xe8\xa1\x8c\xe4\xb8\xad\xe5\x8f\xb0\xe6\x8a\xa5\xe8\xa1\xa8\xe5\xad\x90\xe7\xb3\xbb\xe7\xbb\x9f'), (b'x12', b'\xe6\x96\xb0\xe8\xb4\xa7\xe5\xb8\x81\xe7\xbd\x91'), (b'x13', b'\xe6\x96\xb0\xe7\xa5\xa8\xe6\x8d\xae\xe7\xbd\x91'), (b'x14', b'\xe9\xa1\xb9\xe7\x9b\xae\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b'x15', b'\xe9\x9c\x80\xe6\xb1\x82\xe7\xae\xa1\xe7\x90\x86\xe5\xb7\xa5\xe5\x85\xb7'), (b'y01', 
b'\xe5\xa4\xae\xe8\xa1\x8c\xe5\xa4\xa7\xe9\xa2\x9d\xe6\x94\xaf\xe4\xbb\x98\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CNAPS\xef\xbc\x89\xe4\xba\xa4\xe6\x98\x93\xe4\xb8\xad\xe5\xbf\x83\xe6\x8e\xa5\xe5\x85\xa5\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y02', b'\xe9\x93\xb6\xe8\xa1\x8c\xe9\x97\xb4\xe5\xb8\x82\xe5\x9c\xba\xe4\xba\x92\xe8\x81\x94\xe7\xbd\x91\xe4\xba\xa4\xe6\x98\x93\xe6\x9c\x8d\xe5\x8a\xa1\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y03', b'\xe5\xba\x94\xe7\x94\xa8\xe9\x9b\x86\xe4\xb8\xad\xe7\x9b\x91\xe6\x8e\xa7\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y04', b'\xe5\xba\x94\xe7\x94\xa8\xe6\x8c\x87\xe6\xa0\x87\xe9\x87\x87\xe9\x9b\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y05', b'\xe7\x94\xa8\xe6\x88\xb7\xe7\xbb\x9f\xe4\xb8\x80\xe8\xae\xa4\xe8\xaf\x81\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y06', b'\xe9\xa2\x84\xe7\xae\x97\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y07', b'\xe5\x85\x83\xe6\x95\xb0\xe6\x8d\xae\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y08', b'\xe8\xbf\x9c\xe7\xa8\x8b\xe5\x9f\xb9\xe8\xae\xad\xe7\xb3\xbb\xe7\xbb\x9f'), (b'y09', b'\xe8\xbf\x90\xe7\xbb\xb4\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z01', b'\xe5\xa2\x9e\xe5\x80\xbc\xe6\x9c\x8d\xe5\x8a\xa1\xe4\xba\xba\xe6\xb0\x91\xe5\xb8\x81\xe5\x89\x8d\xe4\xb8\xad\xe5\x8f\xb0\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z02', b'\xe5\xa2\x9e\xe5\x80\xbc\xe6\x9c\x8d\xe5\x8a\xa1\xe5\xa4\x96\xe6\xb1\x87\xe8\xb5\x84\xe9\x87\x91\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z03', b'\xe5\xa2\x9e\xe5\x80\xbc\xe6\x9c\x8d\xe5\x8a\xa1\xe5\xa4\x96\xe6\xb1\x87\xe5\x81\x9a\xe5\xb8\x82\xe5\x95\x86\xe6\x8a\xa5\xe4\xbb\xb7\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z04', b'\xe5\xa2\x9e\xe5\x80\xbc\xe6\x9c\x8d\xe5\x8a\xa1\xe7\x9b\xb4\xe9\x80\x9a\xe5\xbc\x8f\xe5\xa4\x84\xe7\x90\x86\xe6\x9c\x8d\xe5\x8a\xa1'), (b'z05', b'\xe5\x80\xba\xe5\x88\xb8\xe5\x9f\xba\xe7\xa1\x80\xe4\xbf\xa1\xe6\x81\xaf\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88BDS\xef\xbc\x89'), (b'z06', 
b'\xe5\x80\xba\xe5\x88\xb8\xe5\xb8\x82\xe5\x9c\xba\xe4\xba\xa4\xe6\x98\x93\xe4\xbf\xa1\xe6\x81\xaf\xe5\xa4\x87\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z07', b'\xe8\xaf\x81\xe4\xb9\xa6\xe6\x8e\x88\xe6\x9d\x83\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CA\xef\xbc\x89'), (b'z08', b'\xe6\x94\xaf\xe6\x8c\x81\xe5\x9b\xbd\xe4\xba\xa7\xe5\xaf\x86\xe7\xa0\x81\xe7\xae\x97\xe6\xb3\x95\xe6\x95\xb0\xe5\xad\x97\xe8\xaf\x81\xe4\xb9\xa6\xe8\xae\xa4\xe8\xaf\x81\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z09', b'\xe4\xb8\xad\xe5\x9b\xbd\xe8\xb4\xa7\xe5\xb8\x81\xe7\xbd\x91'), (b'z10', b'\xe4\xb8\xad\xe5\x9b\xbd\xe8\xb4\xa7\xe5\xb8\x81\xe7\xbd\x91\xe7\xa7\xbb\xe5\x8a\xa8\xe7\x89\x88'), (b'z11', b'\xe4\xb8\xad\xe5\x9b\xbd\xe7\xa5\xa8\xe6\x8d\xae\xe7\xbd\x91'), (b'z12', b'\xe4\xb8\xad\xe9\x97\xb4\xe4\xbb\xb7\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z13', b'\xe4\xb8\xad\xe5\xbf\x83\xe5\x8a\x9e\xe5\x85\xac\xe8\x87\xaa\xe5\x8a\xa8\xe5\x8c\x96\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z14', b'\xe4\xb8\xad\xe5\xbf\x83\xe5\x9c\xba\xe5\x8a\xa1\xe5\xbe\xae\xe4\xbf\xa1\xe6\x9c\x8d\xe5\x8a\xa1\xe6\x94\xaf\xe6\x8c\x81\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z15', b'\xe4\xb8\xad\xe5\xbf\x83\xe5\xa4\xa7\xe9\xa2\x9d\xe6\x94\xaf\xe4\xbb\x98\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CNAPS\xef\xbc\x89\xe4\xba\xa4\xe6\x98\x93\xe4\xb8\xad\xe5\xbf\x83\xe6\x8e\xa5\xe5\x85\xa5\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z16', b'\xe8\xb5\x84\xe4\xba\xa7\xe7\xae\xa1\xe7\x90\x86\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z17', b'\xe8\x87\xaa\xe5\x8a\xa8\xe5\x8c\x96\xe9\x83\xa8\xe7\xbd\xb2\xe7\xb3\xbb\xe7\xbb\x9f'), (b'z18', b'\xe4\xb8\xad\xe5\xbf\x83\xe4\xba\xa4\xe6\x98\x93\xe4\xbf\xa1\xe6\x81\xaf\xe5\xa4\x87\xe6\xa1\x88\xe7\xb3\xbb\xe7\xbb\x9f\xef\xbc\x88CTRS\xef\xbc\x89')]),
        ),
        # NOTE(review): default is the date this migration was generated
        # (baked in by makemigrations), not a callable such as
        # datetime.date.today — every new row defaults to 2016-07-20.
        migrations.AlterField(
            model_name='report_detail',
            name='UpdateDate',
            field=models.DateField(default=datetime.date(2016, 7, 20), verbose_name=b'\xe5\xa1\xab\xe5\x86\x99\xe6\x97\xa5\xe6\x9c\x9f', blank=True),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='VersionQuality',
            field=models.CharField(max_length=32, verbose_name=b'\xe7\x89\x88\xe6\x9c\xac\xe8\xb4\xa8\xe9\x87\x8f', choices=[(b'zlyb', b'\xe8\xb4\xa8\xe9\x87\x8f\xe4\xb8\x80\xe8\x88\xac'), (b'zljh', b'\xe8\xb4\xa8\xe9\x87\x8f\xe8\xbe\x83\xe5\xa5\xbd'), (b'zljc', b'\xe8\xb4\xa8\xe9\x87\x8f\xe8\xbe\x83\xe5\xb7\xae'), (b'NA', b'NA')]),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='Workload',
            field=models.CharField(max_length=32, verbose_name=b'\xe5\xb7\xa5\xe4\xbd\x9c\xe9\x87\x8f\xe6\x83\x85\xe5\x86\xb5', choices=[(b'cqb', b'\xe8\xb6\x85\xe7\xad\xbe\xe6\x8a\xa5'), (b'zc', b'\xe6\xad\xa3\xe5\xb8\xb8'), (b'ccg', b'\xe8\xb6\x85\xe9\x87\x87\xe8\xb4\xad'), (b'NA', b'NA')]),
        ),
        migrations.AlterField(
            model_name='report_detail',
            name='Writter',
            field=models.CharField(max_length=32, verbose_name=b'\xe6\xb5\x8b\xe8\xaf\x95\xe8\xb4\x9f\xe8\xb4\xa3\xe4\xba\xba', blank=True),
        ),
        # Composite uniqueness: one report_detail row per (system, version,
        # main system, main version) combination.
        migrations.AlterUniqueTogether(
            name='report_detail',
            unique_together=set([('SystemName', 'VersionNum', 'Main_SysName', 'Main_VersionNum')]),
        ),
    ]
| [
"yezitt@163.com"
] | yezitt@163.com |
4e4336b975c5ee46eb7645c1b114c235d4303c50 | 989f011a784015e1a33c41362ab4ec06e92b3339 | /examples/07_functions/func_args_unpacking.py | 1e837181f73877fce4b28831c45f757f1b3da290 | [] | no_license | yevgeniy-voloshin/pyneng-online-jun-jul-2017 | b0be9df7d379e24b654172c1bc3f5cc0bdbbcd2f | 050e43d7f582528189005c1b7c34970352e968f1 | refs/heads/master | 2021-01-21T16:22:27.347769 | 2017-05-19T17:35:16 | 2017-05-19T17:35:16 | 91,885,650 | 1 | 0 | null | 2017-05-20T11:46:28 | 2017-05-20T11:46:28 | null | UTF-8 | Python | false | false | 3,132 | py |
# Unpacking positional arguments
def config_interface(intf_name, ip_address, cidr_mask):
    """Build the CLI commands that configure an IPv4 address on an interface.

    intf_name  -- interface name, e.g. 'Fa0/1'
    ip_address -- IPv4 address in dotted-decimal notation
    cidr_mask  -- prefix length such as '/24' (a leading '/' is optional)

    Returns a list of three command strings: the interface line,
    'no shutdown', and the 'ip address' line with a dotted-decimal mask.
    """
    prefix_len = int(cidr_mask.split('/')[-1])
    # Left-align prefix_len one-bits inside a 32-bit word, then peel off
    # the four octets from most- to least-significant.
    mask_value = (0xFFFFFFFF << (32 - prefix_len)) & 0xFFFFFFFF
    octets = [str((mask_value >> shift) & 0xFF) for shift in (24, 16, 8, 0)]
    dotted_mask = '.'.join(octets)
    return [
        'interface {0}'.format(intf_name),
        'no shutdown',
        'ip address {0} {1}'.format(ip_address, dotted_mask),
    ]
#print config_interface('Fa0/1', '10.0.1.1', '/25')
# Demo: each entry is a [name, ip, cidr_mask] triple that is unpacked into
# config_interface's three positional parameters with the * operator.
interfaces_info = [['Fa0/1', '10.0.1.1', '/24'],
                   ['Fa0/2', '10.0.2.1', '/24'],
                   ['Fa0/3', '10.0.3.1', '/24'],
                   ['Fa0/4', '10.0.4.1', '/24'],
                   ['Lo0', '10.0.0.1', '/32']]
# Python 2 print statement: each call's command list is printed as-is.
for i in interfaces_info:
    print config_interface(*i)
# The triple-quoted string below is the captured example output, kept as a
# module-level literal for reference.
"""
Output:
['interface Fa0/1', 'no shutdown', 'ip address 10.0.1.1 255.255.255.0']
['interface Fa0/2', 'no shutdown', 'ip address 10.0.2.1 255.255.255.0']
['interface Fa0/3', 'no shutdown', 'ip address 10.0.3.1 255.255.255.0']
['interface Fa0/4', 'no shutdown', 'ip address 10.0.4.1 255.255.255.0']
['interface Lo0', 'no shutdown', 'ip address 10.0.0.1 255.255.255.255']
"""
# Unpacking keyword arguments
def config_to_list(cfg_file, delete_excl=True,
delete_empty=True, strip_end=True):
result = []
with open( cfg_file ) as f:
for line in f:
if strip_end:
line = line.rstrip()
if delete_empty and not line:
pass
elif delete_excl and line.startswith('!'):
pass
else:
result.append(line)
return result
cfg = [dict(cfg_file='r1.txt', delete_excl=True, delete_empty=True, strip_end=True),
dict(cfg_file='r2.txt', delete_excl=False, delete_empty=True, strip_end=True),
dict(cfg_file='r3.txt', delete_excl=True, delete_empty=False, strip_end=True),
dict(cfg_file='r4.txt', delete_excl=True, delete_empty=True, strip_end=False)]
for d in cfg:
print config_to_list(**d)
"""
Output:
['service timestamps debug datetime msec localtime show-timezone year', 'service timestamps log datetime msec localtime show-timezone year', 'service password-encryption', 'service sequence-numbers', 'no ip domain lookup', 'ip ssh version 2']
['!', 'service timestamps debug datetime msec localtime show-timezone year', 'service timestamps log datetime msec localtime show-timezone year', 'service password-encryption', 'service sequence-numbers', '!', 'no ip domain lookup', '!', 'ip ssh version 2', '!']
['service timestamps debug datetime msec localtime show-timezone year', 'service timestamps log datetime msec localtime show-timezone year', 'service password-encryption', 'service sequence-numbers', '', '', '', 'ip ssh version 2', '']
['service timestamps debug datetime msec localtime show-timezone year\n', 'service timestamps log datetime msec localtime show-timezone year\n', 'service password-encryption\n', 'service sequence-numbers\n', 'no ip domain lookup\n', 'ip ssh version 2\n']
```
| [
"pyneng.course@gmail.com"
] | pyneng.course@gmail.com |
ea72d2acaaac6dc2ec9c32732f3da94c4e0f077e | fa27041fe8f82971af6114d1eed2be03b5d3be51 | /1-report-repair/report-repair.py | 660dbf4b6cfb5b44d3e66ca2756c90213e221179 | [
"MIT"
] | permissive | RealOrangeOne/advent-of-code-2020 | 89e64d435958e74008600d7ddbafa8a547b8c6c2 | ca3cce3becec8ea3a62e040ff107a150e4b39588 | refs/heads/main | 2023-02-03T23:10:48.256449 | 2020-12-15T17:22:19 | 2020-12-15T17:22:19 | 318,804,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | from itertools import product
from math import prod
from pathlib import Path
with Path(__file__).parent.joinpath("data.txt").open() as f:
data = list(map(int, f.readlines()))
for repeat in [2, 3]:
for vals in product(data, repeat=repeat):
if sum(vals) == 2020:
print(repeat, prod(vals))
break
| [
"git@theorangeone.net"
] | git@theorangeone.net |
e22b0e93c405f52d9224ce7ce21d4d8440b85f62 | 03b0ab88a42a9dd5b86ff98d0409f522428cc014 | /gui/tiles/end_level.py | a4162710c4e2e0073d0d4a5a375d9e421e7fed66 | [] | no_license | anis-campos/macgyver | d6a3e78353d3c4aa68f9bc6d67d4738f75bf1b04 | 64bbaf2d8751d92f089f51d11f1ee063e389a7cf | refs/heads/master | 2021-01-08T02:52:10.731734 | 2020-02-24T15:25:14 | 2020-02-24T15:25:14 | 241,890,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | import pygame
from gui import WHITE
class EndLevel(pygame.sprite.Sprite):
def __init__(self, width, height):
super().__init__()
self.image = pygame.Surface((width,height))
self.rect = self.image.get_rect()
self.draw_text('YOU ESCAPED, WELL DONE !', 50, width/2, height/2)
font_name = pygame.font.match_font('arial')
def draw_text(self, text, size, x, y):
font = pygame.font.Font(self.font_name, size)
text_surface = font.render(text, True, WHITE)
text_rect: pygame.Rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.image.blit(text_surface, text_rect) | [
"yocorporation@hotmail.com"
] | yocorporation@hotmail.com |
649781f4b1fe097b8c83013e3163486cc5e28b6d | cbe9bca15a67c1d5603f4aab7a67d9ffbc9a043b | /refresh_token.py | 0dcc712a76eb96c46b2d938215dd48e65bbe7d68 | [] | no_license | nicolashi/tal_baidu_test | adf6eb1e5754fc6709b5b4b55563da3d3c8faeff | b9a4a76e8ebebc16bb72410107c9d2fc59e9c7b8 | refs/heads/master | 2022-01-27T02:14:58.184075 | 2019-05-08T07:13:10 | 2019-05-08T07:13:10 | 180,771,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | #
# generate new token using refresh token
#
import urllib, urllib2, sys
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import ssl
# api key
client_id = "xDI1sdxGBAqDXmP1SN854oXx"
# secret key
client_secret = "j051S00hqjGdm8CEXDm4ZaYsAqQ2Ddmv"
refresh_token = "25.8e3be89217c9495725e8a5b7529b7226.315360000.1870331211.282335-15993344"
host = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&refresh_token=" + refresh_token + \
"&client_credentials&client_id=" + client_id + "&client_secret=" + client_secret
request = urllib2.Request(host)
request.add_header('Content-Type','application/json;charset=UTF-8')
response = urllib2.urlopen(request)
content = response.read()
if content:
print(content)
| [
"nicola.shi@tufts.edu"
] | nicola.shi@tufts.edu |
f524011728bfd4542c5c10736a8bbdf82778f96d | 5307ad6ec7c218c04383a613642c698bd16bb12c | /src/loan_admin/tests/test_loan_term_notification.py | 672e0fd45bf97c5a2cc1a691a8a9bb6f079b54b6 | [] | no_license | DrZedd42/constant-loan-api | 8cece9d2f4a71bbe9771357b54979463bc18ae8d | 71f003815a96e36e04d386fc7a07795f3ba8cd34 | refs/heads/master | 2021-10-19T07:39:56.378194 | 2019-02-19T08:53:41 | 2019-02-19T08:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from common.test_utils import AuthenticationUtils
from loan.factories import LoanTermNotificationFactory
class ListLoanTermTests(APITestCase):
def setUp(self):
self.auth_utils = AuthenticationUtils(self.client)
self.auth_utils.admin_login()
LoanTermNotificationFactory.create_batch(10)
def test_list(self):
url = reverse('loan-admin:loantermnotification-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()['results']), 10)
# print(response.json())
| [
"khoa@autonomous.nyc"
] | khoa@autonomous.nyc |
ec296fd81ac29f1b34a8df5f07bf89b73e9fd1f4 | 5cb5caf2955e8bab1dc9cfd07100c7bf5781ec90 | /linear_Regression/Ruebenpreis.py | f72d311a4606580ca73c99263142dc3a75ad6eb1 | [] | no_license | Raketenpete/Ruebenpreis | 934b22df29aee86b5c8b30e93f75b71997e0834c | 952b2e7f0b1e3435f946df329ea13bea91f729cf | refs/heads/master | 2022-04-17T14:42:48.580810 | 2020-04-16T13:18:44 | 2020-04-16T13:18:44 | 256,186,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | import numpy as np
np.set_printoptions(threshold=np.inf)
import pandas as pd
from sklearn.linear_model import LinearRegression
import warnings
warnings.filterwarnings(action="ignore", module="sklearn", message="^internal gelsd")
def linearizer(lowerLimit, upperLimit): # untere und obere Grenze
upperLimit += 1
out = []
while (lowerLimit < upperLimit):
out.append(lowerLimit)
lowerLimit += 1
return out
inputFile = "input.csv"
inputData = pd.read_csv(inputFile, delimiter=',', header=None)
y = inputData.iloc[:,0] #erste Zahl wird ausgewählt, nächste wäre [:,1] etc.
length_y = len(y.index)
x= []
x = linearizer(0, length_y - 1) #minus 1 um position 0 zu kompensieren
x = pd.DataFrame(x)
model = LinearRegression()
model.fit(x, y)
#neues array, 1 position voraus
positionsToPredict = 0
new_data = linearizer(length_y, (positionsToPredict + length_y))
new_data = pd.DataFrame(new_data)
prediction = model.predict(new_data)
print("\nVoraussagung aufgrund von linearer Regression: \n\n", prediction) | [
"mueller.tristan.91@gmail.com"
] | mueller.tristan.91@gmail.com |
097dd5898600765c7f6cb2a2c800a9d2703ba7f9 | 3a88ccc2835e4a88c93d3401926b4ce991b61168 | /run_horovod.py | f5216f600e3705381e01d5f1c0fc3fb992426dca | [] | no_license | Shiner11/bd18f-Noori2 | adf779f441fac2e1b22e8df535bb7f5983121ea1 | df4221e406cc714c85cac54ac448276c6f8847f1 | refs/heads/master | 2020-04-10T11:45:02.645773 | 2018-12-09T08:37:04 | 2018-12-09T08:37:04 | 161,001,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | """
Example command for running this script:
mpirun --mca btl_vader_single_copy_mechanism none --allow-run-as-root -bind-to none -map-by slot -mca orte_base_help_aggregate 0 -x NCCL_DEBUG=INFO -np 2 -H localhost:2 python run_horovod.py --max_steps=100
Example command for examining the checkpoint file:
python <PARALLAX_HOME>/tensorflow/tensorflow/python/tools/inspect_checkpoint.py --file_name=hvd_ckpt/model.ckpt-0 --tensor_name=conv1/kernel
"""
import os
import time
import tensorflow as tf
import horovod.tensorflow as hvd
import model
from model import rnn
from tensorflow.examples.tutorials.mnist import input_data
hvd.init()
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of iterations to run for each workers.""")
tf.app.flags.DEFINE_integer('log_frequency', 50,
"""How many steps between two runop logs.""")
tf.app.flags.DEFINE_integer('batch_size', 32,
"""Batch size""")
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
ops = rnn(only_logits=True)
logits = ops['logits']
x = ops['images']
y = ops['labels']
is_training = ops['is_training']
global_step = ops['global_step']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
loss += model.weight_decay * tf.losses.get_regularization_loss()
acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, axis=1), tf.argmax(y, axis=1)), tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=model.learning_rate)
optimizer = hvd.DistributedOptimizer(optimizer)
train_op = optimizer.minimize(loss, global_step=global_step)
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if hvd.rank() == 0:
saver = tf.train.Saver(tf.global_variables(), save_relative_paths=False, allow_empty=True, max_to_keep=1000000)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
scaffold = tf.train.Scaffold(saver=saver)
ckpt_hook = tf.train.CheckpointSaverHook('hvd_ckpt', save_steps=1, scaffold=scaffold)
hooks.append(ckpt_hook)
with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
start = time.time()
for i in range(FLAGS.max_steps):
batch = mnist.train.next_batch(FLAGS.batch_size, shuffle=False)
_, loss_ = sess.run([train_op, loss], feed_dict={x: batch[0], y: batch[1], is_training: True})
if i % FLAGS.log_frequency == 0:
end = time.time()
throughput = float(FLAGS.log_frequency) / float(end - start)
acc_ = sess.run(acc, feed_dict={x: mnist.test.images, y: mnist.test.labels, is_training: False})
print("step: %d, test accuracy: %lf, throughput: %f steps/sec" % (i, acc_, throughput))
start = time.time()
| [
"noreply@github.com"
] | noreply@github.com |
f58474a9aea0941e1addfa44edfbeac7a7c4e547 | 2c20f40a25c7e718e031db18901a6900527ea757 | /vedo/pyplot.py | c22cef53b499069a54ae9c2281be4877bf49d19b | [
"OFL-1.1",
"MIT",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | sariths/vtkPlotter | a09653c6aa862ff7932aef8ba417349f1b82288e | f5b84f9b3ef202353c85e0b18f2e2e1a5d72183c | refs/heads/master | 2021-12-11T02:51:47.300710 | 2021-11-10T10:42:50 | 2021-11-10T10:42:50 | 137,873,932 | 0 | 0 | MIT | 2018-07-09T13:09:21 | 2018-06-19T10:03:55 | Python | UTF-8 | Python | false | false | 98,743 | py | import vtk
import numpy as np
import vedo
import vedo.settings as settings
import vedo.utils as utils
import vedo.colors as colors
import vedo.shapes as shapes
import vedo.addons as addons
from vedo.assembly import Assembly
from vedo.mesh import Mesh, merge
from vedo.plotter import show # not used, but useful to import this
__doc__ = """Plotting utility functions.""" + vedo.docs._defs
__all__ = [
"plot",
"histogram",
"donut",
"quiver",
"violin",
"whisker",
"streamplot",
"matrix",
"DirectedGraph",
"show",
]
##########################################################################
class Plot(Assembly):
    """
    Derived class of ``Assembly`` to manipulate plots.

    A Plot groups the graphical elements of a 2D plot (axes, lines,
    markers, error bars, ...) and remembers the scaling factor ``yscale``
    applied to the y coordinates, plus the axis limits, so that further
    objects can be overlaid consistently with ``+=``/``overlayPlot``.
    """
    def __init__(self, *objs):
        Assembly.__init__(self, *objs)
        self.yscale = 1           # scaling factor applied to y-coordinates of added objects
        self.aspect = 4 / 3.0     # desired width/height aspect ratio of the plot
        self.cut = True  # todo   # if True, clip added objects to the axis limits
        self.xlim = None          # user-requested x range (may be None)
        self.ylim = None          # user-requested y range (may be None)
        self.pad = 0.05           # fractional padding around the data range
        self._x0lim = None        # actual plot limits in world coordinates
        self._y0lim = None
        self._x1lim = None
        self._y1lim = None
        self.zmax = 0  # z-order
        self.fixed_scale = 1      # uniform scale used for aspect-preserving objects
        self.bins = []            # filled by histogram-producing functions
        self.freqs = []
    def ybounds(self, scaled=True):
        """Return the (ymin, ymax) bounds of the plot.

        If ``scaled`` is True the internal ``yscale`` factor is divided out,
        giving back the bounds in the original data units."""
        if scaled:
            return (self._y0lim/self.yscale, self._y1lim/self.yscale)
        else:
            return (self._y0lim, self._y1lim)
    def __iadd__(self, *objs):
        """
        Add object to plot with taking automatically into account the correct aspect ratio.
        """
        # these types will scale proportionally to keep their native shape aspect ratio intact
        typs = (
            shapes.Text3D,
            shapes.Polygon,
            shapes.Star,
            shapes.Disc,
            shapes.Ellipsoid,
            shapes.Latex,
            shapes.Sphere,
            # shapes.Arrow2D,
            Assembly,
            vedo.Picture,
        )
        # uniform scale for aspect-preserving objects: never enlarge beyond 1
        self.fixed_scale = np.min([1, self.yscale])
        objs = objs[0] # make a list anyway (objs arrives packed by *objs)
        if not utils.isSequence(objs):
            objs = [objs]
        if not utils.isSequence(objs[0]) and isinstance(objs[0], Plot):
            # is adding another whole Plot # TO BE REVISED
            plot2 = objs[0]
            plot_z = plot2.z() + (plot2._x1lim - plot2._x0lim)/1000 # add a small shift in z
            # print(plot2.yscale, self.yscale)
            elems = plot2.unpack()
            objs2 = []
            for e in elems:
                if e.name == "axes":
                    continue  # do not duplicate the axes of the overlaid plot
                ec = e.clone()
                # remove plot2.yscale and apply self.yscale:
                ec.SetScale(1, self.yscale/plot2.yscale, 1)
                self.AddPart(ec.z(plot_z))
                objs2.append(ec)
            objs = objs2
        else:
            # print('adding individual objects', len(objs))
            for a in objs:
                if isinstance(a, typs):
                    # special scaling to preserve the aspect ratio
                    # print('adding', a.name, 'fixed scale', self.fixed_scale)
                    a.scale(self.fixed_scale)
                else:
                    # print('adding', a.name, 'yscale', self.yscale)
                    a.scale([1, self.yscale, 1])
                    # rescale the y position too, so the object lands where
                    # it would in the unscaled data coordinates
                    py = a.y()
                    a.y(py * self.yscale)
                self.AddPart(a)
        if self.cut: # todo
            # clip each added object (in place) to the plot rectangle
            for a in objs:
                if not a or a.name == "axes":
                    continue
                if self._y0lim is not None and hasattr(a, "cutWithPlane"):
                    a.cutWithPlane([0, self._y0lim, 0], [0, 1, 0])
                if self._y1lim is not None and hasattr(a, "cutWithPlane"):
                    a.cutWithPlane([0, self._y1lim, 0], [0, -1, 0])
                if self._x0lim is not None and hasattr(a, "cutWithPlane"):
                    a.cutWithPlane([self._x0lim, 0, 0], [1, 0, 0])
                if self._x1lim is not None and hasattr(a, "cutWithPlane"):
                    a.cutWithPlane([self._x1lim, 0, 0], [-1, 0, 0])
        return self
    def overlayPlot(self, *args, **kwargs):
        """Plot on top of an already existing plot, reusing its frame and scaling."""
        kwargs['format'] = self  # tells plot() to adopt this plot's limits/yscale
        plt = plot(*args, **kwargs)
        plt.format = self
        for a in plt.unpack():
            self.AddPart(a)
        return self
    def overlayHistogram(self, *args, **kwargs):
        """Plot histogram on top of an already existing plot, reusing its frame and scaling."""
        kwargs['format'] = self  # tells histogram() to adopt this plot's limits/yscale
        h = histogram(*args, **kwargs)
        h.format = self
        for a in h.unpack():
            self.AddPart(a)
        return self
def plot(*args, **kwargs):
    """
    Draw a 2D line plot, or scatter plot, of variable x vs variable y.
    Input format can be either [allx], [allx, ally] or [(x1,y1), (x2,y2), ...]

    :param list xerrors: set uncertainties for the x variable, shown as error bars.
    :param list yerrors: set uncertainties for the y variable, shown as error bars.
    :param bool errorBand: represent errors on y as a filled error band.
        Use ``ec`` keyword to modify its color.

    :param list xlim: set limits to the range for the x variable
    :param list ylim: set limits to the range for the y variable
    :param float, aspect: desired aspect ratio.
        If None, it is automatically calculated to get a reasonable aspect ratio.
        Scaling factor is saved in ``Plot.yscale``.

    :param str c: color of frame and text.
    :param float alpha: opacity of frame and text.
    :param str xtitle: title label along x-axis.
    :param str ytitle: title label along y-axis.
    :param str title: histogram title on top.
    :param float titleSize: size of title
    :param str ec: color of error bar, by default the same as marker color
    :param str lc: color of line
    :param float la: transparency of line
    :param float lw: width of line
    :param bool dashed: use a dashed line style
    :param bool splined: spline the line joining the point as a continuous curve
    :param str,int marker: use a marker shape for the data points
    :param float ms: marker size.
    :param str mc: color of marker
    :param float ma: opacity of marker

    :Example:
        .. code-block:: python

            from vedo.pyplot import plot
            import numpy as np

            x = np.linspace(0, 6.28, num=50)

            plot(np.sin(x), 'r').plot(np.cos(x), 'bo-').show()

        |simpleplot|

    More examples:

    |plot_errbars| |plot_errbars.py|_

    |plot_errband| |plot_errband.py|_

    |plot_pip| |plot_pip.py|_

    |scatter1| |scatter1.py|_

    |scatter2| |scatter2.py|_

    If input is an external function or a formula, draw the surface
    representing the function :math:`f(x,y)`.

    :param float x: x range of values.
    :param float y: y range of values.
    :param float zlimits: limit the z range of the independent variable.
    :param int zlevels: will draw the specified number of z-levels contour lines.
    :param bool showNan: show where the function does not exist as red points.
    :param list bins: number of bins in x and y.

    |plot_fxy| |plot_fxy.py|_

    Function is: :math:`f(x,y)=\sin(3x) \cdot \log(x-y)/3` in range :math:`x=[0,3], y=[0,3]`.

    If ``mode='complex'`` draw the real value of the function and color map the imaginary part.

    :param str cmap: diverging color map (white means imag(z)=0).
    :param float lw: line width of the binning
    :param list bins: binning in x and y

    |fcomplex| |plot_fxy.py|_

    If ``mode='polar'`` input arrays are interpreted as a list of polar angles and radii.
    Build a polar (radar) plot by joining the set of points in polar coordinates.

    :param str title: plot title
    :param float tsize: title size
    :param int bins: number of bins in phi
    :param float r1: inner radius
    :param float r2: outer radius
    :param float lsize: label size
    :param c: color of the line
    :param bc: color of the frame and labels
    :param alpha: alpha of the frame
    :param int ps: point size in pixels, if ps=0 no point is drawn
    :param int lw: line width in pixels, if lw=0 no line is drawn
    :param bool deg: input array is in degrees
    :param float vmax: normalize radius to this maximum value
    :param bool fill: fill convex area with solid color
    :param bool spline: interpolate the set of input points
    :param bool showDisc: draw the outer ring axis
    :param int nrays: draw this number of axis rays (continuous and dashed)
    :param bool showLines: draw lines to the origin
    :param bool showAngles: draw angle values

    |histo_polar| |histo_polar.py|_

    If ``mode='spheric'`` input is an external function rho(theta, phi).
    A surface is created in spherical coordinates.
    Return an ``Plot(Assembly)`` of 2 objects, the unit grid
    sphere (in wireframe representation) and the surface `rho(theta, phi)`.

    :param function rfunc: handle to a user defined function.
    :param bool normalize: scale surface to fit inside the unit sphere
    :param int res: grid resolution
    :param bool scalarbar: add a 3D scalarbar to the plot for radius
    :param c: color of the unit grid
    :param alpha: transparency of the unit grid
    :param str cmap: color map of the surface

    |plot_spheric| |plot_spheric.py|_
    """
    mode = kwargs.pop("mode", "")
    if "spher" in mode:
        return _plotSpheric(args[0], **kwargs)
    if "bar" in mode:
        return _barplot(args[0], **kwargs)
    if isinstance(args[0], str) or "function" in str(type(args[0])):
        # input is a formula string or a callable: draw it as a surface f(x,y)
        if "complex" in mode:
            return _plotFz(args[0], **kwargs)
        return _plotFxy(args[0], **kwargs)
    # grab the matplotlib-like options
    # optidx = index of the first string argument (the format string), if any
    optidx = None
    for i, a in enumerate(args):
        if i > 0 and isinstance(a, str):
            optidx = i
            break
    if optidx:
        opts = args[optidx].replace(" ", "")
        if "--" in opts:
            opts = opts.replace("--", "")
            kwargs["dashed"] = True
        elif "-" in opts:
            opts = opts.replace("-", "")
        else:
            # no '-' in the format string: markers only, suppress the line
            kwargs["lw"] = 0
        symbs = [".", "p", "*", "h", "D", "d", "o", "v", "^", ">", "<", "s", "x", "+", "a"]
        # consume only the first occurrence of a marker character (count=1),
        # leftover characters may still match a color nick below
        for ss in symbs:
            if ss in opts:
                opts = opts.replace(ss, "", 1)
                kwargs["marker"] = ss
                break
        allcols = list(colors.color_nicks.keys()) + list(colors.colors.keys())
        for cc in allcols:
            if cc in opts:
                opts = opts.replace(cc, "")
                kwargs["lc"] = cc
                kwargs["mc"] = cc
                break
        if opts:
            # anything left over was not a recognized option token
            colors.printc("Could not understand option(s):", opts, c="y")
    if optidx == 1 or optidx is None:
        if utils.isSequence(args[0][0]):
            # print('case 1', 'plot([(x,y),..])')
            data = np.array(args[0])
            x = np.array(data[:, 0])
            y = np.array(data[:, 1])
        elif len(args) == 1 or optidx == 1:
            # print('case 2', 'plot(x)')
            x = np.linspace(0, len(args[0]), num=len(args[0]))
            y = np.array(args[0])
        elif utils.isSequence(args[1]):
            # print('case 3', 'plot(allx,ally)')
            x = np.array(args[0])
            y = np.array(args[1])
        elif utils.isSequence(args[0]) and utils.isSequence(args[0][0]):
            # print('case 4', 'plot([allx,ally])')
            x = np.array(args[0][0])
            y = np.array(args[0][1])
    elif optidx == 2:
        # print('case 5', 'plot(x,y)')
        x = np.array(args[0])
        y = np.array(args[1])
    else:
        print("plot(): Could not understand input arguments", args)
        return None
    if "polar" in mode:
        return _plotPolar(np.c_[x, y], **kwargs)
    return _plotxy(np.c_[x, y], **kwargs)
def histogram(*args, **kwargs):
    """
    Histogramming for 1D and 2D data arrays.

    Accepts either two positional arrays (x, y), or a single array,
    a ``vedo.Volume`` or a ``vedo.Points`` object. The keyword ``mode``
    selects the representation ('polar', 'hexbin', 'spheric' or default).

    For 1D arrays:

    :param int bins: number of bins.
    :param list vrange: restrict the range of the histogram.
    :param bool density: normalize the area to 1 by dividing by the nr of entries and bin size.
    :param bool logscale: use logscale on y-axis.
    :param bool fill: fill bars with solid color `c`.
    :param float gap: leave a small space btw bars.
    :param bool outline: show outline of the bins.
    :param bool errors: show error bars.

    |histo_1D| |histo_1D.py|_

    If ``mode='polar'`` assume input is polar coordinate system (rho, theta):

    :param list weights: array of weights, of the same shape as the input.
        Each value only contributes its associated weight towards the bin count (instead of 1).
    :param str title: histogram title
    :param float tsize: title size
    :param int bins: number of bins in phi
    :param float r1: inner radius
    :param float r2: outer radius
    :param float phigap: gap angle btw 2 radial bars, in degrees
    :param float rgap: gap factor along radius of numeric angle labels
    :param float lpos: label gap factor along radius
    :param float lsize: label size
    :param c: color of the histogram bars, can be a list of length `bins`.
    :param bc: color of the frame and labels
    :param alpha: alpha of the frame
    :param str cmap: color map name
    :param bool deg: input array is in degrees
    :param float vmin: minimum value of the radial axis
    :param float vmax: maximum value of the radial axis
    :param list labels: list of labels, must be of length `bins`
    :param bool showDisc: show the outer ring axis
    :param int nrays: draw this number of axis rays (continuous and dashed)
    :param bool showLines: show lines to the origin
    :param bool showAngles: show angular values
    :param bool showErrors: show error bars

    |histo_polar| |histo_polar.py|_

    For 2D arrays:

    Input data formats [(x1,x2,..), (y1,y2,..)] or [(x1,y1), (x2,y2),..] are both valid.

    :param str xtitle: x axis title
    :param str ytitle: y axis title
    :param list bins: binning as (nx, ny)
    :param list vrange: range in x and y in format [(xmin,xmax), (ymin,ymax)]
    :param str cmap: color map name
    :param float lw: line width of the binning
    :param bool scalarbar: add a scalarbar

    |histo_2D| |histo_2D.py|_

    If ``mode='hexbin'``, build a hexagonal histogram from a list of x and y values.

    :param str xtitle: x axis title
    :param str ytitle: y axis title
    :param bool bins: nr of bins for the smaller range in x or y.
    :param list vrange: range in x and y in format [(xmin,xmax), (ymin,ymax)]
    :param float norm: sets a scaling factor for the z axis (freq. axis).
    :param bool fill: draw solid hexagons.
    :param str cmap: color map name for elevation.

    |histo_hexagonal| |histo_hexagonal.py|_

    If ``mode='spheric'``, build a histogram from list of theta and phi values.

    :param float rmax: maximum radial elevation of bin
    :param int res: sphere resolution
    :param cmap: color map name
    :param float lw: line width of the bin edges
    :param bool scalarbar: add a scalarbar to plot

    |histo_spheric| |histo_spheric.py|_
    """
    mode = kwargs.pop("mode", "")
    if len(args) == 2:  # interpret as (x, y)
        if "spher" in mode:
            return _histogramSpheric(args[0], args[1], **kwargs)
        if "hex" in mode:
            return _histogramHexBin(args[0], args[1], **kwargs)
        return _histogram2D(args[0], args[1], **kwargs)

    elif len(args) == 1:

        # extract a plain numpy array from the (possibly vedo) input object
        if isinstance(args[0], vedo.Volume):
            data = args[0].pointdata[0]
        elif isinstance(args[0], vedo.Points):
            pd0 = args[0].pointdata[0]
            # fix: use an explicit None check. The previous `if pd0:` truth-tested
            # a numpy array, which raises ValueError for arrays of size > 1.
            if pd0 is not None:
                data = pd0.ravel()
            else:
                data = args[0].celldata[0].ravel()
        else:
            data = np.array(args[0])

        if "spher" in mode:
            return _histogramSpheric(args[0][:, 0], args[0][:, 1], **kwargs)

        if len(data.shape) == 1:
            if "polar" in mode:
                return _histogramPolar(data, **kwargs)
            return _histogram1D(data, **kwargs)
        else:
            if "hex" in mode:
                return _histogramHexBin(args[0][:, 0], args[0][:, 1], **kwargs)
            return _histogram2D(args[0], **kwargs)

    # Unsupported number of positional arguments (0 or >2 without a mode match).
    # fix: print the whole args tuple; printing args[0] raised IndexError
    # when histogram() was called with no positional arguments at all.
    print("histogram(): Could not understand input", args)
    return None
def fit(points,
        deg=1,
        niter=0,
        nstd=3,
        xerrors=None,
        yerrors=None,
        vrange=None,
        res=250,
        lw=3,
        c='red4',
    ):
    """
    Polynomial fitting in 2D with parameter error and error bands calculation.

    Errors bars in both x and y are supported.

    Additional information about the fitting output can be accessed. E.g.:

    ``fit = fitPolynomial(pts)``

    - ``fit.coefficients``: contains the coefficient of the polynomial fit
    - ``fit.coefficientErrors``: errors on the fitting coefficients,
      these numbers only make sense if parameters are not correlated

    - ``fit.MonteCarloCoefficients``: fitting coefficient set from MC generation
    - ``fit.covarianceMatrix``: covariance matrix as a numpy array
    - ``fit.reducedChi2``: reduced chi-square of the fitting
    - ``fit.ndof``: number of degrees of freedom
    - ``fit.dataSigma``: mean data dispersion from the central fit assuming Chi2=1
    - ``fit.errorLines``: a ``vedo.Line`` object for the upper and lower error band
    - ``fit.errorBand``: the ``vedo.Mesh`` object representing the error band

    Errors on x and y can be specified. If left `None` an estimate is made from
    the statistical spread of the dataset itself. Errors are always assumed gaussian.

    :param int deg: degree of the polynomial to be fitted
    :param int niter: number of monte-carlo iterations to compute error bands.
        If set to 0, return the simple least-squares fit with naive error estimation
        on coefficients only. A reasonable non-zero value to set is about 500, in
        this case ``errorLines``, ``errorBand`` and the other class attributes are filled

    :param int nstd: nr. of standard deviation to use for error calculation
    :param list xerrors: array of the same length of points with the errors on x
    :param list yerrors: array of the same length of points with the errors on y
    :param list vrange: specify the domain range of the fitting line
        (only affects visualization, but can be used to extrapolate the fit
        outside the data range)

    :param int res: resolution of the output fitted line and error lines

    |fitPolynomial1| |fitPolynomial1.py|_

    |fitPolynomial2| |fitPolynomial2.py|_
    """
    if isinstance(points, vedo.pointcloud.Points):
        points = points.points()
    points = np.asarray(points)
    if len(points) == 2: # assume user is passing [allx, ally] instead of [(x,y), ...]
        points = np.c_[points[0],points[1]]
    x = points[:,0]
    y = points[:,1] # a third (z) column, if present, is ignored

    n = len(x)
    ndof = n - deg - 1
    if vrange is not None:
        x0, x1 = vrange
    else:
        x0, x1 = np.min(x), np.max(x)
        if xerrors is not None:
            # enlarge the drawn range by half an error bar on each side
            x0 -= xerrors[0]/2
            x1 += xerrors[-1]/2

    tol = (x1-x0)/1000   # small step used for numerical differentiation
    xr = np.linspace(x0,x1, res)

    # project x errs on y through the local slope of a preliminary fit
    if xerrors is not None:
        xerrors = np.asarray(xerrors)
        if yerrors is not None:
            yerrors = np.asarray(yerrors)
            w = 1.0/yerrors
            coeffs = np.polyfit(x, y, deg, w=w, rcond=None)
        else:
            coeffs = np.polyfit(x, y, deg, rcond=None)
            # fix: yerrors was still None here and the combination below
            # crashed with `None*None`. With no explicit y errors, the
            # projected x errors provide the whole uncertainty.
            # NOTE(review): points where the local derivative is zero still
            # end up with zero error (infinite weight below) — verify inputs.
            yerrors = np.zeros(n)
        # update yerrors, 1 bootstrap iteration is enough
        p1d = np.poly1d(coeffs)
        der = (p1d(x+tol)-p1d(x))/tol  # numerical derivative of the preliminary fit
        yerrors = np.sqrt(yerrors*yerrors + np.power(der*xerrors,2))

    # central fit, weighted by 1/yerrors when available
    if yerrors is not None:
        yerrors = np.asarray(yerrors)
        w = 1.0/yerrors
        coeffs, V = np.polyfit(x, y, deg, w=w, rcond=None, cov=True)
    else:
        w = 1
        coeffs, V = np.polyfit(x, y, deg, rcond=None, cov=True)

    p1d = np.poly1d(coeffs)
    theor = p1d(xr)
    l = shapes.Line(xr, theor, lw=lw, c=c).z(tol*2)  # central fit line (slightly above bands)
    l.coefficients = coeffs
    l.covarianceMatrix = V
    residuals2_sum = np.sum(np.power(p1d(x)-y, 2))/ndof
    sigma = np.sqrt(residuals2_sum)
    l.reducedChi2 = np.sum(np.power((p1d(x)-y)*w, 2))/ndof
    l.ndof = ndof
    l.dataSigma = sigma # worked out from data using chi2=1 hypo
    l.name = "LinePolynomialFit"

    if not niter:
        # naive errors from the covariance matrix diagonal only
        l.coefficientErrors = np.sqrt(np.diag(V))
        return l

    # Monte Carlo error estimation: refit after adding gaussian noise
    if yerrors is not None:
        sigma = yerrors
    else:
        w = None
        l.reducedChi2 = 1

    Theors, all_coeffs = [], []
    for i in range(niter):
        noise = np.random.randn(n)*sigma
        Coeffs = np.polyfit(x, y + noise, deg, w=w, rcond=None)
        all_coeffs.append(Coeffs)
        P1d = np.poly1d(Coeffs)
        Theor = P1d(xr)
        Theors.append(Theor)
    all_coeffs = np.array(all_coeffs)
    l.MonteCarloCoefficients = all_coeffs

    stds = np.std(Theors, axis=0)
    l.coefficientErrors = np.std(all_coeffs, axis=0)

    # (debug) the MC coefficient distributions and their correlations can be
    # inspected with vedo.pyplot.histogram(all_coeffs[:,i], ...)

    error_lines = []
    for i in [nstd, -nstd]:
        el = shapes.Line(xr, theor+stds*i, lw=1, alpha=0.2, c='k').z(tol)
        error_lines.append(el)
        el.name = "ErrorLine for sigma="+str(i)

    l.errorLines = error_lines
    l1 = error_lines[0].points().tolist()
    # close the polygon: upper band + reversed lower band + first point again
    cband = l1 + list(reversed(error_lines[1].points().tolist())) + [l1[0]]
    l.errorBand = shapes.Line(cband).triangulate().lw(0).c('k', 0.15)
    l.errorBand.name = "PolynomialFitErrorBand"
    return l
#########################################################################################
def _plotxy(
    data,
    format=None,
    aspect=4/3,
    xlim=None,
    ylim=None,
    xerrors=None,
    yerrors=None,
    title="",
    xtitle="x",
    ytitle="y",
    titleSize=None,
    c="k",
    alpha=1,
    ec=None,
    lc="k",
    la=1,
    lw=3,
    dashed=False,
    spline=False,
    errorBand=False,
    marker="",
    ms=None,
    mc=None,
    ma=None,
    pad=0.05,
    axes={},
):
    """
    Build a 2D xy-plot from an array of (x, y) points and return a Plot assembly.

    The points can be drawn as a line, dashed line or spline, optionally with
    markers, x/y error bars or a shaded error band. Passing a previously
    returned plot as ``format`` overlays this plot on it, reusing its ranges,
    aspect and y-scaling.

    Parameters are styling options: ``lc/la/lw`` line color/alpha/width,
    ``mc/ma/ms`` marker color/alpha/size, ``ec`` error-bar color, ``c/alpha``
    title color/alpha, ``pad`` fractional padding of the ranges, ``axes`` a
    dict of addons.Axes options (or 0/False to disable).
    """
    line=False
    if lw>0:
        line=True
    if marker == "" and not line and not spline:
        line = True

    # purge NaN from data (rows containing any NaN are dropped)
    validIds = np.all(np.logical_not(np.isnan(data)), axis=1)
    data = data[validIds]

    offs = 0  # z offset to stack overlapping plots
    if format is not None:  # reset to allow meaningful overlap
        xlim = format.xlim
        ylim = format.ylim
        aspect = format.aspect
        pad = format.pad
        title = ""
        xtitle = ""
        ytitle = ""
        offs = format.zmax

    # compute padded ranges from the data
    x0, y0 = np.min(data, axis=0)
    x1, y1 = np.max(data, axis=0)
    x0lim, x1lim = x0 - pad * (x1 - x0), x1 + pad * (x1 - x0)
    y0lim, y1lim = y0 - pad * (y1 - y0), y1 + pad * (y1 - y0)
    if y0lim == y1lim:  # in case y is constant
        y0lim = y0lim - (x1lim - x0lim) / 2
        y1lim = y1lim + (x1lim - x0lim) / 2
    elif x0lim == x1lim:  # in case x is constant
        x0lim = x0lim - (y1lim - y0lim) / 2
        x1lim = x1lim + (y1lim - y0lim) / 2

    # explicit limits override the computed ones
    if xlim is not None and xlim[0] is not None:
        x0lim = xlim[0]
    if xlim is not None and xlim[1] is not None:
        x1lim = xlim[1]
    if ylim is not None and ylim[0] is not None:
        y0lim = ylim[0]
    if ylim is not None and ylim[1] is not None:
        y1lim = ylim[1]

    dx = x1lim - x0lim
    dy = y1lim - y0lim
    if dx == 0 and dy == 0:  # in case x and y are all constant
        x0lim = x0lim - 1
        x1lim = x1lim + 1
        y0lim = y0lim - 1
        y1lim = y1lim + 1
        dx, dy = 1, 1

    # rescale y so the plot has the requested aspect ratio
    yscale = dx / dy / aspect
    y0lim, y1lim = y0lim * yscale, y1lim * yscale

    if format is not None:
        x0lim = format._x0lim
        y0lim = format._y0lim
        x1lim = format._x1lim
        y1lim = format._y1lim
        yscale = format.yscale
        dx = x1lim - x0lim
        dy = y1lim - y0lim

    offs += np.sqrt(dx * dx + dy * dy) / 10000

    scale = np.array([[1, yscale]])
    data = np.multiply(data, scale)

    acts = []

    # the line or spline
    if dashed:
        l = shapes.DashedLine(data, c=lc, alpha=la, lw=lw)
        acts.append(l)
    elif spline:
        l = shapes.KSpline(data).lw(lw).c(lc).alpha(la)
        acts.append(l)
    elif line:
        l = shapes.Line(data, c=lc, alpha=la).lw(lw)
        acts.append(l)

    # the point markers
    if marker:
        pts = shapes.Points(data)
        if mc is None:
            mc = lc
        if ma is None:
            ma = la

        if utils.isSequence(ms): ### variable point size
            mk = shapes.Marker(marker, s=1)
            msv = np.zeros_like(pts.points())
            msv[:, 0] = ms
            marked = shapes.Glyph(
                pts, glyphObj=mk, c=mc, orientationArray=msv, scaleByVectorSize=True
            )
        else: ### fixed point size
            if ms is None:
                ms = dx / 100.0
                # print('automatic ms =', ms)

            if utils.isSequence(mc):
                # print('mc is sequence')
                mk = shapes.Marker(marker, s=ms).triangulate()
                msv = np.zeros_like(pts.points())
                msv[:, 0] = 1
                marked = shapes.Glyph(
                    pts, glyphObj=mk, c=mc, orientationArray=msv, scaleByVectorSize=True
                )
            else:
                # print('mc is fixed color')
                mk = shapes.Marker(marker, s=ms).triangulate()
                marked = shapes.Glyph(pts, glyphObj=mk, c=mc)

        marked.alpha(ma).z(offs)
        acts.append(marked)

    # default error-bar color falls back to marker then line color
    if ec is None:
        if mc is not None:
            ec = mc
        else:
            ec = lc

    # horizontal error bars
    if xerrors is not None and not errorBand:
        if len(xerrors) != len(data):
            colors.printc("Error in plotxy(xerrors=...): mismatched array length.", c='r')
            return None
        errs = []
        for i, dta in enumerate(data):
            xval, yval = dta
            xerr = xerrors[i] / 2
            el = shapes.Line((xval - xerr, yval, offs), (xval + xerr, yval, offs))
            errs.append(el)
        mxerrs = merge(errs).c(ec).lw(lw).alpha(alpha).z(2 * offs)
        acts.append(mxerrs)

    # vertical error bars
    if yerrors is not None and not errorBand:
        if len(yerrors) != len(data):
            colors.printc("Error in plotxy(yerrors=...): mismatched array length.", c='r')
            return None
        errs = []
        for i in range(len(data)):
            xval, yval = data[i]
            yerr = yerrors[i] * yscale
            el = shapes.Line((xval, yval - yerr, offs), (xval, yval + yerr, offs))
            errs.append(el)
        myerrs = merge(errs).c(ec).lw(lw).alpha(alpha).z(3 * offs)
        acts.append(myerrs)

    # shaded error band built as a ribbon between y+err and y-err
    if errorBand:
        epsy = np.zeros_like(data)
        epsy[:, 1] = yerrors * yscale
        data3dup = data + epsy
        data3dup = np.c_[data3dup, np.zeros_like(yerrors)]
        data3d_down = data - epsy
        data3d_down = np.c_[data3d_down, np.zeros_like(yerrors)]
        band = shapes.Ribbon(data3dup, data3d_down).z(-offs)
        if ec is None:
            band.c(lc)
        else:
            band.c(ec)
        band.alpha(la).z(2 * offs)
        acts.append(band)

    # clip all actors to the plotting rectangle
    for a in acts:
        a.cutWithPlane([0, y0lim, 0], [0, 1, 0])
        a.cutWithPlane([0, y1lim, 0], [0, -1, 0])
        a.cutWithPlane([x0lim, 0, 0], [1, 0, 0])
        a.cutWithPlane([x1lim, 0, 0], [-1, 0, 0])
        a.lighting('off')

    if title:
        if titleSize is None:
            titleSize = dx / 40.0
        tit = shapes.Text3D(
            title,
            s=titleSize,
            c=c,
            depth=0,
            alpha=alpha,
            pos=((x0lim + x1lim) / 2, y1lim + (y1lim-y0lim) / 80, 0),
            justify="bottom-center",
        )
        tit.pickable(False).z(3 * offs)
        acts.append(tit)

    if axes == 1 or axes == True:
        axes = {}
    if isinstance(axes, dict): #####################
        # BUGFIX: work on a copy — the default argument `axes={}` is a single
        # shared dict, and mutating it below would leak titles/ranges of the
        # first call into every subsequent call (classic mutable-default bug).
        axes = dict(axes)
        ndiv = 6
        if "numberOfDivisions" in axes.keys():
            ndiv = axes["numberOfDivisions"]
        tp, ts = utils.makeTicks(y0lim / yscale, y1lim / yscale, ndiv / aspect)
        labs = []
        for i in range(1, len(tp) - 1):
            ynew = utils.linInterpolate(tp[i], [0, 1], [y0lim, y1lim])
            # print(i, tp[i], ynew, ts[i])
            labs.append([ynew, ts[i]])
        if "xtitle" not in axes: axes["xtitle"] = xtitle
        if "ytitle" not in axes: axes["ytitle"] = ytitle
        axes["yValuesAndLabels"] = labs
        axes["xrange"] = (x0lim, x1lim)
        axes["yrange"] = (y0lim, y1lim)
        axes["zrange"] = (0, 0)
        # axes["c"] = "k"
        axes["yUseBounds"] = True
        axs = addons.Axes(**axes)
        axs.name = "axes"
        asse = Plot(acts, axs)
        asse.axes = axs
        asse.SetOrigin(x0lim, y0lim, 0)
    else:
        # settings.xtitle = xtitle
        # settings.ytitle = ytitle
        asse = Plot(acts)

    # store plotting state so this object can be used as `format` later
    asse.yscale = yscale
    asse.xlim = xlim
    asse.ylim = ylim
    asse.aspect = aspect
    asse.pad = pad
    asse.title = title
    asse.xtitle = xtitle
    asse.ytitle = ytitle
    asse._x0lim = x0lim
    asse._y0lim = y0lim
    asse._x1lim = x1lim
    asse._y1lim = y1lim
    asse.zmax = offs * 3  # z-order
    asse.name = "plotxy"
    return asse
def _plotFxy(
    z,
    xlim=(0, 3),
    ylim=(0, 3),
    zlim=(None, None),
    showNan=True,
    zlevels=10,
    c=None,
    bc="aqua",
    alpha=1,
    texture="paper4",
    bins=(100, 100),
    axes=True,
):
    """
    Plot a real function z(x, y) as a surface over a rectangular xy domain.

    ``z`` may be a callable of two variables or a string expression which is
    compiled on the fly. Points where ``z`` raises are removed from the mesh
    and (if ``showNan``) marked with red dots. ``zlevels`` isolines can be
    drawn on top, and the surface can be clipped to ``zlim``.
    """
    if isinstance(z, str):
        try:
            # strip module prefixes so the expression works with the
            # names imported from `math` below
            z = z.replace("math.", "").replace("np.", "")
            namespace = locals()
            code = "from math import*\ndef zfunc(x,y): return " + z
            # NOTE(review): exec of a user-supplied string — do not pass
            # untrusted input to this function
            exec(code, namespace)
            z = namespace["zfunc"]
        except:
            colors.printc("Syntax Error in _plotFxy()", c='r')
            return None

    if c is not None:
        texture = None # disable

    # build a regular grid in the unit square, then remap it to the xy domain
    ps = vtk.vtkPlaneSource()
    ps.SetResolution(bins[0], bins[1])
    ps.SetNormal([0, 0, 1])
    ps.Update()
    poly = ps.GetOutput()
    dx = xlim[1] - xlim[0]
    dy = ylim[1] - ylim[0]

    # evaluate z at every grid vertex; failures are collected for removal
    todel, nans = [], []
    for i in range(poly.GetNumberOfPoints()):
        px, py, _ = poly.GetPoint(i)
        xv = (px + 0.5) * dx + xlim[0]
        yv = (py + 0.5) * dy + ylim[0]
        try:
            zv = z(xv, yv)
        except:
            zv = 0
            todel.append(i)
            nans.append([xv, yv, 0])
        poly.GetPoints().SetPoint(i, [xv, yv, zv])

    # remove all cells touching a point where the function failed
    if len(todel):
        cellIds = vtk.vtkIdList()
        poly.BuildLinks()
        for i in todel:
            poly.GetPointCells(i, cellIds)
            for j in range(cellIds.GetNumberOfIds()):
                poly.DeleteCell(cellIds.GetId(j))  # flag cell
        poly.RemoveDeletedCells()
        cl = vtk.vtkCleanPolyData()
        cl.SetInputData(poly)
        cl.Update()
        poly = cl.GetOutput()

    if not poly.GetNumberOfPoints():
        colors.printc("Function is not real in the domain", c='r')
        return None

    # clip the surface to the requested z range
    if zlim[0]:
        tmpact1 = Mesh(poly)
        a = tmpact1.cutWithPlane((0, 0, zlim[0]), (0, 0, 1))
        poly = a.polydata()
    if zlim[1]:
        tmpact2 = Mesh(poly)
        a = tmpact2.cutWithPlane((0, 0, zlim[1]), (0, 0, -1))
        poly = a.polydata()

    # if `c` names a colormap, use it for elevation coloring instead
    cmap=''
    if c in colors.cmaps_names:
        cmap = c
        c = None
        bc= None

    mesh = Mesh(poly, c, alpha).computeNormals().lighting("plastic")
    if cmap:
        mesh.addElevationScalars().cmap(cmap)
    if bc:
        mesh.bc(bc)
    if texture:
        mesh.texture(texture)

    acts = [mesh]

    # draw `zlevels` isolines by banded contouring on the elevation scalars
    if zlevels:
        elevation = vtk.vtkElevationFilter()
        elevation.SetInputData(poly)
        bounds = poly.GetBounds()
        elevation.SetLowPoint(0, 0, bounds[4])
        elevation.SetHighPoint(0, 0, bounds[5])
        elevation.Update()
        bcf = vtk.vtkBandedPolyDataContourFilter()
        bcf.SetInputData(elevation.GetOutput())
        bcf.SetScalarModeToValue()
        bcf.GenerateContourEdgesOn()
        bcf.GenerateValues(zlevels, elevation.GetScalarRange())
        bcf.Update()
        zpoly = bcf.GetContourEdgesOutput()
        zbandsact = Mesh(zpoly, "k", alpha).lw(1).lighting('off')
        zbandsact._mapper.SetResolveCoincidentTopologyToPolygonOffset()
        acts.append(zbandsact)

    # mark the points where the function is undefined
    if showNan and len(todel):
        bb = mesh.GetBounds()
        if bb[4] <= 0 and bb[5] >= 0:
            zm = 0.0
        else:
            zm = (bb[4] + bb[5]) / 2
        nans = np.array(nans) + [0, 0, zm]
        nansact = shapes.Points(nans, r=2, c="red", alpha=alpha)
        nansact.GetProperty().RenderPointsAsSpheresOff()
        acts.append(nansact)

    if axes:
        axs = addons.Axes(mesh)
        acts.append(axs)

    asse = Assembly(acts)
    asse.name = "plotFxy"
    if isinstance(z, str):
        asse.name += " " + z
    return asse
def _plotFz(
    z,
    x=(-1, 1),
    y=(-1, 1),
    zlimits=(None, None),
    cmap="PiYG",
    alpha=1,
    lw=0.1,
    bins=(75, 75),
    axes=True,
):
    """
    Plot a complex-valued function over a rectangular domain.

    The real part of z is used as the surface height, the imaginary part
    colors the surface through ``cmap`` (symmetric around zero). ``z`` may be
    a callable of two arguments or a string expression compiled on the fly.
    The surface can be clipped to ``zlimits`` along the real axis.
    """
    if isinstance(z, str):
        try:
            z = z.replace("np.", "")
            namespace = locals()
            code = "from math import*\ndef zfunc(x,y): return " + z
            # NOTE(review): exec of a user-supplied string — do not pass
            # untrusted input to this function
            exec(code, namespace)
            z = namespace["zfunc"]
        except:
            colors.printc("Syntax Error in complex plotFz()", c='r')
            return None

    # regular grid in the unit square, remapped to the xy domain below
    ps = vtk.vtkPlaneSource()
    ps.SetResolution(bins[0], bins[1])
    ps.SetNormal([0, 0, 1])
    ps.Update()
    poly = ps.GetOutput()
    dx = x[1] - x[0]
    dy = y[1] - y[0]

    arrImg = []  # imaginary parts, used as color scalars
    for i in range(poly.GetNumberOfPoints()):
        px, py, _ = poly.GetPoint(i)
        xv = (px + 0.5) * dx + x[0]
        yv = (py + 0.5) * dy + y[0]
        try:
            # BUGFIX: was np.complex(...) — that alias was deprecated in
            # NumPy 1.20 and removed in 1.24; use the builtin complex type.
            zv = z(complex(xv), complex(yv))
        except:
            zv = 0
        poly.GetPoints().SetPoint(i, [xv, yv, np.real(zv)])
        arrImg.append(np.imag(zv))

    mesh = Mesh(poly, alpha).lighting("plastic")
    # symmetric color range so zero imaginary part maps to the cmap center
    v = max(abs(np.min(arrImg)), abs(np.max(arrImg)))
    mesh.cmap(cmap, arrImg, vmin=-v, vmax=v)
    mesh.computeNormals().lw(lw)

    if zlimits[0]:
        mesh.cutWithPlane((0, 0, zlimits[0]), (0, 0, 1))
    if zlimits[1]:
        mesh.cutWithPlane((0, 0, zlimits[1]), (0, 0, -1))

    acts = [mesh]
    if axes:
        axs = addons.Axes(mesh, ztitle="Real part")
        acts.append(axs)
    asse = Assembly(acts)
    asse.name = "plotFz"
    if isinstance(z, str):
        asse.name += " " + z
    return asse
def _plotPolar(
    rphi,
    title="",
    tsize=0.1,
    lsize=0.05,
    r1=0,
    r2=1,
    c="blue",
    bc="k",
    alpha=1,
    ps=5,
    lw=3,
    deg=False,
    vmax=None,
    fill=False,
    spline=False,
    smooth=0,
    showDisc=True,
    nrays=8,
    showLines=True,
    showAngles=True,
    ):
    """
    Build a polar plot from (theta, radius) pairs and return an Assembly.

    Radii are normalized to ``vmax`` (default: the data maximum) and mapped
    to the annulus between ``r1`` and ``r1+r2``. The curve can be a closed
    line or spline, optionally filled; a surrounding disc with angular rays
    and degree labels is drawn when ``showDisc`` is True.
    """
    if len(rphi) == 2:
        # two separate arrays given: zip them into (theta, r) pairs
        rphi = np.stack((rphi[0], rphi[1]), axis=1)

    rphi = np.array(rphi)
    thetas = rphi[:, 0]
    radii = rphi[:, 1]

    k = 180 / np.pi  # radians -> degrees conversion factor
    if deg:
        thetas = np.array(thetas) / k

    vals = []
    for v in thetas:  # normalize range to [0, 2*pi)
        t = np.arctan2(np.sin(v), np.cos(v))
        if t < 0:
            t += 2 * np.pi
        vals.append(t)
    thetas = np.array(vals)

    if vmax is None:
        vmax = np.max(radii)

    angles = []
    points = []
    # convert each (theta, r) to a cartesian point on the annulus
    for i in range(len(thetas)):
        t = thetas[i]
        r = (radii[i]) / vmax * r2 + r1
        ct, st = np.cos(t), np.sin(t)
        points.append([r * ct, r * st, 0])
    p0 = points[0]
    points.append(p0)  # close the curve

    r2e = r1 + r2  # outer radius of the plot disc

    lines = None
    if spline:
        lines = shapes.KSpline(points, closed=True)
        lines.c(c).lw(lw).alpha(alpha)
    elif lw:
        lines = shapes.Line(points)
        lines.c(c).lw(lw).alpha(alpha)
        points.pop()

    ptsact = None
    if ps:
        ptsact = shapes.Points(points, r=ps, c=c, alpha=alpha)

    filling = None
    if fill and lw:
        # triangle fan from the origin over the curve points
        faces = []
        coords = [[0, 0, 0]] + lines.points().tolist()
        for i in range(1, lines.N()):
            faces.append([0, i, i + 1])
        filling = Mesh([coords, faces]).c(c).alpha(alpha)

    back = None
    back2 = None
    if showDisc:
        # outer rim and half-radius guide circle, slightly behind the data
        back = shapes.Disc(r1=r2e, r2=r2e * 1.01, c=bc, res=(1,360))
        back.z(-0.01).lighting('off').alpha(alpha)
        back2 = shapes.Disc(r1=r2e/2, r2=r2e/2 * 1.005, c=bc, res=(1,360))
        back2.z(-0.01).lighting('off').alpha(alpha)

    ti = None
    if title:
        ti = shapes.Text3D(title, (0, 0, 0), s=tsize, depth=0, justify="top-center")
        ti.pos(0, -r2e * 1.15, 0.01)

    rays = []
    if showDisc:
        rgap = 0.05  # radial gap between rim and angle labels
        for t in np.linspace(0, 2 * np.pi, num=nrays, endpoint=False):
            ct, st = np.cos(t), np.sin(t)
            if showLines:
                # solid ray plus a dashed half-step ray between rays
                l = shapes.Line((0, 0, -0.01), (r2e * ct * 1.03, r2e * st * 1.03, -0.01))
                rays.append(l)
                ct2, st2 = np.cos(t+np.pi/nrays), np.sin(t+np.pi/nrays)
                lm = shapes.DashedLine((0, 0, -0.01),
                                       (r2e * ct2, r2e * st2, -0.01),
                                       spacing=0.25)
                rays.append(lm)
            elif showAngles:  # just the ticks
                l = shapes.Line(
                    (r2e * ct * 0.98, r2e * st * 0.98, -0.01),
                    (r2e * ct * 1.03, r2e * st * 1.03, -0.01),
                )
            if showAngles:
                # pick a text justification that keeps the label outside the disc
                if 0 <= t < np.pi / 2:
                    ju = "bottom-left"
                elif t == np.pi / 2:
                    ju = "bottom-center"
                elif np.pi / 2 < t <= np.pi:
                    ju = "bottom-right"
                elif np.pi < t < np.pi * 3 / 2:
                    ju = "top-right"
                elif t == np.pi * 3 / 2:
                    ju = "top-center"
                else:
                    ju = "top-left"
                a = shapes.Text3D(int(t * k), pos=(0, 0, 0), s=lsize, depth=0, justify=ju)
                a.pos(r2e * ct * (1 + rgap), r2e * st * (1 + rgap), -0.01)
                angles.append(a)

    mrg = merge(back, back2, angles, rays, ti)
    if mrg:
        mrg.color(bc).alpha(alpha).lighting('off')

    rh = Assembly([lines, ptsact, filling] + [mrg])
    rh.base = np.array([0, 0, 0])
    rh.top = np.array([0, 0, 1])
    rh.name = "plotPolar"
    return rh
def _plotSpheric(rfunc, normalize=True, res=33, scalarbar=True, c="grey", alpha=0.05, cmap="jet"):
    """
    Plot ``rfunc(theta, phi)`` as a radial surface deformed from a sphere.

    A template sphere of resolution ``res`` is kept as a faint wireframe;
    its vertices are radially displaced by the function value (optionally
    normalized to 1). Vertices where ``rfunc`` fails or returns NaN are
    removed from the surface and marked with red points. Returns an Assembly.
    """
    template = shapes.Sphere(res=res, quads=True)
    template.alpha(alpha).c(c).wireframe()

    # spherical coordinates of the template vertices
    r, theta, phi = utils.cart2spher(*template.points().T)

    radii = []
    bad_ids = []  # indices where rfunc failed or produced NaN
    for idx, (th, ph) in enumerate(zip(theta, phi)):
        try:
            value = rfunc(th, ph)
            is_bad = np.isnan(value)
        except:
            is_bad = True
            value = 1
        if is_bad:
            bad_ids.append(idx)
            radii.append(1)
        else:
            radii.append(value)
    radii = np.array(radii)

    if normalize:
        radii = radii / np.max(radii)
        radii[bad_ids] = 1

    nan_actors = []
    if len(bad_ids):
        red_positions = utils.spher2cart(radii[bad_ids], theta[bad_ids], phi[bad_ids])
        nan_actors.append(shapes.Points(red_positions, r=4, c="r"))

    # displace a clone of the template to the new radial positions
    positions = utils.spher2cart(radii, theta, phi)
    surf = template.clone().points(positions)
    if len(bad_ids):
        surf.deletePoints(bad_ids)
    surf.alpha(1).wireframe(0).lw(0.1)
    surf.cmap(cmap, radii)
    surf.computeNormals()

    if scalarbar:
        xm = np.max([np.max(positions[0]), 1])
        ym = np.max([np.abs(np.max(positions[1])), 1])
        surf.mapper().SetScalarRange(np.min(radii), np.max(radii))
        bar3d = surf.addScalarBar3D(sx=xm * 0.07, sy=ym, c='k').scalarbar
        bar3d.rotateX(90).pos(xm * 1.1, 0, -0.5)
    else:
        bar3d = None

    template.pickable(False)
    out = Assembly([surf, template] + nan_actors + [bar3d])
    out.name = "plotSpheric"
    return out
#########################################################################################
def _barplot(
    data,
    format=None,
    errors=False,
    aspect=4/3,
    xlim=None,
    ylim=(0,None),
    xtitle=" ",
    ytitle="counts",
    title="",
    titleSize=None,
    titleColor=None,
    logscale=False,
    fill=True,
    c="olivedrab",
    gap=0.02,
    alpha=1,
    outline=False,
    lw=2,
    lc="k",
    pad=0.05,
    axes={},
    bc="k",
    ):
    """
    Build a bar plot from pre-binned data and return a Plot assembly.

    ``data`` is ``[counts, labels]``, ``[counts, labels, colors]`` or
    ``[counts, labels, colors, edges]``; missing edges default to unit bins
    centered on integers, missing colors default to ``c``. Bars can be drawn
    filled and/or as an outline, with optional sqrt(N) error bars and a log10
    y scale. A previously returned plot passed as ``format`` overlays this
    one on it.
    """
    offs = 0  # z offset to stack overlapping plots

    # unpack the flexible data argument
    if len(data) == 4:
        counts, xlabs, cols, edges = data
    elif len(data) == 3:
        counts, xlabs, cols = data
        edges = np.array(range(len(counts)+1))+0.5
    elif len(data) == 2:
        counts, xlabs = data
        edges = np.array(range(len(counts)+1))+0.5
        cols = [c] * len(counts)
    else:
        m = "barplot error: data must be given as [counts, labels, colors, edges] not\n"
        colors.printc(m, data, c='r')
        colors.printc("                 bin edges and colors are optional. Abort.", c='r')
        raise RuntimeError()

    counts = np.asarray(counts)
    edges  = np.asarray(edges)

    # sanity checks
    assert len(counts) == len(xlabs)
    assert len(counts) == len(cols)
    assert len(counts) == len(edges)-1

    if format is not None:  # reset to allow meaningful overlap
        xlim = format.xlim
        ylim = format.ylim
        aspect = format.aspect
        pad = format.pad
        axes = 0
        title = ""
        xtitle = ""
        ytitle = ""
        offs = format.zmax

    if logscale:
        counts = np.log10(counts + 1)
        if ytitle=='counts':
            ytitle='log_10 (counts+1)'

    # compute padded ranges from the bin edges and counts
    x0, x1 = np.min(edges), np.max(edges)
    y0, y1 = 0, np.max(counts)
    binsize = edges[1] - edges[0]

    x0lim, x1lim = x0 - pad * (x1 - x0), x1 + pad * (x1 - x0)
    y0lim, y1lim = y0 - pad * (y1 - y0) / 100, y1 + pad * (y1 - y0)
    if errors:
        y1lim += np.sqrt(y1) / 2

    if y0lim == y1lim:  # in case y is constant
        y0lim = y0lim - (x1lim - x0lim) / 2
        y1lim = y1lim + (x1lim - x0lim) / 2
    elif x0lim == x1lim:  # in case x is constant
        x0lim = x0lim - (y1lim - y0lim) / 2
        x1lim = x1lim + (y1lim - y0lim) / 2

    # explicit limits override the computed ones
    if xlim is not None and xlim[0] is not None:
        x0lim = xlim[0]
    if xlim is not None and xlim[1] is not None:
        x1lim = xlim[1]
    if ylim is not None and ylim[0] is not None:
        y0lim = ylim[0]
    if ylim is not None and ylim[1] is not None:
        y1lim = ylim[1]

    dx = x1lim - x0lim
    dy = y1lim - y0lim

    if dx == 0 and dy == 0:  # in case x and y are all constant
        x0lim = x0lim - 1
        x1lim = x1lim + 1
        y0lim = y0lim - 1
        y1lim = y1lim + 1
        dx, dy = 1, 1

    # rescale y so the plot has the requested aspect ratio
    yscale = dx / dy / aspect
    y0lim, y1lim = y0lim * yscale, y1lim * yscale

    if format is not None:
        x0lim = format._x0lim
        y0lim = format._y0lim
        x1lim = format._x1lim
        y1lim = format._y1lim
        yscale = format.yscale
        dx = x1lim - x0lim
        dy = y1lim - y0lim

    offs += np.sqrt(dx * dx + dy * dy) / 10000

    counts = counts * yscale
    centers = (edges[0:-1] + edges[1:]) / 2

    rs = []
    maxheigth = 0
    if fill: #####################
        if outline:
            gap = 0

        # one rectangle per bin, shrunk by `gap` on each side
        for i in range(len(centers)):
            p0 = (edges[i] + gap * binsize, 0, 0)
            p1 = (edges[i + 1] - gap * binsize, counts[i], 0)
            r = shapes.Rectangle(p0, p1)
            r.origin(p0).PickableOff()
            maxheigth = max(maxheigth, p1[1])
            if c in colors.cmaps_names:
                col = colors.colorMap((p0[0]+p1[0])/2, c, edges[0], edges[-1])
            else:
                col = cols[i]
            r.color(col).alpha(alpha).lighting('off').z(offs)
            r.name = f'bar_{i}'
            rs.append(r)

    if outline or not fill: #####################
        # staircase outline following the top of the bars
        lns = [[edges[0], 0, 0]]
        for i in range(len(centers)):
            lns.append([edges[i], counts[i], 0])
            lns.append([edges[i + 1], counts[i], 0])
            maxheigth = max(maxheigth, counts[i])
        lns.append([edges[-1], 0, 0])
        outl = shapes.Line(lns, c=lc, alpha=alpha, lw=lw).z(offs)
        # NOTE(review): `i` here is the last loop index; with zero bins this
        # would raise NameError — confirm empty input is not expected
        outl.name = f'bar_outline_{i}'
        rs.append(outl)

    bin_centers_pos = []
    for i in range(len(centers)):
        if counts[i]:
            bin_centers_pos.append([centers[i], counts[i], 0])

    if errors: #####################
        # sqrt(N) error bars, drawn in original (unscaled) count units
        for bcp in bin_centers_pos:
            x = bcp[0]
            f = bcp[1]
            err = np.sqrt(f / yscale) * yscale
            el = shapes.Line([x, f-err/2, 0], [x, f+err/2, 0], c=lc, alpha=alpha, lw=lw)
            el.z(offs * 1.9)
            rs.append(el)
            # print('errors', el.z())

    # clip all actors to the plotting rectangle
    for a in rs: #####################
        a.cutWithPlane([0, y0lim, 0], [0, 1, 0])
        a.cutWithPlane([0, y1lim, 0], [0, -1, 0])
        a.cutWithPlane([x0lim, 0, 0], [1, 0, 0])
        a.cutWithPlane([x1lim, 0, 0], [-1, 0, 0])
        a.lighting('off')

    if title: #####################
        if titleColor is None:
            titleColor = bc
        if titleSize is None:
            titleSize = dx / 40.0
        tit = shapes.Text3D(
            title,
            s=titleSize,
            c=titleColor,
            depth=0,
            alpha=alpha,
            pos=((x0lim + x1lim) / 2, y1lim + (y1lim-y0lim) / 80, 0),
            justify="bottom-center",
        )
        tit.pickable(False).z(2.5 * offs)
        rs.append(tit)

    if axes == 1 or axes == True: #####################
        axes = {}
    if isinstance(axes, dict):
        # NOTE(review): this mutates `axes` in place; with the shared default
        # `axes={}` values can leak between calls — consider copying first
        ndiv = 6
        if "numberOfDivisions" in axes:
            ndiv = axes["numberOfDivisions"]
        tp, ts = utils.makeTicks(y0lim / yscale, y1lim / yscale, ndiv / aspect)
        ylabs = []
        for i in range(1, len(tp) - 1):
            ynew = utils.linInterpolate(tp[i], [0, 1], [y0lim, y1lim])
            ylabs.append([ynew, ts[i]])
        axes["yValuesAndLabels"] = ylabs
        _xlabs = []
        for i in range(len(centers)):
            _xlabs.append([centers[i], str(xlabs[i])])
        axes["xValuesAndLabels"] = _xlabs
        if "xtitle" not in axes: axes["xtitle"] = xtitle
        if "ytitle" not in axes: axes["ytitle"] = ytitle
        axes["xrange"] = (x0lim, x1lim)
        axes["yrange"] = (y0lim, y1lim)
        axes["zrange"] = (0, 0)
        axes["c"] = bc
        axs = addons.Axes(**axes)
        axs.name = "axes"
        asse = Plot(rs, axs)
        asse.axes = axs
        asse.SetOrigin(x0lim, y0lim, 0)
    else:
        # settings.xtitle = xtitle
        # settings.ytitle = ytitle
        asse = Plot(rs)

    # store plotting state so this object can be used as `format` later
    asse.yscale = yscale
    asse.xlim = xlim
    asse.ylim = ylim
    asse.aspect = aspect
    asse.pad = pad
    asse.title = title
    asse.xtitle = xtitle
    asse.ytitle = ytitle
    asse._x0lim = x0lim
    asse._y0lim = y0lim
    asse._x1lim = x1lim
    asse._y1lim = y1lim
    asse.zmax = offs * 3  # z-order
    asse.bins = edges
    asse.centers = centers
    asse.freqs = counts / yscale
    asse.name = "BarPlot"
    return asse
#########################################################################################
def _histogram1D(
    data,
    format=None,
    bins=25,
    aspect=4/3,
    xlim=None,
    ylim=(0,None),
    errors=False,
    title="",
    xtitle=" ",
    ytitle="counts",
    titleSize=None,
    titleColor=None,
    density=False,
    logscale=False,
    fill=True,
    c="olivedrab",
    gap=0.02,
    alpha=1,
    outline=False,
    lw=2,
    lc="k",
    marker="",
    ms=None,
    mc=None,
    ma=None,
    pad=0.05,
    axes={},
    bc="k",
    ):
    """
    Build a 1D histogram of ``data`` and return a Plot assembly.

    Bins are computed with ``np.histogram``; frequencies can be normalized
    to a density or shown on a log10 scale. Bars can be drawn filled and/or
    outlined, with optional markers at the bin centers and sqrt(N) error
    bars. A previously returned plot passed as ``format`` overlays this
    histogram on it, reusing its binning, ranges and y scaling.
    """
    # purge NaN from data with an elementwise mask.
    # BUGFIX: the previous `np.all(np.logical_not(np.isnan(data)))` collapsed
    # to a single scalar boolean (no axis given), so one NaN discarded the
    # whole dataset and clean data acquired a spurious leading axis.
    data = np.asarray(data)
    data = data[np.logical_not(np.isnan(data))]

    offs = 0  # z offset to stack overlapping plots
    if format is not None:  # reset to allow meaningful overlap
        xlim = format.xlim
        ylim = format.ylim
        aspect = format.aspect
        pad = format.pad
        bins = format.bins
        axes = 0
        title = ""
        xtitle = ""
        ytitle = ""
        offs = format.zmax

    fs, edges = np.histogram(data, bins=bins, range=xlim)
    # print('frequencies', fs)
    # print('edges', edges)

    if density:
        # normalize so the histogram integrates to 1
        ntot = len(data.ravel())
        binsize = edges[1]-edges[0]
        fs = fs/(ntot*binsize)
        if ytitle=='counts':
            ytitle=f"counts/({ntot}~\dot~{utils.precision(binsize,3)})"
    elif logscale:
        fs = np.log10(fs + 1)
        if ytitle=='counts':
            ytitle='log_10 (counts+1)'

    # compute padded ranges from the bin edges and frequencies
    x0, x1 = np.min(edges), np.max(edges)
    y0, y1 = 0, np.max(fs)
    binsize = edges[1] - edges[0]

    x0lim, x1lim = x0 - pad * (x1 - x0), x1 + pad * (x1 - x0)
    y0lim, y1lim = y0 - pad * (y1 - y0) / 100, y1 + pad * (y1 - y0)
    if errors:
        y1lim += np.sqrt(y1) / 2

    if y0lim == y1lim:  # in case y is constant
        y0lim = y0lim - (x1lim - x0lim) / 2
        y1lim = y1lim + (x1lim - x0lim) / 2
    elif x0lim == x1lim:  # in case x is constant
        x0lim = x0lim - (y1lim - y0lim) / 2
        x1lim = x1lim + (y1lim - y0lim) / 2

    # explicit limits override the computed ones
    if xlim is not None and xlim[0] is not None:
        x0lim = xlim[0]
    if xlim is not None and xlim[1] is not None:
        x1lim = xlim[1]
    if ylim is not None and ylim[0] is not None:
        y0lim = ylim[0]
    if ylim is not None and ylim[1] is not None:
        y1lim = ylim[1]

    dx = x1lim - x0lim
    dy = y1lim - y0lim

    if dx == 0 and dy == 0:  # in case x and y are all constant
        x0lim = x0lim - 1
        x1lim = x1lim + 1
        y0lim = y0lim - 1
        y1lim = y1lim + 1
        dx, dy = 1, 1

    # rescale y so the plot has the requested aspect ratio
    yscale = dx / dy / aspect
    y0lim, y1lim = y0lim * yscale, y1lim * yscale

    if format is not None:
        x0lim = format._x0lim
        y0lim = format._y0lim
        x1lim = format._x1lim
        y1lim = format._y1lim
        yscale = format.yscale
        dx = x1lim - x0lim
        dy = y1lim - y0lim

    offs += np.sqrt(dx * dx + dy * dy) / 10000

    fs = fs * yscale

    if utils.isSequence(bins):
        myedges = np.array(bins)
        bins = len(bins) - 1
    else:
        myedges = edges

    rs = []
    maxheigth = 0
    if fill: #####################
        if outline:
            gap = 0

        # one rectangle per bin, shrunk by `gap` on each side
        for i in range(bins):
            p0 = (myedges[i] + gap * binsize, 0, 0)
            p1 = (myedges[i + 1] - gap * binsize, fs[i], 0)
            r = shapes.Rectangle(p0, p1)
            r.origin(p0).PickableOff()
            maxheigth = max(maxheigth, p1[1])
            if c in colors.cmaps_names:
                col = colors.colorMap((p0[0]+p1[0])/2, c, myedges[0], myedges[-1])
            else:
                col = c
            r.color(col).alpha(alpha).lighting('off').z(offs)
            rs.append(r)
            # print('rectangles', r.z())

    if outline: #####################
        # staircase outline following the top of the bins
        lns = [[myedges[0], 0, 0]]
        for i in range(bins):
            lns.append([myedges[i], fs[i], 0])
            lns.append([myedges[i + 1], fs[i], 0])
            maxheigth = max(maxheigth, fs[i])
        lns.append([myedges[-1], 0, 0])
        outl = shapes.Line(lns, c=lc, alpha=alpha, lw=lw).z(offs)
        rs.append(outl)
        # print('histo outline', outl.z())

    bin_centers_pos = []
    for i in range(bins):
        x = (myedges[i] + myedges[i + 1]) / 2
        if fs[i]:
            bin_centers_pos.append([x, fs[i], 0])

    if marker: #####################
        # markers at the non-empty bin centers
        pts = shapes.Points(bin_centers_pos)
        if mc is None:
            mc = lc
        if ma is None:
            ma = alpha

        if utils.isSequence(ms): ### variable point size
            mk = shapes.Marker(marker, s=1)
            msv = np.zeros_like(pts.points())
            msv[:, 0] = ms
            marked = shapes.Glyph(
                pts, glyphObj=mk, c=mc, orientationArray=msv, scaleByVectorSize=True
            )
        else: ### fixed point size
            if ms is None:
                ms = dx / 100.0
            if utils.isSequence(mc):
                mk = shapes.Marker(marker, s=ms)
                msv = np.zeros_like(pts.points())
                msv[:, 0] = 1
                marked = shapes.Glyph(
                    pts, glyphObj=mk, c=mc, orientationArray=msv, scaleByVectorSize=True
                )
            else:
                mk = shapes.Marker(marker, s=ms)
                marked = shapes.Glyph(pts, glyphObj=mk, c=mc)

        marked.alpha(ma).z(offs * 2)
        # print('marker', marked.z())
        rs.append(marked)

    if errors: #####################
        # sqrt(N) error bars, computed in original (unscaled) count units
        for bcp in bin_centers_pos:
            x = bcp[0]
            f = bcp[1]
            err = np.sqrt(f / yscale) * yscale
            el = shapes.Line([x, f-err/2, 0], [x, f+err/2, 0], c=lc, alpha=alpha, lw=lw)
            el.z(offs * 1.9)
            rs.append(el)
            # print('errors', el.z())

    # clip all actors to the plotting rectangle
    for a in rs: #####################
        a.cutWithPlane([0, y0lim, 0], [0, 1, 0])
        a.cutWithPlane([0, y1lim, 0], [0, -1, 0])
        a.cutWithPlane([x0lim, 0, 0], [1, 0, 0])
        a.cutWithPlane([x1lim, 0, 0], [-1, 0, 0])
        a.lighting('off').phong()

    if title: #####################
        if titleColor is None:
            titleColor = bc
        if titleSize is None:
            titleSize = dx / 40.0
        tit = shapes.Text3D(
            title,
            s=titleSize,
            c=titleColor,
            depth=0,
            alpha=alpha,
            pos=((x0lim + x1lim) / 2, y1lim + (y1lim-y0lim) / 80, 0),
            justify="bottom-center",
        )
        tit.pickable(False).z(2.5 * offs)
        rs.append(tit)

    if axes == 1 or axes == True:
        axes = {}
    if isinstance(axes, dict): #####################
        # BUGFIX: work on a copy — the default argument `axes={}` is a single
        # shared dict and mutating it would leak titles/ranges between calls.
        axes = dict(axes)
        ndiv = 6
        if "numberOfDivisions" in axes.keys():
            ndiv = axes["numberOfDivisions"]
        tp, ts = utils.makeTicks(y0lim / yscale, y1lim / yscale, ndiv / aspect)
        labs = []
        for i in range(1, len(tp) - 1):
            ynew = utils.linInterpolate(tp[i], [0, 1], [y0lim, y1lim])
            labs.append([ynew, ts[i]])
        if "xtitle" not in axes: axes["xtitle"] = xtitle
        if "ytitle" not in axes: axes["ytitle"] = ytitle
        axes["yValuesAndLabels"] = labs
        axes["xrange"] = (x0lim, x1lim)
        axes["yrange"] = (y0lim, y1lim)
        axes["zrange"] = (0, 0)
        axes["c"] = bc
        axs = addons.Axes(**axes)
        axs.name = "axes"
        asse = Plot(rs, axs)
        asse.axes = axs
        asse.SetOrigin(x0lim, y0lim, 0)
    else:
        # settings.xtitle = xtitle
        # settings.ytitle = ytitle
        asse = Plot(rs)

    # store plotting state so this object can be used as `format` later
    asse.yscale = yscale
    asse.xlim = xlim
    asse.ylim = ylim
    asse.aspect = aspect
    asse.pad = pad
    asse.title = title
    asse.xtitle = xtitle
    asse.ytitle = ytitle
    asse._x0lim = x0lim
    asse._y0lim = y0lim
    asse._x1lim = x1lim
    asse._y1lim = y1lim
    asse.zmax = offs * 3  # z-order
    asse.bins = edges
    asse.centers = (edges[0:-1] + edges[1:]) / 2
    asse.freqs = fs / yscale
    asse.name = "histogram1D"
    return asse
def _histogram2D(
    xvalues,
    yvalues=None,
    format=None,
    bins=25,
    aspect=1,
    xlim=None,
    ylim=None,
    weights=None,
    cmap="cividis",
    alpha=1,
    title="",
    xtitle="x",
    ytitle="y",
    ztitle="z",
    titleSize=None,
    titleColor=None,
    # logscale=False,
    lw=0,
    scalarbar=True,
    axes=True,
    bc="k",
    ):
    """
    Build a 2D histogram of (x, y) samples as a colored grid and return a
    Plot assembly.

    Binning is done with ``np.histogram2d``; each cell of a shapes.Grid is
    colored by its frequency through ``cmap``. ``xvalues`` may also be an
    (N, 2) array of points with ``yvalues=None``. A previously returned plot
    passed as ``format`` overlays this histogram on it.
    """
    offs = 0  # z offset to stack overlapping plots
    if format is not None:  # reset to allow meaningful overlap
        xlim = format.xlim
        ylim = format.ylim
        aspect = format.aspect
        bins = format.bins
        axes = 0
        title = ""
        xtitle = ""
        ytitle = ""
        ztitle = ""
        offs = format.zmax

    if yvalues is None:
        # assume [(x1,y1), (x2,y2) ...] format
        yvalues = xvalues[:, 1]
        xvalues = xvalues[:, 0]

    if isinstance(bins, int):
        bins = (bins, bins)
    H, xedges, yedges = np.histogram2d(xvalues, yvalues, weights=weights,
                                       bins=bins, range=(xlim, ylim))

    x0lim, x1lim = np.min(xedges), np.max(xedges)
    y0lim, y1lim = np.min(yedges), np.max(yedges)
    dx, dy = x1lim - x0lim, y1lim - y0lim

    if dx == 0 and dy == 0:  # in case x and y are all constant
        x0lim = x0lim - 1
        x1lim = x1lim + 1
        y0lim = y0lim - 1
        y1lim = y1lim + 1
        dx, dy = 1, 1

    # rescale y so the plot has the requested aspect ratio
    yscale = dx / dy / aspect
    y0lim, y1lim = y0lim * yscale, y1lim * yscale

    acts = []

    #####################
    # grid with one quad per bin, colored per-cell by the frequencies
    g = shapes.Grid(
        pos=[(x0lim + x1lim) / 2, (y0lim + y1lim) / 2, 0],
        sx=dx,
        sy=dy * yscale,
        resx=bins[0],
        resy=bins[1],
    )
    g.alpha(alpha).lw(lw).wireframe(0).flat().lighting('off')
    g.cmap(cmap, np.ravel(H.T), on='cells')
    g.SetOrigin(x0lim, y0lim, 0)
    if scalarbar:
        sc = g.addScalarBar3D(c=bc).scalarbar
        # rescale the scalarbar to match the plot height
        scy0, scy1 = sc.ybounds()
        sc_scale = (y1lim-y0lim)/(scy1-scy0)
        sc.scale(sc_scale)
        acts.append(sc)
    g.base = np.array([0, 0, 0])
    g.top  = np.array([0, 0, 1])
    acts.append(g)

    if title: #####################
        if titleColor is None:
            titleColor = bc
        if titleSize is None:
            titleSize = dx / 40.0
        tit = shapes.Text3D(
            title,
            s=titleSize,
            c=titleColor,
            depth=0,
            alpha=alpha,
            pos=((x0lim + x1lim) / 2, y1lim + (y1lim-y0lim) / 80, 0),
            justify="bottom-center",
        )
        tit.pickable(False).z(2.5 * offs)
        acts.append(tit)

    if axes == 1 or axes == True: #####################
        axes = {"xyGridTransparent": True, "xyAlpha": 0}
    if isinstance(axes, dict):
        # NOTE(review): a caller-supplied axes dict is mutated in place here
        ndiv = 6
        if "numberOfDivisions" in axes.keys():
            ndiv = axes["numberOfDivisions"]
        tp, ts = utils.makeTicks(y0lim / yscale, y1lim / yscale, ndiv / aspect)
        labs = []
        for i in range(1, len(tp) - 1):
            ynew = utils.linInterpolate(tp[i], [0, 1], [y0lim, y1lim])
            labs.append([ynew, ts[i]])
        if "xtitle" not in axes: axes["xtitle"] = xtitle
        if "ytitle" not in axes: axes["ytitle"] = ytitle
        if "ztitle" not in axes: axes["ztitle"] = ztitle
        axes["yValuesAndLabels"] = labs
        axes["xrange"] = (x0lim, x1lim)
        axes["yrange"] = (y0lim, y1lim)
        axes["zrange"] = (0, 0) # todo
        axes["c"] = bc
        axs = addons.Axes(**axes)
        axs.name = "axes"
        asse = Plot(acts, axs)
        asse.axes = axs
        asse.SetOrigin(x0lim, y0lim, 0)
    else:
        # settings.xtitle = xtitle
        # settings.ytitle = ytitle
        # settings.ytitle = ztitle
        asse = Plot(acts)

    # store plotting state so this object can be used as `format` later
    asse.yscale = yscale
    asse.xlim = xlim
    asse.ylim = ylim
    asse.aspect = aspect
    asse.title = title
    asse.xtitle = xtitle
    asse.ytitle = ytitle
    asse._x0lim = x0lim
    asse._y0lim = y0lim
    asse._x1lim = x1lim
    asse._y1lim = y1lim
    asse.freqs = H
    asse.bins = (xedges, yedges)
    asse.zmax = offs * 3  # z-order
    asse.name = "histogram2D"
    return asse
def _histogramHexBin(
    xvalues,
    yvalues,
    xtitle="",
    ytitle="",
    ztitle="",
    bins=12,
    vrange=None,
    norm=1,
    fill=True,
    c=None,
    cmap="terrain_r",
    alpha=1,
    ):
    """
    Build a hexagonal-binning 2D histogram of (x, y) samples.

    The plane is tiled with hexagonal prisms (6-sided VTK cylinders); each
    prism's height is proportional to the number of samples falling within
    its catch radius. Colors come from ``c`` or, by default, from ``cmap``
    mapped over the prism heights. Returns an Assembly.
    """
    # if xtitle:
    #     settings.xtitle = xtitle
    # if ytitle:
    #     settings.ytitle = ytitle
    # if ztitle:
    #     settings.ztitle = ztitle

    xmin, xmax = np.min(xvalues), np.max(xvalues)
    ymin, ymax = np.min(yvalues), np.max(yvalues)
    dx, dy = xmax - xmin, ymax - ymin

    # choose the grid dimensions n x m, keeping hexagons roughly regular
    if utils.isSequence(bins):
        n,m = bins
    else:
        if xmax - xmin < ymax - ymin:
            n = bins
            m = np.rint(dy / dx * n / 1.2 + 0.5).astype(int)
        else:
            m = bins
            n = np.rint(dx / dy * m * 1.2 + 0.5).astype(int)

    # build a point cloud of the samples for radius queries below
    src = vtk.vtkPointSource()
    src.SetNumberOfPoints(len(xvalues))
    src.Update()
    pointsPolydata = src.GetOutput()

    # values = list(zip(xvalues, yvalues))
    values = np.stack((xvalues, yvalues), axis=1)
    zs = [[0.0]] * len(values)
    values = np.append(values, zs, axis=1)

    pointsPolydata.GetPoints().SetData(utils.numpy2vtk(values, dtype=float))
    cloud = Mesh(pointsPolydata)

    col = None
    if c is not None:
        col = colors.getColor(c)

    hexs, binmax = [], 0
    ki, kj = 1.33, 1.12    # hexagonal lattice spacing factors
    r = 0.47 / n * 1.2 * dx  # catch radius of each hexagon
    for i in range(n + 3):
        for j in range(m + 2):
            cyl = vtk.vtkCylinderSource()
            cyl.SetResolution(6)  # 6 sides -> hexagonal prism
            cyl.CappingOn()
            cyl.SetRadius(0.5)
            cyl.SetHeight(0.1)
            cyl.Update()
            t = vtk.vtkTransform()
            if not i % 2:
                p = (i / ki, j / kj, 0)
            else:
                # odd columns are shifted to interlock the hexagons
                p = (i / ki, j / kj + 0.45, 0)
            q = (p[0] / n * 1.2 * dx + xmin, p[1] / m * dy + ymin, 0)
            # count the samples within the catch radius of this hexagon
            ids = cloud.closestPoint(q, radius=r, returnCellId=True)
            ne = len(ids)
            if fill:
                t.Translate(p[0], p[1], ne / 2)
                t.Scale(1, 1, ne * 10)
            else:
                t.Translate(p[0], p[1], ne)
            t.RotateX(90)  # put it along Z
            tf = vtk.vtkTransformPolyDataFilter()
            tf.SetInputData(cyl.GetOutput())
            tf.SetTransform(t)
            tf.Update()
            if c is None:
                col = i
            h = Mesh(tf.GetOutput(), c=col, alpha=alpha).flat()
            h.lighting('plastic')
            h.PickableOff()
            hexs.append(h)
            if ne > binmax:
                binmax = ne

    if cmap is not None:
        # recolor by prism height once the maximum count is known
        for h in hexs:
            z = h.GetBounds()[5]
            col = colors.colorMap(z, cmap, 0, binmax)
            h.color(col)

    asse = Assembly(hexs)
    asse.SetScale(1.2 / n * dx, 1 / m * dy, norm / binmax * (dx + dy) / 4)
    asse.SetPosition(xmin, ymin, 0)
    asse.base = np.array([0, 0, 0])
    asse.top  = np.array([0, 0, 1])
    asse.name = "histogramHexBin"
    return asse
def _histogramPolar(
    values,
    weights=None,
    title="",
    tsize=0.1,
    bins=16,
    r1=0.25,
    r2=1,
    phigap=0.5,
    rgap=0.05,
    lpos=1,
    lsize=0.04,
    c='grey',
    bc="k",
    alpha=1,
    cmap=None,
    deg=False,
    vmin=None,
    vmax=None,
    labels=(),
    showDisc=True,
    nrays=8,
    showLines=True,
    showAngles=True,
    showErrors=False,
):
    """
    Polar (rose) histogram of angular `values`, returned as a ``Plot``.

    Values are interpreted as angles in radians (or degrees if ``deg``) and
    binned over the full circle [0, 2*pi).  Each bin becomes an annular
    slice between radius ``r1`` and ``r1 + r2``-scaled frequency.

    :param weights: optional per-value weights, forwarded to ``np.histogram``
    :param float phigap: angular gap between adjacent slices, in degrees
    :param c: slice color; a sequence of length ``bins`` colors each slice,
        ``None`` cycles through indexed colors
    :param cmap: if given, colors slices by frequency and overrides ``c``
    :param vmin,vmax: frequency range used for the colormap normalization
    :param labels: optional per-bin text labels
    :param bool showDisc: draw the outer/inner reference circles and rays
    :param bool showErrors: draw sqrt(N) error bars (disables ``showLines``)
    """
    k = 180 / np.pi
    if deg:
        values = np.array(values) / k
    else:
        values = np.array(values)
    vals = []
    for v in values:  # normalize range
        t = np.arctan2(np.sin(v), np.cos(v))
        if t < 0:
            t += 2 * np.pi
        # tiny offset nudges values sitting exactly on a bin edge into the bin
        vals.append(t+0.00001)
    histodata, edges = np.histogram(vals, weights=weights,
                                    bins=bins, range=(0, 2*np.pi))
    # bin-center angles
    thetas = []
    for i in range(bins):
        thetas.append((edges[i] + edges[i + 1]) / 2)
    if vmin is None:
        vmin = np.min(histodata)
    if vmax is None:
        vmax = np.max(histodata)
    # Poisson error estimate per bin
    errors = np.sqrt(histodata)
    r2e = r1 + r2
    if showErrors:
        # enlarge outer radius so error bars fit inside the disc
        r2e += np.max(errors) / vmax * 1.5
    back = None
    if showDisc:
        back = shapes.Disc(r1=r2e, r2=r2e * 1.01, c=bc, res=(1,360))
        back.z(-0.01)
    slices = []
    lines = []
    angles = []
    errbars = []
    for i, t in enumerate(thetas):
        # slice height proportional to bin frequency, normalized to vmax
        r = histodata[i] / vmax * r2
        d = shapes.Disc((0, 0, 0), r1, r1+r, res=(1,360))
        # carve the annulus down to this bin's angular aperture
        # (delta accounts for the bin half-width and the phigap spacing)
        delta = np.pi/bins - np.pi/2 - phigap/k
        d.cutWithPlane(normal=(np.cos(t + delta), np.sin(t + delta), 0))
        d.cutWithPlane(normal=(np.cos(t - delta), np.sin(t - delta), 0))
        if cmap is not None:
            cslice = colors.colorMap(histodata[i], cmap, vmin, vmax)
            d.color(cslice)
        else:
            if c is None:
                d.color(i)
            elif utils.isSequence(c) and len(c) == bins:
                d.color(c[i])
            else:
                d.color(c)
        d.alpha(alpha).lighting('off')
        slices.append(d)
        ct, st = np.cos(t), np.sin(t)
        if showErrors:
            showLines = False
            err = np.sqrt(histodata[i]) / vmax * r2
            # radial error bar centered on the slice tip
            errl = shapes.Line(
                ((r1 + r - err) * ct, (r1 + r - err) * st, 0.01),
                ((r1 + r + err) * ct, (r1 + r + err) * st, 0.01),
            )
            errl.alpha(alpha).lw(3).color(bc)
            errbars.append(errl)
    labs=[]
    rays = []
    if showDisc:
        # outer reference ring and a half-radius inner ring
        outerdisc = shapes.Disc(r1=r2e, r2=r2e * 1.01, c=bc, res=(1,360))
        outerdisc.z(-0.01)
        innerdisc = shapes.Disc(r1=r2e/2, r2=r2e/2 * 1.005, c=bc, res=(1, 360))
        innerdisc.z(-0.01)
        rays.append(outerdisc)
        rays.append(innerdisc)
        rgap = 0.05
        for t in np.linspace(0, 2 * np.pi, num=nrays, endpoint=False):
            ct, st = np.cos(t), np.sin(t)
            if showLines:
                # full ray plus a dashed half-step ray between rays
                l = shapes.Line((0, 0, -0.01), (r2e * ct * 1.03, r2e * st * 1.03, -0.01))
                rays.append(l)
                ct2, st2 = np.cos(t+np.pi/nrays), np.sin(t+np.pi/nrays)
                lm = shapes.DashedLine((0, 0, -0.01),
                                       (r2e * ct2, r2e * st2, -0.01),
                                       spacing=0.25)
                rays.append(lm)
            elif showAngles:  # just the ticks
                # NOTE(review): this tick Line is built but never appended to
                # `rays` — it appears to be dropped; confirm intent upstream.
                l = shapes.Line(
                    (r2e * ct * 0.98, r2e * st * 0.98, -0.01),
                    (r2e * ct * 1.03, r2e * st * 1.03, -0.01),
                )
            if showAngles:
                # choose label justification by quadrant so text points outward
                if 0 <= t < np.pi / 2:
                    ju = "bottom-left"
                elif t == np.pi / 2:
                    ju = "bottom-center"
                elif np.pi / 2 < t <= np.pi:
                    ju = "bottom-right"
                elif np.pi < t < np.pi * 3 / 2:
                    ju = "top-right"
                elif t == np.pi * 3 / 2:
                    ju = "top-center"
                else:
                    ju = "top-left"
                a = shapes.Text3D(int(t * k), pos=(0, 0, 0), s=lsize, depth=0, justify=ju)
                a.pos(r2e * ct * (1 + rgap), r2e * st * (1 + rgap), -0.01)
                angles.append(a)
    ti = None
    if title:
        ti = shapes.Text3D(title, (0, 0, 0), s=tsize, depth=0, justify="top-center")
        ti.pos(0, -r2e * 1.15, 0.01)
    # per-bin text labels, placed at lpos fraction of the radius
    for i,t in enumerate(thetas):
        if i < len(labels):
            lab = shapes.Text3D(labels[i], (0, 0, 0), #font="VTK",
                                s=lsize, depth=0, justify="center")
            lab.pos(r2e *np.cos(t) * (1 + rgap) * lpos / 2,
                    r2e *np.sin(t) * (1 + rgap) * lpos / 2, 0.01)
            labs.append(lab)
    mrg = merge(lines, angles, rays, ti, labs)
    if mrg:
        mrg.color(bc).lighting('off')
    rh = Plot(slices + errbars + [mrg])
    # expose raw histogram data on the returned object
    rh.freqs = histodata
    rh.bins = edges
    rh.base = np.array([0, 0, 0])
    rh.top = np.array([0, 0, 1])
    rh.name = "histogramPolar"
    return rh
def _histogramSpheric(
    thetavalues, phivalues, rmax=1.2, res=8, cmap="rainbow", lw=0.1, scalarbar=True,
):
    """
    Spherical histogram of direction data (theta, phi), returned as a Mesh.

    A quad sphere is built; each input direction is assigned to its closest
    sphere cell, and the cell is radially extruded in proportion to its count.

    :param float rmax: maximum radius of the most populated cell
    :param int res: sphere resolution (number of quads per side)
    :param str cmap: colormap used to color cells by their raw count
    :param bool scalarbar: attach a scalar bar showing the counts
    """
    # sample points slightly outside the unit sphere (r=1.1) so that
    # closestPoint reliably snaps them onto sphere cells
    x, y, z = utils.spher2cart(np.ones_like(thetavalues) * 1.1, thetavalues, phivalues)
    ptsvals = np.c_[x, y, z]
    sg = shapes.Sphere(res=res, quads=True).shrink(0.999).computeNormals().lw(0.1)
    sgfaces = sg.faces()
    sgpts = sg.points()
#    sgpts = np.vstack((sgpts, [0,0,0]))
#    idx = sgpts.shape[0]-1
#    newfaces = []
#    for fc in sgfaces:
#        f1,f2,f3,f4 = fc
#        newfaces.append([idx,f1,f2, idx])
#        newfaces.append([idx,f2,f3, idx])
#        newfaces.append([idx,f3,f4, idx])
#        newfaces.append([idx,f4,f1, idx])
    newsg = sg  # Mesh((sgpts, sgfaces)).computeNormals().phong()
    newsgpts = newsg.points()
    cntrs = sg.cellCenters()
    # count how many input directions fall into each sphere cell
    counts = np.zeros(len(cntrs))
    for p in ptsvals:
        cell = sg.closestPoint(p, returnCellId=True)
        counts[cell] += 1
    acounts = np.array(counts)  # keep raw counts for the colormap
    counts *= (rmax - 1) / np.max(counts)  # rescale to radial offsets
    for cell, cn in enumerate(counts):
        if not cn:
            continue
        # radially push this cell's corner points outward by 1+cn
        fs = sgfaces[cell]
        pts = sgpts[fs]
        _, t1, p1 = utils.cart2spher(pts[:, 0], pts[:, 1], pts[:, 2])
        x, y, z = utils.spher2cart(1 + cn, t1, p1)
        newsgpts[fs] = np.c_[x, y, z]
    newsg.points(newsgpts)
    newsg.cmap(cmap, acounts, on='cells')
    if scalarbar:
        newsg.addScalarBar()
    newsg.name = "histogramSpheric"
    return newsg
def donut(
    fractions,
    title="",
    tsize=0.3,
    r1=1.7,
    r2=1,
    phigap=0,
    lpos=0.8,
    lsize=0.15,
    c=None,
    bc="k",
    alpha=1,
    labels=(),
    showDisc=False,
):
    """
    Donut plot or pie chart.
    :param str title: plot title
    :param float tsize: title size
    :param float r1: inner radius
    :param float r2: outer radius, starting from r1
    :param float phigap: gap angle btw 2 radial bars, in degrees
    :param float lpos: label gap factor along radius
    :param float lsize: label size
    :param c: color of the plot slices
    :param bc: color of the disc frame
    :param alpha: alpha of the disc frame
    :param list labels: list of labels
    :param bool showDisc: show the outer ring axis
    |donut| |donut.py|_
    """
    fractions = np.array(fractions)
    # cumulative end-angle of each fraction; force exact closure at 2*pi
    angles = np.add.accumulate(2 * np.pi * fractions)
    angles[-1] = 2 * np.pi
    if angles[-2] > 2 * np.pi:
        print("Error in donut(): fractions must sum to 1.")
        raise RuntimeError
    # build a 360-entry color list: one color per degree, picked from the
    # fraction whose angular span contains that degree
    # NOTE(review): with the default c=None, c[ia] below raises TypeError —
    # presumably callers must pass a sequence of colors; confirm upstream.
    cols = []
    for i, th in enumerate(np.linspace(0, 2 * np.pi, 360, endpoint=False)):
        for ia, a in enumerate(angles):
            if th < a:
                cols.append(c[ia])
                break
    labs = ()
    if len(labels):
        # place each label at the mid-angle of its fraction, in a sparse
        # 360-slot label array consumed by _histogramPolar
        angles = np.concatenate([[0], angles])
        labs = [""] * 360
        for i in range(len(labels)):
            a = (angles[i + 1] + angles[i]) / 2
            j = int(a / np.pi * 180)
            labs[j] = labels[i]
    # delegate rendering to the polar histogram with 360 one-degree bins
    data = np.linspace(0, 2 * np.pi, 360, endpoint=False) + 0.005
    dn = _histogramPolar(
        data,
        title=title,
        bins=360,
        r1=r1,
        r2=r2,
        phigap=phigap,
        lpos=lpos,
        lsize=lsize,
        tsize=tsize,
        c=cols,
        bc=bc,
        alpha=alpha,
        vmin=0,
        vmax=1,
        labels=labs,
        showDisc=showDisc,
        showLines=0,
        showAngles=0,
        showErrors=0,
    )
    dn.name = "donut"
    return dn
def quiver(
    points,
    vectors,
    c="k",
    alpha=1,
    shaftLength=0.8,
    shaftWidth=0.05,
    headLength=0.25,
    headWidth=0.2,
    fill=True,
):
    """
    Quiver Plot: draw `vectors` centered at `points` locations.
    Color can be specified as a colormap which maps the size of the arrows.
    :param float shaftLength: fractional shaft length
    :param float shaftWidth: fractional shaft width
    :param float headLength: fractional head length
    :param float headWidth: fractional head width
    :param bool fill: if False only generate the outline
    |quiver| |quiver.py|_
    """
    # accept either a vedo.Points object or a plain array of coordinates
    if isinstance(points, vedo.Points):
        origins = points.points()
    else:
        origins = np.array(points)
    # each arrow is centered on its point: half the vector on either side
    half = np.array(vectors) / 2
    tails = origins - half
    tips = origins + half
    arrows = shapes.Arrows2D(
        tails,
        tips,
        c=c,
        shaftLength=shaftLength,
        shaftWidth=shaftWidth,
        headLength=headLength,
        headWidth=headWidth,
        fill=fill,
        alpha=alpha,
    )
    arrows.pickable(False)
    arrows.name = "quiver"
    return arrows
def violin(
    values,
    bins=10,
    vlim=None,
    x=0,
    width=3,
    spline=True,
    fill=True,
    c="violet",
    alpha=1,
    outline=True,
    centerline=True,
    lc="darkorchid",
    lw=3,
):
    """
    Violin style histogram.
    :param int bins: number of bins
    :param list vlim: input value limits. Crop values outside range.
    :param list x: x-position of the violin axis
    :param float width: width factor of the normalized distribution
    :param bool spline: spline points
    :param bool fill: fill violin with solid color
    :param bool outline: add the distribution outline
    :param bool centerline: add the vertical centerline at x
    :param lc: line color
    |histo_violin| |histo_violin.py|_
    """
    fs, edges = np.histogram(values, bins=bins, range=vlim)
    mine, maxe = np.min(edges), np.max(edges)
    # normalize frequencies to fractions, scaled by the width factor
    fs = fs.astype(float) / len(values) * width
    rs = []
    if spline:
        # smooth profile: mirrored left/right splines through bin centers
        lnl, lnr = [(0, edges[0], 0)], [(0, edges[0], 0)]
        for i in range(bins):
            xc = (edges[i] + edges[i + 1]) / 2
            yc = fs[i]
            lnl.append([-yc, xc, 0])
            lnr.append([yc, xc, 0])
        lnl.append((0, edges[-1], 0))
        lnr.append((0, edges[-1], 0))
        spl = shapes.KSpline(lnl).x(x)
        spr = shapes.KSpline(lnr).x(x)
        spl.color(lc).alpha(alpha).lw(lw)
        spr.color(lc).alpha(alpha).lw(lw)
        if outline:
            rs.append(spl)
            rs.append(spr)
        if fill:
            # ribbon between the two splines fills the violin body
            rb = shapes.Ribbon(spl, spr, c=c, alpha=alpha).lighting('off')
            rs.append(rb)
    else:
        # blocky profile: step outline tracing each bin as a rectangle edge
        lns1 = [[0, mine, 0]]
        for i in range(bins):
            lns1.append([fs[i], edges[i], 0])
            lns1.append([fs[i], edges[i + 1], 0])
        lns1.append([0, maxe, 0])
        lns2 = [[0, mine, 0]]
        for i in range(bins):
            lns2.append([-fs[i], edges[i], 0])
            lns2.append([-fs[i], edges[i + 1], 0])
        lns2.append([0, maxe, 0])
        if outline:
            rs.append(shapes.Line(lns1, c=lc, alpha=alpha, lw=lw).x(x))
            rs.append(shapes.Line(lns2, c=lc, alpha=alpha, lw=lw).x(x))
        if fill:
            # one symmetric rectangle per bin
            for i in range(bins):
                p0 = (-fs[i], edges[i], 0)
                p1 = (fs[i], edges[i + 1], 0)
                r = shapes.Rectangle(p0, p1).x(p0[0] + x)
                r.color(c).alpha(alpha).lighting('off')
                rs.append(r)
    if centerline:
        # vertical axis line, slightly in front (z=0.01) to stay visible
        cl = shapes.Line([0, mine, 0.01], [0, maxe, 0.01], c=lc, alpha=alpha, lw=2).x(x)
        rs.append(cl)
    asse = Assembly(rs)
    asse.base = np.array([0, 0, 0])
    asse.top = np.array([0, 1, 0])
    asse.name = "violin"
    return asse
def whisker(data,
            s=0.25,
            c='k',
            lw=2,
            bc='blue',
            alpha=0.25,
            r=5,
            jitter=True,
            horizontal=False,
            ):
    """
    Generate a "whisker" bar from a 1-dimensional dataset.
    :param float s: size of the box
    :param c: color of the lines
    :param float lw: line width
    :param bc: color of the box
    :param float alpha: transparency of the box
    :param float r: point radius in pixels (use value 0 to disable)
    :param bool jitter: add some randomness to points to avoid overlap
    :param bool horizontal: set horizontal layout
    |whiskers| |whiskers.py|_
    """
    # all data points sit on the x=0 axis, optionally jittered horizontally
    xvals = np.zeros_like(np.array(data))
    if jitter:
        xjit = np.random.randn(len(xvals))*s/9
        xjit = np.clip(xjit, -s/2.1, s/2.1)  # keep jitter inside the box
        xvals += xjit
    dmean = np.mean(data)
    dq05 = np.quantile(data, 0.05)
    dq25 = np.quantile(data, 0.25)
    dq75 = np.quantile(data, 0.75)
    dq95 = np.quantile(data, 0.95)
    pts = None
    if r: pts = shapes.Points([xvals, data], c=c, r=r)
    # interquartile box (filled) plus its outline
    rec = shapes.Rectangle([-s/2, dq25],[s/2, dq75], c=bc, alpha=alpha)
    rec.GetProperty().LightingOff()
    rl = shapes.Line([[-s/2, dq25],[s/2, dq25],[s/2, dq75],[-s/2, dq75]], closed=True)
    # whiskers from 5th to 25th and 75th to 95th percentiles
    l1 = shapes.Line([0,dq05,0], [0,dq25,0], c=c, lw=lw)
    l2 = shapes.Line([0,dq75,0], [0,dq95,0], c=c, lw=lw)
    lm = shapes.Line([-s/2, dmean], [s/2, dmean])  # mean marker
    lns = merge(l1, l2, lm, rl)
    asse = Assembly([lns, rec, pts])
    if horizontal:
        asse.rotateZ(-90)
    asse.name = "Whisker"
    # expose the computed statistics on the returned object
    asse.info['mean'] = dmean
    asse.info['quantile_05'] = dq05
    asse.info['quantile_25'] = dq25
    asse.info['quantile_75'] = dq75
    asse.info['quantile_95'] = dq95
    return asse
def streamplot(X, Y, U, V, direction="both",
               maxPropagation=None, mode=1, lw=0.001, c=None, probes=()):
    """
    Generate a streamline plot of a vectorial field (U,V) defined at positions (X,Y).
    Returns a ``Mesh`` object.
    :param str direction: either "forward", "backward" or "both"
    :param float maxPropagation: maximum physical length of the streamline
    :param float lw: line width in absolute units
    :param int mode: vary line width
    - 0 - do not vary line width
    - 1 - vary line width by first vector component
    - 2 - vary line width vector magnitude
    - 3 - vary line width by absolute value of first vector component
    |plot_stream| |plot_stream.py|_
    """
    n = len(X)
    m = len(Y[0])
    if n != m:
        print("Limitation in streamplot(): only square grids are allowed.", n, m)
        raise RuntimeError()
    # physical extent of the grid, taken from the meshgrid corners
    xmin, xmax = X[0][0], X[-1][-1]
    ymin, ymax = Y[0][0], Y[-1][-1]
    field = np.sqrt(U * U + V * V)
    # wrap the field magnitude in a flat (n x n x 1) Volume for the tracer
    vol = vedo.Volume(field, dims=(n, n, 1))
    # Fortran order to match the Volume's internal point ordering
    uf = np.ravel(U, order="F")
    vf = np.ravel(V, order="F")
    vects = np.c_[uf, vf, np.zeros_like(uf)]
    vol.addPointArray(vects, "vects")
    if len(probes) == 0:
        # no seeds given: seed from a regular grid covering the whole domain
        probe = shapes.Grid(pos=((n-1)/2,(n-1)/2,0), sx=n-1, sy=n-1, resx=n-1, resy=n-1)
    else:
        if isinstance(probes, vedo.Points):
            probes = probes.points()
        else:
            probes = np.array(probes)
            if len(probes[0]) == 2:
                probes = np.c_[probes[:, 0], probes[:, 1], np.zeros(len(probes))]
        # map seed positions from physical coords to volume index coords
        sv = [(n - 1) / (xmax - xmin), (n - 1) / (ymax - ymin), 1]
        probes = probes - [xmin, ymin, 0]
        probes = np.multiply(probes, sv)
        probe = vedo.Points(probes)
    stream = vedo.base.streamLines( vol.imagedata(),
                                    probe,
                                    tubes={"radius": lw, "varyRadius": mode,},
                                    lw=lw,
                                    maxPropagation=maxPropagation,
                                    direction=direction,
    )
    if c is not None:
        stream.color(c)
    else:
        stream.addScalarBar()
    stream.lighting('off')
    # map the result back from index coords to physical coords
    stream.scale([1 / (n - 1) * (xmax - xmin), 1 / (n - 1) * (ymax - ymin), 1])
    stream.shift(xmin, ymin)
    return stream
def matrix(M,
           title='Matrix',
           xtitle='',
           ytitle='',
           xlabels=[],
           ylabels=[],
           xrotation=0,
           cmap='Reds',
           vmin=None,
           vmax=None,
           precision=2,
           font='Theemim',
           scale=0,
           scalarbar=True,
           lc='white',
           lw=0,
           c='black',
           alpha=1,
           ):
    """
    Generate a matrix, or a 2D color-coded plot with bin labels.
    Returns an ``Assembly`` object.
    Parameters
    ----------
    M : list or numpy array
        the input array to visualize.
    title : str, optional
        title of the plot. The default is 'Matrix'.
    xtitle : str, optional
        title of the horizontal colmuns. The default is ''.
    ytitle : str, optional
        title of the vertical rows. The default is ''.
    xlabels : list, optional
        individual string labels for each column. Must be of length m. The default is [].
    ylabels : list, optional
        individual string labels for each row. Must be of length n. The default is [].
    xrotation : float, optional
        rotation of the horizontal labels. The default is 0.
    cmap : str, optional
        color map name. The default is 'Reds'.
    vmin : float, optional
        minimum value of the colormap range. The default is None.
    vmax : float, optional
        maximum value of the colormap range. The default is None.
    precision : int, optional
        number of digits for the matrix entries or bins. The default is 2.
    font : str, optional
        font name. The default is ''.
    scale : float, optional
        size of the numeric entries or bin values. The default is 0.
    scalarbar : bool, optional
        add a scalar bar to the right of the plot. The default is True.
    lc : str, optional
        color of the line separating the bins. The default is 'white'.
    lw : float, optional
        Width of the line separating the bins. The default is 0.
    c : str, optional
        text color. The default is 'k'.
    alpha : float, optional
        plot transparency. The default is 1.
    """
    M = np.asarray(M)
    n,m = M.shape
    # grid normalized so that total width+height spans 2 units
    gr = shapes.Grid(resx=m, resy=n, sx=m/(m+n)*2, sy=n/(m+n)*2, c=c, alpha=alpha)
    gr.wireframe(False).lc(lc).lw(lw)
    # flip both axes so M[0][0] maps to the grid's top-left cell
    matr = np.flip( np.flip(M), axis=1).ravel(order='C')
    gr.cmap(cmap, matr, on='cells', vmin=vmin, vmax=vmax)
    sbar=None
    if scalarbar:
        gr.addScalarBar3D(titleFont=font, labelFont=font)
        sbar = gr.scalarbar
    labs=None
    if scale !=0:
        # numeric value shown inside each cell
        labs = gr.labels(cells=True, scale=scale/max(m,n),
                         precision=precision, font=font, justify='center', c=c)
        labs.z(0.001)  # lift slightly above the grid surface
    t = None
    if title:
        if title == 'Matrix':
            title += ' '+str(n)+'x'+str(m)  # default title shows dimensions
        t = shapes.Text3D(title, font=font, s=0.04,
                          justify='bottom-center', c=c)
        t.shift(0, n/(m+n)*1.05)
    xlabs=None
    if len(xlabels)==m:
        xlabs=[]
        jus = 'top-center'
        if xrotation>44:
            jus = 'right-center'  # steep rotation: right-align against the axis
        for i in range(m):
            xl = shapes.Text3D(xlabels[i], font=font, s=0.02,
                               justify=jus, c=c).rotateZ(xrotation)
            xl.shift((2*i-m+1)/(m+n), -n/(m+n)*1.05)
            xlabs.append(xl)
    ylabs=None
    if len(ylabels)==n:
        ylabs=[]
        for i in range(n):
            yl = shapes.Text3D(ylabels[i], font=font, s=.02,
                               justify='right-center', c=c)
            yl.shift(-m/(m+n)*1.05, (2*i-n+1)/(m+n))
            ylabs.append(yl)
    xt=None
    if xtitle:
        xt = shapes.Text3D(xtitle, font=font, s=0.035,
                           justify='top-center', c=c)
        xt.shift(0, -n/(m+n)*1.05)
        if xlabs is not None:
            # push the axis title below the column labels
            y0,y1 = xlabs[0].ybounds()
            xt.shift(0, -(y1-y0)-0.55/(m+n))
    yt=None
    if ytitle:
        yt = shapes.Text3D(ytitle, font=font, s=0.035,
                           justify='bottom-center', c=c).rotateZ(90)
        yt.shift(-m/(m+n)*1.05, 0)
        if ylabs is not None:
            # push the axis title left of the row labels
            x0,x1 = ylabs[0].xbounds()
            yt.shift(-(x1-x0)-0.55/(m+n),0)
    asse = Assembly(gr, sbar, labs, t, xt, yt, xlabs, ylabs)
    asse.name = "Matrix"
    return asse
def cornerPlot(points, pos=1, s=0.2, title="", c="b", bg="k", lines=True, dots=True):
    """
    Return a ``vtkXYPlotActor`` that is a plot of `x` versus `y`,
    where `points` is a list of `(x,y)` points.
    :param int pos: assign position:
    - 1, topleft,
    - 2, topright,
    - 3, bottomleft,
    - 4, bottomright.
    """
    if len(points) == 2:  # passing [allx, ally]
        points = np.stack((points[0], points[1]), axis=1)
    c = colors.getColor(c)  # allow different codings
    # copy the (x, y) pairs into two parallel vtk float arrays
    array_x = vtk.vtkFloatArray()
    array_y = vtk.vtkFloatArray()
    array_x.SetNumberOfTuples(len(points))
    array_y.SetNumberOfTuples(len(points))
    for i, p in enumerate(points):
        array_x.InsertValue(i, p[0])
        array_y.InsertValue(i, p[1])
    field = vtk.vtkFieldData()
    field.AddArray(array_x)
    field.AddArray(array_y)
    data = vtk.vtkDataObject()
    data.SetFieldData(field)
    plot = vtk.vtkXYPlotActor()
    plot.AddDataObjectInput(data)
    # array 0 is x, array 1 is y
    plot.SetDataObjectXComponent(0, 0)
    plot.SetDataObjectYComponent(0, 1)
    plot.SetXValuesToValue()
    plot.SetAdjustXLabels(0)
    plot.SetAdjustYLabels(0)
    plot.SetNumberOfXLabels(3)
    plot.GetProperty().SetPointSize(5)
    plot.GetProperty().SetLineWidth(2)
    plot.GetProperty().SetColor(colors.getColor(bg))
    plot.SetPlotColor(0, c[0], c[1], c[2])
    plot.SetXTitle(title)
    plot.SetYTitle("")
    plot.ExchangeAxesOff()
    plot.SetPlotPoints(dots)
    if not lines:
        plot.PlotLinesOff()
    # accept a string position spec like "top-left" and map it to 1..4
    if isinstance(pos, str):
        spos = 2
        if "top" in pos:
            if "left" in pos: spos=1
            elif "right" in pos: spos=2
        elif "bottom" in pos:
            if "left" in pos: spos=3
            elif "right" in pos: spos=4
        pos = spos
    # place the plot actor in normalized viewport coordinates
    if pos == 1:
        plot.GetPositionCoordinate().SetValue(0.0, 0.8, 0)
    elif pos == 2:
        plot.GetPositionCoordinate().SetValue(0.76, 0.8, 0)
    elif pos == 3:
        plot.GetPositionCoordinate().SetValue(0.0, 0.0, 0)
    elif pos == 4:
        plot.GetPositionCoordinate().SetValue(0.76, 0.0, 0)
    else:
        # any other value is treated as an (x, y) viewport fraction
        plot.GetPositionCoordinate().SetValue(pos[0], pos[1], 0)
    plot.GetPosition2Coordinate().SetValue(s, s, 0)
    return plot
def cornerHistogram(
    values,
    bins=20,
    vrange=None,
    minbin=0,
    logscale=False,
    title="",
    c="g",
    bg="k",
    alpha=1,
    pos="bottom-left",
    s=0.175,
    lines=True,
    dots=False,
    nmax=None,
):
    """
    Build a histogram from a list of values in n bins.
    The resulting object is a 2D actor.
    Use *vrange* to restrict the range of the histogram.
    :param int nmax: limit the sampling to this max nr of entries
    Use `pos` to assign its position:
    - 1, topleft,
    - 2, topright,
    - 3, bottomleft,
    - 4, bottomright,
    - (x, y), as fraction of the rendering window
    """
    # accept vedo objects carrying a vtk dataset with point scalars
    if hasattr(values, '_data'):
        values = utils.vtk2numpy(values._data.GetPointData().GetScalars())
    n = values.shape[0]
    if nmax and nmax < n:
        # subsample:
        idxs = np.linspace(0, n, num=int(nmax), endpoint=False).astype(int)
        values = values[idxs]
    fs, edges = np.histogram(values, bins=bins, range=vrange)
    if minbin:
        # NOTE(review): the -1 also discards the LAST bin, and `edges` is not
        # trimmed to match, so the bin centers below shift by `minbin` bins.
        # Looks unintended — confirm before changing long-standing behavior.
        fs = fs[minbin:-1]
    if logscale:
        fs = np.log10(fs + 1)  # +1 avoids log10(0) for empty bins
    # one (bin-center, frequency) point per bin
    pts = []
    for i in range(len(fs)):
        pts.append([(edges[i] + edges[i + 1]) / 2, fs[i]])
    plot = cornerPlot(pts, pos, s, title, c, bg, lines, dots)
    plot.SetNumberOfYLabels(2)
    plot.SetNumberOfXLabels(3)
    # style the axes text with the default font and requested colors
    tprop = vtk.vtkTextProperty()
    tprop.SetColor(colors.getColor(bg))
    tprop.SetFontFamily(vtk.VTK_FONT_FILE)
    tprop.SetFontFile(utils.getFontPath(settings.defaultFont))
    tprop.SetOpacity(alpha)
    plot.SetAxisTitleTextProperty(tprop)
    plot.GetProperty().SetOpacity(alpha)
    plot.GetXAxisActor2D().SetLabelTextProperty(tprop)
    plot.GetXAxisActor2D().SetTitleTextProperty(tprop)
    plot.GetXAxisActor2D().SetFontFactor(0.55)
    plot.GetYAxisActor2D().SetLabelFactor(0.0)
    plot.GetYAxisActor2D().LabelVisibilityOff()
    return plot
class DirectedGraph(Assembly):
    """A graph consists of a collection of nodes (without postional information)
    and a collection of edges connecting pairs of nodes.
    The task is to determine the node positions only based on their connections.
    This class is derived from class ``Assembly``, and it assembles 4 Mesh objects
    representing the graph, the node labels, edge labels and edge arrows.
    :param c: color of the Graph
    :param int n: number of the initial set of nodes
    :param int,str layout: layout in ['2d', 'fast2d', 'clustering2d', 'circular',
                                      'circular3d', 'cone', 'force', 'tree']
    Each of these layouts has diferent available options.
    Options for layouts '2d', 'fast2d' and 'clustering2d':
    :param int seed: seed of the random number generator used to jitter point positions
    :param float restDistance: manually set the resting distance
    :param int maxNumberOfIterations: the maximum number of iterations to be used
    :param float zrange: expand 2d graph along z axis.
    Options for layouts 'circular', and 'circular3d':
    :param float radius: set the radius of the circles.
    :param float height: set the vertical (local z) distance between the circles
    :param float zrange: expand 2d graph along z axis.
    Options for layout 'cone':
    :param float compactness: ratio between the average width of a cone in the tree,
        and the height of the cone. The default setting is 0.75.
    :param bool compression: put children closer together, possibly allowing sub-trees to overlap.
        This is useful if the tree is actually the spanning tree of a graph.
    :param float spacing: space between layers of the tree
    Options for layout 'force':
    :param int seed: seed the random number generator used to jitter point positions
    :param list bounds: set the region in space in which to place the final graph
    :param int maxNumberOfIterations: the maximum number of iterations to be used
    :param bool threeDimensional: allow optimization in the 3rd dimension too
    :param bool randomInitialPoints: use random positions within the graph bounds as initial points
    Example:
    |lineage_graph| |lineage_graph.py|_
    |graph_network| |graph_network.py|_
    """
    def __init__(self, **kargs):
        """Set up the graph state and the vtk layout strategy from **kargs.
        Options are consumed with kargs.pop(); any leftover key is reported
        as not understood at the end."""
        vedo.base.BaseActor.__init__(self)
        self.nodes = []
        self.edges = []
        self._nodeLabels = []  # holds strings
        self._edgeLabels = []
        self.edgeOrientations = []
        self.edgeGlyphPosition = 0.6
        self.zrange = 0.0
        # rotations applied to the built meshes (set per-layout below)
        self.rotX = 0
        self.rotY = 0
        self.rotZ = 0
        self.arrowScale = 0.15
        self.nodeLabelScale = None
        self.nodeLabelJustify = "bottom-left"
        self.edgeLabelScale = None
        self.mdg = vtk.vtkMutableDirectedGraph()
        # pre-populate with n unconnected nodes if requested
        n = kargs.pop('n', 0)
        for i in range(n): self.addNode()
        self._c = kargs.pop('c', (0.3,0.3,0.3))
        self.gl = vtk.vtkGraphLayout()
        self.font = kargs.pop('font', '')
        s = kargs.pop('layout', '2d')
        if isinstance(s, int):
            # allow selecting the layout by index
            ss = ['2d', 'fast2d', 'clustering2d', 'circular', 'circular3d',
                  'cone', 'force', 'tree']
            s = ss[s]
        self.layout = s
        if '2d' in s:
            if 'clustering' in s:
                self.strategy = vtk.vtkClustering2DLayoutStrategy()
            elif 'fast' in s:
                self.strategy = vtk.vtkFast2DLayoutStrategy()
            else:
                self.strategy = vtk.vtkSimple2DLayoutStrategy()
            self.rotX = 180
            opt = kargs.pop('restDistance', None)
            if opt is not None: self.strategy.SetRestDistance(opt)
            opt = kargs.pop('seed', None)
            if opt is not None: self.strategy.SetRandomSeed(opt)
            opt = kargs.pop('maxNumberOfIterations', None)
            if opt is not None: self.strategy.SetMaxNumberOfIterations(opt)
            self.zrange = kargs.pop('zrange', 0)
        elif 'circ' in s:
            if '3d' in s:
                self.strategy = vtk.vtkSimple3DCirclesStrategy()
                self.strategy.SetDirection(0,0,-1)
                self.strategy.SetAutoHeight(True)
                self.strategy.SetMethod(1)
                self.rotX = -90
                opt = kargs.pop('radius', None)  # float
                if opt is not None:
                    self.strategy.SetMethod(0)  # fixed radius overrides auto mode
                    self.strategy.SetRadius(opt)  # float
                opt = kargs.pop('height', None)
                if opt is not None:
                    self.strategy.SetAutoHeight(False)
                    self.strategy.SetHeight(opt)  # float
            else:
                self.strategy = vtk.vtkCircularLayoutStrategy()
                self.zrange = kargs.pop('zrange', 0)
        elif 'cone' in s:
            self.strategy = vtk.vtkConeLayoutStrategy()
            self.rotX = 180
            opt = kargs.pop('compactness', None)
            if opt is not None: self.strategy.SetCompactness(opt)
            opt = kargs.pop('compression', None)
            if opt is not None: self.strategy.SetCompression(opt)
            opt = kargs.pop('spacing', None)
            if opt is not None: self.strategy.SetSpacing(opt)
        elif 'force' in s:
            self.strategy = vtk.vtkForceDirectedLayoutStrategy()
            opt = kargs.pop('seed', None)
            if opt is not None: self.strategy.SetRandomSeed(opt)
            opt = kargs.pop('bounds', None)
            if opt is not None:
                self.strategy.SetAutomaticBoundsComputation(False)
                self.strategy.SetGraphBounds(opt)  # list
            opt = kargs.pop('maxNumberOfIterations', None)
            if opt is not None: self.strategy.SetMaxNumberOfIterations(opt)  # int
            opt = kargs.pop('threeDimensional', True)
            if opt is not None: self.strategy.SetThreeDimensionalLayout(opt)  # bool
            opt = kargs.pop('randomInitialPoints', None)
            if opt is not None: self.strategy.SetRandomInitialPoints(opt)  # bool
        elif 'tree' in s:
            self.strategy = vtk.vtkSpanTreeLayoutStrategy()
            self.rotX = 180
        else:
            colors.printc("Cannot understand layout:", s, c='r')
            colors.printc("Available layouts:", c='r')
            colors.printc("[2d,fast2d,clustering2d,circular,circular3d,cone,force,tree]", c='r')
            raise RuntimeError()
        self.gl.SetLayoutStrategy(self.strategy)
        if len(kargs):
            # any option not popped above was not recognized
            colors.printc("Cannot understand options:", kargs, c='r')
        return
    def addNode(self, label="id"):
        """Add a new node to the Graph."""
        v = self.mdg.AddVertex()  # vtk calls it vertex..
        self.nodes.append(v)
        # by default label the node with its own integer id
        if label == 'id': label=int(v)
        self._nodeLabels.append(str(label))
        return v
    def addEdge(self, v1, v2, label=""):
        """Add a new edge between to nodes.
        An extra node is created automatically if needed."""
        nv = len(self.nodes)
        # grow the node list up to the largest referenced index
        if v1>=nv:
            for i in range(nv, v1+1):
                self.addNode()
        nv = len(self.nodes)
        if v2>=nv:
            for i in range(nv, v2+1):
                self.addNode()
        e = self.mdg.AddEdge(v1,v2)
        self.edges.append(e)
        self._edgeLabels.append(str(label))
        return e
    def addChild(self, v, nodeLabel="id", edgeLabel=""):
        """Add a new edge to a new node as its child.
        The extra node is created automatically if needed."""
        nv = len(self.nodes)
        if v>=nv:
            for i in range(nv, v+1):
                self.addNode()
        child = self.mdg.AddChild(v)
        self.edges.append((v,child))
        self.nodes.append(child)
        if nodeLabel == 'id': nodeLabel=int(child)
        self._nodeLabels.append(str(nodeLabel))
        self._edgeLabels.append(str(edgeLabel))
        return child
    def build(self):
        """
        Build the DirectedGraph(Assembly).
        Accessory objects are also created for labels and arrows.
        """
        # run the layout strategy on the mutable graph
        self.gl.SetZRange(self.zrange)
        self.gl.SetInputData(self.mdg)
        self.gl.Update()
        # output 0: graph geometry; output 1: edge glyph points/vectors
        graphToPolyData = vtk.vtkGraphToPolyData()
        graphToPolyData.EdgeGlyphOutputOn()
        graphToPolyData.SetEdgeGlyphPosition(self.edgeGlyphPosition)
        graphToPolyData.SetInputData(self.gl.GetOutput())
        graphToPolyData.Update()
        dgraph = Mesh(graphToPolyData.GetOutput(0))
        # dgraph.clean() # WRONG!!! dont uncomment
        dgraph.flat().color(self._c).lw(2)
        dgraph.name = "DirectedGraph"
        # normalize the overall size to roughly unit scale
        diagsz = self.diagonalSize()/1.42
        if not diagsz:
            return None
        dgraph.SetScale(1/diagsz)
        if self.rotX:
            dgraph.rotateX(self.rotX)
        if self.rotY:
            dgraph.rotateY(self.rotY)
        if self.rotZ:
            dgraph.rotateZ(self.rotZ)
        vecs = graphToPolyData.GetOutput(1).GetPointData().GetVectors()
        self.edgeOrientations = utils.vtk2numpy(vecs)
        # Use Glyph3D to repeat the glyph on all edges.
        arrows=None
        if self.arrowScale:
            arrowSource = vtk.vtkGlyphSource2D()
            arrowSource.SetGlyphTypeToEdgeArrow()
            arrowSource.SetScale(self.arrowScale)
            arrowSource.Update()
            arrowGlyph = vtk.vtkGlyph3D()
            arrowGlyph.SetInputData(0, graphToPolyData.GetOutput(1))
            arrowGlyph.SetInputData(1, arrowSource.GetOutput())
            arrowGlyph.Update()
            arrows = Mesh(arrowGlyph.GetOutput())
            # arrows get the same scale/rotations as the graph mesh
            arrows.SetScale(1/diagsz)
            arrows.lighting('off').color(self._c)
            if self.rotX:
                arrows.rotateX(self.rotX)
            if self.rotY:
                arrows.rotateY(self.rotY)
            if self.rotZ:
                arrows.rotateZ(self.rotZ)
            arrows.name = "DirectedGraphArrows"
        nodeLabels = dgraph.labels(self._nodeLabels,
                                   scale=self.nodeLabelScale,
                                   precision=0,
                                   font=self.font,
                                   justify=self.nodeLabelJustify,
                                   )
        nodeLabels.color(self._c).pickable(True)
        nodeLabels.name = "DirectedGraphNodeLabels"
        edgeLabels = dgraph.labels(self._edgeLabels,
                                   cells=True,
                                   scale=self.edgeLabelScale,
                                   precision=0,
                                   font=self.font,
                                   )
        edgeLabels.color(self._c).pickable(True)
        edgeLabels.name = "DirectedGraphEdgeLabels"
        # finally assemble the four components into this Assembly
        Assembly.__init__(self, [dgraph,
                                 nodeLabels,
                                 edgeLabels,
                                 arrows])
        self.name = "DirectedGraphAssembly"
        return self
| [
"marco.musy@gmail.com"
] | marco.musy@gmail.com |
ab2ef61c7be69927178e04d9011d650cc23a1a87 | 2322b5a1e6443715d14029ed09d9091d221e4569 | /tests/test_dfply_select.py | 718f55fd119c9ec68b7fdbd216d3fa859c7dd4e2 | [
"MIT"
] | permissive | zdelrosario/py_grama | 1e86454cb67dde055a332de023aab8a52eeb0d87 | d5edbcd3c8dc8705362eb36d437890962851da1d | refs/heads/master | 2023-09-05T01:44:57.707310 | 2023-08-29T12:51:56 | 2023-08-29T12:51:56 | 173,464,075 | 19 | 9 | MIT | 2023-08-16T20:14:08 | 2019-03-02T15:25:43 | Jupyter Notebook | UTF-8 | Python | false | false | 14,801 | py | import numpy as np
import pandas as pd
from scipy.stats import norm
import unittest
from context import grama as gr
from context import data
X = gr.Intention()
##==============================================================================
## select and drop test functions
##==============================================================================
# 0 1 2 3 4 5 6 7 8 9
# carat cut color clarity depth table price x y z
# 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
class TestSelect(unittest.TestCase):
def test_select(self):
df = data.df_diamonds[["carat", "cut", "price"]]
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select("carat", "cut", "price"))
)
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_select(0, 1, 6)))
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_select(0, 1, "price")))
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select([0, X.cut], X.price))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(X.carat, X["cut"], X.price))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(X[["carat", "cut", "price"]]))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(X[["carat", "cut"]], X.price))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(X.iloc[:, [0, 1, 6]]))
)
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_select([X.loc[:, ["carat", "cut", "price"]]])
)
)
def test_select_inversion(self):
df = data.df_diamonds.iloc[:, 3:]
d = data.df_diamonds >> gr.tf_select(~X.carat, ~X.cut, ~X.color)
self.assertTrue(df.equals(d))
def test_drop(self):
df = data.df_diamonds.drop(["carat", "cut", "price"], axis=1)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop("carat", "cut", "price"))
)
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_drop(0, 1, 6)))
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_drop(0, 1, "price")))
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_drop([0, X.cut], X.price)))
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(X.carat, X["cut"], X.price))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(X[["carat", "cut", "price"]]))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(X[["carat", "cut"]], X.price))
)
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_drop(X.iloc[:, [0, 1, 6]])))
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_drop([X.loc[:, ["carat", "cut", "price"]]])
)
)
def test_select_containing(self):
df = data.df_diamonds[["carat", "cut", "color", "clarity", "price"]]
assert df.equals(data.df_diamonds >> gr.tf_select(gr.contains("c")))
def test_drop_containing(self):
df = data.df_diamonds[["depth", "table", "x", "y", "z"]]
assert df.equals(data.df_diamonds >> gr.tf_drop(gr.contains("c")))
def test_select_matches(self):
df = data.df_diamonds[["carat", "cut", "color", "clarity", "price"]]
assert df.equals(data.df_diamonds >> gr.tf_select(gr.matches("^c[auol]|pri")))
def test_drop_matches(self):
df = data.df_diamonds[["depth", "table", "x", "y", "z"]]
assert df.equals(data.df_diamonds >> gr.tf_drop(gr.matches("^c[auol]|p.i")))
def test_select_startswith(self):
df = data.df_diamonds[["carat", "cut", "color", "clarity"]]
assert df.equals(data.df_diamonds >> gr.tf_select(gr.starts_with("c")))
def test_drop_startswith(self):
df = data.df_diamonds[["depth", "table", "price", "x", "y", "z"]]
assert df.equals(data.df_diamonds >> gr.tf_drop(gr.starts_with("c")))
def test_select_endswith(self):
df = data.df_diamonds[["table", "price"]]
assert df.equals(data.df_diamonds >> gr.tf_select(gr.ends_with("e")))
def test_drop_endswith(self):
df = data.df_diamonds.drop("z", axis=1)
assert df.equals(data.df_diamonds >> gr.tf_drop(gr.ends_with("z")))
def test_select_between(self):
df = data.df_diamonds[["cut", "color", "clarity"]]
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_select(gr.columns_between(X.cut, X.clarity))
)
)
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_select(gr.columns_between("cut", "clarity"))
)
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(gr.columns_between(1, 3)))
)
df = data.df_diamonds[["x", "y", "z"]]
assert df.equals(data.df_diamonds >> gr.tf_select(gr.columns_between("x", 20)))
def test_drop_between(self):
df = data.df_diamonds[["carat", "z"]]
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_between("cut", "y")))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_between(X.cut, 8)))
)
df = data.df_diamonds[["carat", "cut"]]
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_between(X.color, 20)))
)
def test_select_from(self):
df = data.df_diamonds[["x", "y", "z"]]
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(gr.columns_from("x")))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(gr.columns_from(X.x)))
)
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_select(gr.columns_from(7))))
self.assertTrue(
data.df_diamonds[[]].equals(
data.df_diamonds >> gr.tf_select(gr.columns_from(100))
)
)
def test_drop_from(self):
df = data.df_diamonds[["carat", "cut"]]
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_from("color")))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_from(X.color)))
)
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_from(2))))
self.assertTrue(
data.df_diamonds[[]].equals(
data.df_diamonds >> gr.tf_drop(gr.columns_from(0))
)
)
def test_select_to(self):
df = data.df_diamonds[["carat", "cut"]]
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(gr.columns_to("color")))
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_select(gr.columns_to(X.color)))
)
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_select(gr.columns_to(2))))
def test_drop_to(self):
df = data.df_diamonds[["x", "y", "z"]]
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_to("x"))))
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_to(X.x))))
self.assertTrue(df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_to(7))))
def select_through(self):
df = data.df_diamonds[["carat", "cut", "color"]]
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_select(gr.columns_to("color", inclusive=True))
)
)
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_select(gr.columns_to(X.color, inclusive=True))
)
)
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_select(gr.columns_to(2, inclusive=True))
)
)
def drop_through(self):
df = data.df_diamonds[["y", "z"]]
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_drop(gr.columns_to("x", inclusive=True))
)
)
self.assertTrue(
df.equals(
data.df_diamonds >> gr.tf_drop(gr.columns_to(X.x, inclusive=True))
)
)
self.assertTrue(
df.equals(data.df_diamonds >> gr.tf_drop(gr.columns_to(7, inclusive=True)))
)
def test_select_if(self):
# test 1: manually build data.df_diamonds subset where columns are numeric and
# mean is greater than 3
cols = list()
for col in data.df_diamonds:
try:
if mean(data.df_diamonds[col]) > 3:
cols.append(col)
except:
pass
df_if = data.df_diamonds[cols]
self.assertTrue(
df_if.equals(data.df_diamonds >> gr.tf_select_if(lambda col: mean(col) > 3))
)
# test 2: use and
cols = list()
for col in data.df_diamonds:
try:
if mean(data.df_diamonds[col]) > 3 and max(data.df_diamonds[col]) < 50:
cols.append(col)
except:
pass
df_if = data.df_diamonds[cols]
self.assertTrue(
df_if.equals(
data.df_diamonds
>> gr.tf_select_if(lambda col: mean(col) > 3 and max(col) < 50)
)
)
# test 3: use or
cols = list()
for col in data.df_diamonds:
try:
if mean(data.df_diamonds[col]) > 3 or max(data.df_diamonds[col]) < 6:
cols.append(col)
except:
pass
df_if = data.df_diamonds[cols]
self.assertTrue(
df_if.equals(
data.df_diamonds
>> gr.tf_select_if(lambda col: mean(col) > 3 or max(col) < 6)
)
)
# test 4: string operations - contain a specific string
cols = list()
for col in data.df_diamonds:
try:
if any(data.df_diamonds[col].str.contains("Ideal")):
cols.append(col)
except:
pass
df_if = data.df_diamonds[cols]
self.assertTrue(
df_if.equals(
data.df_diamonds
>> gr.tf_select_if(lambda col: any(col.str.contains("Ideal")))
)
)
# test 5: get any text columns
# uses the special '.' regex symbol to find any text value
cols = list()
for col in data.df_diamonds:
try:
if any(data.df_diamonds[col].str.contains(".")):
cols.append(col)
except:
pass
df_if = data.df_diamonds[cols]
self.assertTrue(
df_if.equals(
data.df_diamonds
>> gr.tf_select_if(lambda col: any(col.str.contains(".")))
)
)
# test 6: is_numeric helper
self.assertEqual(
{"carat", "depth", "table", "price", "x", "y", "z"},
set(
(
data.df_diamonds
>> gr.tf_select_if(gr.is_numeric)
).columns
)
)
def test_drop_if(self):
# test 1: returns a dataframe where any column does not have a mean greater than 3
# this means numeric columns with mean less than 3, and also any non-numeric column
# (since it does not have a mean)
cols = list()
for col in data.df_diamonds:
try:
if mean(data.df_diamonds[col]) > 3:
cols.append(col)
except:
pass
inverse_cols = [col for col in data.df_diamonds if col not in cols]
df_if = data.df_diamonds[inverse_cols]
self.assertTrue(
df_if.equals(data.df_diamonds >> gr.tf_drop_if(lambda col: mean(col) > 3))
)
# test 2: use and
# return colums where both conditions are false:
# the mean greater than 3, and max < 50
# again, this will include non-numeric columns
cols = list()
for col in data.df_diamonds:
try:
if mean(data.df_diamonds[col]) > 3 and max(data.df_diamonds[col]) < 50:
cols.append(col)
except:
pass
inverse_cols = [col for col in data.df_diamonds if col not in cols]
df_if = data.df_diamonds[inverse_cols]
self.assertTrue(
df_if.equals(
data.df_diamonds
>> gr.tf_drop_if(lambda col: mean(col) > 3 and max(col) < 50)
)
)
# test 3: use or
# this will return a dataframe where either of the two conditions are false:
# the mean is greater than 3, or the max < 6
cols = list()
for col in data.df_diamonds:
try:
if mean(data.df_diamonds[col]) > 3 or max(data.df_diamonds[col]) < 6:
cols.append(col)
except:
pass
inverse_cols = [col for col in data.df_diamonds if col not in cols]
df_if = data.df_diamonds[inverse_cols]
self.assertTrue(
df_if.equals(
data.df_diamonds
>> gr.tf_drop_if(lambda col: mean(col) > 3 or max(col) < 6)
)
)
# test 4: string operations - contain a specific string
# this will drop any columns if they contain the word 'Ideal'
cols = list()
for col in data.df_diamonds:
try:
if any(data.df_diamonds[col].str.contains("Ideal")):
cols.append(col)
except:
pass
inverse_cols = [col for col in data.df_diamonds if col not in cols]
df_if = data.df_diamonds[inverse_cols]
self.assertTrue(
df_if.equals(
data.df_diamonds
>> gr.tf_drop_if(lambda col: any(col.str.contains("Ideal")))
)
)
# test 5: drop any text columns
# uses the special '.' regex symbol to find any text value
cols = list()
for col in data.df_diamonds:
try:
if any(data.df_diamonds[col].str.contains(".")):
cols.append(col)
except:
pass
inverse_cols = [col for col in data.df_diamonds if col not in cols]
df_if = data.df_diamonds[inverse_cols]
self.assertTrue(
df_if.equals(
data.df_diamonds
>> gr.tf_drop_if(lambda col: any(col.str.contains(".")))
)
)
| [
"zdelrosario@outlook.com"
] | zdelrosario@outlook.com |
767e56a2ed2fe2b567278f720273e1bc40ffebb5 | 060e4121c5c33d3df81f1a707735e20fc8f5717e | /plot_utilities.py | e8166865aa544d25c06dadc3f85481a6d5463240 | [] | no_license | drthomasbalzer/mmf_2020 | 15edd53d802606b05b706ba8dad0e88d83adad0d | a838d645a3d66e47281985606ca2e67598bb4520 | refs/heads/master | 2023-01-19T08:00:24.642150 | 2020-11-24T21:37:24 | 2020-11-24T21:37:24 | 293,277,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,309 | py | ################
## Author: Thomas Balzer
## (c) 2020
## Material for MMF Stochastic Analysis - Fall 2020
################
import matplotlib.pyplot as plt
def min_max_axis(y):
    """Return padded (lower, upper) axis bounds covering every series in *y*.

    Parameters
    ----------
    y : iterable of numeric sequences (one entry per plotted series)

    Returns
    -------
    (float, float) : bounds padded 10% away from zero so no data point sits
    exactly on the plot frame.

    Fix: the previous version seeded the extrema with +/-100, which skewed
    the bounds whenever the data lay entirely above 100 or below -100.
    """
    t_min = min(min(y_k) for y_k in y)
    t_max = max(max(y_k) for y_k in y)
    # Pad sign-aware so the padding always widens (never shrinks) the range.
    t_min = t_min * 1.1 if t_min < 0 else t_min * 0.9
    t_max = t_max * 0.9 if t_max < 0 else t_max * 1.1
    return t_min, t_max
class PlotUtilities:
    """Small matplotlib wrapper for the course-material plots.

    Holds a shared title and axis labels and exposes helpers for histograms,
    scatter plots, and (multi-)line plots.  Every helper shows the figure and
    closes it afterwards.

    Fix: ``plt.hist(..., normed=True)`` was replaced with ``density=True`` --
    the ``normed`` keyword was deprecated in matplotlib 2.1 and removed in 3.x.
    """

    def __init__(self, title, x_label, y_label):
        self.title = title
        self.x_label = x_label
        self.y_label = y_label

    ###############
    ##
    ## utility to plot histogram
    ##
    ###############
    def plotHistogram(self, sample_data, num_bins, labels='None', _alpha=1.):
        """Plot a single normalised histogram of ``sample_data``.

        The string ``'None'`` (not the ``None`` object) is the historical
        sentinel meaning "no labels"; kept for backward compatibility.
        """
        plt.hist(sample_data, num_bins, density=True, label=labels, alpha=_alpha)
        plt.xlabel(self.x_label)
        plt.ylabel(self.y_label)
        if labels != 'None':
            plt.legend(prop={'size': 9})
        plt.title(self.title)
        plt.show()
        plt.close()

    ###############
    ##
    ## utility to plot multiple histograms
    ##
    ###############
    def plotSubHistograms(self, sample_data, num_bins, labels='None', _alpha=1.):
        """Plot one histogram subplot per entry of ``sample_data`` (stacked)."""
        plt.xlabel(self.x_label)
        plt.ylabel(self.y_label)
        n_plots = len(sample_data)
        for k in range(n_plots):
            plt.subplot(n_plots, 1, k + 1)
            _thisLabel = 'None'
            if labels != 'None':
                _thisLabel = labels[k]
                plt.legend(prop={'size': 9})
            plt.title('Histogram With Parameter={0}'.format(_thisLabel))
            plt.hist(sample_data[k], num_bins, density=True, alpha=_alpha)
        plt.show()
        plt.close()

    def scatterPlot(self, x_values, y_values, labels, colors):
        """Scatter-plot each series in ``y_values`` against shared ``x_values``."""
        plt.xlabel(self.x_label)
        plt.ylabel(self.y_label)
        plt.title(self.title)
        for k in range(len(y_values)):
            plt.scatter(x_values, y_values[k], label=labels[k], color=colors[k])
        # One legend call after all series are drawn is sufficient.
        plt.legend(prop={'size': 9})
        plt.show()
        plt.close()

    ###############
    ##
    ## utility to plot multiple histograms
    ##
    ###############
    def plotMultiHistogram(self, samples, num_bins, colors='None'):
        """Overlay normalised histograms of several samples, increasing opacity."""
        base_alpha = 0.55
        for k in range(len(samples)):
            # Later series are drawn slightly more opaque so overlaps stay visible.
            _thisAlpha = base_alpha + 0.10 * float(k)
            facecolor = 'blue' if colors == 'None' else colors[k]
            plt.hist(samples[k], num_bins, density=True, facecolor=facecolor,
                     alpha=_thisAlpha)
        plt.xlabel(self.x_label)
        plt.ylabel(self.y_label)
        plt.title(self.title)
        plt.show()
        plt.close()

    ###############
    ##
    ## utility to plot multiple graphs at once
    ##
    ###############
    def subPlots(self, x_ax, y_ax, arg, colors):
        """Draw each series of ``y_ax`` in its own vertically stacked subplot.

        ``arg`` supplies one y-axis label per subplot; ``colors`` one line
        colour per subplot.
        """
        n_plots = len(y_ax)
        t_min, t_max = min_max_axis(y_ax)
        # Shared axis bounds across all subplots.
        plt.axis([min(x_ax), max(x_ax), t_min, t_max])
        for k in range(n_plots):
            plt.subplot(n_plots, 1, k + 1)
            if k == 0:
                plt.title(self.title)
            elif k == n_plots - 1:
                plt.xlabel(self.x_label)
            plt.ylabel(arg[k])
            plt.plot(x_ax, y_ax[k], color=colors[k])
        plt.show()
        plt.close()

    def multiPlot(self, x_ax, y_ax, arg=''):
        """Plot every series in ``y_ax`` on a single shared axis.

        ``arg`` is an optional matplotlib format string applied to all series.
        """
        t_min, t_max = min_max_axis(y_ax)
        plt.xlabel(self.x_label)
        plt.ylabel(self.y_label)
        plt.title(self.title)
        plt.axis([min(x_ax), max(x_ax), t_min, t_max])
        for series in y_ax:
            plt.plot(x_ax, series, arg)
        plt.show()
        plt.close()
| [
"thomas.balzer@gmail.com"
] | thomas.balzer@gmail.com |
79f421b971ae6350edb574cc28e1f5ea9378a75f | 8386d5bccc331ab0a55fcf72fb2d70cf66dd179a | /chpt1/os-example-6.py | ce1d0acd8e66aca78c5f42cf54ce971856e6f6a0 | [] | no_license | makeabug/PythonStandardLib | 215daf1c87508e5643f09670f9d5f7ea1b20d9a0 | 584b69daa410d092c1e71f638b99947abca8fc19 | refs/heads/master | 2021-01-22T19:30:53.433425 | 2013-09-11T03:09:01 | 2013-09-11T03:09:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | import os
os.makedirs("samples/test/level2/tmp")
fp = open("samples/test/level2/tmp/file", "w")
fp.write("inspector praline")
fp.close()
os.remove("samples/test/level2/tmp/file")
os.removedirs("samples/test/level2/tmp")
| [
"dw@example.com"
] | dw@example.com |
d7e990e744dac5ef71e3c0c35c59023144364306 | 4aa84729b3d84760433af7d96a2b7bad634abad3 | /code/data-creation/stage0.py | 9d17caab3cfb9be5121f3528524dbfc843488d12 | [] | no_license | shashankg7/rnnsm | 11326c3462f31be85dabd637a62b647d8dec8023 | 15f487ede38557e7bd2c48e89ee80b2bcd5a102c | refs/heads/master | 2020-10-01T21:05:08.702395 | 2018-01-22T21:00:46 | 2018-01-22T21:00:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | import sys
sys.path.insert(0, '../utils')
from dataPiping import makeunixtime
import pandas as pd
import numpy as np
from multiprocessing import Pool
def createStage0():
    """Load the raw session data and normalise its time columns in parallel."""
    return parallelizeDataframe(loadData(), convertTimeCols)
# read raw data
def loadData():
    """Read the pickled session dataframe from disk and return it."""
    return pd.read_pickle('../../data/sessionDF.pkl')
def convertTimeCols(df):
    """Convert raw time columns of *df* to pandas datetime/timedelta dtypes.

    startTime / startUserTime become DatetimeIndex values and sessionLength a
    TimedeltaIndex; epoch values are scaled to nanoseconds (factor 1e9).
    Mutates *df* in place and also returns it.
    """
    nanos = 1000000000
    df.startTime = pd.DatetimeIndex(df.startTime.apply(makeunixtime) * nanos)
    df.startUserTime = pd.DatetimeIndex(df.startUserTime.apply(makeunixtime) * nanos)
    df.sessionLength = pd.TimedeltaIndex(df.sessionLength * nanos)
    return df
def parallelizeDataframe(df, func, num_cores=8):
    """Apply *func* to customer-based partitions of *df* in parallel.

    Rows are bucketed into 8 partitions keyed on ``customerId`` (so all rows
    of a customer land in the same chunk), the chunks are mapped over a
    process pool, and the results are re-concatenated.

    Parameters
    ----------
    df : pd.DataFrame -- must contain a ``customerId`` column.
    func : picklable callable taking and returning a DataFrame.
    num_cores : int -- number of worker processes.

    Fixes: the temporary ``partition`` column is no longer leaked into the
    result, and the Pool is managed by a context manager so workers are
    always cleaned up even if *func* raises.
    """
    # Temporary bucket key (8 buckets); dropped again before returning.
    df['partition'] = ((df.customerId // 100) % 16) // 2
    groups = df.groupby('partition')
    chunks = [groups.get_group(key) for key in groups.groups]
    with Pool(num_cores) as pool:
        result = pd.concat(pool.map(func, chunks))
    # errors='ignore' in case *func* already removed the helper column.
    return result.drop(columns='partition', errors='ignore')
"grobgl+github@gmail.com"
] | grobgl+github@gmail.com |
59ff54f14f345d3754b2facbe16b2913a6ca90c5 | 4ef553f103f5373b4eb0dbb8985eaa6f0427aa81 | /xiaolingdangvenv/bin/flask | 02393711deb8ae9e163495e7243a77b45eaeb817 | [] | no_license | amateurlee/xiaolingdang | 2f5a3d5c5392e7cba4c84177c536d86b40b3d694 | ef35c8b504835c5e3bd090750743c3cdcf53a40f | refs/heads/master | 2020-03-08T20:19:50.533380 | 2018-07-04T17:15:22 | 2018-07-04T17:15:22 | 128,379,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | #!/root/xiaolingdang/xiaolingdangvenv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"amateurlee@126.com"
] | amateurlee@126.com | |
408cd967099fe900471e4103edf3f71c8f1f8cd8 | 12e04c219d6911d06a048c913f8d8d6c00dad857 | /chendian/api/blog/views.py | 5deab03ba38adced720c0e0764230d3bc891c9a2 | [
"MIT"
] | permissive | mozillazg/chendian-plus | 928e98beb77f351e08b25a5ba9671ad648dac4b5 | 893c62b4b855879006d4cb378faeb9d1c6635923 | refs/heads/master | 2023-09-04T09:58:58.112022 | 2017-04-04T09:44:28 | 2017-04-04T09:44:28 | 31,481,576 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from api.permissions import IsAdminOrReadonly, IsAdminOrReadAndCreate
from blog.models import Article, Tag, Category
from .serializers import ArticleSerializer, TagSerializer, CategorySerializer
class CategoryViewSet(ModelViewSet):
    """CRUD API for blog categories; writes are restricted to admin users."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    search_fields = ('name',)
    filter_fields = ('id',)
    permission_classes = (IsAdminOrReadonly,)
class TagViewSet(ModelViewSet):
    """CRUD API for blog tags."""
    # NOTE(review): unlike CategoryViewSet, no permission_classes are declared
    # here, so the project-wide DRF default applies -- confirm whether tags
    # are intentionally writable by non-admin users.
    queryset = Tag.objects.all()
    serializer_class = TagSerializer
    search_fields = ('name',)
    filter_fields = ('id',)
class ArticleViewSet(ModelViewSet):
    """CRUD API for articles.

    Permission policy (IsAdminOrReadAndCreate): non-admin users may read and
    create; only admins may modify.  Non-staff users only ever see approved
    articles (see get_queryset).
    """
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
    filter_fields = (
        'id', 'author__nick_name',
        'categories__slug', 'tags__slug'
    )
    search_fields = ('title',)
    permission_classes = (IsAdminOrReadAndCreate,)

    def get_queryset(self):
        """Restrict non-staff to approved articles; prefetch relations."""
        queryset = super(ArticleViewSet, self).get_queryset()
        if not self.request.user.is_staff:
            queryset = queryset.filter(status=Article.STATUS_APPROVED)
        # select_related/prefetch_related avoid N+1 queries when the
        # serializer touches author, tags, and categories.
        return queryset.select_related('author'
                                       ).prefetch_related('tags',
                                                          'categories')
class ArticleApprove(APIView):
    """Admin-only endpoint for toggling an article's moderation status.

    PUT approves the article, DELETE disapproves it; both return HTTP 204.
    """
    permission_classes = (IsAdminUser,)

    def _set_status(self, pk, status):
        # Shared implementation for both verbs: look the article up (404 if
        # missing), persist the requested status, and answer with no content.
        article = get_object_or_404(Article.objects.all(), pk=pk)
        article.status = status
        article.save()
        return Response(status=204)

    def put(self, request, pk, *args, **kwargs):
        """Mark the article identified by *pk* as approved."""
        return self._set_status(pk, Article.STATUS_APPROVED)

    def delete(self, request, pk, *args, **kwargs):
        """Mark the article identified by *pk* as disapproved."""
        return self._set_status(pk, Article.STATUS_DISAPPROVED)
"opensource.mozillazg@gmail.com"
] | opensource.mozillazg@gmail.com |
21ce8528825c4e522f40d39c4685443bcdbcdac4 | 76f4443972ba066e9a3239d96416b8e807800d8f | /tensorflow/contrib/seq2seq/python/ops/helper.py | 46d0563fe08b03a7eb96dfec092949761666d563 | [
"Apache-2.0"
] | permissive | ravyg/tensorflow | 34702f5c0003b602431471987fe50b5ffb6d1912 | 2a83490455f0f02ea05fc447551cacea1c1b8cb6 | refs/heads/master | 2021-01-17T11:46:47.552165 | 2017-02-23T22:21:57 | 2017-02-23T22:21:57 | 60,307,856 | 3 | 2 | null | 2016-08-28T21:03:23 | 2016-06-03T01:07:06 | C++ | UTF-8 | Python | false | false | 14,187 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
  """Helper interface.  Helper instances are used by SamplingDecoder.

  A Helper encapsulates how a decoder produces the next input at each step:
  `initialize` yields the first inputs, `sample` turns RNN outputs into
  sample ids, and `next_inputs` maps those ids to the next step's inputs.
  """

  @abc.abstractproperty
  def batch_size(self):
    """Returns a scalar int32 tensor."""
    raise NotImplementedError("batch_size has not been implemented")

  @abc.abstractmethod
  def initialize(self, name=None):
    """Returns `(initial_finished, initial_inputs)`."""
    pass

  @abc.abstractmethod
  def sample(self, time, outputs, state, name=None):
    """Returns `sample_ids`."""
    pass

  @abc.abstractmethod
  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """Returns `(finished, next_inputs, next_state)`."""
    pass
class CustomHelper(Helper):
  """Base abstract class that allows the user to customize sampling."""

  def __init__(self, initialize_fn, sample_fn, next_inputs_fn):
    """Initializer.

    Args:
      initialize_fn: callable that returns `(finished, next_inputs)`
        for the first iteration.
      sample_fn: callable that takes `(time, outputs, state)`
        and emits tensor `sample_ids`.
      next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
        and emits `(finished, next_inputs, next_state)`.
    """
    self._initialize_fn = initialize_fn
    self._sample_fn = sample_fn
    self._next_inputs_fn = next_inputs_fn
    # Inferred lazily from the `finished` tensor on the first initialize call.
    self._batch_size = None

  @property
  def batch_size(self):
    if self._batch_size is None:
      raise ValueError("batch_size accessed before initialize was called")
    return self._batch_size

  def initialize(self, name=None):
    with ops.name_scope(name, "%sInitialize" % type(self).__name__):
      (finished, next_inputs) = self._initialize_fn()
      if self._batch_size is None:
        # `finished` has one entry per batch element, so its size is the
        # batch size.
        self._batch_size = array_ops.size(finished)
    return (finished, next_inputs)

  def sample(self, time, outputs, state, name=None):
    with ops.name_scope(
        name, "%sSample" % type(self).__name__, (time, outputs, state)):
      return self._sample_fn(time=time, outputs=outputs, state=state)

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(
        name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
      return self._next_inputs_fn(
          time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class TrainingHelper(Helper):
  """A helper for use during training.  Only reads inputs.

  Returned sample_ids are the argmax of the RNN output logits.
  """

  def __init__(self, inputs, sequence_length, time_major=False, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sequence_length` is not a 1D tensor.
    """
    with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if not time_major:
        # Internally everything is handled time-major; transpose batch-major
        # inputs once up front.
        inputs = nest.map_structure(_transpose_batch_time, inputs)

      def _unstack_ta(inp):
        # One TensorArray entry per time step so next_inputs can read step t.
        return tensor_array_ops.TensorArray(
            dtype=inp.dtype, size=array_ops.shape(inp)[0],
            element_shape=inp.get_shape()[1:]).unstack(inp)

      self._input_tas = nest.map_structure(_unstack_ta, inputs)
      self._sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if self._sequence_length.get_shape().ndims != 1:
        raise ValueError(
            "Expected sequence_length to be a vector, but received shape: %s" %
            self._sequence_length.get_shape())

      # Emitted once every sequence in the batch has finished.
      self._zero_inputs = nest.map_structure(
          lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

      self._batch_size = array_ops.size(sequence_length)

  @property
  def batch_size(self):
    return self._batch_size

  def initialize(self, name=None):
    with ops.name_scope(name, "TrainingHelperInitialize"):
      # Sequences of length 0 are finished before the first step.
      finished = math_ops.equal(0, self._sequence_length)
      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
      return (finished, next_inputs)

  def sample(self, time, outputs, name=None, **unused_kwargs):
    """Returns the argmax of `outputs` (treated as logits) as int32 ids."""
    with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
      sample_ids = math_ops.cast(
          math_ops.argmax(outputs, axis=-1), dtypes.int32)
      return sample_ids

  def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
    """next_inputs_fn for TrainingHelper."""
    with ops.name_scope(name, "TrainingHelperNextInputs",
                        [time, outputs, state]):
      next_time = time + 1
      finished = (next_time >= self._sequence_length)
      all_finished = math_ops.reduce_all(finished)
      def read_from_ta(inp):
        return inp.read(next_time)
      # Only read from the TensorArrays while at least one sequence is
      # still running; otherwise reading would go out of range.
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: self._zero_inputs,
          lambda: nest.map_structure(read_from_ta, self._input_tas))
      return (finished, next_inputs, state)
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
  """A training helper that adds scheduled sampling.

  Returns -1s for sample_ids where no sampling took place; valid sample id
  values elsewhere.
  """

  def __init__(self, inputs, sequence_length, embedding, sampling_probability,
               time_major=False, seed=None, scheduling_seed=None, name=None):
    """Initializer.

    Args:
      inputs: A (structure of) input tensors.
      sequence_length: An int32 vector tensor.
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      sampling_probability: A 0D `float32` tensor: the probability of sampling
        categorically from the output ids instead of reading directly from the
        inputs.
      time_major: Python bool.  Whether the tensors in `inputs` are time major.
        If `False` (default), they are assumed to be batch major.
      seed: The sampling seed.
      scheduling_seed: The schedule decision rule sampling seed.
      name: Name scope for any created operations.

    Raises:
      ValueError: if `sampling_probability` is not a scalar or vector.
    """
    with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
                        [embedding, sampling_probability]):
      if callable(embedding):
        self._embedding_fn = embedding
      else:
        # Plain embedding matrix given: wrap it in a lookup closure.
        self._embedding_fn = (
            lambda ids: embedding_ops.embedding_lookup(embedding, ids))
      self._sampling_probability = ops.convert_to_tensor(
          sampling_probability, name="sampling_probability")
      if self._sampling_probability.get_shape().ndims not in (0, 1):
        raise ValueError(
            "sampling_probability must be either a scalar or a vector. "
            "saw shape: %s" % (self._sampling_probability.get_shape()))
      self._seed = seed
      self._scheduling_seed = scheduling_seed
      super(ScheduledEmbeddingTrainingHelper, self).__init__(
          inputs=inputs,
          sequence_length=sequence_length,
          time_major=time_major,
          name=name)

  def initialize(self, name=None):
    return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)

  def sample(self, time, outputs, state, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state]):
      # Return -1s where we did not sample, and sample_ids elsewhere
      select_sample_noise = random_ops.random_uniform(
          [self.batch_size], seed=self._scheduling_seed)
      # Per-batch-element Bernoulli decision: sample from the model output
      # with probability `sampling_probability`, otherwise read the inputs.
      select_sample = (self._sampling_probability > select_sample_noise)
      sample_id_sampler = categorical.Categorical(logits=outputs)
      return array_ops.where(
          select_sample,
          sample_id_sampler.sample(seed=self._seed),
          array_ops.tile([-1], [self.batch_size]))

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
      # Start from the plain teacher-forcing next inputs, then overwrite the
      # positions where a sample was drawn with the embedded sample ids.
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        # Positions with sample_id > -1 were sampled; <= -1 read the inputs.
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        where_sampling_flat = array_ops.reshape(where_sampling, [-1])
        where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
        sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
        inputs_not_sampling = array_ops.gather(
            base_next_inputs, where_not_sampling_flat)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        # Scatter the two disjoint row sets back into one batch tensor.
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
  """A helper for use during inference.

  Uses the argmax of the output (treated as logits) and passes the
  result through an embedding layer to get the next input.
  """

  def __init__(self, embedding, start_tokens, end_token):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.

    Raises:
      ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
        scalar.
    """
    if callable(embedding):
      self._embedding_fn = embedding
    else:
      # Plain embedding matrix given: wrap it in a lookup closure.
      self._embedding_fn = (
          lambda ids: embedding_ops.embedding_lookup(embedding, ids))

    self._start_tokens = ops.convert_to_tensor(
        start_tokens, dtype=dtypes.int32, name="start_tokens")
    self._end_token = ops.convert_to_tensor(
        end_token, dtype=dtypes.int32, name="end_token")
    if self._start_tokens.get_shape().ndims != 1:
      raise ValueError("start_tokens must be a vector")
    self._batch_size = array_ops.size(start_tokens)
    if self._end_token.get_shape().ndims != 0:
      raise ValueError("end_token must be a scalar")
    # First decoder inputs are the embedded start tokens.
    self._start_inputs = self._embedding_fn(self._start_tokens)

  @property
  def batch_size(self):
    return self._batch_size

  def initialize(self, name=None):
    # Nothing is finished before the first step.
    finished = array_ops.tile([False], [self._batch_size])
    return (finished, self._start_inputs)

  def sample(self, time, outputs, state, name=None):
    """sample for GreedyEmbeddingHelper."""
    del time, state  # unused by sample_fn
    # Outputs are logits, use argmax to get the most probable id
    if not isinstance(outputs, ops.Tensor):
      raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                      outputs)
    sample_ids = math_ops.cast(
        math_ops.argmax(outputs, axis=-1), dtypes.int32)
    return sample_ids

  def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """next_inputs_fn for GreedyEmbeddingHelper."""
    del time, outputs  # unused by next_inputs_fn
    # A batch element is finished once it emits the end token.
    finished = math_ops.equal(sample_ids, self._end_token)
    all_finished = math_ops.reduce_all(finished)
    next_inputs = control_flow_ops.cond(
        all_finished,
        # If we're finished, the next_inputs value doesn't matter
        lambda: self._start_inputs,
        lambda: self._embedding_fn(sample_ids))
    return (finished, next_inputs, state)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
7b7d425aab4c254bf7b7b0b7397173468957f603 | 2fc1acb9e0f1ae989461f601cb63a796526a8e0c | /interview_bit/stack/sliding_window_maximum_of_window.py | 8d02f1149c71eeaddd28bf851bdf57eec65a4eb8 | [] | no_license | anojkr/help | 4ff64ef9503c90d36e9c051c4d912cf727c1e077 | f2f0b46b19b5efbf888e063606c78db2cbfdd306 | refs/heads/master | 2020-06-28T01:54:10.344799 | 2019-10-31T07:35:28 | 2019-10-31T07:35:28 | 200,111,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | from collections import deque
class Solution:
# @param A : tuple of integers
# @param B : integer
# @return a list of integers
def slidingMaximum(self, A, B):
# print(A)
n = len(A)
C = [int(x) for x in A]
t = 0
r = deque()
res = []
if n == 1 and B == 1:
return C
for x in range(0, n):
r.append(C[x])
t = t + 1
if t == B:
res.append(max(r))
r.popleft()
t = t -1
continue
return res
| [
"anoj.kumar48@gmail.com"
] | anoj.kumar48@gmail.com |
39ce5936fbd983e8024c5dcb8f9878eedec786a4 | 45c32c7e725b5e4d9dbe2fc92160ded14827d2dc | /app/ota_repo/tc_policy_wt.py | a4bc885685a9655587f0d41e0aaf13af1d5619be | [] | no_license | a413107719/tourbillon | 3b68b088d07ca75044a12aa8ec6d928b580a8dd9 | c645ce586b3fb21dc6029fe0bbae3116e3531cae | refs/heads/main | 2023-06-16T01:31:45.738712 | 2021-07-16T06:10:29 | 2021-07-16T06:10:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,427 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, broad-except, anomalous-backslash-in-string
# pylint: disable=missing-docstring, line-too-long, too-few-public-methods
"""
# @Author : wxt
# @Software: PyCharm
同程政策拉取订单模式
万途公司定制
"""
import datetime
import base64
from ..controller.http_request import HttpRequest
from ..utils.exception import *
from ..utils.logger import Logger
from ..utils.util import md5_hash,simple_encrypt,RoutingKey
from .base import OTABase
from ..dao.models import *
from ..dao.internal import *
class TcPolicyWt(OTABase):
    """Tongcheng (TC) policy-based order-pulling OTA adapter, customised for Wantu.

    Orders are pulled by polling TC's open API; ticket numbers are pushed
    back to TC once issued.
    """
    verify_search_timeout = 30
    order_search_timeout = 30
    ticket_process_mode = 'push'  # ticket-number handling: we actively push to TC
    order_process_mode = 'polling'  # order sync: we pull new orders by polling
    ota_name = 'tc_policy_wt'  # OTA name; must be set and globally unique
    ota_env = 'PROD'  # production OR test interface; TODO currently unused
    ota_token = 'iwj69etrnbq1lzcx'  # token used to authenticate OTA access
    cn_ota_name = '万途政策拉单'
    tc_username = 'hzwantu'
    tc_password = 'hzwantu@tcjp'
    order_list_by_poll_url = 'http://tcflightopenapi.17usoft.com/tc/getorderlist.ashx'  # production
    # order_list_by_poll_url = 'http://127.0.0.1:8899/tc/getorderlist.ashx'  # test
    order_detail_by_poll_url = 'http://tcflightopenapi.17usoft.com/tc/getorderdetail.ashx'  # production
    # order_detail_by_poll_url = 'http://127.0.0.1:8899/tc/getorderdetail.ashx'  # test
    set_order_issued_url = 'http://tcflightopenapi.17usoft.com/tc/ticketnotify.ashx'  # production
    # set_order_issued_url = 'http://127.0.0.1:8899/tc/ticketnotify.ashx'  # test
    def __init__(self):
        super(TcPolicyWt, self).__init__()
def pwd_encrypt(self):
"""
密码加密模块
:param pwd:
:return:
"""
return md5_hash(self.tc_username+'#'+self.tc_password).lower()
    def _set_order_issued(self, order_info):
        """Notify TC that an order has been ticketed.

        POSTs the passenger/PNR/ticket-number list for ``order_info`` to the
        TC ``ticketnotify`` endpoint, together with the account credentials.

        Error codes observed from TC: ``100000`` = success, ``100010`` = the
        order was already marked as ticketed (also treated as success here).

        :param order_info: internal OrderInfo whose ``passengers`` carry
            name, pnr and ticket_no, and whose ``assoc_order_id`` is the TC
            order serial id.
        :return: True on success (including the already-ticketed case).
        :raises SetOrderIssuedException: for any other TC error code.
        """
        http_session = HttpRequest()
        pax_infos = []
        # TODO: return trips and open-jaw trips are not included yet
        for pax_info in order_info.passengers:
            pax_infos.append({
                'PassengerName': pax_info.name,
                'Pnr':pax_info.pnr,
                'TicketNo': pax_info.ticket_no,
            })
        data = {
            'Username': self.tc_username,
            'Password': self.pwd_encrypt(),
            'OrderSerialid': order_info.assoc_order_id,
            'ticketInfo': pax_infos,
            'IsTicketSuccess':'T'
        }
        Logger().info('tc_policy_set_issued_request %s'% data)
        result = http_session.request(method='POST', url=self.set_order_issued_url, json=data, verify=False,
                                      is_direct=True).to_json()
        Logger().info('tc_policy_set_issued_response %s' % result)
        if result['ErrorCode'] == '100000':
            return True
        elif result['ErrorCode'] == '100010':
            # Already ticketed on TC's side; treat as success.
            Logger().info('HASTICKETED')
            return True
        else:
            raise SetOrderIssuedException(result)
    def _order_by_poll(self):
        """Poll TC for new (status 'N') orders created in the last two hours.

        Flow:
          1. Fetch the order list for the time window.
          2. Filter out orders already present in our store
             (``exists_order_filter``) and de-duplicate — the list endpoint
             repeats an order once per passenger.
          3. Keep only DZ/BK flights; fetch each remaining order's detail and
             keep only orders whose policy remark is 'TAGWBK'.
          4. Map each detail into an internal ``OrderInfo`` (passengers,
             contact, routing key and prices).

        :return: list of OrderInfo objects ready for downstream processing.
        :raises OrderByPollException: when either TC endpoint returns a
            non-100000 error code, or on an unexpected trip type /
            passenger type in the payload.
        """
        http_session = HttpRequest()
        start_time = Time.curr_date_obj() - datetime.timedelta(hours=2)
        end_time = Time.curr_date_obj()
        data = {
            'Username': self.tc_username,
            'Password': self.pwd_encrypt(),
            'OrderStatus': 'N',
            'OrderBeginDataTime': start_time.strftime('%Y-%m-%d %H:%M:%S'),
            'orderEndDataTime': end_time.strftime('%Y-%m-%d %H:%M:%S')
        }
        Logger().debug('_order_by_poll request %s' % data)
        ol_result = http_session.request(method='POST', url=self.order_list_by_poll_url, json=data, verify=False,
                                         is_direct=True).to_json()
        order_list = []
        Logger().info("===== poll order list result : {}".format(ol_result))
        if ol_result['ErrorCode'] == '100000':
            ota_order_id_list = []
            for order in ol_result['OrderList']:
                ota_order_id_list.append(order['OrderInfo']['OrderSerialid'])
            to_process_list = self.exists_order_filter(ota_order_id_list)
            Logger().sdebug('to_process_list %s' % to_process_list)
            # Request succeeded; iterate the raw order list.
            processed_list = []
            for order in ol_result['OrderList']:
                # Orders already in our order store were pulled before and
                # are skipped via to_process_list below.
                ota_order_id = order['OrderInfo']['OrderSerialid']
                # The list endpoint repeats an order once per passenger, so
                # de-duplicate on the order serial id.
                if ota_order_id in processed_list:
                    continue
                else:
                    processed_list.append(ota_order_id)
                if ota_order_id in to_process_list:
                    if not order['FlightInfoList'][0]['FlightNo'][:2] in ['DZ', 'BK']:  # filter by carrier
                        continue
                    data = {
                        'Username': self.tc_username,
                        'Password': self.pwd_encrypt(),
                        'OrderSerialid': ota_order_id,
                    }
                    result = http_session.request(method='POST', url=self.order_detail_by_poll_url, json=data,
                                                  verify=False, is_direct=True).to_json()
                    Logger().info("========== poll order detail result : {}".format(result))
                    if result['ErrorCode'] == '100000':
                        order_detail = result
                        if not order_detail['PolicyInfo']['PolicyRemark'] == 'TAGWBK':  # only pull orders for the designated policy
                            continue
                        Logger().debug('to process detail result %s' % result)
                        oi = OrderInfo()
                        # Allow cabin downgrade when ticketing.
                        oi.allow_cabin_downgrade = 1
                        oi.ota_order_id = ota_order_id
                        oi.assoc_order_id = ota_order_id  # kept to track the OTA order status later
                        oi.ota_create_order_time = order['OrderInfo']['BaseInfo']['OrderCreateTime']
                        trip_type = order['OrderInfo']['BaseInfo']['FlightWay']
                        if trip_type == 'S':
                            oi.trip_type = 'OW'
                        elif trip_type == 'D':
                            oi.trip_type = 'RT'
                        else:
                            raise OrderByPollException('trip_type invalid %s' %trip_type)
                        sorted_segments = sorted(order_detail['FlightList'],key=lambda x:x['Sequence'])
                        flight_number_key = "-".join([x['FlightNo'] for x in sorted_segments])
                        cabin_key = "-".join([x['SubClass'] for x in sorted_segments])
                        adt_count = len([x for x in order_detail['PassengerList'] if x['PassengerType'] == "1"])
                        chd_count = len([x for x in order_detail['PassengerList'] if x['PassengerType'] in ["2","3"]])
                        oi.from_date = datetime.datetime.strptime(sorted_segments[0]['TakeOffTime'], '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d')
                        oi.from_airport = sorted_segments[0]['Dport']
                        oi.to_airport = sorted_segments[-1]['Aport']
                        oi.adt_count = adt_count
                        oi.chd_count = chd_count
                        oi.inf_count = 0
                        oi.ret_date = None
                        if sorted_segments:
                            oi.cabin_grade = sorted_segments[0]['Class']
                        # Build the routing key (one-way, non-stop itinerary).
                        dep_time_obj = datetime.datetime.strptime(sorted_segments[0]['TakeOffTime'], '%Y-%m-%d %H:%M:%S')
                        arr_time_obj = datetime.datetime.strptime(sorted_segments[-1]['ArrivalTime'], '%Y-%m-%d %H:%M:%S')
                        adult_price = 0
                        adult_tax = 0
                        child_price = 0
                        child_tax = 0
                        oi.ota_pay_price = order_detail['BaseInfo']['AllPrice']
                        for pax in order_detail['PassengerList']:
                            pax_info = PaxInfo()
                            pax_info.passenger_id = pax['CertNO']
                            # pax_info.last_name = pax.get('PassengerName', '')
                            # pax_info.first_name = pax.get('PassengerName', '')
                            pax_info.name = pax.get('PassengerName', '')
                            pax_info.pnr = Random.gen_littlepnr()
                            if int(pax['PassengerType']) in [2,3]:
                                # Child passenger.
                                pax_info.age_type = 'CHD'
                                child_price = pax['SalePrice']
                                child_tax = pax['AirPortBuildFee'] + pax['OileFee']
                            elif int(pax['PassengerType']) == 1:
                                pax_info.age_type = 'ADT'
                                adult_price = pax['SalePrice']
                                adult_tax = pax['AirPortBuildFee'] + pax['OileFee']
                            else:
                                raise OrderByPollException('age_type invalid %s' % pax['PassengerType'])
                            pax_info.birthdate = pax.get('Birthday', '')
                            # Every certificate type except the national ID ('0')
                            # is treated as a passport here.
                            if pax['CertType'] == '0':
                                pax_info.used_card_type = 'NI'
                                pax_info.card_type = 'NI'
                            else:
                                pax_info.card_type = 'PP'
                                pax_info.used_card_type = 'PP'
                            if pax_info.card_type == 'NI':
                                pax_info.card_ni = pax['CertNO'].upper()
                                pax_info.used_card_no = pax['CertNO'].upper()
                            else:
                                pax_info.used_card_no = pax['CertNO'].upper()
                                pax_info.card_pp = pax['CertNO'].upper()
                            pax_info.attr_competion()
                            oi.passengers.append(pax_info)
                        # Pick the downstream provider/channel by carrier code.
                        if order['FlightInfoList'][0]['FlightNo'][:2] == 'DZ':
                            select_provider = 'donghaiair'
                            select_provider_channel = 'donghaiair_web_wantu'
                        elif order['FlightInfoList'][0]['FlightNo'][:2] == 'BK':
                            select_provider_channel = 'okair_web'
                            select_provider = 'okair'
                        else:
                            select_provider_channel = 'okair_web'
                            select_provider = 'okair'
                        rk_info = RoutingKey.serialize(from_airport=oi.from_airport, dep_time=dep_time_obj,
                                                       to_airport=oi.to_airport, arr_time=arr_time_obj,
                                                       flight_number=flight_number_key, cabin=cabin_key,
                                                       adult_price=adult_price, adult_tax=adult_tax,
                                                       provider_channel=select_provider_channel, child_price=child_price,
                                                       child_tax=child_tax,provider=select_provider)
                        Logger().info("========= routing key : {}".format(rk_info['plain']))
                        oi.verify_routing_key = rk_info['encrypted']
                        contact_info = ContactInfo()
                        contact_info.name = order_detail['ContractInfo']['ContractName']
                        contact_info.mobile = order_detail['ContractInfo']['LinkMobiel']
                        oi.contacts.append(contact_info)
                        # Collect routing (fare/segments) information.
                        flight_routing = FlightRoutingInfo()
                        flight_routing.adult_price_discount = 100
                        flight_routing.adult_price_full_price = 0
                        flight_routing.adult_price = adult_price
                        flight_routing.adult_price_forsale = adult_price
                        flight_routing.adult_tax = adult_tax
                        flight_routing.child_price = child_price
                        flight_routing.child_price_forsale = child_price
                        flight_routing.child_tax = child_tax
                        flight_routing.product_type = 'DEFAULT'
                        flight_routing.routing_key = rk_info['encrypted']
                        flight_routing.routing_key_detail = rk_info['plain']
                        routing_number = 1
                        for segment in sorted_segments:
                            flight_segment = FlightSegmentInfo()
                            flight_segment.carrier = order['FlightInfoList'][0]['FlightNo'][:2]
                            flight_segment.dep_airport = segment['Dport']
                            flight_segment.dep_time = segment['TakeOffTime']
                            flight_segment.arr_airport = segment['Aport']
                            flight_segment.arr_time = segment['ArrivalTime']
                            flight_segment.cabin = segment['SubClass']
                            flight_segment.cabin_count = 9
                            flight_segment.flight_number = segment['FlightNo']
                            flight_segment.cabin_grade = segment['Class']
                            flight_segment.routing_number = routing_number
                            routing_number += 1
                            flight_routing.from_segments.append(flight_segment)
                        oi.routing = flight_routing
                        order_list.append(oi)
                    else:
                        raise OrderByPollException(result)
                else:
                    Logger().debug('ota_order_id %s exists '% ota_order_id)
            Logger().info("============== filtered order list : {}".format(order_list))
            return order_list
        else:
            raise OrderByPollException(ol_result)
def _before_order_list_interface(self, req_body):
"""
:param req_body:
:return:
"""
d = json.loads(req_body)
return {
'start_time':d.get('startTime'),
'end_time':d.get('endTime'),
'order_status':d.get('orderStatus')
}
    def _after_order_list_interface(self, order_info_list):
        """Serialise a list of OrderInfo objects into the order-list response.

        Builds one entry per order (segments, passengers, prices, status) and
        stores the JSON string in ``self.final_result``.

        :param order_info_list: iterable of internal OrderInfo objects.
        :return: True (the serialised payload is in ``self.final_result``).
        """
        ret_list = []
        for order_info in order_info_list:
            from_segments = []
            segment_index = 0
            if order_info.routing:
                for flight_segment in order_info.routing.from_segments:
                    segment_index += 1
                    output_segment = {
                        "carrier": flight_segment.carrier,
                        "depAirport": flight_segment.dep_airport,
                        "depTime": flight_segment.dep_time,
                        "arrAirport": flight_segment.arr_airport,
                        "arrTime": flight_segment.arr_time,
                        "stopAirports": flight_segment.stop_airports,
                        "stopCities": flight_segment.stop_airports,
                        "cabinCode": flight_segment.cabin,
                        "flightNumber": flight_segment.flight_number,
                        "depTerminal": flight_segment.dep_terminal,
                        "arrTerminal": flight_segment.arr_terminal,
                        "cabinGrade": flight_segment.cabin_grade,
                        "segmentIndex": segment_index
                    }
                    from_segments.append(output_segment)
            price_info = []
            if order_info.routing:
                price_info.append(
                    {
                        'passengerType': 0,
                        'price': order_info.ota_adult_price,
                        'tax': order_info.routing.adult_tax
                    }
                )
            if order_info.routing and order_info.routing.child_price:
                price_info.append(
                    {
                        'passengerType': 1,
                        'price': order_info.ota_child_price,
                        'tax': order_info.routing.child_tax
                    }
                )
            paxs = []
            for pax_info in order_info.passengers:
                # NOTE(review): if age_type is none of ADT/CHD/INF, the local
                # `age_type` keeps its previous value (or is unbound on the
                # first passenger) — presumably upstream guarantees one of the
                # three values; verify against _order_by_poll.
                if pax_info.age_type == 'ADT':
                    age_type = 0
                elif pax_info.age_type == 'CHD':
                    age_type = 1
                elif pax_info.age_type == 'INF':
                    age_type = 2
                output_pax = {
                    'passengerId': pax_info.passenger_id,
                    'lastName': pax_info.last_name,
                    'firstName': pax_info.first_name,
                    'name': pax_info.name,
                    'gender': pax_info.gender,
                    'passengerType': age_type,
                    'birthday': pax_info.birthdate,
                    'cardType': pax_info.used_card_type,
                    'cardNum': pax_info.used_card_no,
                    'pnrCode': order_info.pnr_code,
                    'ticketNo': pax_info.ticket_no
                }
                paxs.append(output_pax)
            sub = {
                'orderNo': order_info.assoc_order_id,
                'flightInfos': [
                    {
                        'travelType': '1',
                        'segments': from_segments
                    }
                ],
                'passengerInfos': paxs,
                'priceInfos': price_info,
                'finalPrice': order_info.ota_pay_price,
                'tripType': '1',
                'orderStatus': PROVIDERS_STATUS[order_info.providers_status]['status_category'],  # hide detailed provider status; expose only the coarse flow stage
                'OTADetailStatus':order_info.ota_order_status,
                'account': order_info.ffp_account.username if order_info.ffp_account else '',
                'payType': '网银支付',
                'providerOrderId': order_info.sub_orders[0].provider_order_id if order_info.sub_orders else '',
                'payPrice': order_info.sub_orders[0].provider_price if order_info.sub_orders else 0,
                'remarks': order_info.sub_orders[0].comment if order_info.sub_orders else '',
            }
            ret_list.append(sub)
        ret = {
            'status': 'SUCCESS',
            'msg': '成功',
            'orderList':ret_list
        }
        self.final_result = json.dumps(ret)
        return True
def _before_order_detail_interface(self, req_body):
"""
:param req_body:
:return:
"""
req_body = json.loads(req_body)
self.order_info.assoc_order_id = req_body['orderNo']
    def _after_order_detail_interface(self, order_info):
        """Serialise a single OrderInfo into the order-detail response.

        On lookup failure (``INNER_ERROR_5001``) an error payload is written
        and False is returned; otherwise the full order detail (segments,
        passengers, prices, status) is written to ``self.final_result``.

        :param order_info: internal OrderInfo object.
        :return: False on INNER_ERROR_5001, True otherwise.
        """
        if order_info.order_detail_status == 'INNER_ERROR_5001':
            self.final_result = json.dumps({'status':order_info.order_detail_status,'msg':ERROR_STATUS[order_info.order_detail_status]})
            return False
        else:
            from_segments = []
            segment_index = 0
            if order_info.routing:
                for flight_segment in order_info.routing.from_segments:
                    segment_index += 1
                    output_segment = {
                        "carrier": flight_segment.carrier,
                        "depAirport": flight_segment.dep_airport,
                        "depTime": flight_segment.dep_time,
                        "arrAirport": flight_segment.arr_airport,
                        "arrTime": flight_segment.arr_time,
                        "stopAirports": flight_segment.stop_airports,
                        "stopCities": flight_segment.stop_airports,
                        "cabinCode": flight_segment.cabin,
                        "flightNumber": flight_segment.flight_number,
                        "depTerminal": flight_segment.dep_terminal,
                        "arrTerminal": flight_segment.arr_terminal,
                        "cabinGrade": flight_segment.cabin_grade,
                        "segmentIndex": segment_index
                    }
                    from_segments.append(output_segment)
            price_info = []
            if order_info.routing:
                price_info.append(
                    {
                        'passengerType': 0,
                        'price': order_info.ota_adult_price,
                        'tax': order_info.routing.adult_tax
                    }
                )
            if order_info.routing and order_info.routing.child_price:
                price_info.append(
                    {
                        'passengerType': 1,
                        'price': order_info.ota_child_price,
                        'tax': order_info.routing.child_tax
                    }
                )
            paxs = []
            for pax_info in order_info.passengers:
                # NOTE(review): same caveat as in _after_order_list_interface —
                # `age_type` relies on age_type being one of ADT/CHD/INF.
                if pax_info.age_type == 'ADT':
                    age_type = 0
                elif pax_info.age_type == 'CHD':
                    age_type = 1
                elif pax_info.age_type == 'INF':
                    age_type = 2
                output_pax = {
                    'passengerId': pax_info.passenger_id,
                    'lastName': pax_info.last_name,
                    'firstName': pax_info.first_name,
                    'name': pax_info.name,
                    'gender': pax_info.gender,
                    'passengerType': age_type,
                    'birthday': pax_info.birthdate,
                    'cardType': pax_info.used_card_type,
                    'cardNum': pax_info.used_card_no,
                    'pnrCode': order_info.pnr_code,
                    'ticketNo': pax_info.ticket_no
                }
                paxs.append(output_pax)
            ret = {
                'status': 'SUCCESS',
                'msg': '成功',
                'orderNo': order_info.assoc_order_id,
                'flightInfos': [
                    {
                        'travelType': '1',
                        'segments': from_segments
                    }
                ],
                'passengerInfos': paxs,
                'priceInfos': price_info,
                'finalPrice': order_info.ota_pay_price,
                'tripType': '1',
                'orderStatus': PROVIDERS_STATUS[order_info.providers_status]['status_category'],  # hide detailed provider status; expose only the coarse flow stage
                'OTADetailStatus':order_info.ota_order_status,
                'account': order_info.ffp_account.username if order_info.ffp_account else '',
                'payType': '网银支付',
                'providerOrderId': order_info.sub_orders[0].provider_order_id if order_info.sub_orders else '',
                'payPrice': order_info.sub_orders[0].provider_price if order_info.sub_orders else 0,
                'remarks': order_info.sub_orders[0].comment if order_info.sub_orders else '',
            }
            self.final_result = json.dumps(ret)
            return True
if __name__ == '__main__':
pass | [
"xiaoixiao1989@163.com"
] | xiaoixiao1989@163.com |
3146ac4d82a11f18275b102801c61fb91b9937eb | b57c77c985c45f51e766cd0c5a2595a6b89b8562 | /BackEnd/account_info/admin.py | 8a1ed8e04fb27f08f3b577f9b9c7c589dd5f0ddc | [
"MIT"
] | permissive | coinarrival/BackEnd | 2e90c2be109671d3abcaf7f83b39b737d230c21e | 58a0e61510c732de6cd3f398e0ae8481c351873a | refs/heads/master | 2021-06-23T16:40:21.294612 | 2019-06-26T04:52:58 | 2019-06-26T04:52:58 | 191,327,303 | 0 | 0 | MIT | 2021-06-10T21:33:34 | 2019-06-11T08:26:00 | Python | UTF-8 | Python | false | false | 111 | py | from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(User)  # expose the User model in the Django admin site
"423127555@qq.com"
] | 423127555@qq.com |
c3276098bbfeed6b97b1c4291af58b7939b17082 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /multiple_user_representations/models/task.py | 05df7200848d01a702e2184141f95464a2814410 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 10,488 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the task class that computes the loss and metrics for retrieval task."""
from typing import Optional
import tensorflow as tf
import tensorflow_recommenders as tfrs
class MultiShotRetrievalTask(tfrs.tasks.Retrieval):
  """Extends the tfrs retrieval task to support multiple user representation (MUR).

  This class modifies the call function to support MUR. See base class for more
  details. For details on MUR see http://shortn/_PO6OdvUuAs.
  """

  def call(self,
           user_embeddings,
           candidate_embeddings,
           sample_weight = None,
           candidate_sampling_probability = None,
           eval_candidate_embeddings = None,
           compute_metrics = True,
           is_head_item = None):
    """Computes the loss function for next-item prediction.

    While computing the loss, the next-item's embedding is used as positive,
    while remaining items in the batch are negatives (in-batch negatives).

    Args:
      user_embeddings: User Embeddings [B, H, D] from the user tower, where H
        corresponds to the number of user representations.
      candidate_embeddings: The candidate embeddings corresponding to ground
        truth items that match the users. Shape: [B, D].
      sample_weight: Tensor of sample weights [B].
      candidate_sampling_probability: Optional tensor of candidate sampling
        probabilities. When given will be be used to correct the logits to
        reflect the sampling probability of in-batch negatives.
      eval_candidate_embeddings: Candidate Embeddings [B, N, D] from the item
        tower, where N corresponds to the number of candidates. The
        eval_candidate_embeddings is only used to evaluate the metrics. When
        not given, all candidates are considered for evaluating metrics.
      compute_metrics: Whether to compute metrics or not.
      is_head_item: 1 if the positive candidate is a head item else 0. This is
        used to compute head/tail metrics. Shape: [B, 1].

    Returns:
      loss: Loss value tensor.
    """
    # [B, H, B] user-vs-candidate scores, then max over the H user
    # representations -> [B, B] best-matching-head score per pair.
    scores = tf.linalg.matmul(
        user_embeddings, candidate_embeddings, transpose_b=True)
    scores = tf.math.reduce_max(scores, axis=1)
    batch_size = tf.shape(scores)[0]
    num_candidates = tf.shape(scores)[-1]
    # Diagonal labels: each user's own next item is the positive.
    labels = tf.eye(batch_size, num_candidates)
    if self._temperature is not None:
      scores = scores / self._temperature
    if candidate_sampling_probability is not None:
      candidate_sampling_probability = tf.cast(candidate_sampling_probability,
                                               tf.float32)
      scores = tfrs.layers.loss.SamplingProbablityCorrection()(
          scores, candidate_sampling_probability)
    loss = self._loss(y_true=labels, y_pred=scores, sample_weight=sample_weight)
    if not compute_metrics:
      return loss
    if not self._factorized_metrics:
      return loss
    if eval_candidate_embeddings is None:
      update_op = self._factorized_metrics.update_state(
          user_embeddings, candidate_embeddings, is_head_item=is_head_item)
    else:
      update_op = self._factorized_metrics.update_state_with_negatives(
          user_embeddings,
          candidate_embeddings,
          eval_candidate_embeddings,
          is_head_item=is_head_item)
    # Ensure the metric update runs before the loss is returned (graph mode).
    with tf.control_dependencies([update_op]):
      return tf.identity(loss)

  def compute_cosine_disagreement_loss(self,
                                       query_head):
    """Computes the cosine disagreement loss given the query_head.

    Args:
      query_head: Tensor of shape [1, H, D].

    Returns:
      disagreement_loss: The sum of pairwise cosine similarities between the
        H queries (off-diagonal entries only).
    """
    # Normalise each query; the norm is wrapped in stop_gradient so only the
    # directions (not the magnitudes) receive disagreement gradients.
    norm = tf.linalg.norm(query_head, axis=-1, keepdims=True)
    disagreement_queries = query_head / tf.stop_gradient(norm)
    query_embedding_score = tf.linalg.matmul(
        disagreement_queries, disagreement_queries, transpose_b=True)
    disagreement_loss = tf.reduce_sum(query_embedding_score)
    # Subtract the diagonal (self-similarity) so only cross-query terms remain.
    disagreement_loss -= tf.reduce_sum(tf.linalg.trace(query_embedding_score))
    return disagreement_loss
class MultiQueryStreaming(tfrs.layers.factorized_top_k.Streaming):
  """A wrapper for item candidates to efficiently retrieve top K scores.

  This class extends the tfrs factorized_top_k.Streaming class, which only
  supports a single user representation for retrieval. It overrides
  _compute_score to extend the functionality to multiple representations.

  See base class.
  """

  def _compute_score(self, queries,
                     candidates):
    """Computes multi-query scores by overriding the parent's _compute_score.

    Args:
      queries: Multiple queries per user.
      candidates: Candidate tensor.

    Returns:
      The maximum score over the per-query scores.
    """
    per_query_scores = tf.matmul(queries, candidates, transpose_b=True)
    return tf.reduce_max(per_query_scores, axis=1)
class MultiQueryFactorizedTopK(tfrs.metrics.FactorizedTopK):
  """Computes metrics for multi user representations across top K candidates.

  We reuse the functionality from base class, while only modifying the
  update_state function to support multiple representations.

  See base class for details.
  """

  def update_state(self,
                   query_embeddings,
                   true_candidate_embeddings,
                   is_head_item = None):
    """Updates the state of the FactorizedTopK Metric.

    See the base class method `update_state` for details. This method extends
    the functionality to multiple user representation case, i.e. when
    query_embeddings is of shape [B, H, D].

    Args:
      query_embeddings: The query embeddings used to retrieve candidates.
      true_candidate_embeddings: The positive candidate embeddings.
      is_head_item: 1 if the positive candidate is a head item else 0. This is
        used to compute head/tail metrics. Shape: [B, 1].

    Returns:
      update_ops: The metric update op. Used for tf.v1 functionality.
    """
    # true_candidate_embeddings: [B, d]
    true_candidate_embeddings = tf.expand_dims(
        true_candidate_embeddings, axis=1)
    # positive_scores: B x H x 1
    positive_scores = tf.reduce_sum(
        query_embeddings * true_candidate_embeddings, axis=2, keepdims=True)
    # positive_scores: B x 1 (best score over the H representations)
    positive_scores = tf.reduce_max(positive_scores, axis=1)
    top_k_predictions, _ = self._candidates(query_embeddings, k=self._k)
    # Label layout: positive score first, then the retrieved top-k scores.
    y_true = tf.concat(
        [tf.ones(tf.shape(positive_scores)),
         tf.zeros_like(top_k_predictions)],
        axis=1)
    y_pred = tf.concat([positive_scores, top_k_predictions], axis=1)

    update_ops = []
    for metric in self._top_k_metrics:
      # Head/tail metrics are selected by metric-name prefix and weighted by
      # the head/tail indicator; all other metrics use every example.
      if metric.name.startswith(
          ("head_", "Head_")) and is_head_item is not None:
        update_ops.append(
            metric.update_state(
                y_true=y_true, y_pred=y_pred, sample_weight=is_head_item))
      elif metric.name.startswith(
          ("tail_", "Tail_")) and is_head_item is not None:
        # If item is not head, then the item is considered to be a tail item.
        is_tail_item = 1.0 - is_head_item
        update_ops.append(
            metric.update_state(
                y_true=y_true, y_pred=y_pred, sample_weight=is_tail_item))
      else:
        update_ops.append(metric.update_state(y_true=y_true, y_pred=y_pred))

    return tf.group(update_ops)

  def update_state_with_negatives(
      self,
      query_embeddings,
      candidate_embeddings,
      neg_candidate_embeddings,
      is_head_item = None):
    """Updates the state of the FactorizedTopK Metric wrt the negative samples.

    The function computes the metrics only wrt the given negative samples.
    Unlike the update_state fn, this fn assumes that the number of candidate
    samples are fewer while finding topK candidates.

    Args:
      query_embeddings: The query embeddings used for retrieval. Shape: [B,H,D].
      candidate_embeddings: The candidate embeddings corresponding to ground
        truth items that match the query. Shape: [B,D].
      neg_candidate_embeddings: The negative candidate embeddings against which
        we evaluate. Shape: [B,N,D].
      is_head_item: 1 if the positive candidate is a head item else 0. This is
        used to compute head/tail metrics. Shape: [B, 1].

    Returns:
      update_ops: The metric op used for tf.v1 functionality.
    """
    # Positive score: max over the H representations of q . positive.
    pos_candidate_embeddings = tf.expand_dims(candidate_embeddings, axis=-1)
    positive_scores = tf.matmul(query_embeddings, pos_candidate_embeddings)
    positive_scores = tf.reduce_max(positive_scores, axis=1)
    # Negative scores: max over the H representations against each negative.
    negative_scores = tf.matmul(
        query_embeddings, neg_candidate_embeddings, transpose_b=True)
    negative_scores = tf.reduce_max(negative_scores, axis=1)
    y_true = tf.concat(
        [tf.ones_like(positive_scores),
         tf.zeros_like(negative_scores)],
        axis=1)
    y_pred = tf.concat([positive_scores, negative_scores], axis=1)

    update_ops = []
    for metric in self._top_k_metrics:
      if metric.name.startswith(
          ("head_", "Head_")) and is_head_item is not None:
        update_ops.append(
            metric.update_state(
                y_true=y_true, y_pred=y_pred, sample_weight=is_head_item))
      elif metric.name.startswith(
          ("tail_", "Tail_")) and is_head_item is not None:
        # If item is not head, then the item is considered to be a tail item.
        is_tail_item = 1.0 - is_head_item
        update_ops.append(
            metric.update_state(
                y_true=y_true, y_pred=y_pred, sample_weight=is_tail_item))
      else:
        update_ops.append(metric.update_state(y_true=y_true, y_pred=y_pred))

    return tf.group(update_ops)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
2dabc5c778f7f9d2cbe5ba4d185e74d905c10ccb | 159bb98457406b35d9424640ba26490883dd629a | /generateClassifier.py | 7b18593b68ba61ef7cbc66dc4ea2418f9426040d | [] | no_license | Jeya27/Handwritten-Digit-Recognition | 74594d197d6e65abcc6cb73a5f21a9ee10ff3bc5 | 8a4862e3d4f0458ae9002038cba8e66139c8eaf3 | refs/heads/master | 2020-06-21T16:12:08.807605 | 2019-07-18T03:04:16 | 2019-07-18T03:04:16 | 197,498,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | # Import the modules
from sklearn.externals import joblib
from sklearn import datasets
from skimage.feature import hog
from sklearn.svm import LinearSVC
import numpy as np
from collections import Counter
# Load the dataset.
# NOTE(review): this is Python 2 code (print statement below); fetch_mldata
# and sklearn.externals.joblib were removed in later scikit-learn releases —
# presumably this runs against an old sklearn; verify the environment.
#dataset = datasets.fetch_mldata("MNIST Original")
dataset = datasets.fetch_mldata('MNIST original', transpose_data=True, data_home='files')
# Extract the features and labels.
features = np.array(dataset.data, 'int16')
labels = np.array(dataset.target, 'int')
# Extract a HOG descriptor for every 28x28 digit image.
list_hog_fd = []
for feature in features:
    fd = hog(feature.reshape((28, 28)), orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
    list_hog_fd.append(fd)
hog_features = np.array(list_hog_fd, 'float64')
print "Count of digits in dataset", Counter(labels)
# Create a linear SVM object.
clf = LinearSVC()
# Perform the training.
clf.fit(hog_features, labels)
# Save the classifier to disk for later prediction use.
joblib.dump(clf, "digits_cls.pkl", compress=3)
| [
"noreply@github.com"
] | noreply@github.com |
278f20ab0afd45446b591cebd052df3a7798927e | 3529a89ec8c127d8b1bf8d37f9ae08be31077881 | /bootstrap_example_1.py | 7bf5ac9d16054101fba22b688a435144dfa54af1 | [] | no_license | momchi93/Bootstrapp | 6a4df5a9c04587d2e98fab4c0bdaa0a5add75cbc | 1a550419d647a771f274629167a5b9958cb5d572 | refs/heads/master | 2020-04-07T04:42:18.065056 | 2019-02-14T14:55:50 | 2019-02-14T14:55:50 | 158,066,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,329 | py | import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import ArmaProcess
import time
# Function Definitions
def generate_ar_process(N, ar):
    """Draw N samples from an AR process with coefficient vector `ar`."""
    process = ArmaProcess(np.r_[1, -ar], np.array([1]))
    return process.generate_sample(N)
def get_I_XX(X, N):
    """Periodogram of X at the N + 1 Fourier frequencies w_k = 2*pi*k/T.

    I_XX[k] = (1/T) * |sum_t X_t * exp(-i * w_k * t)|^2, i.e. the squared
    magnitude of the DFT of X at frequency w_k, scaled by 1/T.

    Vectorised: the (N + 1, T) matrix of complex exponentials is built once
    and applied as a single matrix-vector product, replacing the original
    O(N * T) Python loop with one NumPy operation.

    :param X: 1-D sample array of length T.
    :param N: highest frequency index; the result has N + 1 entries.
    :return: float array of shape (N + 1,).
    """
    T = X.size
    t = np.arange(T)
    w = 2 * np.pi * np.arange(N + 1) / T
    # phases[k, j] = exp(-1j * w_k * t_j)
    phases = np.exp(-1j * np.outer(w, t))
    return np.abs(phases @ X) ** 2 / T
def kernel(x):
    """Epanechnikov kernel: K(x) = 0.75 * (1 - x^2) for |x| < 1, else 0.

    Generalised over the original: uses ``np.zeros_like`` instead of
    ``np.zeros(x.size)`` so inputs of any shape (not just 1-D arrays) are
    supported, and accepts array-likes via ``np.asarray``. For 1-D float
    input the result is unchanged.
    """
    x = np.asarray(x, dtype=float)
    mask = np.abs(x) < 1
    K = np.zeros_like(x)
    K[mask] = 0.75 * (1 - x[mask] ** 2)
    return K
def get_C_XX_hat(I_XX, T, h):
    """Kernel-smoothed spectral density estimate from the periodogram I_XX.

    Smooths the periodogram with the Epanechnikov kernel at bandwidth h over
    the Fourier frequencies w_k = 2*pi*k/T.
    """
    N = I_XX.size - 1
    w = 2 * np.pi * np.arange(N + 1) / T
    smoothed = [
        (2 * np.pi) * np.sum(kernel((w_k - w) / h) * I_XX) / (h * T)
        for w_k in w
    ]
    return np.array(smoothed)
# Bootstrap Example
# Set parameters: sample length T, number of frequencies N, bandwidths h
# (initial estimate) and g (bootstrap re-estimates), number of bootstrap
# resamples BSR, confidence level alpha, and AR(5) coefficients.
np.random.seed(123)
start_time = time.time()
T = 256
N = int(np.floor(T / 2))
h = 0.05
g = 0.05
BSR = 100
alpha = 0.05
ar = np.array([0.5, -0.6, 0.3, -0.4, 0.2])
# Generate a sample path of the AR process.
X = generate_ar_process(T, ar)
# Calculate the periodogram and the kernel-smoothed periodogram.
I_XX = get_I_XX(X, N)
C_XX_hat = get_C_XX_hat(I_XX, T, h)
# Calculate scaled residuals (periodogram over smoothed estimate).
eps = I_XX / C_XX_hat
eps_mean = np.mean(eps)
eps_scaled = eps / eps_mean  # NOTE(review): eps_scaled is computed but unused below
# Draw BSR bootstrap resamples and calculate smoothed periodograms.
C_XX_hat_BS = np.zeros([BSR, N + 1])
for b in range(BSR):
    eps_star = np.random.choice(eps, eps.size)
    I_XX_star = C_XX_hat * eps_star
    I_XX_star[0] = 0
    C_XX_hat_BS[b, :] = get_C_XX_hat(I_XX_star, T, g)
# Get confidence bounds from the order statistics of the bootstrap estimates.
upper_index = int(np.ceil((BSR - 1) * (1 - alpha)))
lower_index = int(np.floor((BSR - 1) * alpha))
C_XX_hat_BS.sort(axis=0)
C_XX_hat_upper = C_XX_hat_BS[upper_index, :]
C_XX_hat_lower = C_XX_hat_BS[lower_index, :]
# Plot the estimate and its bounds (plt.show() blocks until the window closes).
w = 2 * np.pi * np.arange(N + 1) / T
plt.plot(w, C_XX_hat)
plt.plot(w, C_XX_hat_upper)
plt.plot(w, C_XX_hat_lower)
plt.show()
elapsed_time = time.time() - start_time
print('elapsed_time for ' + str(BSR) + ' Bootstraps: ' + str(elapsed_time))
# Monte-Carlo test of the coverage probability of the bootstrap bounds.
BSR = 5
Monte_Carlo_runs = 100
coverage_probability_list = []
for d in range(Monte_Carlo_runs):
    b_sde_Monte_Carlo = np.zeros([BSR, N + 1])
    for d1 in range(BSR):
        eps_star = np.random.choice(eps, eps.size)
        I_XX_star = C_XX_hat * eps_star
        I_XX_star[0] = 0
        b_sde_Monte_Carlo[d1, :] = get_C_XX_hat(I_XX_star, T, g)
    b_sde_Monte_Carlo.sort(axis=0)
    # Use the min/max bootstrap estimates as the widest bounds for this run.
    Cxx_estimate_centered_upper = b_sde_Monte_Carlo[BSR - 1, :]
    Cxx_estimate_centered_lower = b_sde_Monte_Carlo[0, :]
    low_hits = np.count_nonzero(Cxx_estimate_centered_lower < C_XX_hat)
    up_hits = np.count_nonzero(C_XX_hat < Cxx_estimate_centered_upper)
    coverage_probability_list.append((low_hits + up_hits) / (2 * np.size(Cxx_estimate_centered_lower)))
coverage_probability = 100 * np.mean(np.array(coverage_probability_list))
print('Frequency Domain Residual-based Bootstrap')
print('bootstrap_samples = ' + str(BSR) + ' , Monte_Carlo_runs = ' + str(Monte_Carlo_runs))
print('Coverage Probability = ' + str(coverage_probability) + '%')
elapsed_time = time.time() - start_time
print('elapsed_time for coverage_probability: ' + str(elapsed_time))
| [
"momchi93@gmail.com"
] | momchi93@gmail.com |
65dc4ad278c73ef066d397bcc5220e4f2824e5aa | 63a0597d1d5f7a2e7a5c36e4b2c568ec35a5fc19 | /tests/terraform_helper.py | 35acd107dd68004380f9cfb326ebdc014da36819 | [] | no_license | melon-ruet/lambda-testing-localstack | b80db545272c01fd526f3339502f636b0cf9a2b2 | a307f4c3dcabd9c5f3c323caa68b8f0bc66317ad | refs/heads/master | 2022-12-04T11:05:47.681169 | 2020-08-26T20:58:04 | 2020-08-26T20:58:04 | 290,556,093 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | import subprocess
import os
TERRAFORM_DIR_PATH = os.path.dirname(os.path.realpath(__file__)) + "/../terraform/"
def terraform_init():
    """Run `terraform init` against the project's terraform directory."""
    subprocess.check_call(["terraform", "init", TERRAFORM_DIR_PATH])
def create_resources():
    """Create all tf resources with `terraform apply -auto-approve`.

    Invokes terraform with an argument list (shell=False) instead of a
    shell string built by concatenation: this is safe for directory paths
    containing spaces, avoids shell-injection pitfalls, and matches the
    invocation style of terraform_init()/destroy_resources() in this module.
    """
    tf_apply = [
        "terraform",
        "apply",
        "-auto-approve",
        TERRAFORM_DIR_PATH,
    ]
    # Exit status is intentionally ignored, mirroring the original behaviour
    # (the old code only waited on the process and returned None).
    subprocess.call(tf_apply)
def destroy_resources():
    """Destroy all tf resources.

    This method will destroy any resources it can find in the state file,
    and delete all resources from the state file. Afterwards the (now
    empty) state is refreshed.
    """
    destroy_cmd = ["terraform", "destroy", "-auto-approve", TERRAFORM_DIR_PATH]
    subprocess.call(destroy_cmd)
    refresh_cmd = ["terraform", "refresh", TERRAFORM_DIR_PATH]
    subprocess.call(refresh_cmd)
def terraform_start():
    """ teardown and create resources at the beginning of feature test """
    # Fresh slate: initialise providers/modules, destroy any leftovers from
    # a previous run, then build the resources the feature tests depend on.
    terraform_init()
    destroy_resources()
    return create_resources()
| [
"m.melon@m2amedia.tv"
] | m.melon@m2amedia.tv |
36c86d678f066fe77a212a885efefd8f088686e3 | 3ad2fb33160bd7e47e620e33921d3174f39715fb | /src/motor_driver/dummy.py | e924a4b618cfdb6e1dfe06fb0b3504f130568bb1 | [
"MIT"
] | permissive | kazet/cnc | 24453cdf10dcd57eb4baf134437bde287710354b | 8e207a71616a9a13bac57df85631714235589891 | refs/heads/master | 2023-05-28T14:19:30.470077 | 2020-03-25T16:59:16 | 2020-03-25T16:59:16 | 164,492,847 | 0 | 0 | MIT | 2023-05-01T20:32:20 | 2019-01-07T20:45:54 | Python | UTF-8 | Python | false | false | 1,586 | py | from typeguard import typechecked
from utils.typing import Numeric
from motor_driver.base import BaseMotorDriver
class DummyMotorDriver(BaseMotorDriver):
"""
A dummy implementation of BaseMotorDriver that raises NotImplementedError on any command.
"""
def __init__(self, *unused_args, **unused_kwargs):
pass
@typechecked
def signal_go_left(self) -> None:
"""
Please refer to the docstring in the base class.
"""
self._raise()
@typechecked
def signal_go_right(self) -> None:
"""
Please refer to the docstring in the base class.
"""
self._raise()
@typechecked
def signal_pul_up(self) -> None:
"""
Please refer to the docstring in the base class.
"""
self._raise()
@typechecked
def signal_pul_down(self) -> None:
"""
Please refer to the docstring in the base class.
"""
self._raise()
@typechecked
def step_left(self, step_time: Numeric) -> None:
"""
Please refer to the docstring in the base class.
"""
self._raise()
@typechecked
def step_right(self, step_time: Numeric) -> None:
"""
Please refer to the docstring in the base class.
"""
self._raise()
@typechecked
def step(self, step_time: Numeric) -> None:
"""
Please refer to the docstring in the base class.
"""
self._raise()
def _raise(self):
raise NotImplementedError("No actual milling machine connected")
| [
"krzysztof.zajac2@gmail.com"
] | krzysztof.zajac2@gmail.com |
e8535cca01d9895b10308330ab51999192cb4c42 | b6357755916f2dcf465301f721dd18f19441d1d7 | /examples/pbsquerynodes.py | 8bac1bc5b82fc5e131ba78db0a07a83418e4d8e7 | [] | no_license | prehensilecode/qstatviewer | b9731bf604834604c39c794326fa798f14c4db0c | 696cf077fcac29a0a34a380ed3fdcf35c003e29f | refs/heads/master | 2020-05-17T18:40:11.826440 | 2014-02-11T04:53:29 | 2014-02-11T04:53:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | #!/usr/bin/env python
import sys
import os
import re
from PBSQuery import PBSQuery
def main():
pq = PBSQuery()
nodes = pq.getnodes()
for k,v in sorted(nodes.iteritems()):
print k, v['state']
if __name__ == '__main__':
main()
| [
"david.w.h.chin@gmail.com"
] | david.w.h.chin@gmail.com |
ec9e6dd992eeed6db72e166f944d03ef079367f3 | 9bf2e049e5f76af872ce95a2a6d52156416ad080 | /EMS/users/backend.py | 19cceaa19cab4e0c2ccaff7793d5009a7beb8f75 | [] | no_license | nitesh5695/api-testing-ems | 7c578295de889b75bb77c08fd9a03709a702a648 | f0cb0225dbf2129e38b31601faa3cc4cafe8e45a | refs/heads/master | 2023-04-06T00:23:44.456983 | 2021-04-19T09:43:27 | 2021-04-19T09:43:27 | 344,903,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py |
from .models import companies,employers
from rest_framework import exceptions
class MyAuthentication():
def login(request,username):
request.session['username']=username
return True
def isemployee(email):
try:
user=employers.objects.get(email=email)
return True
except employers.DoesNotExist:
return False
def iscompany(email):
try:
user=companies.objects.get(email=email)
return True
except companies.DoesNotExist:
return False
def authenticate(request,email=None, password=None):
try:
user= employers.objects.get(email=email,password=password)
request.session['emp_id']=user.emp_id
return user
except employers.DoesNotExist:
try:
print(" company")
user=companies.objects.get(email=email,password=password)
request.session['company_id']=user.company_id
return user
except:
raise exceptions.AuthenticationFailed('email or password is wrong')
except companies.DoesNotExist:
print("not exist")
return None
except Exception as e:
print("not exist1")
return None
| [
"pksingh@nitesh.singh5695@gmail.com"
] | pksingh@nitesh.singh5695@gmail.com |
da6a8938b7bf45fce3a7ec152f61296f754997c9 | 2bcadbb06c7783d73e7f546ada90c6ff4ec32aa0 | /catalog/views.py | 750d1e37eec922490ab6748d39b64b63301139e6 | [
"MIT"
] | permissive | RafaelGreen1/django_local_library | b5a259e1dd15efe4f1b750b3a9ed66c3de9d2304 | 8eb9ad35ac71e35671ebb78c9157374fda1b17e2 | refs/heads/main | 2022-12-27T04:22:12.897395 | 2020-10-13T06:33:49 | 2020-10-13T06:33:49 | 303,576,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,464 | py | from django.shortcuts import render
# Create your views here.
from catalog.models import Book, Author, BookInstance, Genre
def index(request):
    """View function for home page of site."""
    # Counts of the main catalog objects.
    book_count = Book.objects.all().count()
    books_containing_name = Book.objects.filter(title__icontains="name").count()
    fantasy_genre_count = Genre.objects.filter(name__icontains="fantasy").count()
    copy_count = BookInstance.objects.all().count()
    # Copies currently available for loan (status = 'a').
    available_copy_count = BookInstance.objects.filter(status__exact='a').count()
    author_count = Author.objects.count()

    # Per-session visit counter, incremented on every page load.
    visits = request.session.get('num_visits', 0)
    request.session['num_visits'] = visits + 1

    context = {
        'num_books': book_count,
        'num_instances': copy_count,
        'num_instances_available': available_copy_count,
        'num_authors': author_count,
        'num_books_with_the_word_name': books_containing_name,
        'num_genres_with_the_word_fantasy': fantasy_genre_count,
        'num_visits': visits,
    }
    # Render the HTML template index.html with the data in the context variable
    return render(request, 'index.html', context=context)
from django.views import generic
class BookListView(generic.ListView):
    """Paginated generic list view over all Book records (2 per page)."""
    model = Book
    paginate_by = 2
class BookDetailView(generic.DetailView):
    """Generic detail view for a single Book."""
    model = Book
class AuthorListView(generic.ListView):
    """Paginated generic list view over all Author records (10 per page)."""
    model = Author
    paginate_by = 10
class AuthorDetailView(generic.DetailView):
    """Generic detail view for a single Author."""
    model = Author
from django.contrib.auth.mixins import LoginRequiredMixin
class LoanedBooksByUserListView(LoginRequiredMixin,generic.ListView):
    """List the book copies currently on loan to the logged-in user."""
    model = BookInstance
    template_name ='catalog/bookinstance_list_borrowed_user.html'
    paginate_by = 10

    def get_queryset(self):
        """Restrict to this user's on-loan copies, earliest due date first."""
        borrowed = BookInstance.objects.filter(borrower=self.request.user)
        borrowed = borrowed.filter(status__exact='o')
        return borrowed.order_by('due_back')
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksByUserForLibrariansListView(PermissionRequiredMixin, generic.ListView):
    """Librarian-only view listing every book copy currently on loan."""
    # Requires the custom 'can_mark_returned' permission on BookInstance.
    permission_required = 'catalog.can_mark_returned'
    model = BookInstance
    template_name ='catalog/bookinstance_list_borrowed_librarian.html'
    paginate_by = 10
    def get_queryset(self):
        # All copies with status 'o' (on loan), regardless of borrower.
        return BookInstance.objects.filter(status__exact='o').order_by('due_back')
import datetime
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from catalog.forms import RenewBookForm
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
    """View function for renewing a specific BookInstance by librarian."""
    # 404 rather than 500 when the pk does not match any copy.
    book_instance = get_object_or_404(BookInstance, pk=pk)
    # If this is a POST request then process the Form data
    if request.method == 'POST':
        # Create a form instance and populate it with data from the request (binding):
        form = RenewBookForm(request.POST)
        # Check if the form is valid:
        if form.is_valid():
            # process the data in form.cleaned_data as required (here we just write it to the model due_back field)
            book_instance.due_back = form.cleaned_data['renewal_date']
            book_instance.save()
            # redirect to a new URL:
            return HttpResponseRedirect(reverse('all-borrowed') )
        # NOTE: an invalid POST deliberately falls through to the render
        # below so the bound form is redisplayed with its errors.
    # If this is a GET (or any other method) create the default form.
    else:
        proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
        form = RenewBookForm(initial={'renewal_date': proposed_renewal_date})
    context = {
        'form': form,
        'book_instance': book_instance,
    }
    return render(request, 'catalog/book_renew_librarian.html', context)
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from catalog.models import Author
class AuthorCreate(PermissionRequiredMixin, CreateView):
    """Form view for creating an Author; librarian permission required."""
    permission_required = 'catalog.can_mark_returned'
    model = Author
    fields = '__all__'
    # Pre-filled example date shown in the empty form.
    initial = {'date_of_death': '05/01/2018'}
class AuthorUpdate(PermissionRequiredMixin, UpdateView):
    """Form view for editing an Author; librarian permission required."""
    permission_required = 'catalog.can_mark_returned'
    model = Author
    fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
class AuthorDelete(PermissionRequiredMixin, DeleteView):
    """Confirmation view for deleting an Author; redirects to the author list."""
    permission_required = 'catalog.can_mark_returned'
    model = Author
    success_url = reverse_lazy('authors')
from catalog.models import Book
class BookCreate(PermissionRequiredMixin, CreateView):
    """Form view for creating a Book; librarian permission required."""
    permission_required = 'catalog.can_mark_returned'
    model = Book
    fields = '__all__'
class BookUpdate(PermissionRequiredMixin, UpdateView):
    """Form view for editing a Book; librarian permission required."""
    permission_required = 'catalog.can_mark_returned'
    model = Book
    fields = '__all__'
class BookDelete(PermissionRequiredMixin, DeleteView):
    """Confirmation view for deleting a Book; redirects to the book list."""
    permission_required = 'catalog.can_mark_returned'
    model = Book
    success_url = reverse_lazy('books')
| [
"arpaxad1@gmail.com"
] | arpaxad1@gmail.com |
8b95c1c41b34279bd7fb140fbdf3ddfee5576700 | 67e5436d39a2aab5bfd2b9c5cff23ca934a85182 | /donkeycar/templates/just_drive.py | b7eae21a71916d468ad04aa75dd72d1786b0b985 | [
"MIT"
] | permissive | autorope/donkeycar | d4991aa69d8b1334c6331640e532d8d796b2ac25 | 9f91ad1aaff054522b24c2c1e727d1a111e266f4 | refs/heads/main | 2023-08-17T20:25:19.085591 | 2023-07-05T19:35:50 | 2023-07-05T19:35:50 | 76,095,264 | 1,861 | 921 | MIT | 2023-08-01T23:06:30 | 2016-12-10T06:35:09 | Python | UTF-8 | Python | false | false | 2,275 | py | #!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car
Usage:
manage.py (drive)
Options:
-h --help Show this screen.
"""
import os
import time
from docopt import docopt
import donkeycar as dk
from donkeycar.parts.datastore import TubHandler
from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle
def drive(cfg):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    it's run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''
    #Initialize car
    V = dk.vehicle.Vehicle()
    class MyController:
        '''
        a simple controller class that outputs a constant steering and throttle.
        '''
        def run(self):
            # Constant command: straight ahead at 10% throttle.
            steering = 0.0
            throttle = 0.1
            return steering, throttle
    # The controller's two outputs feed the drive-train parts below by name.
    V.add(MyController(), outputs=['angle', 'throttle'])
    #Drive train setup
    # PCA9685 PWM channels drive the steering servo and the ESC.
    steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_controller,
                            left_pulse=cfg.STEERING_LEFT_PWM,
                            right_pulse=cfg.STEERING_RIGHT_PWM)
    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=throttle_controller,
                            max_pulse=cfg.THROTTLE_FORWARD_PWM,
                            zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                            min_pulse=cfg.THROTTLE_REVERSE_PWM)
    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])
    #run the vehicle for 20 seconds
    # NOTE(review): the actual run time is cfg.MAX_LOOPS / cfg.DRIVE_LOOP_HZ,
    # not necessarily 20 seconds — depends on the config values.
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
            max_loop_count=cfg.MAX_LOOPS)
max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
    # Parse CLI arguments against the usage string in the module docstring.
    args = docopt(__doc__)
    # Load the donkeycar config (config.py in the current car directory).
    cfg = dk.load_config()
    if args['drive']:
        drive(cfg)
| [
"tawnkramer@gmail.com"
] | tawnkramer@gmail.com |
ced36850afb33a6ad4af3eec0f9c3f2e32020cd3 | 7e1c6d8d8783b1a1e6ddf247739bc646d8694f7f | /ner/cnngram/src/infer.py | 299f96a84933b485b6e2069672c132acdcce3d4c | [] | no_license | ankitshah009/BioASQ-Question-Answering | 6cdef1f97a3649d969721bef93cbff676a2ab9dc | 663d4ebb0c4dca3f4b75ac5fa3be3efb96493d52 | refs/heads/master | 2020-04-02T15:30:37.680572 | 2018-12-05T21:55:36 | 2018-12-05T21:55:36 | 154,570,563 | 0 | 0 | null | 2018-10-24T21:17:28 | 2018-10-24T21:17:27 | null | UTF-8 | Python | false | false | 8,927 | py | #!/usr/bin/env python
import os
import numpy as np
import re
import optparse
import itertools
from collections import OrderedDict
from utils import create_input
import loader
from utils import models_path, evaluate, eval_script, eval_temp, reload_mappings, create_result, create_JNLPBA_result
from loader import word_mapping, char_mapping, tag_mapping, pt_mapping
from loader import update_tag_scheme, prepare_dataset
from loader import augment_with_pretrained
from gensim.models import word2vec
from GRAMCNN import GRAMCNN
import tensorflow as tf
def get_comma_separated_args(option, opt, value, parser):
    """optparse callback: store the comma-split option value as a list of strings."""
    parts = value.split(',')
    setattr(parser.values, option.dest, parts)
# Read parameters from command line
optparser = optparse.OptionParser()
optparser.add_option(
"-T", "--train", default="",
help="Train set location"
)
optparser.add_option(
"-d", "--dev", default="",
help="Dev set location"
)
optparser.add_option(
"-t", "--test", default="",
help="Test set location"
)
optparser.add_option(
"-s", "--tag_scheme", default="iobes",
help="Tagging scheme (IOB or IOBES)"
)
optparser.add_option(
"-l", "--lower", default="0",
type='int', help="Lowercase words (this will not affect character inputs)"
)
optparser.add_option(
"-z", "--zeros", default="0",
type='int', help="Replace digits with 0"
)
optparser.add_option(
"-c", "--char_dim", default="25",
type='int', help="Char embedding dimension"
)
optparser.add_option(
"-C", "--char_lstm_dim", default="25",
type='int', help="Char LSTM hidden layer size"
)
optparser.add_option(
"-b", "--char_bidirect", default="1",
type='int', help="Use a bidirectional LSTM for chars"
)
optparser.add_option(
"-w", "--word_dim", default="200",
type='int', help="Token embedding dimension"
)
optparser.add_option(
"-W", "--word_lstm_dim", default="100",
type='int', help="Token LSTM hidden layer size"
)
optparser.add_option(
"-B", "--word_bidirect", default="1",
type='int', help="Use a bidirectional LSTM for words"
)
optparser.add_option(
"-p", "--pre_emb", default="",
help="Location of pretrained embeddings"
)
optparser.add_option(
"-A", "--all_emb", default="1",
type='int', help="Load all embeddings"
)
optparser.add_option(
"-a", "--cap_dim", default="0",
type='int', help="Capitalization feature dimension (0 to disable)"
)
optparser.add_option(
"-f", "--crf", default="1",
type='int', help="Use CRF (0 to disable)"
)
optparser.add_option(
"-D", "--dropout", default="0.5",
type='float', help="Droupout on the input (0 = no dropout)"
)
optparser.add_option(
"-L", "--lr_method", default="sgd-lr_.005",
help="Learning method (SGD, Adadelta, Adam..)"
)
optparser.add_option(
"-r", "--reload", default="0",
type='int', help="Reload the last saved model"
)
optparser.add_option(
"-U", "--use_word", default="1",
type = 'int', help = "Whether to use word embedding"
)
optparser.add_option(
"-u", "--use_char", default="1",
type = 'int', help = "Whether to use char embedding"
)
optparser.add_option(
"-H", "--hidden_layer", default = "1",
type = 'int', help = "number of layers used in LSTM"
)
optparser.add_option(
"-K", "--kernel_size", default = "2,3,4,5",
type = 'string', action='callback',
callback=get_comma_separated_args
)
optparser.add_option(
"-k", "--kernel_num", default = "100,100,100,100",
type = 'string', action='callback',
callback=get_comma_separated_args
)
optparser.add_option(
"-P", "--padding", default = "0",
type = 'int', help = "whether padding the input to use gram-CNN"
)
optparser.add_option(
"-S", "--pts", default = "0",
type = 'int', help = "whether use the pts tagging"
)
opts = optparser.parse_args()[0]
# Parse parameters
# Collect parsed CLI flags into an ordered dict; integer 0/1 flags are
# converted to booleans with `== 1`.
parameters = OrderedDict()
#IOB OR IOEB
parameters['padding'] = opts.padding == 1
parameters['tag_scheme'] = opts.tag_scheme
parameters['lower'] = opts.lower == 1
parameters['zeros'] = opts.zeros == 1
parameters['char_dim'] = opts.char_dim
parameters['char_lstm_dim'] = opts.char_lstm_dim
parameters['char_bidirect'] = opts.char_bidirect == 1
parameters['word_dim'] = opts.word_dim
parameters['word_lstm_dim'] = opts.word_lstm_dim
parameters['word_bidirect'] = opts.word_bidirect == 1
parameters['pre_emb'] = opts.pre_emb
parameters['all_emb'] = opts.all_emb == 1
parameters['cap_dim'] = opts.cap_dim
parameters['crf'] = opts.crf == 1
parameters['dropout'] = opts.dropout
parameters['lr_method'] = opts.lr_method
parameters['use_word'] = opts.use_word == 1
parameters['use_char'] = opts.use_char == 1
parameters['hidden_layer'] = opts.hidden_layer
# If the -K/-k callbacks never fired the defaults are still plain strings,
# so fall back to the hard-coded lists; otherwise convert each item to int.
# NOTE: this script is Python 2 (see the `print "..."` statements below),
# where map() returns a list; under Python 3 it would be a lazy iterator.
parameters['kernels'] = [2,3,4,5] if type(opts.kernel_size) == str else map(lambda x : int(x), opts.kernel_size)
parameters['num_kernels'] = [100,100,100,100] if type(opts.kernel_num) == str else map(lambda x : int(x), opts.kernel_num)
parameters['pts'] = opts.pts == 1
# Model checkpoint directory name encodes the hyper-parameter choices so
# different configurations are saved/reloaded separately.
model_name = 'use_word' + str(parameters['use_word']) + \
            ' use_char' + str(parameters['use_char']) + \
            ' drop_out' + str(parameters['dropout']) + \
            ' hidden_size' + str(parameters['word_lstm_dim']) + \
            ' hidden_layer' + str(parameters['hidden_layer']) + \
            ' lower' + str(parameters['lower']) + \
            ' allemb' + str(parameters['all_emb']) + \
            ' kernels' + str(parameters['kernels'])[1:-1] + \
            ' num_kernels' + str(parameters['num_kernels'])[1:-1] + \
            ' padding' + str(parameters['padding']) + \
            ' pts' + str(parameters['pts']) + \
            ' w_emb' + str(parameters['word_dim'])
# Check parameters validity
assert os.path.isfile(opts.train)
assert os.path.isfile(opts.dev)
#assert os.path.isfile(opts.test)
assert parameters['char_dim'] > 0 or parameters['word_dim'] > 0
assert 0. <= parameters['dropout'] < 1.0
assert parameters['tag_scheme'] in ['iob', 'iobes']
assert not parameters['all_emb'] or parameters['pre_emb']
assert not parameters['pre_emb'] or parameters['word_dim'] > 0
assert not parameters['pre_emb'] or os.path.isfile(parameters['pre_emb'])
# Check evaluation script / folders
if not os.path.isfile(eval_script):
raise Exception('CoNLL evaluation script not found at "%s"' % eval_script)
if not os.path.exists(eval_temp):
os.makedirs(eval_temp)
if not os.path.exists(models_path):
os.makedirs(models_path)
if 'bin' in parameters['pre_emb']:
wordmodel = word2vec.Word2Vec.load_word2vec_format(parameters['pre_emb'], binary=True)
else:
wordmodel = word2vec.Word2Vec.load_word2vec_format(parameters['pre_emb'], binary=False)
# Data parameters
lower = parameters['lower']
zeros = parameters['zeros']
tag_scheme = parameters['tag_scheme']
if os.path.isfile(opts.test):
test_sentences = loader.load_sentences(opts.test, lower, zeros)
update_tag_scheme(test_sentences, tag_scheme)
word_to_id, char_to_id, tag_to_id, pt_to_id, dico_words, id_to_tag = reload_mappings(os.path.join(models_path ,model_name, 'mappings.pkl'))
if os.path.isfile(opts.test):
test_data, m3 = prepare_dataset(
test_sentences, word_to_id, char_to_id, tag_to_id, pt_to_id,lower
)
print "%i sentences in test." % (
len(test_data))
n_epochs = 100 # number of epochs over the training set
freq_eval = 2000 # evaluate on dev every freq_eval steps
best_dev = -np.inf
best_test = -np.inf
count = 0
max_seq_len = m3 if m3 > 200 else 200
#initilaze the embedding matrix
word_emb_weight = np.zeros((len(dico_words), parameters['word_dim']))
n_words = len(dico_words)
gramcnn = GRAMCNN(n_words, len(char_to_id), len(pt_to_id),
use_word = parameters['use_word'],
use_char = parameters['use_char'],
use_pts = parameters['pts'],
num_classes = len(tag_to_id),
word_emb = parameters['word_dim'],
drop_out = 0,
word2vec = word_emb_weight,feature_maps=parameters['num_kernels'],#,200,200, 200,200],
kernels=parameters['kernels'], hidden_size = parameters['word_lstm_dim'], hidden_layers = parameters['hidden_layer'],
padding = parameters['padding'], max_seq_len = max_seq_len)
gramcnn.load(models_path ,model_name)
test_score, output_path = evaluate(parameters, gramcnn, test_sentences,
test_data, id_to_tag, remove = False, max_seq_len = max_seq_len, padding = parameters['padding'], use_pts = parameters['pts'])
#os.remove(output_path)
print (output_path)
if 'bc2' in opts.test:
create_result(output_path)
from subprocess import call
call("perl alt_eval.perl -gene GENE.eval -altgene ALTGENE.eval result.eval".split())
if 'Genia4EReval1' in opts.test:
create_JNLPBA_result(output_path)
from subprocess import call
call("perl evalIOB2.pl result_JNLPBA.eval Genia4EReval1.iob2".split())
| [
"gbayomi@gmail.com"
] | gbayomi@gmail.com |
4c8e5edcd4706488c54a8864ed2c93d83349dfc1 | 958231073be5a0978d3e3fbb648da8f26b2cbb03 | /example/conftest.py | 4071b2c66e9419fe3f49c4b652e0bfb75cc1b5d1 | [] | no_license | Suren76/Python | afc9589c677496db6bbfb38166164b6e28da41ab | c843d43f7384397aeabcc16155b7ef6617ce256c | refs/heads/main | 2023-03-02T22:36:17.904471 | 2021-01-31T23:48:29 | 2021-01-31T23:48:29 | 329,370,489 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | import os
import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
@pytest.fixture
def browser(request):
    """Function-scoped fixture yielding a maximised Chrome WebDriver.

    The driver binary is resolved/downloaded via webdriver_manager; an
    implicit wait of 10 s is applied to all element lookups.
    """
    options = webdriver.ChromeOptions()
    options.add_argument("--start-maximized")
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
    driver.implicitly_wait(10)
    def close_browser():
        # Finalizer: quit the browser even if the test fails.
        driver.quit()
    request.addfinalizer(close_browser)
    return driver
def pytest_addoption(parser):
    """Register a --url CLI option for the base URL under test."""
    parser.addoption("--url", action="store", default='https://buy.am/')
@pytest.hookimpl()
def pytest_sessionstart(session):
    # Expose the chosen base URL to tests/helpers via the environment.
    os.environ['URL'] = session.config.getoption("--url")
| [
"parsyan_suren@mail.ru"
] | parsyan_suren@mail.ru |
e127448c81eaef35d6b4be3f774b985c4a0bbc7d | 72b1683c545a78a2dad9f1d8a30ca4101d79ffa9 | /our_duck.py | 2d8363c64ed927daf3f0fe5d0b8d0beb012beb93 | [] | no_license | whitercactus/Duck-Hunt | 21a2cb1e23050663fa7fb7ff2cd6ee46683fb406 | 3341cd3239abe02345bbd7ff93c243573e4cdc75 | refs/heads/master | 2022-12-25T02:35:59.571689 | 2020-10-02T06:02:56 | 2020-10-02T06:02:56 | 300,516,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | import pygame
from constants import *
from spritesheet import SpriteSheet
pygame.init()
class Duck(pygame.sprite.Sprite):
    """Bouncing duck sprite: animates from a sprite sheet and ricochets
    off the play-field edges."""
    def __init__(self, x,y):
        # x, y: initial top-left position of the duck's rect.
        super().__init__()
        sprite_sheet = SpriteSheet("duck.png")
        # Animation frames for right- and left-facing flight.
        self.frames_r = []
        self.frames_l = []
        image = sprite_sheet.get_image(0,0,70,70)
        self.frames_r.append(image)
        image = sprite_sheet.get_image(70, 0, 70, 70)
        self.frames_r.append(image)
        # Left-facing frames are right-facing frames mirrored horizontally.
        image = sprite_sheet.get_image(140, 0, 70, 70)
        image = pygame.transform.flip(image,True,False)
        self.frames_l.append(image)
        image = sprite_sheet.get_image(0, 0, 75, 75)
        image = pygame.transform.flip(image, True, False)
        self.frames_l.append(image)
        image = sprite_sheet.get_image(75, 0, 75, 75)
        image = pygame.transform.flip(image, True, False)
        self.frames_l.append(image)
        # NOTE(review): this last frame is 75x75 (others are 70x70) and is
        # appended UNFLIPPED to frames_r — possibly meant for frames_l;
        # confirm against the duck.png sheet layout.
        image = sprite_sheet.get_image(150, 0, 75, 75)
        self.frames_r.append(image)
        self.image = self.frames_r[0]
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        # Per-tick velocity; dir tracks which frame set to animate.
        self.change_x = 10
        self.change_y = 7
        self.dir = "r"
    def update(self):
        # Pick the animation frame from the horizontal position so the
        # wings flap as the duck moves (one frame per 30 px).
        pos = self.rect.x
        if self.dir == "r":
            frame = (pos // 30) % len(self.frames_r)
            self.image = self.frames_r[frame]
        if self.dir == "l":
            frame = (pos // 30) % len(self.frames_l)
            self.image = self.frames_l[frame]
        # Facing direction follows the sign of the horizontal velocity.
        if self.change_x == 10:
            self.dir = "r"
        if self.change_x == -10:
            self.dir = "l"
        self.rect.x += self.change_x
        self.rect.y += self.change_y
        # Bounce off the play-field edges (x in [0, 750], y in [0, 300]).
        # NOTE(review): bounds look hard-coded to the window size — confirm.
        if self.rect.x < 0:
            self.change_x *= -1
        if self.rect.x > 750:
            self.change_x *= -1
        if self.rect.y < 0:
            self.change_y *= -1
        if self.rect.y >= 300:
            self.change_y *= -1
def draw(self):
screen.blit(self.image,(self.rect.x,self.rect.y)) | [
"whitercactus927@gmail.com"
] | whitercactus927@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.