text stringlengths 8 6.05M |
|---|
from os.path import join
from xml.sax import ContentHandler, parseString
from action import Action
import kodi_baselibrary as kodi
class MessageContentHandler(ContentHandler):
    """SAX handler that collects the numeric message codes referenced by one
    skin XML unit, plus warnings about malformed message references."""

    def __init__(self, unit):
        self.unit = unit                 # the XML unit being scanned
        self.messagecodes = []           # numeric message codes found
        self.messages = []               # human-readable warnings collected
        self.insidelabelelement = False  # True while inside a label-style element

    def startElement(self, tag, attributes):
        # Label-style elements carry a bare message code as their text content.
        if tag == kodi.LABEL_ELEMENT or tag == kodi.LABEL2_ELEMENT or tag == kodi.ALTLABEL_ELEMENT:
            self.insidelabelelement = True
        elif tag == kodi.PARAM_ELEMENT and 'value' in attributes:
            self.parselocalize(attributes['value'])
        elif tag == kodi.VIEWTYPE_ELEMENT and 'label' in attributes:
            self.parsemessagelabel(attributes['label'])

    def endElement(self, tag):
        # Cleared unconditionally: closing any element ends the label context.
        self.insidelabelelement = False

    def characters(self, content):
        # NOTE(review): SAX may deliver one text node in several characters()
        # calls, so a numeric label split across chunks would be mis-parsed —
        # confirm label texts are short enough that this never happens.
        if self.insidelabelelement and content.isdigit():
            self.messagecodes.append(int(content))
        else:
            self.parselocalize(content)

    def parsemessagelabel(self, content):
        """Record `content` as a message code when purely numeric, otherwise
        scan it for embedded localize references."""
        if content.isdigit():
            self.messagecodes.append(int(content))
        else:
            self.parselocalize(content)

    def parselocalize(self, content):
        """Extract every LOCALIZE-style message code embedded in `content`
        and record non-numeric keys as warnings."""
        index = content.find(kodi.LOCALIZE_IDENTIFIER)
        while index >= 0:
            start = index + len(kodi.LOCALIZE_IDENTIFIER)
            end = self.findendoflocalize(content, start)
            # Strip the surrounding brackets from the reference.
            messagecode = content[start + 1:end - 1]
            if messagecode.isdigit():
                self.messagecodes.append(int(messagecode))
            else:
                self.messages.append("Unexpected (not strictly numeric) message key '" + messagecode + "' in content '" + content + "'")
            index = content.find(kodi.LOCALIZE_IDENTIFIER, end)

    def findendoflocalize(self, content, start):
        """Return the index just past the bracketed expression beginning at
        `start`, honoring nested '[' / ']' pairs."""
        index = start
        count = 0  # current bracket nesting depth
        while (index == start or count > 0) and index < len(content):
            count = count + (1 if content[index] == '[' else 0)
            count = count - (1 if content[index] == ']' else 0)
            index += 1
        return index
class CheckMessagesAction(Action):
    """Action that cross-checks the message codes referenced by a skin's XML
    units against the skin-specific and shared language files."""

    def __init__(self):
        super().__init__(
            name = "Check language files and messages",
            function = self.checkmessages,
            description = "Check messages in skin-specific language file for:\n" +
                "- duplicate entries within the skin-specific language file (texts that appear multiple times in the skin-specific language file)\n" +
                "- duplicate entries with the standard language file (texts that appear in both the skin-specific and the standard language file)\n" +
                "- unused entries (texts with numbers that are never used)",
            arguments = ['skin', 'sharedlanguage'])

    def checkmessages(self, messagecallback, arguments):
        """Run the message checks for every resolution of the skin.

        :param messagecallback: callable(level, text) used for reporting.
        :param arguments: dict with 'skin' and 'sharedlanguage' entries.
        """
        messagecallback("action", "\nChecking messages...")
        skin = arguments['skin']
        sharedlanguage = arguments['sharedlanguage']
        for resolution in skin.resolutions:
            messagecallback("info", "- Skin resolution: " + resolution.aspect + " (" + resolution.directory + ")")
            self.resetmessages()
            self.parsemessages(messagecallback, resolution)
            self.analyzemessages(messagecallback, sharedlanguage, skin, resolution)

    def resetmessages(self):
        # Message codes collected for the resolution currently being checked.
        self.messagecodes = []

    def parsemessages(self, messagecallback, resolution):
        """Parse every XML unit of `resolution`, accumulating message codes
        and forwarding parser warnings to `messagecallback`."""
        for unit in resolution.units:
            contenthandler = MessageContentHandler(unit)
            parseString("".join(unit.lines), contenthandler)
            self.messagecodes.extend(contenthandler.messagecodes)
            for message in contenthandler.messages:
                messagecallback("warning", "- " + unit.name + ": " + message)
        messagecallback("info", "- Number of referenced messages: " + str(len(self.messagecodes)))

    def analyzemessages(self, messagecallback, sharedlanguage, skin, resolution):
        """Report undefined, unused and duplicated language entries."""
        messagekeyset = set(self.messagecodes)
        for messagekey in sorted(messagekeyset):
            if messagekey >= kodi.LOCALIZE_FIRSTSKINKEY and messagekey < kodi.LOCALIZE_LASTSKINKEY:
                if messagekey not in skin.language.strings:
                    messagecallback("warning", "- Undefined (skin) message key '" + str(messagekey) + "'")
            elif sharedlanguage:
                if messagekey not in sharedlanguage.strings:
                    messagecallback("warning", "- Undefined (shared) message key '" + str(messagekey) + "'")
        for languagekey in skin.language.strings:
            # PERF FIX: membership test against the set (O(1)) instead of the
            # original scan of the self.messagecodes list (O(n) per key).
            if languagekey not in messagekeyset:
                messagecallback("message", "- Unused language entry '" + str(languagekey) + "' (" + skin.language.strings[languagekey] + ")")
        if sharedlanguage:
            sharedlanguagevalues = set(sharedlanguage.strings.values())
            for languagekey, languagevalue in skin.language.strings.items():
                if languagevalue in sharedlanguagevalues:
                    messagecallback("message", "- Shared language file entry duplication '" + str(languagekey) + "' (" + skin.language.strings[languagekey] + ")")
        # First occurrence of each value removes it from the set; any later
        # occurrence is therefore a duplicate and is reported.
        languagevalues = set(skin.language.strings.values())
        for languagekey, languagevalue in skin.language.strings.items():
            if languagevalue in languagevalues:
                languagevalues.remove(languagevalue)
            else:
                messagecallback("message", "- Language file entry duplication '" + str(languagekey) + "' (" + skin.language.strings[languagekey] + ")")
|
from django.contrib import admin
from mapFriends.models import UserProfile
# Register UserProfile so its records can be viewed/edited in the Django
# admin site. (Original Spanish comment: "Modify the admin to see the data".)
admin.site.register(UserProfile)
#-*- coding: utf-8 -*-
#####
#
#Localization for payroll to the Dominican Republic.
#Modifications to the hr.employee object.
#
#Author: Carlos Llamacho @ Open Business Solutions
#
#Date: 2013-10-22
#
#####
from openerp.osv import fields, orm
class hr_employee(orm.Model):
    """Dominican Republic payroll localization: adds split name fields to
    hr.employee and creates a linked res.partner on employee creation."""
    _name = 'hr.employee'
    _inherit = 'hr.employee'
    _auto = True
    _columns = {
        'names': fields.char('Primer y segundo nombre', size=128, required=True, help="""El primer y el segundo nombre del empleado."""),
        'first_lastname': fields.char('Primer apellido', size=64, required=True, help="""El primer apellido del empleado"""),
        'second_lastname': fields.char('Segundo Apellido', size=64, help="""El segundo apellido del empleado."""),
        'bank_id': fields.many2one('res.bank', 'Banco Nomina'),
        #'company_not_ret_agent': fields.boolean('Utiliza otro agente de retencion?', help='Marque este campo si el empleado cotiza a la TSS '
        #                                        'a traves de otra Empresa'),
        #'company_ret_rnc_ced': fields.char('RNC/Cedula Agente de Retencion', 11, help='Agente de Retencion Unico de las '
        #                                   'Retenciones del Trabajador'),
    }

    def onchange_names(self, cr, uid, ids, names,
                       first, last, context=None):
        """Rebuild the display name when any name part changes.

        Concatenates `names`, `first` and `last` (skipping empty parts) and
        returns the result for the 'name' field in the onchange dict.

        Returns:
            dict: {'value': {'name': <full name>}}
        """
        if context is None:
            context = {}
        # Dead code removed: the original assigned res = {} and then
        # res = {'value': {'None'}}, both unconditionally overwritten below.
        name = []
        if names:
            name.append(names)
        if first:
            name.append(first)
        if last:
            name.append(last)
        return {'value': {'name': ' '.join(name)}}

    def create(self, cr, uid, ids, context=None):
        """Overwritten create: also creates a res_partner record and links it
        as the employee's home address.

        NOTE(review): the third positional parameter is named `ids` but it
        receives the `values` dict of the standard orm create() signature;
        the name is kept unchanged for keyword-call compatibility.
        """
        partner_obj = self.pool.get('res.partner')
        values = ids  # `ids` actually carries the values dict (see note above)
        created_id = super(hr_employee, self).create(cr, uid, values, context)
        created_partner_id = partner_obj.create(cr, uid, {'name': values.get('name'),
                                                          'display_name': values.get('name_related'),
                                                          'lang': 'es_DO',
                                                          'active': True,
                                                          'email': values.get('work_email'),
                                                          'phone': values.get('work_phone'),
                                                          'employee': True,
                                                          'tz': 'America/Santo_Domingo',
                                                          'notification_email_send': 'comment',
                                                          'company_id': values.get('company_id')}, context)
        # Link the new employee to its freshly created partner record.
        self.write(cr, uid, created_id, {'address_home_id': created_partner_id}, context)
        return created_id
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 1. Exercise Webmining"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
a=256
b=256
a*b
"### Use Python as a calculator to compute 256 times 256"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Put the strings \"Hello\" and \"World!\" in two variables. Concatenate both Strings and print them"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a list that contains the 4 strings \"alpha\", \"beta\", \"gamma\", and \"delta\". Add \"epsilon\" to the end of this list"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a sublist with the first three elements of this list\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Write a Python program to construct the following pattern using for loops"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Write a function that takes 3 input parameters and returns product of these values"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Write a Python function that takes an int as a parameter and checks if the number is prime or not."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Use your function to create a list that contains all primes from 2 to 100"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Write a function that simulates dice rolls (6-sided dices)\n",
"- Write a function that has the number of dice rolls as the input parameter\n",
"- The output is a map with the counts how often each result of a roll (1-6) occurred\n",
"- You can use the randomint() function from the random package"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Combine two dictionaries by adding values for common keys.\n",
"d1 = {'a': 100, 'b': 200, 'c':300}\n",
"\n",
"d2 = {'a': 300, 'b': 200, 'd':400}\n",
"\n",
"Sample output: {'a': 400, 'b': 400, 'd': 400, 'c': 300}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a list of lists. Then, write a list comprehension that creates a list with the lengths of each (sub)list in the primary list"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Exceptions\n",
"\n",
"Write your own function *my_division* that gets two variables a and b and returns a divided by b. Catch the ZeroDivisionError that occurs if b equals 0 and return 1 in this case.\n",
"\n",
"How could the same behavior be achieved without catching an exception?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### File handling\n",
"\n",
"Write a function that gets a letter and the path to a (text-)file on your harddrive as input parameters and computes a list as its output. The list should contain one number for each line. The i-th line shows, how often the given letter occurs in line number i."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Change please!\n",
"In our currency, we have coins with certain values, e.g., (200,100,50,20,10,5,2,1) cents. Write a function that computes the different options to pay a certain amount X with the given set of coins."
]
},
{
"cell_type": "raw",
"metadata": {},
"source": [
"Example:\n",
"pay_options (10, [200,100,50,20,10,5,2,1])\n",
"[[10],\n",
" [5, 5],\n",
" [5, 2, 2, 1],\n",
" [5, 2, 1, 1, 1],\n",
" [5, 1, 1, 1, 1, 1],\n",
" [2, 2, 2, 2, 2],\n",
" [2, 2, 2, 2, 1, 1],\n",
" [2, 2, 2, 1, 1, 1, 1],\n",
" [2, 2, 1, 1, 1, 1, 1, 1],\n",
" [2, 1, 1, 1, 1, 1, 1, 1, 1],\n",
" [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Numpy"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a numpy arrays with 20 rows and 10 columns, all values set to zero"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create an array with the same shape containing all natural numbers 0-199"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Create a 4-dimensional array and set all values to a random integer smaller than 5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### What is the mean of all values contained in this array?"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Select a sub-array containing the middle two rows of the middle two columns from that array"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Given a numpy array, return a new numpy array the contains only the elements that are greater than 100"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
|
import numpy as np
import pickle
from sklearn.metrics import roc_auc_score
from isolation_forest.isolation_forest import IsolationForest
from isolation_forest.tree_grower_generalized_uniform import TreeGrowerGeneralizedUniform
from isolation_forest.tree_grower_basic import TreeGrowerBasic
import os
from utils import load_data
def test_grower_saving_files(grower, grower_args, source_path, target_path,
                             repeat_cnt, additional_info=""):
    """Train isolation forests built by `grower` on every dataset under
    `source_path`, print per-repeat ROC-AUC, and save the anomaly scores.

    Relies on the module-level `tree_cnt` and `sample_size` globals.

    :param grower: tree-grower class, instantiated once per repeat.
    :param grower_args: extra positional args appended after the data X.
    :param source_path: directory holding the input dataset files.
    :param target_path: root directory for results.
    :param repeat_cnt: number of independent trainings per dataset.
    :param additional_info: suffix for the per-grower results folder name.
    """
    # Per-grower output folder; os.path.join keeps this portable instead of
    # hard-coding Windows '\\' separators.
    target_folder_path = os.path.join(
        target_path, f"{grower.__name__}_{additional_info}")
    if not os.path.exists(target_folder_path):
        os.mkdir(target_folder_path)
    datasets = os.listdir(source_path)
    for dataset in datasets:
        ds_name = dataset.split('.')[0]
        X, y = load_data(source_path, dataset)
        total_sample_cnt = X.shape[0]
        scores = np.zeros((repeat_cnt, total_sample_cnt))
        for i in range(repeat_cnt):
            gr_args = (X,) + grower_args
            new_grower = grower(*gr_args)
            forest = IsolationForest(new_grower, X, tree_cnt, sample_size)
            forest.grow_forest()
            print(f"trained, {grower.__name__}!")
            scores[i, ...] = forest.compute_paths()
            print(f'{ds_name}: {roc_auc_score(y, scores[i, ...])}')
        # BUG FIX: scores were previously written directly into `target_path`,
        # so every grower overwrote the previous grower's .npy files; write
        # them into the per-grower folder created above instead.
        np.save(os.path.join(target_folder_path, f'{ds_name}.npy'), scores)
# Experiment configuration.
tree_cnt = 100     # trees per forest (read as a global by test_grower_saving_files)
sample_size = 256  # sub-sample size per tree
repeat_cnt = 30
power = 2          # exponent parameter for the generalized-uniform grower
growers = [TreeGrowerBasic, TreeGrowerGeneralizedUniform]
grower_arg_sets = [(sample_size,), (sample_size,power)]
additional_infos = ['', f'power_{power}']
source_path = 'real_data'
target_path = 'results_real'
if(not os.path.exists(target_path)):
    os.mkdir(target_path)
# NOTE(review): this overrides repeat_cnt = 30 set above — looks like a
# debugging leftover; confirm before running the full experiment.
repeat_cnt = 1
cnt = 0  # NOTE(review): never used below
# Run each grower configuration over every dataset.
for grower, grower_arg_set, additional_info in zip(growers, grower_arg_sets, additional_infos):
    test_grower_saving_files(grower, grower_arg_set, source_path, target_path,
                             repeat_cnt, additional_info)
|
# Variable to hold student continent
studentContinent = 'America'
studentSubContinent = 'South America'

# Flavor lookup: plain continents map directly; 'America' is resolved via
# (continent, sub-continent) pairs instead.
flavors = {
    'Africa': 'Cookies and Cream',
    'Asia': 'Cookies and Cream',
    'Europe': 'Chocolate Ice Cream',
    ('America', 'North America'): 'Vanilla Ice Cream',
    ('America', 'South America'): 'Peanut Butter Cup',
    'Australasia': 'Chocolate Chip',
}

flavor = flavors.get((studentContinent, studentSubContinent),
                     flavors.get(studentContinent))
if flavor is None:
    print('We are still working on finding a continent for you!')
else:
    print('You get ' + flavor + ' flavor')
|
"""
剑指 Offer 44. 数字序列中某一位的数字
数字以0123456789101112131415…的格式序列化到一个字符序列中。在这个序列中,第5位(从下标0开始计数)是5,第13位是1,第19位是4,等等。
请写一个函数,求任意第n位对应的数字。
"""
"""
这个题简单来说,还是最简单的找规律,排除第一位的0,然后可以发现;
123456789 总共9个数,都是1位的
101112...9899 总共90*2个数,都是两位的
100101102...998999 总共900*3个数,都是三位的
...
那么如果求第n个数,用这个数循环去寻找就知道是在哪个范围里了,比方说第365个数,因为第一个数是0,所以需要排除掉这个.
364 - 9 - 180 = 175 < 900*3,那么这个数一定是处于三位数的组合中. 175/3 = 58...1
那么这个数应该是158里边的第一个数,即1.
反正大概就是这个意思,其中的计算可能会有一点点误差,在写代码的时候再调节吧.
"""
def findNthDigit(n: int) -> int:
    """Return the digit at 0-indexed position n of the infinite sequence
    "0123456789101112131415...".

    The sequence is viewed as blocks: 9 one-digit numbers, 90 two-digit
    numbers, 900 three-digit numbers, and so on; we skip whole blocks,
    then index into the number that holds position n.
    """
    width = 1          # digit count of numbers in the current block
    base = 1           # smallest number with `width` digits (1, 10, 100, ...)
    remaining = n - 1  # account for the leading '0'
    while remaining > width * 9 * base:
        remaining -= width * 9 * base
        base *= 10
        width += 1
    quotient, offset = divmod(remaining, width)
    # `base + quotient` is the number containing the digit; `offset` selects it.
    return int(str(base + quotient)[offset])
if __name__ == '__main__':
    # Demo: digit at position 2**31 - 1 of the infinite digit sequence.
    res = findNthDigit(2147483647)
    print(res)
num1=int(input("Enter the first number:"))
num2=int(input("Enter the second number:"))
op= input("Enter operator:")
if(op=="+"):
print(num1+num2)
elif(op=="-"):
print(num1-num2)
elif(op=="*"):
print(num1*num2)
elif(op=="/"):
print(num1/num2)
else:
print("Wrong operator!!!!!") |
import glob
from label_sentences import label_sentences
from sklearn.model_selection import KFold
import pandas as pd
# TODO(review): make the hard-coded input/output paths configurable.
# Step 1: auto-label every raw PSA-research sentence file.
sentences_filepaths = glob.glob("sentences/psa_research/*.csv")
for sentences_filepath in sentences_filepaths:
    print(sentences_filepath)
    label_sentences(sentences_filepath, mode='auto')
# Step 2: concatenate all labeled CSVs into one DataFrame and persist it.
sentences_filepaths = glob.glob("labeled_sentences/*/*.csv")
data = None
for path in sentences_filepaths:
    if data is None:
        data = pd.read_csv(path, encoding='utf-8')
    else:
        data = pd.concat([data, pd.read_csv(path, encoding='utf-8')])
data.to_csv('all_labeled_sentences.csv')
# Step 3: write 5 train/test splits.
# NOTE(review): KFold ignores its y argument (data.has_citation here); use
# StratifiedKFold if label-stratified splits were intended.
fold = KFold(n_splits=5, shuffle=True, random_state=0)
for i, (train_index, test_index) in enumerate(fold.split(data, data.has_citation)):
    data.iloc[test_index].to_csv('pre_split_data/test_{}.csv'.format(i), encoding='utf8')
    train_df = data.iloc[train_index]
    outstr = ''
    for _, row in train_df.iterrows():
        # assumes column 0 holds the sentence text — TODO confirm
        outstr += row[0] + '``' + str(int(row['has_citation'])) + ' '
    with open('pre_split_data/train_{}.txt'.format(i), 'w', encoding='utf8') as f:
        f.write(outstr)
|
from tkinter import Tk
from encrypt import Encrypt
from encrypt_view import EncryptView
# Controller class for Encrypt
class EncryptController:
    """Wires the EncryptView widgets to an Encrypt model object."""

    def __init__(self):
        """Initialize state, build the view, bind the button handlers and
        enter the Tk main loop (blocks until the window is closed)."""
        self.e = None            # current Encrypt model, created by nm()
        self.userinput = ""      # last text read from the input field
        self.result = ""         # last encode/decode result
        self.app = EncryptView(master=Tk())
        self.app.nb["command"] = self.nm
        self.app.lb["command"] = self.lm
        self.app.sb["command"] = self.sm
        self.app.eb["command"] = self.em
        self.app.db["command"] = self.dm
        self.app.cb["command"] = self.cm
        self.app.cb2["command"] = self.cm2
        self.app.mainloop()

    def nm(self):
        """'New' button handler: create a fresh Encrypt object."""
        self.e = Encrypt()
        self.app.dt["text"] = self.e

    def lm(self):
        """'Load' button handler (placeholder)."""
        self.app.dt["text"] = "觸發載入按鈕的事件。"

    def sm(self):
        """'Save' button handler (placeholder)."""
        self.app.dt["text"] = "觸發儲存按鈕的事件。"

    def _apply_cipher(self, transform, success_message):
        """Shared flow for the encode/decode buttons (deduplicates the
        previously copy-pasted em()/dm() bodies).

        Validates that there is user input and an Encrypt object, applies
        `transform` to the input, shows the result in the output field and
        reports `success_message` in the status label.
        """
        self.userinput = self.app.ifd.get()
        if self.userinput == "":
            # No user input.
            self.app.dt["text"] = "沒有輸入!"
        elif self.e is None:
            # The 'new' button was never pressed, so there is no cipher yet.
            self.app.dt["text"] = "沒有密碼物件!"
        else:
            r = transform(self.userinput)
            self.result = r
            self.app.ofd.delete(0, 200)
            self.app.ofd.insert(0, r)
            self.app.dt["text"] = success_message

    def em(self):
        """'Encode' button handler."""
        # Lambda keeps self.e unevaluated until _apply_cipher has checked it.
        self._apply_cipher(lambda s: self.e.toEncode(s), "編碼成功!")

    def dm(self):
        """'Decode' button handler."""
        self._apply_cipher(lambda s: self.e.toDecode(s), "解碼成功!")

    def cm(self):
        """'Clear' button handler (placeholder)."""
        self.app.dt["text"] = "觸發清除按鈕的事件。"

    def cm2(self):
        """'Copy' button handler (placeholder)."""
        self.app.dt["text"] = "觸發拷貝按鈕的事件。"
# 執行部分
# Entry point: constructing the controller builds the GUI and starts the
# Tk main loop.
if __name__ == '__main__':
    app = EncryptController()

# File:   encrypt_controller.py
# Author: Kaiching Chang
# Date:   May, 2018
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""zk2 specific configuration."""
import server
class Zk2TopoServer(server.TopoServer):
    """Implementation of TopoServer for zk2."""

    def __init__(self):
        # Ports are reserved lazily, on first use (see assign_ports).
        self.ports_assigned = False

    def assign_ports(self):
        """Assign ports if not already assigned."""
        if self.ports_assigned:
            return
        from environment import reserve_ports  # pylint: disable=g-import-not-at-top
        import utils  # pylint: disable=g-import-not-at-top
        # Three consecutive ports for the single-node ZooKeeper setup.
        self.zk_port_base = reserve_ports(3)
        self.hostname = utils.hostname
        self.zk_ports = ':'.join(str(self.zk_port_base + i) for i in range(3))
        # Clients connect on the last of the three reserved ports.
        self.addr = 'localhost:%d' % (self.zk_port_base + 2)
        self.ports_assigned = True

    def setup(self, add_bad_host=False):
        """Start ZooKeeper via zkctl and create the cell configurations.

        :param add_bad_host: when True, append an unreachable host to the
            test_ca cell address to exercise failure handling.
        """
        from environment import run, binary_args, vtlogroot  # pylint: disable=g-import-not-at-top,g-multiple-import
        import utils  # pylint: disable=g-import-not-at-top
        self.assign_ports()
        run(binary_args('zkctl') + [
            '-log_dir', vtlogroot,
            '-zk.cfg', '1@%s:%s' % (self.hostname, self.zk_ports),
            'init'])
        # Create the cell configurations using 'vtctl AddCellInfo'
        utils.run_vtctl_vtctl(['AddCellInfo',
                               '-root', '/test_nj',
                               '-server_address', self.addr,
                               'test_nj'])
        utils.run_vtctl_vtctl(['AddCellInfo',
                               '-root', '/test_ny',
                               '-server_address', self.addr,
                               'test_ny'])
        ca_addr = self.addr
        if add_bad_host:
            ca_addr += ',does.not.exists:1234'
        # Use UpdateCellInfo for this one, more coverage.
        utils.run_vtctl_vtctl(['UpdateCellInfo',
                               '-root', '/test_ca',
                               '-server_address', ca_addr,
                               'test_ca'])

    def teardown(self):
        """Stop ZooKeeper; 'shutdown' preserves logs/data when the test run
        asked to keep logs, 'teardown' removes everything."""
        from environment import run, binary_args, vtlogroot  # pylint: disable=g-import-not-at-top,g-multiple-import
        import utils  # pylint: disable=g-import-not-at-top
        self.assign_ports()
        run(binary_args('zkctl') + [
            '-log_dir', vtlogroot,
            '-zk.cfg', '1@%s:%s' % (self.hostname, self.zk_ports),
            'shutdown' if utils.options.keep_logs else 'teardown'],
            raise_on_error=False)

    def flags(self):
        """Return the topology command-line flags for Vitess binaries."""
        return [
            '-topo_implementation', 'zk2',
            '-topo_global_server_address', self.addr,
            '-topo_global_root', '/global',
        ]

    def wipe(self):
        """Remove test data from the global and per-cell trees."""
        from environment import run, binary_args  # pylint: disable=g-import-not-at-top,g-multiple-import
        # Only delete keyspaces/ in the global topology service, to keep
        # the 'cells' directory. So we don't need to re-add the CellInfo records.
        run(binary_args('zk') + ['-server', self.addr, 'rm', '-rf',
                                 '/global/keyspaces'])
        run(binary_args('zk') + ['-server', self.addr, 'rm', '-rf', '/test_nj/*'])
        run(binary_args('zk') + ['-server', self.addr, 'rm', '-rf', '/test_ny/*'])
        run(binary_args('zk') + ['-server', self.addr, 'rm', '-rf', '/test_ca/*'])

    def update_addr(self, cell, keyspace, shard, tablet_index, port):
        # No-op for zk2: addresses do not change per tablet.
        pass


# Register this implementation under the 'zk2' flavor name.
server.flavor_map['zk2'] = Zk2TopoServer()
|
# -*- coding: utf-8 -*-
class FenwickTree:
    """Binary indexed tree with 1-indexed point updates and prefix sums."""

    def __init__(self, nums):
        # A little slack at the end keeps every valid index strictly in range.
        self.els = [0] * (len(nums) + 2)
        for pos, value in enumerate(nums, 1):
            self.add(pos, value)

    def add(self, i, k):
        """Add k to position i (1-indexed)."""
        size = len(self.els)
        while i < size:
            self.els[i] += k
            i += i & -i  # jump to the next node covering position i

    def sumPrefix(self, i):
        """Return the sum of positions 1..i."""
        total = 0
        while i > 0:
            total += self.els[i]
            i -= i & -i  # strip the lowest set bit to walk toward the root
        return total
class NumArray:
    """Mutable number array with O(log n) point update and range sum,
    backed by a Fenwick tree."""

    def __init__(self, nums):
        self.tree = FenwickTree(nums)

    def update(self, i, val):
        # Apply the delta between the new value and the current one.
        delta = val - self.sumRange(i, i)
        self.tree.add(i + 1, delta)

    def sumRange(self, i, j):
        # Inclusive range sum as a difference of prefix sums (0-indexed API).
        return self.tree.sumPrefix(j + 1) - self.tree.sumPrefix(i)
if __name__ == "__main__":
obj = NumArray([1, 3, 5])
assert 9 == obj.sumRange(0, 2)
obj.update(1, 2)
assert 8 == obj.sumRange(0, 2)
obj = NumArray([-1])
assert -1 == obj.sumRange(0, 0)
obj.update(0, 1)
assert 1 == obj.sumRange(0, 0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/2/2 0:42
# @Author : Lee
# @File : bubble_sort.py
# @Software: PyCharm
from utils.common import swap
from utils.sort_test_helper import SortTestHelper
def bubble_sort(lists):
    """Sort `lists` in place with bubble sort and return the same list.

    Each outer pass bubbles the largest remaining element to the end of
    the unsorted region. The `swap` helper is inlined as a tuple swap.
    """
    n = len(lists)
    for end in range(n, 0, -1):
        for j in range(end - 1):
            if lists[j] > lists[j + 1]:
                lists[j], lists[j + 1] = lists[j + 1], lists[j]
    return lists
if __name__ == '__main__':
    # Demo: sort 20 random integers in [0, 30] and show before/after.
    test_lists = SortTestHelper.generate_random_list(20, 0, 30)
    print(test_lists)
    bubble_sort(test_lists)
    print(test_lists)
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
## Setup file to compile the sources and install the package on your system
# ==========================================
from distutils.core import setup
from setuptools import find_namespace_packages
from musclex import __version__
from distutils.extension import Extension
# Build the main package, with script etc...
# ==========================================
# Package metadata and dependency declaration for the musclex distribution.
setup(
    name = 'musclex',
    packages = find_namespace_packages(),
    # package_dir={"": "musclex"},
    # packages = ['CalibrationSettings', 'csv_manager', 'modules', 'headless', 'ui', 'utils'],
    version = __version__,
    description = 'Muscle X',
    author = 'BioCAT',
    author_email = 'biocat@lethocerus.biol.iit.edu',
    url = 'https://github.com/biocatiit/musclex',
    keywords = ['musclex', 'biomuscle', 'diffraction', 'biocat'],
    # python_requires='>=2.7.10, <=3.6',
    classifiers = ['Development Status :: 3 - Alpha',
                   'Environment :: X11 Applications :: Qt',
                   'Intended Audience :: Science/Research',
                   'Natural Language :: English',
                   'Operating System :: Other OS',
                   'Programming Language :: Python',
                   'Programming Language :: Cython',
                   'Topic :: Scientific/Engineering :: Bio-Informatics'],
    # BUG FIX: 'scikit-learn' was listed twice; the duplicate is removed.
    install_requires=['pip',
                      'wheel',
                      'numpy<1.24,>=1.18', # version control for numba
                      'Cython',
                      'scikit-image',
                      'scikit-learn',
                      'openpyxl',
                      'pyopencl',
                      'tifffile',
                      'distro',
                      'numba',
                      'lmfit',
                      'ConfigParser',
                      'pillow',
                      'fabio',
                      'peakutils',
                      'h5py',
                      'scipy',
                      'matplotlib',
                      'musclex_ccp13',
                      'PyMca5',
                      'pandas',
                      'opencv-python-headless',
                      'pyFAI',
                      'PyQt5',
                      'hdf5plugin',
                      'fisx',
                      'future'],
    entry_points={
        'console_scripts': [
            'musclex=musclex.main:main',
            'musclex-launcher=musclex.launcher:LauncherForm.main'
        ],
    },
    include_package_data=True,
    # NOTE(review): test_suite usually expects a dotted module path
    # ("musclex.tests"); confirm whether the slash form works as intended.
    test_suite="musclex/tests"
)
|
def minion_game(string=None):
    """Score the Minion Game for `string` and report the winner.

    Kevin scores every substring that starts with a vowel, Stuart every
    substring that starts with a consonant; a substring starting at index j
    contributes len(string) - j points. The original code computed the right
    answers but held Kevin's score in a variable named `stuart` (and vice
    versa); the variables are now named truthfully.

    :param string: uppercase game string; read from stdin when None, which
        preserves the original no-argument behavior.
    :return: "Draw", "Kevin <score>" or "Stuart <score>".
    """
    if string is None:
        string = input()
    stuart = 0  # consonant-start score (Stuart's, per the game rules)
    kevin = 0   # vowel-start score (Kevin's)
    for j in range(len(string)):
        points = len(string) - j  # substrings starting at index j
        if string[j] in 'AEIOU':
            kevin += points
        else:
            stuart += points
    if stuart == kevin:
        return "Draw"
    elif kevin > stuart:
        return "Kevin " + str(kevin)
    else:
        return "Stuart " + str(stuart)
print(minion_game())
|
from django.views.generic import ListView , DetailView
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from . import views
from Main import views as main_views
from feeds.models import Post
# URL routing. Django resolves top-down and stops at the first regex match,
# so order matters here.
urlpatterns = [
    url(r'^$', ListView.as_view(queryset = Post.objects.all().order_by("-date")[:25],
                                template_name = "feeds/feeds.html")),
    url(r'^feed/', ListView.as_view(queryset = Post.objects.all().order_by("-date")[:25],
                                    template_name = "feeds/feeds.html")),
    # NOTE(review): this '^$' entry is shadowed by the feeds ListView above and
    # never matches a request, but it is kept because it registers the URL
    # name 'home' for reverse()/{% url %} lookups.
    url( r'^$', main_views.index, name='home'),
    url( r'^Index', main_views.index, name='Index'),
    url( r'^home', main_views.index, name='home'),
    url( r'^friends', main_views.friends, name='friends'),
    url( r'^profile', main_views.profile, name='profile'),
    url( r'^goodie', main_views.goodie, name='goodie'),
    url(r'^signup', main_views.signup, name='signup'),
    url(r'^login/$', auth_views.LoginView.as_view(template_name='main/login.html')),
    # BUG FIX: removed an exact, unreachable duplicate of the '^feed/' route
    # above (same regex, same view, unnamed).
    url(r'^(?P<slug>[-\w]+)$', DetailView.as_view(model = Post, template_name = 'feeds/post.html')),
]
|
# Copyright (c) 2017, Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Load Use Case Collector
"""
from xml.etree import ElementTree as et
import random
import os
import shutil
import yaml
import pyexcel
from landscaper.collector import base
from landscaper.common import LOG
from landscaper import paths
_MAX_GENERATION = 1000
class UseCaseCollector(base.Collector):
"""
UseCase collector. Imports excel file with a use case landscape separated
into two sheets; Nodes and Links. Converts the excel sheets to .csv format.
Creates hwloc and cpuinfo files for the servers connected to every network
switch saves files in the Data Directory.
"""
def __init__(self, graph_db, conf_manager, events_manager, events=None):
    """Store the configuration manager and prepare MAC-address tracking.

    :param graph_db: landscape graph database handle.
    :param conf_manager: configuration manager, kept as self.cnf.
    :param events_manager: events manager forwarded to the base collector.
    :param events: optional events to subscribe the collector to.
    """
    # BUG FIX(review): the original passed events=None to the base class,
    # silently discarding any `events` argument; forward it instead.
    super(UseCaseCollector, self).__init__(graph_db, conf_manager,
                                           events_manager, events=events)
    self.cnf = conf_manager
    self.mac_addresses = []  # MACs generated so far, to keep them unique
def init_graph_db(self):
    """
    Import excel file, create two .csv files for the nodes and edges in
    the landscape. Create two servers for each network switch in the
    landscape. Generate hwloc and cpuinfo for each machine and add files
    to the Data Directory. Create a network_description.yaml file with
    connections to servers and network switches.
    """
    LOG.info("Deleting hwloc and cpuifo files")
    # Deleting hwloc and cpuinfo files is necessary for testing collector
    filelist = [file for file in os.listdir(paths.DATA_DIR) if
                file.endswith(".txt") or file.endswith('.xml') or
                file.endswith('.yaml')]
    for file in filelist:
        os.remove(os.path.join(paths.DATA_DIR, file))
    # Both CSVs must be present before any files are generated.
    if os.path.exists(os.path.join(paths.DATA_DIR, "nodes.csv")) and \
            os.path.exists(os.path.join(paths.DATA_DIR, "links.csv")):
        node_array = pyexcel.get_sheet(file_name=os.path.join(
            paths.DATA_DIR, "nodes.csv"), name_columns_by_row=0)
        attributes = list(node_array.colnames)
        link_array = pyexcel.get_sheet(file_name=os.path.join(
            paths.DATA_DIR, "links.csv"), name_columns_by_row=0)
        LOG.info("Creating hwloc, cpu_info and network description files")
        for node in node_array:
            # assumes row layout [id, layer, type, server_count, ...] — TODO confirm
            if node[1] == 'network' and node[2] == 'switch':
                connections = []
                node_id = node[0]
                links = self._search_links(link_array, node[0])
                # One hwloc/cpuinfo file pair per server attached to the switch.
                for element in range(1, node[3] + 1):
                    mac_address = self._create_hwloc_file(node_id, element)
                    self._create_cpuinfo_file(node_id, element)
                    connections.append(mac_address)
                connections.extend(links)
                self._add_network_switch(node, attributes, connections)
            else:
                # NOTE(review): this logs once per non-switch row, not once per
                # file — probably meant to fire only when no switch row exists.
                LOG.error("Node.csv file does not contain network switch data")
    else:
        LOG.error("CSV Files not in data directory")
def update_graph_db(self, event, body):
"""
Not implemented as there is no update events for DataClay or this one either.
"""
raise NotImplementedError
def _generate_mac_address(self):
"""
Generate a unique fake mac address for each server.
Change hwloc template if server information provided by Use Case provider.
:return random_mac: unique MAC address
"""
for _ in range(_MAX_GENERATION):
random_mac = "%02x:%02x:%02x:%02x:%02x:%02x" % (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)
)
if random_mac not in self.mac_addresses:
self.mac_addresses.append(random_mac)
return random_mac.upper()
    def _build_hwloc_object(self, node_id, index):
        """
        Build hwloc object

        :param node_id: unique id for each network switch
        :param index: number given to the server for the given network switch
        :return mac_address: mac_address for server
        :return tree: hwloc object
        """
        # Start from the bundled hwloc XML template and specialise it.
        tree = et.parse(paths.TEMP_HWLOC)
        root = tree.getroot()
        # Stamp the hostname as "<switch-id>-<server-number>".
        for info in root.findall("object/info"):
            name = info.get('name')
            if name.lower() == 'hostname':
                info.set('value', "{}-{}".format(node_id, index))
        # Replace the template's ENTER_MAC placeholder with a fresh MAC.
        for element in tree.iter('info'):
            value = element.get('value')
            if value == 'ENTER_MAC':
                mac_address = self._generate_mac_address()
                element.set('value', mac_address)
        # NOTE(review): if the template contains no ENTER_MAC entry,
        # mac_address is unbound here and this raises UnboundLocalError --
        # confirm the template always has the placeholder.
        return mac_address, tree
    def _create_hwloc_file(self, node_id, index):
        """
        Write hwloc object to .xml file in data directory

        :param node_id: unique id for each network switch
        :param index: number given to the server for the given network switch
        :return: the MAC address generated for this server
        """
        mac_address, hwloc = self._build_hwloc_object(node_id, index)
        # File name pattern: "<switch-id>-<server-number>_hwloc.xml".
        hwloc.write(os.path.join(paths.DATA_DIR, "{}-{}_hwloc.xml"
                                 .format(node_id, index)))
        return mac_address
@staticmethod
def _create_cpuinfo_file(node_id, index):
"""
Create cpuinfo file using the template for each physical machine
:param node_id: unique id for each network switch
:param index: number given to the server for the given network switch
"""
source = paths.TEMP_CPUINFO
destination = paths.DATA_DIR
shutil.copy(source, destination)
os.rename(os.path.join(destination, 'template_cpuinfo.txt'),
os.path.join(destination, '{}-{}_cpuinfo.txt'
.format(node_id, index)))
@staticmethod
def _search_links(link_array, node_id):
"""
Search links.csv for connections between network switches
:param link_array: list of connections between network switches
:param node_id: unique id for each network switch
:return: links: network switches connected to node_id
"""
links = []
for link in link_array:
if node_id == link[1]:
links.append(link[0])
return links
@staticmethod
def _add_network_switch(node, attributes, connections):
"""
Add network switch data to the network description file
:param node: network switch unique id and values for attributes
:param attributes: key attribute names
:param connections: list of network switch's connected devices
"""
i = 0
switch_attributes = attributes[4:]
node_attributes = node[4:]
switch_data = {node[0]: {attributes[0]: node[0]}}
for item in switch_attributes:
switch_data[node[0]][item] = node_attributes[i]
i = i+1
switch_data[node[0]]['address'] = node[0].lower()
switch_data[node[0]]['connected-devices'] = connections
with open(os.path.join(paths.DATA_DIR, "network_description.yaml"), 'a') as yaml_file:
yaml.safe_dump(switch_data, yaml_file, default_flow_style=False)
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
"""Contain GlobalStore
"""
from logging import getLogger
from tonga.models.structs.persistency_type import PersistencyType
from tonga.stores.base import BaseStores
from tonga.stores.errors import BadEntryType
from tonga.stores.manager.errors import UninitializedStore
from tonga.stores.persistency.memory import MemoryPersistency
from tonga.stores.persistency.shelve import ShelvePersistency
from tonga.stores.persistency.rocksdb import RocksDBPersistency
from tonga.models.structs.store_record_type import StoreRecordType
__all__ = [
'GlobalStore',
]
class GlobalStore(BaseStores):
    """ Global store

    Key/value store backed by a pluggable persistency layer
    (memory, shelve or RocksDB).
    """

    def __init__(self, db_type: PersistencyType, db_path: str = None):
        """ Initialize the store with the requested persistency back end.

        Args:
            db_type (PersistencyType): Back end to use.
            db_path (str): Filesystem path; required for SHELVE and ROCKSDB.

        Raises:
            KeyError: If db_path is missing for a file-backed back end.
            NotImplementedError: If db_type is not a known back end.
        """
        self._logger = getLogger('tonga')
        if db_type == PersistencyType.MEMORY:
            self._persistency = MemoryPersistency()
        elif db_type == PersistencyType.SHELVE:
            if db_path is None:
                # Todo change raised error
                raise KeyError
            self._persistency = ShelvePersistency(db_path)
        elif db_type == PersistencyType.ROCKSDB:
            if db_path is None:
                # Todo change raised error
                raise KeyError
            self._persistency = RocksDBPersistency(db_path)
        else:
            # Todo change raised error
            raise NotImplementedError

    async def get(self, key: str) -> bytes:
        """ Get value by key in global store

        Args:
            key (str): Value key as string

        Returns:
            bytes: return value as bytes

        Raises:
            UninitializedStore: If the persistency layer is not initialized.
            BadEntryType: If key is not a string.
        """
        if not self._persistency.is_initialize():
            raise UninitializedStore
        if not isinstance(key, str):
            raise BadEntryType
        return await self._persistency.get(key)

    async def _build_set(self, key: str, value: bytes) -> None:
        """ Set value & key in global store

        Args:
            key (str): Value key as string
            value (bytes): Value to store as bytes format

        Returns:
            None

        Raises:
            BadEntryType: If key is not str or value is not bytes.
        """
        if not (isinstance(key, str) and isinstance(value, bytes)):
            raise BadEntryType
        # Fix: call the back end's _build_operations directly instead of
        # the original __getattribute__('_build_operations').__call__(...)
        # reflection, which did exactly the same thing obscurely.
        await self._persistency._build_operations(key, value, StoreRecordType.SET)

    async def _build_delete(self, key: str) -> None:
        """ Delete value by key in global store

        Args:
            key (str): Value key as string

        Returns:
            None

        Raises:
            BadEntryType: If key is not a string.
        """
        if not isinstance(key, str):
            raise BadEntryType
        # '' mirrors the original: DEL operations carry an empty value.
        await self._persistency._build_operations(key, '', StoreRecordType.DEL)
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from collections import defaultdict
from dataclasses import dataclass
from pants.core.util_rules.source_files import SourceFiles
from pants.core.util_rules.source_files import rules as source_files_rules
from pants.engine.collection import Collection
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs, RemovePrefix, Snapshot
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import SourcesPaths
from pants.source.source_root import (
SourceRoot,
SourceRootRequest,
SourceRootsRequest,
SourceRootsResult,
)
from pants.source.source_root import rules as source_root_rules
from pants.util.dirutil import fast_relpath
@dataclass(frozen=True)
class StrippedSourceFiles:
    """Wrapper for a snapshot of files whose source roots have been stripped.

    Use via `Get(StrippedSourceFiles, SourceFilesRequest([tgt.get(SourcesField)]))`.
    """

    # Snapshot whose paths are relative to their source roots,
    # e.g. `pants/util/strutil.py` rather than `src/python/pants/util/strutil.py`.
    snapshot: Snapshot
@rule
async def strip_source_roots(source_files: SourceFiles) -> StrippedSourceFiles:
    """Removes source roots from a snapshot.

    E.g. `src/python/pants/util/strutil.py` -> `pants/util/strutil.py`.
    """
    # Nothing to strip from an empty snapshot.
    if not source_files.snapshot.files:
        return StrippedSourceFiles(source_files.snapshot)
    # Unrooted files have no source root: set them aside and strip only the
    # rooted remainder; they are merged back in at the end.
    if source_files.unrooted_files:
        rooted_files = set(source_files.snapshot.files) - set(source_files.unrooted_files)
        rooted_files_snapshot = await Get(
            Snapshot, DigestSubset(source_files.snapshot.digest, PathGlobs(rooted_files))
        )
    else:
        rooted_files_snapshot = source_files.snapshot
    source_roots_result = await Get(
        SourceRootsResult,
        SourceRootsRequest,
        SourceRootsRequest.for_files(rooted_files_snapshot.files),
    )
    # Group files by the source root they live under.
    source_roots_to_files = defaultdict(set)
    for f, root in source_roots_result.path_to_root.items():
        source_roots_to_files[root.path].add(str(f))
    if len(source_roots_to_files) == 1:
        # Single root: one RemovePrefix suffices (and "." needs no work).
        source_root = next(iter(source_roots_to_files.keys()))
        if source_root == ".":
            resulting_snapshot = rooted_files_snapshot
        else:
            resulting_snapshot = await Get(
                Snapshot, RemovePrefix(rooted_files_snapshot.digest, source_root)
            )
    else:
        # Multiple roots: subset the digest per root, strip each root's
        # prefix, then merge the stripped digests back together.
        digest_subsets = await MultiGet(
            Get(Digest, DigestSubset(rooted_files_snapshot.digest, PathGlobs(files)))
            for files in source_roots_to_files.values()
        )
        resulting_digests = await MultiGet(
            Get(Digest, RemovePrefix(digest, source_root))
            for digest, source_root in zip(digest_subsets, source_roots_to_files.keys())
        )
        resulting_snapshot = await Get(Snapshot, MergeDigests(resulting_digests))
    # Add the unrooted files back in.
    if source_files.unrooted_files:
        unrooted_files_digest = await Get(
            Digest,
            DigestSubset(source_files.snapshot.digest, PathGlobs(source_files.unrooted_files)),
        )
        resulting_snapshot = await Get(
            Snapshot, MergeDigests((resulting_snapshot.digest, unrooted_files_digest))
        )
    return StrippedSourceFiles(resulting_snapshot)
@dataclass(frozen=True)
class StrippedFileName:
    """A single file path with its source root removed."""

    value: str
@dataclass(frozen=True)
class StrippedFileNameRequest(EngineAwareParameter):
    """Request to strip the source root from a single file path."""

    file_path: str

    def debug_hint(self) -> str:
        # Surface the path in engine debug output.
        return self.file_path
@rule
async def strip_file_name(request: StrippedFileNameRequest) -> StrippedFileName:
    """Strip the source root from one file path (a "." root is a no-op)."""
    source_root = await Get(
        SourceRoot, SourceRootRequest, SourceRootRequest.for_file(request.file_path)
    )
    return StrippedFileName(
        request.file_path
        if source_root.path == "."
        else fast_relpath(request.file_path, source_root.path)
    )
class StrippedSourceFileNames(Collection[str]):
    """The file names from a target's `sources` field, with source roots stripped.

    Use via `Get(StrippedSourceFileNames, SourcePathsRequest(tgt.get(SourcesField)))`.
    """
@rule
async def strip_sources_paths(sources_paths: SourcesPaths) -> StrippedSourceFileNames:
    """Strip the source root from every file of a target's `sources` field."""
    if not sources_paths.files:
        return StrippedSourceFileNames()
    # All of one target's files share a source root, so resolving the root
    # of the first file is enough for the whole set.
    source_root = await Get(
        SourceRoot, SourceRootRequest, SourceRootRequest.for_file(sources_paths.files[0])
    )
    if source_root.path == ".":
        return StrippedSourceFileNames(sources_paths.files)
    return StrippedSourceFileNames(fast_relpath(f, source_root.path) for f in sources_paths.files)
def rules():
    """Register this module's rules plus the source-root and source-files rules."""
    return tuple(collect_rules()) + tuple(source_root_rules()) + tuple(source_files_rules())
|
import csv
import re
import tempfile
import urllib
import requests
from django_extensions.db.models import TimeStampedModel
from storage.s3wrapper import S3Wrapper
"""
Generic mixins for reading from files in a Django management command.
The files can be a local file, a URL or on an S3 bucket.
`self.S3_BUCKET_NAME` needs to be set when using S3.
"""
class ReadFromFileMixin:
    """Management-command mixin that reads input from a local file, a URL or S3.

    Exactly one of -f/--file, -u/--url, -s/--s3 must be supplied.
    `S3_BUCKET_NAME` must be set by the subclass when using S3.
    """

    # Bucket used by read_from_s3; subclasses must override when using S3.
    S3_BUCKET_NAME = None

    def add_arguments(self, parser):
        """Register the three mutually exclusive source options."""
        group = parser.add_mutually_exclusive_group(required=True)
        group.add_argument(
            "-f",
            "--file",
            action="store",
            help="Path to import e.g: /foo/bar/baz.csv",
        )
        group.add_argument(
            "-u",
            "--url",
            action="store",
            help="URL to import e.g: http://foo.bar/baz.csv",
        )
        group.add_argument(
            "-s",
            "--s3",
            action="store",
            help="S3 key to import e.g: foo/bar/baz.csv",
        )

    def read_from_local(self, filename):
        # Open purely to fail fast when the file is missing or unreadable;
        # the caller receives the path itself.
        with open(filename, "rt"):
            return filename

    def read_from_url(self, url):
        # Download into a NamedTemporaryFile; returning the file object keeps
        # it alive (the file is deleted once the object is garbage-collected).
        tmp = tempfile.NamedTemporaryFile()
        urllib.request.urlretrieve(url, tmp.name)
        return tmp

    def read_from_s3(self, filepath):
        # S3_BUCKET_NAME must be configured by the subclass.
        s3 = S3Wrapper(self.S3_BUCKET_NAME)
        return s3.get_file(filepath)

    def load_data(self, options):
        """Dispatch to the reader matching whichever option was provided."""
        if options["file"]:
            return self.read_from_local(options["file"])
        if options["url"]:
            return self.read_from_url(options["url"])
        if options["s3"]:
            return self.read_from_s3(options["s3"])
        return None
class ReadFromCSVMixin(ReadFromFileMixin):
    """ReadFromFileMixin variant that parses the source as CSV into row dicts."""

    ENCODING = "utf-8"
    DELIMITER = ","

    def read_from_local(self, filename):
        """Parse a local CSV file into a list of dicts (one per row)."""
        with open(filename, "rt", encoding=self.ENCODING) as f:
            reader = csv.DictReader(f, delimiter=self.DELIMITER)
            return list(reader)

    def read_from_url(self, url):
        """Fetch a CSV over HTTP and parse it into a list of dicts."""
        r = requests.get(url)
        r.raise_for_status()
        # if CSV came from google docs
        # manually set the encoding
        gdocs_pattern = r"(.)+docs\.google(.)+\/ccc(.)+"
        if re.match(gdocs_pattern, url):
            r.encoding = self.ENCODING
        # NOTE(review): the custom DELIMITER is not applied on this path --
        # confirm URL sources are always comma-separated.
        csv_reader = csv.DictReader(r.text.splitlines())
        return list(csv_reader)

    def read_from_s3(self, filepath):
        """Download from S3 (via the parent mixin), then parse as a local CSV."""
        f = super().read_from_s3(filepath)
        return self.read_from_local(f.name)
class UpdateElectionsTimestampedModel(TimeStampedModel):
    """TimeStampedModel that propagates its `modified` stamp to related elections."""

    class Meta:
        get_latest_by = "modified"
        abstract = True

    def save(self, **kwargs):
        """
        Whenever the object is saved, we update all related elections
        modified date to have the same date. This is to make sure that
        changes made on the parent Organisation or
        OrganisationDivision are picked up by importers looking for
        changes to the Election made in EE
        """
        super().save(**kwargs)
        # assumes the concrete subclass exposes an `election_set` reverse
        # relation -- TODO confirm for every subclass using this mixin.
        self.election_set.update(modified=self.modified)
|
from skimage import io
from skimage.transform import downscale_local_mean
from skimage.filters import threshold_triangle as threshold
from skimage.segmentation import clear_border, random_walker
from skimage.measure import label, regionprops
from skimage.morphology import binary_opening, square, remove_small_objects
from skimage.color import gray2rgb
from skimage.draw import circle
import config
from lib.Crop import Crop
from mpyx.Video import Video
from lib.Database import Database
from lib.models.Classifier import Classifier
from dateutil.parser import parse as dateparse
from base64 import b64encode
from itertools import repeat
from PIL import Image
from io import BytesIO
from uuid import uuid4
import matplotlib.pyplot as plt
import numpy as np
import traceback
import multiprocessing
import subprocess
import threading
import concurrent.futures
import shutil
import shlex
import pexpect
import queue
import asyncio
import fcntl
import tempfile
import time
import os
import sys
import matplotlib.pyplot as plt
async def main(args):
    """CLI entry point: run detection when enough arguments were supplied,
    otherwise print usage."""
    if len(args) >= 2:
        print(await detect_video(*args))
    else:
        print("What you want to process and insert?")
        print("""USING: path/to/video/file.avi 2017-10-31 T_NAME_ViDEO "Notes." """)
async def detect_video(video_fpath, date = "NOW()", name = "Today", notes = ""):
    """Run particle detection over a raw video and persist the results.

    Spawns ffmpeg compressor processes for the raw / extracted / mask /
    detection streams plus a crop writer and a database writer, then walks
    the video frame by frame: threshold, label and classify particles,
    stream visualisations to the compressors and queue DB inserts. On
    failure everything is rolled back and the experiment directory removed.

    :param video_fpath: path to the raw input video file
    :param date: experiment day, parsed with dateutil
    :param name: experiment name stored in the database
    :param notes: free-form notes stored with the experiment
    :return: the experiment UUID
    """
    # note: "NOW()" may not work.
    # NOTE(review): cpus is computed but never used.
    cpus = multiprocessing.cpu_count()
    experiment_uuid = uuid4()
    experiment_day = dateparse(date)
    experiment_dir = os.path.join(config.experiment_dir, str(experiment_uuid))
    experiment = (experiment_uuid, experiment_day, name, "detection", notes)
    try:
        print("Loading raw video for processing", video_fpath)
        video = Video(video_fpath)
        print("Creating data directory", experiment_dir)
        os.mkdir(experiment_dir)
        # --- worker processes -------------------------------------------
        print("Launching ffmpeg for raw video compression")
        compressor_raw = VideoFileCompressor(video_fpath, experiment_dir, "raw.mp4", video.width, video.height)
        compressor_raw.start()
        print("Launching ffmpeg for extracted video compression")
        frame_bytes_norm = multiprocessing.Queue()
        compressor_norm = VideoStreamCompressor(frame_bytes_norm, experiment_dir, "extraction.mp4", video.width, video.height, 300, pix_format="gray")
        compressor_norm.start()
        print("Launching ffmpeg for binary mask visualization")
        frame_bytes_mask = multiprocessing.Queue()
        compressor_mask = VideoStreamCompressor(frame_bytes_mask, experiment_dir, "mask.mp4", video.width, video.height, 300, pix_format="gray" )
        compressor_mask.start()
        print("Launching ffmpeg for particle detection visualization")
        frame_bytes_detect = multiprocessing.Queue()
        compressor_detect = VideoStreamCompressor(frame_bytes_detect, experiment_dir, "detection.mp4", video.width, video.height, 300, pix_format="rgb24" )
        compressor_detect.start()
        print("Launching CropWriter process")
        crop_writer_queue = multiprocessing.Queue()
        crop_writer = CropWriter(crop_writer_queue, experiment_dir)
        crop_writer.start()
        print("Launching Database Writer")
        db_writer_queue = multiprocessing.Queue()
        db_writer = DBWriter(db_writer_queue)
        db_writer.start()
        print("Loading Classifier")
        classifier = Classifier().load()
        compressors = [compressor_raw, compressor_norm, compressor_detect, compressor_mask, crop_writer]
        frame_buffers = [frame_bytes_norm, frame_bytes_detect, frame_bytes_mask, crop_writer_queue]
        print("Inserting experiment", name, "(", experiment_uuid, ") into database.")
        db_writer_queue.put(("execute", """
            INSERT INTO Experiment (experiment, day, name, method, notes)
            VALUES ($1, $2, $3, $4, $5)
            """, experiment))
        # --- main frame loop --------------------------------------------
        for i in range(len(video)):
            print("Processing frame", i)
            frame = video.normal_frame(i)
            detection_frame = gray2rgb(frame.squeeze().copy()).astype("int32")
            sframe = frame.squeeze()
            frame_bytes_norm.put(frame.tobytes())
            cropper = Crop(frame)
            # Threshold + opening produces the particle mask; clear_border
            # drops regions touching the frame edge.
            thresh = threshold(sframe)
            binary = binary_opening((sframe < thresh), square(3))
            cleared = clear_border(binary)
            frame_bytes_mask.put((cleared.squeeze().astype("uint8") * 255).tobytes())
            labeled = label(cleared)
            filtered = remove_small_objects(labeled, 64)
            properties = regionprops(filtered, sframe)
            # NOTE(review): the name `frame` is reused here -- it was the
            # image array above and is the DB row tuple from this point on.
            frame = (uuid4(), experiment_uuid, i)
            frame_uuid = frame[0]
            db_writer_queue.put(("execute", """
                INSERT INTO Frame (frame, experiment, number)
                VALUES ($1, $2, $3)
                """, frame))
            crop_dir = os.path.join(experiment_dir, str(frame[0]))
            os.mkdir(crop_dir)
            print("Found", len(properties), "particles.")
            batchSize = 1024
            for b in batch(properties, batchSize):
                print(".", end='', flush=True)
                coords = [(p.centroid[1], p.centroid[0]) for p in b]
                bboxes = [((p.bbox[1], p.bbox[0]), (p.bbox[3], p.bbox[2])) for p in b]
                crops = np.array([cropper.crop(int(round(c[0])), int(round(c[1]))) for c in coords])
                pcrops = np.array([classifier.preproc(crop) for crop in crops])
                # need to pad the array of crop images into a number divisible by the # of GPUs
                pcrops = np.lib.pad(pcrops, ((0, len(pcrops) % config.GPUs), (0,0), (0,0), (0,0)), 'constant', constant_values=0)
                print(".", end='', flush=True)
                latents = classifier.encoder.predict(pcrops.reshape((len(pcrops), *(pcrops[0].shape))))
                print(".", end='', flush=True)
                categories = [np.argmax(c) for c in classifier.featureclassifier.predict(latents.reshape(len(latents), *(latents[0].shape)))]
                print(".", end='', flush=True)
                particles = [(uuid4(), experiment_uuid, p.area, p.mean_intensity, p.perimeter, p.major_axis_length, categories[i])
                             for i, p in enumerate(b)]
                track_uuids = [uuid4() for i in range(len(b))]
                tracks = [(track_uuids[i], frame_uuid, particles[i][0], coords[i], bboxes[i], ",".join(list(latents[i].astype("str"))))
                          for i, p in enumerate(b)]
                crop_writer_queue.put((frame_uuid, track_uuids, crops))
                print(".", end='', flush=True)
                db_writer_queue.put(("executemany", """
                    INSERT INTO Particle (particle, experiment, area, intensity, perimeter, radius, category)
                    VALUES ($1, $2, $3, $4, $5, $6, $7)
                    """, (particles,)))
                db_writer_queue.put(("executemany", """
                    INSERT INTO Track (track, frame, particle, location, bbox, latent)
                    VALUES ($1, $2, $3, $4, $5, $6)
                    """, (tracks,)))
                print(".", end='', flush=True)
                # colorize detection_frame
                for i, p in enumerate(b):
                    rr, cc = circle(*p.centroid, p.major_axis_length, sframe.shape)
                    if categories[i] == 0: # Undefined
                        detection_frame[rr, cc, 0] += 25
                        detection_frame[rr, cc, 1] += 25 # R+G = Orange
                    if categories[i] == 1: # Unknown
                        detection_frame[rr, cc, 1] += 25
                        detection_frame[rr, cc, 2] += 25 # G+B = Cyan
                    if categories[i] == 2: # Bitumen
                        detection_frame[rr, cc, 2] += 50 # Blue
                    if categories[i] == 3: # Sand
                        detection_frame[rr, cc, 0] += 50 # Red
                    if categories[i] == 4: # Bubbple
                        detection_frame[rr, cc, 1] += 50 # Green
                print(":", end='', flush=True)
            detection_frame = np.clip(detection_frame, 0, 255)
            frame_bytes_detect.put(detection_frame.astype("uint8").tobytes())
            # Backpressure: let the compressors and DB writer drain before
            # producing more frames.
            while max([b.qsize() for b in frame_buffers]) > 8 or db_writer_queue.qsize() > 128:
                print("queues:", frame_bytes_norm.qsize(), frame_bytes_detect.qsize(), frame_bytes_mask.qsize(), crop_writer_queue.qsize(), db_writer_queue.qsize())
                time.sleep(5)
    except Exception as e:
        # Failure path: roll back the DB transaction, stop workers and
        # remove the partially written experiment directory.
        print("Uh oh. Something went wrong")
        traceback.print_exc()
        print("Waiting for rollback")
        db_writer.stop()
        db_writer_queue.put(None)
        db_writer.join()
        [c.stop() for c in compressors]
        [b.put(None) for b in frame_buffers]
        [c.join() for c in compressors]
        if os.path.exists(experiment_dir):
            print("Removing files from", experiment_dir)
            shutil.rmtree(experiment_dir)
    else:
        # Success path: commit, then let the workers finish their queues.
        print("Comitting changes to database.")
        db_writer.commit()
        db_writer_queue.put(None)
        db_writer.join()
        print("Waiting for compression to complete.")
        [b.put(None) for b in frame_buffers]
        [c.join() for c in compressors]
    finally:
        print("Fin.")
    return experiment[0]
def batch(iterable, n=1):
    """Yield successive slices of *iterable*, each holding at most *n* items."""
    # Adapted from https://stackoverflow.com/a/8290508
    # Slicing clamps at the sequence end, so no explicit min() is needed.
    total = len(iterable)
    for start in range(0, total, n):
        yield iterable[start:start + n]
class VideoFileCompressor(multiprocessing.Process):
    """Background process that transcodes an existing video file to H.264 MP4."""

    def __init__(self, video_fpath, experiment_dir, fname, width=2336, height=1729, fps=300., rate=24.):
        super(VideoFileCompressor, self).__init__()
        self.edir = experiment_dir
        self.fname = fname
        self.stop_event = multiprocessing.Event()
        # setpts slows capture fps down to playback rate; when the height is
        # odd, crop one pixel row because yuv420p needs even dimensions.
        self.cmd = ''.join(('ffmpeg -i "{}"'.format(video_fpath),
                            ' -c:v libx264 -crf 15 -preset fast',
                            ' -pix_fmt yuv420p',
                            ' -filter:v "setpts={}*PTS'.format(fps/rate),
                            ', crop={}:{}:0:0"'.format(width, height-1) if height % 2 else '"',
                            ' -r 24',
                            ' -movflags +faststart',
                            ' "{}"'.format(os.path.join(experiment_dir, fname))))

    def run(self):
        proc = subprocess.Popen(shlex.split(self.cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Non-blocking stdout so the polling loop below never stalls.
        fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
        with open("{}.log".format(os.path.join(self.edir, self.fname)), 'wb', 0) as log_file:
            while proc.poll() is None:
                try:
                    log_file.write(proc.stdout.read())
                except:
                    # Nothing to read yet (read would block); back off briefly.
                    time.sleep(0.5)
                if self.stopped():
                    proc.kill()

    def stop(self):
        """Ask the process to kill ffmpeg and exit."""
        self.stop_event.set()

    def stopped(self):
        return self.stop_event.is_set()
class VideoStreamCompressor(multiprocessing.Process):
    """Background process that feeds raw frame bytes from a queue into ffmpeg.

    Producers put frame byte strings on `queue`; a terminating None (or a
    call to stop()) closes ffmpeg's stdin and waits for it to finish.
    """

    def __init__(self, queue, experiment_dir, fname, width=2336, height=1729, fps=300., rate=24., pix_format="gray"):
        super(VideoStreamCompressor, self).__init__()
        self.queue = queue
        self.edir = experiment_dir
        self.fname = fname
        self.stop_event = multiprocessing.Event()
        # Raw frames arrive on stdin ('-i -'); setpts slows capture fps to
        # playback rate, and an odd height is cropped by one row for yuv420p.
        self.cmd = ''.join(('ffmpeg',
                            ' -f rawvideo -pix_fmt {}'.format(pix_format),
                            ' -video_size {}x{}'.format(width,height),
                            ' -framerate {}'.format(fps),
                            ' -i -',
                            ' -c:v libx264 -crf 15 -preset fast',
                            ' -pix_fmt yuv420p',
                            ' -filter:v "setpts={}*PTS'.format(fps/rate),
                            ', crop={}:{}:0:0"'.format(width, height-1) if height % 2 else '"',
                            ' -r 24',
                            ' -movflags +faststart',
                            ' "{}"'.format(os.path.join(experiment_dir, fname))))

    def run(self):
        proc = subprocess.Popen(shlex.split(self.cmd), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # Non-blocking stdout so log draining never stalls frame feeding.
        fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
        with open("{}.log".format(os.path.join(self.edir, self.fname)), 'wb', 0) as log_file:
            while True:
                try:
                    log_file.write(proc.stdout.read())
                except:
                    # Nothing available to read right now; keep going.
                    pass
                if self.stopped():
                    return self.close(proc)
                else:
                    frame_bytes = self.queue.get()
                    if frame_bytes is None:
                        # Sentinel from the producer: finish cleanly.
                        return self.close(proc)
                    else:
                        proc.stdin.write(frame_bytes)

    def close(self, proc):
        # Closing stdin signals EOF to ffmpeg; wait for it to flush the file.
        proc.stdin.close()
        proc.wait()

    def stop(self):
        """Ask the process to close ffmpeg and exit."""
        self.stop_event.set()

    def stopped(self):
        return self.stop_event.is_set()
class CropWriter(multiprocessing.Process):
    """Background process that saves particle crop images as JPEG files.

    Producers put (frame_uuid, track_uuids, crops) tuples on `queue`;
    a None sentinel (or stop()) ends the process.
    """

    def __init__(self, queue, experiment_dir):
        super(CropWriter, self).__init__()
        self.queue = queue
        self.edir = experiment_dir
        self.stop_event = multiprocessing.Event()

    def run(self):
        while True:
            if self.stopped():
                return
            else:
                crop_data = self.queue.get()
                if crop_data is None:
                    # Sentinel from the producer: exit cleanly.
                    return
                else:
                    frame_uuid, track_uuids, crops = crop_data
                    frame_uuid = str(frame_uuid)
                    #print(crops[0].shape, np.min(crops[0]), np.max(crops[0]))
                    # One JPEG per crop, named after its track UUID, inside
                    # the per-frame directory created by detect_video.
                    for i, crop in enumerate(crops):
                        io.imsave(os.path.join(self.edir, frame_uuid, str(track_uuids[i]) + ".jpg"), crop.squeeze(), quality=90)

    def close(self, proc):
        # NOTE(review): appears to be a copy-paste leftover from the
        # compressor classes -- this class spawns no subprocess and nothing
        # calls close(); confirm it can be removed.
        proc.stdin.close()
        proc.wait()

    def stop(self):
        """Ask the process to exit."""
        self.stop_event.set()

    def stopped(self):
        return self.stop_event.is_set()
# 2am madness follows
def go(fn, args):
    """Run coroutine function *fn* with *args* to completion on a fresh event loop."""
    loop = asyncio.new_event_loop()
    return loop.run_until_complete(fn(*args))
class DBWriter(multiprocessing.Process):
    """Serialises SQL statements from a queue into one database transaction.

    Producers put ("execute"|"executemany", query, args) tuples on the
    queue. Call commit() to stop and commit, or stop() alone to stop and
    roll back; in both cases also put None on the queue so a blocked
    queue.get() wakes up.
    """

    def __init__(self, queue):
        super(DBWriter, self).__init__()
        self.queue = queue
        self.stop_event = multiprocessing.Event()
        self.commit_event = multiprocessing.Event()

    def run(self):
        # Run the async consumer loop on a private event loop in this process.
        go(self.inner_loop, (Database().transaction, self.queue))

    async def inner_loop(self, tx, queue):
        """Consume queued statements until stopped, then commit or roll back."""
        tx, transaction = await tx()
        print("DBWriter ready.")
        while True:
            if not self.stopped():
                sql_drop = self.queue.get()
                if sql_drop is None:
                    # Wake-up sentinel; the stop flag is re-checked above.
                    pass
                else:
                    method, query, args = sql_drop
                    await getattr(tx, method)(query, *args)
            else:
                # Bug fix: the original tested stop_event here, but stopped()
                # already guarantees stop_event is set at this point, so every
                # shutdown committed and a rollback could never happen. The
                # commit/rollback decision must come from commit_event, which
                # only commit() sets.
                if self.commit_event.is_set():
                    await transaction.commit()
                else:
                    await transaction.rollback()
                return

    def commit(self):
        """Request a clean shutdown that commits the transaction."""
        self.commit_event.set()
        self.stop()

    def stop(self):
        """Request shutdown; without a prior commit() the transaction rolls back."""
        self.stop_event.set()

    def stopped(self):
        return self.stop_event.is_set()
# class DBTransciever(multiprocessing.Process):
# """TODO, R/W support"""
# def __init__(self, queue):
# super(DBTransciver, self).__init__()
# class Classy(multiprocessing.Process):
# def __init__(self, preprocessed_crops_queue, latents_categories_mirror_queue = None):
# super(Classy, self).__init__()
# self.pc = preprocessed_crops_queue
# self.lcm = latents_categories_mirror_queue # or dump_queue()
# self.stop_event = multiprocessing.Event()
# def run(self):
# return go(self.inner_loop, self, Classifier().load(), self.pc, self.lcm)
# async def inner_loop(self, classifier, crops, queue):
# while True:
# if not self.stopped():
# crop_batch = crops.get()
# if crop_batch is None:
# return
# else:
# # todo: test if further proc splitting helps or hinders
# categories = [np.argmax(c) for c in classifier.featureclassifier.predict(latents.reshape(len(latents), *(latents[0].shape)))]
# latents = classifier.encoder.predict(crop_batch.reshape((len(crop_batch), *(crop_batch[0].shape))))
# mirror = classifier.decoder.predict(latents.reshape((len(latents), *(latents[0].shape))))
# queue.put((categories, latents, mirror))
# else:
# queue.put(None)
# return
|
from _typeshed import Incomplete

# Type stub only: signature for the approximate k-components algorithm; the
# implementation lives in the real module and `...` marks the stub default.
def k_components(G: Incomplete, min_density: float = ...) -> Incomplete: ...
|
from itertools import combinations
import operator as op
import itertools as it
from functools import reduce
import re
import json
import math
|
import logging
import pytest
from log_it import CRITICAL, DEBUG, ERROR, INFO, WARNING, log_it
# Map human-readable level names to the log_it level callables under test.
LOG_LEVEL = {
    "debug": DEBUG,
    "info": INFO,
    "warning": WARNING,
    "error": ERROR,
    "critical": CRITICAL,
}
def test_callable_log_levels():
    """Every configured level helper must be callable."""
    assert all(callable(LOG_LEVEL[name]) for name in LOG_LEVEL)
@pytest.mark.parametrize(
    "msg, level",
    [
        ("This is a debug message", "debug"),
        ("This is an info message", "info"),
        ("This is a warning message", "warning"),
        ("This is an error message", "error"),
        ("This is a critical message", "critical"),
    ],
)
def test_log_it(msg, level, caplog):
    """Each level helper emits exactly one record with the right level,
    message and logger name."""
    # Capture everything so even DEBUG records are recorded.
    caplog.set_level(logging.DEBUG)
    log_it(LOG_LEVEL[level], msg)
    assert len(caplog.records) == 1
    for record in caplog.records:
        assert record.levelname == level.upper()
        assert record.message == msg
        assert record.name == "pybites_logger"
def test_wrong_log_level(caplog):
    """Records below the capture threshold are dropped; at-or-above pass."""
    # WARNING is below the ERROR capture level, so nothing is recorded.
    msg = "This is a warning message"
    caplog.set_level(logging.ERROR)
    log_it(WARNING, msg)
    assert len(caplog.records) == 0
    # ERROR meets the threshold and is captured with full metadata.
    caplog.set_level(logging.ERROR)
    msg = "This is an error message"
    log_it(ERROR, msg)
    assert len(caplog.records) == 1
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert record.message == msg
        assert record.name == "pybites_logger"
from pyUbiForge.ACU.type_readers.minimap_textures import Reader as MMClass
from pyUbiForge.ACU.type_readers.texture import Reader as TextureClass
from plugins import BasePlugin
from PIL import Image
from io import BytesIO
import struct
from typing import Union, List
import pyUbiForge
import logging
class Plugin(BasePlugin):
    """Export an assembled minimap PNG from an ACU minimap-texture file."""

    plugin_name = 'Export Minimap'
    plugin_level = 4
    # Resource type id of minimap texture containers.
    file_type = 'EE568905'

    def run(self, file_id: Union[str, int], forge_file_name: str, datafile_id: int, options: Union[List[dict], None] = None):
        """Stitch every minimap tile into one image saved to the dump folder."""
        # TODO add select directory option
        save_folder = pyUbiForge.CONFIG.get('dumpFolder', 'output')
        data = pyUbiForge.temp_files(file_id, forge_file_name, datafile_id)
        if data is None:
            logging.warning(f"Failed to find file {file_id:016X}")
            return
        file_name = data.file_name
        minimap_textures: MMClass = pyUbiForge.read_file(data.file)
        output_image = None
        # Placeholder tile size; replaced by the real DDS dimensions below.
        tile_width = tile_height = 128
        # NOTE(review): y ranges over .width and x over .height -- correct
        # only if the minimap grid is square; verify the axes are not swapped.
        for y in range(minimap_textures.width):
            for x in range(minimap_textures.height):
                texture_file_id = minimap_textures.image_ids[y * minimap_textures.height + x]
                texture_data = pyUbiForge.temp_files(texture_file_id)
                if texture_data is None:
                    logging.warning(f"Failed to find file {texture_file_id:016X}")
                    continue
                texture: TextureClass = pyUbiForge.read_file(texture_data.file)
                if output_image is None:
                    # First tile found: read the real tile dimensions and
                    # allocate the full-size canvas.
                    tile_width = struct.unpack('<I', texture.dwWidth)[0]
                    tile_height = struct.unpack('<I', texture.dwHeight)[0]
                    output_image = Image.new('RGBA', (tile_width * minimap_textures.width, tile_height * minimap_textures.height))
                temp_image = Image.open(BytesIO(texture.dds_string))
                # Row y=0 is pasted at the bottom: the vertical offset is
                # flipped relative to the grid index.
                output_image.paste(temp_image, (tile_width * x, tile_height * minimap_textures.height - tile_height * (y+1)))
            logging.info(f'Written {y+1} row of {minimap_textures.width}')
        if output_image is None:
            logging.info('No Minimap to export')
        else:
            output_image.save(f'{save_folder}/{file_name}.png')
|
from mido import MidiFile
import operator
def MIDIconvert(file):
    """Convert a MIDI file into a sorted note-event list.

    Each event is [time, pitch, flag, duration] where time is in 1/6-tick
    units offset by 1, pitch is msg.note - 24, flag is 1 for note_on and 0
    for note_off, and duration is filled in for matched on/off pairs.

    :param file: path to the .mid file
    :return: events sorted by (time, pitch, flag)
    """
    mid = MidiFile(file)
    sequence=[]
    previous=0
    # Pass 1: flatten every note message of every track into `sequence`,
    # converting per-message delta times into absolute times.
    # NOTE(review): deltas of non-note messages are never accumulated into
    # `previous` -- confirm that is intended for these input files.
    for i, track in enumerate(mid.tracks):
        #print('Track {}: {}'.format(i, track.name))
        for msg in track:
            # NOTE(review): a note_on with velocity 0 conventionally means
            # note_off in MIDI; that case is treated as note_on here.
            if msg.type == 'note_on'or msg.type == 'note_off':
                #(msg.note)
                if msg.type == 'note_on':
                    note=1
                else:
                    note=0
                sequence.append([msg.time/6+previous+1,msg.note-24,note,0])
                previous+=msg.time/6
    #print(sequence)
    # Pass 2: for each note_on, find the next note_off with the same pitch
    # and record the elapsed duration on both events.
    i=0
    while i<=len(sequence)-1:
        if sequence[i][2]==1:
            j=i
            while j<=len(sequence)-1:
                if sequence[j][2]==0 and sequence[j][1]==sequence[i][1]:
                    duration=sequence[j][0]-sequence[i][0]
                    sequence[i][3]=duration
                    sequence[j][3]=duration
                    #print(i, j ,"here")
                    break
                j+=1
        i+=1
    #print(sequence)
    # Sort by time, then pitch, then on/off flag.
    sequence=sorted(sequence, key=operator.itemgetter(0,1,2))
    print(sequence)
    return sequence
# Guard the demo invocation so importing this module no longer triggers a
# file read; behavior when run directly as a script is unchanged.
if __name__ == '__main__':
    MIDIconvert('test4.mid')
|
import logging
from sqlalchemy import update, select
from src.hana.db.model import cookies
from src.hana.db.mysql import init_connection
# Configure root logging once at import time for this demo script.
logging.basicConfig(
    datefmt="%Y%m%d %H:%M:%S",
    format="%(asctime)s %(levelname)-8s %(message)s",
    level=logging.INFO,
)
def u_update(connection, table):
    """
    Add 120 to the quantity of the 'chocolate chip' row of ``table``.

    Todo: how to pass in the WHERE condition as a parameter
    Todo: 2. how to pass in the updated value as a parameter
    :param connection: active SQLAlchemy connection
    :param table: table to update. Generalisation: the original ignored this
        parameter and was hard-wired to the module-level ``cookies`` table;
        it now works for any table exposing cookie_name/quantity columns.
    :return: result proxy of the executed UPDATE
    """
    u = update(table).where(table.c.cookie_name == 'chocolate chip')
    u = u.values(quantity=(table.c.quantity + 120))
    rp = connection.execute(u)
    logging.debug('u_update:{} lines.'.format(rp.rowcount))
    return rp
if __name__ == '__main__':
    # Demo: bump the chocolate-chip quantity, then read the row back.
    connection = init_connection('root', '123456', '127.0.0.1', 'chapter2')
    r = u_update(connection, cookies)
    logging.info(r.rowcount)
    s = select([cookies]).where(cookies.c.cookie_name == 'chocolate chip')
    result = connection.execute(s).first()
    # Print each column of the updated row, right-aligned.
    for key in result.keys():
        print('{:>20}: {}'.format(key, result[key]))
|
from waitlist import entry
# Script entry point: delegate to the waitlist CLI.
if __name__ == '__main__':
    entry.main()
|
__author__ = "Narwhale"
# current_users = ['A','B','C','D','admin','F','G','H','J']
# new_users = ['A','R','Q','B','L']
#
# for i in new_users:
# if i in current_users:
# print('用户名已存在,请重新输入')
# else:
# print('此用户名可以使用')
#-----------------------------------
# Existing account names; the availability check below is case-insensitive.
current_users = ['Ai','B','C','D','admin','F','G','H','J']
new_users = ['AI','R','Q','B','L']
# Normalise the existing names once so each candidate needs one lookup.
current_users = [username.upper() for username in current_users]
for candidate in new_users:
    taken = candidate.upper() in current_users
    print('用户名已存在,请重新输入' if taken else '此用户名可以使用')
import tensorflow as tf
import theano
import pandas as pd
import numpy as np
import matplotlib
import os
import math
import pydot
import graphviz
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from keras.layers import Dense, Dropout, LSTM, Embedding, Activation, Lambda, Bidirectional
from keras.engine import Input, Model, InputSpec
from keras.preprocessing.sequence import pad_sequences
from keras.utils import plot_model
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from sklearn.utils import class_weight
from keras import backend as K
from keras.preprocessing import sequence
from keras.models import model_from_json
# Training hyper-parameters.
EPCOHS = 10              # NOTE(review): name is a typo of EPOCHS; kept for callers.
BATCH_SIZE = 400
input_layer = 4          # embedding vocabulary size: one id per base in 'ATGC'
output_layer = 50        # embedding output dimension
middle_layer = 62        # LSTM hidden units per direction
dropout_perentage = 0.18
seqlen = 900             # sequences are padded/truncated to this length
checkpoint_dir = 'checkpoints'
# Bug fix: the original called os.path.exists(checkpoint_dir) and discarded
# the result, so on a fresh checkout the directory was never created and
# ModelCheckpoint would fail when saving weights. Create it when missing.
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
def letter_to_index(letter):
    """Map a nucleotide character to its index in 'ATGC', or None if unknown."""
    for position, base in enumerate('ATGC'):
        if base == letter:
            return position
    return None
def parseInput(test_split = 0.1, maxlen = seqlen):
    """Load dataset.npy and return shuffled, padded train/test splits.

    Returns (X_train, y_train, X_test, y_test) with sequences padded to
    *maxlen*.  Assumes dataset.npy holds a dict-like object with 'genes'
    (nucleotide strings over ATGC) and 'resistant' (labels) — TODO confirm.
    """
    data = np.load("dataset.npy", allow_pickle=True).item()
    df = pd.DataFrame(data, columns=["genes", "resistant"])
    # Encode each nucleotide as its index in 'ATGC' (see letter_to_index).
    df['genes'] = df['genes'].apply(lambda x: [int(letter_to_index(e)) for e in x])
    # Shuffle rows before splitting into train/test.
    df = df.reindex(np.random.permutation(df.index))
    train_size = int(len(df) * (1 - test_split))
    X_train = df['genes'].values[:train_size]
    y_train = np.array(df['resistant'].values[:train_size])
    X_test = np.array(df['genes'].values[train_size:])
    y_test = np.array(df['resistant'].values[train_size:])
    return pad_sequences(X_train, maxlen=maxlen), y_train, pad_sequences(X_test, maxlen=maxlen), y_test
def build_net(input_length, rnn_hidden_dim = middle_layer, output_dim = output_layer, input_dim = input_layer, dropout = dropout_perentage):
    """Build a stacked bidirectional-LSTM binary classifier.

    Args:
        input_length: length of each (padded) input sequence.
        rnn_hidden_dim: hidden units per LSTM direction.
        output_dim: embedding dimension.
        input_dim: embedding vocabulary size.
        dropout: dropout fraction applied after each LSTM stack.
    Returns a compiled keras Sequential model (adam / binary_crossentropy).
    """
    model = Sequential()
    # Bug fix: the original passed the module-level `input_layer` here,
    # silently ignoring the `input_dim` parameter.
    model.add(Embedding(input_dim = input_dim, output_dim = output_dim, input_length = input_length, name='embedding_layer'))
    model.add(Bidirectional(LSTM(rnn_hidden_dim, return_sequences=True)))
    model.add(Dropout(dropout))
    model.add(Bidirectional(LSTM(rnn_hidden_dim)))
    model.add(Dropout(dropout))
    # Single sigmoid unit for binary classification.
    model.add(Dense(1, activation='sigmoid'))
    model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    return model
def _save_history_plot(history, train_key, val_key, title, ylabel, filename):
    """Plot one train/validation metric pair from a Keras History and save it."""
    plt.plot(history.history[train_key])
    plt.plot(history.history[val_key])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.savefig(filename)
    plt.clf()
def create_plots(history):
    """Save accuracy.png and loss.png from a Keras training History.

    The original duplicated the plotting code for each metric; it is now
    factored into _save_history_plot.
    """
    _save_history_plot(history, 'acc', 'val_acc', 'model accuracy', 'accuracy', 'accuracy.png')
    _save_history_plot(history, 'loss', 'val_loss', 'model loss', 'loss', 'loss.png')
if __name__ == '__main__':
    X_train, y_train, X_test, y_test = parseInput()
    model = build_net(len(X_train[0]))
    filepath= checkpoint_dir + "/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
    # Bug fix: val_loss must be *minimised* — the original used mode='max',
    # which keeps the worst checkpoints instead of the best.
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    # Renamed so the sklearn `class_weight` module is not shadowed by its result.
    class_weights = class_weight.compute_class_weight('balanced', np.unique(y_train), y_train)
    print(class_weights)
    history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, class_weight=class_weights,
                 epochs=EPCOHS, callbacks=callbacks_list, validation_split = 0.1, verbose = 1)
    # Persist architecture + weights separately.
    model_json = model.to_json()
    with open("neuralnet.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights("weights")
    create_plots(history)
    plot_model(model, to_file='model.png')
    score, acc = model.evaluate(X_test, y_test, batch_size=BATCH_SIZE)
    print('score:', score)
    print(' accuracy:', acc)
|
#!/usr/bin/python3
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import time
def rot_mat(roll, pitch, yaw):
    """Return a 3x3 rotation matrix applying yaw, then pitch, then roll.

    Angles are in radians.  Each angle is negated first, so the result is
    the inverse (transpose) of the usual frame-rotation matrices — it
    appears to rotate vectors from the body frame into the world frame
    (NOTE(review): confirm the intended frame convention).
    A dead alternative formulation kept as a bare string in the original
    has been removed.
    """
    roll = -roll
    pitch = -pitch
    yaw = -yaw
    # Elementary rotations about z (yaw), y (pitch) and x (roll).
    D = np.array([[ np.cos(yaw), np.sin(yaw), 0],
                  [ -np.sin(yaw), np.cos(yaw), 0],
                  [ 0, 0, 1]])
    C = np.array([[ np.cos(pitch), 0, -np.sin(pitch)],
                  [ 0, 1, 0],
                  [ np.sin(pitch), 0, np.cos(pitch)]])
    B = np.array([[ 1, 0, 0],
                  [ 0, np.cos(roll), np.sin(roll)],
                  [ 0, -np.sin(roll), np.cos(roll)]])
    # Multiplied as D*C*B so yaw is resolved first, then pitch, then roll.
    A = np.matmul(np.matmul(D, C), B)
    return A
def deg2rad(deg):
    """Convert degrees to radians."""
    return deg * (np.pi / 180)
def rad2deg(rad):
    """Convert radians to degrees."""
    return rad * (180 / np.pi)
class Camera:
    """A positioned, oriented camera with a rectangular field of view.

    Angles are radians; wfov/hfov are the full horizontal/vertical FOV.
    """
    def __init__(self,x,y,z,roll,pitch,yaw,wfov,hfov):
        self.x = x
        self.y = y
        self.z = z
        self.roll = roll
        self.pitch = pitch
        self.yaw = yaw
        self.wfov = wfov
        self.hfov = hfov
    def mk_fov(self):
        """Return the centre ray, the four FOV corner rays and a frame rectangle.

        Each item is a 3xN array of world-space points.  The original also
        computed top/bottom/left/right edge rays that were never returned;
        that dead work has been removed (see the commented-out return tail).
        """
        rot = rot_mat(self.roll, self.pitch, self.yaw)
        # 101 samples along a 5-unit ray down the camera's +x axis.
        line_step = np.linspace([0,0,0],[5,0,0],101).T
        line_center = np.matmul(rot, line_step)
        # Corner rays: offset by half the FOV in pitch (height) and yaw (width).
        line_tl = np.matmul(rot, np.matmul(rot_mat(0, self.hfov/2,-self.wfov/2), line_step))
        line_tr = np.matmul(rot, np.matmul(rot_mat(0, self.hfov/2, self.wfov/2), line_step))
        line_bl = np.matmul(rot, np.matmul(rot_mat(0,-self.hfov/2,-self.wfov/2), line_step))
        line_br = np.matmul(rot, np.matmul(rot_mat(0,-self.hfov/2, self.wfov/2), line_step))
        loc = np.array([self.x, self.y, self.z])
        # Closed rectangle through the four corner rays at sample index 20.
        rec = np.array([ line_tl[:,20],
                         line_tr[:,20],
                         line_br[:,20],
                         line_bl[:,20],
                         line_tl[:,20]]).T
        # Translate everything to the camera's world position.
        line_center = (line_center.T + loc).T
        line_tl = (line_tl.T + loc).T
        line_tr = (line_tr.T + loc).T
        line_bl = (line_bl.T + loc).T
        line_br = (line_br.T + loc).T
        rec = (rec.T + loc).T
        return line_center, line_tl, line_tr, line_bl, line_br, rec
        #, line_top, line_bot, line_lft, line_rgt
    def plot(self, ax):
        """Draw every FOV line returned by mk_fov onto a 3D axis."""
        i = 0
        colors = ['green','red','orange','blue','purple','black','gray','gray','gray','gray']
        for line in self.mk_fov():
            xx,yy,zz = line
            ax.plot(xx,yy,zz,c=colors[i])
            i+=1
            #break
class Webcam(Camera):
    """Camera preset with a fixed 60-by-44.048625674 degree field of view."""
    def __init__(self, x, y, z, roll, pitch, yaw):
        super().__init__(x, y, z, roll, pitch, yaw,
                         deg2rad(60), deg2rad(44.048625674))
# Subplot counter shared across mk_plot calls (slots 1..9 of a 3x3 grid).
jj = 1
fig = plt.figure()
def mk_plot():
    """Add the next 3D subplot to the shared figure and return (fig, ax)."""
    global fig, jj
    # 330 + jj selects position jj in a 3x3 subplot grid.
    ax = fig.add_subplot(330 + jj,projection='3d')
    jj += 1
    ax.set_xlabel('X Label')
    ax.set_ylabel('Y Label')
    ax.set_zlabel('Z Label')
    # Y and Z limits are deliberately reversed (high-to-low) — flips the axes.
    ax.set_xlim([-2,2])
    ax.set_ylim([2,-2])
    ax.set_zlim([2,-2])
    # Reference line along +x from the origin.
    grounded = np.linspace([0,0,0],[5,0,0],50).T
    xx,yy,zz = grounded
    ax.plot(xx,yy,zz,c='black')
    return fig, ax
#fig,ax = mk_plot()
#
#xs = [0]
#ys = [0]
#zs = [0]
#
#ax.scatter(xs,ys,zs, c='r', marker='o')
#cam1 = Webcam(-2,-2,0,0,deg2rad(30),deg2rad(-45))
#cam1 = Webcam(0,0,0,deg2rad(90),deg2rad(0),deg2rad(0))
#cam2 = Webcam(2,-2,0,0,deg2rad(30),deg2rad(-135))
#cam3 = Webcam(2,2,0,0,deg2rad(30),deg2rad(135))
#cam1.plot(ax)
#cam2.plot(ax)
#cam3.plot(ax)
#plt.show()
# Sweep a webcam's yaw through a full turn in 45-degree steps, drawing each
# orientation into its own subplot (one per mk_plot call).
curr = 0
for curr in range(0,361,45):
    fig,ax = mk_plot()
    cam1 = Webcam(-2,-2,0,deg2rad(0),deg2rad(0),deg2rad(curr))
    cam1.plot(ax)
    ax.set_title(str(curr))
plt.show()
|
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from .forms import PostModelForm
from .models import PostModel
from datetime import datetime, date
def temp_test(request):
    """Render the blog test view with a canned context and a flash message."""
    projects_list = ["Market Place", "Bonds Data", "PMService", "Algorithm"]
    context = {}
    context["name"] = "Bahmani"
    context["lucky"] = 75
    context["buffer"] = 35
    context["bdate"] = date(1985, 6, 28)
    context["objects_list"] = projects_list
    context["title"] = "Ava"
    messages.success(request, "Test", fail_silently=True, extra_tags='test')
    return render(request, 'blog/test-view.html', context)
def post_model_create_view(request):
    """Create a PostModel from POSTed form data.

    GET (or an invalid POST) renders the create form; a valid POST saves
    the object, flashes a success message and redirects to its detail page.
    The original rebuilt `context` right before returning a redirect — a
    dead assignment, removed here along with stale commented-out code.
    """
    form = PostModelForm(request.POST or None)
    context = {
        "form": form
    }
    if form.is_valid():
        obj = form.save(commit=False)
        print(form.cleaned_data)  # debug trace of the validated payload
        obj.save()
        messages.success(request, "Created a new blog post")
        return HttpResponseRedirect(f"/blog/{obj.id}")
    template = "blog/create-view.html"
    return render(request, template, context)
def post_model_delete_view(request, id):
    """Confirm (GET) and perform (POST) deletion of a post; 404 if absent."""
    target_obj = get_object_or_404(PostModel, id=id)
    if request.method == 'POST':
        target_obj.delete()
        return HttpResponseRedirect("/blog/")
    return render(request, "blog/delete-view.html", {"object": target_obj})
def post_model_update_view(request, id):
    """Edit an existing post; a valid POST saves and redirects to its detail page.

    The original rebuilt `context` right before returning a redirect — a
    dead assignment, removed here.
    """
    target_obj = get_object_or_404(PostModel, id=id)
    form = PostModelForm(request.POST or None, instance=target_obj)
    context = {
        "object": target_obj,
        "form": form
    }
    if form.is_valid():
        obj = form.save(commit=False)
        print("Form updated successfully")
        obj.save()
        messages.success(request, f"Updated post with id {id} successfully")
        return HttpResponseRedirect(f"/blog/{id}")
    template = "blog/update-view.html"
    return render(request, template, context)
def post_model_detail_view(request, id):
    """Render a single post, returning 404 when the id does not exist."""
    post = get_object_or_404(PostModel, id=id)
    return render(request, "blog/detail-view.html", {"object": post})
#@login_required()
def post_model_list_view(request):
    """List posts, optionally filtered by ?q= against title or content.

    Authenticated users get the full template; anonymous users the public one.
    """
    print(request.GET)  # debug trace of the query string
    query = request.GET.get("q")
    qs = PostModel.objects.all()
    # Idiom fix: `if not(query is None)` → `if query is not None`.
    if query is not None:
        qs = qs.filter(
            Q(title__icontains=query) |
            Q(content__icontains=query)
        )
    print(request.user)
    print(qs)
    context = {
        "object_list": qs,
        "name_list": ['Bahman', 'Salehi']
    }
    if request.user.is_authenticated:
        template = 'blog/list-view.html'
        context['user_status'] = 'Authenticated'
    else:
        template = 'blog/list-view-public.html'
        context['user_status'] = 'public'
    return render(request, template, context)
import hashlib
def encrypt_string(hash_string):
    """Return the SHA-512 hex digest of *hash_string*.

    (Despite the name, this is hashing — one-way — not encryption.)
    """
    digest = hashlib.sha512(hash_string.encode())
    return digest.hexdigest()
# Read a string from the user and show its SHA-512 digest.
hash_string = input("Enter a String: ")
sha_signature = encrypt_string(hash_string)
print("Hash Equivalent : ", end ="")
print(sha_signature)
import json
import datetime
def get_list_json(results):
    """Serialise a list of result objects (via their to_dict()) to a JSON array."""
    dicts = [result.to_dict() for result in results]
    return json.dumps(dicts, cls=ComplexEncoder)
def get_json(result):
    """Serialise one result object (via its to_dict()) to a JSON string."""
    payload = result.to_dict()
    return json.dumps(payload, cls=ComplexEncoder)
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder that serialises datetime objects as ISO-8601 strings."""
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        # Fall back to the base class, which raises TypeError for
        # unsupported types.
        return json.JSONEncoder.default(self, obj)
import cv2,time
# Capture frames from the default webcam and display them in greyscale
# until the user presses 'q' in the preview window.
video=cv2.VideoCapture(0)
a=1  # frame counter (starts at 1, so the printed total is frames + 1)
while True:
    a=a+1
    check,frame=video.read()
    print(frame)  # debug: dumps the raw frame array every iteration
    gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    cv2.imshow("capture",gray)
    key=cv2.waitKey(1)
    if key == ord('q'):
        break
print(a)
# Release the camera and close the preview window.
video.release()
cv2.destroyAllWindows()
|
import tkinter as tk
class gui:
    """Tkinter front-end for a 2D drone simulation.

    Lays out a display canvas, a flight-data panel and a control panel
    (buttons, reference sliders, gain/IC entries), and exposes status
    flags (START_PRESSED, EXIT_PRESSED, ...) that the simulation loop polls.

    Bug fix: startButtonCallback and exitButtonCallback are wired both as
    button commands (called with no arguments) and as key bindings (called
    with an event); they now take event=None so both call paths work.
    """
    # gui general parameters
    TITLE = "Drone Simulation"
    GUI_WIDTH = 1100
    GUI_HEIGHT = 700
    GUI_TOP_LEFT_CORNER_X = 50
    GUI_TOP_LEFT_CORNER_Y = 50
    BUTTON_WIDTH = 10
    # frame size parameters
    DISPLAY_CANVAS_WIDTH = 800
    DISPLAY_CANVAS_HEIGHT = 500
    # drone perspective parameters
    SCALE_FACTOR = 100  # pixels per world metre
    WORLD_WIDTH = DISPLAY_CANVAS_WIDTH/SCALE_FACTOR
    WORLD_HEIGHT = DISPLAY_CANVAS_HEIGHT/SCALE_FACTOR
    LOW_FRAMES_HEIGHT = GUI_HEIGHT - DISPLAY_CANVAS_HEIGHT
    SIDE_FRAMES_WIDTH = GUI_WIDTH - DISPLAY_CANVAS_WIDTH
    SIDE_FRAMES_HEIGHT = DISPLAY_CANVAS_HEIGHT
    # gui styles
    DISPLAY_BG_COLOUR = "white"
    BUTTON_BG_COLOUR = "white"
    FRAME_BG_COLOUR = "gray"
    SLIDER_BG_COLOUR = "gray"
    GAINS_BG_COLOUR = "gray"
    BORDER_COLOUR = "black"
    BORDER_WIDTH = 1
    TITLE_TEXT_SIZE = 12
    NORMAL_TEXT_SIZE = 10
    ROUNDING = 2  # decimal places for displayed flight data
    FONT = "TkDefaultFont"
    TEXT_COLOUR = "black"
    # GUI Status Variables (polled by the simulation loop)
    START_PRESSED = False
    EXIT_PRESSED = False
    RESTART_PRESSED = False
    LOG_DATA_PRESSED = False
    SAVE_LOGGED_DATA = False
    NEW_ICs_AVAIL = False
    NEW_GAINS_AVAIL = False
    NEW_REFS_AVAIL = False
    HOVER_DYNAMICS = True
    def printStatus(self):
        """Debug dump of the start/restart flags."""
        print("START_PRESSED = {}, RESTART_PRESSED = {}".format(self.START_PRESSED,self.RESTART_PRESSED))
    # Set initialisation parameters: state is [x, x_dot, y, y_dot, theta, theta_dot]
    ICs = [4,0,2,0,0,0]
    initial_gains = [1,8,9,3,0.2,0.5]
    t = 0
    dt = 0.01
    def updateFlightData(self,Z,FL,FR):
        """Push the state vector Z and thrusts FL/FR into the display widgets."""
        self.t_var.set(round(self.t,self.ROUNDING))
        self.x.set(round(Z[0],self.ROUNDING))
        self.x_dot.set(round(Z[1],self.ROUNDING))
        self.y.set(round(Z[2],self.ROUNDING))
        self.y_dot.set(round(Z[3],self.ROUNDING))
        self.theta.set(round(Z[4],self.ROUNDING))
        self.theta_dot.set(round(Z[5],self.ROUNDING))
        self.F_l.set(round(FL,self.ROUNDING))
        self.F_r.set(round(FR,self.ROUNDING))
    def incrementTime(self):
        """Advance the simulation clock by one time step."""
        self.t += self.dt
    def initialiseFrames(self):
        """Create and grid the control frame, display canvas and data frame."""
        self.uiFrame = tk.Frame(self.gui, bg=self.FRAME_BG_COLOUR, height=self.LOW_FRAMES_HEIGHT,
            width=self.GUI_WIDTH, bd=self.BORDER_WIDTH,highlightbackground=self.BORDER_COLOUR,
            highlightcolor=self.BORDER_COLOUR, highlightthickness=self.BORDER_WIDTH)
        self.uiFrame.grid(row=1,column=0,columnspan=2)
        self.displayCanvas = tk.Canvas(self.gui, bg=self.DISPLAY_BG_COLOUR, height=self.DISPLAY_CANVAS_HEIGHT,
            width=self.DISPLAY_CANVAS_WIDTH,highlightbackground=self.BORDER_COLOUR,
            highlightcolor=self.BORDER_COLOUR, highlightthickness=self.BORDER_WIDTH)
        self.displayCanvas.grid(row=0,column=0)
        self.flightDataFrame = tk.Frame(self.gui, bg=self.FRAME_BG_COLOUR,
            height=self.SIDE_FRAMES_HEIGHT, width=self.SIDE_FRAMES_WIDTH,
            highlightbackground=self.BORDER_COLOUR, highlightcolor=self.BORDER_COLOUR,
            highlightthickness=self.BORDER_WIDTH)
        self.flightDataFrame.grid(row=0,column=1)
    def startButtonCallback(self,event=None):
        """Toggle pause/run; bound to both the Start button and <space>."""
        if self.START_PRESSED:
            self.startButton['text'] = "Start"
        else:
            self.startButton['text'] = "Pause"
        self.START_PRESSED = not self.START_PRESSED
    def restartKeyCallback(self,event):
        """Restart the simulation when the 'r' key is pressed."""
        if(event.char=="r"):
            self.t = 0
            self.START_PRESSED = False
            self.RESTART_PRESSED = True
            print("Restart simulation")
    def restartButtonCallback(self):
        """Restart the simulation from the Restart button."""
        self.t = 0
        self.START_PRESSED = False
        self.RESTART_PRESSED = True
        print("Restart simulation")
    def exitButtonCallback(self,event=None):
        """Signal shutdown; bound to both the Exit button and <Escape>."""
        print("Closing GUI")
        self.EXIT_PRESSED = not self.EXIT_PRESSED
    def saveButtonCallback(self):
        """Placeholder save handler."""
        print("Data Saved")
    def disturbancesButtonCallback(self):
        """Placeholder disturbance handler."""
        print("Disturbances added")
    def updateGainsButtonCallback(self):
        """Flag that new controller gains are available in the entry widgets."""
        self.NEW_GAINS_AVAIL = True
    def updateIniticialConditions(self):
        """Flag that new initial conditions are available in the entry widgets."""
        self.NEW_ICs_AVAIL = True
    def initialiseControlButtons(self):
        """Build the control-button grid and key bindings."""
        print("Initialising Control Buttons")
        BUTTON_COL_L = 0
        BUTTON_COL_R = 1
        TITLE_ROW = 0
        ROW_1 = 1
        ROW_2 = 2
        ROW_3 = 3
        ROW_4 = 4
        BUTTON_PAD_X = 10
        BUTTON_PAD_Y = 1
        title = tk.Label(self.uiFrame, bg=self.FRAME_BG_COLOUR, text="Control Buttons",
            font=(self.FONT,self.TITLE_TEXT_SIZE))
        title.grid(row=TITLE_ROW,columnspan=len([BUTTON_COL_L,BUTTON_COL_R]))
        self.startButton = tk.Button(self.uiFrame, bg=self.BUTTON_BG_COLOUR,
            text="Start", fg=self.TEXT_COLOUR, width=self.BUTTON_WIDTH,
            command=self.startButtonCallback)
        self.startButton.grid(column=BUTTON_COL_L,padx=BUTTON_PAD_X,row=ROW_1,pady=BUTTON_PAD_Y)
        self.restartButton = tk.Button(self.uiFrame, bg=self.BUTTON_BG_COLOUR,
            text="Restart", fg=self.TEXT_COLOUR, width=self.BUTTON_WIDTH,
            command=self.restartButtonCallback)
        self.restartButton.grid(column=BUTTON_COL_L,padx=BUTTON_PAD_X,row=ROW_2,
            pady=BUTTON_PAD_Y)
        self.exitButton = tk.Button(self.uiFrame, bg=self.BUTTON_BG_COLOUR,
            text="Exit", fg=self.TEXT_COLOUR, width=self.BUTTON_WIDTH,
            command=self.exitButtonCallback)
        self.exitButton.grid(column=BUTTON_COL_L,padx=BUTTON_PAD_X,row=ROW_3,
            pady=BUTTON_PAD_Y)
        # NOTE(review): the Plot Data button reuses the start callback — it
        # toggles run/pause instead of plotting; a dedicated plot handler
        # appears to be missing.
        self.plotButton = tk.Button(self.uiFrame, bg=self.BUTTON_BG_COLOUR,
            text="Plot Data", fg=self.TEXT_COLOUR, width=self.BUTTON_WIDTH,
            command=self.startButtonCallback)
        self.plotButton.grid(column=BUTTON_COL_R,padx=BUTTON_PAD_X,row=ROW_1,
            pady=BUTTON_PAD_Y)
        self.saveButton = tk.Button(self.uiFrame, bg=self.BUTTON_BG_COLOUR,
            text="Save Data", fg=self.TEXT_COLOUR, width=self.BUTTON_WIDTH,
            command=self.saveButtonCallback)
        self.saveButton.grid(column=BUTTON_COL_R,padx=BUTTON_PAD_X,row=ROW_2,
            pady=BUTTON_PAD_Y)
        self.disturbancesButton = tk.Button(self.uiFrame, bg=self.BUTTON_BG_COLOUR,
            text="Disturbances", fg=self.TEXT_COLOUR, width=self.BUTTON_WIDTH,
            command=self.disturbancesButtonCallback)
        self.disturbancesButton.grid(column=BUTTON_COL_R,padx=BUTTON_PAD_X,row=ROW_3,
            pady=BUTTON_PAD_Y)
        # Keyboard shortcuts mirror the buttons.
        self.gui.bind('<space>', self.startButtonCallback)
        self.gui.bind('<Escape>', self.exitButtonCallback)
        self.gui.bind('<Key>', self.restartKeyCallback)
    def initialiseReferences(self):
        """Build the X/Y reference sliders (world coordinates)."""
        print("Setting Initial References")
        SLIDER_COL = 6
        SLIDER_LENGTH = 150
        SLIDER_WIDTH = 15
        SLIDER_PAD_X = 7
        SLIDER_PAD_Y = 1
        X_ROW = 0
        Y_ROW = 3
        ROW_SPAN = 3
        self.x_ref_slider = tk.Scale(self.uiFrame, from_=0, to=self.WORLD_WIDTH,
            width=SLIDER_WIDTH, length=SLIDER_LENGTH,bg=self.SLIDER_BG_COLOUR,
            tickinterval=0.5,orient="horizontal",label="X Reference")
        self.x_ref_slider.set(4)
        self.x_ref_slider.grid(column=SLIDER_COL,row=X_ROW,rowspan=ROW_SPAN,
            padx=SLIDER_PAD_X,pady=SLIDER_PAD_Y)
        self.y_ref_slider = tk.Scale(self.uiFrame, from_=0, to=self.WORLD_HEIGHT,
            width=SLIDER_WIDTH, length=SLIDER_LENGTH,bg=self.SLIDER_BG_COLOUR,
            tickinterval=0.5,orient="horizontal",label="Y reference")
        self.y_ref_slider.set(2)
        self.y_ref_slider.grid(column=SLIDER_COL,row=Y_ROW,rowspan=ROW_SPAN,
            padx=SLIDER_PAD_X,pady=SLIDER_PAD_Y)
    def initialiseFlightData(self):
        """Build the flight-data panel: labelled entries for state, time, thrusts."""
        print("Initialising Flight Data")
        Z = [4,0,2,0,0,0]  # initial displayed state
        F = [2,69]         # initial displayed thrusts [F_r, F_l]
        X_ROW = 1
        Y_ROW = 2
        THETA_ROW = 3
        TIME_ROW = 4
        THRUST_ROW = 5
        POS_TEXT_COL = 0
        POS_COL = 1
        VEL_TEXT_COL = 3
        VEL_COL = 4
        ACC_TEXT_COL = 6
        ACC_COL = 7
        PAD_Y = 2
        PAD_X = 1
        ENTRY_WIDTH = 5
        flightDataTitle = tk.Label(self.flightDataFrame,text="Flight Data",
            bg=self.FRAME_BG_COLOUR,font=(self.FONT,self.TITLE_TEXT_SIZE)).grid(row=0,columnspan=8)
        x_label = tk.Label(self.flightDataFrame,text="x (m):",bg=self.FRAME_BG_COLOUR,
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=X_ROW,column=POS_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.x = tk.DoubleVar()
        self.x.set("%.2f" % Z[0])
        x_pos = tk.Entry(self.flightDataFrame,textvariable=self.x,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        x_pos.grid(row=X_ROW,column=POS_COL,padx=PAD_X,pady=PAD_Y)
        x_dot_label = tk.Label(self.flightDataFrame,bg=self.FRAME_BG_COLOUR,text="x_d (m/s):",
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=X_ROW,column=VEL_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.x_dot = tk.DoubleVar()
        self.x_dot.set("%.2f" % Z[1])
        x_vel = tk.Entry(self.flightDataFrame,textvariable=self.x_dot,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        x_vel.grid(row=X_ROW,column=VEL_COL,padx=PAD_X,pady=PAD_Y)
        y_label = tk.Label(self.flightDataFrame,text="y (m):",bg=self.FRAME_BG_COLOUR,
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=Y_ROW,column=POS_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.y = tk.DoubleVar()
        self.y.set("%.2f" % Z[2])
        y_pos = tk.Entry(self.flightDataFrame,textvariable=self.y,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        y_pos.grid(row=Y_ROW,column=POS_COL,padx=PAD_X,pady=PAD_Y)
        y_dot_label = tk.Label(self.flightDataFrame,text="y_d (m/s):",bg=self.FRAME_BG_COLOUR,
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=Y_ROW,column=VEL_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.y_dot = tk.DoubleVar()
        self.y_dot.set("%.2f" % Z[3])
        y_vel = tk.Entry(self.flightDataFrame,textvariable=self.y_dot,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        y_vel.grid(row=Y_ROW,column=VEL_COL,padx=PAD_X,pady=PAD_Y)
        theta_label = tk.Label(self.flightDataFrame,text="Theta (rad):",bg=self.FRAME_BG_COLOUR,
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=THETA_ROW,column=POS_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.theta = tk.DoubleVar()
        self.theta.set("%.2f" % Z[4])
        theta_pos = tk.Entry(self.flightDataFrame,textvariable=self.theta,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        theta_pos.grid(row=THETA_ROW,column=POS_COL,padx=PAD_X,pady=PAD_Y)
        theta_dot_label = tk.Label(self.flightDataFrame,text="Theta_d (rad/s):",bg=self.FRAME_BG_COLOUR,
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=THETA_ROW,column=VEL_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.theta_dot = tk.DoubleVar()
        self.theta_dot.set("%.2f" % Z[5])
        theta_vel = tk.Entry(self.flightDataFrame,textvariable=self.theta_dot,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        theta_vel.grid(row=THETA_ROW,column=VEL_COL,padx=PAD_X,pady=PAD_Y)
        time_label = tk.Label(self.flightDataFrame,text="Time (s):",bg=self.FRAME_BG_COLOUR,
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=TIME_ROW,column=POS_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.t_var = tk.DoubleVar()
        self.t_var.set("%.2f" % self.t)
        time_display = tk.Entry(self.flightDataFrame,textvariable=self.t_var,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        time_display.grid(row=TIME_ROW,column=POS_COL,padx=PAD_X,pady=PAD_Y)
        Fr_label = tk.Label(self.flightDataFrame,text="F_r (N):",bg=self.FRAME_BG_COLOUR,
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=THRUST_ROW,column=POS_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.F_r = tk.DoubleVar()
        self.F_r.set("%.2f" % F[0])
        Thrust_right = tk.Entry(self.flightDataFrame,textvariable=self.F_r,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        Thrust_right.grid(row=THRUST_ROW,column=POS_COL,padx=PAD_X,pady=PAD_Y)
        Fl_label = tk.Label(self.flightDataFrame,text="F_l (N): =",bg=self.FRAME_BG_COLOUR,
            font=(self.FONT,self.NORMAL_TEXT_SIZE)).grid(row=THRUST_ROW,column=VEL_TEXT_COL,
            padx=PAD_X,pady=PAD_Y)
        self.F_l = tk.DoubleVar()
        self.F_l.set("%.2f" % F[1])
        Thrust_left = tk.Entry(self.flightDataFrame,textvariable=self.F_l,width=ENTRY_WIDTH,
            font=(self.FONT,self.NORMAL_TEXT_SIZE))
        Thrust_left.grid(row=THRUST_ROW,column=VEL_COL,padx=PAD_X,pady=PAD_Y)
    def setInitialConditions(self):
        """Build the initial-condition entry widgets (pre-filled from self.ICs)."""
        print("Setting Initial Conditions")
        POS_TEXT_COL = 2
        POS_COL = 3
        VEL_TEXT_COL = 4
        VEL_COL = 5
        LABEL_WIDTH = 10
        ENTRY_WIDTH = 5
        COL_SPAN = 4
        ENTRY_PAD_X = 3
        TITLE_ROW = 0
        X_ROW = 1
        Y_ROW = 2
        THETA_ROW = 3
        BUTTON_ROW = 4
        title = tk.Label(self.uiFrame, bg=self.FRAME_BG_COLOUR, text="Set ICs",
            font=(self.FONT,self.TITLE_TEXT_SIZE))
        title.grid(row=TITLE_ROW,column=POS_TEXT_COL,columnspan=COL_SPAN)
        self.updateIniticialConditionsButton = tk.Button(self.uiFrame, bg=self.BUTTON_BG_COLOUR,
            text="Set ICs", fg=self.TEXT_COLOUR, width=self.BUTTON_WIDTH,
            command=self.updateIniticialConditions)
        self.updateIniticialConditionsButton.grid(column=POS_TEXT_COL,row=BUTTON_ROW,columnspan=COL_SPAN)
        x_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="X(m):",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        x_label.grid(row=X_ROW,column=POS_TEXT_COL)
        self.x_IC = tk.DoubleVar()
        self.x_IC.set(self.ICs[0])
        x_entry = tk.Entry(self.uiFrame,textvariable=self.x_IC,width=ENTRY_WIDTH)
        x_entry.grid(row=X_ROW,column=POS_COL,padx=ENTRY_PAD_X)
        y_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="Y(m):",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        y_label.grid(row=Y_ROW,column=POS_TEXT_COL)
        self.y_IC = tk.DoubleVar()
        self.y_IC.set(self.ICs[2])
        y_entry = tk.Entry(self.uiFrame,textvariable=self.y_IC,width=ENTRY_WIDTH)
        y_entry.grid(row=Y_ROW,column=POS_COL,padx=ENTRY_PAD_X)
        theta_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="Theta (Rad):",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        theta_label.grid(row=THETA_ROW,column=POS_TEXT_COL)
        self.theta_IC = tk.DoubleVar()
        self.theta_IC.set(self.ICs[4])
        theta_entry = tk.Entry(self.uiFrame,textvariable=self.theta_IC,width=ENTRY_WIDTH)
        theta_entry.grid(row=THETA_ROW,column=POS_COL,padx=ENTRY_PAD_X)
        x_dot_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="X_d (m/s)",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        x_dot_label.grid(row=X_ROW,column=VEL_TEXT_COL)
        self.x_dot_IC = tk.DoubleVar()
        self.x_dot_IC.set(self.ICs[1])
        x_dot_entry = tk.Entry(self.uiFrame,textvariable=self.x_dot_IC,width=ENTRY_WIDTH)
        x_dot_entry.grid(row=X_ROW,column=VEL_COL,padx=ENTRY_PAD_X)
        y_dot_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="Y_d (m/s)",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        y_dot_label.grid(row=Y_ROW,column=VEL_TEXT_COL)
        self.y_dot_IC = tk.DoubleVar()
        self.y_dot_IC.set(self.ICs[3])
        y_dot_entry = tk.Entry(self.uiFrame,textvariable=self.y_dot_IC,width=ENTRY_WIDTH)
        y_dot_entry.grid(row=Y_ROW,column=VEL_COL,padx=ENTRY_PAD_X)
        theta_dot_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="Theta_d (rad/s)",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        theta_dot_label.grid(row=THETA_ROW,column=VEL_TEXT_COL)
        self.theta_dot_IC = tk.DoubleVar()
        self.theta_dot_IC.set(self.ICs[5])
        theta_dot_entry = tk.Entry(self.uiFrame,textvariable=self.theta_dot_IC,width=ENTRY_WIDTH)
        theta_dot_entry.grid(row=THETA_ROW,column=VEL_COL,padx=ENTRY_PAD_X)
    def initialiseGains(self):
        """Build the Kp/Kd gain entry widgets (pre-filled from self.initial_gains)."""
        print("Setting Initial Gains")
        K = self.initial_gains
        KP_TEXT_COL = 7
        KP_COL = 8
        KD_TEXT_COL = 9
        KD_COL = 10
        LABEL_WIDTH = 7
        ENTRY_WIDTH = 5
        COL_SPAN = 4
        ENTRY_PAD_X = 3
        TITLE_ROW = 0
        X_ROW = 1
        Y_ROW = 2
        THETA_ROW = 3
        BUTTON_ROW = 4
        title = tk.Label(self.uiFrame, bg=self.FRAME_BG_COLOUR, text="Update Gains",
            font=(self.FONT,self.TITLE_TEXT_SIZE))
        title.grid(row=TITLE_ROW,column=KP_TEXT_COL,columnspan=COL_SPAN)
        self.updateGains = tk.Button(self.uiFrame, bg=self.BUTTON_BG_COLOUR,
            text="Update Gains", fg=self.TEXT_COLOUR, width=self.BUTTON_WIDTH,
            command=self.updateGainsButtonCallback)
        self.updateGains.grid(column=KP_TEXT_COL,row=BUTTON_ROW,columnspan=COL_SPAN)
        Kp_x_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="X: Kp",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        Kp_x_label.grid(row=X_ROW,column=KP_TEXT_COL)
        self.Kp_x = tk.DoubleVar()
        self.Kp_x.set(K[0])
        Kp_x_entry = tk.Entry(self.uiFrame,textvariable=self.Kp_x,width=ENTRY_WIDTH)
        Kp_x_entry.grid(row=X_ROW,column=KP_COL,padx=ENTRY_PAD_X)
        Kp_y_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="Y: Kp",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        Kp_y_label.grid(row=Y_ROW,column=KP_TEXT_COL)
        self.Kp_y = tk.DoubleVar()
        self.Kp_y.set(K[2])
        Kp_y_entry = tk.Entry(self.uiFrame,textvariable=self.Kp_y,width=ENTRY_WIDTH)
        Kp_y_entry.grid(row=Y_ROW,column=KP_COL,padx=ENTRY_PAD_X)
        Kp_theta_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="Theta: Kp",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        Kp_theta_label.grid(row=THETA_ROW,column=KP_TEXT_COL)
        self.Kp_theta = tk.DoubleVar()
        self.Kp_theta.set(K[4])
        Kp_theta_entry = tk.Entry(self.uiFrame,textvariable=self.Kp_theta,width=ENTRY_WIDTH)
        Kp_theta_entry.grid(row=THETA_ROW,column=KP_COL,padx=ENTRY_PAD_X)
        Kd_x_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="X: Kd",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        Kd_x_label.grid(row=X_ROW,column=KD_TEXT_COL)
        self.Kd_x = tk.DoubleVar()
        self.Kd_x.set(K[1])
        Kd_x_entry = tk.Entry(self.uiFrame,textvariable=self.Kd_x,width=ENTRY_WIDTH)
        Kd_x_entry.grid(row=X_ROW,column=KD_COL,padx=ENTRY_PAD_X)
        Kd_y_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="Y: Kd",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        Kd_y_label.grid(row=Y_ROW,column=KD_TEXT_COL)
        self.Kd_y = tk.DoubleVar()
        self.Kd_y.set(K[3])
        Kd_y_entry = tk.Entry(self.uiFrame,textvariable=self.Kd_y,width=ENTRY_WIDTH)
        Kd_y_entry.grid(row=Y_ROW,column=KD_COL,padx=ENTRY_PAD_X)
        Kd_theta_label = tk.Label(self.uiFrame,bg=self.GAINS_BG_COLOUR,text="Theta: Kd",
            font=(self.FONT,self.NORMAL_TEXT_SIZE),width=LABEL_WIDTH)
        Kd_theta_label.grid(row=THETA_ROW,column=KD_TEXT_COL)
        self.Kd_theta = tk.DoubleVar()
        self.Kd_theta.set(K[5])
        Kd_theta_entry = tk.Entry(self.uiFrame,textvariable=self.Kd_theta,width=ENTRY_WIDTH)
        Kd_theta_entry.grid(row=THETA_ROW,column=KD_COL,padx=ENTRY_PAD_X)
    def initialiseWindow(self):
        """Set the window title and geometry from the class constants."""
        self.gui.title(self.TITLE)
        self.gui.geometry("{}x{}+{}+{}".format(self.GUI_WIDTH,self.GUI_HEIGHT,self.GUI_TOP_LEFT_CORNER_X,self.GUI_TOP_LEFT_CORNER_Y))
    # code run once to initialise the GUI
    def __init__(self):
        self.gui = tk.Tk()
        self.initialiseWindow()
        self.initialiseFrames()
        self.initialiseFlightData()
        self.initialiseControlButtons()
        self.initialiseReferences()
        self.initialiseGains()
        self.setInitialConditions()
        self.updateFrameSizes()
        self.draw_compass()
    def draw_compass(self):
        """Draw X/Y axis arrows and the world extent in the canvas corner."""
        x_origin = 10
        y_origin = self.DISPLAY_CANVAS_HEIGHT-10
        length = 50
        x_arrow = self.displayCanvas.create_line(x_origin, y_origin, x_origin+length, y_origin,
            arrow=tk.LAST)
        y_arrow = self.displayCanvas.create_line(x_origin, y_origin, x_origin, y_origin-length,
            arrow=tk.LAST)
        x_origin += 80
        y_origin -= 10
        self.displayCanvas.create_text(x_origin,y_origin,text="(Xmax = {}m, Ymax = {}m)".format(self.WORLD_WIDTH,self.WORLD_HEIGHT))
    def updateFrameSizes(self):
        """Pad the frames so they fill their allotted regions after layout."""
        self.gui.update()
        self.gui.update_idletasks()
        PADDING_HEIGHT_SIDE = (self.SIDE_FRAMES_HEIGHT - self.flightDataFrame.winfo_height())/2
        PADDING_WIDTH_SIDE = (self.SIDE_FRAMES_WIDTH - self.flightDataFrame.winfo_width())/2
        self.flightDataFrame.configure(padx=PADDING_WIDTH_SIDE,pady=PADDING_HEIGHT_SIDE+self.BORDER_WIDTH)
        PADDING_HEIGHT_UI = (self.LOW_FRAMES_HEIGHT - self.uiFrame.winfo_height())/2
        PADDING_WIDTH_UI = (self.GUI_WIDTH - self.uiFrame.winfo_width())/2
        self.uiFrame.configure(padx=PADDING_WIDTH_UI+self.BORDER_WIDTH,pady=PADDING_HEIGHT_UI)
|
# -*- coding:utf-8 -*-
from selenium import webdriver
from time import sleep
import pandas as pd
from selenium.webdriver.android.webdriver import WebDriver
# Drive Chrome through a Baidu search for "selenium" and open the first
# "Selenium" result.  NOTE(review): the pandas and WebDriver imports above
# appear unused.
driver = webdriver.Chrome() # Chrome browser
driver.get("http://www.baidu.com")
driver.find_element_by_class_name("s_ipt").send_keys("selenium")
driver.find_element_by_id("su").click()
# Give the results page time to load before clicking through.
sleep(2)
driver.find_element_by_link_text("Selenium").click()
|
# Name: Taidgh Murray
# Student ID: 15315901
# File: rectangle.py
############################################################################
import graphics
# Let the user pick two corners by clicking, then draw the rectangle and
# report its area and perimeter.
win = graphics.GraphWin("Rectangle", 200,200)
p1= win.getMouse()
p2= win.getMouse()
rectangle=graphics.Rectangle(p1, p2)
rectangle.setOutline('Black')
rectangle.setFill('Light Blue')
rectangle.draw(win)
anchor1= graphics.Point(110,10)
anchor2= graphics.Point(110,180)
# NOTE(review): a tuple is passed as the Text content, so the label renders
# as a tuple repr; also (x2-x1)*(y2-y1) is negative when the second click is
# above/left of the first — consider abs() and string formatting.
Text1 = graphics.Text(anchor1, ("The area of the rectangle is" , (p2.getX() - p1.getX()) * (p2.getY() - p1.getY())))
Text1.setSize(8)
Text1.draw(win)
Text2 = graphics.Text(anchor2, ("The perimeter of the rectange is" , 2*(p2.getX() - p1.getX()) + 2*(p2.getY() - p1.getY())))
Text2.setSize(7)
Text2.draw(win)
# this waits until you have clicked in the window to close it.
win.getMouse()
win.close()
|
# -*-coding=utf-8-*-
__author__ = 'Rocky'
'''
http://30daydo.com
Contact: weigesysu@qq.com
'''
import requests
# Log in to jisilu.cn with a persistent session, then fetch the account's
# home-page action feed.
# NOTE(review): credentials are hard-coded below — move them to
# configuration or environment variables.
session = requests.Session()
# Headers mimicking a desktop Chrome browser for the initial GET.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip,deflate,br', 'Accept-Language': 'zh,en;q=0.9,en-US;q=0.8',
    'Cache-Control': 'no-cache', 'Connection': 'keep-alive',
    'Host': 'www.jisilu.cn', 'Pragma': 'no-cache', 'Referer': 'https://www.jisilu.cn/',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0(WindowsNT6.1;WOW64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/67.0.3396.99Safari/537.36'}
# Prime the session with cookies from the login page.
s1 = session.get(url='https://www.jisilu.cn/login/', headers=headers)
url = 'https://www.jisilu.cn/account/ajax/login_process/'
# Login form payload (ajax-style post).
data = {
    'return_url': 'https://www.jisilu.cn/',
    'user_name': '一言不发',
    'password': '123456qA',
    'net_auto_login': '1',
    '_post_type': 'ajax',
}
# Headers for the ajax login POST.
headers1 = {'Host': 'www.jisilu.cn', 'Connection': 'keep-alive', 'Pragma': 'no-cache',
            'Cache-Control': 'no-cache', 'Accept': 'application/json,text/javascript,*/*;q=0.01',
            'Origin': 'https://www.jisilu.cn', 'X-Requested-With': 'XMLHttpRequest',
            'User-Agent': 'Mozilla/5.0(WindowsNT6.1;WOW64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/67.0.3396.99Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
            'Referer': 'https://www.jisilu.cn/login/',
            'Accept-Encoding': 'gzip,deflate,br',
            'Accept-Language': 'zh,en;q=0.9,en-US;q=0.8'
            }
s2 = session.post(url=url, data=data, headers=headers1)
# Headers for the authenticated feed request (cookie handled by the session).
access_header = {'Accept': '*/*', 'Accept-Encoding': 'gzip,deflate,br', 'Accept-Language': 'zh,en;q=0.9,en-US;q=0.8',
                 'Cache-Control': 'no-cache', 'Connection': 'keep-alive',
                 # 'Cookie': 'kbz_newcookie=1;kbzw_r_uname=%E4%B8%80%E8%A8%80%E4%B8%8D%E5%8F%91;Hm_lvt_164fe01b1433a19b507595a43bf58262=1534596284,1534815723,1535250562,1535534424;kbzw__Session=m56m8cspqr166jreqq9tpmgfg3;kbzw__user_login=7Obd08_P1ebax9aXWxr2SR_3VzDuVfAFmrCW6c3q1e3Q6dvR1YzUwtqprJ2sodiV3JinqKiomqHCp7DYzN3NqcbYk6nbpZmcndbd3dPGpJ6wlauXq5iupbaxv9Gkwtjz1ePO15CspaOYicfK4t3k4OyMxbaWl6Worpi4v7iqrZ6Jutznztu43Nm-4dWflqewo5yvjJ-tvrXEw5-YzdnM2Zm8ztzX5ouWpN_p4uXGn5erp6WXrJ-wmKSasJfG2cfR092oqpywmqqY;Hm_lpvt_164fe01b1433a19b507595a43bf58262=1535646338',
                 'Host': 'www.jisilu.cn', 'Pragma': 'no-cache', 'Referer': 'https://www.jisilu.cn/home/mine/',
                 'User-Agent': 'Mozilla/5.0(WindowsNT6.1;WOW64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/67.0.3396.99Safari/537.36',
                 'X-Requested-With': 'XMLHttpRequest'}
s3=session.get(url='https://www.jisilu.cn/home/ajax/index_actions/page-0__filter-', headers=access_header)
print(s3.text)
# -*- coding: utf-8 -*-
# @Author: zjx
# @Date : 2018/6/4
import web
import config
import sys
import json
from db.DataStore import sqlhelper
from db.SqlHelper import Proxy
# 路由
# web.py routing table: URL pattern followed by the handler class name.
urls = (
    '/', 'select',
    '/delete', 'delete'
)
# 启动服务
def start_api_server():
    """Launch the web.py application, listening on all interfaces."""
    # web.py reads the bind address/port from sys.argv, so append it.
    sys.argv.append('0.0.0.0:%s' % config.API_PORT)
    server = web.application(urls, globals())
    server.run()
# 查询
class select(object):
    """Handler for ``/``: return matching proxies as JSON."""
    def GET(self):
        params = web.input()
        return json.dumps(sqlhelper.select(params.get('count', None), params))
# 删除
class delete(object):
    """Handler for ``/delete``."""
    # NOTE(review): despite the route name, this handler calls
    # sqlhelper.select() — and with a different argument shape than the
    # `select` handler above. It likely should invoke a delete operation;
    # confirm against db.DataStore.sqlhelper before relying on it.
    params = {}
    def GET(self):
        inputs = web.input()
        json_result = json.dumps(sqlhelper.select(inputs))
        return json_result
# Run the API server when this module is executed directly.
if __name__ == '__main__':
    start_api_server()
|
# Project Euler #1: sum of all natural numbers below `number` that are
# multiples of 3 or 5, with a simple wall-clock timing.
import time

start_time = time.time()
number = 1000
# A generator expression replaces the manual accumulation loop. The
# original also mutated the loop variable (`i += 1`) inside the for-loop,
# which has no effect in Python and was removed.
summa = sum(i for i in range(1, number) if i % 3 == 0 or i % 5 == 0)
print(summa)
print("Elapsed Time: ", (time.time() - start_time))
# _*_ coding: utf-8 _*_
# Program 9-2 (Python 3 version): fetch the Ministry of Education
# homepage and print the first 15 lines of its HTML.
import requests

url = 'http://www.moe.gov.cn/'
html = requests.get(url).text.splitlines()
# Print the first fifteen lines, one per line (IndexError if fewer,
# matching the original behavior).
for line_no in range(15):
    print(html[line_no])
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Sentry logging backend.
Currently only Python application errors are sent to
`Sentry <http://getsentry.com>`_. Future extensions may allow for sending
JavaScript errors to Sentry as well.
**Configuration**
================================== ============================================
`SENTRY_DSN` Sentry DSN (get it from your Sentry account)
. **Required**.
`LOGGING_SENTRY_LEVEL` Log level threshold for handler.
**Default:** ``WARNING``.
`LOGGING_SENTRY_INCLUDE_WARNINGS` Include messages from warnings module.
**Default:** ``True``.
`LOGGING_SENTRY_CELERY` Log Celery messages to Sentry.
**Default:** ``True``.
`LOGGING_SENTRY_CELERY_TRANSPORT` Transport mechanism for Celery.
**Default:** ``sync``.
================================== ============================================
`Raven <raven.readthedocs.org/en/latest/>`_ (the Python library responsible for
sending log messages to Sentry), supports some additional configuration
variables. See https://github.com/getsentry/raven-python/blob/master/raven/contrib/flask.py
for further details.
"""
import logging
import pkg_resources
from functools import partial
from werkzeug.local import LocalProxy
from flask import current_app
from raven.handlers.logging import SentryHandler
from raven.contrib.flask import Sentry
from raven.processors import SanitizePasswordsProcessor
from celery.signals import after_setup_logger, after_setup_task_logger
class InvenioSanitizeProcessor(SanitizePasswordsProcessor):
    """Remove additional sensitive configuration from Sentry data."""
    # Extra field names (beyond Raven's password-like defaults) whose
    # values are scrubbed from events before they reach Sentry.
    FIELDS = frozenset([
        'access_token'
    ])
def sentry_include_paths():
    """Detect Invenio dependencies for use with ``SENTRY_INCLUDE_PATHS``.

    Returns an iterable of requirement names, or ``None`` when the
    distribution cannot be found.
    """
    try:
        # NOTE(review): get_distribution('') with an empty project name
        # looks suspicious — presumably this should name the 'invenio'
        # distribution. Confirm against the installed package metadata.
        dist = pkg_resources.get_distribution('')
        # NOTE(review): on Python 3, ``map`` returns a lazy iterator rather
        # than a list; verify the SENTRY_INCLUDE_PATHS consumer accepts it.
        return map(lambda req: req.key, dist.requires())
    except pkg_resources.DistributionNotFound:
        pass
def setup_warnings(sentry):
    """Attach the Sentry client to the ``py.warnings`` logger.

    Messages emitted via the ``warnings`` module are routed through the
    ``py.warnings`` logger; adding a handler there forwards them to Sentry.
    """
    handler = SentryHandler(sentry.client, level=logging.WARNING)
    logging.getLogger('py.warnings').addHandler(handler)
def add_sentry_id_header(self, sender, response, *args, **kwargs):
    """Fix issue when last_event_id is not defined.

    Replacement for ``Sentry.add_sentry_id_header`` that tolerates the
    ``last_event_id`` attribute being absent: the X-Sentry-ID header is
    attached only when the attribute exists on ``self``.
    """
    _missing = object()
    event_id = getattr(self, 'last_event_id', _missing)
    if event_id is not _missing:
        response.headers['X-Sentry-ID'] = event_id
    return response
def celery_logger_setup(app=None, sender=None, logger=None, **kwargs):
    """Setup Sentry logging for Celery.

    Signal receiver for ``after_setup_logger``/``after_setup_task_logger``;
    delegates to ``add_handler`` so the Sentry handler is attached to each
    Celery logger exactly once.
    """
    add_handler(logger, app)
def celery_dsn_fix(app):
    """Fix SENTRY_DSN for Celery.

    Celery does not handle threaded transport very well, so allow overriding
    default transport mechanism for Celery.
    """
    # Only act inside a Celery context with Sentry-for-Celery enabled and
    # an override transport configured.
    should_override = (app.config.get('CELERY_CONTEXT', False) and
                       app.config['LOGGING_SENTRY_CELERY'] and
                       app.config['LOGGING_SENTRY_CELERY_TRANSPORT'])
    if not should_override:
        return
    transport = app.config['LOGGING_SENTRY_CELERY_TRANSPORT']
    pieces = app.config['SENTRY_DSN'].split('+', 1)
    known_transports = ('eventlet', 'gevent', 'requests', 'sync',
                        'threaded', 'twisted', 'tornado')
    if pieces[0] in known_transports:
        # DSN already names a transport: swap it for the configured one.
        app.config['SENTRY_DSN'] = "%s+%s" % (transport, pieces[1])
    else:
        # No transport prefix present: prepend the configured one.
        app.config['SENTRY_DSN'] = "%s+%s" % (transport, "+".join(pieces))
def add_handler(logger, app):
    """Add a Sentry handler to ``logger`` if not already added.

    Scans the logger's existing handlers so that repeated signal
    invocations do not attach duplicate handlers (which would
    double-report every event).

    :param logger: ``logging.Logger`` instance to receive the handler.
    :param app: Flask application whose ``sentry`` extension supplies the
        client and whose config supplies the log level.
    """
    for h in logger.handlers:
        # isinstance (rather than an exact type() comparison) also treats
        # SentryHandler subclasses as already-installed handlers.
        if isinstance(h, SentryHandler):
            return
    logger.addHandler(
        SentryHandler(
            app.extensions['sentry'].client,
            level=app.config['LOGGING_SENTRY_LEVEL']
        )
    )
def setup_app(app):
    """Setup Sentry extension.

    Fills in default configuration and — when ``SENTRY_DSN`` is set —
    creates the Sentry client, wires warnings/Celery logging into it and
    installs it in ``app.extensions['sentry']``.
    """
    app.config.setdefault('SENTRY_DSN', None)
    # Sanitize data more
    app.config.setdefault('SENTRY_PROCESSORS', (
        'raven.processors.SanitizePasswordsProcessor',
        'invenio.ext.logging.backends.sentry.InvenioSanitizeProcessor',
    ))
    # When a user is logged in, also include the user info in the log message.
    app.config.setdefault('SENTRY_USER_ATTRS', ['info', ])
    # Defaults to only reporting errors and warnings.
    app.config.setdefault('LOGGING_SENTRY_LEVEL', 'WARNING')
    # Send warnings to Sentry?
    app.config.setdefault('LOGGING_SENTRY_INCLUDE_WARNINGS', True)
    # Send Celery log messages to Sentry?
    app.config.setdefault('LOGGING_SENTRY_CELERY', True)
    # Transport mechanism for Celery. Defaults to synchronous transport.
    # See http://raven.readthedocs.org/en/latest/transports/index.html
    app.config.setdefault('LOGGING_SENTRY_CELERY_TRANSPORT', 'sync')
    if app.config['SENTRY_DSN']:
        # Detect Invenio requirements and add to Sentry include paths so
        # version information about them is added to the log message.
        app.config.setdefault('SENTRY_INCLUDE_PATHS', sentry_include_paths())
        # Fix-up known version problems getting version information
        # Patch submitted to raven-python, if accepted the following lines
        # can be removed:
        # https://github.com/getsentry/raven-python/pull/452
        from raven.utils import _VERSION_CACHE
        # Bug fix: ``invenio`` was referenced below but never imported,
        # raising NameError whenever SENTRY_DSN was configured.
        import invenio
        import numpy
        import webassets
        import setuptools
        _VERSION_CACHE['invenio'] = invenio.__version__
        _VERSION_CACHE['numpy'] = numpy.__version__
        _VERSION_CACHE['webassets'] = webassets.__version__
        _VERSION_CACHE['setuptools'] = setuptools.__version__
        # Modify Sentry transport for Celery - must be called prior to client
        # creation.
        celery_dsn_fix(app)
        # Installs sentry in app.extensions['sentry']
        s = Sentry(
            app,
            logging=True,
            level=getattr(logging, app.config['LOGGING_SENTRY_LEVEL'])
        )
        # Replace method with more robust version
        s.add_sentry_id_header = add_sentry_id_header
        # Add extra tags information to sentry.
        s.client.extra_context({'version': invenio.__version__})
        # Capture warnings from warnings module
        if app.config['LOGGING_SENTRY_INCLUDE_WARNINGS']:
            setup_warnings(s)
        # Setup Celery logging to Sentry
        if app.config['LOGGING_SENTRY_CELERY']:
            # Setup Celery loggers
            after_setup_task_logger.connect(
                partial(celery_logger_setup, app=app),
                weak=False
            )
            after_setup_logger.connect(
                partial(celery_logger_setup, app=app),
                weak=False
            )
        # Werkzeug only adds a stream handler if there's no other handlers
        # defined, so when Sentry adds a log handler no output is
        # received from Werkzeug unless we install a console handler here on
        # the werkzeug logger.
        if app.debug:
            logger = logging.getLogger('werkzeug')
            logger.setLevel(logging.INFO)
            handler = logging.StreamHandler()
            logger.addHandler(handler)
# Lazily resolves to the Sentry client installed on the current Flask app.
sentry = LocalProxy(lambda: current_app.extensions['sentry'])
"""Proxy object to sentry instance."""
|
# 110. Balanced Binary Tree
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """LeetCode 110 — determine whether a binary tree is height-balanced."""

    def isBalancedHelper(self, root):
        """Return ``(height, balanced)`` for the subtree rooted at ``root``."""
        if not root:
            return 0, True
        if not root.left and not root.right:
            return 1, True
        left_height, left_ok = self.isBalancedHelper(root.left)
        right_height, right_ok = self.isBalancedHelper(root.right)
        balanced = left_ok and right_ok and abs(left_height - right_height) <= 1
        return 1 + max(left_height, right_height), balanced

    def isBalanced(self, root: 'TreeNode') -> 'bool':
        """A tree is balanced iff every node's subtree heights differ by <= 1."""
        _, balanced = self.isBalancedHelper(root)
        return balanced
import requests
import json
import datetime
from .translator import Translator
class User:
    """Wrapper around the Slothpixel Hypixel API for a single player.

    Exactly one of ``playerName`` or ``playeruuid`` must be supplied; the
    player's profile is fetched eagerly on construction.
    """
    def __init__(self, playerName: str = None, playeruuid: str = None):
        self.playerName = playerName
        self.playeruuid = playeruuid
        if self.playerName is None and self.playeruuid is None:
            raise AttributeError('You need to fill in either playerName or playeruuid')
        if self.playerName is not None and self.playeruuid is not None:
            raise AttributeError("You can't fill in both playerName and playeruuid")
        # Whichever identifier was supplied is used in API URLs.
        self.nameValue = self.playerName if not playeruuid else playeruuid
        self.get_username_link = requests.get(f'https://api.slothpixel.me/api/players/{self.nameValue}')
        self.user_data = json.loads(self.get_username_link.text)
        self.rank = self.user_data['rank']
    def get_info(self, item, rewards_object=None, voting_object=None, links_object=None):
        """
        To get the info of a user. The user does not have to be on Hypixel for this to work.
        They have to be at least logged in once to hypixel, however.
        https://docs.slothpixel.me/#operation/getPlayer
        Items:
        - :uuid:`string`
        - :username:`string`
        - :online:`boolean`
        - :rank:`string`
        - :rank plus color:`string`
        - :rank formatted:`string`
        - :prefix:`string`
        - :karma:`integer`
        - :exp:`integer`
        - :level:`integer`
        - :achievment_points:`integer`
        - :quests completed:`integer`
        - :total kills:`integer`
        - :total wins:`integer`
        - :total coins:`integer`
        - :mc_version:`string`
        - :first login:`integer`
        - :last login:`integer`
        - :last logout:`integer`
        - :last game:`string`
        - :language:`string`
        - :gifts sent:`integer`
        - :gifts received:`integer`
        - :is contributor:`boolean`
        - :rewards:`object`
        - :voting:`object`
        - :links:`object`
        - :stats:`object`
        Params:
        - :item:`string`
        - :rewards_object:`string` (optional)
        - :voting_object:`string` (optional)
        - :links_object:`string` (optional)
        - :stats_object:`string` (optional)
        `versionadded`: 1.0
        """
        # Item names may be given with spaces; the API uses underscores.
        self.item = item
        self.item = str(self.item).replace(" ", "_")
        if self.item == 'rewards':
            if rewards_object is None:
                raise AttributeError('Attribute `rewards_object` has to be filled in for :rewards: object')
            return self.user_data[self.item][rewards_object]
        elif self.item == 'voting':
            if voting_object is None:
                raise AttributeError('Attribute `voting_object` has to be filled in for :voting: object')
            # Bug fix: previously indexed with ``rewards_object``.
            return self.user_data[self.item][voting_object]
        elif self.item == 'links':
            if links_object is None:
                raise AttributeError('Attribute `links_object` has to be filled in for :links: object')
            # Bug fix: previously indexed with ``rewards_object``.
            return self.user_data[self.item][links_object]
        elif self.item in ('first_login', 'last_login', 'last_logout'):
            # API timestamps are in milliseconds; convert to datetime.
            return datetime.datetime.fromtimestamp(round(self.user_data[self.item]/1000))
        elif self.item == '':
            # Empty item returns the whole profile dict.
            return self.user_data
        else:
            return self.user_data[self.item]
    def get_status(self, item, game_object=None):
        """
        To get the status of a user. The user has to be on Hypixel for this status to work. Try `get_info` for more information
        https://docs.slothpixel.me/#operation/getPlayerStatus
        Items:
        - :online:`boolean`
        - :game:`object`
        Params:
        - :item:`string`
        - :game_object:`string` (optional)
        `versionadded`: 1.0
        """
        self.item = item
        self.get_status_link = requests.get(f'https://api.slothpixel.me/api/players/{self.nameValue}/status')
        self.user_status = json.loads(self.get_status_link.text)
        if self.item == 'game':
            if game_object is None:
                raise AttributeError('Attribute `game_object` has to be filled in for :game: object')
            return self.user_status[self.item][game_object]
        else:
            return self.user_status[self.item]
    def get_achievements(self, item, rewards_object=None):
        """
        To get the achievements of a user. The user does not have to be on Hypixel for this to work.
        https://docs.slothpixel.me/#operation/getPlayerAchievements
        Items:
        - :achievement points:`integer`
        - :completed tiered:`integer`
        - :completed one time:`integer`
        - :completed total:`integer`
        Params:
        - :item:`string`
        - :game_object:`string` (optional)
        `versionadded`: 1.0
        """
        self.item = item
        self.get_achievements_link = requests.get(f'https://api.slothpixel.me/api/players/{self.nameValue}/achievements')
        self.achievements_status = json.loads(self.get_achievements_link.text)
        self.item = str(self.item).replace(" ", "_")
        return self.achievements_status[self.item]
    def get_quests(self, item):
        """
        To get the quests of a user. The user does not have to be on Hypixel for this to work.
        https://docs.slothpixel.me/#operation/getPlayerQuests
        Items:
        - :quests completed:`integer`
        - :challenges completed:`integer`
        Params:
        - :item:`string`
        `versionadded`: 1.0
        """
        self.item = item
        self.get_quests_link = requests.get(f'https://api.slothpixel.me/api/players/{self.nameValue}/quests')
        self.quest_data = json.loads(self.get_quests_link.text)
        self.item = str(self.item).replace(" ", "_")
        # Bug fix: previously indexed with the raw ``item`` argument, so
        # space-separated names ('quests completed') never matched.
        return self.quest_data[self.item]
    def get_recentGames(self, gameNumber: int, item=None):
        """
        To get the recent games of a user. The user does not have to be on Hypixel for this to work.
        https://docs.slothpixel.me/#operation/getPlayerQuests
        Items:
        - :date:`datetime`
        - :gameType:`string`
        - :mode:`string`
        - :map:`string`
        - :ended:`datetime`
        Params:
        - :gameNumber:`integer` (1-based index into the recent-games list)
        - :item:`string`
        `versionadded`: 1.0
        """
        # NOTE(review): when ``item`` is omitted this falls through to a
        # lookup with key ``None`` — confirm intended behavior.
        gameNumber -= 1
        self.game_number = gameNumber
        self.item = item
        self.get_RC_link = requests.get(f'https://api.slothpixel.me/api/players/{self.nameValue}/recentGames')
        self.recentGames_data = json.loads(self.get_RC_link.text)
        if self.item == 'date' or self.item == 'ended':
            # Millisecond timestamps are converted to datetime objects.
            return datetime.datetime.fromtimestamp(round(self.recentGames_data[gameNumber][self.item]/1000))
        elif self.item == 'mode':
            return Translator(self.recentGames_data[gameNumber]['mode'])
        else:
            return self.recentGames_data[gameNumber][self.item]
def get_username(*uuid):
    """
    Get the username of a uuid.
    Params:
    - :uuid:`string`, `array`
    Returns:
    - :username:`string`, `array`
    `versionadded`: 1.0
    """
    resolved = []
    # Accept bare uuids as well as lists of uuids, in any mix.
    for entry in uuid:
        batch = entry if isinstance(entry, list) else [entry]
        for target in batch:
            get_username_link = requests.get(f'https://api.slothpixel.me/api/players/{target}')
            user_data = json.loads(get_username_link.text)
            resolved.append(user_data['username'])
    # A single result is unwrapped for caller convenience.
    return resolved[0] if len(resolved) == 1 else resolved
def get_uuid(*username):
    """
    Get the uuid of a username
    Params:
    - :username:`string`, `array`
    Returns:
    - :uuid:`string`, `array`
    `versionadded`: 1.0
    """
    resolved = []
    # Accept bare usernames as well as lists of usernames, in any mix.
    for entry in username:
        batch = entry if isinstance(entry, list) else [entry]
        for target in batch:
            get_username_link = requests.get(f'https://api.slothpixel.me/api/players/{target}')
            user_data = json.loads(get_username_link.text)
            resolved.append(user_data['uuid'])
    # A single result is unwrapped for caller convenience.
    return resolved[0] if len(resolved) == 1 else resolved
# Your code for the 1st query goes here
# Make sure to write the "put" function to create data structure from data base.
# Make sure to write the "get" function to query from data structure
# Refer to app.py to see what "put" and "get" should return
# your implementation of data structure goes here
def put():
    """Build and return the data structure populated from the database.

    Template stub: returns ``None`` until implemented (see app.py for the
    expected return contract).
    """
    data_structure = None
    # code here
    return data_structure
def get(data_structure,id):
    """Query ``data_structure`` for ``id`` and return the required data.

    Template stub: currently returns ``None`` (see app.py for the
    expected return contract).
    """
    # query on the data_structure and return data required
    return
|
# Generated by Django 2.1.7 on 2020-01-15 08:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the boolean ``status`` flag (default True) to the Contest model.

    dependencies = [
        ('Rank', '0008_forgotpassword'),
    ]
    operations = [
        migrations.AddField(
            model_name='contest',
            name='status',
            field=models.BooleanField(default=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 10:46:11 2020
@author: TOP Artes
"""
import pandas as pd
import numpy as np
from model.estado import Estado
from model.pre_processor import PreProcessor
from control.control_regressor import ControlRegressor
class ControlEstado:
    """Coordinates loading, balancing and pre-processing of the per-state
    ENEM dataset, delegating regression work to ControlRegressor.

    NOTE(review): the column-index tuples below encode the dataset layout
    positionally — confirm against model.estado before changing them.
    """
    def __init__(self):
        self.estado = Estado()
        # Candidate target column names; the first one is the default target.
        self.lst_targets = list(['Inc_ENEM', 'Mediana_CN', 'Mediana_CH', 'Mediana_LN', 'Mediana_MT', 'Mediana_RD'])
        self.pre_processor = PreProcessor()
        self.control_regressor = ControlRegressor()
        self.DataFrame = pd.DataFrame()
        self.previsoes = []
    def get_raw(self) -> pd.DataFrame:
        # Call the generator with the list of years analyzed in this project
        DataFrame = self.estado.gerar_dados(['2010','2011','2012','2013','2014','2015','2016','2017','2018','2019'])
        # Call the cleaner with the list of numeric (int) columns of the DataFrame.
        self.DataFrame = self.estado.tratar_dados(list(DataFrame.columns[4:27]))
        return self.DataFrame
    def balance_data(self, DataFrame) -> pd.DataFrame:
        # estado = Estado()
        # Call with the list of column-index range tuples —
        # respectively enrollments (Matrículas), schools (Escolas), teachers (Docentes)
        DataFrame = self.estado.calcular_totais([(6,13),(13,20),(20,27)], DataFrame.copy())
        # Call with the list of column-index range tuples —
        # respectively territorial area km²[0], estimated population[0][1],
        # schools[0][1], enrollments[0][1], teachers[0][1]
        DataFrame = self.estado.calcular_densidade([(3,4),(3,33),(3,32),(3,34)], DataFrame.copy()) # density per km²
        # Call with the list of column-index range tuples —
        # respectively:
        # inhabitants / ENEM registrations[(X,Y)]
        # inhabitants / total schools[(X,Y)]
        # total enrollments / schools[(X,Y)]
        # total teachers / enrollments[(X,Y)]
        DataFrame = self.estado.calcular_proporcao([(4,5),(4,33),(32,33),(32,34),(34,33)], DataFrame.copy()) # ratios of the absolute values
        return DataFrame
    def get_targets(self):
        # Return the list of candidate target column names.
        return self.lst_targets
    def set_target(self, DataFrame) -> list:
        # Resolve and store the positional index of the first target column.
        self.target = [i for i, col in enumerate(DataFrame.columns) if col == self.lst_targets[0]][0]
        return self.target
    def get_previsores_alvos(self, DataFrame, c_target, r_target, target, balanced) -> np.array:
        # Select the slice holding the training/test data
        X = DataFrame[DataFrame[c_target]!=r_target].copy()
        # Extract the target values
        y = X.iloc[:,[target]].values
        cols = ['Região Geográfica','Unidade da Federação']+self.lst_targets
        if balanced:
            cols.append('POP/INSC_ENEM')
        # Keep only the features relevant for prediction
        X = X.drop(cols, inplace=False, axis=1)
        # Build the predictor matrix
        X = X.iloc[:,0:].values
        return X, y
    def get_previsores(self, DataFrame, balanced, modelo) -> np.array:
        # Select the 2019 predictor rows
        X = DataFrame[DataFrame['ano']==2019].copy()
        # Keep only the features relevant for prediction
        cols = ['Região Geográfica', 'Unidade da Federação']+self.lst_targets
        if balanced:
            cols.append('POP/INSC_ENEM')
        # Keep only the features relevant for prediction
        X = X.drop(cols, inplace=False, axis=1)
        # Build the predictor matrix
        X = X.iloc[:,1:].values
        ## Receive the predictions
        #self.set_previsoes(DataFrame, 'ano', 2019, self.lst_targets[0], previsoes)
        return X
    def set_previsoes(self, DataFrame, c_index, r_target, c_target, previsoes) -> pd.DataFrame:
        # Write the predictions back into a copy of the DataFrame.
        df_balanced = DataFrame.copy()
        df_balanced.loc[df_balanced[c_index] == r_target, c_target] = previsoes
        #df_balanced = self.balance_data(df_balanced) # call the function with the original DataFrame
        return df_balanced
    def pre_process(self, df, test_size, random_state, base, balanced, scaled, balance, baseline, scale, plot, validation):
        # Single-configuration path; multi-option runs delegate to check_options.
        config = {}
        DataFrame = df[1].copy() if balanced else df[0].copy()
        plt_text = self.lst_targets[0]
        kwargs = self.control_regressor.set_kwargs(
            base, baseline)
        if len(balance + baseline + scale) > 0:
            return self.check_options(
                df, test_size, random_state, base, balance, baseline, scale, plt_text, kwargs, validation)
        X, y = self.get_previsores_alvos(
            DataFrame, 'ano', 2019, self.target, balanced) # split the (possibly balanced) predictors from the targets
        X_train, X_test, y_train, y_test = self.control_regressor.train_test(
            X, y, test_size, random_state)
        X_scaled, y_scaled, scaler_X, scaler_y = self.pre_processor.scale_data(
            (X_train[:,1:], X_test[:,1:]), (y_train, y_test))
        X_train_poly, X_test_poly, poly = self.pre_processor.poly_data(
            (X_train[:,1:], X_test[:,1:]))
        pre = {'scaler':[X_scaled, y_scaled, scaler_X, scaler_y],
               'poly':[(X_train_poly, X_test_poly), poly]}
        X, y, pre = (X_train[:,1:], X_test[:,1:], X_test), (y_train, y_test), pre
        return self.control_regressor.get_metrics(
            X, y, pre, random_state, base, scaled, plt_text, config, kwargs, validation, plot)
    def check_options(self, df, test_size, random_state, base, balance, baseline, scale, plt_text, kwargs, validation):
        # Iterate over the four balance/scale combinations and collect configs.
        config = {}
        for k in range(4):
            balanced = True if k in balance else False
            scaled = True if k in scale else False
            data_set = df[1].copy() if k in balance else df[0].copy()
            X, y = self.get_previsores_alvos(
                data_set, 'ano', 2019, self.target, balanced) # split the (possibly balanced) predictors from the targets
            pre, config[k] = self.check_base(
                X, y, k, test_size, random_state, base, balanced, scaled, plt_text, kwargs)
        if validation:
            return self.control_regressor.get_validation(
                X, y, pre, random_state, base, scaled, plt_text, config, kwargs, validation, plot=False)
        return self.control_regressor.get_metrics(
            X, y, pre, random_state, base, scaled, plt_text, config, kwargs, validation, True)
    def check_base(self, X, y, k, test_size, random_state, base, balanced, scaled, plt_text, kwargs):
        # Build train/test splits plus scaled and polynomial variants for one option.
        X_train, X_test, y_train, y_test = self.control_regressor.train_test(
            X, y, test_size, random_state)
        X_scaled, y_scaled, scaler_X, scaler_y = self.pre_processor.scale_data(
            (X_train[:,1:], X_test[:,1:]), (y_train, y_test))
        X_train_poly, X_test_poly, poly = self.pre_processor.poly_data(
            (X_train[:,1:], X_test[:,1:]))
        pre = {'scaler':[X_scaled, y_scaled, scaler_X, scaler_y],
               'poly':[(X_train_poly, X_test_poly, ), poly]}
        config = {'X':(X_train[:,1:], X_test[:,1:], X_test),
                  'y':(y_train.reshape(-1,1), y_test.reshape(-1,1)), 'pre':pre, 'random_state':random_state,
                  'base':base, 'scaled':scaled, 'plt_text':plt_text, 'kwargs':kwargs}
        return pre, config
|
from __future__ import unicode_literals
from ptpython.layout import CompletionVisualisation
def configure(repl):
    """Apply personal ptpython REPL preferences to ``repl``."""
    preferences = {
        'completion_visualisation': CompletionVisualisation.POP_UP,
        'show_line_numbers': True,
        'highlight_matching_parenthesis': True,
        'prompt_style': "ipython",
        'confirm_exit': False,
        'show_status_bar': False,
    }
    for option, value in preferences.items():
        setattr(repl, option, value)
    repl.use_code_colorscheme("monokai")
    repl.use_ui_colorscheme("blue")
|
from DPjudge import Status, host
class Reopen(Status):
    # ----------------------------------------------------------------------
    """
    This class is invoked by the Judgekeeper to inform The Diplomatic Pouch
    openings list that this DPjudge is up and available. The bin/reopen
    tool is run manually when the judge is back in service at some point
    after the openings list informed the judgekeeper that it had recorded
    the judge as being down.
    """
    # ----------------------------------------------------------------------
    # NOTE: Python 2 module — the bare ``print`` statements below are not
    # valid Python 3; run only under the Python 2 DPjudge environment.
    def __init__(self):
        # Only notify when an openings-list address is configured.
        if host.openingsList:
            Status.__init__(self)
            self.list(host.openingsList)
        print '-' * 56
        # Select the success or the "no address configured" message via the
        # boolean tuple index, then interpolate the judge identifier.
        print(('The Diplomatic Pouch Openings List has been e-mailed\n'
            'announcing the availability of the %s DPjudge.',
            'No openings list address given. Make sure the %s judge\n'
            'is registered with the Diplomatic Pouch Openings List.')
            [not host.openingsList] % host.dpjudgeID)
        print '-' * 56
    # ----------------------------------------------------------------------
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
# NOTE(review): database credentials are hard-coded in the DSN; move them
# to configuration/environment before deploying.
engine = create_engine('mysql+pymysql://root:123456@127.0.0.1:3306/blog?charset=utf8',pool_size=100)
# Session factory bound to the MySQL engine; scoped_session hands each
# thread its own session instance.
Session = sessionmaker(bind=engine)
dbsession = scoped_session(Session)
import tkinter as tk
from tkinter import *
from tkinter import ttk
import os
from colorama import Fore, Back, Style
from datetime import datetime, timedelta, time
import pika
import defs_common
import uuid
import json
#import cfg_outlets
import cls_OutletConfig
import time
import threading
class OutletWidget():
def __init__(self, master):
defs_common.logtoconsole("Initializing OutletWidget...", fg = "YELLOW", bg = "MAGENTA", style = "BRIGHT")
self.outletid = StringVar() # id of this outlet ex: int_outlet_1
self.outletname = StringVar() # user defined name of outlet
self.outletbus = StringVar() # int or ext bus
self.control_type = StringVar() # control scheme of outlet ex: skimmer, lights, always...
self.button_state = IntVar() # button state ON (3), OFF (1), or AUTO (2)
self.outletstate = StringVar() # is the outlet currently on or off
self.statusmsg = StringVar() # short status message to display above buttons
self.outlet_freezeupdate = BooleanVar()
# set initial value...
self.statusmsg.set("waiting...")
self.outlet_freezeupdate.set(True)
self.initializeConnection()
## #initialize the messaging queues
## self.connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost', heartbeat_interval=5))
## self.channel = self.connection.channel()
##
## result = self.channel.queue_declare(exclusive=True)
## self.callback_queue = result.method.queue
##
## self.channel.basic_consume(self.rpc_response, no_ack=True,
## queue=self.callback_queue)
# frame for internal outlet 1 control
self.frame_outlet = LabelFrame(master, text="waiting...", relief= RAISED)
self.frame_outlet.pack(fill=X, side=TOP)
self.frame_outlet_spacer = tk.LabelFrame(self.frame_outlet, relief = tk.FLAT)
self.frame_outlet_spacer.pack(fill=X, side=TOP)
self.img_cfg16 = PhotoImage(file="images/settings-16.png")
self.btn_cfg_outlet = Button(self.frame_outlet_spacer, text = "edit", image=self.img_cfg16,
relief = FLAT, command=lambda:self.configureOutlet(master))
self.btn_cfg_outlet.pack(side=LEFT, anchor=W)
self.lbl_outlet_status = Label(self.frame_outlet_spacer, text = "waiting...", relief = FLAT, textvariable=self.statusmsg)
self.lbl_outlet_status.pack(side=TOP, anchor=E)
self.rdo_outlet_off = Radiobutton(self.frame_outlet, text="Off", variable=self.button_state,
value=1, command=self.select_outlet_state,
indicatoron=0)
self.rdo_outlet_off.pack(side=LEFT, expand=1, fill=X)
self.rdo_outlet_auto = Radiobutton(self.frame_outlet, text="Auto", variable=self.button_state,
value=2, command=self.select_outlet_state,
indicatoron=0)
self.rdo_outlet_auto.pack(side=LEFT, expand=1, fill=X)
self.rdo_outlet_on = Radiobutton(self.frame_outlet, text="On", variable=self.button_state,
value=3, command=self.select_outlet_state,
indicatoron=0)
self.rdo_outlet_on.pack(side=LEFT, expand=1, fill=X)
self.sendKeepAlive()
def rpc_response(self, ch, method, props, body):
if self.corr_id == props.correlation_id:
self.response = body
def rpc_call(self, n, queue):
self.response = None
self.corr_id = str(uuid.uuid4())
## print(str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + " RPC call: " + n
## + " UID: " + self.corr_id)
defs_common.logtoconsole(str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")) + " RPC call: " + n
+ " UID: " + self.corr_id, fg="GREEN", style="BRIGHT")
if self.connection.is_open:
defs_common.logtoconsole("Pika connection is OPEN")
else:
defs_common.logtoconsole("Pika connection is CLOSED")
defs_common.logtoconsole("Reopen Pika connection")
self.initializeConnection()
self.channel.basic_publish(exchange='',
routing_key=queue,
properties=pika.BasicProperties(
reply_to = self.callback_queue,
correlation_id = self.corr_id,
expiration="300000"),
body=str(n))
while self.response is None:
self.connection.process_data_events()
return self.response
def initializeConnection(self):
#initialize the messaging queues
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
self.channel = self.connection.channel()
result = self.channel.queue_declare(exclusive=True)
self.callback_queue = result.method.queue
self.channel.basic_qos(prefetch_count=1)
self.channel.basic_consume(self.rpc_response, no_ack=True,
queue=self.callback_queue)
def updateOutletFrameName(self, name):
self.frame_outlet.config(text = name)
#print(name)
def sendKeepAlive(self):
# periodically (like every 1 or 2 minutes) send a message to the exchange so it
# knows this channel is still active and not closed due to inactivity
defs_common.logtoconsole("send keep alive request: " + str(self.outletid.get()), fg="YELLOW", style="BRIGHT")
request = {
"rpc_req": "set_keepalive",
"module": str(self.outletid.get()),
}
request = json.dumps(request)
self.rpc_call(request, "rpc_queue")
# every 2 minutes, send out a message on this channel so the exchange server knows
# we are still alive and doesn't close our connection
heartbeatThread = threading.Timer(120, self.sendKeepAlive)
heartbeatThread.daemon = True
heartbeatThread.start()
#threading.Timer(120, self.sendKeepAlive).start()
def select_outlet_state(self):
defs_common.logtoconsole("outlet state change: " + str(self.outletid.get()) + " to " + str(self.button_state.get()), fg="YELLOW", style="BRIGHT")
if self.button_state.get() == defs_common.OUTLET_OFF:
self.statusmsg.set("OFF")
self.lbl_outlet_status.config(foreground="RED")
## self.channel.basic_publish(exchange='',
## routing_key='outlet_change',
## properties=pika.BasicProperties(expiration='30000'),
## body=str(str(self.outletid.get()) + "," + "OFF"))
## #body=str("int_outlet_1" + "," + "OFF"))
# request outlet change on server
request = {
"rpc_req": "set_outletoperationmode",
"bus": str(self.outletbus.get()),
"outletnum": str(self.outletid.get().split("_")[2]),
"opmode": "off"
}
request = json.dumps(request)
self.rpc_call(request, "rpc_queue")
elif self.button_state.get() == defs_common.OUTLET_AUTO:
self.statusmsg.set("AUTO")
self.lbl_outlet_status.config(foreground="DARK ORANGE")
## self.channel.basic_publish(exchange='',
## routing_key='outlet_change',
## properties=pika.BasicProperties(expiration='30000'),
## body=str(str(self.outletid.get()) + "," + "AUTO"))
## #body=str("int_outlet_1" + "," + "AUTO"))
request = {
"rpc_req": "set_outletoperationmode",
"bus": str(self.outletbus.get()),
"outletnum": str(self.outletid.get().split("_")[2]),
"opmode": "auto"
}
request = json.dumps(request)
self.rpc_call(request, "rpc_queue")
elif self.button_state.get() == defs_common.OUTLET_ON:
self.statusmsg.set("ON")
self.lbl_outlet_status.config(foreground="GREEN")
## self.channel.basic_publish(exchange='',
## routing_key='outlet_change',
## properties=pika.BasicProperties(expiration='30000'),
## body=str(str(self.outletid.get()) + "," + "ON"))
## #body=str("int_outlet_1" + "," + "ON"))
request = {
"rpc_req": "set_outletoperationmode",
"bus": str(self.outletbus.get()),
"outletnum": str(self.outletid.get().split("_")[2]),
"opmode": "on"
}
request = json.dumps(request)
self.rpc_call(request, "rpc_queue")
else:
self.lbl_outlet_status.config(text="UNKNOWN", foreground="BLACK")
## selection = "Select outlet option " + self.lbl_outlet_status.cget("text")
## print(Fore.YELLOW + Style.BRIGHT + datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
## " " + selection + Style.RESET_ALL)
self.outlet_freezeupdate.set(True)
defs_common.logtoconsole("Freeze Update: " + str(self.outletid.get() + " " + str(self.outlet_freezeupdate.get())), fg="CYAN")
def configureOutlet(self, master):
print("dialog " + self.outletid.get().split("_")[2])
outnum = self.outletid.get().split("_")[2]
d = Dialog(master, self, outnum)
def uploadsettings(self, section, key, value):
    """Push a single INI setting change to the server over RPC."""
    defs_common.logtoconsole("Request settings change: [" + str(section) + "] [" + str(key) + "] = " + str(value))
    # request settings change on server
    payload = json.dumps({
        "rpc_req": "set_writeinifile",
        "section": str(section),
        "key": str(key),
        "value": str(value)
    })
    self.rpc_call(payload, "rpc_queue")
def downloadsettings(self, section, key, defaultval):
    """Fetch one INI setting from the server over RPC.

    The server applies *defaultval* when the key is missing; the decoded
    setting value is returned. Fix: the log message said "vaue" instead
    of "value".
    """
    defs_common.logtoconsole("Request settings value: [" + str(section) + "] [" + str(key) + "]")
    # get setting value from server
    request = {
        "rpc_req": "get_readinifile",
        "section": str(section),
        "key": str(key),
        "defaultval": str(defaultval)
    }
    request = json.dumps(request)
    # rpc_call returns raw bytes; decode then parse the JSON envelope.
    val = self.rpc_call(request, "rpc_queue")
    val = val.decode()
    val = json.loads(val)
    val = val.get("readinifile")
    return val
def getProbeList(self):
    """Ask the server for the list of probes available for outlet control."""
    defs_common.logtoconsole("Request probe list for outlet control")
    # Fetch the probe list from the server over RPC.
    payload = json.dumps({
        "rpc_req": "get_probelist",
    })
    raw = self.rpc_call(payload, "rpc_queue")
    return json.loads(raw.decode())
class Dialog(Toplevel):
    """Modal tkinter dialog hosting an outlet-configuration form.

    Construction blocks (wait_window) until the dialog is closed, so the
    caller resumes only after OK/Cancel. The call order in __init__
    (transient -> body -> grab_set -> wait_window) is significant for
    correct modal behaviour.
    """

    def __init__(self, parent, controller, outletnum, title = None):
        Toplevel.__init__(self, parent)
        self.transient(parent)  # keep the dialog stacked above its parent
        self.controller = controller
        if title:
            self.title(title)
        self.parent = parent
        self.result = None
        self.outletnum = outletnum
        body = Frame(self)
        self.initial_focus = self.body(body)
        body.pack(padx=5, pady=5)
        self.buttonbox()
        self.grab_set()  # make the dialog modal (grab all input)
        if not self.initial_focus:
            self.initial_focus = self
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        # Position slightly offset from the parent window's top-left corner.
        self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
        parent.winfo_rooty()+50))
        self.initial_focus.focus_set()
        self.wait_window(self)  # block until the window is destroyed

    #
    # construction hooks
    def body(self, master):
        # create dialog body. return widget that should have
        # initial focus. this method should be overridden
        #outlet = cfg_outlets.PageOutlets(master, self)
        self.outlet = cls_OutletConfig.Outlet(master, self, cls_OutletConfig.BUS_INTERNAL, self.outletnum)
        self.outlet.pack()
        pass

    def buttonbox(self):
        # add standard button box. override if you don't want the
        # standard buttons
        box = Frame(self)
        w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
        w.pack(side=LEFT, padx=5, pady=5)
        w = Button(box, text="Cancel", width=10, command=self.cancel)
        w.pack(side=LEFT, padx=5, pady=5)
        self.bind("<Return>", self.ok)
        self.bind("<Escape>", self.cancel)
        box.pack()

    #
    # standard button semantics
    def ok(self, event=None):
        # Validate, persist the outlet settings, then close via cancel().
        if not self.validate():
            self.initial_focus.focus_set() # put focus back
            return
        self.outlet.saveOutlet()
        self.withdraw()
        self.update_idletasks()
        self.apply()
        self.cancel()

    def cancel(self, event=None):
        # put focus back to the parent window
        self.parent.focus_set()
        self.destroy()

    #
    # command hooks
    def validate(self):
        return 1 # override

    def apply(self):
        pass # override
|
# Read "<count> <character>" from stdin, then print a growing triangle of the
# character twice, followed by a single column of the character.
n, char = input().split(" ")
height = int(n)
for _ in range(2):
    for row in range(height):
        print(char * (row + 1))
for _ in range(height):
    print(char)
|
#Simple implementation of a doubly linked list
class Node:
    """A doubly linked list node holding *data* plus prev/next links."""

    def __init__(self, data=None):
        self.data = data
        self.next = None
        self.prev = None


class DoublyLinkedList:
    """Doubly linked list with head/tail pointers and a cached length.

    All mutators return ``self`` so calls can be chained.
    """

    def __init__(self, node=None):
        self.head = node
        self.tail = node
        self.length = 0 if node is None else 1

    def append(self, value):
        """Add *value* at the tail."""
        toAdd = Node(value)
        if self.head is None:
            self.head = toAdd
            self.tail = toAdd
        else:
            toAdd.prev = self.tail
            self.tail.next = toAdd
            self.tail = toAdd
        self.length += 1
        return self

    def prepend(self, value):
        """Add *value* at the head."""
        toAdd = Node(value)
        if self.head is None:
            self.head = toAdd
            self.tail = toAdd
        else:
            toAdd.next = self.head
            self.head.prev = toAdd
            self.head = toAdd
        self.length += 1
        return self

    def traverse(self, index):
        """Return the node at *index* (no bounds checking)."""
        curr = self.head
        for _ in range(index):
            curr = curr.next
        return curr

    def insert(self, index, value):
        """Insert *value* before position *index*; out-of-range indexes append."""
        if index >= self.length:
            return self.append(value)
        if index == 0:
            return self.prepend(value)
        toAdd = Node(value)
        before = self.traverse(index - 1)
        toAdd.next = before.next
        toAdd.prev = before
        before.next = toAdd
        toAdd.next.prev = toAdd
        self.length += 1
        return self

    def remove(self, index):
        """Remove the node at *index*; out-of-range indexes remove the tail.

        Guards fixed here: removing the only node used to dereference
        None.prev; removing the actual tail used to dereference None.prev
        and left ``self.tail`` pointing at the deleted node.
        """
        # do nothing if empty
        if self.length == 0:
            return self
        # delete head (also covers a single-element list for any index)
        if index == 0 or self.length == 1:
            self.head = self.head.next
            if self.head is None:
                self.tail = None  # list is now empty
            else:
                self.head.prev = None
            self.length -= 1
            return self
        # clamp an over-large index to "delete the last node"
        if index >= self.length:
            index = self.length - 1
        before = self.traverse(index - 1)
        toDelete = before.next
        before.next = toDelete.next
        if before.next is None:
            self.tail = before  # removed the tail: repoint it
        else:
            before.next.prev = before
        self.length -= 1
        return self

    # Print all values in linked list
    def print(self):
        """Print every node along with its neighbours' values."""
        curr = self.head
        while (curr is not None):
            print(f"Node Value: {curr.data}, Previous Node Value: {str(curr.prev.data) if curr.prev != None else 'None'}, Next Node Value: {str(curr.next.data) if curr.next != None else 'None'}")
            curr = curr.next
        return self
# Demo: exercise append/insert/remove on the doubly linked list, then dump it.
demo_list = DoublyLinkedList()
for value in (10, 5, 16):
    demo_list.append(value)
demo_list.insert(3, 44)
demo_list.insert(1, 22)
demo_list.insert(55, 74)   # out-of-range index appends at the tail
demo_list.remove(0)
demo_list.remove(3)
demo_list.print()
import sys
from EMR.ScheduledJobUpdaterOozie import ScheduledJobUpdaterOozie
from Lambda.LambdaUpdater import LambdaUpdater
from Utils.ChangedResources import ChangedResources
from Utils.EMRUtil import EMRUtil
# logging.basicConfig(level=logging.DEBUG)
# _LOG = logging.getLogger(__name__)
if __name__ == '__main__':
if len(sys.argv) < 2: raise Exception(
"ERROR: Insufficient number of arguments, changes.txt, config.json and output.json file paths must be given")
crobj = ChangedResources(sys.argv[1], sys.argv[2], sys.argv[3])
print "Changed Resources", crobj.changedResources
changedLambdas = crobj.getChangedLambdas()
print "Lambdas: ", changedLambdas
changedEMRs = crobj.getChangedEMRs()
print "EMRs: ", changedEMRs
for eachLambda in changedLambdas:
config = crobj.configReaderObj.getConfiguration(eachLambda)
# print config
if LambdaUpdater(config).upload_lambda_fuction():
print "[SUCCESS] Lamda Function: %s updated successfully" % config['serviceName']
else:
print "FAILED to update Lamda Function: %s" % config['serviceName']
for each in changedEMRs:
emrConfig = crobj.configReaderObj.getConfiguration(each)
emrUtil = EMRUtil()
emrUtil.addToKnownHosts(clusterID=emrConfig['arn'])
changedEMRjobs = crobj.identifyEMRjobs(each)
print "EMR Cluster:%s Jobs Changed:%s " % (each, changedEMRjobs)
for eachJobChanged in changedEMRjobs:
jobConfig = crobj.configReaderObj.getEMRJobConfiguration(eachJobChanged['jobName'])
print "JobName:%s JobType:%s JobConfig:%s" % (
eachJobChanged['jobName'], eachJobChanged['jobType'], jobConfig)
if eachJobChanged['jobType'] == 'oozie':
if ScheduledJobUpdaterOozie(jobConfig, emrConfig).update():
print "[SUCCESS] EMR Job: %s updated successfully" % eachJobChanged['jobName']
|
# Exercício 9.35 - Livro
import os.path, sys, urllib.request
mascaraEstilo = "'margin: 5px 0px 5px 0px;'"
def geraEstilo(nivel):
    """Return the inline CSS for a listing entry at depth *nivel*.

    Currently level-independent: always returns mascaraEstilo.
    """
    return mascaraEstilo
def geraListagem(pagina, diretorio):
    """Walk *diretorio* recursively and write the listing into *pagina*.

    Each directory becomes a styled <p>; each file becomes a styled <p>
    containing a link plus its size in bytes.
    """
    profundidade_raiz = os.path.abspath(diretorio).count(os.sep)
    for pasta, _subpastas, arquivos in os.walk(diretorio):
        # Depth relative to the starting directory.
        nivel = pasta.count(os.sep) - profundidade_raiz
        pagina.write(f'<p style={geraEstilo(nivel)}>{pasta}</p>')
        estilo_arquivo = geraEstilo(nivel + 1)
        for nome in arquivos:
            caminho = os.path.join(pasta, nome)
            tamanho = os.path.getsize(caminho)
            link = urllib.request.pathname2url(caminho)
            pagina.write(f"<p style={estilo_arquivo}><a href='{link}'>{nome}</a> ({tamanho} bytes)</p>")
# CLI entry point: python <script> <directory>
if len(sys.argv) < 2:
    print('Digite o nome do diretório para coletar os arquivos!')
    sys.exit(1)
diretorio = sys.argv[1]
# Write the HTML shell, then the recursive listing, then the closing tags.
pagina = open('arquivos.html', 'w', encoding='utf-8')
pagina.write("""
<!DOCTYPE html>
<html lang="pt-BR">
<head>
<meta charset="utf-8">
<title>Arquivos</title>
</head>
<body>
""")
pagina.write(f'Arquivos encontrados a partir do diretório: {diretorio}')
geraListagem(pagina, diretorio)
pagina.write("""
</body>
</html>
""")
pagina.close()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-31 22:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema for the app: Centre, Commune, Mairie.

    Generated by Django 1.11 — do not hand-edit field order; later
    migrations depend on this exact state.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Centre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('libelle_centre', models.CharField(max_length=500, verbose_name='Centre de Sant\xe9')),
                ('situation', models.CharField(max_length=500, verbose_name='Situation Geographique du Centre')),
            ],
            options={
                'verbose_name': 'centre sanitaire',
                'verbose_name_plural': 'Centres Sanitaires',
            },
        ),
        migrations.CreateModel(
            name='Commune',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('region', models.CharField(max_length=250, verbose_name='Region')),
                ('libelle', models.CharField(max_length=250, verbose_name='Commune')),
            ],
            options={
                'verbose_name': 'commune',
                'verbose_name_plural': 'Communes',
            },
        ),
        migrations.CreateModel(
            name='Mairie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('maire', models.CharField(max_length=500, verbose_name='Nom et Prenoms du Maire')),
                ('telephone1', models.CharField(max_length=50, verbose_name='Telephone 1')),
                ('telephone2', models.CharField(blank=True, max_length=50, null=True, verbose_name='Telephone 2')),
                ('adresse', models.CharField(blank=True, max_length=100, verbose_name='Adresse Postale')),
                ('email', models.CharField(blank=True, max_length=100, verbose_name='Adresse Email')),
                ('site', models.CharField(blank=True, max_length=100, verbose_name='Site Web')),
                ('logo', models.ImageField(blank=True, null=True, upload_to='logos')),
                ('Commune', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='parametres.Commune')),
            ],
        ),
        # NOTE(review): this adds a FK field named 'centre' on model Centre
        # pointing at Commune — the name suggests it was meant to be
        # 'commune'; confirm before building on it.
        migrations.AddField(
            model_name='centre',
            name='centre',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='parametres.Commune'),
        ),
    ]
|
from glob import glob
import json
import os
import pickle
import string
import text2vec
import h5py
import nltk
import numpy as np
from tqdm import tqdm
import yaml
import matplotlib.pyplot as plt
import random
from math import sqrt
import nltk.stopwords as noisy_words
def smooth_text_plus(text, dominant_word):
    """Snap every pixel of *text* (H x W x 3 uint8 array) to its nearest center.

    Each entry of *dominant_word* is a scalar gray level expanded into an
    RGB triple; pixels that already equal a center are left untouched,
    every other pixel is replaced in place by the Euclidean-nearest center.
    Returns the (mutated) array.

    Fix: the distance key previously subtracted np.uint8 values directly,
    so negative differences (and their squares) wrapped around modulo 256
    and the "nearest" center was frequently wrong. Casting to Python int
    before differencing restores true Euclidean distance.
    """
    center = []
    for l in dominant_word:
        center_array = np.array([l] * 3)
        center.append(np.uint8(center_array))
    for i in range(text.shape[0]):
        for j in range(text.shape[1]):
            current_word = np.uint8(text[i, j])
            # Leave exact center pixels alone; snap everything else.
            if not any(all(current_word == x) for x in center):
                # int() casts avoid uint8 wraparound in the differences.
                center.sort(key=lambda c: sqrt((int(current_word[0]) - int(c[0])) ** 2
                                               + (int(current_word[1]) - int(c[1])) ** 2
                                               + (int(current_word[2]) - int(c[2])) ** 2))
                text[i, j] = center[0]
    return text
def create_h5(split, data_path, h5_path, resize_wh=128, data_augmentation=False):
    """Build gandraw_<split>.h5 from <data_path>/<split>.json.

    For each dialog scene: per-turn synthetic images ('text'), per-turn
    semantic maps ('text_semantic'), the accumulated utterances, and the
    scene's target image/segmentation are stored in one HDF5 group per
    scene. With data_augmentation=True, a second augmented copy of every
    scene is appended (one random augmentation mode per scene).

    NOTE(review): only "train"/"val"/"test" are handled — any other
    *split* leaves split_json/h5_split undefined (NameError). Also, the
    assert messages reference `semantic_text_path`, whose assignment is
    commented out, so a failing assert would raise NameError instead.
    Relies on module-level helpers preprocessing_text / apply_augmentation
    not visible here.
    """
    if split == "val":
        with open('/'.join([data_path, "val.json"]), 'r') as f:
            split_json = json.load(f)
        h5_split = h5py.File(os.path.join(h5_path, 'gandraw_val.h5'), 'w')
    if split == "test":
        with open('/'.join([data_path, "test.json"]), 'r') as f:
            split_json = json.load(f)
        h5_split = h5py.File(os.path.join(h5_path, 'gandraw_test.h5'), 'w')
    if split == "train":
        #load the json file
        with open('/'.join([data_path, "train.json"]), 'r') as f:
            split_json = json.load(f)
        #initialize the hdf5 files
        h5_split = h5py.File(os.path.join(h5_path, 'gandraw_train.h5'), 'w')
    # Running index of the next HDF5 group name.
    c_split = 0
    for scene_id, scene in tqdm(enumerate(split_json['data'])):
        text = []
        text_semantic = []
        utterences = []
        target_text = []
        target_text_segmentation = []
        target_text_path = []
        description = []
        for i in range(len(scene['dialog'])):
            turn = scene['dialog'][i]
            #lower case all messages
            data2 = str.lower(turn['data2'])
            data1 = str.lower(turn['data1'])
            #The information will always be alteranating between data2 and data1
            if data2 != '':
                description += ['<data2>'] + nltk.word_tokenize(data2)
            if data1 != '':
                description += ['<data1>'] + nltk.word_tokenize(data1)
            # Strip punctuation tokens before joining into one utterance string.
            description = [w for w in description if w not in string.punctuation]
            utterences.append(str.join(' ', description))
            current_turn_text = text2vec.txtread(os.path.join(data_path, turn['text_synthetic']))
            current_turn_text = preprocessing_text(current_turn_text, resize_wh=resize_wh) #The text is converted from BGR2RGB and the size becomes 128*128
            text.append(current_turn_text)
            #semantic_text_path = 'semantic_text/'+turn['text_semantic'].split('/')[-1]
            current_turn_text_semantic = text2vec.txtread(os.path.join(data_path, turn['text_semantic']))
            current_turn_text_semantic = preprocessing_text(current_turn_text_semantic, resize_wh=resize_wh, segmentation=True)
            #print(current_turn_text_semantic.shape)
            assert current_turn_text_semantic is not None, "os.path.join({}, {})".format(data_path, semantic_text_path)
            text_semantic.append(current_turn_text_semantic)
            # Reset the accumulated tokens for the next turn.
            description = []
        current_target_text = text2vec.txtread(os.path.join(data_path, scene['target_text']))
        current_target_text = preprocessing_text(current_target_text, resize_wh=resize_wh)
        target_text.append(current_target_text)
        target_text_path.append(scene['target_text'])
        current_target_text_segmentation = text2vec.txtread(os.path.join(data_path, scene['target_text_semantic']))
        current_target_text_segmentation = preprocessing_text(current_target_text_segmentation, resize_wh=resize_wh, segmentation=True)
        target_text_segmentation.append(current_target_text_segmentation)
        scene_hdf5 = h5_split.create_group(str(c_split))
        c_split += 1
        #Add the task_id
        task_id = scene.get("task_id", None)
        scene_hdf5.create_dataset('text', data=text)
        scene_hdf5.create_dataset('text_semantic', data=text_semantic)
        if task_id is not None:
            scene_hdf5.create_dataset('scene_id', data=task_id)
        else:
            scene_hdf5.create_dataset('scene_id', data=str(scene_id))
        scene_hdf5.create_dataset('target_text', data=target_text)
        scene_hdf5.create_dataset('target_text_segmentation', data = target_text_segmentation)
        # Variable-length string dtype for the text datasets.
        dt = h5py.special_dtype(vlen=str)
        scene_hdf5.create_dataset('utterences', data=np.string_(utterences), dtype=dt)
        scene_hdf5.create_dataset('target_text_path', data=np.string_(target_text_path), dtype=dt)
    #increasing the data by one time with data augmentation
    if data_augmentation:
        for scene_id, scene in tqdm(enumerate(split_json['data'])):
            text = []
            text_semantic = []
            utterences = []
            target_text = []
            target_text_segmentation = []
            target_text_path = []
            description = []
            #define data_augmentation_mode
            data_augmentation_mode = random.choice(["crop", "contrast", "noisy"])
            for i in range(len(scene['dialog'])):
                turn = scene['dialog'][i]
                #lower case all messages
                data2 = str.lower(turn['data2'])
                data1 = str.lower(turn['data1'])
                #The information will always be alteranating between data2 and data1
                if data2 != '':
                    description += ['<data2>'] + nltk.word_tokenize(data2)
                if data1 != '':
                    description += ['<data1>'] + nltk.word_tokenize(data1)
                description = [w for w in description if w not in string.punctuation]
                utterences.append(str.join(' ', description))
                current_turn_text = text2vec.txtread(os.path.join(data_path, turn['text_synthetic']))
                #Augment the data
                current_turn_text = apply_augmentation(current_turn_text, mode=data_augmentation_mode)
                current_turn_text = preprocessing_text(current_turn_text, resize_wh=resize_wh) #The text is converted from BGR2RGB and the size becomes 128*128
                text.append(current_turn_text)
                #semantic_text_path = 'semantic_text/'+turn['text_semantic'].split('/')[-1]
                current_turn_text_semantic = text2vec.txtread(os.path.join(data_path, turn['text_semantic']))
                current_turn_text_semantic = preprocessing_text(current_turn_text_semantic, resize_wh=resize_wh, segmentation=True)
                assert current_turn_text_semantic is not None, "os.path.join({}, {})".format(data_path, semantic_text_path)
                text_semantic.append(current_turn_text_semantic)
                description = []
            current_target_text = text2vec.txtread(os.path.join(data_path, scene['target_text']))
            current_target_text = preprocessing_text(current_target_text, resize_wh=resize_wh)
            target_text.append(current_target_text)
            target_text_path.append(scene['target_text'])
            current_target_text_segmentation = text2vec.txtread(os.path.join(data_path, scene['target_text_semantic']))
            current_target_text_segmentation = preprocessing_text(current_target_text_segmentation, resize_wh=resize_wh, segmentation=True)
            target_text_segmentation.append(current_target_text_segmentation)
            scene_hdf5 = h5_split.create_group(str(c_split))
            c_split += 1
            #Add the task_id
            task_id = scene.get("task_id", None)
            scene_hdf5.create_dataset('text', data=text)
            scene_hdf5.create_dataset('text_semantic', data=text_semantic)
            if task_id is not None:
                # Augmented copies get a distinct "_DA" suffix on the scene id.
                scene_hdf5.create_dataset('scene_id', data=task_id+"_DA")
            else:
                scene_hdf5.create_dataset('scene_id', data=str(scene_id+len(split_json['data'])))
            scene_hdf5.create_dataset('target_text', data=target_text)
            scene_hdf5.create_dataset('target_text_segmentation', data = target_text_segmentation)
            dt = h5py.special_dtype(vlen=str)
            scene_hdf5.create_dataset('utterences', data=np.string_(utterences), dtype=dt)
            scene_hdf5.create_dataset('target_text_path', data=np.string_(target_text_path), dtype=dt)
|
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Default-host handler for the index route."""

    def get(self):
        # Plain-text greeting.
        greeting = "Hello world"
        self.write(greeting)
class StoryHandler(tornado.web.RequestHandler):
    """Handler for /story/<id>; echoes back the requested story id."""

    def get(self, story_id):
        # story_id comes in as the captured path group (a string).
        message = "You requested the story " + story_id
        self.write(message)
class BuyHandler(tornado.web.RequestHandler):
    """Index handler served only on the buy.wupeiqi.com virtual host."""

    def get(self):
        body = "buy.wupeiqi.com/index"
        self.write(body)
# Routes for the default host.
application = tornado.web.Application([
    (r"/index", MainHandler),
    # NOTE(review): the capture group matches exactly one digit, so
    # multi-digit story ids will 404 — confirm whether ([0-9]+) was intended.
    (r"/story/([0-9])",StoryHandler),
])
# Extra routes served only when the Host header matches this pattern.
application.add_handlers("buy.wupeiqi.com$",[
    (r"/index",BuyHandler),
])
if __name__ == "__main__":
    # Listen on 8888 and start the (pre-tornado-5 style) singleton IOLoop.
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
|
# Author:ambiguoustexture
# Date: 2020-02-06
import re
# Extract wiki-style file references ("File:...|" or "ファイル:...|") from
# UK.txt and print each captured file name, one per line.
pattern_file = re.compile(r'''
(?:File|ファイル) # Uncaptured, 'File' or 'ファイル'
:
(.+?) # Capture target,
# 0 or more arbitrary characters,
# non-greedy match
\|
''', re.VERBOSE)

with open('UK.txt') as source:
    content = source.read()

for captured in pattern_file.findall(content):
    print(captured)
|
import random
import sys
import time
def mengetik(s):
    """Print *s* plus a trailing newline with a character-by-character
    typing effect (random delay of up to 0.2s per character)."""
    for ch in s + '\n':
        sys.stdout.write(ch)
        sys.stdout.flush()
        delay = random.random() * 0.2
        time.sleep(delay)
mengetik(' \033[31;1m• ✆••>€•LIT>MR.4Nz<[BPI] \n \033[33;1m• Mr.CL4Y0<[BPI] \n \033[37;1m• ᴹᴿ.$⁴ᴺᵀᴿ¹-SSC<[BPI]<[MCC]')
|
#coding:utf-8
import jieba
import sys
import re
from documentRead import DocumentRead
# Python 2 script: segment every document under H:\user_content with jieba,
# drop stop words, and write "seg_<name>" files to H:\user_seg_content.
reload(sys)
sys.setdefaultencoding('utf8')
# Stop-word list, one word per line (bytes, compared against utf-8 tokens).
stopwords = open("H:\\stopwords.txt", 'rb').read().splitlines()
directory ='H:\\user_content'
documentReader=DocumentRead(directory)
documentReader.load_document()
documents=documentReader.get_documents()
documents_name=documentReader.get_documents_name()
for index in range(0, len(documents_name),1):
    # NOTE(review): 'str' shadows the builtin from here on in this scope.
    str=unicode(documents[index])
    # NOTE(review): '[u4e00-u9fa5]' lacks backslashes — it matches the
    # literal characters u,4,e,0,... rather than the CJK range; the intent
    # was presumably u"[\u4e00-\u9fa5]". Confirm before relying on it.
    pattern2 = re.compile(u"-|/+|[u4e00-u9fa5]+|\s+|[.]|\#")
    pattern3 = re.compile(u"[A-Za-z]+")
    # Replace matches with the ideographic full stop before segmenting.
    str = pattern2.sub("。", str)
    str = pattern3.sub("。", str)
    seg_list = jieba.cut(str, cut_all=False)  # cut_all=False: jieba accurate (non-full) mode
    stayed_line = ""
    for seg in seg_list:
        if seg.encode('utf-8') not in stopwords:
            stayed_line += seg + " "
    file_name = "H:\\user_seg_content\\"+"seg_"+documents_name[index]
    with open(file_name, "w") as f:
        f.write(stayed_line)
print u'done'
# with open("H:\\user_content\\1427592210_content.txt") as f:
# text=f.read()
# import re
# str=unicode(text)
# # pattern1 = re.compile(u"(http.*?)[\u4e00-\u9fa5]+")
# pattern2 = re.compile(u"-|/+|[u4e00-u9fa5]+|\s+|[.]|\#")
# pattern3 = re.compile(u"[A-Za-z]+")
# # print str
# # str=pattern1.sub("",str)
# str=pattern2.sub("",str)
# str=pattern3.sub("",str)
# seg_list = jieba.cut(str) # 默认是精确模式
# print u'----------'
#
# stayed_line = ""
# for seg in seg_list:
# if seg.encode('utf-8') not in stopwords:
# stayed_line += seg+" "
# file_name="H:\\seg_content.txt"
# with open(file_name,"w") as f:
# f.write(stayed_line)
# print u'done' |
# Show the exact numerator/denominator pair stored for the float 0.2.
print((0.2).as_integer_ratio())
# Does 0.235 hold an integral value?
print((0.235).is_integer())
# Does 2.000 hold an integral value?
print((2.000).is_integer())
# File: typedemo01.py
# Author: Kaiching Chang
# Date: July, 2014
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
class TwitterBot:
    """Drives a Firefox session against the local admin page."""

    def __init__(self):
        # Launch a fresh Firefox instance controlled by selenium.
        self.bot = webdriver.Firefox()

    def startProcess(self):
        """Open the admin page and fill every form field with dummy text."""
        browser = self.bot
        browser.get("http://localhost:3001/admin")
        time.sleep(2)  # crude wait for the page to finish loading
        for field in browser.find_elements_by_class_name("form-control"):
            field.clear()
            field.send_keys("sss")
# Instantiate the bot (opens Firefox) and run the form-filling flow.
test = TwitterBot()
test.startProcess()
|
"""PDF credentials tests."""
import os
import shutil
import tempfile
from django.test import override_settings
from django.urls import reverse
from modoboa.admin import factories as admin_factories
from modoboa.core import models as core_models
from modoboa.lib.tests import ModoTestCase
class EventsTestCase(ModoTestCase):
    """Test event handlers.

    Exercises the pdfcredentials plugin: a credentials PDF must be created
    when an account is created/updated, downloadable exactly once, and
    removed when the account is deleted.
    """

    @classmethod
    def setUpTestData(cls):
        """Create some data."""
        super(EventsTestCase, cls).setUpTestData()
        admin_factories.DomainFactory(name="test.com")

    def setUp(self):
        """Create temp. directory to store files."""
        super(EventsTestCase, self).setUp()
        self.workdir = tempfile.mkdtemp()
        # Point the plugin's storage directory at the throwaway dir.
        self.set_global_parameter("storage_dir", self.workdir)

    def tearDown(self):
        """Reset test env."""
        shutil.rmtree(self.workdir)

    def _create_account(self, username, expected_status=200):
        """Create a test account.

        Posts the account-creation form via AJAX, asserts the HTTP status
        and returns the form values used (handy for later lookups).
        """
        values = {
            "username": username,
            "first_name": "Tester", "last_name": "Toto",
            "role": "SimpleUsers", "quota_act": True,
            "is_active": True, "email": username,
            "random_password": True, "stepid": 2
        }
        response = self.client.post(
            reverse("admin:account_add"), values,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        self.assertEqual(response.status_code, expected_status)
        return values

    def test_password_updated(self):
        """Check that document is generated at account creation/update."""
        values = self._create_account("leon@test.com")
        fname = os.path.join(self.workdir, "{}.pdf".format(values["username"]))
        self.assertTrue(os.path.exists(fname))
        account = core_models.User.objects.get(username=values["username"])
        # Check if link is present in listing page
        response = self.ajax_get(reverse("admin:_identity_list"))
        self.assertIn('name="get_credentials"', response["rows"])
        # Try to download the file
        response = self.client.get(
            reverse("pdfcredentials:account_credentials",
                    args=[account.pk]))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/pdf")
        # File have been deleted?
        self.assertFalse(os.path.exists(fname))
        # Try to download a second time
        response = self.client.get(
            reverse("pdfcredentials:account_credentials",
                    args=[account.pk]))
        self.assertContains(response, "No document available for this user")
        # Update account
        values.update({"language": "en"})
        self.ajax_post(
            reverse("admin:account_change", args=[account.pk]), values
        )
        self.assertFalse(os.path.exists(fname))
        # With generation-at-creation disabled, an update regenerates the doc.
        self.set_global_parameter("generate_at_creation", False)
        self.ajax_post(
            reverse("admin:account_change", args=[account.pk]), values
        )
        self.assertTrue(os.path.exists(fname))

    def test_with_connection_settings(self):
        """Add connection settings to documents."""
        self.set_global_parameter("include_connection_settings", True)
        values = self._create_account("leon@test.com")
        fname = os.path.join(self.workdir, "{}.pdf".format(values["username"]))
        self.assertTrue(os.path.exists(fname))

    def test_with_custom_message(self):
        """Add custom message to documents."""
        self.set_global_parameter("custom_message", "This is a test message.")
        values = self._create_account("leon@test.com")
        fname = os.path.join(self.workdir, "{}.pdf".format(values["username"]))
        self.assertTrue(os.path.exists(fname))

    def test_account_delete(self):
        """Check that document is deleted with account."""
        values = self._create_account("leon@test.com")
        fname = os.path.join(self.workdir, "{}.pdf".format(values["username"]))
        self.assertTrue(os.path.exists(fname))
        account = core_models.User.objects.get(username=values["username"])
        self.ajax_post(
            reverse("admin:account_delete", args=[account.pk]), {}
        )
        self.assertFalse(os.path.exists(fname))

    @override_settings(MODOBOA_LOGO="modoboa.png")
    def test_with_custom_logo(self):
        """Generation must still succeed with a custom logo configured."""
        values = self._create_account("leon@test.com")
        fname = os.path.join(self.workdir, "{}.pdf".format(values["username"]))
        self.assertTrue(os.path.exists(fname))

    def test_download_and_delete_account(self):
        """Deleting an account whose document was already downloaded must not fail."""
        values = self._create_account("leon@test.com")
        fname = os.path.join(self.workdir, "{}.pdf".format(values["username"]))
        self.assertTrue(os.path.exists(fname))
        account = core_models.User.objects.get(username=values["username"])
        # Try to download the file
        response = self.client.get(
            reverse("pdfcredentials:account_credentials",
                    args=[account.pk]))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response["Content-Type"], "application/pdf")
        # File have been deleted?
        self.assertFalse(os.path.exists(fname))
        # Delete account
        self.ajax_post(
            reverse("admin:account_delete", args=[account.pk]), {}
        )

    def test_storage_dir_creation(self):
        """Test storage directory creation."""
        # An unwritable/nonexistent storage dir must surface as a 500.
        self.set_global_parameter("storage_dir", "/nonexistentdir")
        self._create_account("leon@test.com", expected_status=500)
|
#*_* coding=utf8 *_*
#!/usr/bin/env python
from tornado import web
from unreal import enum
from unreal import session
from unreal import config
from unreal import exception
from unreal.utils import mysql
CONF = config.CONF
def require_login(func):
    """Decorator: allow the handler method only for logged-in users.

    Anonymous users are redirected to "/" with a login prompt.
    """
    def wrapper(handler, *args, **kwargs):
        if handler.is_login:
            return func(handler, *args, **kwargs)
        raise exception.PromptRedirect("请登录后,执行操作。", "/")
    return wrapper
def require_admin(func):
    """Decorator: allow the handler method only for admin users.

    Non-admins are redirected to "/" with a permission prompt.
    """
    def wrapper(handler, *args, **kwargs):
        if handler.is_admin:
            return func(handler, *args, **kwargs)
        raise exception.PromptRedirect("没有权限进行此操作。", "/")
    return wrapper
class BaseHandler(web.RequestHandler):
    """Common request handler: Redis session, MySQL access and auth helpers."""

    @property
    def session(self):
        # Lazily create a Redis-backed session keyed by the 'sid' secure
        # cookie; if the cookie is absent, a new session id is generated
        # and written back to the client.
        if not hasattr(self, '_session'):
            sessionid = self.get_secure_cookie('sid')
            expire_seconds = CONF.session_expire_seconds
            self._session = session.RedisSession(
                self.application.session_store,
                sessionid, expire_seconds=expire_seconds)
            if not sessionid:
                self.set_secure_cookie('sid', self._session.id,
                                       expires_days=None)
        return self._session

    @property
    def db(self):
        # A fresh MySQL connection on every access.
        # NOTE(review): 'mysql_pasword' looks like a typo for 'mysql_password'
        # — confirm against the config module's attribute name before fixing.
        return mysql.Connection(CONF.mysql_host, CONF.mysql_db,
                                CONF.mysql_user, CONF.mysql_pasword)

    @property
    def user(self):
        # The logged-in user dict stored in the session, or None.
        return self.session.get('user')

    @property
    def is_login(self):
        # True when a user dict is present in the session.
        return self.session.get('user') is not None

    @property
    def is_admin(self):
        # True when the session user exists and has the Admin role.
        user = self.session.get('user')
        return user is not None and user.get('type') == enum.Role.Admin

    def logout(self):
        # Drop the user from the session and persist the change.
        if self.is_login:
            del self.session['user']
            self.session.save()

    def prompt_and_redirect(self, message, uri=None):
        # Render a prompt page that redirects to *uri* (default: the referer).
        if uri is None:
            uri = self.request.headers.get('Referer', "/")
        return self.render("prompt.html", message=message, redirect_url=uri)

    def _handle_request_exception(self, e):
        # PromptRedirect becomes a friendly prompt page; anything else falls
        # back to tornado's default exception handling.
        e_type = type(e)
        referer = self.request.headers.get('Referer', "/")
        if e_type is exception.PromptRedirect:
            self.prompt_and_redirect(e.msg, e.uri or referer)
        else:
            super(BaseHandler, self)._handle_request_exception(e)
|
class ATM:
    """Simulated ATM holding 20/50/100/200/500 banknotes (LeetCode 2241)."""

    def __init__(self):
        # denomination value -> number of notes currently held
        self.cnt = defaultdict(int)
        self.money = [20, 50, 100, 200, 500]

    def deposit(self, banknotesCount: List[int]) -> None:
        """Add note counts, ordered smallest to largest denomination."""
        for value, added in zip(self.money, banknotesCount):
            self.cnt[value] += added

    def withdraw(self, amount: int) -> List[int]:
        """Greedily dispense *amount* using the largest notes first.

        Returns the per-denomination counts dispensed, or [-1] if the exact
        amount cannot be assembled (in which case nothing is deducted).
        """
        res = [0] * 5
        remaining = amount
        for i in reversed(range(5)):
            value = self.money[i]
            take = min(remaining // value, self.cnt[value])
            remaining -= take * value
            res[i] = take
        if remaining > 0:
            return [-1]
        # Commit the withdrawal only once it is known to succeed.
        for value, taken in zip(self.money, res):
            self.cnt[value] -= taken
        return res
# Your ATM object will be instantiated and called as such:
# obj = ATM()
# obj.deposit(banknotesCount)
# param_2 = obj.withdraw(amount) |
import keras
from keras.models import *
from keras.layers import *
from augment import Process
from keras.callbacks import *
from keras.optimizers import *
class NN(object):
def __init__(self, row, col):
self.row = row
self.col = col
def load_data(self):
data = Process(self.row, self.col)
train, label = data.load_train()
test = data.load_test()
return train, label, test
def net_structure(self, show = True):
print('\n\n' + '-' * 30 + '\nConstruct Neural Network\n' + '-' * 30)
# Structure
# input layer
inputs = Input((self.row, self.col, 1))
# the first convolution layer-----filters: 64 3x3 + 64 3x3
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
# the second convolution layer----filters: 128 3x3 + 128 3x3
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
# the third convolution layer-----filters: 256 3x3 + 256 3x3
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
# the forth convolution layer-----filters: 512 3x3 + 512 3x3
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
# the fifth convolution layer-----filters: 1024 3x3 + 1024 3x3
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
# the sixth convolution layer-----filters: 512 3x3 + 512 3x3------concatenate with the fourth layer
up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(drop5))
merge6=concatenate([drop4,up6],axis=3)
conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
# the seventh convolution layer-----filters: 256 3x3 + 256 3x3------concatenate with the third layer
up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv6))
merge7 =concatenate([conv3,up7],axis=3)
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
# the eighth convolution layer-----filters: 128 3x3 + 128 3x3------concatenate with the second layer
up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv7))
merge8 =concatenate([conv2,up8],axis=3)
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
# the ninth convolution layer-----filters: 64 3x3 + 64 3x3 + 2 3x3------concatenate with the first layer
up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(conv8))
merge9 =concatenate([conv1,up9],axis=3)
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
conv9 = Conv2D(2 , 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
# output layer------segmentation result------don't change the size of conv9 output
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
# Close-out
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
if show:
print('\nStructure')
print(model.summary())
return model
def train_test(self):
    """Load the dataset, train the U-Net, then predict on the test set and
    save the raw predictions to ``test_result.npy``."""
    train, label, test = self.load_data()
    model = self.net_structure()
    print('\n\nTraining Neural Network')
    # Keep only the weights of the best (lowest-loss) epoch.
    checkpoint = ModelCheckpoint('net.h5', monitor='loss', verbose=1, save_best_only=True)
    model.fit(train, label, batch_size=20, epochs=30, verbose=1, shuffle=True, callbacks=[checkpoint])
    np.save('test_result.npy', model.predict(test, batch_size=1, verbose=1))
# Train the network and write predictions when run as a script.
if __name__ == '__main__':
    n = NN()
    n.train_test()
|
def add_node(v):
    """Add vertex ``v`` to the global adjacency matrix, growing it by one
    row and one column. Duplicate vertices are rejected with a message."""
    global node_count
    if v in nodes:
        print("The node already present in the graph")
        return
    node_count = node_count + 1
    nodes.append(v)
    # Widen every existing row for the new column ...
    for row in graph:
        row.append(0)
    # ... and append a fresh all-zero row spanning the enlarged matrix.
    graph.append([0] * node_count)
def print_graph():
    """Print the adjacency matrix, one row per line."""
    for r in range(node_count):
        # Build each row with a trailing separator to match
        # the output of `print(x, end=" ")` followed by a newline.
        print("".join(str(graph[r][c]) + " " for c in range(node_count)))
# Global graph state: vertex count, vertex labels, adjacency matrix.
node_count, nodes, graph = 0, [], []
# print("Before adding nodes")
# print(nodes)
# print(graph)
# add_node("A")
# add_node("B")
# print("After adding nodes")
# print(nodes)
# print(graph)
# print_graph() |
# Interactive binary-search guessing game: the computer guesses the
# player's secret number between 1 and 100.
print('Hello! What is your name?')
name = input()
print('Well, ' + name + ', Think of random number from 1 to 100, and I will try to guess it!')
lowest = 1
highest = 100
guessestaken = 0
guessing = True
while guessing:
    guessestaken = guessestaken + 1
    mean = int((lowest + highest) / 2)
    print('Is it ', mean, ' ? ')
    response = input('yes/no : ')
    if response == 'yes':
        print('Yeey! You got it in ' + str(guessestaken) + ' tries')
        # Explicit play-again check (the original compared the new answer
        # against `response`, which only worked because response == 'yes').
        if input("Do you want to play more ? (yes/no)") == 'yes':
            lowest = 1
            highest = 100
            guessestaken = 0
        else:
            print('Bye Bye')
            break
    if response == 'no':
        print('Is it greater than', mean, "?")
        response_again = input('yes/no : ')
        # BUG FIX: exclude the refuted guess from the new range. The original
        # kept `mean` (lowest = mean), so e.g. a secret of 100 made the loop
        # ask "Is it 99?" forever.
        if response_again == 'yes':
            lowest = mean + 1
        if response_again == 'no':
            highest = mean - 1
|
import sys; sys.path.insert(0, "/home/adriano/goamazondownloader")
"""S-Band radar data download"""
# Author: Adriano P. Almeida <adriano.almeida@inpe.br>
# License: MIT
from goamazondownloader import (Downloader, os, requests as req, BeautifulSoup,
ElementTree as ET)
from goamazondownloader.constants import (SIPAM_FILENAME, SIPAM_FRAME_URL,
ARM_URL_BASE, ARM_LOGIN_URL)
from goamazondownloader._exceptions import *
class SBandRadar(Downloader):
    """Downloader for SIPAM S-Band radar CAPPI files from the ARM archive.

    Relies on the Downloader base class for date handling (year/month/day),
    login/token management, directory handling and the download itself.
    """

    def __init__(self, **kwargs) -> None:
        # hour/minute are specific to this instrument; date fields are
        # handled by the Downloader base class.
        super(SBandRadar, self).__init__(**kwargs)
        self.hour = kwargs.get('hour', None)
        self.minute = kwargs.get('minute', None)
        self.initializer()

    def initializer(self) -> None:
        """Validate date/time, then prepare the local directory and filename."""
        try:
            if not self.has_date():
                raise DateRequiredError
            if not self.has_time():
                raise TimeRequiredError
            self.format_date()
            self.format_time()
            super(SBandRadar, self).set_directory(instrument='sbandradar')
            self.set_filename()
        except DateRequiredError as err:
            # NOTE(review): errors are printed, not re-raised, so a
            # partially-initialized object is still handed back to the caller.
            print(err)
        except TimeRequiredError as err:
            print(err)

    def set_remote_url(self) -> None:
        """Resolve the remote file URL for the configured date and time.

        Scrapes the SIPAM frame listing page and picks the file whose name
        contains the ``_cappi_<yyyymmdd>_<hhmm>`` substring.
        """
        try:
            if not self.is_logged():
                raise LoginRequiredError
            if not self.has_date():
                raise DateRequiredError
            if not self.has_time():
                raise TimeRequiredError
            frame_url = SIPAM_FRAME_URL.substitute(year=self.year,
                                                   month=self.month,
                                                   day=self.day,
                                                   token_access=self.token)
            res = req.get(frame_url)
            soup = BeautifulSoup(res.content, "html.parser")
            tags = soup.find_all('pre')[0].find_all('a')
            urls = list()
            for i, tag in enumerate(tags):
                # Only every other anchor is kept — presumably the listing
                # interleaves two anchors per file; TODO confirm on the page.
                if i % 2 != 0:
                    file_url = os.path.join(ARM_URL_BASE, tag.attrs['href'][1:])
                    print(file_url)
                    urls.append(file_url)
            if not urls:
                raise ListFilesNotFoundError
            substring = "_cappi_%s%s%s_%s%s" % (self.year, self.month, self.day,
                                                self.hour, self.minute)
            file_url = list(filter(lambda url: substring in url, urls))
            print(file_url)
            if not file_url:
                raise UrlFileNotFoundError
            self.remote_url = file_url[0]
        except LoginRequiredError as err:
            print(err)
        except DateRequiredError as err:
            print(err)
        except TimeRequiredError as err:
            print(err)
        except ListFilesNotFoundError as err:
            print(err)
        except UrlFileNotFoundError as err:
            print(err)

    def has_time(self) -> bool:
        # Both pieces are required to name and locate the remote file.
        return self.hour is not None and self.minute is not None

    def format_time(self) -> None:
        # Zero-pad to two digits (e.g. 5 -> "05") to match remote file names.
        self.hour = str(self.hour).zfill(2) \
            if self.hour is not None else None
        self.minute = str(self.minute).zfill(2) \
            if self.minute is not None else None

    def set_filename(self) -> None:
        """Build the local target path from directory, date and time."""
        try:
            if not self.has_date():
                raise DateRequiredError
            if not self.has_time():
                raise TimeRequiredError
            if not self.has_directory():
                self.set_directory()
        except DateRequiredError as err:
            print(err)
        except TimeRequiredError as err:
            print(err)
        # NOTE(review): the code below runs even when the checks above failed,
        # because the exceptions are swallowed by the prints.
        self.format_date()
        self.format_time()
        self.filename = os.path.join(self.directory, SIPAM_FILENAME.\
                                     substitute(year=self.year,
                                                month=self.month,
                                                day=self.day,
                                                hour=self.hour,
                                                minute=self.minute,
                                                sep='_'))
if __name__ == "__main__":
obj = SBandRadar(year=2014, month=2, day=18, hour=15, minute=15)
obj.login("username")
obj.set_remote_url()
obj.download()
|
# coding=utf-8
from datetime import datetime
import json
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
__author__ = 'ITTC-Jayvee'
project_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
data_path = '%s/data' % (project_path)
# project import
sys.path.append(project_path)
import Utils
from Utils.get_logger import logger, Timer
@Timer
def analysis(appinfo_connect, appname):
    """Dump all comments of one app from MongoDB into data/result.csv.

    appinfo_connect: collection-like object exposing find_one().
    appname: app_name value to look up.
    """
    appinfo = appinfo_connect.find_one({'app_name': appname})
    # `with` guarantees the CSV is flushed and closed even on error
    # (the original left the handle open).
    with open('%s/result.csv' % data_path, 'w') as fout:
        fout.write('date,content,version,nickname,phonetype\n')
        if appinfo is not None:
            for comment in appinfo['comments']:
                date = comment['date']
                content = str(comment['comment']).replace('\r', '')
                version = comment['version']
                nickname = comment['nickname']
                phonetype = comment['phonetype']
                # NOTE(review): fields are not CSV-escaped, so commas inside
                # the comment text shift columns — consider the csv module.
                fout.write('%s,%s,%s,%s,%s\n' % (str(date), str(content).strip(), version, nickname, phonetype))
# Connect to MongoDB at the configured address and export the comments of
# the app named '微信' (WeChat).
if __name__ == '__main__':
    import pymongo
    # NOTE(review): the config file handle is never closed.
    db_address = json.loads(open('%s/conf/DB_Address.conf' % (project_path), 'r').read())['MongoDB_Address']
    conn = pymongo.MongoClient(host=db_address, port=27017)
    appinfo_connect = conn.AppChinaData.AppInfo
    analysis(appinfo_connect, '微信')
|
from . import MovieFilterPolicy
class KeywordPolicy(MovieFilterPolicy):
    """Filter policy that keeps movies whose title contains a keyword,
    compared case-insensitively."""

    def __init__(self, word):
        self.word = word

    def _isInteresting(self, movie):
        title = movie.title.lower()
        return title.find(self.word.lower()) >= 0
|
"""
The plot subpackage contains tools for plotting signals and annotations.
"""
from wfdb.plot.plot import plot_items, plot_wfdb, plot_all_records
|
# This file is part of beets.
# Copyright 2016, François-Xavier Thomas.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Use command-line tools to check for audio file corruption.
"""
from subprocess import check_output, CalledProcessError, list2cmdline, STDOUT
import shlex
import os
import errno
import sys
import confuse
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.util import displayable_path, par_map
from beets import ui
from beets import importer
class CheckerCommandException(Exception):
    """Raised when invoking an external checker command fails.

    Attributes:
        checker: name of the checker executable (first element of the command).
        path: file that was being validated (last element of the command).
        errno: errno taken from the underlying OSError.
        msg: human-readable message of the underlying OSError.
    """

    def __init__(self, cmd, oserror):
        self.checker, self.path = cmd[0], cmd[-1]
        self.errno, self.msg = oserror.errno, str(oserror)
class BadFiles(BeetsPlugin):
    """Beets plugin that runs external checker tools (mp3val, flac, or
    user-configured commands) to detect corrupt audio files."""

    def __init__(self):
        super().__init__()
        self.verbose = False
        # Hook into the import pipeline so files can be checked on import.
        self.register_listener('import_task_start',
                               self.on_import_task_start)
        self.register_listener('import_task_before_choice',
                               self.on_import_task_before_choice)

    def run_command(self, cmd):
        """Run cmd; return (exit status, error count, non-empty output lines)."""
        self._log.debug("running command: {}",
                        displayable_path(list2cmdline(cmd)))
        try:
            output = check_output(cmd, stderr=STDOUT)
            errors = 0
            status = 0
        except CalledProcessError as e:
            # Non-zero exit: keep the tool's output and count one error.
            output = e.output
            errors = 1
            status = e.returncode
        except OSError as e:
            # The checker binary itself could not be executed.
            raise CheckerCommandException(cmd, e)
        output = output.decode(sys.getdefaultencoding(), 'replace')
        return status, errors, [line for line in output.split("\n") if line]

    def check_mp3val(self, path):
        """Check an MP3 with mp3val; its WARNING lines are counted as errors."""
        status, errors, output = self.run_command(["mp3val", path])
        if status == 0:
            output = [line for line in output if line.startswith("WARNING:")]
            errors = len(output)
        return status, errors, output

    def check_flac(self, path):
        """Decode-test a FLAC file (-t) silently (-s), warnings as errors (-w)."""
        return self.run_command(["flac", "-wst", path])

    def check_custom(self, command):
        """Build a checker callable from a user-configured command string."""
        def checker(path):
            cmd = shlex.split(command)
            cmd.append(path)
            return self.run_command(cmd)
        return checker

    def get_checker(self, ext):
        """Return the checker callable for a file extension, or None."""
        ext = ext.lower()
        try:
            command = self.config['commands'].get(dict).get(ext)
        except confuse.NotFoundError:
            # No custom commands configured at all.
            command = None
        if command:
            return self.check_custom(command)
        if ext == "mp3":
            return self.check_mp3val
        if ext == "flac":
            return self.check_flac
        # Falls through to None for unknown extensions.

    def check_item(self, item):
        """Check one library item; return a list of lines describing problems."""
        # First, check whether the path exists. If not, the user
        # should probably run `beet update` to cleanup your library.
        dpath = displayable_path(item.path)
        self._log.debug("checking path: {}", dpath)
        if not os.path.exists(item.path):
            ui.print_("{}: file does not exist".format(
                ui.colorize('text_error', dpath)))
            # NOTE(review): execution continues and still runs the checker on
            # the missing path — confirm whether an early return was intended.
        # Run the checker against the file if one is found
        ext = os.path.splitext(item.path)[1][1:].decode('utf8', 'ignore')
        checker = self.get_checker(ext)
        if not checker:
            self._log.error("no checker specified in the config for {}",
                            ext)
            return []
        path = item.path
        if not isinstance(path, str):
            path = item.path.decode(sys.getfilesystemencoding())
        try:
            status, errors, output = checker(path)
        except CheckerCommandException as e:
            if e.errno == errno.ENOENT:
                self._log.error(
                    "command not found: {} when validating file: {}",
                    e.checker,
                    e.path
                )
            else:
                self._log.error("error invoking {}: {}", e.checker, e.msg)
            return []
        error_lines = []
        if status > 0:
            error_lines.append(
                "{}: checker exited with status {}"
                .format(ui.colorize('text_error', dpath), status))
            for line in output:
                error_lines.append(f" {line}")
        elif errors > 0:
            error_lines.append(
                "{}: checker found {} errors or warnings"
                .format(ui.colorize('text_warning', dpath), errors))
            for line in output:
                error_lines.append(f" {line}")
        elif self.verbose:
            error_lines.append(
                "{}: ok".format(ui.colorize('text_success', dpath)))
        return error_lines

    def on_import_task_start(self, task, session):
        """If check_on_import is enabled, check the task's items and stash
        any failures on the task for the before_choice stage."""
        if not self.config['check_on_import'].get(False):
            return
        checks_failed = []
        for item in task.items:
            error_lines = self.check_item(item)
            if error_lines:
                checks_failed.append(error_lines)
        if checks_failed:
            task._badfiles_checks_failed = checks_failed

    def on_import_task_before_choice(self, task, session):
        """Show stashed failures, then ask whether to abort, skip or continue."""
        if hasattr(task, '_badfiles_checks_failed'):
            ui.print_('{} one or more files failed checks:'
                      .format(ui.colorize('text_warning', 'BAD')))
            for error in task._badfiles_checks_failed:
                for error_line in error:
                    ui.print_(error_line)
            ui.print_()
            ui.print_('What would you like to do?')
            sel = ui.input_options(['aBort', 'skip', 'continue'])
            if sel == 's':
                return importer.action.SKIP
            elif sel == 'c':
                return None
            elif sel == 'b':
                raise importer.ImportAbort()
            else:
                raise Exception(f'Unexpected selection: {sel}')

    def command(self, lib, opts, args):
        """Implementation of the `beet bad` command."""
        # Get items from arguments
        items = lib.items(ui.decargs(args))
        self.verbose = opts.verbose
        def check_and_print(item):
            for error_line in self.check_item(item):
                ui.print_(error_line)
        # par_map presumably fans the checks out in parallel (beets.util).
        par_map(check_and_print, items)

    def commands(self):
        """Register the `bad` subcommand with its --verbose option."""
        bad_command = Subcommand('bad',
                                 help='check for corrupt or missing files')
        bad_command.parser.add_option(
            '-v', '--verbose',
            action='store_true', default=False, dest='verbose',
            help='view results for both the bad and uncorrupted files'
        )
        bad_command.func = self.command
        return [bad_command]
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file takes two arguments, the relative location of the shell script that
# does the checking, and the name of the sysroot.
# TODO(brettw) the build/linux/sysroot_ld_path.sh script should be rewritten in
# Python in this file.
import subprocess
import sys
# Refuse to run without exactly two arguments: the checker script path
# and the sysroot name.
if len(sys.argv) != 3:
    print("Need two arguments")
    sys.exit(1)
# Run the shell script and echo its (stripped) output wrapped in quotes.
ld_paths = subprocess.check_output([sys.argv[1], sys.argv[2]], universal_newlines=True).strip()
print('"' + ld_paths + '"')
|
import numpy as np
import random
from machine_learning import dataset, plot
def dist2_min(d):
    """Single linkage: minimum squared distance between two clusters."""
    return d.min()


def dist2_max(d):
    """Complete linkage: maximum squared distance between two clusters."""
    return d.max()


def dist2_avg(d):
    """Average linkage: mean squared distance between two clusters."""
    return d.sum() / (d.shape[0] * d.shape[1])


def agnes(datas, num_clusters, dist_functor=dist2_avg):
    """AGNES bottom-up clustering; yields (iteration, labels) after each merge.

    datas: (m, n_features) array of samples.
    num_clusters: target number of clusters.
    dist_functor: reduces a (|c1|, |c2|) block of pairwise squared distances
        to a single inter-cluster distance.
    Note: the yielded labels array is mutated in place across iterations.
    """
    k = num_clusters
    m = len(datas)
    indexes_m = range(m)
    # Pairwise squared Euclidean distances, shape (m, m).
    dist = np.square(datas[:, np.newaxis, :] - datas[np.newaxis, :, :]).sum(axis=2)
    # Each sample starts as its own cluster, labelled by its own index.
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int dtype.
    clusters = np.arange(m, dtype=int)
    clusters_dist = dist.copy()
    clusters_dist[indexes_m, indexes_m] = np.inf  # ignore self-distances
    q = m
    i = 0
    # BUG FIX: the original condition also tested
    # `clusters_dist.min() is np.inf`, but an identity check against a fresh
    # float64 scalar is always False, so the loop effectively ran while q > k.
    while q > k:
        # Closest pair of data points between distinct clusters.
        r, c = np.unravel_index(clusters_dist.argmin(), clusters_dist.shape)
        c1, c2 = clusters[r], clusters[c]
        # merge c2 into c1
        clusters[clusters == c2] = c1
        # retire c2's rows and columns
        clusters_dist[:, c2] = np.inf
        clusters_dist[c2, :] = np.inf
        # refresh distances between the merged cluster c1 and every other ci
        dist_c1 = dist[clusters == c1]
        c_indexes = np.unique(clusters)
        for ci in c_indexes:
            if ci == c1:
                continue
            data_indexes_ci = np.where(clusters == ci)[0]
            dist_c1_ci = dist_c1[:, data_indexes_ci]
            d = dist_functor(dist_c1_ci)
            clusters_dist[[ci, c1], [c1, ci]] = d
        # hand the current labelling to the caller
        yield i, clusters
        # next iteration
        q = len(c_indexes)
        i += 1
def update(f, plt, datas):
    """Advance the clustering generator one step and redraw the plot.

    Does nothing once the generator is exhausted.
    """
    try:
        iteration, labels = next(f)
    except StopIteration:
        return
    plt.erase()
    plt.set_title('Iterator: {} Clusters: {}'.format(iteration, len(np.unique(labels))))
    plt.paint({
        'point_x': datas[:, 0], 'point_y': datas[:, 1],
        'labels': labels, 'num_clusters': len(datas)})
    plt.show()
# Build a demo point set, then advance one AGNES merge per mouse click on
# the interactive cluster plot.
points, point_labels = dataset.point.area(bounds=(-10, -10, 20, 20), num_area=(2, 2), num_area_points=(3, 4), area_border_ratio=0.3)
merges = agnes(points, num_clusters=len(np.unique(point_labels)))
cluster_plot = plot.create_cluster_plot()
cluster_plot.connect_event('button_press_event', lambda _: update(merges, cluster_plot, points))
cluster_plot.show({'point_x': points[:, 0], 'point_y': points[:, 1]})
|
#Simple unit tests covering preprocess_data.py, encode_image.py, vgg16.py and imagenet_utils.py
import pytest
from preprocess_data import preprocessing
from vgg16 import VGG16
from encode_image import model_gen,encodings
from imagenet_utils import preprocess_input as i_preprocess_input
import os
import numpy as np
from keras import backend as K
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input as k_preprocess_input #distinguish from imagenet_utils.preprocess_input!
# Locations of the Flickr8K fixtures and the image used by the tests.
captions_folder = 'Flickr8K_Text'
image_folder = 'Flickr8K_Data'
test_image = '3666056567_661e25f54c.jpg'
@pytest.fixture(scope='module')
def load_example_image(request):
    """Load the test image as a (1, 224, 224, 3) array batch, shared
    across the module's tests."""
    path = os.path.join(image_folder, test_image)
    img = image.load_img(path, target_size=(224, 224))
    arr = image.img_to_array(img)
    return np.expand_dims(arr, axis=0)
def test_preprocess():
    '''
    Wherein we test preprocess_data.preprocessing(). We check whether the
    number of test/training examples as well as the 42nd entries of both
    output files, trainimgs.txt and testimgs.txt, agree with the truth.
    '''
    preprocessing()
    # `with` closes the fixture files (the original leaked both handles).
    with open(os.path.join(captions_folder, 'trainimgs.txt'), 'r') as f:
        train_captions = f.read().split('\n')
    with open(os.path.join(captions_folder, 'testimgs.txt'), 'r') as f:
        test_captions = f.read().split('\n')
    assert len(train_captions) == 30001  # the last line yields one stray empty string
    assert len(test_captions) == 5001
    assert train_captions[42] == '2638369467_8fc251595b.jpg\t<start> A smiling young girl in braids is playing ball . <end>'
    assert test_captions[42] == '280706862_14c30d734a.jpg\t<start> A black dog on a beach carrying a ball in its mouth . <end>'
@pytest.mark.slow
def test_vgg16(load_example_image):
    '''
    Wherein we test vgg16.py. We create the model, load a test image, perform the
    encoding, and test the result for correct shape, correct number of nonzero
    entries, correct first 100 entries.
    '''
    model=VGG16(include_top=True,weights='imagenet')
    x=load_example_image
    x=k_preprocess_input(x)
    preds=model.predict(x)
    # Regression fixture: first 100 fc2 activations for the test image,
    # presumably captured from a known-good run — regenerate if weights change.
    vgg16_pred_slice=np.array([0,0,0,0,6.281284,0,0,0,0,0.3895438,0,0,1.1056916,0,0,1.7175925,0,1.8917649,0,0,1.7590455,6.602196,0.43233678,0.4528695,0,3.6373415,3.5424447,2.4427955,0,0,3.206093,2.5562162,0,0,2.2278,0,3.2318673,2.3519747,0,6.4740357,0,2.8249745,0,0,9.216705,0,0,0,2.835004,2.1258173,2.2908218,1.0856905,2.6308882,0.91681635,0,0,0,0,0,0,0.40301943,0,0,0,3.2203531,0.2024988,0,0,0,0,0,0,1.2414606,0,0,0,1.2453537,0,4.912657,0,4.361135,0,0,0,0,0,0,0,2.4128122,0.68293965,0.7010477,1.3694913,1.7609701,0,0,0,0,0,0,0,])
    assert(x.shape==(1,224,224,3))
    assert(preds.shape==(1,4096))
    # Exactly 1368 of the 4096 activations are nonzero for this image.
    assert(len(preds[0][preds[0]!=0])==1368)
    np.testing.assert_allclose(preds[0,:100],vgg16_pred_slice,err_msg='VGG16 encoding yielded wrong encoding for test image!')
def test_utils_preprocessing(load_example_image):
    '''
    Wherein we test imagenet_utils.preprocess_input, which is slightly different
    from keras.applications.imagenet_utils.preprocess_input. It reorders the
    axes, flips the order of colours, and subtracts constant values from each
    colour channel.
    '''
    dim_ordering=K.image_dim_ordering()
    assert(dim_ordering in ['tf','th'])
    x=load_example_image
    x_pre=x.copy()
    x=i_preprocess_input(x)
    if dim_ordering=='th':
        #BGR -> RGB; channels live on axis 1 in Theano ordering
        x=x[ :, ::-1, :, : ]
        diff=x-x_pre
        colR=diff[ :, 0, :, : ]
        colG=diff[ :, 1, :, : ]
        colB=diff[ :, 2, :, : ]
    else:
        #BGR -> RGB; channels live on the last axis in TensorFlow ordering
        x=x[ :, :, :, ::-1 ]
        diff=x-x_pre
        colR=diff[ :, :, :, 0 ]
        colG=diff[ :, :, :, 1 ]
        colB=diff[ :, :, :, 2 ]
    # Each channel should be shifted by the negated ImageNet BGR channel mean.
    np.testing.assert_allclose(colR,-103.939)
    np.testing.assert_allclose(colG,-116.779)
    # BUG FIX: this was assert_allclose(colB,-123,68), i.e. desired=-123 with
    # rtol=68 — a tolerance so large it passes almost anything. The intended
    # VGG blue-channel mean is 123.68.
    np.testing.assert_allclose(colB,-123.68)
@pytest.mark.slow
def test_encodings():
    '''
    We test encode_image.encodings(), which is just a wrapper for VGG16.
    This proceeds in the same fashion as in test_vgg16(). The truth value for
    the encodings differ slightly, since the encodings() wrapper uses another
    preprocessing routine defined in imagenet_utils.
    '''
    model=model_gen()
    preds=encodings(model=model,path=os.path.join(image_folder,test_image))
    # Regression fixture: first 100 entries of the expected encoding,
    # presumably captured from a known-good run — regenerate if weights change.
    encoding_pred_slice=np.array([0,0,0,0,6.943786,0,0,0,0,0.495808,0,0,1.2099614,0,0,1.9550853,0,2.5830698,0,0,1.5520213,6.7467823,0.30691597,0.6208435,0,3.8465405,3.7862554,2.3970299,0,0,3.5254822,2.7294598,0,0,2.7226853,0,3.2338202,2.3976898,0,6.3592043,0,2.7090664,0,0,10.004378,0,0,0,3.0425727,2.0538316,1.8156273,0.15581878,2.3381875,0.88823074,0,0,0,0,0,0,0.036334604,0,0,0,3.5556676,0.29299664,0,0,0,0,0,0,1.0033253,0,0,0,0.96017045,0,5.8062425,0,4.4312,0,0,0,0,0,0,0,2.7901797,0.5715834,0.76234996,1.7294867,1.2244358,0,0,0,0,0,0,0,])
    assert(preds.shape==(4096,))
    # Exactly 1351 of the 4096 entries are nonzero for this image.
    assert(len(preds[preds!=0])==1351)
    np.testing.assert_allclose(preds[:100],encoding_pred_slice,err_msg='encoding wrapper yielded wrong encoding for test image!')
|
from prometheus_client.registry import REGISTRY
from prometheus_client import start_http_server
from helpers import global_config, logs
import yaml, logging, asyncio
from pprint import pprint
from .collector import OpenHABCollector
# Module-level logger wired to the exporter's shared handler and level.
logger = logging.getLogger('collector')
logger.addHandler(logs.ch)
logger.setLevel(global_config._LOGLEVEL)
def run():
    """Register the OpenHAB collector, start the Prometheus metrics HTTP
    server and block forever serving scrapes."""
    REGISTRY.register(OpenHABCollector())
    logger.info(f'Starting webserver on port {global_config.PORT}...')
    start_http_server(global_config.PORT)
    # asyncio.get_event_loop() is deprecated outside a running loop
    # (Python 3.10+); create and install the loop we own explicitly.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_forever()
    finally:
        loop.close()
|
import unittest
from Drone import Drone
from Battery import Battery
class DroneTestMethods(unittest.TestCase):
    """Unit tests for basic Drone start/stop state transitions."""

    def test_start_stop(self):
        # A fresh drone starts once (0) and refuses a second start (-1);
        # likewise it stops once (0) and refuses a second stop (-1).
        battery = Battery(300, 100, 3)
        drone = Drone(5, (10, 20), (10, 20, 0), 0.2, (0, 0), battery)
        self.assertEqual(drone.start(), 0)
        self.assertEqual(drone.start(), -1)
        self.assertEqual(drone.stop(), 0)
        self.assertEqual(drone.stop(), -1)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
# Toy in-memory "database" of customer records, keyed by id.
_customer_rows = [
    {"id": 0, "name": "John", "city": "San Francisco", "email": "johny.bravo@cn.com"},
    {"id": 1, "name": "Mark", "city": "Las Vegas", "email": "mark.zuckerberg@fb.com"},
    {"id": 2, "name": "Thomas", "city": "Birmingham", "email": "thomas.shelby@peakyblinder.com"},
]
customers = {row["id"]: row for row in _customer_rows}
def search(data, target, search):
    """Print and return data[target][search].

    data: mapping of records keyed by id; target: record key; search: field
    name. (The third parameter shadows the function name but is kept for
    backward compatibility with keyword-argument callers.)
    Raises KeyError if either key is missing.
    """
    value = data[target][search]
    print(value)
    return value
# Demo lookups: positional and keyword forms both work because the third
# parameter is literally named `search`.
search(customers,1,"email")
search(customers,2, search = "email")
# Print the even numbers between 0 and a user-supplied number
# without using an `if` statement.
num = int(input('Número: '))
# BUG FIX: drop to the nearest even number first so odd inputs also print
# their even numbers (the original's `num%2 == 0` guard skipped the whole
# loop for odd input). Still no `if` needed.
num = num - (num % 2)
while (num != 0):
    print(num)
    num = num - 2
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from delivery.views import CourierViewSet, OrderViewSet
# Register both viewsets on a DRF router; each basename matches its prefix.
router = DefaultRouter()
for prefix, viewset in (('couriers', CourierViewSet), ('orders', OrderViewSet)):
    router.register(prefix, viewset, basename=prefix)

urlpatterns = [path('', include(router.urls))]
|
def cicloHamiltoniano(grafo):
    """Return a Hamiltonian cycle starting at node 0 as a list of nodes,
    or [] if the graph has none.

    grafo: adjacency mapping {node: iterable of neighbours}; nodes are
    assumed to be 0..len(grafo)-1.
    """
    percorso = [0]       # current path, always starting at node 0
    visitati = {0}       # nodes already on the path, for O(1) lookups

    def estendi():
        """Depth-first extension of the path; True once a cycle closes."""
        corrente = percorso[-1]
        if len(percorso) == len(grafo):
            # All nodes used: success only if we can return to the start.
            return 0 in grafo[corrente]
        for vicino in grafo[corrente]:
            if vicino in visitati:
                continue
            visitati.add(vicino)
            percorso.append(vicino)
            if estendi():
                return True
            # Backtrack: undo the tentative step.
            percorso.pop()
            visitati.remove(vicino)
        return False

    estendi()
    return percorso if len(percorso) == len(grafo) else []
|
# Split the input into digits, letters and everything else, then print
# each group on its own line (order within a group is preserved).
text = input()
digit_chars = [c for c in text if c.isdigit()]
letter_chars = [c for c in text if c.isalpha()]
other_chars = [c for c in text if not c.isdigit() and not c.isalpha()]
print("".join(digit_chars))
print("".join(letter_chars))
print("".join(other_chars))
|
import requests
def main():
    """Fetch google.com and print the HTTP status code and response body."""
    response = requests.get("http://www.google.com")
    # response = requests.get("http://www.google.com/random-address/")
    print("Status Code: ", response.status_code)
    # print("Headers: ", response.headers)
    # print("'Content-Type': ", response.headers['Content-Type'])
    body = response.text
    print("Content: ", body)
if __name__ == "__main__":
main()
|
from bs4 import BeautifulSoup
import random
import requests
from fake_useragent import UserAgent
import datetime
import traceback
from scraper.functionScraper import *
from scraper.classListingObject import *
from scraper.classHelpClasses import *
from classes.classPostalData import *
from classes.SBBAPI import *
def babyScraper(url):
    """Scrape one property-listing page, populate a listingObject, compare the
    listing against communal averages, and report public-transport access.

    url: full URL of the listing page to scrape.
    """
    print("--> Check URL: "+url)
    # CREATE LISTING OBJECT
    listing = listingObject(DBOperations("kezenihi_srmidb3"))
    # c = requests.get(url[1], proxies=proxies, headers={'user-agent': headers}).content
    c = None
    try:
        c = requests.get(url).content
    except Exception as e:
        print("An error occured")
        print(e)
    if (c is not None):
        print("Url checked successfully")
        soup = BeautifulSoup(c, 'html.parser')
        try:
            listing.description = soup.select('#div_Description')[0].text.strip().replace("\n", "")
        except:
            print("No description available")
        listing.address = soup.select('section.content-section .column .row.column .align-middle h3.text-green')[0].text.strip()
        # CONVERT ADDRESS VIA GOOGLE GEOLOCATION API
        print("RUN GEOLOCATION API")
        # SECURITY(review): hard-coded API key committed to source — move it
        # to configuration/environment and revoke this one.
        api_key = "AIzaSyDLpLwScHEHrVpIQOVjSj0RaCOwrkuViNI"
        query = (listing.address+",+Switzerland").replace(" ", "+")
        # query = "Berneggstrasse 54, 9000 St. Gallen, Switzerland"
        print(query)
        try:
            googleurl = 'https://maps.googleapis.com/maps/api/geocode/json?address=' + query + '&lang=de&key=' + api_key
            result = requests.get(googleurl)
            data = result.json()
            location = data['results'][0]
            listing.latitude = location['geometry']['location']['lat']
            listing.longitude = location['geometry']['location']['lng']
            # Flatten the address components by type so the postal code can
            # be looked up by key below.
            for i in location['address_components']:
                for category in i['types']:
                    data[category] = i['long_name']
            listing.postalCode = data.get("postal_code", None)
        except Exception as e:
            print(e)
            print("Google API was not successful")
        # GET ATTRIBUTES GRID INFOS
        listingAvailable = False
        try:
            infos = get_list_content_pairs(soup)
            listingAvailable = True
        except:
            print("Listing is not available anymore.")
        if (listingAvailable is True):
            for info in infos:
                # Each pair is a <dt> field label and a <dd> value.
                # (renamed from `type`, which shadowed the builtin)
                field = info.find('dt').string.strip()
                if (field == "Property type"):
                    listing.category = info.find('dd').string.strip()
                if (field == "Rent per month"):
                    result = info.find('dd').string.strip()
                    if(result != "On request"):
                        substitutions = {"CHF ": "", ",": ""}
                        listing.price = replaceMultiple(result, substitutions)
                if (field == "Rent per day"):
                    result = info.find('dd').string.strip()
                    if(result != "On request"):
                        substitutions = {"CHF ": "", ",": ""}
                        listing.pricePerDay = replaceMultiple(result, substitutions)
                if (field == "Rent per week"):
                    result = info.find('dd').string.strip()
                    if(result != "On request"):
                        substitutions = {"CHF ": "", ",": ""}
                        listing.pricePerWeek = replaceMultiple(result, substitutions)
                if (field == "Annual rent per m²"):
                    result = info.find('dd').string.strip()
                    if(result != "On request"):
                        substitutions = {"CHF ": "", ",": ""}
                        listing.pricePerYear = replaceMultiple(result, substitutions)
                if (field == "Rent per month (without charges)"):
                    result = info.find('dd').string.strip()
                    if(result != "On request"):
                        substitutions = {"CHF ": "", ",": ""}
                        listing.primaryCosts = replaceMultiple(result, substitutions)
                if (field == "Supplementary charges"):
                    result = info.find('dd').string.strip()
                    if(result != "On request"):
                        substitutions = {"CHF ": "", ",": ""}
                        listing.additionalCosts = replaceMultiple(result, substitutions)
                if (field == "Living space"):
                    result = info.find('dd').string.strip().replace(" m²", "")
                    if(result != "On request"):
                        listing.size = result
                if (field == "Floor space"):
                    result = info.find('dd').string.strip().replace(" m²", "")
                    if(result != "On request"):
                        listing.floorSpace = result
                if (field == "Property area"):
                    result = info.find('dd').string.strip().replace(" m²", "")
                    if(result != "On request"):
                        listing.propertyArea = result
                if (field == "Rooms"):
                    listing.rooms = info.find('dd').string.strip().replace("½", ".5")
                if (field == "Floor"):
                    result = info.find('dd').string.strip().replace(". floor", "")
                    # Ground floor is stored as 0, basement as -1.
                    if (result == "Ground floor"):
                        listing.floor = 0
                    elif (result == "Basement"):
                        listing.floor = -1
                    else:
                        listing.floor = result
                if (field == "Available"):
                    listing.available = info.find('dd').string.strip()
                if (field == "Year of construction"):
                    listing.construction = info.find('dd').string.strip()
                if (field == "Lift"):
                    listing.elevator = 1
                if (field == "Balcony/ies"):
                    listing.balconies = 1
                if (field == "Motorway"):
                    listing.motorway = info.find('dd').string.strip().replace(" m", "")
                if (field == "Shops"):
                    listing.shops = info.find('dd').string.strip().replace(" m", "")
                if (field == "Public transport stop"):
                    listing.publicTransport = info.find('dd').string.strip().replace(" m", "")
                if (field == "Kindergarten"):
                    listing.kindergarten = info.find('dd').string.strip().replace(" m", "")
                if (field == "Primary school"):
                    listing.primarySchool = info.find('dd').string.strip().replace(" m", "")
                if (field == "Secondary school"):
                    listing.secondarySchool = info.find('dd').string.strip().replace(" m", "")
                if (field == "Minergie certified"):
                    listing.minergie = 1
                if (field == "Pets allowed"):
                    listing.pets = 1
                if (field == "Child-friendly"):
                    listing.childFriendly = 1
                if (field == "Cable TV"):
                    listing.cableTV = 1
                if (field == "New building"):
                    listing.newBuilding = 1
                if (field == "Wheelchair accessible"):
                    listing.wheelchair = 1
                if (field == "Outdoor parking"):
                    listing.parkingOutdoor = 1
                if (field == "Indoor parking"):
                    listing.parkingIndoor = 1
                if (field == "Veranda"):
                    listing.veranda = 1
                if (field == "Swimming pool"):
                    listing.pool = 1
    else:
        # BUG FIX: the original left this message as a bare string expression
        # (a no-op); it must be printed to reach the user.
        print("There was a problem with accessing the URL, please try again.")
    # COMPARE AGAINST COMMUNAL AVERAGES FOR THE SAME POSTAL CODE
    postalObject = postalCodeData(DBOperations("kezenihi_srmidb3")).average(listing.postalCode, "price", "size")
    print("\nComparison for this listing\n")
    try:
        diffAvgPrice = int(listing.price) - int(postalObject['avgPrice'])
        if (diffAvgPrice > 0):
            print("The price for this property is "+str(diffAvgPrice)+" higher than the communal average\n")
        else:
            print("The price for this property is "+str(diffAvgPrice)+" lower than the communal average\n")
    except:
        print("Unfortunately no price could be found for this listing\n")
    try:
        diffAvgSize = int(listing.size) - int(postalObject['avgSize'])
        if (diffAvgSize > 0):
            print("The size of this property is "+str(diffAvgSize)+" larger than the communal average\n")
        else:
            print("The size of this property is "+str(diffAvgSize)+" smaller than the communal average\n")
    except:
        print("Unfortunately no size could be found for this listing\n")
    try:
        diffAvgMeterPrice = (int(listing.price)/int(listing.size)) - int(postalObject['avgMeterPrice'])
        if (diffAvgMeterPrice > 0):
            print("The price per square meter of this property is "+str(diffAvgMeterPrice)+" higher than the communal average\n")
        elif (diffAvgMeterPrice < 0):
            print("The price per square meter of this property is "+str(diffAvgMeterPrice)+" lower than the communal average\n")
    except:
        print("Unfortunately no price per square meter could be calculated for this property\n")
    print("Details about public transport availability for this property:\n")
    SBB(DBOperations("kezenihi_srmidb3")).getClosestStop(lat=listing.latitude, long=listing.longitude)
|
# Bubble sort with the early-exit optimization: stop as soon as a full
# pass performs no swaps. Prints the list at the start of each pass and
# once more when finished.
A = [1, 2, 3, 8, 7, 9]
for pass_index in range(0, len(A) - 1):
    print(A)
    swapped = False
    # Bubble the smallest remaining element leftwards.
    for j in range(len(A) - 1, pass_index, -1):
        if A[j] < A[j - 1]:
            A[j], A[j - 1] = A[j - 1], A[j]
            swapped = True
    if not swapped:
        break
print(A)
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
from tkinter import *
# Minimal Tkinter demo window: a greeting label above an OK button.
root = Tk()
root.title("Aplicaion grafica en python")
greeting = Label(root, text="Hola Mundo!!!")
greeting.pack()
ok_button = Button(root, text="OK!!!")
ok_button.pack()
root.mainloop()
|
from open_pension_crawler.OpenPensionCrawlSpiderBase import OpenPensionCrawlSpiderBase
class ClalbitSpider(OpenPensionCrawlSpiderBase):
    """Crawl Clalbit's pension/provident financial-reports page for report
    spreadsheet files."""
    name = 'clalbit'
    # Restrict the crawl to Clalbit's own domain.
    allowed_domains = ['clalbit.co.il']
    start_urls = ['https://www.clalbit.co.il/pensiongemel/financialreports/funds/clalpension/Pages/default.aspx']
    # Matches report filenames like 12345_b123_0417.xlsx:
    # <digits>_<b|g|p|m><up to 4 digits>_<01-04><2 digits>.<xls|xlsx>
    # BUG FIX: the original class [b|g|p|m] also matched a literal '|',
    # and the unescaped '.' before the extension matched any character.
    regex = r'[0-9]*_[bgpm][0-9]{0,4}_(01|02|03|04)[0-9]{2}\.(xlsx|xls)'
|
import redis
client = redis.Redis()

# Build a set of names in 'myset'.
client.sadd('myset', 'Andik')
client.sadd('myset', 'Sergio')
client.sadd('myset', 'irfan')
client.sadd('myset', 'Zoya')

# Build a second set, 'employee', sharing two members with 'myset'.
client.sadd('employee', 'Andik')
client.sadd('employee', 'Sergio')
client.sadd('employee', 'Lilipaly')
client.sadd('employee', 'Beni')
client.sadd('employee', 'Bayu')
client.sadd('employee', 'Boaz')
client.sadd('employee', 'Kurnia')
client.sadd('employee', 'Rizky')

print(client.smembers('myset'))  # print the set's members
print(client.sismember('myset', 'Andik'))  # membership test: True
print(client.sismember('myset', 'Siroch Chatong'))  # membership test: False
# Remove one member. BUG FIX: the original removed 'Zola', which was never
# added ('Zoya' was), so the srem was a silent no-op.
client.srem('myset', 'Zoya')
print(client.smembers('myset'))
print(client.sdiff('employee', 'myset'))   # set difference
print(client.sinter('employee', 'myset'))  # set intersection
print(client.sunion('employee', 'myset'))  # set union
# Clean up the demo keys.
client.delete('myset')
client.delete('employee')
#!/usr/bin/env python
import rospy
import cv2
import math
from sensor_msgs.msg import Image
from struct import unpack
from cv_bridge import CvBridge, CvBridgeError
from cmvision.msg import Blobs, Blob
import copy
# Change this to your desired image topic
defaultImageTopic = "/camera/rgb/image_color"
# Latest color image message and a flag marking that one has arrived.
colorImage = Image()
isColorImageReady = False
# Latest blob-detection message from cmvision and its readiness flag.
blobsData = Blobs()
isBlobsReady = False
# Latest depth image message and its readiness flag.
depthImage = Image()
isDepthImageReady = False
def updateColorImage(data):
    """ROS subscriber callback: cache the newest color image and mark it ready."""
    global colorImage, isColorImageReady
    colorImage = data
    isColorImageReady = True
def updateBlobs(data):
    """ROS subscriber callback: cache the newest blob message and mark it ready."""
    global blobsData, isBlobsReady
    blobsData = data
    isBlobsReady = True
def updateDepthImage(data):
    """ROS subscriber callback: cache the newest depth image and mark it ready."""
    global depthImage, isDepthImageReady
    depthImage = data
    isDepthImageReady = True
def getDepth(x, y):
    """Return the depth at pixel (x, y) of the cached depth image.

    The image encodes one 4-byte float per pixel; ``step`` is the row
    stride in bytes.
    """
    global depthImage
    step = depthImage.step
    offset = (y * step) + (x * 4)
    # Slice out the 4-byte pixel and unpack it as a float. BUG FIX: the
    # original concatenated four individually indexed bytes with `+`, which
    # breaks on Python 3 where indexing bytes yields ints; slicing works for
    # both Python 2 str and Python 3 bytes payloads.
    (dist,) = unpack('f', depthImage.data[offset:offset + 4])
    return dist
def filterByMinimumArea(blob, minimumArea):
    """Return True when the blob covers at least minimumArea pixels."""
    return blob.area >= minimumArea
def main():
global colorImage, isColorImageReady, blobsData, isBlobsReady, depthImage, isDepthImageReady
rospy.init_node('object_sizer', anonymous=True)
rospy.Subscriber(defaultImageTopic, Image, updateColorImage, queue_size=10)
rospy.Subscriber('/blobs', Blobs, updateBlobs)
rospy.Subscriber('/camera/depth/image', Image, updateDepthImage, queue_size=10)
bridge = CvBridge()
cv2.namedWindow("Color Image")
while (not isColorImageReady or not isBlobsReady or not isDepthImageReady) and not rospy.is_shutdown():
pass
while not rospy.is_shutdown():
try:
color_image = bridge.imgmsg_to_cv2(colorImage, "bgr8")
except CvBridgeError, e:
print e
# Create a deep copy of blobsData
blobsCopy = copy.deepcopy(blobsData)
if blobsCopy.blob_count > 0:
for box in blobsCopy.blobs:
if filterByMinimumArea(box, 100):
startX = box.left
endX = box.right
startY = box.top
endY = box.bottom
cv2.rectangle(color_image, (startX,startY), (endX,endY), (0,255,0), 2)
cv2.imshow("Color Image", color_image)
cv2.waitKey(1)
cv2.destroyAllWindows()
# Script entry point: run the viewer loop until ROS shuts down.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'CertifyDlg.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_CertifyDlg(object):
    """pyuic5-generated UI scaffold for the captcha dialog.

    Generated from CertifyDlg.ui -- regenerate with pyuic5 instead of
    editing by hand (per the generator's warning above).
    """
    def setupUi(self, CertifyDlg):
        # Fixed-geometry widget construction, exactly as emitted by pyuic5.
        CertifyDlg.setObjectName("CertifyDlg")
        CertifyDlg.resize(431, 381)
        CertifyDlg.setMinimumSize(QtCore.QSize(328, 250))
        self.checkBox_2 = QtWidgets.QCheckBox(CertifyDlg)
        self.checkBox_2.setGeometry(QtCore.QRect(30, 70, 101, 23))
        self.checkBox_2.setObjectName("checkBox_2")
        self.tiplabel = QtWidgets.QLabel(CertifyDlg)
        self.tiplabel.setGeometry(QtCore.QRect(50, 320, 128, 19))
        self.tiplabel.setObjectName("tiplabel")
        self.widget = QtWidgets.QWidget(CertifyDlg)
        self.widget.setGeometry(QtCore.QRect(12, 44, 304, 224))
        self.widget.setObjectName("widget")
        self.formLayout = QtWidgets.QFormLayout(self.widget)
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.formLayout.setObjectName("formLayout")
        # Graphics view that displays the captcha image at a fixed size.
        self.certiImg = QtWidgets.QGraphicsView(self.widget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.certiImg.sizePolicy().hasHeightForWidth())
        self.certiImg.setSizePolicy(sizePolicy)
        self.certiImg.setMinimumSize(QtCore.QSize(293, 190))
        self.certiImg.setBaseSize(QtCore.QSize(293, 190))
        self.certiImg.setMouseTracking(False)
        self.certiImg.setObjectName("certiImg")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.certiImg)
        self.certiImg.raise_()
        self.retranslateUi(CertifyDlg)
        # NOTE(review): wiring the checkbox state to tiplabel.setNum makes the
        # tip label show the raw state int (0/2) -- looks like a designer
        # artifact; confirm intent before relying on it.
        self.checkBox_2.stateChanged['int'].connect(self.tiplabel.setNum)
        QtCore.QMetaObject.connectSlotsByName(CertifyDlg)
    def retranslateUi(self, CertifyDlg):
        # Install the translatable, user-facing strings (kept verbatim).
        _translate = QtCore.QCoreApplication.translate
        CertifyDlg.setWindowTitle(_translate("CertifyDlg", "验证码"))
        self.checkBox_2.setText(_translate("CertifyDlg", "123"))
        self.tiplabel.setText(_translate("CertifyDlg", "点击图片进行选择"))
|
from PIL import Image
import random
# NOTE: Feel free to add in any constant values you find useful to use
BLACK = (0, 0, 0)  # RGB triple for pure black
WHITE = (255, 255, 255)  # RGB triple for pure white
# NOTE: Feel free to add in any helper functions to organize your code but
# do NOT rename any existing functions (or else, autograder
# won't be able to find them!!)
# HELPER FUNCTIONS
def multi_avg(x):
    """Return the componentwise average colour of a list of RGB pixels."""
    count = len(x)
    totals = [0, 0, 0]
    for pixel in x:
        for channel in range(3):
            totals[channel] += pixel[channel]
    return tuple(int(total / count) for total in totals)
def avg(x):
    """Return the mean of the three channel values of one RGB pixel."""
    red, green, blue = x
    return (red + green + blue) / 3
def unique_num(x):
    """Return the numbers 0 .. x-1 in a uniformly random order.

    Fixed: the old rejection-sampling loop drew from randint(0, x)
    *inclusive*, so it could emit the out-of-range value x (e.g. the
    invalid intensity 256 when called as unique_num(256)) while one
    in-range value went missing. It was also O(n^2); a shuffle is O(n).
    """
    nums = list(range(x))
    random.shuffle(nums)
    return nums
def remove_red(img: Image) -> Image:
    """Zero out the red channel of every pixel in img, in place, and
    return img."""
    width, height = img.size
    pixels = img.load()
    for col in range(width):
        for row in range(height):
            _, green, blue = pixels[col, row]
            pixels[col, row] = (0, green, blue)
    return img
def scale_new_image(orig_img, goal_width: int = None, goal_height: int = None):
    """
    Create and return a new image which resizes the given original Image,
    orig_img, to the given goal_width and goal_height using
    nearest-neighbour sampling. Each goal dimension that is not provided
    defaults to half the corresponding original dimension.
    Return the new image.
    """
    orig_width, orig_height = orig_img.size
    orig_pixels = orig_img.load()
    # Fixed: each missing dimension now defaults independently. Previously
    # the halving only happened when BOTH were omitted, so supplying just
    # one left the other as None and crashed Image.new.
    if not goal_width:
        goal_width = orig_width // 2
    if not goal_height:
        goal_height = orig_height // 2
    img = Image.new('RGB', (goal_width, goal_height))
    new_pixels = img.load()
    width_factor = goal_width / orig_width
    height_factor = goal_height / orig_height
    for i in range(goal_width):
        for j in range(goal_height):
            # int(...) keeps the source index integral: '//' with a float
            # factor yields a float, which the pixel accessor may reject.
            new_pixels[i, j] = orig_pixels[int(i // width_factor), int(j // height_factor)]
    return img
def greyscale(img: Image) -> Image:
    """
    Change the Image, img, to greyscale by taking the average color of each
    pixel in the image and setting the red, green and blue values of the
    pixel to that average. Return img.
    """
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            # Fixed: average three channels with /3; the old /2 brightened
            # the result and could exceed 255. Local renamed from 'avg',
            # which shadowed the module-level avg() helper.
            grey = int((r + g + b) / 3)
            pixels[i, j] = (grey, grey, grey)
    return img
def black_and_white(img: Image) -> Image:
    """
    Given an Image, img, update the img to have ONLY black or white pixels.
    Return img.
    A pixel becomes white when its channel average exceeds the "middle"
    grey level (255/2); otherwise it becomes black.
    """
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    threshold = 255 / 2  # the "middle" grey level
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            # Fixed: the pixel average must divide by 3 (three channels);
            # the old /2 skewed the comparison so most pixels came out white.
            pixave = (r + g + b) / 3
            if pixave > threshold:
                pixels[i, j] = (255, 255, 255)
            else:
                pixels[i, j] = (0, 0, 0)
    return img
def sepia(img: Image) -> Image:
    """
    Given an Image, img, update the img to have a sepia scheme. Return img.

    Each pixel (R, G, B) becomes (integer part of each, capped at 255):
        newRed   = 0.393*R + 0.769*G + 0.189*B
        newGreen = 0.349*R + 0.686*G + 0.168*B
        newBlue  = 0.272*R + 0.534*G + 0.131*B
    These coefficients are the recommended values for sepia tone.
    """
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    for i in range(img_width):
        for j in range(img_height):
            R, G, B = pixels[i, j]
            # Fixed: cap each channel at 255 as the spec requires; bright
            # pixels previously overflowed the valid intensity range.
            newRed = min(255, int(0.393 * R + 0.769 * G + 0.189 * B))
            newGreen = min(255, int(0.349 * R + 0.686 * G + 0.168 * B))
            newBlue = min(255, int(0.272 * R + 0.534 * G + 0.131 * B))
            pixels[i, j] = (newRed, newGreen, newBlue)
    return img
def normalize_brightness(img: Image) -> Image:
    """
    Normalize the brightness of the given Image img by:
    1. computing the average brightness of the picture: the mean over all
       pixels of (R + G + B) / 3,
    2. finding the factor x which multiplies that average to 128,
    3. multiplying the colors in each pixel by x.
    Return img.
    """
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    # Accumulate per-pixel brightness with a running total (no list needed).
    total = 0.0
    for i in range(img_width):
        for j in range(img_height):
            total += sum(pixels[i, j]) / 3
    # NOTE(review): a fully black image gives total_avg == 0 and would raise
    # ZeroDivisionError below -- confirm whether that input is possible.
    total_avg = total / (img_width * img_height)
    # Fixed: removed a stray debug print of total_avg.
    x = 128 / total_avg
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            # Factors > 1 can push channels past 255; presumably Pillow
            # handles out-of-range assignment -- confirm if it matters.
            pixels[i, j] = (int(r * x), int(g * x), int(b * x))
    return img
def sort_pixels(img: Image) -> Image:
    """
    Given an Image, img, sort (in non-descending order) each row of pixels
    by the average of their RGB values. Return the updated img.
    Tip: When testing this function out, first choose the greyscale
    feature; in a black and white image each row should then go from
    darkest to lightest if the sort is working correctly.
    """
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    for j in range(img_height):
        # Fixed: sort the row's pixels directly. The old code keyed a dict
        # by pixel average, so pixels sharing an average were silently
        # dropped and the tail of the row was left unsorted. Also removed a
        # stray debug print.
        row = sorted(
            (pixels[i, j] for i in range(img_width)),
            key=lambda p: (p[0] + p[1] + p[2]) / 3,
        )
        for i, pixel in enumerate(row):
            pixels[i, j] = pixel
    return img
def blur(img: Image) -> Image:
    """Return a NEW image in which every pixel is the average RGB value of
    its in-bounds neighbouring pixels (the centre pixel itself excluded).

    Edge and corner pixels simply average over the neighbours that exist,
    matching the original eight-way special-casing.
    """
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    new_img = Image.new('RGB', (img_width, img_height))
    new_pixels = new_img.load()
    for i in range(img_width):
        for j in range(img_height):
            # Collect the in-bounds neighbours; this replaces the old
            # corner/edge/interior special cases (which also read an unused
            # pixel from the blank output image each iteration).
            neighbours = [
                pixels[i + di, j + dj]
                for di in (-1, 0, 1)
                for dj in (-1, 0, 1)
                if (di or dj)
                and 0 <= i + di < img_width
                and 0 <= j + dj < img_height
            ]
            new_pixels[i, j] = multi_avg(neighbours)
    return new_img
def rotate_picture_90_left(img: Image) -> Image:
    """Return a NEW picture that is the given Image img rotated 90 degrees
    counter-clockwise (width and height swap)."""
    width, height = img.size
    src = img.load()
    rotated = Image.new('RGB', (height, width))
    dst = rotated.load()
    # Scatter each source pixel to its rotated position:
    # (col, row) -> (row, width - 1 - col).
    for col in range(width):
        for row in range(height):
            dst[row, width - 1 - col] = src[col, row]
    return rotated
def rotate_picture_90_right(img: Image) -> Image:
    """
    Return a NEW picture that is the given Image img rotated 90 degrees
    clockwise (width and height swap).
    """
    width, height = img.size
    src = img.load()
    rotated = Image.new('RGB', (height, width))
    dst = rotated.load()
    # Scatter each source pixel to its rotated position:
    # (col, row) -> (height - 1 - row, col).
    for col in range(width):
        for row in range(height):
            dst[height - 1 - row, col] = src[col, row]
    return rotated
def flip_horizontal(img: Image) -> Image:
    """
    Given an Image, img, update it in place so it looks like a mirror was
    placed horizontally down the middle (rows swap top-to-bottom).
    Return img.
    """
    width, height = img.size
    pixels = img.load()
    # Swap each row with its mirror row using tuple swapping; the middle
    # row (odd heights) stays put, so no temporary copy image is needed.
    for col in range(width):
        for row in range(height // 2):
            mirror = height - 1 - row
            pixels[col, row], pixels[col, mirror] = pixels[col, mirror], pixels[col, row]
    return img
def flip_vertical(img: Image) -> Image:
    """
    Given an Image, img, update it in place so it looks like a mirror was
    placed vertically down the middle (columns swap left-to-right).
    Return img.
    """
    width, height = img.size
    pixels = img.load()
    # Swap each column with its mirror column using tuple swapping; the
    # middle column (odd widths) stays put, so no temporary copy is needed.
    for col in range(width // 2):
        mirror = width - 1 - col
        for row in range(height):
            pixels[col, row], pixels[mirror, row] = pixels[mirror, row], pixels[col, row]
    return img
def kaleidoscope(img: Image) -> Image:
    """
    Given an Image, img, update it to create a kaleidoscope.
    You must maintain the size dimensions of the original image.
    Return the updated img.
    The kaleidoscope effect should have this image repeated four times:
    - the original image will be in the lower left quadrant
    - the lower right will be the original image flipped on the vertical axis
    - the two top images will be the bottom two images flipped on the horizontal axis
    Tip: You may want to use helper functions to organize the code here.
    This filter can be broken down into a series of other operations, such as
    flip vertical / horizontal, scale / downscale, etc.
    """
    img_width, img_height = img.size
    pixels = img.load()
    # A: half-size copy of the original, written into the lower-left quadrant.
    A_img = scale_new_image(img)
    A_width, A_height = A_img.size
    A_pixels = A_img.load()
    for i in range(A_width):
        for j in range(A_height):
            pixels[i,j+img_height//2] = A_pixels[i,j]
    # B: A mirrored left-to-right, written into the lower-right quadrant.
    B_img = flip_vertical(A_img)
    B_width, B_height = B_img.size
    B_pixels = B_img.load()
    for i in range(B_width):
        for j in range(B_height):
            pixels[i+img_width//2,j+img_height//2] = B_pixels[i,j]
    # C: snapshot of the assembled bottom half into a temporary image
    # (reading back from `pixels` avoids aliasing the rows we write next).
    C_img = Image.new('RGB', (img_width, A_height))
    C_pixels = C_img.load()
    for i in range(img_width):
        for j in range(A_height):
            C_pixels[i,j] = pixels[i,j+img_height//2]
    # D: bottom half flipped top-to-bottom, written into the top half.
    D_img = flip_horizontal(C_img)
    D_pixels = D_img.load()
    for i in range(img_width):
        for j in range(A_height):
            pixels[i,j] = D_pixels[i,j]
    return img
def draw_border(img: Image) -> Image:
    """
    Given an Image, img, update it to have a five pixel wide black border
    around the edges. Return the updated img.
    """
    width, height = img.size
    pixels = img.load()  # create the pixel map
    border = 5  # border thickness in pixels
    # Paint every pixel within `border` of any edge black. This replaces
    # four duplicated loops that also unpacked each pixel into unused
    # r, g, b locals before overwriting it.
    for i in range(width):
        for j in range(height):
            if (i < border or i >= width - border
                    or j < border or j >= height - border):
                pixels[i, j] = (0, 0, 0)
    return img
def scramble(img: Image) -> Image:
    """
    Scramble the pixels in the image by re-assigning each color intensity
    value (0-255) to a unique randomly generated number. The mapping needed
    to decrypt is written to key.txt, one "(orig, new)" pair per line.
    Return the scrambled image.
    """
    # Fixed: build the 0..255 permutation directly with random.shuffle.
    # The old unique_num(256) helper drew from randint(0, 256) *inclusive*,
    # so an intensity could be mapped to the invalid value 256.
    nums = list(range(256))
    random.shuffle(nums)
    d = dict(enumerate(nums))
    # Persist the mapping so unscramble() can invert it (same line format
    # as before: the str() of each (orig, new) tuple).
    with open('key.txt', 'w') as key:
        for item in d.items():
            key.write(str(item) + '\n')
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            pixels[i, j] = (d[r], d[g], d[b])
    return img
def unscramble(img: Image) -> Image:
    """
    Unscramble the pixels in the image by re-assigning each color intensity
    value to its original value based on the information in the file named
    "key.txt" (assumed to exist). If the file is empty, do nothing and just
    return the original image back.
    """
    reverse_key = {}
    with open('key.txt', 'r') as key:
        for line in key:
            # Each line looks like "(orig, scrambled)"; invert the pair.
            x = line.strip(' ()\n').split(',')
            if int(x[1]) not in reverse_key:
                reverse_key[int(x[1])] = int(x[0])
            else:
                print(str(x[1]) + ' ' + str(x[0]))
    # Fixed: an empty key file used to raise KeyError on the first pixel;
    # the documented contract is to return the image unchanged.
    if not reverse_key:
        return img
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    for i in range(img_width):
        for j in range(img_height):
            r, g, b = pixels[i, j]
            pixels[i, j] = (reverse_key[r], reverse_key[g], reverse_key[b])
    return img
def pastelify(img: Image) -> Image:
    '''
    changes the tint of a pic to a pinkish purplish pastel colour
    '''
    width, height = img.size
    pixels = img.load()
    # First pass: running total of per-pixel brightness.
    brightness_total = 0.0
    for col in range(width):
        for row in range(height):
            red, green, blue = pixels[col, row]
            brightness_total += (red + blue + green) / 3
    factor = 128 / (brightness_total / (width * height))
    # Second pass: rescale, deliberately swapping the green and blue
    # channels -- that swap is what shifts the tint toward pink/purple.
    for col in range(width):
        for row in range(height):
            red, green, blue = pixels[col, row]
            pixels[col, row] = (int(red * factor), int(blue * factor), int(green * factor))
    return img
def endgame(img: Image) -> Image:
    '''
    Re-arrange the pixels in the image according RGB averages to achieve
    the thanos snap effect from endgame
    '''
    img_width, img_height = img.size
    pixels = img.load()  # create the pixel map
    j = 0
    # Process one row at a time.
    while j < img_height:
        # Collect and sort the brightness averages for this row.
        pix_avs = []
        for i in range(img_width):
            r, g, b = pixels[i,j]
            avg_val = (r+g+b)/3
            pix_avs.append(avg_val)
        pix_avs.sort()
        # NOTE(review): this writes into the row while still reading from
        # it, and index() always returns the FIRST position of a duplicate
        # average -- so pixels can be copied over before being placed and
        # duplicates collapse. Presumably the glitchy result is the point
        # of the "snap" effect; confirm before "fixing".
        for item in pix_avs:
            for i in range(img_width):
                if avg(pixels[i,j]) == item:
                    # Reuses 'g' (the green channel local) as the target index.
                    g = pix_avs.index(item)
                    pixels[g,j] = pixels[i,j]
        j+=1
    return img
def inverse(img: Image) -> Image:
    """
    Returns a inverted colour image
    """
    width, height = img.size
    pixels = img.load()
    for col in range(width):
        for row in range(height):
            # Invert each channel: v -> 255 - v.
            red, green, blue = pixels[col, row]
            pixels[col, row] = (255 - red, 255 - green, 255 - blue)
    return img
# Maps each effect's menu label to the function implementing it.
COMMANDS = {
    "Remove red": remove_red,
    "Downscale": scale_new_image,
    "Greyscale": greyscale,
    "Black and White": black_and_white,
    "Sepia": sepia,
    "Normalize Brightness": normalize_brightness,
    "Sort Pixels": sort_pixels,
    "Blur": blur,
    "Kaleidoscope": kaleidoscope,
    "Rotate Clockwise": rotate_picture_90_right,
    "Rotate Counter-clockwise": rotate_picture_90_left,
    "Flip Horizontal": flip_horizontal,
    "Flip Vertical": flip_vertical,
    "Add border": draw_border,
    "Scramble": scramble,
    "Unscramble": unscramble,
    "Pastelify": pastelify,
    "EndGame": endgame,
    "Inverse": inverse
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from head import *
from threading import Timer
from db_base import MssqlConnection
def load_threshold(stopEvent, param, ):
    """
    Load the environment threshold ranges from the database, re-polling on a
    timer until *stopEvent* is set.

    :param stopEvent: threading.Event used to request shutdown
    :param param: unused placeholder parameter
    :rtype: ERR when the initial database access fails
    """
    # NOTE(review): 'threshold' is read/written below but never initialised
    # here -- presumably it comes in via 'from head import *'; confirm.
    def load(room_id, threshold):
        # Refresh threshold[room_id] from the database, keyed on the end
        # time of the currently active rule.
        db_inst = MssqlConnection()
        str_time = threshold[room_id][1][1].strftime("%Y-%m-%d %H:%M:%S")
        temp = db_inst.get_threshold(room_id, str_time)
        log_msg = str(temp)
        log_handler.debug(log_msg)
        if len(temp) == 2:
            # The current policy has not finished executing yet
            threshold[room_id][0] = temp[0]
            threshold[room_id][1] = (temp[1][0],temp[1][1])
            log_msg = 'Load Threshold of Room_id : %d \n%s' %(room_id, str(threshold[room_id][0]))
        elif len(temp) == 1:
            # Execution has reached the last rule of the current policy
            threshold[room_id][0] = temp[0]
            threshold[room_id][1] = (temp[0][0],temp[0][1])
            # Make sure the currently executing policy has fully finished
            if temp[0][1] <= datetime.now():
                # Set the state of this room's policy_instance to OLD
                db_inst.update_policy_instance_state(room_id, POLICY_OLD)
                # Check for and load the room's next policy to execute
                db_inst.transfor_room_absolute_time(room_id)
                log_msg = 'Policy in Room [%d] Complete, Last Threshold : \n%s' %(room_id, str(threshold[room_id][0]))
            log_handler.debug(log_msg)
        else:
            # The room has no pending new policy; all instances are old ones
            # Set the state of this room's policy_instance to OLD
            db_inst.update_policy_instance_state(room_id, POLICY_OLD)
            if db_inst.transfor_room_absolute_time(room_id) == FAI:
                # Two possible cases here: 1. no new policy awaiting
                # execution; 2. a new policy exists but its rule set is empty
                log_msg = 'There is no new policy in room %d currently' %room_id
                log_handler.debug(log_msg)
                return FAI
        log_handler.debug('[ROOM: %d] current threshold is : %s ' %(room_id, str(threshold)))
    def timer_work():
        # One polling pass: (re)load thresholds for every known room.
        for room_id in room_dict.keys():
            try:
                if not threshold.has_key(room_id):
                    # First threshold load after system start-up
                    threshold[room_id] = [(), (room_id, datetime.now())]
                    load(room_id, threshold)
                elif threshold[room_id][1][1] < datetime.now():
                    load(room_id, threshold)
            except Exception, e:
                log_msg = 'Something wrong with the database when try loading threshold !!!'
                log_handler.error(log_msg)
                continue
    log_msg = 'Thread Load Threshold is Ready ...'
    log_handler.work(log_msg)
    try:
        db_inst = MssqlConnection()
        room_dict = db_inst.room_id2desc
        db_inst.transfor_absolute_time()
    except Exception:
        log_msg = 'Something wrong with the database when try transforing absolute time !!!'
        log_handler.error(log_msg)
        return ERR
    pointer = 0
    timer = Timer(THRESHOLD_LOAD_CYCLE, timer_work)
    # Main loop: when the DB is reachable, run one timed polling pass and
    # wait for it; otherwise back off through the reconnect-cycle schedule.
    while not stopEvent.isSet():
        dbIsReady = MssqlConnection.test_connection()
        if dbIsReady == SUC:
            timer = Timer(THRESHOLD_LOAD_CYCLE, timer_work)
            timer.setName(THREAD_POLICY)
            timer.start()
            while timer.isAlive():
                sleep(0.1)
        else:
            log_msg = 'Something wrong with the database, system will reconnect in %d seconds !!!' %db_reconnect_cycle[pointer]
            log_handler.error(log_msg)
            sleep(db_reconnect_cycle[pointer])
            pointer = (pointer + 1) % len(db_reconnect_cycle)
    timer.cancel()
    log_msg = 'Load Threshold Thread shutdown and cleaned! '
    log_handler.work(log_msg)
    # log_manager.add_work_log(log_msg, sys._getframe().f_code.co_name)
    print '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
    exit()
# Manual test entry point: run the loader until Ctrl-C sets the stop event.
if __name__ == '__main__':
    main_event = Event()
    try:
        load_threshold(main_event, '')
    except KeyboardInterrupt:
        main_event.set()
|
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
"""
****** NOTE: ALL THE CODE BELOW ARE TAKEN FROM TORCHIO WITH MODIFICATION******
https://github.com/fepegar/torchio
"""
import torch
import numpy as np
import numbers
from abc import ABC, abstractmethod
from typing import Optional, Union, Tuple, Sequence, Iterable, List, Dict
import nibabel as nib
import scipy.ndimage as ndi
import os.path
import sys
import socket
def add_path(path):
    """Prepend *path* to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
# Records whether the optional torchio dependency was importable.
IMPORT_TORCHIO = True
socket_name = socket.gethostname()
this_dir = os.path.dirname(__file__)
# Default: 'torchio_package' checkout three directory levels up from here.
lib_path = os.path.normpath(os.path.join(this_dir, '..', '..', '..', 'torchio_package'))
if 'scinet' in socket_name:
    # Hard-coded scratch location used on the SciNet cluster.
    lib_path = '/scratch/m/mgoubran/mbiparva/torchio_package/'
add_path(os.path.normpath(lib_path))
try:
    from torchio.transforms.augmentation.spatial.random_elastic_deformation import RandomElasticDeformation as ElasticDeformationTIO
    from torchio.transforms.augmentation.intensity.random_motion import RandomMotion as MotionTIO
    from torchio import Subject, ScalarImage
except ImportError:
    IMPORT_TORCHIO = False
# Type aliases mirroring torchio's typing module.
TypeTripletInt = Tuple[int, int, int]
TypeTuple = Union[int, TypeTripletInt]
# NOTE(review): duplicate of the TypeTripletInt alias above -- harmless,
# but a candidate for removal.
TypeTripletInt = Tuple[int, int, int]
TypeLocations = Sequence[Tuple[TypeTripletInt, TypeTripletInt]]
TypeRangeFloat = Union[float, Tuple[float, float]]
TypeData = Union[torch.Tensor, np.ndarray]
TypeTripletFloat = Tuple[float, float, float]
TypeNumber = Union[int, float]
TypeTransformInput = Union[
    torch.Tensor,
    np.ndarray,
    dict,
    nib.Nifti1Image,
]
TypeSextetFloat = Tuple[float, float, float, float, float, float]
def parse_range(
        nums_range: Union[TypeNumber, Tuple[TypeNumber, TypeNumber]],
        name: str,
        min_constraint: TypeNumber = None,
        max_constraint: TypeNumber = None,
        type_constraint: type = None,
) -> Tuple[TypeNumber, TypeNumber]:
    r"""Adapted from ``torchvision.transforms.RandomRotation``.
    Args:
        nums_range: Tuple of two numbers :math:`(n_{min}, n_{max})`,
            where :math:`n_{min} \leq n_{max}`.
            If a single positive number :math:`n` is provided,
            :math:`n_{min} = -n` and :math:`n_{max} = n`.
        name: Name of the parameter, so that an informative error message
            can be printed.
        min_constraint: Minimal value that :math:`n_{min}` can take,
            default is None, i.e. there is no minimal value.
        max_constraint: Maximal value that :math:`n_{max}` can take,
            default is None, i.e. there is no maximal value.
        type_constraint: Precise type that :math:`n_{max}` and
            :math:`n_{min}` must take.
    Returns:
        A tuple of two numbers :math:`(n_{min}, n_{max})`.
    Raises:
        ValueError: if :attr:`nums_range` is negative
        ValueError: if :math:`n_{max}` or :math:`n_{min}` is not a number
        ValueError: if :math:`n_{max} \lt n_{min}`
        ValueError: if :attr:`min_constraint` is not None and
            :math:`n_{min}` is smaller than :attr:`min_constraint`
        ValueError: if :attr:`max_constraint` is not None and
            :math:`n_{max}` is greater than :attr:`max_constraint`
        ValueError: if :attr:`type_constraint` is not None and
            :math:`n_{max}` and :math:`n_{max}` are not of type
            :attr:`type_constraint`.
    """
    if isinstance(nums_range, numbers.Number):  # single number given
        if nums_range < 0:
            raise ValueError(
                f'If {name} is a single number,'
                f' it must be positive, not {nums_range}')
        if min_constraint is not None and nums_range < min_constraint:
            raise ValueError(
                f'If {name} is a single number, it must be greater'
                f' than {min_constraint}, not {nums_range}'
            )
        if max_constraint is not None and nums_range > max_constraint:
            raise ValueError(
                f'If {name} is a single number, it must be smaller'
                f' than {max_constraint}, not {nums_range}'
            )
        if type_constraint is not None:
            if not isinstance(nums_range, type_constraint):
                raise ValueError(
                    f'If {name} is a single number, it must be of'
                    f' type {type_constraint}, not {nums_range}'
                )
        # Symmetric (-n, n) by default; with a min_constraint the lower
        # bound stays n itself (mirrors upstream torchio behaviour).
        min_range = -nums_range if min_constraint is None else nums_range
        return (min_range, nums_range)
    # Otherwise nums_range must be a 2-sequence; validate both endpoints.
    try:
        min_value, max_value = nums_range
    except (TypeError, ValueError):
        raise ValueError(
            f'If {name} is not a single number, it must be'
            f' a sequence of len 2, not {nums_range}'
        )
    min_is_number = isinstance(min_value, numbers.Number)
    max_is_number = isinstance(max_value, numbers.Number)
    if not min_is_number or not max_is_number:
        message = (
            f'{name} values must be numbers, not {nums_range}')
        raise ValueError(message)
    if min_value > max_value:
        raise ValueError(
            f'If {name} is a sequence, the second value must be'
            f' equal or greater than the first, but it is {nums_range}')
    if min_constraint is not None and min_value < min_constraint:
        raise ValueError(
            f'If {name} is a sequence, the first value must be greater'
            f' than {min_constraint}, but it is {min_value}'
        )
    if max_constraint is not None and max_value > max_constraint:
        raise ValueError(
            f'If {name} is a sequence, the second value must be smaller'
            f' than {max_constraint}, but it is {max_value}'
        )
    if type_constraint is not None:
        min_type_ok = isinstance(min_value, type_constraint)
        max_type_ok = isinstance(max_value, type_constraint)
        if not min_type_ok or not max_type_ok:
            raise ValueError(
                f'If "{name}" is a sequence, its values must be of'
                f' type "{type_constraint}", not "{type(nums_range)}"'
            )
    return nums_range
def to_tuple(
        value: Union[TypeNumber, Iterable[TypeNumber]],
        length: int = 1,
) -> Union[TypeTripletFloat, Tuple[TypeNumber, ...]]:
    """
    to_tuple(1, length=1) -> (1,)
    to_tuple(1, length=3) -> (1, 1, 1)
    If value is an iterable, length is ignored and tuple(value) is returned
    to_tuple((1,), length=1) -> (1,)
    to_tuple((1, 2), length=1) -> (1, 2)
    to_tuple([1, 2], length=3) -> (1, 2)
    """
    # EAFP: attempt the iterable conversion first; a scalar raises
    # TypeError and is repeated `length` times instead.
    try:
        return tuple(value)
    except TypeError:
        return length * (value,)
def to_range(n, around):
    """Return (0, n) when no centre is given, otherwise the symmetric
    interval of radius n around *around*."""
    return (0, n) if around is None else (around - n, around + n)
def parse_params(params, around, name, make_ranges=True, **kwargs):
    """Expand *params* into six per-axis values, optionally turning each
    per-axis number into a (min, max) range centred on *around*; extra
    keyword arguments are forwarded to parse_range for validation."""
    params = to_tuple(params)
    # Broadcast a scalar or single range to all three axes.
    if len(params) == 1 or (len(params) == 2 and make_ranges):  # d or (a, b)
        params *= 3  # (d, d, d) or (a, b, a, b, a, b)
    if len(params) == 3 and make_ranges:  # (a, b, c)
        items = [to_range(n, around) for n in params]
        # (-a, a, -b, b, -c, c) or (1-a, 1+a, 1-b, 1+b, 1-c, 1+c)
        params = [n for prange in items for n in prange]
    if make_ranges:
        if len(params) != 6:
            message = (
                f'If "{name}" is a sequence, it must have length 2, 3 or 6,'
                f' not {len(params)}'
            )
            raise ValueError(message)
        # Validate each consecutive (min, max) pair.
        for param_range in zip(params[::2], params[1::2]):
            parse_range(param_range, name, **kwargs)
    return tuple(params)
def sample_uniform(a, b):
    """Draw a single sample from U(a, b) as a 1-element float tensor."""
    return torch.FloatTensor(1).uniform_(a, b)

def sample_uniform_sextet(self, params):
    """Sample one uniform value per (low, high) pair in *params*.

    *params* holds six numbers (a1, b1, a2, b2, a3, b3); the result is a
    3-element tensor. Fixed: this module-level function was lifted from a
    torchio method and still called ``self.sample_uniform``, which fails
    for any plain caller; it now uses the module-level ``sample_uniform``.
    The (unused) ``self`` parameter is kept so existing call sites that
    pass an instance keep working.
    """
    results = []
    for (a, b) in zip(params[::2], params[1::2]):
        results.append(sample_uniform(a, b))
    return torch.Tensor(results)
class Transformable(ABC):
    """Base for deterministic transforms: calling the instance always
    applies apply_transform to the given volume."""
    def __call__(self, volume):
        return self.apply_transform(volume)
    @abstractmethod
    def apply_transform(self, volume):
        # Subclasses implement the actual volume transformation.
        raise NotImplementedError
class Randomizeable(ABC):
    """Base for random transforms: calling the instance applies
    apply_transform with probability ``p`` and is a no-op otherwise."""
    def __init__(self, p: float = 0.5):
        # Probability that apply_transform runs when the instance is called.
        self.p = p
    def __call__(self, volume):
        # Single uniform draw; skip the transform with probability 1 - p.
        if torch.rand(1).item() > self.p:
            return volume
        return self.apply_transform(volume)
    @abstractmethod
    def apply_transform(self, volume):
        raise NotImplementedError
class FourierTransform:
    """Mixin providing centred n-dimensional FFT helpers (numpy)."""

    @staticmethod
    def fourier_transform(array: np.ndarray) -> np.ndarray:
        # Forward FFT with the zero-frequency component shifted to the centre.
        return np.fft.fftshift(np.fft.fftn(array))

    @staticmethod
    def inv_fourier_transform(fshift: np.ndarray) -> np.ndarray:
        # Undo the shift, then invert the FFT (output is complex).
        return np.fft.ifftn(np.fft.ifftshift(fshift))
class RandomSpike(Randomizeable, FourierTransform):
    r"""Add random MRI spike artifacts.
    Also known as `Herringbone artifact
    <https://radiopaedia.org/articles/herringbone-artifact?lang=gb>`_,
    crisscross artifact or corduroy artifact, it creates stripes in different
    directions in image space due to spikes in k-space.
    Args:
        num_spikes: Number of spikes :math:`n` present in k-space.
            If a tuple :math:`(a, b)` is provided, then
            :math:`n \sim \mathcal{U}(a, b) \cap \mathbb{N}`.
            If only one value :math:`d` is provided,
            :math:`n \sim \mathcal{U}(0, d) \cap \mathbb{N}`.
            Larger values generate more distorted images.
        intensity: Ratio :math:`r` between the spike intensity and the maximum
            of the spectrum.
            If a tuple :math:`(a, b)` is provided, then
            :math:`r \sim \mathcal{U}(a, b)`.
            If only one value :math:`d` is provided,
            :math:`r \sim \mathcal{U}(-d, d)`.
            Larger values generate more distorted images.
        p: Probability that this transform will be applied.
        keys: See :class:`~torchio.transforms.Transform`.
    .. note:: The execution time of this transform does not depend on the
        number of spikes.
    """
    def __init__(
            self,
            num_spikes: Union[int, Tuple[int, int]] = 1,
            intensity: Union[float, Tuple[float, float]] = (1, 3),
            p: float = 1,
    ):
        super().__init__(p)
        # Normalise both parameters to validated (min, max) ranges.
        self.intensity_range = parse_range(
            intensity, 'intensity_range')
        self.num_spikes_range = parse_range(
            num_spikes, 'num_spikes', min_constraint=0, type_constraint=int)
    def apply_transform(self, volume: torch.Tensor) -> torch.Tensor:
        # Sample this invocation's parameters, then delegate the actual
        # artifact insertion to a deterministic Spike transform.
        arguments = dict()
        spikes_positions_param, intensity_param = self.get_params(
            self.num_spikes_range,
            self.intensity_range,
        )
        arguments['spikes_positions'] = spikes_positions_param
        arguments['intensity'] = intensity_param
        transform = Spike(**arguments)
        volume = transform(volume)
        return volume
    @staticmethod
    def get_params(
            num_spikes_range: Tuple[int, int],
            intensity_range: Tuple[float, float],
    ) -> Tuple[np.ndarray, float]:
        """Sample a spike count, random k-space positions (fractions in
        [0, 1) per axis) and one intensity value from the given ranges."""
        ns_min, ns_max = num_spikes_range
        num_spikes_param = torch.randint(ns_min, ns_max + 1, (1,)).item()
        intensity_param = sample_uniform(*intensity_range)
        spikes_positions = torch.rand(num_spikes_param, 3).numpy()
        return spikes_positions, intensity_param.item()
class Spike(Transformable, FourierTransform):
    r"""Add MRI spike artifacts.

    Also known as `Herringbone artifact
    <https://radiopaedia.org/articles/herringbone-artifact?lang=gb>`_,
    crisscross artifact or corduroy artifact, it creates stripes in different
    directions in image space due to spikes in k-space.

    Args:
        spikes_positions: :math:`n \times 3` array of spike positions,
            expressed as fractions of the spectrum shape (values are
            multiplied by the shape and floored to get voxel indices).
        intensity: Ratio :math:`r` between the spike intensity and the maximum
            of the spectrum.
        keys: See :class:`~torchio.transforms.Transform`.
            NOTE(review): accepted but not stored or used here — confirm
            whether it should be forwarded somewhere.

    .. note:: The execution time of this transform does not depend on the
        number of spikes.
    """
    def __init__(
        self,
        spikes_positions: Union[np.ndarray, Dict[str, np.ndarray]],
        intensity: Union[float, Dict[str, float]],
        keys: Optional[Sequence[str]] = None,
    ):
        self.spikes_positions = spikes_positions
        self.intensity = intensity
        # Names of the arguments that parametrize this transform.
        self.args_names = 'spikes_positions', 'intensity'
        # When True, spikes are subtracted instead of added (used to undo).
        self.invert_transform = False

    def apply_transform(self, volume: torch.tensor) -> torch.tensor:
        """Apply the spike artifact independently to each channel."""
        spikes_positions = self.spikes_positions
        intensity = self.intensity
        transformed_tensors = []
        for channel in volume:
            transformed_tensor = self.add_artifact(
                channel,
                spikes_positions,
                intensity,
            )
            transformed_tensors.append(transformed_tensor)
        volume = torch.stack(transformed_tensors)
        return volume

    def add_artifact(
        self,
        tensor: torch.Tensor,
        spikes_positions: np.ndarray,
        intensity_factor: float,
    ):
        """Add (or subtract) spikes to the tensor's k-space and invert back."""
        array = np.asarray(tensor)
        spectrum = self.fourier_transform(array)
        shape = np.array(spectrum.shape)
        mid_shape = shape // 2
        # Fractional positions -> integer voxel indices in the spectrum.
        indices = np.floor(spikes_positions * shape).astype(int)
        for index in indices:
            diff = index - mid_shape
            i, j, k = mid_shape + diff
            # Spike magnitude is relative to the spectrum's maximum.
            artifact = spectrum.max() * intensity_factor
            if self.invert_transform:
                spectrum[i, j, k] -= artifact
            else:
                spectrum[i, j, k] += artifact
            # If we wanted to add a pure cosine, we should add spikes to both
            # sides of k-space. However, having only one is a better
            # representation of the actual cause of the artifact in real
            # scans. Therefore the next two lines have been removed.
            # #i, j, k = mid_shape - diff
            # #spectrum[i, j, k] = spectrum.max() * intensity_factor
        # Discard the imaginary part introduced by the asymmetric spectrum.
        result = np.real(self.inv_fourier_transform(spectrum))
        return torch.from_numpy(result.astype(np.float32))
class RandomGhosting(Randomizeable):
    r"""Add random MRI ghosting artifact.

    Discrete "ghost" artifacts may occur along the phase-encode direction
    whenever the position or signal intensity of imaged structures within the
    field-of-view vary or move in a regular (periodic) fashion. Pulsatile flow
    of blood or CSF, cardiac motion, and respiratory motion are the most
    important patient-related causes of ghost artifacts in clinical MR imaging
    (from `mriquestions.com <http://mriquestions.com/why-discrete-ghosts.html>`_).

    Args:
        num_ghosts: Number of 'ghosts' :math:`n` in the image.
            If :attr:`num_ghosts` is a tuple :math:`(a, b)`, then
            :math:`n \sim \mathcal{U}(a, b) \cap \mathbb{N}`.
            If only one value :math:`d` is provided,
            :math:`n \sim \mathcal{U}(0, d) \cap \mathbb{N}`.
        axes: Axis along which the ghosts will be created. If
            :attr:`axes` is a tuple, the axis will be randomly chosen
            from the passed values. Anatomical labels may also be used (see
            :class:`~torchio.transforms.augmentation.RandomFlip`).
        intensity: Positive number representing the artifact strength
            :math:`s` with respect to the maximum of the :math:`k`-space.
            If ``0``, the ghosts will not be visible. If a tuple
            :math:`(a, b)` is provided then :math:`s \sim \mathcal{U}(a, b)`.
            If only one value :math:`d` is provided,
            :math:`s \sim \mathcal{U}(0, d)`.
        restore: Number between ``0`` and ``1`` indicating how much of the
            :math:`k`-space center should be restored after removing the planes
            that generate the artifact.
        p: Probability that this transform will be applied.
        keys: See :class:`~torchio.transforms.Transform`.

    .. note:: The execution time of this transform does not depend on the
        number of ghosts.
    """
    def __init__(
        self,
        num_ghosts: Union[int, Tuple[int, int]] = (4, 10),
        axes: Union[int, Tuple[int, ...]] = (0, 1, 2),
        intensity: Union[float, Tuple[float, float]] = (0.5, 1),
        restore: float = 0.02,
        p: float = 1,
    ):
        super().__init__(p)
        if not isinstance(axes, tuple):
            try:
                axes = tuple(axes)
            except TypeError:
                axes = (axes,)
        for axis in axes:
            if not isinstance(axis, str) and axis not in (0, 1, 2):
                # BUG FIX: report the offending axis, not the whole tuple.
                raise ValueError(f'Axes must be in (0, 1, 2), not "{axis}"')
        self.axes = axes
        self.num_ghosts_range = parse_range(
            num_ghosts, 'num_ghosts', min_constraint=0, type_constraint=int)
        self.intensity_range = parse_range(
            intensity, 'intensity_range', min_constraint=0)
        self.restore = self._parse_restore(restore)

    @staticmethod
    def _parse_restore(restore):
        """Validate that ``restore`` is a number in ``[0, 1]``.

        Returns:
            The value as a ``float``.

        Raises:
            TypeError: If ``restore`` is not an ``int`` or ``float``.
            ValueError: If ``restore`` is outside ``[0, 1]``.
        """
        # BUG FIX: the docstring promises "a number between 0 and 1", but
        # integer 0 or 1 used to raise TypeError. Accept ints and normalize.
        if not isinstance(restore, (int, float)):
            raise TypeError(f'Restore must be a float, not {restore}')
        if not 0 <= restore <= 1:
            message = (
                f'Restore must be a number between 0 and 1, not {restore}')
            raise ValueError(message)
        return float(restore)

    def apply_transform(self, volume: torch.tensor) -> torch.tensor:
        """Sample ghosting parameters and apply a deterministic ``Ghosting``."""
        arguments = dict()
        # if any(isinstance(n, str) for n in self.axes):
        #     subject.check_consistent_orientation()
        #     is_2d = image.is_2d()
        axes = self.axes
        num_ghosts_param, axis_param, intensity_param = self.get_params(
            self.num_ghosts_range,
            axes,
            self.intensity_range,
        )
        arguments['num_ghosts'] = num_ghosts_param
        arguments['axis'] = axis_param
        arguments['intensity'] = intensity_param
        arguments['restore'] = self.restore
        transform = Ghosting(**arguments)
        volume = transform(volume)
        return volume

    def get_params(
        self,
        num_ghosts_range: Tuple[int, int],
        axes: Tuple[int, ...],
        intensity_range: Tuple[float, float],
    ) -> Tuple:
        """Draw the number of ghosts, the affected axis and the intensity."""
        ng_min, ng_max = num_ghosts_range
        # randint's upper bound is exclusive, hence the + 1.
        num_ghosts = torch.randint(ng_min, ng_max + 1, (1,)).item()
        axis = axes[torch.randint(0, len(axes), (1,))]
        intensity = self.sample_uniform(*intensity_range).item()
        return num_ghosts, axis, intensity
class Ghosting(Transformable, FourierTransform):
    r"""Add MRI ghosting artifact.

    Discrete "ghost" artifacts may occur along the phase-encode direction
    whenever the position or signal intensity of imaged structures within the
    field-of-view vary or move in a regular (periodic) fashion. Pulsatile flow
    of blood or CSF, cardiac motion, and respiratory motion are the most
    important patient-related causes of ghost artifacts in clinical MR imaging
    (from `mriquestions.com <http://mriquestions.com/why-discrete-ghosts.html>`_).

    Args:
        num_ghosts: Number of 'ghosts' :math:`n` in the image.
        axis: Axis along which the ghosts will be created.
        intensity: Positive number representing the artifact strength
            :math:`s` with respect to the maximum of the :math:`k`-space.
            If ``0``, the ghosts will not be visible.
        restore: Number between ``0`` and ``1`` indicating how much of the
            :math:`k`-space center should be restored after removing the planes
            that generate the artifact.
            NOTE(review): currently only the single central plane is
            restored, regardless of this value — confirm intended behavior.

    .. note:: The execution time of this transform does not depend on the
        number of ghosts.
    """
    def __init__(
        self,
        num_ghosts: Union[int, Dict[str, int]],
        axis: Union[int, Dict[str, int]],
        intensity: Union[float, Dict[str, float]],
        restore: Union[float, Dict[str, float]],
    ):
        self.axis = axis
        self.num_ghosts = num_ghosts
        self.intensity = intensity
        self.restore = restore
        # Names of the arguments that parametrize this transform.
        self.args_names = 'num_ghosts', 'axis', 'intensity', 'restore'

    def apply_transform(self, volume: torch.tensor) -> torch.tensor:
        """Apply the ghosting artifact independently to each channel."""
        transformed_tensors = []
        for tensor in volume:
            transformed_tensor = self.add_artifact(
                tensor,
                self.num_ghosts,
                self.axis,
                self.intensity,
                self.restore,
            )
            transformed_tensors.append(transformed_tensor)
        volume = torch.stack(transformed_tensors)
        return volume

    def add_artifact(
        self,
        tensor: torch.Tensor,
        num_ghosts: int,
        axis: int,
        intensity: float,
        restore_center: float,
    ):
        """Attenuate every ``num_ghosts``-th k-space plane along ``axis``."""
        # Nothing to do if no ghosts are requested or they would be invisible.
        if not num_ghosts or not intensity:
            return tensor
        array = tensor.numpy()
        spectrum = self.fourier_transform(array)
        # BUG FIX: removed the dead computation of ri, rj, rk from
        # restore_center — the values were never used. The parameter is
        # kept for interface compatibility (see class docstring note).
        mi, mj, mk = np.array(array.shape) // 2
        # Variable "planes" is the part of the spectrum that will be modified.
        # Keep a copy of the central plane so it can be restored afterwards.
        if axis == 0:
            planes = spectrum[::num_ghosts, :, :]
            restore = spectrum[mi, :, :].copy()
        elif axis == 1:
            planes = spectrum[:, ::num_ghosts, :]
            restore = spectrum[:, mj, :].copy()
        elif axis == 2:
            planes = spectrum[:, :, ::num_ghosts]
            restore = spectrum[:, :, mk].copy()
        else:
            raise NotImplementedError
        # Attenuate the selected planes; intensity 1 removes them entirely.
        planes *= 1 - intensity
        # Restore the center of k-space to avoid extreme artifacts.
        if axis == 0:
            spectrum[mi, :, :] = restore
        elif axis == 1:
            spectrum[:, mj, :] = restore
        elif axis == 2:
            spectrum[:, :, mk] = restore
        array_ghosts = self.inv_fourier_transform(spectrum)
        array_ghosts = np.real(array_ghosts).astype(np.float32)
        return torch.from_numpy(array_ghosts)
class RandomBlur(Transformable):
    r"""Blur an image using a random-sized Gaussian filter.

    Args:
        std: Tuple :math:`(a_1, b_1, a_2, b_2, a_3, b_3)` with the ranges
            (in mm) of the standard deviations
            :math:`(\sigma_1, \sigma_2, \sigma_3)` of the Gaussian kernels
            blurring each axis, where :math:`\sigma_i \sim \mathcal{U}(a_i, b_i)`.
            Two values :math:`(a, b)` mean :math:`\sigma_i \sim \mathcal{U}(a, b)`;
            a single value :math:`x` means :math:`\sigma_i \sim \mathcal{U}(0, x)`;
            three values :math:`(x_1, x_2, x_3)` mean
            :math:`\sigma_i \sim \mathcal{U}(0, x_i)`.
        p: Probability that this transform will be applied.
        keys: See :class:`~torchio.transforms.Transform`.
    """
    def __init__(
        self,
        std: Union[float, Tuple[float, float]] = (0, 2),
        p: float = 1,
    ):
        super().__init__(p)
        self.std_ranges = parse_params(std, None, 'std', min_constraint=0)

    def apply_transform(self, volume: torch.tensor) -> torch.tensor:
        """Sample one std triplet per channel and apply a deterministic ``Blur``."""
        sampled_stds = [self.get_params(self.std_ranges) for _ in volume]
        return Blur(std=sampled_stds)(volume)

    @staticmethod
    def get_params(std_ranges: TypeSextetFloat) -> TypeTripletFloat:
        """Draw one standard deviation per axis from the configured ranges."""
        return sample_uniform_sextet(std_ranges)
class Blur(Transformable):
    r"""Blur an image using a Gaussian filter.

    Args:
        std: Tuple :math:`(\sigma_1, \sigma_2, \sigma_3)` with the standard
            deviations (in mm) of the Gaussian kernels used to blur the image
            along each axis.
        spacing: Voxel spacing of the volume; default is 1.
    """
    def __init__(
        self,
        std: Union[TypeTripletFloat, Dict[str, TypeTripletFloat], int],
        spacing: Union[TypeTripletFloat, Dict[str, TypeTripletFloat], int] = 1,
    ):
        self.std = std
        self.spacing = spacing

    def apply_transform(self, volume: torch.tensor) -> torch.tensor:
        """Blur every channel with its own std/spacing pair."""
        per_channel_stds = to_tuple(self.std, length=len(volume))
        per_channel_spacings = to_tuple(self.spacing, length=len(volume))
        blurred_channels = [
            self.blur(channel, channel_spacing, channel_std)
            for channel_std, channel_spacing, channel
            in zip(per_channel_stds, per_channel_spacings, volume)
        ]
        return torch.stack(blurred_channels)

    @staticmethod
    def blur(
        data: torch.tensor,
        spacing: Union[int, float],
        std_voxel: Union[int, float],
    ) -> torch.Tensor:
        """Gaussian-blur a single 3D channel.

        The std is divided by the spacing — assumes std is given in mm and
        spacing in mm/voxel, yielding a sigma in voxels (TODO confirm).
        """
        assert data.ndim == 3
        sigma = np.array(std_voxel) / np.array(spacing)
        return torch.from_numpy(ndi.gaussian_filter(data, sigma))
class RandomBiasField(Transformable):
    r"""Add random MRI bias field artifact.

    MRI magnetic field inhomogeneity creates intensity
    variations of very low frequency across the whole image.

    The bias field is modeled as a linear combination of
    polynomial basis functions, as in K. Van Leemput et al., 1999,
    *Automated model-based tissue classification of MR images of the brain*.

    It was implemented in NiftyNet by Carole Sudre and used in
    `Sudre et al., 2017, Longitudinal segmentation of age-related
    white matter hyperintensities
    <https://www.sciencedirect.com/science/article/pii/S1361841517300257?via%3Dihub>`_.

    Args:
        coefficients: Maximum magnitude :math:`n` of polynomial coefficients.
            If a tuple :math:`(a, b)` is specified, then
            :math:`n \sim \mathcal{U}(a, b)`.
        order: Order of the basis polynomial functions.
        p: Probability that this transform will be applied.
        keys: See :class:`~torchio.transforms.Transform`.
    """
    def __init__(
        self,
        coefficients: Union[float, Tuple[float, float]] = 0.5,
        order: int = 3,
        p: float = 1,
    ):
        super().__init__(p)
        self.coefficients_range = parse_range(
            coefficients, 'coefficients_range')
        self.order = self._parse_order(order)

    def apply_transform(self, volume: torch.tensor) -> torch.tensor:
        """Sample polynomial coefficients and apply a deterministic ``BiasField``."""
        coefficients = self.get_params(
            self.order,
            self.coefficients_range,
        )
        # BUG FIX: the keyword must match BiasField.__init__'s parameter name
        # (`coefficient`); passing `coefficients=` raised a TypeError.
        transform = BiasField(coefficient=coefficients, order=self.order)
        volume = transform(volume)
        return volume

    @staticmethod
    def get_params(
        order: int,
        coefficients_range: Tuple[float, float],
    ) -> List[float]:
        """Sample one coefficient per polynomial basis term up to ``order``."""
        # Sampling of the appropriate number of coefficients for the creation
        # of the bias field map
        random_coefficients = []
        for x_order in range(0, order + 1):
            for y_order in range(0, order + 1 - x_order):
                for _ in range(0, order + 1 - (x_order + y_order)):
                    number = sample_uniform(*coefficients_range)
                    random_coefficients.append(number.item())
        return random_coefficients

    @staticmethod
    def _parse_order(order):
        """Validate that ``order`` is a non-negative int."""
        if not isinstance(order, int):
            raise TypeError(f'Order must be an int, not {type(order)}')
        if order < 0:
            raise ValueError(f'Order must be a positive int, not {order}')
        return order
class BiasField(Transformable):
    r"""Add MRI bias field artifact.

    Args:
        coefficient: Magnitudes of the polynomial coefficients.
            NOTE(review): despite the name, this value is parsed as a
            *range* and new random coefficients are drawn from it on every
            call (see :meth:`get_coefficients`), so this class is not fully
            deterministic — confirm intended behavior.
        order: Order of the basis polynomial functions.
    """
    def __init__(
        self,
        coefficient: Union[List[float], Dict[str, List[float]], int],
        order: Union[int, Dict[str, int]],
    ):
        self.order = self._parse_order(order)
        # The given coefficient(s) are interpreted as a sampling range.
        self.coefficients_range = parse_range(coefficient, 'coefficients_range')
        # When True, the field is inverted (1 / field) to undo the transform.
        self.invert_transform = False

    def apply_transform(self, volume: torch.tensor, normalize: bool = False) -> torch.tensor:
        """Multiply the volume by a smooth polynomial bias field.

        Args:
            volume: Input tensor; first axis is channels.
            normalize: If True, rescale the field to [0, 1] before applying.
        """
        coefficients_range, order = self.coefficients_range, self.order
        coefficients = self.get_coefficients(self.order, coefficients_range)
        bias_field = self.generate_bias_field(volume, order, coefficients)
        if self.invert_transform:
            # In-place reciprocal: undoing a multiplicative field.
            np.divide(1, bias_field, out=bias_field)
        bias_field = torch.from_numpy(bias_field)
        if normalize:
            bias_field -= bias_field.min()
            if bias_field.max() > 0:
                bias_field /= bias_field.max()
        volume = volume * bias_field
        return volume

    @staticmethod
    def generate_bias_field(
        data: TypeData,
        order: int,
        coefficients: Union[TypeData, List],
    ) -> np.ndarray:
        """Build the multiplicative field as exp(polynomial) over the grid."""
        # Create the bias field map using a linear combination of polynomial
        # functions and the coefficients previously sampled
        shape = np.array(data.shape[1:])  # first axis is channels
        half_shape = shape / 2
        # Coordinates centered on the volume, normalized to roughly [-1, 1].
        ranges = [np.arange(-n, n) for n in half_shape]
        bias_field = np.zeros(shape)
        x_mesh, y_mesh, z_mesh = np.asarray(np.meshgrid(*ranges))
        x_mesh /= x_mesh.max()
        y_mesh /= y_mesh.max()
        z_mesh /= z_mesh.max()
        i = 0
        # One coefficient per monomial x^a * y^b * z^c with a+b+c <= order.
        for x_order in range(order + 1):
            for y_order in range(order + 1 - x_order):
                for z_order in range(order + 1 - (x_order + y_order)):
                    coefficient = coefficients[i]
                    new_map = (
                        coefficient
                        * x_mesh ** x_order
                        * y_mesh ** y_order
                        * z_mesh ** z_order
                    )
                    # NOTE(review): transpose compensates for meshgrid's
                    # default 'xy' indexing (axes 0 and 1 swapped) — the
                    # original author also flagged this ("why?"); confirm.
                    bias_field += np.transpose(new_map, (1, 0, 2))
                    i += 1
        # Exponentiate so the field is strictly positive (multiplicative).
        bias_field = np.exp(bias_field).astype(np.float32)
        return bias_field

    @staticmethod
    def _parse_order(order):
        """Validate that ``order`` is a non-negative int."""
        if not isinstance(order, int):
            raise TypeError(f'Order must be an int, not {type(order)}')
        if order < 0:
            raise ValueError(f'Order must be a positive int, not {order}')
        return order

    @staticmethod
    def get_coefficients(
        order: int,
        coefficients_range: Union[List[float], float],
    ) -> List[float]:
        """Sample one coefficient per basis term from ``coefficients_range``.

        NOTE(review): the commented-out lines below suggest a list of
        coefficients was meant to be passed through unchanged; as written,
        fresh random values are drawn instead — confirm intended.
        """
        # if isinstance(coefficient, list):
        #     return coefficient
        # Setting the appropriate number of coefficients for the creation of the bias field map
        coefficients = []
        for x_order in range(0, order + 1):
            for y_order in range(0, order + 1 - x_order):
                for _ in range(0, order + 1 - (x_order + y_order)):
                    number = sample_uniform(*coefficients_range)
                    coefficients.append(number.item())
        return coefficients
class RandomSwap(Randomizeable):
    r"""Randomly swap patches within an image.

    This is typically used in `context restoration for self-supervised learning
    <https://www.sciencedirect.com/science/article/pii/S1361841518304699>`_.

    Args:
        patch_size: Tuple of integers :math:`(w, h, d)` to swap patches
            of size :math:`w \times h \times d`.
            If a single number :math:`n` is provided, :math:`w = h = d = n`.
        num_iterations: Number of times that two patches will be swapped.
        p: Probability that this transform will be applied.
        keys: See :class:`~torchio.transforms.Transform`.
    """
    def __init__(
        self,
        patch_size: TypeTuple = 15,
        num_iterations: int = 100,
        p: float = 1,
    ):
        super().__init__(p)
        self.patch_size = np.array(to_tuple(patch_size))
        self.num_iterations = self._parse_num_iterations(num_iterations)

    @staticmethod
    def _parse_num_iterations(num_iterations):
        """Validate that ``num_iterations`` is a non-negative int."""
        # BUG FIX: both messages were missing the space between the two
        # string fragments ("an int,not 3").
        if not isinstance(num_iterations, int):
            raise TypeError('num_iterations must be an int, '
                            f'not {num_iterations}')
        if num_iterations < 0:
            raise ValueError('num_iterations must be positive, '
                             f'not {num_iterations}')
        return num_iterations

    def get_params(
        self,
        tensor: torch.Tensor,
        patch_size: np.ndarray,
        num_iterations: int,
    ) -> List[Tuple[TypeTripletInt, TypeTripletInt]]:
        """Sample ``num_iterations`` pairs of non-overlapping patch corners."""
        spatial_shape = tensor.shape[-3:]
        locations = []
        for _ in range(num_iterations):
            first_ini, first_fin = self.get_random_indices_from_shape(
                spatial_shape,
                patch_size,
            )
            while True:
                second_ini, second_fin = self.get_random_indices_from_shape(
                    spatial_shape,
                    patch_size,
                )
                # BUG FIX: two boxes overlap iff their half-open index
                # intervals intersect along every axis. The previous check
                # (second_ini >= first_ini AND second_fin <= first_fin) only
                # rejected a second patch fully contained in the first, so
                # partially overlapping patches could still be swapped.
                overlap = np.all(
                    (second_ini < first_fin) & (first_ini < second_fin))
                if not overlap:
                    break  # patches don't overlap
            location = tuple(first_ini), tuple(second_ini)
            locations.append(location)
        return locations

    def apply_transform(self, volume: torch.tensor) -> torch.tensor:
        """Sample swap locations and apply a deterministic ``Swap``."""
        arguments = dict()
        locations = self.get_params(
            volume,
            self.patch_size,
            self.num_iterations,
        )
        arguments['locations'] = locations
        arguments['patch_size'] = self.patch_size
        transform = Swap(**arguments)
        volume = transform(volume)
        return volume

    @staticmethod
    def get_random_indices_from_shape(
        spatial_shape: TypeTripletInt,
        patch_size: TypeTripletInt,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Return random (start, end) corner indices of one patch."""
        shape_array = np.array(spatial_shape)
        patch_size_array = np.array(patch_size)
        max_index_ini = shape_array - patch_size_array
        if (max_index_ini < 0).any():
            message = (
                f'Patch size {patch_size} cannot be'
                f' larger than image spatial shape {spatial_shape}'
            )
            raise ValueError(message)
        max_index_ini = max_index_ini.astype(np.uint16)
        coordinates = []
        for max_coordinate in max_index_ini.tolist():
            if max_coordinate == 0:
                coordinate = 0
            else:
                coordinate = torch.randint(max_coordinate, size=(1,)).item()
            coordinates.append(coordinate)
        index_ini = np.array(coordinates, np.uint16)
        index_fin = index_ini + patch_size_array
        return index_ini, index_fin
class Swap(Transformable):
    r"""Swap patches within an image.

    This is typically used in `context restoration for self-supervised learning
    <https://www.sciencedirect.com/science/article/pii/S1361841518304699>`_.

    Args:
        patch_size: Tuple of integers :math:`(w, h, d)` to swap patches
            of size :math:`w \times h \times d`.
            If a single number :math:`n` is provided, :math:`w = h = d = n`.
        locations: Pairs of corner indices; the two patches of each pair
            are exchanged.
    """
    def __init__(
        self,
        patch_size: Union[TypeTripletInt, Dict[str, TypeTripletInt]],
        locations: Union[TypeLocations, Dict[str, TypeLocations]],
    ):
        self.locations = locations
        self.patch_size = patch_size
        # When True, the swaps are replayed in reverse to undo the transform.
        self.invert_transform = False

    def apply_transform(self, volume: torch.tensor) -> torch.tensor:
        """Swap the configured patch pairs within ``volume``."""
        if self.invert_transform:
            self.locations.reverse()
        return self.swap(volume, self.patch_size, self.locations)

    def swap(
        self,
        tensor: torch.Tensor,
        patch_size: TypeTuple,
        locations: List[Tuple[np.ndarray, np.ndarray]],
    ) -> torch.tensor:
        """Return a copy of ``tensor`` with each pair of patches exchanged."""
        tensor = tensor.clone()
        size = np.array(patch_size)
        for corner_a, corner_b in locations:
            # patch_a may stay a live view into the tensor: region B is
            # written first, then the cloned patch_b overwrites region A.
            patch_a = self.crop(tensor, corner_a, corner_a + size)
            patch_b = self.crop(tensor, corner_b, corner_b + size).clone()
            self.insert(tensor, patch_a, corner_b)
            self.insert(tensor, patch_b, corner_a)
        return tensor

    @staticmethod
    def insert(tensor: TypeData, patch: TypeData, index_ini: np.ndarray) -> None:
        """Write ``patch`` into ``tensor`` with its corner at ``index_ini``."""
        i0, j0, k0 = index_ini
        i1, j1, k1 = index_ini + np.array(patch.shape[-3:])
        tensor[:, i0:i1, j0:j1, k0:k1] = patch

    @staticmethod
    def crop(
        image: Union[np.ndarray, torch.Tensor],
        index_ini: np.ndarray,
        index_fin: np.ndarray,
    ) -> Union[np.ndarray, torch.Tensor]:
        """Return the (view of the) region between the two corner indices."""
        i0, j0, k0 = index_ini
        i1, j1, k1 = index_fin
        return image[:, i0:i1, j0:j1, k0:k1]
# Torchio-backed transforms: only defined when the optional torchio import
# succeeded (IMPORT_TORCHIO flag); otherwise stub classes that raise on
# construction are provided so the names still exist.
if IMPORT_TORCHIO:
    class ElasticDeformation(ElasticDeformationTIO):
        r"""Apply dense random elastic deformation.

        A random displacement is assigned to a coarse grid of control points
        around and inside the image. The displacement at each voxel is
        interpolated from the coarse grid using cubic B-splines.

        The `'Deformable Registration'
        <https://www.sciencedirect.com/topics/computer-science/deformable-registration>`_
        topic on ScienceDirect contains useful articles explaining interpolation of
        displacement fields using cubic B-splines.

        Args:
            num_control_points: Number of control points along each dimension of
                the coarse grid :math:`(n_x, n_y, n_z)`.
                If a single value :math:`n` is passed,
                then :math:`n_x = n_y = n_z = n`.
                Smaller numbers generate smoother deformations.
                The minimum number of control points is ``4`` as this transform
                uses cubic B-splines to interpolate displacement.
            max_displacement: Maximum displacement along each dimension at each
                control point :math:`(D_x, D_y, D_z)`.
                The displacement along dimension :math:`i` at each control point is
                :math:`d_i \sim \mathcal{U}(0, D_i)`.
                If a single value :math:`D` is passed,
                then :math:`D_x = D_y = D_z = D`.
                Note that the total maximum displacement would actually be
                :math:`D_{max} = \sqrt{D_x^2 + D_y^2 + D_z^2}`.
            locked_borders: If ``0``, all displacement vectors are kept.
                If ``1``, displacement of control points at the
                border of the coarse grid will be set to ``0``.
                If ``2``, displacement of control points at the border of the image
                (red dots in the image below) will also be set to ``0``.
                NOTE(review): documented but not accepted by this wrapper's
                ``__init__`` — confirm whether it should be forwarded.
            image_interpolation: See :ref:`Interpolation`.
                Note that this is the interpolation used to compute voxel
                intensities when resampling using the dense displacement field.
                The value of the dense displacement at each voxel is always
                interpolated with cubic B-splines from the values at the control
                points of the coarse grid.
            p: Probability that this transform will be applied.
                NOTE(review): documented but not accepted by this wrapper's
                ``__init__`` — confirm.
            keys: See :class:`~torchio.transforms.Transform`.

        `This gist <https://gist.github.com/fepegar/b723d15de620cd2a3a4dbd71e491b59d>`_
        can also be used to better understand the meaning of the parameters.
        This is an example from the
        `3D Slicer registration FAQ <https://www.slicer.org/wiki/Documentation/4.10/FAQ/Registration#What.27s_the_BSpline_Grid_Size.3F>`_.

        .. image:: https://www.slicer.org/w/img_auth.php/6/6f/RegLib_BSplineGridModel.png
            :alt: B-spline example from 3D Slicer documentation

        To generate a similar grid of control points with TorchIO,
        the transform can be instantiated as follows::

            >>> from torchio import RandomElasticDeformation
            >>> transform = RandomElasticDeformation(
            ...     num_control_points=(7, 7, 7),  # or just 7
            ...     locked_borders=2,
            ... )

        Note that control points outside the image bounds are not showed in the
        example image (they would also be red as we set :attr:`locked_borders`
        to ``2``).

        .. warning:: Image folding may occur if the maximum displacement is larger
            than half the coarse grid spacing. The grid spacing can be computed
            using the image bounds in physical space [#]_ and the number of control
            points::

                >>> import numpy as np
                >>> import torchio as tio
                >>> image = tio.datasets.Slicer().MRHead.as_sitk()
                >>> image.GetSize()  # in voxels
                (256, 256, 130)
                >>> image.GetSpacing()  # in mm
                (1.0, 1.0, 1.2999954223632812)
                >>> bounds = np.array(image.GetSize()) * np.array(image.GetSpacing())
                >>> bounds  # mm
                array([256.        , 256.        , 168.99940491])
                >>> num_control_points = np.array((7, 7, 6))
                >>> grid_spacing = bounds / (num_control_points - 2)
                >>> grid_spacing
                array([51.2       , 51.2       , 42.24985123])
                >>> potential_folding = grid_spacing / 2
                >>> potential_folding  # mm
                array([25.6       , 25.6       , 21.12492561])

            Using a :attr:`max_displacement` larger than the computed
            :attr:`potential_folding` will raise a :class:`RuntimeWarning`.

            .. [#] Technically, :math:`2 \epsilon` should be added to the
                image bounds, where :math:`\epsilon = 2^{-3}` `according to ITK
                source code <https://github.com/InsightSoftwareConsortium/ITK/blob/633f84548311600845d54ab2463d3412194690a8/Modules/Core/Transform/include/itkBSplineTransformInitializer.hxx#L116-L138>`_.
        """
        def __init__(
            self,
            num_control_points,
            max_displacement: TypeTripletFloat,
            image_interpolation: str = 'linear',
            keys: Optional[Sequence[str]] = None,
        ):
            super().__init__(
                num_control_points=num_control_points,
                max_displacement=max_displacement,
                image_interpolation=image_interpolation,
                keys=keys
            )

        def __call__(self, data, **kwargs):
            # Wrap the raw tensor in a single-image torchio Subject, run the
            # torchio transform, then unwrap the resulting tensor.
            data = Subject(data=ScalarImage(tensor=data))
            return super(ElasticDeformation, self).__call__(data, **kwargs).get_first_image().data

    class Motion(MotionTIO):
        r"""Add random MRI motion artifact.

        Magnetic resonance images suffer from motion artifacts when the subject
        moves during image acquisition. This transform follows
        `Shaw et al., 2019 <http://proceedings.mlr.press/v102/shaw19a.html>`_ to
        simulate motion artifacts for data augmentation.

        Args:
            degrees: Tuple :math:`(a, b)` defining the rotation range in degrees of
                the simulated movements. The rotation angles around each axis are
                :math:`(\theta_1, \theta_2, \theta_3)`,
                where :math:`\theta_i \sim \mathcal{U}(a, b)`.
                If only one value :math:`d` is provided,
                :math:`\theta_i \sim \mathcal{U}(-d, d)`.
                Larger values generate more distorted images.
            translation: Tuple :math:`(a, b)` defining the translation in mm of
                the simulated movements. The translations along each axis are
                :math:`(t_1, t_2, t_3)`,
                where :math:`t_i \sim \mathcal{U}(a, b)`.
                If only one value :math:`t` is provided,
                :math:`t_i \sim \mathcal{U}(-t, t)`.
                Larger values generate more distorted images.
            num_transforms: Number of simulated movements.
                Larger values generate more distorted images.
            image_interpolation: See :ref:`Interpolation`.
            p: Probability that this transform will be applied.
                NOTE(review): documented but not accepted by this wrapper's
                ``__init__`` — confirm.
            keys: See :class:`~torchio.transforms.Transform`.

        .. warning:: Large numbers of movements lead to longer execution times for
            3D images.
        """
        def __init__(
            self,
            degrees: Union[TypeTripletFloat, Dict[str, TypeTripletFloat]],
            translation: Union[TypeTripletFloat, Dict[str, TypeTripletFloat]],
            num_transforms: Union[Sequence[float], Dict[str, Sequence[float]]],
            image_interpolation: Union[Sequence[str], Dict[str, Sequence[str]]],
            keys: Optional[Sequence[str]] = None,
        ):
            super().__init__(
                degrees=degrees,
                translation=translation,
                num_transforms=num_transforms,
                image_interpolation=image_interpolation,
                keys=keys
            )

        def __call__(self, data, **kwargs):
            # Wrap the raw tensor in a single-image torchio Subject, run the
            # torchio transform, then unwrap the resulting tensor.
            data = Subject(data=ScalarImage(tensor=data))
            return super(Motion, self).__call__(data, **kwargs).get_first_image().data
else:
    # Placeholder stubs: constructing them fails loudly instead of failing
    # later with a confusing NameError.
    class ElasticDeformation:
        def __init__(self):
            raise NotImplementedError('if you want to call this, set the flag to true')

        def __call__(self, *args, **kwargs):
            pass

    class Motion:
        def __init__(self):
            raise NotImplementedError('if you want to call this, set the flag to true')

        def __call__(self, *args, **kwargs):
            pass
|
#!/usr/bin/env python
#Copyright (c) 2013, Eduard Broecker
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
#WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
#PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#DAMAGE.
#
# this script exports xls-files from a canmatrix-object
# xls-files are the can-matrix-definitions displayed in Excel
from __future__ import division
from __future__ import absolute_import
from builtins import *
import math
try:
import xlsxwriter
except ImportError:
xlsxwriter = None
import sys
from .canmatrix import *
import os.path
#Font Size : 8pt * 20 = 160
#font = 'font: name Arial Narrow, height 160'
font = 'font: name Verdana, height 160'

# Worksheet cell-style slots shared by the write* helpers below. They are
# placeholders (0) here and are presumably replaced with real style objects
# by the exporter's setup code before any cell is written — TODO confirm
# against the caller.
sty_header = 0
sty_norm = 0
sty_first_frame = 0
sty_white = 0
sty_green = 0
sty_green_first_frame = 0
sty_sender = 0
sty_sender_first_frame = 0
sty_sender_green = 0
sty_sender_green_first_frame = 0
def writeFramex(frame, worksheet, row, mystyle):
    """Write one CAN frame's summary columns to a worksheet row.

    Columns: 0 = frame id (hex), 1 = frame name, 2 = cycle time,
    3 = human-readable send type, 4 = the numeric attribute that goes with
    the send type (delay, repetitions or active cycle time), if present.
    """
    # Frame id and name.
    worksheet.write(row, 0, "%3Xh" % frame._Id, mystyle)
    worksheet.write(row, 1, frame._name, mystyle)

    # Determine cycle time.
    if "GenMsgCycleTime" in frame._attributes:
        worksheet.write(row, 2, int(frame._attributes["GenMsgCycleTime"]), mystyle)
    else:
        worksheet.write(row, 2, "", mystyle)

    # Determine send type: map the DBC attribute value to a display label
    # and to the companion attribute shown in column 4 (None = always "").
    send_types = {
        "5": ("Cyclic+Change", "GenMsgDelayTime"),
        "0": ("Cyclic", None),
        "2": ("BAF", "GenMsgNrOfRepetitions"),
        "8": ("DualCycle", "GenMsgCycleTimeActive"),
        "10": ("None", "GenMsgDelayTime"),
        "9": ("OnChange", "GenMsgNrOfRepetitions"),
        "1": ("Spontaneous", "GenMsgDelayTime"),
    }
    if "GenMsgSendType" in frame._attributes:
        send_type = frame._attributes["GenMsgSendType"]
    else:
        send_type = None
    label, extra_attribute = send_types.get(send_type, ("", None))
    worksheet.write(row, 3, label, mystyle)
    if extra_attribute is not None and extra_attribute in frame._attributes:
        worksheet.write(row, 4, int(frame._attributes[extra_attribute]), mystyle)
    else:
        worksheet.write(row, 4, "", mystyle)
def writeSignalx(db, sig, worksheet, row, rearCol, mystyle, motorolaBitFormat):
    """Write one signal's columns (5..12 and rearCol+2) into the matrix sheet.

    db: CAN database (used for the GenSigStartValue attribute definition).
    sig: signal object providing name, comment, size, attributes, endianness.
    worksheet/row: target cell row; rearCol: first column after the BU matrix.
    mystyle: cell format applied to every written cell.
    motorolaBitFormat: "msb" | "msbreverse" | anything else means "lsb".
    """
    # Resolve the start bit according to the requested Motorola numbering scheme.
    if motorolaBitFormat == "msb":
        start_bit = sig.getStartbit(bitNumbering=1)
    elif motorolaBitFormat == "msbreverse":
        start_bit = sig.getStartbit()
    else:  # "lsb"
        start_bit = sig.getStartbit(bitNumbering=1, startLittle=True)

    # Start byte (1-based) and bit offset within that byte.
    worksheet.write(row, 5, math.floor(start_bit / 8) + 1, mystyle)
    worksheet.write(row, 6, start_bit % 8, mystyle)
    worksheet.write(row, 7, sig._name, mystyle)

    # Signal comment, prefixed with multiplex information when present.
    comment = sig._comment if sig._comment is not None else ""
    if sig._multiplex == 'Multiplexor':
        comment = "Mode Signal: " + comment
    elif sig._multiplex is not None:
        comment = "Mode " + str(sig._multiplex) + ":" + comment
    worksheet.write(row, 8, comment, mystyle)
    worksheet.write(row, 9, sig._signalsize, mystyle)

    # Start value (column 10) — only written when the attribute exists,
    # formatted according to the attribute's declared type.
    if "GenSigStartValue" in sig._attributes:
        definition = db._signalDefines["GenSigStartValue"]._definition
        if definition == "STRING":
            worksheet.write(row, 10, sig._attributes["GenSigStartValue"], mystyle)
        elif definition in ("INT", "HEX"):
            worksheet.write(row, 10, "%Xh" % int(sig._attributes["GenSigStartValue"]), mystyle)
        else:
            worksheet.write(row, 10, " ", mystyle)

    # SNA value (column 11); a blank cell keeps the row styling consistent.
    if "GenSigSNA" in sig._attributes:
        worksheet.write(row, 11, sig._attributes["GenSigSNA"][1:-1], mystyle)
    else:
        worksheet.write(row, 11, " ", mystyle)

    # Byte order: "i" = Intel / little-endian, "m" = Motorola / big-endian.
    worksheet.write(row, 12, "i" if sig._is_little_endian else "m", mystyle)

    # Factor / unit column placed after the board-unit matrix; the factor is
    # only shown when it differs from 1.0.
    factor = float(sig._factor)
    if len(sig._unit.strip()) > 0:
        if factor != 1:
            worksheet.write(row, rearCol + 2, "%g" % factor + " " + sig._unit, mystyle)
        else:
            worksheet.write(row, rearCol + 2, sig._unit, mystyle)
    else:
        if factor != 1:
            worksheet.write(row, rearCol + 2, "%g" % factor, mystyle)
        else:
            worksheet.write(row, rearCol + 2, "", mystyle)
def writeValuex(label, value, worksheet, row, rearCol, mystyle):
    """Write one value-table entry: label at column `rearCol`, value next to it."""
    for offset, cell in enumerate((label, value)):
        worksheet.write(row, rearCol + offset, cell, mystyle)
def writeBuMatrixx(buList, sig, frame, worksheet, row, col, firstframe):
    """Fill the board-unit matrix cells for one signal row.

    Writes "r", "s", "r/s" or "" per board unit depending on whether the unit
    receives the signal and/or transmits the frame. Returns the column index
    just past the last board-unit column.

    NOTE(review): relies on the module-level style globals created by
    exportXlsx (sty_first_frame, sty_norm, ...) — exportXlsx must have set
    them up before this is called.
    """
    # Bordered styles for the first row of a frame, borderless otherwise.
    if firstframe == sty_first_frame:
        plain = sty_first_frame
        tx = sty_sender_first_frame
        plain_green = sty_green_first_frame
        tx_green = sty_sender_green_first_frame
    else:
        plain = sty_norm
        tx = sty_sender
        plain_green = sty_green
        tx_green = sty_sender_green

    for bu in buList:
        # Alternate the background colour on every second column.
        if col % 2 == 0:
            cell_style, sender_style = plain, tx
        else:
            cell_style, sender_style = plain_green, tx_green

        receives = bu in sig._receiver
        transmits = bu in frame._Transmitter
        if receives and transmits:
            worksheet.write(row, col, "r/s", sender_style)
        elif receives:
            worksheet.write(row, col, "r", cell_style)
        elif transmits:
            worksheet.write(row, col, "s", sender_style)
        else:
            worksheet.write(row, col, "", cell_style)
        col += 1

    return col
def exportXlsx(db, filename, **options):
    """Export the CAN database `db` as an Excel .xlsx "K-Matrix" file.

    db: CAN database object (frames with signals, board units, defines).
    filename: output path; the worksheet is named after the file's basename.
    options: may contain 'xlsMotorolaBitFormat' ("msb" | "msbreverse" | "lsb")
             controlling how Motorola start bits are numbered by writeSignalx.

    Raises ImportError when the optional xlsxwriter dependency is missing.
    """
    if xlsxwriter is None:
        raise ImportError("no xlsx-export-support, some dependencies missing... try pip install xlsxwriter")
    if 'xlsMotorolaBitFormat' in options:
        motorolaBitFormat = options["xlsMotorolaBitFormat"]
    else:
        motorolaBitFormat = "msbreverse"
    # Fixed header columns before (head_top) and after (head_tail) the
    # per-board-unit matrix columns.
    head_top = ['ID', 'Frame Name', 'Cycle Time [ms]', 'Launch Type', 'Launch Parameter', 'Signal Byte No.', 'Signal Bit No.', 'Signal Name', 'Signal Function', 'Signal Length [Bit]', 'Signal Default', ' Signal Not Available', 'Byteorder']
    head_tail = ['Value', 'Name / Phys. Range', 'Function / Increment Unit']
    workbook = xlsxwriter.Workbook(filename)
    # Excel sheet names are capped at 31 chars: 'K-Matrix ' (9) + 22 of the basename.
    wsname = os.path.basename(filename).replace('.xlsx', '')
    worksheet = workbook.add_worksheet('K-Matrix ' + wsname[0:22])
    col = 0
    # The cell styles are module-level globals so the helper writers
    # (writeFramex, writeSignalx, writeBuMatrixx, writeValuex) can use them.
    global sty_header
    sty_header = workbook.add_format({'bold': True, 'rotation': 90, 'font_name': 'Verdana', 'font_size': 8, 'align': 'center', 'valign': 'center'})
    global sty_first_frame
    sty_first_frame = workbook.add_format({'font_name': 'Verdana', 'font_size': 8,
                                           'font_color': 'black', 'top': 1})
    global sty_white
    sty_white = workbook.add_format({'font_name': 'Verdana', 'font_size': 8,
                                     'font_color': 'white'})
    global sty_norm
    sty_norm = workbook.add_format({'font_name': 'Verdana', 'font_size': 8,
                                    'font_color': 'black'})
    # BUMatrix-Styles (green = alternating column shading, sender = hatched)
    global sty_green
    sty_green = workbook.add_format({'pattern': 1, 'fg_color': '#CCFFCC'})
    global sty_green_first_frame
    sty_green_first_frame = workbook.add_format({'pattern': 1, 'fg_color': '#CCFFCC', 'top': 1})
    global sty_sender
    sty_sender = workbook.add_format({'pattern': 0x04, 'fg_color': '#C0C0C0'})
    global sty_sender_first_frame
    sty_sender_first_frame = workbook.add_format({'pattern': 0x04, 'fg_color': '#C0C0C0', 'top': 1})
    global sty_sender_green
    sty_sender_green = workbook.add_format({'pattern': 0x04, 'fg_color': '#C0C0C0', 'bg_color': '#CCFFCC'})
    global sty_sender_green_first_frame
    sty_sender_green_first_frame = workbook.add_format({'pattern': 0x04, 'fg_color': '#C0C0C0', 'bg_color': '#CCFFCC', 'top': 1})
    # Write first row (header) cols before the board units:
    for head in head_top:
        worksheet.write(0, col, head, sty_header)
        worksheet.set_column(col, col, 3.57)
        col += 1
    # Write board units in first row:
    buList = []
    for bu in db._BUs._list:
        worksheet.write(0, col, bu._name, sty_header)
        worksheet.set_column(col, col, 3.57)
        buList.append(bu._name)
        col += 1
    head_start = col
    # Write first row (header) cols after the board units:
    for head in head_tail:
        worksheet.write(0, col, head, sty_header)
        worksheet.set_column(col, col, 6)
        col += 1
    # Set width of selected columns
    worksheet.set_column(0, 0, 3.57)
    worksheet.set_column(1, 1, 21)
    worksheet.set_column(3, 3, 12.29)
    worksheet.set_column(7, 7, 21)
    worksheet.set_column(8, 8, 30)
    worksheet.set_column(head_start + 1, head_start + 1, 21)
    worksheet.set_column(head_start + 2, head_start + 2, 12)
    # Index frames by numeric ID so the sheet is written in ID order.
    frameHash = {}
    for frame in db._fl._list:
        frameHash[int(frame._Id)] = frame
    # Set row to first frame (row = 0 is header)
    row = 1
    # Iterate over the frames
    for idx in sorted(frameHash.keys()):
        frame = frameHash[idx]
        framestyle = sty_first_frame
        # Sort signals by start bit (zero-padded so string sort == numeric sort):
        sigHash = {}
        for sig in frame._signals:
            sigHash["%02d" % int(sig.getStartbit()) + sig._name] = sig
        # Style for the first line of a frame carries a top border.
        sigstyle = sty_first_frame
        # Iterate over signals
        for sig_idx in sorted(sigHash.keys()):
            sig = sigHash[sig_idx]
            # If not the first signal in the frame, drop back to normal style.
            if sigstyle != sty_first_frame:
                sigstyle = sty_norm
            # Value table available? -> one sheet row per value.
            if sig._values.__len__() > 0:
                valstyle = sigstyle
                # Iterate over values in the value table
                for val in sorted(sig._values.keys()):
                    writeFramex(frame, worksheet, row, framestyle)
                    if framestyle != sty_first_frame:
                        # Group continuation rows under the frame's first row.
                        worksheet.set_row(row, None, None, {'level': 1})
                    col = head_top.__len__()
                    col = writeBuMatrixx(buList, sig, frame, worksheet, row, col, framestyle)
                    # Write value
                    writeValuex(val, sig._values[val], worksheet, row, col, valstyle)
                    writeSignalx(db, sig, worksheet, row, col, sigstyle, motorolaBitFormat)
                    # No min/max here, because min/max shares its column with the values.
                    # Next row
                    row += 1
                    # Set style to normal - without border
                    sigstyle = sty_white
                    framestyle = sty_white
                    valstyle = sty_norm
                # Loop over values ends here
            # No value table available -> single row with phys. range.
            else:
                writeFramex(frame, worksheet, row, framestyle)
                if framestyle != sty_first_frame:
                    worksheet.set_row(row, None, None, {'level': 1})
                col = head_top.__len__()
                col = writeBuMatrixx(buList, sig, frame, worksheet, row, col, framestyle)
                writeSignalx(db, sig, worksheet, row, col, sigstyle, motorolaBitFormat)
                # Only print a range when it is not the trivial 0..1.
                if float(sig._min) != 0 or float(sig._max) != 1.0:
                    worksheet.write(row, col + 1, str("%g..%g" % (sig._min, sig._max)), sigstyle)
                else:
                    worksheet.write(row, col + 1, "", sigstyle)
                # Just for border
                worksheet.write(row, col, "", sigstyle)
                # Next row
                row += 1
                # Set style to normal - without border
                sigstyle = sty_white
                framestyle = sty_white
        # Reset signal array (NOTE(review): `signals` is never read afterwards)
        signals = []
        # Loop over signals ends here
    # Loop over frames ends here
    worksheet.autofilter(0, 0, row, len(head_top) + len(head_tail) + len(db._BUs._list))
    worksheet.freeze_panes(1, 0)
    # Save file
    workbook.close()
|
# coding: utf-8
# # Simple Character-level Language Model using LSTM
# 2017-04-21 jkang
# Python3.5
# TensorFlow1.0.1
#
# - <p style="color:red">Different window sizes were applied</p> e.g. n_window = 3 (three-character window)
# - input: 'hello_world_good_morning_see_you_hello_grea'
# - output: 'ello_world_good_morning_see_you_hello_great'
#
# ### Reference:
# - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
# - https://github.com/aymericdamien/TensorFlow-Examples
# - https://hunkim.github.io/ml/
#
# ### Comment:
# - 단어 단위가 아닌 문자 단위로 훈련함
# - 하나의 example만 훈련에 사용함
# : 하나의 example을 windowing하여 여러 샘플을 만들어 냄 (새로운 샘플의 크기는 window_size)
# - Cell의 종류는 BasicLSTMCell을 사용함 (첫번째 Reference 참조)
# - dynamic_rnn방식 사용 (기존 tf.nn.rnn보다 더 시간-계산 효율적이라고 함)
# - AdamOptimizer를 사용
# In[1]:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
# Input/Output data.
# Character-level language modelling: the network sees the string and must
# predict the same string shifted by one character.
char_raw = 'hello_world_good_morning_see_you_hello_great'
char_list = sorted(list(set(char_raw)))
# Bidirectional lookup tables between characters and integer indices.
char_to_idx = {c: i for i, c in enumerate(char_list)}
idx_to_char = {i: c for i, c in enumerate(char_list)}
char_data = [char_to_idx[c] for c in char_raw]
# One-hot encode the whole sequence: (time_step) x (vocab_size).
char_data_one_hot = tf.one_hot(char_data, depth=len(
    char_list), on_value=1., off_value=0., axis=1, dtype=tf.float32)
char_input = char_data_one_hot[:-1, :]  # 'hello_world_good_morning_see_you_hello_grea'
char_output = char_data_one_hot[1:, :]  # 'ello_world_good_morning_see_you_hello_great'
# Materialize the one-hot tensors as numpy arrays for windowing below.
with tf.Session() as sess:
    char_input = char_input.eval()
    char_output = char_output.eval()
# In[2]:
# Learning parameters
learning_rate = 0.001
max_iter = 1000
# Network Parameters
n_input_dim = char_input.shape[1]    # vocabulary size
n_input_len = char_input.shape[0]    # sequence length (input side)
n_output_dim = char_output.shape[1]  # vocabulary size
n_output_len = char_output.shape[0]  # sequence length (target side)
n_hidden = 100                       # LSTM hidden-state size
n_window = 2  # number of characters in one window (like a mini-batch)
# TensorFlow graph
# (batch_size) x (time_step) x (input_dimension)
x_data = tf.placeholder(tf.float32, [None, None, n_input_dim])
# (batch_size) x (time_step) x (output_dimension)
y_data = tf.placeholder(tf.float32, [None, None, n_output_dim])
# Output projection parameters (hidden state -> vocabulary logits).
weights = {
    'out': tf.Variable(tf.truncated_normal([n_hidden, n_output_dim]))
}
biases = {
    'out': tf.Variable(tf.truncated_normal([n_output_dim]))
}
# In[3]:
def make_window_batch(x, y, window_size):
    '''
    Slice one (x, y) example into overlapping samples of length window_size.

    x: (time_step) x (input_dim)
    y: (time_step) x (output_dim)

    Returns:
        x_out: (n_examples) x (window_size) x (input_dim)
        y_out: (n_examples) x (window_size) x (output_dim)
        n_examples: number of windows, i.e. time_step - window_size + 1

    The windows are shuffled jointly, so x_out[i] and y_out[i] stay aligned.
    (The original docstring claimed a 4-D "(total_batch) x ..." output and the
    code ended with a shape-preserving reshape; both were wrong/dead and are
    removed — the function always produced 3-D arrays.)
    '''
    # Sliding the window one character at a time yields this many samples.
    n_examples = x.shape[0] - window_size + 1
    x_batch = np.empty((n_examples, window_size, x.shape[1]))
    y_batch = np.empty((n_examples, window_size, y.shape[1]))
    for i in range(n_examples):
        x_batch[i, :, :] = x[i:i + window_size, :]
        y_batch[i, :, :] = y[i:i + window_size, :]
    # Shuffle the windows while keeping each (x, y) pair together.
    paired = list(zip(x_batch, y_batch))
    random.shuffle(paired)
    x_shuffled, y_shuffled = zip(*paired)
    return np.array(x_shuffled), np.array(y_shuffled), n_examples
# In[4]:
def RNN(x, weights, biases):
    """Single-layer LSTM followed by a linear output projection.

    With time_major=False, `x` is (batch) x (time) x (input_dim) and the
    LSTM outputs share that layout; outputs[-1] therefore selects the last
    example of the mini-batch, shape (time) x (n_hidden), which is projected
    to vocabulary logits via weights['out'] and biases['out'].
    (Uses the module-level n_hidden for the cell size.)
    """
    lstm = tf.contrib.rnn.BasicLSTMCell(n_hidden)
    rnn_outputs, final_state = tf.nn.dynamic_rnn(
        lstm, x, time_major=False, dtype=tf.float32)
    last_batch_outputs = rnn_outputs[-1]
    return tf.matmul(last_batch_outputs, weights['out']) + biases['out']
def softmax(x):
    """Row-wise softmax of a 2-D array.

    x: (rows) x (cols) array of logits.
    Returns an array of the same shape whose rows sum to 1.

    Fix: the previous version subtracted the row max with `x -= ...`, mutating
    the caller's array in place. This version leaves the input untouched.
    """
    # Subtract the row max for numerical stability (avoids exp overflow).
    shifted = x - np.max(x, axis=1).reshape((x.shape[0], 1))
    e = np.exp(shifted)
    return e / np.sum(e, axis=1).reshape((x.shape[0], 1))
# Build the graph: predictions, mean-squared-error loss, Adam optimizer.
pred = RNN(x_data, weights, biases)
cost = tf.reduce_mean(tf.squared_difference(pred, y_data))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# In[5]:
# Learning
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(max_iter):
        loss = 0
        # Re-window (and re-shuffle) the single training example every epoch.
        x_batch, y_batch, n_examples = make_window_batch(char_input, char_output, n_window)
        for ibatch in range(x_batch.shape[0]):
            # One window presented as a batch of size 1.
            x_train = x_batch[ibatch, :, :].reshape((1, -1, n_input_dim))
            y_train = y_batch[ibatch, :, :].reshape((1, -1, n_output_dim))
            # Full sequence used to monitor the current predictions.
            x_test = char_input.reshape((1, n_input_len, n_input_dim))
            y_test = char_output.reshape((1, n_input_len, n_input_dim))
            c, _ = sess.run([cost, optimizer], feed_dict={
                x_data: x_train, y_data: y_train})
            p = sess.run(pred, feed_dict={x_data: x_test, y_data: y_test})
            loss += c
        mean_mse = loss / n_examples
        if i == max_iter - 1:
            # Keep the final probability map for the plot below.
            pred_act = softmax(p)
        if (i + 1) % 100 == 0:
            # Greedy decode: most probable character per position.
            pred_out = np.argmax(p, axis=1)
            accuracy = np.sum(char_data[1:] == pred_out) / n_output_len * 100
            print('Epoch:{:>4}/{},'.format(i + 1, max_iter),
                  'Cost:{:.4f},'.format(mean_mse),
                  'Acc:{:>.1f},'.format(accuracy),
                  'Predict:', ''.join([idx_to_char[i] for i in pred_out]))
# In[6]:
# Probability plot: columns = input characters, rows = vocabulary.
fig, ax = plt.subplots()
fig.set_size_inches(15, 20)
plt.title('Input Sequence', y=1.08, fontsize=20)
plt.xlabel('Probability of Next Character(y) Given Current One(x)' +
           '\n[window_size={}, accuracy={:.1f}]'.format(n_window, accuracy),
           fontsize=20, y=1.5)
plt.ylabel('Character List', fontsize=20)
plot = plt.imshow(pred_act.T, cmap=plt.get_cmap('plasma'))
fig.colorbar(plot, fraction=0.015, pad=0.04)
plt.xticks(np.arange(len(char_data) - 1), list(char_raw)[:-1], fontsize=15)
plt.yticks(np.arange(len(char_list)), [idx_to_char[i] for i in range(len(char_list))], fontsize=15)
ax.xaxis.tick_top()
# Annotate each column with the character the model predicted there.
for i, idx in zip(range(len(pred_out)), pred_out):
    annotation = idx_to_char[idx]
    ax.annotate(annotation, xy=(i - 0.2, idx + 0.2), fontsize=12)
plt.show()
# f.savefig('result_' + idx + '.png')
|
import pika
import json
import sys

# Publish a JSON document read from stdin to the 'logs' direct exchange
# on the 'pi-point' broker, then report what was sent.
broker = pika.BlockingConnection(pika.URLParameters("amqp://hi:hi@pi-point:5672"))
chan = broker.channel()
chan.exchange_declare(exchange='logs', exchange_type='direct')

message = json.load(sys.stdin)
chan.basic_publish(exchange='logs', routing_key='pi-point', body=json.dumps(message))
print("[x] Sent %r" % message)

broker.close()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
* UMSE Antivirus Agent Example
* Author: David Alvarez Perez <dalvarezperez87[at]gmail[dot]com>
* Module: UMSE Intelligence Server
* Description: This module launch the "UMSE Intelligence Server".
*
* Copyright (c) 2019-2020. The UMSE Authors. All Rights Reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
"""
from kaitaistruct import KaitaiStream
#import umse
import os
import cherrypy
import base64
import json
import sqlitequeries
import umsedecryptiontools
from io import BytesIO
from datetime import datetime
def check_auth():
    '''
    Pull username/password from the request parameters, authenticate against
    the database, and raise 401 when a handler that requires authentication
    (auth.require in its _cp_config) rejects the credentials.

    Returns True when the request may proceed.
    '''
    needs_auth = cherrypy.request.config.get('auth.require', False)
    username = None
    password = None
    try:
        username = cherrypy.request.params['username']
        password = cherrypy.request.params['password']
    except KeyError:
        # Credentials simply not supplied; the auth check below will reject
        # them if this handler actually requires authentication.
        # (Previously a bare `except:` that hid all errors.)
        pass
    finally:
        # Always scrub the password from the parameter dict so it is never
        # forwarded to handlers — the old code skipped the pop when
        # 'username' was missing but 'password' was present.
        cherrypy.request.params.pop('password', None)
    auth_result = json.loads(sqlitequeries.check_auth(username, password))
    if needs_auth and auth_result.get('status') != 'ok':
        raise cherrypy.HTTPError(401)
    return True
def needsauth():
    '''
    Decorator factory that marks a CherryPy handler as requiring
    authentication by setting 'auth.require' in the handler's _cp_config.

    Fix: the old body first assigned [] to the key and immediately overwrote
    it with True — the list assignment (and its guard) was dead code.
    '''
    def decorate(f):
        if not hasattr(f, '_cp_config'):
            f._cp_config = dict()
        f._cp_config['auth.require'] = True
        return f
    return decorate
class App:
    """CherryPy application exposing the UMSE sample database."""

    # Run the custom auth tool (check_auth) before every handler.
    _cp_config = {'tools.auth.on': True,}

    @cherrypy.expose
    def upload(self, ufile, original_filename):
        '''
        Upload an UMSE file to the database.

        ufile: the uploaded file part; original_filename: name to store.
        '''
        # Collect chunks and join once at the end — the old code rebuilt the
        # whole bytes object per chunk (quadratic in file size).
        chunks = []
        sample_size = 0
        while True:
            data = ufile.file.read(8192)
            if not data:
                break
            chunks.append(data)
            sample_size += len(data)
        file_content = b''.join(chunks)
        # Uploaded files are named "<hash>.<ext>"; the hash is the stem.
        file_hash = ufile.filename.split('.')[0]
        return sqlitequeries.upload_sample(file_hash, original_filename, sample_size, file_content)

    @cherrypy.expose
    def intelligence(self, sample_hash):
        '''
        Retrieve information of an UMSE sample given its sample hash.
        '''
        return sqlitequeries.get_sample_by_hash(sample_hash)

    @cherrypy.expose
    def index(self):
        '''
        Serve the "samples.tpl" page: a paginated table of samples plus a
        search input box, with the sample list spliced into the template.
        '''
        # NOTE(review): Windows-style template path — confirm deployment OS.
        with open("tpl\\samples.tpl") as samples_tpl:
            return samples_tpl.read().replace("**SAMPLES_JSON_HERE**", sqlitequeries.get_all_samples())

    @cherrypy.expose
    def get_all_samples(self):
        '''
        Retrieve information of all UMSE samples available in the database.
        '''
        return sqlitequeries.get_all_samples()

    @cherrypy.expose
    def download_sample(self, id):
        '''
        Download an UMSE file sample given its sample identifier.

        NOTE: `id` shadows the builtin, but the name is part of the public
        URL query-parameter interface, so it must stay.
        '''
        samples = json.loads(sqlitequeries.get_samples_by_id_range(str(int(id) - 1), str(int(id) + 1)))
        # Return the first sample in the range as an attachment.
        for sample in samples:
            cherrypy.response.headers['Content-Type'] = 'application/octet-stream'
            cherrypy.response.headers['Content-Disposition'] = 'attachment; filename={0}.umse'.format(samples[sample].get('hash_sha256'))
            return base64.b64decode(samples[sample].get('file_content'))

    @cherrypy.expose
    @needsauth()
    def decrypt_sample(self, sample_hash, entry, username):
        '''
        Decrypt one UMSE entry of the sample identified by sample_hash,
        enforcing the requesting user's access level and logging the attempt.
        '''
        user_data_json = json.loads(sqlitequeries.get_user_data_from_username(username))
        user_id = user_data_json['id']
        access_level = user_data_json['access_level']
        sqlitequeries.log_user_activity(user_id, "{0} User '{1}'' requested to decrypt entry '{2}'' of sample '{3}'".format(str(datetime.now()), username, entry, sample_hash))
        sample = json.loads(sqlitequeries.get_sample_by_hash(sample_hash))
        file_content = base64.b64decode(sample.get('file_content'))
        original_filename = sample.get('original_filename')
        # NOTE: parsing via umse.Umse(KaitaiStream(BytesIO(...))) is currently
        # disabled; the raw bytes are passed straight to the decryptor.
        sample = file_content
        decrypted_entry = None
        try:
            decrypted_entry = umsedecryptiontools.decrypt_entry(int(entry), sample, int(access_level))
        except umsedecryptiontools.GeneralError:
            # Fix: log before raising — the old code raised first, which made
            # this log call unreachable (unlike the sibling handlers below).
            sqlitequeries.log_user_activity(user_id, "{0} User '{1}'' attempted to decrypt entry '{2}'' of sample '{3}' but a general error was raised.".format(datetime.now(), username, entry, sample_hash))
            raise cherrypy.HTTPError(500, "General error")
        except umsedecryptiontools.InvalidAuthenticationHeader:
            sqlitequeries.log_user_activity(user_id, "{0} User '{1}'' attempted to decrypt entry '{2}'' of sample '{3}' but this UMSE file was TAMPERED!!!".format(datetime.now(), username, entry, sample_hash))
            raise cherrypy.HTTPError(500, "Invalid UMSE Authentication Header detected")
        except umsedecryptiontools.InsufficientCredentials:
            sqlitequeries.log_user_activity(user_id, "{0} User '{1}'' attempted to decrypt entry '{2}'' of sample '{3}' but operation was DISALLOWED!!!".format(datetime.now(), username, entry, sample_hash))
            raise cherrypy.HTTPError(401, "User unauthorized to decrypt this UMSE entry")
        cherrypy.response.headers['Content-Type'] = 'application/octet-stream'
        cherrypy.response.headers['Content-Disposition'] = 'attachment; filename={0}.entry{1}'.format(original_filename, entry)
        if decrypted_entry is not None:
            sqlitequeries.log_user_activity(user_id, "{0} User '{1}'' successfully decrypted entry '{2}'' of sample '{3}'".format(datetime.now(), username, entry, sample_hash))
            return base64.b64encode(decrypted_entry)

    @cherrypy.expose
    @needsauth()
    def check_credentials(self, username):
        '''
        Return "ok" when the supplied credentials are valid; the @needsauth
        decorator makes the auth tool raise 401 before this body runs
        otherwise.
        '''
        return "ok"
if __name__ == '__main__':
    # Ensure the SQLite schema exists before serving any requests.
    sqlitequeries.create_schema_tables()
    config = {
        '/': {
            # Serve static dirs/files relative to the current working directory.
            'tools.staticdir.root': os.path.abspath( os.getcwd() ),
            'tools.staticfile.root': os.path.abspath( os.getcwd() )
        },
        'global' : {
            # Listen on localhost only.
            'server.socket_host' : '127.0.0.1',
            'server.socket_port' : 8080
        },
        "/favicon.ico": {
            "tools.staticfile.on": True,
            "tools.staticfile.filename": "./favicon.ico"
        },
    }
    # Register check_auth to run before every handler (priority 50);
    # handlers opt in via 'tools.auth.on' / 'auth.require'.
    cherrypy.tools.auth = cherrypy.Tool('before_handler', check_auth, priority=50)
    cherrypy.quickstart(App(), '/', config)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.