text
stringlengths 8
6.05M
|
|---|
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, DateTime, UnicodeText, Sequence
from sqlalchemy.orm import relationship, backref
from datetime import datetime
from app.db.database import Base
class Project(Base):
    """ORM model for a project; a project owns a collection of items."""
    __tablename__ = "projects"
    # Explicit sequence so id generation also works on databases that
    # have no native autoincrement; primary key is indexed for lookups.
    id = Column(Integer, Sequence('projects_id_seq'), primary_key=True, index=True)
    name = Column(String(50), index=True)
    # One-to-many link; the Item model must declare the matching
    # "project" relationship (back_populates) — confirm in its module.
    items = relationship("Item", back_populates="project")
|
from node import Operator_node, Float_node, Variable_node, Print_node, If_node, Endif_node, While_node, Endwhile_node, Node
from token_types import Token_types
from operations import Operations
from token import Token
from typing import List
def parse(tokens: List[List[Token]]) -> List[List[Node]]:
    '''
    Parse the given tokens into a 2d list containing nodes (one AST per row).

    Operators are folded into tree nodes in passes of descending precedence
    (* / first, then + -, then comparisons, then statements) so that
    mathematical priority rules are respected; finally if/endif and
    while/endwhile rows are paired.
    '''
    # Pass 1: turn every token row into a flat list of nodes.
    lst = list(map(lambda row: get_nodes(row), tokens))
    # Passes 2-5: fold operators, highest precedence first.
    lst = list(map(lambda row: nodes_to_tree(row, [Operations.TIMES, Operations.DIVIDE]), lst))
    lst = list(map(lambda row: nodes_to_tree(row, [Operations.PLUS, Operations.MINUS]), lst))
    lst = list(map(lambda row: nodes_to_tree(row, [Operations.EQUALS, Operations.GREATER, Operations.LESSER]), lst))
    lst = list(map(lambda row: nodes_to_tree(row, [Operations.IS, Operations.PRINT, Operations.IF, Operations.WHILE]), lst))
    # Final pass: link if/endif and while/endwhile rows together.
    lst = pair_if_and_while(lst)
    return lst
def parse_print(f: callable):
    '''
    Decorator for the parse function that prints the parse output.
    '''
    def inner(text: List[List[str]]):
        print("Parsing started\n")
        nodes = f(text)
        print("Parse Output:")
        # list() forces evaluation of the lazy map so every row is printed.
        list(map(print, nodes))
        print()
        return nodes
    return inner
def pair_if_and_while(nodes: List[List[Node]], row: int = 0) -> List[List[Node]]:
    '''
    Pair every endif/endwhile row with its matching if/while row.

    Both nodes in a pair receive the row number of the other node so the
    evaluator can jump between them.
    '''
    # Base case: every row has been visited.
    if row == len(nodes):
        return nodes
    if type(nodes[row][0]) == Endif_node or type(nodes[row][0]) == Endwhile_node:
        # Only pair terminators that have not been matched yet.
        # NOTE(review): if no matching opener exists, find_pair returns the
        # table unchanged and this recurses on the same row forever —
        # presumably the input is always well-formed; confirm upstream.
        if nodes[row][0].row_number == None:
            return pair_if_and_while(find_pair(nodes, row, row), row)
    return pair_if_and_while(nodes, row+1)
def find_pair(nodes: List[List[Node]], row: int, location: int) -> List[List[Node]]:
    '''
    Search backwards from *row* for the opener matching the terminator at
    *location* (If_node for Endif_node, While_node for Endwhile_node).

    Both nodes in the pair receive the row number of the other node.
    Returns the node table unchanged when no opener is found.
    '''
    if row < 0:
        return nodes
    if (type(nodes[row][0]) == If_node and type(nodes[location][0]) == Endif_node) or (type(nodes[row][0]) == While_node and type(nodes[location][0]) == Endwhile_node):
        # Skip openers that are already paired (handles nested blocks).
        if nodes[row][0].row_number == None:
            # Shallow copy: the row lists (and node objects) stay shared,
            # so the assignments below also mutate the caller's nodes.
            new_nodes = nodes.copy()
            new_nodes[row][0].row_number = location
            new_nodes[location][0].row_number = row
            return new_nodes
    return find_pair(nodes, row-1, location)
def nodes_to_tree(nodes: List[Node], operations: List[Operations]) -> List[Node]:
    '''
    Fold the flat node list of one row into a tree for the given operator set.

    Prefix nodes (print/if/while) absorb the node to their right; infix
    operator nodes absorb both neighbours. Repeated passes with different
    operator sets implement operator precedence.
    '''
    if len(nodes) < 2:
        return nodes
    x, y, *tail = nodes
    # Prefix statements: wrap the expression that follows them.
    if type(x) == Print_node and Operations.PRINT in operations:
        return nodes_to_tree([Print_node(y)] + tail, operations)
    elif type(x) == If_node and Operations.IF in operations:
        return nodes_to_tree([If_node(y)] + tail, operations)
    elif type(x) == While_node and Operations.WHILE in operations:
        return nodes_to_tree([While_node(y)] + tail, operations)
    # Infix operators need at least three nodes: lhs, operator, rhs.
    if len(nodes) < 3:
        return nodes
    z, *tail = tail
    if type(y) == Operator_node:
        if y.operation in operations:
            # Fold lhs and rhs into the operator node and keep scanning.
            return nodes_to_tree([Operator_node(y.operation, x, z)] + tail, operations)
    # No fold at this position; keep x and continue with the rest.
    return [x] + nodes_to_tree(([y] + [z] + tail), operations)
def get_nodes(tokens: List[Token]) -> List[Node]:
    '''
    Map a row of tokens onto a flat list of (unlinked) AST nodes.

    Operator/statement nodes are created with the Node class itself as a
    placeholder child; nodes_to_tree later replaces the placeholders with
    real operands.
    '''
    if len(tokens) == 0:
        return []
    head, *tail = tokens
    if head.type == Token_types.FLOAT:
        return [Float_node(head.value)] + get_nodes(tail)
    elif head.type == Token_types.PLUS:
        return [Operator_node(Operations.PLUS, Node, Node)] + get_nodes(tail)
    elif head.type == Token_types.MINUS:
        return [Operator_node(Operations.MINUS, Node, Node)] + get_nodes(tail)
    elif head.type == Token_types.DIVIDE:
        return [Operator_node(Operations.DIVIDE, Node, Node)] + get_nodes(tail)
    elif head.type == Token_types.TIMES:
        return [Operator_node(Operations.TIMES, Node, Node)] + get_nodes(tail)
    elif head.type == Token_types.IS:
        return [Operator_node(Operations.IS, Node, Node)] + get_nodes(tail)
    elif head.type == Token_types.VARIABLE:
        return [Variable_node(head.value)] + get_nodes(tail)
    elif head.type == Token_types.EQUALS:
        return [Operator_node(Operations.EQUALS, Node, Node)] + get_nodes(tail)
    elif head.type == Token_types.GREATER:
        return [Operator_node(Operations.GREATER, Node, Node)] + get_nodes(tail)
    elif head.type == Token_types.LESSER:
        return [Operator_node(Operations.LESSER, Node, Node)] + get_nodes(tail)
    elif head.type == Token_types.PRINT:
        return [Print_node(Node)] + get_nodes(tail)
    elif head.type == Token_types.IF:
        return [If_node(Node)] + get_nodes(tail)
    elif head.type == Token_types.ENDIF:
        return [Endif_node()] + get_nodes(tail)
    elif head.type == Token_types.WHILE:
        return [While_node(Node)] + get_nodes(tail)
    elif head.type == Token_types.ENDWHILE:
        return [Endwhile_node()] + get_nodes(tail)
    # NOTE(review): an unrecognized token type falls through and returns
    # None, which would break the callers' list concatenation — assumes the
    # lexer only ever emits the types handled above; confirm.
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
from . import goods_blue
@goods_blue.route("/index")
def user_info():
    """Index view of the goods blueprint; returns a placeholder string.

    NOTE(review): the function name suggests user info but the view just
    returns the blueprint name — confirm this is the intended behavior.
    """
    return "goods_blue"
|
from sklearn.cluster import KMeans
import numpy as np
import pandas as pd
class AlgoritmoDeKMeans:
    """KMeans wrapper used to cluster games and recommend similar titles."""
    def __init__(self, dataset):
        # 14 clusters chosen from analysing the inertia over 30 different
        # cluster counts (see mostra_erro_com_30_grupos_diferentes).
        self.model = KMeans(n_clusters=14)
        self.model.fit(dataset)
    # Insert the "grupo" (cluster label) column into the dataset.
    def insere_coluna_de_grupo(self, dataset):
        grupo = pd.Series(self.model.labels_)
        dataset["grupo"] = grupo.values
        return dataset
    # Return the center of each cluster.
    def retorna_centros(self):
        return self.model.cluster_centers_
    # Print the inertia for 30 different cluster counts, to help pick the
    # number of clusters used by the program.
    def mostra_erro_com_30_grupos_diferentes(self, dataset):
        for n in range(1, 31):
            new_model = KMeans(n_clusters=n)
            new_model.fit(dataset)
            print("inertia com %d grupos" % n)
            print(new_model.inertia_)
    # Show recommendations from a cluster chosen by the user.
    def mostra_jogos_de_um_grupo(self, n, dataset_k_means, dataset_completo):
        str_n = str(n)
        qual_grupo = "grupo=="
        qual_grupo = qual_grupo+str_n
        # Rows belonging to cluster n (pandas query on the "grupo" column).
        info_grupo = dataset_k_means.query(qual_grupo)
        print("Se você gostou do seguinte jogo:")
        jogo_inicial = info_grupo.index[0]
        print(dataset_completo.loc[jogo_inicial])
        print()
        print("Você pode gostar dos seguintes jogos:")
        # Recommend the next three titles from the same cluster.
        for num in range(1, 4):
            indicacao = info_grupo.index[num]
            print(dataset_completo.loc[indicacao])
|
#!/usr/bin/env python
import socket
import time
from envirophat import weather, leds , light
HOST, PORT = '', 8888
listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_socket.bind((HOST, PORT))
listen_socket.listen(1)
print 'Serving HTTP on port %s ...' % PORT
while True:
temperature = weather.temperature()
pressure = weather.pressure()
lightvalue = light.light()
client_connection, client_address = listen_socket.accept()
request = client_connection.recv(2048)
first_line = request.split('\r\n')
path = first_line[0].split(' ')
path_clean = path[1].split('?')
print request
if path_clean[0] == "/pressure":
humidi = "Pressure: %.2f hPa" % pressure
http_response = \
"HTTP/1.1 200 OK\n"\
"Content-Type: text/xml\n\n"\
"<?xml version='1.0' encoding='UTF-8'?>"\
"<SnomIPPhoneText track='no'>"\
"\t<Title>Alicante</Title>"\
"\t<LocationX>55</LocationX>"\
"\t<LocationY>32</LocationY>"\
"\t<Text>"\
+humidi+\
"<br/>"\
"\t</Text>"\
"\t<Image>"\
"\t\t<LocationX>2</LocationX>"\
"\t\t<LocationY>23</LocationY>"\
"\t\t<Url>http://www.omkarsupra.com/images/icons/high-pressure.png</Url>"\
"\t</Image>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F1</Name>"\
"\t\t<Label>Reload</Label>"\
"\t\t<URL>http://10.0.1.153:8888/pressure</URL>"\
"\t</SoftKeyItem>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F2</Name>"\
"\t\t<Label>Temperature</Label>"\
"\t\t<URL>http://10.0.1.153:8888/temperature</URL>"\
"\t</SoftKeyItem>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F3</Name>"\
"\t\t<Label>Light</Label>"\
"\t\t<URL>http://10.0.1.153:8888/light</URL>"\
"\t</SoftKeyItem>"\
"</SnomIPPhoneText>"
elif path_clean[0] == "/temperature":
temp = "Temperature: %.2f C" % temperature
http_response = \
"HTTP/1.1 200 OK\n"\
"Content-Type: text/xml\n\n"\
"<?xml version='1.0' encoding='UTF-8'?>"\
"<SnomIPPhoneText track='no'>"\
"\t<Title>Alicante</Title>"\
"\t<LocationX>55</LocationX>"\
"\t<LocationY>32</LocationY>"\
"\t<Text>"\
+temp+\
"<br/>"\
"\t</Text>"\
"\t<Image>"\
"\t\t<LocationX>2</LocationX>"\
"\t\t<LocationY>23</LocationY>"\
"\t\t<Url>http://www.tommytape.com/wp-content/assets/icons/red-icon-temp.png</Url>"\
"\t</Image>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F1</Name>"\
"\t\t<Label>Reload</Label>"\
"\t\t<URL>http://10.0.1.153:8888/temperature</URL>"\
"\t</SoftKeyItem>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F2</Name>"\
"\t\t<Label>Pressure</Label>"\
"\t\t<URL>http://10.0.1.153:8888/pressure</URL>"\
"\t</SoftKeyItem>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F3</Name>"\
"\t\t<Label>Light</Label>"\
"\t\t<URL>http://10.0.1.153:8888/light</URL>"\
"\t</SoftKeyItem>"\
"</SnomIPPhoneText>"
elif path_clean[0] == "/light":
lightnum = "Light: %.2f " % lightvalue
http_response = \
"HTTP/1.1 200 OK\n"\
"Content-Type: text/xml\n\n"\
"<?xml version='1.0' encoding='UTF-8'?>"\
"<SnomIPPhoneText track='no'>"\
"\t<Title>Alicante</Title>"\
"\t<LocationX>55</LocationX>"\
"\t<LocationY>32</LocationY>"\
"\t<Text>"\
+lightnum+\
"<br/>"\
"\t</Text>"\
"\t<Image>"\
"\t\t<LocationX>2</LocationX>"\
"\t\t<LocationY>23</LocationY>"\
"\t\t<Url>https://ae01.alicdn.com/kf/HTB1Sv9UJFXXXXXPXVXXq6xXFXXXn/14W-Light-sensor-energy-saving-lamp-dusk-to-dawn-CFL-ESL.jpg_50x50.jpg</Url>"\
"\t</Image>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F1</Name>"\
"\t\t<Label>Reload</Label>"\
"\t\t<URL>http://10.0.1.153:8888/light</URL>"\
"\t</SoftKeyItem>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F2</Name>"\
"\t\t<Label>Pressure</Label>"\
"\t\t<URL>http://10.0.1.153:8888/pressure</URL>"\
"\t</SoftKeyItem>"\
"\t<SoftKeyItem>"\
"\t\t<Name>F3</Name>"\
"\t\t<Label>Temperature</Label>"\
"\t\t<URL>http://10.0.1.153:8888/temperature</URL>"\
"\t</SoftKeyItem>"\
"</SnomIPPhoneText>"
else :
http_response = \
"HTTP/1.1 200 OK\n"\
"Content-Type: text/xml\n\n"\
client_connection.sendall(http_response)
client_connection.close()
|
#####################
from spyre import server
class SimpleApp(server.App):
    """Minimal Spyre app: echoes the text typed into the input box as HTML."""
    title = "Simple App"
    # One free-text input, pre-filled with "hello world".
    inputs = [{ "type":"text",
                "key":"words",
                "label": "write here",
                "value":"hello world"}]
    # One HTML output panel, refreshed by button1.
    outputs = [{"type":"html",
                "id":"some_html",
                "control_id":"button1"}]
    # The button that triggers the output refresh.
    controls = [{"type":"button",
                 "label":"press to update",
                 "id":"button1"}]
    def getHTML(self, params):
        """Render the 'words' input value back into the HTML output."""
        words = params['words']
        return "Here are some words: <b>%s</b>"%words
app = SimpleApp()
# Launch the built-in web server (this call blocks).
app.launch()
|
"""Make some dominos and some functions to access them."""
# Bug fix: the original placed each "docstring" BEFORE its def, where it is
# just a discarded expression statement; they are now real docstrings.


def create(left, right):
    """Create a domino with a left and a right value."""
    domino = (left, right)
    return domino


def get_left(domino):
    """Get the left domino value."""
    return domino[0]


def get_right(domino):
    """Get the right domino value."""
    return domino[1]


def as_str(domino):
    """Convert the domino values to a printable string like "[1 | 6]"."""
    return ("[%d | %d]" % (get_left(domino), get_right(domino)))
|
# coding: utf-8
import random
import json
import asyncio
import itchat
from threading import Thread
from .config import (STICKERS_FOR_SPAM, EVERY_REPLY_SEND_COUNT, REPLY_TEMPLATE_SPAM,
ANIMATED_QUERY_TYPE, GIFT_MONEY_KEYWORD, GIFT_MONEY_STICKER_QUERY)
from .logger import logger
from .chatroom import chatroom
from .messager import send_image_by_urls, send_stickers_by_query, send_animated_stickers_by_query
from .util import match_query_from_text, is_spam_msg
# Make sure the member list is populated before reading the owner entry.
if not chatroom.memberList:
    chatroom.update(detailedMember=True)
# First member of the chatroom is its owner (itchat convention — confirm).
chatroom_owner = chatroom.memberList[0]
async def reply_spam():
    """Call out the room owner and post a random batch of anti-spam stickers."""
    chatroom.send(REPLY_TEMPLATE_SPAM.format(chatroom_owner.nickName))
    sticker_urls = random.sample(STICKERS_FOR_SPAM, EVERY_REPLY_SEND_COUNT)
    await send_image_by_urls(sticker_urls)
async def reply_note(msg):
    """React to note messages: answer gift-money notes with stickers."""
    if GIFT_MONEY_KEYWORD in msg.text:
        await send_stickers_by_query(GIFT_MONEY_STICKER_QUERY)
async def reply_text(msg):
    """Answer a text message with stickers matching the query it contains."""
    query, query_type = match_query_from_text(msg.text)
    if not query:
        # Nothing recognizable in the message; stay silent.
        return
    if query_type == ANIMATED_QUERY_TYPE:
        await send_animated_stickers_by_query(query)
    else:
        await send_stickers_by_query(query)
def serialize_msg(msg):
    """Flatten an incoming message object into a plain, JSON-serializable dict.

    Non-string ``text`` values are replaced with an empty string; key order
    is preserved for stable JSON logging.
    """
    data = {}
    for field in ('msgId', 'createTime', 'fromUserName',
                  'toUserName', 'msgType', 'type'):
        data[field] = getattr(msg, field)
    # Guard against non-str payloads (e.g. media callables) in msg.text.
    data['text'] = msg.text if isinstance(msg.text, str) else ''
    for field in ('url', 'content', 'actualUserName',
                  'actualNickName', 'isAt'):
        data[field] = getattr(msg, field)
    return data
def log_msg(msg):
    """Log a serialized message: matched queries at info level, the rest at spam."""
    serialized_msg = serialize_msg(msg)
    serialized_msg_text = serialized_msg['text']
    if msg.type == itchat.content.SYSTEM:
        # Skip system messages carrying no text, content or url at all.
        should_log_msg = any([serialized_msg_text, msg.content, msg.url])
        if not should_log_msg:
            return
    query, _ = match_query_from_text(serialized_msg_text)
    # Full structured record, keeping non-ASCII (Chinese) text readable.
    logger.notice(json.dumps(serialized_msg, ensure_ascii=False))
    debug_log = '{} {} {} ({})'.format(msg.actualNickName, serialized_msg_text, msg.url, msg.type)
    if query:
        logger.info(debug_log)
    else:
        logger.spam(debug_log)
async def reply(msg):
    """Dispatch an incoming message to the matching reply coroutine."""
    try:
        log_msg(msg)
    except Exception as e:
        # Logging must never break replying; record the failure and go on.
        logger.critical(e)
    if is_spam_msg(msg):
        await reply_spam()
    elif msg.type == itchat.content.NOTE:
        await reply_note(msg)
    elif msg.type == itchat.content.TEXT:
        await reply_text(msg)
def sync_reply(msg):
    """Run the async reply() to completion on a fresh event loop (thread entry)."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(reply(msg))
    loop.close()
def reply_in_background(msg):
    """Handle *msg* on a worker thread so the itchat callback never blocks."""
    t = Thread(target=sync_reply, kwargs={'msg': msg})
    t.start()
|
def solve(n, v, arr, brr):
    """Return the usable total, capped at v.

    The smallest per-unit ratio brr[i]/arr[i] limits how much of every
    ingredient can be used; the total is sum(arr) * that ratio, but never
    more than the volume limit v.
    """
    # Bug fixes vs. original script: no longer shadows the builtin `sum`,
    # and the sentinel constant 10**13 is replaced by min(..., default=0)
    # (empty input yields a total of 0, matching the old loop).
    ratio = min((brr[i] / arr[i] for i in range(n)), default=0)
    total = sum(a * ratio for a in arr[:n])
    return v if total > v else total


if __name__ == '__main__':
    n, v = map(int, input().split())
    arr = list(map(int, input().strip().split()))[:n]
    brr = list(map(int, input().strip().split()))[:n]
    print(solve(n, v, arr, brr))
|
# -*- coding: utf-8 -*-
__author__ = 'Konrad'
import threading
import time
class myThread(threading.Thread):
    """Thread demonstrating a reusable barrier built on a Condition.

    Class-level counters shared by all instances:
      threadCounter  - threads currently registered with the barrier
      barrierCounter - threads that have reached the barrier
      exitCounter    - threads that have left the barrier (enables reuse)
    """
    threadCounter = 0
    barrierCounter = 0
    exitCounter = 0
    lock = threading.Lock()
    cv = threading.Condition(lock)
    def __enter__(self):
        return self
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        # Register this thread with the barrier while holding the lock.
        with myThread.lock:
            myThread.threadCounter += 1
    def run(self):
        """Sleep proportionally to the id, then wait for all peers at the barrier."""
        print(self.name + " is starting.")
        time.sleep(self.threadID * 2)
        print(self.name + " is ending.")
        self.barrier()
        print(self.name + " - ended")
    @staticmethod
    def barrier():
        """Block until every registered thread has arrived, then release all."""
        myThread.cv.acquire()
        myThread.barrierCounter += 1
        while myThread.barrierCounter < myThread.threadCounter:
            myThread.cv.wait()
        myThread.exitCounter += 1
        # The last thread to leave resets the counters so the barrier
        # can be reused by the next batch of threads.
        if myThread.exitCounter >= myThread.threadCounter:
            myThread.exitCounter = 0
            myThread.barrierCounter = 0
        myThread.cv.notify_all()
        myThread.cv.release()
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Deregister from the barrier on context-manager exit.
        with myThread.lock:
            myThread.threadCounter -= 1
# First batch of three threads synchronizing at the barrier.
with myThread(1, "Thread 1") as t1, myThread(2, "Thread 2") as t2, myThread(3, "Thread 3") as t3:
    t1.start()
    t2.start()
    t3.start()
    t1.join()
    t2.join()
    t3.join()
# Second batch reuses the (reset) barrier counters.
with myThread(1, "Thread 4") as t1, myThread(2, "Thread 5") as t2, myThread(3, "Thread 6") as t3:
    t1.start()
    t2.start()
    t3.start()
    t1.join()
    t2.join()
    t3.join()
|
class TimeDiff:
    """Helper class to handle a duration in various units.

    Attributes:
        time_diff (int): duration to manage (in seconds).
    """

    def __init__(self, time1, time2):
        """Store the duration between two timestamps.

        Args:
            time1 (int): first timestamp in second.
            time2 (int): second timestamp in second.
        """
        self.time_diff = time2 - time1

    def hms(self):
        """Return the duration as [hours, minutes, seconds]."""
        total_minutes, secs = divmod(self.time_diff, 60)
        hours, mins = divmod(total_minutes, 60)
        return [int(hours), int(mins), int(secs)]

    def ms(self):
        """Return the duration as [minutes, seconds]."""
        mins, secs = divmod(self.time_diff, 60)
        return [int(mins), int(secs)]

    def s(self):
        """Return the duration as a whole number of seconds."""
        return int(self.time_diff)
|
class Inside():
    """State: currently between a pair of escape characters (quoted)."""
    pass


class Outside():
    """State: currently outside any escaped region."""
    pass


def switch(s):
    """Toggle the quoting state: Outside -> Inside, Inside -> Outside."""
    if isinstance(s, Outside):
        return Inside()
    if isinstance(s, Inside):
        return Outside()


def separate(line, separator, escape_char):
    """Split *line* on *separator*, treating *escape_char* as a quote toggle.

    Separators inside an escaped region are kept as literal characters.
    Note: any characters after the last separator are discarded, exactly
    as in the original implementation.
    """
    assert(type(separator) == str)
    assert(type(escape_char) == str)
    assert(separator != escape_char)
    state = Outside()
    pieces = []
    current = ""
    for ch in line:
        if ch == escape_char:
            state = switch(state)
        elif ch == separator and isinstance(state, Outside):
            pieces.append(current)
            current = ""
        else:
            current += ch
    return pieces
def parse_csv(x, separator, escape_char):
    """Read file *x* and split it into (header, content) token rows.

    NOTE(review): the header is taken from line index 3 and content from
    line 4 onwards — presumably the first three non-blank lines are
    preamble in this specific export; confirm against the input file.
    """
    assert(type(separator) == str)
    assert(type(escape_char) == str)
    assert(separator == ',' or separator == ' ')
    x = open(x).read()
    # Drop blank lines before slicing out header/content.
    lines = [l for l in x.split('\n') if l.strip() != ""]
    header = lines[3]
    content = lines[4:]
    del lines
    header = separate(header, separator, escape_char)
    content = [separate(line, separator, escape_char) for line in content]
    # Every row must have exactly as many fields as the header.
    for token in content:
        assert(len(token) == len(header))
    return (header, content)
def separate_by_any_of(x, a):
    """Split string *x* on any single character contained in the set *a*."""
    assert(x.__class__ == str)
    assert(a.__class__ == set)
    for ch in a:
        assert(ch.__class__ == str)
    parts = []
    buf = ""
    for ch in x:
        if ch in a:
            parts.append(buf)
            buf = ""
        else:
            buf += ch
    # Unlike separate(), the trailing piece is kept.
    parts.append(buf)
    return parts


def split_by_any_with_index(content, index, a):
    """Apply separate_by_any_of to column *index* of every row in *content*."""
    assert(type(content) == list)
    assert(type(index) == int)
    assert(type(a) == set)
    result = []
    for row in content:
        assert(type(row) == list)
        result.append(separate_by_any_of(row[index], a))
    return result


def split_to_array(token, index, a):
    """Replace token[index] in place with its split-apart pieces; return token."""
    assert(token.__class__ == list)
    assert(index.__class__ == int)
    assert(a.__class__ == set)
    token[index] = separate_by_any_of(token[index], a)
    return token
def LevenshteinD(word1, word2):
    """Compute the Levenshtein (edit) distance between two strings.

    Classic dynamic-programming table; O(len(word1) * len(word2)) time.
    """
    # Bug fix: the original asserted on undefined names (m, n) before they
    # were assigned — a NameError on every call — and compared __class__
    # against the *string* 'str' instead of the str type.
    assert(word1.__class__ == str)
    assert(word2.__class__ == str)
    m = len(word1)
    n = len(word2)
    table = [[0] * (n + 1) for _ in range(m + 1)]
    # Base cases: distance from the empty prefix is the prefix length.
    for i in range(m + 1):
        table[i][0] = i
    for j in range(n + 1):
        table[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if word1[i - 1] == word2[j - 1]:
                table[i][j] = table[i - 1][j - 1]
            else:
                # 1 + min(delete, insert, substitute)
                table[i][j] = 1 + min(table[i - 1][j], table[i][j - 1], table[i - 1][j - 1])
    return table[-1][-1]
def exclude_repetition(l):
    """Drop consecutive entries whose first element repeats, keeping the first."""
    assert(l.__class__ == list)
    kept = []
    previous_key = None
    for entry in l:
        key = entry[0]
        if key != previous_key:
            kept.append(entry)
            previous_key = key
    return kept
if __name__ == "__main__":
    # Load the ingredient table; parse_csv skips the first 3 preamble lines.
    header, content = parse_csv("central/Ingredient Nutrient Values-Table 1.csv", ',', '"')
    # Split the ingredient-description column (index 1) on commas.
    tokens = split_by_any_with_index(content, 1, {','})
    out = open("code_and_ingredients.txt", 'w')
    #print(tokens)
    # Strip surrounding whitespace from every piece.
    a = []
    for token in tokens:
        b = []
        for t in token:
            b.append(t.strip())
        a.append(b)
    #print(a)
    ingredients = []
    #for ingredient in a:
    #print(ingredient)
    #ingredients.append(ingredient[0])
    #print(ingredients)
    # Pair each code (column 0) with the first comma-separated piece of its
    # description, lower-cased, then drop consecutive duplicate codes.
    ans = []
    for token in content:
        ans.append((token[0], separate_by_any_of(token[1], {','})[0].lower()))
    #print(ans)
    ans = exclude_repetition(ans)
    print(ans)
    print(len(ans))
    # Free intermediates before writing the output file.
    del header
    del tokens
    del token
    del b
    del t
    del a
    del ingredients
    del content
    del Inside
    del Outside
    # Emit "code$ingredient" lines for downstream consumption.
    for (code, ingredient) in ans:
        print(str(code) + "$" + str(ingredient), file = out)
    out.close()
|
# *_* coding=utf8 *_*
#!/usr/bin/env python
from unreal.utils import ipv4
from unreal.handler import base
class Link(base.BaseHandler):
    """Short-URL redirect handler: resolves a uuid and records the visit."""
    def get(self, uuid):
        """Look up *uuid*, log a page view, and redirect to the target URL."""
        url = self.db.get("SELECT * FROM url WHERE uuid=%s", uuid)
        # Store the visitor's IP as an integer for compact logging.
        remote_ip_v4 = ipv4.to_int(self.request.remote_ip)
        referer = self.request.headers.get('Referer')
        if url is not None:
            ori_url = url['url']
            # Bump the hit counter and append an access-log row.
            self.db.execute("UPDATE url SET pv=pv+1 WHERE id=%s", url['id'])
            self.db.execute("INSERT INTO pv_log(url_id, remote_ip_v4, referer, create_time) "
                            "VALUES(%s, %s, %s, NOW())", url['id'], remote_ip_v4, referer)
            # Ensure the target carries a scheme before redirecting.
            redirect_url = ori_url if ori_url.startswith(
                'http://') else 'http://%s' % ori_url
            self.redirect(redirect_url)
        else:
            # Unknown uuid: fall back to the landing page.
            self.redirect("/")
|
import matplotlib.pyplot as plt
import numpy as np
import math
#Core Visualization Function for generated .csv files & Overall Performance file
#Core Visualization Function for generated .csv files & Overall Performance file
def Visualize(version_array, measure_array, nrWorkers=0, nrCycles=0, promFile=''):
    """Plot a horizontal bar chart of execution times and save it as a PNG.

    Args:
        version_array: labels for each bar (model/version names).
        measure_array: execution times in milliseconds, one per label.
        nrWorkers: worker count shown in the title (int or str).
        nrCycles: unused; kept for backward compatibility.
        promFile: when non-empty, names a per-file graph; otherwise the
            overall-performance graph is produced.
    """
    plt.rcdefaults()
    fig, ax = plt.subplots()
    y_pos = np.arange(len(version_array))
    # Normalize the x axis: long floating-point values make unreadable bars.
    # (int(round(x)) is equivalent to the original int(floor(round(x))).)
    norm_x_axis = [int(round(x)) for x in measure_array]
    # Set up the subplots.
    clr = ('blue', 'forestgreen', 'gold', 'red', 'purple', 'orange', 'lightblue')
    bar = ax.barh(y_pos, norm_x_axis, align='center', linewidth=0, color=clr)
    ax.set_yticks(y_pos)
    ax.set_yticklabels(version_array)
    ax.invert_yaxis()
    ax.set_xlabel('Execution Time in Milliseconds(Lower/Higher)')
    # Print the overall model performance or separate performance per file.
    if promFile != '':
        ax.set_ylabel('Models')
        # Bug fix: nrWorkers defaults to an int, so concatenating it with a
        # str raised TypeError; convert explicitly.
        workers = str(nrWorkers)
        ax.set_title('Graph: ' + promFile + '_Workers: [' + workers + ']')
        fig_name = promFile + '[' + workers + ']' + '.png'
    else:
        ax.set_title('Overall Model Performance')
        fig_name = 'Overall_Performance' + '.png'
    plt.legend(bar, version_array)
    plt.tight_layout()
    # Instead of plotting interactively, save the image locally.
    plt.savefig(fig_name, dpi=95)
|
class Jeu(object):
    """Game state for a werewolf-style chat game (village, roles, night actions)."""
    def __init__(self):
        self.Village = {}       # pseudo -> role
        self.nbrGentils = 0     # number of villagers
        self.nbrLoups = 0       # number of werewolves
        self.tours = []
        # Potential deaths before the morning vote; cleared every morning.
        self.listeMortsPotentielles = []
        # Players protected during the night; cleared every morning.
        self.listeProteges = []
    def regarde(self, pseudo):
        """Return the seer message for *pseudo*'s role, or None if unknown."""
        # Bug fix: a bare except hid every error here; only a missing
        # player should yield None.
        try:
            role = self.Village[pseudo]
        except KeyError:
            return None
        return 'rregarde %s' % role
    def protege(self, pseudo):
        """Mark *pseudo* as protected for the night."""
        self.listeProteges.append(pseudo)
    def tue(self, pseudo):
        """Mark *pseudo* as a potential death before the morning vote."""
        self.listeMortsPotentielles.append(pseudo)
    def tourSuivant(self):
        """Advance to the next turn (not implemented yet)."""
        print('')
    def removePerso(self, pseudo):
        """Remove *pseudo* from the village; True if removed, False if absent."""
        # Bug fix: narrowed the bare except to KeyError.
        try:
            del self.Village[pseudo]
            return True
        except KeyError:
            return False
    def ajoutPerso(self, reste):
        """Add a player from a "pseudo role" string."""
        pseudo, role = reste.split()
        self.Village[pseudo] = role
    def updateMorts(self):
        """Apply the pending deaths (not implemented yet)."""
        print('')
|
# -*- coding: utf-8 -*-
"""A plugin to migrate mailboxes using IMAP."""
from __future__ import unicode_literals
from pkg_resources import get_distribution, DistributionNotFound
try:
    # Resolve the installed distribution's version at runtime.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed (e.g. running from a source checkout)
    pass
# Tell Django which AppConfig class to load for this plugin.
default_app_config = "modoboa.imap_migration.apps.IMAPMigrationConfig"
|
import os, sys, math
import pygame as pg
from player import Player
from enemy import EnemySpawner
from utils import DamageBar, MenuSystem, media_path
TITLE = "Bubble Shoot"
# Placeholder; the real window size is computed in Game.__init__.
SIZE = (0, 0)
FPS = 60
BACKGROUND = (80, 80, 80)        # in-game background color (RGB)
MENU_BACKGROUND = (55, 37, 92)   # menu background color (RGB)
class Game:
    """Top-level game object: window, sprites, menu system and main loop."""
    def __init__(self):
        """Initialize pygame, the window, sprites and UI resources."""
        pg.init()
        # Local size: full screen width, height minus 100px for task bars
        # (shadows the module-level SIZE placeholder on purpose).
        SIZE = pg.display.Info().current_w, pg.display.Info().current_h - 100
        pg.display.set_caption(TITLE)
        self.screen = pg.display.set_mode(SIZE, pg.RESIZABLE, 32)
        # Create sprites
        self.player = Player([n / 2 for n in SIZE], (50, 50))
        self.espawner = EnemySpawner()
        self.damage_bar = DamageBar((110, 20), (200, 25))
        self.menu = MenuSystem()
        # Game vars
        self.score = 0
        self.font = pg.font.Font(None, 30)
        # Load cursor
        self.game_cursor = pg.cursors.load_xbm(media_path("cursor.xbm"), media_path("cursor-mask.xbm"))
    def reset(self):
        """Restore player health/bullets and clear all enemies for a new round."""
        self.espawner.enemies_killed = 0
        for obj in [self.player, self.espawner]:
            if isinstance(obj, Player):
                obj.health = obj.max_health
                obj.bullets.empty()
            if isinstance(obj, EnemySpawner):
                obj.time = 0
                obj.enemies.empty()
    def events(self):
        """Handle quit, resize, pause-menu toggling and mouse input."""
        for event in pg.event.get():
            # Quit Events
            if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
                self.running = False
            # Resize Events
            if event.type == pg.VIDEORESIZE:
                self.screen = pg.display.set_mode(event.dict['size'], pg.RESIZABLE, 32)
                pg.display.update()
            # Pause Menu (TAB toggles)
            if event.type == pg.KEYDOWN and event.key == pg.K_TAB:
                if not self.menu.active:
                    self.menu.set_pause()
                    self.menu.active = True
                else:
                    self.menu.active = False
            # Menu and Player Events (left click)
            if event.type == pg.MOUSEBUTTONDOWN:
                if event.button == 1:
                    if self.menu.active:
                        self.menu.on_mouse(self.reset)
                    else:
                        self.player.shoot_bullet()
    def draw(self):
        """Draw the menu screen or the in-game scene, then update the display."""
        if self.menu.active:
            self.screen.fill(MENU_BACKGROUND)
            pg.mouse.set_cursor(*pg.cursors.arrow)
            self.menu.draw(self.score)
        else:
            self.screen.fill(BACKGROUND)
            pg.mouse.set_cursor(*self.game_cursor)
            # Draw Player DamageBar
            self.screen.blit(self.damage_bar.image, self.damage_bar.rect)
            # Draw Score
            size = pg.display.get_surface().get_size()
            score_surf = self.font.render("Score {} ".format(self.score), True, (255, 255, 255))
            self.screen.blit(score_surf, (size[0] - 200, 15))
            # Draw player and player bullets
            self.screen.blit(self.player.image, self.player.rect)
            self.player.bullets.draw(self.screen)
            # Draw enemies and enemy bullets, damage_bars
            self.espawner.enemies.draw(self.screen)
            for enemy in self.espawner.enemies.sprites():
                enemy.bullets.draw(self.screen)
                self.screen.blit(enemy.damage_bar.image, enemy.damage_bar.rect)
        pg.display.update()
    def update(self, dt):
        """Advance game state by dt seconds; honor menu quit while paused."""
        if not self.menu.active:
            # Update player and score
            self.player.update(dt)
            self.score = self.espawner.enemies_killed
            # Update damage_bar
            self.damage_bar.update(self.player)
            # Update enemies
            self.espawner.update(dt, self.player.rect.center)
            # Check collisions
            self.player.check_collision(self.espawner.enemies)
            for enemy in self.espawner.enemies:
                enemy.check_collision(self.player)
            # Check Player Death
            if self.player.health <= 0:
                self.menu.set_gameover()
                self.menu.active = True
        else:
            if self.menu.quit:
                self.running = False
    def run(self):
        """Main loop: events, draw, update, capped at FPS."""
        self.running = True
        self.clock = pg.time.Clock()
        dt = 0
        self.clock.tick(FPS)
        while self.running:
            self.events()
            self.draw()
            self.update(dt)
            # Seconds elapsed since the previous frame.
            dt = self.clock.tick(FPS) / 1000.0
if __name__ == '__main__':
    # Center the SDL window before pygame creates it.
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    g = Game()
    g.run()
    pg.quit()
    sys.exit()
|
from location import *
import csv
import os
def readData():
    """Load locations from CSV files and classify them as VDCs or Dealers.

    Reads "LocationLatLong.csv" (name, _, lat, lon per row) into Location
    objects, upgrades the names listed in "ExistingVDC.csv" to VDCs, and
    converts every remaining location to a Dealer.

    Returns:
        dict: location name -> Location subtype (VDC or Dealer).
    """
    def _load(locdict, open_kwargs):
        """Populate *locdict* from both CSV files using the given open() kwargs."""
        with open("LocationLatLong.csv", **open_kwargs) as csvfile:
            locreader = csv.reader(csvfile, dialect='excel', delimiter=',')
            for row in locreader:
                locdict[str(row[0])] = Location(name = str(row[0]), lat = float(row[2]), lon = float(row[3]))
        #Convert the Locations that are VDCs to VDCs
        with open("ExistingVDC.csv", **open_kwargs) as csvfile:
            vdcreader = csv.reader(csvfile, dialect = 'excel', delimiter = ',')
            for row in vdcreader:
                locname = str(row[0])
                loc = locdict[locname]
                locdict[locname] = loc.toVDC(cap = float(row[1]), rail = (row[2] == 'TRUE'))

    locdict = {}
    # Deduplicated from two near-identical branches: on Windows the files
    # are opened with explicit utf-8 and universal newlines; elsewhere the
    # platform defaults are kept (original behavior preserved).
    kwargs = {'newline': '', 'encoding': 'utf-8'} if os.name == 'nt' else {}
    _load(locdict, kwargs)
    #Convert the rest of the Locations to Dealers
    for loc in locdict.keys():
        if not locdict[loc].isVDC():
            locdict[loc] = locdict[loc].toDealer()
    return locdict
|
import os;
import time;
import math;
import sys;
# Generate two batch files that split runs 300..399 across two workers:
# even run ids go to runBodies0.bat, odd ids to runBodies1.bat.
FIRST_RUN = 300
LAST_RUN = 399
# Fix: use context managers so the files are always closed (the original
# left them open on error), and drop the dead per-iteration filename string.
with open('../runBodies0.bat', 'w') as even_file, \
     open('../runBodies1.bat', 'w') as odd_file:
    outputs = [even_file, odd_file]
    for run_id in range(FIRST_RUN, LAST_RUN + 1):
        outputs[run_id % 2].write('./Modularity ' + str(run_id) + '\n')
|
#!/usr/bin/env python3
from functions_script import binomial
import matplotlib.pyplot as plt
from math import sqrt
def mean(n, p):
    """Expected value of a Binomial(n, p) distribution."""
    return n * p


def variance(n, p):
    """Variance of a Binomial(n, p) distribution."""
    return n * p * (1 - p)


def std_dev(var):
    """Standard deviation corresponding to variance *var*."""
    return sqrt(var)


def pr(p, total, choose, verbose=False):
    """Probability of exactly *choose* successes in *total* Bernoulli(p) trials."""
    combos = binomial(total, choose)
    single = (p ** choose) * (1 - p) ** (total - choose)
    if verbose:
        print(f'{total} choose {choose} has {combos} combinations. Single probability: {single}')
    return combos * single
def main():
    """Plot the Binomial(40, 0.12) pmf and report its summary statistics."""
    n = 40
    p = 0.12
    maximum = n+1
    minimum = 0
    # print(pr(p=p, total=n, selection=18))
    print()
    results = []
    # pmf value for every possible head count 0..n.
    for k in range(minimum, maximum):
        results.append(pr(p=p, total=n, choose=k, verbose=True))
    print()
    # Sanity check: the pmf should sum to ~1 over its whole support.
    print('Integral of results:', sum(results))
    # Plot
    plt.bar(range(minimum, maximum), results)
    plt.xlabel('Number of Heads')
    plt.ylabel('Likelihood')
    plt.title(f'{n} coin flips')
    plt.show()
    # Report the two most likely head counts (the mode and runner-up).
    sorted_results = sorted(results, reverse=True)
    print(f'We have the highest probability of getting {results.index(max(results))} heads in this experiment, '
          f'followed by {results.index(sorted_results[1])} heads.')
    print('Mean:', mean(n, p))
    print('Variance:', variance(n, p))
if __name__ == '__main__':
    main()
|
#!/usr/bin/python3
''' Classes partake of the dynamic nature of Python: they are created at runtime and can be modified after creation. '''
class MyClass():
name = "Wukong"
def __init__(self, name): # constructor
self.name = name
def show(self):
print "Welcome to here, my friend: ", self.name
def __delete__(self): # destructor
print 'Destructor Deleting object -' self.name
'''
Most OO languages pass this as a hidden parameter to the methods
defined on an object; Python does not. You have to declare it
explicitly. When you create an instance of a class and call its
methods, it will be passed automatically.
'''
a = MyClass("Carpie Diem")
print a.name
print MyClass.name
''' class method
A class method can be invoked without creating an instance of the
class. When a normal method is invoked, the interpreter inserts the
instance object as the first positional parameter: self. When a
class method is invoked, the class itself is given as the first
parameter, often called: cls '''
''' difference between method object and function object
When a non-data attribute of an instance is referenced, the
instance's class is searched. If the name denotes a valid class
attribute that is a function object, a method object is created by
packing (pointers to) the instance object and the function object
just found together in an abstract object: this is the method object.
When the method object is called with an argument list, a new
argument list is constructed from the instance object and the
argument list, and the function object is called with this new
argument list.'''
''' Generally speaking, instance variables are for data unique to
each instance and class variables are for attributes and methods
shared by all instances of the class'''
class Dog:
tricks = [] # mistaken use of a class variable
def __init__(self, name):
self.name = name
def add_trick(self, trick):
self.tricks.append(trick)
d = Dog('Fido')
e = Dog('Buddy')
d.add_trick('roll over')
e.add_trick('play dead')
d.tricks # unexpectedly shared by all dogs
#> ['roll over', 'play dead']
# correct design:
class Dog:
    """Corrected design: each dog gets its own trick list."""

    def __init__(self, name):
        self.name = name
        # A fresh list per instance -- unlike the class-variable version.
        self.tricks = []

    def add_trick(self, trick):
        """Record that this dog knows *trick*."""
        self.tricks.append(trick)
# Demo: the corrected version keeps per-instance trick lists.
d = Dog('Fido')
e = Dog('Buddy')
d.add_trick('roll over')
e.add_trick('play dead')
d.tricks
#> ['roll over']
e.tricks
#> ['play dead']
'''Data attributes may be referenced by methods as well as by
ordinary users ("clients") of an object. In other words, classes
are not usable to implement pure abstract data types. In fact,
nothing in Python makes it possible to enforce data hiding — it is
all based upon convention.'''
''' Inheritance
all methods in Python are effectively virtual. Derived classes can
override methods of their base classes.
'''
''' attribute of instance method objects '''
# NOTE(review): `m` is not defined anywhere in this file -- the two lines
# below are illustrative notes, not runnable code.
m.__self__ # instance object with the method m()
m.__func__ # the function object corresponding to the method
|
# test 1
# Nesting demos: common list/dict nesting patterns.
# Create 30 alien dicts: dictionaries nested inside a list.
aliens = []
for alien_num in range(1,31):
    alien = {'color':'green', 'age':'14', 'point':'5', 'number':alien_num}
    aliens.append(alien)
# Modify the first three aliens, using a slice.
for alien in aliens[:3]:
    if alien['color'] == 'green':
        alien['color'] = 'blue'
        alien['age'] = '20'
        alien['point'] = '10'
for alien in aliens:
    print(alien)
# Pizza-shop order: a list nested inside a dictionary.
pizza = {
    'crust':'thick',
    'toppings':['mushrooms', 'extra cheese']
}
# Access only the nested list.
for topping in pizza['toppings']:
    print(topping)
# Website user registry: dictionaries nested inside a dictionary.
users = {
    'aeinstein': {
        'first': 'albert',
        'last': 'einstein',
        'location': 'princeton',
    },
    'mcurie': {
        'first': 'marie',
        'last': 'curie',
        'location': 'paris',
    },
}
for username, user_info in users.items():
    print("\nUsername: " + username)
    full_name = user_info['first'] + " " + user_info['last']
    location = user_info['location']
    print("\tFull name: " + full_name.title())
    print("\tLocation: " + location.title())
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the 'License'); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from . import BaseDomsHandler
from webservice.NexusHandler import nexus_handler
from webservice.algorithms.doms.ResultsStorage import ResultsRetrieval
from webservice.webmodel import NexusExecutionResults
from webservice.webmodel import NexusProcessingException
@nexus_handler
class ExecutionStatusHandler(BaseDomsHandler.BaseDomsQueryCalcHandler):
    """Serve the status of a previously submitted execution (GET /job?id=...)."""

    name = 'Execution Status Handler'
    path = '/job'
    description = ''
    params = {}
    singleton = True

    def __init__(self, tile_service_factory, config=None):
        BaseDomsHandler.BaseDomsQueryCalcHandler.__init__(self, tile_service_factory)
        self.config = config

    def calc(self, request, **args):
        """Validate the 'id' argument, look the execution up, and return its status."""
        raw_id = request.get_argument('id', None)
        try:
            execution_id = uuid.UUID(raw_id)
        except ValueError:
            raise NexusProcessingException(reason='"id" argument must be a valid uuid', code=400)
        # Check if the job is done.
        with ResultsRetrieval(self.config) as retrieval:
            try:
                execution_details = retrieval.retrieveExecution(execution_id)
                execution_params = retrieval.retrieveParams(execution_id)
            except ValueError:
                raise NexusProcessingException(
                    reason=f'Execution {execution_id} not found',
                    code=404
                )
        job_status = NexusExecutionResults.ExecutionStatus(execution_details['status'])
        host = f'{request.requestHandler.request.protocol}://{request.requestHandler.request.host}'
        return NexusExecutionResults.NexusExecutionResults(
            status=job_status,
            created=execution_details['timeStarted'],
            completed=execution_details['timeCompleted'],
            execution_id=execution_id,
            message=execution_details['message'],
            params=execution_params,
            host=host
        )
|
from abc import ABCMeta, abstractmethod
from SignalGenerationPackage.SignalData import SignalData
import numpy as np
from LoggersConfig import loggers
from SignalGenerationPackage.Point import Point
class Signal(metaclass=ABCMeta):
    # Model in MVC, abstract class.
    # Aggregates the class SignalData - also part of the model, the entity.
    # Concrete subclasses supply Func / Init* / Recalc_X_Y / AddRequests_Y.
    def __init__(self):
        self.SignalData = None  # created by the subclass in InitSignalData()
        self.SendingTransformer = None  # created in InitSendingTransformer()
        self.Observers = []  # views notified through NotifyObservers()
        self.InitSignalData()
        self.InitSendingTransformer()
        self.RequestFreq = 1.0  # polling frequency (Hz) used by AddRequests_X

    @abstractmethod
    def InitSignalData(self):
        # Subclass hook: create the concrete SignalData entity.
        pass

    @abstractmethod
    def InitSendingTransformer(self):
        # Subclass hook: create the transformer used by TransformSignal().
        pass

    @abstractmethod
    def Func(self, x):
        # Subclass hook: the signal function y = f(x).
        pass

    @abstractmethod
    def UpdateSignalData(self):
        pass

    def UpdateDeltaTimes(self):
        # Return the list of time deltas between consecutive points in
        # SignalData.point_array_with_requests.
        input = SignalData.point_array_with_requests
        output = []
        N = len(input)
        if N == 1:
            output.insert(0, 0.0)  # initial time reference point, 0.00
        elif N > 1:
            output = [
                input[dt_next_idx].x - input[dt_prev_idx].x
                for dt_next_idx, dt_prev_idx
                in zip(range(1, N), range(0, N - 1))
            ]
        return output

    def RecalcData(self):
        # Full rebuild of the shared SignalData arrays; order matters here.
        self.ClearSignalData()
        self.UpdateSignalData()
        self.TransformSignal()  # transform for sending  # TODO: maybe drive Transform and Request via a callback on StartSending?
        self.AddRequests_X()
        self.AddRequests_Y()
        SignalData.dx = self.UpdateDeltaTimes()
        self.Recalc_X_Y()

    @abstractmethod
    def Recalc_X_Y(self):
        pass

    def ClearSignalData(self):
        # Reset every shared (class-level) SignalData container.
        SignalData.x.clear()
        SignalData.y.clear()
        SignalData.dx.clear()
        SignalData.transformed_point_array.clear()
        SignalData.point_array.clear()
        SignalData.point_array_with_requests.clear()

    def TransformSignal(self):
        self.SendingTransformer.TransformSignal()

    @staticmethod
    def extend_edge_points(list_x, list_y, to_send_list):
        # Build Point objects from the parallel x/y/to_send lists and append
        # them to SignalData.point_array_with_requests.
        pts_arr = []
        for x, y, to_send in zip(list_x, list_y, to_send_list):
            point = Point(x=x, y=y, to_send=to_send)
            pts_arr.append(point)
        SignalData.point_array_with_requests.extend(pts_arr)

    @abstractmethod
    def AddRequests_Y(self):
        # The polling time points have already been added (AddRequests_X).
        # Now the expected frequency at each polling moment must be recomputed.
        pass

    def AddRequests_X(self):
        # Input data: the signal itself, SignalData.x and SignalData.y.
        # Knowing the polling frequency, walk the whole time array:
        #   dt = SignalData.x[i+1] - SignalData.x[i].
        # If dt > 1 / request_freq --> insert "dummy" time points.
        # At such a point the interrupt only polls; nothing is sent to the
        # frequency drive.
        request_freq = self.RequestFreq
        point_arr = SignalData.transformed_point_array
        dx = 1 / request_freq
        len_x = len(point_arr)
        for prev_idx in range(0, len_x - 1):
            next_idx = prev_idx + 1
            x_prev = point_arr[prev_idx].x
            x_next = point_arr[next_idx].x
            y_prev = point_arr[prev_idx].y
            y_next = point_arr[next_idx].y
            to_send_prev = point_arr[prev_idx].to_send
            to_send_next = point_arr[next_idx].to_send
            dx_current = abs(x_next - x_prev)
            if dx_current <= dx and next_idx == len_x - 1:
                # No polling points needed - the current dx_current is small
                # enough. On the last iteration insert both edge points.
                self.extend_edge_points([x_prev, x_next], [y_prev, y_next], to_send_list=[to_send_prev, to_send_next])
            elif dx_current <= dx and next_idx < len_x - 1:
                # Not the last iteration - add only the left edge point.
                self.extend_edge_points([x_prev], [y_prev], to_send_list=[to_send_prev])
            elif dx_current > dx:
                # Polling points must be inserted.
                # How many points to insert:
                N = int(dx_current * request_freq)
                if N == 0:
                    # Rounded down to zero - insert only the edge points.
                    if next_idx < len_x - 1:
                        # not the last iteration
                        self.extend_edge_points([x_prev], [y_prev], to_send_list=[to_send_prev])
                    else:
                        # last iteration - add both edges
                        self.extend_edge_points([x_prev, x_next], [y_prev, y_next], to_send_list=[to_send_prev, to_send_next])
                else:
                    # Insert several intermediate points.
                    # x array to insert:
                    # N + 2 in linspace - N counts only the intermediate points,
                    # while linspace includes both edges.
                    x_new = np.linspace(x_prev, x_next, N + 2, endpoint=True)
                    # y array to insert:
                    # Yes, None is a kludge. When sending (SignalSendingOperator),
                    # if the 'y' value == None nothing is sent - only the
                    # frequency is requested.
                    y_new = [y_prev] + [None] * (len(x_new) - 2) + [y_next]
                    # Another kludge - a list of boolean to_send flags telling
                    # whether to send or only to poll.
                    to_send_list = [to_send_prev] + [False] * (len(x_new) - 2) + [to_send_next]
                    # If this is not the last iteration, the last point must be
                    # excluded from the X and Y arrays (it becomes the next
                    # pair's left edge). On the last iteration it is included.
                    if next_idx != len_x - 1:
                        x_new = x_new[0:-1]
                        y_new = y_new[0:-1]
                        to_send_list = to_send_list[0:-1]
                    self.extend_edge_points(x_new, y_new, to_send_list)

    def AddObserver(self, Observer):
        self.Observers.append(Observer)

    def RemoveObserver(self, Observer):
        self.Observers.remove(Observer)

    def NotifyObservers(self):
        # Push-model observer update.
        for observer in self.Observers:
            observer.UpdateModel()

    @property
    def x(self):
        return SignalData.x

    @property
    def y(self):
        return SignalData.y

    @property
    def request_freq(self):
        return self.RequestFreq

    @request_freq.setter
    def request_freq(self, val):
        # Changing the polling frequency triggers a full recalculation and
        # notifies the observers.
        self.RequestFreq = val
        self.RecalcData()
        self.NotifyObservers()
|
#!/usr/bin/python
def run(filename):
    """Process a Code Jam style input file.

    The first line holds the number of cases; each following line is one
    case. Prints 'Case #i: result' for each.
    (Fixed: the file handle was never closed -- now uses `with`.)
    """
    with open(filename, "r") as f:
        case_count = int(f.readline())
        for i in range(case_count):
            line = f.readline()
            result = process_case(parse_case(line))
            print('Case #%d: %s' % (i + 1, result))
def parse_case(line):
    """Parse one input line of two whitespace-separated integers into (a, b)."""
    tokens = line.split()
    return int(tokens[0]), int(tokens[1])
def process_case(case):
    """Count recycled-number pairs (n, m) with a <= n < m <= b.

    A pair is counted once per distinct m that is a digit rotation of n.
    (Cleaned up: removed the unused `tested_number` / `all_pairs` locals
    and the redundant `int_i >= a` check -- it is implied by int_i > i >= a.)
    """
    a, b = case
    result = 0
    # n == b cannot start a pair because a partner must satisfy m > n.
    for n in range(a, b):
        digits = str(n)
        partners = set()
        # All non-identity rotations: rotating len(digits) times returns
        # to the original, so len-1 rotations cover every candidate.
        for _ in range(len(digits) - 1):
            digits = digits[-1:] + digits[:-1]
            m = int(digits)  # leading zeros drop out, as in the original
            if n < m <= b:
                partners.add(m)
        result += len(partners)
    return result
|
import sys
import urllib2
import json
import ipdb as pdb
def get_results(ddg):
    """Print DuckDuckGo RelatedTopics as colorized description/url pairs.

    *ddg* is the parsed JSON of the DuckDuckGo Instant Answer API response.
    NOTE(review): when an entry has a nested "Topics" list, only the LAST
    nested topic is appended (the append sits outside the inner loop) --
    looks like a bug; confirm intended behavior.
    """
    results = []
    for ret in ddg.get("RelatedTopics"):
        if ret.has_key("Topics"):  # Python 2 dict API
            for rett in ret.get("Topics"):
                result_inf = {"description":rett["Text"], "url":rett["FirstURL"]}
        else:
            result_inf = {"description":ret["Text"], "url":ret["FirstURL"]}
        results.append(result_inf)
    for result in results:
        # HEADER/BLUE/GREEN are ANSI color codes defined at module level.
        print HEADER+" ["+str(results.index(result))+"]: "+BLUE+result.get("description")
        print HEADER+" ["+str(results.index(result))+"]: "+GREEN+result.get("url")
        print "\n"
    #pdb.set_trace()
# ANSI terminal color codes.
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
param = sys.argv[1]
if param == "-s":
    # Build the query string from the command-line arguments.
    # Fixed: `params` was never initialized (its init line was commented
    # out), so the loop raised NameError.
    params = ""
    for arg in sys.argv:
        params+=arg+"+"
    #pdb.set_trace()
# NOTE(review): the query sent is hard-coded; `params` is built but unused.
ddg = urllib2.urlopen("http://api.duckduckgo.com/?q=hola%2Bmundo&format=json&no_html=1")
#pdb.set_trace()
get_results(json.load(ddg))
|
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import cv2
import torch
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
class InferDataset(Dataset):
    """Dataset over a list of annotation dicts.

    Each entry has a 'filepath' key (and a 'label' key in train/val mode).
    Modes: 'train'/'val' -> (image, label); 'test' -> (image, filepath).
    Images are read with OpenCV and converted BGR -> RGB.
    """

    def __init__(self, annotations_list, mode='train', transform=None):
        self.annotations_list = annotations_list
        self.transform = transform  # albumentations-style: transform(image=...)['image']
        self.mode = mode

    def _to_rgb(self, image):
        """Convert an OpenCV BGR image to RGB (OpenCV's default is BGR)."""
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    def _apply_transform(self, image):
        """Apply the optional augmentation pipeline."""
        if self.transform:
            image = self.transform(image=image)['image']
        return image

    def __getitem__(self, idx):
        entry = self.annotations_list[idx]
        filepath = entry['filepath']
        if self.mode == 'train' or self.mode == 'val':
            image = self._to_rgb(cv2.imread(filepath))
            return self._apply_transform(image), entry['label']
        elif self.mode == 'test':
            # Decode through a byte buffer; fixed: the file handle leaked on
            # exception -- now closed via `with`.
            with open(filepath, 'rb') as f:
                chunk_arr = np.frombuffer(f.read(), dtype=np.uint8)
            image = self._to_rgb(cv2.imdecode(chunk_arr, cv2.IMREAD_COLOR))
            return self._apply_transform(image), filepath

    def __len__(self):
        return len(self.annotations_list)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, pygame
from pygame.locals import *
# 8x8 board of letters used to decode the hidden message.
chessplate =(
    ('L','C','C','R','6','R','I','E'),
    ('E','E','L','S','S','P','9','E'),
    ('E','E','E','1','U','E','D','U'),
    ('S','L','U','E','M','9','V','R'),
    ('D','L','1','S','T','E','A','S'),
    ('L','O','C','P','5','O','T','D'),
    ('A','U','A','D','T','E','R','N'),
    ('R','E','R','C','A','U','R','N')
)
# Row index mapping [0 (unused),1,2,3,4,5,6,7,8]: chess row number -> array row.
ligne = [0,7,6,5,4,3,2,1,0]
# Column letter -> array column.
colonne = {'A' : 0,'B' : 1,'C':2,'D':3,'E':4,'F':5,'G':6,'H':7}
colonne_label = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
# Encoded messages: space-separated board coordinates (e.g. "B5").
message_p2=["B5 G5 E7 C1 A8 E6 A1 G6 G4","B1 F8 D5 D3 E5 E4 A2 H3 D3 A2","H8 F8 D7 F6 F5 F4 A6 F2 F7","F1 D3 F2 E1 F5 E3 B6 G4 D3","C6 F1 A2 G7 D6 B7 G3 E7 A3 G4"]
def getCoord(sym2, sym1):
    """Convert chess coordinates (column letter, row number) to [row, col] indices."""
    row = ligne[sym1]
    col = colonne[sym2]
    return [row, col]
def d(sym2,sym1,tableau):
    """Return the letter of *tableau* at chess coordinates (column letter sym2, row number sym1)."""
    return tableau[ligne[sym1]][colonne[sym2]]
def getChessCoord(i, j):
    """Convert array indices back into chessboard notation '(<column>,<row>)'."""
    return "(%s,%s)" % (colonne_label[j], 8 - i)
def dispTable(table):
    """Print *table* row by row, elements space-separated (Python 2 print)."""
    for ligne in table:
        string = ""
        for lettre in ligne:
            string = string + lettre.__str__() + " "
        print string
def decodeMess(string, tableau):
    """Decode a space-separated coordinate message into the letters it addresses."""
    letters = [d(token[0], int(token[1]), tableau) for token in string.split(" ")]
    return "".join(letters)
def rot90(tableau):
    """Return a new square table: *tableau* rotated 90 degrees clockwise."""
    size = len(tableau)
    # Row i, column j of the result comes from row (size-1-j), column i.
    return [[tableau[size - 1 - j][i] for j in range(size)] for i in range(size)]
def rotinv90(tableau):
    """Return a new square table: *tableau* rotated 90 degrees counter-clockwise."""
    size = len(tableau)
    # Row i, column j of the result comes from row j, column (size-1-i).
    return [[tableau[j][size - 1 - i] for j in range(size)] for i in range(size)]
def highlightCase(screen,case, color):
    """Fill one board square with *color* and redraw its letter.

    Relies on module-level caseSize, offset, font and chessplate.
    """
    i = case[0]
    j = case[1]
    highLight = pygame.Surface((caseSize, caseSize))
    highLight.fill(color)
    # Screen position of square (i, j): columns along x, rows along y.
    pos = (j*caseSize + offset, i*caseSize + offset)
    screen.blit(highLight, pos)
    text = font.render(chessplate[i][j], 1, (0, 0, 0,))
    screen.blit(text, (pos[0] + 20, pos[1] + 20))
    pygame.display.flip()
def isValid(i, j):
    """Return True when (i, j) lies on the 8x8 board."""
    return 0 <= i <= 7 and 0 <= j <= 7
def getPossibilities(case, passed):
    """Return the knight moves from *case* that stay on the board and are not in *passed*.

    The move order matches the original hard-coded checks.
    """
    row, col = case
    offsets = ((2, -1), (2, 1), (-2, -1), (-2, 1),
               (1, -2), (1, 2), (-1, -2), (-1, 2))
    futureCase = []
    for d_row, d_col in offsets:
        candidate = [row + d_row, col + d_col]
        if isValid(candidate[0], candidate[1]) and candidate not in passed:
            futureCase.append(candidate)
    return futureCase
def dispChess(screen):
    """Draw the full checkered board, its letters, and the coordinate labels.

    Also re-highlights every already-visited square (module-level `passed`)
    in grey. Relies on module-level caseSize, offset, fonts and `app`.
    """
    color = 1  # alternates 0/1 to produce the checker pattern
    for i in range(8):
        # Row numbers on the left/right edges, column letters top/bottom.
        label = little_font.render((8-i).__str__(), 1, (0, 0, 0))
        screen.blit(label, (25, i*caseSize + offset + 25))
        screen.blit(label, (screen_square - 25, i*caseSize + offset + 25))
        label = little_font.render(colonne_label[i], 1, (0, 0, 0))
        screen.blit(label, (i*caseSize + offset + 25, 25))
        screen.blit(label, (i*caseSize + offset + 25, screen_square - 25))
        color = 1-color
        for j in range(8):
            pos = (j*caseSize + offset, i*caseSize + offset)
            currentCase = pygame.Surface((caseSize, caseSize))
            if(color):
                currentCase.fill((0, 0, 0))
                text = font.render(chessplate[i][j], 1, (255, 255, 255))
            else:
                currentCase.fill((255, 255, 255))
                text = font.render(chessplate[i][j], 1, (0, 0,0))
            screen.blit(currentCase,pos)
            screen.blit(text, (pos[0] + 20, pos[1] + 20))
            color = 1 - color
    # Highlight previously visited squares in grey.
    for case in passed:
        highlightCase(app, case, (128, 128, 128))
    pygame.display.flip()
def caseChoosed(case):
    """Record *case* as visited and append its board letter to the decoded phrase."""
    global phrase
    global passed
    i = case[0]
    j = case[1]
    passed.append([i, j])
    phrase = phrase + chessplate[i][j]
def dispChoiceForCase(screen, case):
    """Redraw the board and highlight/print the next possible knight moves from *case*."""
    global lastPossibilities
    global phrase
    dispChess(screen)
    print phrase
    print "Choose the next case:"
    #lastPossibilities = getPossibilities(case,passed)
    lastPossibilities = getNext(case,passed)
    # NOTE(review): getNext returns a list, so `== 0` can never be true;
    # this end-of-tour branch looks dead -- probably meant `len(...) == 0`.
    if lastPossibilities == 0:
        print "Result : " + phrase
    for index in range(len(lastPossibilities)):
        i = lastPossibilities[index][0]
        j = lastPossibilities[index][1]
        highlightCase(screen, lastPossibilities[index], (255, 0, 255))
        print "[%i] %s Coord %s"%(index, chessplate[i][j],getChessCoord(i, j))
    highlightCase(app, case, (0, 0, 255))
def getNext(case, passed):
    """Return the unvisited knight moves from *case* with the fewest onward moves.

    This is Warnsdorff's heuristic: among the legal moves, keep those whose
    own number of follow-up moves is minimal (ties are all returned).
    Fixed: the original crashed with IndexError (`next[0]` on an empty
    list) when no move remained -- now returns [].
    """
    visited = passed[:]
    possib = getPossibilities(case, visited)
    if not possib:
        return []  # tour is over: no unvisited square is reachable
    visited.append(case)
    # Number of onward moves available from each candidate.
    counts = [len(getPossibilities(pos, visited)) for pos in possib]
    mini = min(counts)
    return [pos for pos, count in zip(possib, counts) if count == mini]
if __name__ == '__main__':
    # Display parameters.
    caseSize = 50
    screen_square = 500
    app_size = (screen_square,screen_square)
    chessplate_square = 8 *caseSize
    offset = (screen_square - chessplate_square)/2
    START_POINT = getCoord('D',3)
    passed=[]                # visited squares, in order
    phrase=""                # letters decoded so far
    lastPossibilities = []   # current move choices, filled by dispChoiceForCase
    pygame.init()
    font = pygame.font.Font(None, 40)
    little_font = pygame.font.Font(None, 15)
    app = pygame.display.set_mode(app_size)
    pygame.display.set_caption('E1 - Beta')
    app.fill((250, 250, 250))
    pygame.display.flip()
    caseChoosed(START_POINT)
    dispChoiceForCase(app, START_POINT)
    # Map digit keys (keypad and top row) to a choice index; replaces the
    # original nine near-identical elif branches.
    digit_keys = {
        K_KP0: 0, K_0: 0, K_KP1: 1, K_1: 1, K_KP2: 2, K_2: 2,
        K_KP3: 3, K_3: 3, K_KP4: 4, K_4: 4, K_KP5: 5, K_5: 5,
        K_KP6: 6, K_6: 6, K_KP7: 7, K_7: 7, K_KP8: 8, K_8: 8,
    }
    # Event loop: digits pick the next move, backspace undoes one step.
    go_out = 1
    while (go_out):
        for event in pygame.event.get():
            if event.type == QUIT:
                go_out =0
            elif event.type == KEYDOWN:
                if event.key in digit_keys:
                    choice = lastPossibilities[digit_keys[event.key]]
                    caseChoosed(choice)
                    dispChoiceForCase(app, choice)
                elif(event.key == K_BACKSPACE):
                    if(len(passed ) > 1):
                        phrase = phrase[:-1]
                        passed = passed[:-1]
                        dispChoiceForCase(app, passed[-1])
                    else:
                        # Back at the start: reset and re-seed the tour.
                        passed = []
                        phrase = ""
                        caseChoosed(START_POINT)
                        dispChoiceForCase(app, START_POINT)
|
import numpy as np
# -------------------------------INPUT-------------------------------------------------------
number_of_river_patches = 44
id_file = 'id_mesh_refined_acuna.dat'
outputfile = 'dzg_2007_acuna_option_1.dat'
schwelle_id = 999    # patch id marking a "Schwelle" (river ramp)
schwelle_value = 0   # value written for ramp cells
value_bed = 3        # value written for river-bed cells
value_banks = 0      # value written for bank cells
# -------------------------------------------------------------------------------------------
# (removed the unused `usecols` variable from the original)
# Import the mesh patch-id raster.
id_array = np.genfromtxt(id_file, skip_header=8, dtype=np.float16, skip_footer=1)
# Defining sections
# Patch identifiers range from 10 till number of patches*10 plus the identifier for the schwellen (river ramps)
sections_bed = np.arange(10, (number_of_river_patches + 1) * 10, 10)
# Bank identifiers are the bed identifiers + 1.
sections_banks = np.arange(11, (number_of_river_patches + 1) * 10, 10)
# Replace every section id by its class value (plain iteration -- the
# original ndenumerate index was unused).
for section in sections_bed:
    id_array = np.where(id_array != section, id_array, value_bed)
for section in sections_banks:
    id_array = np.where(id_array != section, id_array, value_banks)
# Mask value for the schwelle (ramp) cells.
id_array = np.where(id_array != schwelle_id, id_array, schwelle_value)
np.savetxt(outputfile, id_array, fmt='%1.2f')
|
def unscramble_eggs(word):
    """Undo the egg-coding: drop every 'egg' inserted after each consonant."""
    return ''.join(word.split('egg'))
'''
Unscramble the eggs.
The string given to your function has had an "egg" inserted directly after
each consonant. You need to return the string before it became eggcoded.
Example
unscrambleEggs("Beggegeggineggneggeregg"); => "Beginner"
// "B---eg---in---n---er---"
Kata is supposed to be for beginners to practice regular expressions,
so commenting would be appreciated.
'''
|
import sys
import os
import errno
from datetime import datetime

# Alfred-style workflow placeholder: replaced with the user's query at runtime.
name = "{query}"
sys.stdout.write(name)
title = name.title()
# Note filename: YYYY-MM-DD-<slugified-name>.md
file_name = "{}-{}.md".format(
    datetime.today().strftime('%Y-%m-%d'),
    name.replace(' ', '-').lower()
)
folder = os.environ.get('noteable_folder')
home = os.getenv("HOME")
if folder:
    # Expand a leading '~' to the user's home directory.
    if folder.startswith('~'):
        folder = home + folder[1:]
else:
    folder = "{}/.notable".format(home)
# Normalize the path so it ends in a '.../notes' directory without a
# trailing slash.
if folder.endswith('notes') or folder.endswith('notes/'):
    pass
else:
    if folder.endswith('/'):
        folder = '{}notes'.format(folder)
    else:
        folder = '{}/notes'.format(folder)
if folder.endswith('/'):
    folder = folder[:-1]
# Create the folder if missing; tolerate it already existing.
try:
    os.makedirs(folder)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
template_path = '{}{}template.txt'.format(folder, os.sep)
if os.path.isfile(template_path):
    # Fixed: the original leaked the template file handle (bare open().read()).
    with open(template_path, 'r') as template_file:
        template = template_file.read()
else:
    template = """# {title}
## Agenda
1.
2.
## Tasks
* [x] Task One
+ [ ] Task Two
## Remarks
| Task | Your Remark |
| ------ | ----- |
| Task One | I got it done without any issue |
| Task Two | Had some problem with foo bar |
---
[Cheatsheet](https://cheatsheet.md/notable.pdf) :smile:
"""
if os.environ.get('no_template'):
    data = ""
else:
    data = template.format(title=title)
# (removed the redundant f.close() -- the with-block already closes the file)
with open("{}{}{}".format(folder, os.sep, file_name), 'w') as f:
    f.write(data)
|
def main():
    """Visualize the outputs of the train dataset analyses.

    Reads three CSVs (original, multiplicative-feature, subtractive-feature
    data sets) and writes one two-panel scatter figure per data set.
    """
    import matplotlib.pyplot as plt
    import pandas as pd

    def plot_feature_vs_target(fig_num, df, x_right, color_right,
                               xlabel_right, alpha_right, outfile):
        """One figure: feature 0 vs target (left), x_right vs target (right)."""
        fig = plt.figure(fig_num)
        plt.subplot(1, 2, 1)
        plt.plot(df.index, df.iloc[:, 1], color='red',
                 marker='.', linewidth=0, alpha=.04)
        plt.xlabel('feature 0')
        plt.ylabel('regression target')
        plt.subplot(1, 2, 2)
        plt.plot(x_right, df.iloc[:, 1], color=color_right,
                 marker='.', linewidth=0, alpha=alpha_right)
        plt.xlabel(xlabel_right)
        fig.tight_layout()
        fig.savefig(outfile)

    # Fixed: pd.DataFrame.from_csv was deprecated and removed (pandas 1.0);
    # read_csv with index_col=0 is the documented replacement.
    # The original data set.
    df_0 = pd.read_csv('arr_dep.csv', index_col=0)
    # The data set with the multiplication interaction feature.
    df_times = pd.read_csv('arr_times_dep.csv', index_col=0)
    # The data set with the subtraction interaction feature.
    df_minus = pd.read_csv('arr_minus_dep.csv', index_col=0)
    # One figure per data set (the last uses a lower alpha on the right,
    # matching the original).
    plot_feature_vs_target(1, df_0, df_0.iloc[:, 0], 'blue',
                           'feature 1', .04, 'original.png')
    plot_feature_vs_target(2, df_times, df_times.iloc[:, 2], 'orange',
                           'feature 0 x feature 1', .04, 'multiply.png')
    plot_feature_vs_target(3, df_minus, df_minus.iloc[:, 2], 'green',
                           'feature 1 - feature 0', .02, 'subtraction.png')
    plt.show()


if __name__ == '__main__':
    main()
|
# Generated by Django 2.2.1 on 2019-05-25 05:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional ``custom_css`` and ``custom_js`` text fields to Post."""

    dependencies = [
        ('posts', '0011_auto_20190519_2047'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='custom_css',
            field=models.TextField(blank=True),
        ),
        migrations.AddField(
            model_name='post',
            name='custom_js',
            field=models.TextField(blank=True),
        ),
    ]
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the GCS Violations upload pipeline."""
import mock
import tempfile
import unittest
from datetime import datetime
from google.cloud.security.notifier.pipelines import gcs_violations_pipeline
from tests.unittest_utils import ForsetiTestCase
class GcsViolationsPipelineTest(ForsetiTestCase):
    """Tests for gcs_violations_pipeline."""

    def setUp(self):
        """Create a pipeline instance with fake configuration."""
        # Fixed timestamp so filename generation is deterministic.
        self.fake_utcnow = datetime(
            year=1900, month=1, day=1, hour=0, minute=0, second=0,
            microsecond=0)
        fake_global_conf = {
            'db_host': 'x',
            'db_name': 'y',
            'db_user': 'z',
        }
        fake_pipeline_conf = {
            'gcs_path': 'gs://blah'
        }
        self.gvp = gcs_violations_pipeline.GcsViolationsPipeline(
            'abcd',
            '11111',
            [],
            fake_global_conf,
            {},
            fake_pipeline_conf)

    @mock.patch(
        'google.cloud.security.notifier.pipelines.gcs_violations_pipeline.datetime',
        autospec=True)
    def test_get_output_filename(self, mock_datetime):
        """Test _get_output_filename()."""
        mock_datetime.utcnow = mock.MagicMock()
        mock_datetime.utcnow.return_value = self.fake_utcnow
        output_timestamp = mock_datetime.utcnow().strftime(
            gcs_violations_pipeline.OUTPUT_TIMESTAMP_FMT)
        actual_filename = self.gvp._get_output_filename()
        # Fixed: assertEquals is a deprecated alias of assertEqual
        # (removed in Python 3.12).
        self.assertEqual(
            gcs_violations_pipeline.VIOLATIONS_JSON_FMT.format(
                self.gvp.resource, self.gvp.cycle_timestamp, output_timestamp),
            actual_filename)

    @mock.patch(
        'google.cloud.security.common.gcp_api.storage.StorageClient',
        autospec=True)
    @mock.patch('tempfile.NamedTemporaryFile')
    def test_run(self, mock_tempfile, mock_storage):
        """Test run(): the violations JSON is written and uploaded to GCS."""
        fake_tmpname = 'tmp_name'
        fake_output_name = 'abc'
        self.gvp._get_output_filename = mock.MagicMock(
            return_value=fake_output_name)
        gcs_path = '{}/{}'.format(
            self.gvp.pipeline_config['gcs_path'],
            fake_output_name)
        mock_tmp_json = mock.MagicMock()
        mock_tempfile.return_value.__enter__.return_value = mock_tmp_json
        mock_tmp_json.name = fake_tmpname
        mock_tmp_json.write = mock.MagicMock()
        self.gvp.run()
        mock_tmp_json.write.assert_called()
        mock_storage.return_value.put_text_file.assert_called_once_with(
            fake_tmpname, gcs_path)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2021/1/7 16:33
__author__ = 'the king of north'
from django.shortcuts import render
from django.core.paginator import Paginator
def carlists(request):
    """Render the car list page with a hard-coded demo dataset."""
    demo_cars = [
        {"Id": 1, "brand": "11", "name": "ll", "price": 1, "type": 1}
        for _ in range(3)
    ]
    return render(request, 'carlist.html', {'carlists': demo_cars})
def get_article(request):
    """Render a paginated article list.

    Reads the 1-based ``page`` query parameter (defaults to 1) and passes
    the current page, prev/next page numbers, and the full page range to
    the template.
    """
    page = request.GET.get('page')
    if page:
        page = int(page)
    else:
        page = 1
    print('PAGE 参数为:', page)
    article_list = [{"article_id": 1, "article_title": "11", "name": "ll", "price": 1, "type": 1},
                    {"article_id": 1, "article_title": "11", "name": "ll", "price": 1, "type": 1},
                    {"article_id": 1, "article_title": "11", "name": "ll", "price": 1, "type": 1}]
    # Build a paginator: first argument is the list to paginate, second is
    # the number of items per page (2 here).
    paginator = Paginator(article_list, 2)
    # page() returns the requested (1-based) page of items; `page` comes
    # from the URL query string.
    page_article_list = paginator.page(page)
    page_num = paginator.num_pages
    # print('page_num:',page_num);
    # Is there a next page?
    if page_article_list.has_next():
        next_page = page + 1
    else:
        next_page = page
    # Is there a previous page?
    if page_article_list.has_previous():
        previous_page = page - 1
    else:
        previous_page = page
    return render(request, 'pagetable.html', {
        # The template needs the current page's items, the prev/next page
        # numbers for navigation, and the full page-number range.
        'article_list': page_article_list,
        'page_num': range(1, page_num + 1),
        'curr_page': page,
        'next_page': next_page,
        'previous_page': previous_page
    })
|
import speech_recognition

# Simple one-shot speech-to-text demo: listen on the microphone and echo
# what Google's recognizer heard.
robot_ear = speech_recognition.Recognizer()
with speech_recognition.Microphone() as mic:
    print("Robot: i'm listening ")
    audio = robot_ear.listen(mic)
try:
    you = robot_ear.recognize_google(audio)
except Exception:
    # Recognition failed (unintelligible audio or API error): fall back to
    # an empty string. Fixed: the original had `you== ""` -- a no-op
    # comparison that left `you` undefined and crashed the print below.
    you = ""
print("you :" + you)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 09:57:02 2020
@author: hua'wei
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
import sklearn.preprocessing as prep
import tensorflow as tf
from Autoencoder1 import Autoencoder
import csv
import phate
from bikmeans import biKmeans
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.metrics.cluster import adjusted_rand_score as ari
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi
def filte(data_name, args_name):
    """Filter the expression matrix in ``data_name.csv``.

    Keeps only the rows whose fraction of positive entries exceeds 5%, and
    writes them to ``args_name.csv`` with that percentage prepended as the
    first column.
    """
    X = pd.read_csv(data_name + ".csv", header=None)
    n_cols = X.shape[1]
    # Percentage of positive (expressed) entries per row.
    expressed = (X > 0) * 1.0
    percentage = (np.dot(expressed, np.ones(n_cols)) / n_cols) * 100
    rows = np.column_stack((percentage, X))
    with open(args_name + ".csv", 'w', newline='') as fout:
        writer = csv.writer(fout, delimiter=',')
        for row in rows:
            if 5 < int(row[0]):
                writer.writerow(row)
    return
def standard_scale(X_train, X_test):
    """Fit a StandardScaler on X_train and apply it to both train and test sets."""
    scaler = prep.StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous block of length *batch_size* from *data*."""
    start = np.random.randint(0, len(data) - batch_size)
    return data[start:start + batch_size]
def autorunner(data_name, epochs, h1, h2, args_name):
    """Train the autoencoder on ``data_name.csv`` and write the reconstruction
    to ``args_name.csv``.

    h1/h2 are the hidden-layer widths (the net is symmetric: h1-h2-h1).
    Returns the reconstructed matrix. Uses TF1 graph-mode APIs.
    """
    tf.reset_default_graph()  # TF1: clear any previously built graph
    input_path = data_name +".csv"
    X = pd.read_csv(input_path, header=None)
    # Drop the first column -- presumably the expression percentage added
    # by filte(); TODO confirm.
    X = X.drop(0, axis=1)
    X = np.array(X)
    X = X.transpose()  # rows become samples after transposing
    batch_size = X.shape[0]-1
    num = X.shape[1]
    file_path = args_name +".csv"
    n_samples,_ = np.shape(X)
    training_epochs = epochs
    display_step = 1  # log every epoch
    autoencoder = Autoencoder(
        n_input = num,
        n_hidden1 = h1,
        n_hidden2 = h2,
        n_hidden3 = h1,
        transfer_function=tf.nn.softplus,
        optimizer=tf.train.AdamOptimizer(learning_rate=0.001))
    for epoch in range(training_epochs):
        avg_cost = 0
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            # Train on a random contiguous batch.
            batch_xs = get_random_block_from_data(X, batch_size)
            cost = autoencoder.partial_fit(batch_xs)
            avg_cost += cost / n_samples * batch_size
        if epoch % display_step == 0:
            print("Epoch:", '%d,' % (epoch + 1),
                  "Cost:", "{:.9f}".format(avg_cost))
    print("Total cost: " + str(autoencoder.calc_total_cost(X)))
    X_test_transform=autoencoder.transform(X)  # latent representation (unused here)
    X_test_reconstruct=autoencoder.reconstruct(X)
    # Persist the reconstruction row by row.
    with open(file_path, 'w', newline='') as fout:
        writer = csv.writer(fout, delimiter=',')
        for i in X_test_reconstruct:
            writer.writerow(i)
    return X_test_reconstruct
def clust(data_path, label_path, pca_com, phate_com):
    """Cluster the matrix in ``data_path.csv`` and print quality metrics.

    Pipeline: PCA (pca_com components) -> PHATE embedding (phate_com
    components) -> bisecting k-means with k equal to the maximum
    ground-truth label in ``label_path.csv``. Prints NMI/ARI/HOM/AMI
    against those labels and returns the cluster-assignment vector.
    """
    input_path = data_path +".csv"
    label_path = label_path +".csv"
    X = pd.read_csv(input_path, header=None)
    X = X.drop(0)  # drop the first row -- TODO confirm why (mirrors the written matrix layout)
    X = np.array(X)
    X = X.transpose()
    pca = PCA(n_components=pca_com)
    b = pca.fit_transform(X)
    phate_op = phate.PHATE(n_components=phate_com)
    data_phate = phate_op.fit_transform(b)
    label = pd.read_csv(label_path)
    y=np.array(label)
    label = y.ravel()
    c = label.max()  # number of clusters = largest ground-truth label id
    centList,clusterAssment = biKmeans(data_phate,c)
    julei = clusterAssment[:,0]  # first column holds the cluster index
    y=np.array(julei)
    julei = y.ravel()
    print('NMI value is %f \n' % nmi(julei.flatten(),label.flatten()))
    print('ARI value is %f \n' % ari(julei.flatten(),label.flatten()))
    print('HOM value is %f \n' % metrics.homogeneity_score(julei,label))
    print('AMI value is %f \n' % metrics.adjusted_mutual_info_score(label, julei))
    return julei
|
import pygame
import random
# --- Global game configuration (used by game() below) ---
pygame.init()
# Window size in pixels.
display_width = 800
display_height = 600
# RGB colours: background, snake body, snake head, text, food.
yellow_color = (255, 253, 208)
black_color_1 = (100, 100, 100)
black_color_2 = (0, 0, 0)
red_color = (255, 0, 0)
green_color = (0, 255, 0)
display = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption("Змейка")
clock = pygame.time.Clock()
# Side length of one grid cell (snake segment / food), in pixels.
block_size = 20
font = pygame.font.SysFont("monaco", 40)
def game():
    """Run the snake game: difficulty menu, main loop, game-over screen."""
    game_over = False
    game_close = False
    game_begin = False
    # Snake head starts at the centre of the window.
    x = display_width / 2
    y = display_height / 2
    # Per-frame movement in pixels (0 until the player steers).
    delta_x = 0
    delta_y = 0
    last_key = ''
    fps = 10
    snake_list = []
    snake_length = 1
    # Food spawns on a random cell aligned to the block grid.
    x_food = round(random.randrange(0, display_width - block_size) / float(block_size)) * float(block_size)
    y_food = round(random.randrange(0, display_height - block_size) / float(block_size)) * float(block_size)
    while not game_over:
        # Difficulty-selection screen: keys 1/2/3 pick the frame rate.
        while not game_begin:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    game_begin = True
                    game_over = True
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_1:
                        fps = 10
                        game_begin = True
                    if event.key == pygame.K_2:
                        fps = 20
                        game_begin = True
                    if event.key == pygame.K_3:
                        fps = 30
                        game_begin = True
            display.fill(yellow_color)
            display.blit(font.render("Выберите уровень сложности:",
                         True, red_color), [display_width / 3.5, display_height / 2.7])
            display.blit(font.render("1 - легкий",
                         True, red_color), [display_width / 2.5, display_height / 2.3])
            display.blit(font.render("2 - средний",
                         True, red_color), [display_width / 2.5, display_height / 2.05])
            display.blit(font.render("3 - сложный",
                         True, red_color), [display_width / 2.5, display_height / 1.82])
            pygame.display.update()
        # Game-over screen: R restarts, Esc or window-close quits.
        while game_close == True:
            display.fill(yellow_color)
            display.blit(font.render("Вы проиграли!",
                         True, red_color), [display_width / 2.5, display_height / 2.3])
            display.blit(font.render("(нажмите R для рестарта, esc - для выхода)",
                         True, red_color), [display_width / 6, display_height / 2.05])
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    game_over = True
                    game_close = False
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        game_over = True
                        game_close = False
                    if event.key == pygame.K_r:
                        # NOTE(review): restarting via recursion grows the
                        # call stack on every restart.
                        game()
        # Steering; last_key forbids repeating or reversing the direction.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game_over = True
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT and last_key != pygame.K_RIGHT and last_key != pygame.K_LEFT:
                    delta_x = -block_size
                    delta_y = 0
                elif event.key == pygame.K_RIGHT and last_key != pygame.K_LEFT and last_key != pygame.K_RIGHT:
                    delta_x = block_size
                    delta_y = 0
                elif event.key == pygame.K_UP and last_key != pygame.K_DOWN and last_key != pygame.K_UP:
                    delta_y = -block_size
                    delta_x = 0
                elif event.key == pygame.K_DOWN and last_key != pygame.K_UP and last_key != pygame.K_DOWN:
                    delta_y = block_size
                    delta_x = 0
                last_key = event.key
        # Hitting a wall ends the round.
        if x >= display_width or x < 0 or y >= display_height or y < 0:
            game_close = True
        x += delta_x
        y += delta_y
        display.fill(yellow_color)
        pygame.draw.rect(display, green_color, [x_food, y_food, block_size, block_size])
        snake_Head = []
        snake_Head.append(x)
        snake_Head.append(y)
        snake_list.append(snake_Head)
        # Trim the tail so the list length matches the snake length.
        if len(snake_list) > snake_length:
            del snake_list[0]
        # Self-collision: head against every other segment.
        for el in snake_list[:-1]:
            if el == snake_Head:
                game_close = True
        for el in snake_list[0:len(snake_list)]:
            pygame.draw.rect(display, black_color_1, [el[0], el[1], block_size, block_size])
        display.blit(font.render("Счет: " + str(snake_length - 1), True, red_color), [display_width - 150, 20])
        # Draw the head in the darker colour on top.
        pygame.draw.rect(display, black_color_2, [snake_list[-1][0], snake_list[-1][1], block_size, block_size])
        pygame.display.update()
        # Eating food: respawn it and grow the snake by one segment.
        if x == x_food and y == y_food:
            x_food = round(random.randrange(0, display_width - block_size) / float(block_size)) * float(block_size)
            y_food = round(random.randrange(0, display_height - block_size) / float(block_size)) * float(block_size)
            snake_length += 1
        clock.tick(fps)
    pygame.quit()
    quit()
game()
|
import datetime as dt

# Print the number of whole days elapsed since the given birthday.
birthday = dt.datetime(year=2001, month=1, day=23)
time_alive = dt.datetime.today() - birthday
print(time_alive.days)
|
#!/usr/bin/env python
from setuptools import setup
# Packaging metadata for igtdetect, a line-level IGT classifier.
setup(name='igtdetect',
      version='1.1.1',
      description='Line-level classifier for IGT instances, part of RiPLEs pipeline.',
      author='Ryan Georgi',
      author_email='rgeorgi@uw.edu',
      url='https://github.com/xigt/igtdetect',
      # Installed as a command-line entry script.
      scripts=['detect-igt'],
      packages=['igtdetect'],
      install_requires = [
          'wheel',
          'setuptools>=53',
          'scikit-learn>=0.18.1',
          'numpy',
          # Direct-URL dependencies pinned to tagged archives.
          'freki@https://github.com/xigt/freki/archive/v0.3.0.tar.gz',
          'riples-classifier@https://github.com/xigt/riples-classifier/archive/0.1.0.tar.gz',
      ]
      )
|
from math import *
from decimal import *
def methoda(a):
    """Approximate Euler's number e as the partial sum of 1/n!.

    Computes sum_{n=0}^{a-1} 1/n! at 1000 significant digits, prints it,
    and returns it.

    Parameters
    ----------
    a : int
        Number of series terms to sum.

    Returns
    -------
    Decimal
        The 1000-digit partial sum (0 when a == 0).
    """
    getcontext().prec = 1000
    total = Decimal(0)
    # Maintain n! incrementally instead of calling factorial(n) from
    # scratch each term (the recomputation made the loop quadratic in
    # big-integer multiplications for large a).
    factorial_n = 1
    for n in range(a):
        if n > 0:
            factorial_n *= n
        total += 1 / Decimal(factorial_n)
    print(total)
    return total
# Demo: sum the first 5000 terms of the 1/n! series.
a = 5000
methoda(a)
|
from typing import Dict
from graph_db.engine.types import *
from .graph_storage import NodeStorage, RelationshipStorage, PropertyStorage, LabelStorage, DynamicStorage
from .record import Record
import rpyc
from rpyc.utils.server import ThreadedServer
class WorkerService(rpyc.SlaveService):
    class exposed_Worker(object):
        """
        Worker Machine File System manager.
        Manages distribution of a portion of database across several stores.
        """
        # NOTE: class-level dicts — start_worker_service() populates
        # `stores` on the class before the server starts, so all service
        # instances/connections share the same storages and stats.
        stores = dict()
        stats = dict()
        def __init__(self):
            # Compute initial record counts for the configured storages.
            self.update_stats()
        def exposed_flush(self):
            # Close every connected storage (flushes data to disk).
            for storage in self.stores:
                self.stores[storage].close()
        def update_stats(self) -> Dict[str, int]:
            """
            Updates total number of records in each connected storage.
            :return: dictionary with stats
            """
            self.stats = dict()
            for storage_type in self.stores:
                self.stats[storage_type] = self.stores[storage_type].count_records()
            return self.stats
        def exposed_get_stats(self) -> Dict[str, int]:
            """
            Returns total number of records in each connected storage.
            :return: dictionary with stats
            """
            return self.stats
        def exposed_write_record(self, record: Record, storage_type: str, update: bool = False):
            """
            Writes record data to specified storage.
            :param record: record object
            :param storage_type: storage type
            :param update: is it an update of previous record or not
            """
            storage = self.stores[storage_type]
            if not update:
                # New record: allocate a fresh slot and stamp its index
                # on the record before writing.
                new_record = storage.allocate_record()
                record.set_index(new_record.idx)
            storage.write_record(record)
            # if ok:
            # A write at the current end of the storage grows the cached count.
            if record.idx == self.stats[storage_type]:
                self.stats[storage_type] += 1
        def exposed_read_record(self, record_id: int, storage_type: str):
            """
            Reads record with `record_id` from specified storage.
            :param record_id: record id
            :param storage_type storage type
            """
            storage = self.stores[storage_type]
            try:
                record = storage.read_record(record_id)
            except AssertionError:
                # The storage signals an unreadable id with AssertionError;
                # map that to a None result for the caller.
                record = None
            return record
def start_worker_service(server_port, path, base_config):
    """Attach the storages enabled in base_config and serve forever."""
    worker = WorkerService.exposed_Worker
    # Map each config key to its storage class and on-disk path suffix.
    storage_layout = {
        'NodeStorage': (NodeStorage, NODE_STORAGE),
        'RelationshipStorage': (RelationshipStorage, RELATIONSHIP_STORAGE),
        'LabelStorage': (LabelStorage, LABEL_STORAGE),
        'PropertyStorage': (PropertyStorage, PROPERTY_STORAGE),
        'DynamicStorage': (DynamicStorage, DYNAMIC_STORAGE),
    }
    for storage_name, (storage_cls, suffix) in storage_layout.items():
        if base_config[storage_name]:
            worker.stores[storage_name] = storage_cls(path=path + suffix)
    # Blocks here, serving rpyc requests on the given port.
    ThreadedServer(WorkerService, port=server_port).start()
|
# Generated by Django 3.0.3 on 2021-03-21 22:39
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
    # Auto-generated migration: redefines the multi-select
    # `recurrenceDays` field on the Resource model.
    dependencies = [
        ('api', '0006_auto_20210321_2239'),
    ]
    operations = [
        migrations.AlterField(
            model_name='resource',
            name='recurrenceDays',
            field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('SUN', 'Sunday'), ('MON', 'Monday'), ('TUE', 'Tuesday'), ('WED', 'Wednesday'), ('THU', 'Thursday'), ('FRI', 'Friday'), ('SAT', 'Saturday')], default=[], max_length=27, null=True),
        ),
    ]
|
# coding: utf-8
# Standard Python libraries
from io import IOBase
from pathlib import Path
from typing import Optional, Union
# https://github.com/usnistgov/atomman
import atomman.unitconvert as uc
# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM
# iprPy imports
from .. import Calculation
from .stacking_fault_static import stackingfault
from ...calculation_subset import (LammpsPotential, LammpsCommands, Units,
AtommanSystemLoad, LammpsMinimize,
StackingFault)
class StackingFaultStatic(Calculation):
    """Class for managing stacking fault energy calculations"""
    ############################# Core properties #################################
    def __init__(self,
                 model: Union[str, Path, IOBase, DM, None]=None,
                 name: Optional[str]=None,
                 database = None,
                 params: Union[str, Path, IOBase, dict] = None,
                 **kwargs: any):
        """
        Initializes a Calculation object for a given style.
        Parameters
        ----------
        model : str, file-like object or DataModelDict, optional
            Record content in data model format to read in.  Cannot be given
            with params.
        name : str, optional
            The name to use for saving the record.  By default, this should be
            the calculation's key.
        database : yabadaba.Database, optional
            A default Database to associate with the Record, typically the
            Database that the Record was obtained from.  Can allow for Record
            methods to perform Database operations without needing to specify
            which Database to use.
        params : str, file-like object or dict, optional
            Calculation input parameters or input parameter file.  Cannot be
            given with model.
        **kwargs : any
            Any other core Calculation record attributes to set.  Cannot be
            given with model.
        """
        # Initialize subsets used by the calculation
        self.__potential = LammpsPotential(self)
        self.__commands = LammpsCommands(self)
        self.__units = Units(self)
        self.__system = AtommanSystemLoad(self)
        self.__minimize = LammpsMinimize(self)
        self.__defect = StackingFault(self)
        subsets = (self.commands, self.potential, self.system,
                   self.minimize, self.defect, self.units)
        # Initialize unique calculation attributes
        # a1/a2 are the fractional shifts along the fault-plane shift vectors.
        self.a1 = 0.0
        self.a2 = 0.0
        # Result attributes: remain None until the calculation has finished.
        self.__dumpfile_base = None
        self.__dumpfile_defect = None
        self.__potential_energy_base = None
        self.__potential_energy_defect = None
        self.__gsf_energy = None
        self.__gsf_displacement = None
        # Define calc shortcut
        self.calc = stackingfault
        # Call parent constructor
        super().__init__(model=model, name=name, database=database, params=params,
                         subsets=subsets, **kwargs)
    @property
    def filenames(self) -> list:
        """list: the names of each file used by the calculation."""
        return [
            'stacking_fault_static.py',
            'sfmin.template'
        ]
    ############################## Class attributes ###############################
    @property
    def commands(self) -> LammpsCommands:
        """LammpsCommands subset"""
        return self.__commands
    @property
    def potential(self) -> LammpsPotential:
        """LammpsPotential subset"""
        return self.__potential
    @property
    def units(self) -> Units:
        """Units subset"""
        return self.__units
    @property
    def system(self) -> AtommanSystemLoad:
        """AtommanSystemLoad subset"""
        return self.__system
    @property
    def minimize(self) -> LammpsMinimize:
        """LammpsMinimize subset"""
        return self.__minimize
    @property
    def defect(self) -> StackingFault:
        """StackingFault subset"""
        return self.__defect
    @property
    def a1(self) -> float:
        """float: Fractional shift along the a1vect direction to apply"""
        return self.__a1
    @a1.setter
    def a1(self, val: float):
        # Coerce to float so model/parameter string values are accepted.
        self.__a1 = float(val)
    @property
    def a2(self)-> float:
        """float: Fractional shift along the a2vect direction to apply"""
        return self.__a2
    @a2.setter
    def a2(self, val: float):
        self.__a2 = float(val)
    @property
    def dumpfile_base(self) -> str:
        """str: Name of the LAMMPS dump file of the 0 shift reference system"""
        if self.__dumpfile_base is None:
            raise ValueError('No results yet!')
        return self.__dumpfile_base
    @property
    def dumpfile_defect(self) -> str:
        """str: Name of the LAMMPS dump file of the defect system"""
        if self.__dumpfile_defect is None:
            raise ValueError('No results yet!')
        return self.__dumpfile_defect
    @property
    def potential_energy_base(self)-> float:
        """float: Potential energy of the 0 shift reference system"""
        if self.__potential_energy_base is None:
            raise ValueError('No results yet!')
        return self.__potential_energy_base
    @property
    def potential_energy_defect(self)-> float:
        """float: Potential energy of the defect system"""
        if self.__potential_energy_defect is None:
            raise ValueError('No results yet!')
        return self.__potential_energy_defect
    @property
    def gsf_displacement(self)-> float:
        """float: Difference in planar displacement between reference and defect systems"""
        if self.__gsf_displacement is None:
            raise ValueError('No results yet!')
        return self.__gsf_displacement
    @property
    def gsf_energy(self)-> float:
        """float: Generalized stacking fault energy associated with the defect system"""
        if self.__gsf_energy is None:
            raise ValueError('No results yet!')
        return self.__gsf_energy
    def set_values(self,
                   name: Optional[str] = None,
                   **kwargs: any):
        """
        Set calculation values directly.  Any terms not given will be set
        or reset to the calculation's default values.
        Parameters
        ----------
        name : str, optional
            The name to assign to the calculation.  By default, this is set as
            the calculation's key.
        a1 : float, optional
            The fractional shift to make along the a1 shift vector.
        a2 : float, optional
            The fractional shift to make along the a2 shift vector.
        **kwargs : any, optional
            Any keyword parameters supported by the set_values() methods of
            the parent Calculation class and the subset classes.
        """
        # Call super to set universal and subset content
        super().set_values(name=name, **kwargs)
        # Set calculation-specific values
        if 'a1' in kwargs:
            self.a1 = kwargs['a1']
        if 'a2' in kwargs:
            self.a2 = kwargs['a2']
    ####################### Parameter file interactions ###########################
    def load_parameters(self,
                        params: Union[dict, str, IOBase],
                        key: Optional[str] = None):
        """
        Reads in and sets calculation parameters.
        Parameters
        ----------
        params : dict, str or file-like object
            The parameters or parameter file to read in.
        key : str, optional
            A new key value to assign to the object.  If not given, will use
            calc_key field in params if it exists, or leave the key value
            unchanged.
        """
        # Load universal content
        input_dict = super().load_parameters(params, key=key)
        # Load input/output units
        self.units.load_parameters(input_dict)
        # Change default values for subset terms
        input_dict['sizemults'] = input_dict.get('sizemults', '3 3 3')
        input_dict['forcetolerance'] = input_dict.get('forcetolerance',
                                                      '1.0e-6 eV/angstrom')
        # Load calculation-specific strings
        # Load calculation-specific booleans
        # Load calculation-specific integers
        # Load calculation-specific unitless floats
        self.a1 = float(input_dict.get('stackingfault_a1', 0.0))
        self.a2 = float(input_dict.get('stackingfault_a2', 0.0))
        # Load calculation-specific floats with units
        # Load LAMMPS commands
        self.commands.load_parameters(input_dict)
        # Load minimization parameters
        self.minimize.load_parameters(input_dict)
        # Load LAMMPS potential
        self.potential.load_parameters(input_dict)
        # Load initial system
        self.system.load_parameters(input_dict)
        # Load defect parameters
        self.defect.load_parameters(input_dict)
    def master_prepare_inputs(self,
                              branch: str = 'main',
                              **kwargs: any) -> dict:
        """
        Utility method that build input parameters for prepare according to the
        workflows used by the NIST Interatomic Potentials Repository.  In other
        words, transforms inputs from master_prepare into inputs for prepare.
        Parameters
        ----------
        branch : str, optional
            Indicates the workflow branch to prepare calculations for.  Default
            value is 'main'.
        **kwargs : any
            Any parameter modifications to make to the standard workflow
            prepare scripts.
        Returns
        -------
        params : dict
            The full set of prepare parameters based on the workflow branch
        """
        raise NotImplementedError('Not implemented for this calculation style')
    @property
    def templatekeys(self) -> dict:
        """dict : The calculation-specific input keys and their descriptions."""
        return {
            'stackingfault_a1': ' '.join([
                "The fractional shift to apply along the a1 direction."]),
            'stackingfault_a2': ' '.join([
                "The fractional shift to apply along the a2 direction."]),
        }
    @property
    def singularkeys(self) -> list:
        """list: Calculation keys that can have single values during prepare."""
        keys = (
            # Universal keys
            super().singularkeys
            # Subset keys
            + self.commands.keyset
            + self.units.keyset
            # Calculation-specific keys
        )
        return keys
    @property
    def multikeys(self) -> list:
        """list: Calculation key sets that can have multiple values during prepare."""
        keys = (
            # Universal multikeys
            super().multikeys +
            # Combination of potential and system keys
            [
                self.potential.keyset +
                self.system.keyset
            ] +
            # Defect multikeys
            self.defect.multikeys +
            # Run parameter keys
            [
                [
                    'stackingfault_a1',
                    'stackingfault_a2',
                ]
            ] +
            # Minimize keys
            [
                self.minimize.keyset
            ]
        )
        return keys
    ########################### Data model interactions ###########################
    @property
    def modelroot(self) -> str:
        """str: The root element of the content"""
        return 'calculation-stacking-fault-static'
    def build_model(self) -> DM:
        """
        Generates and returns model content based on the values set to object.
        """
        # Build universal content
        model = super().build_model()
        calc = model[self.modelroot]
        # Build subset content
        self.commands.build_model(calc, after='atomman-version')
        self.potential.build_model(calc, after='calculation')
        self.system.build_model(calc, after='potential-LAMMPS')
        self.defect.build_model(calc, after='system-info')
        self.minimize.build_model(calc)
        # Build calculation-specific content
        if 'calculation' not in calc:
            calc['calculation'] = DM()
        if 'run-parameter' not in calc['calculation']:
            calc['calculation']['run-parameter'] = DM()
        run_params = calc['calculation']['run-parameter']
        run_params['stackingfault_a1'] = self.a1
        run_params['stackingfault_a2'] = self.a2
        # Build results
        if self.status == 'finished':
            calc['defect-free-system'] = DM()
            calc['defect-free-system']['artifact'] = DM()
            calc['defect-free-system']['artifact']['file'] = self.dumpfile_base
            calc['defect-free-system']['artifact']['format'] = 'atom_dump'
            calc['defect-free-system']['symbols'] = self.system.ucell.symbols
            calc['defect-free-system']['potential-energy'] = uc.model(self.potential_energy_base,
                                                                      self.units.energy_unit)
            calc['defect-system'] = DM()
            calc['defect-system']['artifact'] = DM()
            calc['defect-system']['artifact']['file'] = self.dumpfile_defect
            calc['defect-system']['artifact']['format'] = 'atom_dump'
            calc['defect-system']['symbols'] = self.system.ucell.symbols
            calc['defect-system']['potential-energy'] = uc.model(self.potential_energy_defect,
                                                                 self.units.energy_unit)
            # Save the stacking fault energy
            energy_per_area_unit = f'{self.units.energy_unit}/{self.units.length_unit}^2'
            calc['stacking-fault-energy'] = uc.model(self.gsf_energy,
                                                     energy_per_area_unit)
            # Save the plane separation
            calc['plane-separation'] = uc.model(self.gsf_displacement,
                                                self.units.length_unit)
        self._set_model(model)
        return model
    def load_model(self,
                   model: Union[str, DM],
                   name: Optional[str] = None):
        """
        Loads record contents from a given model.
        Parameters
        ----------
        model : str or DataModelDict
            The model contents of the record to load.
        name : str, optional
            The name to assign to the record.  Often inferred from other
            attributes if not given.
        """
        # Load universal and subset content
        super().load_model(model, name=name)
        calc = self.model[self.modelroot]
        # Load calculation-specific content
        run_params = calc['calculation']['run-parameter']
        self.a1 = run_params['stackingfault_a1']
        self.a2 = run_params['stackingfault_a2']
        # Load results
        if self.status == 'finished':
            self.__dumpfile_base = calc['defect-free-system']['artifact']['file']
            self.__potential_energy_base = uc.value_unit(calc['defect-free-system']['potential-energy'])
            self.__dumpfile_defect= calc['defect-system']['artifact']['file']
            self.__potential_energy_defect = uc.value_unit(calc['defect-system']['potential-energy'])
            self.__gsf_energy = uc.value_unit(calc['stacking-fault-energy'])
            self.__gsf_displacement = uc.value_unit(calc['plane-separation'])
    ########################## Metadata interactions ##############################
    def metadata(self) -> dict:
        """
        Generates a dict of simple metadata values associated with the record.
        Useful for quickly comparing records and for building pandas.DataFrames
        for multiple records of the same style.
        """
        # Call super to extract universal and subset content
        meta = super().metadata()
        # Extract calculation-specific content
        # Extract results
        if self.status == 'finished':
            meta['dumpfile_base'] = self.dumpfile_base
            meta['dumpfile_defect'] = self.dumpfile_defect
            meta['E_pot_base'] = self.potential_energy_base
            meta['E_pot_defect'] = self.potential_energy_defect
            meta['E_gsf'] = self.gsf_energy
            meta['delta_gsf'] = self.gsf_displacement
        return meta
    @property
    def compare_terms(self) -> list:
        """list: The terms to compare metadata values absolutely."""
        return [
            'script',
            'load_file',
            'load_options',
            'symbols',
            'potential_LAMMPS_key',
            'potential_key',
            'a_mult',
            'b_mult',
            'c_mult',
            'stackingfault_key',
        ]
    @property
    def compare_fterms(self) -> dict:
        """dict: The terms to compare metadata values using a tolerance."""
        return {
            'a1':1e-5,
            'a2':1e-5,
        }
    def isvalid(self) -> bool:
        """bool: True when the loaded system's family matches the defect's family."""
        return self.system.family == self.defect.family
    ########################### Calculation interactions ##########################
    def calc_inputs(self) -> dict:
        """Builds calculation inputs from the class's attributes"""
        # Initialize input_dict
        input_dict = {}
        # Add subset inputs
        for subset in self.subsets:
            subset.calc_inputs(input_dict)
        # Add calculation-specific inputs
        input_dict['a1'] = self.a1
        input_dict['a2'] = self.a2
        # Return input_dict
        return input_dict
    def process_results(self, results_dict: dict):
        """
        Processes calculation results and saves them to the object's results
        attributes.
        Parameters
        ----------
        results_dict: dict
            The dictionary returned by the calc() method.
        """
        self.__dumpfile_base = results_dict['dumpfile_0']
        self.__dumpfile_defect = results_dict['dumpfile_sf']
        self.__potential_energy_base = results_dict['E_total_0']
        self.__potential_energy_defect = results_dict['E_total_sf']
        self.__gsf_energy = results_dict['E_gsf']
        self.__gsf_displacement = results_dict['delta_disp']
|
import numpy as np
import pdb
from gym import utils
from . import mujoco_env
from . import geom_utils
class BaseAntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Base Mujoco Ant environment (29-dimensional state vector).

    State layout, per ``state_vector()``:
      s[0:3]   (x, y, z) torso position
      s[3:7]   torso orientation quaternion (x+ is the front of the ant)
      s[7:9], s[9:11], s[11:13], s[13:15]
               (hip, knee) joint angles for the front-left, back-left,
               back-right and front-right legs respectively (radians,
               wrap around)
      s[15:18] (d_x, d_y, d_z) linear velocity
      s[18:21] (d_roll, d_pitch, d_yaw) angular velocity
      s[21:29] joint angle velocities, same leg order as the angles
    """
    # Name -> index/slice lookup used by get_state_by_name().  A slice
    # yields an array, a bare int yields a scalar.  Roll/pitch/yaw are
    # handled separately because they are derived from the quaternion.
    _STATE_INDEX = {
        'xyz': slice(0, 3), 'x': 0, 'y': 1, 'z': 2,
        'quart': slice(3, 7),
        'joint_angles': slice(7, 15),
        'front_left_joints': slice(7, 9), 'front_left_hip': 7, 'front_left_knee': 8,
        'back_left_joints': slice(9, 11), 'back_left_hip': 9, 'back_left_knee': 10,
        'back_right_joints': slice(11, 13), 'back_right_hip': 11, 'back_right_knee': 12,
        'front_right_joints': slice(13, 15), 'front_right_hip': 13, 'front_right_knee': 14,
        'xyz_vel': slice(15, 18), 'x_vel': 15, 'y_vel': 16, 'z_vel': 17,
        'rpy_vel': slice(18, 21), 'roll_vel': 18, 'pitch_vel': 19, 'yaw_vel': 20,
        'joint_angle_vel': slice(21, None),
        'front_left_joint_vel': slice(21, 23), 'front_left_hip_vel': 21, 'front_left_knee_vel': 22,
        'back_left_joint_vel': slice(23, 25), 'back_left_hip_vel': 23, 'back_left_knee_vel': 24,
        'back_right_joint_vel': slice(25, 27), 'back_right_hip_vel': 25, 'back_right_knee_vel': 26,
        'front_right_joint_vel': slice(27, 29), 'front_right_hip_vel': 27, 'front_right_knee_vel': 28,
    }
    def __init__(self, xml_file='my_ant.xml'):
        """Initialize the Mujoco environment with frame skip 5."""
        mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
        utils.EzPickle.__init__(self)
    def step(self, a):
        """Apply action `a` for one control step.

        Reward = forward x-velocity - control cost - contact cost + alive
        bonus.  The episode terminates when the state goes non-finite or the
        torso height (s[2]) leaves the [0.2, 1.0] band.
        """
        xposbefore = self.get_body_com("torso")[0]
        self.do_simulation(a, self.frame_skip)
        xposafter = self.get_body_com("torso")[0]
        forward_reward = (xposafter - xposbefore)/self.dt
        ctrl_cost = .5 * np.square(a).sum()
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
        survive_reward = 1.0
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self.state_vector()
        notdone = np.isfinite(state).all() \
            and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self._get_obs()
        return ob, reward, done, dict(
            reward_forward=forward_reward,
            reward_ctrl=-ctrl_cost,
            reward_contact=-contact_cost,
            reward_survive=survive_reward)
    def get_state_by_name(self, name, s=None):
        """Return the named component of state vector `s` (current state if
        `s` is None).  See the class docstring for the state layout.

        Raises
        ------
        ValueError
            If `name` is not a recognized state component.  (The previous
            if/elif chain raised an undefined name `Error`, which surfaced
            as a NameError instead.)
        """
        if s is None:
            s = self.state_vector()
        # Euler angles are computed from the quaternion, not sliced directly.
        if name in ('rpy', 'roll', 'pitch', 'yaw'):
            roll, pitch, yaw = geom_utils.quaternion_to_euler_angle(s[3:7])
            if name == 'rpy':
                return np.array([roll, pitch, yaw])
            return {'roll': roll, 'pitch': pitch, 'yaw': yaw}[name]
        try:
            return s[self._STATE_INDEX[name]]
        except KeyError:
            raise ValueError("Not a recognized state")
    # We consider only roll, pitch and joint angles, and their velocities as
    # "internal" state; the rest is "external".  Quaternions are converted
    # to Euler angles for the split.
    def get_intern_extern_state(self):
        """Return (internal, external) state arrays of sizes (20, 8)."""
        s = self.state_vector()
        xyz = self.get_state_by_name('xyz', s)
        rpy = self.get_state_by_name('rpy', s)
        joint_angles = self.get_state_by_name('joint_angles', s)
        d_xyz = self.get_state_by_name('xyz_vel', s)
        d_rpy = self.get_state_by_name('rpy_vel', s)
        d_joint = self.get_state_by_name('joint_angle_vel', s)
        # Separate out yaw: it stays external, roll/pitch go internal.
        roll, pitch, yaw = rpy
        d_roll, d_pitch, d_yaw = d_rpy
        s_internal = np.concatenate([[roll, pitch], joint_angles, [d_roll, d_pitch], d_joint])
        s_external = np.concatenate([xyz, [yaw], d_xyz, [d_yaw]])
        assert(s_internal.shape[0] == 20)
        assert(s_external.shape[0] == 8)
        return s_internal, s_external
    def _get_obs(self):
        # Subclasses define their own observation vector.
        raise NotImplementedError
    def reset_model(self):
        """Reset to the initial pose with small uniform/Gaussian noise."""
        qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
        qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
        self.set_state(qpos, qvel)
        return self._get_obs()
    def viewer_setup(self):
        self.viewer.cam.distance = self.model.stat.extent * 0.5
class BaseAntLowGearEnv(mujoco_env.MujocoEnv, utils.EzPickle):
# Initialize Mujoco environment
def __init__(self, xml_file='my_ant.xml'):
mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
utils.EzPickle.__init__(self)
# Forward step
def step(self, a):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_reward = (xposafter - xposbefore)/self.dt
ctrl_cost = .5 * np.square(a/5).sum()
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self._get_obs()
return ob, reward, done, dict(
reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost,
reward_survive=survive_reward)
# State - 29 dimensional
# This is possibly the only documentation that exists for mujoco ant. You're welcome
# s[0:3]: (x, y, z)
# s[3:7]: quaternion of the rotation of the ant from perspective of x+ being the front of the ant
# s[7:9], s[9:11], s[11:13], s[13:15]: joint angles for front left leg, back left leg, back right leg and front right leg respectively
# First variable is side to side angle (hip), second is up down angle (knee). In radians, but circles around
# s[15:18]: (d_x, d_y, d_z)
# s[18:21]: (d_roll, d_pitch, d_yaw)
# s[21:28]: angle velocities - same order as before
def get_state_by_name(self, name, s=None):
# Get state (if not passed in)
if s is None:
s = self.state_vector()
# Switch on name
if name == 'xyz':
val = s[0:3]
elif name == 'x':
val = s[0]
elif name == 'y':
val = s[1]
elif name == 'z':
val = s[2]
elif name == 'quart':
val = s[3:7]
elif name in ['rpy', 'roll', 'pitch', 'yaw']:
quart = s[3:7]
roll, pitch, yaw = geom_utils.quaternion_to_euler_angle(quart)
if name == 'roll':
val = roll
elif name == 'pitch':
val = pitch
elif name == 'yaw':
val = yaw
elif name == 'rpy':
return np.array([roll, pitch, yaw])
elif name == 'joint_angles':
val = s[7:15]
elif name == 'front_left_joints':
val = s[7:9]
elif name == 'front_left_hip':
val = s[7]
elif name == 'front_left_knee':
val = s[8]
elif name == 'back_left_joints':
val = s[9:11]
elif name == 'back_left_hip':
val = s[9]
elif name == 'back_left_knee':
val = s[10]
elif name == 'back_right_joints':
val = s[11:13]
elif name == 'back_right_hip':
val = s[11]
elif name == 'back_right_knee':
val = s[12]
elif name == 'front_right_joints':
val = s[13:15]
elif name == 'front_right_hip':
val = s[13]
elif name == 'front_right_knee':
val = s[14]
elif name == 'xyz_vel':
val = s[15:18]
elif name == 'x_vel':
val = s[15]
elif name == 'y_vel':
val = s[16]
elif name == 'z_vel':
val = s[17]
elif name == 'rpy_vel':
val = s[18:21]
elif name == 'roll_vel':
val = s[18]
elif name == 'pitch_vel':
val = s[19]
elif name == 'yaw_vel':
val = s[20]
elif name == 'joint_angle_vel':
val = s[21:]
elif name == 'front_left_joint_vel':
val = s[21:23]
elif name == 'front_left_hip_vel':
val = s[21]
elif name == 'front_left_knee_vel':
val = s[22]
elif name == 'back_left_joint_vel':
val = s[23:25]
elif name == 'back_left_hip_vel':
val = s[23]
elif name == 'back_left_knee_vel':
val = s[24]
elif name == 'back_right_joint_vel':
val = s[25:27]
elif name == 'back_right_hip_vel':
val = s[25]
elif name == 'back_right_knee_vel':
val = s[26]
elif name == 'front_right_joint_vel':
val = s[27:29]
elif name == 'front_right_hip_vel':
val = s[27]
elif name == 'front_right_knee_vel':
val = s[28]
else:
raise Error("Not a recognized state")
return val
# We consider only roll, pitch and joint angles, and their velocities as "internal" state
# We consider the rest as "external"
# We convert away from quaternions to do this
def get_intern_extern_state(self):
# Extract different states
s = self.state_vector()
xyz = self.get_state_by_name('xyz', s)
rpy = self.get_state_by_name('rpy', s)
joint_angles = self.get_state_by_name('joint_angles', s)
d_xyz = self.get_state_by_name('xyz_vel', s)
d_rpy = self.get_state_by_name('rpy_vel', s)
d_joint = self.get_state_by_name('joint_angle_vel', s)
# Seperate out yaw
roll = rpy[0]
pitch = rpy[1]
yaw = rpy[2]
d_roll = d_rpy[0]
d_pitch = d_rpy[1]
d_yaw = d_rpy[2]
# Set internal/external states
s_internal = np.concatenate([[roll, pitch], joint_angles, [d_roll, d_pitch], d_joint])
s_external = np.concatenate([xyz, [yaw], d_xyz, [d_yaw]])
assert(s_internal.shape[0] == 20)
assert(s_external.shape[0] == 8)
return s_internal, s_external
def _get_obs(self):
raise NotImplementedError
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
    """Place the viewer camera at half the model's spatial extent."""
    self.viewer.cam.distance = self.model.stat.extent * 0.5
|
# Report the min / max / average sentence length (counted in
# whitespace-separated tokens) for each of the ten ASAP prompt files.
for i in range(1, 11):
    with open('resources/asap_prompt_' + str(i) + '.txt', 'r', encoding='utf-8') as file:
        max_length = 0
        sum_length = 0
        count_line = 0
        min_length = 1024
        for line in file:
            token_list = line.split(" ")
            max_length = max(max_length, len(token_list))
            min_length = min(min_length, len(token_list))
            # Accumulate exactly once per line; the original code had a
            # second sum_length/count_line increment, double-counting.
            sum_length += len(token_list)
            count_line += 1
    # The `with` block closes the file; the original's explicit
    # file.close() afterwards was redundant and has been removed.
    print('Min sentence length in this prompt' + str(i) + ' is ' + str(min_length))
    print('Max sentence length in this prompt' + str(i) + ' is ' + str(max_length))
    # Guard against an empty file to avoid ZeroDivisionError.
    print('Average sentence length in prompt ' + str(i) + ' is ' + str(sum_length / max(count_line, 1)))
    print()
|
from queue import LifoQueue
class StackUnderflowError(Exception):
    """Raised when an operation needs more operands than the stack holds."""

    def __init__(self, message):
        super().__init__(message)


def check_size(stack, size):
    """Raise StackUnderflowError unless `stack` holds at least `size` items."""
    if stack.qsize() < size:
        raise StackUnderflowError("Stack does not contain enough values for required pop")


def dup(stack):
    """Duplicate the top of the stack: ( a -- a a )."""
    check_size(stack, 1)
    v = stack.get()
    stack.put(v); stack.put(v)
    return stack


def drop(stack):
    """Discard the top of the stack: ( a -- )."""
    check_size(stack, 1)
    stack.get()
    return stack


def swap(stack):
    """Exchange the top two items: ( a b -- b a )."""
    check_size(stack, 2)
    a = stack.get()
    b = stack.get()
    stack.put(a)
    stack.put(b)
    return stack


def over(stack):
    """Copy the second item over the top: ( a b -- a b a )."""
    check_size(stack, 2)
    a = stack.get()
    b = stack.get()
    stack.put(b)
    stack.put(a)
    stack.put(b)
    return stack


def add(stack):
    """( a b -- a+b )."""
    check_size(stack, 2)
    a = stack.get()
    b = stack.get()
    stack.put(a + b)
    return stack


def subtract(stack):
    """( a b -- a-b ): subtract the top item from the second."""
    check_size(stack, 2)
    a = stack.get()
    b = stack.get()
    stack.put(b - a)
    return stack


def multiply(stack):
    """( a b -- a*b )."""
    check_size(stack, 2)
    a = stack.get()
    b = stack.get()
    stack.put(a * b)
    return stack


def divide(stack):
    """( a b -- a//b ): floor-divide the second item by the top.

    A zero divisor propagates ZeroDivisionError to the caller.
    """
    check_size(stack, 2)
    a = stack.get()
    b = stack.get()
    stack.put(b // a)
    return stack


# Dispatch table mapping built-in Forth words to their implementations.
ops = {
    '+': add, '-': subtract, '*': multiply, '/': divide,
    'dup': dup, 'drop': drop, 'swap': swap, 'over': over
}


def stack_to_list(stack):
    """Drain `stack` and return its contents bottom-first (destructive)."""
    elements = []
    while stack.qsize():
        elements.append(stack.get())
    return elements[::-1]


def execute(item, stack):
    """Execute one token: push a numeric literal or apply a built-in word.

    Fix: literals are parsed with int() instead of str.isnumeric(), so
    negative numbers such as "-3" are now accepted.
    """
    try:
        number = int(item)
    except ValueError:
        if item in ops:
            stack = ops[item](stack)
        else:
            raise ValueError("Unexpected item: {}".format(item))
    else:
        stack.put(number)
    return stack


def evaluate(input_data):
    """Evaluate a list of Forth source lines; return the final stack bottom-first.

    Word definitions look like ": name body ;" (case-insensitive).
    Fix: a definition body is expanded against already-known custom
    words at definition time, so words defined in terms of other custom
    words now work (the original raised ValueError on them).  This also
    matches standard Forth semantics when a word is later redefined.
    """
    stack = LifoQueue()
    custom_ops = {}
    for line in input_data:
        tokens = line.lower().split(' ')
        if tokens[0] == ':' and tokens[-1] == ';':  # word definition
            if tokens[1].isnumeric():
                raise ValueError("Got fully numerical word-name")
            expanded = []
            for token in tokens[2:-1]:
                expanded.extend(custom_ops.get(token, [token]))
            custom_ops[tokens[1]] = expanded
        else:
            for item in tokens:
                # Custom words were expanded to primitives at define time.
                for op in custom_ops.get(item, [item]):
                    stack = execute(op, stack)
    return stack_to_list(stack)
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class LibtiffToolfile(Package):
    """Spack package that generates a CMS SCRAM toolfile for libtiff."""

    # Dummy source URL: toolfile packages have nothing real to download.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('libtiff')

    def install(self, spec, prefix):
        """Render the libtiff toolfile template and write it to `prefix`.

        $VER / $PFX placeholders are substituted by write_scram_toolfile;
        $$NAME presumably escapes a literal SCRAM variable reference —
        confirm against scrampackage's implementation.
        """
        values = {}
        values['VER'] = spec['libtiff'].version
        values['PFX'] = spec['libtiff'].prefix
        fname = 'libtiff.xml'
        contents = str("""<tool name="libtiff" version="$VER">
  <info url="http://www.libtiff.org/"/>
  <lib name="tiff"/>
  <client>
    <environment name="LIBTIFF_BASE" default="$PFX"/>
    <environment name="LIBDIR" default="$$LIBTIFF_BASE/lib"/>
    <environment name="INCLUDE" default="$$LIBTIFF_BASE/include"/>
  </client>
  <runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
  <use name="root_cxxdefaults"/>
  <use name="libjpeg-turbo"/>
  <use name="zlib"/>
</tool>""")
        write_scram_toolfile(contents, values, fname, prefix)
|
import boto3
import json
def get_aws_mesh_info():
    """Collect App Mesh descriptions from every reachable AWS region.

    Prints a JSON document of the form {"Mesh": [[desc], ...]} and
    returns None.  Requires valid AWS credentials in the environment.
    """
    conn = boto3.client('ec2')
    regions = [region['RegionName'] for region in conn.describe_regions()['Regions']]
    appmeshes = []
    route_info = []  # NOTE(review): never populated or read — confirm intent
    for region in regions:
        # Skip regions where App Mesh was unavailable at the time of writing.
        if region == 'ap-east-1' or region == 'eu-west-3' or region == 'eu-north-1' or region == 'sa-east-1':
            continue
        conn = boto3.client('appmesh', region_name=region)
        # list meshes to get mesh names
        response = conn.list_meshes()['meshes']
        appmesh_names = []
        for res in response:
            appmesh_names.append(res['meshName'])
        # describe each mesh
        for name in appmesh_names:
            response = conn.describe_mesh(
                meshName=name
            )
            req_info = []
            req_info.append(response)
            # append each mesh info as separate list
            appmeshes.append(req_info)
    # convert the mesh list to json (default=str handles datetimes)
    mesh_dict = {"Mesh": appmeshes}
    mesh_json = json.dumps(mesh_dict, indent=4, default=str)
    print(mesh_json)
get_aws_mesh_info()
|
import pandas as pd
from PIL import Image
import numpy as np
import csv
def main():
    """Convert each row of the challenge test CSV into a 28x28 BMP image.

    Reads ./challenge_test/test.csv (first column an id, remaining
    columns pixel values — assumes exactly 784 of them; confirm) and
    writes ./challenge_test/image/image<i>.bmp per row.
    """
    csv_data = pd.read_csv("./challenge_test/test.csv").values
    image_array = csv_data[:,1:]  # pixel columns
    image_id = csv_data[:,0]  # id column (unused below)
    image_num = image_array.shape[0]
    print(image_array.shape)
    image_array = image_array.reshape((image_num, 28, 28))
    image_prefix = "./challenge_test/image/image"
    # image_csv_path = "./challenge_testa/train_label.csv"
    for i in range(image_num):
        im = Image.fromarray(np.uint8(image_array[i]))
        path = image_prefix + str(i) + ".bmp"
        im.save(path)
main()
|
# -*- coding: utf-8 -*-
class Solution:
    def countEven(self, num: int) -> int:
        """Count positive integers i <= num whose digit sum is even.

        If num itself has an even digit sum, exactly half of 1..num
        qualify (num // 2); otherwise num is excluded and the answer is
        (num - 1) // 2.
        """
        digit_sum = sum(int(digit) for digit in str(num))
        if digit_sum % 2 == 0:
            return num // 2
        return (num - 1) // 2
if __name__ == "__main__":
    # Smoke tests using the LeetCode examples.
    solution = Solution()
    assert 2 == solution.countEven(4)
    assert 14 == solution.countEven(30)
|
import pandas as pd
# NOTE(review): the original two lines read `data frame` and `data frame2`,
# which is a SyntaxError — the module cannot even be imported.  They are
# commented out until the intended DataFrame construction is known.
# data frame
# data frame2
|
try:
    import RPi.GPIO as GPIO
    import time

    GPIO.setmode(GPIO.BCM)
    GPIO.setup(17, GPIO.OUT)

    def blink(pin):
        """Pulse `pin` high for 100 ms (real hardware)."""
        GPIO.output(pin, True)
        time.sleep(0.1)
        GPIO.output(pin, False)
# Deliberate best-effort fallback for machines without GPIO hardware.
# Fix: the original used a bare `except:`, which also swallows
# SystemExit/KeyboardInterrupt; `except Exception` keeps the intended
# "not on a Pi" fallback without hiding interpreter-level signals.
except Exception:
    def blink(pin):
        """Off-hardware fallback: just log the blink."""
        print('blinking pin %s' % pin)
|
"""
权限视图模块
"""
# pylint: disable=invalid-name, too-few-public-methods
from flask import render_template, redirect, url_for, flash
from flask_login import login_required, current_user
from flask_moment import Moment
from datetime import datetime
import pytz
from .. import mydb
from .forms import LabelDict, PayapplyForm, Paydetail, AddPermissionForm, Permissiondetail
from . import work
from ..models import Payments, Approvers, User, Permissions, Departments, Crossvalids, Lawyers
from sqlalchemy import and_, or_
@work.route('/addpermission', methods=['GET', 'POST'])
@login_required
def addpermission():
    """Add a new permission grant — feature currently DISABLED.

    The original implementation (validating the grantor's own active
    permission, capping each per-approver amount at the grantor's own
    limit, and expiring overlapping earlier grants) had been disabled
    by wrapping the whole body in a no-op string literal.  That ~280
    lines of dead code has been removed here; recover it from version
    control when re-enabling the feature.

    Current behavior: flash a "feature temporarily closed" message and
    render the index page.
    """
    flash('授权功能暂时关闭。')  # "permission granting is temporarily closed"
    return render_template('index.html')
@work.route('/permissionlist')
@login_required
def permissionlist():
    """Render the list of approved permission grants."""
    all_permissions = Permissions.query.filter_by(approved=True)
    label_dict = LabelDict()
    # Refresh expired grants: anything whose term end is before "now"
    # (Asia/Shanghai) is flagged invalid.
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    invalid_permissions = Permissions.query.filter(Permissions.termend<datetime.now(tz)).all()
    if invalid_permissions:
        for invalid in invalid_permissions:
            invalid.valid=False
            # NOTE(review): rows are added to the session but no commit
            # happens in this view — presumably flushed elsewhere; verify.
            mydb.session.add(invalid)
    return render_template('work/permissionlist.html',
                           permissionlist=all_permissions,
                           label_dict=label_dict)
@work.route('/permissiondetail/<pmid>', methods=['GET', 'POST'])
@login_required
def permissiondetail(pmid):
    """Read-only detail view for a single permission grant."""
    detail_form = Permissiondetail()
    record = Permissions.query.filter_by(idpermission=pmid).first()
    if record is None:
        return render_template('404.html'), 404
    grantee = User.query.filter_by(uid=record.puid).first()
    # Lookup dictionaries for display labels.
    labels = LabelDict()
    companydict = labels.all_company_dict
    dptdict = labels.all_dpt_dict
    positiondict = labels.all_dptincharge_dict
    allusersdict = labels.all_users_dict
    # Fill the (display-only) form fields.
    detail_form.idpermission_pmdt_input.data = "[授权书] " + str(record.idpermission) + " 号"
    detail_form.company_pmdt_input.data = companydict[record.companyid]
    detail_form.position_pmdt_input.data = positiondict[record.positionid]
    detail_form.usercompany_pmdt_input.data = companydict[grantee.company]
    detail_form.userdpt_pmdt_input.data = dptdict[grantee.dpt]
    detail_form.user_pmdt_input.data = allusersdict[record.puid]
    detail_form.term_pmdt_input.data = {1:"长期",2:"临时"}[record.term]
    detail_form.termstart_pmdt_input.data = record.termstart
    detail_form.termend_pmdt_input.data = record.termend
    detail_form.originstart_pmdt_input.data = record.originstart
    detail_form.originend_pmdt_input.data = record.originend
    detail_form.valid_pmdt_input.data = {True:"生效中",False:"已失效"}[record.valid]
    # The 19 per-approver amount fields share a naming scheme, so copy
    # them in a loop instead of 19 hand-written assignments.
    for n in range(1, 20):
        field = 'apprv' + str(100000 + n)  # apprv100001 .. apprv100019
        getattr(detail_form, field + '_pmdt_input').data = getattr(record, field)
    return render_template('work/permissiondetail.html', permissiondetail_disp=detail_form)
@work.route('/addrules')
@login_required
def addrules():
    """Authorization-rules page: department heads plus consultant lists."""
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    label_dict = LabelDict()
    # Only real (business) departments are needed: ids >= 4.
    realdpt_dict = {}
    for key, value in label_dict.all_dpt_dict.items():
        if key >= 4:
            realdpt_dict[key] = value
    # Refresh expired grants: term end earlier than "now" -> invalid.
    # (tz is recomputed here — redundant with the lines above, harmless.)
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    invalid_permissions = Permissions.query.filter(Permissions.termend<datetime.now(tz)).all()
    if invalid_permissions:
        for invalid in invalid_permissions:
            invalid.valid=False
            # NOTE(review): no commit in this view — verify flush happens elsewhere.
            mydb.session.add(invalid)
    # Department-head table: all approved, valid, currently-active grants.
    incharge_all = Permissions.query.filter(and_(Permissions.approved==True,\
        Permissions.valid==True, Permissions.termstart<=datetime.now(tz),\
        Permissions.termend>=datetime.now(tz))).all()
    cv_all = Crossvalids.query.order_by(Crossvalids.crossdpt).all()
    rule_dict = {}
    uidlist = []
    # rule_dict[company][dept] = [heads-in-charge, cross-validators]
    for key_i, _ in label_dict.all_company_dict.items():
        dpt_dict = {}
        for key_j, _ in realdpt_dict.items():
            uidlist = [cv.crossuid for cv in cv_all if cv.companyid==key_i and cv.crossdpt==key_j]
            ichlist = [incharge.puid for incharge in incharge_all if incharge.companyid==key_i and incharge.positionid==key_j]
            dpt_dict[key_j] = [ichlist,uidlist]
        rule_dict[key_i] = dpt_dict
    # Legal / accounting / stamp-keeper consultant table per company
    # (consultant type codes 1/2/3 respectively, per the filters below).
    lawyers = Lawyers.query.all()
    lawyer_dict = {}
    lawuidlist = []
    accuidlist = []
    stamperuidlist = []
    for key, _ in label_dict.all_company_dict.items():
        lawuidlist = [lawyer.consultantuid for lawyer in lawyers if lawyer.companyid==key and lawyer.consultant==1]
        accuidlist = [acc.consultantuid for acc in lawyers if acc.companyid==key and acc.consultant==2]
        stamperuidlist = [stamper.consultantuid for stamper in lawyers if stamper.companyid==key and stamper.consultant==3]
        lawyer_dict[key] = [lawuidlist, accuidlist, stamperuidlist]
    return render_template('work/addrules.html', label_dict=label_dict, rule_dict=rule_dict, lawyer_dict=lawyer_dict)
|
def get_city(city, country, population=''):
    """Return "City,Country", optionally suffixed with " - population N".

    The population suffix is added only when `population` is truthy, so
    0 or '' (the default) suppresses it.
    """
    label = f"{city.title()},{country.title()}"
    if population:
        return f"{label} - population {population}"
    return label
|
# Depth-first search
def dfs(node, explored):
    """Depth-first traversal starting at `node`.

    TODO: unimplemented stub.  `explored` is presumably the set of
    already-visited nodes — confirm the intended contract before
    implementing.
    """
    pass
# Breadth-first search
def bfs(start, goal):
    """Breadth-first search from `start` toward `goal`.

    TODO: unimplemented stub; the graph representation it should
    operate on is not defined in this file — confirm before implementing.
    """
    pass
|
import tensorflow as tf
import numpy as np
# TF1-style eager setup.  NOTE(review): enable_eager_execution,
# set_random_seed, and tf.contrib were removed in TensorFlow 2.x; this
# script only runs on TF 1.x.
tf.enable_eager_execution()
tf.set_random_seed(777)  # for reproducibility
tfe = tf.contrib.eager
# Toy training set: 8 samples with 4 features each.
x_data = [[1, 2, 1, 1], [2, 1, 3, 2],[3, 1, 3, 4],[4, 1, 5, 5],
          [1, 7, 5, 5],[1, 2, 5, 6],[1, 6, 6, 6], [1, 7, 7, 7]]
# One-hot labels over the 3 classes.
y_data = [[0, 0, 1],[0, 0, 1],[0, 0, 1],[0, 1, 0],[0, 1, 0],
          [0, 1, 0],[1, 0, 0],[1, 0, 0]]
x_data = np.asarray(x_data, dtype=np.float32)
y_data = np.asarray(y_data, dtype=np.float32)
nb_classes = 3  # three classes (targets are one-hot encoded)
# Weight and bias setting
W = tfe.Variable(tf.random_normal([4, nb_classes]), name='weight')  # 4 input features
b = tfe.Variable(tf.random_normal([nb_classes]), name='bias')
variables = [W, b]
#print("w=",W,"\nb=", b)
# softmax = exp(logits) / reduce_sum(exp(logits), dim)
def hypothesis(X):
    """Softmax model: map inputs X to class probabilities via X @ W + b."""
    return tf.nn.softmax(tf.matmul(X, W) + b)  # convert logits to probabilities
# X: input batch, W: weight matrix
#print(hypothesis(x_data))
##############################
# Softmax onehot test
# Sanity check: run the (untrained) model on a single sample.
sample_db = [[8,2,1,4]]
sample_db = np.asarray(sample_db, dtype=np.float32)
print(hypothesis(sample_db))
###############################
def cost_fn(X, Y):
    """Cross-entropy loss: batch mean of -sum(Y * log(softmax(X)))."""
    logits = hypothesis(X)
    cost = -tf.reduce_sum(Y * tf.log(logits), axis=1)  # -y*log(y_hat)
    cost_mean = tf.reduce_mean(cost)  # average over the batch
    return cost_mean
#print(cost_fn(x_data, y_data))
def grad_fn(X, Y):
    """Return d(loss)/d[W, b] via autodiff (used for gradient descent)."""
    with tf.GradientTape() as tape:
        loss = cost_fn(X, Y)
    grads = tape.gradient(loss, variables)
    return grads
#print(grad_fn(x_data, y_data))
# Training function.
def fit(X, Y, epochs=2000, verbose=100):
    """Train W and b with plain gradient descent.

    Logs the loss at the first epoch and then every `verbose` epochs.
    """
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    for i in range(epochs):
        grads = grad_fn(X, Y)
        optimizer.apply_gradients(zip(grads, variables))
        if (i == 0) | ((i + 1) % verbose == 0):
            print('Loss at epoch %d: %f' % (i + 1, cost_fn(X, Y).numpy()))
fit(x_data, y_data)
# Predictions on the training data.
a=hypothesis(x_data)
print(a)
print(tf.argmax(a,1))  # predicted class ids
print(tf.argmax(y_data,1))  # true class ids
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 19:24:02 2020
@author: shaun
"""
import numpy as np
from scipy.optimize import brentq
def eulerstep(yn, tn, f, h):
    """One explicit (forward) Euler step: y_{n+1} = y_n + h * f(y_n, t_n)."""
    return yn + h * f(yn, tn)
def eIlinstep(yn, tn, f, df, h):
    """One linearized (Newton) implicit-Euler step.

    Solves the backward-Euler update after linearizing f about y_n,
    using df = df/dy evaluated at (y_n, t_n + h).
    """
    numerator = yn + h * f(yn, tn + h) - h * yn * df(yn, tn + h)
    denominator = 1 - h * df(yn, tn + h)
    return numerator / denominator
def nonlinE(yn,tn1,h,f):
    """One implicit step for the specific ODE whose hand-derived update
    equation reduces to  -x + c + e^(x - tn1) = 0  with c = y_n; the
    root x is returned as y_{n+1}.
    """
    #solve the differential equation you have for
    # the equation that satisfies yn+1 equaling some function of x and t
    c=yn
    def function(x):
        # Residual whose root is the next value y_{n+1}.
        return -x+c+np.e**(x-tn1)
    # Scan [-100, 100] on a coarse grid for a sign change to bracket a
    # root for brentq; the LAST bracket found wins.
    n=np.linspace(-100,100,100)
    a=0
    b=0
    for elem in range(0,len(n)-1):
        A=function(n[elem])
        B=function(n[elem+1])
        if(A<0 and B>0):
            a=n[elem]
            b=n[elem+1]
        elif(A>0 and B<0):
            a=n[elem]
            b=n[elem+1]
    try:
        root=brentq(function,a,b)
    except:
        # NOTE(review): bare except hides the real failure (including a
        # no-bracket-found ValueError); also the fallback passes tn1
        # rather than tn to eulerstep — confirm that is intended.
        print("an error occured will use euler for value \n")
        print("c= "+str(c))
        print("tn1= "+str(tn1))
        root=eulerstep(yn,tn1,f,h)
    return root
def trapezoidalE(yn, tn1, tn, h):
    """One trapezoidal step, hard-coded for a specific right-hand side.

    The implicit trapezoidal update was solved for y_{n+1} by hand for
    y' = 2(1+t)^3 e^{-t} - 3ty/(1+t).  NOTE: the B term divides by
    (1 + tn1) although it pairs with the t_n forcing — presumably from
    the hand derivation; confirm against the worked algebra.
    """
    forcing_new = (h / 2.0) * 2 * ((1 + tn1) ** 3) * (np.e ** (-tn1))
    decay_old = (h / 2.0) * (-3 * tn * yn) / (1 + tn1)
    forcing_old = (h / 2.0) * 2 * ((1 + tn) ** 3) * (np.e ** (-tn))
    numerator = yn + forcing_new + decay_old + forcing_old
    denominator = 1 + (h / 2.0) * ((3 * tn1) / (1 + tn1))
    return numerator / denominator
def rk2step(yn, tn, f, h):
    """One explicit midpoint (RK2) step.

    Fixes the original, which computed `yn + h*(y1, tn+tn*1/2)` —
    multiplying h by a *tuple* instead of calling f, which raises
    TypeError (and used tn instead of h in the midpoint time).
    """
    y_mid = yn + (h / 2) * f(yn, tn)
    return yn + h * f(y_mid, tn + h / 2)
def rk4step(yn, tn, f, h):
    """One classical fourth-order Runge-Kutta step.

    Fixes two defects in the original:
      * k4 evaluated f at yn + 0.5*k3 instead of yn + k3, which breaks
        the classical RK4 tableau (and its 4th-order accuracy);
      * the result yn1 was computed but never returned (the function
        implicitly returned None).
    """
    k1 = h * f(yn, tn)
    k2 = h * f(yn + k1 / 2.0, tn + h / 2.0)
    k3 = h * f(yn + k2 / 2.0, tn + h / 2.0)
    k4 = h * f(yn + k3, tn + h)
    return yn + (1 / 6) * k1 + (1 / 3) * (k2 + k3) + (1 / 6) * k4
def eulerE(y0, a, b, f, h):
    """Integrate y' = f(y, t) from t = a until b with explicit Euler.

    Returns (t, y) lists including the initial condition.
    """
    ys = [y0]
    ts = [a]
    elapsed = a
    while elapsed < b:
        ys.append(eulerstep(ys[-1], ts[-1], f, h))
        ts.append(ts[-1] + h)
        elapsed += h
    return ts, ys
def eulerI(y0, a, b, h, f):
    """Integrate with the nonlinear implicit-Euler solver `nonlinE`.

    The time grid carries one look-ahead node because nonlinE needs
    t_{n+1}; the extra node is dropped before returning.
    """
    ys = [y0]
    ts = [a, a + h]  # look-ahead node for the implicit solve
    elapsed = a
    while elapsed < b:
        ys.append(nonlinE(ys[-1], ts[-1], h, f))
        ts.append(ts[-1] + h)
        elapsed += h
    del ts[-1]  # discard the unused look-ahead node
    return ts, ys
def eulerIlin(y0, a, b, h, f, df):
    """Integrate with the linearized implicit-Euler step `eIlinstep`.

    `df` is the Jacobian df/dy used by the linearization.
    """
    ys = [y0]
    ts = [a]
    elapsed = a
    while elapsed < b:
        ys.append(eIlinstep(ys[-1], ts[-1], f, df, h))
        ts.append(ts[-1] + h)
        elapsed += h
    return ts, ys
def eulerT(y0, a, b, h):
    """Integrate with the hard-coded trapezoidal step `trapezoidalE`.

    The time grid carries one look-ahead node because trapezoidalE
    needs both t_{n+1} and t_n; the extra node is dropped at the end.
    """
    ys = [y0]
    ts = [a, a + h]  # look-ahead node for the implicit update
    elapsed = a
    while elapsed < b:
        ys.append(trapezoidalE(ys[-1], ts[-1], ts[-2], h))
        ts.append(ts[-1] + h)
        elapsed += h
    del ts[-1]  # discard the unused look-ahead node
    return ts, ys
def rk2(y0, a, b, f, h=0.1):
    """Integrate y' = f(y, t) from t = a until b with the midpoint (RK2) stepper.

    Fix: the original body referenced a step size `h` that was neither a
    parameter nor defined anywhere in this module, so every call raised
    NameError.  `h` is now a trailing keyword parameter (default 0.1,
    chosen arbitrarily — pass it explicitly), appended so any existing
    4-argument call sites keep working.
    """
    ys = [y0]
    ts = [a]
    elapsed = a
    while elapsed < b:
        ys.append(rk2step(ys[-1], ts[-1], f, h))
        ts.append(ts[-1] + h)
        elapsed += h
    return ts, ys
def rk4(y0, a, b, h, f):
    """Integrate y' = f(y, t) from t = a until b with the RK4 stepper.

    Returns (t, y) lists including the initial condition.
    """
    ys = [y0]
    ts = [a]
    elapsed = a
    while elapsed < b:
        ys.append(rk4step(ys[-1], ts[-1], f, h))
        ts.append(ts[-1] + h)
        elapsed += h
    return ts, ys
|
import pytest
def test_cadastro():
    """Two payloads built from the same data must compare equal."""
    expected = {"nome": "YURI"}
    obtained = {"nome": "YURI"}
    assert expected == obtained
|
import random
import string
import pyperclip
class User:
    """An application user identified by user name and password."""

    user_list = []  # shared registry of every saved User

    def __init__(self, user_name, password):
        self.user_name = user_name
        self.password = password

    def save_user(self):
        """Append this user to the shared registry."""
        User.user_list.append(self)

    @classmethod
    def display_user(cls):
        """Return the shared registry of saved users."""
        return cls.user_list
class Credential:
    """A saved credential: an account name plus its user name/password."""

    credential_list = []  # shared registry of saved credentials

    @classmethod
    def verify_user(cls, user_name, password):
        """Return `user_name` if it matches a saved User, else ''.

        Fix: the original used `==` (a no-op comparison) where `=`
        (assignment) was intended, so it always returned ''.
        """
        current_user = ''
        for user in User.user_list:
            if user.user_name == user_name and user.password == password:
                current_user = user.user_name
        return current_user

    def __init__(self, account, user_name, password):
        self.account = account
        self.user_name = user_name
        self.password = password

    def save_account(self):
        """Add this credential to the shared registry."""
        Credential.credential_list.append(self)

    def del_account(self):
        """Remove this credential from the shared registry."""
        Credential.credential_list.remove(self)

    @classmethod
    def find_account(cls, account):
        """Return the first credential whose account matches, or None."""
        for credential in cls.credential_list:
            if credential.account == account:
                return credential

    @classmethod
    def display_account(cls):
        """Return the shared registry of saved credentials."""
        return cls.credential_list

    @classmethod
    def copy_account(cls, account):
        """Copy the matching account's password to the clipboard."""
        found_acc = Credential.find_account(account)
        pyperclip.copy(found_acc.password)

    @classmethod
    def find_by_acc(cls, account):
        """Duplicate of find_account, kept for backward compatibility."""
        return cls.find_account(account)
def generate_password(size=8, char=string.ascii_uppercase + string.ascii_lowercase + string.digits):
    """Return a pseudo-random password of `size` characters drawn from `char`.

    Uses the `random` module, so it is NOT suitable for
    security-sensitive secrets (that would need `secrets`).
    """
    picks = (random.choice(char) for _ in range(size))
    return ''.join(picks)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Filename: step02_download_world_bank_indicator_in_my_mac
# @Date: 2020/3/11
# @Author: Mark Wang
# @Email: wangyouan@gamil.com
"""
python -m CollectData.step02_download_world_bank_indicator_in_my_mac
"""
import os
import time
import random
import pandas as pd
from tqdm import tqdm
import wbdata
from pandas import DataFrame
if __name__ == '__main__':
    # Enumerate every World Bank data source and dump all indicator
    # metadata into one Excel sheet, resuming from a previous partial run.
    source_list = wbdata.get_source(display=False)
    save_path = '/Users/markwang/Google Drive/Projects/DatabaseInformation/WorldBank'
    save_file = os.path.join(save_path, '20200311_indicator_information.xlsx')
    if not os.path.isfile(save_file):
        # Fresh run: start with an empty frame of the expected columns.
        indicator_index_df = DataFrame(columns=['SourceID', 'SourceName', 'IndicatorID', 'IndicatorName',
                                                'sourceOrganization', 'sourceNote'])
    else:
        # Resume: reload what has already been downloaded.
        indicator_index_df: DataFrame = pd.read_excel(save_file)
        indicator_index_df['SourceID'] = indicator_index_df['SourceID'].astype(int)
    for source_info in tqdm(source_list):
        source_id = source_info['id']
        info_dict = {'SourceID': source_id, 'SourceName': source_info['name']}
        # Skip sources already present in the saved sheet.
        if not indicator_index_df.loc[indicator_index_df['SourceID'] == int(source_id)].empty:
            continue
        indicator_list = wbdata.get_indicator(source=source_id, display=False)
        source_indicator_dfs = list()  # NOTE(review): never used afterwards
        for indicator_info in indicator_list:
            indicator_id = indicator_info['id']
            indicator_info_dict = info_dict.copy()
            indicator_info_dict['IndicatorID'] = indicator_id
            indicator_info_dict['IndicatorName'] = indicator_info['name']
            for key in ['sourceOrganization', 'sourceNote']:
                if key in indicator_info:
                    indicator_info_dict[key] = indicator_info[key]
            # NOTE(review): DataFrame.append is deprecated and removed in
            # pandas 2.0; this script requires an older pandas.
            indicator_index_df: DataFrame = indicator_index_df.append(indicator_info_dict, ignore_index=True)
        # Checkpoint after each source so an interrupted run can resume.
        indicator_index_df.drop_duplicates().to_excel(os.path.join(save_path, '20200311_indicator_information.xlsx'),
                                                      index=False)
        time.sleep(random.randint(1, 10))  # be polite to the API
|
# Barnsley-fern style iterated function system rendered as a scatter plot.
import matplotlib.pyplot as plot
# random supplies the transform-selection rolls
import random

# Point lists seeded with the origin; each iteration appends one new point
# derived from the previous one.
x = [0]
y = [0]
for _ in range(1, 100000):
    px, py = x[-1], y[-1]
    # Roll 1..100 to pick one of four affine transforms with the
    # classic fern probabilities (1%, 85%, 7%, 7%).
    z = random.randint(1, 100)
    if z == 1:
        nx = 0
        ny = 0.16 * py
    elif 2 <= z <= 86:
        nx = 0.85 * px + 0.04 * py
        ny = -0.04 * px + 0.85 * py + 1.6
    elif 87 <= z <= 93:
        nx = 0.2 * px - 0.26 * py
        ny = 0.23 * px + 0.22 * py + 1.6
    else:  # 94 <= z <= 100
        nx = -0.15 * px + 0.28 * py
        ny = 0.26 * px + 0.24 * py + 0.44
    x.append(nx)
    y.append(ny)

# Render the accumulated points.
plot.scatter(x, y, s = 0.2, edgecolor = 'blue')
plot.xlabel('x')
plot.ylabel('y')
plot.show()
|
class Solution(object):
    def deleteNode(self, node):
        """Remove *node* from its singly linked list without access to the head.

        https://leetcode.com/problems/delete-node-in-a-linked-list/
        The node takes over its successor's value and next-pointer, which
        unlinks the successor and effectively deletes *node*'s old value.
        """
        successor = node.next
        node.val, node.next = successor.val, successor.next
|
import pandas as pd
from nipype.pipeline.engine import Node, Workflow, MapNode
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.afni as afni
import nipype.interfaces.nipy as nipy
import nipype.algorithms.rapidart as ra
from nipype.algorithms.misc import TSNR
import nipype.interfaces.ants as ants
import nilearn.image as nli
from functions import strip_rois_func, get_info, median, motion_regressors, extract_noise_components, selectindex, fix_hdr
from linear_coreg import create_coreg_pipeline
from nonlinear_coreg import create_nonlinear_pipeline
# Nipype workflow: project functional->structural mappings into anatomical
# space for each subject/session, using precomputed linear (ITK) and
# nonlinear (ANTs warp) EPI->highres transforms.
# read in subjects and file names
df=pd.read_csv('/scr/ilz3/myelinconnect/subjects.csv', header=0)
subjects_db=list(df['DB'])
subjects_db.remove('KSMT')
# sessions to loop over
sessions=['rest1_1' ,'rest1_2', 'rest2_1', 'rest2_2']
# directories
working_dir = '/scr/ilz3/myelinconnect/working_dir/mappings_fixhdr/'
data_dir= '/scr/ilz3/myelinconnect/'
out_dir = '/scr/ilz3/myelinconnect/transformations/'
# set fsl output type to nii.gz
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
# main workflow
mappings = Workflow(name='mappings')
mappings.base_dir = working_dir
mappings.config['execution']['crashdump_dir'] = mappings.base_dir + "/crash_files"
# iterate over subjects
subject_infosource = Node(util.IdentityInterface(fields=['subject']),
                          name='subject_infosource')
subject_infosource.iterables=[('subject', subjects_db)]
# iterate over sessions
session_infosource = Node(util.IdentityInterface(fields=['session']),
                          name='session_infosource')
session_infosource.iterables=[('session', sessions)]
# select files
# Template keys are filled from the subject/session iterables above.
templates={'median': 'resting/preprocessed/{subject}/{session}/realignment/corr_{subject}_{session}_roi_detrended_median_corrected.nii.gz',
           'median_mapping' : 'mappings/rest/fixed_hdr/corr_{subject}_{session}_*mapping_fixed.nii.gz',
           #'t1_mapping': 'mappings/t1/{subject}*T1_Images_merged_mapping.nii.gz',
           't1_highres' : 'struct/t1/{subject}*T1_Images_merged.nii.gz',
           'epi2highres_lin_itk' : 'resting/preprocessed/{subject}/{session}/registration/epi2highres_lin.txt',
           'epi2highres_warp':'resting/preprocessed/{subject}/{session}/registration/transform0Warp.nii.gz',
           'epi2highres_invwarp':'resting/preprocessed/{subject}/{session}/registration/transform0InverseWarp.nii.gz',
           #'t1_prep_rh' : 'struct/surf_rh/prep_t1/smooth_1.5/{subject}_rh_mid_T1_avg_smoothdata_data.nii.gz',
           #'t1_prep_lh' : 'struct/surf_lh/prep_t1/smooth_1.5/{subject}_lh_mid_T1_avg_smoothdata_data.nii.gz',
           #'t1_prep_lh' : 'struct/surf_lh/prep_t1/smooth_1.5/{subject}_lh_mid_T1_avg_smoothdata_data.nii.gz',
           }
selectfiles = Node(nio.SelectFiles(templates, base_directory=data_dir),
                   name="selectfiles")
mappings.connect([(subject_infosource, selectfiles, [('subject', 'subject')]),
                 (session_infosource, selectfiles, [('session', 'session')])
                 ])
# merge func2struct transforms into list
# NOTE(review): the warp is in1 and the linear transform in2 — ANTs applies
# transforms in reverse list order, so this ordering appears intentional.
translist_forw = Node(util.Merge(2),name='translist_forw')
mappings.connect([(selectfiles, translist_forw, [('epi2highres_lin_itk', 'in2')]),
                  (selectfiles, translist_forw, [('epi2highres_warp', 'in1')])])
# merge struct2func transforms into list
translist_inv = Node(util.Merge(2),name='translist_inv')
mappings.connect([(selectfiles, translist_inv, [('epi2highres_lin_itk', 'in1')]),
                  (selectfiles, translist_inv, [('epi2highres_invwarp', 'in2')])])
# project functional mapping to anatomical space
# input_image_type=3: treat the mapping as a time-series image.
func2struct = Node(ants.ApplyTransforms(invert_transform_flags=[False, False],
                                        dimension=3,
                                        input_image_type=3,
                                        interpolation='Linear'),
                   name='func2struct')
mappings.connect([(selectfiles, func2struct, [('median_mapping', 'input_image'),
                                              ('t1_highres', 'reference_image')]),
                  (translist_forw, func2struct, [('out', 'transforms')]),
                  ])
# project structural mapping to functional space
#struct2func = Node(ants.ApplyTransforms(invert_transform_flags=[True, False],
#                                        dimension=3,
#                                        input_image_type=3,
#                                        interpolation='Linear'),
#                   name='struct2func')
#mappings.connect([(selectfiles, struct2func, [('t1_mapping', 'input_image'),
#                                              ('median', 'reference_image')]),
#                  (translist_inv, struct2func, [('out', 'transforms')]),
#                  ])
# project T1 images to functional space
#t1_preps = Node(util.Merge(2),name='t1_preps')
#mappings.connect([(selectfiles, t1_preps, [('t1_prep_rh', 'in1')]),
#                  (selectfiles, t1_preps, [('t1_prep_rh', 'in2')])])
#t12func = MapNode(ants.ApplyTransforms(invert_transform_flags=[True, False],
#                                       dimension=3,
#                                       interpolation='WelchWindowedSinc'),
#                  iterfield=['input_image'],
#                  name='t12func')
#mappings.connect([(selectfiles, t12func, [('median', 'reference_image')]),
#                  (t1_preps, t12func, [('out', 'input_image')]),
#                  (translist_inv, t12func, [('out', 'transforms')]),
#                  ])
# sink relevant files
sink = Node(nio.DataSink(parameterization=False,
                         base_directory=out_dir),
            name='sink')
mappings.connect([(session_infosource, sink, [('session', 'container')]),
                  (func2struct, sink, [('output_image', 'func_to_t1_mapping.@func')]),
                 # (struct2func, sink, [('output_image', 't1_to_func_mapping.@anat')]),
                 # #(t12func, sink, [('output_image', 't1_in_funcspace.@anat')]),
                  ])
# Run with 16 parallel processes.
mappings.run(plugin='MultiProc', plugin_args={'n_procs' : 16})
|
import unittest
from katas.kyu_5.sum_of_pairs import sum_pairs
class SumOfPairsTestCase(unittest.TestCase):
    """Tests for katas.kyu_5.sum_pairs.sum_pairs."""

    def _assert_pair(self, numbers, target, expected):
        # Shared helper so each case reads as data: input, target, answer.
        self.assertEqual(sum_pairs(numbers, target), expected)

    def test_equal_1(self):
        self._assert_pair([11, 3, 7, 5], 10, [3, 7])

    def test_equal_2(self):
        self._assert_pair([4, 3, 2, 3, 4], 6, [4, 2])

    def test_equal_3(self):
        self._assert_pair([10, 5, 2, 3, 7, 5], 10, [3, 7])

    def test_equal_4(self):
        self._assert_pair([1, 4, 8, 7, 3, 15], 8, [1, 7])

    def test_equal_5(self):
        self._assert_pair([1, -2, 3, 0, -6, 1], -6, [0, -6])

    def test_equal_6(self):
        self._assert_pair([1, 2, 3, 4, 1, 0], 2, [1, 1])

    def test_equal_7(self):
        self._assert_pair([4, -2, 3, 3, 4], 8, [4, 4])

    def test_equal_8(self):
        self._assert_pair([0, 2, 0], 0, [0, 0])

    def test_equal_9(self):
        self._assert_pair([5, 9, 13, -3], 10, [13, -3])

    def test_is_none_1(self):
        # No pair sums to the target -> None expected.
        self.assertIsNone(sum_pairs([0, 0, -2, 3], 2))

    def test_is_none_2(self):
        self.assertIsNone(sum_pairs([20, -13, 40], -7))
|
import socketserver
import socket, threading
import sys
class MyTCPHandler(socketserver.BaseRequestHandler):
    """Proxy handler: relays data between the connecting client and the
    upstream server at (address, destPort), prefixing the upstream reply
    with "My server said: " before echoing it back to the client.
    """

    BUFFER_SIZE = 4096

    def handle(self):
        # `address` / `destPort` are module-level globals filled in by the
        # command-line parsing at the bottom of this file.
        global address
        add = address
        global destPort
        dst = destPort
        s = socket.socket()
        s.connect((add, dst))
        try:
            while 1:
                data = self.request.recv(self.BUFFER_SIZE)
                if len(data) == self.BUFFER_SIZE:
                    # A full buffer may mean more data is pending; drain it
                    # with non-blocking reads until the socket would block.
                    # NOTE(review): MSG_DONTWAIT is POSIX-only (not Windows).
                    while 1:
                        try:
                            data += self.request.recv(self.BUFFER_SIZE, socket.MSG_DONTWAIT)
                        except (BlockingIOError, OSError):
                            # Fix: was a bare `except`, which also swallowed
                            # KeyboardInterrupt/SystemExit.
                            break
                if len(data) == 0:
                    break  # client closed the connection
                dataClient = data.decode("utf-8")
                dataSrv = s.recv(1024).decode("utf-8")
                # sending to our client which we are the server to
                self.request.sendall(bytearray("My server said: " + dataSrv, "utf-8"))
                # sending to our server which we are the client to
                s.sendall(dataClient.encode())
                # current_thread().name replaces the deprecated
                # currentThread().getName() alias; output is identical.
                print("%s (%s) wrote: %s" % (self.client_address[0], threading.current_thread().name, dataClient.strip()))
        finally:
            # Fix: the upstream socket was never closed (leaked per client).
            s.close()
# Upstream target, overwritten from argv below; read by MyTCPHandler.
address = ''
destPort = 0
if __name__ == "__main__":
    # Short form: <srcPort> <address> <destPort>
    if(len(sys.argv)<5):
        srcPort = int(sys.argv[1])
        address = sys.argv[2]
        destPort = int(sys.argv[3])
    else:
        # Long form adds logging/replace options before the three values.
        # NOTE(review): logOption/replaceOption/replace/replaceWith are
        # parsed here but never used anywhere in this file -- confirm
        # whether the feature was dropped or is still to be implemented.
        logOption = sys.argv[1]
        replaceOption = sys.argv[2]
        replace = sys.argv[3]
        replaceWith = sys.argv[4]
        srcPort = int(sys.argv[5])
        address = sys.argv[6]
        destPort = int(sys.argv[7])
    # One thread per client connection; serve_forever blocks until killed.
    HOST = "localhost"
    server = socketserver.ThreadingTCPServer((HOST, srcPort), MyTCPHandler)
    server.serve_forever()
|
import logging
import operator
from datetime import datetime
from functools import reduce
import matplotlib.pyplot as plt
import numpy as np
from sympy import Symbol
from sympy.functions.elementary.exponential import log
from scipy.special import gamma
from common.gen import LinearCongruentialGenerator
from common.log import init_logging
from l5_recovery.recovery import Recovery
class WeibullGenerator:
    """Weibull random-variate generator using inverse-transform sampling.

    Builds the symbolic inverse CDF  t -> 1/lambda * (-ln t)**(1/alpha)
    once, then substitutes uniform samples from the supplied linear
    congruential generator.
    """

    def __init__(self, lcg, alpha, lambda_):
        self._lcg = lcg
        self._t = Symbol('t', real=True)
        self._inverse_cdf = 1 / lambda_ * (-log(self._t)) ** (1 / alpha)

    def __next__(self):
        # Substitute one uniform draw for t to obtain a Weibull variate.
        return self._inverse_cdf.subs(self._t, self._lcg())

    def __call__(self):
        # Allow the instance itself to be used as a callable sampler.
        return next(self)
def main():
    """Monte-Carlo study of a Weibull renewal (recovery) process.

    For each (alpha, lambda) parameter set: draw sample state diagrams,
    run many independent experiments, then compare experimental vs
    theoretical M, D, H(t), F(t), f(t), G(t) and phi(t).
    """
    init_logging(file='logs/l5-output-%s.log' % datetime.now(), debug=True)
    # Sequences of alpha and lambda parameters for the three models
    params = [
        {
            'gen': {
                'alpha': 1.0,
                'lambda_': 7.0
            },
            'max_transition': 0.5
        },
        {
            'gen': {
                'alpha': 0.1,
                'lambda_': 4.0
            },
            'max_transition': 2.0
        },
        {
            'gen': {
                'alpha': 2.0,
                'lambda_': 10.0
            },
            'max_transition': 0.25
        }
    ]
    # Configuration
    initial_value = 6450435
    numbers_after_dot = 5
    lcg = LinearCongruentialGenerator(initial_value)
    max_step = 10
    experiments_number = 1000
    plot_steps = 30
    # Preliminary runs: one sample state diagram per model
    logging.info('Generating initial state diagrams for all models...')
    fig, axes = plt.subplots(1, 3)
    for index, model_params in enumerate(params):
        model = Recovery(gen=WeibullGenerator(lcg, **model_params['gen']))
        model.model(max_steps=max_step)
        # NOTE(review): both halves of this zip are states[:-1]; by analogy
        # with transition_pairs below this may have been intended as
        # zip(model.states[:-1], model.states[1:]) -- confirm.
        state_pairs = zip(model.states[:-1], model.states[:-1])
        transition_pairs = zip(model.transitions[:-1], model.transitions[1:])
        ax = axes[index]
        for transition_states, transitions_H in zip(state_pairs, transition_pairs):
            ax.plot(transitions_H, transition_states, drawstyle='steps', color='blue')
        ax.set_title('alpha=%s\nlambda=%s'
                     % (model_params['gen']['alpha'], model_params['gen']['lambda_']))
        ax.grid()
    plt.show()
    # Runs of each of the models
    logging.info('Performing %s experiments for all models.' % experiments_number)
    for model_params in params:
        lambda_ = model_params['gen']['lambda_']
        alpha = model_params['gen']['alpha']
        logging.info('Performing experiments for the model with alpha=%s and lambda=%s.'
                     % (alpha, lambda_))
        models = [Recovery(gen=WeibullGenerator(lcg, **model_params['gen']))
                  for _ in range(0, experiments_number)]
        for index, model in enumerate(models):
            if index and index % 10 == 0:
                logging.debug('Experiments performed: %s/%s.' % (index, experiments_number))
            model.model(max_transition=model_params['max_transition'])
        logging.debug('All experiments has finished.')
        logging.info('Collecting transitions for %s intervals.' % plot_steps)
        transitions_H = np.linspace(0, model_params['max_transition'], plot_steps)
        states = []
        # For each grid time point, record each model's state at that time.
        for index, transition in enumerate(transitions_H):
            if index and index % 10 == 0:
                logging.debug('Transitions collected: %s/%s.' % (index, plot_steps))
            transition_states = []
            for model in models:
                state = None
                for transition_index, model_transition in enumerate(model.transitions):
                    if model_transition > transition:
                        state = model.states[transition_index]
                        break
                # Past the last recorded transition: use the final state.
                transition_states.append(state if state is not None else model.states[-1])
            states.append(transition_states)
        logging.debug('All transitions have been collected.')
        logging.info('Calculating M and D for ksi.')
        # Pool every generated inter-event time across all experiments.
        all_transitions = reduce(operator.add, [model.generated_transitions for model in models])
        M = (np.sum(all_transitions) / len(all_transitions)).__float__()
        M2 = np.sum(np.array(all_transitions) ** 2) / len(all_transitions)
        D = (M2 - M ** 2).__float__()
        # Theoretical Weibull moments via the gamma function.
        M_theoretical = 1 / lambda_ * gamma(1 + 1 / alpha)
        D_theoretical = (1 / lambda_) ** 2 * gamma(1 + 2 / alpha) - M_theoretical ** 2
        logging.info('experimental M = %s.' % (np.around(M, numbers_after_dot)))
        logging.info('theoretical M = %s.' % (np.around(M_theoretical, numbers_after_dot)))
        logging.info('experimental D = %s.' % (np.around(D, numbers_after_dot)))
        logging.info('theoretical D = %s.' % (np.around(D_theoretical, numbers_after_dot)))
        logging.info('Calculating H(t).')
        # Renewal function: mean state (event count) at each grid time.
        H = []
        for transition_states in states:
            transition_M = np.sum(transition_states) / len(transition_states) \
                if transition_states else 0
            H.append(transition_M)
        logging.info('Plotting H(t).')
        plt.plot(transitions_H, H)
        plt.title('H(t)')
        plt.show()
        logging.info('Calculating experimental F(t).')
        # Empirical CDF of the pooled inter-event times.
        F = []
        transitions_F = np.linspace(0, model_params['max_transition'], plot_steps)
        generated_transitions = [model.generated_transitions for model in models]
        generated_transitions = np.array(list(reduce(np.append, generated_transitions)))
        for transition in transitions_F:
            transition_F = (generated_transitions <= transition).sum() / generated_transitions.size
            F.append(transition_F)
        logging.info('Calculating theoretical F(t).')
        F_theoretical = 1 - np.exp(-(lambda_ * transitions_F) ** alpha)
        logging.info('Calculating f(t).')
        # Density estimated as a normalised histogram over the grid bins.
        f = []
        for index, transition in enumerate(transitions_F[:-1]):
            left_condition = generated_transitions > transition
            next_transition = transitions_F[index + 1]
            right_condition = generated_transitions <= next_transition
            number_of_transitions = np.logical_and(left_condition, right_condition).sum()
            transition_f = number_of_transitions / generated_transitions.size / (
                next_transition - transition)
            f.append(transition_f)
        f.append(0)
        f = np.array(f)
        logging.info('Calculating theoretical f(t).')
        f_theoretical = alpha * lambda_ * (lambda_ * transitions_F) ** (alpha - 1) \
            * np.exp(-(lambda_ * transitions_F) ** alpha)
        logging.info('Calculating G(t).')
        # Survival function G = 1 - F.
        G = 1 - np.array(F)
        logging.info('Calculating theoretical G(t).')
        G_theoretical = np.exp(-(lambda_ * transitions_F) ** alpha)
        # Clamp zeros so the hazard-rate division below is well defined.
        G[G == 0] = G[G != 0].min()
        logging.info('Calculating phi(t).')
        # Hazard rate phi = f / G.
        phi = f / G
        logging.info('Calculating theoretical phi(t).')
        phi_theoretical = alpha * lambda_ * (lambda_ * transitions_F) ** (alpha - 1)
        logging.info('Plotting F(t), f(t), G(t) and phi(t).')
        fig, axes = plt.subplots(2, 2)
        axes[0, 0].plot(transitions_F, F)
        axes[0, 0].plot(transitions_F, F_theoretical)
        axes[0, 0].set_title('F(t)')
        axes[0, 1].plot(transitions_F + (transitions_F[1] - transitions_F[0]), f)
        axes[0, 1].plot(transitions_F, f_theoretical)
        axes[0, 1].set_title('f(t)')
        axes[1, 0].plot(transitions_F, G)
        axes[1, 0].plot(transitions_F, G_theoretical)
        axes[1, 0].set_title('G(t)')
        axes[1, 1].plot(transitions_F, phi)
        axes[1, 1].plot(transitions_F, phi_theoretical)
        axes[1, 1].set_title('phi(t)')
        plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
|
"""
cYnfクラスのテスト
"""
import os
import sys
from unittest import TestCase
# srcの下をパスに追加
sys.path.append(os.path.join(os.getcwd(), 'src'))
from fig_package.format.ynf import cYnf, cYnfLine
class TestCYnf(TestCase):
    """Unit tests for the cYnf class."""

    def setUp(self):
        """Pre-test setup: start with no files scheduled for deletion."""
        self.del_file_list = []
        return

    def tearDown(self):
        """Post-test cleanup: delete any files created during the test."""
        for path in self.del_file_list:
            os.remove(path)
        return

    def _make_ynf_with_line(self):
        # Helper: a cYnf holding a single line element from (0,0) to (10,20).
        ynf = cYnf({})
        ynf.append(cYnfLine({'p1':[0,0], 'p2':(10,20)}))
        return ynf

    def test_1_default_create(self):
        """Creating an empty instance succeeds."""
        ynf = cYnf({})
        # Sanity check on the instance type.
        self.assertTrue(isinstance(ynf, cYnf))

    def test_2_append(self):
        """Appending an element does not raise.

        The internal state is not observable from a unit test, so this only
        verifies that append() completes.
        """
        self._make_ynf_with_line()

    def test_3_serialize(self):
        """serialize() writes a pickle file without raising."""
        out_file = 'test_sirialyze_test3.pkl'
        self._make_ynf_with_line().serialize(out_file)
        # Schedule the created file for deletion in tearDown.
        self.del_file_list.append(out_file)

    def test_4_deserialize(self):
        """deserialize() reads back a previously serialized instance."""
        out_file = 'test_sirialyze_test4.pkl'
        self._make_ynf_with_line().serialize(out_file)
        cYnf.deserialize(out_file)
        # Schedule the created file for deletion in tearDown.
        self.del_file_list.append(out_file)
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import confuse
from sys import stderr
__version__ = '1.6.1'
__author__ = 'Adrian Sampson <adrian@radbox.org>'
class IncludeLazyConfig(confuse.LazyConfig):
    """A version of Confuse's LazyConfig that also merges in data from
    YAML files specified in an `include` setting.
    """

    def read(self, user=True, defaults=True):
        """Read the base configuration, then overlay each `include` file."""
        super().read(user, defaults)
        try:
            for view in self['include']:
                self.set_file(view.as_filename())
        except confuse.NotFoundError:
            # No `include` setting present -- nothing to merge.
            pass
        except confuse.ConfigReadError as err:
            # Fix: the message previously said `import` although the setting
            # being processed is `include`; also terminate with a newline so
            # it does not run into subsequent stderr output.
            stderr.write("configuration `include` failed: {}\n"
                         .format(err.reason))


# Global configuration object shared by the whole package.
config = IncludeLazyConfig('beets', __name__)
|
n_kg = int(input())
bongzi = [5, 3]
geasu = 0
i = 0
if n_kg % 5 == 0:
geasu = n_kg // bongzi[0]
n_kg -= geasu * bongzi[0]
else:
while i*bongzi[0] < n_kg:
if n_kg - (i*bongzi[0]) == 3:
geasu += 1 + i
if n_kg - (i * bongzi[0]) == 6:
geasu += 2+ i
if n_kg - (i * bongzi[0]) == 9:
geasu += 3+ i
if n_kg - (i * bongzi[0]) == 12:
geasu += 4+ i
if geasu != 0:
n_kg =0
break
i+=1
if n_kg != 0:
print('-1')
else:
print(geasu)
|
from django.conf.urls import url
from . import views
from .views import AccountView
# URL routes mounted under /v1/users.
# NOTE(review): the `/code` and `/reset` patterns lack the `^` anchor used
# by every other entry, so they match ANY url ending in /code or /reset --
# confirm whether `^/code$` / `^/reset$` was intended before changing.
urlpatterns = [
    #127.0.0.1:8000/v1/users
    url(r'^$', views.user_view),
    # obtain a verification code
    url(r'/code$',views.code_view),
    # reset password
    url(r'/reset$',views.password_view),
    #http://127.0.0.1:8000/v1/users/activation?code=xxxx
    url(r'^/activation$', views.active_view),
    url(r'^/account$', AccountView.as_view()),
    # address-related routes
    #http://127.0.0.1:8000/v1/users/guoxiao7/address
    # get - query all addresses of the current user
    # post - create an address for the current user
    # Weibo OAuth routes
    url(r'^/weibo/authorization$',views.weibo_login),
    # receive the code from the frontend and exchange it for an
    # access_token at the Sina (Weibo) verification server
    url(r'^/weibo/users$',views.WeiBoView.as_view()),
]
|
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivymd.app import MDApp
from kivymd.uix.floatlayout import MDFloatLayout
from kivymd.uix.tab import MDTabsBase
from kivymd.icon_definitions import md_icons
colors = {
"Teal": {
"50": "e4f8f9",
"100": "bdedf0",
"200": "97e2e8",
"300": "79d5de",
"400": "6dcbd6",
"500": "6ac2cf",
"600": "63b2bc",
"700": "5b9ca3",
"800": "54888c",
"900": "486363",
"A100": "bdedf0",
"A200": "97e2e8",
"A400": "6dcbd6",
"A700": "5b9ca3",
},
"Blue": {
"50": "e3f3f8",
"100": "b9e1ee",
"200": "91cee3",
"300": "72bad6",
"400": "62acce",
"500": "589fc6",
"600": "5191b8",
"700": "487fa5",
"800": "426f91",
"900": "35506d",
"A100": "b9e1ee",
"A200": "91cee3",
"A400": "62acce",
"A700": "487fa5",
},
"Light": {
"StatusBar": "E0E0E0",
"AppBar": "F5F5F5",
"Background": "FAFAFA",
"CardsDialogs": "FFFFFF",
"FlatButtonDown": "cccccc",
},
"Dark": {
"StatusBar": "000000",
"AppBar": "212121",
"Background": "303030",
"CardsDialogs": "424242",
"FlatButtonDown": "999999",
}
}
KV = '''
BoxLayout:
orientation: "vertical"
MDToolbar:
title: "Example Tabs"
MDTabs:
id: tabs
<Tab>
MDIconButton:
id: icon
icon: root.icon
user_font_size: "48sp"
pos_hint: {"center_x": .5, "center_y": .5}
'''
class Tab(MDFloatLayout, MDTabsBase):
    '''Class implementing content for a tab.'''
    # Icon name rendered by the MDIconButton declared in the <Tab> KV rule.
    icon = ObjectProperty()
class Example(MDApp):
    """Demo app showing MDTabs populated with icon-button tabs."""

    # A slice of the icon catalogue; one tab is created per icon name.
    icons = list(md_icons.keys())[15:30]

    def build(self):
        """Install the custom palette and build the UI from the KV string."""
        self.theme_cls.colors = colors
        self.theme_cls.primary_palette = "Blue"
        self.theme_cls.accent_palette = "Teal"
        return Builder.load_string(KV)

    def on_start(self):
        """Create one tab per icon once the app window is up."""
        for icon_name in self.icons:
            new_tab = Tab(text="This is " + icon_name, icon=icon_name)
            self.root.ids.tabs.add_widget(new_tab)


Example().run()
|
try:
    # Grouped so a missing third-party dependency yields one readable
    # message instead of a raw traceback.
    from urllib import request
    from urllib.request import urlopen
    import threading  # import threading
    import json  # import json
    import random  # import random
    import requests  # third-party: pip install requests
    import ssl
except ImportError as exc:
    # Fix: was a bare `except`, which swallowed unrelated errors (including
    # KeyboardInterrupt) and hid WHICH module failed to import.
    print("No Library Found: %s" % exc)
def thingspeak_post():
    """Post a random value to a ThingSpeak channel every 15 seconds.

    Re-schedules itself with threading.Timer, so one call starts an
    endless background posting loop.
    """
    threading.Timer(15, thingspeak_post).start()
    val=random.randint(1, 30)
    # NOTE(review): URL is a placeholder (' '), so NEW_URL is not a valid
    # endpoint as written -- it must be set to the ThingSpeak update URL.
    URL=' '
    KEY='54ESHV8Z5ZF0TPX4'
    HEADER='&field1={}&field2={}'.format(val, val)
    NEW_URL = URL + KEY + HEADER
    print(NEW_URL)
    # Unverified SSL context: skips certificate validation -- NOTE(review):
    # acceptable for experimentation only.
    context = ssl._create_unverified_context()
    data=request.urlopen(NEW_URL,context=context)
    print(data)
def read_data_thingspeak():
    """Fetch the latest feed entries from a ThingSpeak channel.

    Returns the list of ``field1`` values (strings, or None for empty
    entries); also prints the request URL and the collected values, as
    before.
    """
    t_data = []
    URL = 'http://api.thingspeak.com/channels/103357/feeds.json?api_key='
    KEYS = 'G9SQN8G512L8SQMS'
    HEADER = '&results=2'  # modify here to get data
    NEW_URL = URL + KEYS + HEADER
    print(NEW_URL)
    data = requests.get(NEW_URL).json()
    # Each feed entry carries one value per channel field; collect field1.
    for entry in data['feeds']:
        t_data.append(entry['field1'])
    print(t_data)
    # Fix: the collected values were built but never returned.
    return t_data
# Only the read path runs by default; thingspeak_post() must be started
# manually (it re-schedules itself every 15 s via threading.Timer).
if __name__ == "__main__":
    read_data_thingspeak()
|
from setuptools import setup, find_packages
setup(
name='zeit.care',
version='0.3.dev0',
author='gocept, Zeit Online',
author_email='zon-backend@zeit.de',
url='http://www.zeit.de/',
description="Helper scripts for managing DAV content",
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
license='BSD',
namespace_packages=['zeit'],
install_requires=[
'zeit.connector',
'setuptools',
'pytz',
],
entry_points="""
[console_scripts]
isofication=zeit.care.worker:isofy_main
xslt=zeit.care.worker:xslt_main
divisor=zeit.care.divisor:main
boxinjector=zeit.care.boxinjector:main
ressortindexwriter=zeit.care.ressortindex:main
commentthreadworker=zeit.care.commentthread:main
propertyworker=zeit.care.worker:property_main
xmlworker=zeit.care.xmlworker:main
"""
)
|
from connection import db, Required
class Logservice(db.Entity):
    """Pony ORM entity recording one logged service request/response pair."""
    _table_ = 'logservice'
    # Request identification.
    uri = Required(str)
    method = Required(str)
    params = Required(str)
    ip_address = Required(str)
    # NOTE(review): stored as a string, not a datetime -- confirm format.
    request_time = Required(str)
    # Response payload; 65535 maps to a TEXT-sized column.
    response = Required(str, 65535)
    status = Required(str)
|
# ECOG performance-status descriptions: index i holds the text for grade i.
PERF_VAL = [
    '0 - Fully active, able to carry on all predisease activities without restrictions.',
    '1 - No physically strenuous activity, but ambulatory and able to carry out light or sedentary work.',
    '2 - Ambulatory/capable of self-care, unable to perform work activities. Up & about more than 50% of the day.',
    '3 - Capable of only limited self-care, confined to bed or chair more than 50% of waking hours.',
    # Fix: removed a stray duplicate of the grade-0 entry (with a typo,
    # "activites") that sat between grades 3 and 4 and broke the
    # index-to-grade alignment.
    '4 - Completely disabled, totally confined to bed or chair. Cannot carry on any self-care.']
|
class Tablero:
    """A word-game board plus the maximum word length it supports."""

    def __init__(self, Tablero, Palabra_max):
        """Store the board grid and the maximum word length.

        Parameter names are kept for backward compatibility with keyword
        callers, even though `Tablero` shadows the class name.
        """
        # Fix: instance attributes replace the former mutable *class*
        # attributes (tab = [] at class level is shared by all instances
        # and a classic aliasing trap).
        self.tab = Tablero
        self.max_p = Palabra_max
|
В приведенном ниже примере, несколько регистров и числовых значений загружаются в стек. В каком порядке они будут извлекаться из стека с помощью команды pop? Расположите в верном порядке.
push edi
push ecx
push ebp
push 3
push eax
|
'''
Mirror Sequence
Print numbers in sequence is a relatively simple task.
But, and when it is a sequence mirror? This is a sequence
having a number of start and an end number and all numbers
therebetween, including these, are arranged in an increasing
sequence without spaces, and then this sequence is designed
in inverted form, as a reflection in the mirror. For example,
if the sequence is 7 to 12, the result would 789101112211101987
Write a program that, given two integers, print their mirror sequence.
Input
The input has an integer value C indicating the number of test cases.
Then each case has two integer values E and B ( 1 ≤ B ≤ E ≤ 12221 ),
indicating the start and end of the sequence.
Output
For each test case, print the respective mirror sequence.
'''
x = int(input())
while x != 0:
numbers = input().split(" ")
num1 = int(numbers[0])
num2 = int(numbers[1])
string = ''
for i in range(num1, num2+1, 1):
string += str(i)
print('{}{}'.format(string, string[::-1]))
x -= 1
|
# Assignment 1: a sequence of small function-definition exercises, each
# defined and immediately executed (several read interactively via input()).
print("*** ASSIGNMENT 1 ***")
print(" ")
print("Ex1")
def fun():
    print("Hello from fun")
fun()
print(" ")
print("Ex2")
def no():
    # Even/odd check on an interactively-entered integer.
    n = int(input("Any no:"))
    if (n%2==0):
        print("EVEN")
    else:
        print("Odd")
no()
print(" ")
print("Ex3")
def Add():
    a = int(input("First no:"))
    b = int(input("Second no:"))
    # NOTE: `sum` shadows the builtin of the same name inside this function.
    sum =(a+b)
    print("Adddition of no is {}".format(sum))
Add()
print(" ")
print("Ex4")
def name(n):
    # Recursive countdown: prints "Marvellous" n times (on unwind).
    if n != 0:
        name(n-1)
        print("Marvellous")
name(5)
print(" ")
print("Ex5")
def num():
    # Count down from 10 to 1.
    u = 0
    v = 10
    for num in range(v, u,-1):
        print(num,end=" ")
num()
print(" ")
print(" ")
print("Ex6")
def CheckN():
    n = int(input("Enter N:"))
    if (n>0):
        print("Positive")
    elif (n<0):
        print("Negative")
    else:
        print("Zero")
CheckN()
print(" ")
print("Ex7")
def Bool(n):
    # True when n is a multiple of 5.
    return True if n % 5 == 0 else False
print(Bool(10))
print(" ")
print("Ex8")
def star():
    a = int(input("Enter No of stars: "))
    print(" * "*a)
star()
print(" ")
print("Ex9")
def Even():
    # NOTE(review): the loop variable shadows the function name, and the
    # entered "interval" is used as the range STEP -- confirm intent.
    a = int(input("Enter Interval: "))
    for Even in range(2,21,a):
        print(Even,end=" ")
Even()
print(" ")
print(" ")
print("Ex10")
# NOTE: redefines `name` from Ex4 with a no-argument version.
def name():
    # NOTE: the local `str` shadows the builtin inside this function.
    str = input("Enter name:")
    print(len(str))
name()
# NOTE(review): this redefines the BUILTIN `str` at module level -- any
# later code in this module calling str(...) gets this function instead.
def str(name):
    return len(name)
a = input("Enter Name: ")
print("No of alphabets: ",str(a))
|
#!/usr/bin/env python3
"""Translate the values of en.yml from English to Spanish via `trans`."""
import subprocess
import time
import yaml

with open('en.yml', 'r') as handle:
    # Fix: safe_load -- yaml.load without an explicit Loader is deprecated
    # and can construct arbitrary Python objects from untrusted input.
    data = yaml.safe_load(handle)

out = {}
for k, v in data.items():
    print(k, v)
    # `trans en:es -b` prints only the translated text.
    transv = subprocess.check_output(['trans', 'en:es', '-b', v])
    print(k, transv)
    # Fix: check_output returns bytes; decode so the YAML dump contains
    # plain strings instead of !!binary nodes.
    out[k] = transv.decode('utf-8').strip()
    time.sleep(5)  # crude rate limiting for the translation service

with open('es-cat.yml', 'w') as handle:
    yaml.dump(out, handle)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.codegen.protobuf.target_types import ProtobufSourceTarget
from pants.backend.python.goals import repl as python_repl
from pants.backend.python.target_types import (
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
PythonSourceTarget,
)
from pants.backend.python.target_types_rules import rules as target_types_rules
from pants.backend.python.util_rules import local_dists, pex_from_targets
from pants.backend.python.util_rules.pex import PexProcess
from pants.core.goals.generate_lockfiles import NoCompatibleResolveException
from pants.core.goals.repl import Repl
from pants.core.goals.repl import rules as repl_rules
from pants.engine.process import Process
from pants.testutil.python_interpreter_selection import all_major_minor_python_versions
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import GoalRuleResult, QueryRule, engine_error, mock_console
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
    """Rule runner wired with the repl goal, pex/local-dists plumbing and a
    small protobuf-backed Python project."""
    runner = PythonRuleRunner(
        rules=[
            *repl_rules(),
            *python_repl.rules(),
            *pex_from_targets.rules(),
            *local_dists.rules(),
            *target_types_rules(),
            QueryRule(Process, (PexProcess,)),
        ],
        target_types=[
            PythonSourcesGeneratorTarget,
            ProtobufSourceTarget,
            PythonSourceTarget,
            PythonRequirementTarget,
        ],
    )
    # A protobuf source plus a Python file importing its generated code.
    project_files = {
        "src/python/foo.proto": 'syntax = "proto3";message Foo {}',
        "src/python/lib.py": "from foo import Foo\nclass SomeClass:\n    pass\n",
        "src/python/BUILD": dedent(
            """\
            protobuf_source(name='proto', source='foo.proto')
            python_sources(dependencies=[':proto'])
            """
        ),
    }
    runner.write_files(project_files)
    return runner
def run_repl(
    rule_runner: PythonRuleRunner, args: list[str], *, global_args: list[str] | None = None
) -> GoalRuleResult:
    """Run the `repl` goal under a mocked console and return its result."""
    # TODO(#9108): Expand `mock_console` to allow for providing input for the repl to verify
    # that, e.g., the generated protobuf code is available. Right now this test prepares for
    # that by including generated code, but cannot actually verify it.
    effective_global_args = global_args or ()
    with mock_console(rule_runner.options_bootstrapper):
        result = rule_runner.run_goal_rule(
            Repl,
            global_args=effective_global_args,
            args=args,
            env_inherit={"PATH", "PYENV_ROOT", "HOME"},
        )
    return result
def test_default_repl(rule_runner: PythonRuleRunner) -> None:
    """The default (python) repl loads a plain source file successfully."""
    result = run_repl(rule_runner, ["src/python/lib.py"])
    assert result.exit_code == 0
@pytest.mark.platform_specific_behavior
@pytest.mark.parametrize(
    "major_minor_interpreter",
    all_major_minor_python_versions(["CPython>=3.7,<4"]),
)
def test_ipython(rule_runner: PythonRuleRunner, major_minor_interpreter: str) -> None:
    """The ipython shell starts under every supported interpreter version."""
    result = run_repl(
        rule_runner,
        ["src/python/lib.py"],
        global_args=[
            "--repl-shell=ipython",
            f"--python-interpreter-constraints=['=={major_minor_interpreter}.*']",
        ],
    )
    assert result.exit_code == 0
def test_eagerly_validate_roots_have_common_resolve(rule_runner: PythonRuleRunner) -> None:
    """Targets with incompatible resolves are rejected before the repl runs."""
    build_contents = dedent(
        """\
        python_requirement(name='t1', requirements=[], resolve='a')
        python_source(name='t2', source='f.py', resolve='b')
        """
    )
    rule_runner.write_files({"BUILD": build_contents})
    # The error should point the user at `pants peek` for diagnosis.
    with engine_error(NoCompatibleResolveException, contains="pants peek"):
        run_repl(
            rule_runner,
            ["//:t1", "//:t2"],
            global_args=["--python-resolves={'a': '', 'b': ''}", "--python-enable-resolves"],
        )
|
# Points Django at the app's AppConfig.  NOTE(review): this setting is
# unnecessary (and deprecated) on Django >= 3.2, which auto-discovers a
# single AppConfig per app -- confirm the project's Django version before
# removing it.
default_app_config = 'eshop_products.apps.EshopProductsConfig'
|
# -*- coding: utf-8 -*-
from django import forms
from django.db import models
from django.template.loader import render_to_string
class TokenWidget(forms.Widget):
    """Form widget rendering a token input via the random_field template,
    with its JS/CSS pulled in through the Media definition.
    """

    class Media:
        js = ['random_field/token.js']
        css = {
            'all': ['random_field/token.css']
        }

    def __init__(self, max_length):
        super(TokenWidget, self).__init__()
        # Token length; also drives the rendered input's pixel width.
        self.max_length = max_length

    def render(self, name, value, attrs=None, renderer=None):
        """Render the widget template.

        `renderer` is accepted (and ignored) for compatibility with
        Django >= 2.1, which passes it positionally to Widget.render().
        """
        # Fix: explicit context instead of the previous `locals()` hack,
        # which silently exposed every local (including `self`) to the
        # template.
        context = {
            'name': name,
            'value': value,
            'attrs': attrs,
            'max_length': self.max_length,
            'input_width': self.max_length * 5,
        }
        return render_to_string('random_field/token.html', context)
class TokenField(models.CharField):
    # CharField whose generated form field always uses TokenWidget,
    # sized to this field's max_length.
    def formfield(self, **kwargs):
        """Force the token widget onto the generated form field."""
        kwargs['widget'] = TokenWidget(self.max_length)
        return super(TokenField, self).formfield(**kwargs)
# South (legacy migrations) support: register the custom field for
# introspection; silently skipped when South is not installed.
try:
    from south.modelsinspector import add_introspection_rules
    # Fix: raw string -- "\." in a non-raw string is an invalid escape
    # sequence (DeprecationWarning now, SyntaxError in future Pythons).
    add_introspection_rules([], [r"^random_field\.fields\.TokenField"])
except ImportError:
    pass
|
import fool
# NOTE(review): `path` is undefined here -- this line raises NameError as
# written.  It should be set to a user-dictionary file path for the fool
# segmenter before this call.
fool.load_userdict(path)
text = "我在北京天安门看你难受香菇"
# Print the word segmentation of the sample sentence.
print(fool.cut(text))
|
from django.shortcuts import render
from django.shortcuts import render
from django.db.models import Q
from django.shortcuts import render
from User.models import UserExtended
from django.contrib.auth import (authenticate,
login)
from rest_framework.response import Response
from rest_framework.filters import (SearchFilter,
OrderingFilter)
from rest_framework.status import (HTTP_200_OK,
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_204_NO_CONTENT)
from rest_framework.views import APIView
from rest_framework.generics import (CreateAPIView ,
ListAPIView,
RetrieveAPIView,
RetrieveUpdateAPIView,
DestroyAPIView)
from LiveData.serializer import (LiveDataUpdateSerializer,
LiveDataListSerializer,
LiveDataDetailSerializer)
from rest_framework.permissions import (AllowAny,
IsAuthenticated,
IsAuthenticatedOrReadOnly,
IsAdminUser)
from LiveData.permissions import IsOwner
from Device.models import Device
from django.contrib.auth import get_user_model
from LiveData.models import LiveData
from rest_framework.exceptions import APIException
User = get_user_model()
# Create your views here.
class LiveDataListAPIView(ListAPIView):
    # Read-only listing of every LiveData record.  No permission_classes
    # declared, so the DRF project default applies.
    queryset = LiveData.objects.all()
    serializer_class = LiveDataListSerializer
class LiveDataDetailAPIView(RetrieveAPIView):
    # Single-record read view; requires an authenticated user.
    queryset = LiveData.objects.all()
    serializer_class = LiveDataDetailSerializer
    permission_classes = [IsAuthenticated]
class LiveDataUpdateAPIView(RetrieveUpdateAPIView):
    # Retrieve/update view; writes restricted to the owning user.
    # NOTE(review): uses LiveDataDetailSerializer even though
    # LiveDataUpdateSerializer is imported above -- confirm which is
    # intended for updates.
    queryset = LiveData.objects.all()
    serializer_class = LiveDataDetailSerializer
    permission_classes = [IsAuthenticatedOrReadOnly, IsOwner]
class LiveDataDeleteAPIView(DestroyAPIView):
    """Delete a LiveData record (owner only), removing its Device too."""
    permission_classes = [IsAuthenticated, IsOwner]
    queryset = LiveData.objects.all()
    serializer_class = LiveDataDetailSerializer
    def perform_destroy(self, instance):
        # Delete the related device first, then the record itself.
        # NOTE(review): if Device deletion cascades back to LiveData, the
        # second delete may be redundant — confirm the FK on_delete rule.
        instance.device.delete()
        instance.delete()
|
name = "kindling"
__all__ = [
"FireActorCritic",
"FireQActorCritic",
"FireDDPGActorCritic",
"FireTD3ActorCritic",
"FireSACActorCritic",
"TensorBoardWriter",
"utils",
"ReplayBuffer",
"PGBuffer",
"Saver",
"Logger",
"EpochLogger",
]
from flare.kindling.neuralnets import (
FireActorCritic,
FireQActorCritic,
FireDDPGActorCritic,
FireTD3ActorCritic,
FireSACActorCritic,
)
from flare.kindling.tblog import TensorBoardWriter
from flare.kindling import utils
from flare.kindling.buffers import ReplayBuffer, PGBuffer
from flare.kindling.saver import Saver
from flare.kindling.loggingfuncs import Logger, EpochLogger
|
from django.test import RequestFactory
from api.models import AndelaUserProfile, UserProxy
from graphene.test import Client
from snapshottest.django import TestCase
from graphql_schemas.schema import schema
class BaseUserTestCase(TestCase):
    """Shared snapshot-test fixture: two Andela user profiles, a GraphQL
    request and a graphene test client bound to the project schema."""
    def setUp(self):
        # Two distinct users so tests can exercise cross-user behaviour.
        self.user1 = UserProxy.create_user({
            "username": "testuser",
            "first_name": "test",
            "last_name": "user",
            "email": "test@andela.com"
        })
        self.user2 = UserProxy.create_user({
            "username": "anotherUser",
            "first_name": "another",
            "last_name": "user",
            "email": "user2@andela.com"
        })
        self.andela_user1 = AndelaUserProfile.objects.create(
            google_id=123233,
            user=self.user1,
            user_picture="https://lh5.googleusercontent.com"
        )
        self.andela_user2 = AndelaUserProfile.objects.create(
            google_id=344445,
            user=self.user2,
            user_picture="https://lh5.googleusercontent.com"
        )
        self.request = RequestFactory().get('/graphql')
        self.client = Client(schema)
    def tearDown(self):
        # Remove all fixture rows so tests stay independent.
        AndelaUserProfile.objects.all().delete()
        UserProxy.objects.all().delete()
|
print("\n*********************************************************\n")
data1 = [1,2,3,4,5,6,7,8]
print("Here is the original data:",data1)
evens_for = []
for num in data1:
if not num%2:
evens_for.append(num)
print("Here is the even numbers using for loop: ",evens_for)
evens_comp = [num for num in data1 if not num%2]
print("Here is the even numbers using List Comp: ",evens_comp)
print("\n*********************************************************\n")
data2 = [1,'one',2,'two',3,'three',4,'four']
print("Here is the original data:",data2)
words_for = []
for num in data2:
if isinstance(num,str):
words_for.append(num)
print("Here is the data uing for loop: ",words_for)
words_comp = [num for num in data2 if isinstance(num,str)]
print("Here is the data uing List Comp: ",words_comp)
print("\n*********************************************************\n")
data3 = list('So long and thanks for all the fish'.split())
print("Here is the original data: ", data3)
title_for=[]
for word in data3:
title_for.append(word.title())
print("Here is the result using for loop: ",title_for)
title_comp = [word.title() for word in data3]
print("Here is the result using List Comp: ",title_comp)
print("\n*********************************************************\n")
|
from sql_kit import SQL_kit
import lyricsgenius
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import mysql.connector
import getpass
import pathlib
from pathlib import Path
class Lyrics_Tool:
    """Fetch song lyrics from the Genius API and analyse them.

    Supports explicit-content vetting against a curse-word list, unique
    word counts, short lyric samples, and (optionally) logging verdicts
    to a MySQL database through SQL_kit.
    """

    # Punctuation stripped from raw lyrics before tokenising.
    _REMOVE_CHARACTERS = [".", ",", "!", "'", '"', "?", "(", ")", "[", "]"]

    def __init__(self):
        # Genius API key (prompted interactively, never stored on disk).
        self.genius_api_key = getpass.getpass('Enter Genius API Key: ')
        # MySQL connection state; populated by connect().
        self.database_connect = False
        self.userID = None
        self.password = None
        self.database = 'lyrics'
        file_path = Path(str(pathlib.Path(__file__).parent.absolute()) + '/' + 'curse_words.csv')
        # Import the CSV of curse words and strip non-breaking spaces.
        self.curse_words_df = list(pd.read_csv(file_path, encoding='latin1')['curse_words'])
        self.curse_words_list = [word.replace('\xa0', '') for word in self.curse_words_df]

    def _fetch_song(self, song_name, artist_name):
        """Look a song up on the Genius API and return the song object."""
        genius = lyricsgenius.Genius(self.genius_api_key)
        return genius.search_song(str(song_name), str(artist_name))

    def _fetch_words(self, song_name, artist_name):
        """Return the song's lyrics as a punctuation-free list of words.

        Shared by vet(), unique_word_count() and lyrics_sample(); the
        original duplicated this fetch-and-clean logic in each of them.
        """
        lyrics = self._fetch_song(song_name, artist_name).lyrics
        for character in self._REMOVE_CHARACTERS:
            lyrics = lyrics.replace(character, "")
        return lyrics.split()

    def connect(self):
        """Enter MySQL credentials and enable database logging."""
        self.database_connect = True
        self.userID = input('User ID: ')
        self.password = getpass.getpass('Password: ')

    def show_lyrics(self, song_name, artist_name):
        """Print the song's raw lyrics."""
        song = self._fetch_song(song_name, artist_name)
        return print(song.lyrics)

    def vet(self, song_name, artist_name):
        """Check whether a song has explicit lyrics.

        Retrieves the lyrics from the Genius API and returns True when the
        song is clean, False when explicit words are detected.  When a
        database connection is configured the verdict is also recorded.
        """
        words = self._fetch_words(song_name, artist_name)
        # Every (lower-cased) lyric word that appears in the curse list.
        song_curse_words = [w.lower() for w in words if w.lower() in self.curse_words_list]
        clean = len(song_curse_words) == 0
        if clean:
            print("\nClean lyrics")
        else:
            print('\nExplicit lyrics detected!\n')
        if self.database_connect:
            # Record the verdict in the MySQL cleanlyrics table.
            db = SQL_kit(self.userID, self.password, self.database)
            db.cleanlyrics_table(song_name, artist_name, clean)
        return clean

    def unique_word_count(self, song_name, artist_name):
        """Return a DataFrame of unique-word counts for the song's lyrics.

        (The original also built a 20-word sample string here and threw it
        away; that dead code has been removed — use lyrics_sample().)
        """
        words = self._fetch_words(song_name, artist_name)
        # Drop the most common filler words and any leftover section markers.
        common_words = ['a', 'an', 'and', 'the', 'it']
        comp_lyrics = [w.lower() for w in words
                       if '[' not in w and ']' not in w and w.lower() not in common_words]
        counts = pd.Series(comp_lyrics).value_counts()
        counts = pd.DataFrame(counts, columns=['count'])
        counts.index.name = 'word'
        return counts

    def lyrics_sample(self, song_name, artist_name):
        """Return the first 20 words of the song's lyrics as one string.

        Returns "" for empty lyrics (the original raised IndexError).
        """
        words = self._fetch_words(song_name, artist_name)
        return " ".join(words[:20])

    def data(self):
        """Return a DataFrame of all rows in the cleanlyrics table."""
        db = SQL_kit(self.userID, self.password, self.database)
        return db.get_data()

    def dashboard(self):
        """Show a bar chart of clean vs. explicit songs vetted so far."""
        df = self.data()
        objects = ['Clean', 'Explicit']
        y_pos = np.arange(len(objects))
        performance = list(df['Clean'].value_counts())
        plt.bar(y_pos, performance, color='mediumblue', align='center', alpha=0.8)
        plt.xticks(y_pos, objects)
        plt.title('Clean vs Explicit')  # fixed 'Explict' typo in the chart title
        plt.ylabel('Number of Songs')
        plt.xlabel('Lyrics')
        plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 15:56:45 2019
@author: HP
"""
import random
def power(a, n, p):
    """Return (a ** n) % p by binary (square-and-multiply) exponentiation.

    Fixes: the original squared the accumulator ``res`` instead of the
    base ``a`` and halved ``n`` with float division (``n = n/2``), which
    produced wrong results and non-terminating float loops.
    """
    res = 1
    a %= p
    while n > 0:
        if n % 2 == 1:
            res = (res * a) % p
        a = (a * a) % p
        n //= 2
    return res
def euclidean_gcd(a, b):
    """Return gcd(a, b) via the Euclidean algorithm.

    Fix: the recursive step must be gcd(b, a % b); the original passed
    (a, a % b), which computes the wrong value and recurses incorrectly.
    """
    if b == 0:
        return a
    return euclidean_gcd(b, a % b)
def fermat(n, k):
    """Fermat probabilistic primality test with k random bases.

    Returns False as soon as a base shares a factor with n or fails
    Fermat's little theorem; True if all k rounds pass.  Uses math.gcd
    and the three-argument built-in pow so the result does not depend on
    the (previously buggy) local power/euclidean_gcd helpers.
    Requires n >= 4 (randrange needs a non-empty base range).
    """
    import math
    while k > 0:
        a = random.randrange(2, n - 1)
        if math.gcd(n, a) != 1:
            return False
        if pow(a, n - 1, n) != 1:
            return False
        k = k - 1
    return True
def Miller_Rabin_Exd(a, d, n):
    """One Miller-Rabin witness round for odd n, where n - 1 = d * 2^s.

    Returns True if base ``a`` does NOT prove n composite.  Fixes: the
    original returned on the very first squaring (the trailing ``else:
    return True`` made the loop run at most once) and never tested for
    x == n - 1 inside the loop; it also relied on the broken local
    power() — the built-in three-argument pow is used instead.
    """
    x = pow(a, d, n)
    if x == 1 or x == n - 1:
        return True
    while d < n - 1:
        x = (x * x) % n
        d = d * 2
        if x == 1:
            # A non-trivial square root of 1 was found: n is composite.
            return False
        if x == n - 1:
            return True
    return False
def Miller_Rabin(n, k):
    """Miller-Rabin probabilistic primality test with k random rounds.

    Fixes relative to the original: n = 1 no longer hangs in the
    factoring loop, n = 2 and 3 are reported prime (they used to return
    False or crash in randrange), d is computed with integer division
    (float division broke the exponent), and the witness round is
    inlined with built-in pow so it is correct and self-contained.
    """
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    # Factor n - 1 as d * 2^s with d odd.
    d = n - 1
    s = 0
    while d % 2 == 0:
        s = s + 1
        d = d // 2
    while k > 0:
        a = random.randrange(2, n - 1)
        x = pow(a, d, n)
        if x != 1 and x != n - 1:
            for _ in range(s - 1):
                x = (x * x) % n
                if x == n - 1:
                    break
            else:
                # No square reached n - 1: a is a witness, n is composite.
                return False
        k = k - 1
    return True
|
import os
import xml.etree.ElementTree as et
import cv2
# Maps Pascal-VOC object names to the numeric class labels embedded in the
# cropped-image file names produced by data_transfer().
object_name_dict = {
    'rice': '1',
    'soup': '2',
    'rect': '3',
    'lcir': '4',
    'ssquare': '5',
    'msquare': '6',
    'lsquare': '7',
    'bsquare': '8',
    'ellipse': '9'
}
def data_transfer(xml_path, img_dir, save_dir, index):
    """Crop every labelled bounding box out of one Pascal-VOC annotation.

    Parses the XML at ``xml_path``, loads the referenced image from
    ``img_dir``, crops each <object>'s bndbox and writes it to
    ``save_dir`` as ``<index + i>_<label>.png``.

    Returns the number of crops written so the caller can advance index.

    Fix: the original called cv2.imread() inside the per-object loop,
    re-reading the same image file once per bounding box.
    """
    tree = et.parse(xml_path)
    root = tree.getroot()
    file_name = root.find('filename')
    file_name_text = os.path.join(img_dir, file_name.text)
    # Load the source image once for all objects in this annotation.
    image = cv2.imread(file_name_text)
    count = 0
    for single_object in root.findall('object'):
        single_object_name = single_object.find('name').text
        label = object_name_dict[single_object_name]
        single_object_rect = single_object.find('bndbox')
        x_min = int(single_object_rect.find('xmin').text)
        x_max = int(single_object_rect.find('xmax').text)
        y_min = int(single_object_rect.find('ymin').text)
        y_max = int(single_object_rect.find('ymax').text)
        cropImg = image[y_min:y_max, x_min:x_max]
        out_path = os.path.join(save_dir, str(index + count) + "_" + label + ".png")
        cv2.imwrite(out_path, cropImg)
        print("save image to " + out_path)
        count = count + 1
    return count
if __name__ == "__main__":
img_dir = "../datas/datas/img/"
xml_dir = "../datas/datas/xml/"
xml_list = os.listdir(xml_dir)
list_label = []
index = 10000
for xml_path in xml_list:
file_path = os.path.join(xml_dir, xml_path)
save_dir = "../datas/transdatas/train/"
num = data_transfer(file_path, img_dir, save_dir, index)
index = index + num
|
from PyQt5.QtGui import QPixmap
from ui_msgbox import *
class RecordingBoxWindow(QtWidgets.QWidget, Ui_msgbox):
    """Paged record viewer (5 rows per page) built on the Ui_msgbox form.

    ``type`` selects which initialiser fills ``self.message`` (purchase,
    sales, login, in-shop customer or user records); ``msg`` is the raw
    record data for that type.
    """
    # Normalised records to display; each dict has 'time', 'send', 'content'.
    message = []
    def __init__(self, type, msg, parent=None):
        super(RecordingBoxWindow, self).__init__(parent)
        self.setupUi(self)
        # Dispatch table: record type -> initialiser for self.message.
        init_f = {"buy": self.init_buy,
                  "sold": self.init_sold,
                  "login": self.init_login,
                  'custom': self.init_custom,
                  'user': self.init_user}
        init_f[type](msg)
        self.loadmsg(self.message)
        self.next_page.clicked.connect(self.to_next_page)
        self.last_page.clicked.connect(self.to_last_page)
    def init_buy(self, data):
        """Prepare purchase records: order number plus 'item x quantity'."""
        self.setWindowTitle("购买记录")
        self.send_head.setText("订单号")
        self.shop_owner_head.setText("购买信息")
        msg = []
        for i in range(len(data)):
            m = {}
            m['time'] = data[i]['time']
            m['send'] = data[i]['shopping_num']
            m['content'] = data[i]['goods_name'] + " × " + data[i]['num']
            msg.append(m)
        self.message = msg
    def init_custom(self, data):
        """Prepare in-shop customer records for one shop."""
        self.setWindowTitle("店内顾客")
        self.send_head.setText("编号")
        self.time_head.setText("店铺ID")
        self.shop_owner_head.setText("顾客")
        msg = []
        for i in range(len(data['custom'])):
            m = {}
            m['time'] = data['id']
            m['send'] = str(i + 1)
            m['content'] = data['custom'][i]
            msg.append(m)
        self.message = msg
    def init_sold(self, data):
        """Prepare sales records: order number plus buyer/item summary."""
        self.setWindowTitle("销售记录")
        self.send_head.setText("订单号")
        self.shop_owner_head.setText("销售信息")
        msg = []
        for i in range(len(data)):
            m = {}
            m['time'] = data[i]['time']
            m['send'] = data[i]['shopping_num']
            m['content'] = "向" + data[i]['user'] + "卖出" + data[i]['goods_name'] + " × " + data[i]['num']
            msg.append(m)
        self.message = msg
    def init_login(self, data):
        """Prepare login records: sequence number, time and location."""
        self.setWindowTitle("登陆记录")
        self.send_head.setText("编号")
        self.shop_owner_head.setText("登陆位置")
        msg = []
        for i in range(len(data)):
            m = {}
            m['time'] = data[i]['time']
            m['send'] = str(i + 1)
            m['content'] = str(data[i]['add'])
            msg.append(m)
        self.message = msg
    def init_user(self, data):
        """Prepare user records: id, name with online state, shop flag."""
        self.setWindowTitle("用户信息")
        self.send_head.setText("UserID")
        self.time_head.setText("用户名")
        self.shop_owner_head.setText("是否拥有店铺")
        msg = []
        for i in range(len(data)):
            m = {}
            m['send'] = data[i]['id']
            if data[i]['state'] == False:
                m['time'] = data[i]['name'] + ":当前离线"
            else:
                m['time'] = data[i]['name'] + ":当前在线"
            m['content'] = data[i]['shop']
            msg.append(m)
        self.message = msg
    def loadmsg_range(self, r, data):
        """Show page ``r`` (1-based) of ``data``; blank out unused rows."""
        # Rows on this page: a full 5, or the remainder on the final page.
        if len(self.message) < r * 5:
            s_i = len(self.message) - 5 * (r - 1) + 1
        else:
            s_i = 6
        for i in range(1, s_i):
            method = "modify_msglist" + str(i)
            getattr(self, method)(data[(r - 1) * 5 + i - 1]['time'],
                                  data[(r - 1) * 5 + i - 1]['send'],
                                  data[(r - 1) * 5 + i - 1]["content"])
        # Clear any leftover rows from the previous page.
        for i in range(s_i, 6):
            method = "modify_msglist" + str(i)
            getattr(self, method)("", "", "")
    def to_next_page(self):
        """Advance one page; hide the 'next' button past the last full page."""
        current = int(self.page.text())
        if (current + 1) * 5 <= (int)(self.msg_num.text()):
            self.last_page.setHidden(False)
            self.next_page.setHidden(False)
            self.page.setText(str(current + 1))
            self.loadmsg_range(current + 1, self.message)
        elif (current + 1) * 5 > (int)(self.msg_num.text()):
            self.last_page.setHidden(False)
            self.next_page.setHidden(True)
            self.page.setText(str(current + 1))
            self.loadmsg_range(current + 1, self.message)
    def to_last_page(self):
        """Go back one page; hide the 'previous' button on page 1."""
        current = int(self.page.text())
        if (current - 1) == 1:
            self.last_page.setHidden(True)
            self.next_page.setHidden(False)
            self.page.setText(str(current - 1))
            self.loadmsg_range(current - 1, self.message)
        else:
            # NOTE(review): hiding next_page when stepping BACK to a middle
            # page looks inverted (it mirrors to_next_page) — confirm this
            # is the intended behaviour.
            self.last_page.setHidden(False)
            self.next_page.setHidden(True)
            self.page.setText(str(current - 1))
            self.loadmsg_range(current - 1, self.message)
    # The five modify_msglistN helpers fill display row N; loadmsg_range
    # and loadmsg select them dynamically via getattr.
    def modify_msglist1(self, id, name, owner):
        self.time_1.setText(id)
        self.send_1.setText(name)
        self.content_1.setText(owner)
    def modify_msglist2(self, id, name, owner):
        self.time_2.setText(id)
        self.send_2.setText(name)
        self.content_2.setText(owner)
    def modify_msglist3(self, id, name, owner):
        self.time_3.setText(id)
        self.send_3.setText(name)
        self.content_3.setText(owner)
    def modify_msglist4(self, id, name, owner):
        self.time_4.setText(id)
        self.send_4.setText(name)
        self.content_4.setText(owner)
    def modify_msglist5(self, id, name, owner):
        self.time_5.setText(id)
        self.send_5.setText(name)
        self.content_5.setText(owner)
    def loadmsg(self, data):
        """Initial fill: show page 1 and set paging-button visibility."""
        msgnum = data.__len__()
        if msgnum <= 5:
            # Everything fits on one page: no paging buttons at all.
            for i in range(1, msgnum + 1):
                method = "modify_msglist" + str(i)
                getattr(self, method)(data[i - 1]['time'],
                                      data[i - 1]['send'],
                                      data[i - 1]["content"])
            self.last_page.setHidden(True)
            self.next_page.setHidden(True)
            self.msg_num.setText(str(msgnum))
        else:
            for i in range(1, 6):
                method = "modify_msglist" + str(i)
                getattr(self, method)(data[i - 1]['time'],
                                      data[i - 1]['send'],
                                      data[i - 1]["content"])
            self.last_page.setHidden(True)
            self.next_page.setHidden(False)
            self.msg_num.setText(str(msgnum))
        self.page.setText("1")
|
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.core import validators
from .forms import *
from models import CustomUser as MyUser
from models import SocialNetworks as sn
from django.template import *
# Create your views here.
@login_required
def changeUserSettings(request):
    """Render (GET) or process (POST) the user-settings form.

    Updates the user's e-mail, social-network URLs and profile picture,
    then redirects back to the settings page.
    """
    if not request.method == 'POST':
        form = ChangeUserSettings()
        title = 'Uporabniske nastavitve - ' + str(request.user.username)
        try:
            userlogourl = request.user.logo.url
        except:
            # No uploaded logo (or missing file): fall back to the default avatar.
            userlogourl = '/media/profile_pic/default_user.png'
        return render(request, 'User/user_settings.html', {'form': form, 'title': title, 'notificationsEmail': str(request.user.email), 'userlogourl': userlogourl})
    form = ChangeUserSettings(request.POST, request.FILES)
    try:
        if not form.is_valid():
            return HttpResponseRedirect('/user/settings')
    except:
        return HttpResponseRedirect('/user/settings')
    currUser = MyUser.objects.get(username=request.user.username)
    mail = request.POST['email']
    # Accept an empty address (clears it) or anything containing '@'.
    # Fix: the original tested identity (`mail is u''`), which is not a
    # reliable equality check for strings — use `==` instead.
    if mail == u'' or u'@' in mail:
        currUser.email = mail
    soc_net = sn(user=request.user)
    url_facebook = request.POST['url_facebook']
    url_googleplus = request.POST['url_googleplus']
    url_twitter = request.POST['url_twitter']
    url_linkedin = request.POST['url_linkedin']
    url_github = request.POST['url_github']
    # Only store URLs that mention the right domain and contain no spaces.
    if 'facebook.com' in str(url_facebook) and ' ' not in str(url_facebook):
        soc_net.url_facebook = url_facebook
    if 'plus.google.com' in str(url_googleplus) and ' ' not in str(url_googleplus):
        soc_net.url_googleplus = url_googleplus
    if 'twitter.com' in str(url_twitter) and ' ' not in str(url_twitter):
        soc_net.url_twitter = url_twitter
    if 'linkedin.com' in str(url_linkedin) and ' ' not in str(url_linkedin):
        soc_net.url_linkedin = url_linkedin
    if 'github.com' in str(url_github) and ' ' not in str(url_github):
        soc_net.url_github = url_github
    soc_net.save()
    try:
        pic = request.FILES['logo']
        if pic is not None and 'image/' in pic.content_type:
            currUser.logo = pic
    except:
        # No file uploaded — keep the existing picture.
        pass
    currUser.save()
    return HttpResponseRedirect('/user/settings')
|
class Solution(object):
    """Remove consecutive duplicate values from a sorted linked list."""

    def deleteDuplicates(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        node = head
        while node and node.next:
            if node.val == node.next.val:
                # Splice out the duplicate; stay put in case more repeats follow.
                node.next = node.next.next
            else:
                node = node.next
        return head
|
from rply import LexerGenerator
class Lexer():
    """Builds the rply lexer for the propositional-logic language."""

    def __init__(self):
        self.lexer = LexerGenerator()

    def _add_tokens(self):
        """Register every token pattern (longer/more specific rules first)."""
        # Parentheses
        self.lexer.add('OPEN_PAREN', r'\(')
        self.lexer.add('CLOSE_PAREN', r'\)')
        # definitions
        self.lexer.add('DEF_NOT', r'def \~')
        self.lexer.add('DEF_IMPLIE', r'def \->')
        self.lexer.add('DEF_AND', r'def \&')
        self.lexer.add('DEF_OR', r'def \|')
        self.lexer.add('DEF_IFF', r'def \<->')
        self.lexer.add('DEF_BASE', r'def A')
        # connectives
        self.lexer.add('NOT', r'\~')
        self.lexer.add('IMPLIE', r'\->')
        self.lexer.add('AND', r'\&')
        self.lexer.add('OR', r'\|')
        self.lexer.add('IFF', r'\<->')
        # hyphen
        self.lexer.add('HYPHEN', r'\-')
        # punctuation
        self.lexer.add('DOT', r'\.')
        self.lexer.add('COMMA', r'\,')
        # Number
        self.lexer.add('NUMBER', r'\d+')
        # Atom (identifier starting with a letter)
        self.lexer.add('ATHOM', r'[a-zA-Z][a-zA-Z0-9]*')
        # Ignore whitespace between tokens.
        # Fix: raw string so '\s' is not an invalid string escape
        # (SyntaxWarning/DeprecationWarning on modern Python).
        self.lexer.ignore(r'\s+')

    def get_lexer(self):
        """Register the token rules and return the built rply lexer."""
        self._add_tokens()
        return self.lexer.build()
|
# A simple person record with name and year of birth.
class Person():
    """Stores a person's first name, last name and year of birth."""

    def __init__(self, first_name, last_name, year_of_birth):
        # `self` is the object currently being initialised; just store
        # the supplied values on it.
        self.first_name = first_name
        self.last_name = last_name
        self.year_of_birth = year_of_birth

    def get_age(self, current_year=2018):
        """Return the age in ``current_year``.

        Generalised: the original hard-coded the year 2018; the default
        keeps that behaviour for existing callers.
        """
        age = current_year - self.year_of_birth
        return age

    def get_full_name(self):
        """Return the full name as 'first last'."""
        return self.first_name + ' ' + self.last_name
|
import sys
import random
import signal
import argparse
from functools import partial, reduce
from itertools import chain
import blessed
from .. import save
from ..grid import Direction, Actions
from .grid import Grid
from .tile import Tile
# Key bindings: arrow keys, wasd and hjkl (vim-style) all map onto the
# four movement directions, paired in the order Direction enumerates them.
up, left = ('w', 'k', 'KEY_UP'), ('a', 'h', 'KEY_LEFT')
down, right = ('s', 'j', 'KEY_DOWN'), ('d', 'l', 'KEY_RIGHT')
grid_moves = {}
for keys, direction in zip((up, left, down, right), Direction):
    grid_moves.update(dict.fromkeys(keys, direction))
def grid_dimension(string):
    """Parse a 'ROWSxCOLS' string into {'rows': int, 'cols': int}.

    Raises argparse.ArgumentTypeError for anything not of that form, so
    it can be used directly as an argparse ``type=`` callable.
    """
    rows, _, cols = string.partition('x')
    try:
        dims = {'rows': int(rows), 'cols': int(cols)}
    except ValueError:
        raise argparse.ArgumentTypeError(
            "grid dimension should look like: '4x4'")
    return dims
# Command-line interface: optional grid sizes, tile base value, and a
# save/resume flag whose SAVE_FILE argument is itself optional.
parser = argparse.ArgumentParser(
    description="A game with the objective of merging tiles by moving them.",
    epilog="Use the arrow, wasd or hjkl keys to move the tiles.")
parser.add_argument('grid_dims', metavar='GRID_DIMENSIONS',
                    type=grid_dimension, nargs='*',
                    help="Dimensions used for grid(s), default: '4x4'")
parser.add_argument('-b', '--base', metavar='N', type=int,
                    help="base value of all tiles")
parser.add_argument('-r', '--resume', metavar='SAVE_FILE', nargs='?',
                    default=False, const=None,
                    help="resume previous game. SAVE_FILE is used to save to "
                    "and resume from. Specifying grid dimensions and/or base "
                    "starts a new game without resuming from SAVE_FILE.")
def draw_score(score, term, end=False):
    """Print the score centred on the terminal's top row (red when game over)."""
    msg = "score: " + str(score)
    x = term.width // 2 - len(msg) // 2
    with term.location(x, 0):
        if end:
            print(term.bold_on_red(msg))
        else:
            print(term.bold(msg))
def term_resize(term, grids):
    """Recompute tile sizes and grid positions after a terminal resize.

    Shrinks each grid's tile size until it fits the terminal, then centres
    the grids horizontally and redraws them.  Returns True on success, or
    False when the terminal is too small for any tile size.
    """
    print(term.clear())
    # Width available to each grid, leaving a 2-column gap around/between them.
    max_width = (term.width - (len(grids) + 1) * 2) // len(grids)
    for grid in grids:
        # Try progressively smaller tiles (width is kept at 2x height).
        for tile_height in range(10, 2, -1):
            grid.tile_height, grid.tile_width = tile_height, tile_height * 2
            if grid.height + 1 < term.height and grid.width <= max_width:
                break
        else:
            # Even the smallest tile size does not fit.
            with term.location(0, 0):
                print(term.red("terminal size is too small;\n"
                               "please resize the terminal"))
            return False  # game can not continue until after another resize
    # Centre the row of grids horizontally.
    margin = (term.width - sum(g.width for g in grids) -
              (len(grids) - 1) * 2) // 2
    for grid_idx, grid in enumerate(grids):
        grid.x = margin + sum(g.width for g in grids[:grid_idx]) + grid_idx * 2
        grid.draw()
        grid.update_tiles()
        grid.draw_tiles()
    return True
def main(args=None):
    """Run the terminal game: parse args, build/restore grids, event loop.

    Returns 0 on normal exit.  Uses the module-global ``do_resize`` flag,
    set from the SIGWINCH handler, to trigger redraws.
    """
    global do_resize
    do_resize = True
    term = blessed.Terminal()
    term_too_small = False
    game_over = False
    def on_resize(signal, frame):
        # Signal handler: just flag the main loop to re-layout.
        global do_resize
        do_resize = True
    signal.signal(signal.SIGWINCH, on_resize)
    opts = parser.parse_args(args or sys.argv[1:])
    grid_dims = opts.grid_dims or [{'rows': 4, 'cols': 4}]
    base_num = opts.base or 2
    resume = opts.resume if opts.resume is not False else False
    grids = []
    save_state = {}
    # Only resume when -r was given AND no new-game options override it.
    if resume is not False and not (opts.grid_dims or opts.base):
        save_state = save.load_from_file(resume)
    score = save_state.get('score', 0)
    for grid_state in save_state.get('grids', grid_dims):
        TermTile = partial(Tile, term=term,
                           base=grid_state.pop('base', base_num))
        tiles = grid_state.pop('tiles', ())
        grid = Grid(x=0, y=1, term=term, Tile=TermTile, **grid_state)
        if tiles:
            # Restore saved tiles; otherwise start with two spawns.
            for tile_state in tiles:
                grid.spawn_tile(**tile_state)
        else:
            grid.spawn_tile()
            grid.spawn_tile()
        game_over = game_over or len(grid.possible_moves) == 0
        grids.append(grid)
    with term.fullscreen(), term.cbreak(), term.hidden_cursor():
        while True:
            if do_resize:
                term_too_small = not term_resize(term, grids)
                do_resize = False
            if not term_too_small:
                draw_score(score, term, end=game_over)
            key = term.inkey()
            if key in ('q', 'KEY_ESCAPE') or game_over:
                # Persist state on quit or game over, then leave the loop.
                save.write_to_file(score, grids, filename=resume or None)
                break
            direction = grid_moves.get(key.name or key)
            if not direction or term_too_small:
                continue
            for grid in grids:
                actions = grid.move(direction)
                for action in actions:
                    grid.draw_empty_tile(*action.old)
                    if action.type == Actions.merge:
                        row, column = action.new
                        score += grid[row][column].value
                if actions:  # had any successfull move(s)?
                    # 10% chance the new tile spawns with the higher exponent.
                    grid.spawn_tile(exponent=2 if random.random() > 0.9 else 1)
                    grid.draw_tiles()
                if all(chain(*grid)):
                    # Board is full: game over once no move is possible.
                    game_over = game_over or len(grid.possible_moves) == 0
    high = 0
    for max_tile in filter(None, (g.highest_tile for g in grids)):
        high = max(high, max_tile.value)
    print("highest tile: {}\nscore: {}".format(high, score))
    return 0
|
#! C:\bin\Python35\python.exe
# -*- coding: utf-8 -*-
'''
Modified for python3 on 2012/04/29
original python2 version is Created on 2011/10/30
@author: tyama
'''
import email.header
import http.cookiejar
import json
import poplib
import random
import re
import socket
import string
import sys
import threading
import time
import urllib.error
import urllib.parse
import urllib.request

import mailcheker_data as config
from subprocess import check_call
'''
#sample
def decode_mime_header1(s0):
return ''.join( str(s, c or 'ascii') if isinstance(s, (bytes,)) \
else s for s,c in email.header.decode_header(s0) )
'''
def decode_mime_header(st):
    """Decode a MIME-encoded (RFC 2047) header into one unicode string."""
    parts = []
    for fragment, charset in email.header.decode_header(st):
        try:
            if isinstance(fragment, str):
                parts.append(fragment)
            elif charset == 'unknown-8bit':  # bytes with no declared charset
                parts.append(fragment.decode('Shift_JIS', 'ignore'))
            elif charset:
                parts.append(fragment.decode(charset, 'ignore'))
            else:
                parts.append(fragment.decode('utf-8', 'ignore'))
        except LookupError as e:
            print('encode error:', e)
        except Exception as err:
            print('Unexpected error in decode, sleeping 8 sec')
            print(sys.exc_info())
            time.sleep(8)
    return ''.join(parts)
def extract_url(msg, fromkey, payloadkey, multiurl):
    """Scan a mail message for URLs when its From: header matches fromkey.

    ``payloadkey`` is the regex applied to each text part.  With
    ``multiurl`` true, every match is collected and de-duplicated;
    otherwise only group(1) of the first match is kept.  Returns the
    (possibly empty) list of URLs, or None when the sender doesn't match.
    """
    f_header = msg.get('From', str)
    # rakuten mail is not correctly decoded
    # the following replacement is useful
    if isinstance(f_header, str):
        f_header_mod = f_header.replace('==?=<', '==?= <')
    else:
        f_header_mod = f_header  # .encode()
    decoded_from = decode_mime_header(f_header_mod)
    url = []
    if fromkey in decoded_from:
        # print "YES"
        pattern = re.compile(payloadkey)
        for part in msg.walk():
            if part.get_content_maintype() == 'text':
                body = part.get_payload()
                enc = part.get_content_charset()
                # Same charset fallback chain as decode_mime_header.
                if isinstance(body, str):
                    u_body = body
                elif enc == 'unknown-8bit':  # case of type==bytes
                    u_body = body.decode('Shift_JIS', 'ignore')
                elif enc:
                    u_body = body.decode(enc, 'ignore')
                else:
                    u_body = body.decode('euc-jp', 'ignore')
                # print enc
                # print u_body
                if multiurl:
                    result = pattern.findall(u_body)
                    if result:
                        for each in result:
                            url.append(each)
                        # De-duplicate (note: set() does not keep order).
                        url = list(set(url))
                        # sorted(set(url), key=url.index)
                else:
                    result = pattern.search(u_body)
                    if result:
                        url.append(result.group(1))
        return url
    else:
        return None
def isEmailTocheck(msg, fromkey):
    """Return True when the decoded From: header contains ``fromkey``."""
    raw_from = msg.get('From', str)
    # Rakuten mail headers are sometimes malformed; inserting a space
    # before '<' lets decode_header handle them.
    if isinstance(raw_from, str):
        raw_from = raw_from.replace('==?=<', '==?= <')
    return fromkey in decode_mime_header(raw_from)
class http_get(threading.Thread):
    """Worker thread that fetches one URL through the shared opener."""

    def __init__(self, url, opener, index):
        threading.Thread.__init__(self)
        self.url = url
        self.opener = opener
        self.index = index

    def run(self):
        """Open the URL; log and swallow network errors."""
        try:
            self.opener.open(self.url)
            print(" ", self.index, self.url)
            return True
        except urllib.error.HTTPError as error:
            print('HTTP Error')
            print(error)
        except socket.timeout as error:
            print('Socket time out')
            print(error)
        except Exception as err:
            print('Unexpected error in decode, sleeping 8 sec')
            print(sys.exc_info())
            time.sleep(8)
        return None
# Sample record used by main() to demonstrate a JSON round trip.
original_data = {
    'name': 'ACME',
    'shares': 100,
    'price': 542.23
}
def main():
    """Poll each configured POP3 account and open the URLs found in
    matching mails.

    Per-account progress (last seen message number + UIDL) is read from
    and written back to 'lastmsgid.dat'; matching rules come from the
    mailcheker_data config module.
    """
    # JSON round-trip demo (unrelated to the mail-checking logic).
    print("Base", original_data)
    json_str = json.dumps(original_data)
    print(json_str)
    json_data = json.loads(json_str)
    print(json_data)
    server_list = config.server_list
    user_list = config.user_list
    pass_list = config.pass_list
    print(server_list)
    # One (fromkey, regex, multiurl) rule list per account.
    dl_list1 = config.dl_list1
    dl_list2 = config.dl_list2
    dl_list3 = config.dl_list3
    dl_list = (dl_list1, dl_list2, dl_list3)
    # lines=open('setting.dat','r').readlines()
    # for line in lines:
    #    print line[:-1]
    # Load the last processed (msg number, UIDL) pair for each account.
    lastuidl_lists = []
    f = open('lastmsgid.dat', 'r')
    for line in f:
        lastuidl_lists.append(line.split())
    f.close()
    out_string = []
    print(lastuidl_lists)
    print(dl_list)
    # time out
    socket.setdefaulttimeout(15.0)
    # connect to server
    cj = http.cookiejar.CookieJar()
    cjhdr = urllib.request.HTTPCookieProcessor(cj)
    opener = urllib.request.build_opener(cjhdr)
    opener.addheaders = [
        ('User-Agent', 'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko)\
        Chrome/15.0.874.120 Safari/535.2')]
    # Process each configured POP3 account in turn.
    for j in range(len(server_list)):
        print('Start ')
        server = poplib.POP3_SSL(server_list[j])
        # login
        server.user(user_list[j])
        server.pass_(pass_list[j])
        # list items on server
        list_resp, list_items, list_octets = server.list()
        print(list_resp)
        # print (list_items)
        print(list_octets)
        uidl = server.uidl()
        lastuidl = lastuidl_lists[j]
        # print server.uidl()
        '''if j==1:
            lastuidl[1]='TEST'
        '''
        last_msg_id = 1
        x = int(lastuidl[0])
        if x > len(list_items):
            x = len(list_items)
        index = x
        print(x)
        if x == 0:
            # Empty mailbox: record a placeholder entry and move on.
            out_string.append('1')
            out_string.append('abc')
            continue
        # Walk backwards until the previously processed UIDL is found;
        # `index` ends up at the first unprocessed message.
        while x > 0:
            # print (lastuidl[1], ":>", uidl[1][x-1].split()[1].decode('utf-8','ingore'))
            if lastuidl[1] == uidl[1][x - 1].split()[1].decode('utf-8', 'ingore'):
                print('equal')
                break
            print(x)
            index = x
            x -= 1
        print(index)
        # if uidl[1][i].split()[1] == 'ANft2MsAABBhTsOb4QzFegr+jPA':
        #    print 'equal'
        #    continue
        delete_counter = 0
        last_index = index
        for i in range(index, len(list_items) + 1):
            try:
                # Fetch headers only first; full body is retrieved below
                # only when the sender matches a rule.
                # resp, text, octets = server.retr(i)
                t_resp, t_text, t_octets = server.top(i, 1)
            except Exception as err:
                print('Unexpected error in server.top of Main function\n')
                print('i=', i, ', index=', index)
                print(sys.exc_info())
            # print (text)'
            t_string_text = b'\n'.join(t_text)
            t_msg = email.message_from_bytes(t_string_text)
            url_list = None
            checkBody = False
            for from_key, text_key, multiurl in dl_list[j]:
                if isEmailTocheck(t_msg, from_key):
                    checkBody = True
                    break
            if checkBody:
                try:
                    resp, text, octets = server.retr(i)
                except Exception as err:
                    print('Unexpected error in server.retr of Main function\n')
                    print('i=', i, ', index=', index)
                    print(sys.exc_info())
                string_text = b'\n'.join(text)
                msg = email.message_from_bytes(string_text)
                # First rule that yields URLs wins.
                for from_key, text_key, multiurl in dl_list[j]:
                    url_list = extract_url(msg, from_key, text_key, multiurl)
                    if url_list:
                        break
                # print url_list
                if url_list:
                    m_date = msg.get('Date')
                    print(m_date)
                    for each in url_list:
                        # print each
                        get = http_get(each, opener, i)
                        try:
                            get.start()
                            # server.dele(i)
                            delete_counter += 1
                            if 'r34' in each:
                                print('Call Chrome')
                                check_call(
                                    ["C:\Program Files (x86)\Google\Chrome\Application\chrome.exe",
                                     " --disable-images", each])
                        except Exception as err:
                            print('Unexpected error in Main function', each, i)
                            print(sys.exc_info())
                            time.sleep(8)
                    m_subject = msg.get('Subject')
                    d_subject, enc = email.header.decode_header(m_subject)[0]
                    if enc is None:
                        enc = 'euc-jp'
                    try:
                        u_subject = str(d_subject, enc)
                    except Exception as err:
                        print('Unexpected error in u_subject', d_subject, enc)
                        print(sys.exc_info())
                        time.sleep(8)
                    print(i, " ", u_subject)
            else:
                print(i)
            last_index = i
            if i == 6:
                pass  # quit()
        last_msg_id = len(list_items)  # - delete_counter
        # Remember how far we got for this account (count + UIDL).
        out_string.append(str(last_msg_id))
        out_string.append(uidl[1][last_index - 1].split()[1].decode('utf-8', 'ignore'))
        try:
            server.quit()
        except Exception as err:
            print('Unexpected error in server.quit()')
            print(sys.exc_info())
        print('End')
        print(out_string[len(out_string) - 1])
    # print out_string
    time.sleep(2)
    # Echo and persist the per-account progress pairs.
    for i in range(len(out_string)):
        if i % 2:
            continue
        print(out_string[i])
        print(out_string[i + 1])
    f = open('lastmsgid.dat', 'w')
    for i in range(len(out_string)):
        if i % 2:
            continue
        f.write(out_string[i] + ' ')
        f.write(out_string[i + 1] + '\n')
    f.close()
if __name__ == '__main__':
    main()
    print('END')
    # Keep the console window open briefly so the output can be read.
    time.sleep(8)
|
import requests as http
from lib.mouse import Mouse
from lib.GPIOInteraction import GPIOInteractor
from lib.SoundPlayer import SoundPlayer
import os
import time
# Global playback state shared with the button callbacks below.
os.environ['PLAYING'] = 'False'
playing = False
# Total time spent paused during the current song (seconds).
pause_time = 0
paused_at = time.time()
# Timestamp of the last button press (used for debouncing).
pressed_at = time.time()
skip = False
# Playback-master endpoint; the state name is appended per request.
playback_url = 'http://127.0.0.1:5000/playback?state='
# called whenever the physical pause/play button is pressed
def toggle_play_pause():
    """Pauses or Resumes music from playback master."""
    global playing
    global paused_at
    global pressed_at
    global pause_time
    # Debounce: ignore presses during a transition or within 1s of the last.
    if m.in_transition or time.time() - pressed_at < 1:
        return None
    pressed_at = time.time()
    if playing:
        m.pause()
        http.get(playback_url + 'paused')
        playing = False
        paused_at = time.time()
        s.play_pause_tone()
    else:
        m.play()
        http.get(playback_url + 'resume')
        playing = True
        # Credit the time spent paused so the song-end check stays correct.
        pause_time += time.time() - paused_at
def skip_song():
    """Flag the main loop to advance to the next track and play a tone."""
    global skip
    skip = True
    s.play_skip_tone()
# Hardware/driver singletons and button wiring.
m = Mouse()
g = GPIOInteractor()
s = SoundPlayer()
# Short press toggles play/pause; holding the button skips the track.
g.set_button_callback(toggle_play_pause)
g.set_button_held_callback(skip_song)
song_url = 'http://127.0.0.1:5000/get_next_song'
s.play_boot_tone()
def get_next_song():
    """Gets next song from the playback master and returns its data.

    Retries every 10 seconds while the server is unreachable or reports
    an error.  Fix: the original recursed on every connection error,
    which could exhaust the stack during a long outage (and its inner
    error-retry loop called http.get without the ConnectionError guard);
    this version loops instead, covering both cases.
    """
    while True:
        try:
            response = http.get(song_url)
        except http.exceptions.ConnectionError:
            print('unable to connect, waiting...')
            time.sleep(10)
            continue
        data = response.json()
        if 'error' not in data:
            return data
        print(data['error'])
        time.sleep(10)
# establish a connection
data = get_next_song()
# Duration arrives in milliseconds; work in seconds locally.
duration = data['duration'] / 1000.0
start_time = time.time()
m.play_song(data['track_id'])
playing = True
# main loop
while True:
    # Advance when the song is (3 seconds from) over — accounting for time
    # spent paused — or when a skip was requested by the button callback.
    if time.time() - start_time + 3>= duration + pause_time or skip:
        if(playing):
            toggle_play_pause()
        data = get_next_song()
        duration = data['duration'] / 1000.0
        m.play_song(data['track_id'])
        start_time = time.time()
        pause_time = 0
        skip = False
        playing = True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.