text stringlengths 8 6.05M |
|---|
import nltk
# from nltk import word_tokenize, pos_tag,sent_tokenize, RegexpTokenizer
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import json, gensim

print ("started")
# type(data)=dict
# Raw article lines from the two news dumps, one article per line.
with open('hindustan_times_news.csv') as w:
    ht = w.readlines()
with open('times_of_india_news.csv') as w:
    toi = w.readlines()
#model = gensim.models.Word2Vec.-('vectors.bin', binary = True)
# Pre-trained binary word2vec vectors loaded at import time.
# NOTE(review): Word2Vec.load_word2vec_format was removed in gensim >= 1.0;
# newer gensim uses KeyedVectors.load_word2vec_format — confirm pinned version.
model = gensim.models.Word2Vec.load_word2vec_format('vector.bin', binary = True)
print ("model loaded")
# ques, tag = [], []
# for key, value in data.items():
# ques.append(key)
# tag.append(data[key])
# tags = []
# with open('tags.txt') as f:
# s = f.readlines()
# for line in s:
# tags.append(line)
# questions_train, questions_test, tags_train, tags_test = train_test_split(ques, tag, test_size=0.01, random_state = random.randint(1, 100))
# print (len(tags_test))
tokenizer = RegexpTokenizer(r'\w+')  # keeps runs of word chars; drops punctuation
stop = stopwords.words('english')
# #stemmer = SnowballStemmer("english")
# #stemmer = SnowballStemmer("english")
def chk_in_model(b):
    """Return the tokens from *b* that exist in the word2vec model's vocabulary.

    :param b: iterable of token strings
    :return: list of in-vocabulary tokens, input order preserved
    """
    # The original used a confusing `if bb in model: pass / else: continue`
    # construct before appending; this filter is the same behavior.
    return [bb for bb in b if bb in model]
# def getcosine(v1, v2):
# return 1 - spatial.distance.cosine(v1, v2)
def clean_ques(query):
    """Normalise a raw question string into model-vocabulary tokens.

    Lower-cases, tokenises on word characters, removes English stop
    words, and keeps only tokens present in the word2vec model.
    """
    tokens = tokenizer.tokenize(query.lower())
    without_stop = [tok for tok in tokens if tok not in stop]
    return chk_in_model(without_stop)
def clean_ques_not_stop(query):
    """Like clean_ques() but keeps stop words.

    Lower-cases and tokenises *query* (a raw string, despite the original
    comment), then filters to tokens known to the word2vec model.
    """
    tokens = tokenizer.tokenize(query.lower())
    return chk_in_model(tokens)
def wordvec(word):
    """Return the embedding vector for *word*.

    Falls back to a zero vector with the model's dimensionality for
    out-of-vocabulary words.
    """
    # Local import: the module never imports numpy, so the original
    # fallback `numpy.zeros(...)` raised NameError on any OOV word.
    import numpy
    try:
        return model[word]
    except KeyError:
        # Dimensionality taken from an arbitrary known word, as before.
        return numpy.zeros(len(model["one"]))
#Get the Word Centroid Distance
# def wcd(sent1, sent2):
# # here sent1 & sent2 both are list of words
# if(len(sent1)>0 and len(sent2)>0):
# s1 = wordvec(sent1[0])
# s2 = wordvec(sent2[0])
# else:
# return 10000
# for i in range(1,len(sent1)):
# s1 = s1 + wordvec(sent1[i])
# for i in range(1,len(sent2)):
# s2 = s2 + wordvec(sent2[i])
# s1 = s1 / len(sent1)
# s2 = s2 / len(sent2)
# return numpy.linalg.norm(s1 - s2) # returns the norm of the difference of the two vectors
#print(word1)
#Get the Relaxed Word Mover Distance
# def rwmd(sent1, sent2):
# s1, s2 = 0, 0
# dist1 , dist2 = 0, 0
# # dist1 is distance to move from sent1 to sent2
# if len(sent1) == 0 or len(sent2) == 0:
# return 0
# for i in range(len(sent1)):
# d = numpy.linalg.norm(wordvec(sent1[i]) - wordvec(sent2[0]))
# #d = getcosine(wordvec(sent1[i]) , wordvec(sent2[0]))
# val = 0
# for j in range(len(sent2) - 1):
# if (numpy.linalg.norm(wordvec(sent1[i]) - wordvec(sent2[j + 1])) < d): # calculating the minimum distance of sent1[i] with every sent2[j]
# d = numpy.linalg.norm(wordvec(sent1[i]) - wordvec(sent2[j + 1]))
# #d = getcosine(wordvec(sent1[i]) , wordvec(sent2[j + 1]))
# val = j + 1
# dist1 = dist1 + (1.0 / len(sent1)) * d
# # dist2 is distance to move from sent2 to sent1
# for i in range(len(sent2)):
# d = numpy.linalg.norm(wordvec(sent2[i]) - wordvec(sent1[0]))
# #d = getcosine(wordvec(sent2[i]) , wordvec(sent1[0]))
# val = 0
# for j in range(len(sent1) - 1):
# if (numpy.linalg.norm(wordvec(sent2[i]) - wordvec(sent1[0])) < d):
# d = numpy.linalg.norm(wordvec(sent2[i]) - wordvec(sent1[j + 1]))
# #d = getcosine(wordvec(sent2[i]) , wordvec(sent1[j + 1]))
# val = j + 1
# dist2 = dist2 + (1.0 / len(sent2)) * d
# return max(dist1, dist2)
# #Get the one sided Relaxed Word Mover Distance
# def rwmd_(sent1, sent2):
# s1, s2 = 0, 0
# dist1 , dist2 = 0, 0
# # dist1 is distance to move from sent1 to sent2
# if len(sent1) == 0 or len(sent2) == 0:
# return 0
# for i in range(len(sent1)):
# d = numpy.linalg.norm(wordvec(sent1[i]) - wordvec(sent2[0]))
# val = 0
# for j in range(len(sent2) - 1):
# if (numpy.linalg.norm(wordvec(sent1[i]) - wordvec(sent2[j + 1])) < d): # calculating the minimum distance of sent1[i] with every sent2[j]
# d = numpy.linalg.norm(wordvec(sent1[i]) - wordvec(sent2[j + 1]))
# val = j + 1
# dist1 = dist1 + (1.0 / len(sent1)) * d
# return dist1
# def getwcd(query, num):
# dic={}
# for i in range(len(ques)):
# if(len(ques[i])==0):
# continue
# ques1=clean_ques(ques[i])
# val = wcd(query,ques1)
# if(len(dic)<num):
# dic[ques[i]]=val
# else:
# m=max(dic,key=dic.get)
# if(dic[m]>val):
# del dic[m]
# dic[ques[i]]=val
# return list(dic.keys())
# def getrwmd(query, kwcd, num):
# dic={}
# for i in range(len(kwcd)):
# ques1=clean_ques(kwcd[i])
# val=rwmd(query,ques1)
# #print (kwcd[i], val)
# if (len(dic)<num):
# dic[kwcd[i]]=val
# else:
# m=max(dic,key=dic.get)
# if(dic[m]>val):
# del dic[m]
# dic[kwcd[i]]=val
# return list(dic.keys())
# #create priority queue to store the dist
# #return top num values
# def getkNN(query, num):
# kwcd = getwcd(query, 10 * num)
# knn = getrwmd(query, kwcd, num)
# return knn
# def rank_dic_ques(dic):
# m = max(dic.values())
# for i in dic:
# dic[i] = float(float(dic[i]) / (m * 1.0))
# return dic
# def rank_dic_tags(dic):
# m = max(dic.values())
# for i in dic:
# dic[i] = 1.0 - float(float(dic[i]) / (m * 1.0))
# return dic
# #get the top 20 tags by question similarity
# def getTagsSimilarQues(query, k = 20):
# query = clean_ques(query)
# # print ("query from ques", query)
# knn = getkNN(query, 30)
# #print(knn)
# #return tags of all 50 questions returned with count of occurrence
# tags=[]
# for i in knn:
# tags.extend(data[i])
# #tag1 = Counter(tags).most_common(k)
# dic = {}
# for w, c in Counter(tags).most_common(k):
# dic[w] = c
# return rank_dic_ques(dic)
# #get the top 20 tags by tag similarity to a question
# def similar_tags(ques, num = 20):
# dic = {}
# query = clean_ques(ques)
# # print ("query from tags", query)
# for i in range(len(tags)):
# try:
# tag_query = clean_ques(tags[i])
# # print ("tag query:", tag_query)
# if(len(tag_query) == 0):
# continue
# val=rwmd_(tag_query, query)
# if (len(dic)<num):
# dic[tags[i]]=val
# else:
# m = max(dic,key=dic.get)
# if(dic[m]>val):
# del dic[m]
# dic[tags[i]]=val
# except KeyError:
# pass
# # print (ques)
# # print (dic)
# return rank_dic_tags(dic)
# pred = []
# tt = []
# def combine_linear(dic1, dic2, alpha, beta):
# dic = {}
# for a in dic1:
# dic[a.strip()] = dic1[a] * alpha
# for a in dic2:
# if a.strip() in dic:
# dic[a.strip()] = dic[a.strip()] + dic2[a]*beta
# else:
# dic[a.strip()] = dic2[a]*beta
# return dic
# def dic_to_lis_sort(dic):
# lis = []
# for a in dic:
# lis.append([dic[a], a])
# lis.sort(reverse = True)
# to_ret = []
# for a,b in lis:
# to_ret.append(b)
# return (to_ret)
# #func to get the precision
print ("start")
res = {}
# Hoisted: clean every Times of India article once, instead of re-tokenising
# the entire list for each Hindustan Times article (was O(|ht|*|toi|) cleans).
toi_clean = [(b, clean_ques(b)) for b in toi]
for a in ht:
    a_c = clean_ques(a)
    dist = -1
    for b, b_c in toi_clean:
        # n_similarity: cosine similarity between the two token sets.
        n = model.n_similarity(a_c, b_c)
        if n > dist:
            dist = n
            res[a] = b  # best-matching TOI article so far for `a`
for a in res:
    print (a)
    print (res[a])
    print ("====")
|
#Project Euler Problem 32
# find the sum of all products where the multiplicand/multiplier/product are 1-9 pandigital
# e.g. 39*186=7254
#Project Euler Problem 32: sum of all products whose multiplicand, multiplier
# and product together use each of the digits 1-9 exactly once.
numbers=[]
pandigitals=[]
#Generate a list of candidate factors: 1..1999 with no repeated digits and no 0.
for i in range(1,2000):
    numbers.append(i)
for i in numbers:
    temp=str(i)
    #print(temp)
    count=0
    # Compare every digit pair; a number with a repeated digit or any zero
    # digit is flagged by overwriting its slot with 0 (removed below).
    for j in range(0,len(temp)-1):
        for k in range(j,len(temp)):
            if temp[k]==temp[j] and j!=k and i in numbers:
                #print(temp[k],'=',temp[j])
                a=numbers.index(i)
                numbers[a]=0
            if int(temp[k])==0 and i in numbers:
                #print(temp[k],'=',0)
                a=numbers.index(i)
                numbers[a]=0
            if int(temp[j])==0 and i in numbers:
                #print(temp[j],'=',0)
                a=numbers.index(i)
                numbers[a]=0
# Drop all the zeroed-out slots.
numbers.sort()
while numbers[0]==0:
    numbers.remove(0)
print('pandigitals',pandigitals)
print('numbers',numbers)
# Try every factor pair; the multiplier is capped at <= 100 since the three
# parts must total exactly 9 digits.
for multi1 in numbers:
    for multi2 in numbers:
        if multi2>100:
            break
        a=str(multi1)
        b=str(multi2)
        c=multi1*multi2
        c=str(c)
        if (len(a)+len(b)+len(c))!=9:
            continue
        count=0
        # print(len(a)+len(b)+len(c),a,b,c)
        # count > 0 means the product has a repeated digit or a zero digit.
        for j in range(0,len(c)-1):
            for k in range(j,len(c)):
                if c[k]==c[j] and j!=k:
                    count+=1
                if int(c[k])==0:
                    # print(c[k],'=',0)
                    count+=1
                if int(c[j])==0:
                    # print(c[j],'=',0)
                    count+=1
        check=0
        if count!=0:
            check+=1
        #print(a,b,c)
        # Any digit shared between multiplicand, multiplier and product
        # disqualifies the triple (each factor is already repeat-free).
        for i in a:
            for j in b:
                for k in c:
                    if i==j or i==k or j==k:
                        check+=1
        if check==0:
            print(a,b,c)
            pandigitals.append(int(c))
#pandigitals.sort()
# Some products arise from more than one factorisation; count each once.
pandigitals=set(pandigitals)
print(pandigitals)
print(sum(pandigitals))
#Correct!
#Correct!
|
import json
def get_relation_dict(relation_filepath):
    """Build relation<->id lookup dicts from a tab-separated file and dump
    them as JSON to ../dict/relation_dict.

    Each input line is "<relation>\t<id>"; both directions are produced
    (relation -> int id, and int id -> relation).
    """
    relation2id = {}
    id2relation = {}
    with open(relation_filepath, mode='r', encoding='utf-8') as fr:
        for line in fr:
            split_list = line.split('\t')
            # int() tolerates the trailing newline on the id column.
            relation2id[split_list[0]] = int(split_list[1])
            id2relation[int(split_list[1])] = split_list[0]
    # Fix: the output handle was opened inline inside json.dump() and never
    # closed; a context manager guarantees flush + close.
    with open('../dict/relation_dict', mode='w', encoding='utf-8') as fw:
        json.dump([relation2id, id2relation], fw, ensure_ascii=False,
                  indent=4)


if __name__ == '__main__':
    relation_filepath = '../data/relation2id.txt'
    get_relation_dict(relation_filepath)
import nltk
from nltk.book import *
from nltk.corpus import brown

# List every genre (category) available in the Brown corpus.
print(brown.categories())
# Conditional frequency distribution: word counts conditioned on genre.
cfd = nltk.ConditionalFreqDist((genre,word)
    for genre in brown.categories()
    for word in brown.words(categories=genre))
# Tabulate modal-verb frequencies across a selection of genres.
genres =['news','religion','hobbies','science_fiction','romance','humor']
modals = ['can','could','may','might','must','will']
cfd.tabulate(conditions= genres,samples = modals)
|
"""
Do a quick analysis of the abortive and full length transcript amounts.
"""
class Quant(object):
    """Container for one promoter's transcript quantification values."""

    def __init__(self, name, FL, AB, PY):
        """Store full-length (FL), abortive (AB) and productive-yield (PY)
        amounts as floats; values may arrive as strings from the CSV."""
        self.name = name
        for attr, raw in zip(("FL", "AB", "PY"), (FL, AB, PY)):
            setattr(self, attr, float(raw))

    def __repr__(self):
        return "%s, PY: %s" % (self.name, self.PY)
file1 = 'summary_quant_first.csv'
file2 = 'rna_quant_summary_second_quant.csv'
f1_info = {}
f2_info = {}
# Parse both whitespace-separated summary files into {row_label: column_values}.
# Fix: opened in text mode ('r') instead of 'rb' — in Python 3, binary mode
# produced bytes keys, breaking the str lookups ('Promoter', 'FL', ...) below.
for filepath, filedict in [(file1, f1_info), (file2, f2_info)]:
    for line in open(filepath, 'r'):
        if line.split() == []:
            continue
        else:
            info = line.split()
            filedict[info[0]] = info[1:]
quant1_obj = {}
for name, fl, ab, py in zip(f1_info['Promoter'], f1_info['FL'], f1_info['Ab'],
                            f1_info['%PY']):
    quant1_obj[name] = Quant(name, fl, ab, py)
quant2_obj = {}
# BUG FIX: the second quantification previously zipped f2_info['Promoter']
# with f1_info's FL/Ab/%PY columns (copy-paste error), so quant2 silently
# duplicated quant1's values; all columns now come from f2_info.
for name, fl, ab, py in zip(f2_info['Promoter'], f2_info['FL'], f2_info['Ab'],
                            f2_info['%PY']):
    quant2_obj[name] = Quant(name, fl, ab, py)
# plot abortive vs abortive and full length vs full length
names = quant1_obj.keys()
fl1 = [quant1_obj[name].FL for name in names]
ab1 = [quant1_obj[name].AB for name in names]
py1 = [quant1_obj[name].PY for name in names]
fl2 = [quant2_obj[name].FL for name in names]
ab2 = [quant2_obj[name].AB for name in names]
py2 = [quant2_obj[name].PY for name in names]
from matplotlib import pyplot as plt
#plt.scatter(fl2, py2)
plt.scatter(py1, py2)
plt.show()
|
import telebot
from delidog import settings
from delidog.models import Chat, Message

# Single module-level bot instance, authenticated with the configured token.
bot = telebot.TeleBot(settings.BOT_TOKEN)
@bot.message_handler(commands=['start', ])
def _send_token(message):
    """Handle /start: reply to the chat with its stored token."""
    chat = Chat.get_chat(message.chat.id)
    send_message(chat, chat.token)
@bot.message_handler(commands=['set_token', ])
def _set_token(message):
    """Handle /set_token <token>: store a new token for this chat.

    Messages that are not exactly the command plus one argument are
    silently ignored.
    """
    parts = message.text.split()
    if len(parts) == 2:
        chat = Chat.set_token(message.chat.id, parts[1])
        send_message(chat, 'New token {}'.format(chat.token))
def send_message(chat, text, disable_notification=False):
    """Send *text* to *chat* via Telegram, then record it in the database."""
    bot.send_message(
        chat.id, text, disable_notification=disable_notification, timeout=15)
    # Persist the outgoing message only after the send call returns.
    Message.add_message(
        chat,
        text,
        disable_notification
    )
def polling():
    """Start the bot's long-polling loop (blocks until interrupted)."""
    bot.polling()
|
from onegov.core.security import Private
from onegov.org.views.export import view_export_collection, view_export
from onegov.town6 import TownApp
from onegov.org.models import Export, ExportCollection
from onegov.town6.layout import ExportCollectionLayout
@TownApp.html(
    model=ExportCollection,
    permission=Private,
    template='exports.pt')
def town_view_export_collection(self, request):
    """Render the export collection, delegating to the shared org view but
    substituting the town6 layout."""
    return view_export_collection(
        self, request, ExportCollectionLayout(self, request))
@TownApp.form(
    model=Export,
    permission=Private,
    template='export.pt',
    form=lambda model, request: model.form_class)
def town_view_export(self, request, form):
    """Render a single export's form, delegating to the shared org view
    with the town6 layout."""
    return view_export(
        self, request, form, ExportCollectionLayout(self, request))
|
# coding = UTF-8
import logging
import re
logging.basicConfig(level=logging.DEBUG)
def get_word_count(file_name):
    """Count whitespace-separated words in a UTF-8 text file.

    :param file_name: path to the text file
    :return: total number of words across all non-empty lines
    """
    with open(file_name, 'r', encoding='utf-8') as f:
        word_cnt = 0
        for i, line in enumerate(f, start=1):
            # Fix: `line[:-1]` chopped a real character when the final line
            # had no trailing newline; rstrip('\n') only removes the newline.
            # Fix: tabs were deleted outright (fusing tab-separated words);
            # treat them as separators instead.
            line = line.rstrip('\n').replace('\t', ' ').strip()
            if line != '':
                # split() with no argument collapses runs of whitespace,
                # replacing the manual re.sub(' +', ' ') normalisation.
                word_list = line.split()
                word_cnt += len(word_list)
                #logging.info('%d) line: %d; file: %d',
                #             i, len(word_list), word_cnt)
    return word_cnt
if __name__ == "__main__":
    file_name = 'referat.txt'
    # file_name = 'my_file.txt'
    word_cnt = get_word_count(file_name)
    # Log message is Russian: "Words in the text: {wc}".
    logging.info("Слов в тексте: {wc}".format(wc=word_cnt))
|
import random
import torch
class NaturalSelection:
    """Genetic-algorithm driver: holds a population of game agents, selects
    an elite by fitness, and breeds new generations by crossing over and
    mutating torch weight state-dicts loaded from disk."""

    def __init__(self):
        # NOTE(review): mutate_chance = 10 is compared against
        # random.random() (< 1.0) in mutate(), so mutation currently
        # *always* fires — confirm whether 0.10 was intended.
        self.mutate_chance = 10
        self.mutate_impact = 0.01       # max absolute perturbation per weight
        self.current_population = {}    # idx -> {"Object", "Gen", "ID", "Fitness"}
        self.new_population = {}
        self.elite = {}                 # top-N entries by fitness
        self.high_score = 0             # best fitness seen in last select_elite()
        self.new_population_weights = {}
        self.children_weights = {}      # accumulated offspring state-dicts
        self.elite_weights = {}
        self.obj = classmethod          # placeholder; replaced by the agent class
        self.width = 0
        self.height = 0

    def generate_first_pop(self, num, obj, w, h):
        """Create generation 0 with *num* agents of class *obj* sized w x h."""
        for i in range(0, num):
            self.current_population[i] = {"Object": obj(i, 0, w, h), "Gen": 0, "ID": i, "Fitness": 0}
        self.update_pop_dict()
        self.width = w
        self.height = h
        self.obj = obj

    def update_pop_dict(self):
        """Refresh each entry's cached Fitness from its agent's global_fitness."""
        for item in self.current_population:
            self.current_population[item]["Fitness"] = self.current_population[item]["Object"].global_fitness
            #print("fitness",self.current_population[item]["Object"].fitness)

    def select_elite(self, amount=20):
        """Pick the *amount* fittest agents into self.elite / self.elite_weights
        and record the top fitness in self.high_score."""
        self.update_pop_dict()
        temp = []
        for item in self.current_population:
            temp.append([item, self.current_population[item]["Fitness"]])
        # Sort by fitness, highest first (fitness coerced to int for the key).
        temp.sort(key=lambda x: int(x[1]), reverse=True)
        self.high_score = temp[0][1]
        for i in range(0, amount):
            self.elite[i] = self.current_population[temp[i][0]]
            self.elite_weights[i] = self.current_population[temp[i][0]]["Object"].brain.get_weights()

    def breed(self, gen, p1, p2):
        """Cross over two parents' saved state-dicts (loaded from
        NeuralNet/Models/Gen_<gen>/Snake_<id>.pt) into two mutated children
        appended to self.children_weights."""
        self.update_pop_dict()
        par1 = torch.load('NeuralNet/Models/Gen_{}/Snake_{}.pt'.format(gen, p1))
        par2 = torch.load('NeuralNet/Models/Gen_{}/Snake_{}.pt'.format(gen, p2))
        # NOTE(review): child1/child2 are *aliases* of par1/par2 (no copy),
        # so the writes below mutate the parent dicts mid-crossover and the
        # `choice == par1[...]` comparisons read already-mutated values —
        # confirm whether a deepcopy was intended.
        child1 = par1
        child2 = par2
        for item in par1: # iterate through keys
            temp1 = par1[item]
            for i in range(0, len(temp1)):
                if len(temp1[i].size()) == 0:
                    # Scalar tensor: pick one parent's value, mutate both slots.
                    choice = random.choice([par1[item][i], par2[item][i]])
                    if choice == par1[item][i]:
                        child1[item][i] = self.mutate(choice)
                        child2[item][i] = self.mutate(par2[item][i])
                    else:
                        child1[item][i] = self.mutate(par1[item][i])
                        child2[item][i] = self.mutate(choice)
                else:
                    # 1-D (or higher) tensor row: cross over element-wise.
                    for j in range(0, len(temp1[i])):
                        choice = random.choice([par1[item][i][j], par2[item][i][j]])
                        if choice == par1[item][i][j]:
                            child1[item][i][j] = self.mutate(choice)
                            child2[item][i][j] = self.mutate(par2[item][i][j])
                        else:
                            child1[item][i][j] = self.mutate(par1[item][i][j])
                            child2[item][i][j] = self.mutate(choice)
        self.children_weights[len(self.children_weights)] = child1
        self.children_weights[len(self.children_weights)] = child2

    def mutate(self, x):
        """Return *x* perturbed by a uniform value in +/- mutate_impact.

        NOTE(review): when the random draw does NOT fire, this implicitly
        returns None (no else branch). With mutate_chance = 10 the branch
        always fires, but any probability < 1 would write None into the
        child weights — confirm intended behavior.
        """
        if random.random() < self.mutate_chance:
            return x + random.uniform(-self.mutate_impact, self.mutate_impact)

    def create_new_population(self, children, elite, gen):
        """Build generation *gen*: breed *children* offspring from random
        distinct parent pairs of the previous generation, append the elite's
        weights unchanged, and instantiate fresh agents with those weights."""
        self.new_population_weights = {}
        self.new_population = {}
        for i in range(0, children):
            parent1 = random.randint(0, len(self.current_population) - 1)
            parent2 = random.randint(0, len(self.current_population) - 1)
            # Re-draw until the two parents differ.
            while parent2 == parent1:
                parent2 = random.randint(0, len(self.current_population) - 1)
            self.breed(gen - 1, parent1, parent2)
        self.select_elite(elite)
        # Elitism: carry the top performers' weights over unmodified.
        for i in range(0, len(self.elite)):
            self.children_weights[len(self.children_weights)] = self.elite_weights[i]
        self.new_population_weights = self.children_weights
        self.children_weights = {}
        self.new_population = {}
        for i in range(0, len(self.new_population_weights)):
            self.new_population[i] = {"Object": self.obj(i, gen, self.width, self.height), "Gen": gen, "ID": i, "Fitness": 0}
            self.new_population[i]["Object"].brain.set_weights(self.new_population_weights[i])
        self.update_pop_dict()
        self.current_population = self.new_population
|
# coding: utf-8
"""
Lilt REST API
The Lilt REST API enables programmatic access to the full-range of Lilt backend services including: * Training of and translating with interactive, adaptive machine translation * Large-scale translation memory * The Lexicon (a large-scale termbase) * Programmatic control of the Lilt CAT environment * Translation memory synchronization Requests and responses are in JSON format. The REST API only responds to HTTPS / SSL requests. ## Authentication Requests are authenticated via REST API key, which requires the Business plan. Requests are authenticated using [HTTP Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication). Add your REST API key as both the `username` and `password`. For development, you may also pass the REST API key via the `key` query parameter. This is less secure than HTTP Basic Auth, and is not recommended for production use. # noqa: E501
The version of the OpenAPI document: v2.0
Contact: support@lilt.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from lilt.configuration import Configuration
class QARuleMatchesRule(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'category': 'QARuleMatchesRuleCategory',
        'description': 'str',
        'id': 'str',
        'issue_type': 'str',
        'sub_id': 'str',
        'urls': 'list[QARuleMatchesRuleUrls]'
    }

    attribute_map = {
        'category': 'category',
        'description': 'description',
        'id': 'id',
        'issue_type': 'issueType',
        'sub_id': 'subId',
        'urls': 'urls'
    }

    def __init__(self, category=None, description=None, id=None, issue_type=None, sub_id=None, urls=None, local_vars_configuration=None):  # noqa: E501
        """QARuleMatchesRule - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._category = None
        self._description = None
        self._id = None
        self._issue_type = None
        self._sub_id = None
        self._urls = None
        # No polymorphic discriminator for this model.
        self.discriminator = None

        # category/description/id are required; the remaining fields are
        # only assigned when a value was provided.
        self.category = category
        self.description = description
        self.id = id
        if issue_type is not None:
            self.issue_type = issue_type
        if sub_id is not None:
            self.sub_id = sub_id
        if urls is not None:
            self.urls = urls

    @property
    def category(self):
        """Gets the category of this QARuleMatchesRule.  # noqa: E501

        :return: The category of this QARuleMatchesRule.  # noqa: E501
        :rtype: QARuleMatchesRuleCategory
        """
        return self._category

    @category.setter
    def category(self, category):
        """Sets the category of this QARuleMatchesRule.

        :param category: The category of this QARuleMatchesRule.  # noqa: E501
        :type: QARuleMatchesRuleCategory
        """
        if self.local_vars_configuration.client_side_validation and category is None:  # noqa: E501
            raise ValueError("Invalid value for `category`, must not be `None`")  # noqa: E501

        self._category = category

    @property
    def description(self):
        """Gets the description of this QARuleMatchesRule.  # noqa: E501

        :return: The description of this QARuleMatchesRule.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this QARuleMatchesRule.

        :param description: The description of this QARuleMatchesRule.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and description is None:  # noqa: E501
            raise ValueError("Invalid value for `description`, must not be `None`")  # noqa: E501

        self._description = description

    @property
    def id(self):
        """Gets the id of this QARuleMatchesRule.  # noqa: E501

        An rule's identifier that's unique for this language.  # noqa: E501

        :return: The id of this QARuleMatchesRule.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this QARuleMatchesRule.

        An rule's identifier that's unique for this language.  # noqa: E501

        :param id: The id of this QARuleMatchesRule.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and id is None:  # noqa: E501
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501

        self._id = id

    @property
    def issue_type(self):
        """Gets the issue_type of this QARuleMatchesRule.  # noqa: E501

        The Localization Quality Issue Type. This is not defined for all languages, in which case it will always be 'Uncategorized'.  # noqa: E501

        :return: The issue_type of this QARuleMatchesRule.  # noqa: E501
        :rtype: str
        """
        return self._issue_type

    @issue_type.setter
    def issue_type(self, issue_type):
        """Sets the issue_type of this QARuleMatchesRule.

        The Localization Quality Issue Type. This is not defined for all languages, in which case it will always be 'Uncategorized'.  # noqa: E501

        :param issue_type: The issue_type of this QARuleMatchesRule.  # noqa: E501
        :type: str
        """
        self._issue_type = issue_type

    @property
    def sub_id(self):
        """Gets the sub_id of this QARuleMatchesRule.  # noqa: E501

        An optional sub identifier of the rule, used when several rules are grouped.  # noqa: E501

        :return: The sub_id of this QARuleMatchesRule.  # noqa: E501
        :rtype: str
        """
        return self._sub_id

    @sub_id.setter
    def sub_id(self, sub_id):
        """Sets the sub_id of this QARuleMatchesRule.

        An optional sub identifier of the rule, used when several rules are grouped.  # noqa: E501

        :param sub_id: The sub_id of this QARuleMatchesRule.  # noqa: E501
        :type: str
        """
        self._sub_id = sub_id

    @property
    def urls(self):
        """Gets the urls of this QARuleMatchesRule.  # noqa: E501

        An optional array of URLs with a more detailed description of the error.  # noqa: E501

        :return: The urls of this QARuleMatchesRule.  # noqa: E501
        :rtype: list[QARuleMatchesRuleUrls]
        """
        return self._urls

    @urls.setter
    def urls(self, urls):
        """Sets the urls of this QARuleMatchesRule.

        An optional array of URLs with a more detailed description of the error.  # noqa: E501

        :param urls: The urls of this QARuleMatchesRule.  # noqa: E501
        :type: list[QARuleMatchesRuleUrls]
        """
        self._urls = urls

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, QARuleMatchesRule):
            return False

        # Equality is structural, via the serialised dict form.
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, QARuleMatchesRule):
            return True

        return self.to_dict() != other.to_dict()
|
from __future__ import print_function
import numpy as np
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation
from keras.layers.core import Dense, Flatten
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import *
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
import matplotlib.pyplot as plt
#%matplotlib inline
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os
import random
import cv2
from keras.utils import to_categorical
#requests.packages.urllib3.disable_warnings()
import ssl
# Allow the cifar10 download to proceed on machines without a working
# certificate store by disabling HTTPS verification if possible.
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context

# Transfer learning: copy VGG16's layers, freeze them, and append a new
# 10-way softmax head for CIFAR-10.
vgg16_model = keras.applications.vgg16.VGG16()
model_vgg16_custom = Sequential()
for layer in vgg16_model.layers:
    model_vgg16_custom.add(layer)
# NOTE(review): popping from .layers does not reliably detach the last layer
# in later Keras versions — confirm the original 1000-way head is removed.
model_vgg16_custom.layers.pop()
for layer in model_vgg16_custom.layers:
    layer.trainable = False
model_vgg16_custom.add(Dense(10, activation='softmax'))

batch_size = 32
#num_classes = 10
#epochs = 100
data_augmentation = True
num_predictions = 20
nb_epoch = 1
nb_classes = 10
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'

# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
nb_train_samples = x_train.shape[0]
nb_validation_samples = x_test.shape[0]

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, nb_classes)
y_test = keras.utils.to_categorical(y_test, nb_classes)
print('y_train shape:', y_train.shape)
print(y_train.shape[0], 'train classes')
print(y_test.shape[0], 'test classes')

# limit the amount of the data
# train data — keep only 10 random training samples (smoke-test sizing).
ind_train = random.sample(list(range(x_train.shape[0])), 10)
x_train = x_train[ind_train]
y_train = y_train[ind_train]

def resize_data(data):
    # Upscale 32x32 CIFAR images to VGG16's expected 224x224 input.
    data_upscaled = np.zeros((data.shape[0], 224, 224, 3))
    for i, img in enumerate(data):
        large_img = cv2.resize(img, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
        data_upscaled[i] = large_img
    return data_upscaled

# resize train and test data
x_train_resized = resize_data(x_train)
x_test_resized = resize_data(x_test)
print('x_train_resized shape:', x_train_resized.shape)
print('x_test_resized shape:', x_test_resized.shape)

# make explained variable hot-encoded
# NOTE(review): y_train/y_test were already one-hot encoded above, so this
# second to_categorical produces a wrong shape; these *_hot_encoded arrays
# are never used by the training below — confirm and remove.
y_train_hot_encoded = to_categorical(y_train)
y_test_hot_encoded = to_categorical(y_test)
print('y_train_hot_encoded shape:', y_train_hot_encoded.shape)
print('y_test_hot_encoded shape:', y_test_hot_encoded.shape)

# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# NOTE(review): fit() is given the *unresized* x_train while flow() serves
# the resized data; fit is only needed for featurewise options anyway —
# confirm intent.
train_datagen.fit(x_train)
train_generator = train_datagen.flow(x_train_resized, y_train, batch_size=32)
test_datagen = ImageDataGenerator(rescale=1. / 255)
validation_generator = test_datagen.flow(x_test_resized, y_test, batch_size=32)

model_vgg16_custom.compile(Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
# NOTE(review): samples_per_epoch / nb_epoch / nb_val_samples are Keras 1
# argument names; Keras 2 uses steps_per_epoch / epochs / validation_steps.
history = model_vgg16_custom.fit_generator(
    train_generator,
    samples_per_epoch=nb_train_samples,
    nb_epoch=nb_epoch,
    validation_data=validation_generator,
    nb_val_samples=nb_validation_samples)
#, callbacks=[tb])

# list all data in history
print(history.history.keys())
# summarize history for accuracy
# NOTE(review): newer Keras records 'accuracy'/'val_accuracy' rather than
# 'acc'/'val_acc' — confirm against the installed version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('cifar - model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('cifar - model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
|
#!/usr/bin/env python3
"""
Test for ip-cidr-list identifier
"""
import datetime
import unittest
from base_test import PschedTestBase
from pscheduler.limitprocessor.identifier.ipcidrlist import *
# Identifier configuration: match requesters inside these CIDR blocks.
DATA = {
    "cidrs": [
        "10.0.0.0/8",
        "192.168.1.0/24"
    ]
}

# Hint sets for evaluate(): one requester inside the list, one outside.
HINTS_HIT = {
    "requester": "10.0.0.1"
}

HINTS_MISS = {
    "requester": "192.168.100.1"
}
class TestLimitprocessorIdentifierAlways(PschedTestBase):
    """
    Test the Identifier
    """

    def test_data_is_valid(self):
        """Limit Processor / Identifier IP CIDR List / Data Validation"""
        # Valid config passes; a config missing 'cidrs' fails with the
        # schema error; a non-dict input raises ValueError.
        self.assertEqual(data_is_valid(DATA), (True, "OK"))
        self.assertEqual(data_is_valid({}), (False, "At /: 'cidrs' is a required property"))
        self.assertRaises(ValueError, data_is_valid, 123)

    def test_identifier(self):
        """Limit Processor / Identifier IP CIDR List / Identifier"""
        ident = IdentifierIPCIDRList(DATA)
        # 10.0.0.1 falls inside 10.0.0.0/8; 192.168.100.1 is outside both.
        self.assertEqual(ident.evaluate(HINTS_HIT), True)
        self.assertEqual(ident.evaluate(HINTS_MISS), False)


if __name__ == '__main__':
    unittest.main()
|
from myhdl import *
import random
#from myhdl._fixbv import FixedPointFormat as fpf
Bits = 31

def disp_fix(x_i):
    # Debug helper: show a fixbv value in several representations.
    # NOTE: Python 2 print statement — this module is Python 2 only.
    iW = x_i._W  # fixed-point format tuple (total bits, fraction bits, ...)
    print float(x_i), int(x_i), repr(x_i), hex(x_i), bin(x_i, iW[0])
# Two fixed-point operands in [-1024, 1024) with 1e-6 resolution.
# NOTE(review): `fixbv` must come from `from myhdl import *` of a patched
# MyHDL build — the fixbv import above is commented out; confirm.
x = (fixbv(3.1415926535897932, min = -2**10, max=2**10, res=1e-6))
#disp_fix(x)
y = (fixbv(510.5, min = -2**10, max=2**10, res=1e-6))
#disp_fix(y)
iW = x._W
ww = (x._nrbits,iW[1])
#print x._nrbits
#x = fixbv(128.141592)[ww]
#y = fixbv(253.5)[ww]
# Operand signals sized to the fixbv word length; sum/difference need one
# extra bit, and the product needs double the width.
x_sig = Signal(intbv(0, min = -2**(x._nrbits-1), max = 2**(x._nrbits-1) ))
y_sig = Signal(intbv(0, min = -2**(x._nrbits-1), max = 2**(x._nrbits-1) ))
sum_sig = Signal(intbv(0, min = -2**(x._nrbits), max = 2**(x._nrbits) ))
sub_sig = Signal(intbv(0, min = -2**(x._nrbits), max = 2**(x._nrbits) ))
prod_sig = Signal(intbv(0, min = -2**(2*(x._nrbits)), max = 2**(2*(x._nrbits)) ))
#prod_sig = Signal(intbv(0)[2*ww[0]:])
# Clock, per-operation start strobes, and done flags.
clk = Signal(bool(0))
do_add = Signal(bool(0))
do_mul = Signal(bool(0))
do_sub = Signal(bool(0))
done_add = Signal(bool(0))
done_sub = Signal(bool(0))
done_mul = Signal(bool(0))
def fixbv_sub(clk, do_sub, x_sig, y_sig, sub_sig, done_sub):
    """Registered subtractor: while do_sub is asserted, sub_sig <= x - y on
    each rising clock edge; otherwise the result is cleared and done_sub set."""
    @always(clk.posedge)
    def sub_rtl():
        if (do_sub == 1):
            done_sub.next = 0
            sub_sig.next = x_sig - y_sig
        else:
            done_sub.next = 1
            sub_sig.next = 0
    return sub_rtl
def fixbv_add(clk, do_add, x_sig, y_sig, sum_sig, done_add):
    """Registered adder: while do_add is asserted, sum_sig <= x + y on each
    rising clock edge; otherwise the result is cleared and done_add set."""
    @always(clk.posedge)
    def add_rtl():
        if (do_add == 1):
            done_add.next = 0
            sum_sig.next = x_sig + y_sig
        else:
            done_add.next = 1
            sum_sig.next = 0
    return add_rtl
def fixbv_mul(clk, do_mul, x_sig, y_sig, prod_sig, done_mul):
    """Registered multiplier: while do_mul is asserted, prod_sig <= x * y on
    each rising clock edge; otherwise the result is cleared and done_mul set."""
    # Renamed the inner generator from add_rtl to mul_rtl — it was a
    # copy-paste from fixbv_add and produced misleading names in traces
    # and converted HDL.
    @always(clk.posedge)
    def mul_rtl():
        if (do_mul == 1):
            done_mul.next = 0
            prod_sig.next = x_sig * y_sig
        else:
            done_mul.next = 1
            prod_sig.next = 0
    return mul_rtl
def convert():
    """Emit VHDL and Verilog for all three fixed-point operators using the
    module-level signals as the port interface."""
    toVHDL(fixbv_add, clk, do_add, x_sig, y_sig, sum_sig, done_add)
    toVerilog(fixbv_add, clk, do_add, x_sig, y_sig, sum_sig, done_add)
    toVHDL(fixbv_sub, clk, do_sub, x_sig, y_sig, sub_sig, done_sub)
    toVerilog(fixbv_sub, clk, do_sub, x_sig, y_sig, sub_sig, done_sub)
    toVHDL(fixbv_mul, clk, do_mul, x_sig, y_sig, prod_sig, done_mul)
    toVerilog(fixbv_mul, clk, do_mul, x_sig, y_sig, prod_sig, done_mul)
def fixbv_top(clk, do_add, x_sig, y_sig, sum_sig, done_add, do_mul, prod_sig, done_mul, do_sub, sub_sig, done_sub):
    """Top-level wrapper instantiating adder, subtractor and multiplier on a
    shared pair of operand signals (for single-entity HDL conversion)."""
    dut_fixbv_add = fixbv_add(clk, do_add, x_sig, y_sig, sum_sig, done_add)
    dut_fixbv_sub = fixbv_sub( clk, do_sub, x_sig, y_sig, sub_sig, done_sub)
    dut_fixbv_mul = fixbv_mul(clk, do_mul, x_sig, y_sig, prod_sig, done_mul)
    return dut_fixbv_add, dut_fixbv_sub, dut_fixbv_mul
def tb():
    """Testbench: drive random operand pairs through the add/sub/mul units
    and compare against fixbv software arithmetic.

    NOTE: contains Python 2 print statements; Python 2 only.
    """
    dut_fixbv_add = fixbv_add(clk, do_add, x_sig, y_sig, sum_sig, done_add)
    dut_fixbv_sub = fixbv_sub( clk, do_sub, x_sig, y_sig, sub_sig, done_sub)
    dut_fixbv_mul = fixbv_mul(clk, do_mul, x_sig, y_sig, prod_sig, done_mul)

    @always(delay(10))
    def clkgen():
        # Free-running clock, 20 time-unit period.
        clk.next = not clk

    @instance
    def stimulus():
        # Let the clock run for a few cycles before stimulus.
        for i in range(10):
            print( "%3d ") % (now())
            yield clk.posedge
        for j in range(512):
            # Random operands as 31-bit fixbv with 10 fractional bits.
            u = random.uniform(-512.0,512.0)
            v = random.uniform(-512.0,512.0)
            x = fixbv(u)[31,10]
            y = fixbv(v)[31,10]
            '''setting the values of x & y'''
            print( "%3d x %s y %s ") % (now(), bin(x), bin(y))
            x_sig.next = int(x)
            yield clk.posedge
            y_sig.next = int(y)
            yield clk.posedge
            # Pulse do_add for one cycle, then show the registered sum.
            do_add.next = 1
            yield clk.posedge
            do_add.next = 0
            yield clk.posedge
            '''x + y is done'''
            print( "%3d sum %s ") % (now(), bin(sum_sig))
            z = x + y
            print 'x + y'
            disp_fix(x)
            disp_fix(y)
            disp_fix(z)
            # Pulse do_sub for one cycle, then show the registered difference.
            do_sub.next = 1
            yield clk.posedge
            do_sub.next = 0
            yield clk.posedge
            '''x - y is done'''
            print( "%3d sub %s ") % (now(), bin(sub_sig))
            z = x - y
            print 'x - y'
            disp_fix(x)
            disp_fix(y)
            disp_fix(z)
            # Pulse do_mul for one cycle, then show the registered product.
            do_mul.next = 1
            yield clk.posedge
            do_mul.next = 0
            yield clk.posedge
            '''x * y is done'''
            print( "%3d prod %s ") % (now(), bin(prod_sig))
            z = x * y
            print 'x * y'
            disp_fix(x)
            disp_fix(y)
            disp_fix(z)
            # NOTE(review): StopSimulation is raised inside the first loop
            # iteration, so only one operand pair is ever exercised — confirm.
            raise StopSimulation
    return clkgen, stimulus, dut_fixbv_add, dut_fixbv_sub, dut_fixbv_mul
def test_fixbv():
    """Trace signals and run the testbench simulation to completion."""
    '''
    print 'x + y'
    z = x + y
    disp_fix(x)
    disp_fix(y)
    disp_fix(z)
    #z1 = fixbv(z)[ww]
    z = x - y
    print 'x - y'
    disp_fix(z)
    print 'x * y'
    z = x * y
    disp_fix(z)
    #z1 = fixbv(z)[ww]
    '''
    tb_fsm = traceSignals(tb)
    sim = Simulation(tb_fsm)
    sim.run()
#convert()
test_fixbv()  # run the simulation testbench when this script is executed
#toVHDL(fixbv_top, clk, do_add, x_sig, y_sig, sum_sig, done_add, do_mul, prod_sig, done_mul, do_sub, sub_sig, done_sub )
#toVerilog(fixbv_top, clk, do_add, x_sig, y_sig, sum_sig, done_add, do_mul, prod_sig, done_mul, do_sub, sub_sig, done_sub )
|
# import the Flask class from the flask module
from flask import Flask, render_template, redirect, url_for, request
# import datetime from the datetime module
from datetime import datetime
# import flask_sqlalchemy for databases
from flask_sqlalchemy import SQLAlchemy
# import forms from the wtforms
from wtforms import Form, BooleanField, StringField, PasswordField, validators
# import flash from the flask.helpers
from flask.helpers import flash
# import LoginManager from the flask_login
from flask_login import LoginManager, UserMixin, current_user, login_user, logout_user
# create the application object
app = Flask(__name__, template_folder='templates')
# configuring databases and the relative path
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///users.db'
db = SQLAlchemy(app)
# login
login_manager = LoginManager()
login_manager.init_app(app)
# NOTE(review): hard-coded secret key checked into source — load it from
# the environment or a config file in production.
app.config['SECRET_KEY']='619619'
# create table with fields
# create table with fields
class User(db.Model, UserMixin):
    """Account record used by Flask-Login.

    NOTE(review): passwords are stored in plaintext (String(20)); they
    should be hashed (e.g. werkzeug.security) before storage.
    """
    id = db.Column(db.Integer, primary_key = True)
    username = db.Column(db.String(20), unique = True)
    password = db.Column(db.String(20))
    email = db.Column(db.String(50))
    sign_up_date = db.Column(db.DateTime, default = datetime.utcnow)
    def __repr__(self):
        # NOTE(review): tag says 'Users' although the class is named User.
        return '<Users %r>' % self.id
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: map a session-stored id back to a User row."""
    return User.query.get(int(user_id))
# use decorators to link the function to a url
@app.route('/')
def home():
    """Landing page."""
    return render_template('welcome.html') # render a template
# Route for handling the login page logic
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in.

    GET renders the login form; POST looks the user up by username,
    verifies the password and, on success, starts a session and
    redirects to /profile. On failure it re-renders the form with an
    error message.

    Fixes: the old bare try/except swallowed every failure (including
    unrelated bugs), and the password was never checked at all — any
    existing username logged in regardless of password.
    NOTE(review): passwords are stored/compared in plaintext; hash them.
    """
    error = None
    if request.method == 'POST':
        user_username = request.form['username']
        user_password = request.form['password']
        user = User.query.filter_by(username=user_username).first()
        if user is None or user.password != user_password:
            error = "The user does not exist"
        else:
            login_user(user)
            return redirect('/profile')
    return render_template('login.html', error=error)
# Route for handling the signup page logic
@app.route('/signup', methods = ['POST', 'GET'])
def signup():
error = None
if request.method == 'POST':
user_username = request.form['username']
user_password = request.form['password']
user_email = request.form['email']
new_user = User(username=user_username,password=user_password,email=user_email)
try:
db.session.add(new_user)
db.session.commit()
return redirect('/login')
except:
if not user_username:
error = 'Username is required.'
elif not user_password:
error = 'Password is required.'
elif not user_email:
error = 'Email address is required.'
else:
error = "This username is alredy taken"
return render_template('signup.html', error=error)
@app.route('/profile', methods=['POST', 'GET'])
def profile():
    """Profile page.

    NOTE(review): not protected by @login_required — anonymous visitors
    can reach it; confirm whether that is intended.
    """
    return render_template("profile.html")
@app.route('/logout')
def logout():
    """End the current session and return to the login page."""
    logout_user()
    return redirect(url_for('login'))
# start the server with the 'run()' method
if __name__ == '__main__':
    app.run(debug=True)  # NOTE(review): disable debug mode in production
|
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
import cv2
import numpy as np
def crop_edges(img):
    """
    Crop uniform black borders from a full Celigo image.

    ``img`` must be a grayscale (single-channel) image / 2-D array.

    Returns ``[cropped_array, [left, top, right, bottom]]`` where the
    second element counts how many columns/rows were trimmed from each
    side, so callers can map coordinates back to the original image.

    Fix: the crop slice previously used the last non-black row/column
    index as an *exclusive* end, dropping one valid row and one valid
    column from the result; the slice now ends at index + 1.

    NOTE(review): an entirely-black image makes the scans run past the
    array bounds (IndexError) — unchanged from the original behavior.
    """
    imarray = np.array(img)
    pixel_threshold = 70  # anything at or below this counts as "black"
    top = 0
    bottom = imarray.shape[0] - 1
    left = 0
    right = imarray.shape[1] - 1
    while np.max(imarray[top]) <= pixel_threshold:
        top += 1
    while np.max(imarray[bottom]) <= pixel_threshold:
        bottom -= 1
    while np.max(imarray.T[left]) <= pixel_threshold:
        left += 1
    while np.max(imarray.T[right]) <= pixel_threshold:
        right -= 1
    # +1 because bottom/right point at the last row/column to KEEP.
    slidedImarray = imarray[top:bottom + 1, left:right + 1]
    indents = [left,
               top,
               imarray.shape[1] - 1 - right,
               imarray.shape[0] - 1 - bottom]
    return [slidedImarray, indents]
# --- Batch inference: crop RetinaNet detections out of Celigo images -------
# NOTE(review): all paths are hard-coded to one machine; consider CLI args.
pld_model_path = '/home/nyscf/Documents/sarita/cell-classifier/preprocessing/brodie/multi_class_v1-1_epoch12.h5'
pld_model = models.load_model(pld_model_path, backbone_name='resnet50')
imagelist = [i.strip() for i in open("/home/nyscf/Documents/sarita/cell-classifier/preprocessing/brodie/MMR0028_copy_102_104_106_7-15-2019_file_names_v1.txt")]
c = 0
t = 0
for i in imagelist:
    print ("Reading " + i.split("/")[-1])
    file_name = i.split("/")[-1]
    prefix = i.split("/")[:-1]
    img_path = "/".join(prefix) + "/" + file_name.split("__")[-1]
    img = cv2.imread(img_path, 0)  # 0 = read as grayscale
    img, base_coords = crop_edges(img)
    draw = img.copy()
    draw = cv2.cvtColor(draw, cv2.COLOR_GRAY2RGB)
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    img = preprocess_image(img)
    img, scale = resize_image(img)
    boxes, scores, labels = pld_model.predict_on_batch((np.expand_dims(img, axis=0)))
    boxes /= scale  # map boxes back to original (cropped) image coordinates
    boxes = boxes.astype(int)
    for box, score, label in zip(boxes[0], scores[0], labels[0]):
        # Assumes detections arrive sorted by score (keras-retinanet
        # convention — TODO confirm), so the first weak one ends the loop.
        if score < 0.5:
            break
        print("Found something, saving..")
        x1, y1, x2, y2 = box
        # Fix: the three label branches (0, 1, 2) were byte-identical
        # copy-paste; collapsed into a single membership test.
        if label in (0, 1, 2):
            d2 = draw.copy()
            d2 = d2[y1:y2, x1:x2]
            cv2.imwrite("/home/nyscf/Desktop/Training_Subets/MMR0028_copy_102_104_106_7-15-2019/" + file_name.split(".")[0] + str(x1) + "--" +
                        str(y1) + "--" + str(x2) + "--" + str(y2) + ".jpg", d2)
|
#Grade Equivalent
def computegrade(score):
    """Map a normalized score in [0.0, 1.0] to a letter grade.

    >= 0.9 -> "A", >= 0.8 -> "B", >= 0.7 -> "C", >= 0.6 -> "D",
    anything lower -> "F".

    Fix: the body previously compared the *global* variable ``s``
    instead of the ``score`` parameter — it only worked because the
    caller happened to pass a global of that name.
    """
    if score >= 0.9:
        return "A"
    if score >= 0.8:
        return "B"
    if score >= 0.7:
        return "C"
    if score >= 0.6:
        return "D"
    return "F"
#Asks for user input
score = input("Enter score: ")
try:
    s = float(score)
except:
    # Non-numeric input falls through as an out-of-range sentinel.
    s = -1
if (s >= 0.0 and s <= 1.0):
    print(computegrade(s))
else:
    print("Error, input is out of range or is not a numeric input")
|
# Salary raise: 10% for salaries of 1250 or more, 15% otherwise.
# Fix: typo in the user-facing message ('salrio' -> 'salário').
salario = float(input('Qual o teu salario: '))
print('Seu salário é de', salario * 1.1 if salario >= 1250 else salario * 1.15)
from pathlib import Path
from subprocess import run
import os
from tqdm import tqdm
import shutil
from psycho.psycho import Psycho
from multiprocessing import Pool, cpu_count
# Environment-driven configuration; the literal string "None" disables
# the psychoacoustic conversion step below.
PHI = int(os.environ["PHI"]) if os.environ["PHI"] != "None" else None
NUMJOBS = int(os.environ["NUMJOBS"])
def process_entry(entry):
    """Materialize one wav.scp entry as a .wav file and return the
    rewritten scp line pointing at it.

    The entry is ``<utterance> <command producing wav on stdout>|``;
    the command is run through the shell and its output redirected to
    ``<dataset_data_dir>/<utterance>.wav``. If PHI is set, the wav is
    additionally converted through Psycho using per-utterance
    threshold files.

    NOTE(review): reads the module-global ``dataset_data_dir`` set in
    the main loop below (relies on fork-style Pool workers inheriting
    it). Uses shell=True — acceptable only because entries come from
    trusted Kaldi-generated files.
    """
    # assert that wav is send to stdout
    assert entry.endswith('|')
    # parse entry
    utterance = entry.split(' ')[0] # extract utterance
    wav_cmd = entry[len(utterance)+1:] # extract path to wav
    # convert wav
    wav_path = dataset_data_dir.joinpath(utterance).with_suffix(".wav")
    run(f'{wav_cmd[:-1]} > {wav_path}', shell=True)
    # convert
    if PHI is not None:
        threshs_file = Path(f'/root/WSJ_threshs/{utterance}.csv')
        out_file = Path(wav_path)
        in_file = wav_path.with_suffix('.original.wav')
        wav_path.rename(in_file)
        Psycho(PHI).convert_wav(in_file, threshs_file, out_file)
    # return updated entry
    return f"{utterance} {wav_path} \n"
if __name__ == "__main__":
    print(f'PREPARE TRAINING DATA')
    print(f"[+] parsed arguments")
    print(f" -> phi : {PHI}")
    print(f" -> numjobs : {NUMJOBS}")
    # first, get paths of the datasets wav lists
    # -> for each dataset (e.g., test_dev93, train_si284, ...),
    # speech files are accessed via path stored in 'wav.scp'
    # -> skip 'local/*' as these are not further used
    data_dir = Path('data')
    datasets = sorted([ path for path in data_dir.glob('**/*.scp')
                        if not path.match('local/data/*.scp') ])
    for dataset in datasets:
        print(f"[+] {dataset}", end=" ")
        dataset_data_dir = dataset.parent.joinpath('data')
        # Start from a clean per-dataset output directory.
        if dataset_data_dir.is_dir(): shutil.rmtree(dataset_data_dir)
        dataset_data_dir.mkdir()
        entries = [ entry.strip() for entry in dataset.read_text().splitlines() if entry.strip() ]
        print(f'({len(entries)} wavs) ')
        # Fan the conversion out over NUMJOBS worker processes.
        with Pool(NUMJOBS) as p:
            updated_entries = [ e for e in tqdm(p.imap(process_entry, entries)) ]
        # update wav.scp
        dataset.write_text("".join(updated_entries))
|
/Users/Di/anaconda/lib/python2.7/sre_compile.py |
from django import forms

class ReviewsForm(forms.Form):
    """Review submission form: free-text review plus author name/email."""
    review = forms.CharField(required=True)
    name = forms.CharField(required=True, max_length=14)
    email = forms.EmailField(required=True)
import unittest
import numpy as np
from multiatlas.rohlfing import multi_label_segmentation
class TestRohlfing(unittest.TestCase):
    """Unit test for the Rohlfing (2004) multi-atlas label fusion."""

    def test_multi_label_segmentation(self):
        """Tests the implementation of rohlfing (2004).

        Fix: removed a leftover ``import ipdb; ipdb.set_trace()``
        debugger breakpoint that would hang any non-interactive run.
        """
        train_labels = [[0,1,0,1,1,0,1,2,3,3],
                        [1,1,0,1,1,0,1,2,2,3],
                        [1,1,1,2,2,2,3,3,2,3]]
        # Voting scheme
        segmentation, cmatrix = multi_label_segmentation(train_labels)
        gt = np.array([1, 1, 0, 1, 1, 0, 1, 2, 2, 3])
        np.testing.assert_array_equal(segmentation, gt)
if __name__ == '__main__':
    # Ad-hoc manual experiment (kept separate from the unittest above).
    train_labels = [[0,1,0,1,1,0,1,2,3,3],
                    [1,1,0,1,1,0,1,2,2,3],
                    [1,1,1,2,2,2,3,3,2,3]]
    # Voting skeme
    #import ipdb; ipdb.set_trace()
    #segmentation, cmatrix = multi_label_segmentation(train_labels)
    dataImageA = np.r_[0, 1, 3, 3, 0, 4, 13, 13, 0, 0]
    dataImageB = np.r_[1, 1, 2, 4, 0, 4, 5, 12, 1, 0]
    dataImageC = np.r_[0, 2, 2, 3, 0, 5, 5, 13, 8, 0]
    # Expected fusions; presumably -1 marks voxels with no agreement —
    # confirm against multi_label_segmentation's contract.
    combinationABC = np.r_[0, 1, 2, 3, 0, 4, 5, 13, -1, 0]
    combinationAB = np.r_[-1, 1, -1, -1, 0, 4, -1, -1, -1, 0]
    segmentation, confusion_matrix = multi_label_segmentation([dataImageA,
                                                               dataImageB])
    np.testing.assert_equal(combinationAB, segmentation)
    segmentation, confusion_matrix = multi_label_segmentation(
        np.array([dataImageA, dataImageB, dataImageC])
    )
    np.testing.assert_equal(combinationABC, segmentation)
|
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class Thread(models.Model):
    """A discussion thread (Japanese verbose names: 'スレッド')."""
    title = models.CharField("タイトル", max_length=200, blank=False)
    message = models.TextField("メッセージ", blank=False)
    pub_date = models.DateTimeField("作成日時", auto_now_add=True, editable=False)
    def __str__(self):
        # NOTE(review): returns the message body, not the title — confirm intended.
        return self.message
    class Meta:
        verbose_name = "スレッド"
        verbose_name_plural = "スレッド"
class Comment(models.Model):
    """A user comment attached to a Thread.

    NOTE(review): ForeignKey without on_delete is only valid on
    Django < 2.0; confirm the project's Django version.
    """
    user = models.ForeignKey(User, verbose_name="ユーザ")
    thread = models.ForeignKey(Thread, verbose_name="スレッド")
    message = models.CharField("メッセージ", max_length=500, blank=False)
    pub_date = models.DateTimeField("投稿日時", auto_now_add=True, editable=False)
    def __str__(self):
        return self.message
    class Meta:
        verbose_name = "コメント"
        verbose_name_plural = "コメント"
|
#!/usr/bin/env python3
#
# Run a test. Just the test spec is provided on stdin.
#
import icmperror
import pscheduler
import re
# Load the test spec from stdin; exits with an error on malformed JSON.
input = pscheduler.json_load(exit_on_error=True);
log = pscheduler.Log(prefix='tracepath', quiet=True)
# TODO: Validate the input
# TODO: Verify can-run
participant = input['participant']
log.debug("Participant %d", participant)
# tracepath is a single-participant test; anything else is a caller error.
if participant != 0:
    pscheduler.succeed_json( {
        'succeeded': False,
        'diags': None,
        'error': "Invalid participant %d" % participant,
        'result': None
    } )
spec = input['test']['spec']
#
# Figure out how to invoke the program
#
argv = []
try:
    ipversion = spec['ip-version']
except KeyError:
    # Infer the IP version from the destination address when unspecified.
    (ipversion, ip) = pscheduler.ip_addr_version(spec['dest'])
if ipversion is None or ipversion == 4:
    tracepath = 'tracepath'
    localhost = '127.0.0.1'
else:
    tracepath = 'tracepath6'
    localhost = '::1'
argv.append(tracepath)
# Always run without resolving IPs; we'll do that in parallel after it finishes.
argv.append('-n')
try:
    length = spec['length']
    argv.append('-l')
    argv.append(str(length))
except KeyError:
    pass
# At some point, tracepath changed the way its command line works to
# be more compatible with traceroute's options.  Since it doesn't
# provide a way to determine which scheme is in use, the only way to
# figure it out is to run the program, expect a return code of 255 and
# figure out which style of invocation is expects.  That's quality
# with a capital "K."
#
# Earlier:  tracepath [-n] [-l <len>] <destination>[/<port>]
# Later:    tracepath [-n] [-l <len>] [-b] [-p port] <destination>
# TODO: Investigate whether we care about the -b switch.
try:
    start_at = input['schedule']['start']
    log.debug("Sleeping until %s", start_at)
    pscheduler.sleep_until(start_at)
    log.debug("Starting")
except KeyError:
    pscheduler.fail("Unable to find start time in input")
# Probe run with no destination: exit 255 plus a usage message tells us
# which invocation style this tracepath expects.
status, stdout, stderr = pscheduler.run_program( [tracepath], timeout=2)
if status != 255 or '<destination>' not in stderr:
    pscheduler.succeed_json( {
        'succeeded': False,
        'diags': None,
        'error': "Unable to determine version of tracepath installed.",
        'result': None
    } )
if '[-p port]' in stderr:
    traceroute_compatible = True
else:
    traceroute_compatible = False
dest = spec['dest']
try:
    port = str(spec['dest-port'])
    if traceroute_compatible:
        argv.append('-p')
        argv.append(port)
    else:
        # Older style appends the port to the destination.
        dest += '/' + str(port)
except KeyError:
    pass
argv.append(dest)
# Force all args to be strings
argv = [str(x) for x in argv]
#
# Run the test
#
# 94 seconds is the worst case plus a second of slop.
status, stdout, stderr = pscheduler.run_program( argv, timeout = 94 )
# diags records the exact command line plus both output streams.
diags = "\n".join([ " ".join(argv), "", stdout, "", stderr ])
if status != 0:
    pscheduler.succeed_json( {
        'succeeded': False,
        'diags': diags,
        'error': stderr,
        'result': None
    } )
#
# Dissect the results
#
# Fixes: regexes are now raw strings — '\d' and '\w' in plain literals
# are invalid escape sequences on Python 3 (DeprecationWarning, later
# SyntaxWarning); removed trailing semicolons and two dead variables
# (`as_`, read here but never used — the AS lookup further down
# re-reads the spec — and `last_hop`, never referenced).
traced_hops = []
ips = []
ttl_re = re.compile(r'^(\d*)\??:')
no_reply_re = re.compile(r'no reply')
reached_re = re.compile(r'reached')  # NOTE(review): currently unused
rtt_re = re.compile(r'([0-9]+\.[0-9]+)ms')
mtu_re = re.compile(r'pmtu ([0-9]+)')
error_re = re.compile(r'!(\w+)$')
# Carries the most recent path MTU forward between hops.
path_mtu = None
# Parse each tracepath output line into a hop dict.
for line in stdout.split('\n'):
    # Collapse runs of whitespace so positional splitting is reliable.
    line = re.sub('\s+', ' ', line).strip()
    matches = ttl_re.match(line)
    if matches is None:
        continue
    ttl = int(matches.group(1))
    log.debug("LINE %s", line)
    hop = {}
    # Repeats of a hop usually contain more info for first, but replace any repeat info
    if ttl == len(traced_hops):
        hop = traced_hops.pop()
    # No reply means no results
    if no_reply_re.search(line):
        traced_hops.append(hop)
        continue
    # IP. We forced tracepath to behave this way.
    line_parts = line.split(' ')
    ip = line_parts[1]
    if ip == '[LOCALHOST]':
        ip = localhost
    hop['ip'] = ip
    ips.append(ip)
    log.debug(" IP %s", ip)
    # RTT (ms) — stored as an ISO 8601 duration string.
    matches = rtt_re.search(line)
    if matches:
        rtt = float(matches.group(1)) / 1000.0
        hop['rtt'] = 'PT%fS' % rtt
    # Path MTU (bytes) - update if changes, otherwise carry over from prev hop
    matches = mtu_re.search(line)
    if matches:
        path_mtu = int(matches.group(1))
    if path_mtu is not None:
        hop['mtu'] = path_mtu
    # Search for errors
    matches = error_re.search(line)
    if matches:
        hop['error'] = icmperror.translate(matches.group(1))
    traced_hops.append(hop)
# If we're doing hostnames, bulk-resolve them.
try:
    hostnames = spec['hostnames']
except KeyError:
    hostnames = True
if hostnames and len(ips) > 0:
    log.debug("Reverse-resolving IPs: %s", str(ips))
    revmap = pscheduler.dns_bulk_resolve(ips, reverse=True, threads=len(traced_hops))
    for hop in traced_hops:
        try:
            ip = hop['ip']
            if ip in revmap and revmap[ip] is not None:
                hop.update({ 'hostname': revmap[ip] })
        except KeyError:
            # No IP is fine.
            pass
# Figure out ASes if we're doing that
try:
    do_ases = spec['as']
except KeyError:
    do_ases = True
if do_ases:
    ases = pscheduler.as_bulk_resolve(ips, threads=len(ips))
    for index, hop in enumerate(traced_hops):
        try:
            hop_as = ases[hop['ip']]
            if hop_as is None:
                continue
            (asn, owner) = hop_as
            if asn is None:
                continue
            result = { 'number': asn }
            if owner is not None:
                result['owner'] = owner
            traced_hops[index]['as'] = result
        except KeyError:
            # Hops without an IP have no AS to attach.
            pass
# Spit out the results
pscheduler.succeed_json( {
    'succeeded': True,
    'diags': diags,
    'error': None,
    'result': {
        'schema': 1,
        'succeeded': True,
        'paths': [
            traced_hops
        ]
    }
} )
|
# Reads company codes and employee counts until code 0, tracking the
# largest company (by employee count) in each of four size categories.
# Fix: the four per-category branches were identical copy-paste;
# replaced with a single dict keyed by category. I/O is unchanged.
cod_emp = 0
N_func = 0
'''========================================================'''
# category -> [largest employee count seen, code of that company]
maiores = {
    "grande": [0, 0],
    "media": [0, 0],
    "pequena": [0, 0],
    "micro": [0, 0],
}
'''========================================================'''
print("Para parar a verificação, digite o código da empresa como 0")
cod_emp = int(input("Digite o código da empresa: "))
while cod_emp != 0:
    categoria = input("digite a categoria da sua empresa(grande, media, pequena ou micro): ")
    if categoria in maiores:
        N_func = int(input("Digite o Nº de funcionários da empresa: "))
        # >= keeps the latest company on ties, matching the original checks.
        if N_func >= maiores[categoria][0]:
            maiores[categoria] = [N_func, cod_emp]
    else:
        print("categoria inválida")
    cod_emp = int(input("Digite o código da empresa: "))
print("O Codigo e o Nº de funcionarios da maior empresa de cada categoria é: ")
print("GRANDE: ", maiores["grande"][1], maiores["grande"][0])
print("MEDIA: ", maiores["media"][1], maiores["media"][0])
print("PEQUENA: ", maiores["pequena"][1], maiores["pequena"][0])
print("MICRO: ", maiores["micro"][1], maiores["micro"][0])
# Demonstrates the classic aliasing pitfall: [[]] * 3 creates three
# references to the SAME inner list until an element is reassigned.
print("Creating list of 3 list's")
l = [[]] * 3
print(l)
l[0] = 1
print(l)
l[1] = [1, 2, 3, 4]
print(l)
l[2] = 3
print(l)
print("Creating list with 3 places")
# [int] * 3 stores the type object itself three times — placeholders only.
l2 = [int]*3
print(l2)
l2[0] = 1
print(l2)
l2[1] = 1
print(l2)
l2[2] = 1
print(l2)
|
# http://pise.info/algo/enonces5.htm
# Exercice 5.2
"""
Ecrire un algorithme qui demande un nombre compris entre 10 et 20,
jusqu’à ce que la réponse convienne. En cas de réponse supérieure à 20,
on fera apparaître un message :
« Plus petit ! », et inversement, « Plus grand ! » si le nombre est inférieur à 10.
"""
"""
Corection en psedo-code
Variable N en Entier
Debut
N ← 0
Ecrire "Entrez un nombre entre 10 et 20"
TantQue N < 10 ou N > 20
Lire N
Si N < 10 Alors
Ecrire "Plus grand !"
SinonSi N > 20 Alors
Ecrire "Plus petit !"
FinSi
FinTantQue
Fin
"""
###ma décision###
# Fixes vs. the previous version: the loop condition (>= 10 or <= 20)
# was always true and relied on break; 10 and 20 were wrongly rejected;
# and the messages now match the exercise statement above.
nombre_choix = int(input("Saisissez un nombre: "))
while nombre_choix < 10 or nombre_choix > 20:
    if nombre_choix < 10:
        print("Plus grand !")
    else:
        print("Plus petit !")
    nombre_choix = int(input("Saisissez un nombre: "))
print("bon")
from django.urls import path
from . import views

# URL routes: index page, plus a list view and a slug-addressed detail
# view for each of bands, players, venues, events and lineups.
urlpatterns = [
    path('', views.index, name='index'),
    path('bands/', views.BandListView.as_view(), name='bands'),
    path('bands/<slug:slug>', views.BandDetailView.as_view(), name='band-detail'),
    path('players/', views.PlayerListView.as_view(), name='players'),
    path('players/<slug:slug>', views.PlayerDetailView.as_view(), name='player-detail'),
    path('venues/', views.VenueListView.as_view(), name='venues'),
    path('venues/<slug:slug>', views.VenueDetailView.as_view(), name='venue-detail'),
    path('events/', views.EventListView.as_view(), name='events'),
    path('events/<slug:slug>', views.EventDetailView.as_view(), name='event-detail'),
    path('lineups/', views.LineupListView.as_view(), name='lineups'),
    path('lineups/<slug:slug>', views.LineupDetailView.as_view(), name='lineup-detail'),
]
|
import numpy as np
import random
import os
from tqdm import tqdm
from PIL import Image
import torch
import torch.nn as nn
from torch.nn.modules import loss
from torch import optim
from torchvision import transforms
def train(model, device, train_loader):
    """Run one GAN training epoch.

    Returns the dataset-weighted averages of the model's discriminator
    and generator losses as a ``(train_loss_D, train_loss_G)`` tuple.

    Improvements: dropped the unused ``enumerate`` batch index and
    hoisted the dataset-size lookup out of the loop.
    """
    model.train()
    model.to(device)
    train_loss_D, train_loss_G = 0.0, 0.0
    total = len(train_loader.dataset)
    for batch_data in tqdm(train_loader):
        images = batch_data.images.to(device)
        conditions = batch_data.conditions.to(device)
        model.optimize_parameters(images, conditions)
        # Weight each batch's loss by its share of the dataset.
        ratio = len(images) / total
        train_loss_D += model.losses["loss_D"] * ratio
        train_loss_G += model.losses["loss_G"] * ratio
    return train_loss_D, train_loss_G
@torch.no_grad()
def test(model, device, test_loader):
    """Evaluate the GAN on ``test_loader`` without gradient tracking.

    Returns the dataset-weighted averages of the discriminator and
    generator losses as a ``(test_loss_D, test_loss_G)`` tuple.

    Improvements: dropped the unused ``enumerate`` batch index and
    hoisted the dataset-size lookup out of the loop.
    """
    model.eval()
    model.to(device)
    test_loss_D, test_loss_G = 0.0, 0.0
    total = len(test_loader.dataset)
    for batch_data in test_loader:
        images = batch_data.images.to(device)
        conditions = batch_data.conditions.to(device)
        model.evaluate(images, conditions)
        # Weight each batch's loss by its share of the dataset.
        ratio = len(images) / total
        test_loss_D += model.losses["loss_D"] * ratio
        test_loss_G += model.losses["loss_G"] * ratio
    return test_loss_D, test_loss_G
|
from app import app
import sys
from termcolor import colored, cprint

# Entry point: announce startup in color, then run the Flask dev server.
if __name__ == "__main__":
    cprint('CPILOT RUNNING...', 'green', 'on_red')
    #print(colored('CPILOT RUNNING...', 'green'))
    app.run()
class Solution(object):
    """LeetCode 301 — BFS solution.

    Explore strings level by level, each level removing one more
    parenthesis; the first level containing valid strings holds every
    minimum-removal answer.
    """

    def removeInvalidParentheses(self, s):
        """Return all valid strings reachable with the fewest removals."""
        from collections import deque
        seen = {s}
        frontier = deque([s])
        found = False
        results = []
        while frontier:
            current = frontier.popleft()
            if self.isValid(current):
                found = True
                results.append(current)
            # Once a valid level is found, finish it but expand no further.
            if found:
                continue
            for idx in range(len(current)):
                if current[idx] != '(' and current[idx] != ')':
                    continue
                shorter = current[:idx] + current[idx + 1:]
                if shorter not in seen:
                    seen.add(shorter)
                    frontier.append(shorter)
        return results

    def isValid(self, s):
        """True iff parentheses balance and the balance never goes negative."""
        balance = 0
        for ch in s:
            if ch == '(':
                balance += 1
            elif ch == ')':
                balance -= 1
                if balance < 0:
                    return False
        return balance == 0
class Solution(object):
    """LeetCode 301 — pruned DFS solution.

    ``calc`` counts the minimum removals still needed; the DFS only
    recurses into candidates that strictly reduce that count, so every
    returned string used exactly the minimum number of removals.
    """

    def dfs(self, s, visited):
        """Collect all minimum-removal valid strings reachable from ``s``."""
        miss = self.calc(s)
        if miss == 0:
            return [s]
        collected = []
        for idx, ch in enumerate(s):
            if ch not in ('(', ')'):
                continue
            candidate = s[:idx] + s[idx + 1:]
            if candidate in visited or self.calc(candidate) >= miss:
                continue
            visited.add(candidate)
            collected.extend(self.dfs(candidate, visited))
        return collected

    def calc(self, s):
        """Minimum number of parentheses that must be removed from ``s``."""
        unmatched_open = 0
        unmatched_close = 0
        for ch in s:
            if ch == '(':
                unmatched_open += 1
            elif ch == ')':
                if unmatched_open > 0:
                    unmatched_open -= 1
                else:
                    unmatched_close += 1
        return unmatched_open + unmatched_close

    def removeInvalidParentheses(self, s):
        """Return all valid strings reachable with the fewest removals."""
        visited = {s}
        return self.dfs(s, visited)
# Taking set input dynamically::
# NOTE: {} creates an empty DICT, not a set — the first type(s) prints dict.
s={}
print(type(s))
s={int(i) for i in input('Enter::').split()}
print(s)
print(type(s))
# Set builtin function::
s1={11,12,31,45,4}
s1.add(19)
print('add function::')
print(s1) # add(x) adds x in set unorderly
s1.remove(12)
print("remove function:: ")
print(s1)
print('pop returns:',s1.pop())  # pop removes an arbitrary element
print('pop function::')
print(s1)
s2={11,45,19,32}
print("difference function")
print(s1.difference(s2)) # s1-s2
print('intersection::')
print(s1.intersection(s2))
print('Union')
print(s1.union(s2))
print('Discard::')
s1.discard(11)  # like remove(), but no KeyError if absent
print(s1)
print(s1.symmetric_difference(s2)) # prints only non repeating items of s1 and s2
s3={12,45,30,32}
s3.difference_update(s2)
print(s3) # s3=s3-s2
print('isdisjoint Function::')
s4={1,2,11,33}
s5={22,44,55,66}
print(s4.isdisjoint(s5)) # True when s4 and s5 share no elements
print('clear Function::')
s3.clear()
print(s3)
print("subset & superset::")
set1={1,2,3,4,5}
set2={2,3,4}
set3={2,3,9}
print('superset::')
print(set1.issuperset(set2)) # True when every element of set2 is in set1
print(set1.issuperset(set3))
print('subset::')
print(set2.issubset(set1))
print(set3.issubset(set1))
print('update function::')
print(set1)
set3={1,2,11,44}
print(set3)
set1.update(set3)  # in-place union
print(set1)
#-*- coding:utf8 -*-
from django.contrib import admin
from shopback.categorys.models import Category,ProductCategory

class CategoryAdmin(admin.ModelAdmin):
    """Admin list configuration for Category rows."""
    list_display = ('cid','parent_cid','name','is_parent','status','sort_order')
    #list_editable = ('update_time','task_type' ,'is_success','status')
    list_filter = ('status','is_parent')
    search_fields = ['cid','parent_cid','name']

admin.site.register(Category,CategoryAdmin)
class ProductCategoryAdmin(admin.ModelAdmin):
    """Admin list configuration for ProductCategory rows."""
    list_display = ('cid','parent_cid','full_name','is_parent','status','sort_order')
    #list_editable = ('update_time','task_type' ,'is_success','status')
    def full_name(self, obj):
        # Column rendering the object's str() representation.
        return '%s'%obj
    full_name.allow_tags = True
    full_name.short_description = u"全名"
    ordering = ['parent_cid','-sort_order',]
    list_filter = ('status','is_parent')
    search_fields = ['cid','parent_cid','name']

admin.site.register(ProductCategory,ProductCategoryAdmin)
# Syntax highlighter - convert python code into html entities
# At the moment this is just a code viewer, but before long, it'll be something awesome.
import keyword # Contains a list of all the python keywords.
import re

class DocumentObj:
    """Holds a piece of Python source text and renders it as HTML lines.

    Keywords and a few well-known names are wrapped in <strong>,
    parenthesized spans and trailing comments in <em>.
    """
    def __init__(self, text = ''):
        self.text = text
    def robust_split(self, string, sep):
        # sep as a list of seperation strings
        # Splits `string` at every occurrence of any separator; each
        # separator stays attached to the START of the following piece.
        output_list = []
        last_break_index = 0
        for index, char in enumerate(string):
            for s in sep:
                if char == s[0]:
                    # Multi-char separators: confirm the full match.
                    if string[index:index+len(s)] == s:
                        #Break the string here
                        output_list.append(string[last_break_index:index])
                        last_break_index = index
        output_list.append(string[last_break_index:])
        return output_list
    def convert_to_html(self):
        """Return self.text as a list of HTML-decorated lines."""
        # Work on a line-by-line basis
        outlines = []
        inlines = self.text.splitlines(keepends=True)
        seperation = [" ", ":", ".", "(",")"]
        italic_list = ['self', 'super', 'int', 'str', 'bool']
        for inline in inlines:
            outline = ''
            # Anything in brackets should be in italics
            tmpinline = inline
            inline = ''
            for c in tmpinline:
                if c == "(":
                    inline += "(<em>"
                elif c == ")":
                    inline += "</em>)"
                else:
                    inline += c
                if c == "\t":
                    # Render tabs as HTML non-breaking spaces.
                    inline += "&nbsp;"*4
            for index, word in enumerate(self.robust_split(inline, seperation)):
                if "#" in word:
                    # The rest of the line is a comment; emit it and stop.
                    outline += "<em>"+"".join(self.robust_split(inline, seperation)[index:])+"</em>"
                    break
                test_word = word.strip("".join(seperation))
                if test_word in keyword.kwlist:
                    outline += "<strong>{0}</strong>".format(word)
                elif test_word in italic_list:
                    # NOTE(review): same <strong> tag as keywords despite
                    # the list's name — confirm whether <em> was intended.
                    outline += "<strong>{0}</strong>".format(word)
                else:
                    outline += word #+" "
            outlines.append(outline)
        return outlines
|
# Generated by Django 3.2 on 2020-07-15 19:46
from django.db import migrations, models

class Migration(migrations.Migration):
    """Auto-generated schema migration — avoid hand-editing once applied."""
    dependencies = [
        ('main', '0012_auto_20200715_1817'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bazaar_user',
            name='location_id',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='location',
            name='location',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
|
import datetime
from functools import wraps

def time_and_log(logger):
    """Decorator factory: log start, end and wall-clock duration of each
    call to the wrapped function via ``logger``.

    Fix: the wrapper previously discarded the wrapped function's return
    value (always returned None); it now propagates it. Also fixes the
    'decarator' typo in the inner function's name.
    """
    def time_and_log_decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            a = '' if not args else args
            ka = '' if not kwargs else kwargs
            logger.info(f'Attempting to execute `{function.__name__}` with {a}{ka}.')
            start_time = datetime.datetime.now()
            result = function(*args, **kwargs)
            end_time = datetime.datetime.now()
            duration = (end_time - start_time).total_seconds()
            logger.info(f'Successfully executed `{function.__name__}` with {a}{ka} in {duration} seconds.')
            return result
        return wrapper
    return time_and_log_decorator
# Minimal smoke-test script: print a greeting.
msg="welcome to python"
print(msg)
|
from django.db import models
from CBF.abstract_models import CommonPostInfo
from membership.models import Member
from django.template.defaultfilters import slugify
class Tag(models.Model):
    """Content category shared by sermons, thoughts and events."""
    name = models.CharField('Categoria', max_length=80)
    def __str__(self):
        return self.name
    def get_count_related_objects(self):
        # Total objects of any kind carrying this tag.
        return ( self.sermon_set.count() + self.thought_set.count() + self.event_set.count() )
    def get_related_sermons(self):
        return (self.sermon_set.all())
    def get_related_events(self):
        return (self.event_set.all())
    def get_related_thoughts(self):
        return (self.thought_set.all())
# TODO: Every youtube upload should generate this
class Sermon(CommonPostInfo):
    """A recorded sermon referenced by external URL.

    NOTE(review): ForeignKey without on_delete is only valid on
    Django < 2.0; confirm the project's Django version.
    """
    url = models.URLField(max_length=250)
    author = models.ForeignKey(Member, null=True, blank=True)
    tags = models.ManyToManyField(Tag, blank=True)
    class Meta:
        verbose_name = "Sermon"
        verbose_name_plural = "Sermones"
    def __str__(self):
        return self.name
    def save(self, *args, **kwargs):
        # Keep the slug in sync with the name on every save.
        self.slug = slugify(self.name)
        super(Sermon, self).save(*args, **kwargs)
|
import json
import shortuuid
from interface import implements
from backend.common.messaging.message_handler import MessageHandler
import backend.proto.message_pb2 as pb
from backend.user_service.user.domain.rider import Rider
from backend.user_service.user.domain.driver import Driver
def _extract_user_id_list_from(rider_id_list):
    """Resolve a (possibly repeated) list of rider ids to the set of
    their owning user ids.

    Improvement: iterate the deduplicating set directly — the old
    ``list(set(...))`` round-trip was redundant.
    NOTE(review): one DB query per rider id; a ``pk__in`` filter would
    batch this, but would also silently skip missing ids instead of
    raising, so the per-id ``get`` is kept.
    """
    result = set()
    for rider_id in set(rider_id_list):
        rider = Rider.objects.get(pk=rider_id)
        result.add(rider.user.id)
    return result
def _get_all_driver_user_id_list():
    """Return the set of user ids belonging to every registered driver."""
    return {driver.user.id for driver in Driver.objects.all()}
class GroupCreatedEventHandler(implements(MessageHandler)):
    """Fans a 'group created' event out to the group's riders and to
    every registered driver."""
    def __init__(self, conn):
        # conn: messaging connection exposing SendMessage(pb.Message)
        self.conn = conn
    def handle(self, message):
        # extract user id set from rider_id_list
        # also add target with all driver's user id
        target = set()
        target = target.union(
            _extract_user_id_list_from(message.rider_id_list))
        target = target.union(
            _get_all_driver_user_id_list()
        )
        # Serialize the event's attributes as the message payload.
        self.conn.SendMessage(pb.Message(
            id=shortuuid.uuid(),
            target=list(target),
            type=message.type_name,
            data=json.dumps(vars(message)),
        ))
|
import sys
# Prepend the project root (three levels above this file) to sys.path so
# `sensor_controller` is importable without installing the package.
sys.path.insert(1, str().join(['/' + i for i in __file__.split('/')[1:-3]]))
import unittest
from sensor_controller import *
class TestSetSensor(unittest.TestCase):
    """Tests for SensorController.setSensorStatus / getSensorStatus."""
    def setUp(self):
        self.controller = SensorController()
    def test_set_sensor_1(self):
        # air sensor is available and its status holds the float value set
        self.controller.add_sensor('air', 0, t='float')
        self.controller.setSensorStatus('air', 18.8, t='float')
        self.assertEqual(self.controller.getSensorStatus('air')['status'], 18.8)
    def test_set_sensor_2(self):
        # air sensor is available and the status comes back as float
        self.controller.add_sensor('air', 0, t='float')
        self.controller.setSensorStatus('air', 18.8, t='float')
        self.assertEqual(type(self.controller.getSensorStatus("air")['status']), float)
    def test_set_sensor_3(self):
        # air sensor returns float only, never int
        self.controller.add_sensor('air', 0, t='float')
        self.controller.setSensorStatus('air', 18.8, t='float')
        self.assertNotEqual(type(self.controller.getSensorStatus("air")), int)
    def test_set_sensor_4(self):
        # unknown sensor 'air3dd': set must fail (falsy / 400-style result)
        self.assertFalse(self.controller.setSensorStatus("air3dd", 18.8, t='float'))
    def test_set_sensor_5(self):
        # air sensor only accepts float status; an int type must be rejected
        self.controller.add_sensor('air', 0, t='float')
        self.assertFalse(self.controller.setSensorStatus('air', 18, t='int'))
|
import requests
import zipfile
import os

# Download Bungie's Destiny manifest database and unpack it as
# 'Manifest.content'.
# Ported from Python 2: `print` statements and `raw_input()` are
# syntax/Name errors under Python 3.
apikey = input('API Key: ')
HEADERS = {"X-API-Key": apikey}

r = requests.get("http://www.bungie.net/Platform/Destiny/Manifest/", headers=HEADERS)
manifest = r.json()
mani_url = 'http://www.bungie.net' + manifest['Response']['mobileWorldContentPaths']['en']

# Download the file, write it to 'MANZIP'
r = requests.get(mani_url)
with open("MANZIP", "wb") as outfile:  # renamed: 'zip' shadowed the builtin
    outfile.write(r.content)
print("Download Complete!")

# Extract the file contents, and rename the extracted file
# to 'Manifest.content'
with zipfile.ZipFile('MANZIP') as archive:
    names = archive.namelist()
    archive.extractall()
    os.rename(names[0], 'Manifest.content')
print('Done!')
|
import time

# Ask for the user's name and age, then tell them in which year they
# will turn 100.
name = input("请您输入姓名:")
age = input("请您输入年龄: ")
print('---------------------------')
print("您的名字是:" + name)
print("您的年龄是: " + age)
# Year they turn 100 = current year + (100 - current age).
current_year = int(time.strftime('%Y', time.localtime()))
year_at_100 = str(current_year + 100 - int(age))
print(name + "将在" + year_at_100 + "年满100周岁!")
|
from flask import Flask, render_template, request
import datetime
import sqlalchemy as sa
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, Text, String, Float, DateTime, desc
import dateutil.parser
import numpy as np
from sklearn.linear_model import LinearRegression
import logging
# logger = logging.getLogger(__name__)
logging.basicConfig(filename='app.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s')
logging.warning('This will get logged to a file')
# from systemd import journal #TODO can't get to install systemd in this project somehow
import logging.handlers
# Log all messages to the system journal
# loghandler = JournalHandler(SYSLOG_IDENTIFIER=SYSLOG_ID)
# logger = logging.getLogger(SYSLOG_ID)
# logger.addHandler(loghandler)
# logger.setLevel(logging.DEBUG) # TODO change to INFO in production
app = Flask(__name__)
# SQLite database file in the working directory.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///weather.db'
# Disable modification tracking (saves memory; event hooks unused here).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# NOTE(review): create_all() runs here, BEFORE the Weather model below is
# defined, so it cannot create that table — presumably the table already
# exists on disk; verify, or move this call below the model definitions.
db.create_all()
######### Models #########
class Weather(db.Model):
    """One weather observation row."""
    id = sa.Column(Integer, primary_key=True)
    ts = sa.Column(DateTime, unique=False)  # Timestamp of record
    temp = sa.Column(Float, unique=False)  # Temperature in degrees centigrade
    rh = sa.Column(Float, unique=False)  # Relative humidity
    wind_dird = sa.Column(Integer, unique=False)  # Wind direction in degrees
    wind_sp = sa.Column(Float, unique=False)  # Wind speed in km/h

    @property
    def wind_cardinal(self):  # Show wind direction as cardinal (eg. WNW)
        return degToCardinal(self.wind_dird)

    def __repr__(self):
        return f'Weather at {self.ts}: Temp={self.temp} RH={self.rh} wind={self.wind_sp} dir={self.wind_cardinal}'
## Utilities
def degToCardinal(degrees):
    """Convert a bearing in degrees to one of the 16 compass point names."""
    points = ("N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE",
              "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW")
    sector = 360.0 / len(points)  # 22.5 degrees per compass point
    # +0.5 rounds to the nearest sector; modulo wraps 360 back to "N".
    return points[int(degrees / sector + 0.5) % len(points)]
def trend_slope(x_readings, y_readings):
    """Return the least-squares slope of y_readings versus x_readings.

    Returns a 1-element ndarray, matching the shape of the previous
    ``sklearn.LinearRegression().coef_`` result, so callers are unaffected.
    """
    # np.polyfit(deg=1) solves the same ordinary-least-squares problem
    # (slope + intercept) as sklearn's LinearRegression, without pulling in
    # the heavyweight sklearn dependency or the reshape boilerplate.
    slope, _intercept = np.polyfit(np.asarray(x_readings, dtype=float),
                                   np.asarray(y_readings, dtype=float), 1)
    return np.array([slope])
## Views
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/latest')
def latest():
    """Render the most recent weather report."""
    lastrep = Weather.query.order_by(desc(Weather.ts)).first()  # No last() available, so get latest
    return render_template('bsstationj.html', rep=lastrep)
@app.route('/latest5')
def latest5():
    """Compute short-term temperature/humidity/wind trends from the five
    most recent readings.

    NOTE: the computed slopes are not yet rendered anywhere (TODO) — the
    endpoint currently returns a placeholder string, as before.
    """
    # BUG FIX: ascending order + [:5] selected the OLDEST five readings,
    # contradicting the intent ("Last 5 readings"). Take the final five so
    # they are the most recent, still in chronological order for the
    # time-delta calculation below.
    lastrep = Weather.query.order_by(Weather.ts).all()[-5:]
    if len(lastrep) < 2:
        # Not enough data points to fit a trend line; avoid crashing.
        return 'latest5'
    # Seconds elapsed since the oldest of the five readings.
    last_times = [(rep.ts - lastrep[0].ts).total_seconds() for rep in lastrep]
    last_tmps = [rep.temp for rep in lastrep]
    last_rhs = [rep.rh for rep in lastrep]
    last_winds = [rep.wind_sp for rep in lastrep]
    tmp_slope = trend_slope(last_times, last_tmps)
    rh_slope = trend_slope(last_times, last_rhs)
    wind_slope = trend_slope(last_times, last_winds)
    return 'latest5'
@app.route('/update', methods=['GET', 'POST'])
def update():
    """Ingest one weather reading from request parameters and store it.

    Expects: timestamp (ISO-ish, parsed by dateutil), temp_c,
    relative_humidity, wind_degrees, wind_mph.
    Raises KeyError/ValueError on missing or malformed parameters.
    """
    neww = Weather(ts = dateutil.parser.parse(request.values['timestamp']),
                   temp = float(request.values['temp_c']),
                   rh = float(request.values['relative_humidity']),
                   wind_dird = int(request.values['wind_degrees']),
                   wind_sp = float(request.values['wind_mph'])*1.60934)  # Convert mph -> kph
    db.session.add(neww)
    db.session.commit()
    # logging.info(neww)
    print(neww)
    return 'update'
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug debugger to the whole network — disable debug in production.
    app.run(debug=True, host='0.0.0.0')
|
# Generated by Django 2.2.6 on 2019-12-19 03:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the 'words' app.

    Creates the base Word table (word / meaning / pronunciation) plus one
    table per part of speech (Verb, Phrasal, Noun, Expresion, Adverb,
    Adjective), each holding a cascading foreign key to Word.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Word',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('word', models.CharField(max_length=25)),
                ('meaning', models.CharField(max_length=50)),
                ('pronunciation', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Verb',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('verb', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='words.Word')),
            ],
        ),
        migrations.CreateModel(
            name='Phrasal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phrasal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='words.Word')),
            ],
        ),
        migrations.CreateModel(
            name='Noun',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('noun', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='words.Word')),
            ],
        ),
        migrations.CreateModel(
            # NOTE(review): 'Expresion' is a misspelling of 'Expression';
            # fixing it now would require a follow-up rename migration.
            name='Expresion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('expresion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='words.Word')),
            ],
        ),
        migrations.CreateModel(
            name='Adverb',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('adverb', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='words.Word')),
            ],
        ),
        migrations.CreateModel(
            name='Adjective',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('adjective', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='words.Word')),
            ],
        ),
    ]
|
from Bio import SeqIO
from math import factorial

# Count perfect matchings of RNA basepair edges (Rosalind PMCH):
# A pairs only with U and G only with C, so the answer is the product of
# the partial permutations P(max, min) for each complementary pair.
sequence = ''
with open('sampledata.fasta', 'r') as f:
    for record in SeqIO.parse(f, 'fasta'):
        sequence = str(record.seq)

# str.count replaces the manual per-nucleotide tally loop.
A = sequence.count('A')
U = sequence.count('U')
G = sequence.count('G')
C = sequence.count('C')

# BUG FIX: true division (/) produced a float, losing precision — and
# raising OverflowError outright — once the factorials exceeded float
# range. Floor division (//) keeps the arithmetic in exact integers
# (the divisor always divides the dividend here).
AU = factorial(max(A, U)) // factorial(max(A, U) - min(A, U))
GC = factorial(max(G, C)) // factorial(max(G, C) - min(G, C))
print(AU * GC)
|
from tile import Tile
import pygame
class Character(Tile):
    """Contain the functions relative to the main character."""

    def __init__(self, img, text):
        Tile.__init__(self, img, text)
        # Off-screen sentinel coordinates until set_starting_pos() is called.
        self.startx = -100
        self.starty = -100

    def set_starting_pos(self, x, y):
        """Set the initial position and move the character there."""
        self.startx = x
        self.starty = y
        self.set_pos(x, y)

    def get_starting_pos(self):
        """Return the initial position as (x, y)."""
        return self.startx, self.starty

    def go_to_start(self):
        """Set the main character position back to the initial one."""
        self.set_pos(*self.get_starting_pos())

    def check_move(self, x, y):
        """Return True when (x, y) does not land on a wall tile ('#').

        The level map is re-read on every call (behaviour kept from the
        original implementation).
        """
        walllst = []
        ordo = 0
        # BUG FIX: the file handle was opened without ever being closed;
        # the with-block guarantees it is released.
        with open('levels.txt') as level_file:
            for line in level_file:
                absc = 0
                for tile in line.rstrip('\n'):
                    if tile == "#":
                        walllst.append((absc, ordo))
                    absc += 40  # each tile is 40px wide
                ordo += 40
        # Explicit boolean (the original fell through to None when blocked).
        return (x, y) not in walllst

    def key_input_check(self, addx, addy):
        """Modify position if the move is possible."""
        if self.check_move(self.x + addx, self.y + addy):
            self.clean_a_tile()
            self.set_pos(self.x + addx, self.y + addy)

    def key_input(self, event):
        """Check key pressed to move (one 40px tile per arrow key)."""
        if event.key == pygame.K_LEFT:
            self.key_input_check(-40, 0)
        if event.key == pygame.K_RIGHT:
            self.key_input_check(40, 0)
        if event.key == pygame.K_UP:
            self.key_input_check(0, -40)
        if event.key == pygame.K_DOWN:
            self.key_input_check(0, 40)

    def is_jack_on_item(self, item):
        """Trigger item.item_event() when standing on an uncollected item."""
        if self.get_pos() == item.get_pos() and item.collected is not True:
            return item.item_event()
|
#-----------------------------------------------------------------------------
#
# Copyright (c) 2006-2007 by Enthought, Inc.
# All rights reserved.
#
#-----------------------------------------------------------------------------
"""
The default UI service factory.
"""
# Enthought library imports.
from traits.api import HasTraits, Int, Str
# Local imports.
from ui_service import UiService
class UIServiceFactory(HasTraits):
    """
    The default UI service factory.
    """

    # Dotted name of the class that implements this factory.
    class_name = Str

    # Relative priority of this factory among competing factories.
    priority = Int

    ###########################################################################
    # 'UIServiceFactory' interface.
    ###########################################################################

    def create_ui_service(self, *args, **kw):
        """ Create and return a new UI service instance. """
        service = UiService(*args, **kw)
        return service
#### EOF ######################################################################
|
#import sys
#input = sys.stdin.readline
from collections import defaultdict
def main():
    """Count index pairs (i, j), i < j (1-based), with j - i == A_i + A_j.

    Equivalent single-pass formulation: for each j, add how many earlier
    indices i satisfy either matching key seen so far.
    """
    n = int(input())
    heights = list(map(int, input().split()))
    left = defaultdict(int)   # occurrences of (index - A_index)
    right = defaultdict(int)  # occurrences of -(A_index + index)
    total = 0
    for idx, a in enumerate(heights, start=1):
        total += left[a + idx]
        total += right[a - idx]
        left[idx - a] += 1
        right[-a - idx] += 1
    print(total)
if __name__ == '__main__':
main()
|
if __name__ == "__main__":
    # Award points based on the alien's colour: green earns the small
    # prize, any other colour earns 10 points. Runs the check for both
    # example colours (formerly "version2"), printing the same two lines.
    for alien_color in ('green', 'yellow'):
        if alien_color == 'green':
            print("You just got 5and6 points!")
        else:
            print("You just got 10 points!")
|
import numpy as np
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class Config(object):
    """
    define a class to store parameters,
    the input should be feature mat of training and testing
    Note: it would be more interesting to use a HyperOpt search space:
    https://github.com/hyperopt/hyperopt
    """
    def __init__(self, X_train):
        # Input data: X_train is only used for its row count.
        self.train_count = X_train.shape[0]  # number of training series
        # self.test_data_count = len(X_test) # 2947 testing series
        self.n_steps = 10#len(X_train[0]) # time steps (frames) fed to the LSTM per series
        self.img_h = 120  # input frame height in pixels
        self.img_w = 160  # input frame width in pixels
        # Training
        self.learning_rate = 0.01#0.0025
        self.lambda_loss_amount = 0.0015  # L2 regularisation weight
        self.training_epochs = 10000
        self.batch_size = 10#90
        # LSTM structure
        self.n_inputs = 6#len(X_train[0]) # features per time step (== n_classes here)
        self.n_hidden = 32 # nb of neurons inside the neural network
        self.n_classes = 6 # Final output classes
        # LSTM weights/biases: input->hidden projection and hidden->output layer.
        self.W = {
            'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),
            'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))
        }
        self.b = {
            'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),
            'output': tf.Variable(tf.random_normal([self.n_classes]))
        }
        # CNN weights/biases: two 10x10 conv layers, one FC layer, one output layer.
        self.weights = {'W_conv1':tf.Variable(tf.random_normal([10,10,1,16])),#[5,5,1,32]
                        'W_conv2':tf.Variable(tf.random_normal([10,10,16,16])),#[5,5,32,64]
                        'W_fc':tf.Variable(tf.random_normal([8*10*16,256])),#[35*125*256]
                        'out':tf.Variable(tf.random_normal([256, self.n_classes]))}
        self.biases = {'b_conv1':tf.Variable(tf.random_normal([16])),
                       'b_conv2':tf.Variable(tf.random_normal([16])),
                       'b_fc':tf.Variable(tf.random_normal([256])),
                       'out':tf.Variable(tf.random_normal([self.n_classes]))}
        self.keep_rate = 0.8  # dropout keep probability for the FC layer
def CRNN(_X, _Y, config):
    """Build the CNN + 2-layer-LSTM classification graph (TF1 graph mode).

    _X: batch of clips; reshaped below to (N*n_steps, img_h, img_w, 1).
    _Y: one-hot labels, passed through unchanged.
    Returns (logits, _Y, config.W, config.b, config.weights, config.biases).
    The print() calls trace tensor shapes during graph construction.
    """
    _X = tf.reshape(_X, shape=[-1, config.img_h, config.img_w, 1])
    _X = tf.cast(_X, tf.float32)
    # Conv block 1: 10x10 conv (stride 2) + ReLU, then 4x4 max-pool (stride 2).
    conv1 = tf.nn.relu(tf.nn.conv2d(_X, config.weights['W_conv1'], strides=[1,2,2,1], padding='SAME') + config.biases['b_conv1'])
    print(conv1)
    conv1 = tf.nn.max_pool(conv1, ksize=[1,4,4,1], strides=[1,2,2,1], padding='SAME')
    print(conv1)
    # Conv block 2: 10x10 conv (stride 2) + ReLU, then 2x2 max-pool (stride 2).
    conv2 = tf.nn.relu(tf.nn.conv2d(conv1, config.weights['W_conv2'], strides=[1,2,2,1], padding='SAME') + config.biases['b_conv2'])
    print(conv2)
    conv2 = tf.nn.max_pool(conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
    print(conv2)
    # Flatten to the fully connected layer; 8*10*16 must match W_fc above.
    fc = tf.reshape(conv2,[-1, 8*10*16])#[35*125*256]
    print(fc)
    fc = tf.nn.relu(tf.matmul(fc, config.weights['W_fc']) + config.biases['b_fc'])
    print(fc)
    fc = tf.nn.dropout(fc, config.keep_rate)
    out = tf.matmul(fc, config.weights['out']) + config.biases['out']
    print(out)
    # (NOTE: This step could be greatly optimised by shaping the dataset once
    # input shape: (batch_size, n_steps, n_input)
    out = tf.reshape(out, [-1,config.n_steps,config.n_classes])
    print(out)
    out = tf.transpose(out, [1, 0, 2]) # permute n_steps and batch_size
    # Reshape to prepare input to hidden activation
    print(out)
    # NOTE(review): reshaping to n_inputs (=6) columns only lines up because
    # n_classes == n_inputs == 6 in this Config — confirm if either changes.
    out = tf.reshape(out, [-1, config.n_inputs])
    print(out)
    # out = tf.cast(out, tf.float32)
    # new shape: (n_steps*batch_size, n_input)
    # Linear activation
    out = tf.nn.relu(tf.matmul(out, config.W['hidden']) + config.b['hidden'])
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    out = tf.split(out, config.n_steps, 0)
    # new shape: n_steps * (batch_size, n_hidden)
    # Define two stacked LSTM cells (two recurrent layers deep) with tensorflow
    lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(config.n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(config.n_hidden, forget_bias=1.0, state_is_tuple=True)
    lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
    # Get LSTM cell output
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, out, dtype=tf.float32)
    # Get last time step's output feature for a "many to one" style classifier,
    # as in the image describing RNNs at the top of this page
    lstm_last_output = outputs[-1]
    # Linear activation
    return tf.matmul(lstm_last_output, config.W['output']) + config.b['output'], _Y, config.W, config.b, config.weights, config.biases
if __name__ == "__main__":
    # Load the pre-extracted frame data (npz archives, array key 'a') and
    # reshape to (samples, n_steps, height, width).
    train_x = np.load('/home/jehyunpark/data/train_x.npz')['a']
    train_x = np.reshape(train_x,[-1,10,120,160])#[120,10,120,160]
    test_x = np.load('/home/jehyunpark/data/test_x.npz')['a']
    test_x = np.reshape(test_x,[-1,10,120,160])#[30,10,120,160]
    train_y = np.load('/home/jehyunpark/data/train_y.npz')['a']
    test_y = np.load('/home/jehyunpark/data/test_y.npz')['a']
    print('data loading completed')
    config = Config(train_x)
    # Placeholders: X is a batch of 10-frame clips, Y the one-hot labels.
    X = tf.placeholder(tf.float32, [None,10, config.img_h,config.img_w])
    Y = tf.placeholder(tf.float32,[None, config.n_classes])
    # a,b,c,d,e,f = CRNN(train_x,train_y,config)
    # print(b.shape)
    prediction, label, W, B, weights, biases = CRNN(X, Y, config)
    # Loss,optimizer,evaluation
    l2 = config.lambda_loss_amount * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
    # Softmax loss and L2
    cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=prediction) ) + l2
    optimizer = tf.train.AdamOptimizer(learning_rate=config.learning_rate).minimize(cost)
    correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(label, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))
    # sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False))
    cfg = tf.ConfigProto()
    # cfg.gpu_options.per_process_gpu_memory_fraction = 0.85
    cfg.gpu_options.allow_growth = True  # grab GPU memory on demand, not all up front
    sess = tf.Session(config= cfg)
    with sess.as_default():
        init = tf.global_variables_initializer()
        sess.run(init)
        best_accuracy = 0.0
        # Start training for each batch and loop epochs
        for i in range(config.training_epochs):
            # for start, end in zip(range(0, config.train_count, config.batch_size),
            #                       range(config.batch_size, config.train_count + 1, config.batch_size)):
            #     sess.run(optimizer, feed_dict={X: train_x[start:end],
            #                                    Y: train_y[start:end]})
            # NOTE(review): the mini-batch loop above is commented out, so the
            # whole training set is fed as one batch every epoch.
            sess.run(optimizer, feed_dict={X: train_x,
                                           Y: train_y})
            # Test completely at every epoch: calculate accuracy
            pred_out, accuracy_out, loss_out, W_, B_, weights_, biases_ = sess.run(
                [prediction, accuracy, cost, W, B, weights, biases], feed_dict={X: test_x, Y: test_y}
            )
            print("training iter: {},".format(i) +
                  " test accuracy : {},".format(accuracy_out) +
                  " loss : {}".format(loss_out))
            best_accuracy = max(best_accuracy, accuracy_out)
        # Persist every learned tensor as a compressed npz snapshot.
        # NOTE(review): source indentation was lost — these saves are assumed
        # to run once after training rather than every epoch; confirm.
        np.savez_compressed('./data/W_hidden',a=W_['hidden'])
        np.savez_compressed('./data/W_output',a=W_['output'])
        np.savez_compressed('./data/b_hidden',a=B_['hidden'])
        np.savez_compressed('./data/b_output',a=B_['output'])
        np.savez_compressed('./data/W_conv1',a=weights_['W_conv1'])
        np.savez_compressed('./data/W_conv2',a=weights_['W_conv2'])
        np.savez_compressed('./data/W_fc',a=weights_['W_fc'])
        np.savez_compressed('./data/W_out',a=weights_['out'])
        np.savez_compressed('./data/b_conv1',a=biases_['b_conv1'])
        np.savez_compressed('./data/b_conv2',a=biases_['b_conv2'])
        np.savez_compressed('./data/b_fc',a=biases_['b_fc'])
        np.savez_compressed('./data/b_out',a=biases_['out'])
        print("")
        print("final test accuracy: {}".format(accuracy_out))
        print("best epoch's test accuracy: {}".format(best_accuracy))
        print("")
    sess.close()
|
from ED6ScenarioHelper import *
def main():
# 格兰赛尔
CreateScenaFile(
FileName = 'T4107 ._SN',
MapName = 'Grancel',
Location = 'T4107.x',
MapIndex = 1,
MapDefaultBGM = "ed60018",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'卡露娜', # 9
'亚妮拉丝', # 10
'库拉茨', # 11
'克鲁茨', # 12
'管家菲利普', # 13
'杜南公爵', # 14
'亚鲁瓦教授', # 15
'朵洛希', # 16
'芭蒂', # 17
'拉尔夫', # 18
'蒂库', # 19
'拉尔斯', # 20
'托伊', # 21
'克劳斯市长', # 22
'观众', # 23
'观众', # 24
'观众', # 25
'观众', # 26
'观众', # 27
'观众', # 28
'观众', # 29
'观众', # 30
'观众', # 31
'观众', # 32
'观众', # 33
'观众', # 34
'观众', # 35
'观众', # 36
'观众', # 37
'观众', # 38
'观众', # 39
'观众', # 40
'观众', # 41
'观众', # 42
'观众', # 43
'观众', # 44
'观众', # 45
'观众', # 46
'观众', # 47
'观众', # 48
'观众', # 49
'观众', # 50
'观众', # 51
'观众', # 52
'观众', # 53
'观众', # 54
'观众', # 55
'观众', # 56
'观众', # 57
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01240 ._CH', # 00
'ED6_DT07/CH01630 ._CH', # 01
'ED6_DT07/CH01260 ._CH', # 02
'ED6_DT07/CH01620 ._CH', # 03
'ED6_DT07/CH02470 ._CH', # 04
'ED6_DT07/CH02140 ._CH', # 05
'ED6_DT07/CH02050 ._CH', # 06
'ED6_DT06/CH20063 ._CH', # 07
'ED6_DT07/CH01030 ._CH', # 08
'ED6_DT07/CH01040 ._CH', # 09
'ED6_DT07/CH01160 ._CH', # 0A
'ED6_DT07/CH01470 ._CH', # 0B
'ED6_DT07/CH01060 ._CH', # 0C
'ED6_DT07/CH02350 ._CH', # 0D
'ED6_DT07/CH01150 ._CH', # 0E
'ED6_DT07/CH01020 ._CH', # 0F
'ED6_DT07/CH01220 ._CH', # 10
'ED6_DT07/CH01460 ._CH', # 11
'ED6_DT07/CH01130 ._CH', # 12
'ED6_DT07/CH01200 ._CH', # 13
'ED6_DT07/CH01210 ._CH', # 14
'ED6_DT07/CH01100 ._CH', # 15
'ED6_DT07/CH01140 ._CH', # 16
'ED6_DT07/CH01680 ._CH', # 17
'ED6_DT07/CH01690 ._CH', # 18
'ED6_DT07/CH01120 ._CH', # 19
'ED6_DT07/CH01180 ._CH', # 1A
'ED6_DT07/CH01110 ._CH', # 1B
'ED6_DT07/CH01230 ._CH', # 1C
'ED6_DT07/CH01490 ._CH', # 1D
'ED6_DT07/CH01480 ._CH', # 1E
'ED6_DT06/CH20063 ._CH', # 1F
)
AddCharChipPat(
'ED6_DT07/CH01240P._CP', # 00
'ED6_DT07/CH01630P._CP', # 01
'ED6_DT07/CH01260P._CP', # 02
'ED6_DT07/CH01620P._CP', # 03
'ED6_DT07/CH02470P._CP', # 04
'ED6_DT07/CH02140P._CP', # 05
'ED6_DT07/CH02050P._CP', # 06
'ED6_DT06/CH20063P._CP', # 07
'ED6_DT07/CH01030P._CP', # 08
'ED6_DT07/CH01040P._CP', # 09
'ED6_DT07/CH01160P._CP', # 0A
'ED6_DT07/CH01470P._CP', # 0B
'ED6_DT07/CH01060P._CP', # 0C
'ED6_DT07/CH02350P._CP', # 0D
'ED6_DT07/CH01150P._CP', # 0E
'ED6_DT07/CH01020P._CP', # 0F
'ED6_DT07/CH01220P._CP', # 10
'ED6_DT07/CH01460P._CP', # 11
'ED6_DT07/CH01130P._CP', # 12
'ED6_DT07/CH01200P._CP', # 13
'ED6_DT07/CH01210P._CP', # 14
'ED6_DT07/CH01100P._CP', # 15
'ED6_DT07/CH01140P._CP', # 16
'ED6_DT07/CH01680P._CP', # 17
'ED6_DT07/CH01690P._CP', # 18
'ED6_DT07/CH01120P._CP', # 19
'ED6_DT07/CH01180P._CP', # 1A
'ED6_DT07/CH01110P._CP', # 1B
'ED6_DT07/CH01230P._CP', # 1C
'ED6_DT07/CH01490P._CP', # 1D
'ED6_DT07/CH01480P._CP', # 1E
'ED6_DT06/CH20063P._CP', # 1F
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 40,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 1,
ChipIndex = 0x1,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 39,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 2,
ChipIndex = 0x2,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 46,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 3,
ChipIndex = 0x3,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 49,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 4,
ChipIndex = 0x4,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 5,
ChipIndex = 0x5,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = -1,
TalkScenaIndex = -1,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 6,
ChipIndex = 0x6,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 48,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 7,
ChipIndex = 0x7,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 47,
)
DeclNpc(
X = -12680,
Z = 4700,
Y = -4790,
Direction = 90,
Unknown2 = 0,
Unknown3 = 8,
ChipIndex = 0x8,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 45,
)
DeclNpc(
X = -12660,
Z = 4700,
Y = -3750,
Direction = 90,
Unknown2 = 0,
Unknown3 = 9,
ChipIndex = 0x9,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 44,
)
DeclNpc(
X = -14750,
Z = 5200,
Y = 3290,
Direction = 90,
Unknown2 = 0,
Unknown3 = 10,
ChipIndex = 0xA,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 41,
)
DeclNpc(
X = -14750,
Z = 5200,
Y = 3960,
Direction = 90,
Unknown2 = 0,
Unknown3 = 11,
ChipIndex = 0xB,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 42,
)
DeclNpc(
X = -14750,
Z = 5200,
Y = 4700,
Direction = 90,
Unknown2 = 0,
Unknown3 = 12,
ChipIndex = 0xC,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 43,
)
DeclNpc(
X = 0,
Z = 0,
Y = 0,
Direction = 180,
Unknown2 = 0,
Unknown3 = 13,
ChipIndex = 0xD,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 38,
)
DeclNpc(
X = -14740,
Z = 5200,
Y = -13430,
Direction = 90,
Unknown2 = 0,
Unknown3 = 14,
ChipIndex = 0xE,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 3,
)
DeclNpc(
X = -15550,
Z = 5450,
Y = -5010,
Direction = 90,
Unknown2 = 0,
Unknown3 = 9,
ChipIndex = 0x9,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 4,
)
DeclNpc(
X = -12650,
Z = 4700,
Y = 3270,
Direction = 90,
Unknown2 = 0,
Unknown3 = 15,
ChipIndex = 0xF,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 5,
)
DeclNpc(
X = -15550,
Z = 5450,
Y = -9240,
Direction = 90,
Unknown2 = 0,
Unknown3 = 16,
ChipIndex = 0x10,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 6,
)
DeclNpc(
X = -15550,
Z = 5450,
Y = 1890,
Direction = 90,
Unknown2 = 0,
Unknown3 = 8,
ChipIndex = 0x8,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 7,
)
DeclNpc(
X = -12650,
Z = 4700,
Y = -6590,
Direction = 90,
Unknown2 = 0,
Unknown3 = 16,
ChipIndex = 0x10,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 8,
)
DeclNpc(
X = -12680,
Z = 4700,
Y = -17670,
Direction = 90,
Unknown2 = 0,
Unknown3 = 17,
ChipIndex = 0x11,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 9,
)
DeclNpc(
X = -14720,
Z = 5200,
Y = -3720,
Direction = 90,
Unknown2 = 0,
Unknown3 = 18,
ChipIndex = 0x12,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 10,
)
DeclNpc(
X = -12650,
Z = 4700,
Y = 1670,
Direction = 90,
Unknown2 = 0,
Unknown3 = 19,
ChipIndex = 0x13,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 11,
)
DeclNpc(
X = -13550,
Z = 4950,
Y = -13580,
Direction = 90,
Unknown2 = 0,
Unknown3 = 16,
ChipIndex = 0x10,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 12,
)
DeclNpc(
X = -14750,
Z = 5200,
Y = -8060,
Direction = 90,
Unknown2 = 0,
Unknown3 = 20,
ChipIndex = 0x14,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 13,
)
DeclNpc(
X = -14720,
Z = 5200,
Y = 510,
Direction = 90,
Unknown2 = 0,
Unknown3 = 17,
ChipIndex = 0x11,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 14,
)
DeclNpc(
X = -12660,
Z = 4700,
Y = -9280,
Direction = 90,
Unknown2 = 0,
Unknown3 = 9,
ChipIndex = 0x9,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 15,
)
DeclNpc(
X = -13550,
Z = 4950,
Y = 4710,
Direction = 90,
Unknown2 = 0,
Unknown3 = 21,
ChipIndex = 0x15,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 16,
)
DeclNpc(
X = -14720,
Z = 5200,
Y = 4019,
Direction = 90,
Unknown2 = 0,
Unknown3 = 22,
ChipIndex = 0x16,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 17,
)
DeclNpc(
X = -14520,
Z = 5200,
Y = -15970,
Direction = 90,
Unknown2 = 0,
Unknown3 = 23,
ChipIndex = 0x17,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 18,
)
DeclNpc(
X = -12650,
Z = 4700,
Y = -13490,
Direction = 90,
Unknown2 = 0,
Unknown3 = 24,
ChipIndex = 0x18,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 19,
)
DeclNpc(
X = -15610,
Z = 5450,
Y = -17700,
Direction = 90,
Unknown2 = 0,
Unknown3 = 8,
ChipIndex = 0x8,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 20,
)
DeclNpc(
X = -15610,
Z = 5450,
Y = -14800,
Direction = 90,
Unknown2 = 0,
Unknown3 = 9,
ChipIndex = 0x9,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 21,
)
DeclNpc(
X = -16640,
Z = 5700,
Y = -13560,
Direction = 90,
Unknown2 = 0,
Unknown3 = 25,
ChipIndex = 0x19,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 22,
)
DeclNpc(
X = -13520,
Z = 4950,
Y = -9500,
Direction = 90,
Unknown2 = 0,
Unknown3 = 21,
ChipIndex = 0x15,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 23,
)
DeclNpc(
X = -13520,
Z = 4950,
Y = -4800,
Direction = 91,
Unknown2 = 0,
Unknown3 = 26,
ChipIndex = 0x1A,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 24,
)
DeclNpc(
X = -15440,
Z = 5450,
Y = -5520,
Direction = 90,
Unknown2 = 0,
Unknown3 = 21,
ChipIndex = 0x15,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 25,
)
DeclNpc(
X = -15440,
Z = 5450,
Y = -6530,
Direction = 90,
Unknown2 = 0,
Unknown3 = 27,
ChipIndex = 0x1B,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 26,
)
DeclNpc(
X = -15440,
Z = 5450,
Y = 3270,
Direction = 90,
Unknown2 = 0,
Unknown3 = 20,
ChipIndex = 0x14,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 27,
)
DeclNpc(
X = -12650,
Z = 4700,
Y = 520,
Direction = 90,
Unknown2 = 0,
Unknown3 = 14,
ChipIndex = 0xE,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 28,
)
DeclNpc(
X = -13520,
Z = 4950,
Y = 3330,
Direction = 90,
Unknown2 = 0,
Unknown3 = 19,
ChipIndex = 0x13,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 29,
)
DeclNpc(
X = -14520,
Z = 5200,
Y = 1860,
Direction = 90,
Unknown2 = 0,
Unknown3 = 16,
ChipIndex = 0x10,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 30,
)
DeclNpc(
X = -13520,
Z = 4950,
Y = -8039,
Direction = 90,
Unknown2 = 0,
Unknown3 = 28,
ChipIndex = 0x1C,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 31,
)
DeclNpc(
X = -15440,
Z = 5450,
Y = 550,
Direction = 90,
Unknown2 = 0,
Unknown3 = 22,
ChipIndex = 0x16,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 32,
)
DeclNpc(
X = -12660,
Z = 4700,
Y = 4760,
Direction = 90,
Unknown2 = 0,
Unknown3 = 25,
ChipIndex = 0x19,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 33,
)
DeclNpc(
X = -13520,
Z = 4950,
Y = -3700,
Direction = 90,
Unknown2 = 0,
Unknown3 = 15,
ChipIndex = 0xF,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 34,
)
DeclNpc(
X = -16620,
Z = 5700,
Y = -3710,
Direction = 90,
Unknown2 = 0,
Unknown3 = 22,
ChipIndex = 0x16,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 35,
)
DeclNpc(
X = -15440,
Z = 5450,
Y = 4750,
Direction = 90,
Unknown2 = 0,
Unknown3 = 29,
ChipIndex = 0x1D,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 36,
)
DeclNpc(
X = -12730,
Z = 4700,
Y = -8010,
Direction = 90,
Unknown2 = 0,
Unknown3 = 30,
ChipIndex = 0x1E,
NpcIndex = 0x181,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 37,
)
ScpFunction(
"Function_0_7CA", # 00, 0
"Function_1_A1B", # 01, 1
"Function_2_A1C", # 02, 2
"Function_3_BA4", # 03, 3
"Function_4_BCE", # 04, 4
"Function_5_BF6", # 05, 5
"Function_6_C17", # 06, 6
"Function_7_C7D", # 07, 7
"Function_8_CAB", # 08, 8
"Function_9_DC5", # 09, 9
"Function_10_DF1", # 0A, 10
"Function_11_E4B", # 0B, 11
"Function_12_E98", # 0C, 12
"Function_13_F0C", # 0D, 13
"Function_14_F4A", # 0E, 14
"Function_15_F94", # 0F, 15
"Function_16_FFB", # 10, 16
"Function_17_1028", # 11, 17
"Function_18_1059", # 12, 18
"Function_19_1082", # 13, 19
"Function_20_10C4", # 14, 20
"Function_21_1133", # 15, 21
"Function_22_1199", # 16, 22
"Function_23_1214", # 17, 23
"Function_24_1280", # 18, 24
"Function_25_12C9", # 19, 25
"Function_26_12FE", # 1A, 26
"Function_27_1341", # 1B, 27
"Function_28_138D", # 1C, 28
"Function_29_1424", # 1D, 29
"Function_30_144F", # 1E, 30
"Function_31_14B8", # 1F, 31
"Function_32_153C", # 20, 32
"Function_33_159F", # 21, 33
"Function_34_160D", # 22, 34
"Function_35_167B", # 23, 35
"Function_36_16D1", # 24, 36
"Function_37_1760", # 25, 37
"Function_38_1784", # 26, 38
"Function_39_180A", # 27, 39
"Function_40_1981", # 28, 40
"Function_41_1A4B", # 29, 41
"Function_42_1A75", # 2A, 42
"Function_43_1B08", # 2B, 43
"Function_44_1B2E", # 2C, 44
"Function_45_1C88", # 2D, 45
"Function_46_1F19", # 2E, 46
"Function_47_2009", # 2F, 47
"Function_48_22F3", # 30, 48
"Function_49_26B4", # 31, 49
)
def Function_0_7CA(): pass
label("Function_0_7CA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC7, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_85A")
ClearChrFlags(0xE, 0x80)
ClearChrFlags(0xF, 0x80)
ClearChrFlags(0x8, 0x80)
ClearChrFlags(0x9, 0x80)
ClearChrFlags(0xA, 0x80)
ClearChrFlags(0xB, 0x80)
SetChrPos(0xE, -16580, 5700, -9620, 90)
SetChrPos(0xF, -10500, 4200, -6510, 90)
SetChrPos(0x8, -12710, 4700, -15880, 90)
SetChrPos(0x9, -12670, 4700, -15020, 90)
SetChrPos(0xA, -12650, 4700, -16690, 90)
SetChrPos(0xB, -12650, 4700, -17560, 90)
label("loc_85A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 3)), scpexpr(EXPR_END)), "loc_864")
Jump("loc_A1A")
label("loc_864")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCB, 2)), scpexpr(EXPR_END)), "loc_86E")
Jump("loc_A1A")
label("loc_86E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC9, 1)), scpexpr(EXPR_END)), "loc_878")
Jump("loc_A1A")
label("loc_878")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 7)), scpexpr(EXPR_END)), "loc_882")
Jump("loc_A1A")
label("loc_882")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_948")
ClearChrFlags(0x10, 0x80)
SetChrPos(0x10, -12660, 4700, -6420, 90)
ClearChrFlags(0x11, 0x80)
SetChrPos(0x11, -12660, 4700, -5620, 90)
ClearChrFlags(0x12, 0x80)
ClearChrFlags(0x13, 0x80)
ClearChrFlags(0x14, 0x80)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 5)), scpexpr(EXPR_END)), "loc_8E1")
ClearChrFlags(0x15, 0x80)
SetChrPos(0x15, -14490, 5200, 70, 90)
label("loc_8E1")
ClearChrFlags(0x25, 0x80)
ClearChrFlags(0x26, 0x80)
ClearChrFlags(0x27, 0x80)
ClearChrFlags(0x28, 0x80)
ClearChrFlags(0x29, 0x80)
ClearChrFlags(0x2A, 0x80)
ClearChrFlags(0x2B, 0x80)
ClearChrFlags(0x2C, 0x80)
ClearChrFlags(0x2D, 0x80)
ClearChrFlags(0x2E, 0x80)
ClearChrFlags(0x2F, 0x80)
ClearChrFlags(0x30, 0x80)
ClearChrFlags(0x31, 0x80)
ClearChrFlags(0x32, 0x80)
ClearChrFlags(0x33, 0x80)
ClearChrFlags(0x34, 0x80)
ClearChrFlags(0x35, 0x80)
ClearChrFlags(0x36, 0x80)
ClearChrFlags(0x37, 0x80)
ClearChrFlags(0x38, 0x80)
Jump("loc_A1A")
label("loc_948")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 6)), scpexpr(EXPR_END)), "loc_952")
Jump("loc_A1A")
label("loc_952")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 1)), scpexpr(EXPR_END)), "loc_9C6")
ClearChrFlags(0x10, 0x80)
SetChrPos(0x10, -13550, 4950, -6540, 90)
ClearChrFlags(0x1B, 0x80)
ClearChrFlags(0x1C, 0x80)
ClearChrFlags(0x1D, 0x80)
ClearChrFlags(0x1E, 0x80)
ClearChrFlags(0x1F, 0x80)
ClearChrFlags(0x20, 0x80)
ClearChrFlags(0x21, 0x80)
ClearChrFlags(0x22, 0x80)
ClearChrFlags(0x23, 0x80)
ClearChrFlags(0x24, 0x80)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 4)), scpexpr(EXPR_END)), "loc_9C3")
ClearChrFlags(0xF, 0x80)
SetChrChipByIndex(0xF, 31)
SetChrPos(0xF, -10500, 4200, -6450, 90)
label("loc_9C3")
Jump("loc_A1A")
label("loc_9C6")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 6)), scpexpr(EXPR_END)), "loc_9D0")
Jump("loc_A1A")
label("loc_9D0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 1)), scpexpr(EXPR_END)), "loc_A09")
ClearChrFlags(0x10, 0x80)
SetChrPos(0x10, -12690, 4700, -4810, 90)
ClearChrFlags(0x16, 0x80)
ClearChrFlags(0x17, 0x80)
ClearChrFlags(0x18, 0x80)
ClearChrFlags(0x19, 0x80)
ClearChrFlags(0x1A, 0x80)
Jump("loc_A1A")
label("loc_A09")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC2, 0)), scpexpr(EXPR_END)), "loc_A13")
Jump("loc_A1A")
label("loc_A13")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC1, 0)), scpexpr(EXPR_END)), "loc_A1A")
label("loc_A1A")
Return()
# Function_0_7CA end
def Function_1_A1B(): pass

# Scena function slot 0x01: empty placeholder — returns immediately.
# (Decompiled script; the label name encodes the original byte offset.)
label("Function_1_A1B")
Return()
# Function_1_A1B end
def Function_2_A1C(): pass
label("Function_2_A1C")
OP_51(0xFE, 0x28, (scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
RunExpression(0x0, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_A4C")
OP_99(0xFE, 0x0, 0x7, 0x546)
Jump("loc_B8E")
label("loc_A4C")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_A65")
OP_99(0xFE, 0x1, 0x7, 0x514)
Jump("loc_B8E")
label("loc_A65")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_A7E")
OP_99(0xFE, 0x2, 0x7, 0x4E2)
Jump("loc_B8E")
label("loc_A7E")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_A97")
OP_99(0xFE, 0x3, 0x7, 0x4B0)
Jump("loc_B8E")
label("loc_A97")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_AB0")
OP_99(0xFE, 0x4, 0x7, 0x47E)
Jump("loc_B8E")
label("loc_AB0")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_AC9")
OP_99(0xFE, 0x5, 0x7, 0x44C)
Jump("loc_B8E")
label("loc_AC9")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_AE2")
OP_99(0xFE, 0x6, 0x7, 0x41A)
Jump("loc_B8E")
label("loc_AE2")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_AFB")
OP_99(0xFE, 0x0, 0x7, 0x54B)
Jump("loc_B8E")
label("loc_AFB")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_B14")
OP_99(0xFE, 0x1, 0x7, 0x519)
Jump("loc_B8E")
label("loc_B14")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_B2D")
OP_99(0xFE, 0x2, 0x7, 0x4E7)
Jump("loc_B8E")
label("loc_B2D")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_B46")
OP_99(0xFE, 0x3, 0x7, 0x4B5)
Jump("loc_B8E")
label("loc_B46")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_B5F")
OP_99(0xFE, 0x4, 0x7, 0x483)
Jump("loc_B8E")
label("loc_B5F")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_B78")
OP_99(0xFE, 0x5, 0x7, 0x451)
Jump("loc_B8E")
label("loc_B78")
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_B8E")
OP_99(0xFE, 0x6, 0x7, 0x41F)
label("loc_B8E")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_BA3")
OP_99(0xFE, 0x0, 0x7, 0x4B0)
Jump("loc_B8E")
label("loc_BA3")
Return()
# Function_2_A1C end
def Function_3_BA4(): pass

# Scena function slot 0x03: minimal NPC talk handler — show one line of
# dialogue, close the window, end the talk.
# NOTE(review): 0xFE appears to denote the character that triggered the
# event — confirm against the engine's opcode documentation.
label("Function_3_BA4")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"比赛快点开始吧。\x02",  # runtime dialogue text — must stay byte-identical
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_3_BA4 end
def Function_4_BCE(): pass
label("Function_4_BCE")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"不管是谁取得优胜都很好啊。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_4_BCE end
def Function_5_BF6(): pass
label("Function_5_BF6")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"我现在已经开始兴奋了。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_5_BF6 end
def Function_6_C17(): pass
label("Function_6_C17")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"哈~哈,因为兴奋过度,\x01",
"来得太早了些。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_6_C17 end
def Function_7_C7D(): pass
label("Function_7_C7D")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"今年为谁加油好呢?\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_7_C7D end
def Function_8_CAB(): pass
label("Function_8_CAB")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 5)), scpexpr(EXPR_END)), "loc_D2D")
ChrTalk(
0xFE,
(
"好、好像觉得后面\x01",
"有股很强的杀气……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"是、是我多心了吧。\x02",
)
CloseMessageWindow()
Jump("loc_DC1")
label("loc_D2D")
OP_A2(0x5)
OP_62(0xFE, 0x0, 2000, 0x10, 0x13, 0xFA, 0x1)
OP_22(0x31, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0xFE,
(
"好、好像觉得后面\x01",
"有股很强的杀气……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"是、是我多心了吧。\x02",
)
CloseMessageWindow()
label("loc_DC1")
TalkEnd(0xFE)
Return()
# Function_8_CAB end
def Function_9_DC5(): pass
label("Function_9_DC5")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"完了,\x01",
"导力相机忘带了。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_9_DC5 end
def Function_10_DF1(): pass
label("Function_10_DF1")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"可惜了!\x01",
"今年亲卫队没有出战呢。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_10_DF1 end
def Function_11_E4B(): pass
label("Function_11_E4B")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"团体赛比想象的要有趣呢。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_11_E4B end
def Function_12_E98(): pass
label("Function_12_E98")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"我想还是特务部队\x01",
"会取得优胜吧。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"穿着一身黑装,\x01",
"看起来就很强。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_12_E98 end
def Function_13_F0C(): pass
label("Function_13_F0C")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"今天的对阵\x01",
"会是怎么样的呢?\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_13_F0C end
def Function_14_F4A(): pass
label("Function_14_F4A")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"游击士的两个小组\x01",
"都还没有出局。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"两组都要加油啊~!\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_14_F4A end
def Function_15_F94(): pass
label("Function_15_F94")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"特务部队虽然让人觉得有些害怕,\x01",
"但实力相当强啊。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_15_F94 end
def Function_16_FFB(): pass
label("Function_16_FFB")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"比赛怎么还不开始啊。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_16_FFB end
def Function_17_1028(): pass
label("Function_17_1028")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"每年的比赛\x01",
"我都很期待呢。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_17_1028 end
def Function_18_1059(): pass
label("Function_18_1059")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"今天是总决赛的日子啊。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_18_1059 end
def Function_19_1082(): pass
label("Function_19_1082")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"哪支小组会取胜呢……\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
"我心里扑通扑通地响呢。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_19_1082 end
def Function_20_10C4(): pass
label("Function_20_10C4")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"我喜欢游击士组里面\x01",
"那个金色头发的小哥。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"外表英俊潇洒,\x01",
"而且射击方面也无懈可击。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_20_10C4 end
def Function_21_1133(): pass
label("Function_21_1133")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"我想看那个戴着红色面具的哥哥\x01",
"和那个像熊一样的叔叔\x01",
"打架的样子呢。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_21_1133 end
def Function_22_1199(): pass
label("Function_22_1199")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"真不愧是总决赛的日子,\x01",
"一大早就已经有很多人来了。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_22_1199 end
def Function_23_1214(): pass
label("Function_23_1214")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"双方都是今年\x01",
"第一次参加比赛,\x01",
"哪一边会取胜的确是决赛的看点啊。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_23_1214 end
def Function_24_1280(): pass
label("Function_24_1280")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"游击士小组里面\x01",
"好像有个女孩子呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"这可真了不起啊。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_24_1280 end
def Function_25_12C9(): pass
label("Function_25_12C9")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"比赛还没有开始吗。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_25_12C9 end
def Function_26_12FE(): pass
label("Function_26_12FE")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"每年只有我和老头子\x01",
"两个人来看比赛,\x01",
"感到无聊也没有办法啊。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_26_12FE end
def Function_27_1341(): pass
label("Function_27_1341")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"因为太期待今天的比赛了,\x01",
"我昨天一夜都睡不着觉呢。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_27_1341 end
def Function_28_138D(): pass
label("Function_28_138D")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"我还是觉得\x01",
"特务部队会取胜。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"一看名字就知道来头不小嘛。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_28_138D end
def Function_29_1424(): pass
label("Function_29_1424")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"就算口干舌燥\x01",
"我也要全力为比赛呐喊。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_29_1424 end
def Function_30_144F(): pass
label("Function_30_144F")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"我支持游击士组哦。\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"以前我也曾受到\x01",
"游击士的很多关照啊。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_30_144F end
def Function_31_14B8(): pass
label("Function_31_14B8")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"要是把便当\x01",
"也带来就好了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"一大早就过来排队,\x01",
"肚子都饿了。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_31_14B8 end
def Function_32_153C(): pass
label("Function_32_153C")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"哎呀,\x01",
"武术大会果然很有意思啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"光是看就已经爽呆了。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_32_153C end
def Function_33_159F(): pass
label("Function_33_159F")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"游击士组里的那个男孩子\x01",
"和我儿子的年纪差不多。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"无论如何\x01",
"我也要支持游击士组。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_33_159F end
def Function_34_160D(): pass
label("Function_34_160D")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"如果从综合实力来看的话,\x01",
"不用说也知道\x01",
"那个特务部队是最强的了。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_34_160D end
def Function_35_167B(): pass
label("Function_35_167B")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"说起来,\x01",
"没有想到决赛对阵\x01",
"会是这样的呢。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_35_167B end
def Function_36_16D1(): pass
label("Function_36_16D1")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"王国军和游击士……\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"我觉得无论哪一方,\x01",
"都是保卫我们市民的、\x01",
"值得大家信赖的好战士。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_36_16D1 end
def Function_37_1760(): pass
label("Function_37_1760")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"比赛快要开始了……\x01",
"我会全力为大家呐喊助威的。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_37_1760 end
def Function_38_1784(): pass
label("Function_38_1784")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_1806")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 5)), scpexpr(EXPR_END)), "loc_1806")
ChrTalk(
0x15,
(
"#600F我从年轻的时候就喜欢\x01",
"观看每年一度的武术大会。\x02\x03",
"加油啊。\x01",
"艾丝蒂尔、约修亚,\x02",
)
)
CloseMessageWindow()
label("loc_1806")
TalkEnd(0xFE)
Return()
# Function_38_1784 end
def Function_39_180A(): pass
label("Function_39_180A")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 4)), scpexpr(EXPR_END)), "loc_18C2")
ChrTalk(
0xFE,
(
"虽说那些对手\x01",
"的确不容易对付,\x01",
"#816F不过我坚信你们一定能够取胜的!\x02\x03",
"我会给你们加油哦。\x02",
)
)
CloseMessageWindow()
Jump("loc_197D")
label("loc_18C2")
OP_A2(0x4)
ChrTalk(
0xFE,
(
"#850F哟,两位新人。\x02\x03",
"你们决赛的对手相当强劲,\x01",
"不过肯定会有胜算的。\x02\x03",
"#816F我坚信你们一定能够取胜的!\x01",
"我会给你们加油哦。\x02",
)
)
CloseMessageWindow()
label("loc_197D")
TalkEnd(0xFE)
Return()
# Function_39_180A end
def Function_40_1981(): pass
label("Function_40_1981")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 3)), scpexpr(EXPR_END)), "loc_19E1")
ChrTalk(
0xFE,
(
"#830F听好,一定要放松,\x01",
"像往常那样出战就行了。\x02\x03",
"就连在气势上也要战胜对手。\x02",
)
)
CloseMessageWindow()
Jump("loc_1A47")
label("loc_19E1")
OP_A2(0x3)
ChrTalk(
0xFE,
(
"#830F啊,你们好。\x02\x03",
"听好,一定要放松,\x01",
"像往常那样出战就行了。\x02\x03",
"就连在气势上也要战胜对手。\x02",
)
)
CloseMessageWindow()
label("loc_1A47")
TalkEnd(0xFE)
Return()
# Function_40_1981 end
def Function_41_1A4B(): pass
label("Function_41_1A4B")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"快点开始吧。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_41_1A4B end
def Function_42_1A75(): pass
label("Function_42_1A75")
TalkBegin(0xFE)
ChrTalk(
0xFE,
(
"今天我一大早\x01",
"就去叫了那两个人,\x01",
"然后来竞技场了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
"因为绝对不能错过总决赛啊。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_42_1A75 end
def Function_43_1B08(): pass
label("Function_43_1B08")
TalkBegin(0xFE)
ChrTalk(
0xFE,
"哪个小组会取胜呢。\x02",
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
# Function_43_1B08 end
def Function_44_1B2E(): pass
label("Function_44_1B2E")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 3)), scpexpr(EXPR_END)), "loc_1B3B")
Jump("loc_1C84")
label("loc_1B3B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCB, 2)), scpexpr(EXPR_END)), "loc_1B45")
Jump("loc_1C84")
label("loc_1B45")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC9, 1)), scpexpr(EXPR_END)), "loc_1B4F")
Jump("loc_1C84")
label("loc_1B4F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 7)), scpexpr(EXPR_END)), "loc_1B59")
Jump("loc_1C84")
label("loc_1B59")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_1C4B")
ChrTalk(
0xFE,
(
"想拿个观战的好位置,\x01",
"所以我在门外彻夜排队,\x01",
"不料被那些巡逻的士兵赶回了家。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"之后,我偷偷地从家里溜出来,\x01",
"躲在大街上的草丛里等那些士兵撤走,\x01",
"然后才来排队的。\x02",
)
)
CloseMessageWindow()
Jump("loc_1C84")
label("loc_1C4B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 6)), scpexpr(EXPR_END)), "loc_1C55")
Jump("loc_1C84")
label("loc_1C55")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 1)), scpexpr(EXPR_END)), "loc_1C5F")
Jump("loc_1C84")
label("loc_1C5F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 6)), scpexpr(EXPR_END)), "loc_1C69")
Jump("loc_1C84")
label("loc_1C69")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 1)), scpexpr(EXPR_END)), "loc_1C73")
Jump("loc_1C84")
label("loc_1C73")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC2, 0)), scpexpr(EXPR_END)), "loc_1C7D")
Jump("loc_1C84")
label("loc_1C7D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC1, 0)), scpexpr(EXPR_END)), "loc_1C84")
label("loc_1C84")
TalkEnd(0xFE)
Return()
# Function_44_1B2E end
def Function_45_1C88(): pass
label("Function_45_1C88")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 3)), scpexpr(EXPR_END)), "loc_1C95")
Jump("loc_1F15")
label("loc_1C95")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCB, 2)), scpexpr(EXPR_END)), "loc_1C9F")
Jump("loc_1F15")
label("loc_1C9F")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC9, 1)), scpexpr(EXPR_END)), "loc_1CA9")
Jump("loc_1F15")
label("loc_1CA9")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 7)), scpexpr(EXPR_END)), "loc_1CB3")
Jump("loc_1F15")
label("loc_1CB3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_1D43")
ChrTalk(
0xFE,
(
"昨天真是辛苦我丈夫了,\x01",
"帮我拿到这么一个好位子。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"虽说我的要求很任性,\x01",
"不过没想到他能为我做到这样……\x02",
)
)
CloseMessageWindow()
Jump("loc_1F15")
label("loc_1D43")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 6)), scpexpr(EXPR_END)), "loc_1D4D")
Jump("loc_1F15")
label("loc_1D4D")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 1)), scpexpr(EXPR_END)), "loc_1ECD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_END)), "loc_1DF9")
ChrTalk(
0xFE,
(
"最前排正中央\x01",
"明明一直是我的位子啊!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"看来明天的决赛\x01",
"我必须来早一点才行!!\x02",
)
)
CloseMessageWindow()
Jump("loc_1ECA")
label("loc_1DF9")
OP_A2(0x1)
OP_62(0xFE, 0x0, 1900, 0x2C, 0x2F, 0x96, 0x1)
OP_22(0x2F, 0x0, 0x64)
Sleep(1000)
ChrTalk(
0xFE,
"唉~遗憾啊!\x02",
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"最前排正中央\x01",
"明明一直是我的位子啊!\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xFE,
(
"看来明天的决赛\x01",
"我必须来早一点才行!!\x02",
)
)
CloseMessageWindow()
label("loc_1ECA")
Jump("loc_1F15")
label("loc_1ECD")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 6)), scpexpr(EXPR_END)), "loc_1ED7")
Jump("loc_1F15")
label("loc_1ED7")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 1)), scpexpr(EXPR_END)), "loc_1F04")
ChrTalk(
0xFE,
(
"呵呵呵,\x01",
"今年又到了这个时候了。\x02",
)
)
CloseMessageWindow()
Jump("loc_1F15")
label("loc_1F04")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC2, 0)), scpexpr(EXPR_END)), "loc_1F0E")
Jump("loc_1F15")
label("loc_1F0E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC1, 0)), scpexpr(EXPR_END)), "loc_1F15")
label("loc_1F15")
TalkEnd(0xFE)
Return()
# Function_45_1C88 end
def Function_46_1F19(): pass
label("Function_46_1F19")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCD, 3)), scpexpr(EXPR_END)), "loc_1F26")
Jump("loc_2005")
label("loc_1F26")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xCB, 2)), scpexpr(EXPR_END)), "loc_1F30")
Jump("loc_2005")
label("loc_1F30")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC9, 1)), scpexpr(EXPR_END)), "loc_1F3A")
Jump("loc_2005")
label("loc_1F3A")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 7)), scpexpr(EXPR_END)), "loc_1F44")
Jump("loc_2005")
label("loc_1F44")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_1FCC")
ChrTalk(
0xFE,
(
"#820F今天大家都来到竞技场\x01",
"为你们呐喊助威。\x02\x03",
"作为游击士协会的代表,\x01",
"你们一定要为荣誉而战哦。\x02",
)
)
CloseMessageWindow()
Jump("loc_2005")
label("loc_1FCC")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 6)), scpexpr(EXPR_END)), "loc_1FD6")
Jump("loc_2005")
label("loc_1FD6")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 1)), scpexpr(EXPR_END)), "loc_1FE0")
Jump("loc_2005")
label("loc_1FE0")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 6)), scpexpr(EXPR_END)), "loc_1FEA")
Jump("loc_2005")
label("loc_1FEA")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 1)), scpexpr(EXPR_END)), "loc_1FF4")
Jump("loc_2005")
label("loc_1FF4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC2, 0)), scpexpr(EXPR_END)), "loc_1FFE")
Jump("loc_2005")
label("loc_1FFE")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC1, 0)), scpexpr(EXPR_END)), "loc_2005")
label("loc_2005")
TalkEnd(0xFE)
Return()
# Function_46_1F19 end
def Function_47_2009(): pass
label("Function_47_2009")
TalkBegin(0xF)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_225E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 3)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2215")
OP_A2(0x633)
ChrTalk(
0xF,
(
"#151F啊,是小艾你们啊!\x02\x03",
"真厉害~!\x01",
"你们打进决赛了~!\x02\x03",
"我真是兴奋得都要跳起来了~!\x01",
" \x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#506F哈哈,别这么激动嘛。\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F如果不静下心来集中精神的话,\x01",
"说不定会错过很多精彩的画面哦。\x01",
" \x02",
)
)
CloseMessageWindow()
ChrTalk(
0xF,
(
"#150F哎嘿,不用担心啦。\x02\x03",
"因为我只有在静不下心的时候\x01",
"才能拍下一些好的照片呢~\x02\x03",
"这样才有自然感哦~\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
"#019F是、是这样吗……\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#007F不愧是朵洛希……\x01",
"完全是个另类的天才。\x02",
)
)
CloseMessageWindow()
Jump("loc_225B")
label("loc_2215")
ChrTalk(
0xF,
(
"#151F小艾你们的精彩表现,\x01",
"我一定会好好拍下来的~\x02",
)
)
CloseMessageWindow()
label("loc_225B")
Jump("loc_22EF")
label("loc_225E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC4, 4)), scpexpr(EXPR_END)), "loc_22EF")
ChrTalk(
0xF,
(
"#150F嘿嘿,\x01",
"因为我是负责报道的记者,\x01",
"所以拿到了特等席位哦。\x02\x03",
"好了,\x01",
"要快点把相机准备好~\x02",
)
)
CloseMessageWindow()
label("loc_22EF")
TalkEnd(0xF)
Return()
# Function_47_2009 end
def Function_48_22F3(): pass
label("Function_48_22F3")
TalkBegin(0xE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 2)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2641")
OP_A2(0x632)
ChrTalk(
0xE,
(
"#130F你们好啊。\x01",
"艾丝蒂尔、约修亚,\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#004F哎,是亚鲁瓦教授!?\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
"#014F您也来观看比赛吗……\x02",
)
CloseMessageWindow()
ChrTalk(
0xE,
(
"#130F哈哈,\x01",
"因为受了你们好多的照顾嘛。\x02\x03",
"今天是恩人出战决赛的日子,\x01",
"我想无论如何也要来看一看的。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#001F嘿嘿,谢谢啦。\x02\x03",
"#006F不过,买决赛的门票\x01",
"肯定花了不少米拉吧?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xE,
(
"#130F哈哈,那也不是。\x02\x03",
"资料馆的馆长突然有急事,\x01",
"不能前来观看比赛了。\x02\x03",
"所以就把这张票免费转让给了我。\x01",
" \x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#506F什~么啊,果然还是没付钱嘛。\x02",
)
CloseMessageWindow()
ChrTalk(
0xE,
(
"#130F哈哈……真是不好意思。\x02\x03",
"不过,我支持你们的信念\x01",
"是绝对不会输给其他人的。\x02\x03",
"请你们一定要加油哦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#006F嗯,包在我们身上!\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
"#010F我们必定全力出战。\x02",
)
CloseMessageWindow()
Jump("loc_26B0")
label("loc_2641")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_26B0")
ChrTalk(
0xE,
(
"#130F我支持你们的信念\x01",
"是绝对不会输给其他人的。\x02\x03",
"请你们一定要加油哦。\x02",
)
)
CloseMessageWindow()
label("loc_26B0")
TalkEnd(0xE)
Return()
# Function_48_22F3 end
def Function_49_26B4(): pass
label("Function_49_26B4")
TalkBegin(0xB)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC6, 4)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_2D38")
OP_A2(0x634)
OP_8C(0xB, 90, 0)
ChrTalk(
0xB,
"……………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#004F哎……\x01",
"克鲁茨前辈,你怎么了?\x02",
)
)
CloseMessageWindow()
OP_9E(0xB, 0xF, 0x0, 0x12C, 0xFA0)
TurnDirection(0xB, 0x0, 400)
ChrTalk(
0xB,
(
"#840F哎……啊,是你们啊。\x02\x03",
"终于到了决赛呢。\x01",
"我很期待你们的表现哦。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#006F嗯,看我的吧!\x02\x03",
"#505F……不过,克鲁茨前辈,\x01",
"你的脸色好像有点不对劲啊?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#012F是啊。\x01",
"脸色铁青铁青呢。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#845F没什么……\x01",
"只是从刚才开始就觉得有点头晕。\x02\x03",
"#844F不过奇怪的是……\x01",
"我的身体没有什么事啊……\x02\x03",
"……难道是那个时候留下的后遗症……?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#580F后、后遗症……\x01",
"难道是说昨天的比赛吗!?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#841F哈哈,不是不是。\x01",
"是三个月之前的一次事故。\x02\x03",
"那时候我好像执行任务失败了,\x01",
"还弄得自己伤痕累累。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#505F好像执行任务失败了……?\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
"#012F好像是很模糊的说法啊?\x02",
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#845F啊啊,不好意思。\x01",
"因为那次事故的记忆确实很模糊。\x02\x03",
"连那是件什么样的工作\x01",
"也完全记不起来。\x02\x03",
"虽然医生说,\x01",
"这是因事故所受的刺激……\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x102,
"#012F…………………………\x02",
)
CloseMessageWindow()
ChrTalk(
0x101,
(
"#003F是这样啊……\x02\x03",
"#002F不过,以这样的状态来参加比赛,\x01",
"不会有事吗?\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#841F我刚才已经说了,\x01",
"其实这不是身体上的问题。\x02\x03",
"嗯,跟你们说了一会儿话,\x01",
"我感觉比刚才舒服多了……\x02\x03",
"已经没事了。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0x101,
"#505F是、是吗?\x02",
)
CloseMessageWindow()
ChrTalk(
0x102,
(
"#010F看起来脸色确实好些了呢。\x01",
" \x02\x03",
"不过……\x01",
"请不要勉强硬撑着啊。\x02",
)
)
CloseMessageWindow()
ChrTalk(
0xB,
(
"#841F嗯,谢谢。\x02\x03",
"你们今天一定要\x01",
"全力出战获取冠军哦。\x02",
)
)
CloseMessageWindow()
OP_8C(0xB, 90, 400)
Jump("loc_2D82")
label("loc_2D38")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC5, 7)), scpexpr(EXPR_END)), "loc_2D82")
ChrTalk(
0xFE,
(
"要连我们的份也一起加油,\x01",
"全力出战获取冠军哦。\x02",
)
)
CloseMessageWindow()
label("loc_2D82")
TalkEnd(0xB)
Return()
# Function_49_26B4 end
# Decompiler trailer: presumably re-serializes the scenario and invokes the
# toolchain's entry wrapper — toolchain-specific, TODO confirm.
SaveToFile()
Try(main)
|
from intent_handling.signal import Signal
class TermAllUpperIntent:
    """Intent handler: list all upper-division CSC courses for a quarter.

    Upper division is defined here as a course code of 300 or above.
    """

    NAME = 'TERM_ALL_UPPER'

    def __init__(self, parameters):
        # `parameters.quarter` names the academic term being queried.
        self.parameters = parameters

    def execute(self, db):
        """Query course_terms and return a (Signal, message) pair.

        Returns ``Signal.UNKNOWN`` with an apology when no rows match,
        otherwise ``Signal.NORMAL`` with a comma-separated course list.
        """
        quarter = self.parameters.quarter
        # NOTE(review): the quarter value is interpolated straight into the
        # SQL text; if it can ever contain user-controlled input this is an
        # injection risk — prefer a parameterized query if db.call allows it.
        sql = 'SELECT code from course_terms WHERE code >= 300 AND term="{}"'.format(quarter)
        rows = db.call(sql)
        if not rows:
            return Signal.UNKNOWN, 'No upper division course data available for {}.'.format(quarter)
        listing = ', '.join('CSC {}'.format(row[0]) for row in rows)
        return Signal.NORMAL, 'The following upper division courses are offered in {}: {}.'.format(quarter, listing)
|
from django.contrib.auth.forms import UserCreationForm
from . models import User
from django import forms
from Eliezer_Website.custom_functions import image_400
class SignUpForm(UserCreationForm):
    """Registration form: username plus password and confirmation,
    backed by the project's custom User model."""
    class Meta:
        model = User
        fields = ['username', 'password1', 'password2']
class UpdateForm(forms.ModelForm):
    """Profile-edit form for account details and the newsletter opt-in."""
    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name', 'email', 'phone', 'news_subscriber']
class ImageForm(forms.ModelForm):
    """Profile-image upload form with a client-side crop rectangle.

    The hidden x/y/width/height fields are populated by the front-end
    cropping widget; after the model is saved, the image is cropped and
    resized via ``image_400``.
    """
    # Crop rectangle (source-image pixel coordinates), hidden from the user.
    x = forms.FloatField(widget=forms.HiddenInput())
    y = forms.FloatField(widget=forms.HiddenInput())
    width = forms.FloatField(widget=forms.HiddenInput())
    height = forms.FloatField(widget=forms.HiddenInput())

    class Meta:
        model = User
        fields = ['image', 'x', 'y', 'width', 'height']
        labels = {'image': ''}

    def save(self, **kwargs):
        """Save the instance, apply the crop, and return the instance.

        Fixes two defects in the original:
        * ``ModelForm.save``'s return value was discarded, so callers
          always received ``None``;
        * ``**kwargs`` (e.g. ``commit=False``) were accepted but never
          forwarded to the superclass, so the instance was saved anyway.
        """
        instance = super().save(**kwargs)
        image_400(
            instance=instance,
            x=self.cleaned_data['x'],
            y=self.cleaned_data['y'],
            w=self.cleaned_data['width'],
            h=self.cleaned_data['height'],
        )
        return instance
class ContactForm(forms.Form):
    """Site contact form; subject/message lengths are capped server-side."""
    subject = forms.CharField(max_length=150, required=True)
    # Textarea kept to 4 rows to match the page layout.
    message = forms.CharField(max_length=500, required=True, widget=forms.Textarea(attrs={
    'rows': '4'
    }))
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
from sift_extractor import SIFT_Extractor
class Homography_Finder:
    """Match SIFT features between two images and fit a RANSAC homography.

    Adapted for Python 3.6 from the OpenCV feature-homography tutorial:
    https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature_homography.html
    """

    def __init__(self, minNumberOfMatches=1, numKeyPoints=500, scaleFactor=200):
        # Minimum number of Lowe-ratio "good" matches required before a
        # homography is fitted at all.
        self.minNumberOfMatches = minNumberOfMatches
        self.siftExt = SIFT_Extractor(numKeyPoints)
        # Both images are resized to scaleFactor x scaleFactor pixels.
        self.scaleFactor = scaleFactor

    def findHomography(self, sourceImage, destinationImage):
        """Return ``(inliers, matchesMask, good)`` for two image paths.

        inliers     -- number of RANSAC inliers (0 when matching fails)
        matchesMask -- per-match inlier flags, or None when no homography
                       could be fitted
        good        -- matches passing Lowe's 0.7 ratio test

        Raises ValueError if either image cannot be read.
        """
        MIN_MATCH_COUNT = self.minNumberOfMatches
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)

        img1 = cv2.imread(sourceImage, 0)       # grayscale
        img2 = cv2.imread(destinationImage, 0)
        # cv2.imread silently returns None on a bad path; fail loudly here
        # instead of with an opaque assertion inside cv2.resize().
        if img1 is None or img2 is None:
            bad = sourceImage if img1 is None else destinationImage
            raise ValueError('Could not read image: %s' % bad)
        img1 = cv2.resize(img1, (self.scaleFactor, self.scaleFactor))
        img2 = cv2.resize(img2, (self.scaleFactor, self.scaleFactor))

        # Detect keypoints and compute SIFT descriptors for both images.
        sift = cv2.xfeatures2d.SIFT_create()
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)

        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)

        # Keep only matches passing Lowe's ratio test.
        good = [m for m, n in matches if m.distance < 0.7 * n.distance]

        inliers = 0
        matchesMask = None
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            # Fix: cv2.findHomography can return mask=None when fitting
            # fails; the original crashed on mask.ravel() in that case.
            _M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            if mask is not None:
                matchesMask = mask.ravel().tolist()
                inliers = sum(matchesMask)
            # The original also computed an unused corner polygon
            # (h, w, pts) and never used M; dropped as dead code.
        return inliers, matchesMask, good
|
# -*- coding:utf-8 -*-
# Python 2 division semantics demo (py2 `print` statements):
print 1.0/2   # true division, one float operand -> 0.5
print 1/3     # Python 2 int/int is floor division -> 0
print 1//3    # explicit floor division -> 0
print 1.0//3  # floor division with a float operand -> 0.0
|
'''
1. This Python script generates all theoretically possible (A(1-x)A'x)BO3 and A(B(1-x)B'x)O3 perovskite oxides.
2. The generated compounds are additionally screened against the charge-neutrality condition and Pauling's valence rule.
@Achintha_Ihalage
'''
import numpy as np
import pandas as pd
import itertools
import pathlib
from pymatgen.core.composition import Composition, Element
from arrange_ICSD_data import Perovskites
path = str(pathlib.Path(__file__).parent.absolute())
class ABSites():
    """Candidate element pools for the perovskite A and B lattice sites."""
    def __init__(self):
        # Elements allowed on the A site of the ABO3 structure.
        self.A_sites = ['Li', 'Be', 'B', 'Na', 'Mg', 'Al', 'Si', 'K', 'Ca', 'Sc', 'Ti', 'Zn', 'Ga', 'Ge', 'As',
        'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Cs', 'Ba', 'La', 'Hf', 'Ta', 'Hg', 'Tl', 'Pb', 'Bi',
        'Ce', 'Pr', 'Nd', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Pu', 'Am']
        # Elements allowed on the B site; overlaps with A_sites are permitted
        # here, but the generators below reject any single compound that
        # places the same element on both sites.
        self.B_sites = ['Li', 'Be', 'B', 'Na', 'Mg', 'Al', 'Si', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'Se',
        'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'La', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Au', 'Hg', 'Tl', 'Pb', 'Bi',
        'Ce', 'Sm', 'Eu', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am']
# Shared module-level instance of the site pools.
AB = ABSites()
class CompGen():
    """Generate candidate (A(1-x)A'x)BO3 and A(B(1-x)B'x)O3 compositions.

    :param AB_sites: object exposing ``A_sites`` and ``B_sites`` element lists.
    :param d: molar fraction step for x. Defaults to 0.05, the previously
        hard-coded value, so existing callers are unaffected.
    """
    def __init__(self, AB_sites, d=0.05):
        self.AB_sites = AB_sites
        self.d = d  # molar fraction interval
        # x values d, 2d, ... strictly below 1 (0.999 guards float drift).
        self.fractions = np.arange(self.d, 0.999, self.d)
    def type1_comps(self):
        """Yield (elements, fractions, formula) for (A(1-x)A'x)BO3 compounds."""
        for a in itertools.combinations(self.AB_sites.A_sites, 2):
            for f in self.fractions:
                for b in self.AB_sites.B_sites:
                    if b != a[0] and b != a[1]:  # same element cannot appear at both A and B sites
                        yield ((a[0], a[1], b, '_', 'O'), (f, 1-f, 1, 0, 3), '(%s%s%s%s)%s%s'%(a[0], str(round(f, 2)), a[1], str(round(1-f, 2)), b, 'O3'))
    def type2_comps(self):
        """Yield (elements, fractions, formula) for A(B(1-x)B'x)O3 compounds."""
        for b in itertools.combinations(self.AB_sites.B_sites, 2):
            for f in self.fractions:
                for a in self.AB_sites.A_sites:
                    if a != b[0] and a != b[1]:  # same element cannot appear at both A and B sites
                        yield ((a, '_', b[0], b[1], 'O'), (1, 0, f, 1-f, 3), '%s(%s%s%s%s)%s'%(a, b[0], str(round(f, 2)), b[1], str(round(1-f, 2)), 'O3'))
    def unzip(self, b):
        """Transpose [(x, y, z), ...] into (xs, ys, zs)."""
        xs, ys, zs = zip(*b)
        return xs, ys, zs
    def create_df(self):
        """Assemble every generated composition into a DataFrame, add ICSD
        features via Perovskites, keep rows with nA <= nB, and write
        all_comps.csv (tab separated) next to this script."""
        elems1, fracs1, comps1 = self.unzip(list(self.type1_comps()))
        elems2, fracs2, comps2 = self.unzip(list(self.type2_comps()))
        # add chemical formula; pd.concat replaces DataFrame.append, which was
        # deprecated in pandas 1.4 and removed in pandas 2.0.
        df = pd.concat(
            [pd.DataFrame(comps1, columns=['StructuredFormula']),
             pd.DataFrame(comps2, columns=['StructuredFormula'])],
            ignore_index=True,
        )
        # add elements and corresponding molar fractions
        df['A1'], df['A1_frac'] = np.concatenate([np.array(elems1)[:,0], np.array(elems2)[:,0]]), np.concatenate([np.array(fracs1)[:,0], np.array(fracs2)[:,0]])
        df['A2'], df['A2_frac'] = np.concatenate([np.array(elems1)[:,1], np.array(elems2)[:,1]]), np.concatenate([np.array(fracs1)[:,1], np.array(fracs2)[:,1]])
        df['B1'], df['B1_frac'] = np.concatenate([np.array(elems1)[:,2], np.array(elems2)[:,2]]), np.concatenate([np.array(fracs1)[:,2], np.array(fracs2)[:,2]])
        df['B2'], df['B2_frac'] = np.concatenate([np.array(elems1)[:,3], np.array(elems2)[:,3]]), np.concatenate([np.array(fracs1)[:,3], np.array(fracs2)[:,3]])
        df['O'], df['O_frac'] = np.concatenate([np.array(elems1)[:,4], np.array(elems2)[:,4]]), np.concatenate([np.array(fracs1)[:,4], np.array(fracs2)[:,4]])
        # Renamed from `d`, which shadowed the fraction-step attribute name.
        data = df.iloc[:]
        df1 = Perovskites().add_features(data)
        df1 = df1[(df1['nA'] <= df1['nB'])]
        df1.to_csv(path+'/all_comps.csv', sep='\t', index=False)
|
#!/user/bin/python
#coding:utf-8
__author__='yanshi'
from com.sy.util import data
import numpy as np
import jieba
import gensim
from gensim import models
class LSACorpus():
    """Streamed bag-of-words corpus over film-introduction documents."""
    def __init__(self, stopWordsPath, fileTitle, fileIntro):
        loader = data.Init()
        self.stopWords = loader.loadStopWords(stopWordsPath)
        self.filmTitles, self.filmDocs = loader.readData(fileTitle, fileIntro)
        # The Dictionary assigns an integer id to every distinct token seen
        # across the tokenized documents, turning text into the id-based
        # representation LSA consumes.
        self.dictionary = gensim.corpora.Dictionary(self.iter_docs())
    def __len__(self):
        return len(self.filmDocs)
    def __iter__(self):
        # doc2bow turns one token stream into a sparse vector of
        # (token_id, count) pairs, e.g. [(0, 1), (1, 1)]; absent ids mean
        # the token did not occur in the document.
        return (self.dictionary.doc2bow(tokens) for tokens in self.iter_docs())
    def iter_docs(self):
        """Yield one stopword-filtered token generator per film document."""
        for document in self.filmDocs:
            yield (token for token in jieba.cut(document) if token not in self.stopWords)
'''
Compute query-document relevance with latent semantic analysis (LSA).

The document corpus is first factored into three matrices U*S*V: a term-topic
matrix (how strongly each word relates to each topic), a diagonal matrix of
topic weights, and a topic-document matrix (how topics are distributed over
documents). A query is then projected into the same space as qt = q*U*S, and
qt*V yields the query's relevance to every document; the top-k documents are
reported. Unlike traditional term-overlap relevance, this also captures
related words, so a document can score well even when the literal query words
do not appear in it.
'''
class LSA():
    """Latent-semantic-analysis search over film introduction documents."""
    def __init__(self,stopWordsPath, fileTitle, fileIntro):
        # Wrap the raw documents in a corpus gensim's LSA can stream.
        self.corpus = LSACorpus(stopWordsPath, fileTitle, fileIntro)
    def lsaSearch(self,query):
        """Rank documents against `query` in the latent topic space and print
        the five most similar film titles.

        The corpus is factored as U*S*V (term-topic matrix, topic weights,
        topic-document matrix). The query is projected as qt = q*U*S and
        dotted with every document row of V to obtain relevance scores.
        """
        dict_copus = self.corpus.dictionary
        # Fixed number of latent topics.
        topics = 10
        lsi = models.LsiModel(self.corpus, num_topics=topics, id2word=dict_copus)
        # Recover the decomposition matrices needed to project the query.
        U = lsi.projection.u
        S = np.eye(topics) * lsi.projection.s
        # Rows of V are documents, columns are topics.
        V = gensim.matutils.corpus2dense(lsi[self.corpus], len(lsi.projection.s)).T / lsi.projection.s
        # token -> dictionary index, used to build the query term vector.
        dict_words = {}
        for i in range(len(dict_copus)):
            dict_words[dict_copus[i]] = i
        # Binary bag-of-words vector for the query.
        q = np.zeros(len(dict_words.keys()))
        for word in jieba.cut(query):
            # BUGFIX: query tokens absent from the corpus vocabulary used to
            # raise KeyError here; they carry no signal, so skip them.
            idx = dict_words.get(word)
            if idx is not None:
                q[idx] = 1
        # Project the query into the topic space: qt = q*U*S.
        qt = np.dot(np.dot(q, U), S)
        # Relevance of the query to each film introduction.
        similarity = np.zeros(len(self.corpus.filmDocs))
        for index in range(len(V)):
            similarity[index] = np.dot(qt, V[index])
        index_sim = np.argsort(similarity)[::-1]  # most similar first
        for index in list(index_sim)[:5]:  # top-5 documents
            print('sim: %f,title: %s' % (similarity[index], self.corpus.filmTitles[index]))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import profiles.models
class Migration(migrations.Migration):
    """Initial migration: creates the PasswordReset and UserProfile tables.

    Auto-generated by Django. Field definitions must stay in sync with the
    recorded migration history, so only comments are added here.
    """
    dependencies = [
        # The user foreign keys below target whatever AUTH_USER_MODEL resolves to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='PasswordReset',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('temp_key', models.CharField(max_length=100, verbose_name='temp_key')),
                # Default timestamp is the profiles.models.now callable.
                ('timestamp', models.DateTimeField(default=profiles.models.now, verbose_name='timestamp')),
                ('reset', models.BooleanField(default=False, verbose_name='reset yet?')),
                ('user', models.ForeignKey(verbose_name='user', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'password reset',
                'verbose_name_plural': 'password resets',
            },
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('profile_email', models.EmailField(unique=True, max_length=254, blank=True)),
                ('address', models.CharField(max_length=100, null=True, verbose_name='Indirizzo', blank=True)),
                ('phone', models.CharField(max_length=15, null=True, verbose_name='Tel', blank=True)),
                ('picture', models.ImageField(upload_to=b'profile_images', blank=True)),
                ('public_email', models.BooleanField(default=False)),
                ('public_phone', models.BooleanField(default=False)),
                ('public_address', models.BooleanField(default=False)),
                # One profile per user.
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
#CSCI 1133 Homework 3
#Sid Lin
#Problem 3A
def doubleCheck(lst, order):
    """Return True when `order` is non-empty and occurs exactly once in `lst`.

    The caller appends the order to `lst` *before* calling, so a count of 1
    means this is the first time the item was ordered (not a duplicate) and
    it should be billed. Empty orders are always rejected.
    """
    # list.count replaces the original manual counting loop; the empty-order
    # special case (which previously kept count at 0) becomes an explicit test.
    return order != "" and lst.count(order) == 1
def checkPrice(order):
    """Price an order: anything containing "burger" costs 3, then anything
    containing "soda" costs 2, and everything else costs 5."""
    if "burger" in order:
        return 3
    if "soda" in order:
        return 2
    return 5
def main():
    """Interactive order loop: read items until a blank line, then print the bill."""
    print("Welcome to the Python Cafe.")
    all_orders = []     # every entry typed, including duplicates and the blank
    billed_items = []   # de-duplicated items that were actually charged
    total = 0
    done = False
    while not done:
        item = input("What would you like:")
        all_orders.append(item)
        if item == "":
            done = True  # a blank entry ends the ordering loop
        # doubleCheck sees the list with `item` already appended, so it
        # returns True only for a first (non-blank) occurrence.
        if doubleCheck(all_orders, item):
            billed_items.append(item)
            total += checkPrice(item)
    print("You have ordered:")
    for item in billed_items:
        print(item)
    print("This order will cost you: $", format(total, ".2f"))
    print("Thank you for your patronage!")
if __name__ == "__main__":
    main()
|
from typing import List, Dict, Tuple
import re
from .base_factory import BaseFactory
# Matches one "INPUT:OUTPUT" mapping line, e.g. "0041:0061" or "0041-005A:one2one".
PROG = re.compile(r"([0-9A-Z\s\-]+)\:([0-9A-Za-z]+)")
# Matches a hex code-point range "XXXX-YYYY" inside the input field.
PROG_DASH = re.compile(r"([0-9A-Z]+)\-([0-9A-Z]+)")
class UnicodeMapping(BaseFactory):
    """Normalizer mapping characters through a code-point mapping table.

    The table file contains lines of the form "INPUT:OUTPUT", where INPUT is
    a hex code point (e.g. "0041"), a space-separated list of code points, or
    a range "0041-005A"; OUTPUT is a hex code point or the literal "one2one"
    (map every point in the range to itself). Characters absent from the
    table are replaced by `other`.
    """
    def __init__(
        self,
        unicode_mapping_path: str,
        other: str = "0x20",
        name: str = 'unicode_normalizer',
        denormalizable: bool = True,
    ) -> None:
        """Build the mapping table and configure the fallback character.

        :param unicode_mapping_path: path to the mapping-table text file.
        :param other: hex string (e.g. "0x20") naming the replacement
            character for unmapped input; "" disables the fallback and
            forces denormalizable to False. (Annotation fixed: it was
            `hex`, which is a builtin function, not a type.)
        :param name: factory name forwarded to BaseFactory.
        :param denormalizable: whether denormalize() may restore originals.
        """
        self.denormalizable = denormalizable
        self.mapping_table = self._gen_unicode_mapping_table(
            unicode_mapping_path=unicode_mapping_path,
        )
        if len(other) > 0:
            self.u_other = other
            self.other = chr(int(other, 16))
        else:
            # No fallback character: normalization is lossy, so
            # denormalization is force-disabled.
            self.u_other = None
            self.other = other
            self.denormalizable = False
        super().__init__(
            name=name,
            denormalizable=self.denormalizable,
        )
    @staticmethod
    def _gen_unicode_mapping_table(
        unicode_mapping_path: str,
    ) -> Dict[str, str]:
        """Parse the mapping file into {hex-string code point: output char}.

        Keys are produced by hex(), e.g. "0x41". (Annotation fixed from
        `Dict[hex, str]`: keys are plain strings.)
        """
        with open(unicode_mapping_path, "r") as filep:
            mapping_list = filep.read().split("\n")
        mapping_table = {}
        for map_ in mapping_list:
            if len(map_) == 0:
                continue
            input_, output = PROG.findall(map_)[0]
            range_or_not = PROG_DASH.findall(input_)
            if len(range_or_not) > 0:
                # Range form "XXXX-YYYY", inclusive at both ends.
                for uninum in range(
                    int(range_or_not[0][0], 16),
                    int(range_or_not[0][1], 16) + 1,
                ):
                    if output == "one2one":
                        output_token = chr(uninum)  # identity mapping
                    else:
                        output_token = chr(int(output, 16))
                    mapping_table[hex(uninum)] = output_token
            else:
                # One or more space-separated code points, one shared output.
                for uninum in input_.split(" "):
                    mapping_table[hex(int(uninum, 16))] = chr(int(output, 16))
        return mapping_table
    @staticmethod
    def _check_utf8_encoding(sentence: str) -> bool:
        """Return True when sentence survives a UTF-8 encode/decode round trip."""
        try:
            output_sentence = sentence.encode('utf-8').decode('utf-8')
        except UnicodeError as e:
            # Broadened from UnicodeEncodeError: UnicodeError also covers
            # decode failures, which the original silently let propagate.
            print("sentence: {}, error: {}".format(sentence, e))
            return False
        if output_sentence != sentence:
            return False
        return True
    def normalize(
        self,
        sentence: str,
    ) -> Tuple[str, Dict[str, List[str]]]:
        """Map every character of sentence through the table.

        Returns the normalized sentence and `meta`, which records, per output
        character, the original characters in occurrence order so that
        denormalize() can undo the mapping.

        :raises ValueError: if sentence is not valid UTF-8 text.
        """
        if not self._check_utf8_encoding(sentence):
            raise ValueError(
                "sentence: {} can not be encoded by UTF-8".format(sentence),
            )
        output_sentence = []
        meta = {}
        for char in sentence:
            uchar = hex(ord(char))
            if uchar in self.mapping_table:
                output_char = self.mapping_table[uchar]
            else:
                output_char = self.other
            if output_char not in meta:
                meta[output_char] = [char]
            else:
                # `char` is always a single character; append replaces the
                # original extend, which was equivalent for length-1 strings.
                meta[output_char].append(char)
            output_sentence.append(output_char)
        return "".join(output_sentence), meta
    def denormalize(
        self,
        sentence: str,
        meta: Dict[str, List[str]],
    ) -> str:
        """Restore the original characters recorded in meta.

        For each normalized character, split the sentence on it and re-join
        with the stored originals in order. Returns sentence unchanged when
        denormalization is disabled.
        """
        if not self.denormalizable:
            return sentence
        for org_o, org_i in meta.items():
            splited_sent = sentence.split(org_o)
            output_sentence = []
            for i, token in enumerate(splited_sent):
                output_sentence.append(token)
                # `i < len` replaces `i != len`, guarding against an
                # IndexError if meta and sentence ever disagree.
                if i < len(org_i):
                    output_sentence.append(org_i[i])
            sentence = "".join(output_sentence)
        return sentence
|
def checkrow(matrix, max, possiblemaze):
    """Propagate fixed cells along each row of a candidate grid.

    For every non-zero cell in `matrix`, pin the corresponding candidate set
    in `possiblemaze` to that single value and discard the value from every
    other candidate set in the same row. `max` is the board dimension (name
    kept for interface compatibility despite shadowing the builtin).
    Mutates and returns `possiblemaze`.
    """
    for row in range(max):
        for col in range(max):
            value = matrix[row][col]
            if value == 0:
                continue  # unsolved cell: nothing to propagate
            pinned = possiblemaze[row][col]
            pinned.clear()
            pinned.add(value)
            for other in range(max):
                if other != col:
                    possiblemaze[row][other].discard(value)
    return possiblemaze
|
# File: pos_tagging.py
# Template file for Informatics 2A Assignment 2:
# 'A Natural Language Query System in Python/NLTK'
# John Longley, November 2012
# Revised November 2013 and November 2014 with help from Nikolay Bogoychev
# Revised November 2015 by Toms Bergmanis
# PART B: POS tagging
# The tagset we shall use is:
# P A Ns Np Is Ip Ts Tp BEs BEp DOs DOp AR AND WHO WHICH ?
# Tags for words playing a special role in the grammar:
from statements import *
# Closed-class function words mapped to their dedicated grammar tags.
function_words_tags = [('a','AR'), ('an','AR'), ('and','AND'),
     ('is','BEs'), ('are','BEp'), ('does','DOs'), ('do','DOp'),
     ('who','WHO'), ('which','WHICH'), ('Who','WHO'), ('Which','WHICH'), ('?','?')]
# upper or lowercase tolerated at start of question.
# Bare word list for quick membership tests.
function_words = [p[0] for p in function_words_tags]
def unchanging_plurals():
    """Return words that appear with both NN and NNS tags in sentences.txt.

    Each line of sentences.txt holds space-separated "word|TAG" groups. A
    word seen with both the singular (NN) and plural (NNS) tag is an
    unchanging plural such as "sheep".
    """
    tags_seen = {}
    with open("sentences.txt", "r") as f:
        for line in f:
            # rstrip('\n') instead of line[:-1]: the old slicing chopped the
            # last character of a final line that has no trailing newline.
            for group in line.rstrip('\n').split(' '):
                parts = group.split('|')
                if len(parts) != 2:
                    continue  # skip blank or malformed groups
                word, tag = parts
                if tag in ('NN', 'NNS'):
                    tags_seen.setdefault(word, set()).add(tag)
    # Both tags observed => spelling does not change in the plural.
    return [word for word, tags in tags_seen.items() if len(tags) == 2]
# add code here
# Precomputed once at import time: nouns whose singular and plural forms are
# spelled identically (seen with both NN and NNS tags in sentences.txt).
unchanging_plurals_list = unchanging_plurals()
#unchanging_plurals_list
def noun_stem(s):
    """extracts the stem from a plural noun, or returns empty string"""
    s_length = len(s)
    # Unchanging plurals (e.g. "sheep") are their own stem.
    if s in unchanging_plurals_list:
        return s
    # Irregular -men plurals: "...men" -> "...man".
    if s[-3:] == "men":
        return s[:-3] + "man"
    # "...Xes" where X is not i/o/s/x/z: drop the final "s" — unless the
    # word ends in "shes"/"ches", which the -es rule below must handle.
    if re.match("([a-z]|[A-Z])*([^iosxz])es", s) is not None:
        if re.match("([a-z]|[A-Z])*(sh|ch)es", s) is None:
            return s[:-1]
    # "...ses"/"...zes" preceded by a non-s/non-z letter: drop the final "s".
    if re.match("([a-z]|[A-Z])*([^s]se|[^z]ze)s", s) is not None:
        return s[:-1]
    # "...oes/xes/ches/shes/sses/zzes": drop the "es".
    if re.match("([a-z]|[A-Z])*(o|x|ch|sh|ss|zz)es", s) is not None:
        return s[:-2]
    # Four-letter consonant+"ies" words (e.g. "dies"): drop the final "s".
    if re.match("[^AEIOUaeiou]ies", s) is not None:
        return s[:-1]
    # Longer "...ies" plurals (e.g. "flies"): replace "ies" with "y".
    if s_length>= 5 and re.match("([a-z]|[A-Z])*[^aeiou]ies", s) is not None:
        return s[:-3] + 'y'
    # Vowel + "ys" (e.g. "boys"): drop the final "s".
    if re.match("([a-z]|[A-Z])*(a|e|i|o|u)ys", s) is not None:
        return s[:-1]
    # Regular plural: drop the final "s" unless the word ends in "shs"/"chs".
    if re.match("([a-z]|[A-Z])*([^sxyzaeiou])s", s) is not None:
        if re.match("([a-z]|[A-Z])*(sh|ch)s", s) is None:
            return s[:-1]
    # Not recognised as a plural form.
    return ""
# --THIS IS THE OLD CODE---
# s_length = len(s)
# if re.match(r"([a-z]|[A-Z])*([^sxyzaeiou])s", s) != None:
# if re.match(r"([a-z]|[A-Z])*[^sc][^h]s", s) != None:
# return s[:-1]
#
#
# if re.match(r"([a-z]|[A-Z])*(a|e|i|o|u)ys", s) != None:
# return s[:-1]
#
#
# if s_length >= 5 and re.match(r"([a-z]|[A-Z])*[^aeiou]ies", s) != None:
# return s[:-3] + 'y'
#
#
# if re.match(r"[^AEIOUaeiou]ies", s) != None:
# return s[:-1]
#
#
# if re.match(r"([a-z]|[A-Z])*(o|x|ch|sh|ss|zz)es", s) != None:
# return s[:-2]
#
#
# if re.match(r"([a-z]|[A-Z])*([^s]se|[^z]ze)s", s) != None:
# return s[:-1]
# return ""
# add code here
def tag_word(lx, wd):
    """returns a list of all possible tags for wd relative to lx

    For each open lexical class (P, A, N, I, T) the word and its derived
    stem (via noun_stem / verb_stem) are looked up in the lexicon; nouns and
    verbs get singular/plural variants of the tag. Closed-class function
    words contribute their fixed tag from function_words_tags.
    """
    # The unused draft list `pos_tags` was removed.
    word_tags = []
    if wd in lx.getAll('P'):
        word_tags.append('P')
    if wd in lx.getAll('A'):
        word_tags.append('A')
    noun = noun_stem(wd)
    if wd in lx.getAll('N') or noun in lx.getAll('N'):
        if noun == "":
            # Not a recognised plural form: singular noun only.
            word_tags.append('Ns')
        elif wd == noun:
            # Unchanging plural (e.g. "sheep"): both readings possible.
            word_tags.append('Ns')
            word_tags.append('Np')
        else:
            word_tags.append('Np')
    verb = verb_stem(wd)
    if wd in lx.getAll('I') or verb in lx.getAll('I'):
        if verb == "":
            word_tags.append('Ip')
        else:
            word_tags.append('Is')
    if wd in lx.getAll('T') or verb in lx.getAll('T'):
        if verb == "":
            word_tags.append('Tp')
        else:
            word_tags.append('Ts')
    for (word, tag) in function_words_tags:
        if word == wd:
            word_tags.append(tag)
            break
    return word_tags
#if noun_stem(wd) != "":
# tags.append('NN')
#for tag in pos_tags:
# if wd in lx.getAll(tag):
# word_tags.append(tag)
# add code here
def tag_words(lx, wds):
    """returns a list of all possible taggings for a list of words"""
    # Base case: the empty sentence has exactly one (empty) tagging.
    if not wds:
        return [[]]
    head_tags = tag_word(lx, wds[0])
    tail_taggings = tag_words(lx, wds[1:])
    # Cartesian product: every tag for the first word combined with every
    # tagging of the remainder.
    return [[head] + tail for head in head_tags for tail in tail_taggings]
# End of PART B.
"""print noun_stem("fly")
print noun_stem("flies")
print noun_stem("ducks")
print noun_stem("dogs")
print noun_stem("bathes")
print noun_stem("analyses")
print noun_stem("goes")
print noun_stem("dies")""" |
import pysickle.inout as io
import os
import sys
def choose_file():
    """Interactively pick a file from the current directory; return its full path."""
    cwd = os.getcwd()
    candidates = os.listdir(cwd)
    print('Which file would you like to analyse?')
    # List the files, numbered from 1.
    for number, entry in enumerate(candidates, start=1):
        print('%s: %s' % (number, entry))
    # Keep asking until a usable file number is entered.
    while True:
        try:
            chosen = candidates[int(input('Input file number')) - 1]
            break
        except Exception as e:
            print(e)
            print('Could not choose file. Try again.')
    print('Analysing ' + chosen)
    return os.path.join(cwd, chosen)
# BUGFIX: sys.argv[1:] is a (possibly empty) list and is never None, so the
# original `if file_path is None` branch could not fire, and the two branches
# called parse_files with the wrong arguments. Fall back to the interactive
# chooser only when no paths were supplied, then always pass the paths on.
file_path = sys.argv[1:]
if not file_path:
    file_path = (choose_file(), )
io.parse_files(file_path)
# sys.stdout = sys.__stdout__
input('Press enter to continue...')
|
def addNumbers(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtractNumbers(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def multiplyNumbers(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 16:19:51 2018
@author: meicanhua
"""
import jieba
import jieba.posseg as pseg
import multiprocessing
import os
import time
import sys
# Run segmentation in parallel across CPU cores (currently disabled).
# jieba.enable_parallel(multiprocessing.cpu_count())
# Part-of-speech codes to drop: numerals, conjunctions, particles, pronouns,
# punctuation, foreign tokens, etc.
stopwords_nature = ["m","mq","mg","b","begin","bg","bl","c","cc","e","end","o","p","pba","pbei","q","qt","qv","u",
                    "ude1","ude2","ude3","udeng","udh","uguo","ule","ulian","uls","usuo","uyy","uzhe","uzhi","y","z",
                    "r","rr","ry","rys","ryt","ryv","rz","rzs","rzt","rzv","w","nx"]
# Stopword list, loaded once at import time from stopwords.txt.
with open("stopwords.txt", "r") as f:
    stopwords = [x.strip() for x in f.readlines()]
def load_dict(dict_path):
    """Load every custom user dictionary file under dict_path into jieba.

    :param dict_path: directory containing user-dictionary files.
    """
    # Loop variable renamed from `dict`, which shadowed the builtin type.
    for dict_file in os.listdir(dict_path):
        jieba.load_userdict(dict_path + "/" + dict_file)
def engine(infile, outfile):
    """POS-segment each line of infile with jieba and append the surviving
    tokens to outfile, one output line per input line.

    Tokens are dropped when the word is a stopword or its part-of-speech
    code is in stopwords_nature.
    """
    with open(infile, 'r', errors='ignore') as src, open(outfile, 'a') as dst:
        for line_number, line in enumerate(src, start=1):
            print("正在对第{0}行分词".format(str(line_number)))
            for word, nature in pseg.cut(line.strip()):
                if word not in stopwords and nature not in stopwords_nature:
                    dst.write(word + " ")
            dst.write("\n")
if __name__ == "__main__":
    # Input file comes from the command line; output goes to "cut_<infile>".
    #infile = input(">Enter infile path:")
    infile = sys.argv[1]
    outfile = "cut_" + infile
    # Load custom dictionaries before segmenting so domain terms are kept whole.
    load_dict("custom_dict")
    engine(infile, outfile)
"""
this is the base parser for html and json and other format
"""
import json
from bs4 import BeautifulSoup as bs
from requests import Session
class BaseParser(object):
    """
    base parser for parser requests content
    """
    def __init__(self, *args, **kwargs):
        """
        supply the way to query html
        :param args the first is content
        :param kwargs the key is content
        """
        if args:
            self.content = args[0]
        elif kwargs:
            self.content = kwargs.get('content', '')
        else:
            self.content = ''
    @property
    def to_html(self):
        """
        change content to beautifulsoup html
        """
        result = bs(self.content, 'lxml')
        return result
    @property
    def to_dict(self):
        """
        change content to dict; returns {} when content is not valid JSON
        """
        try:
            result = json.loads(self.content)
        # Narrowed from bare `except Exception`: json.loads raises
        # json.JSONDecodeError (a ValueError subclass) for malformed JSON
        # and TypeError for non-string input; anything else should propagate.
        except (ValueError, TypeError) as tmp:
            print(tmp)
            result = {}
        return result
|
#./bin/spark-submit --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.0 spark_test.py
from pyspark import SparkContext
from pyspark.sql.session import SparkSession
from pyspark.sql.functions import col
# Local Spark session used by this streaming test.
spark = SparkSession.builder.master("local").appName("Test PY App").getOrCreate()
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
from pyspark.sql.functions import from_json
from pyspark.sql.types import StructType
import json
# Stream records whose topic matches "connection" from the Kafka broker.
df1 = spark.readStream.format("kafka").option("kafka.bootstrap.servers", "172.16.3.129:9092").option("subscribePattern", "connection").load()
# Kafka values arrive as bytes; decode each one to a UTF-8 string.
udf = UserDefinedFunction(lambda x: x.decode("utf-8"), StringType())
df2 = df1.withColumn("value", udf(df1.value))
# Schema for the JSON payload, loaded from a schema file on disk.
schema_file = open("kafka_conn_schema.json")
new_schema = StructType.fromJson(json.load(schema_file))
#Remove the top level object "from_json"
schemadf = df2.select(from_json(col("value"), new_schema).alias("tmp")).select("tmp.*")
schemadf.printSchema()
#query = schemadf.writeStream.format("console").start()
#query.awaitTermination()
|
from django.urls import path
from . import views
urlpatterns=[
    # GIT department pages.
    # NOTE(review): 'enseingnement' looks like a typo for 'enseignement',
    # but these are public URL paths — confirm no external links before renaming.
    path('git/',views.deptGit,name='git'),
    path('git/enseingnement',views.deptGitEns,name='giten'),
    path('git/matiere',views.deptGitMat,name='gitmt'),
    path('git/DIC1',views.deptETDIC1,name='gitdic1'),
    path('git/DIC2',views.deptETDIC2,name='gitdic2'),
    path('git/DIC3',views.deptETDIC3,name='gitdic3'),
    # Civil-engineering department pages (parallel structure).
    path('civil/',views.deptCivil,name='civil'),
    path('civil/enseingnement',views.deptCivilEns,name='civilen'),
    path('civil/matiere',views.deptCivilMat,name='civilmt'),
    path('civil/DIC1',views.deptCETDIC1,name='civildic1'),
    path('civil/DIC2',views.deptCETDIC2,name='civildic2'),
    path('civil/DIC3',views.deptCETDIC3,name='civildic3'),
]
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 9 11:24:29 2018
@author: Marcin
"""
balance = 3329
annualInterestRate = 0.2
minimumMonthlyPayment = 0
previousBalance = balance
monthlyInterestRate = annualInterestRate / 12
# Raise the candidate payment in $10 steps until a 12-month simulation
# drives the end-of-year balance below zero.
while previousBalance >= 0:
    previousBalance = balance
    minimumMonthlyPayment += 10
    for _month in range(12):
        # Pay, then accrue one month of interest on the remainder.
        remaining = previousBalance - minimumMonthlyPayment
        previousBalance = remaining + (monthlyInterestRate*remaining)
print('Lowest Payment: '+ str(minimumMonthlyPayment))
|
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, x, left=None, right=None):
        self.val, self.left, self.right = x, left, right
class Solution:
    """Validate the binary-search-tree property via recursive interval bounds."""
    def isValidBST(self, root: TreeNode) -> bool:
        # Start with the unbounded open interval (-inf, +inf).
        return self.isValidBSTHelper(root, float('-inf'), float('inf'))
    def isValidBSTHelper(self, root: TreeNode, lower, higher):
        # An empty subtree is trivially valid.
        if root is None:
            return True
        # The node's value must lie strictly inside the admissible interval.
        if not (lower < root.val < higher):
            return False
        # Each child narrows the interval around its parent's value.
        return (self.isValidBSTHelper(root.left, lower, root.val)
                and self.isValidBSTHelper(root.right, root.val, higher))
s = Solution()
# Tree:        5
#             / \
#            1   4
#               / \
#              3   6
# Invalid BST: 3 and 4 sit in the right subtree of 5 but are smaller than 5.
a5 = TreeNode(6)
a4 = TreeNode(3)
a3 = TreeNode(4,a4,a5)
a2 = TreeNode(1)
a1 = TreeNode(5,a2,a3)
# a3 = TreeNode(3)
# a2 = TreeNode(1)
# a1 = TreeNode(2,a2,a3)
# a5 = TreeNode(20)
# a4 = TreeNode(6)
# a3 = TreeNode(15,a4,a5)
# a2 = TreeNode(5)
# a1 = TreeNode(10,a2,a3)
f = s.isValidBST(a1)
print(f)  # False for the tree built above
#
|
import sys
# import scipy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# Shared seed so PCA / t-SNE / k-means runs are reproducible.
random_state = 123
def pca_analysis(X, n_components=2, random_state=random_state):
    """Standardize X, fit a PCA, and return (model, coordinates, explained
    variance ratio per component)."""
    standardized = StandardScaler().fit_transform(X)
    model = PCA(n_components=n_components, random_state=random_state)
    projected = model.fit_transform(standardized)
    return model, projected, model.explained_variance_ratio_
def tsne_analysis(X, n_components=2, random_state=random_state):
    """Embed X with t-SNE and return the low-dimensional coordinates."""
    return TSNE(n_components=n_components, random_state=random_state).fit_transform(X)
def dim_reduc_plot(coords, color_var=None):
    """Scatter-plot a 2-D embedding, optionally colored by color_var."""
    plt.subplots(figsize=(10, 10))
    xs, ys = coords[:, 0], coords[:, 1]
    plt.scatter(xs, ys, c=color_var, marker='.')
    plt.show()
def k_means(X, n_clusters, random_state=random_state):
    """Fit k-means on X and return (model, labels, cluster centers, inertia).

    BUGFIX: KMeans was referenced but never imported anywhere in this module,
    so every call raised NameError. The import is kept local to contain the fix.
    """
    from sklearn.cluster import KMeans
    kmeans = KMeans(n_clusters=n_clusters, random_state=random_state)
    kmeans.fit(X)
    labels = kmeans.predict(X)
    C = kmeans.cluster_centers_
    inertia = kmeans.inertia_
    return kmeans, labels, C, inertia
def plot_kmeans_inertia(inertia_dict):
    """Plot inertia against k (elbow curve) from a {k: inertia} mapping."""
    pairs = sorted(inertia_dict.items())
    plt.plot([k for k, _ in pairs], [v for _, v in pairs])
    plt.show()
# -*- coding: utf-8 -*-
#############
#
# Copyright - Nirlendu Saha
#
# author - nirlendu@gmail.com
#
#############
import re, sys, inspect
from app_core import core_interface as core
from libs.logger import app_logger as log
def get_url(text):
    """Return the first http(s) URL found in text, or None when there is none.

    :param text: arbitrary string to scan.
    """
    urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)
    # The original used a bare `except:` around urls[0], which swallowed every
    # error (even KeyboardInterrupt); only the no-match case is expected here.
    if urls:
        return urls[0]
    return None
def new_expression(
        expression_owner_id,
        expression_text,
        expression_imagefile,
        channels,
    ):
    """New Expression Primary views
    :param expression_owner_id:
    :param expression_text:
    :param expression_imagefile:
    :param channels:
    :return:
    """
    log.info('New Expression expression views')
    # If the text embeds a URL, strip it out and register it separately.
    found_url = get_url(expression_text)
    expression_content = expression_text
    url_id = None
    if found_url:
        expression_content = expression_text.replace(found_url, '')
        url_id = core.find_url_id(found_url)
    core.new_expression(
        expression_owner_id=expression_owner_id,
        expression_content=expression_content,
        expression_content_url=url_id,
        expression_imagefile=expression_imagefile,
        channels=channels,
    )
    return
|
import json
import urllib2, httplib
import time
import smtplib
from datetime import date, timedelta
import shutil
import math
import sys
import glob
import os
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
import config as cfg
import template
import logging
class RedditOpener:
    """urllib2 opener wrapper with cookie handling and a custom User-Agent."""
    def __init__(self):
        self.user_agent = cfg.user_agent
        opener = urllib2.build_opener()
        opener.add_handler(urllib2.HTTPCookieProcessor())
        opener.addheaders = [('User-agent', self.user_agent)]
        self.opener = opener
    def open(self, url):
        """Open url through the configured opener and return the response."""
        return self.opener.open(url)
class RedditPost:
    """One reddit post plus ranking helpers (Python 2 code)."""
    # Class-level cache of per-subreddit reference scores, keyed as
    # ref_score[subreddit][pp_alg]; populated by calculate_ref_score and
    # shared across all instances.
    ref_score = {}
    def __init__(self, ref_subreddit = None, pp_alg = cfg.default_user_cfg['pp_alg'], **data ):
        """Wrap one post's JSON 'data' dict. ref_subreddit names the subreddit
        whose reference score this post is ranked against (defaults to its own)."""
        self.subreddit = data['subreddit']
        self.id = data['id']
        self.title = data['title']
        self.num_comments = data['num_comments']
        self.score = data['score']
        self.permalink = 'http://reddit.com' + data['permalink']
        self.name = data['name']
        self.url = data['url']
        self.created_utc = data['created_utc']
        self.domain = data['domain']
        self.is_self = data['is_self']
        self.pp_alg = pp_alg
        self.time_of_download = time.time()
        # Lazily computed caches for post_power / hours_ago / type.
        self._pp = None
        self._ha = None
        self._tp = None
        self.hours_ago_int = int(math.ceil((self.time_of_download - self.created_utc) / 3600))
        if ref_subreddit == None:
            self.ref_subreddit = self.subreddit
        else:
            self.ref_subreddit = ref_subreddit
    def __str__(self):
        # NOTE(review): calls post_power(), so str() fails unless
        # calculate_ref_score() ran for this post's reference subreddit.
        return u'title: {} - score: {} - posted: {} hours ago - post_power: {}'.format(self.title,
            self.score + self.num_comments, (time.time() - self.created_utc) / 3600, self.post_power()).encode("utf-8")
    @classmethod
    def load_posts(cls, posts_json, ref_subreddit = None, pp_alg = cfg.default_user_cfg['pp_alg']):
        """Build RedditPost objects from the 'children' list of a listing JSON."""
        return [RedditPost(pp_alg = pp_alg, ref_subreddit = ref_subreddit, **post['data']) for post in posts_json ]
    @classmethod
    def calculate_ref_score(cls, reddit_posts, subreddit = '', pp_alg = cfg.default_user_cfg['pp_alg']):
        """Compute and cache the reference score for a subreddit from its posts.

        Must be called before post_power() for posts referencing that subreddit.
        Returns the computed score (None when reddit_posts is empty).
        """
        if len(reddit_posts) > 0:
            if subreddit == '':
                # Infer subreddit and algorithm from the first post.
                subreddit = reddit_posts[0].subreddit
                pp_alg = reddit_posts[0].pp_alg
            cls.ref_score[subreddit] = {}
            if pp_alg == 'default':
                cls.ref_score[subreddit][pp_alg] = cls._calculate_ref_score_default(reddit_posts, subreddit = subreddit)
            else:
                raise NotImplementedError('Unknown post_power alghorithm.')
            logging.debug('ref_score for subreddit '+ subreddit + ': ' + str(cls.ref_score[subreddit][pp_alg]) + ' pp_alg: ' + pp_alg)
            return cls.ref_score[subreddit][pp_alg]
    @classmethod
    def _calculate_ref_score_default(cls, reddit_posts, subreddit = ''):
        """Reference score: (sum of score+comments over the top 3 posts) / 7.0."""
        ref_score = 0
        for item in reddit_posts[:3]:
            ref_score += item.score + item.num_comments
        ref_score /= 7.0
        return ref_score
    def post_power(self):
        """Cached ranking heuristic; raises if the reference score is missing."""
        if self.ref_subreddit not in self.ref_score:
            raise RuntimeError('Invalid state: call calculate_ref_score() BEFORE post_power()')
        if self._pp != None: return self._pp
        if self.pp_alg == 'default':
            self._pp = self._post_power_default()
        else:
            raise NotImplementedError('Unknown post_power alghorithm.')
        return self._pp
    def _post_power_default(self):
        # 25/(age+1) weights newer posts more heavily; +0.01 guards against a
        # zero reference score.
        ago = (time.time() - self.created_utc) / 3600
        postscore = self.score + self.num_comments
        pp = (25 / (ago+1) * postscore / (self.ref_score[self.ref_subreddit][self.pp_alg]+0.01))
        return pp
    def hours_ago(self):
        """Age of the post in whole hours, as a cached string."""
        if self._ha != None: return self._ha
        ago = self.hours_ago_int# int(math.ceil((self.time_of_download - self.created_utc) / 3600))
        string = "%r" % (ago)
        self._ha = string
        return string
    def type(self):
        """Classify the link as 'image', 'video' or '' from URL substrings (cached)."""
        if self._tp != None: return self._tp
        type=''
        for item in cfg.image_types:
            if self.url.find(item) != -1:
                type = 'image'
        for item in cfg.video_types:
            if self.url.find(item) != -1:
                type = 'video'
        self._tp = type
        return type
class RedditLoader:
    """Fetches, caches and aggregates reddit listing JSON (Python 2 code)."""
    # Shared class state: request-throttle timestamp, retry counter, HTTP
    # opener, and an in-memory cache of listing results keyed by url.
    last_req_time = 0
    retries = 0
    opener = RedditOpener()
    reddit_cache = {}
    @classmethod
    def get_url(cls, url, delay = 0):
        """Fetch url (JSON expected), sleeping so that at least `delay`
        seconds pass between requests; returns {} on handled HTTP errors."""
        time_elapsed_since_last_req = time.time() - cls.last_req_time
        time_required = delay
        if (time_elapsed_since_last_req < time_required):
            logging.debug('Sleeping for {}'.format(time_required - time_elapsed_since_last_req))
            time.sleep(time_required - time_elapsed_since_last_req)
        logging.info('Requesting url {}'.format(url))
        cls.last_req_time = time.time()
        try:
            response = cls.opener.open(url)
            logging.info('Site responded with HTTP code: {}'.format(response.code))
            json_message = response.read()
        except urllib2.HTTPError as error:
            logging.error('Site responded with unhandled HTTP error code: {}'.format(error.code))
            json_dct = {}
        except urllib2.URLError as error:
            logging.error('Request failed to reach a server. Reason: {}'.format(error.reason))
            json_dct = {}
        except httplib.IncompleteRead as error:
            logging.error('Request failed, httplib.IncompleteRead encounterd. Reason: {}'.format(error))
            json_dct = {}
        except:
            # Anything unexpected is logged and re-raised.
            logging.error('Unexpected error from urllib2:', sys.exc_info()[0])
            raise
        else:
            logging.debug('Message recieved: {}'.format(json_message))
            json_dct = json.loads(json_message)
        return json_dct
    @classmethod
    def load_json_from_url(cls, url, delay = cfg.default_request_delay, cache_refresh_interval = cfg.default_cache_refresh_interval):
        """Return the 'children' post list for url, serving from the cache
        when fresh and retrying (with a multiplied delay) on empty responses.

        Exits the process after cfg.max_retries consecutive failures.
        """
        if cls.is_cached(url, cache_refresh_interval):
            logging.info('Url ' + url + ' arleady in cache, NOT REQUESTING')
            return cls.get_cache(url)
        json_dct = cls.get_url(url, delay)
        if 'data' in json_dct and 'children' in json_dct['data']:
            cls.retries = 0
            cls.set_cache(url, json_dct['data']['children'])
            return json_dct['data']['children']
        elif cls.retries >= cfg.max_retries:
            logging.error('max_retries exceeded... exiting')
            sys.exit(1)
        else:
            logging.info('Response cointained no posts: {}'.format(json_dct))
            cls.retries += 1
            logging.warning('Retrying last request.... retry count: {}'.format(cls.retries))
            # Recursive retry with an increased inter-request delay.
            return cls.load_json_from_url(url, delay = delay * cfg.retry_delay_multiplier, cache_refresh_interval = cache_refresh_interval)
    @classmethod
    def is_cached(cls, url, refresh_time = cfg.default_cache_refresh_interval):
        """True when url is cached and younger than refresh_time seconds."""
        return url in cls.reddit_cache and time.time() - cls.reddit_cache[url]['last_refresh'] < refresh_time
    @classmethod
    def get_cache(cls, url):
        """Return the cached post list for url (caller must check is_cached first)."""
        return cls.reddit_cache[url]['posts']
    @classmethod
    def set_cache(cls, url, data, entry_limit = cfg.cache_entry_limit):
        """Store data for url, evicting the oldest entries past entry_limit."""
        if len(cls.reddit_cache) + 1 >= entry_limit:
            logging.info("Cache entry limit reached, making free space...")
            cls._del_cache_entries(no_oldest = len(cls.reddit_cache) + 1 - entry_limit + cfg.cache_entries_to_clear)
        if url not in cls.reddit_cache:
            cls.reddit_cache[url] = {}
        cls.reddit_cache[url]['last_refresh'] = time.time()
        cls.reddit_cache[url]['posts'] = data
    @classmethod
    def _del_cache_entries(cls, no_oldest = cfg.cache_entries_to_clear):
        """Delete the no_oldest least-recently-refreshed cache entries."""
        logging.info("Deleting {} oldest entr(y/ies) from cache".format(no_oldest))
        for url in sorted(cls.reddit_cache.keys(), key = lambda k: cls.reddit_cache[k]['last_refresh']):
            if no_oldest <= 0: break
            logging.debug('Deleting cache entry for url: {}'.format(url))
            del cls.reddit_cache[url]
            no_oldest -= 1
    @classmethod
    def build_url(cls, subreddit, site = '', t = '', after = ''):
        """Construct a reddit JSON listing url for subreddit/site, with
        optional t= (time window) and after= (pagination) query parameters."""
        if subreddit == '':
            return 'http://www.reddit.com/'
        if site == '':
            url = 'http://www.reddit.com/r/' + subreddit + '/.json'
        else:
            url = 'http://www.reddit.com/r/' + subreddit + '/' + site + '/.json'
        params = []
        if t != '':
            params.append('t=' + t)
        if after != '':
            params.append('after=' + after)
        if len(params) == 0:
            return url
        else:
            url += '?'
            # Join params with '&', omitting the trailing separator.
            for i, param in enumerate(params):
                url += param
                if i + 1 == len(params): break
                url += '&'
            return url
    @classmethod
    def load_subreddit(cls, subreddit, suffix = '', t = '', post_no = cfg.posts_in_json_page, pp_alg = cfg.default_user_cfg['pp_alg']):
        """Load up to post_no posts from subreddit, following 'after' pagination."""
        posts = cls.load_json_from_url(cls.build_url(subreddit, site = suffix, t = t))
        loaded = len(posts)
        if loaded < cfg.posts_in_json_page :
            # A short first page means there is nothing more to fetch.
            return RedditPost.load_posts(posts, pp_alg = pp_alg, ref_subreddit = subreddit)
        else:
            while len(posts) >= cfg.posts_in_json_page and len(posts) < post_no and loaded > 0:
                last_post_id = posts[-1]['data']['name']
                next_site = cls.load_json_from_url(cls.build_url(subreddit, site = suffix, t = t, after = last_post_id))
                loaded = len(next_site)
                posts += next_site
            return RedditPost.load_posts(posts[:post_no], pp_alg = pp_alg, ref_subreddit = subreddit)
    @classmethod
    def aggregate_subreddits(cls, reddit_list = [], user = None, ref_cat = cfg.default_user_cfg['ref_cat'],
                    ref_t = cfg.default_user_cfg['ref_t'],
                    posts_per_sub = cfg.default_user_cfg['posts_per_sub'] ,
                    time_frame = cfg.default_user_cfg['time_frame'],
                    pp_treshold = cfg.default_user_cfg['pp_treshold'],
                    sort_key = None, reverse_sort_order = True,
                    pp_alg = cfg.default_user_cfg['pp_alg'] ,
                    domain_filter = cfg.default_user_cfg['domain_filter'] ,
                    reverse_domain_filter = cfg.default_user_cfg['reverse_domain_filter']):
        """Collect filtered, optionally sorted posts per subreddit (or group).

        Returns a list of {'sub1;sub2;...': [RedditPost, ...]} dicts.
        NOTE(review): `reddit_list = []` is a mutable default argument; it is
        only read here, but a None sentinel would be the safer idiom.
        """
        if user != None:
            # A user object overrides every explicit keyword argument.
            reddit_list = user.subreddits
            ref_cat = user.ref_cat
            ref_t = user.ref_t
            posts_per_sub = user.posts_per_sub
            time_frame = user.time_frame
            pp_treshold = user.pp_treshold
            sort_key = user.sort_key
            reverse_sort_order = user.reverse_sort_order
            pp_alg = user.pp_alg
            domain_filter = user.domain_filter
            reverse_domain_filter = user.reverse_domain_filter
        output_list = []
        for entry in reddit_list:
            post_list = []
            if not isinstance(entry, list):
                grouplist = [entry]
            else:
                grouplist = entry
            for subreddit in grouplist:
                # Reference posts establish the subreddit's scoring baseline.
                top_posts = RedditLoader.load_subreddit(subreddit, ref_cat, ref_t)
                RedditPost.calculate_ref_score(top_posts, subreddit = subreddit)
                posts = RedditLoader.load_subreddit(subreddit, post_no = posts_per_sub)
                for item in posts:
                    filtered = False
                    if domain_filter != '':
                        # Blacklist: drop posts whose domain matches any entry.
                        for expr in domain_filter.split(cfg.domain_filter_spliter):
                            if item.domain.find(expr) != -1:
                                filtered = True
                                break
                    if not filtered and reverse_domain_filter != '':
                        # Whitelist mode (for/else): the else runs only when no
                        # break fired, i.e. no pattern matched => filter it out.
                        for expr in reverse_domain_filter.split(cfg.domain_filter_spliter):
                            if item.domain.find(expr) != -1 : break
                        else:
                            filtered = True
                    if not filtered and (time.time()-item.created_utc) < time_frame and item.post_power() >= pp_treshold:
                        post_list.append(item)
            if post_list:
                if sort_key != None: post_list.sort(key = sort_key, reverse = reverse_sort_order)
                output_list.append({';'.join(grouplist) : post_list})
        return output_list
class UserCfg:
    """Per-user configuration; missing keys are filled from cfg.default_user_cfg."""
    _default_cfg = cfg.default_user_cfg

    def __init__(self, **usercfg):
        """Merge defaults into `usercfg` and expose every setting as an attribute."""
        # BUGFIX: was self._default_cfg.iterkeys() (Python 2 only); plain iteration
        # over a dict yields its keys on both Python 2 and 3.
        for key in self._default_cfg:
            if key not in usercfg:
                usercfg[key] = self._default_cfg[key]
        self.username = usercfg['username']
        self.usr_mail = usercfg['usr_mail']
        self.gmail_login_user = usercfg['gmail_login_user']
        self.gmail_login_pwd = usercfg['gmail_login_pwd']
        self.subject_tmpl = usercfg['subject_tmpl']
        self.ref_cat = usercfg['ref_cat']
        self.ref_t = usercfg['ref_t']
        self.posts_per_sub = usercfg['posts_per_sub']
        self.time_frame = usercfg['time_frame']
        self.pp_treshold = usercfg['pp_treshold']
        self.pp_alg = usercfg['pp_alg']
        self.domain_filter = usercfg['domain_filter']
        self.reverse_domain_filter = usercfg['reverse_domain_filter']
        self.subreddits = usercfg['subreddits']
        self.posts_sort_by = usercfg['posts_sort_by']
        self.posts_sort_order = usercfg['posts_sort_order']
        # Translate the configured sort field into a key function for list.sort().
        if self.posts_sort_by == 'num_comments':
            self.sort_key = lambda post: post.num_comments
        elif self.posts_sort_by == 'score':
            self.sort_key = lambda post: post.score
        elif self.posts_sort_by == 'post_power':
            self.sort_key = lambda post: post.post_power()
        elif self.posts_sort_by == 'hours_ago':
            self.sort_key = lambda post: post.hours_ago_int
        else:
            self.sort_key = None
        # Anything other than 'asc' sorts descending.
        if usercfg['posts_sort_order'] == 'asc':
            self.reverse_sort_order = False
        else:
            self.reverse_sort_order = True
def dump_posts_to_json(posts):
    """Serialize aggregated posts ([{group-name: [post, ...]}, ...]) to a JSON string.

    Each post becomes a flat list of its display fields.
    """
    output_list = []
    for subreddit_dct in posts:
        post_list = []
        name = ''
        # BUGFIX: .iteritems() is Python 2 only; .items() works on 2 and 3.
        for subreddit, postlist in subreddit_dct.items():
            name += subreddit
            for item in postlist:
                post_list.append([item.title, item.url, item.subreddit, item.num_comments, item.score, item.permalink,
                                  item.post_power(), item.hours_ago()])
        # BUGFIX: the output used to be keyed by the loop variable `subreddit`
        # (i.e. only the LAST key of a multi-key dict) instead of the accumulated name.
        output_list.append({name : post_list})
    return json.dumps(output_list, indent = 4)
def mail(to, subject, text, gmail_user, gmail_pwd):
    """Send `text` as an HTML email through Gmail's SMTP server and log the delivery."""
    message = MIMEMultipart()
    message['From'] = gmail_user
    message['To'] = to
    message['Subject'] = subject
    message.attach(MIMEText(text, 'html'))
    server = smtplib.SMTP(cfg.gmail_smtp_server, cfg.gmail_smtp_port)
    server.ehlo()
    server.starttls()          # upgrade to TLS before authenticating
    server.ehlo()
    server.login(gmail_user, gmail_pwd)
    server.sendmail(gmail_user, to, message.as_string())
    server.quit()
    logging.info('Email sent to: {}'.format(to))
def load_configs():
    """Build a UserCfg for every JSON file matching cfg.userconfig_file_pattern.

    Files that fail to parse are logged and skipped.
    """
    logging.info('Started loading user configs')
    configs = []
    for cfg_file in glob.iglob(cfg.userconfig_file_pattern):
        with open(cfg_file) as usrcfg:
            try:
                configs.append(UserCfg(**json.load(usrcfg)))
            except ValueError:
                logging.error('Error parsing config file: ' + cfg_file + ', file ommited.')
            else:
                logging.info('Config from file: {} successfully loaded'.format(cfg_file))
    logging.info('Finished loading user configs')
    return configs
def build_html(value, html, user):
    """Render aggregated posts into an HTML page using the `html` template object.

    value: list of {group-name: [posts]} dicts (see aggregate_subreddits).
    `user` is accepted for interface compatibility but is not used here.
    """
    output = ''
    # BUGFIX: tabs was computed with a pointless counting loop; it is just len(value).
    tabs = len(value)
    subnames = []
    for subreddit in value:
        # BUGFIX: .iteritems() is Python 2 only; .items() works on 2 and 3.
        for name, posts in subreddit.items():
            subnames.append(name)
    output += html.head(tabs, subnames)
    num = 0
    for subreddit in value:
        for name, posts in subreddit.items():
            tit = ' | '.join(name.title().split(';'))
            if len(tit) > 150:
                # Long group titles are truncated; the full title moves into a tooltip.
                tit2 = tit
                tit3 = tit
                tit = tit[0:150]
                tit += " <img src=img/questionmark.gif alt='{}' title='{}'>".format(tit2, tit3)
            output += html.tablestart(tit, num)
            for item in posts:
                output += html.item(item.url.encode('ascii', 'replace'), item.title.encode('ascii', 'replace'),
                                    item.permalink.encode('ascii', 'replace'), item.num_comments, item.score,
                                    '{0:.2f}'.format(item.post_power()), item.hours_ago(), item.subreddit, item.is_self, item.type())
            output += html.tableend()
            num += 1
    return output
def main():
    """Process every configured user: aggregate posts, render HTML, archive the old page."""
    userlist = load_configs()
    html = template.Template
    for user in userlist:
        logging.info('###################### Started processing user: {}'.format(user.username))
        value = RedditLoader.aggregate_subreddits(user = user)
        output = build_html(value, html, user)
        if not os.path.exists('public/archive/'):
            os.makedirs('public/archive/')
        # Move last run's page into the archive, stamped with its modification date.
        if os.path.exists('public/' + user.username + '.html'):
            filedate = time.strftime("%m-%d-%Y",time.localtime(os.path.getmtime('public/' + user.username +'.html')))
            shutil.move('public/' + user.username +'.html', 'public/archive/' + user.username + '-' + filedate + '.html')
        ############### CIEPIEL'S temporary testing code ########################################################
        if os.path.exists('public/hn.html'):
            filedate = time.strftime("%m-%d-%Y",time.localtime(os.path.getmtime('public/hn.html')))
            shutil.copy('public/hn.html', 'public/archive/HackerNews-' + filedate + '.html')
        ################################################################################################################
        # BUGFIX: the handle was left open on exceptions; 'with' guarantees closure.
        with open('public/' + user.username + '.html', 'w+') as f:
            f.write(output)
        #mail(user.usr_mail, user.subject_tmpl.format(date = datetime.datetime.now().strftime("%d-%m-%Y")), output, user.gmail_login_user, user.gmail_login_pwd)
if __name__ == "__main__":
    # Translate the configured level name (e.g. "info") into the logging constant.
    cfg.logging_config['level'] = getattr(logging, cfg.logging_config['level'].upper())
    logging.basicConfig(**cfg.logging_config)
    # Mirror log records to the console at INFO level, using the same format.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter(cfg.logging_config['format'])
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
    logging.info('########################## Application started')
    main()
    logging.info('########################## Application finished')
|
from typing import List
class Solution:
    """LeetCode 200 'Number of Islands', solved with a recursive flood-fill DFS.

    Note: visited land cells are overwritten with '2', so the input grid is mutated.
    """

    def run_fastest_dfs(self, i, j, grid):
        """Flood-fill from (i, j): mark the whole connected '1' region as '2'."""
        # Out of bounds — nothing to do.
        # BUGFIX: removed the stray debug print that polluted stdout on every
        # boundary hit.
        if i in [-1, self.xlen] or j in [-1, self.ylen]:
            return
        elif grid[i][j] != '1':
            return
        else:
            grid[i][j] = '2'
            self.run_fastest_dfs(i - 1, j, grid)  # up
            self.run_fastest_dfs(i + 1, j, grid)  # down
            self.run_fastest_dfs(i, j - 1, grid)  # left
            self.run_fastest_dfs(i, j + 1, grid)  # right

    def numIslands(self, grid: List[List[str]]) -> int:
        """Count 4-directionally connected components of '1' cells."""
        if not grid or not grid[0]:
            return 0
        self.ylen = len(grid[0])
        self.xlen = len(grid)
        res = 0
        for i in range(self.xlen):
            for j in range(self.ylen):
                if grid[i][j] == '1':
                    # Each unvisited '1' starts a new island; sink it entirely.
                    self.run_fastest_dfs(i, j, grid)
                    res += 1
        return res
# Quick smoke test: this sample grid contains a single connected island.
a = [["1", "1", "1", "1", "0"], ["1", "1", "0", "1", "0"], ["1", "1", "0", "0", "0"], ["0", "0", "0", "0", "0"]]
print(Solution().numIslands(a))
|
from getpass import getpass
# Flat-file user database: one "login password banned restricted" record per line.
DATABASE = "database.txt"
# Default field values for newly created records.
ban_status = "No"
restrictions = "No"
def displayAdminMenu():
    """Print the numbered admin menu."""
    for line in ("Choose an option: ",
                 "1. Change password.",
                 "2. Show the list of users.",
                 "3. Add a new unique user.",
                 "4. Block user.",
                 "5. Turn on/off restricts for a password.",
                 "6. Exit."):
        print(line)
def displayUserMenu():
    """Print the numbered user menu."""
    for line in ("Choose an option: ",
                 "1. Change password.",
                 "2. Exit."):
        print(line)
def adminPanel(username):
    """Interactive admin menu loop (implemented via recursion, as the rest of the app)."""
    name = username
    displayAdminMenu()
    choice = int(input("Your choice: "))
    if choice == 1:
        changePass(name)
        print("\n")
        adminPanel(name)
    elif choice == 2:
        printUsers()
        print("\n")
        adminPanel(name)
    elif choice == 3:
        addUniqueUser()
        print("\n")
        adminPanel(name)
    elif choice == 4:
        banUser()
        print("\n")
        adminPanel(name)
    elif choice == 5:
        # BUGFIX: the target login used to be stored in `name`, so after toggling
        # restrictions the panel recursed as the TARGET user, not as the admin.
        target = str(input("Enter user's login: "))
        userlist = open(DATABASE).readlines()
        for user in userlist:
            if user.split()[0] == target:
                # Field 3 is the restrictions flag; toggle it.
                # (Removed a read handle that was opened and never used.)
                if user.split()[3] == "Yes":
                    turnOffRestrictions(target)
                elif user.split()[3] == "No":
                    turnOnRestrictions(target)
        print("\n")
        adminPanel(name)
    elif choice == 6:
        exit(0)
    else:
        print("Wrong option.")
        print("\n")
        # BUGFIX: previously only re-printed the menu and returned without
        # ever reading another choice.
        adminPanel(name)
def userPanel(username):
    """Interactive user menu loop (recursive, like the rest of the app)."""
    name = username
    displayUserMenu()
    choice = int(input("Your choice: "))
    if choice == 1:
        userlist = open(DATABASE).readlines()
        for user in userlist:
            if user.split()[0] == name:
                # Field 3 is the restrictions flag: pick the restricted or the
                # plain password-change flow accordingly.
                # (Removed a read handle that was opened and never used.)
                if user.split()[3] == "Yes":
                    passRestrictions(name)
                elif user.split()[3] == "No":
                    changePass(name)
        print("\n")
        userPanel(name)
    elif choice == 2:
        exit(0)
    else:
        print("Wrong option.")
        print("\n")
        # BUGFIX: previously printed the ADMIN menu here and then returned
        # without reading another choice.
        userPanel(name)
def mainMenu():
    """Top-level menu: register, log in, show help, or exit."""
    print("Here you can log in, register or get help.")
    print("What would you like to do?")
    print("1. Register.")
    print("2. Log in.")
    print("3. Get help.")
    print("4. Exit.")
    choice = int(input("Your choice: "))
    if choice == 1:
        return funcRegister()
    elif choice == 2:  # BUGFIX: was a separate `if` instead of `elif`
        return funcLogin()
    elif choice == 3:
        print("Тущенко Денис Михайлович, ФБ-83, варіант 18")
        print("18. Неспівпадання з ім'ям користувача, записаним в зворотному порядку.")
        mainMenu()
    elif choice == 4:
        exit(0)
    else:
        # BUGFIX: invalid input used to fall through and silently return.
        print("Wrong option.")
        mainMenu()
def changePass(username):
    # Change a user's password after re-authenticating with the old one,
    # then drop back into the appropriate panel.
    name = username
    old_pass = str(input("Old password: "))
    if is_authorized(name, old_pass):
        print("Correct, now enter your new password: ")
        new_pass = str(input("New password: "))
        new_pass_pass = str(input("Enter again: "))
        if new_pass == new_pass_pass:
            # Rewrite the whole database, swapping the "name old_pass" prefix in place.
            # NOTE(review): str.replace swaps EVERY occurrence — it could touch another
            # record sharing the same "login password" prefix; confirm acceptable.
            fin = open(DATABASE, "rt")
            data = fin.read()
            data = data.replace(name + ' ' + old_pass, name + ' ' + new_pass)
            fin.close()
            fin = open(DATABASE, "wt")
            fin.write(data)
            fin.close()
            print("Password changed!")
        else:
            print("Error occured.")
    # Return to the matching panel regardless of success.
    if name == "admin":
        adminPanel(name)
    else:
        userPanel(name)
def passRestrictions(name):
    # Password change with the variant-18 restriction enabled: the new password
    # must not equal the login written backwards.
    login = name
    old_pass = str(input("Old password: "))
    if is_authorized(name, old_pass):
        print("Correct, now enter your new password: ")
        new_pass = str(input("New password: "))
        if new_pass == login[::-1]:
            # Reject the reversed-login password and go back to the user panel.
            print("You can't set this password.\n")
            userPanel(login)
        else:
            new_pass_pass = str(input("Enter again: "))
            if new_pass == new_pass_pass:
                fin = open(DATABASE, "rt")
                data = fin.read()
                data = data.replace(name + ' ' + old_pass, name + ' ' + new_pass)
                fin.close()
                fin = open(DATABASE, "wt")
                fin.write(data)
                fin.close()
                # NOTE(review): unlike changePass, no confirmation is printed and
                # control is not returned to a panel here — confirm this is intended.
def printUsers():
    """Print every user record except the first line of the database file."""
    records = open(DATABASE).readlines()[1:]
    for record in records:
        fields = record.split()
        login, password = fields[0], fields[1]
        banned, restricted = fields[2], fields[3]
        print("Login: " + login + " " + "Password: " + password + " " + "Is banned? " + banned + " " + "Restrictions: " + restricted)
def addUniqueUser():
    """Admin helper: append a new user record with an empty initial password.

    NOTE(review): const_pass == "" produces a record with two consecutive spaces
    ("login  No No"); split() then yields only 3 fields, shifting the password
    column onto "No". Confirm whether new users should get a placeholder password.
    """
    const_pass = ""
    print("Add new unique user: ")
    login = str(input("Enter unique login: "))
    if user_exists(login):
        print("Name Unavailable. Please Try Again")
    else:
        with open(DATABASE, 'r') as f:
            info = f.read()
        # BUGFIX: the write handle was never closed, so the record could stay unflushed.
        with open(DATABASE, 'w') as f:
            f.write(info + "\n" + login + " " + const_pass + " " + ban_status + " " + restrictions)
def banUser():
    """Set the ban flag of the named user's record to "Yes"."""
    name = str(input("Ban user: "))
    with open(DATABASE) as db:
        userlist = db.readlines()
    for user in userlist:
        fields = user.split()
        if fields[0] == name:
            # BUGFIX: handles were left open; also use the flag actually stored
            # on the record instead of the module-level default "No", so the
            # replacement still matches a user who is already banned.
            with open(DATABASE, "rt") as fin:
                data = fin.read()
            data = data.replace(name + ' ' + fields[1] + ' ' + fields[2],
                                name + ' ' + fields[1] + ' ' + "Yes")
            with open(DATABASE, "wt") as fin:
                fin.write(data)
def turnOnRestrictions(username):
    """Set the restrictions flag of username's record to "Yes"."""
    name = username
    with open(DATABASE) as db:
        userlist = db.readlines()
    for user in userlist:
        fields = user.split()
        if fields[0] == name:
            # BUGFIX: read/write handles were left open; 'with' guarantees closure.
            with open(DATABASE, "rt") as fin:
                data = fin.read()
            # Replace the whole 4-field record, changing only the last field.
            data = data.replace(name + ' ' + fields[1] + ' ' + fields[2] + ' ' + fields[3],
                                name + ' ' + fields[1] + ' ' + fields[2] + ' ' + "Yes")
            with open(DATABASE, "wt") as fin:
                fin.write(data)
def turnOffRestrictions(username):
    """Set the restrictions flag of username's record to "No"."""
    name = username
    with open(DATABASE) as db:
        userlist = db.readlines()
    for user in userlist:
        fields = user.split()
        if fields[0] == name:
            # BUGFIX: read/write handles were left open; 'with' guarantees closure.
            with open(DATABASE, "rt") as fin:
                data = fin.read()
            # Replace the whole 4-field record, changing only the last field.
            data = data.replace(name + ' ' + fields[1] + ' ' + fields[2] + ' ' + fields[3],
                                name + ' ' + fields[1] + ' ' + fields[2] + ' ' + "No")
            with open(DATABASE, "wt") as fin:
                fin.write(data)
def isBanned(name):
    """Return the stored ban flag ("Yes"/"No") for `name`, or None if unknown."""
    for record in open(DATABASE).readlines():
        fields = record.split()
        if fields[0] == name:
            return fields[2]
def get_existing_users():
    """Yield (username, password) for every record in the database.

    Lines with fewer than two fields are skipped: funcRegister prefixes each
    record with "\n", which could otherwise leave blank lines that crashed
    this generator with an IndexError. (Also dropped the unused ban/restriction
    locals the old version extracted and never yielded.)
    """
    with open(DATABASE, "r") as fp:
        for line in fp.readlines():
            fields = line.split()
            if len(fields) < 2:
                continue
            yield fields[0], fields[1]
def is_authorized(name, password):
    """Return True when (name, password) matches a stored credential pair."""
    for credentials in get_existing_users():
        if credentials == (name, password):
            return True
    return False
def user_exists(name):
    """Return True when a record with this login exists."""
    return name in (login for login, _ in get_existing_users())
def ask_user_credentials():
    """Prompt for a login and a hidden password; return (name, password)."""
    print("Enter your data:")
    name = str(input("Login: "))
    # getpass() always returns a str (possibly empty), never None, so the old
    # `if password is None` fallback was dead code and has been removed.
    password = getpass("Password: ")
    return name, password
def funcLogin():
    """Authenticate a user and route to the proper panel, allowing 3 password attempts.

    The original version copy-pasted the retry logic three levels deep; this is
    the same flow (3 password entries total) flattened into one loop.
    """
    name, password = ask_user_credentials()
    if isBanned(name) == "Yes":
        print("Your account is banned.")
        mainMenu()
        return
    for attempt in range(3):
        if name == "admin" and is_authorized(name, password):
            print("Welcome to admin panel.")
            adminPanel(name)
            return
        elif is_authorized(name, password):
            print("Welcome to user panel, " + name)
            userPanel(name)
            return
        elif user_exists(name):
            if attempt == 2:
                # Third wrong password — give up, as before.
                print("Out of tries! Exiting...")
                exit(0)
            print("Wrong password! Try again: ")
            print("Login: " + name)
            password = getpass("Password: ")
        else:
            print("This user does not exist.")
            mainMenu()
            return
def funcRegister():
    """Create a new user record with the default ban/restriction flags, then return to the menu."""
    name, password = ask_user_credentials()
    if user_exists(name):
        print("This user already exists. Pick another name.\n")
        mainMenu()
    else:
        with open(DATABASE, 'r') as f:
            info = f.read()
        # Append the record on its own line; 'with' guarantees the handle is closed.
        with open(DATABASE, 'w') as f:
            f.write(info + "\n" + name + " " + password + ' ' + ban_status + ' ' + restrictions)
        print("Your account has been created!\n")
        mainMenu()
def main():
    """Seed the database with the default admin account and enter the main menu.

    BUGFIX: the database used to be rewritten unconditionally on every start,
    wiping all registered users; now it is only created when missing.
    """
    import os
    if not os.path.exists(DATABASE):
        with open(DATABASE, "w") as db:
            db.write("admin admin No No")
    mainMenu()
if __name__ == "__main__":
    # Entry point: seed the database and start the interactive menu.
    main()
|
from django.contrib import admin
from models import Document
class DocumentAdmin(admin.ModelAdmin):
    # Changelist shows the title and upload date, ordered by date added (ascending).
    list_display = ('title','add_date')
    ordering = ('add_date',)

# Expose Document in the Django admin with the customized changelist above.
admin.site.register(Document, DocumentAdmin)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @python: 3.6
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
def test_img(net_g, datatest, args):
    """Evaluate `net_g` on `datatest`.

    Returns (accuracy, test_loss, accuracy_2), where accuracy_2 is the number of
    samples PREDICTED as class 2 divided by the number of TRUE class-2 samples
    (the original metric is preserved as-is).
    """
    net_g.eval()
    test_loss = 0
    correct = 0
    correct_2 = 0   # predictions equal to class 2
    total_2 = 0     # targets equal to class 2
    data_loader = DataLoader(datatest, batch_size=args.bs)
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        for data, target in data_loader:
            # Count class-2 targets before any device transfer (as before).
            total_2 += int((target == 2).sum())
            if args.gpu != -1:
                data, target = data.cuda(), target.cuda()
            log_probs = net_g(data)
            # sum up batch loss
            test_loss += F.cross_entropy(log_probs, target, reduction='sum').item()
            # get the index of the max log-probability
            y_pred = log_probs.data.max(1, keepdim=True)[1]
            correct_2 += int((y_pred == 2).sum())
            correct += y_pred.eq(target.data.view_as(y_pred)).long().cpu().sum()
    test_loss /= len(data_loader.dataset)
    accuracy = 100.00 * correct / len(data_loader.dataset)
    # BUGFIX: guard against division by zero when the test set has no class-2 samples.
    accuracy_2 = 100.00 * correct_2 / total_2 if total_2 else 0.0
    if args.verbose:
        print('\nTest set: Average loss: {:.4f} \nAccuracy: {}/{} ({:.2f}%)\n'.format(
            test_loss, correct, len(data_loader.dataset), accuracy))
    return accuracy, test_loss, accuracy_2
|
import warnings
import numpy as np
class Predictor():
    """ Object representing a function to fit a learning curve (See :class:`learning_curves.LearningCurve`). """

    def __init__(self, name, func, guess, inv=None, diverging=False, bounds=None):
        """ Create a Predictor.

            Args:
                name (str): name of the function
                func (Callable): lambda expression, function to fit
                guess (Tuple): Initial parameters
                inv (Callable): lambda expression corresponding to the inverse function.
                diverging (bool): False if the function converges. In this case the first parameter must be the convergence parameter (enforced to be in [-inf,1]).
                bounds (array of tuples): Bounds of the parameters. Default is [-inf, inf] for all parameters, except for the convergence parameter whose bounds are [-inf,1]
                    if diverging is True.
        """
        self.name = name
        self.func = func
        self.guess = guess
        self.params = self.guess
        self.score = None
        self.cov = {}
        self.diverging = diverging
        self.params_up = None
        self.params_low = None
        if callable(inv):
            # Fall back to the fitted parameters when the caller supplies none.
            self.inv = lambda x, *args: inv(x, *args) if len(args) > 0 else inv(x, *self.params)
        else:
            self.inv = None
        if bounds:
            self.bounds = bounds
        else:
            self.bounds = (-np.inf, np.inf) if self.diverging else ([-np.inf] * (len(self.params)), [1]+[np.inf] * (len(self.params) - 1))

    def __call__(self, x, *args):
        """ Evaluate the function at x, using `args` as parameters if given, else the fitted params. """
        x = np.array(x)  # Enforce x to be a np array because a list of floats would throw a TypeError
        # BUGFIX: the guard used to be `len(args) > 1`, which silently ignored a
        # SINGLE explicitly passed parameter (inconsistent with the inv wrapper
        # above, which uses > 0).
        return self.func(x, *args) if len(args) > 0 else self.func(x, *self.params)

    def __repr__(self):
        return f"Predictor {self.name} with params {self.params} and score {self.score}"

    def get_saturation(self):
        """ Compute the saturation accuracy of the Predictor.

            The saturation accuracy is the best accuracy you will get from the model without changing any other parameter than the training set size.
            If the Predictor is diverging, this value should be disregarded, being meaningless.

            Returns:
                float: saturation accuracy of the Predictor.
                    This value is the first parameter of the Predictor if it is converging.
                    This value is calculated if the Predictor is diverging with an inverse function.
                    This value is 1 (the default) if the Predictor is diverging without an inverse function.
        """
        if not self.diverging: sat_acc = self.params[0]
        elif callable(self.inv):
            sat_acc = 1  # if predictor is diverging, set saturation accuracy to 1
            while not np.isfinite(self.inv(sat_acc)):  # Decrease the saturation accuracy until finding a value that is not inf
                sat_acc -= 0.01
        else: sat_acc = 1  # Default value for a diverging Predictor
        return sat_acc

    def __eq__(self, other):
        # BUGFIX: this used to RETURN a RuntimeError instance (a truthy object!)
        # instead of signalling "not comparable"; NotImplemented lets Python fall
        # back to its default comparison.
        if not isinstance(other, Predictor):
            return NotImplemented
        return self.name == other.name
# diaryhelper asks questions about your day in a regular interval so that you don't forget about writing your diary
# q maps question text -> trigger hour (as a string); localq is an ALIAS of the
# same dict (not a copy) — both names refer to one object.
q = {}
localq = q
def debug():
    """Preload the question dict with sample data for manual testing."""
    global q, localq
    q = {'what did you eat today?': '21', 'what time did you get up?': '15', 'what did you do today?': '22'}
    localq = q
    print(q)
# Call debug() above when you need canned data to test with.
# debug()
def mainbody():
    # Top-level interactive menu; every sub-action recurses back into mainbody().
    # READ DIARY FILE
    print(" MENU")
    print("---")
    print("[1] ADD QUESTIONS")
    print("[2] REMOVE QUESTIONS")
    print("[3] UPDATE AND EXIT")
    print("---")
    # this is a trigger for the answer() since it falls in else:
    x = 55
    def answer(a):
        # Read the real choice from the user (the parameter is just the trigger value).
        a = int(input("Choose: "))
        if a == 1:
            # print("#1")
            def addq():
                # Prompt for a new question and its trigger hour; re-prompt on bad input.
                print('CURRENT QUESTIONS')
                print("")
                print("ADD NEW")
                qname = input("QUESTION NAME: ")
                print(qname)
                trigger = input("TRIGGER TIME: ")
                print(trigger)
                if trigger.isdigit() == False:
                    print("TRIGGER NOT AN INTEGER")
                    print("")
                    addq()
                else:
                    if int(trigger) < 1 or int(trigger) > 24:
                        print("TRIGGER SHOULD BE 1 TO 24")
                        addq()
                    else:
                        pass
                q.update({qname: trigger})
                print(q)
                print("MORE QUESTIONS? YES=1, NO =0 ")
                more = input(": ")
                if more.isdigit() == False:
                    print("NOT AN INTEGER SHOULD BE 1 OR 0")
                    print("")
                    more = ""
                    addq()
                else:
                    pass
                more = int(more)
                # This doesn't look like a great solution, but it is a way to expose the
                # q that addq works on through the global localq; the syntax is confusing
                # too. The catch is that `localq = q` must appear both here and at the top
                # of the program so the `elif == 3` branch can run.
                global localq
                localq = q
                if more == 1:
                    print("## MORE 1")
                    addq()
                elif more == 0:
                    mainbody()
            addq()
        elif a == 2:
            def rmq():
                # Remove one question, chosen by its printed number.
                if localq == {}:
                    print("NO QUESTIONS ... ")
                    mainbody()
                else:
                    pass
                print("WICH QUESTIONS YOU WANT TO REMOVE?")
                i = 0
                ll = []
                # print(localq)
                for a in localq:
                    i = i + 1
                    print(i, ".", a, ":", localq[a], "HOURS")
                    ll.append(a)
                ####
                print("")
                rname = input("CHOSE A NUMBER: ")
                rname = int(rname)
                rname = rname - 1
                ff = str(ll[rname])
                del localq[ff]
                del ll[rname]
                print(ll)
                mainbody()
            rmq()
        elif a == 3:
            def w(val):
                # Persist the questions as "name:hour," pairs in data.csv, then exit.
                with open("data.csv", "w") as f:
                    # f.write(str(print(a, localq[a])))
                    print("CURRENT CONFIG FILE:")
                    print(localq)
                    print("")
                    for a in localq:
                        f.write(str(a))
                        f.write(":")
                        f.write(str(localq[a]))
                        f.write(",")
            w(localq)
        # This is answer filter...
        else:
            print("Choose a number between 1 and 3")
            # repeat
            answer(a)
    answer(x)
if __name__ == '__main__':
    # Run the interactive menu; mainbody() recurses until option 3 writes data.csv.
    mainbody()
    print("EXIT")
|
import os
import shutil
import sys ########
# importing csv module
import csv
import shutil
# csv file name
# csv file name
filename = "train.csv"
# initializing the titles and rows list
fields = []
rows = []
# Map every non-BMP code point (emoji etc.) to U+FFFD so rows can be printed safely.
non_bmp_map = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
# reading csv file
with open(filename, 'r',encoding="utf8") as csvfile:
    # creating a csv reader object
    csvreader = csv.reader(csvfile)
    # extracting field names through first row
    #fields = csvreader.next()
    # extracting each data row one by one
    for row in csvreader:
        rows.append(row)
    # get total number of rows
    print("Total no. of rows: %d"%(csvreader.line_num))
# Drop the header row (it landed in rows because the .next() call above is commented out).
rows.pop(0)
# Common spam word database.
# BUGFIX: two separators were broken, so Python's implicit string concatenation
# silently merged neighbouring items: 'Access for free'+'Accordingly' became one
# bogus entry, and 'Weight,''Weight loss' became 'Weight,Weight loss' — meaning
# four intended keywords were never matched. Commas restored below.
spamWords=['0% risk','777','99%','99.9%','100%','100% more','#1','$$$','100% free','100% satisfied','4U','50% off','Accept credit cards','Acceptance','Access','Access now','Access for free',
'Accordingly','Act Now','Act immediately','Action','Action required','Ad','Additional income','Addresses on CD',
'Affordable','Affordable deal','All natural','All new','Amazed','Amazing','Amazing offer','Amazing stuff','Apply here','Apply now','Apply Online','As seen on','At no cost','Auto email removal','Avoid',
'Avoid bankruptcy','Bargain','Be amazed','Be surprised','Be your own boss','Believe me','Being a member','Beneficiary',
'Best bargain','Best deal','Best price','Best offer','Beverage','Big bucks','Bill 1618','Billing','Billing address','Billionaire','Billion','Billion dollars','Bonus','Boss','Brand new pager','Bulk email','Buy','Buy now','Buy direct','Buying judgments','Cable converter','Call','Call free','Call me','Call now','Calling creditors','Can’t live without','Cancel',
'Cancel at any time','Cancel now','Cancellation required','Cannot be combined with any other offer','Cards accepted','Cash','Cash out','Cash bonus','Cashcashcash','Casino','Celebrity','Cell phone cancer scam',
'Cents on the dollar','Certified','Chance','Cheap','Check','Check or money order','Claims','Claim now','Claim your discount','Claims not to be selling anything','Claims to be in accordance with some spam law','Claims to be legal','Clearance','Click','Click below','Click here','Click now','Click to get','Click to remove','Collect','Collect child support','Compare','Compare now','Compare online','Compare rates','Compete for your business','Confidentially on all orders','Congratulations','Consolidate debt and credit',
'Consolidate your debt','Copy accurately','Copy DVDs','Costs','Credit','Credit bureaus','Credit card offers','Cures','Cures baldness','Deal','Dear','Debt','Diagnostics','Dig up dirt on friends','Direct email','Direct marketing','Discount','Do it now','Do it today','Don’t delete','Don’t hesitate','Dormant','Double your',
'Double your cash','Double your income','Double your wealth','Drastically reduced','Earn','Earn $','Earn extra cash','Earn money','Earn monthly','Earn from home','Earn per month','Earn per week','Easy terms','Eliminate bad credit','Eliminate debt','Email extractor',
'Email harvest','Email marketing','Exclusive deal','Expect to earn','Expire','Explode your business','Extra','Extra cash','Extra income','Extract email','F r e e','Fantastic','Fantastic deal','Fantastic offer',
'Fast cash','Fast Viagra delivery','Financial freedom',
'Financially independent','For free','For instant access','For just $','For Only','For you Form','Free','Free access','Free bonus','Free cell phone','Free consultation',
'Free DVD','Free gift','Free grant money','Free hosting','Free info','Free information','Free installation','Free instant','Free investment','Free iPhone','Free leads','Free Macbook','Free membership',
'Free money','Free offer','Free preview',
'Free priority mail','Free quote','Free sample',
'Free trial','Free website','Freedom','Friend','Full refund','Get','Get it now','Get out of debt','Get paid','Get started now',
'Gift certificate','Give it away','Giving away',
'Great','Great deal','Great offer','Guarantee','Guaranteed','Guaranteed deposit','Guaranteed income','Guaranteed payment','Have you been turned down?','Hello',
'Here','Hidden','Hidden assets','Hidden charges','Hidden fees','High score','Home','Home based','Home employment','Home based business','Human growth hormone','Huge discount',
'Hurry up','If only it were that easy','Important information regarding','Important notification','In accordance with laws','Income','Income from home','Increase sales',
'Increase traffic','Increase your chances',
'Increase your sales','Incredible deal','Info you requested',
'Information you requested','Instant','Instant earnings',
'Instant income','Insurance','Insurance','Internet market','Internet marketing','Investment','Investment decision','It’s effective','Join millions','Join millions of Americans',
'Junk','Laser printer','Leave','Legal','Legal notice','Life','Life Insurance',
'Lifetime','Lifetime access','Lifetime deal',
'Limited amount','Limited number','Limited offer','Limited supply','Limited time',
'Limited time offer','Limited time only','Loan','Long distance phone offer','Lose','Lose weight','Lose weight spam','Lower interest rates','Lower monthly payment','Lower your mortgage rate','Lowest insurance rates',
'Lowest price','Lowest rate','Luxury','Luxury car','Mail in order form','Maintained',
'Make $','Make money','Marketing','Marketing solutions','Mass email','Medicine','Medium','Meet girls','Meet me','Meet singles',
'Meet women','Member','Member stuff','Message contains','Message contains disclaimer','Million','Millionaire','Million dollars','Miracle','MLM','Money','Money back','Money making','Month trial offer',
'More Internet Traffic','Mortgage','Mortgage rates',
'Multi-level marketing','Name brand','Never','Never before','New customers only','New domain extensions','Nigerian','No age restrictions','No catch','No claim forms','No cost','No credit check','No deposit required',
'No disappointment','No experience','No fees','No gimmick','No hidden','No hidden сosts','No hidden fees','No interests',
'No inventory','No investment','No investment required','No medical exams',
'No middleman','No obligation','No payment required',
'No purchase necessary','No questions asked',
'No selling','No strings attached','No-obligation','Not intended','Not junk','Not scam','Not spam','Now','Now only','Number 1','Number one',
'Obligation','Offshore','Offer','Offer expires','Once in lifetime','Once in a lifetime',
'One hundred percent free','One hundred percent guaranteed',
'One time','One time mailing','Online biz opportunity','Online degree',
'Online job','Online income','Online marketing','Online pharmacy','Only','Only $','Open','Opportunity','Opt in','Order',
'Order now','Order shipped by','Order status','Order today',
'Outstanding values','Passwords','Pennies a day','Per day','Per month','Per week','Performance','Phone','Please read','Potential earnings','Pre-approved','Presently','Price',
'Price protection','Print form signature',
'Print out and fax','Priority mail','Prize','Problem','Produced and sent out','Profits','Promise','Promise you','Purchase','Pure profits','Quote','Rates','Real thing','Refinance',
'Refinance home','Refund','Removal','Removal instructions','Remove','Removes wrinkles','Request','Request now','Request today','Requires initial investment',
'Reserves the right','Reverses','Reverses aging','Risk free','Risk-free','Rolex','Round the world','S 1618',
'Safeguard notice','Sale','Sample','Satisfaction','Satisfaction guaranteed','Save $',
'Save money','Save now','Save big money','Save up to','Score',
'Score with babes','Search engine listings','Search engines','Section 301',
'See for yourself','Sent in compliance','Serious','Serious cash',
'Serious only','Serious offer','Shopper','Shopping spree',
'Sign up free today','Social security number','Solution','Spam','Special deal','Special discount',
'Special for you','Special offer','Special promotion','Stainless steel',
'Stock alert','Stock disclaimer statement','Stock pick','Stop',
'Stop calling me','Stop emailing me','Stop snoring','Strong buy','Stuff on sale','Subject to cash',
'Subject to credit','Subscribe','Subscribe now','Subscribe for free','Success','Supplies','Supplies are limited','Take action','Take action now',
'Talks about hidden charges','Talks about prizes','Teen','Tells you it’s an ad','Terms','Terms and conditions','The best rates','The following form','They keep your money — no refund!',
'They’re just giving it away','This isn’t a scam','This isn’t junk','This isn’t spam','This won’t last','Thousands','Time limited','Traffic',
'Trial','Undisclosed recipient','University diplomas','Unlimited','Unsecured credit','Unsecured debt','Unsolicited','Unsubscribe','Urgent','US dollars','Vacation','Vacation offers','Valium','Viagra','Vicodin','VIP',
'Visit our website','Wants credit card','Warranty','Warranty expired','We hate spam','We honor all','Web traffic',
'Website visitors','Weekend getaway','Weight','Weight loss','What are you waiting for?','What’s keeping you?','While available','While in stock','While supplies last','While you sleep','Who really wins?',
'Why pay more?','Wife','Will not believe your eyes','Win','Winner','Winning','Won','Work from home','Xanax','You are a winner!','You have been chosen','You have been selected','Your chance',
'Your income','Your status','Zero chance','Zero percent','Zero risk']
# Wordpress blacklist database
# Extend the spam-word list with the community-maintained WordPress comment blacklist.
with open("blacklist_wordpress.txt", 'r', encoding="utf8") as blacklist:
    spamWords.extend(blacklist.read().splitlines())
with open("data_train.txt", 'w+',encoding="utf8") as trainfile:
    # Header row; ';;$;;' serves as an (unlikely-to-collide) field separator.
    trainfile.write("following;;$;;followers;;$;;actions;;$;;is_retweet;;$;;Type;;$;;URLCounted;;$;;HashtagCounted;;$;;MensionCounted;;$;;averageHashtag;;$;;averageURL;;$;;wordsCounted;;$;;SpamWordsCounted\n")
    #Building data train file
    for row in rows:
        i=0
        value = []
        spam=0
        for col in row:
            # Replace non-BMP characters so the row can be printed and written safely.
            col=col.translate(non_bmp_map)
            print(col)
            if col == '': col='0'
            if (i==0 or i==1):
                value.append(col.rstrip())
            if (i!=6 and i!=1 and i!=0): # drop the text, id and location columns from the written record
                value.append(col.rstrip())
                trainfile.write(col.rstrip()+";;$;;")
            i=i+1
        print('\n')
        print('\n')
        #Compute some features
        # value[1] appears to hold the tweet text (column 1 of train.csv) —
        # NOTE(review): confirm against the actual CSV column order.
        countURL=value[1].count("https")
        value.append(countURL)
        trainfile.write(str(countURL)+";;$;;")
        countHashtag=value[1].count("#")
        value.append(countHashtag)
        trainfile.write(str(countHashtag)+";;$;;")
        countMensions=value[1].count("@")
        value.append(countMensions)
        trainfile.write(str(countMensions)+";;$;;")
        averageHashtag=countHashtag/len(value[1].split()) # relative to the number of words in the sentence
        averageURL=countURL/len(value[1].split())
        value.append(averageHashtag)
        value.append(averageURL)
        value.append(len(value[1].split()))
        trainfile.write(str(averageHashtag)+";;$;;")
        trainfile.write(str(averageURL)+";;$;;")
        trainfile.write(str(len(value[1].split()))+";;$;;")
        # Compute number of spamWords found in the sentences
        for v in spamWords:
            if v in value[1]: spam=spam+1
        value.append(spam)
        trainfile.write(str(spam)+"\n")
        print(value)
|
from abc import abstractmethod, ABC
from pepy.domain.model import ProjectName, Password
class DomainException(ABC, Exception):
@abstractmethod
def message(self) -> str:
pass
class ProjectNotFoundException(DomainException):
    """Raised when no project with the requested name exists."""

    def __init__(self, project_name: str):
        self.project_name = project_name

    def message(self) -> str:
        return f"Project with name {self.project_name} does not exist"
class ProjectNameLengthIsNotValidException(DomainException):
    """Raised when a project name falls outside the allowed length bounds."""

    def __init__(self, project_name: str, min_length: int, max_length: int):
        self.project_name = project_name
        self.min_length = min_length
        self.max_length = max_length

    def message(self) -> str:
        return (
            f'Name "{self.project_name}" is not valid, '
            f'length should be between {self.min_length} and {self.max_length}'
        )
class InvalidAdminPassword(DomainException):
    """Raised when the supplied admin password does not match the expected one."""

    def __init__(self, password: Password):
        self.password = password

    def message(self) -> str:
        return f'Password "{self.password.password}" is not a valid admin password'
|
# Mathematical operators and operator precedence (what is evaluated first).

# Multiplication, *
produkt = 8 * 7
print('Produktet er', produkt)
print()  # blank line

# Division, /
resultat = 76 / 4
print('Resultatet av 76:4 er', resultat)  # note that the answer comes back as a float
print()

# Weekend reading, exercise for Wednesday:
# Integer (floor) division, //
resultat = 77 / 4
heltallsdivisjon = 77 // 4  # only yields whole-number answers!
print('Resultatet av 77:4 er', resultat)
print('Heltallsdivisjonen av 77:4 er', heltallsdivisjon)
print()

# Remainder (modulo), %
# Bug fix: the original assigned to `resultatet` (dead variable) while
# printing `resultat` below; the repeated assignment now targets the
# variable that is actually printed.
resultat = 77 / 4  # repeated for tidiness
restdivisjon = 77 % 4  # the remainder when the division does not come out even
print('Resultatet av 77:4 er', resultat)
print('Restdivisjonen 77:4 er', restdivisjon)  # typo fix: 'e' -> 'er'

# Homework: see the notes for lecture #3.
|
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def buildTree(self, inorder, postorder):
        """Rebuild a binary tree from its inorder and postorder traversals.

        :type inorder: List[int]
        :type postorder: List[int]
        :rtype: TreeNode

        Assumes both lists describe the same tree and contain no duplicate
        values (list.index locates the first occurrence only).
        """
        if not inorder:
            return None
        # The last postorder entry is always the root of the current subtree.
        node = TreeNode(postorder[-1])
        if len(inorder) == 1:
            return node
        split = inorder.index(postorder[-1])
        # Inorder entries left of the root form the left subtree, and the
        # first `split` postorder entries cover exactly those nodes.
        node.left = self.buildTree(inorder[:split], postorder[:split])
        node.right = self.buildTree(inorder[split + 1:], postorder[split:-1])
        return node
|
#coding=utf-8
import time
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
def send(head, content):
    """Send an HTML e-mail with subject *head* and body *content* over SMTP-SSL.

    Connection settings (sender, password, host, receivers) are filled in
    below; any failure is caught and reported on stdout instead of raised.
    """
    try:
        local_date = time.strftime("%Y%m%d", time.localtime())  # only used by the commented-out attachment example below
        sender = '' # sender's e-mail address, e.g.: 111111@qq.com
        password='' # sender's client authorization code (not the account password), e.g.: xxxxxxxxxxxxxx
        host='' # sender's outgoing mail server, e.g.: QQ mail defaults to smtp.qq.com
        receivers = [''] # recipients; set to your own mailbox, multiple entries allowed, e.g.: 111111@qq.com
        meg_text = content # mail body
        message = MIMEMultipart()
        message.attach(MIMEText(meg_text, 'html', 'utf-8'))
        # Three arguments: the text content, the subtype setting the text format
        # ('html' here), and 'utf-8' setting the encoding.
        message['From'] = Header("fajianren", 'utf-8') # sender name shown in the mail
        message['To'] = Header("shoujianren", 'utf-8') # recipient name shown in the mail
        message['Subject'] = Header(head, 'utf-8') # mail subject
        # Attachment 1 example: send the dated .xls file from the local directory.
        #att1 = MIMEText(open('/tmp/hunting/mcy/'+local_date+'.xls', 'rb').read(), 'base64', 'utf-8')
        #att1["Content-Type"] = 'application/octet-stream'
        # `filename` here can be anything; whatever is written is what the mail displays.
        #att1["Content-Disposition"] = 'attachment; filename="' + local_date + '.xls"'
        #message.attach(att1)
        ## Attachment 2 example: send a file from the current directory.
        #att2 = MIMEText(open('test/test.mp3', 'rb').read(), 'base64', 'utf-8')
        #att2["Content-Type"] = 'application/octet-stream'
        #att2["Content-Disposition"] = 'attachment; filename="two.mp3"'
        #message.attach(att2)
        smtpObj = smtplib.SMTP_SSL() # note: the SSL variant is required here
        smtpObj.connect(host)
        smtpObj.login(sender,password) # mailbox login
        smtpObj.sendmail(sender, receivers, message.as_string())
        smtpObj.close()
    except Exception as e:
        print('邮件发送失败:')
        print(e)
    else:
        print ("邮件发送成功")
"""
A cache stores static files to speed up future requests.
A few built-in caches are found here, but it's possible to define your
own and pull them in dynamically by class name.
Built-in caches:
- test
- disk
Example built-in cache configuration:
"cache": {
"name": "Disk",
"path": "/tmp/data",
"umask": "0000"
}
Example external cache configuration:
"cache": {
"class": "Module.Classname",
"kwargs": {"frob": "yes"}
}
- The "class" value is split up into module and classname, and dynamically
included. Exception thrown on failure to include.
- The "kwargs" value is fed to the class constructor as a dictionary of keyword
args. If your defined class doesn't accept any of these keyword arguments,
an exception is thrown.
A cache must provide these methods: lock(), unlock(), read(), and save().
Each method accepts three arguments:
- layer: layer name
- coord: single Coordinate that represents a tile.
- format: string like "png" or "jpg" that is used as a filename extension.
The save() method accepts an additional argument before the others:
- body: raw content to save to the cache.
"""
import os
import sys
import time
import gzip
import portalocker
from tempfile import mkstemp
from os.path import isdir, exists, dirname, basename, join as pathjoin
def get_cache_by_name(name):
    """ Map a case-insensitive cache name to its class.

        Only 'disk' is recognized; any other name raises an Exception.
    """
    if name.lower() != 'disk':
        raise Exception('Unknown cache: %s' % name)
    return Disk
class Disk:
    """ Caches files to disk.
    Example configuration:
        "cache": {
          "name": "Disk",
          "path": "/tmp/stache",
          "umask": "0000",
          "dirs": "portable"
        }
    Extra parameters:
    - path: required local directory path where files should be stored.
    - umask: optional string representation of octal permission mask
      for stored files. Defaults to 0022.
    - dirs: optional string saying whether to create cache directories that
      are safe, portable or quadtile. For an example tile 12/656/1582.png,
      "portable" creates matching directory trees while "safe" guarantees
      directories with fewer files, e.g. 12/000/656/001/582.png.
      Defaults to safe.
    - gzip: optional list of file formats that should be stored in a
      compressed form. Defaults to "txt", "text", "json", and "xml".
      Provide an empty list in the configuration for no compression.
    If your configuration file is loaded from a remote location, e.g.
    "http://example.com/tilestache.cfg", the path *must* be an unambiguous
    filesystem path, e.g. "file:///tmp/cache"
    """
    def __init__(self, path, umask=0022, dirs='safe', gzip='txt text json xml'.split()):
        # NOTE(review): int(umask) parses a string like "0022" as decimal 22,
        # not octal -- confirm callers pass ints or intend decimal here.
        self.cachepath = path
        self.umask = int(umask)
        self.dirs = dirs
        # Normalise the configured formats so comparisons are case-insensitive.
        self.gzip = [format.lower() for format in gzip]
    def _is_compressed(self, format):
        # True when this file format is configured for gzip storage.
        return format.lower() in self.gzip
    def _filepath(self, layer, coord, format):
        """ Build the cache-relative file path for a tile, following the
            configured directory scheme: "safe", "portable" or "quadtile".
        """
        l = layer
        z = '%d' % coord.zoom
        e = format.lower()
        # Compressed formats get a trailing ".gz" on the extension.
        e += self._is_compressed(format) and '.gz' or ''
        if self.dirs == 'safe':
            # Zero-pad and split column/row so each directory stays small,
            # e.g. 12/000/656/001/582.png.
            x = '%06d' % coord.column
            y = '%06d' % coord.row
            x1, x2 = x[:3], x[3:]
            y1, y2 = y[:3], y[3:]
            filepath = os.sep.join( (l, z, x1, x2, y1, y2 + '.' + e) )
        elif self.dirs == 'portable':
            # Directory tree mirrors the tile scheme directly: z/x/y.ext.
            x = '%d' % coord.column
            y = '%d' % coord.row
            filepath = os.sep.join( (l, z, x, y + '.' + e) )
        elif self.dirs == 'quadtile':
            pad, length = 1 << 31, 1 + coord.zoom
            # two binary strings, one per dimension
            xs = bin(pad + int(coord.column))[-length:]
            ys = bin(pad + int(coord.row))[-length:]
            # interleave binary bits into plain digits, 0-3.
            # adapted from ModestMaps.Tiles.toMicrosoft()
            dirpath = ''.join([str(int(y+x, 2)) for (x, y) in zip(xs, ys)])
            # built a list of nested directory names and a file basename
            parts = [dirpath[i:i+3] for i in range(0, len(dirpath), 3)]
            filepath = os.sep.join([l] + parts[:-1] + [parts[-1] + '.' + e])
        else:
            raise Exception('Please provide a valid "dirs" parameter to the Disk cache, either "safe", "portable" or "quadtile" but not "%s"' % self.dirs)
        return filepath
    def _fullpath(self, layer, coord, format):
        # Absolute path of the tile file under the configured cache root.
        filepath = self._filepath(layer, coord, format)
        fullpath = pathjoin(self.cachepath, filepath)
        return fullpath
    def _lockpath(self, layer, coord, format):
        # Lock files sit next to the tile they guard, with a ".lock" suffix.
        return self._fullpath(layer, coord, format) + '.lock'
    def lock(self, layer, coord, format):
        """ Acquire an exclusive, non-blocking file lock for the given tile.

            Creates parent directories as needed; the open lock file handle is
            kept on self.lockfile until unlock() is called.
        """
        umask_old = os.umask(self.umask)
        path = self._lockpath(layer, coord, format)
        try:
            os.makedirs(os.path.dirname(path), 0777&~self.umask)
        except OSError, e:
            # errno=17 means that parent directories already exist, which is fine
            if e.errno != 17: raise
        finally:
            os.umask(umask_old)
        self.lockfile = open(path, 'w+')
        portalocker.lock(self.lockfile, portalocker.LOCK_EX | portalocker.LOCK_NB)
    def unlock(self, layer, coord, format):
        """ Release the lock taken by lock() and remove the lock file. """
        self.lockfile.close()
        os.remove(self.lockfile.name)
        self.lockfile = None
    def remove(self, layer, coord, format):
        """ Delete the cached tile file, ignoring a missing file. """
        fullpath = self._fullpath(layer, coord, format)
        try:
            os.remove(fullpath)
        except OSError, e:
            # errno=2 means that the file does not exist, which is fine
            if e.errno != 2: raise
    def read(self, layer, coord, format):
        """ Return the cached tile body, transparently decompressing gzip'ed
            formats, or None when no cached file exists.
        """
        fullpath = self._fullpath(layer, coord, format)
        if not exists(fullpath):
            return None
        if self._is_compressed(format):
            return gzip.open(fullpath, 'r').read()
        else:
            body = open(fullpath, 'rb').read()
            return body
    def save(self, body, layer, coord, format):
        """ Write *body* to the cache for the given tile.

            The data is first written to a temp file in the cache root and
            then renamed into place, so readers never see a partial file.
        """
        umask_old = os.umask(self.umask)
        fullpath = self._fullpath(layer, coord, format)
        try:
            os.makedirs(dirname(fullpath), 0777&~self.umask)
        except OSError, e:
            if e.errno != 17: raise
        finally:
            os.umask(umask_old)
        suffix = '.' + format.lower()
        suffix += self._is_compressed(format) and '.gz' or ''
        fh, tmp_path = mkstemp(dir=self.cachepath, suffix=suffix)
        if self._is_compressed(format):
            os.close(fh)
            tmp_file = gzip.open(tmp_path, 'w')
            tmp_file.write(body)
            tmp_file.close()
        else:
            os.write(fh, body)
            os.close(fh)
        try:
            os.rename(tmp_path, fullpath)
        except OSError:
            # On platforms where rename over an existing file fails,
            # remove the destination and retry.
            os.unlink(fullpath)
            os.rename(tmp_path, fullpath)
        os.chmod(fullpath, 0666&~self.umask)
|
"""
This module helps reduce the need to know arcpy for mapping. There are a few basic functions here that, when combined correctly, can create any number of maps quickly. This tool can use multiple CSVs, columns, and MXDs to create a large number of maps. Module users should use the create_dir() function first to set-up the correct C:/Mapping_Project structure. This module is designed to work with a csv containing values that should be mapped using graduated symbology. The user needs a CSV, shapefiles for mapping, mapping documents (.mxd files), and symbology.
"""
import csv
import operator
import os
from glob import glob

import arcpy
def create_dir():
"""Creates an empty folder directory on the C drive called Mapping_Project. """
try:
if not os.path.exists("C:/Mapping_Project"):
os.mkdir("C:/Mapping_Project")
if not os.path.exists("C:/Mapping_Project/MXDs"):
os.mkdir("C:/Mapping_Project/MXDs")
if not os.path.exists("C:/Mapping_Project/Shapefiles"):
os.mkdir("C:/Mapping_Project/Shapefiles")
if not os.path.exists("C:/Mapping_Project/Out"):
os.mkdir("C:/Mapping_Project/Out")
except:
print "There was an error creating the directories."
def create_workspace():
"""Checks if a .gbp workspace exists. If there is not one, the script will make one. This script returns the path of the workspace. The .gbp workspace is useful for working with csv data.
"""
path = 'C://Mapping_Project//workspace.gdb'
if not os.path.exists('C://Mapping_Project//workspace.gdb'):
arcpy.CreateFileGDB_management('C://Mapping_Project//', 'workspace.gdb')
else:
print 'you already have a workspace there'
return path
def csv_checkmissingshpvals(csvfile,joincol, shpfile, shpfileheader):
"""This script will check to see if any join column values in the CSV are missing in the shapefile. Returns a list of missing shapefile join data.CheckMissingSHPVals(csvfile should be a filepath. joincol is the column index in the csv starting at 0. shapefile is shapefile path. shapefile header should be the column lookup name.)"""
csvvals = []
with open(csvfile) as csv:
csv.next()
for L in csv:
csvvals.append(l.split(',')[joincol])
shpvals = []
rows = arcpy.SearchCursor(shpfile,fields = shpfileheader)
for row in rows:
shpvals.append(str(row.getValue(shpfileheader)))
results = []
for val in csvvals:
if val not in shpvals:
results.append(val)
if results == []:
print "All values were joined"
return results
def csv_getcols(csvfile):
    """ Returns a list of the CSV headers."""
    # Only the first line is needed; read it and split on commas.
    with open(csvfile, 'rb') as handle:
        header = handle.next()
    return header.strip().split(',')
def csv_getall(csvfile):
""" Prints the lines in an unformatted csv. To join the csv, please use the JoinCSV function.
"""
with open(csvfile) as csv:
for L in csv:
print l
def csv_sort(csvfile, colindex = 0, reverse = False):
""" This script will sort a csv based on the colindex and csvfile path. If reverse is True, the values will be sorted in reverse index. This function assumes that the csv has headers. colindex starts at 0.
"""
data = []
with open(csvfile,'r') as f:
for line in f:
data.append(line)
header = csv.reader(data, delimiter=",").next()
reader = csv.reader(data[1:], delimiter=",")
if reverse:
sortedlist = sorted(reader, key=operator.itemgetter(colindex),reverse = True)
else:
sortedlist = sorted(reader, key=operator.itemgetter(colindex))
os.remove(csvfile)
ResultFile = open(csvfile,'wb')
wr = csv.writer(ResultFile)
wr.writerow(header)
for L in sortedlist:
wr.writerow(l)
ResultFile.close()
print "Finished sorting the csv"
def csv_jointable(csvfile, workspace):
""" This function will import the csv to the workspace. This datatable will then be imported to a shapefile using the JoinSHP() function. This returns a string of the workspace and table name"""
tablename = os.path.basename(csvfile).rstrip('.csv')
try:
arcpy.Delete_management(workspace + '//' + tablename)
arcpy.TableToTable_conversion(csvfile, workspace, tablename)
print "Old table in workspace deleted, replaced by new table ", workspace + '//'+tablename
except:
arcpy.TableToTable_conversion(csvfile, workspace, tablename)
print "New table in workspace added to the workspace with name ", workspace + '//'+tablename
return workspace + '//'+tablename
def shp_getcols(shapefile):
    """Returns a list of shapefile columns."""
    # One stripped name string per field in the shapefile.
    return [str(field.name.strip()) for field in arcpy.ListFields(shapefile)]
def shp_removecols(shapefile, cols):
"""Removes fields from shapefile specified in the cols list. Columns can only have 10 characters."""
for col in cols:
col = col[:10]
if arcpy.ListFields(shapefile, col):
arcpy.DeleteField_management(shapefile, col)
print 'Field deleted:', col
else:
print 'No field to delete:', col
def shp_addcols(shapefile, cols, datatype):
""" Adds each column in the list of cols. Columns can only have 10 characters. All columns added will be given the same datatype.
Possible fields types:
TEXT Any string of characters.
FLOAT Fractional numbers between -3.4E38 and 1.2E38.
DOUBLE Fractional numbers between -2.2E308 and 1.8E308.
SHORT Whole numbers between -32,768 and 32,767.
LONG Whole numbers between -2,147,483,648 and 2,147,483,647.
DATE Date and/or time.
BLOB Long sequence of binary numbers. You need a custom loader or viewer or a third-party application to load items into a BLOB field or view the contents of a BLOB field.
RASTER Raster images. All ArcGIS software-supported raster dataset formats can be stored, but it is highly recommended that only small images be used.
GUID Globally unique identifier.
If you try to add a duplicate column that is already in the shapefile, the existing duplicate column will be deleted.
"""
if type(cols) is list:
for col in cols:
col = col[:10]
if arcpy.ListFields(shapefile, col):
print 'Removed existing column from the shapefile:', col
arcpy.DeleteField_management(shapefile, col)
arcpy.AddField_management(shapefile, col, datatype)
else:
arcpy.AddField_management(shapefile, col, datatype)
print 'Added column to the shapefile:', col, datatype
else:
col = cols[:10]
if arcpy.ListFields(shapefile, col):
print 'Removed existing column from the shapefile:', col
arcpy.DeleteField_management(shapefile, col)
arcpy.AddField_management(shapefile, col, datatype)
else:
arcpy.AddField_management(shapefile, col, datatype)
print 'Added column to the shapefile:', col, datatype
def shp_joincsv(csvfile, shapefile, shapefilejoincol, csvjoinindex, csvfieldindex):
    """ This function manually joins the CSV to the shapefile and does not use geodatabase tables like the JoinCSV() and JoinSHP() functions. This method should be easier and faster in most cases. In the CSV, the join column must be before the columns with mapping values. This code will map all fields from the mapping column onward (to the right). Returns missing cols. Column limit should be 10 characters."""
    # Bug fix: the original called GetCSVcols/AddSHPcols, which do not exist
    # in this module; the helpers are named csv_getcols and shp_addcols.
    cols = csv_getcols(csvfile)
    i = 0
    newcols = []
    for col in cols:
        if i >= csvfieldindex:
            newcols.append(col[:10])
        i += 1
    shp_addcols(shapefile, newcols, "double")
    i = 0
    ct = 0
    csvjoinlist = []
    with open(csvfile, 'rb') as csvfile:
        lib = dict()
        csvfile.next() # skip the headers
        for L in csvfile:
            # Bug fix: the loop variable is `L`; the original referenced an
            # undefined lowercase `l` and raised NameError.
            line = L.rstrip().split(",")
            csvjoinlist.append(line[csvjoinindex])
            lib[line[csvjoinindex]] = lib.get(line[csvjoinindex],line[csvfieldindex:])
    rows = arcpy.UpdateCursor(shapefile)
    #rows = arcpy.UpdateCursor(shpfile,"","","","%s %s" % (shapefilejoincol, method)) ##sorted
    shpjoinlist = []
    missingshpvals = []
    for row in rows:
        shpjoinval = str(row.getValue(shapefilejoincol))
        shpjoinlist.append(shpjoinval)
        try:
            vals = lib.get(shpjoinval)
            for ind, field in enumerate(newcols):
                row.setValue(str(field),float(vals[ind]))
            rows.updateRow(row)
        except:
            # No matching CSV row for this shapefile value; leave it unset.
            pass
#            missingshpvals.append(shpjoinval) #This is the shapefile value that there is no corresponding CSV value for. This list is for debugging.
#    missingcsvvals = []
#    for L in csvjoinlist:
#        if L not in shpjoinlist:
#            missingcsvvals.append(L)
    return #missingcsvvals #these values are missing
def shp_jointable(jointable, joinfield, shapefile, shpjoinfield, add_fields):
""" Joins the workspace table to the shapefile. The workspace table is generated by csv_jointable(). jointable and shapefile should be the full path of the file ie. C:/path/to/shapefile.shp and c:path/to/workspace.gbp/tablename """
new_fields = []
for col in add_fields:
col = col[:10]
new_fields.append(col)
if arcpy.ListFields(shapefile, col):
arcpy.DeleteField_management(shapefile, col)
arcpy.JoinField_management(shapefile, shpjoinfield,jointable, joinfield, new_fields)
print "Finished shapefile join."
def CalcPerChangeSHP(shapefile, shapejoincol, aggregation_column, maxmin_cols):
"""This function will loop through a shapefile and group values based upon the specified 'aggregation_column'. The function will then calculate the maximum and minimum for each of the maxmin_cols specified. A new field will be added to the shapefile that included "L_" and the first 8 characters of each value in the maxmin_cols. Use these new columns to label the max and min values when creating maps. Returns the new label columns"""
newcols = []
for col in maxmin_cols:
newcols.append("L_" + col[:8])
AddSHPcols(shapefile, newcols, "STRING")
rows = arcpy.SearchCursor(shapefile)
shpvallist = []
joinlist = []
for row in rows:
vals = {}
vals[aggregation_column] = str(row.getValue(aggregation_column))
vals[shapejoincol] = str(row.getValue(shapejoincol))
joinlist.append(vals[aggregation_column])
for val in maxmin_cols:
vals[val[:10]] = float(row.getValue(val[:10]))
shpvallist.append(vals)
# print shpvallist[:10]
joinlist = set(joinlist)
coldict = {}
for col in maxmin_cols:
col = col[:10]
newdict = {}
for adminval in joinlist:
vals = []
for row in shpvallist:
if row[aggregation_column] == adminval:
postalcode = row[shapejoincol]
if int(row[col]) == -9999: #use -9999 as a key for no data
val = ''
else:
val = row[col]
vals.append((postalcode, val))
# try:
i = 0
for postalcode, val in vals:
if val == -9999:
continue
elif i == 0:
maxpost, maxval = postalcode, val
minpost, minval = postalcode, val
elif val > maxval:
maxpost, maxval = postalcode, val
elif val < minval:
minpost, minval = postalcode, val
i += 1
i = 0
newdict[adminval] = (maxpost, maxval,minpost, minval)
coldict[col] = newdict
for col in maxmin_cols:
col = col[:10]
l_col = "L_" + str(col)[:8]
vals = coldict[col]
del rows
rows = arcpy.UpdateCursor(shapefile)
for row in rows:
shpjoinval = row.getValue(aggregation_column)
post = row.getValue(shapejoincol)
currentval = row.getValue(col)
maxpost = vals[shpjoinval][0]
minpost = vals[shpjoinval][2]
if post in (maxpost, minpost):
row.setValue(l_col,"{0:.0f}%".format(currentval*100))
rows.updateRow(row)
print "Finished adding the max and min percent change values to the shapefile. Here are the new column headers"
print newcols
return newcols
def CalculateField(shapefile, fieldname, py_expression):
    """Populate *fieldname* in *shapefile* by evaluating a Python expression string.

    Field references inside the expression are wrapped in ! characters,
    e.g. py_expression = "str(!POSTCODE!) + '_' + str(!JOIN!)".
    """
    arcpy.CalculateField_management(shapefile, fieldname, py_expression, "Python")
def GetMXDList():
    """Return the full paths of every .mxd map document under C:/Mapping_Project/MXDs."""
    pattern = os.path.join("C:/Mapping_Project/MXDs", "*.mxd")
    return glob(pattern)
def GetLayers(mxds):
"""Prints the available layers in the mxd document. A string version of the layer name is returned. GetLayers(mxds = 'mxdpath' or ['mxdpath1','mxdpath2'])"""
lyrlist = []
if type(mxds) is list:
for mxdpath in mxds:
print mxdpath
mxd = arcpy.mapping.MapDocument(mxdpath)
i = 0
for lyr in arcpy.mapping.ListLayers(mxd):
lyrlist.append([os.path.basename(mxdpath), str(lyr.name), i])
i += 1
print 'MXD/tLAYER/tLAYER_INDEX'
for row in lyrlist:
print row
return lyrlist
elif type(mxds) is str:
mxd = arcpy.mapping.MapDocument(mxds)
i = 0
for lyr in arcpy.mapping.ListLayers(mxd):
lyrlist.append([os.path.basename(mxds), str(lyr.name), i])
i += 1
print 'MXD/tLAYER/tLAYER_INDEX'
for row in lyrlist:
print row
return lyrlist
else:
print "The mxd needs to be formatted as a list, not a string. add brackets around the variable ['mxdpath']"
def CreateMaps(mxds,shapefile, mapfields,symbology, labels = False):
    """This function will create maps for all mxds specified and all fields in the mapfields list. The symbology options = 'Percent_Change' and 'Diff_LC'. If the symbology does not exist locally, this function will copy the necessary files from the network into the mxd/symbology folder. """
    # Truncate every field name to the 10-character shapefile limit.
    i= 0
    for col in mapfields:
        mapfields[i] = col[:10]
        i += 1
    i = 0
    # Accept a single mxd path or a single field name as well as lists.
    if type(mxds) is str:
        newmxd = []
        newmxd.append(mxds)
        mxds = newmxd
    if type(mapfields) is str:
        newmapfields = []
        newmapfields.append(mapfields)
        mapfields = newmapfields
    mapresolution = 300 #300 is common.
    # Resolve the symbology argument to a layer (.lyr) file.
    if symbology.lower() == "percent_change":
        symbpath = arcpy.mapping.Layer("C:/Mapping_Project/MXDs/Symbology/PercentChange.lyr")
    elif symbology.lower() == "diff_lc":
        symbpath = arcpy.mapping.Layer('C:/Mapping_Project/MXDs/Symbology/DifferenceinLossCost.lyr')
    elif symbology [-4:] == '.lyr':
        symbpath = arcpy.mapping.Layer(symbology)
    else:
        print "You need to choose a symbology type: 'Percent_Change','Diff_LC', or a layerpath"
        return
    # For each map document, find the layer matching the shapefile and export
    # one JPEG per mapped field.
    for mxd in mxds:
        mxdobj = arcpy.mapping.MapDocument(mxd)
        df = arcpy.mapping.ListDataFrames(mxdobj)[0] #leave as default for these maps(will it change for other perils????)
        for lyr in arcpy.mapping.ListLayers(mxdobj):
            if lyr.name == os.path.basename(shapefile).replace(".shp",""): #leave as default for these maps(will it change for other perils????)
                # NOTE(review): `==` here compares and discards the result;
                # if an assignment was intended this line has no effect.
                lyr.symbologyType == "GRADUATED_COLORS"
                for field in mapfields:
                    arcpy.mapping.UpdateLayer(df, lyr, symbpath, True) #if you get a value error, it could be because of the layers source symbology no longer being available. It could also be because of a join issue or incorrect column names. The column name character limit is 10.
                    lyr.symbology.valueField = field
                    if labels:
                        lyr.showLabels = True
                        # Build the label expression; -9999 rows (no data) are excluded.
                        if symbology.lower() == "percent_change":
                            expres = "str(int(round(float(["+field[:10]+"])*100,0))) + '%'"
                        elif symbology.lower() == "diff_lc":
                            expres = "str(round(float(["+field[:10]+"]),3))"
                        for lblClass in lyr.labelClasses:
                            lblClass.expression = expres
                            lblClass.SQLQuery = field +" <> -9999"
                            lblClass.showClassLabels = True
                    else:
                        lyr.showLabels = False
                    arcpy.RefreshActiveView()
                    # NOTE(review): rstrip('.mxd') strips trailing '.', 'm', 'x', 'd'
                    # characters, so document names ending in those letters are mangled.
                    arcpy.mapping.ExportToJPEG(mxdobj, "C:/Mapping_Project/Out/"+ os.path.basename(mxd).rstrip('.mxd') +'_' + field +".jpg", resolution=mapresolution)
                    print "New map: C:/Mapping_Project/Out/"+ os.path.basename(mxd).rstrip('.mxd') +'_' + field + ".jpg"
def CreateMaps2(mxds,shp1, shp2, mapfields,symbology, labels1 = False,labels2 = False):
    """This function will create maps for all mxds specified and all fields in the mapfields list. The symbology options = 'Percent_Change' and 'Diff_LC'. This function will update the symbology and labels for two shapefiles. They must have the same mapfields. Symbology options are diff_lc and percent_change. If labels1 or labels2 is True, the mapfields will be labelled """
    # Truncate every field name to the 10-character shapefile limit.
    i= 0
    for col in mapfields:
        mapfields[i] = col[:10]
        i += 1
    # Accept a single mxd path or a single field name as well as lists.
    if type(mxds) is str:
        newmxd = []
        newmxd.append(mxds)
        mxds = newmxd
    if type(mapfields) is str:
        newmapfields = []
        newmapfields.append(mapfields)
        mapfields = newmapfields
    mapresolution = 300 #300 is common.
    # Resolve the symbology argument to a layer (.lyr) file.
    if symbology.lower() == "percent_change":
        symbpath = arcpy.mapping.Layer("C:/Mapping_Project/MXDs/Symbology/PercentChange.lyr")
    elif symbology.lower() == "diff_lc":
        symbpath = arcpy.mapping.Layer('C:/Mapping_Project/MXDs/Symbology/DifferenceinLossCost.lyr')
    else:
        print "You need to choose a symbology type: 'Percent_Change' or 'Diff_LC'"
        return
    for mxd in mxds:
        mxdobj = arcpy.mapping.MapDocument(mxd)
        df = arcpy.mapping.ListDataFrames(mxdobj)[0] #leave as default for these maps(will it change for other perils????)
        # Locate the two layers matching the two shapefiles by name.
        for lyr in arcpy.mapping.ListLayers(mxdobj):
            if lyr.name == os.path.basename(shp1).replace(".shp",""):
                lyr1 = lyr
            elif lyr.name == os.path.basename(shp2).replace(".shp",""):
                lyr2 = lyr
        # NOTE(review): `==` on the next two lines compares and discards the
        # result; if assignments were intended these lines have no effect.
        lyr1.symbologyType == "GRADUATED_COLORS"
        lyr2.symbologyType == "GRADUATED_COLORS"
        for field in mapfields:
            field = field [:10]
            print os.path.basename(mxd).rstrip(".mxd"), field
            arcpy.mapping.UpdateLayer(df, lyr1, symbpath, True)
            # Build the label expression; -9999 rows (no data) are excluded below.
            if symbology.lower() == "percent_change":
                expres = "str(int(round(float(["+field+"])*100,0))) + '%'"
            elif symbology.lower() == "diff_lc":
                expres = "str(int(round(float(["+field+"])*100,0)))"
            lyr1.symbology.valueField = field
            if labels1:
                if lyr1.supports("LABELCLASSES"):
                    lyr1.showLabels = True
                    # print "Layer name: " + lyr1.name
                    for lblClass in lyr1.labelClasses:
                        lblClass.expression = expres
                        lblClass.SQLQuery = field +" <> -9999"
                        lblClass.showClassLabels = True
            else:
                lyr1.showLabels = False
            # Apply the same symbology and (optional) labels to the second layer.
            arcpy.mapping.UpdateLayer(df, lyr2, symbpath, True)
            lyr2.symbology.valueField = field
            if labels2:
                if lyr2.supports("LABELCLASSES"):
                    lyr2.showLabels = True
                    # print "Layer name: " + lyr2.name
                    for lblClass in lyr2.labelClasses:
                        lblClass.expression = expres
                        lblClass.SQLQuery = field +" <> -9999"
                        lblClass.showClassLabels = True
            else:
                lyr2.showLabels = False
            arcpy.RefreshActiveView()
            # NOTE(review): rstrip('.mxd') strips trailing '.', 'm', 'x', 'd'
            # characters, so document names ending in those letters are mangled.
            arcpy.mapping.ExportToJPEG(mxdobj, "C:/Mapping_Project/Out/"+ os.path.basename(mxd).rstrip('.mxd') +'_' + field + symbology +".jpg", resolution=mapresolution)
            print "New map: C:/Mapping_Project/Out/"+ os.path.basename(mxd).rstrip('.mxd') +'_' + field + symbology +".jpg"
def CreateMaps3(mxds,shapefile,mapfields, labelfields, symbology):
    """This function will create maps for all mxds specified and all fields in the mapfields list. The symbology options = 'Percent_Change' and 'Diff_LC'. This function allows specification of different label fields for the mapfields labels. ie use mapfields as difference in loss cost, but label the max and min percent change column. The mapfields and labelfields lists must be ordered in the same order so that the first value of mapfields will get labelled with the first value in labelfields."""
    # Truncate every field name to the 10-character shapefile limit.
    i= 0
    for col in mapfields:
        mapfields[i] = col[:10]
        i += 1
    # Accept a single mxd path or a single field name as well as lists.
    if type(mxds) is str:
        newmxd = []
        newmxd.append(mxds)
        mxds = newmxd
    if type(mapfields) is str:
        newmapfields = []
        newmapfields.append(mapfields)
        mapfields = newmapfields
    mapresolution = 300 #300 is common.
    # Resolve the symbology argument to a layer (.lyr) file.
    if symbology.lower() == "percent_change":
        symbpath = arcpy.mapping.Layer("C:/Mapping_Project/MXDs/Symbology/PercentChange.lyr")
    elif symbology.lower() == "diff_lc":
        symbpath = arcpy.mapping.Layer('C:/Mapping_Project/MXDs/Symbology/DifferenceinLossCost.lyr')
    else:
        print "You need to choose a symbology type: 'Percent_Change' or 'Diff_LC'"
        return
    for mxd in mxds:
        mxdobj = arcpy.mapping.MapDocument(mxd)
        df = arcpy.mapping.ListDataFrames(mxdobj)[0] #leave as default for these maps(will it change for other perils????)
        for lyr in arcpy.mapping.ListLayers(mxdobj):
            if lyr.name == os.path.basename(shapefile).replace(".shp",""):
                # NOTE(review): `==` here compares and discards the result;
                # if an assignment was intended this line has no effect.
                lyr.symbologyType == "GRADUATED_COLORS"
                # Pair each mapped field with its label field, in order.
                for field, label in zip(mapfields, labelfields):
                    field = field [:10]
                    label = label [:10]
                    print field, label
                    arcpy.mapping.UpdateLayer(df, lyr, symbpath, True)
                    lyr.symbology.valueField = field
                    # The label expression simply shows the label field's value.
                    expres = "["+label+"]"
                    print expres
                    if lyr.supports("LABELCLASSES"):
                        lyr.showLabels = True
                        for lblClass in lyr.labelClasses:
                            lblClass.expression = expres
                            lblClass.SQLQuery = field +" <> -9999"
                            lblClass.showClassLabels = True
                    arcpy.RefreshActiveView()
                    # NOTE(review): rstrip('.mxd') strips trailing '.', 'm', 'x', 'd'
                    # characters, so document names ending in those letters are mangled.
                    arcpy.mapping.ExportToJPEG(mxdobj, "C:/Mapping_Project/Out/"+ os.path.basename(mxd).rstrip('.mxd') +'_' + field + symbology +".jpg", resolution=mapresolution)
                    print "New map: C:/Mapping_Project/Out/"+ os.path.basename(mxd).rstrip('.mxd') +'_' + field + symbology +".jpg"
def CreateMaps4(mxds,shapefile,shpsubregioncol, mapfields, labelfields, symbology):
"""This function will create maps for all mxds specified and all fields in the mapfields list. The symbology options = 'Percent_Change' and 'Diff_LC'. This function allows specification of different label fields for the mapfields labels. ie use mapfields as difference in loss cost, but label the max and min percent change column. The mapfields and labelfields lists must be ordered in the same order so that the first value of mapfields will get labelled with the first value in labelfields. This function will zoom to the different layer attributes specified in the shpsubregioncol field."""
i= 0
for col in mapfields:
mapfields[i] = col[:10]
i += 1
i = 0
for col in labelfields:
labelfields[i] = col[:10]
i += 1
if type(mxds) is str:
newmxd = []
newmxd.append(mxds)
mxds = newmxd
if type(mapfields) is str:
newmapfields = []
newmapfields.append(mapfields)
mapfields = newmapfields
if type(labelfields) is str:
newlabelfields = []
newlabelfields.append(mapfields)
labelfieldsfields = newlabelfields
mapresolution = 300 #300 is common.
if symbology.lower() == "percent_change":
symbpath = arcpy.mapping.Layer("C:/Mapping_Project/MXDs/Symbology/PercentChange.lyr")
elif symbology.lower() == "diff_lc":
symbpath = arcpy.mapping.Layer('C:/Mapping_Project/MXDs/Symbology/DifferenceinLossCost.lyr')
else:
print "You need to choose a symbology type: 'Percent_Change' or 'Diff_LC'"
return
rows = arcpy.SearchCursor(shapefile)
adminIDs = []
for row in rows:
val = row.getValue(shpsubregioncol)
if val not in adminIDs:
adminIDs.append(val)
del rows
for mxd in mxds:
mxdobj = arcpy.mapping.MapDocument(mxd)
df = arcpy.mapping.ListDataFrames(mxdobj)[0] #leave as default for these maps(will it change for other perils????)
for lyr in arcpy.mapping.ListLayers(mxdobj):
if lyr.name == "EUFL_RL15_Zips_Cover":
lyr2 = lyr
elif lyr.name == "EUFL_RL15_Zips_Cover2":
lyr3 = lyr
for lyr in arcpy.mapping.ListLayers(mxdobj):
if lyr.name == os.path.basename(shapefile).replace(".shp",""):
lyr.symbologyType == "GRADUATED_COLORS"
for field, label in zip(mapfields, labelfields):
field = field [:10]
label = label [:10]
arcpy.mapping.UpdateLayer(df, lyr, symbpath, True)
lyr.symbology.valueField = field
expres = "str(int(round(float(["+label+"]) *100,0))) + '%'"
print expres
if lyr.supports("LABELCLASSES"):
print "here"
lyr.showLabels = True
for lblClass in lyr.labelClasses:
lblClass.expression = expres
lblClass.SQLQuery = field +" <> -9999"
lblClass.showClassLabels = True
for adminID in adminIDs:
if adminID[:2] in ['BE','UK','GM']:
adminID = str(adminID)
query1 = '"'+ shpsubregioncol + '" = ' + "'" + adminID + "'"
query2 = '"'+ shpsubregioncol + '" <> ' + "'" + adminID + "'"
query3 = '"'+ shpsubregioncol + '" <> ' + "'" + adminID + "'"
print shpsubregioncol, adminID, query1
lyr.definitionQuery = query1
lyr2.definitionQuery = query2
lyr3.definitionQuery = query3
ext = lyr.getSelectedExtent(True)
df.extent = ext
# df.panToExtent(lyr.getSelectedExtent())
# df.zoomToSelectedFeatures()
arcpy.RefreshActiveView()
arcpy.mapping.ExportToJPEG(mxdobj, "C:/Mapping_Project/Out/"+ os.path.basename(mxd).rstrip('.mxd') +'_' + adminID + "_" + field + symbology + ".jpg", resolution=mapresolution)
print "New map: C:/Mapping_Project/Out/"+ os.path.basename(mxd).rstrip('.mxd') +'_' + adminID +"_" +field + symbology + ".jpg"
###############################################################################
def JoinSHPspecial(shapefile, shapejoincol, shapefile2, shapejoincol2, addcols):
    """Add "min% to max%" range strings from shapefile to shapefile2.

    Not used in the project, but kept because it works.  For every admin
    value of ``shapejoincol2``, the min and max of each ``addcols`` column
    over shapefile's rows are computed and the formatted range is written
    into a new string column (same name) of shapefile2.  E.g. if shapefile
    has 3 postcodes in a county, shapefile2 gets that county's min/max.
    The value -9999 is treated as unknown/no-data and excluded.
    """
    # Shapefile field names are capped at 10 characters.
    addcols = [col[:10] for col in addcols]
    AddSHPcols(shapefile2, addcols, "STRING")
    # Read every source row into memory.
    rows = arcpy.SearchCursor(shapefile)
    shpvallist = []
    joinlist = []
    for row in rows:
        vals = {shapejoincol2: row.getValue(shapejoincol2)}
        joinlist.append(vals[shapejoincol2])
        for col in addcols:
            vals[col] = float(row.getValue(col))
        shpvallist.append(vals)
    del rows
    joinlist = set(joinlist)
    # Build {column: {adminval: "min% to max%"}}.
    coldict = {}
    for col in addcols:
        newdict = {}
        for adminval in joinlist:
            # BUG FIX: the original excluded the -9999 sentinel only from the
            # max, so the min could be polluted, and it crashed formatting
            # the "None" fallback when an area had no usable data.
            vals = [row[col] for row in shpvallist
                    if row[shapejoincol2] == adminval and int(row[col]) != -9999]
            if not vals:
                # No usable data for this admin area; leave its cell unset.
                continue
            maxval = str(int(round(max(vals) * 100, 0)))
            minval = str(int(round(min(vals) * 100, 0)))
            newdict[adminval] = minval + "% to " + maxval + "%"
        coldict[col] = newdict
    # Write the formatted ranges into shapefile2, column by column.
    for col in addcols:
        vals = coldict[col]
        rows = arcpy.UpdateCursor(shapefile2)
        for row in rows:
            shpjoinval = row.getValue(shapejoincol2)
            try:
                row.setValue(str(col), str(vals[shpjoinval]))
                rows.updateRow(row)
            except:
                # Admin value absent from the source data; skip the row.
                pass
        del rows
|
import boto3
def lambda_handler(event, context):
    """Start or stop the EC2 instances listed in the event payload.

    Expects ``event["instance_ids"]`` (list of instance ids, may be absent)
    and ``event["state"]``: "running" starts them, "stopped" stops them;
    any other state is a no-op.
    """
    targets = event.get("instance_ids") or []
    desired = event.get("state")
    client = boto3.client('ec2')
    if desired == "running":
        client.start_instances(InstanceIds=targets)
    elif desired == "stopped":
        client.stop_instances(InstanceIds=targets)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The script gets Teamcity remote build status and set the REMOTE_BUILD_NUMBER parameter.
If no builds found the script will exit with errorcode 1. To change this behaviour add argument --no-fail-missing
You can customize messages with template strings. See expand_template function
"""
from argparse import ArgumentParser
import logging
import os
import sys
import ssl
import time
import json
import urllib.request
RETRY_SLEEP_TIME_SECONDS = 10
def parse_args(args=None):
    """Define and parse command-line arguments.

    The *args* parameter is for unittests: it supplies argument values
    instead of sys.argv.  The returned namespace's ``build_locator`` is
    normalised so that running and cancelled builds are included and only
    the single latest matching build is returned.
    """
    # BUG FIX: several help strings were copy-pasted from the artifacts
    # downloader or contained typos ("waitng", "timeoute"); corrected below.
    parser = ArgumentParser(prog=os.path.basename(__file__),
                            description="Teamcity remote build status checker")
    parser.add_argument("--api-url", required=False, default="https://teamcity.corp.local",
                        help="Teamcity api base URL")
    parser.add_argument("--api-username", required=False, default="tcuser",
                        help="Teamcity api access user")
    parser.add_argument("--api-password", required=False, default='tcpassword',
                        help="Teamcity api users' password")
    parser.add_argument("--build-locator", required=True,
                        help="Build locator to get artifacts from."
                             "For example, buildType:Project_Stable_BuildAndroid,number:1.11.0.56021,tag:xsolla,count:1")
    parser.add_argument("--failed-build-message",
                        default='build __buildTypeId__:__number__ is in __status__ state',
                        help="set teamcity status message on failed remote build")
    parser.add_argument("--cancelled-build-message",
                        default='build __buildTypeId__:__number__ cancelled',
                        help="set teamcity status message on cancelled remote build")
    parser.add_argument("--timeout-build-message",
                        default='timeout waiting for the build __buildTypeId__:__number__ finish',
                        help="set teamcity status message on waiting timeout for the remote build finish")
    parser.add_argument("--max-wait-seconds", required=False, type=int, default=240,
                        help="Maximum seconds to wait for the remote build finish")
    parser.add_argument("--no-fail-missing", action='store_true',
                        help="Assume its OK if no build found. And log with warning.")
    parser.add_argument("--update-build-number", action='store_true',
                        help="Set own build number equal to remote build's")
    parsed = parser.parse_args(args, namespace=None)
    # Normalise the locator: ensure running builds are also returned...
    if 'running:' not in parsed.build_locator:
        parsed.build_locator += ',running:any'
    # ...ensure canceled builds are also returned...
    if 'canceled:' not in parsed.build_locator:
        parsed.build_locator += ',canceled:any'
    # ...and return only the latest build.
    if 'count:1' not in parsed.build_locator:
        parsed.build_locator += ',count:1'
    return parsed
def get_status(req):
    """Fetch and decode the build-status JSON for *req* into a dict.

    On HTTP errors the response body is printed and the process exits with
    code 1.  When the locator matched no builds the process also exits,
    unless --no-fail-missing was given, in which case it only warns.
    Relies on the module-level ARGS namespace.
    """
    try:
        with urllib.request.urlopen(req) as response:
            result = json.loads(response.read().decode())
    except urllib.error.HTTPError as e:
        logging.error(f'Couldnt fetch status. Ensure:\n'
                      f' --build-locator is valid and builds are reachable by it ({ARGS.build_locator})\n'
                      f' --api-url is valid ({ARGS.api_url})\n'
                      f'\n --- return status ---\nreason: {e.reason}\nmsg:{e.msg}\nReturn code: {e.code}\n')
        # Dump the error page body to help debugging.
        print('\n'.join(line.decode() for line in e.readlines()))
        sys.exit(1)
    if result['count'] == 0:
        msg = f'couldn\'t find any builds matching the request'
        if ARGS.no_fail_missing:
            logging.warning(msg)
        else:
            logging.error(msg)
            sys.exit(1)
    return result
def expand_template(tpl, schema):
    """Substitute build-status fields into a template string.

    Every key ``k`` of *schema* may appear in *tpl* as ``__k__`` and is
    replaced by ``str(schema[k])``, e.g. "The build number is: __number__".
    A typical schema:
        {'id': 533691,
         'buildTypeId': 'Tst_TstRunning',
         'number': '22',
         'status': 'SUCCESS',
         'state': 'finished',
         'href': '/httpAuth/app/rest/builds/id: 533691',
         'webUrl': '...',
         'finishOnAgentDate': '20221219T170121+0000'}
    """
    result = tpl
    for key, value in schema.items():
        result = result.replace(f'__{key}__', str(value))
    return result
def get_build_info(url):
    """Return build status.
    Polls *url* (sleeping RETRY_SLEEP_TIME_SECONDS between tries) until the
    latest matching build is no longer in the 'running' state, then returns
    that build's dict.  The json returned will be structured like this:
    {'count': 1,
     'href': '/httpAuth/app/rest/builds/?locator=buildType:Tst_TstRunning,count: 1,number: 22',
     'nextHref': '/httpAuth/app/rest/builds/?locator=buildType:Tst_TstRunning,count: 1,number: 22,start: 1',
     'build': [
       {'id': 533691,
        'buildTypeId': 'Tst_TstRunning',
        'number': '22',
        'status': 'SUCCESS',
        'state': 'finished',
        'href': '/httpAuth/app/rest/builds/id: 533691',
        'webUrl': 'https://teamcity.corp.local/viewLog.html?buildId=533691&buildTypeId=Tst_TstRunning',
        'finishOnAgentDate': '20221219T170121+0000'}
     ]
    }
    """
    # Ignore SSL errors.
    # NOTE(review): certificate validation is fully disabled here -- probably
    # fine for an internal Teamcity, but confirm this is intended.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    # create an authorization handler
    p = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    p.add_password(None, ARGS.api_url, ARGS.api_username, ARGS.api_password)
    auth_handler = urllib.request.HTTPBasicAuthHandler(p)
    opener = urllib.request.build_opener(auth_handler, urllib.request.HTTPSHandler(context=ctx))
    # Install globally so get_status()'s urlopen uses the same auth/SSL setup.
    urllib.request.install_opener(opener)
    req = urllib.request.Request(url)
    req.add_header('Accept', 'Application/json')
    logging.info(f'processing build matching request: "{url}"')
    timer = time.perf_counter()
    while True:
        status = get_status(req)
        if status['build'][0]['state'] == 'running':
            logging.info(f"build {status['build'][0]['buildTypeId']}:{status['build'][0]['number']} is still running. waiting till it finish...")
            if time.perf_counter()-timer > ARGS.max_wait_seconds:
                # NOTE(review): on timeout this only LOGS the buildStop service
                # message; the loop keeps polling (no break/exit here) -- it
                # seems to rely on Teamcity stopping the build. Confirm.
                msg = expand_template(ARGS.timeout_build_message, status['build'][0])
                logging.warning(f"{msg}"
                                f"##teamcity[buildStatus text='{msg}']"
                                f"##teamcity[buildStop comment='{msg}' readdToQueue='false']")
        else:
            # Build finished (or was cancelled); hand it back to the caller.
            return status['build'][0]
        time.sleep(RETRY_SLEEP_TIME_SECONDS)
if __name__ == "__main__":
    # set loglevel to INFO. (by default its warning) and simplify format
    logging.getLogger().setLevel(logging.INFO)
    logging.basicConfig(format='[%(levelname)s] %(message)s')
    ARGS = parse_args()
    # Fetch the latest build matching the locator (waits while it runs).
    build = get_build_info(f'{ARGS.api_url}'
                           f'/httpAuth/app/rest/builds/?locator='
                           f'{ARGS.build_locator}')
    # Publish the remote build number as a Teamcity parameter.
    # NOTE(review): the ##teamcity service message is concatenated onto the
    # log line with no newline separator -- confirm Teamcity still parses it.
    logging.info(f"setting REMOTE_BUILD_NUMBER to {build['number']}"
                 f"##teamcity[setParameter name='REMOTE_BUILD_NUMBER' value='{build['number']}']")
    if ARGS.update_build_number:
        logging.info(f"setting my build number to {build['number']}"
                     f"##teamcity[buildNumber '{build['number']}']")
    if build['status'] == 'SUCCESS':
        logging.info('remote build status: SUCCESS')
    elif build['status'] == 'UNKNOWN':
        # UNKNOWN is what Teamcity reports for a cancelled build.
        msg = expand_template(ARGS.cancelled_build_message, build)
        logging.warning(f"{msg}"
                        f"##teamcity[buildStatus text='{msg}']"
                        f"##teamcity[buildStop comment='{msg}' readdToQueue='false']")
    else:
        # Any other status (e.g. FAILURE) fails this build too.
        msg = expand_template(ARGS.failed_build_message, build)
        logging.error(f'{msg}'
                      f"##teamcity[buildStatus text='{msg}']")
        sys.exit(1)
|
from django.db import models
# Create your models here.
class Book(models.Model):
    """A catalogued book: unique title, short description, integer rating."""
    nome = models.CharField(max_length=50,unique=True)  # title; must be unique
    descricao = models.CharField(max_length=100)  # short description
    nota = models.IntegerField()  # rating / score
    def __str__(self):
        # Books are displayed by their title.
        return self.nome
|
import logging
from flask import Flask
from app import factory
def add_logging_to_app(app):
    """Attach a DEBUG-level stream handler to *app*'s logger; return the app."""
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    app.logger.addHandler(stream_handler)
    return app
def create_app(*args, **kwargs):
    """Build the application via the factory, then wire up logging."""
    return add_logging_to_app(factory.create_app(*args, **kwargs))
def create_empty_flask_app(name='app'):
    """Return a bare Flask app (default name 'app') with logging attached."""
    empty_app = Flask(name)
    return add_logging_to_app(empty_app)
# Taken from nose:
#
# https://github.com/nose-devs/nose/blob/master/nose/tools/trivial.py
def eq_(a, b, msg=None):
    """Assert that ``a == b``, raising AssertionError with *msg* (or a
    default "%r != %r" message) when they differ."""
    if a == b:
        return
    raise AssertionError(msg or "%r != %r" % (a, b))
|
#!/usr/bin/python
from flask import Flask
from flask_restful import Resource, Api, fields, marshal_with, reqparse
import hue
# Flask app + flask-restful API exposing a single Hue light.
app = Flask(__name__)
api = Api(app)
# Request arguments accepted by POST /api/ -- all optional.
parser = reqparse.RequestParser()
parser.add_argument('on', type=bool)
parser.add_argument('saturation', type=int)
parser.add_argument('value', type=int)
parser.add_argument('hue', type=int)
# The single light this service controls, resolved once at import time.
light = hue.get_corner_light()
# Marshalling schema; note 'value' is served from the light's `brightness`.
resource_fields = {
    'name': fields.String,
    'on': fields.Boolean,
    'saturation': fields.Integer,
    'value': fields.Integer(attribute='brightness'),
    'hue': fields.Integer
}
class Light(Resource):
    """REST resource exposing the module-level ``light``'s state."""
    @marshal_with(resource_fields)
    def get(self):
        # Return the current light state, marshalled via resource_fields.
        return light
    @marshal_with(resource_fields)
    def post(self):
        """Update any subset of on/saturation/value/hue from the request args."""
        args = parser.parse_args()
        # Idiom fix: compare against None with `is not None`, not `!=`.
        if args['on'] is not None:
            light.on = args['on']
        if args['saturation'] is not None:
            light.saturation = args['saturation']
        if args['value'] is not None:
            # 'value' maps to the light's brightness attribute.
            light.brightness = args['value']
        if args['hue'] is not None:
            light.hue = args['hue']
        return light
# Expose the light at /api/ and run the dev server when invoked directly.
api.add_resource(Light, '/api/')
if __name__ == '__main__':
    app.run(debug=True)
from collections import namedtuple, defaultdict
# A single graded item: the score achieved and its weight in the average.
Grade = namedtuple('Grade', ('score', 'weight'))
class Subject:
    """Collects weighted grades for one subject."""
    def __init__(self):
        self._grades = []
    def report_grade(self, score, weight):
        """Record one (score, weight) grade."""
        self._grades.append(Grade(score, weight))
    def average_grade(self):
        """Return the weighted average of all reported grades."""
        weighted_sum = sum(g.score * g.weight for g in self._grades)
        weight_sum = sum(g.weight for g in self._grades)
        return weighted_sum / weight_sum
class Student:
    """Tracks a student's subjects (created on demand) and overall average."""
    def __init__(self):
        self._subjects = defaultdict(Subject)
    def get_subject(self, name):
        """Return the Subject for *name*, creating it on first access."""
        return self._subjects[name]
    def average_grade(self):
        """Return the unweighted mean of all subject averages."""
        averages = [subject.average_grade() for subject in self._subjects.values()]
        return sum(averages) / len(averages)
class Gradebook:
    """Maps student names to Student records, creating them on demand."""
    def __init__(self):
        self._students = defaultdict(Student)
    def get_student(self, name):
        """Return the Student for *name*, creating one on first access."""
        return self._students[name]
if __name__ == '__main__':
    # Demo: two subjects with weighted grades; prints John's overall average.
    gradebook = Gradebook()
    john = gradebook.get_student('John')
    math = john.get_subject('Math')
    math.report_grade(90, 0.7)
    math.report_grade(70, 0.3)
    gym = john.get_subject('Gym')
    gym.report_grade(95, 0.4)
    gym.report_grade(60, 0.6)
    print(john.average_grade())
from torch.utils.data import Dataset
from skimage import io
from utils import read_data
import torch
import torch.nn as nn
import torch.nn.functional as F
class IDCardsDataset(Dataset):
    """
    Dataset of ID card images.

    Each item is a dict holding the loaded image, its keypoints and its
    label, optionally passed through ``transform``.
    """
    def __init__(self, dataset_path, transform=None):
        # read_data returns a list of records with image_path/keypoints/label
        # -- TODO confirm against utils.read_data.
        self.data = read_data(dataset_path)
        self.transform = transform
    def __len__(self):
        return len(self.data)
    def __getitem__(self, idx):
        record = self.data[idx]
        sample = {
            'image': io.imread(record['image_path']),
            'keypoints': record['keypoints'],
            'label': record['label'],
        }
        if self.transform:
            sample = self.transform(sample)
        return sample
class ToTensor(object):
    """
    Converts an ID-card sample's image (H x W x C numpy array) and targets
    into torch tensors; the image is reordered to C x H x W.
    """
    def __call__(self, sample):
        # numpy images are HWC; torch convolutions expect CHW.
        chw_image = sample['image'].transpose((2, 0, 1))
        return {
            'image': torch.from_numpy(chw_image),
            'keypoints': torch.FloatTensor(sample['keypoints']),
            'label': torch.FloatTensor(sample['label']),
        }
class Net(nn.Module):
    """
    Head on top of a ResNet152 backbone that jointly predicts a sigmoid
    presence score (1 value) and 8 keypoint-regression values per sample.
    """
    def __init__(self, resnet):
        super(Net, self).__init__()
        self.resnet = resnet
        # 100352 is the flattened backbone feature size for this setup --
        # TODO confirm if the backbone changes.
        self.fc1 = nn.Linear(100352, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc_cls = nn.Linear(84, 1)  # classification head
        self.fc_reg = nn.Linear(84, 8)  # keypoint regression head
    def forward(self, x):
        features = self.resnet(x)
        features = features.view(-1, self.num_flat_features(features))
        hidden = F.relu(self.fc1(features))
        hidden = F.relu(self.fc2(hidden))
        presence = torch.sigmoid(self.fc_cls(hidden))
        keypoints = self.fc_reg(hidden)
        return presence, keypoints
    def num_flat_features(self, x):
        """Return the per-sample feature count (product of all non-batch dims)."""
        count = 1
        for dim in x.size()[1:]:
            count *= dim
        return count
class Identity(nn.Module):
    """
    Pass-through module, used to strip layers (avgpool/fc) off a backbone.
    """
    def __init__(self, *args, **kwargs):
        super().__init__()
    def forward(self, x):
        # Return the input unchanged.
        return x
# if __name__ == "__main__":
# Step 1. Loading the data and transforming it into tensor
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#
# train_dataset = IDCardsDataset("Resized_Dataset", transform=ToTensor())
# test_dataset = IDCardsDataset("Test_Dataset", transform=ToTensor())
#
# trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=8, shuffle=True)
# validloader = torch.utils.data.DataLoader(test_dataset, batch_size=4)
#
# print("Size of training set:", len(train_dataset))
# print("Size of testing set:", len(test_dataset))
# Step 2. Plotting a sample from the dataset
# image = cv2.imread("Resized_Dataset/Ablan Abkenov/1.jpg")
# with open("Resized_Dataset/Ablan Abkenov/Labels/1.json", 'r') as json_file:
# json_data = json.load(json_file)
# for p in json_data["shapes"]:
# label = p["points"]
#
# show_image(image, label)
# print("STEP 2 - SUCCESS! Sample image was plotted!")
# Step 3. Preparing CNN, criterion and optimizer for training
# resnet = torchvision.models.resnet152(pretrained=True, progress=True).cuda()
# resnet.avgpool = Identity()
# resnet.fc = Identity()
#
# net = Net(resnet)
# net.to(device)
# net.load_state_dict(torch.load("IDregression.pt"))
#
# criterion_cls = nn.BCELoss()
# criterion_reg = nn.MSELoss()
# optimizer = torch.optim.Adam(net.parameters(), lr=0.0001)
# Step 4. Training loop
# model_check(net)
#
# with open("Check/11.json", 'r') as json_file:
# json_data = json.load(json_file)
# for p in json_data["shapes"]:
# label = p["points"]
#
# reg_out = np.concatenate(reg_out)
# keypoints = keypoints_to_coordinates(reg_out)
# label = np.asarray(label)
#
# image_copy = bird_view(image_copy, label, width=w)
#
# config = "-l rus --oem 1 --psm 7"
# print(image_to_string(image_copy, config=config))
#
# show_image(image_copy, label)
# # newline(p1, p2, color='red')
# # newline(p2, p3, color='red')
# # newline(p3, p4, color='red')
# # newline(p4, p1, color='red')
# # plt.show()
# net.eval()
# with torch.no_grad():
#
# valid_loss = 0.0
# accuracy = 0
#
# for i, data in enumerate(validloader):
# inputs, keypoints, labels = data['image'].to(device, dtype=torch.float), data['keypoints'].to(device), \
# data['label'].to(device)
# cls_out, reg_out = net(inputs)
# loss_reg = criterion_reg(reg_out, keypoints)
# valid_loss += loss_reg
# plt.figure()
# plt.plot(1000, 250)
# cls_out = cls_out >= 0.5
# labels = labels.type(torch.ByteTensor)
# labels = labels.to(device)
# show_landmarks_batch(data, reg_out)
# plt.axis('off')
# plt.ioff()
# plt.show()
# for i in range(len(cls_out)):
# if cls_out[i] == labels[i]:
# accuracy += 1
# print('-' * 250)
# print('Accuracy: %.3f%%' % (accuracy / len(test_dataset) * 100))
# print("Finished.")
# min_valid = 0.228
#
# for epoch in range(50):
#
# net.train()
# running_loss = 0.0
# valid_loss = 0.0
#
# print("Epoch: {}".format(epoch + 1))
#
# for i, data in enumerate(trainloader):
#
# inputs, keypoints, labels = data['image'].to(device, dtype=torch.float), data['keypoints'].to(device), data[
# 'label'].to(device)
# optimizer.zero_grad()
#
# ind = labels.squeeze(-1).type(torch.ByteTensor)
#
# cls_out, reg_out = net(inputs)
#
# loss_cls = criterion_cls(cls_out, labels)
# loss_reg = criterion_reg(reg_out[ind], keypoints[ind])
# total_loss = 0.7 * loss_cls + 0.3 * loss_reg
# total_loss.backward()
# optimizer.step()
# running_loss += total_loss.item()
#
# if i % 100 == 99:
# print('Training - [%d, %5d] loss: %.3f' %
# (epoch + 1, i + 1, running_loss / 100))
# running_loss = 0.0
#
# # Validation
# net.eval()
# with torch.no_grad():
#
# for i, data in enumerate(validloader):
# inputs, keypoints, labels = data['image'].to(device, dtype=torch.float), data['keypoints'].to(device), \
# data['label'].to(device)
#
# ind = labels.squeeze(-1).type(torch.ByteTensor)
#
# cls_out, reg_out = net(inputs)
#
# loss_cls = criterion_cls(cls_out, labels)
# loss_reg = criterion_reg(reg_out[ind], keypoints[ind])
# total_loss = 0.5 * loss_cls + 0.5 * loss_reg
# valid_loss += total_loss
#
# print('Validation - [%d, %5d] loss: %.3f' %
# (epoch + 1, i + 1, valid_loss / len(test_dataset)))
# if min_valid > (valid_loss / len(test_dataset)):
# min_valid = valid_loss / len(test_dataset)
# torch.save(net.state_dict(), 'model_regcls.pt')
#
# print(min_valid)
#
# valid_loss = 0.0
#
# # plt.figure()
# # plt.plot(1000, 250)
# # show_landmarks_batch(data, reg_out)
# # plt.axis('off')
# # plt.ioff()
# # plt.show()
#
# print('-' * 250)
#
# print('Finished Training')
|
import pyautogui
import time
from PIL import ImageGrab,ImageOps
from numpy import *
class cordinates():
    """Screen pixel coordinates for the Chrome dino game's UI elements."""
    replay = (960, 450)      # replay button
    dino = (663, 464)        # dino sprite position
    tree1 = (708+26, 458)    # top-left corner of the obstacle-scan box
    tree2 = (741+26, 498)    # bottom-right corner of the obstacle-scan box
    spbreak1 = (1000, 470)   # unused here -- presumably speed-break markers
    spbreak2 = (1100, 470)
def restartgame():
    """Click the replay button to (re)start the game."""
    pyautogui.click(cordinates.replay)
restartgame()  # start a fresh run as soon as the script loads
def space():
    """Tap the space bar (~50 ms) to make the dino jump, then click replay.

    NOTE(review): the trailing restartgame() click after every jump looks
    suspicious -- confirm it is intentional.
    """
    pyautogui.keyDown('space')
    time.sleep(0.05)
    print("jump")
    pyautogui.keyUp('space')
    restartgame()
#space()
def imagegrab(speed):
    """Grab the obstacle-scan box, shifted right by *speed* pixels, and
    return the sum of its grayscale colour histogram.

    The caller compares the result to 1567, which corresponds to "no
    obstacle" on this screen -- the value depends on resolution/theme,
    so confirm on other machines.
    """
    box = (cordinates.tree1[0]+speed, cordinates.tree1[1], cordinates.tree2[0]+speed, cordinates.tree2[1])
    image = ImageGrab.grab(box)
    grayImage = ImageOps.grayscale(image)
    a = array(grayImage.getcolors())
    print(a.sum())
    return a.sum()
# Main loop: scan progressively further ahead (i grows each iteration) and
# jump whenever the scan box's checksum differs from the empty-ground value.
i = 0
rate = 6  # unused -- kept as-is
while True:
    i += 0.4
    if (imagegrab(i) != 1567):
        space()
    # Stop once the game-over replay button is visible on screen.
    if pyautogui.locateOnScreen("restart.png", region=(920, 430, 1000, 490)):
        print("generation terminated")
        break
|
import numpy as np
import utils
from pydrake.all import MathematicalProgram, Solve, Variables
from pydrake.symbolic import Polynomial
from pydrake.common.containers import EqualToDict
# the parameters just have to be two arbitrary functions
# not necessarily in the nocontact/leftcart contact modes
def fuse_functions(V_no_contact, V_left_cart, deg_V=2):
    """Re-fit two polynomials so they (approximately) agree at the contact
    boundary.

    Builds free polynomials of degree ``deg_V`` in the cart-pole state
    (x_cart, xdot_cart, s, c, thetadot, z), penalises the squared difference
    between each new polynomial's partial integral over the strip
    x_cart in [-1.6, -1.4] and an old polynomial's, keeps coefficients near
    the originals, and returns (V_no_contact_new, V_left_cart_new).
    """
    prog = MathematicalProgram()
    # Indeterminates: pole angle encoded as (s, c) plus cart state and z.
    s = prog.NewIndeterminates(1, "s")[0]
    c = prog.NewIndeterminates(1, "c")[0]
    thetadot = prog.NewIndeterminates(1, "thetadot")[0]
    x_cart = prog.NewIndeterminates(1, "x_cart")[0]
    xdot_cart = prog.NewIndeterminates(1, "xdot_cart")[0]
    z = prog.NewIndeterminates(1, "z")[0]
    x = np.array([x_cart, xdot_cart, s, c, thetadot, z])
    V_no_contact_new = prog.NewFreePolynomial(Variables(x), deg_V)
    V_left_cart_new = prog.NewFreePolynomial(Variables(x), deg_V)
    # Partial integrals over the boundary strip x_cart in [-1.6, -1.4].
    left_cart_new_partial_int = utils.integrate_c2g(V_left_cart_new, x_cart_min=-1.6,
                                                    x_cart_max=-1.4, variables=x)
    no_contact_new_partial_int = utils.integrate_c2g(V_no_contact_new, x_cart_min=-1.6, \
                                                     x_cart_max=-1.4, variables=x)
    old_f1_partial_int = utils.integrate_c2g(V_no_contact, x_cart_min=-1.6, x_cart_max=-1.4)
    old_f2_partial_int = utils.integrate_c2g(V_left_cart, x_cart_min=-1.6, x_cart_max=-1.4)
    # NOTE(review): loss_term1 pairs the NEW left-cart integral with the OLD
    # no-contact one (and vice versa) -- looks intentional for "fusing", but
    # confirm the cross pairing.
    loss_term1 = (left_cart_new_partial_int - old_f1_partial_int)**2
    loss_term2 = (no_contact_new_partial_int - old_f2_partial_int)**2
    change_left_cart_no_contact = no_contact_new_partial_int - left_cart_new_partial_int
    # The four monomial maps below are currently unused (leftover from an
    # earlier formulation; see add_coefficient_constraints).
    no_contact_new_monom_map = V_no_contact_new.monomial_to_coefficient_map()
    no_contact_monom_map = V_no_contact.monomial_to_coefficient_map()
    left_cart_new_monom_map = V_left_cart_new.monomial_to_coefficient_map()
    left_cart_monom_map = V_left_cart.monomial_to_coefficient_map()
    #prog.AddCost((change_left_cart_no_contact**2).ToExpression())
    prog.AddCost(loss_term1.ToExpression())
    prog.AddCost(loss_term2.ToExpression())
    # Keep decision variables bounded to help the solver.
    prog.AddBoundingBoxConstraint(-50, 50, np.array([prog.decision_variables()]))
    # Tie each new polynomial's coefficients to its old counterpart's.
    add_coefficient_constraints(prog, V_no_contact, V_no_contact_new)
    add_coefficient_constraints(prog, V_left_cart, V_left_cart_new)
    print("solving fusion(ish)")
    result = Solve(prog)
    V_left_cart_new = Polynomial(result.GetSolution(V_left_cart_new.ToExpression()))
    V_no_contact_new = Polynomial(result.GetSolution(V_no_contact_new.ToExpression()))
    print_diagnostic_info(V_no_contact, V_left_cart, V_no_contact_new, V_left_cart_new)
    return V_no_contact_new, V_left_cart_new
# EqualToDict isn't really cooperating
def compare_monomial_powers(m1, m2):
    """Return True when two monomial power maps agree, comparing variables
    by name rather than identity (EqualToDict isn't really cooperating).

    *m1* and *m2* map variables (anything with ``get_name()``) to powers;
    they are equal when their {(name, power)} pairs match as sets.
    """
    named_powers_1 = set((var.get_name(), power) for var, power in m1.items())
    named_powers_2 = set((var.get_name(), power) for var, power in m2.items())
    return named_powers_1 == named_powers_2
"""
Monomials don't match just by having the same name, Maybe the more Drake friendly
way to do this would be to mess with variable IDs.... For now this does finds
a monomial in a polynomial by comparing string names
"""
def find_matching_monomial(monomial, polynomial):
    """Return the monomial of *polynomial* that equals *monomial*, matching
    variables by name and power; returns None when there is no match.

    Drake variables from different programs don't compare equal even when
    they print the same, so comparison is done on name strings.
    """
    given_varnames = set(v.get_name() for v in monomial.GetVariables())
    given_powers = EqualToDict(monomial.get_powers())
    for candidate in polynomial.monomial_to_coefficient_map().keys():
        # BUG FIX: the original read `monomial.GetVariables()` here instead
        # of the candidate's, so this name filter never rejected anything
        # and matching relied solely on the power comparison.
        candidate_varnames = set(v.get_name() for v in candidate.GetVariables())
        if candidate_varnames != given_varnames:
            continue
        if compare_monomial_powers(candidate.get_powers(), given_powers):
            return candidate
def add_coefficient_constraints(prog, f1, f2):
    """Constrain each coefficient of *f2* to (almost) equal the matching
    coefficient of *f1*.

    Monomials are matched by variable name and power via
    find_matching_monomial; each coefficient pair is tied with a quadratic
    tolerance band (c1 - c2)**2 <= 1e-7.
    NOTE(review): assumes every monomial of f1 has a match in f2 -- a
    missing match makes f2_monom None and raises a KeyError. Confirm.
    """
    f1_coeff_map = f1.monomial_to_coefficient_map()
    f2_coeff_map = f2.monomial_to_coefficient_map()
    for f1_monom in f1_coeff_map.keys():
        f2_monom = find_matching_monomial(f1_monom, f2)
        f1_coeff = f1_coeff_map[f1_monom]
        f2_coeff = f2_coeff_map[f2_monom]
        prog.AddConstraint((f1_coeff - f2_coeff)**2 <= 1e-7) #1e-20
def print_diagnostic_info(old_f1, old_f2, new_f1, new_f2):
    """Print how far the fused polynomials moved, via utils integrals.

    Compares partial integrals (x_cart in [-1.6, -1.4]) old-vs-old and
    new-vs-new, then full integrals old-vs-new for each function.
    """
    old_f1_partial_int = utils.integrate_c2g(old_f1, x_cart_min=-1.6, x_cart_max=-1.4)
    old_f2_partial_int = utils.integrate_c2g(old_f2, x_cart_min=-1.6, x_cart_max=-1.4)
    print("difference in olds: ", old_f1_partial_int - old_f2_partial_int)
    new_f1_partial_int = utils.integrate_c2g(new_f1, x_cart_min=-1.6, x_cart_max=-1.4)
    new_f2_partial_int = utils.integrate_c2g(new_f2, x_cart_min=-1.6, x_cart_max=-1.4)
    print("difference in news: ", new_f1_partial_int - new_f2_partial_int)
    old_f1_full_int = utils.integrate_c2g(old_f1)
    new_f1_full_int = utils.integrate_c2g(new_f1)
    print("difference in f1s: ", old_f1_full_int - new_f1_full_int)
    old_f2_full_int = utils.integrate_c2g(old_f2)
    new_f2_full_int = utils.integrate_c2g(new_f2)
    print("difference in f2s: ", old_f2_full_int - new_f2_full_int)
|
from sklearn.externals import joblib
from sklearn.datasets import fetch_20newsgroups
import pprint
# Restrict the 20newsgroups corpus to these two target categories.
categories = [
    'alt.atheism',
    'talk.religion.misc'
]
print type(categories)
# Test split with headers/footers/quotes stripped so the classifier cannot
# key on message metadata.
data = fetch_20newsgroups(subset='test',categories = categories,remove=('headers','footers','quotes'))
def main():
    """Load a pickled GridSearchCV and run its best estimator on the
    20newsgroups test subset plus two hand-written sample documents."""
    # Keep only documents whose target name is in `categories`.
    datatemp = [data['data'][i] for i in range(len(data['data'])) if data['target_names'][data['target'][i]] in categories]
    targettemp = [data['target'][i] for i in range(len(data['data'])) if data['target_names'][data['target'][i]] in categories]
    data['data'] = datatemp
    data['target'] = targettemp
    print (data['target'])
    # Previously fitted grid search -- must exist on disk next to the script.
    grid_search = joblib.load("gridsearchdump.pkl")
    pprint.pprint((data['data'][0]))
    #print dir(grid_search)
    searchoutput = grid_search.best_estimator_.predict(data['data'])
    # Two hand-written documents for a quick sanity check of the classifier.
    testset = ['I love jesus, jesus jesus jesus, anyone know where I can find a local church','These creationists are taking over the public schools']
    testoutput = grid_search.best_estimator_.predict(testset)
    print testoutput
if __name__ == "__main__":
    main()
"""
PCPP-32-101 1.3 Understand and use the concepts of inheritance,
polymorphism, and composition
- class hierarchies
- single vs. multiple inheritance
- Method Resolution Order (MRO)
- duck typing
"""
class A:
    """First base class; provides method()."""
    # noinspection PyMethodMayBeStatic
    def method(self) -> None:
        print("A.method() called")
class B:
    """Second base class; provides an alternative method()."""
    # noinspection PyMethodMayBeStatic
    def method(self) -> None:
        print("B.method() called")
class C(A, B):
    # MRO: C, A, B -- A wins because it is listed first.
    pass
class D(C, B):
    # MRO: D, C, A, B, object (C3 linearisation keeps B after C's bases).
    pass
d = D()
d.method()  # resolves through the MRO to A.method -> "A.method() called"
|
import numpy
# Read n, then two n x n integer matrices (one whitespace-separated row per
# line each), and print their matrix product.
n = int(input())
matrix_a = [list(map(int, input().split())) for _ in range(n)]
matrix_b = [list(map(int, input().split())) for _ in range(n)]
print(numpy.dot(numpy.array(matrix_a), numpy.array(matrix_b)))
|
# Define a function reverse() that computes the reversal of a string.
# For example, reverse("I am
# testing") should return the string "gnitset ma I".
def string_reverse(string1):
    """Return *string1* reversed, using slice notation."""
    return string1[::-1]
print(string_reverse("I am testing"))
def reverse_str1(string2):
    """Print *string2* reversed, built by indexing from the end."""
    reversed_chars = [string2[-i - 1] for i in range(len(string2))]
    print(''.join(reversed_chars))
reverse_str1("I am testing")
def reversedStr(string3):
    """Print *string3* reversed, prepending one character at a time."""
    accumulated = ''
    for ch in string3:
        accumulated = ch + accumulated
    print(accumulated)
reversedStr("I am reverse")
def reversed_j(s):
    """Return *s* reversed via the built-in reversed() iterator."""
    return ''.join(reversed(s))
print(reversed_j("I am testing"))
def rev_str(s):
    """Return *s* reversed by reversing a list of its characters."""
    chars = list(s)
    chars.reverse()
    return ''.join(chars)
print(rev_str("I am testing"))
|
# Project Euler Problem 44
# Find the pair of pentagonal numbers whose sum and difference are also
# pentagonal, minimising that difference.
Range = 10000  # how many pentagonal numbers to generate
# Make a bunch of pentagonal numbers: P(n) = n(3n-1)/2.
PentagonalNum = [n * (3 * n - 1) // 2 for n in range(1, Range)]
# PERF FIX: a set gives O(1) membership tests; the original list scans made
# every `in` check O(n), which is why the script "seems to run forever".
PentagonalSet = set(PentagonalNum)
PentDiff = []
for j_idx in range(1, len(PentagonalNum)):
    j = PentagonalNum[j_idx]
    for i_idx in range(j_idx):
        i = PentagonalNum[i_idx]  # i < j, so j - i is positive
        if i + j in PentagonalSet and j - i in PentagonalSet:
            # BUG FIX: the original appended i - j (a negative number), so
            # min() returned the negated LARGEST difference, not the answer.
            PentDiff.append(j - i)
            print(i, j, j - i)
print(min(PentDiff))  # the minimised pentagonal difference D
|
from django_includes._version import __version__
__all__ = ["__version__"]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.