text stringlengths 8 6.05M |
|---|
from .f_wrapper import Syn_FWrapper
from .f_channel import Syn_Channel
#import Syn_FWrapper
#from f_wrapper import Syn_FWrapper
#from f_channel import Syn_Channel
#from prot_bracha import Syn_Bracha_Protocol
#import broken_prot_bracha
##from f_bracha import Syn_Bracha_Functionality, RBC_Simulator
#import f_bracha
|
class Solution:
    def largestNumber(self, nums: List[int]) -> str:
        """Concatenate nums into the largest possible number, as a string."""
        digits = [str(value) for value in nums]
        # Order pair-wise: a goes before b exactly when a+b beats b+a.
        digits.sort(key=functools.cmp_to_key(
            lambda a, b: (a + b < b + a) - (a + b > b + a)))
        return ''.join(digits)
#
# Assignment 2
#
# Student Name : Aausuman Deep
# Student Number : 119220605
#
# Assignment Creation Date : February 10, 2020
import re
def moolah(s):
    """Return every Euro amount mentioned in `s` (as strings), in order."""
    pattern = re.compile(r'EUR\s?\d+(\.\d+)?')
    return [match.group() for match in pattern.finditer(s)]
def bleep(s):
    """Return a copy of `s` with each four-character word replaced by ****."""
    censor = re.compile(r'(^|\b)\S{4}(\b|$)')
    return censor.sub("****", s)
def to_english(s):
    """Return `s` with every number annotated with its digits spelled out,
    e.g. '12' becomes '12 (one two)'."""
    units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
    seen = []
    # Iterate matches of the ORIGINAL string; substitutions never introduce
    # new digits, so positions found here stay valid as numbers.
    for match in re.finditer(r'\d+', s):
        number = match.group()
        if number in seen:
            continue
        seen.append(number)
        spelled = " ".join(units[int(digit)] for digit in number)
        s = re.sub(r'\b{}\b'.format(number), number + " (" + spelled + ")", s)
    return s
def harvest_emails(s):
    """Return the valid email addresses occurring in `s`, sorted by domain
    then by local part.

    Validity: local part of [A-Za-z0-9_.] with no leading/trailing/doubled
    dots; domain of [A-Za-z0-9-.] containing a dot, no leading/trailing dots,
    and no label that is empty or starts/ends with '-'.
    """
    potential_emails = re.findall(r'\S+@\S+', s)
    emails = []
    for potential_email in potential_emails:
        local_part, domain = potential_email.split("@", 1)
        if re.match(r'^[A-Za-z0-9_.]+$', local_part) and re.match(r'^[A-Za-z0-9-.]+$', domain):
            local_part_flag = 1
            domain_flag = 1
            # Local part: no leading/trailing dot, no consecutive dots.
            if local_part[0] != '.' and local_part[-1] != '.':
                for i in range(1, len(local_part)):
                    if local_part[i] == '.' and local_part[i - 1] == '.':
                        local_part_flag = 0
            else:
                local_part_flag = 0
            # Domain: must contain a dot, no leading/trailing dot, and every
            # label must be non-empty and not start/end with '-'.
            if '.' not in domain:
                domain_flag = 0
            if domain[0] != '.' and domain[-1] != '.':
                for label in domain.split("."):
                    # BUG FIX: an empty label (e.g. "a..b") used to raise
                    # IndexError on label[0]; treat it as invalid instead.
                    if not label or label[0] == '-' or label[-1] == '-':
                        domain_flag = 0
            else:
                domain_flag = 0
            if local_part_flag == 1 and domain_flag == 1:
                emails.append(potential_email)
    emails = sorted(emails, key=lambda x: (x.rsplit('@', 1)[::-1], x))
    return emails
|
import math
import re
import os
import time
from collections import Counter
from collections import defaultdict
from definitions import NOMENCLATURES_DIR, TFIDF_PROFESSIONS_DIR
def get_lower_words(text):
    """Return every fully-lowercase word found in `text`."""
    lower_word = re.compile(r"\b[a-z]+\b")
    return lower_word.findall(text)
def tf(word, profession, words_dict, doc_words_count):
    """Term frequency of `word` within one profession's document."""
    occurrences = words_dict[word][profession]
    return occurrences / doc_words_count
def idf(word, total_count, words_dict):
    """Smoothed inverse document frequency: log(N / (1 + document_frequency))."""
    document_frequency = len(words_dict[word])
    return math.log(total_count / (1 + document_frequency))
def tfidf(word, profession, total_count, words_dict, doc_words_count):
    """Combined tf * idf score for `word` in `profession`'s document."""
    term_frequency = tf(word, profession, words_dict, doc_words_count)
    return term_frequency * idf(word, total_count, words_dict)
def get_professions_words_list():
    """Yield (profession, lowercase-words) pairs, one per profession document."""
    professions_path = os.path.join(NOMENCLATURES_DIR, 'professions.txt')
    with open(professions_path, encoding='utf8', mode='r') as f:
        for line in f:
            profession = line.rstrip()
            doc_path = os.path.join(TFIDF_PROFESSIONS_DIR, profession + '.txt')
            with open(doc_path, encoding='utf8', mode='r') as prof_file:
                yield profession, get_lower_words(prof_file.read())
def get_professions_list():
    """Return the list of profession names (one per line of professions.txt).

    BUG FIX: the file handle was opened inline in a comprehension and never
    closed; a context manager now guarantees closure.
    """
    with open(os.path.join(NOMENCLATURES_DIR, 'professions.txt'),
              encoding='utf8', mode='r') as f:
        return [line.rstrip() for line in f]
def init_words_dict():
    """Build a {word: {profession: count}} index over all profession documents."""
    result = defaultdict(dict)
    doc_index = 1
    for profession, doc_words in get_professions_words_list():
        counts = Counter(doc_words)
        for word, count in counts.items():
            # Progress trace roughly every 20000 distinct words.
            if len(result) % 20000 == 0:
                print(doc_index, "document:", len(result), "words", time.strftime("%H:%M:%S"))
            result[word][profession] = count
        doc_index += 1
    return result
def main():
    """Print the top-20 tf-idf words per profession as pseudo-Python dict text."""
    professions = get_professions_list()
    words_dict = init_words_dict()
    print("{")
    for profession, doc_words in get_professions_words_list():
        scores = {word: tfidf(word, profession, len(professions), words_dict, len(doc_words)) for word in doc_words}
        top_words = sorted(scores.items(), key=lambda item: item[1], reverse=True)[:20]
        print("# {0}".format(time.strftime("%H:%M:%S")))
        print("'{0}': {{".format(profession))
        for word, score in top_words:
            print("\t'{0}': {1},".format(word, round(score, 8)))
        print('},')
    print("}")


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
from hw8_1 import getDict
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
import string
import os
from matplotlib.pyplot import figure
#creates a mega set of all ngrams and sorts them alphabetically
def hugeDict(file1, file2, file3, file4, file5, file6):
    """Return the union of all ngram keys found in the six input files."""
    combined = set()
    for name in (file1, file2, file3, file4, file5, file6):
        combined.update(getDict(name).keys())
    return combined
#makes a list of length alphabetSet. Iterates through alphabetSet. For each element, counts how
#many times it shows up in filename. Send the full list and filename to plotHisto to make six plots.
def countMaker(alphabetSet, filename):
    """For each ngram in alphabetSet, record 1 if it occurs in `filename`,
    else 0, then hand the counts to plotHisto for plotting."""
    file_ngrams = set(getDict(filename).keys())
    # One slot per alphabet entry, in iteration order (entries are unique,
    # so each slot is 0 or 1).
    counts = [1 if gram in file_ngrams else 0 for gram in alphabetSet]
    plotHisto(counts, filename)
#plots histogram
def plotHisto(bars, filename, minrange = 0.0, maxrange = 100.0, plotinline = False) :
    """Plot `bars` as a bar chart; save it as `filename` minus extension,
    or show it interactively when plotinline is True.

    bars: list of bar heights; minrange/maxrange: x-axis range the bars are
    spread over; filename: source file whose basename names the figure.
    """
    figure(num=None, figsize=(8, 6), dpi=400, facecolor='w', edgecolor='k')
    mrange = maxrange - minrange
    binsize = mrange/len(bars)
    labels = [(mrange / len(bars)) * i + minrange for i in range(len(bars))]
    plt.bar(labels, bars, align = 'edge', width = binsize)
    newFile = os.path.splitext(filename)[0]
    if plotinline :
        # BUG FIX: was `pltshow()` — an undefined name that raised NameError
        # whenever plotinline was True.
        plt.show()
    else :
        plt.savefig(newFile)
    # Clear the current figure so successive calls do not stack bars.
    plt.clf()
#gets text
def getText(filename):
    """Read `filename` and return its lines with newlines stripped."""
    with open(filename) as source:
        contents = source.read()
    return contents.splitlines()
#ngram preprocessing
#adds padding and makes text lowercase
def getNgrams(line):
    """Return the character trigrams of `line`, lower-cased and padded.

    Two underscores are added on each side so that edge characters appear in
    leading and trailing trigrams.
    """
    pad = "__"
    line = (pad + line + pad).lower()
    ngrams = []
    # BUG FIX: was range(len(line) - 4), which silently dropped the last two
    # padded trigrams (e.g. 'bc_' and 'c__' for input 'abc') despite the
    # symmetric padding.
    for i in range(len(line) - 2):
        ngrams.append(line[i:i + 3])
    return ngrams
#creates dictionaries of ngrams
def getDict(filename):
    """Return {ngram: frequency} over every line of `filename`."""
    all_ngrams = []
    for text_line in getText(filename):
        all_ngrams.extend(getNgrams(text_line))
    return dict(Counter(all_ngrams))
#controls the sending of the 6 text files
#this part can be replaces with any text files; it will work with text of any language
def main():
    """Build the combined ngram alphabet from six language samples, then plot
    per-language histograms (plus the mystery text) against it."""
    language_files = ['ngrams/english.txt', 'ngrams/spanish.txt', 'ngrams/italian.txt',
                      'ngrams/french.txt', 'ngrams/german.txt', 'ngrams/portuguese.txt']
    alphabetSet = hugeDict(*language_files)
    for name in language_files + ['ngrams/mystery.txt']:
        countMaker(alphabetSet, name)


if __name__ == '__main__':
    main()
|
""""
"""
pin = 1234
attmpt=3
x = 1
while x<=3:
inp=int(input("Enter PIN"))
if inp!=pin:
attmpt=attmpt-1
if attmpt ==0:
print("Blocked")
else:
print("Wrong! You have {}".format(attmpt))
x+=1
else:
print("Sucess")
break
# z=list(range(1,21))
# print(z)
x = list(range(1, 100))
# NOTE(review): numbers divisible by 3 (e.g. 9, 15) also land in `evenno`
# despite its name — preserved as-is; confirm the i % 3 test is intended.
evenno = [i for i in x if i % 2 == 0 or i % 3 == 0]
oddno = [i for i in x if i % 2 != 0 and i % 3 != 0]
print(x)
print(evenno)
print(oddno)
|
import json, re
import urllib.request
from hendlers import cred_handler, help_hendler, srv_hendler
from chatterbot.conversation import Statement
from chatterbot.logic import LogicAdapter
class CallFunctionAdapter(LogicAdapter):
    """Chatterbot logic adapter mapping known command keywords to handlers."""

    def __init__(self, chatbot, **kwargs):
        super().__init__(chatbot, **kwargs)
        # Upper-case command keyword -> handler callable.
        self.known_commands = {
            'АНЕКДОТ': self.get_funny_story,
            'ТОСТ': self.get_tost,
            'СПРАВКА': self.get_help,
            'РЕГИСТРАЦИЯ': self.process_cred,
            'ЗАПУЩЕННЫЕ ПРОЦЕССЫ СЕРВЕРА': self.process_srv,
            'СОСТОЯНИЕ РЕСУРСОВ СЕРВЕРА': self.process_srv,
            'ПЕРЕЗАГРУЗИ СЕРВЕР': self.process_srv
        }

    @staticmethod
    def process_srv(statement):
        """Delegate server status/restart commands to the server handler."""
        return srv_hendler.process_host_info(statement)

    @staticmethod
    def process_cred(statement):
        """Delegate registration commands to the credentials handler."""
        return cred_handler.process_cred(statement)

    @staticmethod
    def get_help(statement):
        """Delegate help requests to the help handler."""
        return help_hendler.get_help(statement)

    @staticmethod
    def get_funny_story(statement):
        """Fetch a random joke from the remote service (cp1251-encoded JSON)."""
        with urllib.request.urlopen("http://rzhunemogu.ru/RandJSON.aspx?CType=1") as url:
            data = json.loads(url.read().decode('cp1251'), strict=False)
            print(data)
            return data.get('content', 'Не прошло (')

    @staticmethod
    def get_tost(statement):
        """Fetch a random toast from the remote service (cp1251-encoded JSON)."""
        with urllib.request.urlopen("http://rzhunemogu.ru/RandJSON.aspx?CType=6") as url:
            data = json.loads(url.read().decode('cp1251'), strict=False)
            print(data)
            return data.get('content', 'Не прошло (')

    def can_process(self, statement):
        """True when the statement mentions any known command keyword."""
        text = str(statement).upper()
        return any(command in text for command in self.known_commands)

    def process(self, statement, additional_response_selection_parameters=None):
        """Run the first matching command's handler; low confidence otherwise."""
        response = Statement('Что-то не выходит!')
        response.confidence = 0.1
        text = str(statement).upper()
        try:
            for command, handler in self.known_commands.items():
                if command in text:
                    response.text = handler(statement)
                    response.confidence = 1.0
                    return response
        except Exception as error:
            print(error)
        return response
|
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019 - 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# Third party import
import logging
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.autograd import Variable
from pynet.utils import Losses
# Global parameters
logger = logging.getLogger("pynet")
def dice_loss_1(logits, true, eps=1e-7):
    """ Computes the Sørensen–Dice loss.
    Note that PyTorch optimizers minimize a loss. In this
    case, we would like to maximize the dice loss so we
    return the negated dice loss.
    true: a tensor of shape [B, 1, H, W].
    logits: a tensor of shape [B, C, H, W]. Corresponds to
        the raw output or logits of the model.
    eps: added to the denominator for numerical stability.
    dice_loss: the Sørensen–Dice loss.
    """
    num_classes = logits.shape[1]
    if num_classes == 1:
        # Binary case: build a two-channel one-hot target and stack the
        # sigmoid foreground/background probabilities to match it.
        true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        true_1_hot_f = true_1_hot[:, 0:1, :, :]
        true_1_hot_s = true_1_hot[:, 1:2, :, :]
        true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
        pos_prob = torch.sigmoid(logits)
        neg_prob = 1 - pos_prob
        probas = torch.cat([pos_prob, neg_prob], dim=1)
    else:
        true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        # BUG FIX: this module imports torch.nn.functional as `func`; the
        # reference to undefined `F` raised NameError on the multiclass path.
        probas = func.softmax(logits, dim=1)
    true_1_hot = true_1_hot.type(logits.type())
    # Sum over batch and spatial dims, keeping a per-class value.
    dims = (0,) + tuple(range(2, true.ndimension()))
    intersection = torch.sum(probas * true_1_hot, dims)
    cardinality = torch.sum(probas + true_1_hot, dims)
    dice_loss = (2. * intersection / (cardinality + eps)).mean()
    return (1 - dice_loss)
def dice_loss_2(output, target, weights=1):
    """Per-class soft dice between softmaxed `output` and one-hot argmax of
    `target`, averaged over the batch.

    output : NxCxHxW Variable
    target : NxHxW LongTensor
    weights : C FloatTensor
    """
    probs = func.softmax(output, dim=1)
    labels = torch.argmax(target, dim=1).type(torch.LongTensor)
    one_hot = probs.data.clone().zero_()
    one_hot.scatter_(1, labels.unsqueeze(1), 1)
    one_hot = Variable(one_hot)
    assert probs.size() == one_hot.size(), "Input sizes must be equal."
    assert probs.dim() == 4, "Input must be a 4D Tensor."
    numerator = (probs * one_hot).sum(dim=3).sum(dim=2)
    denom_pred = probs.pow(2).sum(dim=3).sum(dim=2)
    denom_true = one_hot.pow(2).sum(dim=3).sum(dim=2)
    dice = (2 * numerator / (denom_pred + denom_true)) * weights
    return dice.sum() / dice.size(0)
@Losses.register
class MultiDiceLoss(object):
    """ Define a multy classes dice loss.
    Note that PyTorch optimizers minimize a loss. In this case, we would like
    to maximize the dice loss so we return the negated dice loss.
    """
    def __init__(self, weight=None, ignore_index=None, nb_batch=None):
        """ Class instanciation.
        Parameters
        ----------
        weight: FloatTensor (C), default None
            a manual rescaling weight given to each class.
        ignore_index: int, default None
            specifies a target value that is ignored and does not contribute
            to the input gradient.
        nb_batch: int, default None
            the number of mini batch to rescale loss between 0 and 1.
        """
        # BUG FIX: `weight or 1` raises "Boolean value of Tensor with more
        # than one element is ambiguous" for a per-class weight tensor;
        # test explicitly for None instead.
        self.weight = 1 if weight is None else weight
        self.ignore_index = ignore_index
        self.nb_batch = nb_batch or 1
    def __call__(self, output, target):
        """ Compute the loss.
        Note that this criterion is performing nn.Softmax() on the model
        outputs.
        Parameters
        ----------
        output: Variable (NxCxHxW)
            unnormalized scores for each class (the model output) where C is
            the number of classes.
        target: LongTensor (NxCxHxW)
            the class indices.
        """
        eps = 1  # 0.0001
        n_classes = output.size(1) * self.nb_batch
        output = func.softmax(output, dim=1)
        target = torch.argmax(target, dim=1).type(torch.LongTensor)
        # output = output.exp()
        encoded_target = output.detach() * 0
        if self.ignore_index is not None:
            # Zero ignored positions out of the one-hot target (and later
            # out of the denominator) so they contribute nothing.
            mask = target == self.ignore_index
            target = target.clone()
            target[mask] = 0
            encoded_target.scatter_(1, target.unsqueeze(1), 1)
            mask = mask.unsqueeze(1).expand_as(encoded_target)
            encoded_target[mask] = 0
        else:
            encoded_target.scatter_(1, target.unsqueeze(1), 1)
        intersection = output * encoded_target
        numerator = 2 * intersection.sum(0).sum(1).sum(1) + eps
        denominator = output + encoded_target
        if self.ignore_index is not None:
            denominator[mask] = 0
        denominator = denominator.sum(0).sum(1).sum(1) + eps
        loss_per_channel = self.weight * (1 - (numerator / denominator))
        logger.info(loss_per_channel)
        return loss_per_channel.sum() / n_classes
@Losses.register
class SoftDiceLoss(object):
    """ Soft Dice Loss.
    """
    def __init__(self, *args, **kwargs):
        super(SoftDiceLoss, self).__init__()
    def __call__(self, y_pred, y_true, eps=1e-8):
        """Return 1 - dice(y_pred, y_true), with an eps-stabilized union."""
        overlap = torch.sum(torch.mul(y_pred, y_true))
        norm_pred = torch.sum(torch.mul(y_pred, y_pred))
        norm_true = torch.sum(torch.mul(y_true, y_true))
        dice_coeff = 2 * overlap / (norm_pred + norm_true + eps)
        return 1 - dice_coeff
@Losses.register
class CustomKLLoss(object):
    """ KL Loss.
    """
    def __init__(self, *args, **kwargs):
        super(CustomKLLoss, self).__init__()
    def __call__(self, mean, std):
        """KL-style penalty: E[mu^2] + E[sigma^2] - E[log sigma^2] - 1."""
        mean_sq = torch.mean(torch.mul(mean, mean))
        var_term = torch.mean(torch.mul(std, std))
        log_term = torch.mean(torch.log(torch.mul(std, std)))
        return mean_sq + var_term - log_term - 1
@Losses.register
class NvNetCombinedLoss(object):
    """ Combined Loss.
    Diceloss + k1 * L2loss + k2 * KLloss
    Since the output of the segmentation decoder has N channels (prediction
    for each tumor subregion), we simply add the N dice loss functions.
    A hyper-parameter weight of k1=0.1, k2=0.1 was found empirically in the
    paper.
    """
    def __init__(self, num_classes, k1=0.1, k2=0.1):
        super(NvNetCombinedLoss, self).__init__()
        self.num_classes = num_classes
        self.k1 = k1
        self.k2 = k2
        self.dice_loss = SoftDiceLoss()
        self.l2_loss = nn.MSELoss()
        self.kl_loss = CustomKLLoss()
    def __call__(self, outputs, y_true):
        """Combine per-class dice, VAE L2 reconstruction and KL terms."""
        y_pred, y_mid = outputs
        # First 128 mid-layer channels hold the mean, the rest the std.
        est_mean, est_std = y_mid[:, :128], y_mid[:, 128:]
        seg_pred = y_pred[:, :self.num_classes]
        seg_truth = y_true[:, :self.num_classes]
        vae_pred = y_pred[:, self.num_classes:]
        vae_truth = y_true[:, self.num_classes:]
        dice_loss = None
        for channel in range(self.num_classes):
            term = self.dice_loss(seg_pred[:, channel], seg_truth[:, channel])
            dice_loss = term if dice_loss is None else dice_loss + term
        l2_loss = self.l2_loss(vae_pred, vae_truth)
        kl_div = self.kl_loss(est_mean, est_std)
        combined_loss = dice_loss + self.k1 * l2_loss + self.k2 * kl_div
        logger.debug(
            "dice_loss:%.4f, L2_loss:%.4f, KL_div:%.4f, combined_loss:"
            "%.4f" % (dice_loss, l2_loss, kl_div, combined_loss))
        return combined_loss
@Losses.register
class MSELoss(object):
    """ Calculate the Mean Square Error loss between I and J.
    """
    def __init__(self, concat=False):
        """ Init class.
        Parameters
        ----------
        concat: bool, default False
            if set, assume the target image J is the moving and fixed images
            concatenated along the channel axis.
        """
        super(MSELoss, self).__init__()
        self.concat = concat
    def __call__(self, arr_i, arr_j):
        """ Forward method.
        Parameters
        ----------
        arr_i, arr_j: Tensor (batch_size, channels, *vol_shape)
            the input data.
        """
        logger.debug("Compute MSE loss...")
        if self.concat:
            # Keep only the fixed half of the concatenated channels.
            nb_channels = arr_j.shape[1] // 2
            arr_j = arr_j[:, nb_channels:]
        logger.debug(" I: {0} - {1} - {2}".format(
            arr_i.shape, arr_i.get_device(), arr_i.dtype))
        logger.debug(" J: {0} - {1} - {2}".format(
            arr_j.shape, arr_j.get_device(), arr_j.dtype))
        diff = arr_i - arr_j
        loss = (diff ** 2).mean()
        logger.debug(" loss: {0}".format(loss))
        logger.debug("Done.")
        return loss
@Losses.register
class PCCLoss(object):
    """ Calculate the Pearson correlation coefficient between I and J.
    """
    def __init__(self, concat=False):
        """ Init class.
        Parameters
        ----------
        concat: bool, default False
            if set, assume the target image J is the moving and fixed images
            concatenated along the channel axis.
        """
        super(PCCLoss, self).__init__()
        self.concat = concat
    def __call__(self, arr_i, arr_j):
        """ Forward method: returns 1 - Pearson(I, J).
        Parameters
        ----------
        arr_i, arr_j: Tensor (batch_size, channels, *vol_shape)
            the input data.
        """
        logger.debug("Compute PCC loss...")
        if self.concat:
            nb_channels = arr_j.shape[1] // 2
            arr_j = arr_j[:, nb_channels:]
            logger.debug(" channels: {0}".format(nb_channels))
        logger.debug(" I: {0} - {1} - {2}".format(
            arr_i.shape, arr_i.get_device(), arr_i.dtype))
        logger.debug(" J: {0} - {1} - {2}".format(
            arr_j.shape, arr_j.get_device(), arr_j.dtype))
        dev_i = arr_i - torch.mean(arr_i)
        dev_j = arr_j - torch.mean(arr_j)
        covariance = torch.sum(dev_i * dev_j)
        # 1e-6 keeps the denominator away from zero for constant images.
        normalizer = (torch.sqrt(torch.sum(dev_i ** 2) + 1e-6) *
                      torch.sqrt(torch.sum(dev_j ** 2) + 1e-6))
        loss = 1. - covariance / normalizer
        logger.debug(" loss: {0}".format(loss))
        logger.info("Done.")
        return loss
@Losses.register
class NCCLoss(object):
    """ Calculate the normalize cross correlation between I and J.
    """
    def __init__(self, concat=False, win=None):
        """ Init class.
        Parameters
        ----------
        concat: bool, default False
            if set asssume that the target image J is a concatenation of the
            moving and fixed.
        win: list of in, default None
            the window size to compute the correlation, default 9.
        """
        super(NCCLoss, self).__init__()
        self.concat = concat
        self.win = win
    def __call__(self, arr_i, arr_j):
        """ Forward method.
        Parameters
        ----------
        arr_i, arr_j: Tensor (batch_size, channels, *vol_shape)
            the input data.
        """
        logger.debug("Compute NCC loss...")
        if self.concat:
            # Keep only the fixed half of the concatenated channels.
            nb_channels = arr_j.shape[1] // 2
            arr_j = arr_j[:, nb_channels:]
        # Spatial rank: strip batch and channel dims.
        ndims = len(list(arr_i.size())) - 2
        if ndims not in [1, 2, 3]:
            raise ValueError("Volumes should be 1 to 3 dimensions, not "
                             "{0}.".format(ndims))
        # NOTE(review): the default window is cached on the instance here,
        # so the first call's rank fixes self.win for all later calls.
        if self.win is None:
            self.win = [9] * ndims
        # NOTE(review): get_device() returns -1 for CPU tensors; this path
        # presumably assumes CUDA inputs — confirm with callers.
        device = arr_i.get_device()
        sum_filt = torch.ones([1, 1, *self.win]).to(device)
        pad_no = math.floor(self.win[0] / 2)
        stride = tuple([1] * ndims)
        padding = tuple([pad_no] * ndims)
        logger.debug(" ndims: {0}".format(ndims))
        logger.debug(" stride: {0}".format(stride))
        logger.debug(" padding: {0}".format(padding))
        logger.debug(" filt: {0} - {1}".format(
            sum_filt.shape, sum_filt.get_device()))
        logger.debug(" win: {0}".format(self.win))
        logger.debug(" I: {0} - {1} - {2}".format(
            arr_i.shape, arr_i.get_device(), arr_i.dtype))
        logger.debug(" J: {0} - {1} - {2}".format(
            arr_j.shape, arr_j.get_device(), arr_j.dtype))
        # Windowed variances and cross term via box-filter convolutions.
        var_arr_i, var_arr_j, cross = self._compute_local_sums(
            arr_i, arr_j, sum_filt, stride, padding)
        # Squared local correlation; 1e-5 guards near-zero variances.
        cc = cross * cross / (var_arr_i * var_arr_j + 1e-5)
        # Negate: optimizers minimize, we want maximal correlation.
        loss = -1 * torch.mean(cc)
        logger.debug(" loss: {0}".format(loss))
        logger.info("Done.")
        return loss
    def _compute_local_sums(self, arr_i, arr_j, filt, stride, padding):
        """Return windowed (var(I), var(J), cov(I, J)) using conv box sums."""
        conv_fn = getattr(func, "conv{0}d".format(len(self.win)))
        logger.debug(" conv: {0}".format(conv_fn))
        arr_i2 = arr_i * arr_i
        arr_j2 = arr_j * arr_j
        arr_ij = arr_i * arr_j
        sum_arr_i = conv_fn(arr_i, filt, stride=stride, padding=padding)
        sum_arr_j = conv_fn(arr_j, filt, stride=stride, padding=padding)
        sum_arr_i2 = conv_fn(arr_i2, filt, stride=stride, padding=padding)
        sum_arr_j2 = conv_fn(arr_j2, filt, stride=stride, padding=padding)
        sum_arr_ij = conv_fn(arr_ij, filt, stride=stride, padding=padding)
        win_size = np.prod(self.win)
        logger.debug(" win size: {0}".format(win_size))
        # Local means from box sums.
        u_arr_i = sum_arr_i / win_size
        u_arr_j = sum_arr_j / win_size
        cross = (sum_arr_ij - u_arr_j * sum_arr_i - u_arr_i * sum_arr_j +
                 u_arr_i * u_arr_j * win_size)
        var_arr_i = (sum_arr_i2 - 2 * u_arr_i * sum_arr_i + u_arr_i *
                     u_arr_i * win_size)
        var_arr_j = (sum_arr_j2 - 2 * u_arr_j * sum_arr_j + u_arr_j *
                     u_arr_j * win_size)
        return var_arr_i, var_arr_j, cross
@Losses.register
class RCNetLoss(object):
    """ RCNet Loss function.
    This loss needs intermediate layers outputs.
    Use a callback function to set the 'layer_outputs' class parameter before
    each evaluation of the loss function; it is cleared after every call.
    PCCLoss
    """
    def __init__(self):
        self.similarity_loss = PCCLoss(concat=True)
        self.layer_outputs = None
    def __call__(self, moving, fixed):
        """Weighted sum of per-stem PCC similarity terms."""
        logger.debug("Compute RCNet loss...")
        if self.layer_outputs is None:
            raise ValueError(
                "This loss needs intermediate layers outputs. Please register "
                "an appropriate callback.")
        stem_results = self.layer_outputs["stem_results"]
        # Attach a raw similarity loss to every stem with a positive weight.
        for stem_result in stem_results:
            params = stem_result["stem_params"]
            if params["raw_weight"] > 0:
                stem_result["raw_loss"] = self.similarity_loss(
                    stem_result["warped"], fixed) * params["raw_weight"]
        weighted_terms = [
            result["raw_loss"] * result["stem_params"]["weight"]
            for result in stem_results if "raw_loss" in result]
        loss = sum(weighted_terms)
        self.layer_outputs = None
        logger.debug(" loss: {0}".format(loss))
        logger.debug("Done.")
        return loss
@Losses.register
class VMILoss(object):
    """ Variational Mutual information loss function.
    Reference: http://bayesiandeeplearning.org/2018/papers/136.pdf -
    https://discuss.pytorch.org/t/help-with-histogram-and-loss-
    backward/44052/5
    """
    def get_positive_expectation(self, p_samples, average=True):
        """Shifted-JSD positive term: log 2 - softplus(-x)."""
        log_2 = math.log(2.)
        # BUG FIX: this module aliases torch.nn.functional as `func`;
        # the previous `F.softplus` raised NameError.
        Ep = log_2 - func.softplus(-p_samples)
        # Note JSD will be shifted
        if average:
            return Ep.mean()
        else:
            return Ep
    def get_negative_expectation(self, q_samples, average=True):
        """Shifted-JSD negative term: softplus(-x) + x - log 2."""
        log_2 = math.log(2.)
        Eq = func.softplus(-q_samples) + q_samples - log_2
        # Note JSD will be shifted
        if average:
            return Eq.mean()
        else:
            return Eq
    def __call__(self, lmap, gmap):
        """ The fenchel_dual_loss from the DIM code
        Reshape tensors dims to (N, Channels, chunks).
        Parameters
        ----------
        lmap: Tensor
            the moving data.
        gmap: Tensor
            the fixed data.
        """
        lmap = lmap.reshape(2, 128, -1)
        gmap = gmap.squeeze()
        N, units, n_locals = lmap.size()
        n_multis = gmap.size(2)
        # First we make the input tensors the right shape.
        # BUG FIX: each step previously restarted from the source tensor
        # (`l = lmap.view(...)`, then `l = lmap.permute(...)`, ...) so the
        # permute was discarded; chain the operations instead.
        l = lmap.view(N, units, n_locals).permute(0, 2, 1).reshape(-1, units)
        m = gmap.view(N, units, n_multis).permute(0, 2, 1).reshape(-1, units)
        u = torch.mm(m, l.t())
        u = u.reshape(N, n_multis, N, n_locals).permute(0, 2, 3, 1)
        mask = torch.eye(N).to(l.device)
        n_mask = 1 - mask
        # BUG FIX: these are instance methods; they were called as bare
        # names, which raised NameError.
        E_pos = self.get_positive_expectation(u, average=False).mean(2).mean(2)
        E_neg = self.get_negative_expectation(u, average=False).mean(2).mean(2)
        E_pos = (E_pos * mask).sum() / mask.sum()
        E_neg = (E_neg * n_mask).sum() / n_mask.sum()
        loss = E_neg - E_pos
        return loss
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'coreMock.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import sys
class Ui_Form(QtWidgets.QWidget):
    """Mock UI generated from 'coreMock.ui' by pyuic5.

    NOTE: regenerate via pyuic5 rather than editing by hand — the file
    header warns that manual changes will be lost.
    """
    def setupUi(self, Form):
        """Create and position every widget on `Form` (fixed 1820x880)."""
        Form.setObjectName("Form")
        Form.resize(1820, 880)
        Form.setMaximumSize(QtCore.QSize(1820, 880))
        # Lower text box and four buttons; their y-coordinates (>880) fall
        # outside the form's maximum size — presumably leftovers from a
        # taller design; confirm in Designer.
        self.textEdit = QtWidgets.QTextEdit(Form)
        self.textEdit.setGeometry(QtCore.QRect(10, 920, 1901, 71))
        self.textEdit.setObjectName("textEdit")
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(10, 1002, 171, 71))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(Form)
        self.pushButton_2.setGeometry(QtCore.QRect(520, 1000, 171, 71))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(Form)
        self.pushButton_3.setGeometry(QtCore.QRect(1060, 1000, 171, 71))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_4 = QtWidgets.QPushButton(Form)
        self.pushButton_4.setGeometry(QtCore.QRect(1740, 1000, 171, 71))
        self.pushButton_4.setObjectName("pushButton_4")
        # Empty table spanning the top of the form.
        self.tableWidget = QtWidgets.QTableWidget(Form)
        self.tableWidget.setGeometry(QtCore.QRect(10, 0, 1801, 181))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        # Second text box with its own row of four buttons.
        self.textEdit_2 = QtWidgets.QTextEdit(Form)
        self.textEdit_2.setGeometry(QtCore.QRect(10, 730, 1801, 51))
        self.textEdit_2.setObjectName("textEdit_2")
        self.pushButton_5 = QtWidgets.QPushButton(Form)
        self.pushButton_5.setGeometry(QtCore.QRect(10, 802, 181, 71))
        self.pushButton_5.setObjectName("pushButton_5")
        self.pushButton_6 = QtWidgets.QPushButton(Form)
        self.pushButton_6.setGeometry(QtCore.QRect(550, 800, 181, 71))
        self.pushButton_6.setObjectName("pushButton_6")
        self.pushButton_7 = QtWidgets.QPushButton(Form)
        self.pushButton_7.setGeometry(QtCore.QRect(1630, 800, 181, 71))
        self.pushButton_7.setObjectName("pushButton_7")
        self.pushButton_8 = QtWidgets.QPushButton(Form)
        self.pushButton_8.setGeometry(QtCore.QRect(1090, 800, 181, 71))
        self.pushButton_8.setObjectName("pushButton_8")
        # Two side-by-side scaled image labels filling the middle area.
        self.label = QtWidgets.QLabel(Form)
        self.label.setEnabled(True)
        self.label.setGeometry(QtCore.QRect(10, 190, 891, 521))
        font = QtGui.QFont()
        font.setPointSize(18)
        self.label.setFont(font)
        self.label.setText("")
        self.label.setObjectName("label")
        pixmap = QtGui.QPixmap("image_name.jpg")
        self.label.setPixmap(pixmap)
        self.label.setScaledContents(True)
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setEnabled(True)
        self.label_2.setGeometry(QtCore.QRect(910, 190, 901, 521))
        font = QtGui.QFont()
        font.setPointSize(18)
        self.label_2.setFont(font)
        self.label_2.setText("")
        self.label_2.setObjectName("label_2")
        pixmap2 = QtGui.QPixmap("image_name.jpg")
        self.label_2.setPixmap(pixmap2)
        self.label_2.setScaledContents(True)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Apply (placeholder) translated captions to all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.pushButton.setText(_translate("Form", "PushButton"))
        self.pushButton_2.setText(_translate("Form", "PushButton"))
        self.pushButton_3.setText(_translate("Form", "PushButton"))
        self.pushButton_4.setText(_translate("Form", "PushButton"))
        self.pushButton_5.setText(_translate("Form", "PushButton"))
        self.pushButton_6.setText(_translate("Form", "PushButton"))
        self.pushButton_7.setText(_translate("Form", "PushButton"))
        self.pushButton_8.setText(_translate("Form", "PushButton"))
if __name__ == '__main__':
    # Build the mock UI inside a QMainWindow and run the Qt event loop.
    application = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    form = Ui_Form()
    form.setupUi(window)
    window.show()
    sys.exit(application.exec_())
|
import factory
import pytest
from rest_framework.relations import SlugRelatedField
from apps.Payment.serializers import CurrencySerializer, UnfilledTransactionSerializer, FilledTransactionSerializer
from tests.Payment.factory import CurrencyFactory, TransactionFactory, FilledTransactionFactory, CurrencylessTransactionFactory
class TestCurrencySerializer:
    """Unit tests for CurrencySerializer."""

    @pytest.mark.unit
    def test_serialize_model(self):
        # Serializing a built (unsaved) instance must yield non-empty data.
        instance = CurrencyFactory.build()
        assert CurrencySerializer(instance).data

    @pytest.mark.unit
    def test_serialized_data(self):
        # returns a dict containing data of model instance
        payload = factory.build(dict, FACTORY_CLASS=CurrencyFactory)
        serializer = CurrencySerializer(data=payload)
        assert serializer.is_valid(raise_exception=True)
        assert serializer.errors == {}
class TestUnfilledTransactionSerializer:
    """Unit tests for UnfilledTransactionSerializer."""

    @pytest.mark.unit
    def test_serialize_model(self):
        transaction = TransactionFactory.build()
        expected_serialized_data = {
            'name': transaction.name,
            'currency': transaction.currency.code,
            'email': transaction.email,
            'message': transaction.message,
        }
        serializer = UnfilledTransactionSerializer(transaction)
        assert serializer.data == expected_serialized_data

    # CONSISTENCY FIX: this test was missing the @pytest.mark.unit marker
    # carried by every other test in this module, so marker-filtered runs
    # silently skipped it.
    @pytest.mark.unit
    def test_serialized_data(self, mocker):
        currency = CurrencyFactory.build()
        transaction = CurrencylessTransactionFactory.build()
        transaction.currency = currency
        valid_serialized_data = {
            'name': transaction.name,
            'currency': transaction.currency.code,
            'email': transaction.email,
            'message': transaction.message,
        }
        # we do this to avoid searching DB for currency instance
        # with respective currency code
        retrieve_currency = mocker.Mock(return_value=currency)
        SlugRelatedField.to_internal_value = retrieve_currency
        serializer = UnfilledTransactionSerializer(data=valid_serialized_data)
        assert serializer.is_valid(raise_exception=True)
        assert serializer.errors == {}
class TestFilledTransactionSerializer:
    """Unit tests for FilledTransactionSerializer."""

    @pytest.mark.unit
    def test_serializer_model(self):
        txn = FilledTransactionFactory.build()
        serializer = FilledTransactionSerializer(txn)
        expected = {
            'id': txn.id,
            'currency': str(txn.currency),
            'link': txn.link,
            'uid': str(txn.uid),
            'name': txn.name,
            'email': txn.email,
            'creation_date': txn.creation_date,
            'payment_intent_id': txn.payment_intent_id,
            'message': txn.message,
        }
        assert serializer.data == expected

    @pytest.mark.unit
    def test_serializer_data(self):
        txn = FilledTransactionFactory.build()
        payload = {
            'id': txn.id,
            'currency': str(txn.currency),
            'link': txn.link,
            'uid': str(txn.uid),
            'name': txn.name,
            'email': txn.email,
            'creation_date': txn.creation_date,
            'payment_intent_id': txn.payment_intent_id,
            'message': txn.message,
        }
        serializer = FilledTransactionSerializer(data=payload)
        assert serializer.is_valid(raise_exception=True)
        assert serializer.errors == {}
|
#
# @lc app=leetcode.cn id=124 lang=python3
#
# [124] 二叉树中的最大路径和
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def maxPathSum(self, root: TreeNode) -> int:
        """Return the maximum path sum over all paths in the tree (LC 124)."""
        self.max_sum = float('-inf')
        self.oneSideMax(root)
        return self.max_sum

    def oneSideMax(self, root):
        """Best downward path sum starting at `root`; updates self.max_sum
        with the best path allowed to bend through this node."""
        if not root:
            return 0
        # A negative branch contributes nothing — drop it.
        best_left = max(0, self.oneSideMax(root.left))
        best_right = max(0, self.oneSideMax(root.right))
        # A full path may join both subtrees through this node, but such a
        # bent path cannot be extended upward — record it globally instead.
        self.max_sum = max(self.max_sum, root.val + best_left + best_right)
        return root.val + max(best_left, best_right)
# @lc code=end
|
import socket
from lib.data_manager import DataManager
class NetworkManager:
    """Receives UDP datagrams on port 20777 and feeds them to a DataManager.
    NOTE(review): 20777 is the default F1-game telemetry port — presumably
    this consumes racing-game telemetry; confirm with callers.
    """
    def __init__(self, mutex):
        # UDP socket bound on all interfaces, fixed port 20777.
        self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
        self.socket.bind(('', 20777))
        self.data_manager = DataManager()
        # Lock shared with readers of data_manager; guards updates below.
        self.mutex = mutex
    def receive_packet(self):
        """Block forever: apply each received datagram under the mutex."""
        while True:
            # 2048 bytes per datagram; larger packets would be truncated.
            udp_packet = self.socket.recv(2048)
            self.mutex.acquire()
            self.data_manager.update_from_packet(udp_packet)
            self.mutex.release()
|
from django.urls import path
from .views import ProvinceImportView, DistrictImportView
from django.views.generic import TemplateView
# URL routes for the reference-data import dashboard.
# Imports follow a two-step flow: a preview request first, then the same view
# with confirm=True to actually commit the data.
urlpatterns = [
    # Landing page listing the available import actions (static template).
    path('import/', TemplateView.as_view(template_name="dashboard/import/reference_import.html"), name='importdata'),
    path('import/import_province/', ProvinceImportView.as_view(), name='importdataprovince'),
    path('import/confirm_import_province/', ProvinceImportView.as_view(confirm=True), name='confirmimportdataprovince'),
    path('import/import_district/', DistrictImportView.as_view(), name='importdatadistrict'),
    path('import/confirm_import_district/', DistrictImportView.as_view(confirm=True), name='confirmimportdatadistrict'),
]
|
import igraph
import graphs.plotlyCreator # pylint: disable=import-error
vertex_size_max = 50  # pixel size of a depth-0 vertex; deeper nodes shrink as 1/(depth+1)


class NetworkGraph():
    """Builds an igraph network of crawled pages and renders it via plotly.

    Vertices are URLs (internal pages blue, external red); edges point from
    a page to every URL it links to.
    """

    def __init__(self, fetcher):
        self.fetcher = fetcher
        self.graph = igraph.Graph(directed=True)
        # Parallel per-vertex lists, indexed by vertex id.
        self.urls = []
        self.types = []
        self.depths = []
        # url -> vertex index. Fix: the original used list.index() for every
        # lookup, making create() quadratic in the number of URLs.
        self._vertex_index = {}

    def _ensure_vertex(self, url, link_type, depth):
        """Return the vertex index for `url`, creating it on first sight."""
        index = self._vertex_index.get(url)
        if index is None:
            index = len(self.urls)
            self.graph.add_vertex()
            self.urls.append(url)
            self.types.append(link_type)
            self.depths.append(depth)
            self._vertex_index[url] = index
        return index

    def create(self):
        """Populate the graph from the fetcher's crawl results and plot it."""
        for website in self.fetcher.website_list.values():
            website_index = self._ensure_vertex(website.link, 'internal', website.depth)
            for external_link in website.external_linklist:
                # External targets get a fixed large depth so they render small.
                external_link_index = self._ensure_vertex(external_link, 'external', 10)
                self.graph.add_edge(website_index, external_link_index)
            for internal_link in website.linklist:
                internal_link_index = self._ensure_vertex(
                    internal_link, 'internal', self.fetcher.website_list[internal_link].depth)
                self.graph.add_edge(website_index, internal_link_index)
        # Vertex attributes
        self.graph.vs['urls'] = self.urls
        self.graph.vs['types'] = self.types
        color_dict = {"internal": "blue", "external": "red"}
        colors = [color_dict[_type] for _type in self.graph.vs["types"]]
        # Vertex size shrinks with crawl depth (computed once; the original
        # evaluated the same map() twice).
        sizes = [vertex_size_max / (depth + 1) for depth in self.depths]
        self.graph.vs["color"] = colors
        self.graph.vs["size"] = sizes
        # Kamada-Kawai layout; for more information see:
        # https://www.cs.rhul.ac.uk/home/tamas/development/igraph/tutorial/tutorial.html
        layout = self.graph.layout("kk")
        graphs.plotlyCreator.create_plotly_plot(
            layout,
            len(self.urls),
            [e.tuple for e in self.graph.es],
            self.urls,
            colors,
            sizes,
        )
|
import requests
from time import sleep
def get_id():
    """Submit the sign-up page's reCAPTCHA to 2captcha; return the task id."""
    page_url = 'https://elements.envato.com/sign-up'
    site_key = '6Lcs71EUAAAAAJy8xeSKqmof7E35MsfvQmdrE4DD'
    submit_url = (
        'https://2captcha.com/in.php?key=281ec4a6084e341f5ebb845513096114'
        '&method=userrecaptcha&googlekey=%s&pageurl=%s&json=1' % (site_key, page_url)
    )
    id_google = get_response(submit_url)
    print(id_google)
    return id_google
def wait_token(id_google):
    """Poll 2captcha every 3 seconds until the solved token is ready.

    Returns the token string once the service reports it is available.
    """
    attempt = 1
    while True:
        url_code = 'https://2captcha.com/res.php?key=281ec4a6084e341f5ebb845513096114&action=get&id=%s&json=1' % (str(id_google))
        token = get_response(url_code)
        if token != 'CAPCHA_NOT_READY':
            return token
        else:
            print(token, attempt)
            # Bug fix: the attempt counter was never incremented, so every
            # progress line printed "1".
            attempt += 1
            sleep(3)
def get_response(url):
    """GET `url` and return the 'request' field of the 2captcha JSON reply."""
    response = requests.get(url)
    print('get_response:', response)
    return response.json()['request']
def get_token(id_google):
    """Block until 2captcha solves task `id_google`; return the token."""
    print('id_google:', id_google)
    token = wait_token(id_google)
    print('token:', token)
    return token
def get_js(id_google):
    """Build the JS snippet that injects the solved token into the page."""
    token = get_token(id_google)
    snippet = 'document.getElementById("g-recaptcha-response").innerHTML="%s";' % token
    print(snippet)
    return snippet
if __name__ == '__main__':
    # Bug fix: `id_google` was referenced here without ever being defined at
    # module level (NameError at startup); the task id comes from get_id().
    get_js(get_id())
|
import os
import crypt
import logging
import pwd
#class Pwd(object):
# def userPasswd(self, login, password):
# encPass = crypt.crypt(password, password)
# command = "sudo usermod -p '{0:s}' {1:s}".format(encPass, login)
# result = os.system(command)
# if result != 0:
# logging.error(command)
# return result
#
#pp = Pwd()
#a = pp.userPasswd('sada', 'sad')
#password = 'sada'
#encpass = crypt.crypt(password, '22')
#a= os.system("sudo useradd sada -p "+encpass+" -m -s /bin/bash")
#print a
#a= os.system("sudo useradd sada -p "+encpass+" -m")
#print a
#usermod -l sadaa sada
for u in pwd.getpwall():
print u[0]
if u[0] == 'sadaa':
pass
try:
a = pwd.getpwnam('sada')
print a
except:
print "bye"
|
#!/usr/bin/python
from sys import stdin, stdout
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import HtmlFormatter
from pygments import highlight
# CGI filter (Python 2): read raw markup from stdin and emit
# Pygments-highlighted HTML on stdout.
code = stdin.read()
# NOTE(review): the lexer is hard-coded to 'html' regardless of the input
# language — confirm that is intended.
lexer = get_lexer_by_name('html')
formatter = HtmlFormatter()
# Emit the CGI header followed by the mandatory blank separator line.
print "Content-Type: text/html"
print
highlight(code, lexer, formatter, outfile=stdout)
|
"""In order to make the IRFlowApi Class available globally we need the below input statement
TODO Determine if we should call irflow_api.py irflow_client.py"""
try:
from irflow_client.irflow_client import IRFlowClient
except ImportError:
from irflow_client import IRFlowClient
|
# -*- coding: utf-8 -*-
# Copyright 2013-2020 The Wazo Authors (see the AUTHORS file)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import os
# Globals shared with common.py, executed via the provisioning framework's
# execfile_ helper (presumably injected by the plugin runtime — TODO confirm).
common_globals = {}
execfile_('common.py', common_globals)

# Phone model -> firmware version installed by this plugin.
MODEL_VERSIONS = {
    u'T27G': u'69.85.0.5',
    u'T30': u'124.85.0.40',
    u'T30P': u'124.85.0.40',
    u'T31': u'124.85.0.40',
    u'T31P': u'124.85.0.40',
    u'T31G': u'124.85.0.40',
    u'T33P': u'124.85.0.40',
    u'T33G': u'124.85.0.40',
    u'T41S': u'66.85.0.5',
    u'T42S': u'66.85.0.5',
    u'T46S': u'66.85.0.5',
    u'T48S': u'66.85.0.5',
    u'T53': u'96.85.0.5',
    u'T53W': u'96.85.0.5',
    u'T54W': u'96.85.0.5',
    u'T57W': u'96.85.0.5',
    u'T58': u'58.85.0.5',
    u'W60B': u'77.85.0.20',
    u'CP960': u'73.85.0.5',
    u'CP920': u'78.85.0.5',
}

# (config filename, firmware image, template) triples for desk/conference phones.
COMMON_FILES = [
    ('y000000000069.cfg', u'T27G-69.85.0.5.rom', 'model.tpl'),
    ('y000000000065.cfg', u'T46S(T48S,T42S,T41S)-66.85.0.5.rom', 'model.tpl'),
    ('y000000000066.cfg', u'T46S(T48S,T42S,T41S)-66.85.0.5.rom', 'model.tpl'),
    ('y000000000067.cfg', u'T46S(T48S,T42S,T41S)-66.85.0.5.rom', 'model.tpl'),
    ('y000000000068.cfg', u'T46S(T48S,T42S,T41S)-66.85.0.5.rom', 'model.tpl'),
    ('y000000000073.cfg', u'CP960-73.85.0.5.rom', 'model.tpl'),
    ('y000000000078.cfg', u'CP920-78.85.0.5.rom', 'model.tpl'),
    ('y000000000123.cfg', u'T31(T30,T30P,T31G,T31P,T33P,T33G)-124.85.0.40.rom', 'model.tpl'),
    ('y000000000124.cfg', u'T31(T30,T30P,T31G,T31P,T33P,T33G)-124.85.0.40.rom', 'model.tpl'),
    ('y000000000127.cfg', u'T31(T30,T30P,T31G,T31P,T33P,T33G)-124.85.0.40.rom', 'model.tpl'),
]

# DECT bases additionally carry per-handset firmware images, rendered with the
# dedicated dect_model.tpl template.
COMMON_FILES_DECT = [
    {
        'filename': u'y000000000058.cfg',
        'fw_filename': u'T58-58.85.0.5.rom',
        'handsets_fw': {
            'w53h': u'W53H-88.85.0.20.rom',
            'w56h': u'W56H-61.85.0.20.rom',
        },
        'tpl_filename': u'dect_model.tpl',
    },
    {
        'filename': u'y000000000077.cfg',
        'fw_filename': u'W60B-77.85.0.20.rom',
        'handsets_fw': {
            'w53h': u'W53H-88.85.0.20.rom',
            'w56h': u'W56H-61.85.0.20.rom',
            'w59r': u'W59R-115.85.0.20.rom',
            'cp930w': u'CP930W-87.85.0.20.rom',
        },
        'tpl_filename': u'dect_model.tpl',
    },
    {
        'filename': u'y000000000095.cfg',
        'fw_filename': u'T54W(T57W,T53W,T53)-96.85.0.5.rom',
        'handsets_fw': {
            'w53h': u'W53H-88.85.0.20.rom',
            'w56h': u'W56H-61.85.0.20.rom',
        },
        'tpl_filename': u'dect_model.tpl',
    },
    {
        'filename': u'y000000000096.cfg',
        'fw_filename': u'T54W(T57W,T53W,T53)-96.85.0.5.rom',
        'handsets_fw': {
            'w53h': u'W53H-88.85.0.20.rom',
            'w56h': u'W56H-61.85.0.20.rom',
        },
        'tpl_filename': u'dect_model.tpl',
    },
    {
        'filename': u'y000000000097.cfg',
        'fw_filename': u'T54W(T57W,T53W,T53)-96.85.0.5.rom',
        'handsets_fw': {
            'w53h': u'W53H-88.85.0.20.rom',
            'w56h': u'W56H-61.85.0.20.rom',
        },
        'tpl_filename': u'dect_model.tpl',
    }
]
class YealinkPlugin(common_globals['BaseYealinkPlugin']):
    # Marks this module as a provisioning plugin for the framework's loader.
    IS_PLUGIN = True
    pg_associator = common_globals['BaseYealinkPgAssociator'](MODEL_VERSIONS)

    # Yealink plugin specific stuff
    _COMMON_FILES = COMMON_FILES

    def configure_common(self, raw_config):
        """Render the shared config files, then one extra file per DECT model.

        The XX_-prefixed keys are template variables consumed by
        dect_model.tpl (base firmware image plus per-handset firmware map).
        """
        super(YealinkPlugin, self).configure_common(raw_config)
        for dect_info in COMMON_FILES_DECT:
            tpl = self._tpl_helper.get_template('common/%s' % dect_info[u'tpl_filename'])
            dst = os.path.join(self._tftpboot_dir, dect_info[u'filename'])
            raw_config[u'XX_handsets_fw'] = dect_info[u'handsets_fw']
            raw_config[u'XX_fw_filename'] = dect_info[u'fw_filename']
            self._tpl_helper.dump(tpl, raw_config, dst, self._ENCODING)
|
import random
class Initialization():
    """Prepares and loads the random artefacts for one dataset.

    File layout (all under data/<name>/): _data.txt is the raw edge list,
    _weight.txt adds a random weight per edge, _degree.txt stores per-node
    out-degree (used as seed cost), _wallet.txt a random budget per node.
    """

    def __init__(self, dataname):
        ### dataname, data_data_path, data_weight_path, data_degree_path: (str)
        self.dataname = dataname
        self.data_data_path = "data/" + dataname + "/" + dataname + '_data.txt'
        self.data_weight_path = "data/" + dataname + "/" + dataname + '_weight.txt'
        self.data_degree_path = "data/" + dataname + "/" + dataname + '_degree.txt'
        self.data_wallet_path = "data/" + dataname + "/" + dataname + '_wallet.txt'

    def setEdgeWeight(self):
        # -- set weight on edge: copy the edge list, appending a random [0,1) weight --
        fw = open(self.data_weight_path, 'w')
        with open(self.data_data_path) as f:
            for line in f:
                (key, val) = line.split()
                # --- first node, second node, weight on the edge within nodes ---
                fw.write(key + " " + val + " " + str(round(random.random(), 2)) + "\n")
        fw.close()
        # NOTE(review): f is already closed by the with-block; this extra
        # close is redundant (harmless).
        f.close()

    def setSeedCost(self):
        # -- count the out-degree of each node and persist it --
        ### numnode: (int) the number of nodes in data
        fw = open(self.data_degree_path, 'w')
        with open(self.data_data_path) as f:
            numnode = 0
            # NOTE(review): `list` shadows the builtin; it holds the source
            # node of every edge so count() below yields out-degree (O(n^2)).
            list = []
            for line in f:
                (node1, node2) = line.split()
                numnode = max(numnode, int(node1), int(node2))
                list.append(node1)
            # NOTE(review): range(1, numnode) writes no line for node 0 nor
            # for the highest node id, yet constructSeedCostDict iterates
            # range(numnode + 1) — confirm the intended node range.
            for num in range(1, numnode):
                # --- node, the cost of the node ---
                fw.write(str(num) + " " + str(list.count(str(num))) + "\n")
        fw.close()
        f.close()

    def constructSeedCostDict(self):
        # -- calculate the cost for each seed: degree normalised by max degree --
        ### seedcost: (dict) the set of cost for each seed
        ### seedcost[num]: (float2) the degree of num's seed
        ### numnode: (int) the number of nodes in data
        ### maxdegree: (int) the maximum degree in data
        seedcost = {}
        with open(self.data_degree_path) as f:
            numnode, maxdegree = 0, 0
            list = []
            for line in f:
                (node, degree) = line.split()
                numnode = max(numnode, int(node))
                maxdegree = max(maxdegree, int(degree))
                list.append([node, degree])
            # NOTE(review): range(numnode + 1) indexes one past the number of
            # file lines if the degree file covers nodes 1..numnode, and keys
            # are offset from the node ids stored in `list` — verify against
            # setSeedCost's output format.
            for num in range(numnode + 1):
                seedcost[str(num)] = round(int(list[num][1]) / maxdegree, 2)
        f.close()
        return seedcost

    def constructGraphDict(self):
        # -- build graph as nested dict: source -> {target: weight} --
        ### graph: (dict) the graph
        ### graph[node1]: (dict) the set of node1's receivers
        ### graph[node1][node2]: (str) the weight one the edge of node1 to node2
        graph = {}
        with open(self.data_weight_path) as f:
            for line in f:
                (node1, node2, wei) = line.split()
                if node1 in graph:
                    graph[node1][node2] = str(wei)
                else:
                    graph[node1] = {node2: str(wei)}
        f.close()
        return graph

    def setNodeWallet(self):
        # -- assign each node a random budget in [0, 2) --
        fw = open(self.data_wallet_path, 'w')
        with open(self.data_degree_path) as f:
            for line in f:
                (key, val) = line.split()
                # --- node id, random wallet value ---
                fw.write(key + " " + str(round(random.uniform(0, 2), 2)) + "\n")
        fw.close()
        f.close()
class Product():
    """Generates synthetic products (profit, cost, ratio, price) for experiments."""

    def __init__(self, numprice, numratio):
        ### numprice: (int) the kinds of generated price
        ### numratio: (int) the kinds of generated ratio
        ### numproduct: (int) the kinds of generated product
        self.numprice = numprice
        self.numratio = numratio
        self.numproduct = numprice * numratio

    def setPriceRatioSimilar(self):
        # -- set some products with similar ratio --
        ### ratiolist: (list) the set to store all candidates
        ### ratiolist[num]: (list) [num's profit, num's cost, num's ratio]
        ### ratiolist[num][]: (float2)
        ### maxs: (int) longest run of consecutive similar ratios found so far
        ### sp: (int) pointer to the last element of that run
        ratiolist = []
        maxs, sp = 0, 0
        # Bug fix: `s` (current run length) was only ever assigned inside the
        # scan loop, so the first similar pair raised UnboundLocalError.
        s = 0
        while (maxs < self.numproduct - 1):
            generateprofit = round(random.random(), 2)
            generatecost = round(random.random(), 2)
            # -- define the profit and cost and price --
            if generateprofit == 0 or generatecost == 0 or generateprofit + generatecost >= 1:
                continue
            ratiolist.append((generateprofit, generatecost, round(generateprofit / generatecost, 2)))
            # - sort the ratiolist with ratio -
            ratiolist.sort(key=lambda tup: tup[2])
            for num in range(len(ratiolist) - 1):
                if maxs == self.numproduct - 1:
                    continue
                if len(ratiolist) >= self.numproduct and abs(ratiolist[num][2] - ratiolist[num + 1][2]) <= 0.1:
                    s += 1
                    if s >= maxs:
                        maxs = s
                        sp = num
                else:
                    s = 0
                    sp = 0
        ### productlist: (list) the set of output products with similar ratio
        ### productlist[num]: (list) [num's profit, num's cost, num's ratio, num's price]
        ### productlist[num][]: (float2)
        # -- set output products: walk forward from the start of the run --
        productlist = []
        sp = sp - self.numproduct + 1
        for num in range(self.numproduct):
            sp = sp + 1
            productlist.append(ratiolist[sp])
        with open("product/prod_ratio_similar_n" + str(self.numproduct) + "p1000.txt", 'w') as fw:
            for p, c, r in productlist:
                # --- profit, cost, ratio, price ---
                fw.write(str(p) + " " + str(c) + " " + str(r) + " " + str(p + c) + "\n")
        return productlist

    def setPriceDiffRatioDiff(self):
        # -- set the price with different prices and ratios --
        ### plist: (list) the list to record different price
        ### plist[num]: (float2) the bias price for output price
        ### rlist: (list) the list to record different ratio
        ### rlist[num]: (float2) the bias ratio for output ratio
        plist, rlist = [], []
        # -- set the bias price --
        # -- the multiple between each bias price has to be greater than 2 --
        ### dp: (int) the definition of price (non-zero -> retry generation)
        dp = 1
        while dp:
            for p in range(self.numprice):
                plist.append(round(random.uniform(p / self.numprice, (p + 1) / self.numprice), 2))
            for p in range(len(plist) - 1):
                if plist[p + 1] - plist[p] < 0.1 or plist[p] < 0.1:
                    dp += 1
                    continue
            if dp == 1:
                dp = 0
            else:
                dp = 1
                plist = []
        # -- set the bias ratio --
        # -- the difference between each bias ratio has to be greater than 0.1 --
        ### dr: (int) the definition of ratio (non-zero -> retry generation)
        dr = 1
        while dr:
            rlist = []
            for r in range(self.numratio):
                rlist.append(round(random.uniform(0, 2), 2))
            rlist.sort()
            if 0.0 in rlist:
                continue
            for r in range(len(rlist) - 1):
                if rlist[r + 1] / rlist[r] < 2:
                    dr += 1
                    continue
            for r in range(len(rlist) - 1):
                if rlist[r + 1] - rlist[r] < 0.1 or rlist[r] < 0.1:
                    dr += 1
                    continue
            if dr == 1:
                dr = 0
            else:
                dr = 1
        # -- set output products --
        ### productlist: (list) the set to record output products
        ### productlist[num]: (list) [num's profit, num's cost, num's ratio, num's price]
        ### productlist[num][]: (float2)
        productlist = []
        for r in range(len(rlist)):
            for p in range(len(plist)):
                price, profit, cost = 0.0, 0.0, 0.0
                # Re-sample until every component is non-zero and price <= 1.
                while price == 0.0 or profit == 0.0 or cost == 0.0 or price > 1:
                    price = plist[p] + random.uniform(-0.5, 0.5) * 0.1
                    profit = round(price * (rlist[r] / (1 + rlist[r])), 2)
                    cost = round(price * (1 / (1 + rlist[r])), 2)
                    price = round(profit + cost, 2)
                productlist.append([profit, cost, round((profit / cost), 2), price])
        with open("product/prod_r" + str(self.numratio) + "p" + str(self.numprice) + "n1000.txt", 'w') as fw:
            for p, c, r, pr in productlist:
                fw.write(str(p) + " " + str(c) + " " + str(r) + " " + str(pr) + "\n")
        return productlist
if __name__ == "__main__":
## input ##
## data_name: (str) the dataset ##
## num_price: (int) the kinds of generated price ##
## num_ratio: (int) the kinds of generated ratio ##
data_name = "email"
num_price, num_ratio = 2, 2
ini = Initialization(data_name)
prod = Product(num_price, num_ratio)
# ini.setEdgeWeight()
# ini.setSeedCost()
# ini.setNodeWallet()
graph_dict = ini.constructGraphDict()
seedcost_dict = ini.constructSeedCostDict()
# prod.setPriceRatioSimilar()
prod.setPriceDiffRatioDiff()
# print(graph_dict) |
#coding: utf-8
# *** Form class definitions ***
from flask_wtf import FlaskForm
from wtforms import StringField , PasswordField, BooleanField, SubmitField, TextAreaField, SelectMultipleField #表单字段的类
from wtforms.validators import DataRequired, ValidationError,Email, EqualTo, Length
from webapp.models import User
# The optional `validators` argument checks that a field's input meets expectations;
# the DataRequired validator only checks that the field is non-empty.
from flask_babel import _, lazy_gettext as _l
from config import lable
# *** User login form ***
class LoginForm(FlaskForm):
    """Login form.

    Each field class takes a label as its first argument and the resulting
    instance becomes a class attribute of LoginForm.
    """
    username = StringField(_l('Username'), validators=[DataRequired()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    remember_me = BooleanField(_l('Remember Me'))
    submit = SubmitField(_l('Sign in'))
# *** User registration form ***
class RegistrationForm(FlaskForm):
    """Registration form with uniqueness checks on username and email."""
    username = StringField(_l('Username'), validators=[DataRequired()])
    email = StringField(_l('Email'), validators=[DataRequired(), Email()])
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    password2 = PasswordField(
        _l('Repeat Password'), validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField(_l('Register'))

    # ** Custom username/email validators: WTForms invokes any method named
    # validate_<fieldname> automatically during form validation. **
    def validate_username(self, username):
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError(_('Please use a different username.'))

    def validate_email(self, email):
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError(_('Please use a different email address.'))
# *** User profile editing form ***
class EditProfileForm(FlaskForm):
    """Profile form; rejects a username change that collides with another user."""
    username = StringField(_l('Username'), validators=[DataRequired()])
    about_me = TextAreaField(_l('About me'), validators=[Length(min=0, max=140)])
    submit = SubmitField(_l('Submit'))

    def __init__(self, original_username, *args, **kwargs):
        # Remember the current username so an unchanged name is not rejected
        # as a duplicate.
        super(EditProfileForm, self).__init__(*args, **kwargs)
        self.original_username = original_username

    def validate_username(self, username):
        if username.data != self.original_username:
            # NOTE(review): reads self.username.data rather than the
            # `username` argument (same value here); the sibling forms use
            # the argument — consider aligning.
            user = User.query.filter_by(username=self.username.data).first()
            if user is not None:
                raise ValidationError(_('Please use a different username.'))
# *** User post form ***
class PostForm(FlaskForm):
    """Single textarea for submitting a new post (1-140 characters)."""
    post = TextAreaField(_l('Say something'), validators=[DataRequired(),
                                                          Length(min=1, max= 140)])
    submit = SubmitField(_l('Submit'))
# *** Request-password-reset form ***
class ResetPasswordRequestForm(FlaskForm):
    """Asks for the account email to send a reset link to."""
    email = StringField(_l('Email'), validators=[DataRequired(), Email()])
    submit = SubmitField(_l('Request Password Reset'))
# *** Reset-password form ***
class ResetPasswordForm(FlaskForm):
    """Sets the new password (entered twice for confirmation)."""
    password = PasswordField(_l('Password'), validators=[DataRequired()])
    password2 = PasswordField(
        _l('Repeat Password'), validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField(_l('Request Password Reset'))
# *** Request data for labelling ***
class DataLabelRequestForm(FlaskForm):
    """Selects which data table to label."""
    table_name = StringField(_l('Select Data'), validators=[DataRequired(), ])
    submit = SubmitField(_l('Submit'))
# *** Data labelling form ***
class DataLabelForm(FlaskForm):
    """Sentiment-labelling form; labels built to the customer's specification."""
    level = SelectMultipleField(
        label='情感类别',
        validators=[DataRequired('请选择标签')],
        render_kw={
            'class': 'form-control'
        },
        # NOTE(review): default=4 is not among the declared choices (0-3) —
        # verify whether it is meant as a "nothing selected" sentinel.
        choices=[(0,'0'),(1, '1'), (2, '2'), (3, '3')],
        default=4,
        coerce=int
    )
    submit = SubmitField(_l('Submit'))
# *** Training trigger form ***
class TrainlabelForm(FlaskForm):
    """Single-button form that kicks off training on the labelled data."""
    # Bug fix: the user-visible button label was misspelled 'Trian Data'.
    # NOTE(review): sibling forms wrap labels in lazy _l(); this one uses
    # eager _() — confirm whether that difference is intentional.
    submit = SubmitField(_('Train Data'))
|
import os
import time
import logging
from multiprocessing import cpu_count
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
import tqdm
import numpy as np
import torch
from torch.utils.data import DataLoader
from model import TGCN
from metrics import ndcg
from graph import NeighborFinder
from data import data_partition_amz, TrainDataset, ValidDataset, TestDataset
from global_flag import flag_true, flag_false
# ----- Experiment configuration -----
CODE_VERSION = '0518-2052'
LOAD_VERSION = None  # '1105-2000' for Amazon
SAVE_CHECKPT = False
DATASET = 'amazon_movies_tv'  # beauty, cds_vinyl, game, movies_tv, gowalla, steam
TOPK = 10  # evaluation cut-off for hit rate / NDCG
PRETRAIN_EPOCH = 50  # 20
EPOCH = 30
LR = 0.001
BATCH_SIZE = 512  # mix with pretrain: 512 for 40ngh & 2048 for 20ngh; 3072 for 10/20, 384 for 20/100
NUM_WORKERS_DL = 0  # dataloader workers, 0 for single process
NUM_WORKERS_SN = 0  # search_ngh workers, 0 for half cpu core, None for single process
USE_MEM = False
# On small machines, use every core for neighbour search and keep data in memory.
if cpu_count() <= 4:
    NUM_WORKERS_SN = cpu_count()
    USE_MEM = True
# ----- Model hyper-parameters -----
FEATURE_DIM = 40
EDGE_DIM = 8
TIME_DIM = 16
NUM_NEIGHBORS = 40
POS_ENCODER = 'pos'  # time, pos, empty
AGG_METHOD = 'mix'  # attn, lstm, mean, mix
PRUNE = False
LAM = 1e-4  # weight decay
LAYERS = 2
TARGET_MODE = 'prod'  # prod, dist
MARGIN = 10
N_HEAD = 4
DROP_OUT = 0.1
USE_TD = True  # use time_diff
SA_LAYERS = 0  # self_attn layers
UNIFORM = False
# Minimum training sequence length varies with dataset density.
if DATASET == 'newAmazon':
    MIN_TRAIN_SEQ = 5
elif DATASET == 'goodreads_large':
    MIN_TRAIN_SEQ = 8
else:
    MIN_TRAIN_SEQ = 3
# GPU / CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# register logging logger
logger = logging.getLogger()
logger.setLevel(level=logging.DEBUG)
time_line = time.strftime('%Y%m%d_%H:%M', time.localtime(time.time()))
logfile = time_line + '_tgcn4sr.log'
print('logfile', logfile)
formatter = logging.Formatter('%(asctime)s - %(message)s', datefmt='%d%b %H:%M')
console_h = logging.StreamHandler()
console_h.setLevel(logging.INFO)
console_h.setFormatter(formatter)
logger.addHandler(console_h)
# NOTE(review): the debug log file is only written when CUDA is available —
# presumably a proxy for "running on the training server"; confirm intent.
if torch.cuda.is_available():
    logfile_h = logging.FileHandler(logfile, mode='w')
    logfile_h.setLevel(logging.DEBUG)
    logfile_h.setFormatter(formatter)
    logger.addHandler(logfile_h)
def train(model, data_loader, optimizer, is_pretrain=False, log_interval=50):
    """Run one training epoch over `data_loader`.

    is_pretrain selects the matrix-factorisation BPR loss used during
    warm-up; otherwise the full temporal-GCN BPR loss is used. Average loss
    and timing are logged every `log_interval` steps.
    """
    time_start = time.time()
    model.train()
    model.init_workers()
    total_loss = 0
    time_one_interval = time.time()
    # for i, (user_id, pos_id, neg_id, time_stamp) in enumerate(tqdm.tqdm(data_loader)):
    for i, (user_id, pos_id, neg_id, time_stamp) in enumerate(data_loader):
        user_id = user_id.numpy()
        pos_id = pos_id.numpy()
        neg_id = neg_id.numpy()
        time_stamp = time_stamp.numpy()
        if is_pretrain:
            loss = model.mf_bpr_loss(user_id, pos_id, neg_id, time_stamp, num_neighbors=NUM_NEIGHBORS)
        else:
            loss = model.bpr_loss(user_id, pos_id, neg_id, time_stamp, num_neighbors=NUM_NEIGHBORS)
        model.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.cpu().item()
        # Global flag toggled around the logging window — presumably guards a
        # caching/debug path inside the model; TODO confirm semantics.
        flag_false()
        if (i + 1) % log_interval == 0:
            avg_loss = total_loss / log_interval
            d_time = time.time() - time_one_interval
            logging.info('Train step: ' + str(i+1) + '/' + str(len(data_loader)) + ' - avg loss: ' + '%.3f' % avg_loss + ' - time: ' + '%.2f' % d_time + 's')
            time_one_interval = time.time()
            total_loss = 0
        flag_true()
    model.del_workers()
    total_time = time.time() - time_start
    logging.info('Train one epoch time: ' + '%.2f' % total_time + 's')
def evaluate(model, data_loader, is_pretrain=False):
    """Compute and log the average validation loss (no gradient updates)."""
    with torch.no_grad():
        # logging.info('----- start_evaluate -----')
        model.eval()
        model.init_workers()
        total_loss = 0
        # for i, (user_id, pos_id, neg_id, time_stamp) in enumerate(tqdm.tqdm(data_loader)):
        for i, (user_id, pos_id, neg_id, time_stamp) in enumerate(data_loader):
            user_id = user_id.numpy()
            pos_id = pos_id.numpy()
            neg_id = neg_id.numpy()
            time_stamp = time_stamp.numpy()
            if is_pretrain:
                loss = model.mf_bpr_loss(user_id, pos_id, neg_id, time_stamp, num_neighbors=NUM_NEIGHBORS)
            else:
                loss = model.bpr_loss(user_id, pos_id, neg_id, time_stamp, num_neighbors=NUM_NEIGHBORS)
            total_loss += loss.cpu().item()
        avg_loss = total_loss / len(data_loader)
        logging.info('evaluate loss: ' + '%.3f' % avg_loss)
        model.del_workers()
def test(model, data_loader, is_pretrain=False, fast_test=1):
    """Evaluate top-K hit rate and NDCG on the test set.

    fast_test > 1 keeps only 1/fast_test of each batch to speed up
    intermediate evaluations. Returns the mean NDCG over batches.
    """
    with torch.no_grad():
        logging.info('----- start_test -----')
        model.eval()
        model.init_workers()
        hit = 0
        total = 0
        ndcg_score = []
        for i, (user_id, target_id, candidate_ids, time_stamp) in enumerate(tqdm.tqdm(data_loader)):
            if fast_test != 1:
                cut_len = len(user_id) // fast_test
                user_id = user_id[:cut_len]
                target_id = target_id[:cut_len]
                candidate_ids = candidate_ids[:cut_len]
                time_stamp = time_stamp[:cut_len]
            user_id = user_id.numpy()
            target_id = target_id.numpy()
            candidate_ids = candidate_ids.numpy()
            time_stamp = time_stamp.numpy()
            # logging.info(candidate_ids.shape) # (2048, 101)
            if is_pretrain:
                batch_topk_ids = model.mf_get_top_n(user_id, candidate_ids, time_stamp, num_neighbors=NUM_NEIGHBORS, topk=TOPK).cpu().numpy()
            else:
                batch_topk_ids = model.get_top_n(user_id, candidate_ids, time_stamp, num_neighbors=NUM_NEIGHBORS, topk=TOPK).cpu().numpy()
            batch_ndcg = ndcg(batch_topk_ids, target_id)
            ndcg_score.append(batch_ndcg)
            for tgt, topk_ids in zip(target_id, batch_topk_ids):
                total += 1
                if tgt in topk_ids:
                    hit += 1
        ndcg_score = float(np.mean(ndcg_score))
        # Bug fix: the log message said 'hit rage' instead of 'hit rate'.
        logging.info('Test hit rate: ' + str(hit) + '/' + str(total) + ' (' + '%.4f' % (hit/total) + ')' + ', ndcg: ' + '%.4f' % ndcg_score)
        model.del_workers()
        return ndcg_score
def load_checkpoint(model, file_path):
    """Load model weights from `file_path`, warning on hyper-parameter drift.

    Compares the hyper-parameters stored in the checkpoint against the
    current module-level configuration and logs every difference.
    """
    logging.info('Use checkpoint')
    saved_file = torch.load(file_path)
    current_hyper_p = {
        'DATASET': DATASET,
        'LAM': LAM,
        'FEATURE_DIM': FEATURE_DIM,
        'EDGE_DIM': EDGE_DIM,
        'TIME_DIM': TIME_DIM,
        'LAYERS': LAYERS,
        'NUM_NEIGHBORS': NUM_NEIGHBORS,
        'POS_ENCODER': POS_ENCODER,
        'AGG_METHOD': AGG_METHOD,
        'TARGET_MODE': TARGET_MODE,
        'MARGIN': MARGIN,
        'N_HEAD': N_HEAD,
        'DROP_OUT': DROP_OUT,
        'USE_TD': USE_TD,
        'SA_LAYERS': SA_LAYERS,
        'UNIFORM': UNIFORM,
        'MIN_TRAIN_SEQ': MIN_TRAIN_SEQ,
    }
    flag = True
    for key in current_hyper_p:
        if current_hyper_p[key] != saved_file[key]:
            logging.info(key + ' key diff, crt: ' + str(current_hyper_p[key]) + ' - svd: ' + str(saved_file[key]))
            flag = False
    if flag:
        logging.info('All Hyper parameters are same as saved')
    # NOTE(review): the state dict is loaded even when hyper-parameters
    # differ — the comparison above only warns; confirm this is intentional.
    model.load_state_dict(saved_file['state_dict'])
if __name__ == "__main__":
print('CODE_VERSION: ' + CODE_VERSION, '- DATASET: ' + DATASET)
adj_list_train, adj_list_tandv, adj_list_tavat, test_candidate, n_user, n_item = data_partition_amz(DATASET)
# train_dataset = TrainDataset(adj_list_train, n_user, n_item, MIN_TRAIN_SEQ)
tandv_dataset = TrainDataset(adj_list_tandv, n_user, n_item, MIN_TRAIN_SEQ)
valid_dataset = ValidDataset(adj_list_tavat, n_user, n_item)
test_dataset = TestDataset(adj_list_tavat, test_candidate, n_user, n_item)
train_data_loader = DataLoader(tandv_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS_DL)
valid_data_loader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS_DL)
test_data_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS_DL)
train_ngh_finder = NeighborFinder(adj_list_train, n_user, n_item, uniform=UNIFORM, use_mem=USE_MEM) # Initialize training neighbor finder(use train edges)
test_ngh_finder = NeighborFinder(adj_list_tandv, n_user, n_item, uniform=UNIFORM, use_mem=USE_MEM) # Initialize test neighbor finder(use train and valid edges)
if POS_ENCODER == 'pos':
seq_len = 0
for u in adj_list_tavat:
if len(adj_list_tavat[u]) > seq_len:
seq_len = len(adj_list_tavat[u])
else:
seq_len = None
tgcn_model = TGCN(train_ngh_finder, FEATURE_DIM, EDGE_DIM, TIME_DIM, n_user+n_item, 2, device,
LAYERS, USE_TD, TARGET_MODE, MARGIN, PRUNE, NUM_WORKERS_SN, pos_encoder=POS_ENCODER,
agg_method=AGG_METHOD, n_head=N_HEAD, drop_out=DROP_OUT,
seq_len=seq_len, sa_layers=SA_LAYERS, data_set=DATASET).to(device)
if PRETRAIN_EPOCH != 0:
optimizer_pretrain = torch.optim.AdamW(params=tgcn_model.parameters(), lr=LR, weight_decay=LAM)
optimizer = torch.optim.Adam(params=tgcn_model.parameters(), lr=LR, weight_decay=LAM)
if LOAD_VERSION is not None:
load_checkpoint(tgcn_model, LOAD_VERSION + '-' + DATASET + '.pkl')
tgcn_model.ngh_finder = test_ngh_finder
test(tgcn_model, test_data_loader, fast_test=10)
tgcn_model.ngh_finder = train_ngh_finder
for epoch_i in range(PRETRAIN_EPOCH):
logging.info('Pretrain mf - epoch ' + str(epoch_i + 1) + '/' + str(PRETRAIN_EPOCH))
train(tgcn_model, train_data_loader, optimizer_pretrain, is_pretrain=True, log_interval=100)
evaluate(tgcn_model, valid_data_loader, is_pretrain=True)
if (epoch_i+1) % 10 == 0:
ndcg_score = test(tgcn_model, test_data_loader, is_pretrain=True, fast_test=10)
for epoch_i in range(EPOCH):
logging.info('Train tgcn - epoch ' + str(epoch_i + 1) + '/' + str(EPOCH))
train(tgcn_model, train_data_loader, optimizer)
tgcn_model.ngh_finder = test_ngh_finder
evaluate(tgcn_model, valid_data_loader)
test_span = 5 if AGG_METHOD == 'mix' else 10
if (epoch_i+1) % test_span == 0:
ndcg_score = test(tgcn_model, test_data_loader, fast_test=5)
if DATASET == 'amazon_beauty':
if ndcg_score > 0.3:
logging.info('NDCG > 0.3, do full retest')
test(tgcn_model, test_data_loader)
else:
if ndcg_score > 0.84:
logging.info('NDCG > 0.5, do full retest')
test(tgcn_model, test_data_loader)
tgcn_model.ngh_finder = train_ngh_finder
logging.info('--------------------------------------------------')
logging.info('==================================================')
if SAVE_CHECKPT:
file_to_save = {
'state_dict': tgcn_model.state_dict(),
'DATASET': DATASET,
'LAM': LAM,
'FEATURE_DIM': FEATURE_DIM,
'EDGE_DIM': EDGE_DIM,
'TIME_DIM': TIME_DIM,
'LAYERS': LAYERS,
'NUM_NEIGHBORS': NUM_NEIGHBORS,
'POS_ENCODER': POS_ENCODER,
'AGG_METHOD': AGG_METHOD,
'TARGET_MODE': TARGET_MODE,
'MARGIN': MARGIN,
'N_HEAD': N_HEAD,
'DROP_OUT': DROP_OUT,
'USE_TD': USE_TD,
'SA_LAYERS': SA_LAYERS,
'UNIFORM': UNIFORM,
'MIN_TRAIN_SEQ': MIN_TRAIN_SEQ,
}
save_path = CODE_VERSION + '-' + DATASET + '.pkl'
torch.save(file_to_save, save_path)
tgcn_model.ngh_finder = test_ngh_finder
test(tgcn_model, test_data_loader)
|
# Generated by Django 2.1.5 on 2019-07-19 07:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for this app: the Department and StaffMember tables.

    Auto-generated by Django 2.1.5; StaffMember has a CASCADE foreign key to
    Department (``work_department``).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Department',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Department name / brief introduction / soft-delete flag.
                ('name', models.CharField(max_length=10, verbose_name='部门名称')),
                ('brief_introduction', models.CharField(blank=True, max_length=500, null=True, verbose_name='部门简介')),
                ('is_delete', models.BooleanField(verbose_name='是否删除')),
            ],
        ),
        migrations.CreateModel(
            name='StaffMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Personal details: name, phone, grade, school department, major.
                ('name', models.CharField(max_length=10, verbose_name='名字')),
                ('phone_num', models.CharField(blank=True, max_length=11, null=True, verbose_name='手机号码')),
                ('grade', models.CharField(max_length=4, verbose_name='年级')),
                ('school_department', models.CharField(max_length=10, verbose_name='系别')),
                ('major', models.CharField(max_length=20, verbose_name='专业')),
                ('personal_signature', models.CharField(blank=True, max_length=30, null=True, verbose_name='个性签名')),
                ('brief_introduction', models.CharField(blank=True, max_length=500, null=True, verbose_name='个人简介')),
                # Tenure dates plus status flags (incumbent / founder / gender / delisted).
                ('start_entry', models.DateField(blank=True, null=True, verbose_name='起始任职')),
                ('end_quit', models.DateField(blank=True, null=True, verbose_name='结束任职')),
                ('is_incumbent', models.BooleanField(verbose_name='是否在任')),
                ('is_first_generation', models.BooleanField(verbose_name='是否初代')),
                ('is_man', models.BooleanField(verbose_name='性别男')),
                ('is_delisting', models.BooleanField(verbose_name='是否除名')),
                # Deleting a Department removes its members as well (CASCADE).
                ('work_department', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='about.Department')),
            ],
        ),
    ]
|
my_list = []

# Truthiness check: empty sequences are falsy in Python (the idiomatic form).
if not my_list:
    print("The list is empty.")

# Equality check against a freshly built empty list (works, but less idiomatic).
if my_list == list():
    print("The list is empty.")
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Let’s play Rock, Paper, Scissors? (yes/no) yes\n",
"What's your choice?rock\n",
"Tie!\n",
"Play again?yes\n",
"What's your choice?paper\n",
"Tie!\n",
"Play again?yes\n",
"What's your choice?rock\n",
"You Lose!\n"
]
}
],
"source": [
"# Python Program for Rock Paper Scissors Game\n",
"from random import *\n",
"from sys import * # used for the function 'exit()' to end the program\n",
"def play(): # play() function\n",
" choice = input(\"What's your choice?\")\n",
" choices = {1 : 'rock', 2 : 'paper', 3 : 'scissors'}\n",
" c_choice = choices[randint(1,3)] # randomly generaed choice\n",
" if choice == c_choice: # choice- player's choice, c_choice- computer's choice\n",
" return print('Tie!')\n",
" if compare(choice,c_choice):\n",
" return print('You Win!')\n",
" else:\n",
" return print('You Lose!')\n",
"def compare(pChoice,compChoice): # compare() function\n",
" results = {('paper','rock') : True,\n",
" ('paper','scissors') : False,\n",
" ('rock','paper') : False,\n",
" ('rock','scissors') : True,\n",
" ('scissors','paper') : True,\n",
" ('scissors','rock') : False}\n",
" return results[(pChoice,compChoice)]\n",
"\n",
"def game_play(): # game_play() function\n",
" begin = input(\"Let’s play Rock, Paper, Scissors? (yes/no) \")\n",
" while begin != \"yes\":\n",
" if begin == \"no\":\n",
" print(\"Game Over\")\n",
" sys.exit()\n",
" else:\n",
" print(\"Please try again\")\n",
" begin = input(\"Let’s play Rock, Paper, Scissors? (yes/no)\")\n",
" play() # play() function called \n",
" while True:\n",
" begin = input('Play again?')\n",
" while begin != \"yes\":\n",
" if begin == \"no\":\n",
" print(\"Game Over\")\n",
" exit()\n",
" else:\n",
" print(\"Please try again\")\n",
" begin = input(\"Play again? \")\n",
" play() # play() function called \n",
"game_play() # game_play() function called"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
|
#!/usr/bin/env python3
from flask import Flask, request, jsonify
from flask_cors import CORS
import argparse
import sys
import json
from genderbias import ALL_SCANNED_DETECTORS, Document
SERVER_VERSION = "0.1.0"

APP = Flask(__name__)
CORS(APP)  # allow cross-origin requests from browser front-ends

# Parse arguments. If the --detectors flag is used, then only use the detectors
# that are requested.
parser = argparse.ArgumentParser(
    description="Flask-based server for gender-bias detection over REST API"
)
parser.add_argument(
    "--detectors", dest="detectors", default="",
    help="Use specific detectors, not all available"
)
args = parser.parse_args()

if args.detectors:
    # A single detector name was requested; fail fast if it is unknown.
    if args.detectors in ALL_SCANNED_DETECTORS:
        detectors = [ALL_SCANNED_DETECTORS[args.detectors]]
    else:
        print("Detector named '{}' not available.".format(args.detectors))
        sys.exit(1)
else:
    # Default: run every detector discovered at import time.
    detectors = ALL_SCANNED_DETECTORS.values()
@APP.route("/")
def route_home():
"""
A "heartbeat" route for the homepage.
Returns:
str
"""
return "genderbias server v{}".format(SERVER_VERSION)
@APP.route("/check", methods=["POST"])
def route_check():
"""
Check POSTed text for gender bias.
POST an application/json body with a "text" key and string value; results
will be returned in the form:
{
reports: List[dict]
}
Where each report has zero or more flags.
"""
text = request.json['text']
doc = Document(text)
reports = []
for detector in detectors:
reports.append(detector().get_report(doc))
reports_data = [report.to_dict() for report in reports]
return jsonify({
"issues": reports_data,
"text": request.json['text']
})
# Run the server.
# NOTE(review): debug=True enables the Werkzeug debugger/auto-reloader and
# host="0.0.0.0" binds on all interfaces — confirm this is never deployed as-is.
APP.run(host="0.0.0.0", debug=True)
|
def count_occurrences(text, pattern):
    """Count (possibly overlapping) occurrences of pattern in text.

    Returns 0 when pattern is longer than text. An empty pattern matches at
    every position (len(text) + 1 matches).

    BUGFIX: the original two-pointer scan only backtracked after a *complete*
    match (i -= n), so a failed partial match skipped characters and missed
    occurrences — e.g. it counted 0 occurrences of "AB" in "AAB".
    """
    n = len(pattern)
    # Test every start index that leaves room for a full-length match.
    return sum(1 for i in range(len(text) - n + 1) if text[i:i + n] == pattern)


if __name__ == "__main__":
    # Read the text and then the substring from stdin, one per line.
    Str = input()
    sub_str = input()
    print(count_occurrences(Str, sub_str))
|
# Import the client half of xmlrpc.
import xmlrpc.client

# Create the client proxy stub pointing at the upload server.
s = xmlrpc.client.ServerProxy('http://192.168.0.36:9999')

# Open the file that will be uploaded.
with open("file_diupload.txt",'rb') as handle:
    # Read the file and wrap its bytes with xmlrpc.client.Binary.
    file = xmlrpc.client.Binary(handle.read())

# Call the upload function exposed by the server.
s.file_upload(file)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 13 15:49:55 2018

Load an image file with PIL and print it as a numpy integer array.

@author: HP
"""
# Cleanup: removed the large blocks of commented-out experiments
# (urllib download, pandas read_csv, requests image fetch, PIL save)
# that were dead code around the working logic below.
from PIL import Image
import numpy as np

# Open the image and force the pixel data to be read eagerly.
img = Image.open('2.jpg')
img.load()

# Convert the PIL image to a numpy array of 32-bit ints
# (shape depends on the image mode: (height, width[, channels])).
data = np.asarray(img, dtype="int32")
print(data)
from nltk.tokenize import sent_tokenize
def lines(a, b):
    """Return lines in both a and b"""
    # Break both inputs into lines, then intersect.
    split_a = splitter(a, "l", 0)
    split_b = splitter(b, "l", 0)
    return matcher(split_a, split_b)
def sentences(a, b):
    """Return sentences in both a and b"""
    # Tokenize both inputs into sentences, then intersect.
    split_a = splitter(a, "se", 0)
    split_b = splitter(b, "se", 0)
    return matcher(split_a, split_b)
def substrings(a, b, n):
    """Return substrings of length n in both a and b"""
    # Expand both inputs into their length-n substrings, then intersect.
    split_a = splitter(a, "su", n)
    split_b = splitter(b, "su", n)
    return matcher(split_a, split_b)
def splitter(s, t, n):
    """Split a string in the requested way.

    t selects the mode:
        "l"  -- list of lines
        "se" -- list of sentences (via nltk's sent_tokenize)
        "su" -- set of every substring of length n (n is ignored otherwise)

    Raises ValueError for an unknown mode (the original fell through and
    raised an incidental UnboundLocalError on the return statement).
    """
    if t == "l":
        return s.split('\n')
    if t == "se":
        return sent_tokenize(s)
    if t == "su":
        # Every start index that leaves room for a full n-length substring;
        # replaces the original manual character loop with its own counter.
        return {s[i:i + n] for i in range(len(s) - n + 1)}
    raise ValueError("unknown split type: %r" % (t,))
def matcher(a, b):
    """Compare input strings and match"""
    # Collect each element of a that also occurs in b; the set removes
    # duplicates, and the caller receives a plain list.
    shared = {candidate for candidate in a if candidate in b}
    return list(shared)
////
# get data from hive
sqlContext.sql("SELECT * FROM lab_ent_anltc.user_recsys LIMIT 25").collect().foreach(println)
from pyspark.sql import SQLContext
sqlCtx = SQLContext(sc)
# load data from hive parquet (3 columns: mstr_prsna_key_id, ei_sku_id, rating)
ur = sqlCtx.parquetFile("hdfs://HADOOP/lab/user_recsys_parquet_nonnull")
ur = sqlCtx.parquetFile("hdfs://HADOOP/user_recsys_parquet_nonnull")
# count all records
ur.count()
////
%hdfs
hdfs dfs -put /home/localuser/edl-in/test/u.data hdfs://HADOOP/lab/
%pyspark
movielens = sc.textFile("hdfs://HADOOP/lab/u.data")
movielens.count()
movielens.first() #'u '
# isolate rating column (test) & count distinct values
rate = ur.map(lambda y: float(y[2]))
rate.distinct().count()
# import three function from mllib
from pyspark.mllib.recommendation import ALS,MatrixFactorizationModel,Rating
# create ratings object
ratings = ur.map(lambda z: Rating(int(z[0]), int(z[1]), int(z[2])))
#ratings = ur.map(lambda z: Rating(z[0]), float(z[1]), float(z[2])))
# create training and test set with random seed
train, test = ratings.randomSplit([0.71,0.29],6999)
# cache data to speed up training
train.cache()
test.cache()
# set up parameters for ALS and create model on training data
rank = 5 # latent factors to be made
numIterations = 10 # times to repeat process
model = ALS.train(train, rank, numIterations)
|
from urlrepo import UrlRepo
import json
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from menudatabase import MenuDatabase
from google.appengine.ext import db
from storemenu import MenuStorage
from mytime import MyTime
from mymenuparser import MyMenuParser
from datetime import date, timedelta, datetime
class GetMenu(webapp.RequestHandler):
    """App Engine handler that returns a dining-hall menu as JSON.

    Recognized query parameters:
        dh           -- dining-hall identifier (required; must be in UrlRepo.dhs)
        time=true    -- also write the current server time (debug aid)
        exe=storeAllMenus [&num_to_store=N] -- trigger first-time menu storage
        debug=url|simple|verbose            -- write intermediate data for testing
        dtdate       -- day offset from today (0-7) for the requested menu
    """

    def getMenu(self):
        # Shared implementation for both GET and POST.
        dh = self.request.get("dh")
        self.response.headers["Content-Type"] = "application/json"
        # Debug aid: echo both raw and app-local notions of "now".
        if self.request.get('time') == "true":
            self.response.out.write(datetime.now())
            self.response.out.write("\n")
            self.response.out.write(MyTime.getTheTimeNow())
            self.response.out.write("\n")
        #"Hack" to allow first-time storage of menus,
        #where necessary url-command is: slugmenu.appspot.com/getmenu.py?exe=storeAllMenus[&num_to_store=#]
        if self.request.get('exe') == "storeAllMenus":
            num_dh = 8;
            if self.request.get('num_to_store') != "":
                num_dh = self.request.get('num_to_store')
            self.response.out.write( MenuStorage.storeAllMenus(num_dh) )
            return
        # Validate the dining-hall parameter before doing any work.
        if dh == "":
            self.response.out.write(
                json.dumps(
                    {"request":{"success":0},
                     "response":{"message":"Error! Null Dining Hall!"}
                    }
                )
            )
            return
        if dh not in UrlRepo.dhs:
            self.response.out.write(
                json.dumps(
                    {"request":{"success":0},
                     "response":{"message":"Invalid Dining Hall: "+dh}
                    }
                )
            )
            return
        #For testing!
        # debug=url: just the upstream URL; debug=simple: URL plus parsed menu;
        # debug=verbose: the raw upstream HTML.
        if self.request.get('debug') == "url":
            self.response.out.write("#URL")
            self.response.out.write("\n")
            self.response.out.write(UrlRepo.getUrl(dh, MyTime.getTheTimeNow()))
            self.response.out.write("\n")
        if self.request.get('debug') == "simple":
            self.response.out.write("#MENU")
            self.response.out.write("\n")
            self.response.out.write(UrlRepo.getUrl(dh, MyTime.getTheTimeNow()))
            self.response.out.write("\n")
            self.response.out.write(
                json.dumps(
                    MyMenuParser.getMenuFor(dh, MyTime.getTheTimeNow())
                    , indent = 4, sort_keys = True
                ))
            self.response.out.write("\n")
        if self.request.get('debug') == "verbose":
            self.response.out.write("#HTML")
            self.response.out.write("\n")
            html = MyMenuParser.getHtmlFrom( UrlRepo.getUrl(dh, MyTime.getTheTimeNow()) )
            self.response.out.write(html)
            self.response.out.write("\n")
        # Optional day offset; the datastore only holds one week ahead.
        dtdate = 0
        if self.request.get('dtdate') != '':
            dtdate = int(self.request.get('dtdate'))
        if dtdate > 7:
            self.response.out.write(
                json.dumps(
                    {"request":{"success":0},
                     "response":{"message":"Cannot get more than 1 week ahead!"}
                    }
                )
            )
            return
        # Fetch the stored menu for this hall/date and pretty-print it; a
        # malformed/empty stored blob surfaces as a JSON error response.
        q = db.GqlQuery(
            "SELECT * FROM MenuDatabase " +
            "WHERE dh=:1 AND time=:2",
            dh, MyTime.getTheTimeNowPlus(dtdate))
        json_str = ''
        for i in q:
            json_str += i.menu
        try:
            self.response.out.write(
                json.dumps(
                    json.loads(json_str), indent=4, sort_keys=True
                )
            )
        except ValueError as ve:
            self.response.out.write(
                json.dumps(
                    {"request":{"success":0},
                     "response":{"message":ve.args[0]}
                    }
                )
            )

    def get(self):
        # GET and POST are handled identically.
        self.getMenu()

    def post(self):
        self.getMenu()
def main():
    """Wire the /getmenu.py route to GetMenu and start the WSGI application."""
    routes = [("/getmenu.py", GetMenu)]
    application = webapp.WSGIApplication(routes, debug=True)
    util.run_wsgi_app(application)


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
from django.conf.urls import patterns, include, url
from authentication.views import LoginView, RegisterTeacherView, RegisterStudentView, LogoutView, ProfileView, TeacherAccountView, StudentAccountView, AdminAccountView
# URL routes for the authentication app: each entry maps a URL regex to a
# class-based view and registers a reverse() name.
# NOTE(review): patterns() is the Django <= 1.7 API (imported from
# django.conf.urls above) — confirm the project pins a compatible version.
urlpatterns = patterns('project.core.views',
    url('^profile/$', ProfileView.as_view(), name='profile'),
    url('^teacher-account/$', TeacherAccountView.as_view(), name='teacher-account'),
    url('^student-account/$', StudentAccountView.as_view(), name='student-account'),
    url('^admin-account/$', AdminAccountView.as_view(), name='admin-account'),
    url('^login/$', LoginView.as_view(), name='login'),
    url('^logout/$', LogoutView.as_view(), name='logout'),
    url('^register-teacher/$', RegisterTeacherView.as_view(), name='register-teacher'),
    url('^register-student/$', RegisterStudentView.as_view(), name='register-student'),
)
|
from threading import local

# One namespace per thread: each request-handling thread sees only its own user.
_thread_locals = local()


def get_current_user():
    """Return the user stored for the current thread, or None if none was set."""
    current = getattr(_thread_locals, 'user', None)
    return current
class CurrentUserMiddleware(object):
    """Django middleware that stashes request.user in thread-local storage.

    This makes the acting user reachable (via get_current_user) from code that
    has no direct access to the request object.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Record the user before the view runs so downstream code can read it.
        _thread_locals.user = getattr(request, 'user', None)
        return self.get_response(request)
|
from flask import abort, redirect, request, url_for
from flask_admin.contrib.sqla import ModelView
from flask_admin.form import SecureForm
from flask_login import current_user
class AuthenticatedModelView(ModelView):
    """Flask-Admin model view restricted to active, logged-in users.

    All write/detail capabilities are switched off below, so the view is
    effectively read-only at the list level.
    """

    # Use CSRF-protected forms.
    form_base_class = SecureForm

    def is_accessible(self):
        # Only active, authenticated users may see this view.
        return (current_user.is_active and
                current_user.is_authenticated)

    def _handle_view(self, name, **kwargs):
        """
        Override builtin _handle_view in order to redirect users when a view is not accessible.
        """
        if not self.is_accessible():
            if current_user.is_authenticated:
                # permission denied
                abort(403)
            else:
                # login
                return redirect(url_for('github.login', next=request.url))

    # Read-only configuration: no create/edit/delete, no detail pages,
    # and no per-row action column.
    can_create = False
    can_delete = False
    can_edit = False
    can_view_details = False
    column_display_actions = False
|
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Virtual World Finals - Problem C. Ropes
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000436329/000000000084fad0
#
# Time: O(N^3), passes in PyPy2 but not in Python2
# Space: O(N^2)
#
# Usage: python interactive_runner.py python3 testing_tool.py 2 -- python ropes.py
#
# push it east solution
# - Z=10, 18322/20000 = 91.610 %
# - Z=14, 18251/20000 = 91.255 %
#
from sys import stdout
from itertools import imap
def play(i, j):
    """Send our (0-indexed) play to the judge and return its (0-indexed) reply.

    Prints the pair 1-indexed and flushes so the interactive judge sees it.
    (Python 2: print statement, raw_input.)
    """
    print i+1, j+1 # 0-indexed
    stdout.flush()
    return map(lambda x: int(x)-1, raw_input().strip().split()) # 0-indexed
def check_result(A_score, B_score):
    # The judge sends 1 if we won, 0 otherwise; Python 2 input() evaluates
    # that token to an int, which must match our own win/lose computation.
    assert(input() == int(A_score > B_score))
def greedy(score_matrix):
    """Return the best-scoring cell (i, j), tie-breaking toward small i+j."""
    # given that opposing team choose the play that yields the maximum possible score for this turn.
    # if there is a tie:
    # - they will choose one at random as problem description claims
    # - they will not choose by a counter-greedy strategy (they choose as far east as possible), since we can never win by this greedy strategy if they can always do that
    result = None
    best_score = max(imap(max, score_matrix))
    for i in xrange(2*N):
        for j in xrange(2*N):
            if score_matrix[i][j] == best_score and (result is None or (sum(result) > i+j)): # we choose as far west as possible if there is a tie
                result = (i, j)
    return result
def update(i, j, score_matrix):
    """Apply rope (i, j) in place: retire row i / column j, reward crossings."""
    for r in xrange(2*N):
        for c in xrange(2*N):
            if r == i or c == j:
                # Endpoint already used — make this cell unpickable.
                score_matrix[r][c] = NEG_INF
            elif (r-i)*(c-j) < 0:
                # A future rope (r, c) would cross (i, j) and earn one point.
                score_matrix[r][c] += 1
def ropes():
    """Play one full game of N turns against the judge, then verify the outcome."""
    score_matrix = [[0 for _ in xrange(2*N)] for _ in xrange(2*N)]
    A_score = B_score = 0
    for k in xrange(N):
        # First play is fixed at (Z-1, Z-1); afterwards play greedily.
        i, j = greedy(score_matrix) if k else (Z-1, Z-1) # tuned by experiments that i=j at the first play is better
        A_score += score_matrix[i][j]
        update(i, j, score_matrix)
        # Opponent's move, then apply it to the board the same way.
        i, j = play(i, j)
        B_score += score_matrix[i][j]
        update(i, j, score_matrix)
    check_result(A_score, B_score)
NEG_INF = float("-inf")
Z = 10 # tuned by experiments that top 4 win rates are with Z = [10, 14, 9, 11]
T, N, W = map(int, raw_input().strip().split())
for case in xrange(T):
ropes()
|
#
# Secret Santa
# Given N people, assign each person a 'designated gift recipient'(TM).
# - everyone should receive exactly one gift
# - no one should be their own designated gift recipient
#
# (a.k.a. Generate a random cycle of length N)
#
import itertools
import random
import pprint
class Person:
    """A Secret Santa participant: display name plus contact e-mail address."""

    def __init__(self, name: str, email: str):
        self.email = email
        self.name = name
def getGiftRecipients(names: tuple) -> dict:
    """Map each person's email to the name of their designated gift recipient.

    Arranges all participants in one uniformly random cycle, so everyone gives
    and receives exactly one gift and (for len(names) > 1) nobody is their own
    recipient.

    PERF: the original materialized *all* len(names)! permutations via
    itertools.permutations just to pick one — O(n!) time and memory.
    random.sample draws the same uniform random ordering in O(n).
    """
    cycle = random.sample(names, len(names))
    x_gives_to_y = {}
    for i in range(len(cycle)):
        # cycle[i-1] gives to cycle[i]; i == 0 wraps around, closing the cycle.
        x_gives_to_y[ cycle[i-1].email ] = cycle[i].name
    return x_gives_to_y
def main():
    # Participants (names and addresses redacted).
    people = (
        Person('Ixxxx', 'ixxx@gmail.com'),
        Person('Mxxxx', 'mxxx@gmail.com'),
        Person('Vxxxx', 'vxxx@gmail.com'),
        Person('Axxxx', 'axxx@gmail.com'),
        Person('Lxxxx', 'lxxx@gmail.com')
    )
    # Draw the assignment and show it as email -> recipient name.
    recipients = getGiftRecipients(people)
    pprint.pprint(recipients)

main()

# TO DO:
# inform everyone of their designated gift recipient by email
|
#!/usr/bin/env python3
from ev3dev2.motor import MoveSteering, MoveTank, MediumMotor, LargeMotor, OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D
from ev3dev2.sensor.lego import TouchSensor, ColorSensor, GyroSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
import xml.etree.ElementTree as ET
import threading
import time
from sys import stderr
def Do_nothing(stop):
    """Block until the ``stop`` callable returns a truthy value.

    stop: zero-argument callable polled once per loop iteration (the usual
    pattern for cancelling a worker thread).

    FIX: the original loop busy-spun at 100% CPU between polls; a short sleep
    yields the processor while keeping the stop latency low.
    """
    # just wait...
    print('Doing nothing...', file= stderr)
    while True:
        if stop():
            break
        time.sleep(0.01)  # yield the CPU instead of busy-spinning
# coding=utf-8
import logging
import os
import time
import numpy as np
import tensorflow as tf
import conf
from src.util.common import dump_model, load_model
from src.util.sampler import random_sample
class CNNTrainer(object):
    """Builds, trains, evaluates and reloads a small CNN (TensorFlow 1.x graph API)."""

    # cnn configuration
    CONV_STRIDES_H, CONV_STRIDES_W = 1, 1  # convolution stride (rows, cols)
    DEFAULT_CONV_HEIGHT, DEFAULT_CONV_WIDTH = 5, 5  # default kernel size
    POOL_STRIDES_H, POOL_STRIDES_W = 2, 2  # max-pool stride
    POOL_SHAPE = [1, 2, 2, 1]  # max-pool window, NHWC layout
    KEEP_PROB = 0.4  # dropout keep-probability used during training
    @staticmethod
    def print_usage():
        # Print the expected positional-argument layout for fit(argv).
        print("""
        need args:
            <delimiter>
            <train_data_x_file_path> <train_data_y_file_path> <test_data_x_file_path> <test_data_y_file_path>
            <initial_height> <initial_width> <initial_channels> <target_class_cnt>
            <iteration> <batch_size>
            <model_file_path>
            <summary_log_dir_path>
            <conv_height> <conv_width>
            <neurons_nums>
            [cpu_core_num]
        where
            neurons_nums is numbers of neurons in each conv layer, separated by comma(support no more than 3 conv layers)
        """)
@staticmethod
def fit(argv):
"""训练并评估 cnn 模型
样本格式(x): 每行样本是一张拉成1维的图片(height*weight*in_channels)
标签格式(y): 每行标签是一个 one_hot 形式的向量(长度为 target_class_cnt )
@:param argv list, 详见 print_usage() 方法
"""
if len(argv) < 16:
CNNTrainer.print_usage()
return 1
logging.info('argv: "%s"', ' '.join(argv))
# argv
# required
_offset, _length = 0, 1
delimiter, = argv[_offset:_offset + _length]
_offset, _length = _offset + _length, 4
train_data_x_file_path, train_data_y_file_path, test_data_x_file_path, test_data_y_file_path = \
argv[_offset:_offset + _length]
_offset, _length = _offset + _length, 4
initial_height, initial_width, initial_channels, target_class_cnt = map(int, argv[_offset:_offset + _length])
_offset, _length = _offset + _length, 2
iteration, batch_size = map(int, argv[_offset:_offset + _length])
_offset, _length = _offset + _length, 1
model_file_path, = argv[_offset:_offset + _length]
_offset, _length = _offset + _length, 1
summary_log_dir_path, = argv[_offset:_offset + _length]
_offset, _length = _offset + _length, 2
conv_height, conv_width = map(int, argv[_offset:_offset + _length])
_offset, _length = _offset + _length, 1
_neurons_nums_str, = argv[_offset:_offset + _length]
neurons_nums = map(int, str(_neurons_nums_str).strip().split(','))
# optional
_offset, _length = _offset + _length, 1
cpu_core_num = conf.CPU_COUNT
if len(argv) > _offset:
cpu_core_num, = map(int, argv[_offset:_offset + _length])
# Construct
# input and labels
with tf.name_scope('input') as _:
x = tf.placeholder(tf.float32, shape=[None, initial_height * initial_width], name='x', )
y_ = tf.placeholder(tf.float32, shape=[None, target_class_cnt], name="y_", )
keep_prob = tf.placeholder(tf.float32, name='keep_prob', )
# trainer and evaluator
trainer, evaluator = CNNTrainer.construct(
initial_height, initial_width, initial_channels, target_class_cnt,
x, y_, keep_prob,
conv_height, conv_width,
neurons_nums,
)
# load data
logging.info("start to load data.")
start_time = time.time()
_basedir_path = os.path.dirname(train_data_x_file_path)
train_data = CNNTrainer.load_data(
train_data_x_file_path, train_data_y_file_path,
delimiter,
os.path.join(_basedir_path, 'train_data.npy'),
)
test_data = CNNTrainer.load_data(
test_data_x_file_path, test_data_y_file_path,
delimiter,
os.path.join(_basedir_path, 'test_data.npy'),
)
end_time = time.time()
logging.info("end to load data.")
logging.info('cost time: %.2fs' % (end_time - start_time))
config = tf.ConfigProto(
device_count={"CPU": cpu_core_num},
inter_op_parallelism_threads=cpu_core_num,
intra_op_parallelism_threads=cpu_core_num,
)
with tf.Session(config=config) as sess:
# train
logging.info("start to train.")
start_time = time.time()
trainer.train(
sess, summary_log_dir_path,
evaluator,
iteration, batch_size,
train_data, test_data, target_class_cnt,
x, y_, keep_prob,
)
del train_data
end_time = time.time()
logging.info("end to train.")
logging.info('cost time: %.2fs' % (end_time - start_time))
# dump model
dump_model(sess, model_file_path)
logging.info("dump model into: %s" % model_file_path)
# evaluate
logging.info("start to evaluate.")
start_time = time.time()
test_data_len = len(test_data)
evaluate_result = evaluator.evaluate(
sess,
batch_size,
test_data, target_class_cnt,
x, y_, keep_prob,
)
del test_data
end_time = time.time()
logging.info("end to evaluate.")
logging.info('cost time: %.2fs' % (end_time - start_time,))
logging.info('total test data: %d' % (test_data_len,))
logging.info("final evaluate_result: %s" % (evaluate_result,))
return 0
    @staticmethod
    def construct(
            initial_height, initial_width, initial_channels, target_class_cnt,
            x, y_, keep_prob,
            conv_height, conv_width,
            neurons_nums,
    ):
        """Build the CNN graph (layers C1..F6 + dropout + output).

        Wires three conv/pool stages, a dense layer, dropout and a logits
        layer onto the given placeholders, plus training and evaluation ops.
        Returns a (Trainer, Evaluator) pair wrapping those ops.
        """
        def weight_variable(shape, name=None, ):
            # Small random init to break symmetry.
            initial = tf.truncated_normal(shape, stddev=0.1)
            return tf.Variable(initial, name=name, )

        def bias_variable(shape, name=None, ):
            # Slightly positive bias (pairs with ReLU activations).
            initial = tf.constant(0.1, shape=shape)
            return tf.Variable(initial, name=name, )

        def conv2d(x, W,
                   strides=(1, CNNTrainer.CONV_STRIDES_H, CNNTrainer.CONV_STRIDES_W, 1),
                   padding='SAME', name=None, ):
            return tf.nn.conv2d(x, W, strides, padding, name=name, )

        def max_pool(x, ksize,
                     strides=(1, CNNTrainer.POOL_STRIDES_H, CNNTrainer.POOL_STRIDES_W, 1),
                     padding='SAME', name=None, ):
            return tf.nn.max_pool(x, ksize, strides, padding, name=name, )

        with tf.name_scope('model') as _:
            # Input Layer: reshape the flat rows back into NHWC images.
            with tf.name_scope('InputLayer') as _:
                in0 = x
                x_image = tf.reshape(in0, [-1, initial_height, initial_width, initial_channels], name='x_image', )
                out0 = x_image
                CNNTrainer.add_image2summary(out0, 'out0')
            # C1: first convolution (neurons_nums[0] output channels).
            with tf.name_scope('C1') as _:
                _in = out0
                _height, _width = _in.get_shape()[1].value, _in.get_shape()[2].value
                _in_channels = _in.get_shape()[3].value
                _out_channels = neurons_nums[0]
                W_conv = weight_variable(
                    [conv_height, conv_width, _in_channels, _out_channels], name='W_conv', )
                tf.summary.histogram('W_conv', W_conv)
                b_conv = bias_variable([_out_channels], name='b_conv', )
                tf.summary.histogram('b_conv', b_conv)
                h_conv = tf.nn.relu(conv2d(_in, W_conv) + b_conv, name='h_conv', )
                out = h_conv
                CNNTrainer.add_image2summary(out, 'out')
            # S2: first max-pool (halves height/width).
            with tf.name_scope('S2') as _:
                _in = out
                h_pool = max_pool(_in, CNNTrainer.POOL_SHAPE, name='h_pool', )
                out = h_pool
                CNNTrainer.add_image2summary(out, 'out')
            # C3: second convolution (neurons_nums[1] output channels).
            with tf.name_scope('C3') as _:
                _in = out
                _height, _width = _in.get_shape()[1].value, _in.get_shape()[2].value
                _in_channels = _in.get_shape()[3].value
                _out_channels = neurons_nums[1]
                W_conv = weight_variable(
                    [conv_height, conv_width, _in_channels, _out_channels], name='W_conv', )
                tf.summary.histogram('W_conv', W_conv)
                b_conv = bias_variable([_out_channels], name='b_conv', )
                tf.summary.histogram('b_conv', b_conv)
                h_conv = tf.nn.relu(conv2d(_in, W_conv) + b_conv, name='h_conv', )
                out = h_conv
                CNNTrainer.add_image2summary(out, 'out')
            # S4: second max-pool.
            with tf.name_scope('S4') as _:
                _in = out
                h_pool = max_pool(_in, CNNTrainer.POOL_SHAPE, name='h_pool', )
                out = h_pool
                CNNTrainer.add_image2summary(out, 'out')
            # C5: third convolution (neurons_nums[2] output channels).
            with tf.name_scope('C5') as _:
                _in = out
                _height, _width = _in.get_shape()[1].value, _in.get_shape()[2].value
                _in_channels = _in.get_shape()[3].value
                _out_channels = neurons_nums[2]
                W_conv = weight_variable(
                    [conv_height, conv_width, _in_channels, _out_channels], name='W_conv', )
                tf.summary.histogram('W_conv', W_conv)
                b_conv = bias_variable([_out_channels], name='b_conv', )
                tf.summary.histogram('b_conv', b_conv)
                h_conv = tf.nn.relu(conv2d(_in, W_conv) + b_conv, name='h_conv', )
                out = h_conv
                CNNTrainer.add_image2summary(out, 'out')
            # F6, Densely Connected Layer(Full Connected Layer)
            with tf.name_scope('F6') as _:
                _in = out
                _height, _width = _in.get_shape()[1].value, _in.get_shape()[2].value
                _in_channels = _in.get_shape()[3].value
                _out_width = 1024
                W_fc = weight_variable([_height * _width * _in_channels, _out_width], name='W_fc', )
                b_fc = bias_variable([_out_width], name='b_fc', )
                h_pool_flat = tf.reshape(_in, [-1, _height * _width * _in_channels], name='h_pool_flat', )
                h_fc = tf.nn.relu(tf.matmul(h_pool_flat, W_fc) + b_fc, name='h_fc', )
                out = h_fc
            # Dropout Layer (keep_prob fed at run time: <1 in training, 1 in eval).
            with tf.name_scope('DropoutLayer') as _:
                _in = out
                h_fc_drop = tf.nn.dropout(_in, keep_prob, name='h_fc_drop', )
                out = h_fc_drop
            # Output Layer: raw logits, one per target class.
            with tf.name_scope('OutputLayer') as _:
                _in_ = out
                _in_width_ = _in_.get_shape()[1].value
                _out_width_ = target_class_cnt
                W_fc_ = weight_variable([_in_width_, _out_width_], name='W_fc_', )
                b_fc_ = bias_variable([_out_width_], name='b_fc_', )
                y = tf.add(tf.matmul(_in_, W_fc_), b_fc_, name='y', )
        # Trainer: softmax cross-entropy loss + Adam.
        with tf.name_scope('trainer') as _:
            loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y), name='loss', )
            train_per_step = tf.train.AdamOptimizer(1e-5).minimize(loss, name='train_per_step', )
            tf.summary.scalar('loss', loss)
            trainer = CNNTrainer.Trainer(train_per_step)
        # Evaluator: accuracy plus recall/precision for the "abnormal" class.
        with tf.name_scope('evaluator') as _:
            example_cnt = tf.count_nonzero(
                tf.logical_or(tf.cast(tf.argmax(y_, 1), dtype=tf.bool), True), name='example_cnt')  # total number of examples
            _correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
            correct_cnt = tf.count_nonzero(_correct_prediction, name='correct_cnt')  # number of correctly classified examples (both classes)
            accuracy = tf.reduce_mean(tf.cast(_correct_prediction, tf.float32), name='accuracy', )
            tf.summary.scalar('accuracy', accuracy)
            # The model initially distinguishes 2 user types: normal (index 0)
            # and abnormal (index 1); we track recall/precision for the
            # abnormal class.
            TARGET_LABEL_IDX = 1  # index of the target (treated-as-positive) class
            _right_label = TARGET_LABEL_IDX
            _true_right = tf.equal(tf.argmax(y_, 1), _right_label)
            _predicted_right = tf.equal(tf.argmax(y, 1), _right_label)
            both_right_cnt = tf.count_nonzero(
                tf.logical_and(_true_right, _predicted_right), name='both_right_cnt')  # true positives
            true_right_cnt = tf.count_nonzero(
                _true_right, name='true_right_cnt')  # actual positives
            predicted_right_cnt = tf.count_nonzero(_predicted_right, name='predicted_right_cnt')  # predicted positives
            # NOTE(review): `true_right_cnt == 0` compares a tf.Tensor with 0
            # at graph-construction time, so this Python conditional is
            # effectively always False and the -1.0 fallback never applies —
            # confirm whether a tf.cond/tf.where guard was intended.
            recall = -1.0 if true_right_cnt == 0 else \
                tf.divide(tf.to_float(both_right_cnt), tf.to_float(true_right_cnt), name='recall')
            precision = -1.0 if predicted_right_cnt == 0 else \
                tf.divide(tf.to_float(both_right_cnt), tf.to_float(predicted_right_cnt), name='precision')
            tf.summary.scalar('recall', recall)
            tf.summary.scalar('precision', precision)
            evaluator = CNNTrainer.Evaluator(
                accuracy, recall, precision,
                example_cnt, correct_cnt, both_right_cnt,
                true_right_cnt, predicted_right_cnt,
            )
        return trainer, evaluator
@staticmethod
def format_inputs(example, target_class_cnt):
example = np.array(example)
return example[:, :-target_class_cnt], example[:, -target_class_cnt:]
    @staticmethod
    def load_data(
            data_x_file_path, data_y_file_path,
            delimiter,
            cache=None,
    ):
        """Load x/y text files, column-stack them, and optionally cache as .npy.

        Returns a 2-D numpy array whose trailing columns are the labels; when
        ``cache`` exists it is loaded directly instead of re-parsing the text.
        """
        if cache is None or not os.path.exists(cache):
            # TODO reduce the memory footprint of loading (e.g. pre-shuffle,
            # then stream rows sequentially with yield)
            data_x, data_y = map(lambda _: np.loadtxt(_, delimiter=delimiter),
                                 (data_x_file_path, data_y_file_path,))
            data = np.column_stack((data_x, data_y,))
            # Free the two halves early; only the stacked array is needed.
            del data_x
            del data_y
            if cache is not None:
                np.save(cache, data)
        else:
            data = np.load(cache)
        return data
    @staticmethod
    def view_evaluate_result(
            delimiter,
            test_data_x_file_path, test_data_y_file_path,
            target_class_cnt,
            batch_size,
            model_file_path,
            cpu_core_num=conf.CPU_COUNT,
    ):
        """Reload a dumped model and re-run the evaluation on the test set.

        Looks up the input placeholders and evaluator tensors by name from the
        restored graph, then logs the evaluation result.
        """
        # load data
        logging.info("start to load data.")
        start_time = time.time()
        _basedir_path = os.path.dirname(test_data_x_file_path)
        test_data = CNNTrainer.load_data(
            test_data_x_file_path, test_data_y_file_path,
            delimiter,
            os.path.join(_basedir_path, 'test_data.npy'),
        )
        end_time = time.time()
        logging.info("end to load data.")
        logging.info('cost time: %.2fs' % (end_time - start_time))
        config = tf.ConfigProto(
            device_count={"CPU": cpu_core_num},
            inter_op_parallelism_threads=cpu_core_num,
            intra_op_parallelism_threads=cpu_core_num,
        )
        with tf.Session(config=config) as sess:
            # load model and re-bind the named tensors created by construct()
            graph = load_model(sess, model_file_path)
            x = graph.get_tensor_by_name("input/x:0")
            y_ = graph.get_tensor_by_name("input/y_:0")
            keep_prob = graph.get_tensor_by_name("input/keep_prob:0")
            accuracy = graph.get_tensor_by_name("evaluator/accuracy:0")
            recall = graph.get_tensor_by_name("evaluator/recall:0")
            precision = graph.get_tensor_by_name("evaluator/precision:0")
            example_cnt = graph.get_tensor_by_name("evaluator/example_cnt:0")
            correct_cnt = graph.get_tensor_by_name("evaluator/correct_cnt:0")
            both_right_cnt = graph.get_tensor_by_name("evaluator/both_right_cnt:0")
            true_right_cnt = graph.get_tensor_by_name("evaluator/true_right_cnt:0")
            predicted_right_cnt = graph.get_tensor_by_name("evaluator/predicted_right_cnt:0")
            logging.info("load model from: %s" % model_file_path)
            # evaluate
            logging.info("start to evaluate.")
            start_time = time.time()
            test_data_len = len(test_data)
            evaluator = CNNTrainer.Evaluator(
                accuracy, recall, precision,
                example_cnt, correct_cnt, both_right_cnt,
                true_right_cnt, predicted_right_cnt,
            )
            evaluate_result = evaluator.evaluate(
                sess,
                batch_size,
                test_data, target_class_cnt,
                x, y_, keep_prob
            )
            del test_data
            end_time = time.time()
            logging.info("end to evaluate.")
            logging.info('cost time: %.2fs' % (end_time - start_time,))
            logging.info('total data: %d' % (test_data_len,))
            logging.info("evaluate result %s" % (evaluate_result,))
    @staticmethod
    def add_image2summary(x, image_name_prefix):
        """Write each channel of a 4-D activation tensor to the image summary."""
        channels = x.get_shape()[3].value
        for channel_no in range(channels):
            # The slice keeps 4-D shape (batch, h, w, 1) as tf.summary.image expects.
            image = x[:, :, :, channel_no:channel_no + 1]
            image_name = '%s-%d' % (image_name_prefix, channel_no,)
            tf.summary.image(image_name, image)
    class Trainer(object):
        """Runs the training loop for a train op built by construct()."""

        # Log train/test metrics every this many steps.
        PRINT_PROGRESS_PER_STEP_NUM = 100

        def __init__(self, train_per_step, ):
            super(CNNTrainer.Trainer, self).__init__()
            self.train_per_step = train_per_step

        def train(
                self,
                sess, summary_log_dir_path,
                evaluator=None,
                iteration=None, batch_size=None,
                train_data=None, test_data=None, target_class_cnt=None,
                x=None, y_=None, keep_prob=None,
        ):
            """Run ``iteration`` SGD steps on random mini-batches.

            Periodically evaluates a train batch and a (2x larger) test batch
            when an evaluator is supplied, and writes merged summaries per step.
            """
            train_per_step = self.train_per_step
            summaries = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(logdir=summary_log_dir_path, graph=sess.graph)
            sess.run(tf.global_variables_initializer())
            for i in range(iteration):
                batch_train = random_sample(train_data, batch_size)
                _X_train, _Y_train = CNNTrainer.format_inputs(batch_train, target_class_cnt, )
                # print progress
                if evaluator is not None:
                    if i % CNNTrainer.Trainer.PRINT_PROGRESS_PER_STEP_NUM == 0:
                        train_evl_rs = evaluator.evaluate_one(
                            sess,
                            batch_train, target_class_cnt,
                            x, y_, keep_prob
                        )
                        batch_test = random_sample(test_data, 2 * batch_size)
                        test_evl_rs = evaluator.evaluate_one(
                            sess,
                            batch_test, target_class_cnt,
                            x, y_, keep_prob
                        )
                        logging.info(
                            "step %d, training evaluate_result: %s, testing evaluate_result: %s"
                            % (i, train_evl_rs, test_evl_rs))
                        # TODO keep the block below commented out: during
                        # experiments every metric should be watched as it
                        # evolves; for production it can be re-enabled
                        # (early-stop once both accuracies clear a threshold).
                        # accuracy_threshold = 0.83
                        # if test_evl_rs.accuracy_ratio > accuracy_threshold \
                        #         and train_evl_rs.accuracy_ratio > accuracy_threshold:
                        #     logging.info(
                        #         "exiting for reason: both train_accuracy and test_accuracy gt accuracy_threshold(%s)"
                        #         % (accuracy_threshold,))
                        #     return
                # One optimization step with dropout enabled.
                feed = {x: _X_train, y_: _Y_train, keep_prob: CNNTrainer.KEEP_PROB}
                train_per_step.run(feed_dict=feed, session=sess)
                summaries_result = sess.run(summaries, feed_dict=feed, )
                summary_writer.add_summary(summaries_result, global_step=i)
            summary_writer.close()
    class Evaluator(object):
        """Evaluates a model by fetching pre-built metric tensors
        (accuracy / recall / precision plus their raw counts) and
        aggregating them over batches into a Result."""

        def __init__(
                self,
                accuracy, recall, precision,
                example_cnt, correct_cnt, both_right_cnt,
                true_right_cnt, predicted_right_cnt,
        ):
            # Every argument is a tensor fetched from the evaluation graph
            # (see the 'evaluator/*' tensor names used by the caller).
            super(CNNTrainer.Evaluator, self).__init__()
            self.accuracy = accuracy
            self.recall = recall
            self.precision = precision
            self.example_cnt = example_cnt
            self.correct_cnt = correct_cnt
            self.both_right_cnt = both_right_cnt
            self.true_right_cnt = true_right_cnt
            self.predicted_right_cnt = predicted_right_cnt

        def evaluate(
                self,
                sess,
                batch_size,
                data, target_class_cnt,
                x, y_, keep_prob,
        ):
            """Evaluate over ~len(data)/batch_size batches and aggregate the
            raw counts into overall accuracy/recall/precision ratios.

            NOTE(review): each batch is drawn with random_sample, so examples
            may be evaluated repeatedly or skipped; the result approximates a
            full pass over `data` rather than equaling it exactly — confirm
            this is intended. A ratio of -1.0 marks a zero denominator.
            """
            iteration = int(len(data) / batch_size) + 1
            sum_example_cnt = 0
            sum_both_right_cnt = 0
            sum_correct_cnt = 0
            sum_true_right_cnt = 0
            sum_predicted_right_cnt = 0
            for i in range(iteration):
                batch_test = random_sample(data, batch_size)
                _result = self.evaluate_one(
                    sess,
                    batch_test, target_class_cnt,
                    x, y_, keep_prob
                )
                sum_example_cnt += _result.example_cnt
                sum_correct_cnt += _result.correct_cnt
                sum_both_right_cnt += _result.both_right_cnt
                sum_true_right_cnt += _result.true_right_cnt
                sum_predicted_right_cnt += _result.predicted_right_cnt
            # -1.0 signals "undefined" when the denominator is zero
            final_accuracy = -1.0 if sum_example_cnt == 0 else \
                1.0 * sum_correct_cnt / sum_example_cnt
            final_recall = -1.0 if sum_true_right_cnt == 0 else \
                1.0 * sum_both_right_cnt / sum_true_right_cnt
            final_precision = -1.0 if sum_predicted_right_cnt == 0 else \
                1.0 * sum_both_right_cnt / sum_predicted_right_cnt
            result = CNNTrainer.Evaluator.Result(
                final_accuracy, final_recall, final_precision,
                sum_example_cnt, sum_correct_cnt, sum_both_right_cnt,
                sum_true_right_cnt, sum_predicted_right_cnt,
            )
            return result

        def evaluate_one(
                self,
                sess,
                data, target_class_cnt,
                x, y_, keep_prob,
        ):
            """Evaluate a single batch (`data`) with dropout disabled
            (keep_prob=1.0) and return a Result of the metric tensors."""
            _X, _Y = CNNTrainer.format_inputs(data, target_class_cnt, )
            feed_dict = {x: _X, y_: _Y, keep_prob: 1.0}
            # each metric tensor is evaluated with its own eval() call
            accuracy_ratio = self.accuracy.eval(feed_dict=feed_dict, session=sess)
            recall_ratio = self.recall.eval(feed_dict=feed_dict, session=sess)
            precision_ratio = self.precision.eval(feed_dict=feed_dict, session=sess)
            example_cnt = self.example_cnt.eval(feed_dict=feed_dict, session=sess)
            correct_cnt = self.correct_cnt.eval(feed_dict=feed_dict, session=sess)
            both_right_cnt = self.both_right_cnt.eval(feed_dict=feed_dict, session=sess)
            true_right_cnt = self.true_right_cnt.eval(feed_dict=feed_dict, session=sess)
            predicted_right_cnt = self.predicted_right_cnt.eval(feed_dict=feed_dict, session=sess)
            result = CNNTrainer.Evaluator.Result(
                accuracy_ratio, recall_ratio, precision_ratio,
                example_cnt, correct_cnt, both_right_cnt,
                true_right_cnt, predicted_right_cnt,
            )
            return result

        class Result(object):
            """Immutable-by-convention bag of one evaluation's metrics."""

            def __init__(
                    self,
                    accuracy_ratio, recall_ratio, precision_ratio,
                    example_cnt, correct_cnt, both_right_cnt,
                    true_right_cnt, predicted_right_cnt,
            ):
                super(CNNTrainer.Evaluator.Result, self).__init__()
                self.accuracy_ratio = accuracy_ratio
                self.recall_ratio = recall_ratio
                self.precision_ratio = precision_ratio
                self.example_cnt = example_cnt
                self.correct_cnt = correct_cnt
                self.both_right_cnt = both_right_cnt
                self.true_right_cnt = true_right_cnt
                self.predicted_right_cnt = predicted_right_cnt

            def __str__(self):
                # human-readable dump used in logging.info calls above
                return "result {accuracy: %g, recall: %g, precision: %g, " \
                       "example_cnt: %g, correct_cnt: %g, both_right_cnt: %g, " \
                       "true_right_cnt: %g, predicted_right_cnt: %g}" \
                       % (self.accuracy_ratio, self.recall_ratio, self.precision_ratio,
                          self.example_cnt, self.correct_cnt, self.both_right_cnt,
                          self.true_right_cnt, self.predicted_right_cnt,)
|
"""
Obtain a training data set that can be used to train the network
"""
import os
import sys
sys.path.append(os.path.split(sys.path[0])[0])
import shutil
from time import time
import numpy as np
from tqdm import tqdm
import SimpleITK as sitk
import scipy.ndimage as ndimage
import parameter as para
# Rebuild the training set directory from scratch.
if os.path.exists(para.training_set_path):
    shutil.rmtree(para.training_set_path)

new_ct_path = os.path.join(para.training_set_path, 'ct')
new_seg_dir = os.path.join(para.training_set_path, 'seg')

os.mkdir(para.training_set_path)
os.mkdir(new_ct_path)
os.mkdir(new_seg_dir)

start = time()
for file in tqdm(os.listdir(para.train_ct_path)):

    # Load CT and gold standard into memory
    ct = sitk.ReadImage(os.path.join(para.train_ct_path, file), sitk.sitkInt16)
    ct_array = sitk.GetArrayFromImage(ct)

    seg = sitk.ReadImage(os.path.join(para.train_seg_path, file.replace('volume', 'segmentation')), sitk.sitkUInt8)
    seg_array = sitk.GetArrayFromImage(seg)

    # Fusion of the liver and liver tumor labels in the gold standard into one
    seg_array[seg_array > 0] = 1

    # Truncate the gray value outside the threshold window [lower, upper]
    ct_array[ct_array > para.upper] = para.upper
    ct_array[ct_array < para.lower] = para.lower

    # Downsample the CT data on the cross-section and resample, adjusting the
    # z-axis spacing of all data to para.slice_thickness (1mm).
    # BUG FIX: the original only zoomed seg_array, so the CT kept its native
    # z-resolution: the slice indices computed from the zoomed seg were then
    # applied to the wrong axis scale, and the spacing written below already
    # assumed the in-plane down-scaling had happened.
    ct_array = ndimage.zoom(ct_array, (ct.GetSpacing()[-1] / para.slice_thickness, para.down_scale, para.down_scale), order=3)
    seg_array = ndimage.zoom(seg_array, (ct.GetSpacing()[-1] / para.slice_thickness, 1, 1), order=0)

    # Find the slices at the beginning and end of the liver area
    z = np.any(seg_array, axis=(1, 2))
    start_slice, end_slice = np.where(z)[0][[0, -1]]

    # Expand slices in both directions, clamped to the volume bounds
    start_slice = max(0, start_slice - para.expand_slice)
    end_slice = min(seg_array.shape[0] - 1, end_slice + para.expand_slice)

    # If the number of remaining slices is less than `size`, give up this
    # volume. There is very little such data, so don't worry.
    if end_slice - start_slice + 1 < para.size:
        print('!!!!!!!!!!!!!!!!')
        print(file, 'have too little slice', ct_array.shape[0])
        print('!!!!!!!!!!!!!!!!')
        continue

    ct_array = ct_array[start_slice:end_slice + 1, :, :]
    seg_array = seg_array[start_slice:end_slice + 1, :, :]

    # Finally save the data as nii, carrying over direction/origin and the
    # adjusted spacing (down-scaled in-plane for CT, 1mm slices for both).
    new_ct = sitk.GetImageFromArray(ct_array)
    new_ct.SetDirection(ct.GetDirection())
    new_ct.SetOrigin(ct.GetOrigin())
    new_ct.SetSpacing((ct.GetSpacing()[0] * int(1 / para.down_scale), ct.GetSpacing()[1] * int(1 / para.down_scale), para.slice_thickness))

    new_seg = sitk.GetImageFromArray(seg_array)
    new_seg.SetDirection(ct.GetDirection())
    new_seg.SetOrigin(ct.GetOrigin())
    new_seg.SetSpacing((ct.GetSpacing()[0], ct.GetSpacing()[1], para.slice_thickness))

    sitk.WriteImage(new_ct, os.path.join(new_ct_path, file))
    sitk.WriteImage(new_seg, os.path.join(new_seg_dir, file.replace('volume', 'segmentation').replace('.nii', '.nii.gz')))
#!/usr/bin/python
#This file converts the transcript range into genomic range
#This files requies alignment data along with file with query ranges
def main():
    """Translate transcript-relative coordinate ranges into genomic ranges.

    Reads alignments from 'file1' (transcript id, chromosome, genomic start,
    CIGAR), query ranges from 'file4' (transcript id, start, end), and writes
    one tab-separated line per resolvable query to 'result':
    <query line>\t<chromosome>\t<genomic start>\t<genomic end>.
    """
    import re

    # Alignment lookup: transcript id -> [chromosome, genomic start, CIGAR].
    alignments = {}
    with open("file1", "r") as file1:
        for line in file1:
            hit = re.search("(^TR\\d+)\t(CHR\\d+)\t(\\d+)\t(.+)", line)
            if hit:
                alignments[hit.group(1)] = [hit.group(2), hit.group(3), hit.group(4)]

    cigar_pattern = re.compile(r'([0-9]+)([MIDNSHPX=])')
    # CIGAR ops that consume the query (transcript) / the reference (genome).
    consumes_query = frozenset('MIS=X')
    consumes_ref = frozenset('MDN=X')

    with open("file4", "r") as file4, open("result", "w+") as file_out:
        for raw in file4:
            query = re.search("(^TR\\d+)\t(\\d+)\t(\\d+)", raw)
            # BUG FIX: the original called y2.group(1) before checking that
            # the regex matched, crashing on any malformed line.
            if query is None or query.group(1) not in alignments:
                continue
            record = alignments[query.group(1)]
            line_text = raw.rstrip('\n')
            val_g = int(record[1])      # genomic start of the alignment
            val_t1 = int(query.group(2))  # transcript range start
            val_t2 = int(query.group(3))  # transcript range end
            # Expand the CIGAR string, e.g. '3M2D' -> 'MMMDD'.
            ops = ''.join(int(num) * letter
                          for num, letter in cigar_pattern.findall(record[2]))
            # Walk the expanded CIGAR, advancing transcript and genome
            # counters until the transcript range [val_t1, val_t2] is covered.
            t1_count = t2_count = g1_count = g2_count = 0
            for op in ops:
                if op in consumes_query:
                    t2_count += 1
                    if t1_count <= val_t1:
                        t1_count += 1
                    if t2_count > val_t2:
                        break
                if op in consumes_ref:
                    g2_count += 1
                    if t1_count <= val_t1:
                        g1_count += 1
            total1 = val_g + g1_count
            total2 = val_g + g2_count
            # BUG FIX: results were printed to stdout while the opened
            # 'result' file stayed empty; write them to the file instead.
            file_out.write('{0}\t{1}\t{2}\t{3}\n'.format(line_text, record[0], total1, total2))


if __name__ == "__main__":
    # BUG FIX: guard the entry point so importing this module does not
    # immediately run the conversion.
    main()
|
from django.conf.urls.defaults import patterns, include, url

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# URL routes for the LSMS app.
# BUG FIX: the three user/disable, user/getpass and user/logout patterns were
# missing the leading '^' anchor, unlike every other rule, so they could match
# anywhere inside a URL; they are now anchored consistently.
urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'LSMS.views.home', name='home'),
    url(r'^stuhome', 'LSMS.views.stuHome'),
    url(r'^teahome', 'LSMS.views.teaHome'),
    url(r'^cmhome', 'LSMS.views.cmHome'),
    url(r'^user/register', 'LSMS.views.register'),
    url(r'^user/authorize', 'LSMS.views.authorize'),
    url(r'^user/modpass', 'LSMS.views.modPass'),
    url(r'^user/disable', 'LSMS.views.disableUser'),
    url(r'^user/getpass', 'LSMS.views.getPass'),
    url(r'^user/logout', 'LSMS.views.logout'),
    url(r'^read/sturoll', 'LSMS.views.readStuRoll'),
    url(r'^read/stuscore', 'LSMS.views.readStuScore'),
    url(r'^read/stuperf', 'LSMS.views.readStuPerf'),
    url(r'^read/stunoti', 'LSMS.views.readStuNoti'),
    url(r'^list/sturoll', 'LSMS.views.listStuRoll'),
    url(r'^list/stuscore', 'LSMS.views.listStuScore'),
    url(r'^list/stuperf', 'LSMS.views.listStuPerf'),
    url(r'^list/stunoti', 'LSMS.views.listStuNoti'),
    url(r'^list/course', 'LSMS.views.listCourse'),
    url(r'^list/class', 'LSMS.views.listClass'),
    url(r'^new/sturoll', 'LSMS.views.newStuRoll'),
    url(r'^new/stuevent', 'LSMS.views.newStuEvent'),
    url(r'^new/stuscore', 'LSMS.views.newStuScore'),
    url(r'^new/stuperf', 'LSMS.views.newStuPerf'),
    url(r'^new/stunoti', 'LSMS.views.newStuNoti'),
    url(r'^new/class', 'LSMS.views.newClass'),
    url(r'^mod/sturoll', 'LSMS.views.modStuRoll'),
    url(r'^mod/class', 'LSMS.views.modClass'),
    url(r'^del/stunoti', 'LSMS.views.delStuNoti'),
    url(r'^del/stuevent', 'LSMS.views.delStuEvent'),
    url(r'^del/class', 'LSMS.views.delClass'),
    url(r'^del/sturoll', 'LSMS.views.delStuRoll'),
    url(r'^msg', 'LSMS.views.msg'),
    # url(r'^LSMS/', include('LSMS.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
'''
Pre-training DNNs with Noisy Data
the DNNs contains one Gaussian-Bernoulli RBM and two Bernoulli-Bernoulli RBMs
'''
import numpy as np
import tensorflow as tf
from tfrbm import BBRBM, GBRBM
import os
import scipy.io as scio
def prepare_data(file_path):
    """Load every .mat file under `file_path` and stack their 'htkdata'
    matrices (transposed to frames-major) into one array.

    file_path: directory containing .mat files, each with a (257, n_frames)
               'htkdata' matrix.
    Returns an array of shape (total_frames, 257); shape (0, 257) when the
    directory is empty.
    """
    frames = []
    for name in os.listdir(file_path):
        data = scio.loadmat(os.path.join(file_path, name))
        # transpose each (257, n_frames) matrix to frames-major
        frames.append(np.array(data['htkdata']).transpose())
    if not frames:
        # keep the original's "no data" shape without the dummy seed row
        return np.zeros((0, 257))
    # PERF FIX: the original vstack-ed inside the loop onto a dummy zero row
    # (quadratic copying, then sliced the dummy row off); stack once instead.
    return np.vstack(frames)
# prepare the noisy data set
noisy_data_path = "DataSet/train/noisy_speech/"
input_data = prepare_data(file_path = noisy_data_path)

# ---- pretrain the Gaussian-Bernoulli RBM on the real-valued features ----
gb_n_visible = input_data.shape[1]
gb_n_hid = 2048
gb_learning_rate=0.01
gb_momentum=0.95
gb_err_function='mse'
sigma=1
gbrbm = GBRBM(n_visible = gb_n_visible, n_hidden = gb_n_hid, learning_rate = gb_learning_rate, momentum = gb_momentum, err_function = gb_err_function, use_tqdm=False, sample_visible=True, sigma = sigma)
gb_n_epoches = 40
gb_batch_size=128
errs = gbrbm.fit(data_x = input_data, n_epoches = gb_n_epoches, batch_size = gb_batch_size, shuffle=True, verbose=True)
gb_filename = 'pretrain_models/gbrbm.ckpt'
gb_name = 'rbm'
gbrbm.save_weights(filename = gb_filename, name = gb_name)

# ---- pretrain the first Bernoulli-Bernoulli RBM on the GB-RBM's hidden activations ----
bb_input_data_1 = gbrbm.transform(input_data)
bb_input_data_1 = np.array(bb_input_data_1)
bb_n_visible_1 = bb_input_data_1.shape[1]
bb_n_hid_1 = 2048
bb_learning_rate_1 = 0.01
bb_momentum_1 = 0.95
bb_err_function_1 = 'mse'
bbrbm_1 = BBRBM(n_visible = bb_n_visible_1, n_hidden = bb_n_hid_1, learning_rate = bb_learning_rate_1, momentum = bb_momentum_1, err_function = bb_err_function_1, use_tqdm=False)
bb_n_epoches_1 = 10
bb_batch_size_1 = 128
errs_1 = bbrbm_1.fit(data_x = bb_input_data_1, n_epoches = bb_n_epoches_1, batch_size = bb_batch_size_1, shuffle=True, verbose=True)
bb_filename_1 = 'pretrain_models/bbrbm_1.ckpt'
bb_name_1 = 'rbm'
bbrbm_1.save_weights(filename = bb_filename_1, name = bb_name_1)

# ---- pretrain the second Bernoulli-Bernoulli RBM on the first BB-RBM's activations ----
bb_input_data_2 = bbrbm_1.transform(bb_input_data_1)
bb_input_data_2 = np.array(bb_input_data_2)
bb_n_visible_2 = bb_input_data_2.shape[1]
bb_n_hid_2 = 2048
bb_learning_rate_2 = 0.01
bb_momentum_2 = 0.95
bb_err_function_2 = 'mse'
bbrbm_2 = BBRBM(n_visible = bb_n_visible_2, n_hidden = bb_n_hid_2, learning_rate = bb_learning_rate_2, momentum = bb_momentum_2, err_function = bb_err_function_2, use_tqdm=False)
bb_n_epoches_2 = 5
bb_batch_size_2 = 128
# BUG FIX: the original called bbrbm_1.fit here (copy-paste error), so the
# second RBM was never trained and its saved weights were random.
errs_2 = bbrbm_2.fit(data_x = bb_input_data_2, n_epoches = bb_n_epoches_2, batch_size = bb_batch_size_2, shuffle=True, verbose=True)
bb_filename_2 = 'pretrain_models/bbrbm_2.ckpt'
bb_name_2 = 'rbm'
bbrbm_2.save_weights(filename = bb_filename_2, name = bb_name_2)
|
# Read an integer and print a value depending on its sign.
A = int(input("A= "))
if A > 0:
    print(A - 8)
elif A == 0:
    print(10)
else:
    # A < 0 is the only remaining case for an integer
    print(A + 6)
|
# Configuration for an e3sm_diags model-vs-model comparison run.

# Climatology directories for the reference and test cases (the same dataset
# on both sides here, so the comparison is a self-check).
reference_data_path = "../testing_data/test_model_data_for_e3sm_diags/climatology"
test_data_path = "../testing_data/test_model_data_for_e3sm_diags/climatology"

# Case names used for labeling the output.
test_name = "20161118.beta0.F1850COSP"
reference_name = "20161118.beta0.F1850COSP"

# Directory where the generated results are written.
results_dir = "model_climo_vs_model_climo_results"

# Comparison mode, plotting backend (matplotlib), and debug switch.
run_type = "model_vs_model"
backend = "mpl"
debug = True
|
import unittest
import main
class TestMain(unittest.TestCase):
    """Unit tests for the `main` module."""

    def test_do_stuff(self):
        # do_stuff is expected to map 20 to 25.
        value = 20
        self.assertEqual(main.do_stuff(value), 25)
# BUG FIX: unittest.main() calls sys.exit(), so the trailing `print(main)`
# in the original was unreachable dead code (removed). The runner is also
# guarded so importing this module does not execute — and exit from — the
# test suite.
if __name__ == "__main__":
    unittest.main()
# -*- coding: utf-8 -*-
import inject
import json
import uuid
import re
import logging
import psycopg2
import asyncio
from asyncio import coroutine
from autobahn.asyncio.wamp import ApplicationSession
from model.registry import Registry
from model.connection.connection import Connection
from model.users.users import User, UserPassword
from model.login.login import Login
from model.systems.systems import Systems
class FceWamp(ApplicationSession):
    """WAMP session exposing user-facing RPCs (system listing, password
    change); blocking DB work is delegated to the default executor."""

    def __init__(self, config=None):
        logging.debug('instanciando')
        ApplicationSession.__init__(self, config)
        # Resolve collaborators from the dependency-injection container.
        registry = inject.instance(Registry)
        self.reg = registry.getRegistry('dcsys')
        self.conn = Connection(self.reg)
        self.loginModel = inject.instance(Login)
        self.systemsModel = inject.instance(Systems)

    @coroutine
    def onJoin(self, details):
        # Register the async wrappers as WAMP procedures once connected.
        yield from self.register(self.listSystems_async, 'fce.listSystems')
        yield from self.register(self.changePassword_async, 'fce.changePassword')

    def listSystems(self, sid):
        """Return the systems visible to the user behind session `sid`.

        Blocking (uses a pooled DB connection); run via listSystems_async.
        """
        con = self.conn.get()
        try:
            userId = self.loginModel.getUserId(con, sid)
            # NOTE(review): calls the Systems class directly instead of the
            # injected self.systemsModel instance — confirm this is intended.
            systems = Systems.listSystems(con, userId)
            return systems
        finally:
            # always return the connection to the pool
            self.conn.put(con)

    @coroutine
    def listSystems_async(self, sid):
        # Run the blocking DB work in the event loop's default executor.
        loop = asyncio.get_event_loop()
        r = yield from loop.run_in_executor(None, self.listSystems, sid)
        return r

    def changePassword(self, sid, password):
        """Set `password` on every password record of the user behind `sid`
        and commit.

        Blocking; run via changePassword_async.
        """
        con = self.conn.get()
        try:
            userId = self.loginModel.getUserId(con, sid)
            passwords = UserPassword.findByUserId(con, userId)
            for passwd in passwords:
                passwd.setPassword(password)
                passwd.persist(con)
            con.commit()
        finally:
            self.conn.put(con)

    @coroutine
    def changePassword_async(self, sid, password):
        # Run the blocking DB work in the event loop's default executor.
        loop = asyncio.get_event_loop()
        r = yield from loop.run_in_executor(None, self.changePassword, sid, password)
        return r
|
def fun(arr):
    """Attempt to compute the maximum product of elements of `arr` by
    sorting and special-casing on the number of negative values.

    NOTE(review): several fragile spots to confirm against the intended spec:
    - an empty `arr` raises NameError (`i` is never bound by the loop);
    - if every element is negative, the loop finishes without `break`,
      leaving `i` at len(arr)-1 rather than the count of negatives;
    - the `i != 1` test in the odd branch skips at most one negative and
      only when `i` happens to equal 1 — looks suspicious.
    """
    arr = sorted(arr)
    # After this loop, i is the index of the first non-negative element
    # (i.e. the number of negatives) when one exists.
    for i in range(len(arr)):
        if(arr[i]>=0):
            break
    if(i%2==0):
        # Even number of negatives: multiply every non-zero element.
        ans = 1
        for a in arr:
            if(a!=0):
                ans = ans*a
    else:
        # Odd number of negatives: try to drop one negative factor.
        ans = 1
        for a in arr:
            if(a<0 and i!=1):
                ans = ans*a
                i = i-1
            if(a>0):
                ans = ans*a
    return ans
# Demo: print the result for a sample containing one negative and a zero.
values = [-1, 0]
print(fun(values))
import numpy as np
import PrplxWrap
import sys
import pprint as pp
import interpolate as ip
import pylitho_exceptions as pyle
import copy
import Grid
from PhysicalConstants import *
reload(ip)
reload(PrplxWrap)
reload(pyle)
reload(Grid)
class Container(object):
@property
def T(self):
return self._T
@T.setter
def T(self, value):
self._T = value
self.perplexResult = None
@T.deleter
def T(self):
self._T = None
self.perplexResult = None
@property
def P(self):
return self._P
@P.setter
def P(self, value):
self._P = value
self.perplexResult = None
@P.deleter
def P(self):
self._P = None
self.perplexResult = None
def __init__(self, copyfrom=None, compos=None, masses=None, perplex=None):
if copyfrom is None:
if compos is None:
self.components = []
self.masses = []
self.ncomponents = 0
self._T = None
self._P = None
self._H = None
else:
self.components = compos[:]
self.ncomponents = len(self.components)
if masses is None:
self.masses = [0] * self.ncomponents
else:
if self.ncomponents != len(masses):
raise pyle.PyLerr_Undefined("len(components) != len(masses)")
self.masses = masses[:]
self._T = None
self._P = None
self._H = None
else:
if not isinstance(copyfrom, Container):
raise pyle.PyLerr_TypeError("Argument is not instance of Container")
self.components = copy.components[:]
self.masses = copyfrom.masses[:]
self.ncomponents = copyfrom.ncomponents
self._T = copyfrom._T
self._P = copyfrom._P
self._H = copyfrom._H
self.perplex = perplex
self.perplexResult = None
#T = property(get_T, set_T, del_T, "Temperature")
#P = property(get_P, set_P, del_P, "Pressure")
#self.H = property(get_H, set_H, del_H, "Enthalpy")
def updatePerplex(self, perplex=None):
if perplex is None:
if self.perplex is None:
raise pyle.PyLerr_Undefined("No valid perplex module")
else:
perplex = self.perplex
else:
self.perplex = perplex
if set(self.components) != set(perplex.callerComponents) or len(self.components) != len(perplex.callerComponents):
raise pyle.PyLerr_Undefined("component set mismatch")
if self._P is None or self._T is None:
raise pyle.PyLerr_Undefined("P or T not set")
idxs = [self.components.index(a) for a in perplex.callerComponents]
masses = np.array(self.masses, dtype=float)
masses = masses/np.sum(masses)
masses = masses[idxs]
self.perplexResult = None
self.perplexResult = perplex.phaseq(self._P, self._T, masses.tolist(), debug=False)
def getVolume(self, perplex=None):
if self.perplexResult is None:
self.updatePerplex(perplex=perplex)
rho = self.perplexResult['SYSPROP'][self.perplex.syspropnum['rho']]
return np.sum(self.masses)/rho
def move_isentropic(self, dP, perplex=None):
if self.perplexResult is None:
self.updatePerplex(perplex=perplex)
if self._P is None or self._T is None:
raise pyle.PyLerr_Undefined("T/P not defined")
maxerr = 0.01
curerr = 1.1*maxerr
T0 = self.T
P0 = self.P
V0 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['V']]
Cp0 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['Cp']]
S0 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['S']]
H0 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['H']]
dH = V0 * dP # q=0, only expansion work
dT = dH / Cp0
self.T = T0 + dT
self.P = P0 + dP
self.updatePerplex(perplex=perplex)
S1 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['S']]
Cp1 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['Cp']]
dSph = S1-S0 # entropy mismatch (entropy change of phase changes)
dHadj = (-dSph) * self.T # to gain it back, modify enthalpy
dTadj = dHadj / Cp1 # calculate corresponding temp change
self.T = self.T + dTadj
self.updatePerplex(perplex=perplex)
S1 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['S']]
Cp1 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['Cp']]
def move_adiab(self, dPtot, dP=3e6*1e-5, perplex=None):
# Assume adiabatic AND isentropic process
#
# dS = 0
# dH = V dp
#
# 1. Read current H (H_1), S (S_1) and V (V_1) at T_1, P_1
# 2. Calc dH = V dP
# 3. Calc H_2 = H_1 + dH
# 4. Find T_2, P_2==P_1 where H==H_2
# 5. Check that S_2 == S_1
#
# 1. Estimate dH: dH = V dP
# 2. Estimate dT: dT = dH / Cp
# 3.
method = 2
if self.perplexResult is None:
self.updatePerplex(perplex=perplex)
if self._P is None or self._T is None:
raise pyle.PyLerr_Undefined("T/P not defined")
maxdT = 5.
if dPtot < 0:
dPdir = -1.
if dP > 0:
dP = -dP
else:
dPdir = 1.
dPsum = 0.0
dTsum = 0.0
T0 = self._T
rho0 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['rho']] # kg/m3
Cp0 = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['Cp']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']] # J K^-1 kg^-1
alpha0 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['alpha']]
#V0 = sum(self.masses) / rho0
V0 = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['V']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']] # J/bar/kg
H0 = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['H']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']] # J/kg
origP = self.P
origT = T0
origV = V0
origH = H0
origCp = Cp0
while abs(dPtot - dPsum) > dPdir*dPtot/1e3:
dP0 = dPdir * min(dPdir * dP, dPdir * dPtot - dPdir * dPsum)
if method == 1:
dT0 = T0 * alpha0 * dP0*1e5 / (Cp0 * rho0)
elif method == 2:
dH0 = dP0 * V0
dT0 = dH0 / Cp0
self.T = self.T + dT0
self.P = self.P + dP0
dPsum = dPsum + dP0
print dP0, dPsum, dT0, self.T, self.P
self.updatePerplex(perplex=perplex)
T0 = self.T
Cp0 = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['Cp']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']]
alpha0 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['alpha']]
rho0 = self.perplexResult['SYSPROP'][self.perplex.syspropnum['rho']]
V0 = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['V']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']]
if method == 2:
H1 = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['H']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']]
print "H mismatch: ", (H1-H0-dH0), (H1-H0), dH0
H0 = H1
print self.perplexResult['NAMEPHASES']
if "melt(HP)" in self.perplexResult['NAMEPHASES']:
print "Melt!"
finalV = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['V']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']]
finalH = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['H']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']]
finalCp = 1000. * self.perplexResult['SYSPROP'][self.perplex.syspropnum['Cp']] / self.perplexResult['SYSPROP'][self.perplex.syspropnum['N']]
print "dPtot:", dPtot
print "P, T, V, H, Cp:"
print "orig: ", origP, origT, origV, origH, origCp
print "final:", self.P, self.T, finalV, finalH, finalCp
def addComponent(self, component, mass, overwrite=False):
if component in self.components:
idx = self.components.index(component)
if overwrite:
masses[idx] = mass
else:
masses[idx] = masses[idx] + mass
else:
self.components.append(component)
self.components.append(mass)
self.perplexResult = None
class Crust:
    def __init__(self, datfile, config=None):
        """Build the 1-D crust model: grid, boundary conditions, initial
        fields, and the PerpleX wrapper (reading `datfile`); then compute
        initial phase equilibria and fit the grid to the PerpleX densities.

        config: optional dict overriding the defaults below.
        """
        if config is None:
            self.config = {
                'DT_MULTIPLIER' : 0.5,
                'T_ITER_THRESHOLD' : 1e-1, # converge criteria for dT=f(dH) iteration (Kelvins)
                'GRID_NX' : 35, # num of grid points (= num of elements + 1)
                'GRID_H' : 35000.0, # original height of the model, meters
                'GRID_STATIC_POINT' : 0, # which grid point is kept static (x doesn't change) during volume changes
                'BND_UPPER_PRESSURE' : 3000.0, # pressure at the upper boundary, bar
                'BND_UPPER_TEMPERATURE' : 273.0 + 300.0, # temperature at the upper boundary, Kelvin
                'BND_LOWER_TEMPERATURE' : 273.0 + 600.0, # temperature at the lower boundary, Kelvin
                'RECORD_TYPE' : 1,
                'EVENT_TYPE' : [2, 3],
                'EVENT_TIMING' : [1e3 * SECINYR, 500e3 * SECINYR],
                'INI_FIELD_TYPE' : 1,
            }
        else:
            self.config = config
        self.time = 0.0 # model time
        self.last_dt = 0.0 # latest time step taken
        self.timestep = 0 # counter for time steps

        # (initial) grid specification
        self.nx = self.config['GRID_NX'] # num of grid points
        self.ne = self.nx-1 # num of elements
        self.xs = np.linspace(0.0,self.config['GRID_H'],self.nx) # xs at grid points
        self.xs_e = 0.5 * (self.xs[0:self.nx-1] + self.xs[1:self.nx]) # xs at elements

        # boundary condition stuff
        self.bnd_upper_pressure = self.config['BND_UPPER_PRESSURE']
        self.bnd_upper_temperature = self.config['BND_UPPER_TEMPERATURE']
        self.bnd_lower_temperature = self.config['BND_LOWER_TEMPERATURE']

        # physical constants
        self.accel_g = 9.81

        # (initial) field specification
        # "_e" refers to elements (grid points in the middle of elements);
        # without '_e': the main grid points, defining (e.g.) upper and lower boundaries
        # of the model and placed also in between lithological units.
        # Num of main grid points (self.nx) = num of element grid points (self.ne) + 1
        # Most of the data is stored in the elements.
        self.T_e = self.getIniField("temp", self.xs_e) # K
        self.C_e = self.getIniField("compo", self.xs_e) # wt% (0..1)
        self.k_e = self.getIniField("conductivity", self.xs_e) #
        self.Cp_e = self.getIniField("heatcapacity", self.xs_e) # J/kgK
        self.rho_e = self.getIniField("density", self.xs_e) # kg/m3
        self.pres_e = self.getIniField("pressure", self.xs_e) # bar
        self.pres = self.getIniField("pressure", self.xs)
        self.mass_e = self.getIniField("mass", self.xs_e) # kg
        #self.T = self.getIniField("temp", self.xs) # K # seems not to be needed....

        # initiate perplex
        self.components = self.getIniField("componames")
        self.perplex = PrplxWrap.perplex(datfile, callerCompnames=self.components)
        self.ncomponents = len(self.components)

        # latest perplex results
        self.perplexResult = None
        # flag to indicate whether perplex data calculated
        # for the grid points is up to date
        self.perplexOK = False

        # generate initial info by perplex
        self.updatePerplex(initial=True) # initial call also adjusts Cp/Rho and pressure and mass
        # update grid according to new density (=volume) values from PerpleX
        self.updateGridToRho() # in principle, after initial call to updatePerpleX(), this shouldn't do anything
    def output(self):
        """Placeholder for writing model output; not implemented yet."""
        pass
    def updatePressure(self):
        """Recompute lithostatic pressure by integrating element masses
        downward from the upper-boundary pressure (in bar; 1e-5 converts Pa
        to bar)."""
        # NB! and TODO:
        # Pressure changes involve change in enthalpy (dH=VdP)
        # which is not currently calculated
        self.pres[:] = self.bnd_upper_pressure
        self.pres_e[:] = self.bnd_upper_pressure
        # NOTE(review): loop stops at ne-1, so the last grid point
        # (self.pres[self.nx-1]) keeps the upper-boundary pressure — confirm
        # whether the bottom boundary pressure should also be accumulated.
        for i in range(1,self.ne):
            self.pres[i] += sum(self.mass_e[0:i]) * self.accel_g * 1e-5
            self.pres_e[i] = self.pres[i] # simplification: the pressure of the element is the pressure
                                          # at the upper surface of the element
    def updatePerplex(self, ielem=-1, initial=False):
        """Recompute PerpleX phase equilibria for the elements.

        ielem < 0: refresh the pressure field and rebuild results for every
        element; ielem >= 0: refresh only that element. `initial=True` (full
        pass only) additionally seeds Cp/rho, element masses, and pressure.
        Raises on PerpleX errors or NaN phase weights.
        """
        if ielem >= 0 and initial:
            raise Exception("ielem >=0 and initial == True in updatePerplex()")
        if ielem < 0:
            self.perplexResult = []
            self.updatePressure()
        for i in range(self.ne):
            if ielem < 0:
                self.perplexResult.append(self.perplex.phaseq(self.pres_e[i], self.T_e[i], self.C_e[i]))
                # `testval != testval` is a NaN check on the phase weights
                testval = np.sum(np.sum(np.array(self.perplexResult[-1]['WTPHASES'])))
                if (self.perplexResult[-1])['RETVAL'] != 0 or testval != testval:
                    pp.pprint(self.perplexResult[-1], stream=sys.stderr)
                    raise Exception("Ooo..ps")
                self.perplexOK = True
                if initial:
                    # seed material properties and masses from the first result
                    self.updateCpRho(i)
                    self.mass_e[i] = self.rho_e[i] * (self.xs[i+1] - self.xs[i])
                    self.updatePressure()
            elif ielem == i:
                self.perplexResult[ielem] = self.perplex.phaseq(self.pres_e[i], self.T_e[i], self.C_e[i])
                testval = np.sum(np.sum(np.array(self.perplexResult[ielem]['WTPHASES'])))
                if (self.perplexResult[ielem])['RETVAL'] != 0 or testval != testval:
                    pp.pprint(self.perplexResult[ielem], stream=sys.stderr)
                    raise Exception("Ooo..ps")
def updateCpRho(self, ielem=-1):
#if not self.perplexOK:
# self.updatePerplex()
if ielem >= 0:
doElems = [ielem]
else:
doElems = range(self.ne)
for i in doElems:
self.rho_e[i] = self.perplexResult[i]['SYSPROP'][self.perplex.syspropnum['rho']]
self.Cp_e[i] = G2KG * self.perplexResult[i]['SYSPROP'][self.perplex.syspropnum['Cp']] / self.perplexResult[i]['SYSPROP'][self.perplex.syspropnum['N']]
    def updateGridToRho(self):
        """Move grid-point coordinates so element widths match the current
        element volumes (mass / density), keeping GRID_STATIC_POINT fixed."""
        volumes = self.mass_e / self.rho_e # in 1D this is directly dx
        new_x = self.xs[:] * 0.0
        # points above the static point: accumulate element widths upward
        for i in range(0,self.config['GRID_STATIC_POINT']):
            new_x[i] = self.xs[self.config['GRID_STATIC_POINT']] - sum(volumes[i:self.config['GRID_STATIC_POINT']])
        # points at/below the static point: accumulate element widths downward
        for i in range(self.config['GRID_STATIC_POINT'],self.nx):
            new_x[i] = self.xs[self.config['GRID_STATIC_POINT']] + sum(volumes[self.config['GRID_STATIC_POINT']:i])
        self.xs[:] = new_x[:]
        print " * updateGridToRho(): New extents are ", new_x[0], new_x[-1]
def maxdt(self):
# return maximum time step for diffusion,
# assume that information for k, rho and Cp is up-to-date
dx = self.xs[1:self.nx] - self.xs[0:(self.nx-1)]
diff = self.k_e / (self.rho_e * self.Cp_e)
maxdt = min(0.5 * dx * dx / diff)
return maxdt
    def addEnthalpy(self, ielem, dH):
        """Add dH [J] of enthalpy to element `ielem`, iterating temperature
        (damped Newton-like steps, re-evaluating PerpleX each time) until
        the enthalpy matches. Returns the temperature change T1 - Tini [K].
        """
        # input:
        #  ielem: element to which the enthalpy is added
        #  dH: amount of added enthalpy [J]
        #if not self.perplexOK:
        #    self.updatePerplex()   # to make sure pressure field is OK.
        #                           # probably unnecessary.
        doNewIteration = True
        # dH is given in Joules (total), transform to J/kg
        dH = dH / (self.rho_e[ielem] * (self.xs[ielem+1] - self.xs[ielem]))
        Cp0 = G2KG * self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['Cp']] / self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['N']]
        T0 = self.T_e[ielem]
        H0 = G2KG * self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['H']] / self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['N']]
        Tini = T0
        Hini = H0
        Cpini = Cp0
        niter = 0
        stepDecr = 1e10 # decrease in solution between successive steps
        stepMultip = 1.0
        while doNewIteration:
            # Newton-like step toward the target enthalpy, damped by stepMultip
            T1 = T0 - stepMultip * (H0-Hini-dH) / Cp0
            self.T_e[ielem] = T1
            self.updatePerplex(ielem)
            # `testval != testval` detects NaN heat capacity from PerpleX
            testval = self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['Cp']]
            if testval != testval:
                pp.pprint(self.perplexResult[ielem], stream=sys.stderr)
                sys.exit(0)
            H1 = G2KG * self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['H']] / self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['N']]
            Cp1 = G2KG * self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['Cp']] / self.perplexResult[ielem]['SYSPROP'][self.perplex.syspropnum['N']]
            if abs(T1-T0) < self.config['T_ITER_THRESHOLD']:
                doNewIteration = False
            else:
                if abs(T1-T0) >= stepDecr:
                    # halve the step
                    stepMultip = stepMultip / 2.0
                stepDecr = abs(T1-T0)
                niter = niter + 1
                if niter > 10:
                    sys.stdout.write(" WARN niter = " + str(niter) + "(T1-T0 =" + str(T1-T0) + ")\r")
                T0 = T1
                H0 = H1
                Cp0 = Cp1
        #if niter > 0:
        #    print "Needed", niter, "iterations"
        # temperature change attributable to phase reactions (beyond dH/Cp)
        T_react = T1 - Tini - dH/Cpini
        print "dH = ", dH, "; dT = ", (T1-Tini), "; Cp = ", Cp1, ", Treact = ", T_react
        return T1-Tini
    def diffuseT(self, dt = 0.0):
        """Advance heat diffusion by one explicit time step.

        dt <= 0 selects DT_MULTIPLIER * maxdt() automatically. Computes heat
        flux divergence per element, converts it to enthalpy increments, and
        applies them through addEnthalpy (which handles phase changes).
        """
        self.timestep = self.timestep + 1
        print " * Diffusion, time step", self.timestep, ", time = ", (self.time / SECINYR), " yrs"
        T_e = self.T_e
        if dt <= 0:
            # estimate largest time step, use a fraction of that
            dt = self.config['DT_MULTIPLIER'] * self.maxdt()
        self.last_dt = dt
        # calculate conductivity for main grid points by averaging from elements (wgtd avg)
        # boundaries excluded
        k_g = 0.5 * (self.xs[2:self.nx] - self.xs[1:(self.nx-1)]) * self.k_e[1:self.ne]
        k_g = k_g + 0.5 * (self.xs[1:(self.nx-1)] - self.xs[0:(self.nx-2)]) * self.k_e[0:(self.ne-1)]
        k_g = k_g / (0.5 * (self.xs[2:self.nx] - self.xs[0:(self.nx-2)]))
        n_kg = k_g.size
        ## heat diffusion for
        # ... internal grid points.
        # At elemental grid points we define T, rho, Cp, k, d2T/dz2;
        # at main grid points we define dT/dz.
        dT1 = k_g[1:n_kg] * (T_e[2:self.ne] - T_e[1:(self.ne-1)]) / (self.xs_e[2:self.ne] - self.xs_e[1:(self.ne-1)])
        dT2 = k_g[0:(n_kg-1)] * (T_e[1:(self.ne-1)] - T_e[0:(self.ne-2)]) / (self.xs_e[1:(self.ne-1)] - self.xs_e[0:(self.ne-2)])
        d2T = (dT1 - dT2) / (0.5 * (self.xs_e[2:self.ne] - self.xs_e[0:(self.ne-2)]))
        #DTinternal = d2T * dt / (self.rho_e[1:(self.ne-1)] * self.Cp_e[1:(self.ne-1)])
        DHinternal = d2T * dt # J/m3
        DHinternal = DHinternal * (self.xs[2:(self.nx-1)] - self.xs[1:(self.nx-2)]) # = Cp * m * dT = dH
        # ... uppermost grid point (fixed upper boundary temperature)
        dT1 = k_g[0] * (T_e[1] - T_e[0]) / (self.xs_e[1] - self.xs_e[0])
        dT2 = self.k_e[0] * (T_e[0] - self.bnd_upper_temperature) / (self.xs_e[0]-self.xs[0])
        d2T = (dT1 - dT2) / (0.5 * (self.xs_e[1] - (-self.xs_e[0] + self.xs[0])))
        #DTupper = d2T * dt / (self.rho_e[0] * self.Cp_e[0])
        DHupper = d2T * dt
        DHupper = DHupper * (self.xs[1] - self.xs[0])
        # ... lowermost grid point (fixed lower boundary temperature)
        dT1 = self.k_e[self.ne-1] * (self.bnd_lower_temperature - T_e[self.ne-1]) / (self.xs[self.nx-1]-self.xs_e[self.ne-1])
        dT2 = k_g[n_kg-1] * (T_e[self.ne-1] - T_e[self.ne-2]) / (self.xs_e[self.ne-1] - self.xs_e[self.ne-2])
        d2T = (dT1 - dT2) / (0.5 * (self.xs[self.nx-1] + (self.xs[self.nx-1]-self.xs_e[self.ne-1]) - self.xs_e[self.ne-2]))
        #DTlower = d2T * dt / (self.rho_e[self.ne-1] * self.Cp_e[self.ne-1])
        DHlower = d2T * dt
        DHlower = DHlower * (self.xs[self.nx-1]-self.xs[self.nx-2])
        # apply the enthalpy increments element by element
        self.addEnthalpy(0,DHupper)
        for ielem in range(1,self.ne-1):
            if (DHinternal[ielem-1] != 0.0):
                self.addEnthalpy(ielem, DHinternal[ielem-1])
        self.addEnthalpy(self.ne-1,DHlower)
        self.perplexOK = True # ... since we just called addEnthalpy on all elements
        self.time = self.time + self.last_dt
        print "\tdt used: ", self.last_dt/SECINYR, "yrs"
def getIniField(self, field, xs=[0]):
    """Build the initial profile of a model field over the grid xs.

    field selects what to build ("temp", "componames", "compo",
    "conductivity", "heatcapacity", "mass", "density", "pressure");
    the profile shape is chosen by config['INI_FIELD_TYPE'].
    Raises Exception for an unknown field name or field type.
    """
    ftype = self.config['INI_FIELD_TYPE']
    if field == "temp":
        if ftype not in (0, 1):
            raise Exception("Invalid fieldType")
        # Linear geotherm between the two boundary temperatures.
        span = self.bnd_lower_temperature - self.bnd_upper_temperature
        return self.bnd_upper_temperature + span * (xs - xs[0]) / (xs[-1] - xs[0])
    if field == "componames":
        if ftype not in (0, 1):
            raise Exception("Invalid fieldType")
        # Oxide order must match the weight fractions of "compo" below.
        return ['NA2O', 'MGO', 'AL2O3', 'SIO2', 'K2O',
                'CAO', 'TIO2', 'MNO', 'FEO', 'H2O']
    if field == "compo":
        if ftype not in (0, 1):
            raise Exception("Invalid fieldType")
        base = [0.0327, 0.0248, 0.1540, 0.6662, 0.0280,
                0.0359, 0.0064, 0.0010, 0.0504, 0.0100]
        profile = []
        for pos in xs:
            row = list(base)
            if ftype == 1 and pos > 0.9 * xs[-1]:
                # fieldType 1: dry (zero H2O) in the lowermost 10 %.
                row[9] = 0.0000
            profile.append(row)
        return profile
    if field == "conductivity":
        if ftype not in (0, 1):
            raise Exception("Invalid fieldType")
        return 0.0 * xs + 3.5
    if field in ("heatcapacity", "mass", "density", "pressure"):
        # Dummy zero profiles; Perple_X fills these in properly later.
        return 0.0 * xs
    raise Exception("Invalid field")
def initRecord(self):
    """Prepare the time-series recording buffers (RECORD_TYPE 1 only)."""
    if self.config['RECORD_TYPE'] != 1:
        raise Exception("Invalid RECORD_TYPE")
    self.recdata = []          # one gridded record per recorded time step
    self.recdata_depths = []   # refreshed on every doRecord() call
    # Expressions (eval'd by doRecord) yielding per-element values;
    # keys starting with "!" mark non-interpolatable data.
    self.recdata_sources = {
        'T': "self.T_e",
        'Cp': "self.Cp_e",
        'rho': "self.rho_e",
        'Vp': "[dic['SYSPROP'][self.perplex.syspropnum['Vp']] for dic in self.perplexResult[:]]",
        'Vs': "[dic['SYSPROP'][self.perplex.syspropnum['Vs']] for dic in self.perplexResult[:]]",
        '!ph': "[dic['NAMEPHASES'] for dic in self.perplexResult[:]]",
        '!wp': "[dic['WTPHASES'] for dic in self.perplexResult[:]]",
    }
def doRecord(self):
    """Snapshot the current model state into the recording buffers.

    Evaluates each expression registered in recdata_sources and stores
    the result in a new Grid.GriddedData tagged with the current time.
    """
    if self.config['RECORD_TYPE'] == 1:
        # we'll always record the data at elements
        self.recdata_depths = list(self.xs_e)
        # add new record grid for this timestep
        self.recdata.append(Grid.GriddedData(self.ne, self.xs_e))
        self.recdata[-1].addMetaData("time", self.time)
        # add data to the grid
        for dataname in self.recdata_sources.keys():
            if dataname[0] != "!":
                # interpolatable data: resample onto the recording depths
                self.recdata[-1].addData(dataname, ip.interpolate(self.xs_e, eval(self.recdata_sources[dataname]), self.recdata_depths))
            else:
                # non-interpolatable data (phase names/weights): store as-is
                self.recdata[-1].addData(dataname, eval(self.recdata_sources[dataname]))
|
# -*- encoding: utf-8 -*-
from django import forms
from posts.models import Commentary
class AddCommentaryForm(forms.ModelForm):
    """ModelForm for creating a Commentary; exposes author and content only."""
    class Meta:
        model = Commentary
        fields = ('author', 'content')
        # Earlier field-set variants kept for reference:
        # fields = ('post', 'content')
        # fields = ('owner', 'content')
#Modules/Libraries
import sys #System commands
import random #Psuedo random number module
import string
#Functions#
def rand_int_range(n):
    """Return a pseudo-random integer in [0, n)."""
    return random.randrange(n)
def rand_float_range(n):
    """Return a pseudo-random float uniformly drawn from [0, n]."""
    return random.uniform(0, n)
def rand_char_range():
    """Return one random ASCII letter, upper- or lowercase with equal odds."""
    base = 65 if random.randrange(2) == 0 else 97  # 'A' or 'a'
    return chr(base + random.randrange(26))
def rand_symbol_range():
    """Return one random ASCII punctuation/symbol character.

    Picks one of the four symbol runs of the ASCII table, then a random
    character within it.  randrange()'s upper bound is exclusive, so the
    bound must be one past the last character of each run — the original
    code passed the last character itself, which made '/', '@', '`' and
    '~' unreachable (inconsistent with symbol_range(), which lists them).
    """
    bucket = random.randrange(4)
    if bucket == 0:
        return chr(random.randrange(33, 48))    # '!' .. '/'
    if bucket == 1:
        return chr(random.randrange(58, 65))    # ':' .. '@'
    if bucket == 2:
        return chr(random.randrange(91, 97))    # '[' .. '`'
    return chr(random.randrange(123, 127))      # '{' .. '~'
def symbol_range():
    """Return the 33 non-alphanumeric printable ASCII chars, in code order."""
    runs = ((32, 16), (58, 7), (91, 6), (123, 4))  # (start code, run length)
    return [chr(start + offset) for start, count in runs for offset in range(count)]
def sequential_load_test_data(data_set, int_rand, int_non_rand, float_rand, float_non_rand, char_low_n_upper, char_upper_non_rand, char_lower_non_rand, symbol_rand, symbol_non_rand):
    """Append every generated value to data_set, one source list at a time.

    Sources are concatenated in the fixed argument order; data_set is
    mutated in place and also returned.
    """
    sources = (int_rand, int_non_rand, float_rand, float_non_rand,
               char_low_n_upper, char_upper_non_rand, char_lower_non_rand,
               symbol_rand, symbol_non_rand)
    for source in sources:
        data_set.extend(source)
    return data_set
def non_sequential_load_test_data(data_set, int_rand, int_non_rand, float_rand, float_non_rand, char_low_n_upper, char_upper_non_rand, char_lower_non_rand, symbol_rand, symbol_non_rand, max_count):
    """Interleave the source lists into data_set, round-robin by index.

    Pass i appends element i of every source that still has one.  The
    original implementation indexed every non-empty source at i and
    raised IndexError whenever the sources had different lengths; the
    per-source bounds check below fixes that.  max_count is retained for
    interface compatibility but recomputed internally (as before).

    Returns data_set, which is mutated in place.
    """
    sources = (int_rand, int_non_rand, float_rand, float_non_rand,
               char_low_n_upper, char_upper_non_rand, char_lower_non_rand,
               symbol_rand, symbol_non_rand)
    max_count = [len(source) for source in sources]
    for i in range(max(max_count)):
        for source in sources:
            if i < len(source):  # skip sources already exhausted
                data_set.append(source[i])
    return data_set
def write_to_file(file_name, data_set):
    """Write data_set to file_name as space-separated strings (trailing space kept)."""
    with open(file_name, 'w') as out:
        for value in data_set:
            out.write(str(value) + " ")
def menu():
    """Print the numbered option menu (preceded by a blank line) to stdout."""
    entries = (
        "1. Integer Range Random",
        "2. Integer Range Non-Random",
        "3. Float Range Random",
        "4. Float Range Non-Random",
        "5. Char Alphabet Lower & Upper Range Random",
        "6. Char Alphabet Upper Range Non-Random",
        "7. Char Alphabet Lower Non-Random",
        "8. Symbol Range Random",
        "9. Symbol Range Non-Random",
        "10. [0-31] 'Char' on ASCII Chart",
        "11. Sequencial Load Testing Data Set",
        "12. Non-Sequencial Load Testing Data Set",
        "13. Export Testing Data",
        "14. Delete Data Set",
        "15. Quit",
    )
    print("\n" + "\n".join(entries))
def options(data_set, int_rand, int_non_rand, float_rand, float_non_rand, char_low_n_upper, char_upper_non_rand, char_lower_non_rand, symbol_rand, symbol_non_rand, max_count, option, file_name):
    """Execute one menu choice against the shared accumulator lists.

    Options 1-10 prompt for sizes and generate data into the matching
    source list; 11/12 merge the sources into data_set (sequentially or
    interleaved); 13 exports data_set to a file; 14 clears it; 15 clears
    it and exits the program.  All list arguments are mutated in place.
    """
    #1. Integer Range Random
    if option == 1:
        bound = int(input("Range: "))
        entries = int(input("Index: "))
        for i in range(entries):
            int_rand.append(rand_int_range(bound))
    #2. Integer Range Non-Random
    if option == 2:
        bound = int(input("Range: "))
        for i in range(bound):
            int_non_rand.append(i)
    #3. Float Range Random
    if option == 3:
        bound = int(input("Range: "))
        entries = int(input("Index: "))
        for i in range(entries):
            float_rand.append(rand_float_range(bound))
    #4. Float Range Non-Random
    if option == 4:
        bound = int(input("Range: "))
        flt = 0.0
        for i in range(bound):
            float_non_rand.append(flt + i)
    #5. Char Alphabet Lower & Upper Range Random
    if option == 5:
        entries = int(input("Index: "))
        for i in range(entries):
            char_low_n_upper.append(rand_char_range())
    #6. Char Alphabet Upper Range Non-Random
    if option == 6:
        entries = int(input("Index: "))
        for i in range(entries):
            char_upper_non_rand.append(chr(65 + i))
    #7. Char Alphabet Lower Range Non-Random
    if option == 7:
        entries = int(input("Index: "))
        for i in range(entries):
            char_lower_non_rand.append(chr(97 + i))
    #8. Symbol Range Random
    if option == 8:
        entries = int(input("Index: "))
        for i in range(entries):
            symbol_rand.append(rand_symbol_range())
    #9. Symbol Range Non-Random
    if option == 9:
        entries = int(input("Index [1-32]: "))
        symbols = symbol_range()
        for i in range(entries):
            symbol_non_rand.append(symbols[i])
    #10. [0-31] 'Char' on ASCII Chart
    if option == 10:
        # BUGFIX: remainder_ASCII was never defined anywhere in the file,
        # so this branch always raised NameError.  It is also never merged
        # into data_set, so a local list preserves the external behavior.
        entries = int(input("Index: "))
        remainder_ASCII = []
        for i in range(entries):
            remainder_ASCII.append(chr(i))
    #11. Sequential Write of Data_Set
    if option == 11:
        sequential_load_test_data(data_set, int_rand, int_non_rand, float_rand, float_non_rand, char_low_n_upper, char_upper_non_rand, char_lower_non_rand, symbol_rand, symbol_non_rand)
    #12. Non-Sequential Write of Data_Set
    if option == 12:
        non_sequential_load_test_data(data_set, int_rand, int_non_rand, float_rand, float_non_rand, char_low_n_upper, char_upper_non_rand, char_lower_non_rand, symbol_rand, symbol_non_rand, max_count)
    #13. Export Data_Set
    if option == 13:
        file_name = str(input("File Name: "))
        write_to_file(file_name, data_set)
    #14. Delete Data_Set
    if option == 14:
        del data_set[:]
    #15. Quit
    if option == 15:
        del data_set[:]
        sys.exit()
#MAIN#
# Interactive driver: repeatedly show the menu, read an option number, and
# dispatch it through options().  The loop exits only via option 15
# (sys.exit) or a non-integer input raising ValueError.
# NOTE(review): `global` at module level is a no-op, and max_count is
# assigned below but missing from the global list — both lines are
# harmless but could be dropped.
global data_set, int_rand, int_non_rand, float_rand, float_non_rand, char_low_n_upper, char_upper_non_rand, char_lower_non_rand, symbol_rand, symbol_non_rand
# One accumulator list per data category, plus the merged data_set.
data_set, int_rand, int_non_rand, float_rand, float_non_rand, char_low_n_upper, char_upper_non_rand, char_lower_non_rand, symbol_rand, symbol_non_rand, max_count = [], [], [], [], [], [], [], [], [], [], []
global file_name
file_name = ""
while True:
    menu()
    option = int(input("Option: "))
    options(data_set, int_rand, int_non_rand, float_rand, float_non_rand, char_low_n_upper, char_upper_non_rand, char_lower_non_rand, symbol_rand, symbol_non_rand, max_count, option, file_name)
|
from __future__ import unicode_literals
import grequests
from pyaib.plugins import keyword, plugin_class
@plugin_class('pug')
class PugMe(object):
    """pyaib IRC plugin replying with pug pictures from pugme.herokuapp.com."""
    def __init__(self, irc_context, config):
        # No state needed; pyaib requires this constructor signature.
        pass
    # See: https://github.com/github/hubot/blob/master/src/scripts/pugme.coffee
    @keyword('pugme')
    @keyword.autohelp
    def get_pug(self, irc_c, msg, trigger, args, kargs):
        """:: You need more small yappy dogs in your life."""
        # grequests.map on a single request: fire it, take the lone response.
        res = grequests.map([grequests.get('http://pugme.herokuapp.com/random')])[0]
        msg.reply(res.json()['pug'])
    @keyword('pugbomb')
    @keyword.autohelp
    def pug_bomb(self, irc_c, msg, trigger, args, kwargs):
        """:: You need WAY more small yappy dogs in your life!"""
        res = grequests.map([grequests.get('http://pugme.herokuapp.com/bomb?count=5')])[0]
        # One reply per pug URL in the bomb.
        for pug in res.json()['pugs']:
            msg.reply(pug)
|
# python3 predict.py
from pathlib import Path
import numpy as np
from PIL import Image
from keras.models import load_model
import sys
# Remove the ROS python2.7 path so that `import cv2` resolves to the
# python3 OpenCV build rather than the ROS-provided one.
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
model_path = "../model/mnist_model.h5"
# NOTE(review): machine-specific absolute path — consider making configurable.
images_folder = "/home/amsl/Pictures/sample/"
# load model
model = load_model(model_path)
image_shape = (28, 28, 1)  # MNIST input: 28x28 grayscale, one channel
# load images
def crop_resize(image_path):
    """Load an image, square-crop it, and return a normalized 28x28x1
    float32 array suitable as input to the MNIST model.

    NOTE(review): the crop is anchored at the top-left corner (0, 0),
    not centered — confirm that is intended for these sample pictures.
    """
    # img_gbr = cv2.imread(image_path)
    # img_gray = cv2.cvtColor(img_gbr, cv2.COLOR_BGR2GRAY)
    image_ = Image.open(image_path)
    img_gray = image_.convert('L')  # to 8-bit grayscale
    length = min(img_gray.size)  # side of the largest square that fits
    crop = img_gray.crop((0, 0, length, length))
    resized = crop.resize(image_shape[:2]) # use width x height
    img = np.array(resized).astype("float32")
    # debug output
    print(length)
    print(image_.size)
    print(img.shape)
    # img = np.array(img_gray).astype("float32")
    img /= 255  # scale pixel values to [0, 1]
    img = img.reshape(28, 28, 1)
    return img
# Gather every .jpg in the folder, preprocess, and classify each one.
folder = Path(images_folder)
image_paths = [str(f) for f in folder.glob("*.jpg")]
images = [crop_resize(p) for p in image_paths]
images = np.asarray(images)
print("a",images.shape)
# predict_classes returns the argmax digit label per image.
predicted = model.predict_classes(images)
print("\n")
print("-----result-----\n")
for image_path, predict in zip(image_paths, predicted):
    print(image_path, predict)
|
# Sum of the even numbers in the inclusive range [num1, num2].
num1 = int(input("Entre com o valor inicial-> "))  # start of range
num2 = int(input("Entre com o valor final-> "))  # end of range
soma = 0  # running sum of even numbers
while num1 <= num2:
    resto = num1 % 2  # remainder 0 means num1 is even
    if resto == 0:
        soma = soma + num1
    num1 = num1 + 1
print ("A soma eh-> ", soma)
"""
Scripts related to Gaussian Processes.
author: Andreas Rene Geist
email: andreas.geist@tuhh.de
website: https://github.com/AndReGeist
license: BSD
Please feel free to use and modify this, but keep the above information. Thanks!
"""
import Config
import os
import numpy as np
import scipy
import scipy.sparse as sp
from scipy import exp, sin, cos, sqrt, pi, interpolate
from scipy.sparse.linalg import spsolve
import matplotlib.pyplot as plt
from matplotlib import cm
# from scikits.sparse.cholmod import cholesky
"""Define sparse matrix save and load"""
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix with np.savez (a '.npz' suffix is appended)."""
    np.savez(filename,
             data=array.data,
             indices=array.indices,
             indptr=array.indptr,
             shape=array.shape)
def load_sparse_csr(filename):
    """Rebuild a CSR matrix saved by save_sparse_csr (name given without '.npz')."""
    archive = np.load(filename + '.npz')
    return sp.csr_matrix(
        (archive['data'], archive['indices'], archive['indptr']),
        shape=archive['shape'])
"""Calculate entry topology of the precision matrix Q"""
def calculate_precision_matrix(lx, ly, kappa, alpha, car1=False):
    """Assemble the sparse GMRF precision matrix Q for an lx-by-ly grid.

    The conditional auto-regressive (CAR) neighbourhood order is selected
    by car1 (True: CAR-1, 5-point stencil; False: CAR-2, 13-point stencil)
    and Config.set_gmrf_torus chooses between torus (wrap-around) topology
    and a Neumann boundary condition.

    Returns a scipy.sparse CSR matrix of shape (lx*ly, lx*ly).
    """
    field_info = np.arange(lx * ly).reshape((ly, lx))
    """
    Indices for precision values of field vertice i,j:
                            a2,j
                    a1,d1   a1,j   a1,c1
            i,d2    i,d1    i,j    i,c1    i,c2
                    b1,d1   b1,j   b1,c1
                            b2,j
    Note: - all field indices with "1"-indice mark vertices directly around current field vertice
          - field vertices with two "1"-indices are diagonal field vertices (relative to the current field vertice)
    Field structure:
        LEFT UPPER CORNER  ###  NORTH BORDER  ###  RIGHT UPPER CORNER    ^
              #                                          #               |
              #                                          #               |
              #                                          #               |
        WEST BORDER              INNER VERTICES          EAST BORDER     ly
              #                                          #               |
              #                                          #               |
              #                                          #               |
        LEFT LOWER CORNER  ###  SOUTH BORDER  ###  RIGHT LOWER CORNER    v
              <----------------lx--------------------->
    """
    if Config.set_gmrf_torus == True:
        """
        Indices for precision values of field vertice i,j:
                                a2,j
                        a1,d1   a1,j   a1,c1
                i,d2    i,d1    i,j    i,c1    i,c2
                        b1,d1   b1,j   b1,c1
                                b2,j
        Note: - all field indices with "1"-indice mark vertices directly around current field vertice
              - field vertices with two "1"-indices are diagonal field vertices (relative to the current field vertice)
        """
        # infmat row i lists vertex i followed by its 12 torus neighbours.
        infmat = np.dot(-1, np.ones((lx * ly, 13)))
        for ii in range(0, ly):  # Step to next field vertice in y-direction
            for jj in range(0, lx):  # Step to next field vertice in x-direction
                # The GMRF field has ly entries in Y-direction (indices in field array 0 to ly-1)
                # Check if field vertice is inside field, on border or in corner of field
                # To define its neighbours
                if (ii + 2) <= (ly - 1):
                    a1 = ii + 1;
                    a2 = ii + 2
                    # If moving two fields upwards in y-direction does not result in leaving the field
                    # The uper field neighbours "a1" and "a2" are directly above the vertice being a1=ii+1; a2=ii+2
                elif (ii + 1) <= (ly - 1):
                    a1 = ii + 1;
                    a2 = 0
                    # If moving two fields upwards in y-direction does result in a2 leaving the field
                    # The uper field neighbour "a1" is still inside field, but "a2" is due to the torus topology now a2=0
                else:
                    a1 = 0;
                    a2 = 1
                    # If moving one field upwards in y-direction does result in leaving the field (ii is on upper field border)
                    # The uper field neighbour is in in y-direction with "a1" and "a2" due to the torus topology now a1=0 and a2=1
                if (ii - 2) >= 0:
                    b1 = ii - 1;
                    b2 = ii - 2
                elif (ii - 1) >= 0:
                    b1 = ii - 1;
                    b2 = ly - 1
                else:
                    b1 = ly - 1;
                    b2 = ly - 2
                if (jj + 2) <= (lx - 1):
                    c1 = jj + 1;
                    c2 = jj + 2
                elif (jj + 1) <= (lx - 1):
                    c1 = jj + 1;
                    c2 = 0
                else:
                    c1 = 0;
                    c2 = 1
                if (jj - 2) >= 0:
                    d1 = jj - 1;
                    d2 = jj - 2
                elif (jj - 1) >= 0:
                    d1 = jj - 1;
                    d2 = lx - 1
                else:
                    d1 = lx - 1;
                    d2 = lx - 2
                #                                         field i,j           a1,j                b1,j                i,c1                i,d1
                infmat[field_info[ii, jj], :] = np.array([field_info[ii, jj], field_info[a1, jj], field_info[b1, jj], field_info[ii, c1], field_info[ii, d1],
                                                          # a2,j              b2,j                i,c2                i,d2
                                                          field_info[a2, jj], field_info[b2, jj], field_info[ii, c2], field_info[ii, d2],
                                                          # a1,c1             a1,d1               b1,d1               b1,c1
                                                          field_info[a1, c1], field_info[a1, d1], field_info[b1, d1], field_info[b1, c1]])
        a = alpha + 4
        if car1 == True:
            # CAR-1 on the torus: diagonal a*kappa, 4 direct neighbours -kappa.
            Q_rc = np.zeros(shape=(2, 5 * lx * ly)).astype(int)  # Save row colum indices of Q in COO-sparse-format
            Q_d = np.zeros(shape=(1, 5 * lx * ly)).astype(float)  # Save data of Q in COO-sparse-format
            for i1 in range(0, (lx * ly)):  # For all GMRF Grid values
                a1 = int(5 * i1)  # Each GMRF node has 5 entries in the corresponding precision matrix
                Q_rc[0, a1:(a1 + 5)] = i1 * np.ones(shape=(1, 5))  # Row indices
                Q_rc[1, a1:(a1 + 5)] = np.hstack((i1, infmat[i1, 1:5]))  # Column indices
                Q_d[0, a1:(a1 + 5)] = np.hstack(((a * kappa) * np.ones(shape=(1, 1)),
                                                 (-1 * kappa) * np.ones(shape=(1, 4))))  # Data
            Q_temporary1 = sp.coo_matrix((Q_d[0, :], (Q_rc[0, :], Q_rc[1, :])), shape=(lx * ly, lx * ly)).tocsr()
            # if Config.set_gmrf_torus == False:
            #     Q_temporary = Q_temporary1.todense()
            #     Q_new = np.diag(np.diag(Q_temporary, k=0), k=0) + np.diag(np.diag(Q_temporary, k=-1), k=-1) + \
            #             np.diag(np.diag(Q_temporary, k=1), k=1) + np.diag(np.diag(Q_temporary, k=lx), k=lx) + \
            #             np.diag(np.diag(Q_temporary, k=-lx), k=-lx)
            #     return sp.coo_matrix(Q_new)
            # else:
            return Q_temporary1
        elif car1 == False:
            # CAR-2 on the torus: 13-point stencil per vertex.
            Q_rc = np.zeros(shape=(3, 13 * lx * ly)).astype(int)  # Save row colum indices of Q in COO-sparse-format
            Q_d = np.zeros(shape=(3, 13 * lx * ly)).astype(float)  # Save data of Q in COO-sparse-format
            for i2 in range(0, (lx * ly)):
                a1 = int(13 * i2)
                Q_rc[0, a1:(a1 + 13)] = i2 * np.ones(shape=(1, 13))  # Row indices
                Q_rc[1, a1:(a1 + 13)] = np.hstack((i2, infmat[i2, 1:5], infmat[i2, 5:9], infmat[i2, 9:13]))  # Column indices
                # Q_d[0, a1:(a1 + 13)] = np.hstack(((4 + a ** 2) * (1 / kappa) * np.ones(shape=(1, 1)), (-2 * a / kappa) * np.ones(shape=(1, 4)),
                #                                   (1/kappa) * np.ones(shape=(1, 4)), (2 / kappa) * np.ones(shape=(1, 4))))  # Data
                Q_d[0, a1:(a1 + 13)] = np.hstack(((4 + a ** 2) * kappa * np.ones(shape=(1, 1)), (-2 * a * kappa) * np.ones(shape=(1, 4)),
                                                  kappa * np.ones(shape=(1, 4)), 2 * kappa * np.ones(shape=(1, 4))))  # Data
            Q_temporary1 = sp.coo_matrix((Q_d[0, :], (Q_rc[0, :], Q_rc[1, :])), shape=(lx * ly, lx * ly)).tocsr()
            return Q_temporary1
    elif Config.set_gmrf_torus == False:  # Neumann Boundary condition instead
        if car1 == True:
            n_a = 5  # Number of non-zero entries in the precision matrix for one GMRF vertice row
            # Diagonal entries shrink with the number of missing neighbours.
            q_center = (alpha + 4) * kappa
            q_border = (alpha + 3) * kappa
            q_corner = (alpha + 2) * kappa
            q_2 = -1 * kappa
            n_size = (lx - 2) * (ly - 2) * n_a + (lx - 2) * 2 * (n_a - 1) + (ly - 2) * 2 * (n_a - 1) + 4 * (n_a - 2)
            Q_rc = np.zeros(shape=(2, n_size)).astype(int)  # Save row colum indices of Q in COO-sparse-format
            Q_d = np.zeros(shape=(1, n_size)).astype(float)  # Save data of Q in COO-sparse-format
            ce = 0  # Current index inside the coo matrix that defines the GMRF precision
            for ii in range(0, ly):  # Step to next field vertice in y-direction
                for jj in range(0, lx):  # Step to next field vertice in x-direction
                    i1 = field_info[ii, jj]
                    # The GMRF field has ly entries in Y-direction (indices in field array 0 to ly-1)
                    # Check if field vertice is inside field, on border or in corner of field
                    # To define its neighbours
                    if ii == 0 and jj == 0:  # Left lower corner
                        Q_rc[0, ce:(ce + 3)] = i1 * np.ones(shape=(1, 3))
                        Q_rc[1, ce:(ce + 3)] = [i1, 2, lx]  # Column indices
                        Q_d[0, ce:(ce + 3)] = [q_corner, q_2, q_2]  # Data
                        ce += 3
                    elif ii == 0 and jj == lx - 1:  # Right lower corner
                        Q_rc[0, ce:(ce + 3)] = i1 * np.ones(shape=(1, 3))
                        Q_rc[1, ce:(ce + 3)] = [i1, i1 - 1, i1 + lx]  # Column indices
                        Q_d[0, ce:(ce + 3)] = [q_corner, q_2, q_2]  # Data
                        ce += 3
                    elif ii == ly - 1 and jj == 0:  # Left upper corner
                        Q_rc[0, ce:(ce + 3)] = i1 * np.ones(shape=(1, 3))
                        Q_rc[1, ce:(ce + 3)] = [i1, i1 + 1, i1 - lx]  # Column indices
                        Q_d[0, ce:(ce + 3)] = [q_corner, q_2, q_2]  # Data
                        ce += 3
                    elif ii == ly - 1 and jj == lx - 1:  # Right upper corner
                        Q_rc[0, ce:(ce + 3)] = i1 * np.ones(shape=(1, 3))
                        Q_rc[1, ce:(ce + 3)] = [i1, i1 - 1, i1 - lx]  # Column indices
                        Q_d[0, ce:(ce + 3)] = [q_corner, q_2, q_2]  # Data
                        ce += 3
                    elif ii == ly - 1:  # North field border
                        Q_rc[0, ce:(ce + 4)] = i1 * np.ones(shape=(1, 4))
                        Q_rc[1, ce:(ce + 4)] = [i1, i1 - 1, i1 + 1, i1 - lx]  # Column indices
                        Q_d[0, ce:(ce + 4)] = [q_border, q_2, q_2, q_2]  # Data
                        ce += 4
                    elif jj == lx - 1:  # East field border
                        Q_rc[0, ce:(ce + 4)] = i1 * np.ones(shape=(1, 4))
                        Q_rc[1, ce:(ce + 4)] = [i1, i1 - 1, i1 + lx, i1 - lx]  # Column indices
                        Q_d[0, ce:(ce + 4)] = [q_border, q_2, q_2, q_2]  # Data
                        ce += 4
                    elif ii == 0:  # South field border
                        Q_rc[0, ce:(ce + 4)] = i1 * np.ones(shape=(1, 4))
                        Q_rc[1, ce:(ce + 4)] = [i1, i1 - 1, i1 + 1, i1 + lx]  # Column indices
                        Q_d[0, ce:(ce + 4)] = [q_border, q_2, q_2, q_2]  # Data
                        ce += 4
                    elif jj == 0:  # West field border
                        Q_rc[0, ce:(ce + 4)] = i1 * np.ones(shape=(1, 4))
                        Q_rc[1, ce:(ce + 4)] = [i1, i1 + lx, i1 + 1, i1 - lx]  # Column indices
                        Q_d[0, ce:(ce + 4)] = [q_border, q_2, q_2, q_2]  # Data
                        ce += 4
                    else:  # Center vertices
                        Q_rc[0, ce:(ce + 5)] = i1 * np.ones(shape=(1, 5))
                        Q_rc[1, ce:(ce + 5)] = [i1, i1 - 1, i1 + 1, i1 - lx, i1 + lx]  # Column indices
                        Q_d[0, ce:(ce + 5)] = [q_center, q_2, q_2, q_2, q_2]  # Data # Data
                        ce += 5
            Q_temporary1 = sp.coo_matrix((Q_d[0, :], (Q_rc[0, :], Q_rc[1, :])), shape=(lx * ly, lx * ly)).tocsr()
            return Q_temporary1
        elif car1 == False:
            a = alpha + 4
            q0 = kappa * (4 + a ** 2)
            q1 = -2 * a * kappa
            q2 = 2 * kappa
            q3 = 1 * kappa
            n_size = 13 * (lx - 4) * (ly - 4) + 2 * (9 + 12) * ((lx - 4) + (ly - 4)) + 4 * 11 + 8 * 8 + 4 * 6
            Q_r = np.zeros(shape=(1, n_size)).astype(int)
            Q_c = np.zeros(shape=(1, n_size)).astype(int)
            Q_d = np.zeros(shape=(1, n_size)).astype(float)  # Save data of Q in COO-sparse-format
            ce = 0
            for ii in range(0, ly):  # Step to next field vertice in y-direction
                for jj in range(0, lx):  # Step to next field vertice in x-direction
                    # The GMRF field has ly entries in Y-direction (indices in field array 0 to ly-1)
                    # Check if field vertice is inside field, on border or in corner of field
                    """
                    Entries of the precision element:  |  Precision element confugrations (depends on the vertice postion inside the field):
                                  k3                   |  I.       o       II.      o      III.     o      IV.      o       V.       o
                            k2    k1    k2             |         o o o            o o o          o o o           o o o             o o
                      k3    k1   ii,jj  k1   k3        |       0 o o o o        0 o o o        0 o o o o       0 o o o o       0 o o
                            k2    k1    k2             |         o o o            o o o                          o o o
                                  k3                   |           o
                    """
                    i1 = field_info[ii, jj]
                    k1 = []
                    k2 = []
                    k3 = []
                    # Check if precision element types k1 and k2 are inside field for vertice ii,jj
                    if (ii + 2) <= (ly - 1):  # North Border
                        k1.append(i1 + lx)
                        k2.append(i1 + 2 * lx)
                    elif (ii + 1) <= (ly - 1):
                        k1.append(i1 + lx)
                    if (jj + 2) <= (lx - 1):  # East Border
                        k1.append(i1 + 1)
                        k2.append(i1 + 2)
                    elif (jj + 1) <= (lx - 1):
                        k1.append(i1 + 1)
                    if (ii - 2) >= 0:  # South Border
                        k1.append(i1 - lx)
                        k2.append(i1 - 2 * lx)
                    elif (ii - 1) >= 0:
                        k1.append(i1 - lx)
                    if (jj - 2) >= 0:  # West Border
                        k1.append(i1 - 1)
                        k2.append(i1 - 2)
                    elif (jj - 1) >= 0:
                        k1.append(i1 - 1)
                    # Check if precision element type k3 is inside field for vertice ii,jj
                    if (i1 + lx) in k1 and (i1 + 1) in k1:  # Upper right precision element k3
                        k3.append(i1 + lx + 1)
                    if (i1 + lx) in k1 and (i1 - 1) in k1:  # Upper left precision element k3
                        k3.append(i1 + lx - 1)
                    if (i1 - lx) in k1 and (i1 + 1) in k1:  # Lower right precision element k3
                        k3.append(i1 - lx + 1)
                    if (i1 - lx) in k1 and (i1 - 1) in k1:  # Lower left precision element k3
                        k3.append(i1 - lx - 1)
                    n_qe = 1 + len(k1) + len(k2) + len(k3)
                    Q_r[0, ce:(ce + n_qe)] = i1 * np.ones(shape=(1, n_qe))
                    Q_c[0, ce:(ce + n_qe)] = np.hstack((i1 * np.ones(shape=(1, 1)), np.array(k1).reshape(1, len(k1)), np.array(k2).reshape(1, len(k2)), np.array(k3).reshape(1, len(k3))))  # Column indices
                    Q_d[0, ce:(ce + n_qe)] = np.hstack((q0 * np.ones(shape=(1, 1)), q1 * np.ones(shape=(1, len(k1))), q2 * np.ones(shape=(1, len(k2))), q3 * np.ones(shape=(1, len(k3)))))  # Data # Data
                    ce += n_qe
            return sp.coo_matrix((Q_d[0, :], (Q_r[0, :], Q_c[0, :])), shape=(lx * ly, lx * ly)).tocsr()
"""SAMPLE from GMRF"""
def sample_from_GMRF(gmrf_dim, kappa, alpha, car_var, plot_gmrf=False):
    """Draw one sample field from the GMRF prior and return it as a
    cubic interpolant f(x, y) over the extended field coordinates.

    gmrf_dim -- (lxf, lyf, dvx, dvy): inner grid size plus border vertices.
    kappa, alpha -- hyperparameter lists; element 0 is used for sampling.
    car_var -- forwarded to calculate_precision_matrix as car1.
    plot_gmrf -- if True, plot the sampled field(s).

    NOTE(review): x_Q is first allocated 3-D (ly1, lx1, len(kappa)) but then
    overwritten with a 2-D reshape, so the plotting code below indexes
    x_Q[:, :, k] on a 2-D array — confirm the multi-hyperparameter plot
    path still works.
    """
    x_min, x_max, y_min, y_max = Config.field_dim
    lxf, lyf, dvx, dvy = gmrf_dim
    lx1 = lxf + 2 * dvx  # Total number of GMRF vertices in x
    ly1 = lyf + 2 * dvy
    # Calculate precision matrix
    Q_storage = calculate_precision_matrix(lx1, ly1, kappa[0], alpha[0], car1=car_var)
    # Draw sampel from GMRF: x = mu + L^-T z with Q = L L^T
    mue_Q = 10
    z_I = np.random.standard_normal(size=lx1 * ly1)
    x_Q = np.zeros(shape=(ly1, lx1, len(kappa)))
    print(Q_storage)
    L_Q = np.linalg.cholesky(Q_storage.todense())
    v_Q = np.linalg.solve(L_Q.T, z_I)
    x_Q_vec = mue_Q + v_Q
    x_Q = x_Q_vec.reshape((ly1, lx1))
    if plot_gmrf == True:
        if len(kappa) == 1:
            fig, ax = plt.subplots(1)
            # ax = ax.ravel()
            k = 0
            cf = ax.pcolor(np.linspace(x_min, x_max, num=lx1, endpoint=True),
                           np.linspace(y_min, y_max, num=ly1, endpoint=True), x_Q[:, :, k])
            ax.axis('tight')
            plt.colorbar(cf, ax=ax)
            ax.set_title('GMRF sample, kappa: ' + str(kappa[k]) + ', alpha: ' + str(alpha[k]))
            plt.xlabel('x (m)')
            plt.ylabel('y (m)')
            k += 1
            plt.show()
        else:
            fig, ax = plt.subplots(3, 2)
            k = 0
            for j in range(2):
                for i in range(3):
                    cf = ax[i, j].pcolor(np.linspace(x_min, x_max, num=lx1, endpoint=True),
                                         np.linspace(y_min, y_max, num=ly1, endpoint=True), x_Q[:, :, k])
                    ax[i, j].axis('tight')
                    plt.colorbar(cf, ax=ax[i, j])
                    ax[i, j].set_title('GMRF sample, kappa: ' + str(kappa[k]) + ', alpha: ' + str(alpha[k]))
                    plt.xlabel('x (m)')
                    plt.ylabel('y (m)')
                    k += 1
            plt.show()
    # Build the physical coordinates of the extended (bordered) grid.
    de = np.array([float(x_max - x_min) / (lxf - 1), float(y_max - y_min) / (lyf - 1)])  # Element width in x and y
    xg_min = x_min - dvx * de[0]  # Min GMRF field value in x
    xg_max = x_max + dvx * de[0]
    yg_min = y_min - dvy * de[1]
    yg_max = y_max + dvy * de[1]
    X = np.linspace(xg_min, xg_max, num=lx1, endpoint=True)  # Specifies column coordinates of field
    Y = np.linspace(yg_min, yg_max, num=ly1, endpoint=True)  # Specifies row coordinates of field
    f = scipy.interpolate.interp2d(X, Y, x_Q, kind='cubic')
    return f
class GMRF:
    def __init__(self, gmrf_dim, alpha_prior, kappa_prior, set_Q_init):
        """Initialize GMRF dimensions and precision matrices.

        gmrf_dim -- (lxf, lyf, dvx, dvy): inner grid size and border width.
        alpha_prior, kappa_prior -- discrete hyperparameter prior values;
            their Cartesian product forms the hyperparameter grid THETA.
        set_Q_init -- True: compute and cache the per-theta precision
            matrices to disk; False: load the previously cached ones.
        """
        """Initialize GMRF dimensions"""
        x_min, x_max, y_min, y_max = Config.field_dim
        lxf, lyf, dvx, dvy = gmrf_dim
        lx = lxf + 2 * dvx  # Total number of GMRF vertices across x dimension
        print("size of lx: ", lx)
        ly = lyf + 2 * dvy  # Total number of GMRF vertices across x dimension
        print("size of ly: ", ly)
        n = lx * ly  # Total number of GMRF vertices
        print("size of n: ", n)
        de = np.array([float(x_max - x_min) / (lxf - 1), float(y_max - y_min) / (lyf - 1)])  # Element width in x and y
        print("value of de: ", de)
        xg_min = x_min - dvx * de[0]  # Min GMRF field value in x
        xg_max = x_max + dvx * de[0]
        yg_min = y_min - dvy * de[1]
        yg_max = y_max + dvy * de[1]
        """Intialize GMRF PRECISION matrices"""
        p = 1  # Number of regression coefficients beta
        self.F = np.ones(shape=(n, p))  # Mean regression functions
        self.T = 1e-6 * np.ones(shape=(p, p))  # Precision matrix of the regression coefficients
        # Initialize hyperparameter prior (uniform over all combinations)
        THETA = []  # Matrix containing all discrete hyperparameter combinations
        for i in range(0, len(alpha_prior)):
            for j in range(0, len(kappa_prior)):
                THETA.append([kappa_prior[j], alpha_prior[i]])
        THETA = np.array(THETA).T
        l_TH = len(THETA[1])  # Number of hyperparameter pairs
        p_THETA = 1.0 / l_TH  # Prior probability for one theta
        self.diag_Q_t_inv = np.zeros(shape=(lx * ly + p, l_TH)).astype(float)
        F_sparse = sp.csr_matrix(self.F)
        FT_sparse = scipy.sparse.csr_matrix(self.F.T)
        T_inv = np.linalg.inv(self.T)  # Inverse of the Precision matrix of the regression coefficients
        T_sparse = sp.csr_matrix(self.T)
        if set_Q_init == True:
            for jj in range(0, l_TH):
                print("Initialize Matrix:", jj, "of", l_TH)
                """Initialize Q_{x|eta}"""
                #                   _{field values|eta}                   kappa         alpha
                Q_temporary = calculate_precision_matrix(lx, ly, THETA[0, jj], THETA[1, jj], car1=Config.set_GMRF_cartype)
                Q_eta_inv = np.linalg.inv(Q_temporary.todense())
                """Q_{x|eta,y=/} & diag_Q_inv """
                # Augment the field precision with the regression block
                # (block structure: [[Q, -Q F], [-F'Q, F'QF + T]]).
                A2 = Q_temporary.dot(-1 * F_sparse)
                B1 = -1 * FT_sparse.dot(Q_temporary)
                B2 = sp.csr_matrix.dot(FT_sparse, Q_temporary.dot(F_sparse)) + T_sparse
                H1 = sp.hstack([Q_temporary, A2])
                H2 = sp.hstack([B1, B2])
                filename = os.path.join('gp_scripts', 'Q_t_' + str(jj))
                Q_t = sp.vstack([H1, H2]).tocsr()
                # NOTE(review): the attribute name deliberately equals the
                # cache file path (contains a separator) — it is only ever
                # accessed via getattr/setattr with the same join.
                setattr(self, filename, Q_t)
                np.savez(filename, data=Q_t.data, indices=Q_t.indices,
                         indptr=Q_t.indptr, shape=Q_t.shape)
                C1 = Q_eta_inv + np.dot(self.F, np.dot(T_inv, self.F.T))
                C2 = np.dot(self.F, T_inv)
                D1 = np.dot(self.F, T_inv).T
                Q_t_inv = np.vstack([np.hstack([C1, C2]),
                                     np.hstack([D1, T_inv])])
                self.diag_Q_t_inv[:, jj] = Q_t_inv.diagonal()
            np.save(os.path.join('gp_scripts', 'diag_Q_t_inv.npy'), self.diag_Q_t_inv)
        else:
            print('Loading precalculated matrices')
            for j2 in range(0, l_TH):
                filename = os.path.join('gp_scripts', "Q_t_" + str(j2))
                loader = np.load(filename + '.npz')
                Q_t2 = sp.csr_matrix((loader['data'], loader['indices'], loader['indptr']),
                                     shape=loader['shape'])
                filename2 = os.path.join('gp_scripts', "Q_t_" + str(j2))
                setattr(self, filename2, Q_t2)
            self.diag_Q_t_inv = np.load(os.path.join('gp_scripts', 'diag_Q_t_inv.npy'))
        """Initialize adaptive GMRF algorithm matrices"""
        self.b = np.zeros(shape=(n + p, 1))  # Canonical mean
        self.c = 0.0  # Log-likelihood update vector
        self.h_theta = np.zeros(shape=(n + p, l_TH))
        self.g_theta = np.zeros(shape=(l_TH, 1))
        self.log_pi_y = np.zeros(shape=(l_TH, 1))
        self.pi_theta = np.zeros(shape=(l_TH, 1))
        self.mue_theta = np.zeros(shape=(n + p, l_TH))
        self.mue_x = np.zeros(shape=(n + p, 1))
        self.var_x = np.zeros(shape=(n + p, 1))
        print("size of p: ", p)
        self.params = (lxf, lyf, dvx, dvy, lx, ly, n, p, de, l_TH, p_THETA, xg_min, xg_max, yg_min, yg_max)
def gmrf_bayese_update(self, x_auv, y_t):
    """Update the GMRF class belief of the true field with one measurement.

    Input: x_auv -- current AUV state (position), y_t -- new scalar measurement
    Output: field mean, field variance, hyper-parameter posterior
    """
    (lxf, lyf, dvx, dvy, lx, ly, n, p, de, l_TH, p_THETA, xg_min, xg_max, yg_min, yg_max) = self.params
    # Compute the interpolation (observation) matrix for the current position.
    u = Config.interpolation_matrix(x_auv, n, p, lx, xg_min, yg_min, de)
    u_sparse = sp.csr_matrix(u)
    # Update canonical mean and observation-dependent likelihood terms.
    self.b = self.b + (y_t / Config.sigma_w_squ) * u  # Canonical mean
    self.c = self.c - ((y_t ** 2) / (2 * Config.sigma_w_squ))  # Likelihood term
    for jj in range(0, l_TH):
        # Rank-one update of each hyper-parameter's precision matrix.
        filename = os.path.join('gp_scripts', "Q_t_" + str(jj))
        Q_temporary = getattr(self, filename)
        self.h_theta[:, jj] = scipy.sparse.linalg.spsolve(Q_temporary, u_sparse).T
        # L_factor = cholesky(Q_temporary)
        # Sherman-Morrison-style update of the stored inverse diagonal.
        self.diag_Q_t_inv[:, jj] = np.subtract(self.diag_Q_t_inv[:, jj], (
            np.multiply(self.h_theta[:, jj], self.h_theta[:, jj]) / (Config.sigma_w_squ + np.dot(u.T, self.h_theta[:, jj]))))
        # Q_unc_sparse = 0.02*scipy.sparse.eye(n+p)
        Q_temporary = Q_temporary + (1 / Config.sigma_w_squ) * u_sparse.dot(u_sparse.T)
        setattr(self, filename, Q_temporary)
        if Config.set_Q_check == True:
            # Visual sanity check of the precision-matrix sparsity pattern.
            my_data = Q_temporary.todense()
            my_data[my_data == 0.0] = np.nan
            plt.matshow(my_data, cmap=cm.Spectral_r, interpolation='none')
            plt.draw()
            plt.pause(30)
            x = raw_input("Press [enter] to continue")  # NOTE: Python 2 builtin
        self.g_theta[jj] = self.g_theta[jj] - (0.5 * np.log(1 + (1 / Config.sigma_w_squ) * np.dot(u.T, self.h_theta[:, jj])))
    for hh in range(0, l_TH):
        # Compute the canonical mean for hyper-parameter hh.
        # BUG FIX: the original built the attribute name with str(jj) -- the
        # leftover variable from the previous loop -- so every hh solved
        # against the same (last) precision matrix instead of its own.
        filename = os.path.join('gp_scripts', "Q_t_" + str(hh))
        Q_temporary = getattr(self, filename)
        self.mue_theta[:, hh] = scipy.sparse.linalg.spsolve(Q_temporary, self.b).T
        # Compute the log-likelihood for hyper-parameter hh.
        self.log_pi_y[hh] = self.c + self.g_theta[hh] + 0.5 * np.dot(self.b.T, self.mue_theta[:, hh])
    # Scale the likelihood and form the posterior distribution (theta | y).
    self.log_pi_exp = np.exp(self.log_pi_y - np.amax(self.log_pi_y))
    self.posterior = (1 / np.sum(self.log_pi_exp)) * self.log_pi_exp * p_THETA
    self.pi_theta = (1 / np.sum(self.posterior)) * self.posterior  # Posterior distribution
    # Predictive mean and variance (x | y), marginalised over theta.
    for ji in range(0, n + p):
        self.mue_x[ji] = np.dot(self.mue_theta[[ji], :], self.pi_theta)  # Predictive Mean
        self.var_x[ji] = np.dot((self.diag_Q_t_inv[ji] +
                                 (np.subtract(self.mue_theta[ji, :],
                                              self.mue_x[ji] * np.ones(shape=(1, l_TH))) ** 2)), self.pi_theta)
    return self.mue_x, self.var_x, self.pi_theta
|
from django.db import models
from transaction_logging.models import InklingTransaction
from django import forms
class UserInfo(models.Model):
    """Contact details and book selection submitted by a CNM user."""
    first_name = models.CharField(verbose_name="First Name", max_length=40)
    last_name = models.CharField(verbose_name="Last Name", max_length=40)
    cnm_email = models.EmailField(verbose_name="CNM Email", max_length=40)
    book_choice = models.CharField(verbose_name="Book Choice", max_length=40)

    def __unicode__(self):
        # Python 2 string representation; used e.g. in the Django admin.
        return self.cnm_email
class Product(models.Model):
    """A purchasable (Inkling) book listed in the store."""
    title = models.CharField(verbose_name="Title", max_length=40)
    author = models.CharField(verbose_name="Author", max_length=40)
    cover_image = models.ImageField(verbose_name="Cover Image")
    mobile_cover_image = models.ImageField(verbose_name="Mobile Cover Image", default='')
    price = models.FloatField(verbose_name="Price", max_length=5)
    availability = models.CharField(verbose_name="Availability", max_length=20)
    description = models.TextField(verbose_name="Description", max_length=240)
    # ID of the UPay payment site this product is sold through.
    site_id = models.CharField(verbose_name="UPay Site ID", max_length=2, default=1)
    # Identifier of the matching product in the Inkling catalogue.
    inkling_product_id = models.CharField(verbose_name="Inkling Product ID", max_length=40)

    def __unicode__(self):
        # Python 2 string representation; used e.g. in the Django admin.
        return self.title
# Build the choice list for BulkUpload.book_choice from existing products.
# NOTE: querying the database at import time is exactly what previously
# "broke migrate twice" -- the product table may not exist yet when this
# module is first imported (fresh install, during migrations), so the
# query is now guarded and degrades to an empty choice list.
bulk_choices = list()
try:
    for product in Product.objects.all():
        bulk_choices.append((product.title, product.title))
except Exception:
    # Table missing / DB unavailable -- keep choices empty instead of
    # crashing the import (and with it, `manage.py migrate`).
    pass
class BulkUpload(models.Model):
    """A CSV batch of users to register against one selected book."""
    # Raw CSV pasted/uploaded by an administrator.
    csv_field = models.TextField(verbose_name="CSV Field")
    #book_choice = forms.ModelMultipleChoiceField(required=True, queryset=Product.objects.all())
    # Choices are built from Product titles at import time (see bulk_choices).
    book_choice = models.CharField(verbose_name="Book Choice", max_length=40, choices=bulk_choices)
    #book_choice = models.CharField(verbose_name="Book Choice", max_length=40, choices='')

    def __unicode__(self):
        # Python 2 string representation; used e.g. in the Django admin.
        return 'CSV Field'
|
import numpy as np
def euler2quaternion(phi1, Phi, phi2, P = 1):
    """Convert Bunge-convention Euler angles (radians) to a unit quaternion.

    P is the permutation (sign-convention) operator, +1 or -1.
    Returns the quaternion as a tuple (q0, q1, q2, q3) with q0 >= 0.
    """
    half_sum = 0.5 * (phi1 + phi2)
    half_diff = 0.5 * (phi1 - phi2)
    cos_half = np.cos(Phi / 2)
    sin_half = np.sin(Phi / 2)
    quat = (cos_half * np.cos(half_sum),
            -P * sin_half * np.cos(half_diff),
            -P * sin_half * np.sin(half_diff),
            -P * cos_half * np.sin(half_sum))
    # Northern-hemisphere convention: flip the whole quaternion if q0 < 0
    # (q and -q represent the same rotation).
    if quat[0] < 0:
        return tuple(-component for component in quat)
    return quat
def quaternion2euler(q0, q1, q2, q3, P = 1):
    """Convert a unit quaternion to Bunge-convention Euler angles (radians).

    P is the permutation (sign-convention) operator, +1 or -1.
    Returns a tuple (phi1, Phi, phi2); in the general case each angle is
    mapped into [0, 2*pi).
    """
    q03 = np.square(q0) + np.square(q3)
    q12 = np.square(q1) + np.square(q2)
    CHI = np.sqrt(q03*q12)
    # BUG FIX: the two degenerate branches used '^' (bitwise XOR, which
    # raises TypeError on floats) where '**' (power) was intended.
    if (CHI == 0 and q12 == 0):
        # Rotation purely about z (q1 = q2 = 0).
        THETA = (np.arctan2(-2*P*q0*q3, q0**2 - q3**2), 0, 0)
    elif (CHI == 0 and q03 == 0):
        # 180-degree rotation about an axis in the x-y plane (q0 = q3 = 0).
        THETA = (np.arctan2(2*q1*q2, q1**2 - q2**2), np.pi, 0)
    elif (CHI != 0):
        # General case: build the six arctan2 operands, then wrap each
        # resulting angle into [0, 2*pi).
        THETA01 = (q1*q3 - P*q0*q2) / CHI
        THETA02 = (-P*q0*q1 - q2*q3) / CHI
        THETA11 = 2*CHI
        THETA12 = q03 - q12
        THETA21 = (P*q0*q2 + q1*q3) / CHI
        THETA22 = (q2*q3 - P*q0*q1) / CHI
        THETA = []
        for angle in [(THETA01, THETA02), (THETA11, THETA12), (THETA21, THETA22)]:
            val = np.arctan2(angle[0], angle[1])
            if (val < 0):
                val += 2*np.pi
            THETA.append(val)
        THETA = tuple(THETA)
    return THETA
def euler2orimatrix(phi1, Phi, phi2, P = 1):
    """Convert Bunge-convention Euler angles (radians) to a 3x3 orientation matrix.

    Returns a numpy matrix (proper rotation: orthogonal, det = +1).
    """
    C1 = np.cos(phi1)
    C2 = np.cos(phi2)
    S1 = np.sin(phi1)
    S2 = np.sin(phi2)
    C = np.cos(Phi)
    S = np.sin(Phi)
    E1 = C1*C2 - S1*C*S2
    # BUG FIX: was 'S1*C2 - C1*C*S2'; the (1,2) entry of the Bunge rotation
    # matrix is s1*c2 + c1*s2*cos(Phi) (Rowenhorst et al. 2015).  With the
    # '-' the matrix is not orthogonal and has det != +1.
    E2 = S1*C2 + C1*C*S2
    E3 = S*S2
    E4 = -C1*S2 - S1*C*C2
    E5 = -S1*S2 + C1*C*C2
    E6 = S*C2
    E7 = S1*S
    E8 = -C1*S
    E9 = C
    return np.matrix([[E1, E2, E3], [E4, E5, E6] , [E7, E8, E9]])
def euler2axisangle(phi1, Phi, phi2, P = 1):
    """Convert Bunge-convention Euler angles (radians) to axis-angle form.

    Returns (axis1, axis2, axis3, angle_in_radians); the axis is a unit
    vector.  Undefined for the identity rotation (TAU == 0 -> division by
    zero), as in the original implementation.
    """
    T = np.tan(Phi / 2)
    SIGMA = (1/2)*(phi1 + phi2)
    DELTA = (1/2)*(phi1 - phi2)
    TAU = np.sqrt(np.square(T) + np.square(np.sin(SIGMA)))
    OMEGA = 2*np.arctan(TAU / np.cos(SIGMA))
    if (OMEGA > np.pi):
        OMEGA = 2*np.pi - OMEGA
    axis1 = (P/TAU)*T*np.cos(DELTA)
    axis2 = (P/TAU)*T*np.sin(DELTA)
    # BUG FIX: the third component is (P/tau)*sin(SIGMA); the original also
    # multiplied by T, which breaks unit length (a pure z-rotation, Phi = 0,
    # produced a zero axis).  See Rowenhorst et al. 2015, eu2ax.
    axis3 = (P/TAU)*np.sin(SIGMA)
    return (axis1, axis2, axis3, OMEGA)
def orimatrix2euler(mat, P = 1):
    """Convert a 3x3 orientation matrix to Bunge-convention Euler angles (radians)."""
    # BUG FIX: ZETA = 1/sqrt(1 - m33^2) was previously computed before the
    # degeneracy check, dividing by zero whenever mat[2,2] == +/-1.  It is
    # only needed (and finite) in the general branch, and the degenerate
    # test must also cover mat[2,2] == -1 (Phi = pi), for which the same
    # closed form applies: THETA2 = (pi/2)*(1 - m33) gives 0 or pi.
    if (abs(mat[2,2]) == 1):
        THETA1 = np.arctan2(mat[0,1], mat[0,0])
        THETA2 = (np.pi/2)*(1 - mat[2,2])
        THETA3 = 0
    else:
        ZETA = (1 / np.sqrt(1 - np.square(mat[2,2])))
        THETA1 = np.arctan2(mat[2,0]*ZETA, -(mat[2,1]*ZETA))
        THETA2 = np.arccos(mat[2,2])
        THETA3 = np.arctan2(mat[0,2]*ZETA, mat[1,2]*ZETA)
    return (THETA1, THETA2, THETA3)
def orimatrix2quaternion(mat, P = 1):
    """Convert a 3x3 orientation matrix to a unit quaternion (q0, q1, q2, q3)."""
    # ROBUSTNESS FIX: max(0., ...) clamps tiny negative radicands caused by
    # floating-point round-off in valid rotation matrices; without it,
    # np.sqrt returns nan for such inputs.
    q0 = (1/2)*np.sqrt(max(0., 1 + mat[0,0] + mat[1,1] + mat[2,2]))
    q1 = (P/2)*np.sqrt(max(0., 1 + mat[0,0] - mat[1,1] - mat[2,2]))
    q2 = (P/2)*np.sqrt(max(0., 1 - mat[0,0] + mat[1,1] - mat[2,2]))
    q3 = (P/2)*np.sqrt(max(0., 1 - mat[0,0] - mat[1,1] + mat[2,2]))
    # Resolve the component signs from the off-diagonal elements.
    if (mat[2,1] < mat[1,2]):
        q1 = -q1
    if (mat[0,2] < mat[2,0]):
        q2 = -q2
    if (mat[1,0] > mat[0,1]):
        q3 = -q3
    # Renormalise to a unit quaternion.
    MAGNITUDE = np.sqrt(np.square(q0) + np.square(q1) + np.square(q2) + np.square(q3))
    return (q0/MAGNITUDE, q1/MAGNITUDE, q2/MAGNITUDE, q3/MAGNITUDE)
#TODO: Quaternion misorientation
#TODO: GAM
#TODO: Find Symmetry operator quatonion form
def quaternion2axisangle(q0, q1, q2, q3, P = 1):
    """Convert a unit quaternion to axis-angle form.

    Returns ([axis_x, axis_y, axis_z], angle_in_radians).
    """
    OMEGA = 2*np.arccos(q0)
    if OMEGA == 0:
        # Identity rotation: the axis is arbitrary; use +z by convention.
        # BUG FIX: the original returned ([q1, q2, q3], pi) here, i.e. a
        # zero axis and a 180-degree angle for the *identity* rotation.
        return ([0.0, 0.0, 1.0], 0.0)
    elif q0 == 0:
        # 180-degree rotation: (q1, q2, q3) is already the unit axis.
        # BUG FIX: the original fell into the general branch here, where
        # np.sign(0) = 0 zeroed out the axis entirely.
        return ([q1, q2, q3], np.pi)
    else:
        s = np.sign(q0) / np.sqrt(np.square(q1) + np.square(q2) + np.square(q3))
        return ([s*q1, s*q2, s*q3], OMEGA)
|
import pandas as pd
from sklearn import preprocessing
from preprocessing import read, split, non_numerical_features, one_hot_encoding
from preprocessing import drop_features, deal_with_23 , deal_with_58
from postprocessing import writeoutput
from csv import DictReader, DictWriter
from sklearn.feature_selection import VarianceThreshold
from sklearn.externals import joblib
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import ExtraTreesClassifier
import time
from sklearn.ensemble import RandomForestClassifier
from csv import DictReader, DictWriter
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
# ---------------------------------------------------------------------------
# Flat training/ensembling script: fits several classifiers on data.csv,
# reports their training accuracy, predicts quiz.csv, and combines three of
# the prediction files with a weighted vote into output.csv.
# Labels appear to be +1/-1 (the final vote compares a sum against 0) --
# TODO confirm against the data files.
# ---------------------------------------------------------------------------
start = time.time()
#data = read('data_test.csv')
#quiz = read('quiz_test.csv')
data = read('data.csv')
label = data['label']
data = data.drop('label', axis = 1)
print(data)
quiz = read('quiz.csv')
# Project-specific column fixes, applied identically to train and quiz sets.
data = deal_with_23(data)
quiz = deal_with_23(quiz)
data = deal_with_58(data)
quiz = deal_with_58(quiz)
print(data.shape)
print(quiz.shape)
#data = data.drop('23', axis = 1)
#quiz = quiz.drop('23', axis = 1)
#data = data.drop('58', axis = 1)
#quiz = quiz.drop('58', axis = 1)
# One-hot encode the non-numerical columns, then drop the raw originals.
categories = non_numerical_features(data)
print(categories)
data, quiz = one_hot_encoding(data, quiz,categories)
data = drop_features(data, categories)
quiz = drop_features(quiz, categories)
print(data.shape)
print(quiz.shape)
# NOTE(review): these normalized arrays are overwritten with the raw
# `.values` a few lines below, so the normalization is effectively unused --
# confirm whether that is intended.
train_data = preprocessing.normalize(data)
test_data = preprocessing.normalize(quiz)
print("Entering the learing phase")
print("-------------------------------------")
print("Adaboost Classifier 1-100 ")
model1 = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=100)
train_data = data.values
test_data = quiz.values
model1 = model1.fit(train_data,label.values.T)
# Training-set accuracy: count predictions matching the known labels.
output = model1.predict(train_data)
correct = 0
for i in range(0,len(output)):
    if output[i] == label[i]:
        correct = correct + 1
print("Correct: ")
print(correct)
outputA = model1.predict(test_data)
print("-------------------------------------")
# NOTE(review): banner says "1-200" but the model below uses max_depth=10
# and 300 estimators -- the label looks stale.
print("Adaboost Classifier 1-200 ")
model2 = AdaBoostClassifier(DecisionTreeClassifier(max_depth=10), algorithm="SAMME", n_estimators=300)
train_data = data.values
test_data = quiz.values
model2 = model2.fit(train_data,label.values.T)
output = model2.predict(train_data)
correct = 0
for i in range(0,len(output)):
    if output[i] == label[i]:
        correct = correct + 1
print("Correct: ")
print(correct)
output1 = model2.predict(test_data)
writeoutput('output1.csv',output1)
print("-------------------------------------")
print("Random Forest Classifier 300 ")
model3 = RandomForestClassifier(n_estimators = 300)
model3 = model3.fit(train_data,label.values.T)
output = model3.predict(train_data)
correct = 0
for i in range(0,len(output)):
    if output[i] == label[i]:
        correct = correct + 1
print("Correct: ")
print(correct)
output2 = model3.predict(test_data)
writeoutput('output2.csv',output2)
print("-------------------------------------")
print("Logical Regression ")
model4 = LogisticRegression()
model4 = model4.fit(train_data,label.values.T)
output = model4.predict(train_data)
correct = 0
for i in range(0,len(output)):
    if output[i] == label[i]:
        correct = correct + 1
print("Correct: ")
print(correct)
output3 = model4.predict(test_data)
writeoutput('output3.csv',output3)
print("-------------------------------------")
print("K NN 2")
model5 = KNeighborsClassifier(n_neighbors=2)
model5 = model5.fit(train_data,label.values.T)
output = model5.predict(train_data)
correct = 0
for i in range(0,len(output)):
    if output[i] == label[i]:
        correct = correct + 1
print("Correct: ")
print(correct)
output5 = model5.predict(test_data)
writeoutput('output5.csv',output5)
# Weighted vote of deep AdaBoost, RandomForest and KNN predictions;
# overwrites the shallow-AdaBoost predictions in outputA in place.
for i in range(0,len(output5)):
    value =output1[i] +(2*output2[i]) + (2*output5[i])
    if value<0:
        outputA[i] = -1
    else:
        outputA[i] = 1
writeoutput('output.csv',outputA)
done = time.time()
elapsed = done - start
print(elapsed)
|
#!/usr/bin/python
import math
primes = []
def conjecture(num, prime_list=None):
    """Test Goldbach's "other" conjecture for an odd composite number.

    Returns True if num can be written as p + 2*k*k for some prime p < num
    and integer k >= 0, False otherwise (a counterexample).

    prime_list optionally supplies the ascending list of primes to search;
    by default the module-level `primes` list is used, which keeps the
    original single-argument call signature working.
    """
    if prime_list is None:
        prime_list = primes
    i = 0
    while i < len(prime_list) and prime_list[i] < num:
        a = (num - prime_list[i]) // 2
        # Exact integer perfect-square test via math.isqrt instead of the
        # original floating-point `a ** 0.5 == int(a ** 0.5)`, which can
        # misjudge large values.
        if math.isqrt(a) ** 2 == a:
            return True
        i += 1
    return False
# Sieve of Eratosthenes over [2, n], then scan the composites in order and
# print the first number for which the conjecture fails (Project-Euler-style
# search; the search stops at the first counterexample).
n = 1000000
arr = [True] * (n + 1)  # arr[i] is True while i is still presumed prime
for i in range(2, int(math.sqrt(n)) + 1):
    if arr[i]:
        # Strike multiples of i starting at i*i (smaller multiples were
        # already struck by smaller primes).  range-step replaces the
        # original manual while loop; the unused `count` variable is gone.
        for j in range(i * i, n + 1, i):
            arr[j] = False
for i in range(2, n + 1):
    if arr[i]:
        primes.append(i)
    elif not conjecture(i):
        print(i)
        break
|
from flask import Flask, render_template, flash, redirect, Blueprint, url_for
from functools import wraps
from app.forms.sign_up import SignUpForm
from app.forms.log_in import LogInForm
from app.models.models import User
from app.models import db
from flask_login import login_user, current_user, logout_user
# login not required decorator
def anonymous_required(f):
    """Decorator: the wrapped view is reachable only while logged out.

    Logged-in users are flashed a notice and redirected to the index.
    """
    @wraps(f)
    def actual_decorator(*args, **kwargs):
        if not current_user.is_anonymous:
            flash('Please log out to view this page')
            return redirect(url_for('public.index'))
        return f(*args, **kwargs)
    return actual_decorator
# Blueprint collecting all publicly accessible (pre-login) routes.
public_views = Blueprint('public', __name__)


@public_views.route('/')
@public_views.route('/index')
def index():
    """Render the public landing page."""
    return render_template('index.html', title='LEΛPP')
@public_views.route('/signup', methods=['GET', 'POST'])
@anonymous_required
def signup():
    """Render the sign-up form; on a valid POST, create the account and
    redirect to the login page."""
    form = SignUpForm()
    valid_data = True
    if form.validate_on_submit():
        # everything is fine, possibly make a new account
        # NOTE(review): check-then-insert is racy under concurrent sign-ups;
        # a unique constraint on email would make this safe -- confirm.
        if len(User.query.filter_by(email=form.email.data).all()) > 0:
            flash('An account with this email already exists.', 'danger')
            valid_data = False
        # more checks as necessary
        if valid_data:
            new_user = User(form.first_name.data,
                            form.last_name.data,
                            form.email.data,
                            form.password.data,
                            form.password_confirm.data)
            db.session.add(new_user)
            db.session.commit()
            flash('New account created! Log in below.', 'success')
            return redirect('/login')
    elif len(form.errors) > 0 or not valid_data:
        # Invalid form submission: surface field errors to the template.
        flash('Please correct the errors below.', 'danger')
    return render_template('sign_up.html',
                           title='Sign Up',
                           sign_up_form=form)
@public_views.route('/login', methods=["GET", "POST"])
@anonymous_required
def login():
    """Render the log-in form and authenticate the user on a valid POST."""
    form = LogInForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Same message for unknown email and wrong password, so the form
        # does not leak which accounts exist.
        if user is None or not user.validate_password(form.password.data):
            flash('Invalid username or password', 'danger')
            return redirect(url_for('public.login'))
        flash('You should be logged in now', 'success')
        login_user(user)
        # NOTE(review): execution falls through to re-render the login page
        # after a successful login instead of redirecting -- confirm
        # whether a redirect to the index was intended here.
    return render_template('log_in.html',
                           title='Log In',
                           log_in_form=form)
@public_views.route('/logout')
def logout():
    """Log the current user out and send them to the login page."""
    # flask_login clears the session; works even when nobody is logged in.
    logout_user()
    flash('Successfully logged out!')
    return redirect(url_for('public.login'))
@public_views.route('/logindata')
def data_stuff():
    """Debug endpoint reporting the current login state as plain text."""
    if current_user.is_anonymous:
        return 'NO LOGIN HERE'
    return 'LOGGED IN AS ' + current_user.email
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from dbUtils import DbUtils
class RegistrationForm(FlaskForm):
    """Sign-up form: username, email, password and confirmation."""
    username = StringField('Usuario', validators=[DataRequired(), Length(min=2,max=20)])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Senha', validators=[DataRequired()])
    confirm_password = PasswordField('Confirmar Senha', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Registrar')

    def validate_username(self,username):
        """Reject usernames already present in the database.

        (WTForms calls validate_<fieldname> hooks automatically.)
        """
        dbUtils = DbUtils()
        if(dbUtils.verificaUsuarioNome(username.data)):
            raise ValidationError('Esse nome já está em uso! Por favor, utilize outro nome')

    def validate_email(self,email):
        """Reject e-mail addresses already present in the database."""
        dbUtils = DbUtils()
        if(dbUtils.verificaUsuarioEmail(email.data)):
            raise ValidationError('Esse email já está em uso! Por favor, utilize outro email')
class LoginForm(FlaskForm):
    """Log-in form: e-mail and password with a remember-me option."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Senha', validators=[DataRequired()])
    remember = BooleanField('Lembrar minha conta')
    submit = SubmitField('Entrar')
class AlugarForm(FlaskForm):
    """Rental search form: look a film up by title."""
    filme = StringField('Titulo', validators=[DataRequired(), Length(min=2,max=30)])
    submit = SubmitField('Pesquisar')
#!/usr/bin/python3
from os import mkdir;
from os.path import exists, join;
from absl import app, flags;
import numpy as np;
import cv2;
import tensorflow as tf;
from create_dataset import Dataset;
from models import BlindSuperResolution;
FLAGS = flags.FLAGS;
def add_options():
    """Register the command-line flags used by this training script."""
    flags.DEFINE_integer('batch_size', default = 32, help = 'batch size');
    flags.DEFINE_string('checkpoint', default = 'checkpoints', help = 'path to checkpoint directory');
    flags.DEFINE_integer('eval_steps', default = 100, help = 'how many iterations for each evaluation');
    flags.DEFINE_integer('checkpoint_steps', default = 1000, help = 'how many iterations for each checkpoint');
    # Scale is a string enum so absl can validate it; cast to int at use sites.
    flags.DEFINE_enum('scale', default = '2', enum_values = ['2', '3', '4'], help = 'train DASR on which scale of DIV2K');
    flags.DEFINE_string('dataset_path', default = None, help = 'where the HR images locate');
class SummaryCallback(tf.keras.callbacks.Callback):
    """Keras callback that periodically writes the training scalars plus a
    ground-truth / predicted HR image pair to TensorBoard."""
    def __init__(self, dasr, eval_freq = 1000):
        # BUG FIX: the base Callback initialiser was never invoked; it sets
        # up state (e.g. params, model bookkeeping) that Keras expects every
        # callback instance to have.
        super(SummaryCallback, self).__init__();
        self.dasr = dasr;
        self.eval_freq = eval_freq;
        # Endless iterator over the test split; one sample is logged each time.
        testset = Dataset(FLAGS.dataset_path, scale = int(FLAGS.scale)).load_dataset(mode = 'test').batch(1).repeat(-1);
        self.iter = iter(testset);
        self.log = tf.summary.create_file_writer(FLAGS.checkpoint);
    def on_batch_end(self, batch, logs = None):
        if batch % self.eval_freq == 0:
            lr, hr = next(self.iter);
            pred_hr, loss = self.dasr([lr,lr]);
            # Add the per-channel offsets back before casting for display
            # (presumably the dataset's RGB means -- TODO confirm).
            pred_hr = tf.cast(pred_hr + tf.reshape([114.444 , 111.4605, 103.02 ], (1,1,1,3)), dtype = tf.uint8);
            gt_hr = tf.cast(hr['sr'] + tf.reshape([114.444 , 111.4605, 103.02 ], (1,1,1,3)), dtype = tf.uint8);
            with self.log.as_default():
                for key, value in logs.items():
                    tf.summary.scalar(key, value, step = self.dasr.optimizer.iterations);
                tf.summary.image('ground truth', gt_hr, step = self.dasr.optimizer.iterations);
                tf.summary.image('predict', pred_hr, step = self.dasr.optimizer.iterations);
def main(unused_argv):
    """Two-stage DASR training: (1) pre-train the MoCo degradation encoder,
    (2) train the full blind super-resolution network."""
    # NOTE(review): 3450 hard-codes the training-set size -- confirm it
    # matches the dataset actually pointed to by --dataset_path.
    steps_per_epoch = 3450 // FLAGS.batch_size;
    # 1) train moco only
    # 1.1) create model and compile
    dasr = BlindSuperResolution(scale = int(FLAGS.scale), enable_train = True);
    moco = dasr.get_layer('moco');
    # Exponentially decaying LR: x0.9 every 60 epochs' worth of steps.
    moco_opt = tf.keras.optimizers.Adam(tf.keras.optimizers.schedules.ExponentialDecay(1e-3, decay_steps = 60 * steps_per_epoch, decay_rate = 0.9));
    moco.compile(optimizer = moco_opt,
                 loss = {'output_2': tf.keras.losses.BinaryCrossentropy(from_logits = True)});
    # 1.2) create dataset
    moco_trainset = Dataset(FLAGS.dataset_path, scale = int(FLAGS.scale)).load_dataset(mode = 'moco').shuffle(10 * FLAGS.batch_size).batch(FLAGS.batch_size).prefetch(tf.data.experimental.AUTOTUNE);
    # 1.3) fit
    callbacks = [
        tf.keras.callbacks.TensorBoard(log_dir = FLAGS.checkpoint),
        tf.keras.callbacks.ModelCheckpoint(filepath = join(FLAGS.checkpoint, 'moco_ckpt'), save_freq = FLAGS.checkpoint_steps),
    ];
    moco.fit(moco_trainset, callbacks = callbacks, epochs = 100);
    moco.save_weights('moco_weights.h5');
    # 2) train whole network
    # 2.1) create model and compile (LR halves every 125 epochs' worth of steps)
    dasr_opt = tf.keras.optimizers.Adam(tf.keras.optimizers.schedules.ExponentialDecay(1e-4, decay_steps = 125 * steps_per_epoch, decay_rate = 0.5));
    dasr.compile(optimizer = dasr_opt,
                 loss = {'sr': tf.keras.losses.MeanAbsoluteError(), 'moco': tf.keras.losses.BinaryCrossentropy(from_logits = True)},
                 metrics = {'sr': tf.keras.metrics.MeanAbsoluteError()});
    # 2.2) create dataset
    dasr_trainset = Dataset(FLAGS.dataset_path, scale = int(FLAGS.scale)).load_dataset(mode = 'train').shuffle(10 * FLAGS.batch_size).batch(FLAGS.batch_size).prefetch(tf.data.experimental.AUTOTUNE);
    # 2.3) fit (SummaryCallback adds the periodic image summaries)
    callbacks = [
        tf.keras.callbacks.TensorBoard(log_dir = FLAGS.checkpoint),
        tf.keras.callbacks.ModelCheckpoint(filepath = join(FLAGS.checkpoint, 'dasr_ckpt'), save_freq = FLAGS.checkpoint_steps),
        SummaryCallback(dasr, FLAGS.eval_steps),
    ];
    dasr.fit(dasr_trainset, callbacks = callbacks, epochs = 500);
    dasr.save_weights('dasr_weights.h5');
if __name__ == "__main__":
    # Register CLI flags, then hand control to absl (parses argv, calls main).
    add_options();
    app.run(main);
|
from elasticsearch_dsl import Date, Text, Integer, Nested, Keyword, DocType
import json
import logging
"""
Provides an ORM-like experience for accessing data in Elasticsearch.
Note the actual schema for Elasticsearch is defined in es_mapping.py; any
low-level changes to the index must be represented there as well.
"""
class SyncableDocType(DocType):
    """
    Represents tables in the source-of-truth that will be replicated to
    Elasticsearch.
    """
    # Aggregations can't be performed on the _id meta-column, which
    # necessitates copying it to this column in the doc. Aggregation is used
    # to find the last document inserted into Elasticsearch.
    id = Integer()

    @staticmethod
    def database_row_to_elasticsearch_doc(row, schema):
        """
        Children of this class must have a function mapping a Postgres model
        to an Elasticsearch document.

        :param row: A tuple representing a row in Postgres.
        :param schema: A map of each field name to its position in the row.
        :return:
        """
        # BUG FIX: `raise NotImplemented(...)` raises a TypeError, because
        # NotImplemented is a non-callable sentinel value, not an exception
        # class -- NotImplementedError is the correct exception here.
        raise NotImplementedError(
            'Model is missing database -> Elasticsearch translation.'
        )
def _parse_description(metadata_field):
"""
Parse the description field from the metadata if available.
Limit to the first 2000 characters.
"""
try:
if 'description' in metadata_field:
return metadata_field['description'][:2000]
except TypeError:
return None
class Image(SyncableDocType):
    """Elasticsearch document type for one image record (index 'image')."""
    title = Text(analyzer="english")
    identifier = Keyword()
    creator = Text()
    creator_url = Keyword()
    tags = Text(multi=True)
    created_on = Date()
    url = Keyword()
    thumbnail = Keyword()
    provider = Text(analyzer="keyword")
    source = Keyword()
    license = Keyword()
    license_version = Keyword()
    foreign_landing_url = Keyword()
    meta_data = Nested()
    view_count = Integer()
    description = Text(analyzer="english")

    class Index:
        # Name of the Elasticsearch index this document type lives in.
        name = 'image'

    @staticmethod
    def database_row_to_elasticsearch_doc(row, schema):
        """Convert one Postgres row (tuple) into an Image document.

        :param row: tuple of column values.
        :param schema: map of column name -> position in `row`.
        """
        def _parse_detailed_tags(json_tags):
            # Keep only each tag's 'name' (plus optional 'accuracy');
            # returns None when there are no tags at all.
            if json_tags:
                parsed_tags = []
                for tag in json_tags:
                    if 'name' in tag:
                        parsed_tag = {'name': tag['name']}
                        if 'accuracy' in tag:
                            parsed_tag['accuracy'] = tag['accuracy']
                        parsed_tags.append(parsed_tag)
                return parsed_tags
            else:
                return None

        return Image(
            _id=row[schema['id']],
            id=row[schema['id']],
            title=row[schema['title']],
            identifier=row[schema['identifier']],
            creator=row[schema['creator']],
            creator_url=row[schema['creator_url']],
            tags=_parse_detailed_tags(row[schema['tags']]),
            created_on=row[schema['created_on']],
            url=row[schema['url']],
            thumbnail=row[schema['thumbnail']],
            provider=row[schema['provider']],
            source=row[schema['source']],
            # Licenses are normalised to lower case for consistent filtering.
            license=row[schema['license']].lower(),
            license_version=row[schema['license_version']],
            foreign_landing_url=row[schema['foreign_landing_url']],
            # NOTE(review): the raw meta_data blob is not indexed; only the
            # parsed description below is -- confirm this is deliberate.
            meta_data=None,
            view_count=row[schema['view_count']],
            description=_parse_description(row[schema['meta_data']])
        )
# Table name -> Elasticsearch model
# Used by the replication job to pick the document class for each source table.
database_table_to_elasticsearch_model = {
    'image': Image
}
|
#!/usr/bin/env python
# test has been developed by Robert Harakaly and changed for SAM by Victor Galaktionov
# get LFC current directory used by the name server (lfc_getcwd)
# meta: proxy=true
# meta: preconfig=../../LFC-config
import os, lfc, sys, errno
from testClass import _test, _ntest, _testRunner, SAM_Run, LFC_VO, TEST_HOME
class test_ok(_test):
    """Positive test: lfc_getcwd returns the directory set via lfc_chdir."""
    def info(self):
        # Short human-readable test name shown by the runner.
        return "lfc_getcwd OK "
    def prepare(self):
        # Change the LFC working directory to the VO root.
        self.path = LFC_VO
        lfc.lfc_chdir(self.path)
    def test(self):
        # Pre-allocate a 256-byte buffer for the C-style output parameter.
        path = " " * 256
        ret = lfc.lfc_getcwd(path,len(path))
        return ((ret.strip(),path.strip()),0)
    def ret(self):
        # Expected values; the buffer comes back NUL-terminated.
        return (self.path,self.path+"\0")
    def compare(self, testVal, retVal):
        # Both the return value and the filled buffer must match.
        ((ret1,ret2), retRetVal) = retVal
        ((test1, test2), testRetVal) = testVal
        if ((ret1 == test1) & (test2 == ret2)):
            retval = True
        else:
            retval = False
        return retval
class test_ENOENT(_ntest):
    """Negative test: lfc_getcwd after the current directory was removed
    must fail with serrno == ENOENT."""
    def info(self):
        return "lfc_getcwd on non-existing directory"
    def prepare(self):
        # Create a scratch directory, cd into it, then remove it so the
        # name server no longer has a valid current working directory.
        self.name = LFC_VO + "/python_getcwd_test"
        ret = lfc.lfc_mkdir(self.name,0755)
        lfc.lfc_chdir(self.name)
        lfc.lfc_rmdir(self.name)
    def test(self):
        self.path = " "
        ret = lfc.lfc_getcwd(self.path,len(self.path))
        # Return value, buffer and serrno are all checked by compare().
        return ((ret,self.path,lfc.cvar.serrno),0)
    def ret(self):
        # Expect failure (None) with serrno == ENOENT.
        return (None,self.path,errno.ENOENT)
    def compare(self, testVal, retVal):
        ((ret1,ret2,reterr), retRetVal) = retVal
        ((test1, test2, testerr), testRetVal) = testVal
        # Debug output (Python 2 print statements).
        print ret1, test1
        print test2, ret2
        print reterr, testerr
        if ((ret1 == test1) & (test2 == ret2) & (reterr == testerr)):
            retval = True
        else:
            retval = False
        return retval
    def clean(self):
        # Best-effort removal in case the directory still exists.
        lfc.lfc_rmdir(self.name)
class test_ERANGE(_ntest):
    """Negative test: a 1-byte buffer must fail with serrno == ERANGE."""
    def info(self):
        return "lfc_getcwd insufficient buffer space (ERANGE)"
    def test(self):
        # Deliberately undersized buffer (1 byte).
        self.path = " "
        ret = lfc.lfc_getcwd(self.path,len(self.path))
        return ((ret,self.path,lfc.cvar.serrno),0)
    def ret(self):
        return (None,self.path,errno.ERANGE)
    def compare(self, testVal, retVal):
        ((ret1,ret2,reterr), retRetVal) = retVal
        ((test1, test2, testerr), testRetVal) = testVal
        if ((ret1 == test1) & (test2 == ret2) & (reterr == testerr)):
            retval = True
        else:
            retval = False
        return retval
class lfc_getcwd_test(_testRunner):
    """Test-suite runner wiring the lfc_getcwd test cases into SAM."""
    def __init__(self):
        self.name = "lfc_getcwd_test"
        # NOTE(review): test_ENOENT is defined above but not listed here --
        # confirm whether its exclusion is deliberate.
        self.tests=[test_ok, test_ERANGE]
#************* Interface for SAM and Python tests ***************
# Executes the suite when this module is run by the SAM harness.
SAM_Run(lfc_getcwd_test)
|
def main():
    """Solve activity-selection instances read from act.txt.

    The file holds repeated instances: a count line, then that many lines
    of "id field1 field2" (field1/field2 appear to be start/finish times --
    inferred from the compatibility test below; TODO confirm against the
    data).  For each instance the greedily selected compatible activities
    are printed.

    NOTE(review): an instance with a count of 0 would raise IndexError on
    activities[0], as in the original code.
    """
    with open("act.txt") as file:
        p = 1
        while True:
            num_activities = file.readline().strip()
            if num_activities == "":
                break  # end of input
            num_activities = int(num_activities)
            activities = []
            for index in range(num_activities):
                y = file.readline().split(" ")
                activities.append((int(y[0]), int(y[1]), int(y[2])))
            # Greedy scan over activities sorted by descending field 1: an
            # activity is selected when its field 2 does not exceed the
            # previously selected activity's field 1.
            activities.sort(key=lambda x: x[1], reverse=True)
            n = len(activities)
            activities_result = [activities[0][0]]
            k = 0
            # BUG FIX: the scan must begin at index 1, not 2 -- starting at
            # 2 silently skipped the second activity in sorted order, which
            # can shrink the selected set.
            for m in range(1, n):
                if activities[m][2] <= activities[k][1]:
                    activities_result.append(activities[m][0])
                    k = m
            activities_result.reverse()
            print("Set {}".format(p))
            p += 1
            print("Number of activities selected = {}".format(len(activities_result)))
            print("Activities: {}".format(" ".join(str(x) for x in activities_result)))
main() |
#coding:utf-8
#!/usr/bin/env python
import random
from gclib.utility import is_expire, currentTime, hit, randint, dayTime, drop, is_same_day
from game.utility.config import config
from game.routine.vip import vip
class luckycat:
@staticmethod
def make():
    """
    Create the initial lucky-cat state dict for a new user.
    """
    # Single dict literal instead of item-by-item assignment; the original
    # initialised 'critical_point_list' twice -- the duplicate is removed
    # (same value, no behaviour change).
    return {
        'level': 1,
        'exp': 0,
        'critical_point_list': [],
        'beckon_count': 0,
        'beckon_gem_count': 0,
        'beckon_last_update_time': currentTime(),
        'beckon_cooldown': 0,
        'feed_self_count': 0,
        'feed_self_last_time': 0,
        'feed_other_count': 0,
        'feed_other_last_time': 0,
        'fatigue': 0,
        'bless_roll_last_time': 0,
        'bless_cycle_begin_time': 0,
        'bless': {},
        'record': [],
        'feed_candidate_list': [],
        'feed_request_list': [],
    }
@staticmethod
def beckon(usr, useGem):
    """
    Beckon (summon wealth) once: earn gold, optionally paying gems to go
    beyond the free daily count and ignore the cooldown.
    """
    if not usr.luckycat:
        return {'msg':'luckycat_not_available'}
    luckycatProfitConf = config.getConfig('luckycat_profit')
    gameConf = config.getConfig('game')
    luckycat.updateBeckon(usr)
    # Free beckons are capped at the base daily count...
    if usr.luckycat['beckon_count'] >= gameConf['luckycat_beckon_count_base'] and (not useGem):
        return {'msg':'luckycat_beckon_max_free_count'}
    # ...while paid beckons extend the cap by the VIP bonus.
    if usr.luckycat['beckon_count'] >= gameConf['luckycat_beckon_count_base'] + vip.value(usr, 'gem_beckon'):
        return {'msg':'luckycat_beckon_max_count'}
    if usr.luckycat['beckon_cooldown'] > gameConf['luckycat_cooldown_max'] and (not useGem):
        return {'msg':'luckycat_beckon_in_cooldown'}
    costGem = 0
    if useGem:
        # Gem price rises with every paid beckon performed so far.
        costGem = gameConf['luckycat_beckon_gem_base'] + gameConf['luckycat_beckon_gem_delta'] * usr.luckycat['beckon_gem_count']
        if usr.gem < costGem:
            return {'msg':'gem_not_enough'}
    luckycatBlessConf = config.getConfig('luckycat_bless')
    beckonGold = luckycatProfitConf[usr.level - 1]['beckonProfit']
    # BUG FIX: beckon_once's second parameter is the gem *cost* to charge
    # (cf. beckon_clickonce); the original passed the boolean useGem, so a
    # paid beckon deducted only 1 gem (True) instead of costGem.  The
    # duplicate config.getConfig('game') fetch is also removed.
    beckonCritical, blessid = luckycat.beckon_once(usr, costGem, beckonGold, luckycatBlessConf, gameConf)
    usr.save()
    return {'gold':usr.gold, 'luckycat_beckon_count':usr.luckycat['beckon_count'], 'luckycat_beckon_cooldown':usr.luckycat['beckon_cooldown'], 'beckon_critical':beckonCritical, 'gem':usr.gem, 'bless':blessid}
@staticmethod
def beckon_clickonce(usr):
    """
    One-click (batch) beckon: performs up to 10 paid beckons in a row.
    Requires the VIP privilege 'beckon_clickonce'; gems are always spent.
    """
    if not usr.luckycat:
        return {'msg':'luckycat_not_available'}
    if not vip.value(usr, 'beckon_clickonce'):
        return {'msg':'vip_required'}
    luckycatProfitConf = config.getConfig('luckycat_profit')
    gameConf = config.getConfig('game')
    luckycat.updateBeckon(usr)
    # BUG FIX: the original copied the guards from beckon() verbatim,
    # including references to an undefined local `useGem` (a latent
    # NameError once the free cap was reached).  One-click beckon always
    # pays with gems, so the free-count and cooldown guards (which apply
    # only to free beckons) are dropped; the hard paid cap still applies.
    if usr.luckycat['beckon_count'] >= gameConf['luckycat_beckon_count_base'] + vip.value(usr, 'gem_beckon'):
        return {'msg':'luckycat_beckon_max_count'}
    costGem = gameConf['luckycat_beckon_gem_base'] + gameConf['luckycat_beckon_gem_delta'] * usr.luckycat['beckon_gem_count']
    if usr.gem < costGem:
        return {'msg':'gem_not_enough'}
    luckycatBlessConf = config.getConfig('luckycat_bless')
    beckonGold = luckycatProfitConf[usr.level - 1]['beckonProfit']
    for i in range(10):
        # Stop as soon as the user can no longer afford the next beckon.
        if usr.gem < costGem:
            break
        beckonCritical, blessid = luckycat.beckon_once(usr, costGem, beckonGold, luckycatBlessConf, gameConf)
    usr.save()
    return {'gold':usr.gold, 'luckycat_beckon_count':usr.luckycat['beckon_count'], 'luckycat_beckon_cooldown':usr.luckycat['beckon_cooldown'], 'gem':usr.gem}
@staticmethod
def beckon_once(usr, costGem, beckonGold, luckycatBlessConf, gameConf):
    """
    Perform a single beckon: roll up to three blessings, apply them, pay
    out gold (with fortune multiplier and critical chance), charge the gem
    cost, and advance count / fatigue / cooldown bookkeeping.

    costGem -- gem price of this beckon; 0 (falsy) marks a free beckon.
    Returns (beckonCritical, blessid).
    """
    blessid = []
    for i in range(3):
        bid = luckycat.rollBeckonBless(usr, luckycatBlessConf)
        if bid:
            blessid.append(bid)
    beckonGold, gem, beckonCount, beckonCD = luckycat.beckonBless(usr, beckonGold, blessid, luckycatBlessConf)
    usr.luckycat['beckon_count'] = beckonCount
    usr.luckycat['beckon_cooldown'] = beckonCD
    # Scale the payout by the current global fortune factor.
    beckonGold = beckonGold * luckycat.currentLuckycatFortune()
    beckonCritical = False
    if luckycat.isCritical(usr):
        # A critical beckon doubles the payout.
        beckonGold = beckonGold * 2
        beckonCritical = True
    usr.gold = usr.gold + beckonGold
    usr.gem = usr.gem - costGem
    usr.gem = usr.gem + gem
    if costGem:
        # Paid beckons additionally advance the beckon count here.
        usr.luckycat['beckon_count'] = usr.luckycat['beckon_count'] + 1
    usr.luckycat['fatigue'] = usr.luckycat['fatigue'] + 1
    if usr.luckycat['fatigue'] > gameConf['luckycat_fatigue_max']:
        usr.luckycat['fatigue'] = gameConf['luckycat_fatigue_max']
    # Cooldown grows with fatigue; 9.4 is a tuning constant -- TODO confirm.
    usr.luckycat['beckon_cooldown'] = int(usr.luckycat['beckon_cooldown'] + (gameConf['luckycat_cooldown_base'] * (1 + usr.luckycat['fatigue'] / 9.4)))
    usr.luckycat['beckon_last_update_time'] = currentTime()
    return beckonCritical, blessid
@staticmethod
def beckonBless(usr, beckonGold, blessid, luckycatBlessConf):
    """
    Apply beckon blessings: fold each blessing's effect into the gold
    gained, gems gained, beckon count and cooldown.

    Returns the adjusted (gold, gem, beckonCount, beckonCD) tuple.
    """
    gold, gem = beckonGold, 0
    beckonCount = usr.luckycat['beckon_count']
    beckonCD = usr.luckycat['beckon_cooldown']
    for bless_id in blessid:
        info = luckycatBlessConf[bless_id]
        kind = info['blessTypeStr']
        if kind == 'rubyluck':
            # Grants bonus gems.
            gem += info['value']
        elif kind == 'freepc':
            # Refunds this beckon against the daily count.
            beckonCount = usr.luckycat['beckon_count'] - 1
        elif kind == 'increase':
            # Multiplies the gold payout.
            gold *= info['value']
        elif kind == 'reducecd':
            # Shortens the cooldown.
            beckonCD -= info['value']
    return gold, gem, beckonCount, beckonCD
@staticmethod
def beckon_reset(usr):
    """
    Reset the beckon cooldown for a fixed price in gems and gold.
    """
    if not usr.luckycat:
        return {'msg':'luckycat_not_available'}
    gameConf = config.getConfig('game')
    reset_price = gameConf['luckycat_beckon_reset_price']
    costGem = reset_price['gem']
    costGold = reset_price['gold']
    # Both currencies must be affordable before anything is charged.
    if usr.gem < costGem:
        return {'msg':'gem_not_enough'}
    if usr.gold < costGold:
        return {'msg':'gold_not_enough'}
    usr.gem = usr.gem - costGem
    usr.gold = usr.gold - costGold
    usr.luckycat['beckon_cooldown'] = 0
    usr.luckycat['beckon_last_update_time'] = currentTime()
    usr.save()
    return {'gold':usr.gold, 'gem':usr.gem, 'luckycat_beckon_cooldown': luckycat.beckon_cooldown(usr)}
@staticmethod
def beckon_cooldown(usr):
"""
招财冷却
"""
now = currentTime()
if usr.luckycat['beckon_cooldown'] + usr.luckycat['beckon_last_update_time'] > now:
return usr.luckycat['beckon_cooldown'] + usr.luckycat['beckon_last_update_time'] - currentTime()
return 0
@staticmethod
def beckonCooldownCleanup(usr):
"""
清除招财冷却时间
"""
gemCost = usr.luckycat['beckon_cooldown'] / 10
usr.gem = usr.gem - gemCost
usr.save()
return {'gem':usr.gem, 'luckycat_beckon_cooldown':0}
@staticmethod
def feed(usr, target):
"""
喂养
"""
if not usr.luckycat:
return {'msg':'luckycat_not_available'}
luckycat.updateFeed(usr)
gameConf = config.getConfig('game')
requiredLevel = 0
now = currentTime()
if (target.roleid in usr.luckycat['feed_request_list']) or (usr.roleid in target.luckycat['feed_candidate_list']) :
return {'msg':'luckycat_already_feed'}
if gameConf['luckycat_feed_other_required_level'] > usr.level:
return {'msg':'luckycat_feed_level_required'}
if usr.luckycat['feed_self_count'] >= (gameConf['luckycat_feed_self_count_max'] + vip.value(usr, 'feed_extra')):
return {'msg':'luckycat_feed_max_time'}
luckycat.updateFeed(usr)
usr.luckycat['feed_self_count'] = usr.luckycat['feed_self_count'] + 1
usr.luckycat['feed_self_last_time'] = now
spreadBlessid = ''
if not target.luckycat:
return {'msg':'luckycat_not_available'}
usr.luckycat['feed_request_list'].append(int(target.roleid))
target.luckycat['feed_candidate_list'].append(int(usr.roleid))
luckycat.notify_candidate_add(target, usr.roleid)
usr.save()
target.save()
return {'add_luckycat_request':target.roleid, 'luckycat_feed_self_count': usr.luckycat['feed_self_count']}
    @staticmethod
    def agreeFeed(usr, friendid):
        """
        Accept a feed request from ``friendid``.

        Consumes one of our daily "feed others" uses, unlinks the pair's
        request/candidate lists, possibly spreads one of our spreadable
        blessings to the friend, and pays both sides a level-based gold
        reward.
        """
        if not usr.luckycat:
            return {'msg':'luckycat_not_available'}
        if friendid not in usr.luckycat['feed_candidate_list']:
            return {'msg':'luckycat_request_not_exist'}
        gameConf = config.getConfig('game')
        luckycat.updateFeed(usr)
        if usr.luckycat['feed_other_count'] >= gameConf['luckycat_feed_other_count_max']:
            return {'msg':'luckycat_agree_request_max_time'}
        usr.luckycat['feed_candidate_list'].remove(friendid)
        friend = usr.__class__.get(friendid)
        if not friend:
            usr.save()
            return {'msg':'usr_not_exist'}
        friend.luckycat['feed_request_list'].remove(usr.roleid)
        luckycat.notify_request_list_remove(friend, usr.roleid)
        # NOTE(review): this guard comes after friend.luckycat was already
        # dereferenced two lines up, so it can never trip -- it would need
        # to move before the remove() call to be effective.
        if not friend.luckycat:
            return {'msg':'luckycat_not_available'}
        now = currentTime()
        usr.luckycat['feed_other_count'] = usr.luckycat['feed_other_count'] + 1
        # NOTE(review): stamps feed_self_last_time, not feed_other_last_time
        # -- confirm which counter this is meant to throttle.
        usr.luckycat['feed_self_last_time'] = now
        luckycat.updateBless(usr)
        luckycat.updateBless(friend)
        # blessings we own that are flagged spreadable...
        allSpreadBlessid = []
        for blessid in usr.luckycat['bless']:
            if usr.luckycat['bless'][blessid].has_key('spread'):
                allSpreadBlessid.append(blessid)
        # ...minus the ones the friend already has
        for blessid in friend.luckycat['bless']:
            if blessid in allSpreadBlessid:
                allSpreadBlessid.remove(blessid)
        spreadBlessid = ''
        if allSpreadBlessid:
            # gift one spreadable bless at random; the friend's copy is not
            # itself marked spreadable
            spreadBlessid = random.sample(allSpreadBlessid, 1)[0]
            friend.luckycat['bless'][spreadBlessid] = {}
            friend.luckycat['bless'][spreadBlessid]['blessid'] = spreadBlessid
            luckycat.notify_bless(friend, spreadBlessid)
        # level-indexed gold rewards for both parties
        luckycatProfitConf = config.getConfig('luckycat_profit')
        usrAwardGold = luckycatProfitConf[usr.level - 1]['agreeProfit']
        friendAwardGold = luckycatProfitConf[friend.level - 1]['blessProfit']
        usr.gold = usr.gold + usrAwardGold
        friend.gold = friend.gold + friendAwardGold
        usr.save()
        friend.save()
        return {'gold':usr.gold, 'luckycat_bless':spreadBlessid, 'luckycat_feed_other_count':usr.luckycat['feed_other_count']}
@staticmethod
def disagreeFeed(usr, friendid):
"""
不同意喂养
"""
if friendid not in usr.luckycat['feed_candidate_list']:
return {'msg':'luckycat_candidate_not_exist'}
usr.luckycat['feed_candidate_list'].remove(friendid)
usr.luckycat['feed_self_count'] = usr.luckycat['feed_self_count'] + 1
friend = usr.__class__.get(friendid)
if not friend:
usr.save()
return {'msg':'usr_not_exist'}
if usr.roleid in friend.luckycat['feed_request_list']:
friend.luckycat['feed_request_list'].remove(usr.roleid)
luckycat.notify_request_list_remove(friend, usr.roleid)
friend.save()
usr.save()
return {'delete_luckycat_candidate': friendid, 'feed_self_count':usr.luckycat['feed_self_count']}
@staticmethod
def cancelRequest(usr, friendid):
"""
取消请求
"""
if friendid not in usr.luckycat['feed_request_list']:
return {'msg':'luckycat_request_not_exist'}
luckycat.updateFeed(usr)
now = currentTime()
usr.luckycat['feed_self_count'] = usr.luckycat['feed_self_count'] - 1
usr.luckycat['feed_self_last_time'] = now
usr.luckycat['feed_request_list'].remove(friendid)
friend = usr.__class__.get(friendid)
if not friend:
usr.save()
return {'msg':'usr_not_exist'}
if usr.roleid in friend.luckycat['feed_candidate_list']:
friend.luckycat['feed_candidate_list'].remove(usr.roleid)
friend.save()
usr.save()
return {'delete_luckycat_request':friendid, 'luckycat_feed_self_count': usr.luckycat['feed_self_count']}
@staticmethod
def rollBeckonBless(usr, luckycatBlessConf):
"""
抽取招财祝福
"""
#luckycat.updateBless(usr)
#luckycatBlessConf = config.getConfig('luckycat_bless')
roll = randint()
blessConf = {}
for blessid in luckycatBlessConf:
b = luckycatBlessConf[blessid]
if b['probability'] < roll:
roll = roll - b['probability']
else:
if usr.luckycat['bless'].has_key(blessid):
return blessid
return None
return None
    @staticmethod
    def rollBless(usr, isUseGem):
        """
        Daily bless roll (optionally a paid re-roll for VIPs).

        Picks a bless by walking the probability table with a random roll,
        stores it on the user (marked spreadable on cycle days or paid
        rolls) and stamps the roll time.

        NOTE(review): despite ``isUseGem`` no gem is deducted here --
        confirm billing happens in the caller.
        """
        if not usr.luckycat:
            return {'msg':'luckycat_not_available'}
        if isUseGem and (not vip.canBuyLuckycatBless(usr)):
            return {'msg':'luckycat_vip_required'}
        luckycat.updateBless(usr)
        luckycatBlessConf = config.getConfig('luckycat_bless')
        now = currentTime()
        # free roll once per day; gem rolls are only allowed after today's
        # free roll has been used
        if is_same_day(now, usr.luckycat['bless_roll_last_time']):
            if not isUseGem:
                return {'msg':'luckycat_roll_bless_already_today'}
        else:
            if isUseGem:
                return {'msg':'gem_not_necessary'}
        roll = randint()
        blessConf = {}
        # bucket walk: subtract each probability until the roll lands
        for blessid in luckycatBlessConf:
            b = luckycatBlessConf[blessid]
            if b['probability'] < roll:
                roll = roll - b['probability']
            else:
                blessConf = b
                break
        # NOTE(review): if the probabilities don't cover the roll, the loop
        # falls through with blessConf == {} and the next line raises
        # KeyError -- confirm the table always sums past randint()'s range.
        blessid = blessConf['blessid']
        if not usr.luckycat['bless'].has_key(blessid):
            usr.luckycat['bless'][blessid] = {}
            usr.luckycat['bless'][blessid]['blessid'] = blessid
        # cycle days and paid rolls yield a spreadable bless
        if luckycat.isCycleDay(usr) or isUseGem:
            usr.luckycat['bless'][blessid]['spread'] = True
        usr.luckycat['bless_roll_last_time'] = now
        usr.save()
        return {'luckycat_roll_bless':blessid, 'luckycat_roll_bless_spread':usr.luckycat['bless'][blessid].has_key('spread')}
@staticmethod
def updateBless(usr):
"""
更新祝福
"""
now = currentTime()
gameConf = config.getConfig('game')
if usr.luckycat['bless_cycle_begin_time'] and (day_diff(usr.luckycat['bless_cycle_begin_time'], now) > gameConf['luckycat_bless_cycle_day']):
usr.luckycat['bless_cycle_begin_time'] = 0
@staticmethod
def isCycleDay(usr):
"""
是否循环天
"""
return is_same_day(usr.luckycat['bless_cycle_begin_time'], currentTime()) or (usr.luckycat['bless_cycle_begin_time'] == 0)
@staticmethod
def getClientData(usr, gameConf):
"""
得到client data
"""
data = {}
data['level'] = usr.luckycat['level']
data['exp'] = usr.luckycat['exp']
data['critical_point_list'] = usr.luckycat['critical_point_list']
data['beckon_count'] = usr.luckycat['beckon_count']
data['beckon_gem_count'] = usr.luckycat['beckon_gem_count']
data['beckon_cooldown'] = luckycat.beckon_cooldown(usr)
data['critical_point_list'] = usr.luckycat['critical_point_list']
data['feed_self_count'] = usr.luckycat['feed_self_count']
#data['feed_self_cooldown'] = luckycat.feed_self_cooldown(usr, gameConf)
data['feed_other_count'] = usr.luckycat['feed_other_count']
#data['feed_other_cooldown'] = luckycat.feed_other_cooldown(usr, gameConf)
data['bless_roll_last_time'] = usr.luckycat['bless_roll_last_time']
data['bless_cycle_begin_time'] = usr.luckycat['bless_cycle_begin_time']
data['bless'] = usr.luckycat['bless']
#data['record'] = usr.luckycat['record']
data['feed_candidate_list'] = usr.luckycat['feed_candidate_list']
data['feed_request_list'] = usr.luckycat['feed_request_list']
return data
    @staticmethod
    def isCritical(usr):
        """
        Roll whether this beckon is a critical hit.

        The chance is the sum of the user's critical points scaled against
        the cat level, fed into the drop() helper.
        """
        criticalPoint = sum(usr.luckycat['critical_point_list'])
        # NOTE(review): on Python 2 this truncates before the *25 when both
        # operands are ints -- confirm the critical points are floats (see
        # hit()) or that integer division is intended here.
        probability = criticalPoint / usr.luckycat['level'] * 25
        return drop(probability)
@staticmethod
def updateBeckon(usr):
"""
更新招财
"""
gameConf = config.getConfig('game')
now = currentTime()
if is_expire(gameConf['luckycat_beckon_count_reset_time'], usr.luckycat['beckon_last_update_time']):
usr.luckycat['beckon_count'] = 0
usr.luckycat['beckon_cooldown'] = 0
usr.luckycat['beckon_last_update_time'] = now
return
elapse = now - usr.luckycat['beckon_last_update_time']
usr.luckycat['beckon_cooldown'] = usr.luckycat['beckon_cooldown'] - elapse
if usr.luckycat['beckon_cooldown'] < 0:
usr.luckycat['beckon_cooldown'] = 0
usr.luckycat['beckon_last_update_time'] = now
@staticmethod
def updateFeed(usr):
"""
更新喂养
"""
gameConf = config.getConfig('game')
now = currentTime()
if is_expire(gameConf['luckycat_beckon_count_reset_time'], usr.luckycat['feed_self_last_time']):
usr.luckycat['feed_self_count'] = 0
usr.luckycat['feed_self_last_time'] = currentTime()
if is_expire(gameConf['luckycat_beckon_count_reset_time'], usr.luckycat['feed_other_last_time']):
usr.luckycat['feed_other_count'] = 0
usr.luckycat['feed_oter_last_time'] = currentTime()
@staticmethod
def onLeveup(usr):
"""
升级喂养
"""
nw = usr.getNetwork()
nw.updateFriendData()
@staticmethod
def onEveryLeveup(usr):
"""
每次升级
"""
gameConf = config.getConfig('game')
luckycatLevelConf = config.getConfig('luckycat_level')
if gameConf['luckycat_level_critical_itme'].count(usr.luckycat['level']):
usr.luckycat['critical_point_list'].append(hit(gameConf['luckycat_critical_point_probability']))
awardGold = luckycatLevelConf[usr.luckycat['level']]['levelupGold']
awardGem = 0
return awardGold, awardGem
@staticmethod
def freshCritical(usr, itemIndex):
"""
更新暴击
"""
gameConf = config.getConfig('game')
goldCost = gameConf['luckycat_critical_item_fresh_price']['gold']
gemCost = gameConf['luckycat_critical_item_fresh_price']['gem']
if usr.gold < goldCost:
return {'msg':'gold_not_enough'}
if usr.gem < gemCost:
return {'msg':'gem_not_enough'}
usr.gold = usr.gold - goldCost
usr.gem = usr.gem - gemCost
usr.luckycat['critical_point_list'][itemIndex] = hit(gameConf['luckycat_critical_point_probability'])
    @staticmethod
    def currentLuckycatFortune():
        """
        Current fortune multiplier for beckoning.

        The fortune table maps day-second thresholds to (low, high) value
        pairs; the entry covering the current time of day is picked and
        the midpoint of its range is returned.
        """
        luckycatFortuneConf = config.getConfig('luckycat_fortune')
        now = currentTime()  # NOTE(review): unused -- dayTime() drives the lookup
        daysecond = dayTime()
        selItem = None
        # entries are assumed sorted by threshold; keep the last one passed
        for item in luckycatFortuneConf:
            if item[0] < daysecond:
                selItem = item
            else:
                break
        # NOTE(review): raises TypeError if no threshold precedes daysecond
        # (selItem stays None) -- confirm the table starts at 0.
        return (selItem[1][0] + selItem[1][1]) / 2
@staticmethod
def notify_bless(usr, blessid):
"""
提示祝福
"""
if not usr.notify.has_key('add_luckycat_bless'):
usr.notify['add_luckycat_bless'] = []
usr.notify['add_luckycat_bless'].append(blessid)
@staticmethod
def notify_request_list_remove(usr, friendid):
"""
提示请求移除
"""
if not usr.notify.has_key('delete_luckcat_request'):
usr.notify['delete_luckcat_request'] = []
usr.notify['delete_luckcat_request'].append(friendid)
@staticmethod
def notify_candidate_add(usr, friendid):
"""
提示添加候选菜单
"""
if not usr.notify.has_key('add_luckycat_candidate'):
usr.notify['add_luckycat_candidate'] = []
usr.notify['add_luckycat_candidate'].append(friendid) |
# Read n rounds of (a, b) points for two teams, keep cumulative scores, and
# print the team that held the single largest lead plus that lead.
n = int(input())
leadt = winner = 0
score1 = score2 = 0
# Fix: the loop variable used to be `n`, shadowing the round count read
# above; renamed to `_` since the index itself is unused.
for _ in range(n):
    a, b = [int(x) for x in input().split()]
    score1 += a
    score2 += b
    lead = abs(score1 - score2)
    if lead > leadt:
        leadt = lead
        winner = 1 if score1 > score2 else 2
print(winner, leadt)
# Demo: draw a thick teal line chart, then overlay a red horizontal segment.
import matplotlib.pyplot as plt
# Sample x/y data points
x = [7, 14, 21, 28, 35, 42, 49]
y = [8, 13, 21, 30, 31, 44, 50]
# Solid teal line, width 7.
# Supported linestyle values: '-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted'
plt.plot(x, y, linestyle='-', linewidth=7, color='#0abab5')
#plt.show()
# Red segment from (80, 100) to (20, 100) -- note x runs right-to-left here.
plt.plot([80, 20], [100, 100], 'r', linewidth=5)
plt.show()
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class AuthUserCreateParams(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Local fixes applied on top of the generated code:
    - ``__eq__`` guards against non-model operands (the generated version
      dereferenced ``other.__dict__``, so ``obj == None`` raised
      AttributeError instead of returning False);
    - ``to_dict`` iterates with ``dict.items()`` instead of
      ``six.iteritems`` (identical behavior, no runtime six dependency).
    """
    def __init__(self):
        """
        AuthUserCreateParams - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        self.swagger_types = {
            'email': 'str',
            'enabled': 'bool',
            'expiry': 'int',
            'gecos': 'str',
            'home_directory': 'str',
            'password': 'str',
            'password_expires': 'bool',
            'primary_group': 'GroupMember',
            'prompt_password_change': 'bool',
            'shell': 'str',
            'sid': 'str',
            'uid': 'int',
            'unlock': 'bool',
            'name': 'str'
        }
        self.attribute_map = {
            'email': 'email',
            'enabled': 'enabled',
            'expiry': 'expiry',
            'gecos': 'gecos',
            'home_directory': 'home_directory',
            'password': 'password',
            'password_expires': 'password_expires',
            'primary_group': 'primary_group',
            'prompt_password_change': 'prompt_password_change',
            'shell': 'shell',
            'sid': 'sid',
            'uid': 'uid',
            'unlock': 'unlock',
            'name': 'name'
        }
        self._email = None
        self._enabled = None
        self._expiry = None
        self._gecos = None
        self._home_directory = None
        self._password = None
        self._password_expires = None
        self._primary_group = None
        self._prompt_password_change = None
        self._shell = None
        self._sid = None
        self._uid = None
        self._unlock = None
        self._name = None
    @property
    def email(self):
        """
        Gets the email of this AuthUserCreateParams.
        Specifies an Email address.
        :return: The email of this AuthUserCreateParams.
        :rtype: str
        """
        return self._email
    @email.setter
    def email(self, email):
        """
        Sets the email of this AuthUserCreateParams.
        Specifies an Email address.
        :param email: The email of this AuthUserCreateParams.
        :type: str
        """
        self._email = email
    @property
    def enabled(self):
        """
        Gets the enabled of this AuthUserCreateParams.
        Auth user is enabled.
        :return: The enabled of this AuthUserCreateParams.
        :rtype: bool
        """
        return self._enabled
    @enabled.setter
    def enabled(self, enabled):
        """
        Sets the enabled of this AuthUserCreateParams.
        Auth user is enabled.
        :param enabled: The enabled of this AuthUserCreateParams.
        :type: bool
        """
        self._enabled = enabled
    @property
    def expiry(self):
        """
        Gets the expiry of this AuthUserCreateParams.
        Epoch time at which the auth user will expire.
        :return: The expiry of this AuthUserCreateParams.
        :rtype: int
        """
        return self._expiry
    @expiry.setter
    def expiry(self, expiry):
        """
        Sets the expiry of this AuthUserCreateParams.
        Epoch time at which the auth user will expire.
        :param expiry: The expiry of this AuthUserCreateParams.
        :type: int
        """
        self._expiry = expiry
    @property
    def gecos(self):
        """
        Gets the gecos of this AuthUserCreateParams.
        Sets GECOS value (usually full name).
        :return: The gecos of this AuthUserCreateParams.
        :rtype: str
        """
        return self._gecos
    @gecos.setter
    def gecos(self, gecos):
        """
        Sets the gecos of this AuthUserCreateParams.
        Sets GECOS value (usually full name).
        :param gecos: The gecos of this AuthUserCreateParams.
        :type: str
        """
        self._gecos = gecos
    @property
    def home_directory(self):
        """
        Gets the home_directory of this AuthUserCreateParams.
        Specifies user's home directory.
        :return: The home_directory of this AuthUserCreateParams.
        :rtype: str
        """
        return self._home_directory
    @home_directory.setter
    def home_directory(self, home_directory):
        """
        Sets the home_directory of this AuthUserCreateParams.
        Specifies user's home directory.
        :param home_directory: The home_directory of this AuthUserCreateParams.
        :type: str
        """
        self._home_directory = home_directory
    @property
    def password(self):
        """
        Gets the password of this AuthUserCreateParams.
        Changes user's password.
        :return: The password of this AuthUserCreateParams.
        :rtype: str
        """
        return self._password
    @password.setter
    def password(self, password):
        """
        Sets the password of this AuthUserCreateParams.
        Changes user's password.
        :param password: The password of this AuthUserCreateParams.
        :type: str
        """
        self._password = password
    @property
    def password_expires(self):
        """
        Gets the password_expires of this AuthUserCreateParams.
        Specifies whether the password expires.
        :return: The password_expires of this AuthUserCreateParams.
        :rtype: bool
        """
        return self._password_expires
    @password_expires.setter
    def password_expires(self, password_expires):
        """
        Sets the password_expires of this AuthUserCreateParams.
        Specifies whether the password expires.
        :param password_expires: The password_expires of this AuthUserCreateParams.
        :type: bool
        """
        self._password_expires = password_expires
    @property
    def primary_group(self):
        """
        Gets the primary_group of this AuthUserCreateParams.
        A persona consists of either a 'type' and 'name' or a 'ID'.
        :return: The primary_group of this AuthUserCreateParams.
        :rtype: GroupMember
        """
        return self._primary_group
    @primary_group.setter
    def primary_group(self, primary_group):
        """
        Sets the primary_group of this AuthUserCreateParams.
        A persona consists of either a 'type' and 'name' or a 'ID'.
        :param primary_group: The primary_group of this AuthUserCreateParams.
        :type: GroupMember
        """
        self._primary_group = primary_group
    @property
    def prompt_password_change(self):
        """
        Gets the prompt_password_change of this AuthUserCreateParams.
        Prompts the user to change their password on next login.
        :return: The prompt_password_change of this AuthUserCreateParams.
        :rtype: bool
        """
        return self._prompt_password_change
    @prompt_password_change.setter
    def prompt_password_change(self, prompt_password_change):
        """
        Sets the prompt_password_change of this AuthUserCreateParams.
        Prompts the user to change their password on next login.
        :param prompt_password_change: The prompt_password_change of this AuthUserCreateParams.
        :type: bool
        """
        self._prompt_password_change = prompt_password_change
    @property
    def shell(self):
        """
        Gets the shell of this AuthUserCreateParams.
        Specifies the user's shell.
        :return: The shell of this AuthUserCreateParams.
        :rtype: str
        """
        return self._shell
    @shell.setter
    def shell(self, shell):
        """
        Sets the shell of this AuthUserCreateParams.
        Specifies the user's shell.
        :param shell: The shell of this AuthUserCreateParams.
        :type: str
        """
        self._shell = shell
    @property
    def sid(self):
        """
        Gets the sid of this AuthUserCreateParams.
        A security identifier.
        :return: The sid of this AuthUserCreateParams.
        :rtype: str
        """
        return self._sid
    @sid.setter
    def sid(self, sid):
        """
        Sets the sid of this AuthUserCreateParams.
        A security identifier.
        :param sid: The sid of this AuthUserCreateParams.
        :type: str
        """
        self._sid = sid
    @property
    def uid(self):
        """
        Gets the uid of this AuthUserCreateParams.
        A numeric user identifier.
        :return: The uid of this AuthUserCreateParams.
        :rtype: int
        """
        return self._uid
    @uid.setter
    def uid(self, uid):
        """
        Sets the uid of this AuthUserCreateParams.
        A numeric user identifier.
        :param uid: The uid of this AuthUserCreateParams.
        :type: int
        """
        self._uid = uid
    @property
    def unlock(self):
        """
        Gets the unlock of this AuthUserCreateParams.
        Unlocks the user's account if locked.
        :return: The unlock of this AuthUserCreateParams.
        :rtype: bool
        """
        return self._unlock
    @unlock.setter
    def unlock(self, unlock):
        """
        Sets the unlock of this AuthUserCreateParams.
        Unlocks the user's account if locked.
        :param unlock: The unlock of this AuthUserCreateParams.
        :type: bool
        """
        self._unlock = unlock
    @property
    def name(self):
        """
        Gets the name of this AuthUserCreateParams.
        A user name.
        :return: The name of this AuthUserCreateParams.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this AuthUserCreateParams.
        A user name.
        :param name: The name of this AuthUserCreateParams.
        :type: str
        """
        self._name = name
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Fix: comparing against a non-model (e.g. None) used to raise
        AttributeError on ``other.__dict__``; it now returns False.
        """
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
# -*- coding:utf-8 -*-
# Author: Jorden Hai
# Demo of list slicing and shallow vs. deep copying.
import copy
names = ["ZhangYang","GuYun","XuLiangchen",['alex','jack'],"YangZhe","ChenZhonghua","ZhaoZi"]
# every second element, starting at index 0
print(names[::2])
for i in names[::2]:
    print(i)
'''
names2 = copy.deepcopy(names)
names[1] = '向鹏'
print(names)
print(names2)
names[3][0] = 'Alex'
names2[3][1] = 'Jack'
'''
# A shallow copy
# only copies the first level;
# deeper levels are shared with the original --
# for a list nested inside the list, copy only duplicates the reference!!
# deepcopy is the real full copy
'''
names.append("YangZhe")
names.append("LeiHaidong")
names.insert(1,"ChenZhonghua")
names.insert(3,"ZhaoZi")
names[2] = 'XieDi'
print(names[names.index("XieDi")])
names.append("ChenZhonghua")
print(names.count('ChenZhonghua'))
count = 0
for name in names:
    if name == "ChenZhonghua":
        count = count +1
print(count)
print(names)
names.reverse()
names.sort() #ASCII码排序
names.extend(names2)
del names[1]
names.pop(1)
names.remove("XieDi")
names.pop(1)
names.remove("ZhangYang")
newnames = names.copy()
print(newnames)
print(enumerate(names))
for index,value in enumerate(names):
    print(index,value)
print(names[len(names)-1])
print(names[-1])
print(names[-2:])
'''
import itertools
import numpy
import pytest
from helpers import *
from tigger.reduce import Reduce
import tigger.cluda.dtypes as dtypes
# Test shapes of rank 1-3, paired with every axis (or None) that is valid
# for that rank; ids render as "shape,axis" for readable pytest output.
shapes = [
    (2,), (13,), (1535,), (512 * 231,),
    (140, 3), (13, 598), (1536, 789),
    (5, 15, 19), (134, 25, 23), (145, 56, 178)]
shapes_and_axes = [
    (shape, axis)
    for shape, axis in itertools.product(shapes, [None, 0, 1, 2])
    if axis is None or axis < len(shape)]
shapes_and_axes_ids = ["%s,%s" % pair for pair in shapes_and_axes]
@pytest.mark.parametrize(('shape', 'axis'), shapes_and_axes, ids=shapes_and_axes_ids)
def test_normal(ctx, shape, axis):
    """
    Device-side sum reduction over every (shape, axis) combination must
    match numpy's ``a.sum(axis)``.  ``ctx`` is the CLUDA context fixture.
    """
    rd = Reduce(ctx)
    a = get_test_array(shape, numpy.int64)
    a_dev = ctx.to_device(a)
    b_ref = a.sum(axis)
    # a full reduction (axis=None) yields a 0-d array; promote it to 1-d
    # so its shape matches the device output buffer
    if len(b_ref.shape) == 0:
        b_ref = numpy.array([b_ref], numpy.int64)
    b_dev = ctx.allocate(b_ref.shape, numpy.int64)
    rd.prepare_for(b_dev, a_dev, axis=axis,
        code=dict(kernel="return input1 + input2;"))
    rd(b_dev, a_dev)
    assert diff_is_negligible(b_dev.get(), b_ref)
def test_nontrivial_function(ctx):
    """
    The reduction kernel may call a user-supplied helper: here the kernel
    delegates to ``test()`` (declared in ``functions``) and the result
    must still equal numpy's column sum.
    """
    rd = Reduce(ctx)
    shape = (100, 100)
    a = get_test_array(shape, numpy.int64)
    a_dev = ctx.to_device(a)
    b_ref = a.sum(0)
    b_dev = ctx.allocate((100,), numpy.int64)
    rd.prepare_for(b_dev, a_dev, axis=0,
        code=dict(
            kernel="return test(input1, input2);",
            functions="""
            WITHIN_KERNEL ${output.ctype} test(${input.ctype} val1, ${input.ctype} val2)
            {
                return val1 + val2;
            }
            """))
    rd(b_dev, a_dev)
    assert diff_is_negligible(b_dev.get(), b_ref)
|
import os
import json
from dotenv import load_dotenv
from google.cloud import pubsub_v1
from google.oauth2 import service_account
import googleapiclient.discovery
# Load credentials/paths from .env and build the Android Publisher client
# used by callback() to verify Play subscription purchases.
load_dotenv(verbose=True)
GOOGLE_APPLICATION_CREDENTIALS = os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
SERVICE_ACCOUNT_FILE = os.getenv('SERVICE_ACCOUNT_FILE')
# NOTE(review): os.getenv returns None when a variable is unset, and the
# string concatenation below would then raise TypeError -- consider
# failing fast with an explicit error message.
print('GOOGLE_APPLICATION_CREDENTIALS :' + GOOGLE_APPLICATION_CREDENTIALS)
print('SERVICE_ACCOUNT_FILE :' + SERVICE_ACCOUNT_FILE)
# TODO(developer)
# project_id = "geometric-edge-659" #firebase-relaystory
# topic_id = "play-noti"
# subs_id = "play-subs"
# publisher = pubsub_v1.PublisherClient()
# topic_path = publisher.topic_path(project_id, topic_id)
# topic = publisher.create_topic(request={"name": topic_path})
# print("Created topic: {}".format(topic.name))
#
#
#
SCOPES = ['https://www.googleapis.com/auth/androidpublisher']
# SERVICE_ACCOUNT_FILE = '/path/to/service.json'
credentials = service_account.Credentials.from_service_account_file(
    SERVICE_ACCOUNT_FILE, scopes=SCOPES)
androidpublisher = googleapiclient.discovery.build('androidpublisher', 'v3', credentials=credentials)
# packageName="com.lab78.BabySootherSeal"
# subscriptionId="pro_subs_15"
# purchaseToken="ghfihlhbeibmodjmaiooehii.AO-J1OzgKLfLWuN49wBns0uxcuap-s_5B-aTGtLctJcmu6yy47ibU9GFBvM4iiLLmGnYT6gm48NZBL57NLNqX3gDakpzte_its59QmOv6tqiLPkQJjbpkTM"
# packageName="com.lab78.BabySootherSEALFree"
# subscriptionId="pro_subs_15"
# purchaseToken="kejojkbhnoclghgkchdngafn.AO-J1OzNlcz4AShjEJY2ikXXT3ZqZlh1osOixkQdOWMHosJFw4uhwr8McjQarC0qsPbiIUt4eZJk-iYFLxqIgFZCHpUe9NY9G3BwaV00Ywzm-qraif5i8Z4"
# single-flight flag: 1 = free to call the Publisher API, 0 = call in flight
CANJOB = 1
topic_name = "projects/geometric-edge-659/topics/play-noti"
subscription_name = "projects/geometric-edge-659/subscriptions/play-subs"
subscriber = pubsub_v1.SubscriberClient()
# subscriber.create_subscription(
#     name=subscription_name, topic=topic_name)
def callback(message):
    """
    Pub/Sub handler for Play subscription notifications.

    Decodes the message and -- when no other verification is in flight
    (CANJOB flag) -- queries the Android Publisher API for the
    subscription's current state.  The message is acked in all cases.

    Fix: the Publisher call is wrapped in try/finally, so an API error can
    no longer leave CANJOB stuck at 0 (which silently disabled all future
    verifications) or skip the ack.
    """
    global CANJOB  # crude single-flight flag shared across callbacks
    print(message.data)
    print('CANJOB:' + str(CANJOB))
    decoded_data = json.loads(message.data)
    packageName = decoded_data["packageName"]
    subscription_obj = decoded_data["subscriptionNotification"]
    purchaseToken = subscription_obj["purchaseToken"]
    subscriptionId = subscription_obj["subscriptionId"]
    print('packageName subscriptionId purchaseToken:' + packageName + " , " + subscriptionId + " , " + purchaseToken)
    if CANJOB == 1:
        print("canJob start!")
        CANJOB = 0
        try:
            response = androidpublisher.purchases().subscriptions().get(
                packageName=packageName, subscriptionId=subscriptionId, token=purchaseToken).execute()
            print(response)
        finally:
            CANJOB = 1
            print("canJob end!")
    else:
        print("canJob = false")
    message.ack()
# Start the streaming pull and block the main thread; Ctrl-C cancels it.
future = subscriber.subscribe(subscription_name, callback)
try:
    future.result()
except KeyboardInterrupt:
    future.cancel()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Turtle demo: draw a 10000-step colorful square spiral on a black canvas.
# (A large block of older, fully commented-out turtle experiments --
# a rectangle walk and a recursive color tree -- was removed as dead code.)
import turtle as tt
from random import randint
# Reset the "already running" guard so the script can be re-run in one session.
tt.TurtleScreen._RUNNING = True
tt.speed(0)  # fastest drawing speed
tt.bgcolor("black")  # black background
tt.setpos(-25, 25)  # shift the start so the pattern ends up centered
tt.colormode(255)  # RGB color mode, 0-255 per channel
cnt = 0
while cnt < 10000:
    # pick a random pen color for each segment
    r = randint(0, 255)
    g = randint(0, 255)
    b = randint(0, 255)
    tt.pencolor(r, g, b)
    # 91 degrees (not 90) turns the growing square into a spiral
    tt.forward(50 + cnt)
    tt.right(91)
    cnt += 1
tt.done()
from unittest import TestCase
from mdat import core
__author__ = 'pbc'
class TestFuzzyMeasure(TestCase):
    """Unit tests for mdat.core.FuzzyMeasure construction and helpers."""
    def test_init(self):
        """An empty measure has no members; criteria populate them."""
        # self.list_of_members = frozenset([])
        fm = core.FuzzyMeasure()
        self.assertEqual(len(fm.list_of_members), 0)
        # NOTE(review): this second empty-constructor check duplicates the
        # first one exactly.
        fm = core.FuzzyMeasure()
        self.assertEqual(len(fm.list_of_members), 0)
        criteria = {'c1': .9, 'c2': 1, 'c3': .6}
        fm = core.FuzzyMeasure(criteria)
        self.assertEqual(len(fm.list_of_members),3)
    def test_store_criteria(self):
        """store_criteria records both the member labels and the weights."""
        fm = core.FuzzyMeasure()
        criteria = {'c1': .9, 'c2': 1, 'c3': .6}
        fm.store_criteria(criteria)
        criteria_labels = set(['c1','c2','c3'])
        self.assertSetEqual(fm.list_of_members,criteria_labels)
        self.assertDictEqual(fm.criteria,criteria)
    def test_make_all_subsets(self):
        """The power set of n members has 2**n subsets (n=13 here)."""
        list_of_members = set([])
        fm = core.FuzzyMeasure()
        fm.list_of_members = frozenset(['a','b','c','d','e','f','g','h','i','j','k','l','m'])
        fm.make_all_subsets()
        self.assertEqual(len(fm.set_of_all_subsets), 2**len(fm.list_of_members))
    def test_set_fm_for_trivial_cases(self):
        """mu(empty set) == 0 and mu(full set) == 1 by definition."""
        fm = core.FuzzyMeasure()
        fm.list_of_members = frozenset([1,2,3,4,5])
        fm.make_all_subsets()
        fm.set_fm_for_trivial_cases()
        self.assertEqual(fm.mu[frozenset([])],0)
        self.assertEqual(fm.mu[fm.list_of_members],1)
    def test_set_fm_for_singleton_sets(self):
        """Singleton measures are the normalized criteria weights."""
        # initialize FuzzyMeasure instance
        fm = core.FuzzyMeasure()
        criteria = {'c1': .9, 'c2': 1, 'c3': .6}
        fm.store_criteria(criteria)
        fm.make_all_subsets()
        fm.set_fm_for_trivial_cases()
        # test function of interest
        fm.set_fm_for_singleton_sets()
        self.assertEqual(fm.mu[frozenset(['c1'])],0.36)
        self.assertEqual(fm.mu[frozenset(['c2'])],0.4)
        self.assertEqual(fm.mu[frozenset(['c3'])],0.24)
    def test_set_fm_for_complex_sets(self):
        """Constructing a measure over four criteria should not raise."""
        # initialize FuzzyMeasure instance
        criteria = {'c1': .9, 'c2': .8, 'c3': .6, 'c4': .2}
        fm = core.FuzzyMeasure(criteria)
        #print fm.mu
        # NOTE(review): placeholder assertion -- this test only verifies
        # the constructor runs; no complex-set measure values are checked.
        self.assertEqual(1, 1)
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class SqliteToolfile(Package):
    """Spack package that emits a SCRAM toolfile describing the resolved
    sqlite dependency (version, prefix, lib/bin/include paths)."""
    # placeholder archive: this package only generates an XML file
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('sqlite')
    def install(self, spec, prefix):
        """Render sqlite.xml from the template below, substituting the
        resolved sqlite version and install prefix, and write it under
        ``prefix`` via write_scram_toolfile."""
        values = {}
        values['VER'] = spec['sqlite'].version
        values['PFX'] = spec['sqlite'].prefix
        fname = 'sqlite.xml'
        # '$$' escapes produce literal '$VAR' references in the SCRAM XML;
        # single '$' placeholders are substituted from ``values``.
        contents = str("""<tool name="sqlite" version="$VER">
  <lib name="sqlite3"/>
  <client>
    <environment name="SQLITE_BASE" default="$PFX"/>
    <environment name="LIBDIR" default="$$SQLITE_BASE/lib"/>
    <environment name="BINDIR" default="$$SQLITE_BASE/bin"/>
    <environment name="INCLUDE" default="$$SQLITE_BASE/include"/>
  </client>
  <runtime name="PATH" value="$$BINDIR" type="path"/>
  <runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
  <use name="root_cxxdefaults"/>
</tool>""")
        write_scram_toolfile(contents, values, fname, prefix)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.box_utils import match, log_sum_exp
from data import cfg
import numpy as np
GPU = cfg['gpu_train']
class MultiBoxLoss(nn.Module):
    """SSD Weighted Loss Function.

    Compute Targets:
        1) Produce confidence target indices by matching ground truth boxes
           with (default) 'priorboxes' that have jaccard index > threshold
           parameter (default threshold: 0.5).
        2) Produce localization targets by 'encoding' variance into offsets of
           ground truth boxes and their matched 'priorboxes'.
        3) Hard negative mining to filter the excessive number of negative
           examples that comes with using a large number of default bounding
           boxes. (default negative:positive ratio 3:1)

    Objective Loss:
        L(x,c,l,g) = (Lconf(x, c) + alpha * Lloc(x,l,g)) / N
        where Lconf is the cross-entropy loss, Lloc is the SmoothL1 loss
        weighted by alpha (set to 1 by cross validation), and:
            c: class confidences
            l: predicted boxes
            g: ground truth boxes
            N: number of matched default boxes

    See: https://arxiv.org/pdf/1512.02325.pdf for more details.
    """

    def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target):
        super(MultiBoxLoss, self).__init__()
        self.num_classes = num_classes
        self.threshold = overlap_thresh
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.do_neg_mining = neg_mining
        # Ratio of mined negatives to positives (e.g. 7 -> 7 negatives per positive).
        self.negpos_ratio = neg_pos
        self.neg_overlap = neg_overlap
        # Variances used when encoding box offsets (must agree with box_utils.match).
        self.variance = [0.1, 0.2]

    def forward(self, predictions, priors, targets):
        """Multibox Loss.

        Args:
            predictions (tuple): (loc preds, conf preds) from the SSD net.
                loc shape:  torch.Size(batch_size, num_priors, 4)
                conf shape: torch.Size(batch_size, num_priors, num_classes)
            priors (tensor): prior boxes, shape torch.Size(num_priors, 4).
            targets (list of tensors): ground truth boxes and labels per image,
                each of shape [num_objs, 5] (last column is the label).

        Returns:
            (loss_l, loss_c): localization and confidence losses, each
            normalized by the number of matched (positive) priors.
        """
        loc_data, conf_data = predictions
        num = loc_data.size(0)            # batch size
        num_priors = priors.size(0)       # total number of anchors

        # Match priors (default boxes) with ground truth boxes; `match` fills
        # loc_t/conf_t in place for each image index.
        loc_t = torch.Tensor(num, num_priors, 4)
        conf_t = torch.LongTensor(num, num_priors)
        for idx in range(num):
            truths = targets[idx][:, :-1].data
            labels = targets[idx][:, -1].data
            defaults = priors.data
            match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx)
        if GPU:
            loc_t = loc_t.cuda()
            conf_t = conf_t.cuda()

        # Positive priors are those matched to a foreground class (label > 0).
        pos = conf_t > 0  # shape (num, num_priors)

        # Localization loss (Smooth L1) over positive priors only.
        # loc_p: predicted offsets; loc_t: encoded ground-truth offsets.
        pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
        loc_p = loc_data[pos_idx].view(-1, 4)
        loc_t = loc_t[pos_idx].view(-1, 4)
        loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')

        # Per-prior classification loss, used only to rank priors for hard
        # negative mining: log_sum_exp(conf) - conf[target class].
        batch_conf = conf_data.view(-1, self.num_classes)
        loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))

        # Hard negative mining: zero out positives, then rank the remaining
        # priors by loss. The double sort yields, for each prior, its rank in
        # descending-loss order; priors with rank < num_neg are kept.
        loss_c[pos.view(-1, 1)] = 0  # filter out pos boxes for now
        loss_c = loss_c.view(num, -1)
        _, loss_idx = loss_c.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)
        num_pos = pos.long().sum(1, keepdim=True)
        # Cap negatives so num_neg never reaches the full prior count.
        num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
        neg = idx_rank < num_neg.expand_as(idx_rank)

        # Confidence loss over positives plus mined negatives.
        pos_idx = pos.unsqueeze(2).expand_as(conf_data)
        neg_idx = neg.unsqueeze(2).expand_as(conf_data)
        conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
        targets_weighted = conf_t[(pos+neg).gt(0)]
        loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')

        # Normalize both losses by the number of positives (at least 1 to
        # avoid division by zero on images with no matches).
        N = max(num_pos.data.sum().float(), 1)
        loss_l /= N
        loss_c /= N
        return loss_l, loss_c
|
# Copyright 2018 Cable Television Laboratories, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from api_http import ApiHttp
from drp_python.exceptions.http_exceptions import AuthorizationError, \
ConnectionError
from drp_python.exceptions.drb_exceptions import NotFoundError
from drp_python.model_layer.machine_model import MachineModel
import logging
logger = logging.getLogger('drp-mach-trans')
class MachineTranslation(ApiHttp):
    """All HTTP based API calls related to Machines."""

    def __init__(self, session):
        super(MachineTranslation, self).__init__(session)
        logger.info('__init__')

    def get_machine(self, machine_uuid):
        """Fetch a single machine by UUID and return it as a client model."""
        logger.info('get_machine')
        return convert_to_client(self.session.get('machines', machine_uuid))

    def get_machine_by_name(self, machine_name):
        """Fetch the first machine whose Name matches, or raise NotFoundError."""
        logger.debug('get_machine_by_name')
        matches = self.session.get('machines?Name=' + machine_name)
        if not matches:
            raise NotFoundError('Test', 'Test')
        return convert_to_client(matches[0])

    def create_machine(self, machine_config_model):
        """Create a machine from a client model; return the created model."""
        logger.info('create_machine')
        payload = convert_to_drp(machine_config_model)
        created = self.session.post('machines', payload)
        machine_model = convert_to_client(created)
        logger.info('Created ' + machine_model.name)
        return machine_model

    def update_machine(self, machine_config_model, machine_uuid):
        """Replace an existing machine's definition; return the updated model."""
        logger.info('update_machine')
        payload = convert_to_drp(machine_config_model)
        updated = self.session.put('machines', payload, machine_uuid)
        machine_model = convert_to_client(updated)
        logger.info('Updated ' + machine_uuid)
        return machine_model

    def delete_machine(self, machine_uuid):
        """Delete a machine by UUID. Returns None."""
        logger.info('delete_machine')
        self.session.delete('machines', machine_uuid)
        logger.info('Deleted ' + machine_uuid)
        return

    def add_machine_params(self, params_config_model, machine_uuid):
        """Attach one parameter to a machine and return the refreshed model."""
        logger.info('add params to machine')
        self.session.post(
            'machines/' + machine_uuid + '/params/' + params_config_model.name,
            params_config_model.value)
        machine_model = self.get_machine(machine_uuid)
        logger.info('Updated ' + machine_uuid)
        return machine_model
def convert_to_drp(machine_model):
    """Map a client MachineModel onto the dict shape the DRP API expects."""
    logger.info('convert_to_drp')
    drp_object = dict(
        Address=machine_model.ip,
        Description=machine_model.type,
        HardwareAddrs=[machine_model.mac],
        Name=machine_model.name,
        OS=machine_model.os,
        Runnable=True,
        Workflow=machine_model.workflow,
    )
    logger.info('Converted client to drp')
    logger.info(drp_object)
    return drp_object
def convert_to_client(drp_object):
    """Convert a raw DRP machine dict into a client MachineModel.

    Missing keys map to None via dict.get, so a partial DRP response still
    yields a model.
    """
    logger.info(drp_object)
    mac = drp_object.get('HardwareAddrs')
    # Bug fix: guard against an *empty* HardwareAddrs list as well as a
    # missing key -- the old `if mac is not None: mac = mac[0]` raised
    # IndexError for a machine with no hardware addresses.
    mac = mac[0] if mac else None
    machine_model_dict = {
        'ip': drp_object.get('Address'),
        'mac': mac,
        'name': drp_object.get('Name'),
        'type': drp_object.get('Description'),
        'os': drp_object.get('OS'),
        'uuid': drp_object.get('Uuid'),
        'workflow': drp_object.get('Workflow'),
        'available': drp_object.get('Available'),
        'errors': drp_object.get('Errors'),
        'read_only': drp_object.get('ReadOnly'),
        'validated': drp_object.get('Validated'),
        'params': drp_object.get('Params')
    }
    logger.info('Converted drp to client')
    machine_model = MachineModel(**machine_model_dict)
    logger.info(machine_model)
    return machine_model
def get_all_machines(session):
    """Return every machine known to the endpoint, logging and re-raising
    authorization/connection failures."""
    logger.info('get_all_machines')
    try:
        machines = session.get('machines')
        logger.info('Fetched all machines')
        return machines
    except (AuthorizationError, ConnectionError) as error:
        # Both failure modes are handled identically: log, then propagate.
        logger.error(error)
        raise error
|
from django.db import models
# Create your models here.
class Airport(models.Model):
    """An airport identified by a three-letter code and its city."""
    code = models.CharField(max_length=3)
    city = models.CharField(max_length=64)

    def __str__(self):
        return "{} ({})".format(self.city, self.code)
class Flight(models.Model):
    """A flight between two airports with a duration in minutes."""
    origin = models.ForeignKey(Airport, on_delete=models.CASCADE, related_name="departures")
    destination = models.ForeignKey(Airport, on_delete=models.CASCADE, related_name="arrivals")
    duration = models.IntegerField()

    def is_valid_flight(self):
        # A flight must actually go somewhere else and take non-negative time.
        if self.origin == self.destination:
            return False
        return self.duration >= 0

    def __str__(self):
        return "{}-{} to {}".format(self.id, self.origin, self.destination)
class Passengers(models.Model):
    """A passenger who may be booked on any number of flights."""
    first_name = models.CharField(max_length=64)
    last_name = models.CharField(max_length=64)
    flight = models.ManyToManyField(Flight, blank=True, related_name="passengers")

    def __str__(self):
        return " ".join([self.first_name, self.last_name])
|
from os import makedirs
from datetime import datetime
import numpy as np
from keras.models import model_from_json
from keras.utils import plot_model
from json import dump
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from matplotlib.colors import Normalize
class MidpointNormalize(Normalize):
    """Colormap normalization mapping vmin->0, midpoint->0.5, vmax->1
    by piecewise-linear interpolation."""

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # NOTE(review): `clip` is ignored here, matching the original recipe.
        anchors = [self.vmin, self.midpoint, self.vmax]
        levels = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors, levels))
class Save_model:
    """Persist Keras models and training history, and render diagnostic plots."""

    def __init__(self):
        pass

    def save_keras(self, dir_name, model=None, history=None, save_model_plot=False):
        """Save a Keras model's architecture/weights and its training history.

        Args:
            dir_name: prefix for the run directory created under logs/.
            model: compiled Keras model to save (architecture + weights).
            history: Keras History object; plots are drawn and the raw
                history dict is dumped as JSON.
            save_model_plot: also render the model graph via plot_model.
        """
        # NOTE(review): strftime('%c') yields spaces and colons; colons are
        # invalid in Windows paths -- confirm this only runs on POSIX.
        self.dir_name = 'logs/' + dir_name + datetime.now().strftime('%c')
        makedirs(self.dir_name)
        if model:
            model_json = model.to_json()
            # Bug fix: model.json used to be written to the current working
            # directory; keep it in the run directory alongside model.h5.
            with open("{}/model.json".format(self.dir_name), "w") as json_file:
                json_file.write(model_json)
            # serialize weights to HDF5
            model.save_weights("{}/model.h5".format(self.dir_name))
            print("Saved model to disk")
        if history:
            self.plot_model_scores(history)
            with open('{}/history.json'.format(self.dir_name), 'w') as f:
                dump(history.history, f)
        if save_model_plot and model:
            plot_model(model, to_file='{}/model.png'.format(self.dir_name))

    def _plot_metric(self, history, key, title, ylabel, fname):
        """Plot train/validation curves for one history key and save as PNG."""
        plt.plot(history.history[key])
        plt.plot(history.history['val_' + key])
        plt.grid()
        plt.title(title)
        plt.ylabel(ylabel)
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Test'], loc='upper left')
        plt.savefig('{}/{}'.format(self.dir_name, fname))
        # Start a fresh figure so the next plot does not overlay this one.
        plt.figure()

    def plot_model_scores(self, history):
        """Render accuracy/loss/metric curves for a completed training run."""
        self._plot_metric(history, 'acc', 'Model accuracy', 'Accuracy', 'acc.png')
        self._plot_metric(history, 'loss', 'Model loss', 'Loss', 'loss.png')
        self._plot_metric(history, 'categorical_accuracy', 'categ acc', 'categ_acc', 'cat_acc.png')
        # custom metric same as loss at this moment
        self._plot_metric(history, 'custom_metric', 'custom metric', 'custom_metric', 'custom_metric.png')
        self._plot_metric(history, 'one_class_acc', 'one class acc', 'one class acc', 'one_class_acc.png')

    def plot_conf_matrix(self, yt, yp, train_matrix=False):
        """Plot a confusion matrix heat map.

        Args:
            yt: true labels, one-hot/probability rows (argmax taken per row).
            yp: predicted scores, same shape convention as yt.
            train_matrix: True for the training split (also printed and saved
                as confm.png / train_mtrx.png), False for validation.
        """
        plt.figure()
        confm = confusion_matrix(yt.argmax(axis=1), yp.argmax(axis=1))
        if train_matrix:
            print('train matrix')
            print(confm)
            plt.plot(confm)
            plt.savefig('confm.png')
            plt.figure()
        plt.figure(figsize=(8, 6))
        plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
        plt.imshow(confm, interpolation='nearest', cmap=plt.cm.hot,
                   norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
        plt.xlabel('x')
        plt.ylabel('y')
        plt.colorbar()
        if train_matrix:
            plt.title('train_mtrix')
            plt.savefig('train_mtrx.png')
        else:
            plt.title('valid_mtrix')
            plt.savefig('valid_mtrx.png')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-10-06 01:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ListarEstudiante model to Estudiante.

    Auto-generated by Django; the migration graph depends on this file's
    name and `dependencies`, so avoid hand edits beyond conflict fixes.
    """

    dependencies = [
        ('adminapp', '0009_listarestudiante'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='ListarEstudiante',
            new_name='Estudiante',
        ),
    ]
|
import requests
# Define API constant parameters
API_URI = 'https://gateway.watsonplatform.net/discovery/api'
API_USR = 'raphael.moraglia.@PPTECH.com'
# SECURITY: this API key is hard-coded in source. Move it to an environment
# variable or a secrets store before committing/deploying.
API_KEY = 'WOMqeAx8wH8S-0CX6Nzn3B_4AC8hyqwEz8KbgPgii4jA'
# First authenticate with user
# res = requests.post(API_URI + '/auth/token', json={'email': API_USR, 'password': API_PWD})
# Bug fix: `auth=(API_KEY)` is just a parenthesised string, not a tuple --
# requests' basic auth needs a (username, password) pair. IBM Watson services
# authenticate with the literal username 'apikey' and the key as password.
res = requests.get(API_URI, auth=('apikey', API_KEY))
# Ensure user is authenticated
if res.status_code != 200:
    print('Error, not connected')
    exit(1)
|
#-*- coding: utf-8 -*-
import fcntl
# The (Chinese) notes below say: fcntl is generally used to lock files; the
# links are reference articles; the numbered list enumerates the fcntl cmd
# families: (1) duplicate an fd (F_DUPFD), (2) get/set fd flags
# (F_GETFD/F_SETFD), (3) get/set file status flags (F_GETFL/F_SETFL),
# (4) get/set async I/O ownership (F_GETOWN/F_SETOWN), (5) get/set record
# locks (F_GETLK/F_SETLK/F_SETLKW).
"""
一般用来给文件加锁
referrence:
1. https://zhangnq.com/3284.html
2. https://blog.csdn.net/farsight2009/article/details/55517833
3. https://www.pynote.net/archives/1810 ##valuable##
4. https://www.cnblogs.com/ExMan/p/9769337.html ##pid文件与fcntl的关系
1. 复制一个现有的描述符(cmd=F_DUPFD).
2. 获得/设置文件描述符标记(cmd=F_GETFD或F_SETFD).
3. 获得/设置文件状态标记(cmd=F_GETFL或F_SETFL).
4. 获得/设置异步I/O所有权(cmd=F_GETOWN或F_SETOWN).
5. 获得/设置记录锁(cmd=F_GETLK , F_SETLK或F_SETLKW).
"""
# F_GETFL is the "get file status flags" command code; evaluating it bare is
# a no-op -- the print below shows its numeric value.
fcntl.F_GETFL
print(fcntl.F_GETFL)
import sys
# Read stderr's current status flags.
flags = fcntl.fcntl(sys.stderr.fileno(), fcntl.F_GETFL, 0)
print(flags)
# important usage
# set file object non-block
import os
#fcntl.fcntl(0, fcntl.F_SETFL, os.O_NONBLOCK) #first arg is fd or file_obj
import socket
# Interactive experiment: watch a socket's status flags over its lifetime.
# NOTE(review): requires a server listening on 127.0.0.1:7005 and blocks on
# input() -- this is a manual demo script, not importable library code.
s = socket.socket()
s.connect(("127.0.0.1", 7005))
fd = s.fileno()
print("fd:", fd)
print("F_GETFL:", fcntl.fcntl(fd, fcntl.F_GETFL))
input("send message from server")
print("F_GETFL:", fcntl.fcntl(fd, fcntl.F_GETFL))
print(s.recv(1024))
print("F_GETFL:", fcntl.fcntl(fd, fcntl.F_GETFL))
input("close connection")
print("F_GETFL:", fcntl.fcntl(fd, fcntl.F_GETFL))
s.close()
# Flags can still be queried after close; the fd number itself is stale.
print("F_GETFL:",fcntl.fcntl(fd, fcntl.F_GETFL))
|
class Solution:
    """LeetCode 557: reverse each word of a sentence, keeping word order."""

    def reverse_aword(self, word):
        """Return `word` reversed.

        :type word: str
        :rtype: str
        """
        # Slicing is O(n); the old character-by-character `+=` loop was O(n^2).
        return word[::-1]

    def reverseWords(self, s):
        """Reverse every space-separated word of `s`, preserving word order.

        :type s: str
        :rtype: str
        """
        # join() assembles the result in one pass instead of quadratic `+=`;
        # the trailing strip() matches the original's behavior exactly.
        return " ".join(self.reverse_aword(w) for w in s.split(" ")).strip()
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static, serve
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from . import views
# URL routes for the stories app.
urlpatterns = [
    # Landing page: list of all stories.
    url(r'^$', views.IndexView.as_view(), name='story_list'),
    # url(r'^(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT,}),
    # Story detail page, addressed by numeric primary key.
    url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
    # Catch-all: serve any other path straight from MEDIA_ROOT.
    # NOTE(review): serving media through Django like this is a development
    # convenience -- confirm production serves media from the web server.
    url(r'^(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + staticfiles_urlpatterns()
|
from waitlist.utility.json.waitlist import make_json_fitting, make_json_character
def make_json_account(acc):
    """Serialise an account (and its current character) to a JSON-ready dict."""
    return dict(
        id=acc.id,
        character=make_json_character(acc.current_char_obj),
        username=acc.username,
    )
def make_history_json(entries):
    """Wrap serialised history entries under a 'history' key."""
    serialised = list(map(make_history_entry_json, entries))
    return {'history': serialised}
def make_history_entry_json(entry):
    """Serialise one history entry, including its fittings and both parties."""
    # source may legitimately be absent (system-generated entries).
    source_json = None
    if entry.source is not None:
        source_json = make_json_account(entry.source)
    fittings_json = [make_json_fitting(fit) for fit in entry.fittings]
    return {
        'historyID': entry.historyID,
        'action': entry.action,
        'time': entry.time,
        'exref': entry.exref,
        'fittings': fittings_json,
        'source': source_json,
        'target': make_json_character(entry.target),
    }
|
#Dictionary
'''
* Key & value pair
* (Key, value) is an item
* Addressing unordered manner
* Random access possible
* Duplicate key cannot be possible
* Mutable, value can be modified, item can be removed
Ex:
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank"}
print(MyDict)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank'}
---------------
#Dictionary access using indexing
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
print(MyDict["fname"])
print(MyDict["lname"])
print(MyDict[1])
print(MyDict["age"])
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
jeeva
madhu
rank
29
---------------------------
#Delete Dictionary variable
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
del MyDict
print(MyDict)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
Traceback (most recent call last):
File "C:/Users/HP1/AppData/Local/Programs/Python/Python37/Dictionary.py", line 48, in <module>
print(MyDict)
NameError: name 'MyDict' is not defined
---------------------------
#length of Dictionary variable
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
print(len(MyDict))
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
4
-------------------------
#Append
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
MyDict["Gendre"]="Male"
print(MyDict)
MyDict["fname"]="Balan" # duplicate key append will override the value, duplicate key will not work
print(MyDict)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29, 'Gendre': 'Male'}
{'fname': 'Balan', 'lname': 'madhu', 1: 'rank', 'age': 29, 'Gendre': 'Male'}
--------------------
#Built-in functions
print(dir(dict))
['__class__', '__contains__', '__delattr__', '__delitem__', '__dir__',
'__doc__', '__eq__', '__format__', '__ge__', '__getattribute__',
'__getitem__', '__gt__', '__hash__', '__init__', '__init_subclass__',
'__iter__', '__le__', '__len__', '__lt__', '__ne__', '__new__',
'__reduce__', '__reduce_ex__', '__repr__', '__setattr__',
'__setitem__', '__sizeof__', '__str__', '__subclasshook__', 'clear',
'copy', 'fromkeys', 'get', 'items', 'keys', 'pop', 'popitem',
'setdefault', 'update', 'values']
----------------------------------
#Clear
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
MyDict.clear()
print(MyDict)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
{}
-----------------------------------
#Copy
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
X=MyDict.copy()
print(X)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
-----------------------------------
#get
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
X=MyDict.get("lname")
print(X)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
madhu
-----------------------------------
#Items
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
X=MyDict.items()
print(X)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
dict_items([('fname', 'jeeva'), ('lname', 'madhu'), (1, 'rank'), ('age', 29)])
-----------------------------------
#keys
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
X=MyDict.keys()
print(X)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
dict_keys(['fname', 'lname', 1, 'age'])
-----------------------------------
#values
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
X=MyDict.values()
print(X)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
dict_values(['jeeva', 'madhu', 'rank', 29])
-----------------------------------
#pop
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
MyDict.pop("lname") #must give key values since the index is unorder
print(MyDict)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
{'fname': 'jeeva', 1: 'rank', 'age': 29}
-----------------------------------
#popitem
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
MyDict.popitem() #remove random item
print(MyDict)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank'}
-----------------------------------
#SetDefault
MyDict={"fname":"jeeva", 'lname':'madhu',1:"rank", "age":29}
print(MyDict)
MyDict.setdefault("fname","Balan")
print(MyDict)
output:
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
{'fname': 'jeeva', 'lname': 'madhu', 1: 'rank', 'age': 29}
--------------------------------
'''
# Homework
# fromkeys: build a dict whose keys come from a sequence. Duplicate keys are
# collapsed, so the repeated "age" contributes only one key; values default
# to None.
keys = ['fname', 'lname', "age", "rank", "age"]
MyDict = dict.fromkeys(keys)  # no need to create an empty dict first
print(MyDict)
values = ['jeeva', 'madhu', 29, 3]
# zip pairs each (deduplicated, insertion-ordered) key with its value --
# clearer and safer than a hand-rolled index counter, and it cannot run
# past the end of either sequence.
for key, value in zip(MyDict, values):
    MyDict[key] = value
print(MyDict)
|
# Basic operations on a dictionary: lookup, delete, iterate, insert.
user_info = {
    'Maninder Singh': 'singhmaninder1001@gmail.com',
    'Sam': 'sam@gmail.com',
    'Larry': 'larry@gmail.com',
}
# Look up a value by key.
print("Maninder's email address is {}.".format(user_info['Maninder Singh']))
# Deleting a key value pair
del user_info['Larry']
print('\nThere are {} contacts in address book now.'.format(len(user_info)))
# Iterate keys and fetch each value by indexing.
for name in user_info:
    print('Name:', name)
    print('Email:', user_info[name])
# Adding a new key in dictionary
user_info['John'] = 'john@gmail.com'
if 'John' in user_info:
    print("\nJohn's email address is {}.".format(user_info['John']))
|
# acsii code
'''
IAC = 255.chr # "\377" # "\xff" # interpret as command
DONT = 254.chr # "\376" # "\xfe" # you are not to use option
DO = 253.chr # "\375" # "\xfd" # please, you use option
WONT = 252.chr # "\374" # "\xfc" # I won't use option
WILL = 251.chr # "\373" # "\xfb" # I will use option
SB = 250.chr # "\372" # "\xfa" # interpret as subnegotiation
GA = 249.chr # "\371" # "\xf9" # you may reverse the line
EL = 248.chr # "\370" # "\xf8" # erase the current line
EC = 247.chr # "\367" # "\xf7" # erase the current character
AYT = 246.chr # "\366" # "\xf6" # are you there
AO = 245.chr # "\365" # "\xf5" # abort output--but let prog finish
IP = 244.chr # "\364" # "\xf4" # interrupt process--permanently
BREAK = 243.chr # "\363" # "\xf3" # break
DM = 242.chr # "\362" # "\xf2" # data mark--for connect. cleaning
NOP = 241.chr # "\361" # "\xf1" # nop
SE = 240.chr # "\360" # "\xf0" # end sub negotiation
EOR = 239.chr # "\357" # "\xef" # end of record (transparent mode)
ABORT = 238.chr # "\356" # "\xee" # Abort process
SUSP = 237.chr # "\355" # "\xed" # Suspend process
EOF = 236.chr # "\354" # "\xec" # End of file
SYNCH = 242.chr # "\362" # "\xf2" # for telfunc calls
OPT_BINARY = 0.chr # "\000" # "\x00" # Binary Transmission
OPT_ECHO = 1.chr # "\001" # "\x01" # Echo
OPT_RCP = 2.chr # "\002" # "\x02" # Reconnection
OPT_SGA = 3.chr # "\003" # "\x03" # Suppress Go Ahead
OPT_NAMS = 4.chr # "\004" # "\x04" # Approx Message Size Negotiation
OPT_STATUS = 5.chr # "\005" # "\x05" # Status
OPT_TM = 6.chr # "\006" # "\x06" # Timing Mark
OPT_RCTE = 7.chr # "\a" # "\x07" # Remote Controlled Trans and Echo
OPT_NAOL = 8.chr # "\010" # "\x08" # Output Line Width
OPT_NAOP = 9.chr # "\t" # "\x09" # Output Page Size
OPT_NAOCRD = 10.chr # "\n" # "\x0a" # Output Carriage-Return Disposition
OPT_NAOHTS = 11.chr # "\v" # "\x0b" # Output Horizontal Tab Stops
OPT_NAOHTD = 12.chr # "\f" # "\x0c" # Output Horizontal Tab Disposition
OPT_NAOFFD = 13.chr # "\r" # "\x0d" # Output Formfeed Disposition
OPT_NAOVTS = 14.chr # "\016" # "\x0e" # Output Vertical Tabstops
OPT_NAOVTD = 15.chr # "\017" # "\x0f" # Output Vertical Tab Disposition
OPT_NAOLFD = 16.chr # "\020" # "\x10" # Output Linefeed Disposition
OPT_XASCII = 17.chr # "\021" # "\x11" # Extended ASCII
OPT_LOGOUT = 18.chr # "\022" # "\x12" # Logout
OPT_BM = 19.chr # "\023" # "\x13" # Byte Macro
OPT_DET = 20.chr # "\024" # "\x14" # Data Entry Terminal
OPT_SUPDUP = 21.chr # "\025" # "\x15" # SUPDUP
OPT_SUPDUPOUTPUT = 22.chr # "\026" # "\x16" # SUPDUP Output
OPT_SNDLOC = 23.chr # "\027" # "\x17" # Send Location
OPT_TTYPE = 24.chr # "\030" # "\x18" # Terminal Type
OPT_EOR = 25.chr # "\031" # "\x19" # End of Record
OPT_TUID = 26.chr # "\032" # "\x1a" # TACACS User Identification
OPT_OUTMRK = 27.chr # "\e" # "\x1b" # Output Marking
OPT_TTYLOC = 28.chr # "\034" # "\x1c" # Terminal Location Number
OPT_3270REGIME = 29.chr # "\035" # "\x1d" # Telnet 3270 Regime
OPT_X3PAD = 30.chr # "\036" # "\x1e" # X.3 PAD
OPT_NAWS = 31.chr # "\037" # "\x1f" # Negotiate About Window Size
OPT_TSPEED = 32.chr # " " # "\x20" # Terminal Speed
OPT_LFLOW = 33.chr # "!" # "\x21" # Remote Flow Control
OPT_LINEMODE = 34.chr # "\"" # "\x22" # Linemode
OPT_XDISPLOC = 35.chr # "#" # "\x23" # X Display Location
OPT_OLD_ENVIRON = 36.chr # "$" # "\x24" # Environment Option
OPT_AUTHENTICATION = 37.chr # "%" # "\x25" # Authentication Option
OPT_ENCRYPT = 38.chr # "&" # "\x26" # Encryption Option
OPT_NEW_ENVIRON = 39.chr # "'" # "\x27" # New Environment Option
OPT_EXOPL = 255.chr # "\377" # "\xff" # Extended-Options-List
'''
from telnetlib import IAC, DO, DONT, WILL, WONT, SB, SE, TTYPE, NAOFFD, ECHO
import telnetlib, socket, time, threading
class KawaBot:
"""
Python interface to Kawasaki Robotics.
inspired by:
http://code.activestate.com/recipes/52228-remote-control-with-telnetlib/
"""
def __init__(self, host="192.168.0.1", port=10000):
    """Create the telnet client and immediately connect to the controller.

    Args:
        host: controller IP address.
        port: controller telnet port (see controller-generation notes below).

    Note: this constructor performs network I/O via self.connect().
    """
    # If connecting to K-roset D+ controller or earlier controller connect through port: 9105
    # If connecting to K-roset E controller or later controller connect through one of ports: 10000, 10300
    # NOTE: Simulated D+ controller or earlier does NOT support opening additional tcp-ip ports over localhost
    # Movement server will therfor NOT work on D+
    #self.logger = logging.getLogger("kawa")
    self.host = host
    self.port = port
    # Terminal type advertised during telnet option negotiation.
    self.env_term = "VT100"
    self.user = "as"  # khidl also possible, effect unknown. different permissions?
    self.telnet = telnetlib.Telnet()
    self.connect()
def telnet_process_options(self, socket, cmd, opt):
IS = b'\00'
# Inspired by
# https://github.com/rapid7/metasploit-framework/blob/master/lib/msf/core/exploit/telnet.rb
# https://github.com/jquast/x84/blob/cf3dff9be7280f424f6bcb0ea2fe13d16e7a5d97/x84/default/telnet.py
if cmd == WILL and opt == ECHO: #hex:ff fb 01 name:IAC WILL ECHO description:(I will echo)
socket.sendall(IAC + DO + opt) #hex(ff fd 01), name(IAC DO ECHO), descr(please use echo)
elif cmd == DO and opt == TTYPE: #hex(ff fd 18), name(IAC DO TTYPE), descr(please send environment type)
socket.sendall(IAC + WILL + TTYPE) #hex(ff fb 18), name(IAC WILL TTYPE), descr(Dont worry, i'll send environment type)
elif cmd == SB:
socket.sendall(IAC + SB + TTYPE + IS + self.env_term.encode() + IS + IAC + SE)
# hex(ff fa 18 00 b"VT100" 00 ff f0) name(IAC SB TTYPE iS VT100 IS IAC SE) descr(Start subnegotiation, environment type is VT100, end negotation)
elif cmd == SE: # server letting us know sub negotiation has ended
pass # do nothing
else: print("Unexpected telnet negotiation")
def connect(self):
print("Connecting to kawasaki robot")
self.telnet.set_option_negotiation_callback(self.telnet_process_options)
self.telnet.open(self.host, self.port, 1)
time.sleep(0.5) #Allow TELNET negotaion to finish
self.telnet.read_until(b"n: ")
self.telnet.write(self.user.encode() + b"\r\n")
self.telnet.read_until(b">")
print("Connected succesfully\n")
def disconnect(self):
print("Disconnecting")
command = b"signal(-2010)\r\n"
self.telnet.write(command)
time.sleep(1)
print(self.telnet.read_until(b">").decode())
self.telnet.close()
def AS_command(self, command=None):
if command == None:
print("No command specified, check kawa documentation")
return
command = command.encode + b"\r\n"
self.telnet.write(command)
self.telnet.read_until(b">")
def load_as_file(self, file_location=None):
max_chars = 492 # Max amount of characters that can be accepted per write to kawa.
if file_location != None:
print("Transfering {} to kawasaki".format(file_location))
inputfile = open(file_location, 'r')
file_text = inputfile.read() # Store Kawa-as code from file in local varianle
text_split = [file_text[i:i+max_chars] for i in range(0, len(file_text), max_chars)] #Split AS code in sendable blocks
print("File consists of {} characters".format(len(file_text)))
self.telnet.write(b"load kawa.as\r\n")
self.telnet.read_until(b'.as').decode("ascii")
self.telnet.write(b'\x02A 0\x17')
self.telnet.read_until(b"\x17")
print("Sending file.... ")
for i in range(0, len(text_split), 1):
self.telnet.write(b'\x02C 0' + text_split[i].encode() + b'\x17')
self.telnet.read_until(b"\x17")
self.telnet.write(b'\x02' + b'C 0' + b'\x1a\x17')
#Finish transfering .as file and start confirmation
self.telnet.read_until(b"Confirm !")
self.telnet.write(b'\r\n')
self.telnet.read_until(b"E\x17")
self.telnet.write(b'\x02' + b'E 0' + b'\x17')
#Read until command prompt and continue
self.telnet.read_until(b">")
print(".... Done, great success!\n")
else: print("No file specified\n")
#Lastknown check, was built and sent to robot
def abort_kill_all(self):
for command in ["pcabort "]:
for i in range(1, 6):
prog_number = str(i) + ":" #should include socket connection ending
self.telnet.write(command.encode() + prog_number.encode() + b"\r\n")
self.telnet.read_until(b">")
for command in ["abort ", "pckill\r\n1", "kill\r\n1"]:
self.telnet.write(command.encode() + b"\r\n")
self.telnet.read_until(b">")
#wont work, wrong command syntax
def payload_weight(self, kg=0, centre_of_mass=(0,0,0)):
command = ("weight " + str(kg) + ", " +
str(centre_of_mass[0]) + ", " +
str(centre_of_mass[1]) + ", " +
str(centre_of_mass[2]) + "\r\n").encode()
self.telnet.write(command)
print(self.telnet.read_until(b">"))
def delete_eveything_in_robot_memory(self):
command = b"sysint\r\n" #was sysinit=initalize system sysint=Clears the memory and initializes the system parameters
self.telnet.write(command)
self.telnet.read_until(b"(Yes:1, No:0)")
command = b"1\r\n"
self.telnet.write(command)
self.telnet.read_until(b">")
def motor_power_on(self):
command = b"External motor Power on\r\n"#pg 13ASGuide
self.telnet.write(command)
self.telnet.read_until(b">")
def motor_power_off(self):
command = b"External motor Power off\r\n"
self.telnet.write(command)
self.telnet.read_until(b">")
def get_status(self):
command = b"status\r\n"
self.telnet.write(command)
print(self.telnet.read_until(b">").decode("ascii"))
def get_kawa_position(self):
command = b"where\r\n"
self.telnet.write(command)
print(self.telnet.read_until(b">").decode("ascii"))
def get_kawa_error(self):
command = b"errorlog\r\n"#pg 244ASGUIDE
self.telnet.write(command)
print(self.telnet.read_until(b">").decode("ascii"))
def reset_error(self):
command = b"ereset\r\n"
self.telnet.write(command)
print(self.telnet.read_until(b">").decode("ascii"))
#cant find command ID
def get_kawa_id(self):
command = b"ID\r\n"
self.telnet.write(command)
print(self.telnet.read_until(b">").decode("ascii"))
def initiate_kawabot(self):
print("Initiating kawabot")
command = b"pcexecute init_kawa\r\n"#CALLS TO .AS!!! PCEXECUTE [program, cycle, step]
self.telnet.write(command)
print(self.telnet.read_until(b"completed").decode("ascii"))
command = b"\r\n"
self.telnet.write(command)
#time.sleep(0.1)
print(self.telnet.read_until(b">").decode("ascii"))
def connect_to_movement_server(self, movement_server_port=9811):
print("Connecting to movement server")
command = b"pcexecute 1: recv_tcp_serv\r\n"#CALLS TO .AS
self.telnet.write(command)
self.telnet.read_until(b">").decode("ascii")
self.movement_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.movement_server.connect((self.host, movement_server_port))
self.movement_seq_num = 0
print("Activating movement server")
command = b"pcexecute 2: read_tcp_buffer\r\n"
self.telnet.write(command)
self.telnet.read_until(b">").decode("ascii")
print("Start moving robot")
command = b"execute move_kawa\r\n"#CALLS TO .AS
self.telnet.write(command)
self.telnet.read_until(b">").decode("ascii")
print("Connected to and activated movement server succesfully!\n")
def jmove(self, joint_0, joint_1, joint_2, joint_3, joint_4, joint_5, speed=100, accuracy=10, acceleration=100, deceleration=100, break_cp=0):
# EXAMPLE joint_trajectory_point = "\x02|\x01|45|11|00|10|400|100|100|0|-40|20|-50|0|10|100|\x03"
msg_start = "\x02|\x01|"
msg_seq_num = "|11|" + str(self.movement_seq_num).zfill(2)
msg_end = msg_seq_num + "|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|{}|\x03".format(
speed, accuracy, acceleration, deceleration, break_cp,
joint_0, joint_1, joint_2, joint_3, joint_4, joint_5)
joint_trajectory_point = msg_start + str(len(msg_end)) + msg_end
try:
self.movement_server.send(joint_trajectory_point.encode())
if self.movement_seq_num == 99:
self.movement_seq_num = 0
else:
self.movement_seq_num += 1
except socket.error:
print("No chooch")
#Define each joint seperately? DRIVE jointnum,angle[speed]
def close_movement_server(self):
self.movement_server.close()
#close socket?
def close_pose_update_server(self):
self.pose_server.close()
#close socket?
def connect_to_pose_update_server(self, pose_update_server_port=23):
print("Connecting to pose update server")
command = b"pcexecute 4: send_pos_serv\r\n"
self.current_pose = 0
self.telnet.write(command)
self.telnet.read_until(b">").decode("ascii")
self.pose_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.pose_server.connect((self.host, pose_update_server_port))
self.pose_server.settimeout(1)
print("Connected to pose update server succesfully!\n")
def get_current_pose(self):
return self.pose_server.recv(1024)
#################################################
############### Testing functions ###############
#################################################
def connect_to_tcp_test(self):
print("Connecting to tcp server")
command = b"pcexe 1: recv_traj_serv\r\n"
self.telnet.write(command)
self.telnet.read_until(b">").decode("ascii")
command = b"pcexe 2: read_per_char\r\n"
self.telnet.write(command)
self.telnet.read_until(b">").decode("ascii")
movement_server_port = 23
self.tcp_test_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_test_server.connect((self.host, movement_server_port))
print("Connected succesfully!\n")
def send_tcp_test_message(self, message="hello!"):
try:
print(message)
self.tcp_test_server.send(message.encode())
except socket.error:
print("No chooch")
def close_to_tcp_test(self):
command = b"signal(-2010)\r\n"
self.telnet.write(command)
time.sleep(1)
print(self.telnet.read_until(b">").decode())
self.tcp_test_server.close() |
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
# Create your models here.
class Member(models.Model):
    """Per-user member profile with basic display details."""
    # on_delete is required on ForeignKey since Django 2.0 (backwards-compatible
    # keyword on 1.x); CASCADE removes the Member row together with its user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    city = models.CharField(max_length=100)
    user_name = models.CharField(max_length=200)
    def __str__(self):
        # Display members by their chosen user_name in the admin and shells.
        return self.user_name
|
from .models import *
from .users import *
from .admin_config import *
|
# -*- coding: utf-8 -*-
from . import crear_faltas_from_retardos
#from . import import_loan
from . import import_logs
from . import wizard_reglas_salariales
from . import calculo_isr_anual
from . import importar_dias_wizard |
import Matrix
import os
from time import sleep
class Matrix:
    """Thin wrapper around common network reconnaissance command-line tools.

    Each method shells out via os.popen and prints the (partial) output.
    Python 2 code: uses print statements and raw_input.
    """
    # Class-level tool names: external binaries invoked through os.popen.
    scanr = 'nmap'
    tracer = 'traceroute'
    ider = 'whois'
    ping = 'ping'
    diger = 'dig'
    # Send a single ping to a host and print one line of the output
    def pingeame(self, host):
        pinger = Matrix.ping
        # -c 1: send a single ICMP echo request
        verificacion_pinger = os.popen(pinger + ' ' +'-c 1 ' + ' ' + host, 'r')
        i = 0
        # Print only line 5 of the ping output (the statistics line on this setup).
        for line in verificacion_pinger:
            i += 1
            if i == 5:
                print line
                print ' '
        sleep(1)
    # Run a traceroute to a host and print the full output
    def traceme(self, host):
        tracer = Matrix.tracer
        verificacion_traceme = os.popen(tracer + ' ' + host, 'r')
        print verificacion_traceme.read(), "\r\n"
        sleep(1)
    # Check a single port on a host with nmap
    def scanme(self, host, port):
        scanr = Matrix.scanr
        verificacion_scan = os.popen(scanr + ' ' + host + ' -p' + ' ' + str(port))
        # FOR loop to read line 5, which contains the scan result
        i = 0
        for line in verificacion_scan:
            i += 1
            if i == 5:
                print line, "\r\n"
        sleep(1)
    # Look up whois information for a host (last 25 lines only)
    def whom(self, host):
        idme = Matrix.ider
        print ''
        d = os.popen(idme + ' ' + host + ' | ' + ' tail -n25 ', 'r')
        print ' '
        print d.read()
        print ' '
        sleep(1)
    # Query DNS records for a host; record type is asked interactively
    def dig(self, host):
        diger = Matrix.diger
        record = str(raw_input('Que record deseas: '))
        print ''
        g = os.popen(diger + ' ' + record.upper() + ' ' + host)
        print g.read()
import jwt
from flask import Flask, render_template_string, request, render_template, redirect, make_response
from jinja2 import Template
import sqlite3
import time
app = Flask(__name__)
# Module-level connection used only to make sure the schema exists at startup;
# each request handler opens its own connection.
conn = sqlite3.connect("challenge.db")
cc = conn.cursor()
# usuario is the PRIMARY KEY, so duplicate registrations fail at INSERT time.
cc.execute("""CREATE TABLE IF NOT EXISTS usuarios (
usuario text PRIMARY KEY,
password text);""")
@app.route('/', methods=["GET"])
def index():
    """Render the site's landing page (template1.html)."""
    return render_template('template1.html')
@app.route('/registro', methods=["POST"])
def registro():
    """Register a new user and set a signed JWT session cookie ('at')."""
    user = request.form['username']
    password = request.form['password']
    connt = sqlite3.connect('challenge.db')
    try:
        c = connt.cursor()
        # Parameterized insert; fails if the username already exists (PRIMARY KEY).
        c.execute("INSERT INTO usuarios(usuario, password) VALUES (?, ?)", (user, password))
        connt.commit()
        token = {
            'usuario': user,
            'timestamp': time.time()
        }
        # HS256-signed token with a hard-coded secret (this is CTF challenge code).
        encoded_jwt = jwt.encode(token, 'Un14nD3S', algorithm='HS256')
        resp = make_response(redirect('/perfil', code=302))
        resp.set_cookie('at', encoded_jwt)
        return resp
    except:
        # Bare except: any failure (e.g. a duplicate username) shows the error
        # template. NOTE(review): this also hides unexpected errors.
        return render_template('template3.html', usuario=user, password=password)
@app.route('/perfil', methods=["GET"])
def perfil():
    """Show the profile of the user named in the 'at' JWT cookie.

    NOTE: jwt.decode is called with verify=False, so the token signature is
    NOT checked and the usuario claim is trusted as-is -- this appears to be
    the intended vulnerability of the challenge.
    """
    token = request.cookies.get('at')
    decoded_jwt = jwt.decode(token, verify=False)
    user = decoded_jwt['usuario']
    connt = sqlite3.connect('challenge.db')
    c = connt.cursor()
    retorno = ''
    # Parameterized lookup of the claimed user.
    c.execute("SELECT * FROM usuarios WHERE usuario=?", (str(user),))
    rows = c.fetchall()
    user = ''
    password = ''
    for x in rows:
        retorno = retorno + str(x[0])+', ' + str(x[1]) + "\n"
        user = str(x[0])
        password = str(x[1])
    # The admin's displayed "password" is the flag.
    if(user=='admin'):
        password = 'DISCCTF{JWT_UnI4nD3s_l4bs1s}'
    return render_template('template2.html', usuario=user, password=password)
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
from future import standard_library
standard_library.install_aliases()
from builtins import object
import re
import urllib.request, urllib.parse, urllib.error
import os, os.path
from datetime import datetime
from django.conf import settings
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.functional import curry
from django.core.files.images import ImageFile
from django.core.files.storage import get_storage_class
from django.contrib.staticfiles import finders
from sorl.thumbnail import get_thumbnail
from .conf import *
class FilePath(str):
    """A string subclass for a single (usually media-relative) file path.

    Adds URL escaping, display helpers and lazily-cached filesystem metadata
    on top of the raw path string. Instances are normally created by
    FilesDescriptor with the owning model instance and field attached.
    """
    def __new__(cls, str, instance=None, field=None, settings={}):
        # NOTE: parameter named `str` shadows the builtin; kept for
        # backward compatibility with existing callers.
        self = super(FilePath, cls).__new__(cls, str.strip())
        self._instance = instance
        self._field = field
        # Lazily computed filesystem metadata caches (None = not yet computed).
        self._exists = None
        self._size = None
        self._accessed_time = None
        self._created_time = None
        self._modified_time = None
        self._thumbnails = {}
        self.settings = {
            'img_attrs': {},
            'thumbnail_size': None,
            'thumbnail_attrs': {},
        }
        self.settings.update(settings)
        return self
    def _html_attrs(self, **kwargs):
        """Normalize HTML attribute kwargs (css_class -> class)."""
        attrs = {}
        attrs.update(kwargs)
        if 'css_class' in attrs:
            attrs['class'] = attrs['css_class']
            del attrs['css_class']
        return attrs
    @property
    def unescaped(self):
        """The path with %xx escapes decoded."""
        return urllib.parse.unquote(self)
    @property
    def escaped(self):
        """The path with unsafe characters %xx-escaped (idempotent)."""
        return urllib.parse.quote(self.unescaped)
    @property
    def url(self):
        """Absolute URL: relative paths are joined onto MEDIA_URL."""
        if not self.startswith('/') and self.find('//') == -1:
            return os.path.join(MEDIA_URL, self.escaped)
        return self.escaped
    @property
    def local_path(self):
        """Filesystem path: relative paths are joined onto MEDIA_ROOT."""
        if not self.startswith('/') and self.find('//') == -1:
            return os.path.join(MEDIA_ROOT, urllib.parse.unquote(self))
        return self
    def _get_local_path_or_file(self):
        # if file is in static instead of media directory, sorl raises
        # a suspicious operation error. So we open it safely without errors.
        if self.startswith('/'):
            if self.startswith('/static/'):
                path = self.replace('/static/', '')
            elif self.startswith(settings.STATIC_URL):
                path = self.replace(settings.STATIC_URL, '')
            else:
                return self.local_path
        else:
            return self.local_path
        path = finders.find(urllib.parse.unquote(path))
        # BUG FIX: open in binary mode -- image data is not text.
        image = ImageFile(open(path, 'rb'))
        return image
    @property
    def filename(self):
        """Unescaped basename of the path."""
        return urllib.parse.unquote(re.sub(r'^.+\/', '', self))
    @property
    def display_name(self):
        """Human-readable name: extension stripped, underscores -> spaces."""
        without_extension = re.sub(r'\.[\w\d]+$', '', self.filename)
        with_spaces = re.sub(r'_', ' ', without_extension)
        return with_spaces
    @property
    def ext(self):
        """File extension without the dot (original case preserved)."""
        return re.sub(r'^.+\.', '', self.filename)
    def exists(self):
        """Whether the file exists on disk (cached after first check)."""
        if self._exists is None:
            self._exists = os.path.exists(self.local_path)
        return self._exists
    def get_size(self):
        """File size in bytes (cached)."""
        if self._size is None:
            self._size = os.path.getsize(self.local_path)
        return self._size
    def get_accessed_time(self):
        """Last-access datetime (cached)."""
        if self._accessed_time is None:
            self._accessed_time = datetime.fromtimestamp(os.path.getatime(self.local_path))
        return self._accessed_time
    def get_created_time(self):
        """Creation (ctime) datetime (cached)."""
        if self._created_time is None:
            self._created_time = datetime.fromtimestamp(os.path.getctime(self.local_path))
        return self._created_time
    def get_modified_time(self):
        """Last-modification datetime (cached)."""
        if self._modified_time is None:
            self._modified_time = datetime.fromtimestamp(os.path.getmtime(self.local_path))
        return self._modified_time
class ImagePath(FilePath):
    """FilePath specialised for images: <img> rendering and sorl thumbnails."""
    def img_tag(self, **kwargs):
        """Return a mark_safe <img> tag for this path; kwargs become attributes."""
        attrs = {}
        attrs.update(self.settings['img_attrs'])
        attrs.update(kwargs)
        attrs = self._html_attrs(**attrs)
        attrs_str = ''.join([
            u'%s="%s" ' % (key, value)
            for key, value in attrs.items()
        ])
        return mark_safe(u'<img src="%s" %s/>' % (self.url, attrs_str))
    def _thumbnail_file_format(self):
        # Keep alpha-capable source formats as PNG; everything else as JPEG.
        if self.ext.lower() in ['gif', 'png']:
            return 'PNG'
        return 'JPEG'
    def thumbnail(self, size=None, **kwargs):
        """Return a sorl thumbnail at `size`, cached per (size, options)."""
        size = size or self.settings['thumbnail_size']
        if not size:
            raise Exception('No thumbnail size supplied')
        attrs = {
            'format': self._thumbnail_file_format(),
            'upscale': False,
        }
        attrs.update(self.settings['thumbnail_attrs'])
        attrs.update(kwargs)
        all_attrs = { 'size': size }
        all_attrs.update(attrs)
        # BUG FIX: hash(frozenset(all_attrs)) hashed only the dict KEYS, so every
        # size/option combination collided on a single cache slot and returned
        # the first thumbnail ever produced. Key on the (key, value) pairs.
        key = hash(tuple(sorted(all_attrs.items())))
        if not key in self._thumbnails:
            #self._thumbnails[key] = get_thumbnail(self._get_local_path_or_file(), size, **attrs)
            self._thumbnails[key] = get_thumbnail(self.local_path, size, **attrs)
        return self._thumbnails[key]
    def thumbnail_tag(self, size, opts={}, **kwargs):
        """Return an <img> tag for a thumbnail; '' on I/O error unless THUMBNAIL_DEBUG."""
        try:
            thumbnail = self.thumbnail(size, **opts)
        except EnvironmentError as e:
            if settings.THUMBNAIL_DEBUG:
                raise e
            return ''
        src = ImagePath(thumbnail.url, self._instance, self._field)
        attrs = { 'width': thumbnail.width, 'height': thumbnail.height }
        attrs.update(self.settings['img_attrs'])
        attrs.update(kwargs)
        return src.img_tag(**attrs)
    def __getattr__(self, attr):
        # Dynamic helpers: e.g. .thumbnail_200x100 / .thumbnail_tag_200x100.
        thumbnail_mxn = re.match(r'^thumbnail_(tag_)?(\d*x?\d+)$', attr)
        if thumbnail_mxn:
            tag = thumbnail_mxn.group(1) == 'tag_'
            size = thumbnail_mxn.group(2)
            if tag:
                return curry(self.thumbnail_tag, size)
            else:
                return curry(self.thumbnail, size)
        raise AttributeError
class FilePaths(str):
    """A newline-separated list of file paths stored as one string.

    Provides queryset-like access (all/count/first/last) and a simple cursor
    (next/next_n/next_all/has_next/reset) over FilePath items.
    """
    item_class = FilePath
    def __new__(cls, str, instance=None, field=None, settings={}):
        self = super(FilePaths, cls).__new__(cls, str)
        self._instance = instance
        self._field = field
        self._all = None      # lazy cache of wrapped items
        self._length = None   # populated together with _all
        self._current = 0     # cursor position for next()/next_n()
        self.settings = {
            'img_attrs': {},
            'thumbnail_size': None,
            'thumbnail_attrs': {},
        }
        self.settings.update(settings)
        return self
    def all(self):
        """Split into one item per line, wrapped in the field's item class (cached)."""
        if self._all is None:
            self._all = []
            for f in self.splitlines():
                self._all.append(self._field.attr_class.item_class(f, self._instance, self._field, self.settings))
            self._length = len(self._all)
        return self._all
    def count(self):
        """Number of items."""
        self.all()
        return self._length
    def first(self):
        """First item, or None when empty."""
        return self.all() and self.all()[0] or None
    def last(self):
        """Last item, or None when empty."""
        return self.all() and self.all()[-1] or None
    def next(self):
        """Return the item at the cursor and advance it."""
        f = self.all()[self._current]
        self._current += 1
        return f
    def next_n(self, n):
        """Return up to n items from the cursor and advance past them."""
        files = self.all()[self._current:self._current+n]
        self._current += n
        return files
    def next_all(self):
        """Return all remaining items; leaves the cursor on the last item."""
        files = self.all()[self._current:]
        self._current = self._length - 1
        return files
    def has_next(self):
        """Number of items remaining after the cursor (0 when exhausted)."""
        self.all()
        return max(0, self._length - self._current - 1)
    def reset(self):
        # Rewind the cursor to the first item.
        self._current = 0
    def __getattr__(self, attr):
        # Dynamic helpers: .next_3() is equivalent to .next_n(3), etc.
        next_n = re.match(r'^next_(\d+)$', attr)
        if next_n:
            n = int(next_n.group(1))
            return curry(self.next_n, n)
        raise AttributeError
class ImagePaths(FilePaths):
    """Newline-separated list of image paths; items are ImagePath instances."""
    item_class = ImagePath
    def as_gallery(self):
        # Placeholder: gallery rendering not implemented yet.
        raise NotImplementedError
    def as_carousel(self):
        # Placeholder: carousel rendering not implemented yet.
        raise NotImplementedError
class FilesDescriptor(object):
    """
    Used django.db.models.fields.files.FileDescriptor as an example.

    On attribute access, lazily wraps the stored raw string in the field's
    attr_class (FilePath/FilePaths), which exposes helpers such as
    filename(), url and img_tag(). The wrapped value is written back so the
    conversion happens at most once per instance.
    """
    def __init__(self, field):
        self.field = field
    def __get__(self, instance=None, owner=None):
        if instance is None:
            raise AttributeError(
                "The '%s' attribute can only be accessed from %s instances."
                % (self.field.name, owner.__name__))
        name = self.field.name
        current = instance.__dict__[name]
        is_raw_string = (
            isinstance(current, six.string_types)
            and not isinstance(current, (FilePath, FilePaths))
        )
        if is_raw_string:
            # Wrap once and cache the wrapped value on the instance.
            instance.__dict__[name] = self.field.attr_class(
                current, instance, self.field)
        return instance.__dict__[name]
    def __set__(self, instance, value):
        instance.__dict__[self.field.name] = value
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given an array of characters, compress it in-place.
#
# The length after compression must always be smaller than or equal to the original array.
#
# Every element of the array should be a character (not int) of length 1.
#
# After you are done modifying the input array in-place, return the new length of the array.
#
# Example 1:
# Input:
# ["a","a","b","b","c","c","c"]
#
# Output:
# Return 6, and the first 6 characters of the input array should be: ["a","2","b","2","c","3"]
#
# Explanation:
# "aa" is replaced by "a2". "bb" is replaced by "b2". "ccc" is replaced by "c3".
# Example 2:
# Input:
# ["a"]
#
# Output:
# Return 1, and the first 1 characters of the input array should be: ["a"]
#
# Explanation:
# Nothing is replaced.
# Example 3:
# Input:
# ["a","b","b","b","b","b","b","b","b","b","b","b","b"]
#
# Output:
# Return 4, and the first 4 characters of the input array should be: ["a","b","1","2"].
#
# Explanation:
# Since the character "a" does not repeat, it is not compressed. "bbbbbbbbbbbb" is replaced by "b12".
# Notice each digit has it's own entry in the array.
# Regular Expression.
# 70 / 70 test cases passed.
# Status: Accepted
# Runtime: 116 ms
import re
class Solution(object):
    def compress(self, chars):
        """
        :type chars: List[str]
        :rtype: int

        Regex-based run-length encoding: every run of repeated characters
        (matched via a lookbehind back-reference) is replaced by its count,
        and the result is spliced back into chars in place.
        """
        if not chars:
            return 0
        joined = ''.join(chars)
        run_pattern = re.compile(r'(?<=(.))(\1+)')
        compressed = run_pattern.sub(
            lambda match: str(len(match.group()) + 1), joined)
        chars[:] = compressed
        return len(chars)
# 70 / 70 test cases passed.
# Status: Accepted
# Runtime: 79 ms
class Solution(object):
    def compress(self, chars):
        """
        :type chars: List[str]
        :rtype: int

        In-place two-pointer run-length encoding. `lo` scans the array,
        `same_element_length` grows while the current run continues, and
        `hi` tracks the logical end of the array, shrinking as runs are
        collapsed in place via slice assignment.
        """
        if not chars:
            return 0
        lo, hi = 0, len(chars)
        same_element_length = 0
        while lo < hi:
            if lo + same_element_length < hi and chars[lo] == chars[lo + same_element_length]:
                # Still inside the current run of identical characters.
                same_element_length += 1
            else:
                # Run ended: runs longer than 1 collapse to char + count digits.
                if same_element_length > 1:
                    dummy_list = []
                    length = same_element_length
                    # Peel off the decimal digits of the run length.
                    while same_element_length:
                        dummy_list.insert(0, str(same_element_length % 10))
                        same_element_length //= 10
                    # Replace the whole run with [char, digit, digit, ...].
                    chars[lo: lo + length] = [str(chars[lo])] + dummy_list
                    lo += len(dummy_list)
                    # The array just shrank by (run length - digits - 1) slots.
                    hi -= length - len(dummy_list) - 1
                lo += 1
                same_element_length = 0
        return len(chars)
# Functional Solution.
# 70 / 70 test cases passed.
# Status: Accepted
# Runtime: 99 ms
class Solution(object):
    def compress(self, chars):
        """
        :type chars: List[str]
        :rtype: int

        Run-length encode by locating run boundaries first, then writing the
        compressed prefix back into chars. Characters past the compressed
        prefix are left untouched; only the returned length is meaningful.
        """
        if not chars:
            return 0
        # Indices where a new run starts, plus a final sentinel boundary.
        boundaries = [0] + [i for i in range(1, len(chars))
                            if chars[i] != chars[i - 1]] + [len(chars)]
        compressed = []
        for start, end in zip(boundaries, boundaries[1:]):
            run_char = chars[start]
            run_len = end - start
            compressed.append(run_char)
            if run_len > 1:
                # Each digit of the count occupies its own slot.
                compressed.extend(str(run_len))
        chars[:len(compressed)] = compressed
        return len(compressed)
if __name__ == '__main__':
    # Quick manual checks: expected outputs are 4 and 6 (see examples above).
    print(Solution().compress(["a","b","b","b","b","b","b","b","b","b","b","b","b"]))
    print(Solution().compress(["a","a","a","b","b","a","a", "a"]))
|
#!/usr/bin/python3.6 -u
#Main script for the Very Independent VEGAS Analysis (VIVA)
import sys
from plutils import *
def section():
    """Print a horizontal divider to separate the script's output stages."""
    divider = 25 * '-'
    print(divider)
section()
print('VIVA starting-up!')
print('It\'s a great day for SCIENCE!')
print('Believe me, we will have the best SCIENCE!')
section()
# Stage 1: verify the instructions file (first CLI argument) can be opened.
print('Attempting to read in the instructions file...')
inst_filename = sys.argv[1]
try:
    f = open(inst_filename)
except OSError:
    print("Instructions file ", inst_filename, " could not be opened.")
    raise
else:
    f.close()
    print('Reading instructions from {0}'.format(inst_filename))
section()
# Stage 2: parse the instructions into a configuration dictionary.
print('Building configuration dictionary...')
read_inst = instreader.InstFileReader(inst_filename)
configdict = read_inst.get_config_dict()
print ('Configuration Dictionary:\n', configdict)
section()
# Stage 3: connect to the database described by the configuration.
print('Establishing DB connection...')
dbcnx=database.DBConnection(configdict=configdict)
print('host = ', dbcnx.host)
print('db = ', dbcnx.db)
print('user = ', dbcnx.user)
section()
# Stage 4: set up run management and the analysis core.
print('Initializing the run manager...')
rgm = runmanager.RunGroupManager(configdict,dbcnx)
section()
print('Initalizing the analysis core...')
ac = analysis.AnalysisCore(configdict=configdict, runmanager=rgm)
print('Status = {0}'.format(ac.get_status()))
section()
# Stage 5: run the analysis; always report status and clean up, even on error.
print('Executing the analaysis...')
try:
    ac.execute()
except:
    raise
finally:
    section()
    print('+++++ Final Status +++++')
    ac.print_status()
    section()
    print('Cleaning up...')
    ac.clean_up()
    section()
    print('All done here!')
    print('Even if I reported no errors, it is still reccomended to inspect the')
    print('.error, .out, and .log files in the output directories.')
|
import tweepy
import re
import os
import sys
from configparser import ConfigParser
# Consumer keys and access tokens, used for OAuth
# Read Twitter API credentials from config.ini located next to the script's parent dir.
pathname = os.path.dirname(sys.argv[0])
config = ConfigParser()
config.read( pathname + '/../config.ini')
consumer_key = config['twitter']['consumer_key']
consumer_secret = config['twitter']['consumer_secret']
access_token = config['twitter']['access_token']
access_token_secret = config['twitter']['access_token_secret']
# NOTE(review): this prints API secrets to stdout -- fine for local debugging,
# but avoid in shared logs.
print("\nconsumer_key: " + consumer_key)
print("consumer_secret: " + consumer_secret)
print("access_token: " + access_token)
print("access_token_secret: " + access_token_secret + "\n")
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
def downloadTweets(profiles, result_folder):
    """Download each profile's full timeline into raw_<profile>.txt.

    profiles: iterable of Twitter screen names (without the leading '@').
    result_folder: directory where the per-profile text files are appended.
    """
    for profile in profiles:
        screen_name = "@%s" % profile
        print("Start downloading: %s" % screen_name)
        # Hoisted out of the status loop: the target file never changes per profile.
        filename = os.path.join(result_folder, "raw_%s.txt" % profile)
        # Warning: pass a number to .items() to limit the max tweets downloaded.
        try:
            for status in tweepy.Cursor(api.user_timeline, screen_name=screen_name).items():
                with open(filename, 'a', encoding='utf8') as text_file:
                    text_file.write(status.text + "\n")
        except Exception as e:
            print("Error downloading tweets: %s" % e)
        print("Tweet for %s downloaded!" % profile)
'''
Alessia Pizzoccheri
Wholesale
Test Case #1
3 books: wholesale $44.91, shipping $4.50, total cost $49.41
Test Case #2
12 books: wholesale $179.64, shipping $11.25, total cost $190.89
Test Case #3
257 book: wholesale $3847.29, shipping $195.00, total cost $4042.29
'''
def main():
    """Ask how many books are ordered, then print wholesale, shipping and total cost."""
    # Pricing constants used to compute the final cost.
    price = 24.95              # cover price per book
    discount = .40             # 40% wholesale discount
    shipping = 3               # shipping for the first copy
    additional_shipping = .75  # shipping for each additional copy
    # total number of books (prompt typo fixed: "How books" -> "How many books")
    books = int(input('How many books are you ordering? '))
    wholesale = (price * books) * (1 - discount)
    # calculate shipping: $3 for the first copy, $0.75 for each additional one
    # (now uses the named constant instead of a repeated .75 literal).
    shipping_cost = shipping + ((books - 1) * additional_shipping)
    total_cost = wholesale + shipping_cost
    print('The wholesale cost is ${:.2f}'.format(wholesale))
    print('The shipping cost is ${:.2f}'.format(shipping_cost))
    print('The total cost is ${:.2f}'.format(total_cost))
main()
|
def spread(func, args):
    """Call *func* with the elements of *args* unpacked as positional arguments."""
    positional = tuple(args)
    return func(*positional)
'''
You must create a function, spread, that takes a function and a list of
arguments to be applied to that function. You must make this function
return the result of calling the given function/lambda with the given arguments.
eg:
spread(someFunction, [1, true, "Foo", "bar"] )
# is the same as...
someFunction(1, true, "Foo", "bar")
'''
|
from django.contrib import admin
# Register your models here.
from .models import Members
class MembersAdmin(admin.ModelAdmin):
    """Admin options for the Members model (defaults for now)."""
    # BUG FIX: the register() call used to live *inside* this class body, so
    # Members was registered with the default ModelAdmin and this class was
    # never used (the original trailing '#, MembersAdmin' comment shows the intent).
    pass


admin.site.register(Members, MembersAdmin)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.