# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logged Replay Buffer."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import pickle
from dopamine.replay_memory import circular_replay_buffer
import gin
import numpy as np
import tensorflow.compat.v1 as tf
STORE_FILENAME_PREFIX = circular_replay_buffer.STORE_FILENAME_PREFIX
class OutOfGraphLoggedReplayBuffer(
circular_replay_buffer.OutOfGraphReplayBuffer):
"""Logs the replay buffer to disk everytime it's full."""
def __init__(self, log_dir, *args, **kwargs):
super(OutOfGraphLoggedReplayBuffer, self).__init__(*args, **kwargs)
self._log_count = 0
self._log_dir = log_dir
tf.gfile.MakeDirs(self._log_dir)
def add(self, observation, action, reward, terminal, *args):
super(OutOfGraphLoggedReplayBuffer, self).add(
observation, action, reward, terminal, *args)
# Log the replay buffer to a file in self._log_dir if the replay buffer
# is full.
cur_size = self.add_count % self._replay_capacity
if cur_size == self._replay_capacity - 1:
self._log_buffer()
self._log_count += 1
def load(self, checkpoint_dir, suffix):
super(OutOfGraphLoggedReplayBuffer, self).load(checkpoint_dir, suffix)
self._log_count = self.add_count // self._replay_capacity
def _log_buffer(self):
"""This method will save all the replay buffer's state in a single file."""
checkpointable_elements = self._return_checkpointable_elements()
for attr in checkpointable_elements:
filename = self._generate_filename(self._log_dir, attr, self._log_count)
with tf.gfile.Open(filename, 'wb') as f:
with gzip.GzipFile(fileobj=f) as outfile:
# Checkpointing the np arrays in self._store with np.save instead of
# pickling the dictionary is critical for file size and performance.
# STORE_FILENAME_PREFIX indicates that the variable is contained in
# self._store.
if attr.startswith(STORE_FILENAME_PREFIX):
array_name = attr[len(STORE_FILENAME_PREFIX):]
np.save(outfile, self._store[array_name], allow_pickle=False)
# Some numpy arrays might not be part of storage
elif isinstance(self.__dict__[attr], np.ndarray):
np.save(outfile, self.__dict__[attr], allow_pickle=False)
else:
pickle.dump(self.__dict__[attr], outfile)
tf.logging.info('Replay buffer logged to ckpt {number} in {dir}'.format(
number=self._log_count, dir=self._log_dir))
def log_final_buffer(self):
"""Logs the replay buffer at the end of training."""
add_count = self.add_count
self.add_count = np.array(self.cursor())
self._log_buffer()
self._log_count += 1
self.add_count = add_count
@gin.configurable(denylist=['observation_shape', 'stack_size',
'update_horizon', 'gamma'])
class WrappedLoggedReplayBuffer(circular_replay_buffer.WrappedReplayBuffer):
"""Wrapper of OutOfGraphLoggedReplayBuffer with an in graph sampling mechanism."""
def __init__(self,
log_dir,
observation_shape,
stack_size,
use_staging=True,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=0.99,
wrapped_memory=None,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes WrappedLoggedReplayBuffer."""
memory = OutOfGraphLoggedReplayBuffer(
log_dir, observation_shape, stack_size, replay_capacity, batch_size,
update_horizon, gamma, max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype)
super(WrappedLoggedReplayBuffer, self).__init__(
observation_shape,
stack_size,
use_staging=use_staging,
replay_capacity=replay_capacity,
batch_size=batch_size,
update_horizon=update_horizon,
gamma=gamma,
wrapped_memory=memory,
max_sample_attempts=max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
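# A minimal usage sketch (an addition, not part of the original module); the
# shapes and capacity below are illustrative placeholders, not values taken
# from any particular experiment:
#   replay_buffer = WrappedLoggedReplayBuffer(
#       log_dir='/tmp/replay_logs',
#       observation_shape=(84, 84),
#       stack_size=4,
#       replay_capacity=1000000,
#       batch_size=32)
# Each time the underlying buffer fills, its contents are written to log_dir as
# gzipped per-attribute files, and OutOfGraphLoggedReplayBuffer.log_final_buffer()
# flushes whatever remains at the end of training.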
|
# -*- coding: utf-8 -*-
import pandas as pd
import threading
import Queue
import logging
import time
import init_data_struct as ids
from extract_from_xml import build_single_itinerary
from error_handling import eval_response
import trip as tr
import sbb_response
class SBBAPIThread(threading.Thread):
def __init__(self, q_in, q_out, CONFIG):
super(SBBAPIThread, self).__init__()
self.q_in = q_in
self.q_out = q_out
self.CONFIG = CONFIG
def run(self):
while True:
params = self.q_in.get()
try:
response = eval_response(params, self.CONFIG)
if response:
self.q_out.put(response)
except:
logging.error('Worker failed to get response for params: {p}'.format(p=params))
finally:
self.q_in.task_done()
def api_call_thread(q_in, q_out, q_filled, CONFIG):
# keep_alive = True
# while keep_alive:
#
# # Tries to get an item from the queue,
# try:
# params = q_in.get(True, timeout=5)
# except Queue.Empty:
# # Get returns an .Empty error when queue is empty
# if not q_filled.empty():
# # If all things to do are exhausted, kill worker
# keep_alive = False
# # Do not run code below if the queue was empty
# continue
#
# # If a set of parameter was obtained from queue, make the API call
# try:
# # Calls the SBB API and returns the response message
# response = eval_response(params, CONFIG)
# # Skip operations for item if there is an error in the response (query_sbb_api() returns None)
# if response:
# q_out.put(response)
# except:
# logging.error('Worker failed to get response for params: {p}'.format(p=params))
# # Ensures that tasks gets flagged as done even if an error occurred so that bot doesn't hang
# finally:
# q_in.task_done()
#
# return
while True:
params = q_in.get()
try:
response = eval_response(params, CONFIG)
if response:
q_out.put(response)
except:
logging.error('Worker failed to get response for params: {p}'.format(p=params))
finally:
q_in.task_done()
# TODO switch to 'from vibepy.multithread_workers import MultiThreadOp'
def multithread_api_queries(trip, loop_through, CONFIG):
num_fetch_threads = len(loop_through)
q_in = Queue.Queue() # Queue of inputs
q_out = Queue.Queue() # Queue of outputs
q_filled = Queue.Queue() # When all inputs have been filled, a True is added to this queue to tell workers to close
# Initialize the threads
for i in range(num_fetch_threads):
# worker = threading.Thread(target=api_call_thread, args=(q_in, q_out, q_filled, CONFIG))
worker = SBBAPIThread(q_in, q_out, CONFIG)
worker.setDaemon(True)
worker.start()
# Populate queue
for item in loop_through:
# Generate a dictionary (params) that contains all the information required to build the API call
params = gen_param_seg(trip, MaxResultNumber=item[0], leave_at=item[1])
q_in.put(params)
# This is a crude workaround, but it should prevent daemon threads from accumulating
q_filled.put(True)
# Wait for all workers to be done and return the queue of responses
q_in.join()
return q_out
def build_single_trip(trip, CONFIG):
# 6 preceding/following departure/arrival
loop_through = [(6, True), (6, False), (-6, True), (-6, False)]
# Multi-threaded API calls (returns fifo queue of responses)
q_out = multithread_api_queries(trip, loop_through, CONFIG)
# Initialize empty dataframe with the right column structure for easy concatenation later
# trip_link_df, itinerary_df, legs_df, segments_df = ids.initialize_all_empty_df()
newTrip = tr.Trip(trip, CONFIG)
while not q_out.empty():
# Pop one response for processing
response = q_out.get()
# extracts the valuable information from the API query and stores it into pandas dataframes
# (This runs in about 0.04 seconds per response.content, not a bottleneck anymore)
# trip_link_df_i, itinerary_df_i, legs_df_i, segments_df_i = \
# build_single_itinerary(response.content, trip, itinerary_df['context_reconstruction'].values, CONFIG)
newTrip.build_single_itinerary(sbb_response.SBBResponse(response))
# Update the dataframe with new items
# we can do this later now
# trip_link_df = pd.concat([trip_link_df, trip_link_df_i], ignore_index=True)
# itinerary_df = pd.concat([itinerary_df, itinerary_df_i])
# legs_df = pd.concat([legs_df, legs_df_i])
# segments_df = pd.concat([segments_df, segments_df_i])
newTrip.concat_trip_dfs()
newTrip.concat_legs_dfs()
newTrip.concat_seg_dfs()
# Only perform these on non-empty dataframes
# if not trip_link_df.empty:
# # These values are shared across all rows so add them at once
# trip_link_df['vid'] = trip['vid']
# trip_link_df['mot_segment_id'] = trip['mot_segment_id']
#
# # Not using vid / mot_segment_id as indexes since they're identical for all...
# # Hierarchical indexes need to be sorted for faster operation (uses np.searchsorted )
# trip_link_df.sort_values(['itinerary_id', 'leg_id', 'segment_id', ], inplace=True)
#
# trip_link_df.set_index(['itinerary_id', 'leg_id', 'segment_id', ], inplace=True)
#
# return trip_link_df, itinerary_df, legs_df, segments_df
if not newTrip.trip_link_df.empty:
# These values are shared across all rows so add them at once
newTrip.trip_link_df['vid'] = trip['vid']
newTrip.trip_link_df['mot_segment_id'] = trip['mot_segment_id']
# Not using vid / mot_segment_id as indexes since they're identical for all...
# Hierarchical indexes need to be sorted for faster operation (uses np.searchsorted )
newTrip.trip_link_df.sort_values(['itinerary_id', 'leg_id', 'segment_id', ], inplace=True)
newTrip.trip_link_df.set_index(['itinerary_id', 'leg_id', 'segment_id', ], inplace=True)
return newTrip.trip_link_df, newTrip.itinerary_df, newTrip.legs_df, newTrip.segments_df
# def build_single_trip2(trip, CONFIG):
# # 6 preceding/following departure/arrival
# loop_through = [(6, True), (6, False), (-6, True), (-6, False)]
#
# # Initialize empty dataframe with the right column structure for easy concatenation later
# trip_link_df, itinerary_df, legs_df, segments_df = ids.initialize_all_empty_df()
#
# for item in loop_through:
# # Generate a dicitonary (params) that contains all the information required ot build the API call
# params = gen_param_seg(trip, MaxResultNumber=item[0], leave_at=item[1])
# # Calls the SBB API and returns the response message
# response = eval_response(params, CONFIG)
#
# # Skip operations for item if there is an error in the response (query_sbb_api() returns None)
# if not response:
# continue
#
# # extracts the valuable information from the API query and stores it into pandas dataframes
# # (This runs in about 0.04 seconds per response.content, not a bottleneck anymore)
# trip_link_df_i, itinerary_df_i, legs_df_i, segments_df_i = \
# build_single_itinerary(response.content, trip, itinerary_df['context_reconstruction'].values, CONFIG)
#
# # Update the dataframe with new items
# trip_link_df = pd.concat([trip_link_df, trip_link_df_i], ignore_index=True)
# itinerary_df = pd.concat([itinerary_df, itinerary_df_i])
# legs_df = pd.concat([legs_df, legs_df_i])
# segments_df = pd.concat([segments_df, segments_df_i])
#
# # These values are shared across all rows so add them at once
# trip_link_df['vid'] = trip['vid']
# trip_link_df['mot_segment_id'] = trip['mot_segment_id']
#
# # Not using vid / mot_segment_id as indexes since they're identical for all...
# # Hierarchical indexes need to be sorted for faster operation (uses np.searchsorted )
# trip_link_df.sort_values(['itinerary_id', 'leg_id', 'segment_id', ], inplace=True)
# trip_link_df.set_index(['itinerary_id', 'leg_id', 'segment_id', ], inplace=True)
#
# return trip_link_df, itinerary_df, legs_df, segments_df
def gen_param_seg(trip, MaxResultNumber=3, leave_at=True, api_version='v2'):
# Some parameters need a bit of reformatting
params = {
'api_version': api_version, # check, there might some other dependencies inside the XML...
'MaxResultNumber': MaxResultNumber,
'from_lat': int(round(trip['lat_start'] * 10 ** 6)), # int, lat/lon * 1e6
'from_lon': int(round(trip['lon_start'] * 10 ** 6)),
'to_lat': int(round(trip['lat_end'] * 10 ** 6)),
'to_lon': int(round(trip['lon_end'] * 10 ** 6)),
# These times are used by python but not for the XML query so no reformat
'trip_time_start': trip['trip_time_start'], # previously starttriptime
'trip_time_end': trip['trip_time_end'] # previously endtriptime
}
if leave_at: # calculate trip to start at this time
params['timestamp'] = trip['time_start'].strftime("%Y-%m-%dT%H:%M:%S") # timestamp format: 2015-08-04T14:00:00
params['DateTimeType'] = 'ABFAHRT'
else: # calculate trip to arrive by this time
params['timestamp'] = trip['time_end'].strftime("%Y-%m-%dT%H:%M:%S") # timestamp format: 2015-08-04T14:00:00
params['DateTimeType'] = 'ANKUNFT'
params['mot_segment_id'] = trip['mot_segment_id']
return params
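# Illustrative example (an addition, not part of the original module) of the kind
# of dict gen_param_seg() returns; the trip values below are made up:
#   trip = {'lat_start': 47.3769, 'lon_start': 8.5417,
#           'lat_end': 46.9480, 'lon_end': 7.4474,
#           'time_start': datetime(2015, 8, 4, 14, 0),
#           'time_end': datetime(2015, 8, 4, 15, 0),
#           'trip_time_start': ..., 'trip_time_end': ...,
#           'mot_segment_id': 42}
#   gen_param_seg(trip, MaxResultNumber=6, leave_at=True)
#   # -> {'api_version': 'v2', 'MaxResultNumber': 6,
#   #     'from_lat': 47376900, 'from_lon': 8541700,
#   #     'to_lat': 46948000, 'to_lon': 7447400,
#   #     'timestamp': '2015-08-04T14:00:00', 'DateTimeType': 'ABFAHRT', ...}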
|
try:
valor = raw_input("Type a number or a word to trigger the error ")
print type(valor)
valor = int(valor)
print type(valor)
except:
    print "Error entering data"
else:
print valor
|
import z
from collections import defaultdict
from sortedcontainers import SortedSet
# largest change by mc years ago as a last-year predictor
wlp_dict = z.getp("wlp_dict")
dates = z.getp("dates")
rsi_indicator_dic = dict()
changers = SortedSet()
yearsago = -1*252*2
keeping = 60
discardlocation = int(keeping/2)
for j, astock in enumerate(wlp_dict.keys()):
seen = list()
rsid = dict()
rseen = list()
try:
bar = wlp_dict[astock]
if astock == "BA":
print("bar : {}".format( bar.keys() ))
data1 = bar[dates[-252]][0]
data2 = bar[dates[yearsago]][0]
change = round(data1/data2,3)
changers.add((change, astock))
if len(changers) > keeping:
changers.discard(changers[discardlocation])
# change2 = data2/data3
# change2 = data2/data3
except:
pass
#print("changers: {}".format( changers))
z.setp(changers[:discardlocation], "mc1", True)
z.setp(changers[-1*discardlocation:], "mc2", True)
exit()
# if not j % 100:
# print("count: {}".format( j))
#
# try:
# prev = None
# for i, row in enumerate(buy.getRows(astock, dates[0])):
# c_close = round(float(row[z.closekey]),2)
# try:
# change = round(c_close/prev,4)
# except:
# prev = c_close
# continue
# seen.append(change)
# if len(seen) > count:
# seen.pop(0)
# try:
# rsi = getRsi(seen)
# except:
# print("{} problems: {}".format(astock, seen))
# continue
# rseen.append((rsi, c_close))
## rsid[row['Date']] = round(rsi,1)
# prev = c_close
# except Exception as e:
# z.trace(e)
# pass
#
# tally = list()
#
# for i, pair in enumerate(rseen):
# rsi = pair[0]
# try:
# if rsi <= 20:
# if pair[1] < rseen[i+preday][1]:
# tally.append(1)
# else:
# tally.append(-1)
# elif rsi >= 80:
# if pair[1] > rseen[i+preday][1]:
# tally.append(1)
# else:
# tally.append(-1)
# except:
# pass
# try:
# lental = len(tally)
# if lental < 50:
# continue
# valid = abs(round(sum(tally)/lental,3))
# except:
# continue
# rsi_indicator_dic[astock] = valid
# rsi_high.add((valid, astock))
# if len(rsi_high) > 30:
# rsi_high.remove(rsi_high[0])
#z.setp(rsi_indicator_dic, "rsi_indicator_dic")
#z.setp(rsi_high, "rsi_high", True)
#
|
# coding=utf-8
import re
import string
import pandas as pd
import csv
arabic_punctuations = '''`÷×؛<>_()*&^%][ـ،/:"؟.,'{}~¦+|!”…“–ـ'''
english_punctuations = string.punctuation
punctuations_list = arabic_punctuations + english_punctuations
arabic_diacritics = re.compile("""
ّ | # Tashdid
َ | # Fatha
ً | # Tanwin Fath
ُ | # Damma
ٌ | # Tanwin Damm
ِ | # Kasra
ٍ | # Tanwin Kasr
ْ | # Sukun
ـ # Tatwil/Kashida
""", re.VERBOSE)
EN = r'[A-Za-z0-9_]+'
AN = r'[A-Za-z0-9٠-٩_]+'
Non_Ar_char='[ĦèđóÕŐчεуфĸĹǪɪíжçбšђрҭкĎŃоĄйτпˇפмвдєσİéαнĔξմыиѵĤƒьЖΩСêğşωсаÑĨǹìνòıðבеםCmuno]'
pat1 = r'@[A-Za-z0-9_]+'
pat2 = r'https?://[^ ]+'
S_part=r'[RT-_-💕-🤓-🤔-🤣-۩-•-🤷-⤵️-🦋_]+'
#combined_pat = r'|'.join((pat1, pat2))
www_pat = r'www\.[^ ]+'
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"(\U00002600-\U000027BF)"
u"(\U0001f300-\U0001f64F)"
u"(\U0001f680-\U0001f6FF)"
u"(\U00002702-\U000027B0)"
u"(\U000024C2-\U0001F251)"
"]+", flags=re.UNICODE)
def normalize_arabic(text):
text = re.sub("[إأآا]", "ا", text)
text = re.sub("ى", "ي", text)
text = re.sub("ة", "ه", text)
text = re.sub('\n',' ', text)
text=re.sub(EN,'', text)
text = re.sub(AN,'', text)
return text
def remove_diacritics(text):
text = re.sub(arabic_diacritics,'', text)
text=re.sub(emoji_pattern,'',text)
return text
def remove_punctuations(text):
translator = str.maketrans('', '', punctuations_list)
return text.translate(translator)
def remove_repeating_char(text):
return re.sub(r'(.)\1+', r'\1', text)
def tweet_cleaner(text):
stripped = re.sub(pat1, '',text)
stripped = re.sub(pat2, 'رابط', stripped)
stripped = re.sub(www_pat, 'رابط', stripped)
stripped = re.sub(S_part, ' ', stripped)
stripped = re.sub(Non_Ar_char, ' ', stripped)
return (stripped)
if __name__ == '__main__':
File = 'file.txt'
with open(File, 'r', encoding='utf-8') as f:
csv_reader = csv.reader(f)
csvFile = open('Clean_tweets.csv', 'w', newline='', encoding='utf-8')
f2 = csv.writer(csvFile, delimiter='\n')
for row in csv_reader:
row = row[0]
# print("the text before ")
# print (row)
text = tweet_cleaner(row)
text = remove_punctuations(text)
text = remove_diacritics(text)
text = normalize_arabic(text)
text = re.sub(r'\W', ' ', text, flags=re.UNICODE)
# print("the text after ")
# print(text)
# print("_____________________________________________")
f2.writerow([text])
|
while True:
al = input("Are you an alien? ").lower()
if al == "yes" or al == "y":
print("OH MY GOD AN ALIEN!!!!! PLEASE DONT KILL ME.")
break
elif al == "no" or al == "n":
print("Oh, ok. Nevermind.")
break
else:
print("Please put yes or no.")
|
"""
Train a text classification model (TF-IDF features + Multinomial Naive Bayes) for document classification.
Search this file for the keyword "Hint" for possible areas of
improvement. There are of course others.
"""
#from distutils.version import LooseVersion as Version
#from sklearn import __version__ as sklearn_version
import pandas as pd
import pickle
#import os
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem.porter import PorterStemmer
from sklearn.naive_bayes import MultinomialNB
# Used for porterStemmer tokenizer
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop = stopwords.words('english')
"""
This is a very basic tokenization strategy.
"""
porter = PorterStemmer()
def tokenizer(text):
return text.split()
def tokenizer_porter(text):
return [porter.stem(word) for word in text.split()]
# Read in the dataset and store in a pandas dataframe
print("Reading Data")
df = pd.read_csv('./training_movie_data.csv')
# Perform feature extraction on the text.
print("Start preprocessor")
tfidf = TfidfVectorizer(strip_accents=None, lowercase=True, preprocessor=None,
tokenizer = tokenizer_porter,
sublinear_tf=True, stop_words='english', max_df = .7,
min_df = 0)
# Create a pipeline to vectorize the data and then classify it with a
# Multinomial Naive Bayes classifier.
print("Creating pipeline")
lr_tfidf = Pipeline([
('vect', tfidf),
('clf', MultinomialNB())
])
"""
Perform K-fold validation with 10 folds
"""
from sklearn.model_selection import KFold
print("Performing K-Fold validation with 10 folds")
k_fold = KFold(n_splits=10)
scores = []
x = 0
for train_indices, test_indices in k_fold.split(df):
print("ENTERED K-FOLD LOOP")
train_x = df.iloc[train_indices]['review'].values
train_y = df.iloc[train_indices]['sentiment'].values
test_x = df.iloc[test_indices]['review'].values
test_y = df.iloc[test_indices]['sentiment'].values
lr_tfidf.fit(train_x, train_y)
score = lr_tfidf.score(test_x, test_y)
scores.append(score)
print(x)
x = x+1
print('Total reviews classified:', len(df))
print('Score:', sum(scores)/len(scores))#this is taking the average score from the cross validation
# Save the classifier for use later.
pickle.dump(lr_tfidf, open("saved_model.sav", 'wb'))
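# A minimal follow-up sketch (added, not in the original script) showing how the
# pickled pipeline could be reloaded and used for prediction later; the sample
# reviews are invented for illustration.
loaded_model = pickle.load(open("saved_model.sav", 'rb'))
sample_reviews = ["A wonderful, heartfelt film with great performances.",
                  "Dull, predictable and far too long."]
print(loaded_model.predict(sample_reviews))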
|
import unittest
from neo.rawio.tdtrawio import TdtRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
class TestTdtRawIO(BaseTestRawIO, unittest.TestCase, ):
rawioclass = TdtRawIO
entities_to_test = ['aep_05']
files_to_download = [
'aep_05/Block-1/aep_05_Block-1.Tbk',
'aep_05/Block-1/aep_05_Block-1.Tdx',
'aep_05/Block-1/aep_05_Block-1.tev',
'aep_05/Block-1/aep_05_Block-1.tsq',
'aep_05/Block-2/aep_05_Block-2.Tbk',
'aep_05/Block-2/aep_05_Block-2.Tdx',
'aep_05/Block-2/aep_05_Block-2.tev',
'aep_05/Block-2/aep_05_Block-2.tsq',
]
if __name__ == "__main__":
unittest.main()
|
import copy
# import hashlib
def leftRotate(num, shiftNum):
return ((num << shiftNum) | (num >> (32 - shiftNum))) & 0xffffffff
def SHA1(message: bytes):
# Initialize variable
h0 = 0x67452301
h1 = 0xEFCDAB89
h2 = 0x98BADCFE
h3 = 0x10325476
h4 = 0xC3D2E1F0
mask = 0xffffffff
messageLen = len(message) * 8
message += b'\x80'
while(len(message) % 64 != 56):
message += b'\x00'
message += messageLen.to_bytes(8, "big")
assert (len(message) % 64 == 0)
chunks = []
copyList = copy.deepcopy(message)
for i in range(len(message) // 64):
chunks.append(copyList[: 64])
copyList = copyList[64:]
for chunk in chunks:
w = [0] * 80
copyChunk = copy.deepcopy(chunk)
for i in range(16):
w[i] = int.from_bytes(copyChunk[: 4], "big")
copyChunk = copyChunk[4:]
# Extend the sixteen 32-bit words into eighty 32-bits
for i in range(16, 80):
w[i] = leftRotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
a = h0
b = h1
c = h2
d = h3
e = h4
for i in range(80):
if 0 <= i <= 19:
f = d ^ (b & (c ^ d))
k = 0x5A827999
elif 20 <= i <= 39:
f = b ^ c ^ d
k = 0x6ED9EBA1
elif 40 <= i <= 59:
f = (b & c) | (b & d) | (c & d)
k = 0x8F1BBCDC
elif 60 <= i <= 79:
f = b ^ c ^ d
k = 0xCA62C1D6
temp = (leftRotate(a, 5) + f + e + k + w[i]) & mask
e = d
d = c
c = leftRotate(b, 30)
b = a
a = temp
h0 = (h0 + a) & mask
h1 = (h1 + b) & mask
h2 = (h2 + c) & mask
h3 = (h3 + d) & mask
h4 = (h4 + e) & mask
# print('%08x%08x%08x%08x%08x' % (h0, h1, h2, h3, h4))
h = [h0, h1, h2, h3, h4]
return (b''.join(sub.to_bytes(4, "big") for sub in h))
# SHA1(str.encode("test"))
# print(hashlib.sha1(str.encode("test")).digest())
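# A small self-check (an addition, not part of the original file): the pure-Python
# SHA1 above should agree with hashlib's implementation for any input.
if __name__ == "__main__":
    import hashlib
    sample = b"test"
    assert SHA1(sample) == hashlib.sha1(sample).digest()
    print(SHA1(sample).hex())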
|
import dash_bootstrap_components as dbc
from dash import html
alert = dbc.Alert(
[
html.H4("Well done!", className="alert-heading"),
html.P(
"This is a success alert with loads of extra text in it. So much "
"that you can see how spacing within an alert works with this "
"kind of content."
),
html.Hr(),
html.P(
"Let's put some more text down here, but remove the bottom margin",
className="mb-0",
),
]
)
|
import torch
import torch.nn as nn
#
# L2 Loss
# L2Loss(outputs, targets)
# outputs -> shape BATCH_SIZE x NUM_CLASSES
# targets -> shape BATCH_SIZE x NUM_CLASSES
#
class L2Loss():
# Constructor
def __init__(self, reduction=None, alpha=1.0):
default_reduction = 'mean'
if reduction == None:
self.reduction = default_reduction
elif reduction == 'mean': # Mean on batch_size
self.reduction = 'mean'
elif reduction == 'hardmean': # Mean on batch_size and also number_of_classes
self.reduction = 'hardmean'
elif reduction == 'sum':
self.reduction = 'sum'
else:
self.reduction = default_reduction
self.alpha = alpha
return
# Methods
def __call__(self, outputs, targets):
batch_size = outputs.shape[0]
num_classes = outputs.shape[1]
losses = torch.zeros((batch_size), dtype=torch.float32).to('cuda')
for i, (output, target) in enumerate(zip(outputs, targets)):
losses[i] = torch.sum(self.alpha*torch.square(output-target))
# print(losses[i], len(output), len(target), output, target)
if self.reduction == 'mean':
losses = torch.sum(losses)/batch_size
elif self.reduction == 'hardmean':
losses = torch.sum(losses)/batch_size/num_classes
elif self.reduction == 'sum':
losses = torch.sum(losses)
return losses
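# A quick illustrative usage note (added, not part of the original file): the class
# hard-codes .to('cuda') for its per-sample buffer, so a CUDA device is assumed.
#   criterion = L2Loss(reduction='mean', alpha=1.0)
#   preds = torch.rand(4, 10, device='cuda')
#   targets = torch.rand(4, 10, device='cuda')
#   loss = criterion(preds, targets)  # scalar: batch mean of per-sample sums of squared errors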
def CE_L2_loss(net, net_old, criterion, images, labels, current_classes, starting_label, ending_label, distillation_weight=1, outputs_normalization='sigmoid', alpha=100):
# Classification loss -> CE
# Distillation loss -> L2
CE_criterion = nn.CrossEntropyLoss(reduction='sum')
L2_criterion = L2Loss(reduction='sum', alpha=alpha)
softmax = torch.nn.Softmax(dim=-1)
outputs = net(images)
batch_size = outputs.shape[0]
if outputs_normalization == 'softmax':
outputs_normalized = softmax(outputs)
elif outputs_normalization == 'sigmoid':
outputs_normalized = torch.sigmoid(outputs)
else:
raise RuntimeError('Invalid outputs_normalization choice in CE_L2')
if starting_label == 0:
loss = CE_criterion(outputs, labels)/batch_size
else:
with torch.no_grad():
net_old.train(False)
outputs_old = net_old(images)
# sigmoids_old = torch.sigmoid(outputs_old[:,0:starting_label])
if outputs_normalization == 'softmax':
probabilities_old = softmax(outputs_old)
elif outputs_normalization == 'sigmoid':
probabilities_old = torch.sigmoid(outputs_old)
ce_loss = CE_criterion(outputs, labels)#/batch_size
targets = probabilities_old[:, :starting_label].to('cuda')
dist_loss = L2_criterion(outputs_normalized[:, :starting_label], targets)#/batch_size
# print(f"[CE loss: {ce_loss.item()} | Dist loss: {dist_loss.item()}")
loss = (ce_loss + (distillation_weight*dist_loss))/batch_size
return loss
def L2_L2_loss(net, net_old, criterion, images, labels, current_classes, starting_label, ending_label, distillation_weight=1, outputs_normalization='sigmoid', alpha=100, bce_var=2):
# Classification loss -> L2
# Distillation loss -> L2
L2_criterion = L2Loss(reduction='hardmean', alpha=alpha)
softmax = torch.nn.Softmax(dim=-1)
outputs = net(images)
batch_size = outputs.shape[0]
if outputs_normalization == 'softmax':
outputs_normalized = softmax(outputs)
elif outputs_normalization == 'sigmoid':
outputs_normalized = torch.sigmoid(outputs)
else:
raise RuntimeError('Invalid outputs_normalization choice in L2_L2')
if bce_var == 2:
ending_label = 100
if starting_label == 0:
one_hot_targets = torch.zeros([batch_size, ending_label], dtype=torch.float32)
# one hot encoding
for i in range(batch_size):
one_hot_targets[i][labels[i]] = 1
one_hot_targets = one_hot_targets.to('cuda')
loss = L2_criterion(outputs_normalized, one_hot_targets)
else:
with torch.no_grad():
net_old.train(False)
outputs_old = net_old(images)
# sigmoids_old = torch.sigmoid(outputs_old[:,0:starting_label])
if outputs_normalization == 'softmax':
probabilities_old = softmax(outputs_old)
elif outputs_normalization == 'sigmoid':
probabilities_old = torch.sigmoid(outputs_old)
one_hot_targets = torch.zeros([batch_size, ending_label], dtype=torch.float32)
# one hot encoding
for i in range(batch_size):
one_hot_targets[i,0:starting_label] = probabilities_old[i, :starting_label]
if labels[i] in current_classes:
one_hot_targets[i][labels[i]] = 1
one_hot_targets = one_hot_targets.to('cuda')
# clf_loss = L2_criterion(outputs_normalized, one_hot_targets)/batch_size
# clf_loss = L2_criterion(outputs, labels)#/batch_size
# targets = probabilities_old[:, :starting_label].to('cuda')
# dist_loss = L2_criterion(outputs_normalized[:, :starting_label], targets)#/batch_size
# print(f"[CE loss: {ce_loss.item()} | Dist loss: {dist_loss.item()}")
# loss = (clf_loss + (distillation_weight*dist_loss))/batch_size
loss = L2_criterion(outputs_normalized, one_hot_targets)
return loss
def BCE_L2_loss(net, net_old, criterion, images, labels, current_classes, starting_label, ending_label, distillation_weight=1, outputs_normalization='sigmoid', alpha=100):
# Binary Classification loss -> BCE (new)
# Distillation loss -> L2 (old net)
BCE_criterion = nn.BCEWithLogitsLoss(reduction='sum')
L2_criterion = L2Loss(reduction='sum', alpha=alpha)
softmax = torch.nn.Softmax(dim=-1)
outputs = net(images)
batch_size = outputs.shape[0]
if outputs_normalization == 'softmax':
outputs_normalized = softmax(outputs)
elif outputs_normalization == 'sigmoid':
outputs_normalized = torch.sigmoid(outputs)
else:
raise RuntimeError('Invalid outputs_normalization choice in BCE_L2')
# BCE_VAR=2
ending_label = 100
if starting_label == 0:
# BCE computed da starting label fino a 100
# first group of classes -> just BCE (no L2 distillation)
one_hot_targets = torch.zeros([batch_size, ending_label], dtype=torch.float32)
# one hot encoding
for i in range(batch_size):
one_hot_targets[i][labels[i]] = 1
one_hot_targets = one_hot_targets.to('cuda')
## ONE-HOT
# print(one_hot_targets.size())
# print(outputs.size()) # torch.Size([128, 100])
# print(outputs[:,0:ending_label].size())
loss = BCE_criterion(outputs, one_hot_targets)/batch_size/100
else:
# BCE
# compute old output
with torch.no_grad():
net_old.train(False)
outputs_old = net_old(images)
# sigmoids_old = torch.sigmoid(outputs_old[:,0:starting_label])
if outputs_normalization == 'softmax':
probabilities_old = softmax(outputs_old)
elif outputs_normalization == 'sigmoid':
probabilities_old = torch.sigmoid(outputs_old)
one_hot_targets = torch.zeros([batch_size, ending_label], dtype=torch.float32)
# one-hot encoding
for i in range(batch_size):
# old labels
one_hot_targets[i,0:starting_label] = probabilities_old[i, :starting_label]
# new labels (current group of classes)
if labels[i] in current_classes:
one_hot_targets[i][labels[i]] = 1
# one_hot_targets contains one-hot encoding of all classes seen until now (cumulative)
one_hot_targets = one_hot_targets.to('cuda')
bce_loss = BCE_criterion(outputs[:,starting_label:ending_label], one_hot_targets[:,starting_label:ending_label]) #/batch_size
# test_sigmoid_outputs = softmax(outputs)
# print('Some initial outputs:', test_sigmoid_outputs[0, labels[0]], test_sigmoid_outputs[1, labels[1]], test_sigmoid_outputs[2, labels[2]])
# for i in range(len(outputs)):
# print('i',i,'- ', test_sigmoid_outputs[i, labels[i]].item())
# MSE - distillation loss on old outputs (old net)
targets = probabilities_old[:, :starting_label].to('cuda')
dist_loss = L2_criterion(outputs_normalized[:, :starting_label], targets) #/(batch_size*)
#print(f"[BCE loss: {bce_loss.item()} | L2/MSE loss: {dist_loss.item()}")
loss = (bce_loss + dist_loss)/batch_size/100 #/batch_size
#print(loss.item())
return loss
|
from sqlalchemy.exc import DatabaseError, IntegrityError
from marshmallow import ValidationError
from .models import (
Costumer,
User,
Seller
)
from .serializers import(
CostumerSchema,
UserSchema,
SellerSchema
)
from utils.errors import (
ConflictError,
NotFoundError,
ClientException
)
from utils.responses import (
AbstractResponse,
GenericCreateResponse,
GenericListResponse,
GenericRetriveResponse,
GenericDeleteResponse
)
from utils.serializers import ImageSchema
from utils.images_helpers import save_to_image
from settings.database import session
from flask_bcrypt import generate_password_hash
from flaskr import bcrypt
from http import HTTPStatus
# Create your responses here.
class UserResponse(GenericCreateResponse, GenericRetriveResponse, GenericListResponse):
model = User
schema = UserSchema
session = session
def pre_validation(self):
user = User.query.filter_by(
username=self.data['username']).first()
if user:
raise ConflictError(
user_err_msg=f'{user} already exists.')
def create_instance(self):
user = User(**self.data)
password_crypt = bcrypt.generate_password_hash(
user.password, 10).decode('utf-8')
user.password = password_crypt
return user
class UserImageResponse(GenericRetriveResponse, GenericCreateResponse):
model = User
schema = ImageSchema
def create(self, blueprint_name, id):
serializer = self.pre_create()
user = self.get_object(id)
image_name = save_to_image(
blueprint_name,
image_file=serializer['thumbnail'],
extension=serializer['extension']
)
user.thumbnail = image_name
session.commit()
deserializer = UserSchema().dump(user)
return deserializer, HTTPStatus.OK
class SellerResponse(GenericCreateResponse, GenericRetriveResponse, GenericListResponse):
model = Seller
schema = SellerSchema
session = session
def pre_validation(self):
user = User.query.filter_by(
id=self.data['user_id'], deleted=False).first()
if not user:
raise ConflictError(
user_err_msg=f'User({user}) does not exist.')
seller_by_user = Seller.query.join(Seller.user).filter(
User.id == self.data['user_id']).first()
if seller_by_user:
raise ConflictError(
user_err_msg=f'A seller already exists for {user}.')
class SellerDeleteResponse(GenericDeleteResponse):
model = Seller
schema = SellerSchema
session = session
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from .models import Neighborhood
class NeighborhoodViewTests(TestCase):
def setUp(self):
pass
def test_neighborhood_home_without_login(self):
resp = self.client.get(reverse('neighborhood:neighborhood_home'))
self.assertEqual(resp.status_code, 404)
|
"""setup.py for flake8-import-order-tkalus."""
from setuptools import setup
__title__ = "flake8-import-order-tkalus"
__author__ = "Turtle Kalus"
__email__ = "turtle" "@" "kalus.us"
__version__ = "2.0"
__copyright__ = "Copyright (C) 2019 tkalus"
__license__ = "MIT License"
install_requires = ["flake8-import-order >= 0.18"]
setup(
name=__title__,
version=__version__,
description="tkalus' custom import order plugin",
long_description="",
url="https://github.com/tkalus-config/flake8-import-order-tkalus",
author=__author__,
author_email=__email__,
license=__license__,
install_requires=install_requires,
py_modules=["flake8_import_order_tkalus"],
python_requires=">=3.5",
entry_points={
"flake8_import_order.styles": ["tkalus = flake8_import_order_tkalus:Tkalus"]
},
keywords="flake8 import imports ordering",
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Flake8",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License (Expat)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
],
)
|
class Solution:
#Function to find the maximum number of meetings that can
#be performed in a meeting room.
def maximumMeetings(self,n,start,end):
result=[]
for i in range(len(start)):
result.append((start[i],end[i]))
result.sort(key=lambda x:x[1])
i=0
ans=[]
ans.append(result[i])
j=1
while j<len(result):
if result[i][1]<result[j][0]:
ans.append(result[j])
i=j
j+=1
else:
j+=1
return len(ans)
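# A tiny illustrative check (added, not part of the original solution), using the
# classic activity-selection example:
#   start = [1, 3, 0, 5, 8, 5]
#   end   = [2, 4, 6, 7, 9, 9]
#   Solution().maximumMeetings(6, start, end)  # -> 4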
|
#!/usr/bin/env python
Import('env')
env_module = env.Clone()
# Thirdparty sources
thirdparty_dirs = [
"thirdparty/exoquant/",
"thirdparty/hq3x/",
]
thirdparty_sources = []
thirdparty_sources += Glob("thirdparty/exoquant/*.c")
thirdparty_sources += Glob("thirdparty/hqx/*.cc")
env_thirdparty = env_module.Clone()
env_thirdparty.add_source_files(env.modules_sources, thirdparty_sources)
env_thirdparty.disable_warnings()
env_thirdparty.Append(CPPPATH='thirdparty/')
env_module.Append(CPPPATH='.')
env_module.Append(CPPPATH='thirdparty/')
env_module.Append(CPPPATH='classes/')
# Module sources
env_module.add_source_files(env.modules_sources, '*.cpp')
env_module.add_source_files(env.modules_sources, 'bind/*.cpp')
env_module.add_source_files(env.modules_sources, 'classes/*.cpp')
env_module.add_source_files(env.modules_sources, 'drivers/png/*.cpp')
Export('env_module')
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from rest_framework.authtoken.models import Token
from allauth.account.models import EmailAddress
from django.shortcuts import get_object_or_404
@receiver(post_save, sender = settings.AUTH_USER_MODEL)
def create_token_signal(sender, instance = None, created = False, **kwargs):
if created:
if instance.is_superuser:
email = EmailAddress(user = instance, verified = True, email = instance.email, primary = True)
email.save()
Token.objects.create(user = instance)
|
import unittest
from katas.kyu_8.grasshopper_terminal_game_1 import Hero
class HeroTestCase(unittest.TestCase):
def setUp(self):
self.myHero = Hero()
def test_equals(self):
self.assertEqual(self.myHero.name, 'Hero')
def test_equals_2(self):
self.assertEqual(self.myHero.experience, 0)
def test_equals_3(self):
self.assertEqual(self.myHero.health, 100)
def test_equals_4(self):
self.assertEqual(self.myHero.position, '00')
def test_equals_5(self):
self.assertEqual(self.myHero.damage, 5)
|
#1
def BMIC():
weight = float(input("Please enter your weight in pounds: "))
height = int(input("Please enter your height in inches: "))
BMI = (weight * 720) / (height ** 2)
if BMI > 25:
print(BMI)
print("\nyour BMI is above the healthy range")
elif BMI < 19:
print(BMI)
print("\nyour BMI is below the healthy range")
else:
print(BMI)
print("\nyour BMI is within the healthy range")
BMIC()
#2
def validdate():
    date = str(input("please enter a date in the form month/day/year: "))
    datelist = date.split("/")
    month = int(datelist[0])
    day = int(datelist[1])
    year = int(datelist[2])
    if year < 0:
        print("Invalid date")
    elif month < 1 or month > 12:
        print("Invalid date")
    elif day < 1:
        print("Invalid date")
    elif month in (1, 3, 5, 7, 8, 10, 12):
        # Months with 31 days
        if day >= 32:
            print("Invalid date")
        else:
            print("This is a valid date")
    elif month == 2:
        # February (29 is allowed here; leap years are not checked)
        if day >= 30:
            print("Invalid date")
        else:
            print("This is a valid date")
    else:
        # Months with 30 days
        if day >= 31:
            print("Invalid date")
        else:
            print("This is a valid date")
validdate()
|
from flask import Flask, jsonify, request
app = Flask(__name__)
books = [
{'name': 'Green Eggs and Ham',
'price': 7.99,
'isbn': 9870394800165
},
{'name': 'The Cat In the Hat',
'price': 6.99,
'isbn': 9870394800193
}
]
@app.route('/books')
def get_books():
return jsonify({'books': books})
app.run(port=5000)
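# Illustrative usage (added, not part of the original snippet): with the server
# running, the endpoint can be queried with e.g.
#   curl http://localhost:5000/books
# which returns the two hard-coded books as JSON.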
|
from pyVim import connect
from pyVmomi import vim
import ssl
import tasks
import pdb
gcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
si=connect.SmartConnect(host="",port=443,user="",pwd="",sslContext=gcontext)
def get_root():
content = si.RetrieveContent()
rootfolder = content.rootFolder
print "Coneected to ",rootfolder.name , "Vcenter"
return rootfolder
def select_vm_name(name):
rootfolder = get_root()
connected_vm = None
for datacenter in rootfolder.childEntity:
for vm in datacenter.vmFolder.childEntity:
if vm.name == name:
connected_vm = vm
break
if connected_vm:
print "Connected to vm:",connected_vm.name
return connected_vm
def add_disk(vm_name,disksize,disk_type):
vm = select_vm_name(vm_name)
spec = vim.vm.ConfigSpec()
unit_number = 0
dev_changes = []
# get all disks on a VM, set unit_number to the next available
for dev in vm.config.hardware.device:
if hasattr(dev.backing, 'fileName'):
unit_number = int(dev.unitNumber) + 1
# unit_number 7 reserved for scsi controller
if unit_number == 7:
unit_number += 1
if unit_number >= 16:
print "we don't support this many disks"
return
if isinstance(dev, vim.vm.device.VirtualSCSIController):
controller = dev
if vm:
print "Selected ",vm.name,"Virtual machine"
disk_spec = vim.vm.device.VirtualDeviceSpec()
new_disk_kb = int(disksize) * 1024 * 1024
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.fileOperation = "create"
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.backing = \
vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
if disk_type == 'thin':
disk_spec.device.backing.thinProvisioned = True
disk_spec.device.backing.diskMode = 'persistent'
disk_spec.device.unitNumber = unit_number
disk_spec.device.capacityInKB = new_disk_kb
disk_spec.device.controllerKey = controller.key
dev_changes.append(disk_spec)
spec.deviceChange = dev_changes
vm.ReconfigVM_Task(spec=spec)
return True
else:
print "No vm provided with name:",vm_name
return ""
return ""
def get_all_disknumbers(vm_name):
all_disk_numbers = []
vm = select_vm_name(vm_name)
for dev in vm.config.hardware.device:
if hasattr(dev.backing, 'fileName'):
all_disk_numbers.append(dev.unitNumber)
return all_disk_numbers
def remove_disc(vm_name,disk_number):
try:
vm = select_vm_name(vm_name)
hdd_prefix_label = 'Hard disk '
hdd_label = hdd_prefix_label + str(disk_number)
virtual_hdd_device = None
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk) \
and dev.deviceInfo.label == hdd_label:
virtual_hdd_device = dev
virtual_hdd_spec = vim.vm.device.VirtualDeviceSpec()
virtual_hdd_spec.operation = \
vim.vm.device.VirtualDeviceSpec.Operation.remove
virtual_hdd_spec.device = virtual_hdd_device
spec = vim.vm.ConfigSpec()
spec.deviceChange = [virtual_hdd_spec]
task = vm.ReconfigVM_Task(spec=spec)
tasks.wait_for_tasks(si, [task])
return True
except Exception as err:
print err
return False
def disc_change_mode(vm_name,disk_number,mode):
try:
vm = select_vm_name(vm_name)
hdd_prefix_label = 'Hard disk '
hdd_label = hdd_prefix_label + str(disk_number)
virtual_hdd_device = None
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk) \
and dev.deviceInfo.label == hdd_label:
virtual_hdd_device = dev
if not virtual_hdd_device:
raise RuntimeError('Virtual {} could not be found.'.format(hdd_label))
virtual_hdd_spec = vim.vm.device.VirtualDeviceSpec()
virtual_hdd_spec.operation = \
vim.vm.device.VirtualDeviceSpec.Operation.edit
virtual_hdd_spec.device = virtual_hdd_device
virtual_hdd_spec.device.backing.diskMode = mode
spec = vim.vm.ConfigSpec()
spec.deviceChange = [virtual_hdd_spec]
task = vm.ReconfigVM_Task(spec=spec)
tasks.wait_for_tasks(si, [task])
return True
except Exception as err:
print err
return False
def update_virtual_disk_capacity(vm_name, disk_number, new_capacity_in_gb):
"""
:param vm_obj: Virtual Machine Object
:param disk_number: Disk Number to change
:param new_capacity_in_gb: New Capacity in GB
:param si: Service Instance
:return: True if success
"""
try:
vm_obj = select_vm_name(vm_name)
new_capacity_in_kb = gigabytes_to_kilobytes(long(new_capacity_in_gb))
hard_disk_prefix_label = 'Hard disk '
hard_disk_label = hard_disk_prefix_label + str(disk_number)
virtual_disk_device = None
for dev in vm_obj.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk) and dev.deviceInfo.label == hard_disk_label:
virtual_disk_device = dev
disk_exist = True if virtual_disk_device else False
if disk_exist:
old_capacity_in_gb = bytes_to_gigabytes(virtual_disk_device.capacityInBytes) \
if virtual_disk_device.capacityInBytes else \
kilobytes_to_gigabytes(virtual_disk_device.capacityInKB)
if new_capacity_in_gb > old_capacity_in_gb:
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.key = virtual_disk_device.key
disk_spec.device.backing = virtual_disk_device.backing
disk_spec.device.backing.fileName = virtual_disk_device.backing.fileName
disk_spec.device.backing.diskMode = virtual_disk_device.backing.diskMode
disk_spec.device.controllerKey = virtual_disk_device.controllerKey
disk_spec.device.unitNumber = virtual_disk_device.unitNumber
disk_spec.device.capacityInKB = long(new_capacity_in_kb)
elif new_capacity_in_gb == old_capacity_in_gb:
return 'Disk capacity is the same. No change need to be done.'
else:
raise RuntimeError('Reducing Virtual Hard Disk Size is not supported at this time.')
else:
disks = list()
for dev in vm_obj.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk):
disks.append(dev)
next_unit_number = int(disks[-1].unitNumber) + 1
current_controller_key = int(disks[-1].controllerKey)
disk_spec = create_virtual_disk(new_capacity_in_kb, current_controller_key, next_unit_number, in_bytes=False)
dev_changes = []
dev_changes.append(disk_spec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = dev_changes
task = vm_obj.ReconfigVM_Task(spec=spec)
tasks.wait_for_tasks(si, [task])
return True
except Exception as err:
print err
return False
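# Illustrative usage (an added sketch, assuming valid vCenter credentials are
# supplied to connect.SmartConnect above and that the VM name exists):
#   add_disk("my-test-vm", disksize=10, disk_type="thin")   # add a 10 GB thin-provisioned disk
#   disc_change_mode("my-test-vm", 2, "independent_persistent")
#   remove_disc("my-test-vm", disk_number=2)                # detach "Hard disk 2"
#   update_virtual_disk_capacity("my-test-vm", 1, 40)       # grow "Hard disk 1" to 40 GB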
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that msvs_external_builder being set will invoke the provided
msvs_external_builder_build_cmd and msvs_external_builder_clean_cmd, and will
not invoke MSBuild actions and rules.
"""
import os
import sys
import TestGyp
if int(os.environ.get('GYP_MSVS_VERSION', 0)) < 2010:
sys.exit(0)
test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
# without the flag set
test.run_gyp('external.gyp')
test.build('external.gyp', target='external')
test.must_not_exist('external_builder.out')
test.must_exist('msbuild_rule.out')
test.must_exist('msbuild_action.out')
test.must_match('msbuild_rule.out', 'msbuild_rule.py hello.z a b c')
test.must_match('msbuild_action.out', 'msbuild_action.py x y z')
os.remove('msbuild_rule.out')
os.remove('msbuild_action.out')
# with the flag set, using Build
try:
os.environ['GYP_DEFINES'] = 'use_external_builder=1'
test.run_gyp('external.gyp')
test.build('external.gyp', target='external')
finally:
del os.environ['GYP_DEFINES']
test.must_not_exist('msbuild_rule.out')
test.must_not_exist('msbuild_action.out')
test.must_exist('external_builder.out')
test.must_match('external_builder.out', 'external_builder.py build 1 2 3')
os.remove('external_builder.out')
# with the flag set, using Clean
try:
os.environ['GYP_DEFINES'] = 'use_external_builder=1'
test.run_gyp('external.gyp')
test.build('external.gyp', target='external', clean=True)
finally:
del os.environ['GYP_DEFINES']
test.must_not_exist('msbuild_rule.out')
test.must_not_exist('msbuild_action.out')
test.must_exist('external_builder.out')
test.must_match('external_builder.out', 'external_builder.py clean 4 5')
os.remove('external_builder.out')
test.pass_test()
|
from app import db
from app.model.DetailsModel import Details
class Queries(db.Model):
__tablename__ = "queries" # Define nama tabel
id = db.Column(db.Integer, unique=True, primary_key=True, nullable=False)
query_name = db.Column(db.String, nullable=False)
details = db.relationship("Details", backref="queries", lazy="dynamic")
def __init__(self, query):
self.query_name = query
def __repr__(self):
return "<Query: {}>".format(self.query)
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def getAll():
queries = Queries.query.all()
result = list()
for data in queries:
print(data.details)
obj = {
"id": data.id,
"query": data.query_name,
"details": Details.getAll(data.id)
}
result.append(obj)
return result
@staticmethod
def findByQueryName(queryName):
data = Queries.query.filter_by(query_name=queryName).first()
if data is None:
return None
else:
obj = {
"id": data.id,
"query": data.query_name,
"details": Details.getAll(data.id)
}
return obj
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 23 18:20:22 2019
@author: karthik
"""
# Importing the needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
# Importing the data
train=pd.read_csv('Data/train.csv')
test=pd.read_csv('Data/test.csv')
# Info about data given
train.info()
test.info()
# Knowing sum of null values
test.isnull().sum()
train.isnull().sum()
# Dropping the Id Column
train=train.drop('Id',axis=1)
test_data=test.drop('Id',axis=1)
# Handling missing values in train
train['LotFrontage'] = train['LotFrontage'].interpolate()
#filling alley values 'nan ' with no
train['Alley']= train['Alley'].fillna('no')
train['MasVnrType']= train['MasVnrType'].fillna('no')
train['BsmtQual']= train['BsmtQual'].fillna('no')
train['BsmtCond']= train['BsmtCond'].fillna('no')
train['BsmtExposure']= train['BsmtExposure'].fillna('no')
train['BsmtFinType1']= train['BsmtFinType1'].fillna('no')
train['BsmtFinType2']= train['BsmtFinType2'].fillna('no')
train['Electrical']= train['Electrical'].fillna('no')
train['FireplaceQu']= train['FireplaceQu'].fillna('no')
train['GarageType']= train['GarageType'].fillna('no')
train['GarageFinish']= train['GarageFinish'].fillna('no')
train['GarageQual']= train['GarageQual'].fillna('no')
train['GarageCond']= train['GarageCond'].fillna('no')
train['PoolQC']= train['PoolQC'].fillna('no')
train['Fence']= train['Fence'].fillna('no')
train['MiscFeature']= train['MiscFeature'].fillna('no')
train['GarageYrBlt']= train['GarageYrBlt'].fillna(0)
train['MasVnrArea']= train['MasVnrArea'].fillna(0)
# handling missing values in test_data
test_data['LotFrontage'] = test_data['LotFrontage'].interpolate()
#filling alley values 'nan ' with no
test_data['Alley']= test_data['Alley'].fillna('no')
test_data['MSZoning']=test_data['MSZoning'].fillna('no')
test_data['Alley']= test_data['Alley'].fillna('no')
test_data['Exterior1st']= test_data['Exterior1st'].fillna('no')
test_data['Exterior2nd']= test_data['Exterior2nd'].fillna('no')
test_data['MasVnrType']= test_data['MasVnrType'].fillna('no')
test_data['MasVnrArea']= test_data['MasVnrArea'].fillna(0)
test_data['Utilities']=test_data['Utilities'].fillna('no')
test_data['KitchenQual']=test_data['KitchenQual'].fillna('no')
test_data['Functional']=test_data['Functional'].fillna('no')
test_data['FireplaceQu']=test_data['FireplaceQu'].fillna('no')
test_data['BsmtQual']=test_data['BsmtQual'].fillna('no')
test_data['BsmtCond']=test_data['BsmtCond'].fillna('no')
test_data['BsmtExposure']=test_data['BsmtExposure'].fillna('no')
test_data['BsmtFinType1']=test_data['BsmtFinType1'].fillna('no')
test_data['BsmtFinType2']=test_data['BsmtFinType2'].fillna('no')
test_data['GarageType']=test_data['GarageType'].fillna('no')
test_data['GarageFinish']=test_data['GarageFinish'].fillna('no')
test_data['GarageQual']=test_data['GarageQual'].fillna('no')
test_data['GarageCond']=test_data['GarageCond'].fillna('no')
test_data['GarageYrBlt']=test_data['GarageYrBlt'].fillna(0)
test_data['GarageCars']=test_data['GarageCars'].fillna(0)
test_data['GarageArea']=test_data['GarageArea'].fillna(0)
test_data['PoolQC']=test_data['PoolQC'].fillna('no')
test_data['Fence']=test_data['Fence'].fillna('no')
test_data['MiscFeature']=test_data['MiscFeature'].fillna('no')
test_data['SaleType']=test_data['SaleType'].fillna('no')
test_data['BsmtFinSF1']=test_data['BsmtFinSF1'].fillna(0)
test_data['BsmtFinSF2']=test_data['BsmtFinSF2'].fillna(0)
test_data['BsmtUnfSF']=test_data['BsmtUnfSF'].fillna(0)
test_data['TotalBsmtSF']=test_data['TotalBsmtSF'].fillna(0)
test_data['BsmtFullBath']=test_data['BsmtFullBath'].fillna(0)
test_data['BsmtHalfBath']=test_data['BsmtHalfBath'].fillna(0)
# Converting categorical variables into numbers in train
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
train['MSZoning'] = labelencoder.fit_transform(train['MSZoning'])
train['Street'] = labelencoder.fit_transform(train['Street'])
train['Alley'] = labelencoder.fit_transform(train['Alley'])
train['LotShape'] = labelencoder.fit_transform(train['LotShape'])
train['LandContour'] = labelencoder.fit_transform(train['LandContour'])
train['Utilities'] = labelencoder.fit_transform(train['Utilities'])
train['LotConfig'] = labelencoder.fit_transform(train['LotConfig'])
train['LandSlope'] = labelencoder.fit_transform(train['LandSlope'])
train['Neighborhood'] = labelencoder.fit_transform(train['Neighborhood'])
train['Condition1'] = labelencoder.fit_transform(train['Condition1'])
train['Condition2'] = labelencoder.fit_transform(train['Condition2'])
train['BldgType'] = labelencoder.fit_transform(train['BldgType'])
train['HouseStyle'] = labelencoder.fit_transform(train['HouseStyle'])
train['RoofStyle'] = labelencoder.fit_transform(train['RoofStyle'])
train['RoofMatl'] = labelencoder.fit_transform(train['RoofMatl'])
train['Exterior1st'] = labelencoder.fit_transform(train['Exterior1st'])
train['Exterior2nd'] = labelencoder.fit_transform(train['Exterior2nd'])
train['MasVnrType'] = labelencoder.fit_transform(train['MasVnrType'])
train['ExterQual'] = labelencoder.fit_transform(train['ExterQual'])
train['ExterCond'] = labelencoder.fit_transform(train['ExterCond'])
train['Foundation'] = labelencoder.fit_transform(train['Foundation'])
train['BsmtQual'] = labelencoder.fit_transform(train['BsmtQual'])
train['BsmtCond'] = labelencoder.fit_transform(train['BsmtCond'])
train['BsmtExposure'] = labelencoder.fit_transform(train['BsmtExposure'])
train['BsmtFinType1'] = labelencoder.fit_transform(train['BsmtFinType1'])
train['BsmtFinType2'] = labelencoder.fit_transform(train['BsmtFinType2'])
train['Heating'] = labelencoder.fit_transform(train['Heating'])
train['HeatingQC'] = labelencoder.fit_transform(train['HeatingQC'])
train['CentralAir'] = labelencoder.fit_transform(train['CentralAir'])
train['Electrical'] = labelencoder.fit_transform(train['Electrical'])
train['KitchenQual'] = labelencoder.fit_transform(train['KitchenQual'])
train['Functional'] = labelencoder.fit_transform(train['Functional'])
train['FireplaceQu'] = labelencoder.fit_transform(train['FireplaceQu'])
train['GarageType'] = labelencoder.fit_transform(train['GarageType'])
train['GarageFinish'] = labelencoder.fit_transform(train['GarageFinish'])
train['GarageQual'] = labelencoder.fit_transform(train['GarageQual'])
train['GarageCond'] = labelencoder.fit_transform(train['GarageCond'])
train['PavedDrive'] = labelencoder.fit_transform(train['PavedDrive'])
train['PoolQC'] = labelencoder.fit_transform(train['PoolQC'])
train['Fence'] = labelencoder.fit_transform(train['Fence'])
train['MiscFeature'] = labelencoder.fit_transform(train['MiscFeature'])
train['SaleType'] = labelencoder.fit_transform(train['SaleType'])
train['SaleCondition'] = labelencoder.fit_transform(train['SaleCondition'])
# Converting categorical variables into numbers in test_data
test_data['MSZoning'] = labelencoder.fit_transform(test_data['MSZoning'])
test_data['Alley'] = labelencoder.fit_transform(test_data['Alley'])
test_data['Street'] = labelencoder.fit_transform(test_data['Street'])
test_data['LotShape'] = labelencoder.fit_transform(test_data['LotShape'])
test_data['LandContour'] = labelencoder.fit_transform(test_data['LandContour'])
test_data['Utilities'] = labelencoder.fit_transform(test_data['Utilities'])
test_data['LotConfig'] = labelencoder.fit_transform(test_data['LotConfig'])
test_data['LandSlope'] = labelencoder.fit_transform(test_data['LandSlope'])
test_data['Neighborhood'] = labelencoder.fit_transform(test_data['Neighborhood'])
test_data['Condition1'] = labelencoder.fit_transform(test_data['Condition1'])
test_data['Condition2'] = labelencoder.fit_transform(test_data['Condition2'])
test_data['BldgType'] = labelencoder.fit_transform(test_data['BldgType'])
test_data['HouseStyle'] = labelencoder.fit_transform(test_data['HouseStyle'])
test_data['RoofStyle'] = labelencoder.fit_transform(test_data['RoofStyle'])
test_data['RoofMatl'] = labelencoder.fit_transform(test_data['RoofMatl'])
test_data['Exterior1st'] = labelencoder.fit_transform(test_data['Exterior1st'])
test_data['Exterior2nd'] = labelencoder.fit_transform(test_data['Exterior2nd'])
test_data['MasVnrType'] = labelencoder.fit_transform(test_data['MasVnrType'])
test_data['ExterQual'] = labelencoder.fit_transform(test_data['ExterQual'])
test_data['ExterCond'] = labelencoder.fit_transform(test_data['ExterCond'])
test_data['Foundation'] = labelencoder.fit_transform(test_data['Foundation'])
test_data['KitchenQual'] = labelencoder.fit_transform(test_data['KitchenQual'])
test_data['Functional'] = labelencoder.fit_transform(test_data['Functional'])
test_data['FireplaceQu'] = labelencoder.fit_transform(test_data['FireplaceQu'])
test_data['BsmtQual'] = labelencoder.fit_transform(test_data['BsmtQual'])
test_data['BsmtCond'] = labelencoder.fit_transform(test_data['BsmtCond'])
test_data['BsmtExposure'] = labelencoder.fit_transform(test_data['BsmtExposure'])
test_data['BsmtFinType1'] = labelencoder.fit_transform(test_data['BsmtFinType1'])
test_data['BsmtFinType2'] = labelencoder.fit_transform(test_data['BsmtFinType2'])
test_data['Heating']=labelencoder.fit_transform(test_data['Heating'])
test_data['HeatingQC']=labelencoder.fit_transform(test_data['HeatingQC'])
test_data['CentralAir']=labelencoder.fit_transform(test_data['CentralAir'])
test_data['Electrical']=labelencoder.fit_transform(test_data['Electrical'])
test_data['GarageType']=labelencoder.fit_transform(test_data['GarageType'])
test_data['GarageFinish']=labelencoder.fit_transform(test_data['GarageFinish'])
test_data['GarageQual']=labelencoder.fit_transform(test_data['GarageQual'])
test_data['GarageCond']=labelencoder.fit_transform(test_data['GarageCond'])
test_data['PavedDrive']=labelencoder.fit_transform(test_data['PavedDrive'])
test_data['PoolQC']=labelencoder.fit_transform(test_data['PoolQC'])
test_data['Fence']=labelencoder.fit_transform(test_data['Fence'])
test_data['MiscFeature']=labelencoder.fit_transform(test_data['MiscFeature'])
test_data['SaleType']=labelencoder.fit_transform(test_data['SaleType'])
test_data['SaleCondition']=labelencoder.fit_transform(test_data['SaleCondition'])
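# Note (added): fitting a fresh LabelEncoder on the test data, as above, can map the
# same category to different integer codes in train and test whenever the category
# sets differ; fitting the encoders on the combined data (or reusing the encoders
# fitted on train) keeps the codes consistent.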
# Splitting into x and y
x = train.iloc[ : , : -1].values
y = train.iloc[:,-1].values
# Splitting data into train and test data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)  # 30% of the data is held out as test data (train and test fractions always sum to 1)
# Linear regression
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(x_train , y_train)
regressor.score(x_train,y_train)
# Predicting the model on test_data
x_test_data=test_data.values
y_pred=regressor.predict(x_test_data)
# exporting the results
output = np.column_stack((test['Id'], y_pred))
results = pd.DataFrame(output.astype('int'), columns = ['Id', 'SalePrice'])
results.to_csv('house_price_prediction_results.csv', index = False)
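# One possible refinement (sketch only, assuming pandas is imported as pd earlier in this script):
# fitting one LabelEncoder per column on the combined train/test values keeps the integer codes
# consistent across both frames, whereas calling fit_transform separately on test_data can map
# the same category to a different number.
# from sklearn.preprocessing import LabelEncoder
# for col in ['MSZoning', 'Alley', 'Street']:  # illustrative subset of the categorical columns
#     le = LabelEncoder().fit(pd.concat([train[col], test_data[col]]).astype(str))
#     train[col] = le.transform(train[col].astype(str))
#     test_data[col] = le.transform(test_data[col].astype(str))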
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold, cross_val_score
from sklearn.preprocessing import PolynomialFeatures
from gaussian_kernel_funcfile import *
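# The gaussian_kernel* callables used below come from gaussian_kernel_funcfile (not shown here).
# Each is assumed to map neighbour distances to Gaussian weights, roughly like this sketch,
# where the number in the name is the gamma value (the real implementation may differ):
#   def gaussian_kernel25(distances):
#       return np.exp(-25 * (distances ** 2))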
#(i)(a)
x_dummy_training_data=np.array([-1,0,1]).reshape(-1,1)
y_dummy_training_data=np.array([0,1,0]).reshape(-1,1)
fig1=plt.figure()
from sklearn.neighbors import KNeighborsRegressor
# KNeighborsRegressor model :k = 3 γ=0, 1, 5, 10, 25
# generate predictions on a grid of feature values that range from -3 to 3
Xtest = np.linspace(-3, 3).reshape(-1, 1)
model1 = KNeighborsRegressor(n_neighbors=3,weights=gaussian_kernel0).fit(x_dummy_training_data, y_dummy_training_data)
ypred1 = model1.predict(Xtest)
model2 = KNeighborsRegressor(n_neighbors=3,weights=gaussian_kernel1).fit(x_dummy_training_data, y_dummy_training_data)
ypred2 = model2.predict(Xtest)
model3 = KNeighborsRegressor(n_neighbors=3,weights=gaussian_kernel5).fit(x_dummy_training_data, y_dummy_training_data)
ypred3 = model3.predict(Xtest)
model4 = KNeighborsRegressor(n_neighbors=3,weights=gaussian_kernel10).fit(x_dummy_training_data, y_dummy_training_data)
ypred4 = model4.predict(Xtest)
model5 = KNeighborsRegressor(n_neighbors=3,weights=gaussian_kernel25).fit(x_dummy_training_data, y_dummy_training_data)
ypred5 = model5.predict(Xtest)
plt.scatter(x_dummy_training_data, y_dummy_training_data, color='red',label="dummy_training_data")#plot the dummy training data
plt.plot(Xtest,ypred1,color='green',label="k=3,gamma=0")
plt.plot(Xtest, ypred2, color='blue',label="k=3,gamma=1")
plt.plot(Xtest, ypred3, color='orange',label="k=3,gamma=5")
plt.plot(Xtest, ypred4, color='purple',label="k=3,gamma=10")
plt.plot(Xtest, ypred5, color='brown',label="k=3,gamma=25")
plt.xlabel('input x')
plt.ylabel('output y')
plt.title("predictions of training data")
plt.legend()
plt.show()
#(b)
fig2=plt.figure()
Xtest_1 = np.linspace(-1, 1).reshape(-1, 1)
ypred_1=model5.predict(Xtest_1)
plt.scatter(x_dummy_training_data, y_dummy_training_data, color='red',label="dummy_training_data")#plot the dummy training data
plt.plot(Xtest_1, ypred_1, color='blue',label="k=3,gamma=25")
plt.xlabel('input x')
plt.ylabel('output y')
plt.legend()
plt.show()
#(c)
from sklearn.kernel_ridge import KernelRidge
c_range=[0.1,1,1000]
gamma_range=[0,1,5,10,25]
color_range=['blue','green','orange','lime','cyan','pink','purple','cornflowerblue']
"""
for inx1,gamma in enumerate(gamma_range):
fig3 = plt.figure() #for each gamma has one plot with different values of C
plt.scatter(x_dummy_training_data, y_dummy_training_data, color='red',
label="dummy_training_data",s=10) # plot the dummy training data
for inx2,C in enumerate(c_range):
model = KernelRidge(alpha=1.0/(2*C), kernel='rbf', gamma=gamma).fit(x_dummy_training_data, y_dummy_training_data)
ypred_kernelridge=model.predict(Xtest)
print("KernelRidge model when gammma={0},C={1},dual_coef={2}".format(gamma,C,model.dual_coef_))
plt.plot(Xtest,ypred_kernelridge,color=color_range[inx2],label='C={}'.format(C))
plt.legend(loc='upper left', fontsize=10)
plt.title('gamma={}'.format(gamma))
plt.show()
"""
#(ii)(a)
gamma_range1=[0,1,5,10,25,50,100,200]
df = pd.read_csv("week6.csv")
x = np.array(df.iloc[:, 0]).reshape(-1,1)#read the x value as array
y = np.array(df.iloc[:, 1]).reshape(-1,1)#read the y value
# kNN model with Gaussian kernel weights: k=999 gamma=0,1,5,10,25
weights_range=[gaussian_kernel0,gaussian_kernel1,gaussian_kernel5,gaussian_kernel10,gaussian_kernel25,gaussian_kernel50,gaussian_kernel100,gaussian_kernel200]
fig4=plt.figure()
plt.scatter(x, y, color='red', label="original training data",s=3) # plot the CSV training data
for inx3,weights in enumerate(weights_range):
model_knn = KNeighborsRegressor(n_neighbors=999, weights=weights).fit(x, y)
ypred_knn = model_knn.predict(Xtest)
plt.plot(Xtest, ypred_knn, color=color_range[inx3],label='gamma={}'.format(gamma_range1[inx3]))
plt.xlabel('input x')
plt.ylabel('output y')
plt.title('KNN model predictions when k=999 with different gamma')
plt.legend(fontsize=5)
plt.show()
#(ii)(b)
"""
for inx4,gamma in enumerate(gamma_range1):
fig5 = plt.figure() #for each gamma has one plot with different values of C
plt.scatter(x, y, color='red',
label="original training data",s=10) # plot the original training data
for inx4,C in enumerate(c_range):
model = KernelRidge(alpha=1.0/(2*C), kernel='rbf', gamma=gamma).fit(x, y)
ypred_kernelridge=model.predict(Xtest)
print("KernelRidge model when gammma={0},C={1},dual_coef={2}".format(gamma,C,model.dual_coef_))
plt.plot(Xtest,ypred_kernelridge,color=color_range[inx4],label='C={}'.format(C))
plt.legend(loc='upper left', fontsize=10)
plt.title('gamma={}'.format(gamma))
plt.show()"""
#(ii)(c)
""" Use cross-validation to choose a reasonable value for hyperparameter γ for the
kNN model. Now use cross-validation to choose γ and α hyperparameter for the
kernalised ridge regression model. Generate predictions for both models using
these “optimised” hyperparameter values. """
fig6=plt.figure()
mean_error_knn=[]
std_error_knn=[]
mean_score_knn=[]
weights_range1=[gaussian_kernel0,gaussian_kernel1,gaussian_kernel5,gaussian_kernel10,gaussian_kernel25]
for inx5,weights in enumerate(weights_range):
kf = KFold(n_splits=5)
model_knn1 = KNeighborsRegressor(n_neighbors=799, weights=weights)
mse = []
for train, test in kf.split(x):
model_knn1.fit(x[train], y[train])
y_pre_knn1 = model_knn1.predict(x[test])
from sklearn.metrics import mean_squared_error
mse.append(mean_squared_error(y[test],y_pre_knn1))
mean_error_knn.append(np.array(mse).mean())
std_error_knn.append(np.array(mse).std())
mean_score_knn.append(cross_val_score(model_knn1, x, y, cv=5).mean())
plt.subplot(1,2,1)
plt.plot(gamma_range1,mean_score_knn,label='mean scores',color='red')
plt.ylabel('mean score')
plt.xlabel('gamma')
plt.title("score of different gamma values")
plt.legend(loc='upper right', fontsize=10)
plt.subplot(1,2,2)
plt.errorbar(gamma_range1, mean_error_knn, label="MSE of cross_validation", color='blue', yerr=std_error_knn)
plt.ylabel('Mean square error')
plt.xlabel('gamma')
plt.title("MSE of different gamma values")
plt.legend(loc='upper right', fontsize=10)
plt.show()
fig7=plt.figure()
model_knn_final = KNeighborsRegressor(n_neighbors=999, weights=gaussian_kernel50).fit(x,y)
ypred_knn_final=model_knn_final.predict(Xtest)
plt.scatter(x, y, color='red', label="original training data",s=3) # plot the CSV training data
plt.plot(Xtest,ypred_knn_final,color='blue',label='prediction')
plt.title("KNN final model:K=999,gamma=50")
plt.legend()
#alpha=5 to find gamma for kernalised ridge regression model
mean_error_kr=[]
std_error_kr=[]
mean_score_kr=[]
for inx5,gamma in enumerate(gamma_range):
kf = KFold(n_splits=5)
kr_model = KernelRidge(alpha=5, kernel='rbf', gamma=gamma)
mse = []
for train, test in kf.split(x):
kr_model.fit(x[train], y[train])
y_pre_kf = kr_model.predict(x[test])
from sklearn.metrics import mean_squared_error
mse.append(mean_squared_error(y[test],y_pre_kf))
mean_error_kr.append(np.array(mse).mean())
std_error_kr.append(np.array(mse).std())
mean_score_kr.append(cross_val_score(kr_model, x, y, cv=5).mean())
plt.subplot(1,2,1)
plt.plot(gamma_range,mean_score_kr,label='mean scores',color='red')
plt.ylabel('mean score')
plt.xlabel('gamma')
plt.title("score of different gamma values")
plt.legend(loc='upper right', fontsize=10)
plt.subplot(1,2,2)
plt.errorbar(gamma_range, mean_error_kr, label="MSE of cross_validation", color='blue', yerr=std_error_kr)
plt.ylabel('Mean square error')
plt.xlabel('gamma')
plt.title("MSE of different gamma values")
plt.legend(loc='upper right', fontsize=10)
#gamma=5 to find the best alpha
fig8=plt.figure()
mean_error_kr1=[]
std_error_kr1=[]
mean_score_kr1=[]
alpha_range=[0.01,0.03,0.05,0.07,0.1,0.15,0.2]
for alpha in alpha_range:
kf = KFold(n_splits=5)
kr_model1 = KernelRidge(alpha=alpha, kernel='rbf', gamma=5)
mse = []
for train, test in kf.split(x):
kr_model1.fit(x[train], y[train])
y_pre_kr = kr_model1.predict(x[test])
from sklearn.metrics import mean_squared_error
mse.append(mean_squared_error(y[test],y_pre_kr))
mean_error_kr1.append(np.array(mse).mean())
std_error_kr1.append(np.array(mse).std())
mean_score_kr1.append(cross_val_score(kr_model1, x, y, cv=5).mean())
plt.subplot(1,2,1)
plt.plot(alpha_range,mean_score_kr1,label='mean scores',color='red')
plt.ylabel('mean score')
plt.xlabel('alpha')
plt.title("score of different alpha values")
plt.legend(loc='upper right', fontsize=10)
plt.subplot(1,2,2)
plt.errorbar(alpha_range, mean_error_kr1, label="MSE of cross_validation", color='blue', yerr=std_error_kr1)
plt.ylabel('Mean square error')
plt.xlabel('alpha')
plt.title("MSE of different alpha values")
plt.legend(loc='upper right', fontsize=10)
plt.show()
#plot final KR model gamma=5 alpha=0.1
fig9=plt.figure()
kr_model_final = KernelRidge(alpha=0.1, kernel='rbf', gamma=5).fit(x,y)
kr_pre_final=kr_model_final.predict(Xtest)
plt.scatter(x, y, color='red', label="original training data",s=3) # plot the CSV training data
plt.plot(Xtest,kr_pre_final,color='blue',label='KernelRidge')
plt.plot(Xtest,ypred_knn_final,color='yellow',label='KNN')
plt.title("final models")
plt.legend()
plt.show()
|
#-*- coding:utf8 -*-
# Copyright (c) 2020 barriery
# Python release: 3.7.0
"""
[{
    "NodeID": "xxxxxx",
    "threshold": ...,
    "NodeFreeResourceInfo": "...jsonString...",
    "NodeContractResourceInfo": "...jsonString..."
}, {
    "NodeID": "xxxxxx",
    "threshold": ...,
    "NodeFreeResourceInfo": "...jsonString...",
    "NodeContractResourceInfo": "...jsonString..."
},
...
{
    "NodeID": "xxxxxx",
    "threshold": ...,
    "NodeFreeResourceInfo": "...jsonString...",
    "NodeContractResourceInfo": "...jsonString..."
}]
"""
import json
import logging
logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M', level=logging.INFO)
def parse_json(json_obj):
nodes = []
for node in json_obj:
nid = node["NodeID"]
threshold = node["threshold"]
NodeFreeResourceInfo = node["NodeFreeResourceInfo"]
free = {
"mem": float(NodeFreeResourceInfo["freeMEM"].split(" ")[0]),
"cpu": float(NodeFreeResourceInfo["freeCPU"].split("%")[0]),
}
NodeContractResourceInfo = node["NodeContractResourceInfo"]
contracts = []
for c in NodeContractResourceInfo:
contracts.append(Contract(
cid=c["id"],
mem=float(c["rss"].split(" ")[0]),
cpu=float(c["cpu"].split("%")[0])))
nodes.append(Node(
nid=nid,
free=free,
contracts=contracts,
threshold=threshold))
return nodes
class Contract(object):
def __init__(self, cid, mem, cpu):
self.cid = cid
self.mem = mem
self.cpu = cpu
class Node(object):
def __init__(self, nid, free, contracts, threshold):
self.nid = nid
self.mem = free["mem"]
self.cpu = free["cpu"]
self.threshold = threshold
self.contracts = {c.cid: c for c in contracts}
self.tot_mem = self.mem
self.tot_cpu = self.cpu
for c in self.contracts.values():
self.tot_mem += c.mem
self.tot_cpu += c.cpu
self.tonodes = {} # contract.id : node
self.backnodes = {} # contract.id : nodes
self.rest = 1 - self.threshold
def min_num_to_mem(self):
mem = self.tot_mem * self.rest - self.mem
contracts = list(self.contracts.values())
contracts.sort(key=lambda x:x.mem, reverse=True)
for idx, contract in enumerate(contracts):
mem -= contract.mem
if mem <= 0:
return idx + 1
def min_num_to_cpu(self):
cpu = self.tot_cpu * self.rest - self.cpu
contracts = list(self.contracts.values())
contracts.sort(key=lambda x:x.cpu, reverse=True)
for idx, contract in enumerate(contracts):
cpu -= contract.cpu
if cpu <= 0:
return idx + 1
def add_tonode(self, cid, nid):
if cid not in self.tonodes:
self.tonodes[cid] = set()
if nid in self.tonodes[cid]:
raise Exception("nid({}) already exists in tonodes".format(nid))
self.tonodes[cid].add(nid)
def remove_tonode(self, cid, nid):
if nid not in self.tonodes[cid]:
raise Exception("nid({}) not in tonodes".format(nid))
self.tonodes[cid].remove(nid)
def add_backnode(self, cid, nid):
if cid not in self.backnodes:
self.backnodes[cid] = set()
if nid in self.backnodes[cid]:
raise Exception("nid({}) already exists in backnodes".format(nid))
self.backnodes[cid].add(nid)
def rm_backnode(self, cid, nid):
if nid not in self.backnodes[cid]:
raise Exception("nid({}) not in backnodes".format(nid))
self.backnodes[cid].remove(nid)
def remove_contract(self, cid):
if cid not in self.contracts:
raise Exception("cid({}) not in Node({})".format(cid, self.nid))
contract = self.contracts.pop(cid)
self.mem += contract.mem
self.cpu += contract.cpu
self.tonodes.pop(cid)
return contract
def add_contract(self, contract):
if contract.cid in self.contracts:
raise Exception("cid({}) already exists in Node({})".format(contract.cid, self.nid))
self.mem -= contract.mem
self.cpu -= contract.cpu
self.contracts[contract.cid] = contract
def exceed(self):
return self.tot_mem * self.rest > self.mem or self.tot_cpu * self.rest > self.cpu
class Graph(object):
def __init__(self, nodes):
self.contract2nodes = {}
for node in nodes:
for cid, contract in node.contracts.items():
if cid not in self.contract2nodes:
self.contract2nodes[cid] = set()
self.contract2nodes[cid].add(node.nid)
for tonode in nodes:
if tonode.nid == node.nid:
continue
if cid not in tonode.contracts:
node.add_tonode(cid, tonode.nid)
logging.debug("nid[{}] tonode: {}".format(node.nid, node.tonodes))
self.nodes = {n.nid: n for n in nodes}
self.exceed_nodes = set()
for nid, node in self.nodes.items():
if node.exceed():
self.exceed_nodes.add(nid)
self.transfers = {}
def transfer(self, src_nid, dst_nid, cid, add=True):
src_node = self.nodes[src_nid]
dst_node = self.nodes[dst_nid]
logging.debug("nid[{}] tonode: {}".format(src_nid, src_node.tonodes.get(cid)))
logging.debug("nid[{}] tonode: {}".format(dst_nid, dst_node.tonodes.get(cid)))
self.update_node_tonodes(src_node, dst_node, cid)
self.update_node_backnodes(src_node, dst_node, cid)
self.update_node_contracts(src_node, dst_node, cid)
self.update_exceed_nodes(src_node, dst_node)
self.update_transfer(src_node, dst_node, cid, add)
logging.debug("nid[{}] tonode: {}".format(src_nid, src_node.tonodes.get(cid)))
logging.debug("nid[{}] tonode: {}".format(dst_nid, dst_node.tonodes.get(cid)))
logging.debug("===========")
def update_transfer(self, src_node, dst_node, cid, add):
src_nid = src_node.nid
dst_nid = dst_node.nid
if add:
if cid in self.transfers:
raise Exception("cid({}) already exists in transfers".format(cid))
self.transfers[cid] = {
"src": src_nid,
"dst": dst_nid,
}
else:
if cid not in self.transfers:
raise Exception("cid({}) not exists in transfers".format(cid))
self.transfers.pop(cid)
def update_exceed_nodes(self, src_node, dst_node):
src_nid = src_node.nid
dst_nid = dst_node.nid
logging.debug("update_exceed_nodes: {} => {}".format(src_nid, dst_nid))
if src_nid in self.exceed_nodes and not src_node.exceed():
self.exceed_nodes.remove(src_nid)
if dst_nid not in self.exceed_nodes and dst_node.exceed():
self.exceed_nodes.add(dst_nid)
def update_node_contracts(self, src_node, dst_node, cid):
logging.debug("update_node_contracts: [{}] {} => {}".format(cid, src_node.nid, dst_node.nid))
contract = src_node.remove_contract(cid)
dst_node.add_contract(contract)
def update_node_tonodes(self, src_node, dst_node, cid):
logging.debug("update_node_tonode: [{}] {} => {}".format(cid, src_node.nid, dst_node.nid))
if cid not in src_node.tonodes:
src_node.tonodes[cid] = set()
if cid not in dst_node.tonodes:
dst_node.tonodes[cid] = set()
src_node.tonodes[cid], dst_node.tonodes[cid] = dst_node.tonodes[cid], src_node.tonodes[cid]
dst_node.remove_tonode(cid, dst_node.nid)
dst_node.add_tonode(cid, src_node.nid)
def update_node_backnodes(self, src_node, dst_node, cid):
src_nid = src_node.nid
dst_nid = dst_node.nid
logging.debug("update_node_backnodes: [{}] {} => {}".format(cid, src_nid, dst_nid))
if src_nid not in self.contract2nodes[cid]:
raise Exception("Failed to transfer: nid[{}] not in contract2nodes".format(src_nid))
self.contract2nodes[cid].remove(src_nid)
self.contract2nodes[cid].add(dst_nid)
if cid not in src_node.backnodes:
src_node.backnodes[cid] = set()
if cid not in dst_node.backnodes:
dst_node.backnodes[cid] = set()
for nid in src_node.backnodes[cid]:
node = self.nodes[nid]
            node.remove_tonode(cid, src_nid)
            node.add_tonode(cid, dst_nid)
src_node.backnodes[cid], dst_node.backnodes[cid] = dst_node.backnodes[cid], src_node.backnodes[cid]
def check(self):
return len(self.exceed_nodes) == 0
def transfer_already_exists(self, cid):
return cid in self.transfers
def min_num_to_transfer(self):
count = 0
for exceed_nid in self.exceed_nodes:
min_count = max(
self.nodes[exceed_nid].min_num_to_mem(),
self.nodes[exceed_nid].min_num_to_cpu())
count += min_count
return count
def star(graph):
return graph.min_num_to_transfer()
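# star() is the heuristic h(n) used by the IDA* search below: for every node that still
# exceeds its threshold, min_num_to_transfer() adds a lower bound on how many contracts
# must leave it (the larger of the memory-based and CPU-based minima), so the sum is an
# optimistic estimate of the remaining transfers.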
def IDAstar(graph, depth, max_depth):
logging.debug("IDA*: depth[{}] max_depth[{}]".format(depth, max_depth))
if graph.check():
return True
Astar = star(graph)
logging.debug("Star: {}".format(Astar))
if Astar + depth > max_depth:
logging.debug("Astar({}) + depth({}) > max_depth({})".format(Astar, depth, max_depth))
return False
logging.debug("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
logging.debug("exceed_nids: {}".format(graph.exceed_nodes))
for exceed_nid in graph.exceed_nodes:
exceed_node = graph.nodes[exceed_nid]
logging.debug("exceed_node: {}".format(exceed_node.nid))
for cid in exceed_node.contracts:
tonodes = exceed_node.tonodes[cid]
for dst_nid in tonodes:
if graph.transfer_already_exists(cid):
continue
logging.debug(" > {} {} {}".format(exceed_nid, dst_nid, cid))
graph.transfer(exceed_nid, dst_nid, cid, add=True)
if IDAstar(graph, depth + 1, max_depth):
return True
logging.debug(" < {} {} {}".format(dst_nid, exceed_nid, cid))
graph.transfer(dst_nid, exceed_nid, cid, add=False)
def schedule(json_obj):
nodes = parse_json(json_obj)
graph = Graph(nodes)
depth = 0
max_depth = 5
while depth <= max_depth and not IDAstar(graph, 0, depth):
depth += 1
logging.info("try depth: {}".format(depth))
transfers = [{
"NodeContractID": key,
"SourceNode": value["src"],
"DestinationNode": value["dst"]
} for key, value in graph.transfers.items()]
return {"result": transfers}
if __name__ == "__main__":
with open("test.txt2") as f:
t = f.read()
json_obj = json.loads(t)
transfers = schedule(json_obj)
print(len(transfers))
print(json.dumps(transfers, indent=2, separators=(',', ':')))
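# A minimal input that parse_json() accepts (sketch; the values are made up):
# [{
#     "NodeID": "node-1",
#     "threshold": 0.8,
#     "NodeFreeResourceInfo": {"freeMEM": "1024 MB", "freeCPU": "40%"},
#     "NodeContractResourceInfo": [{"id": "c-1", "rss": "256 MB", "cpu": "10%"}]
# }]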
|
from django import forms
from reddituser.models import RedditUser
class UpdateUserForm(forms.Form):
bio = forms.CharField(
max_length=250,
required=False,
widget=forms.TextInput(
attrs={
'class': 'input',
'placeholder': 'Bio'
})
)
image = forms.ImageField(
required=False,
# widget=forms.ImageField(
# attrs={
# 'class': 'file-input',
# 'placeholder': 'Bio'
# })
)
|
import requests
import urllib3
res = requests.get('https://localprod.pandateacher.com/python-manuscript/crawler-html/chromedriver/ChromeDriver.html')
print(res.text)
|
from django.utils.text import slugify
def overlap_percent(geom1, geom2):
    # How much of the area of geom1 overlaps geom2
    # (expressed as a percentage of geom1's area)
g1 = geom1.transform(27700, clone=True)
g2 = geom2.transform(27700, clone=True)
intersection = g1.intersection(g2)
return (intersection.area / g1.area) * 100
def normalize_name_for_matching(name):
# Slug a name and remove suffixes so we can compare division
# names within a local authority ignoring minor differences
# and the suffixes ' Ward' and ' ED' (which are appended to
# the names of wards and electoral districts in BoundaryLine)
slug = slugify(name)
if slug.endswith("-ed"):
return slug[:-3]
if slug.endswith("-ward"):
return slug[:-5]
return slug
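# e.g. normalize_name_for_matching("Abbey Ward") and normalize_name_for_matching("Abbey ED")
# both normalise to "abbey", so the two spellings of the same division compare equal.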
def split_code(code):
return tuple(code.split(":"))
|
from pico2d import *
import math
class Player:
bodyImage = None
barrelImage = None
interested_keys = [ SDLK_LEFT, SDLK_RIGHT, SDLK_UP, SDLK_DOWN ]
d1, d2 = 16, 35
def __init__(self):
self.x = 400
self.y = 300
self.angle = 0
self.bAngle = 0
self.bx = self.x
self.by = self.y - Player.d1 + Player.d2
self.mx = self.x
self.my = 0
self.moveSpeed = 100 * 1 / 30
self.rotSpeed = 1 * math.pi / 60
self.keys = {}
for k in Player.interested_keys: self.keys[k] = False
if Player.bodyImage == None:
Player.bodyImage = load_image('player_body_pix.png')
if Player.barrelImage == None:
Player.barrelImage = load_image('player_barrel_pix.png')
def draw(self):
self.bodyImage.composite_draw(self.angle, "", self.x, self.y)
self.barrelImage.composite_draw(self.angle + self.bAngle, "", self.bx, self.by)
def update(self):
mag = 1 if self.keys[SDLK_LEFT] else 0
mag += -1 if self.keys[SDLK_RIGHT] else 0
move = 1 if self.keys[SDLK_UP] else 0
move += -1 if self.keys[SDLK_DOWN] else 0
# print(mag, move)
if mag != 0:
# if move == 0:
# self.bAngle += mag * self.rotSpeed
# else:
if move < 0: mag = -mag
self.angle += mag * self.rotSpeed
# print(mag, self.angle)
if move != 0:
self.x += -move * self.moveSpeed * math.sin(self.angle)
self.y += +move * self.moveSpeed * math.cos(self.angle)
angle = math.atan2(self.x - self.mx, self.my - self.y)
self.bAngle = angle - self.angle
# if mag != 0 or move != 0:
x, y = self.x, self.y
x += +Player.d1 * math.sin(self.angle)
y += -Player.d1 * math.cos(self.angle)
x += -Player.d2 * math.sin(self.angle + self.bAngle)
y += +Player.d2 * math.cos(self.angle + self.bAngle)
self.bx, self.by = x, y
def handle_event(self, e):
if e.type == SDL_KEYDOWN or e.type == SDL_KEYUP:
if e.key in Player.interested_keys:
self.keys[e.key] = e.type == SDL_KEYDOWN
# print(e.key, e.type == SDL_KEYDOWN)
elif e.type == SDL_MOUSEMOTION:
self.mx, self.my = e.x, 600 - e.y
pass
|
from main.page.desktop_v3.setting.pe_user import *
from selenium.webdriver.common.by import By
from random import randint
import time, subprocess
class UserProfile(UserSetting):
#tab locator for detail
_name_loc = (By.ID, "full-name")
_birthday_date_dd_loc = (By.XPATH, "//select[@name='bday_dd']/option")
_birthday_date_mm_loc = (By.XPATH, "//select[@name='bday_mm']/option")
_birthday_date_yy_loc = (By.XPATH, "//select[@name='bday_yy']/option")
_gender_male_loc = (By.ID, "gender-male")
_gender_female_loc = (By.ID, "gender-female")
_hobbies_loc = (By.ID, "hobbies")
_messenger_loc = (By.ID, "messenger")
_password_loc = (By.XPATH, "//*[@id='form-edit-profile']/div[8]/div[2]/div/input")
_submit_personal_profile_loc = (By.XPATH, '//*[@id="form-edit-profile"]/div[9]/button')
#--
#tab locator for change picture
_upload_image_loc = (By.ID, 'pickfiles')
#--
#tab locator for change password
_edit_password_loc = (By.XPATH, '//*[@id="img-profile"]/div[2]/button')
_old_password_loc = (By.ID, "oldpassword")
_new_password_loc = (By.ID, "newpassword")
_confirmation_password_loc = (By.ID, "confpassword")
_save_password_loc = (By.XPATH, '//*[@id="edit-contact"]/div[4]/button[2]')
#--
def edit_personal_profile(self, hobby, messenger, pwd):
try:
self.driver.find_element(*self._name_loc).click()
self.choose_date_of_birth()
self.driver.find_element(*self._gender_male_loc).click()
self.driver.find_element(*self._hobbies_loc).clear()
self.driver.find_element(*self._hobbies_loc).send_keys(hobby)
self.driver.find_element(*self._messenger_loc).clear()
self.driver.find_element(*self._messenger_loc).send_keys(messenger)
self.driver.find_element(*self._password_loc).clear()
self.driver.find_element(*self._password_loc).send_keys(pwd)
time.sleep(2)
self.driver.find_element(*self._submit_personal_profile_loc).click()
except Exception as inst:
print(inst)
def choose_date_of_birth(self):
try:
time.sleep(1)
self.driver.execute_script("document.getElementsByName('bday_dd')[0].style.display='block'")
self.driver.execute_script(
"document.getElementsByClassName('span2 selectBox-dropdown')[0].style.display='none'")
list_bday_dd = self.driver.find_elements(*self._birthday_date_dd_loc)
i = randint(1, len(list_bday_dd))
list_bday_dd[i].click()
time.sleep(1)
self.driver.execute_script("document.getElementsByName('bday_mm')[0].style.display='block'")
self.driver.execute_script(
"document.getElementsByClassName('span4 selectBox-dropdown')[0].style.display='none'")
list_bday_mm = self.driver.find_elements(*self._birthday_date_mm_loc)
i = randint(1, len(list_bday_mm))
list_bday_mm[i].click()
time.sleep(1)
self.driver.execute_script("document.getElementsByName('bday_yy')[0].style.display='block'")
self.driver.execute_script(
"document.getElementsByClassName('span3 selectBox-dropdown')[0].style.display='none'")
list_bday_yy = self.driver.find_elements(*self._birthday_date_yy_loc)
i = randint(1, len(list_bday_yy))
list_bday_yy[i].click()
except Exception as inst:
print(inst)
def edit_password(self, pwd, new_pwd):
try:
time.sleep(5)
self.driver.find_element(*self._edit_password_loc).click()
time.sleep(5)
self.driver.find_element(*self._old_password_loc).send_keys(pwd)
self.driver.find_element(*self._new_password_loc).send_keys(new_pwd)
self.driver.find_element(*self._confirmation_password_loc).send_keys(new_pwd)
self.driver.find_element(*self._save_password_loc).click()
time.sleep(2)
except Exception as inst:
print(inst)
def edit_photo(self):
try:
time.sleep(3)
self.driver.find_element(*self._upload_image_loc).click()
time.sleep(2)
#Put the new image in folder C:\autoit and rename it with "FileUpload.jpg"
subprocess.Popen(r"C:\autoit\upload-image.exe")
time.sleep(2)
except Exception as inst:
print(inst)
|
import socket
import pickle
import dice_chess
from _thread import *
# Server address: set this to the server's IP address (defaults to this machine's hostname)
server = socket.gethostname()
PORT = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((server, PORT))
except socket.error as e:
print(e)
s.listen(2)
print("Server Started \n Waiting for connection...")
# Sending initial positions of game pieces
qw = dice_chess.queen(7, 3, 0)
qb = dice_chess.queen(0, 3, 1)
kw = dice_chess.king(7, 4, 0)
kb = dice_chess.king(0, 4, 1)
rw1 = dice_chess.rook(7, 0, 0)
rw2 = dice_chess.rook(7, 7, 0)
rb1 = dice_chess.rook(0, 0, 1)
rb2 = dice_chess.rook(0, 7, 1)
bw1 = dice_chess.bishop(7, 2, 0)
bb1 = dice_chess.bishop(0, 2, 1)
bw2 = dice_chess.bishop(7, 5, 0)
bb2 = dice_chess.bishop(0, 5, 1)
nb1 = dice_chess.knight(0, 1, 1)
nw1 = dice_chess.knight(7, 1, 0)
nb2 = dice_chess.knight(0, 6, 1)
nw2 = dice_chess.knight(7, 6, 0)
avai_piece = [qw, qb, kw, kb, rw1, rw2, rb1, rb2, bw1, bw2, bb1, bb2, nb1, nb2, nw1, nw2]
def create_pawn_structure():
for rows in range(8):
black_pawn = dice_chess.pawn(1, rows, 1)
white_pawn = dice_chess.pawn(6, rows, 0)
avai_piece.append(black_pawn)
avai_piece.append(white_pawn)
create_pawn_structure()
def threadfun(conn, avai_piece):
conn.send(pickle.dumps(avai_piece))
while True:
try:
data = pickle.loads(conn.recv(2048))
avai_piece = data
if not data:
print("Disconnected...")
break
else:
print("Received: ", avai_piece)
print("Sending: ", avai_piece)
conn.sendall(pickle.dumps(avai_piece))
print("Count: ", count)
except:
break
print("Connection Lost ")
conn.close()
count = 0
while True:
conn, addr = s.accept()
print(f"Connection Success ! \n Connected to : ", addr)
start_new_thread(threadfun, (conn, avai_piece))
count += 1
|
#!/usr/bin/env python3
"""
Scrape heatmaps from Airwave
"""
from argparse import ArgumentParser
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
def scrape(username, password, sites_csv):
driver = webdriver.Chrome()
# Allow for page load
driver.implicitly_wait(10)
# Open web page
driver.get("https://airwave.colgate.edu/")
driver.set_window_size(1851, 1036)
# Login
driver.find_element_by_id("login-username-id").send_keys(username)
driver.find_element_by_id("login-password-id").send_keys(password)
driver.find_element_by_id("login-password-id").send_keys(Keys.ENTER)
print("Logged in")
time.sleep(5)
with open(sites_csv) as sites:
for site in sites:
site_id, building_name, floor_num = site.split(',')
capture_heatmap(driver, site_id, building_name, int(floor_num))
driver.quit()
def capture_heatmap(driver, site_id, building_name, floor_num):
print("Capturing %s %d" % (building_name, floor_num))
# Load heatmap
driver.get("https://airwave.colgate.edu/vrf?site_id="+site_id)
time.sleep(4)
# Only show 5GHz coverage
driver.find_element_by_id(
'overlay_hm_checked').find_element_by_css_selector(
'.vrf_visibility_arrow').click()
driver.find_elements_by_css_selector(
'.goog-checkbox')[1].click()
time.sleep(8)
# Save heatmap
driver.save_screenshot('%s_f%d_5GHz.png' % (building_name, floor_num))
# Only show 2.4GHz coverage
driver.find_elements_by_css_selector(
'.goog-checkbox')[1].click()
driver.find_elements_by_css_selector(
'.goog-checkbox')[0].click()
time.sleep(8)
# Save heatmap
driver.save_screenshot('%s_f%d_2GHz.png' % (building_name, floor_num))
def main():
# Parse arguments
arg_parser = ArgumentParser(description='Scrape heatmaps from Airwave')
arg_parser.add_argument('-u', '--username', dest='username', action='store',
required=True, help='Airwave username')
arg_parser.add_argument('-p', '--password', dest='password', action='store',
required=True, help='Airwave password')
arg_parser.add_argument('-s', '--sites', dest='sites', action='store',
required=True,
help='CSV file of site IDs, building names, and floor numbers')
settings = arg_parser.parse_args()
# Scrape heatmaps
scrape(settings.username, settings.password, settings.sites)
if __name__ == '__main__':
main()
|
from backbone import *
from backbone.collector import *
import time
import re
'''
To check the following on the collector nodes:
1. Check that the collector's kfps stays constant for the configured number of days (default: 5 days)
2. Check that the collector's kfps stays constant for the configured number of hours (default: 10 hours)
'''
nodes = get_nodes_by_type('collector')
for node in nodes :
if node.isMaster(log=False):
instance = get_instance(node)
adaptors = get_adaptors(node,instance)
counts = {}
counts = dict([ ('1-day',get_var('day-count') if get_var('day-count') else 5),('1-hour',get_var('hour-count') if get_var('hour-count') else 10 ) ])
deviationThreshold = get_var('deviationThreshold') if get_var('deviationThreshold') else 10
stats = {}
for adaptor in adaptors :
for count in counts :
logger.info('Daily Stats being calculated now' if 'day' in count else 'Hourly Stats being calculated now')
dayStats = node.cliCmd('collector stats instance-id %s adaptor-stats %s total-flow interval-type %s interval-count %s ' % ( instance,adaptor,count,counts[count] ) )
                logger.info('Stats have been retrieved. Calculating the mean deviation')
# using mean deviation to calculate this
for line in dayStats.split('\n'):
try :
line = re.sub('\s+',' ',line)
#logger.info('ABC : %s ' % line.split(' ') )
stats[int(line.split(' ')[0])] = int(line.split(' ')[3])
except (ValueError,IndexError) as e:
continue
logger.info('Stats : %s ' % stats )
mean = reduce(lambda x,y : x+y, stats.values() )/len(stats.values())
logger.info('Mean avg : %s' % reduce(lambda x,y : x+y, stats.values() ) )
stats = dict(map(lambda (y,x) : (y,(abs(x-mean)/float(x))*100),stats.iteritems() ))
logger.info('Stats : %s ' % stats )
logger.info('Threshold variation level: %s' % deviationThreshold )
for k,v in enumerate(stats.values()) :
if v > deviationThreshold :
logger.error('Collector deviation seen in kfps is more than %s percent : %s ' % (deviationThreshold,v ) )
report.fail('Collector deviation seen in kfps is more than %s percent : %s ' % (deviationThreshold,v ) )
logger.info('Collector kfps correctly maintained')
|
#!./venv/bin/python
"""
Indexes the contents of the bundle store. Used during the launch of the new
worker system.
TODO(klopyrev): Delete once it's launched.
"""
import sys
sys.path.append('.')
from codalab.common import State
from codalab.lib.codalab_manager import CodaLabManager
from codalab.model.tables import bundle as cl_bundle, bundle_contents_index as cl_bundle_contents_index
from sqlalchemy import distinct, select
from worker.file_util import index_contents
manager = CodaLabManager()
bundle_store = manager.bundle_store()
model = manager.model()
engine = model.engine
with engine.begin() as conn:
bundles = conn.execute(
select([cl_bundle.c.uuid])
.where(cl_bundle.c.state.in_([State.READY, State.FAILED]))
).fetchall()
indexed_bundles = conn.execute(
select([distinct(cl_bundle_contents_index.c.bundle_uuid)])
).fetchall()
uuids_to_index = (set(bundle.uuid for bundle in bundles) -
set(bundle.bundle_uuid for bundle in indexed_bundles))
for uuid in uuids_to_index:
print 'Indexing', uuid
index = index_contents(bundle_store.get_bundle_location(uuid))
model.update_bundle_contents_index(uuid, index)
|
"""
These are member related models.
"""
from dataclasses import dataclass, field
from typing import List, Optional
from .base import BaseModel
from .common import BaseApiResponse
from .mixins import DatetimeTimeMixin
@dataclass
class MemberSnippetMemberDetails(BaseModel):
"""
A class representing the member snippet member detail.
Refer: https://developers.google.com/youtube/v3/docs/members#snippet.memberDetails
"""
channelId: Optional[str] = field(default=None)
channelUrl: Optional[str] = field(default=None, repr=False)
displayName: Optional[str] = field(default=None, repr=False)
profileImageUrl: Optional[str] = field(default=None, repr=False)
@dataclass
class MemberSnippetMembershipsDuration(BaseModel, DatetimeTimeMixin):
memberSince: Optional[str] = field(default=None)
memberTotalDurationMonths: Optional[int] = field(default=None, repr=False)
@dataclass
class MemberSnippetMembershipsDurationAtLevel(BaseModel):
level: Optional[str] = field(default=None)
memberSince: Optional[str] = field(default=None, repr=False)
memberTotalDurationMonths: Optional[int] = field(default=None, repr=False)
@dataclass
class MemberSnippetMembershipsDetails(BaseModel):
"""
A class representing the member snippet membership detail.
Refer: https://developers.google.com/youtube/v3/docs/members#snippet.membershipsDetails
"""
highestAccessibleLevel: Optional[str] = field(default=None)
highestAccessibleLevelDisplayName: Optional[str] = field(default=None)
accessibleLevels: Optional[List[str]] = field(default=None, repr=False)
membershipsDuration: Optional[MemberSnippetMembershipsDuration] = field(
default=None, repr=False
)
membershipsDurationAtLevel: Optional[
List[MemberSnippetMembershipsDurationAtLevel]
] = field(default=None, repr=False)
@dataclass
class MemberSnippet(BaseModel):
"""
A class representing the member snippet info.
Refer: https://developers.google.com/youtube/v3/docs/members#snippet
"""
creatorChannelId: Optional[str] = field(default=None)
memberDetails: Optional[MemberSnippetMemberDetails] = field(
default=None, repr=False
)
membershipsDetails: Optional[MemberSnippetMembershipsDetails] = field(
default=None, repr=False
)
@dataclass
class Member(BaseModel):
"""
A class representing the member info.
Refer: https://developers.google.com/youtube/v3/docs/members
"""
kind: Optional[str] = field(default=None)
etag: Optional[str] = field(default=None, repr=False)
snippet: Optional[MemberSnippet] = field(default=None, repr=False)
@dataclass
class MemberListResponse(BaseApiResponse):
"""
A class representing the member's retrieve response info.
Refer: https://developers.google.com/youtube/v3/docs/members/list#response
"""
items: Optional[List[Member]] = field(default=None, repr=False)
|
#
# @lc app=leetcode.cn id=83 lang=python3
#
# [83] Remove Duplicates from Sorted List
#
# @lc code=start
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
# 166/166 cases passed (44 ms)
# Your runtime beats 65.47 % of python3 submissions
# Your memory usage beats 34.51 % of python3 submissions (15 MB)
# curr = head
# while curr:
# while curr.next and curr.val == curr.next.val:
# curr.next = curr.next.next
# curr = curr.next
# return head
if not head:
return head
slow, fast = head, head
while fast:
if fast.val != slow.val:
slow.next = fast
slow = slow.next
fast = fast.next
slow.next = None
return head
# @lc code=end
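# A quick local check (not part of the LeetCode submission): 1->1->2->3->3 becomes 1->2->3.
if __name__ == "__main__":
    head = ListNode(1, ListNode(1, ListNode(2, ListNode(3, ListNode(3)))))
    node = Solution().deleteDuplicates(head)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected output: [1, 2, 3]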
|
class SQList():
def __init__(self,lis = None):
self.r = lis
def swap(self,i,j):
self.r[i],self.r[j] = self.r[j],self.r[i]
def bubble_sort(self):
lis = self.r
length = len(self.r)
for i in range(length):
for j in range(i + 1,length):
if lis[i] > lis[j]:
self.swap(i,j)
def __str__(self):
ret = ""
for i in self.r:
ret += ' ' + str(i)
return ret
if __name__ == '__main__':
sqlist = SQList([2,5,23,9,6,564,2,34,5,6,7,4,34])
sqlist.bubble_sort()
print(sqlist)
|
# Read the buffered lines from linebuffer.txt
contents = []
with open("linebuffer.txt", "r") as rawline:
    for line in rawline:
        contents.append(line)
# If the buffer starts with "Unk", queue the rbrain script; otherwise just touch sinai.txt
if contents and contents[0].startswith("Unk"):
    with open("lined.txt", "w") as next_step:
        next_step.write(". rbrain.txt")
else:
    with open("sinai.txt", "a") as message_final:
        pass
|
import json
from graphql_server import (HttpQueryError, default_format_error,
encode_execution_results, json_encode, load_json_body, run_http_query)
class PaginatedResult(object):
def __init__(self, data, per_page, page, total_count):
self.__per_page = per_page
self.__page = page
self.__total_count = total_count
self.response = json.dumps(
{
"data" : data,
"meta" :
{
"pagination" :
{
"per_page" : per_page,
"current_page" : page,
"has_next" : self.__has_next,
"has_prev": self.__has_previous,
"total_pages" : self.__total_pages,
"total_results" : total_count
}
}
})
@property
def __has_next(self):
return (self.__per_page * self.__page) < self.__total_count
@property
def __has_previous(self):
return self.__page > 1
    @property
    def __total_pages(self):
        # Ceiling division: -(-a // b) rounds up without importing math
        return -(-self.__total_count // self.__per_page)
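# Usage sketch (made-up values): PaginatedResult(data=[...], per_page=10, page=2, total_count=35)
# serialises meta.pagination as {"per_page": 10, "current_page": 2, "has_next": True,
# "has_prev": True, "total_pages": 4, "total_results": 35}.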
class ErrorResponse:
def __init__(self, errors_array):
self.response = json.dumps(
{
"error" : True,
"errors" : errors_array
}
)
def parse_body(request):
content_type = request.mimetype
if content_type == 'application/graphql':
return {'query': request.data.decode('utf8')}
elif content_type == 'application/json':
return load_json_body(request.data.decode('utf8'))
elif content_type in ('application/x-www-form-urlencoded', 'multipart/form-data'):
return request.form
return {}
|
class Ordenar:
def __init__(self, lista):
self.lista = lista
def borbuja(self):
for i in range(len(self.lista)):
for j in range(i + 1, len(self.lista)):
if self.lista[i] > self.lista[j]:
aux = self.lista[i]
self.lista[i] = self.lista[j]
self.lista[j] = aux
def insertar(self, valor):
self.borbuja()
aux_lista = []
enc = False
for pos, ele in enumerate(self.lista):
if ele > valor:
aux_lista.append(valor)
                enc = True  # found
break
if enc == True:
self.lista = self.lista[0:pos] + aux_lista + self.lista[pos:]
else:
self.lista.append(valor)
return aux_lista
    # manipulate the lines
# def insertar2(self, valor):
# self.borbuja()
# aux_lista = []
# enc = False
# for pos, ele in enumerate(self.lista):
# if ele > valor:
# aux_lista.append(valor)
# enc = True #enontrado
# break
# if enc == True:
# #for i in range(pos):
# aux_lista.apppend(self.lista[i])
# aux_lista. append(valor)
# for j in range(pos, len(self.lista)):
# aux_lista.append(self.lista[j])
# self.lista= aux_lista
# else:
# self.lista.append(valor)
# return aux_lista
ord1 = Ordenar([1,6,8,2,0])
ord1.insertar(5)
ord1.borbuja()
|
"""
Created by Alex Wang
on 2017-07-30
Non-blocking asynchronous Flask service
logger: https://stackoverflow.com/questions/26578733/why-is-flask-application-not-creating-any-logs-when-hosted-by-gunicorn
"""
import os
from gevent import monkey
monkey.patch_all()
from flask import Flask, request
from gevent import wsgi
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = ""
a = tf.placeholder(tf.int32, shape=(), name="input")
asquare = tf.multiply(a, a, name="output")
sess = tf.Session()
app = Flask(__name__)
@app.route('/')
def index():
return 'Hello World'
@app.route('/hello')
def response_request():
num = request.args.get('num')
for i in range (100):
ret = sess.run([asquare], feed_dict={a: num})
return str(ret)
# return "hello"
if __name__ == "__main__":
server = wsgi.WSGIServer(('127.0.0.1', 19877), app)
server.serve_forever()
|
nome = input("Qual seu nome:")
print('Meu nome é {} e programo em python'.format(nome))
print('Meu nome é ' + nome + ' e programo em python')
|
def factorial(n):
if n < 2:
return 1
else:
return n * factorial(n-1)
def combinations(l,s):
return factorial(l)/(factorial(s)*factorial(l-s))
def binomialdist(x,n,l):
combs = combinations(n,x)
first = l[0]*0.01
second = 1-first
return combs*(first**x)*(second**(n-x))
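# binomialdist(x, n, l) evaluates P(X = x) = C(n, x) * p**x * (1 - p)**(n - x),
# where the success probability p is read from the first list entry as a percentage (l[0] * 0.01).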
def cumulativeprob_least(x,n,probs):
s = 0
for i in range(x,n+1):
s += binomialdist(i,n,probs)
return round(s,3)
def cumulativeprob_most(x,n,probs):
s = 0
for i in range(x+1):
s += binomialdist(i,n,probs)
return round(s,3)
nums = [12,10]
print(cumulativeprob_most(2,10,nums))
print(cumulativeprob_least(2,10,nums))
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'test-floating-point-model-default',
'type': 'executable',
'sources': ['floating-point-model-precise.cc'],
},
{
'target_name': 'test-floating-point-model-precise',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'FloatingPointModel': '0'
}
},
'sources': ['floating-point-model-precise.cc'],
},
{
'target_name': 'test-floating-point-model-strict',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'FloatingPointModel': '1'
}
},
'sources': ['floating-point-model-strict.cc'],
},
{
'target_name': 'test-floating-point-model-fast',
'type': 'executable',
'msvs_settings': {
'VCCLCompilerTool': {
'FloatingPointModel': '2'
}
},
'sources': ['floating-point-model-fast.cc'],
},
]
}
|
from django.db import models
class Aluno(models.Model):
nome = models.CharField(max_length=50)
cpf = models.CharField(max_length=14)
email = models.CharField(max_length=50)
tel = models.CharField(max_length=14)
|
from django.db import models
from PIL import Image
class Author(models.Model):
lastname = models.CharField(max_length=50, blank=True, null=True)
middlename = models.CharField(max_length=50, blank=True, null=True)
firstname = models.CharField(max_length=50, blank=True, null=True)
date_added = models.DateTimeField(auto_now_add=True, null=True)
image = models.ImageField(default='default.jpg', upload_to='author_pics')
def __str__(self):
if self.middlename is None:
return f"{self.lastname}, {self.firstname}"
else:
return f"{self.lastname}, {self.firstname} {self.middlename}"
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
img = Image.open(self.image.path)
if img.height > 200 or img.width > 200:
output_size = (200, 200)
img.thumbnail(output_size)
img.save(self.image.path)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from revolver.core import run
from revolver import package
from revolver.tool import pythonbrew
def install(version, _update=True):
# Without this we would build python without the bz2 package
package.ensure("libbz2-dev")
pythonbrew.ensure()
status = run("pythonbrew switch %s; true" % version)
if status.find("not installed") != -1 or _update:
run("pythonbrew install --no-test %s" % version)
run("pythonbrew cleanup")
run("pythonbrew switch %s" % version)
run("pip install virtualenv")
run("pip install virtualenvwrapper")
def ensure(version):
install(version, _update=False)
|
import os
import operator
from collections import defaultdict
from BitVector import BitVector
# TODO File with all constants
SOURCE_LABEL = "matteosalvinimi"
NUM_TWEETS = 100
DATA = "data1"
PEOPLE = "people1"
FINAL_GRAPH = "graph"
def tweet_parser(filename, hashtags_map=None, hashtags_bitmask=None, graph_id=None, debug=False):
tweet_graph = dict()
f = open(os.path.join(DATA, PEOPLE, SOURCE_LABEL, filename), 'r')
raw_hashtags = f.readline().strip("\n").split("\t")
raw_hashtags.pop()
hs = set()
# Hashtag Normalization
    for ht in raw_hashtags:
        ind = -1  # reset the punctuation marker for every hashtag
        for char in ht:
            if char in [",", ".", ";", '"', ")", "?", "!", ":", "'"]:
                ind = ht.find(char)
                if ind != -1:
                    hs.add(ht[0:ind])
                    break
            # print ht[0:ind]
        if ind == -1:
            hs.add(ht)
hashtags = list(hs)
if debug:
print "hashtags:", hashtags
line = f.readline()
if len(line) <= 0:
return None, None
while len(line) > 0:
edge = line.strip("\n").split("\t")
if debug:
print "edge:", edge
if edge[1] == SOURCE_LABEL:
line = f.readline()
continue
if edge[0] not in tweet_graph:
tweet_graph[edge[0]] = set()
tweet_graph[edge[0]].add(edge[1])
else:
tweet_graph[edge[0]].add(edge[1])
if edge[1] not in tweet_graph:
tweet_graph[edge[1]] = set()
line = f.readline()
if hashtags_map is not None:
for h in hashtags:
hashtags_map[h].append(tweet_graph)
hashtags_bitmask[h][graph_id] = 1
if debug:
print tweet_graph
'''
from collections import deque
queue = deque(tweet_graph[SOURCE_LABEL])
# queue = tweet_graph[SOURCE_LABEL]
print SOURCE_LABEL
while len(queue) > 0:
node = queue.popleft()
print node
queue.extend(tweet_graph[node])
'''
f.close()
return tweet_graph, hashtags
# UNUSED, for the moment...
def join_graphs(graph_map):
union_graph = defaultdict(lambda: set())
for entry in graph_map.values():
graph = entry[0]
for nodes in graph.keys():
union_graph[nodes].update(graph[nodes])
return union_graph
def graph_file_writer(final_graph, filename="final_graph.tsv"):
f = open(os.path.join(DATA, FINAL_GRAPH, filename), 'wt')
for edge in final_graph.keys():
nodes = edge.split("->")
f.write("{}\t{}\t".format(nodes[0], nodes[1]))
prob = ""
for ht in final_graph[edge].keys():
if prob == "":
prob += "{}\t{}".format(ht, final_graph[edge][ht])
else:
prob += "\t{}\t{}".format(ht, final_graph[edge][ht])
prob += "\n"
f.write(prob)
f.close()
def jaccard_all_pairs_similarity_file_writer(ht_bitmasks, filename="jaccard_all_pairs_similarity.tsv"):
ht_pair_sim = defaultdict(lambda: dict())
for (ht1, bm1) in ht_bitmasks.iteritems():
for (ht2, bm2) in ht_bitmasks.iteritems():
if ht1 == ht2:
continue
ht_pair_sim[ht1][ht2] = bm1.jaccard_similarity(bm2)
ht_pair_sim[ht1] = sorted(ht_pair_sim[ht1].items(), key=operator.itemgetter(1), reverse=True)
f = open(os.path.join(DATA, FINAL_GRAPH, filename), 'wt')
line = ""
for (ht1, all_pairs_sim) in ht_pair_sim.iteritems():
line += ht1
for pair in all_pairs_sim:
line += "\t{}:{}".format(pair[0], pair[1])
f.write("{}\n".format(line))
line = ""
f.close()
def bitmask_file_writer(ht_bitmasks, filename="hashtags_bitmasks.tsv"):
f = open(os.path.join(DATA, FINAL_GRAPH, filename), 'wt')
for (ht, bm) in ht_bitmasks.iteritems():
f.write("{}\t{}\n".format(ht, str(bm)))
f.close()
def name_to_index_dict_writer(name_to_index_dict, filename="name_to_index_graph_translation.tsv"):
f = open(os.path.join(DATA, FINAL_GRAPH, filename), 'wt')
for (id, name) in name_to_index_dict.iteritems():
f.write("{}\t{}\n".format(name, id))
f.close()
def final_graph_builder():
hashtags_map = defaultdict(lambda: [])
graphs_map = {}
index = 0
name_to_index_dict = {}
ht_bitmasks = defaultdict(lambda: BitVector(intVal=0, size=NUM_TWEETS))
if not os.path.exists(os.path.join(DATA, FINAL_GRAPH)):
os.makedirs(os.path.join(DATA, FINAL_GRAPH))
filenames = os.listdir(os.path.join(DATA, PEOPLE, SOURCE_LABEL))
empty_tweet = 0
for filename in filenames:
graph, hashtags = tweet_parser(filename=filename, hashtags_map=hashtags_map,
hashtags_bitmask=ht_bitmasks, graph_id=index)
if graph is None:
empty_tweet += 1
continue
graphs_map[filename] = (graph, hashtags)
name_to_index_dict[index] = filename
index += 1
name_to_index_dict_writer(name_to_index_dict)
jaccard_all_pairs_similarity_file_writer(ht_bitmasks)
bitmask_file_writer(ht_bitmasks)
# union_graph = join_graphs(graphs_map)
# nodes_counters = defaultdict(lambda: defaultdict(lambda: 0))
edges_counters = defaultdict(lambda: defaultdict(lambda: 0))
hashtags_counters = defaultdict(lambda: 0)
# print "Keys: "
# print hashtags_map.keys()
for h in hashtags_map.keys():
hashtags_counters[h] = len(hashtags_map[h])
for g in hashtags_map[h]:
for (node, replies) in g.iteritems():
# nodes_counters[node][h] += 1
for reply in replies:
edges_counters["{}->{}".format(node, reply)][h] += 1
final_graph = defaultdict(lambda: defaultdict(lambda: 0))
for edge in edges_counters.keys():
# informer = edge.split("->")[0]
for edge_ht in edges_counters[edge].keys():
# final_graph[edge][edge_ht] = float(edges_counters[edge][edge_ht])/float(nodes_counters[informer][edge_ht])
final_graph[edge][edge_ht] = float(edges_counters[edge][edge_ht]) / float(hashtags_counters[edge_ht])
graph_file_writer(final_graph)
print "empty_tweet: ",empty_tweet
if __name__ == "__main__":
final_graph_builder()
|
import media
import fresh_tomatoes
"""
Create new movie object to 3 variables is dunkirk_movie,
the_ragnarok, wonder_woman
"""
dunkirk_movie = media.Movie('Dunkirk',
'Dunkirk is a 2017 war film written, directed, and'
'produced by Christopher Nolan that depicts the'
'Dunkirk evacuation of World War II.',
'https://upload.wikimedia.org/wikipedia/en/1/15/Dunkirk_Film_poster.jpg', # noqa
'https://www.youtube.com/watch?v=F-eMt3SrfFU')
the_ragnarok = media.Movie('The Ragnarok',
'Thor: Ragnarok is a 2017 American superhero film'
'based on the Marvel Comics character Thor',
'https://upload.wikimedia.org/wikipedia/en/7/7d/Thor_Ragnarok_poster.jpg', # noqa
'https://www.youtube.com/watch?v=ue80QwXMRHg')
wonder_woman = media.Movie('Wonder Woman',
'Wonder Woman is a 2017 American superhero film'
'based on the DC Comics character',
'https://upload.wikimedia.org/wikipedia/en/e/ed/Wonder_Woman_%282017_film%29.jpg', # noqa
'https://www.youtube.com/watch?v=VSB4wGIdDwo')
"""
Main subroutine for generate movie page
Add 3 variables to movies array for generate
"""
movies = [dunkirk_movie, the_ragnarok, wonder_woman]
fresh_tomatoes.open_movies_page(movies)
|
import os
import datetime
from functools import wraps
from gevent import monkey, wsgi
monkey.patch_all()
from app import create_core
from flask_script import Manager
from flask_migrate import MigrateCommand
env = os.getenv('ENVIRONMENT', 'default')
core = create_core(env)
app = core.app
manager = Manager(app)
manager.add_command('db', MigrateCommand)
@manager.command
def runserver():
if app.config['DEBUG']:
# wsgi.WSGIServer(('0.0.0.0', 5000), app).serve_forever()
app.run(host='0.0.0.0')
else:
wsgi.WSGIServer(('0.0.0.0', 5000), app).serve_forever()
@manager.command
def apidoc():
import os
import json
from app.controllers import controllers
json_path = os.path.join(os.path.dirname(__file__), 'app', 'doc.json')
doc = dict()
for c in controllers:
doc[c.name] = c.format_doc()
extra = dict()
extra['updated_at'] = ' '.join(
datetime.datetime.now().isoformat().split('T'))
with open(json_path, 'w') as f:
f.writelines(json.dumps(dict(doc=doc, extra=extra), indent=2))
    print('Update api document succeeded')
if __name__ == '__main__':
manager.run(default_command='runserver')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('api', '0002_location_user'),
]
operations = [
migrations.CreateModel(
name='NormalizedLocation',
fields=[
('location_ptr', models.OneToOneField(parent_link=True, serialize=False, auto_created=True, to='api.Location', primary_key=True)),
],
options={
'abstract': False,
},
bases=('api.location',),
),
migrations.CreateModel(
name='Venue',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('unique_identifier', models.CharField(max_length=255, unique=True)),
('name', models.CharField(max_length=500)),
('url', models.URLField()),
('point', django.contrib.gis.db.models.fields.PointField(srid=4326)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='normalizedlocation',
name='venue',
field=models.ForeignKey(to='api.Venue'),
preserve_default=True,
),
migrations.RemoveField(
model_name='location',
name='latitude',
),
migrations.RemoveField(
model_name='location',
name='longitude',
),
migrations.AddField(
model_name='location',
name='address',
field=models.CharField(max_length=255, default='My House'),
preserve_default=False,
),
migrations.AddField(
model_name='location',
name='point',
field=django.contrib.gis.db.models.fields.PointField(srid=4326, default='Point (0 0)'),
preserve_default=False,
),
]
|
from unittest import TestCase
from app import app
from models import db,User
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///test_user_db'
app.config['SQLALCHEMY_ECHO'] = False
db.drop_all()
db.create_all()
class UserModelTestCase(TestCase):
def setUp(self):
User.query.delete()
def tearDown(self):
db.session.rollback()
def test_greeting(self):
user = User(first_name='shumin',last_name='zhou',image_url='www.google.com')
self.assertEqual(user.greeting(),"Hi,my name is shumin zhou!")
def test_add_new_user(self):
user = User(first_name='shumin',last_name='zhou',image_url='www.google.com')
db.session.add(user)
db.session.commit()
shumin = User.query.get(1)
self.assertEqual(shumin,user)
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
from db_manager import executeDDL
from db_manager import executeDML
from db_manager import executeSearch
print('Inicializando agenda')
def createOrConnectDB():
executeDDL('''CREATE TABLE IF NOT EXISTS datos (nombre TEXT, apellido TEXT, telefono TEXT, correo TEXT)''')
def doInsert(nombre, apellido, telefono, correo):
executeDML("insert into datos (nombre, apellido, telefono, correo) values ('%s','%s','%s','%s')"%(nombre,apellido, telefono, correo))
def doDelete(nombre):
executeDML("delete from datos where nombre='%s'"%(nombre))
def doSelect(nombre=None):
if nombre is None:
return executeSearch("select * from datos")
else:
return executeSearch("select * from datos where nombre='%s'"%(nombre))
|
"""
Definition of TreeNode:
"""
class TreeNode:
def __init__(self, val= None):
self.val = val
self.left, self.right = None, None
class Solution:
"""
@param: root: A Tree
@return: Preorder in ArrayList which contains node values.
"""
def createTree(self):
root = TreeNode(1);
root.left = TreeNode(2);
root.right = TreeNode(3)
root.left.left = TreeNode(4);
root.left.right = TreeNode(5);
return root;
def intern(self, root):
# write your code here
if(root == None):
return [];
left = self.intern(root.left)
right = self.intern(root.right)
res = []
res.append(root.val)
res= res + left+right;
return res
def preorderTraversal(self, root):
return self.intern(root);
def maxDepth(self, root):
return self.help2(root,0);
def help2(self,root,level):
if(root == None):
return level
if(None != root):
level +=1;
left = self.help2(root.left,level)
right = self.help2(root.right,level)
return max(left,right)
class Solution2:
"""
@param root: The root of binary tree.
@return: Preorder in list which contains node values.
"""
def preorderTraversal(self, root):
res = []
stack = []
if (root != None):
stack.append(root)
while stack != []:
root = stack.pop()
res.append(root.val)
if root.right != None:
stack.append(root.right)
if root.left != None:
stack.append(root.left)
return res
def inorderTraversal(self, root):
res = []
stack = []
if (root != None):
stack.append(root)
while stack != []:
if(root != None):
print("root is ",root.val)
while root != None and root.left != None:
stack.append(root.left)
root= root.left
root = stack.pop()
res.append(root.val)
root = root.right
if(root != None):
stack.append(root)
return res
def postorderTraversal(self, root):
result = []
stack = []
prev = None
cur = root
if (root == None):
return result;
stack.append(root);
while (stack!= []):
curr = stack[-1];
if (prev == None or prev.left == curr or prev.right == curr): # traverse down the tree
if (curr.left != None):
stack.append(curr.left);
elif (curr.right != None):
stack.append(curr.right);
elif (curr.left == prev): # traverse up the tree from the left
if (curr.right != None):
stack.append(curr.right);
else : #traverse up the tree from the right
result.append(curr.val);
stack.pop();
prev = curr;
return result;
mysolution = Solution()
treeRoot = mysolution.createTree();
preOrder = Solution2()
res = preOrder.preorderTraversal(treeRoot)
print("res is ",res)
res = preOrder.inorderTraversal(treeRoot)
print("res of inorder is ",res)
res = preOrder.postorderTraversal(treeRoot)
print("res of postorder is ",res)
|
# Generated by Django 2.2.3 on 2019-07-14 11:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0028_auto_20190709_1531'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='tak_address',
new_name='default_address',
),
]
|
import datetime
import math
import numpy as np
import pandas as pd
from .config import PLAYERS
def main():
df2016 = pd.read_csv(r"C:\Users\Michael.Copeland\Projects\nba\NBA Stats 2016.csv")
df2017 = pd.read_csv(r"C:\Users\Michael.Copeland\Projects\nba\NBA Stats 2017.csv")
df2018 = pd.read_csv(r"C:\Users\Michael.Copeland\Projects\nba\NBA Stats 2018.csv")
df2019 = pd.read_csv(r"C:\Users\Michael.Copeland\Projects\nba\NBA Stats 2019.csv")
df = pd.concat([df2016, df2017, df2018, df2019])
dfg = df.groupby(["PLAYER"]).agg(
{
"YEAR": "count",
"AGE": "max",
"GP": "sum",
"MIN": ["last", "sum"],
"FGM": "sum",
"FGA": "sum",
"FTM": "sum",
"FTA": "sum",
"3PM": "sum",
"3PA": "sum",
"PTS": "sum",
"OREB": "sum",
"DREB": "sum",
"AST": "sum",
"STL": "sum",
"BLK": "sum",
"TOV": "sum",
}
)
dfg.columns = ["_".join(x) if x[0] == "MIN" else x[0] for x in dfg.columns.ravel()]
dfg.rename(columns={'MIN_sum': 'MIN'}, inplace=True)
dfg = dfg[
((dfg["MIN"] > 1800.0) & (dfg["YEAR"] > 1)) | ((dfg["MIN"] > 1000.0) & (dfg["YEAR"] == 1))
]
dfg["FG%"] = dfg["FGM"] / dfg["FGA"]
dfg["FT%"] = dfg["FTM"] / dfg["FTA"]
dfg["3PM_per_MIN"] = dfg["3PM"] / dfg["MIN"]
dfg["PTS_per_MIN"] = dfg["PTS"] / dfg["MIN"]
dfg["REB_per_MIN"] = (dfg["OREB"] + dfg["DREB"]) / dfg["MIN"]
dfg["AST_per_MIN"] = dfg["AST"] / dfg["MIN"]
dfg["STL_per_MIN"] = dfg["STL"] / dfg["MIN"]
dfg["BLK_per_MIN"] = dfg["BLK"] / dfg["MIN"]
dfg["MIN_per_TOV"] = dfg["MIN"] / dfg["TOV"]
dfg["FG%_Z"] = dfg["FG%"].apply(
lambda x: (x - dfg["FG%"].mean()) / dfg["FG%"].std()
)
dfg["FT%_Z"] = dfg["FT%"].apply(
lambda x: (x - dfg["FT%"].mean()) / dfg["FT%"].std()
)
dfg["3PM_Z"] = dfg["3PM_per_MIN"].apply(
lambda x: (x - dfg["3PM_per_MIN"].mean()) / dfg["3PM_per_MIN"].std()
)
dfg["PTS_Z"] = dfg["PTS_per_MIN"].apply(
lambda x: (x - dfg["PTS_per_MIN"].mean()) / dfg["PTS_per_MIN"].std()
)
dfg["REB_Z"] = dfg["REB_per_MIN"].apply(
lambda x: (x - dfg["REB_per_MIN"].mean()) / dfg["REB_per_MIN"].std()
)
dfg["AST_Z"] = dfg["AST_per_MIN"].apply(
lambda x: (x - dfg["AST_per_MIN"].mean()) / dfg["AST_per_MIN"].std()
)
dfg["STL_Z"] = dfg["STL_per_MIN"].apply(
lambda x: (x - dfg["STL_per_MIN"].mean()) / dfg["STL_per_MIN"].std()
)
dfg["BLK_Z"] = dfg["BLK_per_MIN"].apply(
lambda x: (x - dfg["BLK_per_MIN"].mean()) / dfg["BLK_per_MIN"].std()
)
dfg["MPT_Z"] = dfg["MIN_per_TOV"].apply(
lambda x: (x - dfg["MIN_per_TOV"].mean()) / dfg["MIN_per_TOV"].std()
)
dfg["Z"] = (
dfg["FG%_Z"]
+ dfg["FT%_Z"]
+ dfg["3PM_Z"]
+ dfg["PTS_Z"]
+ dfg["REB_Z"]
+ dfg["AST_Z"]
+ dfg["STL_Z"]
+ dfg["BLK_Z"]
+ dfg["MPT_Z"]
)
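    # Age-adjusted total Z (as computed below): players aged 24 or younger get a 4% boost,
    # players aged 28 or older a 4% discount, and ages in between are left unchanged.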
dfg["Z_ADJ"] = np.where(
dfg["AGE"] <= 24,
dfg["Z"] * 1.04,
np.where(dfg["AGE"] >= 28, dfg["Z"] * 0.96, dfg["Z"]),
)
player_lookup = {}
for manager, player_list in iter(PLAYERS.items()):
for player_id in player_list:
player_lookup[player_id] = manager
dfg.reset_index(inplace=True)
dfg['Manager'] = dfg['PLAYER'].apply(
lambda x: player_lookup.get(x)
)
dfg = dfg.sort_values(by=["Z_ADJ"], ascending=False)
dfg.to_csv(r"C:\Users\Michael.Copeland\Projects\nba\NBA 2016-19 (3YR).csv")
dfm = dfg.groupby(['Manager']).agg({
'FG%_Z': 'mean',
'FT%_Z': 'mean',
'3PM_Z': 'mean',
'PTS_Z': 'mean',
'REB_Z': 'mean',
'AST_Z': 'mean',
'STL_Z': 'mean',
'BLK_Z': 'mean',
'MPT_Z': 'mean',
'Z': 'mean',
'Z_ADJ': 'mean'
})
dfm = dfm.sort_values(by=["Z_ADJ"], ascending=False)
dfm.to_csv(r"C:\Users\Michael.Copeland\Projects\nba\Manager Analysis.csv")
if __name__ == "__main__":
main()
|
#coding:utf-8
# Perspective transform
import cv2 as cv
import numpy as np
img = cv.imread('D:/python_file/Opencv3_study_file/images/PT_Picture.jpg')
rows,cols,ch = img.shape
pts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv.getPerspectiveTransform(pts1,pts2)
dst = cv.warpPerspective(img,M,(300,300))
cv.imshow('OUTPUT',dst)
cv.waitKey(0)
cv.destroyAllWindows()
|
import torch
import torch.nn as nn
from torch.autograd import Variable
class FusedBlock(nn.Module):
def __init__(self, z_dim=128):
        super(FusedBlock, self).__init__()
        self.z_dim = z_dim
        self.fc = nn.Linear(z_dim, 4*4*1024)  # use z_dim rather than a hard-coded 128
self.bn1 = nn.BatchNorm1d(4*4*1024)
self.conv1 = nn.ConvTranspose2d(1024, 512, 4, 2, 1)
self.bn2 = nn.BatchNorm2d(512)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
        h = self.relu(self.bn1(self.fc(x.view(-1, self.z_dim))))
h = h.view(-1, 1024, 4, 4)
h = self.relu(self.bn2(self.conv1(h)))
return h
class UnconditionG(nn.Module):
def __init__(self, num_featrues=512, img_size=128):
super(UnconditionG, self).__init__()
self.img_size = img_size
# 8x8 --> 16x16
self.conv1 = nn.ConvTranspose2d(num_featrues, num_featrues//2, 4, 2, 1)
self.bn1 = nn.BatchNorm2d(num_featrues//2)
# 16x16 --> 32x32
self.conv2 = nn.ConvTranspose2d(num_featrues//2, num_featrues//4, 4, 2, 1)
self.bn2 = nn.BatchNorm2d(num_featrues//4)
if img_size == 128:
# 32x32 --> 64x64
self.conv3 = nn.ConvTranspose2d(num_featrues//4, num_featrues//8, 4, 2, 1)
self.bn3 = nn.BatchNorm2d(num_featrues//8)
# 64x64 --> 128x128
self.conv4 = nn.ConvTranspose2d(num_featrues//8, 3, 4, 2, 1)
else:
# 32x32 --> 64x64
self.conv3 = nn.ConvTranspose2d(num_featrues//4, 3, 4, 2, 1)
self.relu = nn.ReLU(inplace=True)
self.tanh = nn.Tanh()
def forward(self, x):
h = self.relu(self.bn1(self.conv1(x)))
h = self.relu(self.bn2(self.conv2(h)))
if self.img_size == 128:
h = self.relu(self.bn3(self.conv3(h)))
out = self.tanh(self.conv4(h))
else:
            out = self.tanh(self.conv3(h))  # apply conv3 to the feature map rather than passing the module itself
return out
class Condition_Embedding(nn.Module):
def __init__(self, num_attr=40):
super(Condition_Embedding, self).__init__()
self.num_attr = num_attr
self.fc = nn.Linear(num_attr, num_attr*2, bias=True)
self.relu = nn.ReLU(inplace=True)
def embedding(self, x):
e = self.relu(self.fc(x))
mu = e[:, :self.num_attr]
logvar = e[:, self.num_attr:]
return mu, logvar
def reparametrize(self, mu, logvar):
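        # Reparameterization trick: draw eps ~ N(0, I) and return mu + std * eps with
        # std = exp(0.5 * logvar), keeping the sample differentiable w.r.t. mu and logvar.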
std = logvar.mul(0.5).exp_()
eps = torch.cuda.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, x):
mu, logvar = self.embedding(x)
attr_code = self.reparametrize(mu, logvar)
return attr_code, mu, logvar
class ConditionG(nn.Module):
def __init__(self, num_featrues=512, img_size=128, num_attr=40):
super(ConditionG, self).__init__()
self.img_size = img_size
self.condition_embedding = Condition_Embedding(num_attr=num_attr)
# 8x8 --> 16x16
self.conv1 = nn.ConvTranspose2d(num_featrues+num_attr, num_featrues//2, 4, 2, 1)
self.bn1 = nn.BatchNorm2d(num_featrues//2)
# 16x16 --> 32x32
self.conv2 = nn.ConvTranspose2d(num_featrues//2, num_featrues//4, 4, 2, 1)
self.bn2 = nn.BatchNorm2d(num_featrues//4)
if img_size == 128:
# 32x32 --> 64x64
self.conv3 = nn.ConvTranspose2d(num_featrues//4, num_featrues//8, 4, 2, 1)
self.bn3 = nn.BatchNorm2d(num_featrues//8)
# 64x64 --> 128x128
self.conv4 = nn.ConvTranspose2d(num_featrues//8, 3, 4, 2, 1)
else:
# 32x32 --> 64x64
self.conv3 = nn.ConvTranspose2d(num_featrues//4, 3, 4, 2, 1)
self.relu = nn.ReLU(inplace=True)
self.tanh = nn.Tanh()
def forward(self, x, y):
attr_code, mu, logvar = self.condition_embedding(y)
attr_code = attr_code[:,:,None,None].repeat(1, 1, x.size(2), x.size(3))
x = torch.cat((x, attr_code), dim=1)
h = self.relu(self.bn1(self.conv1(x)))
h = self.relu(self.bn2(self.conv2(h)))
if self.img_size == 128:
h = self.relu(self.bn3(self.conv3(h)))
out = self.tanh(self.conv4(h))
else:
            out = self.tanh(self.conv3(h))  # apply conv3 to the feature map rather than passing the module itself
return out, mu, logvar
class Generator(nn.Module):
def __init__(self, z_dim=128, num_featrues=512, img_size=128, num_attr=40):
super(Generator, self).__init__()
self.fused_block = FusedBlock(z_dim=z_dim)
self.uncondition_generator = UnconditionG(num_featrues=num_featrues, img_size=img_size)
self.condition_generator = ConditionG(num_featrues=num_featrues, img_size=img_size, num_attr=num_attr)
def forward(self, x, y):
h = self.fused_block(x)
hu = self.uncondition_generator(h)
hc, mu, logvar = self.condition_generator(h, y)
return hu, hc, mu, logvar
#### Discriminator
class UnconditionD(nn.Module):
def __init__(self, num_featrues=64, img_size=128):
super(UnconditionD, self).__init__()
self.img_size = img_size
self.conv1 = nn.Conv2d(3, num_featrues, 4, 2, 1, bias=False)
self.conv2 = nn.Conv2d(num_featrues, num_featrues*2, 4, 2, 1, bias=False)
self.bn2 = nn.BatchNorm2d(num_featrues*2)
self.conv3 = nn.Conv2d(num_featrues*2, num_featrues*4, 4, 2, 1, bias=False)
self.bn3 = nn.BatchNorm2d(num_featrues*4)
self.conv4 = nn.Conv2d(num_featrues*4, num_featrues*8, 4, 2, 1, bias=False)
self.bn4 = nn.BatchNorm2d(num_featrues*8)
if img_size == 128:
self.conv5 = nn.Conv2d(num_featrues*8, num_featrues*16, 4, 2, 1, bias=False)
self.bn5 = nn.BatchNorm2d(num_featrues*16)
self.conv6 = nn.Conv2d(num_featrues*16, 1, 4, 1, 0, bias=False)
else:
            self.conv5 = nn.Conv2d(num_featrues*8, 1, 4, 1, 0, bias=False)  # conv4 outputs num_featrues*8 channels
self.sigmoid = nn.Sigmoid()
self.lrelu = nn.LeakyReLU(inplace=True, negative_slope=0.2)
def forward(self, x):
h = self.lrelu(self.conv1(x))
h = self.lrelu(self.bn2(self.conv2(h)))
h = self.lrelu(self.bn3(self.conv3(h)))
h = self.lrelu(self.bn4(self.conv4(h)))
if self.img_size == 128:
h = self.lrelu(self.bn5(self.conv5(h)))
out = self.conv6(h)
else:
out = self.conv5(h)
return out.view(-1)
class ConditionD(nn.Module):
def __init__(self, num_featrues=64, img_size=128, num_attr=40):
super(ConditionD, self).__init__()
self.img_size = img_size
self.conv1 = nn.Conv2d(3, num_featrues, 4, 2, 1, bias=False)
self.conv2 = nn.Conv2d(num_featrues, num_featrues*2, 4, 2, 1, bias=False)
self.bn2 = nn.BatchNorm2d(num_featrues*2)
self.conv3 = nn.Conv2d(num_featrues*2, num_featrues*4, 4, 2, 1, bias=False)
self.bn3 = nn.BatchNorm2d(num_featrues*4)
if img_size == 128:
attr_dim = 0
else:
attr_dim = 40
self.conv4 = nn.Conv2d(num_featrues*4+attr_dim, num_featrues*8, 4, 2, 1, bias=False)
self.bn4 = nn.BatchNorm2d(num_featrues*8)
if img_size == 128:
attr_dim = num_attr
self.conv5 = nn.Conv2d(num_featrues*8+attr_dim, num_featrues*16, 4, 2, 1, bias=False)
self.bn5 = nn.BatchNorm2d(num_featrues*16)
self.conv6 = nn.Conv2d(num_featrues*16, 1, 4, 1, 0, bias=False)
else:
            self.conv5 = nn.Conv2d(num_featrues*8, 1, 4, 1, 0, bias=False)  # conv4 outputs num_featrues*8 channels
self.lrelu = nn.LeakyReLU(inplace=True, negative_slope=0.2)
def forward(self, x, y):
y = y[:,:,None,None].repeat(1,1,8,8)
h = self.lrelu(self.conv1(x))
h = self.lrelu(self.bn2(self.conv2(h)))
h = self.lrelu(self.bn3(self.conv3(h)))
        if self.img_size != 128:
h = torch.cat((h, y), dim=1)
h = self.lrelu(self.bn4(self.conv4(h)))
if self.img_size == 128:
h = torch.cat((h, y), dim=1)
h = self.lrelu(self.bn5(self.conv5(h)))
out = self.conv6(h)
else:
out = self.conv5(h)
return out.view(-1)
|
import socket
import sys
def run(user, password, * commands):
HOST, PORT = "codebb.cloudapp.net", 17429
data = user + " " + password + "\n" + "\n".join(commands) + "\nCLOSE_CONNECTION\n"
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.connect((HOST, PORT))
sock.sendall(bytes(data, "utf-8"))
sfile = sock.makefile()
rline = sfile.readline()
while rline:
rline = sfile.readline()
def get_status(user, password):
HOST, PORT = "codebb.cloudapp.net", 17429
data = user + " " + password + "\n" + 'STATUS' + "\nCLOSE_CONNECTION\n"
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
output = []
sock.connect((HOST, PORT))
sock.sendall(bytes(data, "utf-8"))
sfile = sock.makefile()
rline = sfile.readline()
while rline:
output += [rline.strip()]
rline = sfile.readline()
return output
def get_config(user, password):
HOST, PORT = "codebb.cloudapp.net", 17429
data = user + " " + password + "\n" + 'CONFIGURATIONS' + "\nCLOSE_CONNECTION\n"
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
output = []
sock.connect((HOST, PORT))
sock.sendall(bytes(data, "utf-8"))
sfile = sock.makefile()
rline = sfile.readline()
while rline:
output += [rline.strip()]
rline = sfile.readline()
return output
def get_scan(user, password, x, y):
HOST, PORT = "codebb.cloudapp.net", 17429
data = user + " " + password + "\n" + 'SCAN' + " " + x + " " + y + "\nCLOSE_CONNECTION\n"
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
output = []
sock.connect((HOST, PORT))
sock.sendall(bytes(data, "utf-8"))
sfile = sock.makefile()
rline = sfile.readline()
while rline:
output += [rline.strip()]
rline = sfile.readline()
return output
def subscribe(user, password):
HOST, PORT = "codebb.cloudapp.net", 17429
data = user + " " + password + "\nSUBSCRIBE\n"
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.connect((HOST, PORT))
sock.sendall(bytes(data, "utf-8"))
sfile = sock.makefile()
rline = sfile.readline()
while rline:
rline = sfile.readline()
|
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import random
import time
import numpy as np
from datetime import datetime
def randomDate():
frmt = '%d-%m-%Y'
stime = time.mktime(time.strptime('20-01-2018', frmt))
    etime = time.mktime(time.strptime('23-01-2018', frmt))  # end of the sampled range (was '23-01-2001', which falls before stime)
ptime = stime + random.random() * (etime - stime)
dt = datetime.fromtimestamp(time.mktime(time.localtime(ptime)))
return dt
rootCauseDict = {
0:"Poor Planning",
1:"Strike of workers",
2:"Poor Lead time calculation",
3:"Poor inventory control",
4:"faulty plant layout",
5:"excessive machine stoppage",
6:"electricty stoppage",
7:"Raw material low",
8:"material wastage due to over-feeding",
9:"Demand Variation",
10:"Huge backlog of orders",
11:"Supply Shortages and logistical uncertainties",
12:"Factory shutdown",
13:"Financial problems of company leading to interrupted supplies",
}
def create_model(data):
#data=pd.read_csv("Keyboard.csv")
data['date']=0
for index,row in data.iterrows():
data.loc[index,'date']=randomDate().date()#("20-01-2018", "23-01-2018").date()
data['date'] = pd.to_datetime(data['date'])
data=data.drop('month',axis=1)
data=data.drop('year',axis=1)
data.columns
data=data[['date','inventorytofactorydistance', 'isholiday', 'modeofdelivery',
'profittocompany', 'quantity', 'rootcause', 'totalweight']]
df=data.copy()
df=df.set_index('date')
###### VISUALIZATIONS ##########
# df.plot(grid=True)
#
# import seaborn as sns
# # Use seaborn style defaults and set the default figure size
# sns.set(rc={'figure.figsize':(11, 4)})
# df['profittocompany'].plot(linewidth=0.5)
##################################
df1 = pd.get_dummies(df['modeofdelivery'])
df=pd.concat([df,df1],axis=1)
df=df.drop('modeofdelivery',axis=1)
df=df.drop('rail',axis=1)
df.columns
df=df[['inventorytofactorydistance', 'isholiday', 'profittocompany',
'quantity', 'totalweight', 'airways', 'road', 'water', 'rootcause']]
x= df.iloc[:, 0:8]
y= df.iloc[:,-1:]
x['isholiday']=x['isholiday'].astype(int)
y = pd.get_dummies(y['rootcause'])
y=y.drop('Poor Planning',axis=1)
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
x_training_set_scaled = sc_X.fit_transform(x)
#x_final=pd.DataFrame(x_training_set_scaled)
#x_final['date']=x['date']
#x_training_set_scaled['date']=x['date']
sc_Y = StandardScaler()
y_training_set_scaled=sc_Y.fit_transform(y)
    # sklearn.cross_validation has been removed; train_test_split now lives in model_selection
    from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x_training_set_scaled, y_training_set_scaled, test_size = 1/3, random_state = 0)
X_train = []
y_train = []
#Converting into array
X_train = np.array(x_training_set_scaled)
y_train = np.array(y_training_set_scaled)
#adding the third dimension
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# #Importing libraties for the LSTM model
# from keras.models import Sequential
# from keras.layers import Dense
# from keras.layers import LSTM
# from keras.layers import Dropout
# Initialising the RNN
classifier = Sequential()
# Adding the first LSTM layer
#X_train.shape
classifier.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))
# Adding a second LSTM layer
classifier.add(LSTM(units = 50, return_sequences = True))
# Adding a third LSTM layer
classifier.add(LSTM(units = 50, return_sequences = True))
# Adding a fourth LSTM layer
classifier.add(LSTM(units = 50))
# Adding the output layer
classifier.add(Dense(units = 14))
# Compiling the RNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy')
classifier.fit(X_train, y_train, epochs = 100, batch_size = 32)
# predicted_cause = classifier.predict(X_test)
# predicted_cause = sc.inverse_transform(predicted_cause)
return classifier
def retMAX(predicted_cause):
print(predicted_cause)
df_final=pd.DataFrame(predicted_cause)
rootcause=''
maxx = -100000
for i in range(14):
if abs(df_final.iloc[0][i]) > maxx:
maxx = abs(df_final.iloc[0][i])
rootcause =rootCauseDict[i]
print(maxx)
print(rootcause)
if(rootcause == ''):
rootcause = 'Transport Delays'
return(rootcause)
def findRootcause(classifier,features):
columns=['inventorytofactorydistance','isholiday','modeofdelivery','month','profittocompany','quantity','totalweight','year']
dflist = []
rlist = []
for key in columns:
if features[key] is None:
rlist.append(None)
else:
rlist.append(features[key])
dflist.append(rlist)
df = pd.DataFrame(dflist)
df=np.array(df)
df=np.reshape(df,(df.shape[0],df.shape[1],1))
rootcause = classifier.predict(df)
#will print the root_cause a particular row only
return(retMAX(rootcause)) #will be of type string
# data=data[['date','inventorytofactoryDistance', 'isholiday', 'modeofdelivery',
# 'profittocompany', 'quantity', 'rootcause', 'totalweight']]
features = {
'inventorytofactorydistance':2345 ,
'isholiday': 0,
'modeofdelivery': 0,
'month': 11,
'profittocompany':251,
'quantity': 4233,
'totalweight': 2134,
'year': 2017
}
df = pd.read_csv('Ram.csv')
classifier1 = create_model(df)
x = findRootcause(classifier1, features)
t1=['inventorytofactorydistance','isholiday','modeofdelivery','month','profittocompany','quantity','totalweight','year']
t2 = pd.DataFrame(list(features.items()),columns=t1)
dflist = []
rlist = []
for key in t1:
if features[key] is None:
rlist.append(None)
else:
rlist.append(features[key])
dflist.append(rlist)
df = pd.DataFrame(dflist)
df=np.array(df)
df=np.reshape(df,(df.shape[0],df.shape[1],1))
classifier1.predict(df)
rc = findRootcause(classifier1, features)
|
#coding:utf-8
if __name__ == "__main__":
String1 = "Welcome to Git firstly!"
String2 = "jiwei"
print(String1,String2)
|
from typing import Dict, List
from .lib.placeholder import Placeholder
class AbstractInterface:
"""
AbstractInterface를 상속받아 새 DB Model의 Interface를 만들 수 있습니다. 새 Interface에서 다음의 변수를 사용하십시오:
1. create_fields
INSERT할 때 필수적으로 필요한 Fields를 List로 정의할 수 있습니다.
2. retrieve_fields
SELECT할 때 필요한 Fields를 List로 정의할 수 있습니다.
3. update_fields
UPDATE할 때 필수적으로 필요한 Fields를 List로 정의할 수 있습니다.
4. table_name
액세스할 테이블의 이름을 str로 정의할 수 있습니다.
"""
def __init__(self, cur):
"""
cur은 DB Connector의 cursor입니다. 이를 이용해 DB에 접근합니다.
"""
self.cur = cur
def perform_create(self, return_type=None, **data: Dict):
"""data를 table_name 테이블에 추가합니다."""
keys_set = set(data.keys())
unknown_fields = keys_set - set(self.create_fields)
if len(unknown_fields) > 0:
            # TODO: print the fields that are not allowed.
raise AttributeError(f'{str(unknown_fields)} field(s) is(are) not allowed')
keys_list = list(data.keys())
query_fields = ', '.join(keys_list)
        fields_values = tuple(data.get(key) for key in keys_list)  # could equally use valid_fields instead of self.create_fields
self.cur.execute(
f'''
INSERT INTO
{self.table_name}({query_fields})
VALUES({Placeholder.for_create_query(len(fields_values))})
'''
, fields_values
)
        context = {key: data.get(key) for key in self.retrieve_fields}  # changed from self.create_fields to self.retrieve_fields
context['id'] = self.cur.lastrowid
if return_type != None:
return return_type(**context)
return context
def perform_retrieve(self, return_type=None, project_fields: List = [], **where_kwargs: Dict):
"""table_name 테이블에서 project_fields Field를 where_kwargs의 조건으로 가져옵니다."""
        # Raise AttributeError if project_fields contains fields not in self.retrieve_fields.
unknown_fields = set(project_fields) - set(self.retrieve_fields)
if len(unknown_fields) > 0:
raise AttributeError(f'{str(unknown_fields)} field(s) is(are) not allowed')
        # If project_fields is empty, fall back to self.retrieve_fields.
if len(project_fields) <= 0:
project_fields = self.retrieve_fields
        # Raise AttributeError if where_kwargs contains fields not in self.retrieve_fields.
unknown_where_fields = set(where_kwargs.keys()) - set(self.retrieve_fields)
if len(unknown_where_fields) > 0:
raise AttributeError(f'{str(unknown_where_fields)} field(s) is(are) not allowed')
        # Build the placeholder for project_fields. (common path)
project_placeholder = Placeholder.for_select_query(project_fields)
        # Build the query without a WHERE clause first.
query = f"""SELECT {project_placeholder}\nFROM {self.table_name}"""
where_keys = list(where_kwargs.keys())
        # If where_kwargs is empty, there is no WHERE clause.
if len(where_keys) > 0:
            # WHERE clause present.
            # Build the placeholder for where_kwargs.
where_placeholder = Placeholder.for_where_query(where_keys)
query += f'\nWHERE {where_placeholder}'
where_values = tuple(where_kwargs.get(key) for key in where_keys)
            # Build and execute the final query. (common path)
self.cur.execute(query, where_values)
else:
self.cur.execute(query)
        # Fetch the rows and build a List of return_type (DTO) instances in the for loop. (common path)
lst = []
if return_type != None:
for t in self.cur:
instance = return_type(**t)
lst.append(instance)
else:
for t in self.cur:
lst.append(t)
return lst
def perform_update(self, id: int, **data: Dict) -> Dict:
"""id로 지정되는 한 튜플을 data로 갱신합니다."""
keys_set = set(data.keys())
unknown_fields = keys_set - set(self.update_fields)
if len(unknown_fields) > 0:
raise AttributeError(f'{str(unknown_fields)} field(s) is(are) not allowed')
        if len(keys_set) <= 0: return None  # No data for any allowed field means there is nothing to update, so stop here.
keys_list = list(data.keys())
fields_values = tuple(data.get(key) for key in keys_list)
self.cur.execute(
f'''
UPDATE {self.table_name}
SET {Placeholder.for_update_query(keys_list)}
WHERE id = ?
''',
(*fields_values, id)
)
returnable_fields = list(set(self.retrieve_fields).intersection(keys_set))
context = {key: data.get(key) for key in returnable_fields}
return context
def perform_delete(self, id: int) -> None:
"""id로 지정되는 한 튜플을 삭제합니다."""
self.cur.execute(
f'''
DELETE FROM {self.table_name}
WHERE id = ?
''',
(id,)
)
return None
def create(self, return_type=None, **data: Dict):
"""
data를 self.table_name 테이블에 추가합니다. perform_create(self, **data)와 다른 점이라면 오버라이드하여 data의 특정 field를 가공할 수 있습니다.
"""
return self.perform_create(return_type, **data)
def retrieve(self, return_type=None, project_fields: List = [], **where_kwargs: Dict):
"""
table_name 테이블에서 project_fields Field를 where_kwargs의 조건으로 가져옵니다.
"""
return self.perform_retrieve(return_type, project_fields, **where_kwargs)
def update(self, id: int, **data: Dict) -> Dict:
"""
id로 지정되는 한 튜플을 data로 갱신합니다. perform_update(self, id, **data)와 다른 점이라면 오버라이드하여 data의 특정 field를 가공할 수 있습니다.
"""
return self.perform_update(id, **data)
def delete(self, id: int) -> None:
"""
id로 지정되는 한 튜플을 삭제합니다.
"""
return self.perform_delete(id)
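# Illustrative usage sketch (not part of the original module): a concrete Interface only
# declares its field lists and table name; "BookInterface" and the `book` table below are
# hypothetical examples.
#
# class BookInterface(AbstractInterface):
#     table_name = 'book'
#     create_fields = ['title', 'author']
#     retrieve_fields = ['id', 'title', 'author']
#     update_fields = ['title', 'author']
#
# book_interface = BookInterface(cur)
# book_interface.create(title='Dune', author='Frank Herbert')
# rows = book_interface.retrieve(project_fields=['title'], author='Frank Herbert')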
|
class Environment():
def __init__(self, players, deck, currentPlayer):
self.players = players
self.deck = deck
self.winners = []
self.currentPlayer = currentPlayer
self.previousPlayer = None
|
GlowScript 2.1 VPython
# Using a graph-plotting module
EPS = 0.001
energy = -0.25
scene.height = 20
scene.background = vector(0.95, 0.95, 0.95)
class EventBus:
def __init__(self):
        self.events = {}
def on(self, event_name, handler):
events = self.events
        if event_name not in events:
events[event_name] = []
events[event_name].append(handler)
def get_trigger(self, event_name):
events = self.events
def on_call(value):
            if event_name not in events:
events[event_name] = []
for handler in events[event_name]:
handler(value)
return on_call
eventBus = EventBus()
def on_slider_changed(slider_data):
    global energy
    value = float(slider_data.value)
    energy = value
    trigger = eventBus.get_trigger('energy_changed')
    trigger(energy)
eventBus.on('slider_changed', on_slider_changed)
def calc_effective_potential(r):
# normalised so
# alpha = 1, M**2/m = 1
r = float(r)
return -1/r + 1/(2 * r**2)
def calc_conic_section_dist(phi, e):
r = 1/(1.0 + e * cos(phi))
if r < 0:
raise ArithmeticError('negative')
return r
def calc_eccentricity(energy):
return (1 + 2*energy)**0.5
def potential_plot():
xmin, xmax = 0.0, 3.0
ymin, ymax = -1.0, 1.0
potential_plot = gdisplay( xtitle='r',
ytitle='Potential',
width=400,
height=300,
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax)
effective_potential_plot = gcurve(color=color.cyan,
label='U_eff')
middle_line = gcurve(color=color.black,
data=[[0, 0], [xmax, 0]])
energy_plot = gcurve(color=color.orange,
data=[[0, energy], [xmax, energy]],
label='energy')
def draw_effective_potential():
num_ticks = 100.0
step = (xmax - xmin) / num_ticks
for r in arange(xmin, xmax, step):
effective_potential_plot.plot( pos=(r, calc_effective_potential(r)) )
def draw_energy(energy):
energy_plot.data = [[0, energy], [xmax, energy]]
eventBus.on('energy_changed', draw_energy)
draw_effective_potential()
# here border cases from analysis of effective_potential
slider( min=-0.5 + EPS, # to avoid sqrt(negative number)
max=0.5,
value=energy,
length=400,
bind=eventBus.get_trigger('slider_changed'))
def trajectory_plot():
# simulation meta variables
FPS = 40.0
out_point = (5000, 5000)
d_t = 1/FPS
phi_speed = pi * 0.5
d_phi = phi_speed * d_t
steps = int(2 * pi / d_phi)
trajectory = [out_point]
xmin, xmax = -5.0, 5.0
ymin, ymax = -5.0, 5.0
potential_plot = gdisplay(width=400,
height=400,
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax)
particle = gdots(color=color.red, size=10)
trajectory_plot = gdots(color=color.red, size=1)
#print(dir(redraw_trajectory))
def calc_trajectory(energy):
data = []
e = calc_eccentricity(energy)
for i in range(steps + 1):
phi = 2.0*pi*i/steps
r = 2 #calc_conic_section_dist(phi, e)
x = r*cos(phi)
y = r*sin(phi)
data.append((x, y))
return data
def on_energy_changed(energy):
global trajectory
trajectory = calc_trajectory(energy)
trajectory_plot.data = trajectory[::4]
#eventBus.on('energy_changed', on_energy_changed)
#on_energy_changed(energy)
# initial values
curr_step = 0
particle.data = [out_point]
def run():
global curr_step
# loop it. See:
# http://www.glowscript.org/#/user/GlowScriptDemos/folder/Examples/program/Bounce-Callbacks-VPython/edit
rate(int(FPS), run)
# e = calc_eccentricity(energy)
curr_step = (curr_step + 1) % len(trajectory)
curr_point = trajectory[curr_step]
particle.data = [curr_point]
run()
trajectory_plot()
potential_plot()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to parse USN change journal records."""
import argparse
import logging
import sys
from dtformats import usn_journal
from dtformats import output_writers
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Extracts information from USN change journal records.'))
argument_parser.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='enable debug output.')
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='PATH',
default=None, help='path of the USN change journal records.')
options = argument_parser.parse_args()
if not options.source:
print('Source file missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = output_writers.StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print(f'Unable to open output writer with error: {exception!s}')
print('')
return False
usn_records = usn_journal.USNRecords(
debug=options.debug, output_writer=output_writer)
usn_records.Open(options.source)
output_writer.WriteText('USN journal records information:')
output_writer.WriteText(','.join([
'Date and time', 'Name', 'File reference', 'Parent file reference']))
for usn_record in usn_records.ReadRecords():
# pylint: disable=protected-access
date_time = usn_record._FormatIntegerAsFiletime(usn_record.timestamp)
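    # An NTFS file reference packs a 48-bit MFT entry number in the low bits and a
    # 16-bit sequence number in the high bits; split them for readable output.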
mft_entry = usn_record.file_reference & ((1 << 48) - 1)
sequence_number = usn_record.file_reference >> 48
file_reference = f'{mft_entry:d}-{sequence_number:d}'
mft_entry = usn_record.parent_file_reference & ((1 << 48) - 1)
sequence_number = usn_record.parent_file_reference >> 48
parent_file_reference = f'{mft_entry:d}-{sequence_number:d}'
output_writer.WriteText(','.join([
date_time, usn_record.name, file_reference, parent_file_reference]))
usn_records.Close()
output_writer.Close()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
|
from django.shortcuts import render
from mainapp.models import CollegeGroup, Student
def index(request):
return render(request, 'mainapp/index.html')
def students(request):
categories = CollegeGroup.objects.all()
context = {
'categories': categories,
'page_title': 'каталог'
}
return render(request, 'mainapp/students.html', context)
def students_page(request, pk):
items = Student.objects.filter(category_id=pk)
context = {
'items': items,
'page_title': 'cтраница студентов'
}
return render(request, 'mainapp/students_pade.html', context)
# Create your views here.
|
"""
This code computes the order of two sibling nodes in a dependency subtree,
where the left and the right siblings are defined on the source dependency tree.
Each node is represented by the continuous vector of its dependency link to its
parent node.
"""
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from dropout_mlp import DropoutMLP as MLP
from lookuptable import LookupTable
class FeatureBasedPredictor(object):
"""Multi-Layer Perceptron Class with Lookup Table Input
"""
def __init__(self, rng, input, feature_size, emb_mat, hidden_sizes=None, activation=T.nnet.relu, dropout_rates=None,
n_slack=0, init_ignore_out=False, init_account_dropout=False):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type emb_mat: theano.tensor.TensorType
:param emb_mat: embedding matrix, must be pre-initialized (random or from w2v)
"""
# Since we are dealing with a one lookup table layer LR, we need lookup table before LR
self.dropout_rates = dropout_rates
self.lookupTableLayer = LookupTable(
rng=rng, input=input, emb_mat=emb_mat
)
# The projection layer, i.e., an MLP layer, gets as input the output units
# of the lookup table layer
emb_size = emb_mat.shape[1]
if hidden_sizes is None:
hidden_sizes = [] # default: no hidden layer
if dropout_rates is None:
dropout_rates = [0.5] * (len(hidden_sizes)+1)
self.projectionLayer = MLP(
rng=rng,
input=self.lookupTableLayer.output,
n_in=feature_size * emb_size,
n_hiddens=hidden_sizes,
dropout_rates=dropout_rates,
activation=activation,
n_slack=n_slack,
init_ignore_out=init_ignore_out,
init_account_dropout=init_account_dropout
)
# prediction of the sibling pair order is given by the prediction of the
# model, computed in the multilayer perceptron projection layer
self.y_pred = self.projectionLayer.y_pred
self.errors = self.projectionLayer.errors
self.cross_entropy = self.projectionLayer.cross_entropy
self.dropout_errors = self.projectionLayer.dropout_errors
self.dropout_cross_entropy = self.projectionLayer.dropout_cross_entropy
self.true_positives = self.projectionLayer.true_positives
self.true_negatives = self.projectionLayer.true_negatives
self.false_positives = self.projectionLayer.false_positives
self.false_negatives = self.projectionLayer.false_negatives
# the parameters of the model are the parameters of the two layer it is
# made out of
self.params = self.lookupTableLayer.params + self.projectionLayer.params
# keep track of model input
self.input = input
# Access to actual (non-dropped) parameters
def lookupTable(self): # return current state of the lookup table
return self.lookupTableLayer.embeddings.get_value()[:-1]
def get_W(self, i):
return self.projectionLayer.W[i].get_value() * (1. - self.dropout_rates[i])
def get_b(self, i):
return self.projectionLayer.b[i].get_value()
def get_logre_W(self):
return self.projectionLayer.dropoutLogRegressionLayer.W.get_value() * (1. - self.dropout_rates[-1])
def get_logre_b(self):
return self.projectionLayer.dropoutLogRegressionLayer.b.get_value()
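# Illustrative construction sketch (an assumption, not taken from the original training
# script): the predictor expects a symbolic integer matrix of feature indices and a
# pre-built embedding matrix; the sizes and dropout rates below are hypothetical.
#
# rng = numpy.random.RandomState(1234)
# x = T.imatrix('x')  # (batch, feature_size) indices into emb_mat
# emb_mat = numpy.random.randn(10000 + 1, 50).astype(theano.config.floatX)
# model = FeatureBasedPredictor(rng=rng, input=x, feature_size=8, emb_mat=emb_mat,
#                               hidden_sizes=[200], dropout_rates=[0.2, 0.5])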
|
from scapy.layers.l2 import ARP, Ether
from scapy.sendrecv import srp
import time
import requests
url = "https://api.macvendors.com/"
def get_mac_details(mac_address):
response = requests.get(url + mac_address)
return response.content.decode()
target_ip = input('Enter the IP address:')
startTime = time.time()
print("Scannning started on local network ...")
arp = ARP(pdst=target_ip)
ether = Ether(dst="ff:ff:ff:ff:ff:ff")
packet = ether / arp
result = srp(packet, timeout=3, verbose=0)[0]
# a list of clients, we will fill this in the upcoming loop
clients = []
for sent, received in result:
vendor_name = get_mac_details(received.hwsrc)
clients.append({'ip': received.psrc, 'mac': received.hwsrc,'vendor': vendor_name})
# print clients
print("Available devices in the network:")
print("IP" + " " * 18 + "MAC" + " " * 18 + "Vendor")
for client in clients:
print("{:16} {} {}".format(client['ip'], client['mac'], client['vendor']))
print(' ')
print('.........................................................................')
print('Time taken:', time.time() - startTime)
|
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import kmeans
import gausian_mixture_model
gauss1 = stats.multivariate_normal([0, 0], [[20, 0], [0, 20]])
gauss2 = stats.multivariate_normal([12, 12], [[3, 0], [0, 3]])
gauss3 = stats.multivariate_normal([-12, 12], [[3, 0], [0, 3]])
dataset = []
for _ in range(600):
dataset.append(gauss1.rvs())
for _ in range(200):
dataset.append(gauss2.rvs())
for _ in range(200):
dataset.append(gauss3.rvs())
dataset = np.array(dataset)
kmeans = kmeans.KMeans(X=dataset, K=3)
preds = []
for te_x in dataset:
idx, center = kmeans.predict(te_x)
preds.append(idx)
preds = np.array(preds)
plt.subplot(2, 1, 1)
plt.scatter(dataset[:, 0], dataset[:, 1], c=preds, s=50, cmap='viridis', alpha=0.5)
plt.scatter(kmeans.centers[:, 0], kmeans.centers[:, 1], c='black', s=200)
plt.title('KMeans - My implementation')
plt.xticks(visible=False)
kmeans_sklearn = KMeans(n_clusters=3, init='random', algorithm='full')
kmeans_sklearn.fit(dataset)
preds = kmeans_sklearn.predict(dataset)
plt.subplot(2, 1, 2)
plt.scatter(dataset[:, 0], dataset[:, 1], c=preds, s=50, cmap='viridis', alpha=0.5)
plt.scatter(kmeans_sklearn.cluster_centers_[:, 0], kmeans_sklearn.cluster_centers_[:, 1], c='black', s=200)
plt.title('KMeans - Scikit learn')
plt.show()
clusters = np.array([np.array(cluster) for _, cluster in kmeans.clusters.items()])
gmm = gausian_mixture_model.GaussianMixtureModel(dataset, clusters)
gmm.visualize()
|
# scrape.py
# Ethan Malenchek
# a program to gather information on the top NFL fantasy scorers
import requests
import re
import pandas as pd
from bs4 import BeautifulSoup
website_url = requests.get('https://fantasy.nfl.com/research/scoringleaders').text
path = './top_25.csv'
soup = BeautifulSoup(website_url, 'html.parser') # parses entire page
my_table = soup.find('table',{'class':'tableType-player'}) # finds table of players on page
name_links = my_table.findAll('a',{'class':'playerName'}) # finds all <a> for player names
points = my_table.findAll('span',{'class':'playerSeasonTotal'}) # stores player points
positions = my_table.findAll('em')
player_names = [] # instantiate list of player names
player_points = [] # instantiate list of player points
player_positions = [] # instantiate list of player positions
count = 0
for link in name_links:
player_names.append(link.get_text()) # parse links for player names and store in list
for point in points:
player_points.append(point.get_text()) # parse spans for player points and store in list
for position in positions:
count = count + 1
if count > 7: # skips heading <em> elements
player_positions.append(position.get_text()) # parse <em> for position/team and store in list
dataf = pd.DataFrame( # construct data frame
{'Player Names':player_names,
'Total Points':player_points,
'Position & Team':player_positions,
})
dataf['Player Names'] = player_names
dataf['Total Points'] = player_points
dataf['Position & Team'] = player_positions
print(dataf)
dataf.to_csv(path, index = False) # export data frame to csv
|
import datetime
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth import get_user_model
# Create your models here.
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'PUblished recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length = 200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
anual_leave_quota = models.IntegerField(default=10)
anual_leave_remain = models.IntegerField(default=10)
sick_leave_quota = models.IntegerField(default=10)
sick_leave_remain = models.IntegerField(default=5)
is_approver = models.BooleanField(default = False)
def __str__(self):
return self.user.username
@receiver(post_save, sender=User)
def create_or_update_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
class LeaveRequest(models.Model):
PENDING = 0
APPROVED = 1
REJECTED = 2
REQUEST_STATUS_CHOIES = (
(PENDING, 'Pending'),
(APPROVED, 'Approved'),
(REJECTED, 'Rejected')
)
leave_start_date = models.DateTimeField('leave start')
leave_end_date = models.DateTimeField('leave end')
leave_request_status = models.IntegerField(
choices = REQUEST_STATUS_CHOIES,
default = PENDING,
)
leave_requester = models.ForeignKey(User, on_delete=models.CASCADE, related_name="creator")
leave_approver = models.ForeignKey(User, limit_choices_to={'profile__is_approver': True}, on_delete=models.CASCADE)
def leave_days(self):
delta = self.leave_end_date - self.leave_start_date
return delta
|
from model import Base, Product, Cart
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///database.db')
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
def add_product(name, price, picture_link, description):
product_object = Product(
name = name,
price = price,
picture_link = picture_link,
description = description)
session.add(product_object)
session.commit()
def edit_product(id, name, price, picture_link, description):
product_object = session.query(
Product).filter_by(
id=id).first()
product_object.name = name
product_object.price = price
product_object.picture_link = picture_link
product_object.description = description
session.add(product_object)
session.commit()
def delete_product(name):
session.query(Product).filter_by(
name = name).delete()
session.commit()
def return_all_products():
products = session.query(Product).all()
return products
def query_by_id(their_id):
product = session.query(Product).filter_by(id = their_id).first()
return product
def Add_To_Cart(productID):
add_to_cart = Cart(
productID = productID)
session.add(add_to_cart)
session.commit()
# add_product("Granny Smith", 75, 'GrannySmith.jpg', "S O U R city!")
# add_product("Red Delicious", 50, 'RedDelicious.jpg', "i like to eat this apple becasue it is red")
# add_product("Golden Delicious", 25, 'GoldenDelicious.jpg', "i dont like this apple becasue it is yellow")
|
str1='ABCD'
str2='PQR'
for i in range(4):
print(str1[:i+1]+str2[i:])
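# expected output: APQR, ABQR, ABCR, ABCD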
|
import random
from words import word_list
def get_word():
word = random.choice(word_list)
return word.upper()
def play(word):
print('hello lets play')
word_completion = '_'*len(word)
guessed = False
guessed_letters = []
guessed_words = []
tries = 6
print('Lets play hangman')
print(display_hangman(tries))
print(word_completion)
print('\n')
    while not guessed and tries > 0 :
        guess = input('Please guess a letter or word').upper()
        # length has to be equal to one (char) and it has to be a letter--> isalpha()
        if len(guess) == 1 and guess.isalpha():
            #char already in list of guessed letters
            if guess in guessed_letters:
                print('You already guessed the letter', guess)
            # if (char) not in the character of the word
            elif guess not in word:
                print(guess, ' is not in the word')
                tries -= 1
                guessed_letters.append(guess)
            else:
                print('Good job', guess, 'is in the word!')
                guessed_letters.append(guess)
                # reveal every position of the guessed letter
                word_as_list = list(word_completion)
                for i, letter in enumerate(word):
                    if letter == guess:
                        word_as_list[i] = guess
                word_completion = ''.join(word_as_list)
                guessed = '_' not in word_completion
        # a guess the same length as the word is treated as a word guess
        elif len(guess) == len(word) and guess.isalpha():
            if guess in guessed_words:
                print('You already guessed the word', guess)
            elif guess != word:
                print(guess, ' is not the word')
                tries -= 1
                guessed_words.append(guess)
            else:
                guessed = True
                word_completion = word
        else:
            print('Not a valid guess.')
        print(display_hangman(tries))
        print(word_completion)
        print('\n')
    if guessed:
        print('Congrats, you guessed the word! You win!')
    else:
        print('Sorry, you ran out of tries. The word was ' + word + '.')
def display_hangman(tries):
stages = [ # final state: head, torso, both arms, and both legs
"""
--------
| |
| O
| \\|/
| |
| / \\
-
""",
# head, torso, both arms, and one leg
"""
--------
| |
| O
| \\|/
| |
| /
-
""",
# head, torso, and both arms
"""
--------
| |
| O
| \\|/
| |
|
-
""",
# head, torso, and one arm
"""
--------
| |
| O
| \\|
| |
|
-
""",
# head and torso
"""
--------
| |
| O
| |
| |
|
-
""",
# head
"""
--------
| |
| O
|
|
|
-
""",
# initial empty state
"""
--------
| |
|
|
|
|
-
"""
]
return stages[tries]
def main():
word = get_word()
play(word)
if __name__ == '__main__':
main()
|
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scraper_auchan.items import ScraperAuchanItem
class AuchanSpider(CrawlSpider):
name = 'auchan'
allowed_domains = ['www.auchandirect.fr']
start_urls = ['http://www.auchandirect.fr/marche/legumes/id3/404/463']
followRegex = 'http://www.auchandirect.fr/[a-z]+/[a-z]+/[a-z-,]+/id0/[0-9]+'
parseRegex = 'http://www.auchandirect.fr/[a-z]+/[a-z]+/[a-z-]+/id1/' + \
'[0-9]+/[0-9]+'
rules = (
Rule(LinkExtractor(allow=(followRegex, ))),
Rule(LinkExtractor(allow=(parseRegex, )), callback='parse_product'),
)
def parse_product(self, response):
for sel in response.xpath('//div[@class="fiche-produit"]'):
item = ScraperAuchanItem()
item['id'] = response.url.split('/')[-1]
item['name'] = sel \
.xpath('//span[@class="titre-principal"]/text()') \
.extract()
item['desc'] = sel \
.xpath('//span[@class="titre-annexe"]/text()') \
.extract()
item['price'] = sel \
.xpath('//div[@class="prix-actuel"]/span/text()') \
.extract()
item['pricePerUnit'] = sel \
.xpath('//div[@class="prix-annexe"]/p/text()') \
.extract()
item['composition'] = sel \
.xpath('//span[@class="texte-info-normal"]/text()') \
.extract()
yield item
|
import pygame
from card import *
# screen size constant
SCREEN_RECT = pygame.Rect(0,0,997,604)
# refresh frame rate
FRAME_PER_SEC = 60
# background image path
BGC_IMAGE_NAME = './images/table.png'
# size of a card image
CARD_SIZE = (56,98)
# these positions are not yet centre-aligned
# hand cards position
HAND_CARDS_POS = (120,SCREEN_RECT.bottom-CARD_SIZE[1]-10)
# hand cards overlap ratio
HAND_CARDS_PARTITION = 0.4
# played cards position
GIVEN_CARDS_POS = (200,SCREEN_RECT.bottom-2*CARD_SIZE[1]-20)
# previous player's position
LAST_CARDS_POS = (10,SCREEN_RECT.centery-CARD_SIZE[1])
# next player's position
NEXT_CARDS_POS = (SCREEN_RECT.right-10-CARD_SIZE[0],SCREEN_RECT.centery-CARD_SIZE[1])
class GameSprite(pygame.sprite.Sprite):
    '''Base class for game sprites'''
def __init__(self,image_name):
        # call the parent class's __init__
super().__init__()
        # define object attributes
self.image = pygame.image.load(image_name)
self.rect = self.image.get_rect()
class Background(GameSprite):
    '''Background sprite'''
def __init__(self):
super().__init__(BGC_IMAGE_NAME)
class CardSprite(GameSprite):
    '''Card sprite'''
    def __init__(self,index,pos):  # x,y is the card's position
        # create the sprite from index via the parent class
        self.card = Card(index)
        super().__init__(self.card.pic)
        # set the card's initial position
        self.rect.x = pos[0]
        self.rect.y = pos[1]
        self.given = False  # whether the card has been selected to be played
    def mouse_click(self,mouse_pos):  # correct clicks on stacked cards are only partly handled; the outermost card can still only be clicked on half of its width
        if(mouse_pos[0]>self.rect.left and mouse_pos[0]<self.rect.left+CARD_SIZE[0]*HAND_CARDS_PARTITION and mouse_pos[1]<self.rect.bottom and mouse_pos[1]>self.rect.top):  # check whether the click lands on the visible part of the card
            self.given = not self.given
            if(self.given):
                self.rect.y -= 20
            else:
                self.rect.y += 20
|
'''
This module provides a set of useful functions on dictionaries
For example :
- `get_path` to retrieve a nested element in a dict
'''
def falsy_key(d:dict, key:str) -> bool:
"""
returns `True` if key is not an attribute of dict `d` or if `d[key]` is falsy
Returns
-------
bool
False if `d[key]` exists and is not falsy (like `None` or `""`)<br>
True if `d[key]` does not exist or is falsy
"""
if not key in d: return True
if not d[key]: return True
return False
def get_path(obj, path, delim = '/', return_false_path = False):
'''
Returns the value of path in obj
path is a string of a path delimited by delim
Parameters
----------
obj: dict
a dictionary (if it's sthg else, obj will be returned immediately)
path: string
the path in obj to get what we want
delim: str, optional
the path delimiter (default to "/")
return_false_path: any, optional
what to return if the path is not found in obj
Returns
-------
sub_obj: any
the element in obj at the right path
return_false_path: any
if the path is not found in obj
obj: any
the original obj if obj is not a dictionary
'''
if path == '': return obj
if not isinstance(obj, dict): return obj
if path[0] == delim: path = path[1:]
if path[-1] == delim: path = path[:-1]
path_list = path.split(delim)
if len(path_list) == 0 or path == '': return obj
if path_list[0] in obj: return get_path(obj[path_list[0]], delim.join(path_list[1:]), delim, return_false_path)
return return_false_path
def dict_inside_out(d) -> dict:
"""
Turns a dict inside out
Parameters
----------
d: dict
a dictionary to be turned inside out
Returns
-------
dict
the dict d turned inside out
"""
newd = {}
for k, v in d.items():
if isinstance(v, str):
newd[v] = k
elif isinstance(v, list):
for el in v:
if isinstance(el, str): newd[el] = k
return newd
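# Quick illustrative examples (not part of the original module):
if __name__ == '__main__':
    cfg = {'db': {'host': 'localhost', 'port': 5432}, 'debug': ''}
    print(get_path(cfg, 'db/host'))                              # -> localhost
    print(get_path(cfg, 'db.port', delim='.'))                   # -> 5432
    print(get_path(cfg, 'db/missing', return_false_path=None))   # -> None
    print(falsy_key(cfg, 'debug'))                               # -> True ('debug' exists but is falsy)
    print(dict_inside_out({'a': ['x', 'y'], 'b': 'z'}))          # -> {'x': 'a', 'y': 'a', 'z': 'b'}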
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
fromaddr = "healthyheartdoctor@gmail.com"
toaddr = '2017.akash.magdum@ves.ac.in'
msg = MIMEMultipart()
msg['From'] = "healthyheartdoctor@gmail.com"
msg['To'] = toaddr
msg['Subject'] = "Daily Health Report"
body = "TAKE CARE!"
msg.attach(MIMEText(body, 'plain'))
filename = "healthreport.pdf"
attachment = open("healthreport.pdf", "rb")
p = MIMEBase('application', 'octet-stream')
p.set_payload((attachment).read())
encoders.encode_base64(p)
p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(p)
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login(fromaddr, "Doctorheart@123")
text = msg.as_string()
s.sendmail(fromaddr, toaddr, text)
s.quit()
|
# *************************************************************
#
# The OpenTRV project licenses this file to you
# under the Apache Licence, Version 2.0 (the "Licence");
# you may not use this file except in compliance
# with the Licence. You may obtain a copy of the Licence at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the Licence is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Licence for the
# specific language governing permissions and limitations
# under the Licence.
#
# *************************************************************
# Author(s) / Copyright (s): Deniz Erbilgin 2016
# Damon Hart-Davis 2017
# RN2483_LoRaWAN_setup.py
# @author Deniz Erbilgin 2016
# @brief programs RN2483 eeprom to communicate with OpenTRV server over TTN.
# @todo - write code
# - work out test procedure
# - test this works
# - find out if all the settings are as we want (e.g. do we want adr?)
# - license
#import necessary modules
import serial
#Setup values
SERIAL_PORT = "/dev/ttyUSB0"
SERIAL_BAUD = 57600 #standard RN2483 baud
RN2483_DEVADDR = "020111xx" #this is the OpenTRV device block
RN2483_APPSKEY = "" #16 byte private key. Written in hex
RN2483_NWKSKEY = "" #16 byte network key. Written in hex (doesn't need special handling, uses standard TheThingsNetwork(TTN) key
RN2483_STARTUPSTRING = "RN2483" #Edit this to be correct
#open serial connection
ser = serial.Serial(SERIAL_PORT, SERIAL_BAUD)
#check RN2483 is connected (will return a standard string on powerup)
#if ser.readline() != RN2483:
# throw error
#write values:
# 1: sys factoryRESET #return to a known starting point
# 2: mac set devaddr RN2483_DEVADDR # set device address
# 3: mac set appskey RN2483_APPSKEY # set application key
# 4: mac set nwkskey RN2483_NWKSKEY # set network key
# 5: mac set adr off # set adaptive data rate off
# 6: mac save # save values to EEPROM
#test values:
# send an example frame to OpenTRV server
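# Illustrative sketch of the command sequence described above (an assumption, not the
# original implementation): RN2483 commands are sent as CR/LF-terminated ASCII lines
# and each command is answered with a single response line.
def send_command(command):
    ser.write((command + "\r\n").encode("ascii"))
    return ser.readline().decode("ascii").strip()
def program_eeprom():
    for command in ("sys factoryRESET",
                    "mac set devaddr " + RN2483_DEVADDR,
                    "mac set appskey " + RN2483_APPSKEY,
                    "mac set nwkskey " + RN2483_NWKSKEY,
                    "mac set adr off",
                    "mac save"):
        print(command, "->", send_command(command))
# program_eeprom()  # uncomment once the keys above have been filled in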
|
from kadi import events
from utilities import append_to_array, find_first_after, same_limits, heat_map
# Imports assumed for the pylab-style globals used below (this script appears to be
# written for an interactive Ska/pylab session); adjust to the local environment.
from numpy import array, diff, nonzero, zeros, append, mean
from matplotlib.pyplot import close, figure, hist, legend, title, xlabel, ylabel, ylim
from Chandra.Time import DateTime
from Ska.engarchive import fetch
from Ska.Matplotlib import plot_cxctime
close('all')
temp = 'PM3THV2T'
on_range = 60
off_range = 89
t_start = '2000:001'
t_stop = None
#t_stop = '2013:268'
t_event = array([DateTime('2006:351:04:38:00.000').secs,
DateTime('2006:351:04:38:00.000').secs])
x = fetch.Msid(temp, t_start, t_stop)
dt = diff(x.vals)
local_min = (append_to_array(dt <= 0., pos=0, val=bool(0)) &
append_to_array(dt > 0., pos=-1, val=bool(0)))
local_max = (append_to_array(dt >= 0., pos=0, val=bool(0)) &
append_to_array(dt < 0., pos=-1, val=bool(0)))
htr_on_range = x.vals < on_range
htr_off_range = x.vals > off_range
htr_on = local_min & htr_on_range
htr_off = local_max & htr_off_range
#remove any incomplete heater cycles at end of timeframe
last_off = nonzero(htr_off)[0][-1]
htr_on[last_off:] = 0
t_on = x.times[htr_on]
t_off = x.times[htr_off]
match_i = find_first_after(t_on, t_off)
dur = t_off[match_i] - t_on
#compute duty cycles by month
on_dates = DateTime(t_on).iso
on_yrs = [date[0:4] for date in on_dates]
on_mos = [date[5:7] for date in on_dates]
on_freq = zeros(168)
on_time = zeros(168)
avg_on_time = zeros(168)
dates = zeros(168)
i = 0
for yr in range(2000, 2014):
for mo in range(1,13):
yr_match = array([on_yr == str(yr) for on_yr in on_yrs])
mo_match = array([on_mo == str(mo).zfill(2)
for on_mo in on_mos])
on_freq[i] = sum(yr_match & mo_match)
on_time[i] = sum(dur[yr_match & mo_match])
avg_on_time[i] = mean(dur[yr_match & mo_match])
dates[i] = DateTime(str(yr) + '-' + str(mo).zfill(2)
+ '-01 00:00:00.000').secs
i = i + 1
dates_range = append(dates, DateTime('2014:001').secs)
dc = on_time / (dates_range[1:] - dates_range[:-1])
figure(1)
plot_cxctime(t_on, dur, 'b.', alpha=.05, mew=0)
plot_cxctime(t_event, ylim(),'r:')
ylabel('On-Time Durations [sec]')
title('MUPS-3 Valve Heater On-Time Durations')
figure(2)
plot_cxctime(x.times, x.vals, mew=0)
plot_cxctime(x.times, x.vals, 'b*',mew=0)
plot_cxctime(x.times[htr_on], x.vals[htr_on], 'c*',mew=0, label='Heater On')
plot_cxctime(x.times[htr_off], x.vals[htr_off], 'r*',mew=0, label='Heater Off')
plot_cxctime(t_event, ylim(),'r:')
legend()
figure(3)
hist(dur, bins=100, normed=True)
xlabel('On-Time Durations [sec]')
title('MUPS-3 Valve Heater On-Time Durations')
figure(4)
plot_cxctime(dates, dc*100, '*', mew=0)
plot_cxctime(t_event, ylim(),'r:')
title('MUPS-3 Valve Heater Duty Cycle')
ylabel('Heater Duty Cycle by Month [%] \n (Total On-time / Total Time)')
figure(5)
plot_cxctime(dates, on_freq, '*', mew=0)
plot_cxctime(t_event, ylim(),'r:')
title('MUPS-3 Valve Heater Cycling Frequency')
ylabel('Heater Cycles per Month')
figure(6)
plot_cxctime(dates, on_time/3600, '*', mew=0)
plot_cxctime(t_event, ylim(),'r:')
title('MUPS-3 Valve Heater On-Time')
ylabel('Heater On-Time by Month [hrs]')
figure(7)
plot_cxctime(dates, avg_on_time/3600, '*', mew=0)
plot_cxctime(t_event, ylim(),'r:')
title('MUPS-3 Valve Heater Average On-Time')
ylabel('Mean Heater On-Time by Month [hrs]')
|
import random
import telebot
import JSON
from file import read_from_file
bot = telebot.TeleBot(read_from_file('token.txt'))
users_json = 'users.json'
def create_user(data, message):
data[message.from_user.id] = {'name': message.from_user.first_name, 'username': message.from_user.username,
'last_name': message.from_user.last_name, 'wantable_presents': [], 'recipient': None}
JSON.write_to_json(users_json, data)
def update_presents(data, message):
urls = data[str(message.from_user.id)].get('wantable_presents', [])
urls.extend(
map(str.strip, message.text.split('\n')))
data[str(message.from_user.id)]['wantable_presents'] = list(set(urls))
def send_to_everyone(users_id, message: str) -> None:
for ID in users_id:
try:
bot.send_message(ID, message)
except:
pass
@bot.message_handler(commands=['start'])
def start_message(message):
data = JSON.read_from_json(users_json)
if data.get(str(message.from_user.id), None) is not None:
bot.send_message(message.chat.id,
f'{normalize_string(message.chat.first_name)} {normalize_string(message.chat.last_name)}, а я тебя знаю уже!')
else:
create_user(data, message)
bot.send_message(message.chat.id,
f'Привет, {normalize_string(message.chat.first_name)} {normalize_string(message.chat.last_name)}, я тебя добавил в список гостей')
bot.send_message(message.chat.id, 'Для этого, присылайте дедушке, что бы Вы хотели на Новый Год.')
bot.send_message(message.chat.id, 'Дедушка молодой, прогрессивный, так что присылайте дедушке ссылки на '
'подарки, так дедушке будет легче разобраться. '
'Можете присылать по одной или несколько ссылок. Я все запишу))')
@bot.message_handler(regexp=r'Рассылка.*')
def mailing(message):
if str(message.from_user.id) != '531184087':
return
bot.send_message(message.chat.id,
f'Что ты хочешь разослать?')
bot.register_next_step_handler(message, print_news)
def print_news(message):
send_to_everyone(JSON.read_from_json(users_json), message.text)
def normalize_string(s: str) -> str:
return s if s else ''
@bot.message_handler(
regexp=r"(https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&\/=]*))+")
def set_presents(message):
data = JSON.read_from_json(users_json)
if data.get(str(message.from_user.id), None) is not None:
update_presents(data, message)
else:
create_user(data, message)
update_presents(data, message)
bot.send_message(message.chat.id,
f'Привет, {normalize_string(message.chat.first_name)} {normalize_string(message.chat.last_name)}, я тебя добавил в список гостей')
bot.reply_to(message, 'Добавил Ваши желание в свой список))')
JSON.write_to_json(users_json, data)
@bot.message_handler(regexp=r'НГ.*')
def the_new_year(message):
if str(message.from_user.id) != '531184087':
return
data = JSON.read_from_json(users_json)
users = list(data.keys())
random.shuffle(users)
for i, ID in enumerate(users):
data[ID]['recipient'] = users[(i + 1) % len(users)]
data_keys = list(data.keys())
for ID in data_keys:
try:
bot.send_message(ID,
f"Ты даришь подарок человечку с именем {normalize_string(data[data[ID]['recipient']]['name'])} {normalize_string(data[data[ID]['recipient']]['last_name'])}: ")
for present in data[data[ID]['recipient']]['wantable_presents']:
bot.send_message(ID, f'Он(а) хочет {present}')
except:
del data[ID]
JSON.write_to_json(users_json, data)
@bot.message_handler(regexp=r'DEL.*')
def clear_file(message):
if str(message.from_user.id) != '531184087':
return
data = dict()
JSON.write_to_json(users_json, data)
bot.polling()
|
# coding:utf-8
city = " 北京 "
# original output
print(city)
# strip whitespace from both ends of the string
print(city.strip())
# strip whitespace from the beginning of the string
print(city.lstrip())
# strip whitespace from the end of the string
print(city.rstrip())
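# expected output: " 北京 ", "北京", "北京 ", " 北京" (quotes added here to make the spaces visible)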
|
from pprint import pprint
from django.core.management import BaseCommand
from web.models import NotionDocument
from web.utils import make_topic_model
from web.utils import preprocess_docs_to_words
class Command(BaseCommand):
def handle(self, *args, **options):
notion_docs = NotionDocument.objects.all()
text_docs = [x.to_plaintext() for x in notion_docs]
words = preprocess_docs_to_words(text_docs)
lda_model, coherence_score = make_topic_model(words, num_topics=7)
pprint(lda_model.print_topics())
print(f"Coherence score: {coherence_score}")
from IPython import embed; embed()
|
def valid(num):
_str= str(num)
total = 0
for i, char in enumerate(_str, 1):
digit = int(char)
total += digit ** i
return total == num
def sum_dig_pow(a, b): # range(a, b + 1) will be studied by the function
output = []
for i in range(a, b+1):
if valid(i):
output.append(i)
return output
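# Hedged usage sketch (my own examples, not from the kata): 89 qualifies because
# 8**1 + 9**2 == 89, and single digits trivially satisfy d**1 == d.
assert sum_dig_pow(1, 10) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
assert sum_dig_pow(1, 100) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 89]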
|
from django.db import models
from team.models import Team
from user.models import User
image_path = "image"
class Announcement(models.Model):
id = models.AutoField(primary_key=True)
author = models.CharField(max_length=16, verbose_name="发布人")
title = models.TextField(max_length=100, verbose_name="标题")
content = models.TextField(max_length=800, verbose_name="内容")
to_group = models.ManyToManyField(Team, related_name="to_group", blank=True, verbose_name="接收组")
to_people = models.ManyToManyField(User, related_name="to_people", blank=True, verbose_name="接收人")
require_upload = models.BooleanField(default=False, verbose_name="需要上传")
issue_datetime = models.DateTimeField(auto_now_add=True, verbose_name="发布时间")
edit_datetime = models.DateTimeField(auto_now=True, verbose_name="最新修改时间")
deadline = models.DateTimeField(blank=True, verbose_name="截止时间")
url_address = models.TextField(max_length=200, blank=True, verbose_name="转发路径")
active = models.BooleanField(default=True, verbose_name="启用")
team_id = models.CharField(max_length=32, null=True, verbose_name="团队id")
class Meta:
verbose_name = "公告"
verbose_name_plural = "公告"
def __str__(self):
return self.title
class AnnouncementRecord(models.Model):
id = models.AutoField(primary_key=True)
aid = models.IntegerField(verbose_name="通知id")
reader = models.CharField(max_length=16, verbose_name="阅读人")
image = models.ImageField(upload_to=image_path, blank=True, verbose_name="图片")
read_datetime = models.DateTimeField(auto_now=True, verbose_name="确认时间")
read_status = models.CharField(max_length=10, verbose_name="阅读状态")
team_id = models.CharField(max_length=32, null=True, verbose_name="团队id")
class Meta:
verbose_name = "公告确认明细"
verbose_name_plural = "公告确认明细"
def __str__(self):
return self.reader
class Feedback(models.Model):
id = models.AutoField(primary_key=True)
aid = models.IntegerField(verbose_name="通知id")
sender = models.CharField(max_length=16, verbose_name="发送人")
sent_datetime = models.DateTimeField(auto_now=True, verbose_name="发送时间")
comment = models.TextField(max_length=100, verbose_name="内容")
reply_to = models.IntegerField(null=True, verbose_name="回复id")
team_id = models.CharField(max_length=32, null=True, verbose_name="团队id")
class Meta:
verbose_name = "留言"
verbose_name_plural = "留言"
def __str__(self):
return self.sender
|
import cv2

first_frame = None
video = cv2.VideoCapture(0)
while True:
    check, frame = video.read()
    if not check:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # The first captured frame is used as the static background reference.
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    # cv2.findContours returns (contours, hierarchy) in OpenCV 4.x and
    # (image, contours, hierarchy) in 3.x; [-2] picks the contours either way.
    cnts = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Outline every sufficiently large moving region.
    for contour in cnts:
        if cv2.contourArea(contour) < 1000:
            continue
        cv2.drawContours(frame, [contour], 0, (0, 0, 255), 3)
    cv2.imshow("Gray Frame", gray)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
|
# coding:utf-8
import sys
import pickle

# Load a {class_label: word_list} mapping that was pickled to disk.
with open(sys.argv[1], 'rb') as f:
    class_dict = pickle.load(f)

count = 0
for c, wlist in class_dict.items():
    print()
    print(c, "\n------", len(wlist), "words in class-------")
    for w in sorted(wlist):
        count += 1
        print("\t", w)
print("---------\nall words", count)
|
import unittest
from katas.kyu_8.grasshopper_bug_squashing import (
coins, health, log, main, position
)
class GrasshopperTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(coins, 0)
def test_equals_2(self):
self.assertEqual(health, 100)
def test_equals_3(self):
self.assertEqual(position, 0)
def test_equals_4(self):
self.assertEqual(log, ['roll_dice', 'move', 'combat', 'get_coins',
'buy_health', 'print_status'])
def test_equals_5(self):
self.assertIsNone(main())
|
import numpy as np
import matplotlib.pyplot as plt
class DataLoader:
    @staticmethod
    def class_weights(COUNT_PNEUMONIA, COUNT_NORMAL, TRAIN_IMG_COUNT):
initial_bias = np.log([COUNT_PNEUMONIA / COUNT_NORMAL])
print(initial_bias)
weight_for_0 = (1 / COUNT_NORMAL) * (TRAIN_IMG_COUNT) / 2.0
weight_for_1 = (1 / COUNT_PNEUMONIA) * (TRAIN_IMG_COUNT) / 2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
return class_weight
    @staticmethod
    def get_generator(datagen, directory, img_width, img_height, BATCH_SIZE):
generator = datagen.flow_from_directory(
directory,
target_size=(img_width, img_height),
batch_size=BATCH_SIZE,
class_mode="binary",
)
return generator
    @staticmethod
    def show_batch(image_batch, label_batch, BATCH_SIZE):
plt.figure(figsize=(10, 10))
for n in range(BATCH_SIZE):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n])
if label_batch[n]:
plt.title("PNEUMONIA")
else:
plt.title("NORMAL")
plt.axis("off")
plt.savefig("image_batch.png")
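# Hedged usage sketch with made-up counts (the real counts come from the training
# directory, which is not shown here): the rarer class receives the larger weight
# so both classes contribute equally to the loss.
if __name__ == "__main__":
    weights = DataLoader.class_weights(COUNT_PNEUMONIA=3875,
                                       COUNT_NORMAL=1341,
                                       TRAIN_IMG_COUNT=5216)
    print(weights)  # roughly {0: 1.94, 1: 0.67}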
|
#importing stuff
import turtle
import time
wn = turtle.Screen()
wn.title('Boring game')
wn.bgcolor('black')
wn.setup(width=600, height=600)
wn.tracer(0)
ball = turtle.Turtle()
start_game = turtle.Turtle()
start_game.speed(0)
start_game.color('white')
start_game.penup()
start_game.hideturtle()
start_game.goto(0,0)
start_game.write('Press s to start and q to quit', align='center', font=('Courier', 30, 'bold'))
x = 1
# starting the game
def game():
start_game.clear()
ball.speed(0)
ball.shape('square')
ball.color('white')
ball.penup()
ball.goto(0, 0)
# changing directions
def up():
ball.setheading(90)
def down():
ball.setheading(270)
def left():
ball.setheading(180)
def right():
ball.setheading(0)
def quit_game():
wn.bye()
# keyboard presses
wn.listen()
wn.onkey(game, 's')
wn.onkey(up, 'Up')
wn.onkey(down, 'Down')
wn.onkey(left, 'Left')
wn.onkey(right, 'Right')
wn.onkey(quit_game, 'q')
while True:
# kinda like wn.mainloop()
wn.update()
#setting the ball speed
ball.forward(x)
# checking collisions
if ball.ycor() > 278:
time.sleep(1)
ball.goto(0, 0)
x *= 2
if ball.ycor() < -278:
time.sleep(1)
ball.goto(0, 0)
x *= 2
if ball.xcor() > 278:
time.sleep(1)
ball.goto(0, 0)
x *= 2
if ball.xcor() < -278:
time.sleep(1)
ball.goto(0, 0)
x *= 2
|
"""Unit test for treadmill.appcfg.abort
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import json
import os
import shutil
import tempfile
import unittest
import kazoo
import mock
import treadmill
from treadmill import appenv
from treadmill import context
from treadmill import fs
from treadmill.apptrace import events
from treadmill.appcfg import abort as app_abort
class AppCfgAbortTest(unittest.TestCase):
"""Tests for teadmill.appcfg.abort"""
def setUp(self):
self.root = tempfile.mkdtemp()
self.tm_env = appenv.AppEnvironment(root=self.root)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
@mock.patch('treadmill.appcfg.abort.flag_aborted', mock.Mock())
@mock.patch('treadmill.supervisor.control_service', mock.Mock())
def test_abort(self):
"""Tests abort sequence."""
container_dir = os.path.join(self.root, 'apps', 'proid.myapp#001',
'data')
fs.mkdir_safe(container_dir)
app_abort.abort(container_dir,
why=app_abort.AbortedReason.INVALID_TYPE,
payload='test')
treadmill.appcfg.abort.flag_aborted.assert_called_with(
container_dir,
app_abort.AbortedReason.INVALID_TYPE,
'test'
)
treadmill.supervisor.control_service.assert_called_with(
os.path.join(self.root, 'apps', 'proid.myapp#001'),
treadmill.supervisor.ServiceControlAction.down
)
def test_flag_aborted(self):
"""Tests flag abort sequence."""
container_dir = os.path.join(self.root, 'apps', 'proid.myapp#001',
'data')
fs.mkdir_safe(container_dir)
app_abort.flag_aborted(container_dir,
why=app_abort.AbortedReason.INVALID_TYPE,
payload='test')
aborted_file = os.path.join(container_dir, 'aborted')
with io.open(aborted_file) as f:
aborted = json.load(f)
self.assertEqual('invalid_type', aborted.get('why'))
self.assertEqual('test', aborted.get('payload'))
@mock.patch('kazoo.client.KazooClient.exists', mock.Mock())
@mock.patch('kazoo.client.KazooClient.create', mock.Mock())
@mock.patch('kazoo.client.KazooClient.delete', mock.Mock())
@mock.patch('kazoo.client.KazooClient.get_children', mock.Mock())
@mock.patch('treadmill.appevents.post', mock.Mock())
@mock.patch('treadmill.sysinfo.hostname',
mock.Mock(return_value='xxx.xx.com'))
@mock.patch('treadmill.zkutils.connect', mock.Mock())
@mock.patch('treadmill.zkutils.put', mock.Mock())
def test_report_aborted(self):
"""Tests report abort sequence."""
context.GLOBAL.zk.url = 'zookeeper://xxx@hhh:123/treadmill/mycell'
treadmill.zkutils.connect.return_value = kazoo.client.KazooClient()
kazoo.client.KazooClient.get_children.return_value = []
kazoo.client.KazooClient.exists.return_value = True
kazoo.client.KazooClient.create.reset()
kazoo.client.KazooClient.delete.reset()
app_abort.report_aborted(self.tm_env, 'proid.myapp#001',
why=app_abort.AbortedReason.TICKETS,
payload='test')
treadmill.appevents.post.assert_called_with(
mock.ANY,
events.AbortedTraceEvent(
instanceid='proid.myapp#001',
why='tickets',
payload='test',
),
)
if __name__ == '__main__':
unittest.main()
|
Python 3.3.0 (v3.3.0:bd8afb90ebf2, Sep 29 2012, 01:25:11)
[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)] on darwin
Type "copyright", "credits" or "license()" for more information.
>>> WARNING: The version of Tcl/Tk (8.5.9) in use may be unstable.
Visit http://www.python.org/download/mac/tcltk/ for current information.
>>> graf = open('/Users/james/Documents/moose.txt')
>>> f.readlines()
Traceback (most recent call last):
File "<pyshell#3>", line 1, in <module>
f.readlines()
NameError: name 'f' is not defined
>>> graf.readlines
<built-in method readlines of _io.TextIOWrapper object at 0x102f65bb0>
>>> graf.readline
<built-in method readline of _io.TextIOWrapper object at 0x102f65bb0>
>>> graf2=open('/Users/james/Documents/moose2.txt')
>>> graf2.readlines
<built-in method readlines of _io.TextIOWrapper object at 0x102f65c90>
>>> graf2
<_io.TextIOWrapper name='/Users/james/Documents/moose2.txt' mode='r' encoding='US-ASCII'>
>>> test=graf.read()
>>> test
' \\ \\ / \\/ ___//\n \\_ / // \\] //~~~\n \\\\ ]] // //\n \\__\\ _]_\\_ _\\\\ __/\\//\n __ _____\\ /_\\// _\n __ _/ \\/~~~~~~\\/ \\__ //\n _/ [ ] \\/\n /[ / \\ ]\n / [(0 0)]\n / [ ]\n _________~ [ ]\n \\ < > /\n / \\______/\n ] (_)\n ] \n'
>>> test.readline
Traceback (most recent call last):
File "<pyshell#11>", line 1, in <module>
test.readline
AttributeError: 'str' object has no attribute 'readline'
>>> graf.readlines()
[]
>>>
>>> graf.readline()
''
>>> ''
''
>>>
>>>
>>>
>>>
>>>
>>> graf.readlines()
[]
>>>
>>> for line in graf:
print line,
SyntaxError: invalid syntax
>>> for x in graf:
print x
SyntaxError: invalid syntax
>>> for line in graf:
print(line)
>>>
>>>
>>> graf
<_io.TextIOWrapper name='/Users/james/Documents/moose.txt' mode='r' encoding='US-ASCII'>
>>> graf2
<_io.TextIOWrapper name='/Users/james/Documents/moose2.txt' mode='r' encoding='US-ASCII'>
>>> for line in graf2:
print(line)
\ \ / \/ ___//\n
\_ / // \] //~~~\n
\\ ]] // //\n
\__\ _]_\_ _\\ __/\//\n
__ _____\ /_\// _\n
__ _/ \/~~~~~~\/ \__ //\n
_/ [ ] \/\n
/[ / \ ]\n
/ [(0 0)]\n
/ [ ]\n
_________~ [ ]\n
\ < > /\n
/ \______/\n
] (_)\n
] \n
>>> for line in graf:
print(line)
>>>
>>>
>>>
>>>
>>> y = input()
blech
>>> y
'blech'
>>> print "Enter your guess.", input()
SyntaxError: invalid syntax
>>> y = input ("Enter your name: ")
Enter your name:
>>> y = input ("Enter your guess: ")
Enter your guess: r
>>> y
'r'
>>> word_list=['computer', 'memory', 'disk space', 'monitor', 'video card']
>>> import random
>>> chosen_puzzle = random.shuffle(word_list)
>>> chosen_puzzle
>>> chosen_puzzle = (word_list).shuffle
Traceback (most recent call last):
File "<pyshell#54>", line 1, in <module>
chosen_puzzle = (word_list).shuffle
AttributeError: 'list' object has no attribute 'shuffle'
>>> word_list
['disk space', 'monitor', 'video card', 'memory', 'computer']
>>> chosen_puzzle
>>> print(chosen_puzzle)
None
>>> from random import shuffle
>>> shuffle(world_list)
Traceback (most recent call last):
File "<pyshell#59>", line 1, in <module>
shuffle(world_list)
NameError: name 'world_list' is not defined
>>> shuffle(word_list)
>>> for x in word_list:
shuffle(x)
Traceback (most recent call last):
File "<pyshell#63>", line 2, in <module>
shuffle(x)
File "/Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/random.py", line 265, in shuffle
x[i], x[j] = x[j], x[i]
TypeError: 'str' object does not support item assignment
>>> import random
>>> chosen_puzzle = (random.randint(0,list_count))
Traceback (most recent call last):
File "<pyshell#65>", line 1, in <module>
chosen_puzzle = (random.randint(0,list_count))
NameError: name 'list_count' is not defined
>>> list_count=len(world_list)
Traceback (most recent call last):
File "<pyshell#66>", line 1, in <module>
list_count=len(world_list)
NameError: name 'world_list' is not defined
>>> list_count=len(word_list)
>>> chosen_puzzle = (random.randint(0,list_count))
>>> for i in word_list:
if i!= '':
print("_",end="")
else:
SyntaxError: invalid syntax
>>> for i in word_list:
if i!= '':
print("_",end="")
SyntaxError: unexpected indent
>>> for i in word_list:
if i != '':
print("_",end="")
else:
SyntaxError: invalid syntax
>>> for i in word_list:
if i != '':
print("_",end="")
else:
SyntaxError: invalid syntax
>>> for i in word_list:
if i != '':
print("_",end="")
else
SyntaxError: invalid syntax
>>> for i in word_list:
if i != '':
print("_ ",end="")
else:
SyntaxError: invalid syntax
>>> for i in chosen_puzzle:
if i != '':
print(_ ),end="")
SyntaxError: invalid syntax
>>> for i in chosen_puzzle:
if i != '':
print("_ ",end="")
else:
SyntaxError: invalid syntax
>>> print(chosen_puzzle)
5
>>> str = (word_list[chosen_puzzle])
Traceback (most recent call last):
File "<pyshell#87>", line 1, in <module>
str = (word_list[chosen_puzzle])
IndexError: list index out of range
>>> word_list
['video card', 'computer', 'monitor', 'disk space', 'memory']
>>> list_count
5
>>> chosen_puzzle = (random.randint(0,list_count))
>>> chosen_puzzle
3
>>> str = (word_list[chosen_puzzle])
>>> str
'disk space'
>>> for i in chosen_puzzle:
if i != '':
print("_ ",end="")
else:
SyntaxError: invalid syntax
>>>
>>> for i in str:
if i != '':
print("_ ",end="")
else:
SyntaxError: invalid syntax
>>> for i in str:
if i != '':
print("_ ", end="")
else:
SyntaxError: invalid syntax
>>> for i in str:
if i != ' ':
print( "_ ", end="")
else:
print (" ", end="")
_ _ _ _ _ _ _ _ _
>>> userguess = input("Enter your guess: ")
Enter your guess: b
>>>
>>>
>>> for y in str:
if y != ' ':
print( "_ ", end="")
else if y = userguess:
print(userguess)
else:
print (" ", end="")
SyntaxError: invalid syntax
>>> for y in str:
if y != ' ':
print( "_ ", end="")
else if y == userguess:
print(userguess)
else:
print (" ", end="")
SyntaxError: invalid syntax
>>> for y in str:
if y != ' ':
print( "_ ", end="")
else if y == 'userguess'
print(userguess)
else:
print (" ", end="")
SyntaxError: invalid syntax
>>> for y in str:
if y != ' ':
print( "_ ", end="")
elif y == userguess
print(userguess)
else:
print (" ", end="")
SyntaxError: invalid syntax
>>>
>>> for y in str:
if y != ' ':
print( "_ ", end="")
elif y == userguess:
print(userguess)
else:
print (" ", end="")
_ _ _ _ _ _ _ _ _
>>> userguess = input("Enter your guess: ")
Enter your guess: d
>>> for y in str:
if y != ' ':
print( "_ ", end="")
elif y == userguess:
print(userguess)
else:
print (" ", end="")
_ _ _ _ _ _ _ _ _
>>>
for y in str:
if y == userguess:
print(userguess)
elif y != ' ':
print( "_ ", end="")
else:
print (" ", end="")
SyntaxError: unindent does not match any outer indentation level
>>> for y in str:
if y == userguess:
print(userguess)
elif y != ' ':
print( "_ ", end="")
else:
print (" ", end="")
d
_ _ _ _ _ _ _ _
>>> for y in str:
if y == userguess:
print(userguess),
elif y != ' ':
print( "_ ", end="")
else:
print (" ", end="")
d
(None,)
_ _ _ _ _ _ _ _
>>> for y in str:
if y == userguess:
print(userguess,)
elif y != ' ':
print( "_ ", end="")
else:
print (" ", end="")
d
_ _ _ _ _ _ _ _
>>>
|
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import random
author = 'Your name here'
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'CTB'
players_per_group = None
num_rounds = 1
semanas = 5
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
rta_1 = models.StringField()
rta_2 = models.StringField()
rta_3 = models.StringField()
rta_4 = models.StringField()
rta_5 = models.StringField()
rta_6 = models.StringField()
rta_7 = models.StringField()
rta_8 = models.StringField()
rta_9 = models.StringField()
rta_10 = models.StringField()
rta_11 = models.StringField()
rta_12 = models.StringField()
rta_13 = models.StringField()
rta_14 = models.StringField()
rta_15 = models.StringField()
rta_16 = models.StringField()
rta_17 = models.StringField()
rta_18 = models.StringField()
rta_19 = models.StringField()
rta_20 = models.StringField()
rta_21 = models.StringField()
rta_22 = models.StringField()
rta_23 = models.StringField()
rta_24 = models.StringField()
|
# You are given a file containing the coordinates in 2D plane of 3 points of
# N triangles (line by line, comma-separated in each line).
#
# Task: Count the number of triangles that contain the origin.
MAX_LEN = 3
MAX_NUM = 6
def countNumTriangleContainOrigin():
    fileName = input('Enter the name of the file: ')
    count = 0
    pointOrigin = Point3D([])
    with open(fileName, 'r') as inputFile:
        for line in inputFile:
            arrStrNum = line.split(',')
            # Find the coordinates of the 3 points A, B, C of a triangle
            pointA = Point3D([int(arrStrNum[0]), int(arrStrNum[1])])
            pointB = Point3D([int(arrStrNum[2]), int(arrStrNum[3])])
            pointC = Point3D([int(arrStrNum[4]), int(arrStrNum[5])])
            if containPoint(pointA, pointB, pointC, pointOrigin):
                count += 1
    return count
# Return True if the triangle ABC contains the point D. Return False otherwise
def containPoint(pointA, pointB, pointC, pointD):
if isOnSameSide(pointA, pointB, pointC, pointD) and isOnSameSide(pointB, pointC, pointA, pointD) and isOnSameSide(pointA, pointC, pointB, pointD):
return True
return False
# Return True if point C and point D are on the same side relative to
# the straight line AB
# Return False otherwise
def isOnSameSide(pointA, pointB, pointC, pointD):
AB = Vector3D([pointA, pointB])
AC = Vector3D([pointA, pointC])
AD = Vector3D([pointA, pointD])
crossOne = AB.crossProduct(AC)
crossTwo = AB.crossProduct(AD)
if crossOne.dotProduct(crossTwo) >= 0:
return True
return False
class Point3D:
# Constructor
def __init__(self, inputTuple):
# Note that we only care the first 3 numbers in the tuple!
myLen = min(MAX_LEN, len(inputTuple))
self.coordArr = [0] * MAX_LEN
for index in range(0, myLen):
self.coordArr[index] = inputTuple[index]
def __repr__(self):
return "Point3D(%s, %s, %s)" % (self.coordArr[0], self.coordArr[1], self.coordArr[2])
def __str__(self):
return "(%f, %f, %f)" % (self.coordArr[0], self.coordArr[1], self.coordArr[2])
class Vector3D:
# Constructor
def __init__(self, listPoint):
numElement = len(listPoint)
if numElement == 0:
# Case 1: listPoint is empty. Then we create a zero vector
self.coordArr = [0] * MAX_LEN
elif numElement == 1:
# Case 2: there is 1 point A. Then we create the vector OA
if not isinstance(listPoint[0], Point3D):
raise TypeError(listPoint[0] + ' is not a Point3D object!')
self.coordArr = [0] * MAX_LEN
for index in range(0, MAX_LEN):
self.coordArr[index] = listPoint[0].coordArr[index]
else:
# Case 3: there are 2 points A and B. Then we create the vector AB
for index in range(0, 2):
if not isinstance(listPoint[index], Point3D):
raise TypeError(listPoint[index] + ' is not a Point3D object!')
self.coordArr = [0] * MAX_LEN
for index in range(0, MAX_LEN):
self.coordArr[index] = listPoint[1].coordArr[index] - listPoint[0].coordArr[index]
def __repr__(self):
return "Vector3D(%s, %s, %s)" % (self.coordArr[0], self.coordArr[1], self.coordArr[2])
def __str__(self):
return "(%f, %f, %f)" % (self.coordArr[0], self.coordArr[1], self.coordArr[2])
# Compute the sum of 2 vectors
def __add__(self, other):
sumVector = Vector3D([])
for index in range(0, MAX_LEN):
sumVector.coordArr[index] = self.coordArr[index] + other.coordArr[index]
return sumVector
# Compute the dot product of this vector and otherVector
def dotProduct(self, otherVector):
mySum = 0
for index in range(0, MAX_LEN):
mySum += self.coordArr[index] * otherVector.coordArr[index]
return mySum
# Compute the cross product of this vector and otherVector in 3D
# This is only applicable to 3D vectors
def crossProduct(self, otherVector):
crossVector = Vector3D([])
for index in range(0, MAX_LEN):
crossVector.coordArr[index] = self.coordArr[(index + 1) % 3] * otherVector.coordArr[(index + 2) % 3] - self.coordArr[(index + 2) % 3] * otherVector.coordArr[(index + 1) % 3]
return crossVector
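# Hedged usage sketch (mine, not part of the original file): the first triangle
# clearly surrounds the origin, the second clearly does not.
if __name__ == '__main__':
    origin = Point3D([])
    assert containPoint(Point3D([-1, -1]), Point3D([2, 0]), Point3D([0, 2]), origin)
    assert not containPoint(Point3D([1, 1]), Point3D([2, 1]), Point3D([1, 2]), origin)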
|
# quotes/urls.py
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from quotes import views
urlpatterns = [
# Examples:
# url(r'^$', 'myproject.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^(?P<quote_id>\d+)/download/$', views.download_quote_picture, name="download_quote_picture" ),
]
|
from math import ceil
def reindeer(presents):
assert 0 <= presents <= 180
return int(2 + (ceil(presents / 30.0)))
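# Hedged sanity check (my own examples, not from the kata): two reindeer pull the
# empty sleigh, plus one extra per 30 presents, rounded up.
assert reindeer(0) == 2
assert reindeer(1) == 3
assert reindeer(30) == 3
assert reindeer(31) == 4
assert reindeer(180) == 8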
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import logging
import evidence_pb2
import device_pb2
import systemconfig_pb2
import address_calculator
from sawtooth_sdk.processor.exceptions import InternalError
# Initialize logger
LOGGER = logging.getLogger(__name__)
# Addresses for global settings to check evidence validity
system_config_address = '5a7526f43437fca1d5f3d0381073ed3eec9ae42bf86988559e98009795a969919cbeca'
devices_address = '5a75264f03016f8dfef256580a4c6fdeeb5aa0ca8b4068e816a677e908c95b3bdd2150'
# Function to load the global system config
def fetchSystemConfig(context):
state_entries = context.get_state([system_config_address])
SystemConfig = systemconfig_pb2.Systemconfig()
try:
StoredSystemConfig = state_entries[0].data
SystemConfig.ParseFromString(StoredSystemConfig)
    except Exception:
raise InternalError('Failed to load system config')
return SystemConfig
def findDeviceProperties(context, device):
    deviceList = fetchDeviceList(context)
    if not deviceList.Device:
        LOGGER.info('Devices List is empty')
        return 0
    for Device in deviceList.Device:
        if device == Device.DeviceIdentity:
            return Device
    return 0
# Function to load the global device list
def fetchDeviceList(context):
state_entries = context.get_state([devices_address])
deviceList = device_pb2.DevicesList()
try:
StoredDeviceList = state_entries[0].data
deviceList.ParseFromString(StoredDeviceList)
    except Exception:
raise InternalError('Failed to load device list')
return deviceList
# Loads the right entry for evidence properties
# def findEvidenceProperties(context, evidence):
# propertiesList = fetchPropertiesList(context)
# if propertiesList == []:
# LOGGER.info('Properties List is empty')
# else:
# for properties in propertiesList.Properties:
# if evidence.Trustee == properties.AttestationType:
# return properties
# return
# Load the global Security Parameter
def loadSecurityParameter(context):
    SystemConfig = fetchSystemConfig(context)
    if not SystemConfig.ListFields():
        LOGGER.info('System Config is empty')
        return None
    return SystemConfig.SecurityParameter
# Delete an evidence from the global state
def _deleteEvidence(context, evidence):
address = address_calculator._assembleEvidenceStorageAddress(evidence)
state_entries = context.get_state([address])
evidenceList = evidence_pb2.EvidenceList()
newEvidenceList = evidence_pb2.EvidenceList()
if state_entries != []:
try:
StoredEvidenceList = state_entries[0].data
evidenceList.ParseFromString(StoredEvidenceList)
        except Exception:
raise InternalError('Failed to load state data - deleteEvidence')
for currentEvidence in evidenceList.Evidences:
if (currentEvidence != evidence):
newEvidenceList.Evidences.extend([currentEvidence])
state_data = newEvidenceList.SerializeToString()
addresses = context.set_state({address: state_data})
# check if data was actually written to addresses
if len(addresses) < 1:
raise InternalError("State Error")
# Add event submission
context.add_event(
event_type="attestation/evidence_deletion",
attributes=[("verifier", str(evidence.VerifierIdentity)), ("prover", str(evidence.ProverIdentity))])
|
from typing import List
from leetcode import test
def move_zeroes(nums: List[int]) -> None:
i = 0
for j, num in enumerate(nums):
if num != 0:
nums[i] = nums[j]
i += 1
while i < len(nums):
nums[i] = 0
i += 1
test(move_zeroes, [([0, 1, 0, 3, 12], [1, 3, 12, 0, 0])], actual_func=lambda t, _: t[1])
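# Hedged extra check (mine, not part of the original solution): the relative order
# of non-zero elements is preserved and zeroes are pushed to the tail, in place,
# in a single pass plus a fill loop.
example = [0, 0, 1]
move_zeroes(example)
assert example == [1, 0, 0]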
|
from bert_serving.client import BertClient
def get_bert_client():
# return BertClient(ip="192.168.86.176") # if in Aperture Science
return BertClient() # requires active SSH tunnel with local forwards on 5555 and 5556
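# Hedged usage sketch (assumes a bert-serving-server is reachable, e.g. through the
# SSH tunnel mentioned above): encode() takes a list of sentences and returns one
# fixed-size embedding per sentence.
if __name__ == "__main__":
    bc = get_bert_client()
    vectors = bc.encode(["the quick brown fox"])
    print(vectors.shape)  # (1, 768) for a BERT-base model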
|