| blob_id (string, 40 chars) | language (string, 1 class) | repo_name (string, 5-133 chars) | path (string, 2-333 chars) | src_encoding (string, 30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, 0-67 items) | license_type (string, 2 classes) | text (string, 12-5.47M chars) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
e6122138c275a7ca084bb1ca0a6f3523a11a7775
|
Python
|
SimonFans/LeetCode
|
/Design/L716_Max_Stack.py
|
UTF-8
| 2,019
| 4.3125
| 4
|
[] |
no_license
|
Design a max stack that supports push, pop, top, peekMax and popMax.
push(x) -- Push element x onto stack.
pop() -- Remove the element on top of the stack and return it.
top() -- Get the element on the top.
peekMax() -- Retrieve the maximum element in the stack.
popMax() -- Retrieve the maximum element in the stack, and remove it. If there is more than one maximum element, only remove the top-most one.
Example 1:
MaxStack stack = new MaxStack();
stack.push(5);
stack.push(1);
stack.push(5);
stack.top(); -> 5
stack.popMax(); -> 5
stack.top(); -> 1
stack.peekMax(); -> 5
stack.pop(); -> 1
stack.top(); -> 5
Note:
-1e7 <= x <= 1e7
Number of operations won't exceed 10000.
The last four operations won't be called when stack is empty.
class MaxStack:
def __init__(self):
"""
initialize your data structure here.
"""
        # Two stacks are needed: one for the normal pushes, one storing the maximum value so far
self.stack=[]
self.max_stack=[]
def push(self, x: int) -> None:
self.stack.append(x)
if len(self.max_stack)==0:
self.max_stack.append(x)
return
if self.max_stack[-1]>x:
self.max_stack.append(self.max_stack[-1])
else:
self.max_stack.append(x)
def pop(self) -> int:
if len(self.stack)!=0:
self.max_stack.pop(-1)
return self.stack.pop(-1)
def top(self) -> int:
return self.stack[-1]
def peekMax(self) -> int:
if len(self.max_stack) != 0:
return self.max_stack[-1]
def popMax(self) -> int:
val=self.peekMax()
buff=[]
while self.top()!=val:
buff.append(self.pop())
self.pop()
while len(buff)!=0:
self.push(buff.pop(-1))
return val
# Your MaxStack object will be instantiated and called as such:
# obj = MaxStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.peekMax()
# param_5 = obj.popMax()
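# A minimal sanity check mirroring Example 1 from the problem statement above
# (a sketch added for illustration, not part of the original solution):
if __name__ == "__main__":
    stack = MaxStack()
    stack.push(5)
    stack.push(1)
    stack.push(5)
    assert stack.top() == 5
    assert stack.popMax() == 5
    assert stack.top() == 1
    assert stack.peekMax() == 5
    assert stack.pop() == 1
    assert stack.top() == 5
    print("Example 1 sequence behaves as expected")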
| true
|
65519638472d945e59ebac62d7b48613add96684
|
Python
|
rettenls/ServerlessAggregation
|
/Generator/.~c9_invoke_HQovSy.py
|
UTF-8
| 6,527
| 2.703125
| 3
|
[] |
no_license
|
# General Imports
import random
import json
import hashlib
import time
import collections
import uuid
import sys
from pprint import pprint
# Multithreading
import logging
import threading
# AWS Imports
import boto3
# Project Imports
sys.path.append("../Common")
from functions import *
from constants import *
# Random Type Constructor
def random_type():
type_name = None
for type_level in type_levels:
if type_name is None:
type_name = random.choice(type_level)
elif type_name in type_level:
type_name += ":" + random.choice(type_level[type_name])
return type_name
# Aggregate along tree
def aggregate_along_tree(data):
# Determine aggregation depth
aggregation_depth = max([key.count(":") for key in data.keys()])
# Start at max depth and go higher
for depth in range(aggregation_depth, 0, -1):
children = [key for key in data.keys() if key.count(":") == depth]
for child in children:
parent = child[:child.rfind(":")]
add_to_dict_entry(data, parent, data[child])
return data
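# Illustrative example (hypothetical values; assumes add_to_dict_entry(d, k, v) from
# ../Common/functions adds v onto d[k], creating the key if it is missing):
#     aggregate_along_tree({"fruit": 0, "fruit:apple": 2, "fruit:pear": 3})
#     returns {"fruit": 5, "fruit:apple": 2, "fruit:pear": 3}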
def generate_messages(totals, print_to_console):
thread_state = dict()
thread_totals = dict()
for i in range(NUMBER_OF_BATCHES_PER_THREAD):
# The designated thread should print current progress
if print_to_console:
progress = (i / NUMBER_OF_BATCHES_PER_THREAD) * 100
print_progress_bar(progress)
# Initialize record list for this batch
records = []
        # Calculate the number of duplicates to add
if DUPLICATES_PER_BATCH < BATCH_SIZE:
number_of_duplicate_messages = DUPLICATES_PER_BATCH
else:
number_of_duplicate_messages = max(0, BATCH_SIZE - 1)
for j in range(BATCH_SIZE - number_of_duplicate_messages):
# Initialize Empty Message
message = {}
            # Random decision: PERCENTAGE_MODIFY% chance to modify an existing entry, otherwise create a new one
if len(thread_state) == 0 or random.uniform(0,100) < (100 - PERCENTAGE_MODIFY):
# Generate ID
message["id"] = str(uuid.uuid4())
# Add Version
message["version"] = 0
# Count
add_to_dict_entry(thread_totals, "count:add", 1)
else:
# Pick existing ID
message["id"] = random.choice(list(thread_state.keys()))
# Get New Version
if thread_state[message["id"]]["version"] == 0 or random.uniform(1,100) < (100 - PERCENTAGE_OUT_OR_ORDER):
# Iterate Version
message["version"] = thread_state[message["id"]]["version"] + 1
add_to_dict_entry(thread_totals, "count:modify:in_order", 1)
else:
print("\nOut or Order Message!\n")
# Insert Older Version
message["version"] = thread_state[message["id"]]["version"] - 1
add_to_dict_entry(thread_totals, "count:modify:out_of_order", 1)
# Add Random Value
v = random.randint(1, MAX_NUMBER_OF_ITEMS_PER_MESSAGE)
message["value"] = v
# Add Random Type
k = random_type()
message["type"] = k
# Dump to String
message_string = json.dumps(message)
# Append to Record List
record = {"Data" : message_string, "PartitionKey" : hashlib.sha256(message_string.encode()).hexdigest()}
records.append(record)
# Append to Internal Storage - if Message was sent in Order
if (message["id"] not in thread_state) or (thread_state[message["id"]]["version"] < message["version"]):
thread_state[message["id"]] = message
# Add Duplicates
for k in range(number_of_duplicate_messages):
duplicate_index = random.randint(0, BATCH_SIZE - number_of_duplicate_messages - 1)
records.append(records[duplicate_index])
add_to_dict_entry(thread_totals, "count:duplicates", number_of_duplicate_messages)
#Print records
if DEBUG > 1:
print('\n')
pprint(records)
print('\n')
# Send Batch to Kinesis Stream
batch_put_with_exp_backoff(kinesis_client, KINESIS_STREAM_NAME, records)
# Aggregate over Final State
for entry in thread_state.values():
k = entry['type']
v = entry['value']
add_to_dict_entry(thread_totals, k, v)
# Add to Totals
for k,v in thread_totals.items():
add_to_dict_entry(totals, k, v)
# Initialize Kinesis Consumer
kinesis_client = boto3.client("kinesis", region_name=REGION_NAME)
# Take start time
start_time = time.time()
# Print general info
print("\nGenerating items and writing to Kinesis...\n")
print("Example message: {'id': '0d957288-2913-4dbb-b359-5ec5ff732cac', 'version': 0, 'value': 1, 'type': 'vegetable:cucumber'}\n")
# Invoke Threads
totals = dict()
threads = list()
print("Invoking " + str(THREAD_NUM) + " threads...\n")
for index in range(THREAD_NUM):
x = threading.Thread(target=generate_messages, args=(totals, index == (THREAD_NUM - 1),))
threads.append(x)
x.start()
for index, thread in enumerate(threads):
thread.join()
print("\n\nAll threads finished.\n")
# Print to Console
end_time = time.time()
print("\nSimple Data producer finished!\nTotal number of messages: {}.\nTotal ingestion time: {:.1f} seconds.\nAverage ingeston rate: {:.1f} messages / second.".format(BATCH_SIZE * NUMBER_OF_BATCHES_PER_THREAD * THREAD_NUM, end_time - start_time, BATCH_SIZE * NUMBER_OF_BATCHES_PER_THREAD * THREAD_NUM / (end_time - start_time)))
# Print Totals to Compare
totals = aggregate_along_tree(totals)
ordered_totals = collections.OrderedDict(sorted(totals.items()))
print("\nMessage Counts:\n")
for k,v in ordered_totals.items():
if k[:5] == "count":
level = k.count(":")
print("{:<25}".format(k) + (" " * level) + "{:>10}".format(v))
print("\n")
print("\nTotals:\n")
for k,v in ordered_totals.items():
if k[:5] != "count":
level = k.count(":")
print("{:<25}".format(k) + (" " * level) + "{:>10}".format(v))
print("\n")
| true
|
fa0d3e13e2afcbefd0906b59cb2a407ef7e53dbf
|
Python
|
Birathan/tableBuilder
|
/listHelper.py
|
UTF-8
| 837
| 4.6875
| 5
|
[] |
no_license
|
def list_to_str(lis):
'''(list of str) -> str
This function returns the string representation of the list, with elements
    of the list separated by ', '.
>>> list_to_str(['a', 'b', 'c'])
'a, b, c'
>>> list_to_str([' a', 'b ', ' c '])
' a, b , c '
'''
text = ''
for element in lis:
text += (element+', ')
return text[:-2]
def clean_list(lis):
    '''(list of str) -> NoneType
    This function modifies the list in place so that each str is stripped of its
    leading and trailing spaces
>>> a = ['a', 'b', 'c']
>>> clean_list(a)
>>> a
['a', 'b', 'c']
>>> a = [' a', 'b ', ' c ']
>>> clean_list(a)
>>> a
['a', 'b', 'c']
'''
for i in range(0, len(lis)):
if type(lis[i]) == str:
lis[i] = lis[i].strip(' ')
| true
|
0c0fc170a788fead63ea462920587d794656168a
|
Python
|
rchicoli/ispycode-python
|
/Operating-System-Modules/OS-Module/Get-File-Stat.py
|
UTF-8
| 407
| 2.625
| 3
|
[] |
no_license
|
import stat
import time
import os
f = "example.py"
st = os.stat(f)
mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime = st
print("size: %s bytes" % size)
print("owner: %s %s" % (uid,gid) )
print("created: %s" % time.ctime(ctime))
print("last accessed: %s" % time.ctime(atime))
print("last modified: %s" % time.ctime(mtime))
print("mode: %s" % oct(mode))
print("inode/dev: %s %s" % (ino,dev))
| true
|
225a2126114d8ee5c936c8541c3e75a71917402f
|
Python
|
afcarl/python-tutorial-1
|
/week6/week6-hw-recursive.py
|
UTF-8
| 221
| 3.484375
| 3
|
[] |
no_license
|
def printAll(depth, l):
if depth == 0:
print(' '.join(l))
return
printAll(depth - 1, [] + l)
printAll(depth - 1, [str(depth)] + l)
while 1:
printAll(int(input('How many number? ')), [])
| true
|
b9845297700fef069fde6c364f256f91ca4e2311
|
Python
|
rrampage/udacity-code
|
/cs387/Problem Set 3/5.py
|
UTF-8
| 5,274
| 3.640625
| 4
|
[] |
no_license
|
# cs387 ; Problem Set 3 ; 5
# HW3-5 Version 1
# For this assignment you will be given all of the public information
# of a Diffie-Hellman key exchange plus the number of multiplications
# necessary to calculate (g**b)**a mod p, given g**b where `a` is
# Alice's private key and `b` is Bob's private key
#
# With this information, you should be able to determine Alice's
# private key and then decrypt the message - which is given at the
# bottom of this file
#
# If you think you've found a bug, post here -
# http://forums.udacity.com/cs387-april2012/questions/2188/hw3-challenge-problem-issues-and-bugs
# For other discussion of the problem, this topic is more appropriate -
# http://forums.udacity.com/cs387-april2012/questions/2190/hw3-challenge-problem-general-discussion
import string
#############
# p and g are public information
#
# 2 ** 100 - 153 is prime
# (from http://primes.utm.edu/lists/2small/0bit.html)
# and verified using Wolfram Alpha
p = 1267650600228229401496703205223
# primitive root (calculated using wolfram alpha)
g = 3
#############
# g_a, g_b are both transmitted public
# and easily intercepted by a passive eavesdropper
#
# g_a = g**a mod p
# g_b = g**b mod p
g_a = 142621255265782287951127214876
g_b = 609743693736442153553407144551
#############
# Unfortunately, for Alice, she is using a modular
# exponentiation function similar to the one discussed
# in lecture and we were able to count the number of
# multiplications used to calculate the key
n_multiplications = 26
############################
# This eliminates the recursion in the mod_exp
# shown in lecture
# and does bitwise operations
# to speed things up a bit
# but the number of multiplications stays
# the same
def mod_exp(a, b, q):
"""return a**b % q"""
val = 1
mult = a
while b > 0:
odd = b & 1 # bitwise and
if odd == 1:
val = (val * mult) % q
b -= 1
if b == 0:
break
mult = (mult * mult) % q
b = b >> 1 # bitwise divide by 2
return val
# `count_multiplications` might be useful
# to see if you've found an exponent that
# would require the same number multiplications
# as Alice's private key
def count_multiplications(exponent):
"""return the number of multiplications
necessary to raise a number to `exponent`"""
bits = convert_to_bits(exponent)
return len(bits) + sum(b for b in bits) - 2
# this is the encode function used to
# create the cipher text found at the bottom of the file
def encode(plaintext, key):
assert len(plaintext) <= len(key)
return [m^k for m, k in zip(plaintext, key)]
# use this function to decrypt the ciphertext
def decode(ciphertext, key):
assert len(ciphertext) <= len(key)
return [c^k for c,k in zip(ciphertext, key)]
# is_valid returns True if the input consist of valid
# characters (numbers, upper case A-Z and lower case a-z and space)
# The message still might be garbage, but this is a decent
# and reasonably fast preliminary filter
valid_chars = set(c for c in string.printable[:62])
valid_chars.add(' ')
def is_valid(decode_guess):
return (len(decode_guess) == 14 and
all(d in valid_chars for d in decode_guess))
# Below are the typical bit manipulation functions
# that you might find useful
# Note that ASCII_BITS is set to 7 for this problem
BITS = ('0', '1')
ASCII_BITS = 7
def display_bits(b):
"""converts list of {0, 1}* to string"""
return ''.join([BITS[e] for e in b])
def seq_to_bits(seq):
return [0 if b == '0' else 1 for b in seq]
def pad_bits(bits, pad):
"""pads seq with leading 0s up to length pad"""
assert len(bits) <= pad
return [0] * (pad - len(bits)) + bits
def convert_to_bits(n):
"""converts an integer `n` to bit array"""
result = []
if n == 0:
return [0]
while n > 0:
result = [(n % 2)] + result
n = n / 2
return result
def string_to_bits(s):
def chr_to_bit(c):
return pad_bits(convert_to_bits(ord(c)), ASCII_BITS)
return [b for group in
map(chr_to_bit, s)
for b in group]
def bits_to_char(b):
assert len(b) == ASCII_BITS
value = 0
for e in b:
value = (value * 2) + e
return chr(value)
def list_to_string(p):
return ''.join(p)
def bits_to_string(b):
return ''.join([bits_to_char(b[i:i + ASCII_BITS])
for i in range(0, len(b), ASCII_BITS)])
############
# `ciphertext` is the observed message exchanged between Alice
# and Bob - which is what you need to decrypt
#
# key = convert_to_bits(mod_exp(g_b, a, p))
# ciphertext = encode(string_to_bits(plaintext), key)
ciphertext = string_to_bits(' x\x0br\x1fu/W\x00gJ@h#')
###########
# `plaintext` is the variable you will need to set
# with the decrypted message
plaintext = "" # Your answer here
# Might be a useful test function.
# If you've calculated Alice's key
# and the plaintext, you can
# calculate a cipher-text to see
# if it matches the given `ciphertext`
def test(alices_key, plaintext):
key = convert_to_bits(mod_exp(g_b, alices_key, p))
test_cipher = encode(string_to_bits(plaintext), key)
return test_cipher == ciphertext
### uncomment to run
# print test(alices_key, plaintext)
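# A possible way to combine the helpers above (a sketch based on my reading of the
# assignment, not the author's solution): count_multiplications(e) returns
# bit_length(e) + popcount(e) - 2, so an exponent costing 26 multiplications
# satisfies bit_length + popcount == 28. Enumerate those exponents, check each
# against Alice's public value g_a, and try to decode the ciphertext.
from itertools import combinations

def candidate_exponents():
    # every exponent whose square-and-multiply cost is exactly 26 multiplications
    for length in range(14, 28):              # popcount = 28 - length must be <= length
        ones_below_msb = 28 - length - 1      # the most significant bit is always set
        for positions in combinations(range(length - 1), ones_below_msb):
            yield (1 << (length - 1)) + sum(1 << pos for pos in positions)

def find_key_and_plaintext():
    for a in candidate_exponents():
        if mod_exp(g, a, p) != g_a:           # must reproduce Alice's public value
            continue
        key = convert_to_bits(mod_exp(g_b, a, p))
        if len(key) < len(ciphertext):        # decode() asserts the key is long enough
            continue
        guess = bits_to_string(decode(ciphertext, key))
        if is_valid(guess):
            return a, guess
    return None, None
### uncomment to run
# print find_key_and_plaintext()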
| true
|
7337aaaeb23281d7365154b6592f348c8ac4820b
|
Python
|
Lehcs-py/guppe
|
/Seção_05/Exercício_13.py
|
UTF-8
| 782
| 4.625
| 5
|
[] |
no_license
|
print("""
13. Faça um algoritmo que calcule A média ponderada das notas de 3 provas. A primeira e A segunda têm peso 1 e A terceira tem peso 2. Ao final,
mostrar A média do aluno e indicar se o aluno foi aprovado ou reprovado. A nota para aprovação deve ser igual ou superior A 60 pontos.
""")
print('Intervalo: 0 até 100')
nota1 = float(input('Insira A primeira nota: '))
nota2 = float(input('Insira A segunda nota: '))
nota3 = float(input('Insira A terceira nota: '))
media = ((nota1 * 1) + (nota2 * 1) + (nota3 * 2)) / 4
if media < 60:
print(f'Sua média {media} é insuficiente, está reprovado.')
elif media >= 60:
print(f'Você atingiu o objetivo com A nota {media}, está aprovado.')
else:
print('Você inseriu notas inválidas.')
| true
|
0a3e8310764a4a302735ebed8af1368ae21253ba
|
Python
|
IamUttamKumarRoy/python-start
|
/python_if_else.py
|
UTF-8
| 303
| 3.828125
| 4
|
["Apache-2.0"] |
permissive
|
amount=int(input("Enter amount: "))
if amount<1000:
discount=amount*0.05
print ("Discount",discount)
elif amount<5000:
discount=amount*0.10
print ("Discount",discount)
else:
discount=amount*0.15
print ("Discount",discount)
print ("Net payable:",amount-discount)
| true
|
2ac93d40bd2db8f41fb5d920911543c7d37f154c
|
Python
|
Mithrilwoodrat/coffesploit
|
/src/coffesploit/core/helpmanager.py
|
UTF-8
| 1,109
| 2.875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
class HelpManager(object):
"""class to print help informations help users use coffesploit"""
def __init__(self):
self.help_list = {"show": self.help_show,
"use": self.help_use,
"target": self.help_set_tartget
}
def help_show(self):
print """Usage:show [target|<tool>]
exp:show nmap"""
def help_use(self):
print """Usage:use <tool>
exp:use nmap"""
    def help_set_target(self):
print """Usage:target target_address
exp: target 192.168.1.2 """
def main_help(self):
print """Welcome to Coffesploit
'help target' show help about target set
'help show' show help about 'show' options
'plugins' show plugins list
or just type sh command to exec
"""
def help_set(self):
pass
def gethelp(self, arg):
if arg is None:
self.main_help()
        elif arg in self.help_list:
self.help_list[arg]()
else:
print "no help info about",arg
| true
|
7386438517633f0c8183f8d8648f2e769dfa86e1
|
Python
|
patback66/COEN169
|
/projects/project2/recs.py
|
UTF-8
| 34,745
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
"""
@author Matthew Koken <mkoken@scu.edu>
@file recs.py
This file takes tab delimited txt files for users and their movie ratings.
Based on the users and ratings, recommendations are given.
"""
#pin 3224592822747566
import csv
import math
import sys as Sys
import time
import numpy
# Globals here
# Could go in main + pass along, but this is easier/cleaner for now
REC_ROW_USERS = 200
REC_COL_MOVIES = 1000
RATING_MAX = 5
TRAIN_RECS = [[0 for x in range(REC_COL_MOVIES)] for y in range(REC_ROW_USERS)]
USER_RATINGS = [] #U M R, U = [0][x], M = [1][x], R = [2][x]
PREDICTED_RATINGS = []
DEBUG = 0 #disable progress bar
###################
#Class Definitions#
###################
class Algorithms:
"""
@class Algorithms
Holds the possible algorithm selections to be used.
"""
pearson, pearson_iuf, pearson_case, cosine_sim, item_cos, custom = range(6)
class User:
"""
    @class User
Holds movie ratings for co-rated movies and similarity weights.
"""
def calc_avg(self, ratings):
if len(ratings) > 0:
if self.needed_rating != 0:
return float(sum(ratings) + self.needed_rating) / float(len(ratings) + 1)
else:
return float(sum(ratings)) / float(len(ratings))
else:
if self.needed_rating != 0:
return self.needed_rating
else:
return 0
def calc_std_dev(self, ratings):
variance = map(lambda x: (x - self.average)**2, ratings)
avg_variance = (float(sum(variance)) / len(variance))
return math.sqrt(avg_variance)
def set_similarity(self, similarity):
self.similarity = similarity
def __init__(self, similarity, corated_movie_ratings, needed_rating):
self.similarity = similarity
self.corated_movie_ratings = corated_movie_ratings
self.needed_rating = needed_rating
self.average = self.calc_avg(corated_movie_ratings)
self.std_dev = self.calc_std_dev(corated_movie_ratings)
class Movie:
"""
@class Movie
Holds movie ratings for user ratings and similarity weights.
"""
def calc_avg(self, ratings):
if len(ratings) > 0:
if self.needed_rating != 0:
return float(sum(ratings) + self.needed_rating) / float(len(ratings) + 1)
else:
return float(sum(ratings)) / float(len(ratings))
else:
if self.needed_rating != 0:
return self.needed_rating
else:
return 0
def __init__(self, m_id, similarity, user_ratings, needed_rating):
self.m_id = m_id
self.similarity = similarity
self.ratings = user_ratings
self.needed_rating = needed_rating
self.average = self.calc_avg(self.ratings)
def set_similarity(self, similarity):
self.similarity = similarity
def append_rating(self, new_rating):
self.ratings.append(new_rating)
def recalc(self):
self.average = self.calc_avg(self.ratings)
######################
#Function Definitions#
######################
def clear_console():
print "\n" * 180
# result5.txt consists a list of predictions in the form:
# (U, M, R),
# where U is the userid, M is the movieid, and R is your predicted rating.
# ID's OFFSET BY 1: [1:200]
def write_file(file_out, data, delim):
"""
@function write_recs
@param file_out: string for the file to write
@param data: array of data to be written
Writes values of given array to the specified results file
"""
with open(file_out, "wb") as out_file:
writer = csv.writer(out_file, delimiter=delim)
for item in data:
writer.writerow(item)
def int_wrapper(reader):
"""
@function int_wrapper
@param reader: a csv reader
Maps the string content read by the reader to an int value
"""
for v in reader:
yield map(int, v)
def read_in(file_to_read, delim):
"""
@function read_in
@param file: the file that will be imported
Returns an array of integers read in from the tab delimited file.
"""
data = []
with open(file_to_read, "rU") as in_file:
reader = csv.reader(in_file, delimiter=delim)
reader = int_wrapper(reader)
data = list(reader)
return data
def pearson_sim(cur_user=None, test_user=None):
"""
@Function pearson_sim
@param cur_user: the current user we want to compare to
@param test_user: the user we want to compare to
Calculates the similarity between users using the pearson method
"""
numerator = 0.0
len1 = 0.0
len2 = 0.0
for index in range(len(cur_user.corated_movie_ratings)):
if cur_user.corated_movie_ratings[index] != 0 and test_user.corated_movie_ratings[index] != 0:
diff_1 = (cur_user.corated_movie_ratings[index] - cur_user.average)
diff_2 = (test_user.corated_movie_ratings[index] - test_user.average)
numerator += (float(diff_1) * float(diff_2))
len1 += float(diff_1 * diff_1)
len2 += float(diff_2 * diff_2)
denominator = (math.sqrt(len1) * math.sqrt(len2))
# Don't break, just no similarity if denominator = 0
if denominator == 0:
return 0
#final calc for similarity
return float(numerator) / float(denominator)
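# For reference (a description of the code above and below, not a change to it):
# pearson_sim() computes the usual Pearson weight over the co-rated movies i,
#     w(a, u) = sum_i (r_ai - avg_a) * (r_ui - avg_u)
#               / ( sqrt(sum_i (r_ai - avg_a)**2) * sqrt(sum_i (r_ui - avg_u)**2) )
# and pearson() below turns the most similar users into the prediction
#     p(a, m) = avg_a + sum_u w(a, u) * (r_um - avg_u) / sum_u |w(a, u)|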
def pearson(user_id=None, movie_id=None):
"""
@function pearson
@param user_id: id of the user that we will be predicting for
@param movie_id: id of the movie that will be given a predicted rating
Uses pearson correlation to calculate a predicted rating for the movie.
"""
#calculate the user's standard deviation
#cur_user_std_dev = 0
#for rating in cur_user:
# cur_user_std_dev += (rating - cur_user_average) * (rating - cur_user_average)
#cur_user_std_dev = math.sqrt(cur_user_std_dev)
#global TRAIN_RECS
#global USER_RATINGS
#global PREDICTED_RATINGS
#global RATING_MAX
num_similar = 30
rating = 0
relevant_users = []
cur_user_ratings = []
cur_user_rated_movies = []
#calculate average for the current user
num_rated_movies = 0
cur_user_average = 0.0
for recs in USER_RATINGS:
# [U, M, R] -> [0, 1, 2]
if recs[0] == user_id and recs[2] != 0 and num_rated_movies < RATING_MAX:
cur_user_average += recs[2]
cur_user_ratings.append(recs[2])
num_rated_movies += 1
cur_user_rated_movies.append(recs[1])
cur_user_average = float(cur_user_average)/float(num_rated_movies)
cur_user = User(0, cur_user_ratings, 0)
#find the most similar users
for index, user in enumerate(TRAIN_RECS):
sim_user_ratings = []
#get ratings that other user has also rated
for movie in cur_user_rated_movies:
sim_user_ratings.append(TRAIN_RECS[index][movie - 1])
#sim user has a similarity weight and a rating for movie_id
needed_rating = TRAIN_RECS[index][movie_id - 1]
sim_user = User(0, sim_user_ratings, needed_rating)
        # calculate similarity
w_a_u = pearson_sim(cur_user, sim_user)
sim_user.set_similarity(w_a_u)
#keep only the k most relevant users
if len(relevant_users) < num_similar:
relevant_users.append(sim_user)
else:
saved_index = -1
for idx in range(len(relevant_users)):
if sim_user.similarity > relevant_users[idx].similarity:
saved_index = idx
if saved_index !=-1:
relevant_users[saved_index] = sim_user
#have the most similar users, now calculate
numerator = 0.0
denominator = 0.0
for user in relevant_users:
#user: [w_a_u, r_a_i]
w_a_u = user.similarity
r_a_i = user.needed_rating
numerator += (float(w_a_u) * float(r_a_i - user.average))
denominator += abs(float(w_a_u))
if denominator != 0:
#rounding too early here?
rating = cur_user.average + (float(numerator)/float(denominator))
rating = round(rating)
#default to the user's average rating
if rating == 0:
rating = round(cur_user.average)
if rating > 5: # don't exceed the max rating
rating = 5
#cleanup
del relevant_users[:]
del cur_user_ratings[:]
del cur_user_rated_movies[:]
return int(rating)
def get_num_ratings(movie_id, cur_user_rated):
"""
@function get_num_ratings
@param movie_id: the id for a movie. [1,1000]
Returns the number of users that have rated the movie.
"""
num_ratings = cur_user_rated
for user in TRAIN_RECS:
if user[movie_id - 1] != 0:
num_ratings += 1
return num_ratings
def get_iuf(movie_id, cur_user_rated):
"""
@function get_iuf
@param movie_id: the id for a movie
Returns the iuf of a movie
"""
#IUF(j) = log(m/m_j)
#m = number of users
#m_j = number of users that rated movie j
m = 201 #the number of users
m_j = get_num_ratings(movie_id, cur_user_rated)
if m_j != 0:
iuf = math.log((float(m)/float(m_j)), 2)
else:
iuf = 0
return iuf
def pearson_iuf_sim(cur_user=None, test_user=None):
"""
@Function pearson_sim
@param cur_user: the current user we want to compare to
@param test_user: the user we want to compare to
Calculates the similarity between users using the pearson method
"""
numerator = 0.0
len1 = 0.0
len2 = 0.0
for index in range(len(cur_user.corated_movie_ratings)):
if cur_user.corated_movie_ratings[index] != 0 and test_user.corated_movie_ratings[index] != 0:
iuf1 = get_iuf(cur_user.corated_movie_ratings[index], 1)
iuf2 = get_iuf(test_user.corated_movie_ratings[index], 1)
diff_1 = (iuf1 * cur_user.corated_movie_ratings[index] - cur_user.average)
diff_2 = (iuf2 * test_user.corated_movie_ratings[index] - test_user.average)
numerator += (float(diff_1) * float(diff_2))
len1 += float(diff_1 * diff_1)
len2 += float(diff_2 * diff_2)
denominator = (math.sqrt(len1) * math.sqrt(len2))
# Don't break, just no similarity if denominator = 0
if denominator == 0:
return 0
#final calc for similarity
return float(numerator) / float(denominator)
def pearson_iuf(user_id=None, movie_id=None):
"""
@function pearson_iuf
@param user_id: the id of the user that needs a movie rating prediction
@param movie_id: the movie id that the user needs a rating for
Uses the pearson method to predict user ratings, with the addition
of IUF modification.
"""
#calculate the user's standard deviation
#cur_user_std_dev = 0
#for rating in cur_user:
# cur_user_std_dev += (rating - cur_user_average) * (rating - cur_user_average)
#cur_user_std_dev = math.sqrt(cur_user_std_dev)
#global TRAIN_RECS
#global USER_RATINGS
#global PREDICTED_RATINGS
#global RATING_MAX
num_similar = 50
rating = 0
relevant_users = []
cur_user_ratings = []
cur_user_rated_movies = []
#calculate average for the current user
num_rated_movies = 0
#cur_user_average = 0.0
for recs in USER_RATINGS:
# [U, M, R] -> [0, 1, 2]
if recs[0] == user_id and recs[2] != 0 and num_rated_movies < RATING_MAX:
#cur_user_average += recs[2]
#iuf = get_iuf(recs[1], 1) #the user has rated the movie
cur_user_ratings.append(recs[2])
num_rated_movies += 1
cur_user_rated_movies.append(recs[1])
#cur_user_average = float(cur_user_average)/float(num_rated_movies)
cur_user = User(0, cur_user_ratings, 0)
#find the most similar users
for index, user in enumerate(TRAIN_RECS):
sim_user_ratings = []
#get ratings that other user has also rated
for movie in cur_user_rated_movies:
#IUF(j) = log(m/m_j)
#m = number of users
#m_j = number of users that rated movie j
#iuf = get_iuf(movie - 1, 1) #the user has rated the movie
user_rating = TRAIN_RECS[index][movie - 1]
sim_user_ratings.append(user_rating)
#sim user has a similarity weight and a rating for movie_id
#iuf = get_iuf(movie_id - 1, 0) #the user has not rated - needs a prediction
needed_rating = TRAIN_RECS[index][movie_id - 1]
sim_user = User(0, sim_user_ratings, needed_rating)
        # calculate similarity
w_a_u = pearson_iuf_sim(cur_user, sim_user)
sim_user.set_similarity(w_a_u)
#keep only the k most relevant users
if len(relevant_users) < num_similar:
relevant_users.append(sim_user)
else:
saved_index = -1
for idx in range(len(relevant_users)):
if sim_user.similarity > relevant_users[idx].similarity:
saved_index = idx
if saved_index !=-1:
relevant_users[saved_index] = sim_user
#have the most similar users, now calculate
numerator = 0.0
denominator = 0.0
for user in relevant_users:
#user: [w_a_u, r_a_i]
w_a_u = user.similarity
r_a_i = user.needed_rating
numerator += (float(w_a_u) * float((r_a_i - user.average)))
denominator += abs(float(w_a_u))
if denominator != 0:
#rounding too early here?
rating = cur_user.average + (float(numerator)/float(denominator))
rating = round(rating)
#default to the user's average rating
if rating == 0:
rating = round(cur_user.average)
if rating > 5: # don't exceed the max rating
rating = 5
#cleanup
del relevant_users[:]
del cur_user_ratings[:]
del cur_user_rated_movies[:]
return int(rating)
def pearson_case(user_id=None, movie_id=None):
"""
@function pearson_case
@param user_id: the id of the active user
@param movie_id: the id of the movie for which the active user needs a prediction
Pearson using case amplification.
"""
#global TRAIN_RECS
#global USER_RATINGS
#global PREDICTED_RATINGS
#global RATING_MAX
rho = 3.5
num_similar = 30
rating = 0
relevant_users = []
cur_user_ratings = []
cur_user_rated_movies = []
#calculate average for the current user
num_rated_movies = 0
#cur_user_average = 0.0
for recs in USER_RATINGS:
# [U, M, R] -> [0, 1, 2]
if recs[0] == user_id and recs[2] != 0 and num_rated_movies < RATING_MAX:
#cur_user_average += recs[2]
cur_user_ratings.append(recs[2])
num_rated_movies += 1
cur_user_rated_movies.append(recs[1])
#cur_user_average = float(cur_user_average)/float(num_rated_movies)
cur_user = User(0, cur_user_ratings, 0)
#find the most similar users
for index, user in enumerate(TRAIN_RECS):
sim_user_ratings = []
#get ratings that other user has also rated
for movie in cur_user_rated_movies:
sim_user_ratings.append(TRAIN_RECS[index][movie - 1])
#sim user has a similarity weight and a rating for movie_id
needed_rating = TRAIN_RECS[index][movie_id - 1]
sim_user = User(0, sim_user_ratings, needed_rating)
        # calculate similarity
w_a_u = pearson_sim(cur_user, sim_user)
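        # Case amplification (describing the line below): w' = w * |w|**(rho - 1),
        # which shrinks weak similarities while leaving weights near +/-1 almost unchanged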
w_amplified = w_a_u * math.pow(abs(w_a_u), rho - 1)
sim_user.set_similarity(w_amplified)
#keep only the k most relevant users
if len(relevant_users) < num_similar:
relevant_users.append(sim_user)
else:
saved_index = -1
for idx in range(len(relevant_users)):
if sim_user.similarity > relevant_users[idx].similarity:
saved_index = idx
if saved_index !=-1:
relevant_users[saved_index] = sim_user
#have the most similar users, now calculate
numerator = 0.0
denominator = 0.0
for user in relevant_users:
#user: [w_a_u, r_a_i]
w_a_u = user.similarity
r_a_i = user.needed_rating
numerator += (float(w_a_u) * float(r_a_i - user.average))
denominator += abs(float(w_a_u))
if denominator != 0:
#rounding too early here?
rating = cur_user.average + (float(numerator)/float(denominator))
rating = round(rating)
#default to the user's average rating
if rating == 0:
rating = round(cur_user.average)
if rating > 5: # don't exceed the max rating
rating = 5
#cleanup
del relevant_users[:]
del cur_user_ratings[:]
del cur_user_rated_movies[:]
return int(rating)
def cosine_calc(user1=None, user2=None):
"""
@function cosine_calc
@param user1: a list of user movie ratings
@param user2: a list of user movie ratings
Calculates the cosine similarity between lists of user ratings as long
as both users have rated the same movie.
"""
# cosine sim = AdotB/(len(A) * len(B))
# dot product = sum of multiplications, but only for shared ratings:
# A[0] * B[0] + A[1] * B[1] + ... + A[n-1] * B[n-1]
#dot_product = sum([user1[i]*user2[i] for i in range(len(user2))])
dot_product = 0.0
len_1 = 0.0
len_2 = 0.0
#using adjusted cosine
for idx in range(len(user2.corated_movie_ratings)):
#if both have provided ratings for the same, then this is a valid point
if user1.corated_movie_ratings[idx] != 0 and user2.corated_movie_ratings[idx] != 0:
diff1 = user1.corated_movie_ratings[idx] - user1.average
diff2 = user2.corated_movie_ratings[idx] - user2.average
dot_product += (diff1 * diff2)
len_1 += (diff1 * diff1)
len_2 += (diff2 * diff2)
# length of vector = sqrt(A[0]*A[0] + A[1]*A[1] + ... + A[n]*A[n])
len_1 = math.sqrt(float(len_1))
len_2 = math.sqrt(float(len_2))
#vectors of length 0 break, aren't relevant
if len_1 == 0 or len_2 == 0:
return 0
return float(dot_product) / float((len_1 * len_2))
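# For reference: despite its name, cosine_calc() above computes an adjusted cosine
# weight (ratings are centered on each user's average over the co-rated movies),
#     w(a, u) = sum_i (r_ai - avg_a) * (r_ui - avg_u) / ( ||r_a - avg_a|| * ||r_u - avg_u|| )
# cosine_sim() below then combines the neighbours' raw ratings r_um weighted by w(a, u).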
#cosine similarity
def cosine_sim(user_id=None, movie_id=None):
"""
@function cosine_sim
@param user_id: the id of the user in USER_RATINGS that we are predicting for
@param movie_id the id of the movie we are predicting for
Uses cosine similarity to calculate the weight for predicting a movie rating.
"""
#global TRAIN_RECS
#global USER_RATINGS
#global PREDICTED_RATINGS
#global RATING_MAX
num_similar = 30
rating = 0
relevant_users = []
cur_user_ratings = []
cur_user_rated_movies = []
#calculate average for the current user
num_rated_movies = 0
cur_user_average = 0.0
for recs in USER_RATINGS:
# [U, M, R] -> [0, 1, 2]
if recs[0] == user_id and recs[2] != 0 and num_rated_movies < RATING_MAX:
cur_user_average += recs[2]
cur_user_ratings.append(recs[2])
num_rated_movies += 1
cur_user_rated_movies.append(recs[1])
cur_user_average = float(cur_user_average)/float(num_rated_movies)
cur_user = User(0, cur_user_ratings, 0)
#find the most similar users
for index, user in enumerate(TRAIN_RECS):
sim_user_ratings = []
#get ratings that other user has also rated
for movie in cur_user_rated_movies:
sim_user_ratings.append(TRAIN_RECS[index][movie - 1])
#sim user has a similarity weight and a rating for movie_id
needed_rating = TRAIN_RECS[index][movie_id - 1]
sim_user = User(0, sim_user_ratings, needed_rating)
        # calculate similarity
w_a_u = cosine_calc(cur_user, sim_user)
sim_user.set_similarity(w_a_u)
#keep only the k most relevant users
if len(relevant_users) < num_similar:
relevant_users.append(sim_user)
else:
saved_index = -1
for idx in range(len(relevant_users)):
if sim_user.similarity > relevant_users[idx].similarity:
saved_index = idx
if saved_index !=-1:
relevant_users[saved_index] = sim_user
#have the most similar users, now calculate
numerator = 0.0
denominator = 0.0
for user in relevant_users:
w_a_u = user.similarity
r_a_i = user.needed_rating
numerator += (float(w_a_u) * float(r_a_i))
denominator += abs(float(w_a_u))
if denominator != 0:
#rounding too early here?
rating = cur_user.average + (float(numerator)/float(denominator))
rating = round(rating)
#default to the user's average rating
if rating == 0:
rating = round(cur_user.average)
if rating > 5: # don't exceed the max rating
rating = 5
#cleanup
del relevant_users[:]
del cur_user_ratings[:]
del cur_user_rated_movies[:]
return int(rating)
def item_adjs_cos(movie1=None, movie2=None, r_u_avgs=None, r_a_avg=None):
"""
@function item_adj_cos
@param movie1: the movie we want to compare against
@param movie2: the movie we are comparing too
@param r_u_avgs: the averages of user ratings in TRAIN_RECS
Uses adjusted cosine similarity
"""
numerator = 0.0
len1 = 0.0
len2 = 0.0
#sum((r_u_i - r_u_avg) * (r_u_j - r_u_avg)
for index in range(len(movie1.ratings)):
if(movie1.ratings[index]!=0 and movie2.ratings[index]!=0):
diff1 = movie1.ratings[index] - r_a_avg
diff2 = movie2.ratings[index] - r_u_avgs[index]
numerator += diff1 * diff2
len1 += diff1 * diff1
len2 += diff2 * diff2
len1 = math.sqrt(len1)
len2 = math.sqrt(len2)
if len1 == 0 or len2 == 0:
return 0
return float(numerator) / float(len1 * len2)
def item_cos(user_id=None, movie_id=None):
"""
@function item_cos
@param user_id: The id of the user to be predicted for
@param movie_id: The id of the movie that the user will predict for
Uses item based comparison with adjusted cosine similarity to predict
    a rating for the user. Compares the user's previously rated movies against
the movie that is to be predicted for.
"""
#global TRAIN_RECS
#global USER_RATINGS
#global PREDICTED_RATINGS
#global RATING_MAX
rating = 0
cur_user_ratings = []
cur_user_rated_movies = []
rated_movies = []
rel_user_ratings_averages = [0 for x in range(200)]
num_rated_movies = 0
for recs in USER_RATINGS:
# [U, M, R] -> [0, 1, 2]
if recs[0] == user_id and recs[2] != 0 and num_rated_movies < RATING_MAX:
cur_user_ratings.append(recs[2])
num_rated_movies += 1
cur_user_rated_movies.append(recs[1])
rated_movies.append(Movie(recs[1], 0, [], recs[2]))
cur_user = User(0, cur_user_ratings, 0)
needed_movie = Movie(movie_id, 0, [], 0)
# build ratings lists for the movies
for user in TRAIN_RECS:
for idx, movie in enumerate(rated_movies):
rated_movies[idx].append_rating(user[movie.m_id - 1])
needed_movie.append_rating(user[movie_id - 1])
# recalc averages, etc
needed_movie.recalc()
for index in range(len(rated_movies)):
rated_movies[index].recalc()
#calc user averages
for index, user in enumerate(TRAIN_RECS):
avg = numpy.average(user)
rel_user_ratings_averages[index] = avg
#find the most similar items
for index, check_movie in enumerate(rated_movies):
#caclulate similarity
w_a_u = item_adjs_cos(needed_movie, check_movie, rel_user_ratings_averages, cur_user.average)
rated_movies[index].set_similarity(w_a_u)
#have the most similar users, now calculate
numerator = 0.0
denominator = 0.0
for movie in rated_movies:
w_a_u = movie.similarity
r_a_i = movie.needed_rating
numerator += (float(w_a_u) * float(r_a_i))
denominator += abs(float(w_a_u))
if denominator != 0:
#rounding too early here?
rating = cur_user.average + (float(numerator)/float(denominator))
rating = round(rating)
#default to the user's average rating
if rating <= 0:
rating = round(cur_user.average)
if rating > 5: # don't exceed the max rating
rating = 5
return int(rating)
def euclidean_distance(user1, user2):
"""
@function euclidean_distance
@param user1: The current user
@param user2: The user to be compared against
Calculates the euclidean distance between two vectors of user ratings.
Similarity = 1/(distance + 1)
"""
cur_sum = 0
for idx in range(len(user2.corated_movie_ratings)):
#if both have provided ratings for the same, then this is a valid point
if user1.corated_movie_ratings[idx] != 0 and user2.corated_movie_ratings[idx] != 0:
diff = abs(user1.corated_movie_ratings[idx] - user2.corated_movie_ratings[idx])
cur_sum += (diff * diff)
distance = math.sqrt(cur_sum)
return distance
def euclidean_custom(user_id=None, movie_id=None):
"""
@function euclidean_custom
@param user_id: the id of the user to be predicted for.
    @param movie_id: the id of the movie that is to receive a prediction
Uses euclidean distance to calculate similarity between movies in order
for predicting ratings.
"""
#similarity = 1/(d+1)
#global TRAIN_RECS
#global USER_RATINGS
#global PREDICTED_RATINGS
#global RATING_MAX
num_similar = 30
rating = 0
relevant_users = []
cur_user_ratings = []
cur_user_rated_movies = []
#calculate average for the current user
num_rated_movies = 0
cur_user_average = 0.0
for recs in USER_RATINGS:
# [U, M, R] -> [0, 1, 2]
if recs[0] == user_id and recs[2] != 0 and num_rated_movies < RATING_MAX:
cur_user_average += recs[2]
cur_user_ratings.append(recs[2])
num_rated_movies += 1
cur_user_rated_movies.append(recs[1])
cur_user_average = float(cur_user_average)/float(num_rated_movies)
cur_user = User(0, cur_user_ratings, 0)
#find the most similar users
for index, user in enumerate(TRAIN_RECS):
sim_user_ratings = []
#get ratings that other user has also rated
for movie in cur_user_rated_movies:
sim_user_ratings.append(TRAIN_RECS[index][movie - 1])
#sim user has a similarity weight and a rating for movie_id
needed_rating = TRAIN_RECS[index][movie_id - 1]
sim_user = User(0, sim_user_ratings, needed_rating)
        # calculate similarity
w_a_u = 1/float(euclidean_distance(cur_user, sim_user) + 1)
sim_user.set_similarity(w_a_u)
#keep only the k most relevant users
if len(relevant_users) < num_similar:
relevant_users.append(sim_user)
else:
saved_index = -1
for idx in range(len(relevant_users)):
if sim_user.similarity > relevant_users[idx].similarity:
saved_index = idx
if saved_index !=-1:
relevant_users[saved_index] = sim_user
#have the most similar users, now calculate
numerator = 0.0
denominator = 0.0
for user in relevant_users:
w_a_u = user.similarity
r_a_i = user.needed_rating
numerator += (float(w_a_u) * float(r_a_i))
denominator += abs(float(w_a_u))
if denominator != 0:
#rounding too early here?
rating = cur_user.average + (float(numerator)/float(denominator))
rating = round(rating)
#default to the user's average rating
if rating == 0:
rating = round(cur_user.average)
if rating > 5: # don't exceed the max rating
rating = 5
#cleanup
del relevant_users[:]
del cur_user_ratings[:]
del cur_user_rated_movies[:]
return int(rating)
def custom(user_id=None, movie_id=None):
"""
@function custom
@param user_id: the id of the user to be predicted for.
    @param movie_id: the id of the movie that is to receive a prediction
Uses a hybrid of cosine similarity ratings and euclidean_distance_ratings
to predict the new movie's rating.
"""
cosine_sim_rating = cosine_sim(user_id, movie_id)
euclidean_distance_rating = euclidean_custom(user_id, movie_id)
rating = round((euclidean_distance_rating + cosine_sim_rating) / 2.0)
#rating = euclidean_distance_rating
return int(rating)
def algo_driver(algo=None):
"""
@function algo_driver
@param algo: Algorithm class to specify which computation to perform
Main loop for calculations. Loops through all users to find users w/o ratings
(R = 0) and predicts their ratings using the specified algorithm
"""
global PREDICTED_RATINGS
global USER_RATINGS
start = time.time()
for index, rec in enumerate(USER_RATINGS):
#have the user id, movie id, and rating {USERID, MOVIEID, RATING}
rating = 0
#If there is no rating (rating = 0), predict using algo
if rec[2] == 0:
#calculate the predicted rating
if algo == Algorithms.cosine_sim:
rating = cosine_sim(rec[0], rec[1]) # userid, movieid
elif algo == Algorithms.pearson:
rating = pearson(rec[0], rec[1]) # userid, movieid
elif algo == Algorithms.pearson_iuf:
rating = pearson_iuf(rec[0], rec[1]) # userid, movieid
elif algo == Algorithms.pearson_case:
rating = pearson_case(rec[0], rec[1]) # userid, movieid
elif algo == Algorithms.item_cos:
rating = item_cos(rec[0], rec[1]) # userid, movieid
elif algo == Algorithms.custom:
rating = custom(rec[0], rec[1]) # userid, movieid
#update with the predicted rating
#USER_RATINGS[index][2] = rating
PREDICTED_RATINGS.append(([rec[0]] + [rec[1]] + [rating]))
#print "Movie being rated: " + str(rec[1])
#print rating
if DEBUG != 1:
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
elapsed = ("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
#Show progress, but not too often
filledLength = int(round(30 * index / float(len(USER_RATINGS))))
percents = round(100.00 * (index / float(len(USER_RATINGS))), 1)
bar = '#' * filledLength + '-' * (30 - filledLength)
Sys.stdout.write('%s [%s] %s%s %s\r' % ("Progress", bar, percents, '%', "done. Time elapsed: " + elapsed))
Sys.stdout.flush()
            if index == len(USER_RATINGS) - 1:
print "\n\n"
def main():
"""
@function main
The main loop, reads in the base train.txt file and gives options for
next files to import and perform analysis for recommendations.
"""
global TRAIN_RECS
global USER_RATINGS
global RATING_MAX
TRAIN_RECS = read_in("train.txt", "\t")
#Driver for importing files
response = 1
while response != 0:
option_text = """Which file would you like to test?
(1)test5.txt
(2)test10.txt
(3)test20.txt
(0) quit\n> """
response = input(option_text)
read_file = ""
out_file = ""
#no case switch so use if
if response == 1:
read_file = "test5.txt"
RATING_MAX = 5
out_file = "result5.txt"
elif response == 2:
read_file = "test10.txt"
RATING_MAX = 10
out_file = "result10.txt"
elif response == 3:
read_file = "test20.txt"
RATING_MAX = 20
out_file = "result20.txt"
elif response == 0: #exit condition
break
else:
print "Invalid option"
continue #didn't get a valid option, do not proceed, try again
# got a valid file, now proceed with import and recommendations
del USER_RATINGS[:]
del PREDICTED_RATINGS[:]
USER_RATINGS = read_in(read_file, " ")
#Driver for selecting math to perform
math_selection = 1
while math_selection != 0:
print "Current file: " + read_file
algorithm_text = """Which algorithm would you like to use?
(1) Pearson Correlation
(2) Pearson Correlation - Inverse User Frequency
(3) Pearson Correlation - Case Amplification
(4) Cosine Similarity
(5) Item based Similarity with Cosine
(6) Custom Algorithm
(0) Quit\n>"""
math_selection = input(algorithm_text)
print "Calculating..."
algo = Algorithms.cosine_sim
if math_selection == 1:
# Pearson Correlation
algo = Algorithms.pearson
elif math_selection == 2:
# Pearson Correlation - Inverse User Frequency
algo = Algorithms.pearson_iuf
elif math_selection == 3:
# Pearson Correlation - Case Modification
algo = Algorithms.pearson_case
elif math_selection == 4:
# Cosine Similarity
algo = Algorithms.cosine_sim
elif math_selection == 5:
# Item based similarity with cosine
algo = Algorithms.item_cos
elif math_selection == 6:
# Custom algorithm
algo = Algorithms.custom
elif math_selection == 0: #exit condition
break
else:
print "Invalid option"
continue #didn't get a valid option, do not proceed, try again
algo_driver(algo)
print "\nDone! saving to: " + out_file
write_file(out_file, PREDICTED_RATINGS, " ")
################################################################################
"""
Main Function Call
"""
if __name__ == '__main__':
main()
| true
|
11a47b5b4216a1f3222ce34c077fb91874bdfc1c
|
Python
|
kliner/funCode
|
/algs-py/TwoArraySumAboveQuery.py
|
UTF-8
| 811
| 3.5625
| 4
|
["MIT"] |
permissive
|
# two sorted array, from each choose one num, calc total count which sum >= query
# [3,5,6], [4,9], 9
# return 5
def solve(arr1, arr2, q):
l1, l2 = len(arr1), len(arr2)
i, j = 0, l2
ans = 0
while i < l1 and j > 0:
print i, j
if arr1[i] + arr2[j-1] >= q:
j-=1
else:
ans += (l2-j)
i+=1
ans += (l1-i)*l2
return ans
print solve([3,5,6], [4,9], 9)
print solve([1,1,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,1,1], 9)
print solve([1,1,1,1,1,1,1,1,1,1], [1,1,1,1,1,1,1,1,1,1], 2)
# two sorted array, from each choose one num, calc total count which sum >= lower and sum <= upper
# [3,5,6], [4,9], 8, 10
# [1,2,3,5,6], [4,5,6,7,8,9], 8, 10
# return 2
a,b,lo,hi = [1,2,3,5,6], [4,5,6,7,8,9], 8, 10
print solve(a, b, lo) - solve(a, b, hi+1)
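# A quick cross-check of the range trick above (an illustrative helper added here,
# not part of the original snippet): count the qualifying pairs directly.
def brute_force_range_count(arr1, arr2, lower, upper):
    # enumerate every pair (x, y) and count those with lower <= x + y <= upper
    return sum(1 for x in arr1 for y in arr2 if lower <= x + y <= upper)
print brute_force_range_count(a, b, lo, hi)  # should match the value printed above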
| true
|
cd018201800cf336a04575d502de54ee05164db6
|
Python
|
josh-howes/tidymongo
|
/tidymongo/tools.py
|
UTF-8
| 3,014
| 2.78125
| 3
|
[] |
no_license
|
import pandas as pd
from collections import defaultdict
from copy import deepcopy
class TidyResult(object):
def __init__(self, observational_unit):
self.observational_unit = observational_unit
self.collection_id = '{}_id'.format(self.observational_unit)
self.ref_tables_ = defaultdict(list)
@property
def tables(self):
return self.ref_tables_.keys()
@property
def __ref_tables(self):
tables = dict()
for k, v in self.ref_tables_.iteritems():
df = pd.DataFrame(data=v)
if '_id' in df.columns:
df.set_index('_id', inplace=True)
tables[k] = df
return tables
def add_document(self, doc_type, document):
if isinstance(document, list):
self.ref_tables_[doc_type].extend(document)
else:
self.ref_tables_[doc_type].append(document)
def merge(self, other):
return merge(self, other)
def __getattr__(self, name):
if name in self.__ref_tables.keys():
return self.__ref_tables[name]
else:
raise AttributeError()
def __repr__(self):
return "TidyResult(tables={})".format(self.tables)
def tidy(data, observational_unit, schema='infer'):
documents = deepcopy(data)
from collections import OrderedDict
results = OrderedDict()
results[observational_unit] = TidyResult(observational_unit)
for document in documents:
try:
foreign_key = document['_id']
except KeyError:
foreign_key = None
observation_vars = []
for k, v in document.iteritems():
if isinstance(v, list) or isinstance(v, dict):
tr = results.get(k, TidyResult(observational_unit=k))
if foreign_key:
                    # TODO: observational_unit is currently plural; it may be better to use the singular form with an "_id" suffix
tr.add_document(k, add_foreign_key(v, observational_unit, foreign_key))
else:
tr.add_document(k, v)
results[k] = tr
else:
observation_vars.append(k)
results[observational_unit].add_document(observational_unit, {k: document[k] for k in observation_vars})
return reduce(lambda a, b: a.merge(b), results.values())
def merge(left, right):
results = left
for k, v in right.ref_tables_.iteritems():
if k == right.observational_unit:
key = k
else:
key = '{0}_{1}'.format(right.observational_unit, k)
results.add_document(key, v)
return results
def add_foreign_key(data, key_name, key_value):
if isinstance(data, list):
return [add_foreign_key(d, key_name, key_value) for d in data]
elif isinstance(data, dict):
d = deepcopy(data)
d[key_name] = key_value
return d
else:
        raise TypeError('Unrecognized data type. '
                        'Must be a list or dict.')
| true
|
e0cefa79b312d62f73ce5e4e8a6719decabe2c31
|
Python
|
peterhinch/micropython-async
|
/v2/nec_ir/art.py
|
UTF-8
| 1,319
| 2.65625
| 3
|
["MIT"] |
permissive
|
# art.py Test program for IR remote control decoder aremote.py
# Supports Pyboard and ESP8266
# Author: Peter Hinch
# Copyright Peter Hinch 2017 Released under the MIT license
# Run this to characterise a remote.
from sys import platform
import uasyncio as asyncio
ESP32 = platform == 'esp32' or platform == 'esp32_LoBo'
if platform == 'pyboard':
from pyb import Pin
elif platform == 'esp8266' or ESP32:
from machine import Pin, freq
else:
print('Unsupported platform', platform)
from aremote import *
errors = {BADSTART : 'Invalid start pulse', BADBLOCK : 'Error: bad block',
BADREP : 'Error: repeat', OVERRUN : 'Error: overrun',
BADDATA : 'Error: invalid data', BADADDR : 'Error: invalid address'}
def cb(data, addr):
if data == REPEAT:
print('Repeat')
elif data >= 0:
print(hex(data), hex(addr))
else:
print('{} Address: {}'.format(errors[data], hex(addr)))
def test():
print('Test for IR receiver. Assumes NEC protocol.')
if platform == 'pyboard':
p = Pin('X3', Pin.IN)
elif platform == 'esp8266':
freq(160000000)
p = Pin(13, Pin.IN)
elif ESP32:
p = Pin(23, Pin.IN)
ir = NEC_IR(p, cb, True) # Assume r/c uses extended addressing
loop = asyncio.get_event_loop()
loop.run_forever()
test()
| true
|
9d67eca4141f72ec5aa2b855a61b7e41c86481bc
|
Python
|
Hynus/Python
|
/AmzOnPageAna/amzpageana.py
|
UTF-8
| 13,518
| 2.53125
| 3
|
[] |
no_license
|
# coding:utf-8
import urllib
import urllib2
import Image
import cStringIO
import os
from pyquery import PyQuery as pq
# Fetch and parse the page for the specified product ------------------------------------------
def get_the_url_page(pdt_asin, filename):
url = "https://www.amazon.com/dp/" + pdt_asin
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
values = {'name': 'Sunyh',
'location': 'pythontab',
'language': 'Python'}
headers = {'User-Agent': user_agent}
data = urllib.urlencode(values)
req = urllib2.Request(url, data, headers)
response = urllib2.urlopen(req)
the_page = response.read()
amz_pdt_page = pq(the_page)
f = open(filename, 'w+')
f.write('###############################################################\n')
f.close()
print "###############################################################"
return amz_pdt_page
# —————————————————————————————————————————————————————————————————————
# Get the title and its length -----------------------------------------------------------------
def get_title(amz_pdt_page, filename):
pdt_title = amz_pdt_page("#productTitle").text()
pdt_title_size = len(pdt_title)
print "The length of product's title is: " + str(pdt_title_size) + "!"
f = open(filename, 'a')
f.write("The length of product's title is: " + str(pdt_title_size) + "!\n")
f.write('###############################################################\n')
f.close()
print "###############################################################"
return pdt_title
# —————————————————————————————————————————————————————————————————————
# Get the bullet points and their count --------------------------------------------------------
def get_bullet_points(amz_pdt_page, filename):
bullet_pt_pq = amz_pdt_page("#feature-bullets").find("li").not_("#replacementPartsFitmentBullet")
bullet_pt_text = amz_pdt_page("#feature-bullets").find("li").not_("#replacementPartsFitmentBullet").text()
bullet_pt_pq_size = len(bullet_pt_pq)
bullet_pt_arr = []
for temp_li in bullet_pt_pq.items():
        bullet_pt_arr.append(temp_li.text())  # collect the bullet point description strings
print "There are " + str(bullet_pt_pq_size) + " points in the product's bullet!"
print "###############################################################"
f = open(filename, 'a')
f.write("There are " + str(bullet_pt_pq_size) + " points in the product's bullet!\n")
f.write("###############################################################\n")
f.close()
return bullet_pt_text
# ————————————————————————————————————————————————————————————————————
# Get the character count of the product description -------------------------------------------
def get_details(amz_pdt_page, filename):
    # Part 1: get the product description text
pdt_desp_pq = amz_pdt_page("#productDescription").find("p")
pdt_desp_text = amz_pdt_page("#productDescription").find("p").text()
pdt_desp_size1 = len(pdt_desp_text)
# pdt_desp_arr = []
# for temp_p in pdt_desp_pq.items():
    # pdt_desp_arr.append(temp_p.text()) # collect the product description strings
    # Part 2: get the product details table
pdt_details_table_pq = amz_pdt_page("#prodDetails").find(".pdTab")
if len(pdt_details_table_pq) == 0:
tem_pdt_details_table_text = amz_pdt_page("#productDetails_detailBullets_sections1").text()
if len(tem_pdt_details_table_text) == 0:
pass
else:
pd_start_spilt_idx = tem_pdt_details_table_text.find('Customer Reviews')
pd_end_spilt_idx = tem_pdt_details_table_text.rfind('stars')
pdt_details_table_text = tem_pdt_details_table_text[:pd_start_spilt_idx] + \
tem_pdt_details_table_text[pd_end_spilt_idx + 5:]
pdt_desp_size2 = len(pdt_details_table_text)
else:
tem_pdt_details_table_text = pdt_details_table_pq.text()
pd_start_spilt_idx = tem_pdt_details_table_text.find('Customer Reviews')
pd_end_spilt_idx = tem_pdt_details_table_text.rfind('stars')
pdt_details_table_text = tem_pdt_details_table_text[:pd_start_spilt_idx] + \
                                 tem_pdt_details_table_text[pd_end_spilt_idx + 5:]  # product details table text (its character length is taken below)
pdt_desp_size2 = len(pdt_details_table_text)
    pdt_desp_total_size = pdt_desp_size1 + pdt_desp_size2  # total character count of the product description
print "The total lengh of the product's detail is: " + str(pdt_desp_total_size) + "!"
print "###############################################################"
f = open(filename, 'a')
f.write("The total lengh of the product's detail is: " + str(pdt_desp_total_size) + "!\n")
f.write("###############################################################\n")
f.close()
return pdt_desp_text, pdt_details_table_text
# ————————————————————————————————————————————————————————————————————
# Get the number and resolution of the images on the current page ------------------------------
def get_images(amz_pdt_page, filename):
img_block = amz_pdt_page("#imageBlock").find("img")
img_urls = []
f = open(filename, 'a')
for sub_block in img_block.items():
img_urls.append(sub_block.attr('src'))
img_size_list = []
i = 1
for tem_url in img_urls:
img_file = cStringIO.StringIO(urllib2.urlopen(tem_url).read())
imgs = Image.open(img_file)
img_size_list.append(imgs.size)
print "The No." + str(i) +" image's infomation is got!!!"
f.write("The No." + str(i) +" image's infomation is got!!!\n")
i += 1
if (1,1) in img_size_list:
img_size_list.remove((1,1))
img_number = len(img_size_list)
print "There are " + str(img_number) + " pictures in current page!"
f.write("There are " + str(img_number) + " pictures in current page!\n")
print "The size of these pictures are as followed: (width, height)"
f.write("The size of these pictures are as followed: (width, height)\n")
for a_img in img_size_list:
print a_img
f.write(str(a_img)+"\n")
print "###############################################################"
f.write("###############################################################\n")
f.close()
# ————————————————————————————————————————————————————————————————————
# Check whether the keywords appear in the title, bullet points, or product description --------
def judge_keyword(pdt_title, bullet_pt_text, pdt_desp_text, pdt_details_table_text, amz_search_keyword, filename):
amz_search_words_list = amz_search_keyword.split(" ")
f = open(filename, 'a')
count_num = 0
for wordi in amz_search_words_list:
tag_in_pdt_title = pdt_title.lower().find(wordi.lower())
if tag_in_pdt_title != -1:
print "The keyword '" + wordi +"' can be found in the product's title!"
f.write("The keyword '" + wordi +"' can be found in the product's title!\n")
tag_in_pdt_bullet = bullet_pt_text.lower().find(wordi.lower())
if tag_in_pdt_bullet != -1:
print "The keyword '" + wordi +"' can be found in the product's bullet!"
f.write("The keyword '" + wordi +"' can be found in the product's bullet!\n")
tag_in_pdt_details1 = pdt_desp_text.lower().find(wordi.lower())
tag_in_pdt_details2 = pdt_details_table_text.lower().find(wordi.lower())
        if (tag_in_pdt_details1 != -1) or (tag_in_pdt_details2 != -1):
            print "The keyword '" + wordi + "' can be found in the product's details!"
f.write("The keyword '" + wordi +"' can be found in the product's details!\n")
if tag_in_pdt_title == tag_in_pdt_bullet == tag_in_pdt_details1 == tag_in_pdt_details2 == -1:
print "The keyword '" + wordi +"' can't be found anywhere!!"
f.write("The keyword '" + wordi +"' can't be found anywhere!!\n")
count_num += 1
print "------THE NO." + str(count_num) +" KEYWORD SEARCHING COMPLETED!-----"
f.write("------THE NO." + str(count_num) +" KEYWORD SEARCHING COMPLETED!-----\n")
print "###############################################################"
f.write("###############################################################\n")
f.close()
# ————————————————————————————————————————————————————————————————————
# Product star rating and number of customer reviews ——————————————————————————————
def get_reviews(amz_pdt_page, filename):
f = open(filename, 'a')
judge_txt = amz_pdt_page("#dp-no-customer-review-yet").text()
if len(judge_txt) != 0:
print judge_txt
f.write(judge_txt+"\n")
else:
pdt_review_pq = amz_pdt_page("#reviewSummary").find(".a-row.a-spacing-small")
pdt_review_stars = pdt_review_pq.eq(0).text()
print "The overall review rating is: " + pdt_review_stars + "!" # product star rating
f.write("The overall review rating is: " + pdt_review_stars + "!\n")
review_people_count = amz_pdt_page("#reviewSummary").find(".a-size-medium.totalReviewCount").eq(0).text()
review_star_details = pdt_review_pq.eq(1).text()
tem_review_star_list = review_star_details.split("%")[:5]
review_star_list = []
for items_in_rs in tem_review_star_list:
items_in_rs += '%'
review_star_list.append(items_in_rs)
review_star_list[0] = ' ' + review_star_list[0]
print "There are " + review_people_count + " customer reviews!" # number of reviewers and per-star percentage breakdown
f.write("There are " + review_people_count + " customer reviews!\n")
print "The details are as follows:"
f.write("The details are as follows:\n")
for starItem in review_star_list:
print starItem
f.write(starItem + "\n")
print "###############################################################"
f.write("###############################################################\n")
f.close()
# ————————————————————————————————————————————————————————————————————
# Check whether the product is fulfilled by Amazon —————————————————————————————————
def judge_ful_byamz(amz_pdt_page, filename):
f = open(filename, 'a')
merchant_info = amz_pdt_page("#merchant-info").eq(0).text().lower()
idx_f = merchant_info.find('fulfilled by amazon')
idx_d = merchant_info.find('sold by amazon')
if idx_f != -1 or idx_d != -1:
print "The product is fulfilled by Amazon!"
f.write("The product is fulfilled by Amazon!\n")
else:
print "The product is not fulfilled by Amazon!"
f.write("The product is not fulfilled by Amazon!\n")
print "#############################################################"
f.write("###############################################################\n")
f.close()
# ————————————————————————————————————————————————————————————————————
def run_main():
pdt_asin = raw_input("Please enter a product's ASIN code: ")
amz_search_keyword = raw_input("Please enter the keywords you want to search: ")
filename = "Product_" + pdt_asin + ".txt"
amz_pdt_page = get_the_url_page(pdt_asin, filename)
pdt_title = get_title(amz_pdt_page, filename)
bullet_pt_text = get_bullet_points(amz_pdt_page, filename)
pdt_desp_text, pdt_details_table_text = get_details(amz_pdt_page, filename)
get_images(amz_pdt_page, filename)
judge_keyword(pdt_title, bullet_pt_text, pdt_desp_text, pdt_details_table_text, amz_search_keyword, filename)
get_reviews(amz_pdt_page, filename)
judge_ful_byamz(amz_pdt_page, filename)
if __name__=="__main__":
run_main()
| true
|
16aa58b665e30d66d0ba518f2fc1d0315a78d0b6
|
Python
|
AK-1121/code_extraction
|
/python/python_24731.py
|
UTF-8
| 134
| 2.953125
| 3
|
[] |
no_license
|
# Numpy array, insert alternate rows of zeros
import numpy as np
a = np.zeros((982,5))
b=np.random.randint(0,100,(491,5)) # your 491 row matrix
a[::2] = b
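# A minimal check (added, not from the original snippet) of the same slicing idea on a
# smaller, hypothetical 6x2 / 3x2 pair: even rows receive the data, odd rows stay zero.
small = np.zeros((6, 2))
small[::2] = np.arange(6).reshape(3, 2)
print(small)  # rows 0, 2, 4 hold 0..5; rows 1, 3, 5 remain zeros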
| true
|
7609930727c471d1c060fb19b0d73072ef5f4882
|
Python
|
saurabhgangurde/EE677_VLSI_CAD
|
/Ramanpreet/path_finder.py
|
UTF-8
| 1,399
| 3
| 3
|
[] |
no_license
|
from pyeda.inter import*
#from RothAlgebra import RothVariable
#Or(Xor(And(Not(Or(Or(And(a, b), c), d)), d), c), And(b, d))
Fanin=[None,None,None,None,[And,0,1],[Or,4,2],[And,5,3],[And,6,3]]
Fanout=[[] for i in range(len(Fanin))]
for i in range(len(Fanin)): #i is index of node for which Fanout is to be found
for j in range(len(Fanin)): #j itertaes over all elements of Fanin to find i in its input
if Fanin[j]!=None: #jth node has no Fanin
for find in range(1,len(Fanin[j])):
if i==Fanin[j][find]: #if ith node found in Fanin of j
Fanout[i].append(j)
break
path = [] #Final path
p1=[] #temp path
def pathFinder(Fanout,n):
# global path
# global p1
p1.append(n) #append current node
if(Fanout[n]== []):
p2=p1[:] #shallow copy
path.append(p2) #append current list to all paths
p1.pop() #pop last node
return
for i in range(len(Fanout[n])):
pathFinder(Fanout,Fanout[n][i]) #next Fanout of current node
p1.pop() #pop last node
return
print(Fanout)
pathFinder(Fanout,5)
print(path)
#Assume we are checking for node 5 s-a-0
# CorrectValue=[RothVariable('X','x') 'X' for i in range(len(Fanin))]
# CorrectValue[5]=RothVariable()
# FaultyValue=[]
# c=Fanin[6][0](Fanin[6][1],Fanin[6],[2])
# print (c)
# print (Fanout)
| true
|
79b87f85b10a5a2f5b53b7a1b5af5d0cb498b446
|
Python
|
nnminh98/Routing-with-DeepRL
|
/Network_environment/env/Old_implenentations/testing.py
|
UTF-8
| 1,678
| 2.546875
| 3
|
[] |
no_license
|
import random
import functools
import simpy
from SimComponents import PacketGenerator, PacketSink, SwitchPort, RandomBrancher, Packet
from Node import NetworkNode
if __name__ == '__main__':
env = simpy.Environment()
mean_pkt_size = 100.0 # in bytes
port_rate = 2.2 * 8 * mean_pkt_size
adist1 = functools.partial(random.expovariate, 2.0)
sdist = functools.partial(random.expovariate, 1.0 / mean_pkt_size)
samp_dist = functools.partial(random.expovariate, 0.50)
'''switch_port = SwitchPort(env, port_rate*2)
switch_port2 = SwitchPort(env, port_rate*2)
for i in range(3):
packet = Packet(env.now, mean_pkt_size, i)
switch_port.put(packet)
print(switch_port.getQueueSize())
print("something")
switch_port.out = switch_port2
switch_port.run()'''
node1 = NetworkNode(env, "NW1", port_rate, adist1, sdist, samp_dist)
node2 = NetworkNode(env, "NW2", port_rate, adist1, sdist, samp_dist)
node3 = NetworkNode(env, "NW3", port_rate, adist1, sdist, samp_dist)
node4 = NetworkNode(env, "NW4", port_rate, adist1, sdist, samp_dist)
node5 = NetworkNode(env, "NW5", port_rate, adist1, sdist, samp_dist)
node1.addPort(node2, True)
node1.addPort(node3, True)
node3.addPort(node4, True)
node2.addPort(node4, True)
node2.addPort(node5, True)
node4.addPort(node5, True)
print(node1.getPorts())
print(node2.getPorts())
print(node3.getPorts())
print(node4.getPorts())
packet = Packet(env.now, mean_pkt_size, 1, "NW1", "NW4")
#packet2 = Packet(env.now, mean_pkt_size, 1, "NW2", "NW1")
node1.put(packet)
#node2.put(packet2)
env.run(until=40000)
| true
|
1a5f4e31abaec6402fb1779fae0ab23afc9c4b7c
|
Python
|
arita37/deeplearning
|
/theano/multi dim grid lstm to python.py
|
UTF-8
| 47,737
| 2.859375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Torch to Theano Python
grid-lstm-tensorflow
Examples of using GridLSTM (and GridRNN in general) in tensorflow
The GridRNN implementation in tensorflow is generic, in the sense that it supports multiple dimensions with various settings for input/output dimensions, priority dimensions and non-recurrent dimensions. The type of recurrent cell can also be selected among LSTM, GRU or vanilla RNN.
Here we collect some examples that demonstrate GridRNN, which will be added over time. The current list of examples include:
char-rnn: 2GridLSTM for character-level language modeling.
"""
########################### grid-lstm/model/GridLSTM.lua #########################
import 'nn'
import 'nngraph'
'''
This is called once per dimension inside a grid LSTM block to create the gated
update of the dimension's hidden state and memory cell.
It takes h_t and h_d, the hidden states from the temporal and
depth dimensions respectively, as well as prev_c, the dimension's previous memory cell.
It returns next_c, next_h along the dimension, using a standard lstm gated update,
conditioned on the concatenated time and depth hidden states.
'''
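# Hedged aside, not part of the Lua port being transcribed: a self-contained NumPy sketch
# of the gated update described above, assuming h_t and h_d are already the 4*rnn_size
# pre-activations produced by the time- and depth-direction linear layers.
import numpy as np
def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))
def numpy_lstm_update(h_t, h_d, prev_c):
    n1, n2, n3, n4 = np.split(h_t + h_d, 4)                  # i, f, o gates and write input g
    in_gate, forget_gate, out_gate = _sigmoid(n1), _sigmoid(n2), _sigmoid(n3)
    in_transform = np.tanh(n4)
    next_c = forget_gate * prev_c + in_gate * in_transform   # standard LSTM cell update
    next_h = out_gate * np.tanh(next_c)                      # gated cell forms the output
    return next_c, next_h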
def lstm(h_t, h_d, prev_c, rnn_size)
all_input_sums = nn.CAddTable()({h_t, h_d})
reshaped = nn.Reshape(4, rnn_size)(all_input_sums)
n1, n2, n3, n4 = nn.SplitTable(2)(reshaped):split(4)
# decode the gates
in_gate = nn.Sigmoid()(n1)
forget_gate = nn.Sigmoid()(n2)
out_gate = nn.Sigmoid()(n3)
# decode the write inputs
in_transform = nn.Tanh()(n4)
# perform the LSTM update
next_c = nn.CAddTable()({
nn.CMulTable()({forget_gate, prev_c}),
nn.CMulTable()({in_gate, in_transform})
})
# gated cells form the output
next_h = nn.CMulTable()({out_gate, nn.Tanh()(next_c)})
return next_c, next_h
'''
GridLSTM:
1) Map input x into memory and hidden cells m(1), h(1) along the depth dimension.
2) Concatenate previous hidden states from time and depth dimensions, [h(1), h(2)] into H.
3) Forward the time LSTM, LSTM_2(H) -> h(2)', m(2)'.
4) Concatenate transformed h(2)' and h(1) into H' = [h(1), h(2)']
5) Forward the depth LSTM, LSTM_1(H') -> h(1)', m(1)'
6) Either repeat 2-5 for another layer or map h(1)', the final hidden state along the depth
dimension, to a character prediction.
'''
GridLSTM = {}
def GridLSTM.grid_lstm(input_size, rnn_size, n, dropout, should_tie_weights)
dropout = dropout or 0
# There will be 2*n+1 inputs
inputs = {}
table.insert(inputs, nn.Identity()()) # input c for depth dimension
table.insert(inputs, nn.Identity()()) # input h for depth dimension
for L = 1,n :
table.insert(inputs, nn.Identity()()) # prev_c[L] for time dimension
table.insert(inputs, nn.Identity()()) # prev_h[L] for time dimension
shared_weights
if should_tie_weights == 1 :
shared_weights = {nn.Linear(rnn_size, 4 * rnn_size), nn.Linear(rnn_size, 4 * rnn_size)}
outputs_t = {} # Outputs being handed to the next time step along the time dimension
outputs_d = {} # Outputs being handed from one layer to the next along the depth dimension
for L in range(1,n) :
# Take hidden and memory cell from previous time steps
prev_c_t = inputs[L*2+1]
prev_h_t = inputs[L*2+2]
if L == 1 :
# We're in the first layer
prev_c_d = inputs[1] # input_c_d: the starting depth dimension memory cell, just a zero vec.
prev_h_d = nn.LookupTable(input_size, rnn_size)(inputs[2])
# input_h_d: the starting depth dim hidden state. map a char into hidden space via a lookup table
else
# We're in the higher layers 2...N
# Take hidden and memory cell from layers below
prev_c_d = outputs_d[((L-1)*2)-1]
prev_h_d = outputs_d[((L-1)*2)]
if dropout > 0 : prev_h_d = nn.Dropout(dropout)(prev_h_d):annotate{name='drop_' .. L} # apply dropout, if any
# Evaluate the input sums at once for efficiency
t2h_t = nn.Linear(rnn_size, 4 * rnn_size)(prev_h_t):annotate{name='i2h_'..L}
d2h_t = nn.Linear(rnn_size, 4 * rnn_size)(prev_h_d):annotate{name='h2h_'..L}
# Get transformed memory and hidden states pointing in the time direction first
next_c_t, next_h_t = lstm(t2h_t, d2h_t, prev_c_t, rnn_size)
# Pass memory cell and hidden state to next timestep
table.insert(outputs_t, next_c_t)
table.insert(outputs_t, next_h_t)
# Evaluate the input sums at once for efficiency
t2h_d = nn.Linear(rnn_size, 4 * rnn_size)(next_h_t):annotate{name='i2h_'..L}
d2h_d = nn.Linear(rnn_size, 4 * rnn_size)(prev_h_d):annotate{name='h2h_'..L}
# See section 3.5, "Weight Sharing" of http://arxiv.org/pdf/1507.01526.pdf
# The weights along the temporal dimension are already tied (cloned many times in train.lua)
# Here we can tie the weights along the depth dimension. Having invariance in computation
# along the depth appears to be critical to solving the 15 digit addition problem w/ high accy.
# See fig 4. to compare tied vs untied grid lstms on this task.
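# (Added note, hedged): "tying" here means the depth-direction Linear modules in every layer
# share a single weight/bias tensor (and its gradients), so the same depth transformation is
# applied at every level of the stack; the time-direction weights are already shared across
# timesteps by the clone_many_times call in train.lua.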
if should_tie_weights == 1 :
print("tying weights along the depth dimension")
t2h_d.data.module:share(shared_weights[1], 'weight', 'bias', 'gradWeight', 'gradBias')
d2h_d.data.module:share(shared_weights[2], 'weight', 'bias', 'gradWeight', 'gradBias')
# Create the lstm gated update pointing in the depth direction.
# We 'prioritize' the depth dimension by using the updated temporal hidden state as input
# instead of the previous temporal hidden state. This implements Section 3.2, "Priority Dimensions"
next_c_d, next_h_d = lstm(t2h_d, d2h_d, prev_c_d, rnn_size)
# Pass the depth dimension memory cell and hidden state to layer above
table.insert(outputs_d, next_c_d)
table.insert(outputs_d, next_h_d)
# set up the decoder
top_h = outputs_d[#outputs_d]
if dropout > 0 : top_h = nn.Dropout(dropout)(top_h)
proj = nn.Linear(rnn_size, input_size)(top_h):annotate{name='decoder'}
logsoft = nn.LogSoftMax()(proj)
table.insert(outputs_t, logsoft)
return nn.gModule(inputs, outputs_t)
return GridLSTM
########################### grid-lstm/model/GridLSTM.lua #########################
######## grid-lstm/train.lua
'''
This file trains a character-level multi-layer RNN on text data
Code is based on implementation in
https://github.com/oxford-cs-ml-2015/practical6
but modified to have multi-layer support, GPU support, as well as
many other common model/optimization bells and whistles.
The practical6 code is in turn based on
https://github.com/wojciechz/learning_to_execute
which is turn based on other stuff in Torch, etc... (long lineage)
'''
import 'torch'
import 'nn'
import 'nngraph'
import 'optim'
import 'lfs'
import 'cudnn'
import 'util.OneHot'
import 'util.misc'
CharSplitLMMinibatchLoader = import 'util.CharSplitLMMinibatchLoader'
model_utils = import 'util.model_utils'
LSTM = import 'model.LSTM'
GridLSTM = import 'model.GridLSTM'
GRU = import 'model.GRU'
RNN = import 'model.RNN'
cmd = torch.CmdLine()
cmd:text()
cmd:text('Train a character-level language model')
cmd:text()
cmd:text('Options')
# data
cmd:option('-data_dir','data/tinyshakespeare','data directory. Should contain the file input.txt with input data')
# task params
cmd:option('-task', 'char', 'task to train on: char, addition')
cmd:option('-digit_length', 4, 'length of the digits to add for the addition task')
# model params
cmd:option('-rnn_size', 128, 'size of LSTM internal state')
cmd:option('-num_layers', 2, 'number of layers in the LSTM')
cmd:option('-model', 'lstm', 'lstm, grid_lstm, gru, or rnn')
cmd:option('-tie_weights', 1, 'tie grid lstm weights?')
# optimization
cmd:option('-learning_rate',2e-3,'learning rate')
cmd:option('-learning_rate_decay',0.97,'learning rate decay')
cmd:option('-learning_rate_decay_after',10,'in number of epochs, when to start decaying the learning rate')
cmd:option('-decay_rate',0.95,'decay rate for rmsprop')
cmd:option('-dropout',0,'dropout for regularization, used after each RNN hidden layer. 0 = no dropout')
cmd:option('-seq_length',50,'number of timesteps to unroll for')
cmd:option('-batch_size',50,'number of sequences to train on in parallel')
cmd:option('-max_epochs',50,'number of full passes through the training data')
cmd:option('-grad_clip',5,'clip gradients at this value')
cmd:option('-train_frac',0.95,'fraction of data that goes into train set')
cmd:option('-val_frac',0.05,'fraction of data that goes into validation set')
# test_frac will be computed as (1 - train_frac - val_frac)
cmd:option('-init_from', '', 'initialize network parameters from checkpoint at this path')
# bookkeeping
cmd:option('-seed',123,'torch manual random number generator seed')
cmd:option('-print_every',1,'how many steps/minibatches between printing out the loss')
cmd:option('-eval_val_every',1000,'every how many iterations should we evaluate on validation data?')
cmd:option('-checkpoint_dir', 'cv', 'output directory where checkpoints get written')
cmd:option('-savefile','lstm','filename to autosave the checkpont to. Will be inside checkpoint_dir/')
cmd:option('-accurate_gpu_timing',0,'set this flag to 1 to get precise timings when using GPU. Might make code bit slower but reports accurate timings.')
# GPU/CPU
cmd:option('-gpuid',0,'which gpu to use. -1 = use CPU')
cmd:option('-opencl',0,'use OpenCL (instead of CUDA)')
cmd:text()
# parse input params
opt = cmd:parse(arg)
torch.manualSeed(opt.seed)
# train / val / test split for data, in fractions
test_frac = math.max(0, 1 - (opt.train_frac + opt.val_frac))
split_sizes = {opt.train_frac, opt.val_frac, test_frac}
# initialize cunn/cutorch for training on the GPU and fall back to CPU gracefully
if opt.gpuid >= 0 and opt.opencl == 0 :
ok, cunn = pcall(require, 'cunn')
ok2, cutorch = pcall(require, 'cutorch')
if not ok : print('package cunn not found!')
if not ok2 : print('package cutorch not found!')
if ok and ok2 :
print('using CUDA on GPU ' .. opt.gpuid .. '...')
cutorch.setDevice(opt.gpuid + 1) # note +1 to make it 0 indexed! sigh lua
cutorch.manualSeed(opt.seed)
else
print('If cutorch and cunn are installed, your CUDA toolkit may be improperly configured.')
print('Check your CUDA toolkit installation, rebuild cutorch and cunn, and try again.')
print('Falling back on CPU mode')
opt.gpuid = -1 # overwrite user setting
# initialize clnn/cltorch for training on the GPU and fall back to CPU gracefully
if opt.gpuid >= 0 and opt.opencl == 1 :
ok, cunn = pcall(require, 'clnn')
ok2, cutorch = pcall(require, 'cltorch')
if not ok : print('package clnn not found!')
if not ok2 : print('package cltorch not found!')
if ok and ok2 :
print('using OpenCL on GPU ' .. opt.gpuid .. '...')
cltorch.setDevice(opt.gpuid + 1) # note +1 to make it 0 indexed! sigh lua
torch.manualSeed(opt.seed)
else
print('If cltorch and clnn are installed, your OpenCL driver may be improperly configured.')
print('Check your OpenCL driver installation, check output of clinfo command, and try again.')
print('Falling back on CPU mode')
opt.gpuid = -1 # overwrite user setting
# create the data loader class
loader = CharSplitLMMinibatchLoader.create(opt.data_dir, opt.batch_size, opt.seq_length, split_sizes)
vocab_size = loader.vocab_size # the number of distinct characters
vocab = loader.vocab_mapping
print('vocab size: ' .. vocab_size)
# make sure output directory exists
if not path.exists(opt.checkpoint_dir) : lfs.mkdir(opt.checkpoint_dir)
# define the model: prototypes for one timestep, : clone them in time
do_random_init = true
if string.len(opt.init_from) > 0 :
print('loading a model from checkpoint ' .. opt.init_from)
checkpoint = torch.load(opt.init_from)
protos = checkpoint.protos
# make sure the vocabs are the same
vocab_compatible = true
checkpoint_vocab_size = 0
for c,i in pairs(checkpoint.vocab) do
if not (vocab[c] == i) :
vocab_compatible = false
checkpoint_vocab_size = checkpoint_vocab_size + 1
if not (checkpoint_vocab_size == vocab_size) :
vocab_compatible = false
print('checkpoint_vocab_size: ' .. checkpoint_vocab_size)
assert(vocab_compatible, 'error, the character vocabulary for this dataset and the one in the saved checkpoint are not the same. ')
# overwrite model settings based on checkpoint to ensure compatibility
print('overwriting rnn_size=' .. checkpoint.opt.rnn_size .. ', num_layers=' .. checkpoint.opt.num_layers .. ', model=' .. checkpoint.opt.model .. ' based on the checkpoint.')
opt.rnn_size = checkpoint.opt.rnn_size
opt.num_layers = checkpoint.opt.num_layers
opt.model = checkpoint.opt.model
do_random_init = false
else
print('creating an ' .. opt.model .. ' with ' .. opt.num_layers .. ' layers')
protos = {}
if opt.model == 'lstm' :
protos.rnn = LSTM.lstm(vocab_size, opt.rnn_size, opt.num_layers, opt.dropout)
elif opt.model == 'grid_lstm' :
protos.rnn = GridLSTM.grid_lstm(vocab_size, opt.rnn_size, opt.num_layers, opt.dropout, opt.tie_weights)
elif opt.model == 'gru' :
protos.rnn = GRU.gru(vocab_size, opt.rnn_size, opt.num_layers, opt.dropout)
elif opt.model == 'rnn' :
protos.rnn = RNN.rnn(vocab_size, opt.rnn_size, opt.num_layers, opt.dropout)
protos.criterion = nn.ClassNLLCriterion()
# the initial state of the cell/hidden states
init_state = {}
for L=1,opt.num_layers do
h_init = torch.zeros(opt.batch_size, opt.rnn_size)
if opt.gpuid >=0 and opt.opencl == 0 : h_init = h_init:cuda()
if opt.gpuid >=0 and opt.opencl == 1 : h_init = h_init:cl()
table.insert(init_state, h_init:clone())
if opt.model == 'lstm' or opt.model == 'grid_lstm' :
table.insert(init_state, h_init:clone()) # extra initial state for prev_c
# ship the model to the GPU if desired
if opt.gpuid >= 0 and opt.opencl == 0 : for k,v in pairs(protos) do v:cuda()
if opt.gpuid >= 0 and opt.opencl == 1 : for k,v in pairs(protos) do v:cl()
# put the above things into one flattened parameters tensor
params, grad_params = model_utils.combine_all_parameters(protos.rnn)
# initialization
if do_random_init :
params:uniform(-0.08, 0.08) # small uniform numbers
# initialize the LSTM forget gates with slightly higher biases to encourage remembering in the beginning
if opt.model == 'lstm' or opt.model == 'grid_lstm' :
for layer_idx = 1, opt.num_layers do
for _,node in ipairs(protos.rnn.forwardnodes) do
if node.data.annotations.name == "i2h_" .. layer_idx :
print('setting forget gate biases to 1 in LSTM layer ' .. layer_idx)
# the gates are, in order, i,f,o,g, so f is the 2nd block of weights
node.data.module.bias[{{opt.rnn_size+1, 2*opt.rnn_size}}]:fill(1.0)
print('number of parameters in the model: ' .. params:nElement())
# make a bunch of clones after flattening, as that reallocates memory
clones = {}
for name,proto in pairs(protos) do
print('cloning ' .. name)
clones[name] = model_utils.clone_many_times(proto, opt.seq_length, not proto.parameters)
# preprocessing helper function
function prepro(x,y)
x = x:transpose(1,2):contiguous() # swap the axes for faster indexing
y = y:transpose(1,2):contiguous()
if opt.gpuid >= 0 and opt.opencl == 0 : # ship the input arrays to GPU
# have to convert to float because integers can't be cuda()'d
x = x:float():cuda()
y = y:float():cuda()
if opt.gpuid >= 0 and opt.opencl == 1 : # ship the input arrays to GPU
x = x:cl()
y = y:cl()
return x,y
function get_input_mem_cell()
input_mem_cell = torch.zeros(opt.batch_size, opt.rnn_size)
if opt.gpuid >= 0 and opt.opencl == 0 :
input_mem_cell = input_mem_cell:float():cuda()
return input_mem_cell
function get_zeroed_d_output_t(vocab_size)
zeroed_d_output_t = torch.zeros(opt.batch_size, vocab_size)
if opt.gpuid >= 0 and opt.opencl == 0 :
zeroed_d_output_t = zeroed_d_output_t:float():cuda()
return zeroed_d_output_t
# evaluate the loss over an entire split
function eval_split(split_index, max_batches)
print('evaluating loss over split index ' .. split_index)
n = loader.split_sizes[split_index]
if max_batches ~= nil : n = math.min(max_batches, n)
loader:reset_batch_pointer(split_index) # move batch iteration pointer for this split to front
loss = 0
accy = 0
normal = 0
rnn_state = {[0] = init_state}
for i = 1,n do # iterate over batches in the split
# fetch a batch
x, y = loader:next_batch(split_index)
x,y = prepro(x,y)
# forward pass
for t=1,opt.seq_length do
clones.rnn[t]:evaluate() # for dropout proper functioning
if opt.model == "grid_lstm" :
input_mem_cell = get_input_mem_cell()
rnn_inputs = {input_mem_cell, x[t], unpack(rnn_state[t-1])}
# if we're using a grid lstm, hand in a zero vec for the starting memory cell state
else
rnn_inputs = {x[t], unpack(rnn_state[t-1])}
lst = clones.rnn[t]:forward(rnn_inputs)
rnn_state[t] = {}
for i=1,#init_state do table.insert(rnn_state[t], lst[i])
prediction = lst[#lst]
target_delimiter_position = opt.seq_length - (opt.digit_length + 2)
if opt.task == "addition" and t > target_delimiter_position :
max, pred_argmax = torch.max(prediction,2)
accy = accy + torch.eq(pred_argmax, y[t]):sum()
normal = normal + prediction:size(1)
loss = loss + clones.criterion[t]:forward(prediction, y[t])
# carry over lstm state
rnn_state[0] = rnn_state[#rnn_state]
print(i .. '/' .. n .. '...')
out
if opt.task == "addition" :
out = accy / normal
else
out = loss / opt.seq_length / n
return out
# do fwd/bwd and return loss, grad_params
init_state_global = clone_list(init_state)
def feval(x) :
if x ~= params :
params:copy(x)
grad_params:zero()
#----------------# get minibatch -------------------
x, y = loader:next_batch(1)
x,y = prepro(x,y)
#----------------- forward pass -------------------
rnn_state = {[0] = init_state_global}
predictions = {} # softmax outputs
loss = 0
for t=1,opt.seq_length do
clones.rnn[t]:training() # make sure we are in correct mode (this is cheap, sets flag)
rnn_inputs
if opt.model == "grid_lstm" :
input_mem_cell = get_input_mem_cell()
rnn_inputs = {input_mem_cell, x[t], unpack(rnn_state[t-1])} # if we're using a grid lstm, hand in a zero vec for the starting memory cell state
else
rnn_inputs = {x[t], unpack(rnn_state[t-1])}
lst = clones.rnn[t]:forward(rnn_inputs)
rnn_state[t] = {}
for i=1,#init_state do table.insert(rnn_state[t], lst[i]) # extract the state, without output
predictions[t] = lst[#lst] # last element is the prediction
loss = loss + clones.criterion[t]:forward(predictions[t], y[t])
loss = loss / opt.seq_length
#---------------- backward pass -------------------
# initialize gradient at time t to be zeros (there's no influence from future)
drnn_state = {[opt.seq_length] = clone_list(init_state, true)} # true also zeros the clones
for t=opt.seq_length,1,-1 do
# If we do addition task and we're at t < position of target delimiter, just use a vec of zeros for dL/dOutput
# We don't want to suffer prediction loss prior to the target delimiter, just recurrence loss.
target_delimiter_position = opt.seq_length - (opt.digit_length + 2)
if opt.task == "addition" and t < target_delimiter_position :
doutput_t = get_zeroed_d_output_t(loader.vocab_size)
else
doutput_t = clones.criterion[t]:backward(predictions[t], y[t])
# backprop through loss, and softmax/linear
table.insert(drnn_state[t], doutput_t) # drnn_state[t] already has dL/dH_t+1 vectors for every layer; just adding the dL/dOutput to the list.
dlst = clones.rnn[t]:backward(rnn_inputs, drnn_state[t]) # <- right here, you're apping the doutput_t to the list of dLdh for all layers, : using that big list to backprop into the input and unpacked rnn state vecs at t-1
drnn_state[t-1] = {}
skip_index
if opt.model == "grid_lstm" : skip_index = 2 else skip_index = 1
for k,v in pairs(dlst) do
if k > skip_index : # k <= skip_index is gradient on inputs, which we dont need
# note we do k-1 because first item is dembeddings, and : follow the
# derivatives of the state, starting at index 2. I know...
drnn_state[t-1][k-skip_index] = v
#--------------------- misc ----------------------
# transfer final state to initial state (BPTT)
init_state_global = rnn_state[#rnn_state] # NOTE: I don't think this needs to be a clone, right?
# grad_params:div(opt.seq_length) # this line should be here but since we use rmsprop it would have no effect. Removing for efficiency
# clip gradient element-wise
grad_params:clamp(-opt.grad_clip, opt.grad_clip)
return loss, grad_params
# start optimization here
train_losses = {}
val_losses = {}
optim_state = {learningRate = opt.learning_rate, alpha = opt.decay_rate}
iterations = opt.max_epochs * loader.ntrain
iterations_per_epoch = loader.ntrain
loss0 = nil
for i = 1, iterations do
epoch = i / loader.ntrain
timer = torch.Timer()
_, loss = optim.rmsprop(feval, params, optim_state)
if opt.accurate_gpu_timing == 1 and opt.gpuid >= 0 :
'''
Note on timing: The reported time can be off because the GPU is invoked async. If one
wants to have exactly accurate timings one must call cutorch.synchronize() right here.
I will avoid doing so by default because this can incur computational overhead.
'''
cutorch.synchronize()
time = timer:time().real
train_loss = loss[1] # the loss is inside a list, pop it
train_losses[i] = train_loss
# exponential learning rate decay
if i % loader.ntrain == 0 and opt.learning_rate_decay < 1 :
if epoch >= opt.learning_rate_decay_after :
decay_factor = opt.learning_rate_decay
optim_state.learningRate = optim_state.learningRate * decay_factor # decay it
print('decayed learning rate by a factor ' .. decay_factor .. ' to ' .. optim_state.learningRate)
# every now and : or on last iteration
if i % opt.eval_val_every == 0 or i == iterations :
# evaluate loss on validation data
val_loss = eval_split(2) # 2 = validation
val_losses[i] = val_loss
savefile = string.format('%s/lm_%s_epoch%.2f_%.4f.t7', opt.checkpoint_dir, opt.savefile, epoch, val_loss)
print('saving checkpoint to ' .. savefile)
checkpoint = {}
checkpoint.protos = protos
checkpoint.opt = opt
checkpoint.train_losses = train_losses
checkpoint.val_loss = val_loss
checkpoint.val_losses = val_losses
checkpoint.i = i
checkpoint.epoch = epoch
checkpoint.vocab = loader.vocab_mapping
torch.save(savefile, checkpoint)
if i % opt.print_every == 0 :
print(string.format("%d/%d (epoch %.3f), train_loss = %6.8f, grad/param norm = %6.4e, time/batch = %.4fs", i, iterations, epoch, train_loss, grad_params:norm() / params:norm(), time))
if i % 10 == 0 : collectgarbage()
# handle early stopping if things are going really bad
if loss[1] ~= loss[1] :
print('loss is NaN. This usually indicates a bug. Please check the issues page for existing issues, or create a new issue, if none exist. Ideally, please state: your operating system, 32-bit/64-bit, your blas version, cpu/cuda/cl?')
break # halt
if loss0 == nil : loss0 = loss[1]
if loss[1] > loss0 * 3 :
print('loss is exploding, aborting.')
break # halt
!pip install chainer
!pip install -U chainer -vvvv
!set VS100COMNTOOLS=%VS120COMNTOOLS%
!set PATH= %VS120COMNTOOLS%\..\..\VC\bin;%PATH%
##################################################################
########## grid-lstm/sample.lua
'''
This file samples characters from a trained model
Code is based on implementation in
https://github.com/oxford-cs-ml-2015/practical6
'''
import 'torch'
import 'nn'
import 'nngraph'
import 'optim'
import 'lfs'
import 'util.OneHot'
import 'util.misc'
cmd = torch.CmdLine()
cmd:text()
cmd:text('Sample from a character-level language model')
cmd:text()
cmd:text('Options')
# required:
cmd:argument('-model','model checkpoint to use for sampling')
# optional parameters
cmd:option('-seed',123,'random number generator\'s seed')
cmd:option('-sample',1,' 0 to use max at each timestep, 1 to sample at each timestep')
cmd:option('-primetext',"",'used as a prompt to "seed" the state of the LSTM with a given sequence, before we sample.')
cmd:option('-length',2000,'number of characters to sample')
cmd:option('-temperature',1,'temperature of sampling')
cmd:option('-gpuid',0,'which gpu to use. -1 = use CPU')
cmd:option('-opencl',0,'use OpenCL (instead of CUDA)')
cmd:option('-verbose',1,'set to 0 to ONLY print the sampled text, no diagnostics')
cmd:text()
# parse input params
opt = cmd:parse(arg)
# gated print: simple utility function wrapping a print
function gprint(str)
if opt.verbose == 1 : print(str)
# check that cunn/cutorch are installed if user wants to use the GPU
if opt.gpuid >= 0 and opt.opencl == 0 :
ok, cunn = pcall(require, 'cunn')
ok2, cutorch = pcall(require, 'cutorch')
if not ok : gprint('package cunn not found!')
if not ok2 : gprint('package cutorch not found!')
if ok and ok2 :
gprint('using CUDA on GPU ' .. opt.gpuid .. '...')
gprint('Make sure the saved checkpoint was trained with GPU. If it was trained with CPU use -gpuid -1 for sampling as well')
cutorch.setDevice(opt.gpuid + 1) # note +1 to make it 0 indexed! sigh lua
cutorch.manualSeed(opt.seed)
else
gprint('Falling back on CPU mode')
opt.gpuid = -1 # overwrite user setting
# check that clnn/cltorch are installed if user wants to use OpenCL
if opt.gpuid >= 0 and opt.opencl == 1 :
ok, cunn = pcall(require, 'clnn')
ok2, cutorch = pcall(require, 'cltorch')
if not ok : print('package clnn not found!')
if not ok2 : print('package cltorch not found!')
if ok and ok2 :
gprint('using OpenCL on GPU ' .. opt.gpuid .. '...')
gprint('Make sure that your saved checkpoint was also trained with GPU. If it was trained with CPU use -gpuid -1 for sampling as well')
cltorch.setDevice(opt.gpuid + 1) # note +1 to make it 0 indexed! sigh lua
torch.manualSeed(opt.seed)
else
gprint('Falling back on CPU mode')
opt.gpuid = -1 # overwrite user setting
torch.manualSeed(opt.seed)
# load the model checkpoint
if not lfs.attributes(opt.model, 'mode') :
gprint('Error: File ' .. opt.model .. ' does not exist. Are you sure you didn\'t forget to prep cv/ ?')
checkpoint = torch.load(opt.model)
protos = checkpoint.protos
protos.rnn:evaluate() # put in eval mode so that dropout works properly
# initialize the vocabulary (and its inverted version)
vocab = checkpoint.vocab
ivocab = {}
for c,i in pairs(vocab) do ivocab[i] = c
# initialize the rnn state to all zeros
gprint('creating an ' .. checkpoint.opt.model .. '...')
current_state
current_state = {}
for L = 1,checkpoint.opt.num_layers do
# c and h for all layers
h_init = torch.zeros(1, checkpoint.opt.rnn_size):double()
if opt.gpuid >= 0 and opt.opencl == 0 : h_init = h_init:cuda()
if opt.gpuid >= 0 and opt.opencl == 1 : h_init = h_init:cl()
table.insert(current_state, h_init:clone())
if checkpoint.opt.model == 'grid_lstm' :
table.insert(current_state, h_init:clone()) # extra initial state for prev_c
state_size = #current_state
function get_input_mem_cell()
input_mem_cell = torch.zeros(1, checkpoint.opt.rnn_size)
if opt.gpuid >= 0 and opt.opencl == 0 :
input_mem_cell = input_mem_cell:float():cuda()
return input_mem_cell
# do a few seeded timesteps
seed_text = opt.primetext
if string.len(seed_text) > 0 :
gprint('seeding with ' .. seed_text)
gprint('--------------------------')
for c in seed_text:gmatch'.' do
prev_char = torch.Tensor{vocab[c]}
io.write(ivocab[prev_char[1]])
if opt.gpuid >= 0 and opt.opencl == 0 : prev_char = prev_char:cuda()
if opt.gpuid >= 0 and opt.opencl == 1 : prev_char = prev_char:cl()
if checkpoint.opt.model == "grid_lstm" :
input_mem_cell = get_input_mem_cell()
rnn_inputs = {input_mem_cell, prev_char, unpack(current_state)} # if we're using a grid lstm, hand in a zero vec for the starting memory cell state
else
rnn_inputs = {prev_char, unpack(current_state)}
lst = protos.rnn:forward(rnn_inputs)
# lst is a list of [state1,state2,..stateN,output]. We want everything but last piece
current_state = {}
for i=1,state_size do table.insert(current_state, lst[i])
prediction = lst[#lst] # last element holds the log probabilities
else
# fill with uniform probabilities over characters (? hmm)
gprint('missing seed text, using uniform probability over first character')
gprint('--------------------------')
prediction = torch.Tensor(1, #ivocab):fill(1)/(#ivocab)
if opt.gpuid >= 0 and opt.opencl == 0 : prediction = prediction:cuda()
if opt.gpuid >= 0 and opt.opencl == 1 : prediction = prediction:cl()
# start sampling/argmaxing
for i=1, opt.length do
# log probabilities from the previous timestep
if opt.sample == 0 :
# use argmax
_, prev_char_ = prediction:max(2)
prev_char = prev_char_:resize(1)
else
# use sampling
prediction:div(opt.temperature) # scale by temperature
probs = torch.exp(prediction):squeeze()
probs:div(torch.sum(probs)) # renormalize so probs sum to one
prev_char = torch.multinomial(probs:float(), 1):resize(1):float()
# forward the rnn for next character
if checkpoint.opt.model == "grid_lstm" :
input_mem_cell = get_input_mem_cell()
rnn_inputs = {input_mem_cell, prev_char, unpack(current_state)} # if we're using a grid lstm, hand in a zero vec for the starting memory cell state
else
rnn_inputs = {prev_char, unpack(current_state)}
lst = protos.rnn:forward(rnn_inputs)
current_state = {}
for i=1,state_size do table.insert(current_state, lst[i])
prediction = lst[#lst] # last element holds the log probabilities
io.write(ivocab[prev_char[1]])
io.write('\n') io.flush()
################################################################################
##############Tensor Flow Model ###############################################
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/grid_rnn/python/kernel_tests/grid_rnn_test.py
# model.py
import numpy as np, tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import seq2seq
from tensorflow.contrib import grid_rnn
class Model(object):
'''
batchsize, seq_length, model "gridlstm",
rnn_size, vocab_size, num_layers,
'''
def __init__(self, args, infer=False):
self.args = args
if infer:
args.batch_size = 1
args.seq_length = 1
additional_cell_args = {}
if args.model == 'rnn': cell_fn = rnn_cell.BasicRNNCell
elif args.model == 'gru': cell_fn = rnn_cell.GRUCell
elif args.model == 'lstm': cell_fn = rnn_cell.BasicLSTMCell
elif args.model == 'gridlstm':
cell_fn = grid_rnn.Grid2LSTMCell
additional_cell_args.update({'use_peepholes': True, 'forget_bias': 1.0})
elif args.model == 'gridgru': cell_fn = grid_rnn.Grid2GRUCell
else: raise Exception("model type not supported: {}".format(args.model))
cell = cell_fn(args.rnn_size, **additional_cell_args)
self.cell = cell = rnn_cell.MultiRNNCell([cell] * args.num_layers)
self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
self.initial_state = cell.zero_state(args.batch_size, tf.float32)
with tf.variable_scope('rnnlm'):
#output projection (softmax) weights: rnn_size x vocab_size
softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.vocab_size])
#output bias: predicts 1 word/character ahead
softmax_b = tf.get_variable("softmax_b", [args.vocab_size])
#Format the input
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
inputs = tf.split(1, args.seq_length, tf.nn.embedding_lookup(embedding, self.input_data))
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
#Loop over the sequence
def loop(prev, _):
prev = tf.nn.xw_plus_b(prev, softmax_w, softmax_b)
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
outputs, last_state = seq2seq.rnn_decoder(inputs, self.initial_state, cell,
loop_function=loop if infer else None, scope='rnnlm')
output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
self.logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
self.probs = tf.nn.softmax(self.logits)
#Loss function and training by gradient descent (Adam, with gradient clipping)
loss = seq2seq.sequence_loss_by_example([self.logits],
[tf.reshape(self.targets, [-1])],
[tf.ones([args.batch_size * args.seq_length])],
args.vocab_size)
self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length #Sum_Loss
self.final_state = last_state
self.lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), args.grad_clip)
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.apply_gradients(zip(grads, tvars))
#Generate Sample
def sample(self, sess, chars, vocab, num=200, prime='The '):
state = self.cell.zero_state(1, tf.float32).eval()
for char in prime[:-1]:
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
feed = {self.input_data: x, self.initial_state: state}
[state] = sess.run([self.final_state], feed)
def weighted_pick(weights):
t = np.cumsum(weights)
s = np.sum(weights)
return (int(np.searchsorted(t, np.random.rand(1) * s)))
ret = prime
char = prime[-1]
for n in xrange(num):
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
feed = {self.input_data: x, self.initial_state: state}
[probs, state] = sess.run([self.probs, self.final_state], feed)
p = probs[0]
# sample = int(np.random.choice(len(p), p=p))
sample = weighted_pick(p)
pred = chars[sample]
ret += pred
char = pred
return ret
####################################################################################
####################################################################################
# train.py
import argparse, cPickle, os, time
import tensorflow as tf, pandas as pd
from model import Model
from utils import TextLoader
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/tinyshakespeare',
help='data directory containing input.txt')
parser.add_argument('--save_dir', type=str, default='save',
help='directory to store checkpointed models')
parser.add_argument('--rnn_size', type=int, default=128,
help='size of RNN hidden state')
parser.add_argument('--num_layers', type=int, default=2,
help='number of layers in the RNN')
parser.add_argument('--model', type=str, default='lstm',
help='rnn, gru, lstm, gridlstm, gridgru')
parser.add_argument('--batch_size', type=int, default=50,
help='minibatch size')
parser.add_argument('--seq_length', type=int, default=50,
help='RNN sequence length')
parser.add_argument('--num_epochs', type=int, default=50,
help='number of epochs')
parser.add_argument('--save_every', type=int, default=1000,
help='save frequency')
parser.add_argument('--grad_clip', type=float, default=5.,
help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=0.002,
help='learning rate')
parser.add_argument('--decay_rate', type=float, default=0.97,
help='decay rate for rmsprop')
args = parser.parse_args()
train(args)
def train(args):
data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
args.vocab_size = data_loader.vocab_size
with open(os.path.join(args.save_dir, 'config.pkl'), 'w') as f:
cPickle.dump(args, f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'w') as f:
cPickle.dump((data_loader.chars, data_loader.vocab), f)
model = Model(args)
with tf.Session() as sess:
tf.initialize_all_variables().run()
saver = tf.train.Saver(tf.all_variables())
train_loss_iterations = {'iteration': [], 'epoch': [], 'train_loss': [], 'val_loss': []}
for e in xrange(args.num_epochs):
sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
data_loader.reset_batch_pointer()
state = model.initial_state.eval()
for b in xrange(data_loader.num_batches):
start = time.time()
x, y = data_loader.next_batch()
feed = {model.input_data: x, model.targets: y, model.initial_state: state}
train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
end = time.time()
batch_idx = e * data_loader.num_batches + b
print "{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
.format(batch_idx,
args.num_epochs * data_loader.num_batches,
e, train_loss, end - start)
train_loss_iterations['iteration'].append(batch_idx)
train_loss_iterations['epoch'].append(e)
train_loss_iterations['train_loss'].append(train_loss)
if batch_idx % args.save_every == 0:
# evaluate
state_val = model.initial_state.eval()
avg_val_loss = 0
for x_val, y_val in data_loader.val_batches:
feed_val = {model.input_data: x_val, model.targets: y_val, model.initial_state: state_val}
val_loss, state_val = sess.run([model.cost, model.final_state], feed_val)  # no train_op here: don't update weights on validation data
avg_val_loss += val_loss / len(data_loader.val_batches)
print 'val_loss: {:.3f}'.format(avg_val_loss)
train_loss_iterations['val_loss'].append(avg_val_loss)
checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=e * data_loader.num_batches + b)
print "model saved to {}".format(checkpoint_path)
else:
train_loss_iterations['val_loss'].append(None)
pd.DataFrame(data=train_loss_iterations,
columns=train_loss_iterations.keys()).to_csv(os.path.join(args.save_dir, 'log.csv'))
if __name__ == '__main__':
main()
####################################################################################
####################################################################################
# utils.py
import cPickle, collections, os, codecs, numpy as np
class TextLoader(object):
def __init__(self, data_dir, batch_size, seq_length):
self.data_dir = data_dir
self.batch_size = batch_size
self.seq_length = seq_length
input_file = os.path.join(data_dir, "input.txt")
vocab_file = os.path.join(data_dir, "vocab.pkl")
tensor_file = os.path.join(data_dir, "data.npy")
if not (os.path.exists(vocab_file) and os.path.exists(tensor_file)):
print "reading text file"
self.preprocess(input_file, vocab_file, tensor_file)
else:
print "loading preprocessed files"
self.load_preprocessed(vocab_file, tensor_file)
self.create_batches()
self.reset_batch_pointer()
def preprocess(self, input_file, vocab_file, tensor_file):
with codecs.open(input_file, "r") as f:
data = f.read()
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
self.chars, _ = list(zip(*count_pairs))
self.vocab_size = len(self.chars)
self.vocab = dict(zip(self.chars, range(len(self.chars))))
with open(vocab_file, 'w') as f:
cPickle.dump(self.chars, f)
self.tensor = np.array(map(self.vocab.get, data))
np.save(tensor_file, self.tensor)
def load_preprocessed(self, vocab_file, tensor_file):
with open(vocab_file) as f:
self.chars = cPickle.load(f)
self.vocab_size = len(self.chars)
self.vocab = dict(zip(self.chars, range(len(self.chars))))
self.tensor = np.load(tensor_file)
self.num_batches = self.tensor.size / (self.batch_size * self.seq_length)
def create_batches(self):
self.num_batches = self.tensor.size / (self.batch_size * self.seq_length)
self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
xdata = self.tensor
ydata = np.copy(self.tensor)
ydata[:-1] = xdata[1:]
ydata[-1] = xdata[0]
self.x_batches = np.split(xdata.reshape(self.batch_size, -1), self.num_batches, 1)
self.y_batches = np.split(ydata.reshape(self.batch_size, -1), self.num_batches, 1)
validation_batches = int(self.num_batches * .2)
self.val_batches = zip(self.x_batches[-validation_batches:], self.y_batches[-validation_batches:])
self.x_batches = self.x_batches[:-validation_batches]
self.y_batches = self.y_batches[:-validation_batches]
self.num_batches -= validation_batches
def next_batch(self):
x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
self.pointer += 1
return x, y
def reset_batch_pointer(self):
self.pointer = 0
def visualize_result():
import pandas as pd
import matplotlib.pyplot as plt
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
files = [('GridGRU, 3 layers', 'save_gridgru3layers/log.csv'),
# ('GridGRU, 6 layers', 'save_gridgru6layers/log.csv'),
('GridLSTM, 3 layers', 'save_gridlstm3layers/log.csv'),
('GridLSTM, 6 layers', 'save_gridlstm6layers/log.csv'),
('Stacked GRU, 3 layers', 'save_gru3layers/log.csv'),
# ('Stacked GRU, 6 layers', 'save_gru6layers/log.csv'),
('Stacked LSTM, 3 layers', 'save_lstm3layers/log.csv'),
('Stacked LSTM, 6 layers', 'save_lstm6layers/log.csv'),
('Stacked RNN, 3 layers', 'save_rnn3layers/log.csv'),
('Stacked RNN, 6 layers', 'save_rnn6layers/log.csv')]
file1= './save/tinyshakespeare/{}'
for i, (k, v) in enumerate(files):
train_loss = pd.read_csv(file1.format(v)).groupby('epoch').mean()['train_loss']
plt.plot(train_loss.index.tolist(), train_loss.tolist(), label=k, lw=2, color=tableau20[i*2])
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Average training loss')
plt.show()
####################################################################################
####################################################################################
# sample.py
import argparse, cPickle, os, tensorflow as tf
from model import Model
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', type=str, default='save',
help='model directory to store checkpointed models')
parser.add_argument('-n', type=int, default=500, help='number of characters to sample')
parser.add_argument('--prime', type=str, default=' ', help='prime text')
args = parser.parse_args()
sample(args)
def sample(args):
with open(os.path.join(args.save_dir, 'config.pkl')) as f: saved_args = cPickle.load(f)
with open(os.path.join(args.save_dir, 'chars_vocab.pkl')) as f: chars, vocab = cPickle.load(f)
model = Model(saved_args, True)
with tf.Session() as sess:
tf.initialize_all_variables().run()
saver = tf.train.Saver(tf.all_variables())
ckpt = tf.train.get_checkpoint_state(args.save_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print model.sample(sess, chars, vocab, args.n, args.prime)
if __name__ == '__main__':
main()
| true
|
d63e66101b10e34a045704b620158258beeb3b1f
|
Python
|
adrianna-andrzejewska/UMZ_ZALICZENIE
|
/Zestaw1_3.py
|
UTF-8
| 628
| 2.84375
| 3
|
[] |
no_license
|
import pandas as pd
# loading pandas library
# loading data and changing float settings
df_data = pd.read_csv(
'train.tsv',
sep='\t',
names=[
'price', 'nr_rooms', 'meters', 'floors', 'location', 'description'])
pd.options.display.float_format = '{:.2f}'.format
# add csv file
df_description = pd.read_csv('description.csv')
# add columns names
df_description.columns = ['floors', 'name_floor']
# merge tables - the key is the column 'floors'
df_flat_name_floor = pd.merge(df_data, df_description, on=['floors'])
# save to file
df_flat_name_floor.to_csv('out2.csv', header=False, index=None)
| true
|
80347fc72b5b0ac5517f7ec95bed3bc26784ac81
|
Python
|
dersonf/aulaemvideo
|
/exercicios/ex089.py
|
UTF-8
| 733
| 3.796875
| 4
|
[] |
no_license
|
#!/usr/bin/python36
ficha = []
cont = 'S'
while cont != 'N':
nome = str(input('Nome: '))
nota1 = float(input('Nota 1: '))
nota2 = float(input('Nota 2: '))
media = (nota1 + nota2) / 2
ficha.append([nome, [nota1, nota2], media])
cont = str(input('Deseja continuar? [S/N]')).upper().strip()[0]
print(f'ID - NOME - MÉDIA')
print('='*30)
for c in range(0, len(ficha)):
print(f'{c:<2} - {ficha[c][0]:15} - {ficha[c][2]:>4}')
print('='*30)
while True:
aluno = int(input('Mostrar notas de qual aluno? (999 exit)'))
if aluno == 999:
break
else:
for c, v in enumerate(ficha):
if c == aluno:
print(f'As notas de {ficha[c][0]} são {ficha[c][1]}')
| true
|
9358597ab334356e6eb964bf484587843fc7e17a
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03041/s658319623.py
|
UTF-8
| 96
| 2.5625
| 3
|
[] |
no_license
|
n,k=list(map(int,input().split()))
s=list(input())
s[k-1]=chr(ord(s[k-1])+32)
print("".join(s))
| true
|
611bf70295049e21a3c92190c323662086534d3d
|
Python
|
Seralpa/AdventOfCode2018
|
/day10/p1_p2.py
|
UTF-8
| 1,537
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
import re
import operator
data = re.compile(r"position=< ?(-?[0-9]+), ?(-?[0-9]+)> velocity=< ?(-?[0-9]+), ?(-?[0-9]+)>")
class Point:
def __init__(self, pos: tuple[int, int], vel: tuple[int, int]):
self.pos = pos
self.vel = vel
def move(self):
self.pos = (self.pos[0] + self.vel[0], self.pos[1] + self.vel[1])
def print_pos(points: list[Point], minx, maxx):
prev_point = points[0]
for p in points:
if p == points[0]:
for _ in range(p.pos[0] - minx):
print('.', end = '')
print('#', end = '')
continue
if p.pos == prev_point.pos:
continue
if p.pos[1] == prev_point.pos[1]:
for _ in range(p.pos[0] - prev_point.pos[0] - 1):
print('.', end = '')
else:
for _ in range(maxx - prev_point.pos[0]):
print('.', end = '')
for _ in range(p.pos[1] - prev_point.pos[1]):
print('')
for _ in range(p.pos[0] - minx):
print('.', end = '')
print('#', end = '')
prev_point = p
print('')
points: list[Point] = list()
with open("input.txt", "r") as f:
for l in f:
matcher = data.match(l)
points.append(Point((int(matcher.group(1)), int(matcher.group(2))), (int(matcher.group(3)), int(matcher.group(4)))))
time = 0
while True:
minx = min(points, key = lambda x: x.pos[0]).pos[0]
maxx = max(points, key = lambda x: x.pos[0]).pos[0]
if maxx - minx < 100:
points.sort(key = lambda x: x.pos[0])
points.sort(key = lambda y: y.pos[1])
print('')
print_pos(points, minx, maxx)
print(f"{time = }")
input("Enter to keep going")
for p in points:
p.move()
time += 1
| true
|
c9e853fbeac886d4b01c6151c9df91730cf2c7cb
|
Python
|
namratha-21/5003-assignment12
|
/5003-tuplelist.py
|
UTF-8
| 150
| 3.03125
| 3
|
[] |
no_license
|
list =[("abc",93), ("mno",45), ("xyz",65)]
dict1=dict()
for student,score in list:
dict1.setdefault(student, []).append(score)
print(dict1)
| true
|
66bcf69005a4000037e069a02613abb34a667b64
|
Python
|
kwon-o/projecteuler
|
/051-100/055.py
|
UTF-8
| 841
| 3.4375
| 3
|
[] |
no_license
|
def deaching(int1):
str_lst = list(str(int1))
lst = []
for i in str_lst:
lst.append(int(i))
cnt = 0
for i in range(0,int(len(lst)/2)+1):
if lst[i] == lst[len(lst)-1-i]:
cnt += 1
else:
return False
break
if cnt == int(len(lst)/2) + 1:
return True
def rever(int1):
str_lst = list(str(int1))
str_lst.reverse()
rever_num = int(''.join(str_lst))
return rever_num
Lychrel_numbers = []
for i in range(10, 10001):
a = i
roop = 0
while deaching(a + rever(a)) == True or roop < 50:
if deaching(a + rever(a)) == True:
break
else:
a = a + rever(a)
roop += 1
if roop == 50:
Lychrel_numbers.append(i)
print(Lychrel_numbers)
print(len(Lychrel_numbers))
| true
|
7902914a3628103ba075487eb6d6429deb161bf2
|
Python
|
45-Hrishi/Pandas-For-Data-Science
|
/12_Missing data.py
|
UTF-8
| 1,166
| 3.03125
| 3
|
[] |
no_license
|
'''
1. Real-world data will often be missing values for a variety of reasons.
2. Many machine learning models and statistical methods cannot work with missing data points, in which case we need to decide what to do with the missing data.
3. When reading in missing values, pandas will display them as NaN values.
4. There are also newer specialized null values such as pd.NaT, which indicates that the missing value should be a timestamp.
options for missing data
keep it
remove it
replace it
Note that there is never a 100% correct approach that applies to all circumstances; it all depends on the exact situation you encounter.
Removing or Dropping missing data
Dropping a Row --> makes a sense when a lot of info is missing
Year Pop GDP Area
USA 1776 NaN NaN NaN
CANADA 1867 38 1.7 3.86
MEXICO 1821 126 1.22 0.76
---> Dropping the USA row is the right decision because a lot of data in that row is missing
---> Often a good idea to calculate the percentage of data that is dropped
---> Good choice if every row is missing that particular feature.
'''
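# A short illustrative snippet (added; not from the notes above) showing the keep / drop /
# replace options on a tiny made-up frame resembling the table in the docstring.
import numpy as np
import pandas as pd

df = pd.DataFrame({'Year': [1776, 1867, 1821],
                   'Pop': [np.nan, 38.0, 126.0],
                   'GDP': [np.nan, 1.7, 1.22],
                   'Area': [np.nan, 3.86, 0.76]},
                  index=['USA', 'CANADA', 'MEXICO'])

print(df.isna().mean())        # keep it, but first measure how much is missing per column
print(df.dropna())             # remove it: drops the USA row entirely
print(df.fillna(df.mean()))    # replace it, e.g. with each column's mean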
| true
|
5899d788d1d4bfea0b0144f39cf113c1b7853634
|
Python
|
Prismary/python
|
/bag simulator/bag-simulator.py
|
UTF-8
| 2,789
| 3.296875
| 3
|
[] |
no_license
|
import ctypes
ctypes.windll.kernel32.SetConsoleTitleW("Bag-Simulator by Prismary")
print("---------------------------")
print(" Bag-Simulator by Prismary")
print("---------------------------")
contents = []
try:
readsave = open("bag.txt", "r")
for line in readsave:
contents.append(line.replace("\n", ""))
readsave.close()
print("Contents loaded.")
except:
readsave = open("bag.txt", "x")
print("[!] Save file not found, no contents loaded.")
def save(contents):
save = open("bag.txt", "w")
for item in contents:
save.write(item+"\n")
print("\nAvailable commands: 'view', 'add', 'rename','remove', 'move', 'swap', 'empty'")
while True:
cmd = input("\n>> ")
if cmd.startswith("exit"):
break
elif cmd == "view":
print("\nBag contents: \n-------------")
for item in contents:
print(item)
print("-------------")
elif cmd.startswith("add"):
try:
contents.append(cmd.split(" ")[1])
save(contents)
print("Successfully added "+cmd.split(" ")[1]+" to the bag.")
except:
print("[!] Please define the item's name.")
elif cmd.startswith("rename"):
try:
pos = contents.index(cmd.split(" ")[1])
contents.remove(cmd.split(" ")[1])
contents.insert(pos, cmd.split(" ")[2])
save(contents)
print("Successfully renamed "+cmd.split(" ")[1]+" to "+cmd.split(" ")[2]+".")
except:
print("[!] Unable to rename "+cmd.split(" ")[1]+".")
elif cmd.startswith("remove"):
try:
contents.remove(cmd.split(" ")[1])
save(contents)
print("Successfully removed "+cmd.split(" ")[1]+" from the bag.")
except:
print("[!] Unable to remove "+cmd.split(" ")[1]+" from the bag.")
elif cmd.startswith("move"):
try:
pos = int(cmd.split(" ")[2])-1
contents.remove(cmd.split(" ")[1])
contents.insert(pos, cmd.split(" ")[1])
save(contents)
print("Successfully moved "+cmd.split(" ")[1]+" to position "+cmd.split(" ")[2]+".")
except:
print("[!] Unable to move "+cmd.split(" ")[1]+" to position "+cmd.split(" ")[2]+".")
elif cmd.startswith("swap"):
try:
pos1 = contents.index(cmd.split(" ")[1])
pos2 = contents.index(cmd.split(" ")[2])
contents.remove(cmd.split(" ")[1])
contents.insert(pos1, cmd.split(" ")[2])
contents.remove(cmd.split(" ")[2])
contents.insert(pos2, cmd.split(" ")[1])
save(contents)
print("Successfully swapped "+cmd.split(" ")[1]+" and "+cmd.split(" ")[2]+".")
except:
print("[!] Unable to swap "+cmd.split(" ")[1]+" and "+cmd.split(" ")[2]+".")
elif cmd.startswith("empty"):
print("[!] Are you sure you want to empty the bag? (y/n)")
if input("") == "y":
contents = []
save(contents)
print("Successfully emptied the bag.")
else:
print("[!] Unknown command.")
| true
|
478795cb884cdca1159c5956cc85bef7ee673860
|
Python
|
matsulib/cinema3-movies
|
/database/manage.py
|
UTF-8
| 551
| 2.5625
| 3
|
[] |
no_license
|
import os
import sys
import json
from urllib.parse import urlsplit
from pymongo import MongoClient
def delete_all(col):
col.delete_many({})
def insert_data(col, data):
col.insert_many(data)
if __name__ == '__main__':
url = os.getenv('MONGODB_URI', 'mongodb://localhost:27017/movies')
db_name = urlsplit(url).path[1:]
col = MongoClient(url)[db_name]['movies']
with open('{}/init.json'.format(os.path.dirname(sys.argv[0])), 'r') as f:
data = json.load(f)
delete_all(col)
insert_data(col, data)
| true
|
4441b1db4f563439056060b0f9f9c23be8b49057
|
Python
|
sakthi/Kingdoms
|
/kingdoms/websetup.py
|
UTF-8
| 2,008
| 2.515625
| 3
|
[] |
no_license
|
"""Setup the endless-insomnia application"""
import logging, os, hashlib, datetime
from sqlalchemy.orm.exc import NoResultFound
from kingdoms.config.environment import load_environment
from kingdoms.model import meta
from kingdoms.model import Player, UnitTypeDescriptor
log = logging.getLogger(__name__)
def setup_app(command, conf, vars):
"""Place any commands to setup kingdoms here"""
load_environment(conf.global_conf, conf.local_conf)
# Create the tables if they don't already exist
meta.metadata.create_all(bind=meta.engine)
if len(meta.Session.query(UnitTypeDescriptor).all()) <= 0:
footman = UnitTypeDescriptor()
footman.short_name = u'Mercenary Footman'
footman.long_name = u'Hired infantry with swords and shields.'
footman.tile_type = u'footman'
meta.Session.add(footman)
archer = UnitTypeDescriptor()
archer.short_name = u'Mercenary Archer'
archer.long_name = u'Hired infantry with long bows.'
archer.tile_type = u'archer'
meta.Session.add(archer)
meta.Session.commit()
count = meta.Session.query(Player).count()
if count == 0:
user_name = os.getenv('USER')
try:
admin = meta.Session.query(Player).filter(Player.login == user_name).one()
except NoResultFound:
#login
tmp = raw_input('What is your name, master (%s):' % user_name)
if len(tmp) > 0:
user_name = tmp
#password
password = raw_input('Enter your password:')
#sex
sex_txt = raw_input('Can I call you my daddy? (Keep empty for male by default)')
if len(sex_txt) <= 0:
sex = 1
else:
sex = 0
owner = Player()
owner.login = user_name
owner.password_hash = hashlib.md5(password).hexdigest()
owner.fullname = 'Great %s, master of realms. Ruler of mortals.' % user_name.capitalize()
owner.timestamp = datetime.datetime.now()
owner.email = user_name +'@kingdoms.com'
owner.sex = sex
meta.Session.add(owner)
meta.Session.commit()
finally:
print 'What is your wish master, %s?' % user_name
else:
print '%d users are known' % count
| true
|
2ee9ef013762978955da9adc6b91d675d9b3557d
|
Python
|
DwaynesWorld/deeplearning
|
/basic/linear_regression_excercise.py
|
UTF-8
| 2,098
| 2.9375
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import (mean_squared_error, classification_report)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# Read Data
housing_data = pd.read_csv('../__tensorflow__/02-TensorFlow-Basics/cal_housing_clean.csv')
# print(housing_data.head())
print(housing_data.describe().transpose())
# Perform a Train Test Split on the Data
X = housing_data.drop('medianHouseValue', axis=1)
y = housing_data['medianHouseValue']
# print(X.head())
# print(y.head())
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Transform Data
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = pd.DataFrame(
data=scaler.transform(X_train),
columns=X_train.columns,
index=X_train.index)
X_test = pd.DataFrame(
data=scaler.transform(X_test),
columns=X_test.columns,
index=X_test.index)
# Create Feature Columns
age = tf.feature_column.numeric_column('housingMedianAge')
rooms = tf.feature_column.numeric_column('totalRooms')
bedrooms = tf.feature_column.numeric_column('totalBedrooms')
pop = tf.feature_column.numeric_column('population')
households = tf.feature_column.numeric_column('households')
income = tf.feature_column.numeric_column('medianIncome')
feat_cols = [age, rooms, bedrooms, pop, households, income]
# Train
input_func = tf.estimator.inputs.pandas_input_fn(
x=X_train,
y=y_train,
batch_size=10,
num_epochs=1000,
shuffle=True)
model = tf.estimator.DNNRegressor(hidden_units=[6, 10, 6], feature_columns=feat_cols)
model.train(input_fn=input_func, steps=25000)
# Predict
predict_input_func = tf.estimator.inputs.pandas_input_fn(
x=X_test,
batch_size=10,
num_epochs=1,
shuffle=False)
predictions = list(model.predict(predict_input_func))
# Get Metrics
final_preds = []
for pred in predictions:
    final_preds.append(pred['predictions'][0])
# classification_report is meant for classifiers and fails on continuous targets,
# so only the regression error (RMSE) is reported here
print(mean_squared_error(y_test, final_preds)**0.5)
| true
|
fde4188c0a4eb59aae61bc7dfaf3dda5970d3c0e
|
Python
|
aCoffeeYin/pyreco
|
/repoData/vimoutliner-vimoutliner/allPythonContent.py
|
UTF-8
| 108,049
| 2.859375
| 3
|
[] |
no_license
|
__FILENAME__ = otl2html
#!/usr/bin/python2
# otl2html.py
# convert a tab-formatted outline from VIM to HTML
#
# Copyright 2001 Noel Henson All rights reserved
#
# ALPHA VERSION!!!
###########################################################################
# Basic function
#
# This program accepts text outline files and converts them
# to HTML. The outline levels are indicated by tabs. A line with no
# tabs is assumed to be part of the highest outline level.
#
# 10 outline levels are supported. These loosely correspond to the
# HTML H1 through H9 tags. Alphabetic, numeric and bullet formats
# are also supported.
#
# CSS support has been added.
#
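# A short illustration (a sketch added for clarity, not from the original author):
# an outline fragment where 'Dog' and 'Cat' are each indented one tab under 'Pets'
# comes out roughly as nested ordered lists, e.g.
#
#     <ol>
#     <li class="L1">Pets
#     <ol>
#     <li class="L2">Dog
#     <li class="L2">Cat
#     </ol>
#     </ol>
#
# (the exact markup depends on the style sheet and options handled below).
#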
###########################################################################
# include whatever modules we need
import sys
import re
import os
import time
###########################################################################
# global variables
formatMode = "indent"
copyright = ""
level = 0
div = 0
silentdiv = 0
slides = 0
hideComments = 0
showTitle = 1
inputFile = ""
outline = []
flatoutline = []
inBodyText = 0 # 0: no, 1: text, 2: preformatted text, 3: table
styleSheet = "nnnnnn.css"
inlineStyle = 0
###########################################################################
# function definitions
# usage
# print the simplest form of help
# input: none
# output: simple command usage is printed on the console
def showUsage():
print """
Usage:
otl2html.py [options] inputfile > outputfile
Options
-p Presentation: slide show output for use with
HtmlSlides.
-D First-level is divisions (<div> </div>) for making
pretty web pages.
-s sheet Use the specified style sheet with a link. This is the
default.
-S sheet Include the specified style sheet in-line the
output. For encapsulated style.
-T The first line is not the title. Treat it as
outline data
    -c              Hide comments (lines with [ as the first non-whitespace
                    character; ending with ] is optional).
-C copyright Override the internal copyright notice with the
one supplied in the quoted string following this
flag. Single or double quotes can be used.
-H Show the file syntax help.
output is on STDOUT
    Note: if neither -s nor -S is specified, otl2html.py will default
to -s. It will try to use the css file 'nnnnnn.css' if it
exists. If it does not exist, it will be created automatically.
"""
def showSyntax():
print """
Syntax
Syntax is Vim Outliner's normal syntax. The following are supported:
Text
: Body text marker. This text will wrap in the output.
        ; Preformatted text. This text will not wrap.
Tables
|| Table header line.
| Table and table columns. Example:
|| Name | Age | Animal |
| Kirby | 9 | Dog |
| Sparky | 1 | Bird |
| Sophia | 8 | Cat |
            This will cause an item to be left-justified.
              | whatever  |
            This will cause an item to be right-justified.
              |  whatever |
            This will cause an item to be centered.
              |  whatever  |
            This will cause an item to be default aligned.
              | whatever |
Character Styles
** Bold. Example: **Bold Text**
// Italic. Example: //Italic Text//
+++ Highlight. Example: +++Highlight Text+++
--- Strikeout. Example: ---Strikeout Text---
Insane ---+++//**Wow! This is insane!**//+++---
Just remember to keep it all on one line.
Horizontal Rule
---------------------------------------- (40 dashes).
Copyright
(c) or (C) Converts to a standard copyright symbol.
Including Images (for web pages)
[imagename] Examples:
[logo.gif] [photo.jpg] [car.png]
[http://i.a.cnn.net/cnn/.element/img/1.1/logo/logl.gif]
or from a database:
[http://www.lab.com/php/image.php?id=4]
Including links (for web pages)
[link text-or-image] Examples:
[about.html About] [http://www.cnn.com CNN]
or with an image:
[http://www.ted.com [http://www.ted.com/logo.png]]
Links starting with a '+' will be opened in a new
window. Eg. [+about.html About]
Including external files
!filename! Examples:
!file.txt!
Including external outlines (first line is parent)
!!filename!! Examples:
!!menu.otl!!
Including output from executing external programs
!!!program args!!! Examples:
!!!date +%Y%m%d!!!
Note:
When using -D, the top-level headings become divisions (<div>)
and will be created using a class of the heading name. Spaces
are not allowed. If a top-level heading begins with '_', it
will not be shown but the division name will be the same as
without the '_'. Example: _Menu will have a division name of
Menu and will not be shown.
"""
# getArgs
# Check for input arguments and set the necessary switches
# input: none
# output: possible console output for help, switch variables may be set
def getArgs():
global inputFile, debug, formatMode, slides, hideComments, copyright, \
styleSheet, inlineStyle, div, showTitle
if (len(sys.argv) == 1):
showUsage()
        sys.exit()
else:
for i in range(len(sys.argv)):
if (i != 0):
if (sys.argv[i] == "-d"):
debug = 1 # test for debug flag
elif (sys.argv[i] == "-?"): # test for help flag
showUsage() # show the help
sys.exit() # exit
elif (sys.argv[i] == "-p"): # test for the slides flag
slides = 1 # set the slides flag
elif (sys.argv[i] == "-D"): # test for the divisions flag
div = 1 # set the divisions flag
elif (sys.argv[i] == "-T"): # test for the no-title flag
showTitle = 0 # clear the show-title flag
elif (sys.argv[i] == "-c"): # test for the comments flag
hideComments = 1 # set the comments flag
elif (sys.argv[i] == "-C"): # test for the copyright flag
copyright = sys.argv[i + 1] # get the copyright
i = i + 1 # increment the pointer
elif (sys.argv[i] == "-s"): # test for the style sheet flag
styleSheet = sys.argv[i + 1] # get the style sheet name
formatMode = "indent" # set the format
i = i + 1 # increment the pointer
elif (sys.argv[i] == "-S"): # test for the style sheet flag
styleSheet = sys.argv[i + 1] # get the style sheet name
formatMode = "indent" # set the format
inlineStyle = 1
i = i + 1 # increment the pointer
elif (sys.argv[i] == "--help"):
showUsage()
sys.exit()
elif (sys.argv[i] == "-h"):
showUsage()
sys.exit()
elif (sys.argv[i] == "-H"):
showSyntax()
sys.exit()
elif (sys.argv[i][0] == "-"):
print "Error! Unknown option. Aborting"
sys.exit()
else: # get the input file name
inputFile = sys.argv[i]
# getLineLevel
# get the level of the current line (count the number of tabs)
# input: linein - a single line that may or may not have tabs at the beginning
# output: returns a number 1 is the lowest
def getLineLevel(linein):
strstart = linein.lstrip() # find the start of text in line
x = linein.find(strstart) # find the text index in the line
n = linein.count("\t", 0, x) # count the tabs
return(n + 1) # return the count + 1 (for level)
# getLineTextLevel
# get the level of the current line (count the number of tabs)
# input: linein - a single line that may or may not have tabs at the
# beginning
# output: returns a number 1 is the lowest
def getLineTextLevel(linein):
strstart = linein.lstrip() # find the start of text in line
x = linein.find(strstart) # find the text index in the line
n = linein.count("\t", 0, x) # count the tabs
n = n + linein.count(" ", 0, x) # count the spaces
return(n + 1) # return the count + 1 (for level)
# colonStrip(line)
# strip a leading ':', if it exists
# input: line
# output: returns a string with a stripped ':'
def colonStrip(line):
if (line[0] == ":"):
return line[1:].lstrip()
else:
return line
# semicolonStrip(line)
# strip a leading ';', if it exists
# input: line
# output: returns a string with a stripped ';'
def semicolonStrip(line):
if (line[0] == ";"):
return line[1:]
else:
return line
# dashStrip(line)
# strip a leading '-', if it exists
# input: line
# output: returns a string with a stripped '-'
def dashStrip(line):
if (line[0] == "-"):
return line[1:]
else:
return line
# pipeStrip(line)
# strip a leading '|', if it exists
# input: line
# output: returns a string with a stripped '|'
def pipeStrip(line):
if (line[0] == "|"):
return line[1:]
else:
return line
# plusStrip(line)
# strip a leading '+', if it exists
# input: line
# output: returns a string with a stripped '+'
def plusStrip(line):
if (line[0] == "+"):
return line[1:]
else:
return line
# handleBodyText
# print body text lines with a class indicating level, if style sheets
# are being used. otherwise print just <p>
# input: linein - a single line that may or may not have tabs at the beginning
# output: through standard out
def handleBodyText(linein, lineLevel):
global inBodyText
if (inBodyText == 2):
print "</pre>"
if (inBodyText == 3):
print "</table>"
print "<p",
if (styleSheet != ""):
print " class=\"P" + str(lineLevel) + "\"",
inBodyText = 1
print ">" + colonStrip(linein.strip()),
# handlePreformattedText
# print preformatted text lines with a class indicating level, if style sheets
# are being used. otherwise print just <pre>
# input: linein - a single line that may or may not have tabs at the beginning
# output: through standard out
def handlePreformattedText(linein, lineLevel):
global inBodyText
if (inBodyText == 1):
print "</p>"
if (inBodyText == 3):
print "</table>"
print "<pre",
if (styleSheet != ""):
print " class=\"PRE" + str(lineLevel) + "\"",
inBodyText = 2
print ">" + semicolonStrip(linein.strip()),
# isAlignRight
# return flag
# input: coldata, a string
def isAlignRight(coldata):
l = len(coldata)
    if (coldata[0:2] == "  ") and (coldata[l - 2:l] != "  "):
return 1
else:
return 0
# isAlignLeft
# return flag
# input: coldata, a string
def isAlignLeft(coldata):
l = len(coldata)
    if (coldata[0:2] != "  ") and (coldata[l - 2:l] == "  "):
return 1
else:
return 0
# isAlignCenter
# return flag
# input: coldata, a string
def isAlignCenter(coldata):
l = len(coldata)
    if (coldata[0:2] == "  ") and (coldata[l - 2:l] == "  "):
return 1
else:
return 0
# getColumnAlignment(string)
# return string
# input: coldata
# output:
# <td align="left"> or <td align="right"> or <td align="center"> or <td>
def getColumnAlignment(coldata):
if isAlignCenter(coldata):
return '<td align="center">'
if isAlignRight(coldata):
return '<td align="right">'
if isAlignLeft(coldata):
return '<td align="left">'
return '<td>'
# handleTableColumns
# return the source for a row's columns
# input: linein - a single line that may or may not have tabs at the beginning
# output: string with the columns' source
def handleTableColumns(linein, lineLevel):
out = ""
coldata = linein.strip()
coldata = coldata.split("|")
for i in range(1, len(coldata) - 1):
out += getColumnAlignment(coldata[i])
out += coldata[i].strip() + '</td>'
return out
# handleTableHeaders
# return the source for a row's headers
# input: linein - a single line that may or may not have tabs at the beginning
# output: string with the columns' source
def handleTableHeaders(linein, lineLevel):
out = ""
coldata = linein.strip()
coldata = coldata.split("|")
for i in range(2, len(coldata) - 1):
out += getColumnAlignment(coldata[i])
out += coldata[i].strip() + '</td>'
out = out.replace('<td', '<th')
out = out.replace('</td', '</th')
return out
# handleTableRow
# print a table row
# input: linein - a single line that may or may not have tabs at the beginning
# output: out
def handleTableRow(linein, lineLevel):
out = "<tr>"
if (lineLevel == linein.find("|| ") + 1):
out += handleTableHeaders(linein, lineLevel)
else:
out += handleTableColumns(linein, lineLevel)
out += "</tr>"
return out
# handleTable
# print a table, starting with a <TABLE> tag if necessary
# input: linein - a single line that may or may not have tabs at the beginning
# output: through standard out
def handleTable(linein, lineLevel):
global inBodyText
if (inBodyText == 1):
print "</p>"
if (inBodyText == 2):
print "</pre>"
if (inBodyText != 3):
print "<table class=\"TAB" + str(lineLevel) + "\">"
inBodyText = 3
print handleTableRow(linein, lineLevel),
# linkOrImage
# if there is a link to an image or another page, process it
# input: line
# output: modified line
def linkOrImage(line):
line = re.sub('\[(\S+?)\]', '<img src="\\1" alt="\\1">', line)
line = re.sub('\[(\S+)\s(.*?)\]', '<a href="\\1">\\2</a>', line)
line = re.sub('(<a href=")\+(.*)"\>', '\\1\\2" target=_new>', line)
line = line.replace('<img src="X" alt="X">', '[X]')
line = line.replace('<img src="_" alt="_">', '[_]')
return line
# tabs
# return a string with 'count' tabs
# input: count
# output: string of tabs
def tabs(count):
out = ""
if (count == 0):
return ""
for i in range(0, count - 1):
out = out + "\t"
return out
# includeFile
# include the specified file, if it exists
# input: line and lineLevel
# output: line is replaced by the contents of the file
def includeFile(line, lineLevel):
filename = re.sub('!(\S+?)!', '\\1', line.strip())
incfile = open(filename, "r")
linein = incfile.readline()
while linein != "":
linein = re.sub('^', tabs(lineLevel), linein)
processLine(linein)
linein = incfile.readline()
incfile.close()
return
# includeOutline
# include the specified file, if it exists
# input: line and lineLevel
# output: line is replaced by the contents of the file
def includeOutline(line, lineLevel):
filename = re.sub('!!(\S+?)!!', '\\1', line.strip())
incfile = open(filename, "r")
linein = incfile.readline()
linein = re.sub('^', tabs(lineLevel), linein)
processLine(linein)
linein = incfile.readline()
while linein != "":
linein = re.sub('^', tabs(lineLevel + 1), linein)
processLine(linein)
linein = incfile.readline()
incfile.close()
return
# execProgram
# execute the specified program
# input: line
# output: program specified is replaced by program output
def execProgram(line):
program = re.sub('.*!!!(.*)!!!.*', '\\1', line.strip())
child = os.popen(program)
out = child.read()
err = child.close()
out = re.sub('!!!(.*)!!!', out, line)
processLine(out)
if err:
raise RuntimeError('%s failed w/ exit code %d' % (program, err))
return
# divName
# create a name for a division
# input: line
# output: division name
def divName(line):
global silentdiv
line = line.strip()
if (line[0] == '_'):
silentdiv = 1
line = line[1:]
line = line.replace(' ', '_')
return'<div class="' + line + '">'
# getTitleText(line)
# extract some meaningful text to make the document title from the line
# input: line
# output: modified line
def getTitleText(line):
out = re.sub('.*#(.*)#.*', '\\1', line)
out = re.sub('<.*>', '', out)
# if (out != ""): out = re.sub('\"(.*?)\"', '\\1', line)
return(out)
# stripTitleText(line)
# strip the title text if it is enclosed in double-quotes
# input: line
# output: modified line
def stripTitleText(line):
out = re.sub('#\W*.*#', '', line)
return(out)
# beautifyLine(line)
# do some optional, simple beautification of the text in a line
# input: line
# output: modified line
def beautifyLine(line):
if (line.strip() == "-" * 40):
return "<br><hr><br>"
out = line
line = ""
while (line != out):
line = out
# out = replace(out, '---', '<strike>', 1)
        # apply strike-out only when the first non-whitespace character is not ';'
        if (line.lstrip()[:1] != ";"):
out = re.sub('\-\-\-(.*?)\-\-\-', '<strike>\\1</strike>', out)
out = linkOrImage(out)
# out = replace(out, '**', '<strong>', 1)
out = re.sub('\*\*(.*?)\*\*', '<strong>\\1</strong>', out)
# out = replace(out, '//', '<i>', 1)
out = re.sub('\/\/(.*?)\/\/', '<i>\\1</i>', out)
# out = replace(out, '+++', '<code>', 1)
out = re.sub('\+\+\+(.*?)\+\+\+', '<code>\\1</code>', out)
out = re.sub('\(c\)', '©', out)
out = re.sub('\(C\)', '©', out)
return out
# closeLevels
# generate the number of </ul> or </ol> tags necessary to properly finish
# input: format - a string indicating the mode to use for formatting
# level - an integer between 1 and 9 that show the current level
# (not to be confused with the level of the current line)
# output: through standard out
def closeLevels():
global level, formatMode
while (level > 0):
if (formatMode == "bullets"):
print "</ul>"
if (formatMode == "alpha") or (formatMode == "numeric") or \
(formatMode == "roman") or (formatMode == "indent"):
print "</ol>"
level = level - 1
# processLine
# process a single line
# input: linein - a single line that may or may not have tabs at the beginning
# format - a string indicating the mode to use for formatting
# level - an integer between 1 and 9 that show the current level
# (not to be confused with the level of the current line)
# output: through standard out
def processLine(linein):
global level, formatMode, slides, hideComments, inBodyText, styleSheet, \
inlineStyle, div, silentdiv
if (linein.lstrip() == ""):
return
linein = beautifyLine(linein)
lineLevel = getLineLevel(linein)
if ((hideComments == 0) or (lineLevel != linein.find("[") + 1)):
if (lineLevel > level): # increasing depth
while (lineLevel > level):
if (formatMode == "indent" or formatMode == "simple"):
if (inBodyText == 1):
print"</p>"
inBodyText = 0
elif (inBodyText == 2):
print"</pre>"
inBodyText = 0
elif (inBodyText == 3):
print"</table>"
inBodyText = 0
if not (div == 1 and lineLevel == 1):
print "<ol>"
else:
sys.exit("Error! Unknown formatMode type")
level = level + 1
elif (lineLevel < level): # decreasing depth
while (lineLevel < level):
if (inBodyText == 1):
print"</p>"
inBodyText = 0
elif (inBodyText == 2):
print"</pre>"
inBodyText = 0
elif (inBodyText == 3):
print"</table>"
inBodyText = 0
print "</ol>"
level = level - 1
if (div == 1 and level == 1):
if (silentdiv == 0):
print'</ol>'
else:
silentdiv = 0
print'</div>'
else:
print # same depth
if (div == 1 and lineLevel == 1):
if (lineLevel != linein.find("!") + 1):
print divName(linein)
if (silentdiv == 0):
print "<ol>"
if (slides == 0):
if (lineLevel == linein.find(" ") + 1) or \
(lineLevel == linein.find(":") + 1):
if (inBodyText != 1):
handleBodyText(linein, lineLevel)
elif (colonStrip(linein.strip()) == ""):
print "</p>"
handleBodyText(linein, lineLevel)
else:
print colonStrip(linein.strip()),
elif (lineLevel == linein.find(";") + 1):
if (inBodyText != 2):
handlePreformattedText(linein, lineLevel)
elif (semicolonStrip(linein.strip()) == ""):
print "</pre>"
handlePreformattedText(linein, lineLevel)
else:
print semicolonStrip(linein.strip()),
elif (lineLevel == linein.find("|") + 1):
if (inBodyText != 3):
handleTable(linein, lineLevel)
elif (pipeStrip(linein.strip()) == ""):
print "</table>"
handleTable(linein, lineLevel)
else:
print handleTableRow(linein, lineLevel),
elif (lineLevel == linein.find("!!!") + 1):
execProgram(linein)
elif (lineLevel == linein.find("!!") + 1):
includeOutline(linein, lineLevel)
elif (lineLevel == linein.find("!") + 1):
includeFile(linein, lineLevel)
else:
if (inBodyText == 1):
print"</p>"
inBodyText = 0
elif (inBodyText == 2):
print"</pre>"
inBodyText = 0
elif (inBodyText == 3):
print"</table>"
inBodyText = 0
if (silentdiv == 0):
print "<li",
if (styleSheet != ""):
if (lineLevel == linein.find("- ") + 1):
print " class=\"LB" + str(lineLevel) + "\"",
print ">" + \
dashStrip(linein.strip()),
elif (lineLevel == linein.find("+ ") + 1):
print " class=\"LN" + str(lineLevel) + "\"",
print ">" + \
plusStrip(linein.strip()),
else:
print " class=\"L" + str(lineLevel) + "\"",
print ">" + linein.strip(),
else:
silentdiv = 0
else:
if (lineLevel == 1):
if (linein[0] == " "):
if (inBodyText == 0):
handleBodyText(linein, lineLevel)
else:
print linein.strip(),
else:
print "<address>"
print linein.strip(),
print "</address>\n"
else:
if (lineLevel == linein.find(" ") + 1) or \
(lineLevel == linein.find(":") + 1):
if (inBodyText == 0):
handleBodyText(linein, lineLevel)
else:
print linein.strip(),
else:
if (inBodyText == 1):
print"</p>"
inBodyText = 0
print "<li",
if (styleSheet != ""):
print " class=\"LI.L" + str(lineLevel) + "\"",
print ">" + linein.strip(),
# flatten
# Flatten a subsection of an outline. The index passed is the
# outline section title. All sublevels that are only one level
# deeper are included in the current subsection. Then there is
# a recursion for those items listed in the subsection. Exits
# when the next line to be processed is of the same or lower
# outline level. (lower means shallower)
# input: idx - the index into the outline. The indexed line is the title.
# output: adds reformatted lines to flatoutline[]
def flatten(idx):
if (outline[idx] == ""):
return
if (len(outline) <= idx):
return
titleline = outline[idx]
titlelevel = getLineLevel(titleline)
if (getLineLevel(outline[idx + 1]) > titlelevel):
if (titleline[titlelevel - 1] != " "):
flatoutline.append(titleline.lstrip())
exitflag = 0
while (exitflag == 0):
if (idx < len(outline) - 1):
idx = idx + 1
currlevel = getLineLevel(outline[idx])
if (currlevel == titlelevel + 1):
if (currlevel == outline[idx].find(" ") + 1):
flatoutline.append("\t " + outline[idx].lstrip())
else:
flatoutline.append("\t" + outline[idx].lstrip())
elif (currlevel <= titlelevel):
exitflag = 1
else:
exitflag = 1
# level = titlelevel # FIXME level assigned but never used
return
def createCSS():
global styleSheet
output = """ /* copyright notice and filename */
body {
font-family: helvetica, arial, sans-serif;
font-size: 10pt;
}
/* title at the top of the page */
H1 {
font-family: helvetica, arial, sans-serif;
font-size: 14pt;
font-weight: bold;
text-align: center;
color: black;
background-color: #ddddee;
padding-top: 20px;
padding-bottom: 20px;
}
H2 {
font-family: helvetica, arial, sans-serif;
font-size: 12pt;
font-weight: bold;
text-align: left;
color: black;
}
H3 {
font-family: helvetica, arial, sans-serif;
font-size: 12pt;
text-align: left;
color: black;
}
H4 {
font-family: helvetica, arial, sans-serif;
font-size: 12pt;
text-align: left;
color: black;
}
H5 {
font-family: helvetica, arial, sans-serif;
font-size: 10pt;
text-align: left;
color: black;
}
/* outline level spacing */
OL {
margin-left: 1.0em;
padding-left: 0;
padding-bottom: 8pt;
}
/* global heading settings */
LI {
font-family: helvetica, arial, sans-serif;
color: black;
font-weight: normal;
list-style: lower-alpha;
padding-top: 4px;
}
/* level 1 heading overrides */
LI.L1 {
font-size: 12pt;
font-weight: bold;
list-style: none;
}
/* level 2 heading overrides */
LI.L2 {
font-size: 10pt;
font-weight: bold;
list-style: none;
}
/* level 3 heading overrides */
LI.L3 {
font-size: 10pt;
list-style: none;
}
/* level 4 heading overrides */
LI.L4 {
font-size: 10pt;
list-style: none;
}
/* level 5 heading overrides */
LI.L5 {
font-size: 10pt;
list-style: none;
}
/* level 6 heading overrides */
LI.L6 {
font-size: 10pt;
list-style: none;
}
/* level 7 heading overrides */
LI.L7 {
font-size: 10pt;
list-style: none;
}
/* level 1 bullet heading overrides */
LI.LB1 {
font-size: 12pt;
font-weight: bold;
list-style: disc;
}
/* level 2 bullet heading overrides */
LI.LB2 {
font-size: 10pt;
font-weight: bold;
list-style: disc;
}
/* level 3 bullet heading overrides */
LI.LB3 {
font-size: 10pt;
list-style: disc;
}
/* level 4 bullet heading overrides */
LI.LB4 {
font-size: 10pt;
list-style: disc;
}
/* level 5 bullet heading overrides */
LI.LB5 {
font-size: 10pt;
list-style: disc;
}
/* level 6 bullet heading overrides */
LI.LB6 {
font-size: 10pt;
list-style: disc;
}
/* level 7 bullet heading overrides */
LI.LB7 {
font-size: 10pt;
list-style: disc;
}
/* level 1 numeric heading overrides */
LI.LN1 {
font-size: 12pt;
font-weight: bold;
list-style: decimal;
}
/* level 2 numeric heading overrides */
LI.LN2 {
font-size: 10pt;
font-weight: bold;
list-style: decimal;
}
/* level 3 numeric heading overrides */
LI.LN3 {
font-size: 10pt;
list-style: decimal;
}
/* level 4 numeric heading overrides */
LI.LN4 {
font-size: 10pt;
list-style: decimal;
}
/* level 5 numeric heading overrides */
LI.LN5 {
font-size: 10pt;
list-style: decimal;
}
/* level 6 numeric heading overrides */
LI.LN6 {
font-size: 10pt;
list-style: decimal;
}
/* level 7 numeric heading overrides */
LI.LN7 {
font-size: 10pt;
list-style: decimal;
}
/* body text */
P {
font-family: helvetica, arial, sans-serif;
font-size: 9pt;
font-weight: normal;
color: darkgreen;
}
/* preformatted text */
PRE {
font-family: fixed, monospace;
font-size: 9pt;
font-weight: normal;
color: darkblue;
}
TABLE {
margin-top: 1em;
font-family: helvetica, arial, sans-serif;
font-size: 12pt;
font-weight: normal;
border-collapse: collapse;
}
TH {
border: 1px solid black;
padding: 0.5em;
background-color: #eeddee;
}
TD {
border: 1px solid black;
padding: 0.5em;
background-color: #ddeeee;
}
CODE {
background-color: yellow;
}
TABLE.TAB1 {
margin-top: 1em;
font-family: helvetica, arial, sans-serif;
font-size: 12pt;
font-weight: normal;
border-collapse: collapse;
}
TABLE.TAB2 {
margin-top: 1em;
font-family: helvetica, arial, sans-serif;
font-size: 11pt;
font-weight: normal;
border-collapse: collapse;
}
TABLE.TAB3 {
margin-top: 1em;
font-family: helvetica, arial, sans-serif;
font-size: 10pt;
font-weight: normal;
border-collapse: collapse;
}
TABLE.TAB4 {
margin-top: 1em;
font-family: helvetica, arial, sans-serif;
font-size: 10pt;
font-weight: normal;
border-collapse: collapse;
}
TABLE.TAB5 {
margin-top: 1em;
font-family: helvetica, arial, sans-serif;
font-size: 10pt;
font-weight: normal;
border-collapse: collapse;
}
TABLE.TAB6 {
margin-top: 1em;
font-family: helvetica, arial, sans-serif;
font-size: 10pt;
font-weight: normal;
    border-collapse: collapse;
}
"""
file = open(styleSheet, "w")
file.write(output)
def printHeader(linein):
global styleSheet, inlineStyle
    print """<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"
\"http://www.w3.org/TR/html4/strict.dtd\">
<html><head><title>""" + getTitleText(linein) + "</title></head>"
try:
file = open(styleSheet, "r")
except IOError:
createCSS()
file = open(styleSheet, "r")
if (styleSheet != "" and inlineStyle == 0):
print "<link href=\"" + styleSheet + \
"\" rel=\"stylesheet\" type=\"text/css\">"
if (styleSheet != "" and inlineStyle == 1):
print "<style type=\"text/css\">"
csslinein = file.readline()
while csslinein != "":
print csslinein,
csslinein = file.readline()
file.close()
print "</style></head>"
print "<body>"
def printFirstLine(linein):
print '''<div class="DocTitle">
<h1>%s</h1>
</div>
<div class="MainPage">''' % stripTitleText(linein.strip())
def printFooter():
global slides, div
print "</div>"
if (slides == 0 and div == 0):
print "<div class=\"Footer\">"
print "<hr>"
print copyright
print "<br>"
print inputFile + "   " + \
time.strftime("%Y/%m/%d %H:%M", time.localtime(time.time()))
print "</div>"
print "</body></html>"
def main():
global showTitle
getArgs()
file = open(inputFile, "r")
if (slides == 0):
firstLine = beautifyLine(file.readline().strip())
printHeader(firstLine)
if (showTitle == 1):
printFirstLine(firstLine)
linein = beautifyLine(file.readline().strip())
else:
linein = firstLine
while linein != "":
processLine(linein)
linein = file.readline()
closeLevels()
else:
linein = beautifyLine(file.readline().strip())
outline.append(linein)
linein = file.readline().strip()
while linein != "":
outline.append("\t" + linein)
linein = file.readline().rstrip()
for i in range(0, len(outline) - 1):
flatten(i)
printHeader(flatoutline[0])
for i in range(0, len(flatoutline)):
processLine(flatoutline[i])
printFooter()
file.close()
if __name__ == "__main__":
main()
########NEW FILE########
__FILENAME__ = otl2latex
usage="""
otl2latex.py
Translate a Vim Outliner file to a LaTeX document.
Usage:
otl2latex.py -[abp] file.otl [file.tex]
-a: Output to article class
-b: Output to book class
-p: Output to Beamer (presentation) class (default)
Author: Serge Rey <sjsrey@gmail.com>
Version 0.1 (2007-01-21)
"""
import os,sys
class Line:
"""Class for markup lines"""
def __init__(self, content):
ntabs=content.count("\t")
content=content.lstrip("\t")
level = ntabs - content.count("\t")
self.level=level
self.content = content
self.markup=0
if content[0]=="|":
self.markup=1
#3 lines added here
self.bullet=0
if len(content) > 2 and (content[2]=='*' or content[1]=='*'):
self.bullet=1
#print "%d: %s"%(self.bullet,content)
class Manager:
"""Abstract class for LaTeX document classes"""
def __init__(self, content, fileOut):
self.content=content
self.fileOut=open(fileOut,'w')
self.parse()
self.fileOut.write(self.markup)
self.fileOut.close()
def parse(self):
self.lines=[ Line(line) for line in self.content]
preambleStart=0
nl=len(self.lines)
id=zip(range(nl),self.lines)
level1=[i for i,line in id if line.level==0]
preambleEnd=level1[1]
preamble=self.lines[0:preambleEnd]
self.level1=level1
preambleMarkup=[]
for line in preamble:
if line.content.count("@"):
tmp=line.content.split("@")[1]
tmp=tmp.split()
env=tmp[0]
content=" ".join(tmp[1:])
mu="\\%s{%s}"%(env,content)
preambleMarkup.append(mu)
self.preamble=preambleMarkup
self.preambleLines=preamble
self.documentLines=self.lines[preambleEnd:]
class Beamer(Manager):
"""Manager for Beamer document class"""
def __init__(self, content,fileOut):
self.top1="""
\documentclass[nototal,handout]{beamer}
\mode<presentation>
{
\usetheme{Madrid}
\setbeamercovered{transparent}
}
\usepackage{verbatim}
\usepackage{fancyvrb}
\usepackage[english]{babel}
\usepackage[latin1]{inputenc}
\usepackage{times}
\usepackage{tikz}
\usepackage[T1]{fontenc}
\usepackage{graphicx} %sjr added
\graphicspath{{figures/}}
\usepackage{hyperref}"""
self.top2="""
% Delete this, if you do not want the table of contents to pop up at
% the beginning of each subsection:
\AtBeginSubsection[]
{
\\begin{frame}<beamer>
\\frametitle{Outline}
\\tableofcontents[currentsection,currentsubsection]
\end{frame}
}
% If you wish to uncover everything in a step-wise fashion, uncomment
% the following command:
\\beamerdefaultoverlayspecification{<+->}
\\begin{document}
\\begin{frame}
\\titlepage
\end{frame}
\\begin{frame}
\\frametitle{Outline}
\\tableofcontents[pausesections]
% You might wish to add the option [pausesections]
\end{frame}
"""
self.bulletLevel = 0
Manager.__init__(self, content, fileOut)
def itemize(self,line):
nstars=line.content.count("*")
content=line.content.lstrip("|").lstrip().lstrip("*")
self.currentBLevel = nstars - content.count("*")
stuff=[]
if self.currentBLevel == self.bulletLevel and line.bullet:
mu='\\item '+line.content.lstrip("|").lstrip().lstrip("*")
elif line.bullet and self.currentBLevel > self.bulletLevel:
self.bulletLevel += 1
stuff.append("\\begin{itemize}\n")
mu='\\item '+line.content.lstrip("|").lstrip().lstrip("*")
elif self.currentBLevel < self.bulletLevel and line.bullet:
self.bulletLevel -= 1
stuff.append("\\end{itemize}\n")
mu='\\item '+line.content.lstrip("|").lstrip().lstrip("*")
elif self.currentBLevel < self.bulletLevel:
self.bulletLevel -= 1
stuff.append("\\end{itemize}\n")
mu=line.content.lstrip("|")
else:
panic()
return stuff,mu
def parse(self):
Manager.parse(self)
#print self.content
#print self.lines
#print self.level1
#for info in self.preamble:
# print info
# do my own preamble
field=("author ","instituteShort ","dateShort ","date ","subtitle ",
"title ", "institute ", "titleShort ")
pattern=["@"+token for token in field]
f=zip(field,pattern)
d={}
for field,pattern in f:
t=[line.content for line in self.preambleLines if line.content.count(pattern)]
if t:
d[field]= t[0].split(pattern)[1].strip()
else:
d[field]=""
preamble="\n\n\\author{%s}\n"%d['author ']
preamble+="\\institute[%s]{%s}\n"%(d['instituteShort '],d['institute '])
preamble+="\\title[%s]{%s}\n"%(d['titleShort '],d['title '])
preamble+="\\subtitle{%s}\n"%(d['subtitle '])
preamble+="\\date[%s]{%s}\n"%(d['dateShort '],d['date '])
print self.preamble
self.preamble=preamble
body=[]
prev=0
frameOpen=0
blockOpen=0
frameCount=0
blockCount=0
for line in self.documentLines:
if line.level==0:
for i in range(0,self.bulletLevel):
self.bulletLevel -= 1
body.append("\\end{itemize}\n")
if blockOpen:
body.append("\\end{block}")
blockOpen=0
if frameOpen:
body.append("\\end{frame}")
frameOpen=0
mu="\n\n\n\\section{%s}"%line.content.strip()
elif line.level==1:
for i in range(0,self.bulletLevel):
self.bulletLevel -= 1
body.append("\\end{itemize}\n")
if blockOpen:
body.append("\\end{block}")
blockOpen=0
if frameOpen:
body.append("\\end{frame}")
frameOpen=0
mu="\n\n\\subsection{%s}"%line.content.strip()
elif line.level==2:
# check if this frame has blocks or is nonblocked
if line.markup:
if line.bullet or self.bulletLevel:
stuff,mu=self.itemize(line)
if len(stuff) > 0:
for i in stuff:
body.append(i)
else:
mu=line.content.lstrip("|")
else:
for i in range(0,self.bulletLevel):
self.bulletLevel -= 1
body.append("\\end{itemize}\n")
if blockOpen:
body.append("\\end{block}")
blockOpen=0
if frameOpen:
body.append("\\end{frame}")
else:
frameOpen=1
# check for verbatim here
tmp=line.content.strip()
if tmp.count("@vb"):
tmp=tmp.split("@")[0]
mu="\n\n\\begin{frame}[containsverbatim]\n\t\\frametitle{%s}\n"%tmp
else:
mu="\n\n\\begin{frame}\n\t\\frametitle{%s}\n"%tmp
frameCount+=1
elif line.level==3:
# check if it is a block or body content
if line.markup:
if line.bullet or self.bulletLevel:
stuff,mu=self.itemize(line)
if len(stuff) > 0:
for i in stuff:
body.append(i)
else:
mu=line.content.lstrip("\t")
mu=mu.lstrip("|")
else:
for i in range(0,self.bulletLevel):
self.bulletLevel -= 1
body.append("\\end{itemize}\n")
#block title
if blockOpen:
body.append("\\end{block}")
else:
blockOpen=1
mu="\n\\begin{block}{%s}\n"%line.content.strip()
blockCount+=1
else:
mu=""
body.append(mu)
for i in range(0,self.bulletLevel):
self.bulletLevel -= 1
body.append("\\end{itemize}\n")
if blockOpen:
body.append("\\end{block}")
if frameOpen:
body.append("\\end{frame}")
self.body=" ".join(body)
self.markup=self.top1+self.preamble+self.top2
self.markup+=self.body
self.markup+="\n\\end{document}\n"
print self.markup
# Process command line arguments
args = sys.argv
nargs=len(args)
dispatch={}
dispatch['beamer']=Beamer
inputFileName=None
outputFileName=None
def printUsage():
print usage
sys.exit()
if nargs==1:
printUsage()
else:
docType='beamer'
options=args[1]
if options.count("-"):
if options.count("a"):
docType='article'
elif options.count("b"):
docType='book'
if nargs==2:
printUsage()
elif nargs==3:
inputFileName=args[2]
elif nargs==4:
inputFileName=args[2]
outputFileName=args[3]
else:
printUsage()
elif nargs==2:
inputFileName=args[1]
elif nargs==3:
inputFileName=args[1]
        outputFileName=args[2]
else:
printUsage()
# Dispatch to correct document class manager
fin=open(inputFileName,'r')
content=fin.readlines()
fin.close()
dispatch[docType](content,outputFileName)
########NEW FILE########
__FILENAME__ = otl2ooimpress
#!/usr/bin/python2
# otl2ooimpress.py
# needs otl2ooimpress.sh to work in an automated way
#############################################################################
#
# Tool for Vim Outliner files to Open Office Impress files.
# Copyright (C) 2003 by Noel Henson, all rights reserved.
#
# This tool is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
#
#############################################################################
# ALPHA VERSION!!!
###########################################################################
# Basic function
#
# This program accepts VO outline files and converts them
# to the zipped XML files required by Open Office Impress.
#
# 10 outline levels are supported. These loosely correspond to the
# HTML H1 through H9 tags.
#
###########################################################################
# include whatever modules we need
import sys
###########################################################################
# global variables
level = 0
inputfile = ""
outline = []
flatoutline = []
pageNumber = 0
inPage = 0
debug = 0
###########################################################################
# function definitions
# usage
# print the simplest form of help
# input: none
# output: simple command usage is printed on the console
def showUsage():
print
print "Usage:"
print "otl2ooimpress.py [options] inputfile > outputfile"
print ""
print "output is on STDOUT"
print
# getArgs
# Check for input arguments and set the necessary switches
# input: none
# output: possible console output for help, switch variables may be set
def getArgs():
global inputfile, debug
if (len(sys.argv) == 1):
showUsage()
        sys.exit()
else:
for i in range(len(sys.argv)):
if (i != 0):
if (sys.argv[i] == "-d"):
debug = 1 # test for debug flag
elif (sys.argv[i] == "-?"): # test for help flag
showUsage() # show the help
sys.exit() # exit
elif (sys.argv[i] == "--help"):
showUsage()
sys.exit()
elif (sys.argv[i] == "-h"):
showUsage()
sys.exit()
elif (sys.argv[i][0] == "-"):
print "Error! Unknown option. Aborting"
sys.exit()
else: # get the input file name
inputfile = sys.argv[i]
# getLineLevel
# get the level of the current line (count the number of tabs)
# input: linein - a single line that may or may not have tabs at the beginning
# output: returns a number 1 is the lowest
def getLineLevel(linein):
strstart = linein.lstrip() # find the start of text in line
x = linein.find(strstart) # find the text index in the line
n = linein.count("\t", 0, x) # count the tabs
return(n + 1) # return the count + 1 (for level)
# getLineTextLevel
# get the level of the current line (count the number of tabs)
# input: linein - a single line that may or may not have tabs at the beginning
# output: returns a number 1 is the lowest
def getLineTextLevel(linein):
strstart = linein.lstrip() # find the start of text in line
x = linein.find(strstart) # find the text index in the line
n = linein.count("\t", 0, x) # count the tabs
n = n + linein.count(" ", 0, x) # count the spaces
return(n + 1) # return the count + 1 (for level)
# colonStrip(line)
# strip a leading ':', if it exists
# input: line
# output: returns a string with a stripped ':'
def colonStrip(line):
if (line[0] == ":"):
return line[1:].lstrip()
else:
return line
# processLine
# process a single line
# input: linein - a single line that may or may not have tabs at the beginning
# level - an integer between 1 and 9 that show the current level
# (not to be confused with the level of the current line)
# output: through standard out
def processLine(linein):
global inPage, pageNumber
if (linein.lstrip() == ""):
print
return
if (getLineLevel(linein) == 1):
if (inPage == 1):
print '</draw:text-box></draw:page>'
inPage = 0
pageNumber += 1
outstring = '<draw:page draw:name="'
outstring += 'page'
outstring += str(pageNumber)
outstring += '" draw:style-name="dp1" draw:id="1" ' + \
'draw:master-page-name="Default" ' + \
'presentation:presentation-page-layout-name="AL1T0">'
print outstring
outstring = '<draw:text-box presentation:style-name="pr1" ' + \
'draw:layer="layout" svg:width="23.911cm" ' + \
'svg:height="3.508cm" svg:x="2.057cm" svg:y="1.0cm" ' + \
'presentation:class="title">'
print outstring
outstring = '<text:p text:style-name="P1">'
outstring += linein.lstrip()
outstring += "</text:p></draw:text-box>"
print outstring
outstring = '<draw:text-box presentation:style-name="pr1" ' + \
'draw:layer="layout" svg:width="23.911cm" ' + \
'svg:height="3.508cm" svg:x="2.057cm" svg:y="5.38cm" ' + \
'presentation:class="subtitle">'
print outstring
inPage = 1
else:
outstring = '<text:p text:style-name="P1">'
outstring += linein.lstrip()
outstring += '</text:p>'
print outstring
# flatten
# Flatten a subsection of an outline. The index passed is the outline section
# title. All sublevels that are only one level deeper are indcluded in the
# current subsection. Then there is a recursion for those items listed in the
# subsection. Exits when the next line to be processed is of the same or lower
# outline level.
# (lower means shallower)
# input: idx - the index into the outline. The indexed line is the title.
# output: adds reformatted lines to flatoutline[]
def flatten(idx):
if (outline[idx] == ""):
return
if (len(outline) <= idx):
return
titleline = outline[idx]
titlelevel = getLineLevel(titleline)
if (getLineLevel(outline[idx + 1]) > titlelevel):
if (titleline[titlelevel - 1] != " "):
flatoutline.append(titleline.lstrip())
exitflag = 0
while (exitflag == 0):
if (idx < len(outline) - 1):
idx = idx + 1
currlevel = getLineLevel(outline[idx])
if (currlevel == titlelevel + 1):
if (currlevel == outline[idx].find(" ") + 1):
flatoutline.append("\t " + outline[idx].lstrip())
else:
flatoutline.append("\t" + outline[idx].lstrip())
elif (currlevel <= titlelevel):
exitflag = 1
else:
exitflag = 1
return
def printHeader(linein):
print'''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE office:document-content PUBLIC
"-//OpenOffice.org//DTD OfficeDocument 1.0//EN"
"office.dtd">
<office:document-content xmlns:office="http://openoffice.org/2000/office"
xmlns:style="http://openoffice.org/2000/style"
xmlns:text="http://openoffice.org/2000/text"
xmlns:table="http://openoffice.org/2000/table"
xmlns:draw="http://openoffice.org/2000/drawing"
xmlns:fo="http://www.w3.org/1999/XSL/Format"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:number="http://openoffice.org/2000/datastyle"
xmlns:presentation="http://openoffice.org/2000/presentation"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:chart="http://openoffice.org/2000/chart"
xmlns:dr3d="http://openoffice.org/2000/dr3d"
xmlns:math="http://www.w3.org/1998/Math/MathML"
xmlns:form="http://openoffice.org/2000/form"
xmlns:script="http://openoffice.org/2000/script"
office:class="presentation" office:version="1.0">
<office:script/>
<office:body>'''
def printFooter():
print '</draw:text-box></draw:page>'
print'</office:body>'
def main():
getArgs()
    file = open(inputfile, "r")
linein = file.readline().strip()
outline.append(linein)
linein = file.readline().strip()
while linein != "":
outline.append("\t" + linein)
linein = file.readline().rstrip()
for i in range(0, len(outline) - 1):
flatten(i)
printHeader(flatoutline[0])
for i in range(0, len(flatoutline)):
processLine(flatoutline[i])
printFooter()
file.close()
main()
########NEW FILE########
__FILENAME__ = otl2table
#!/usr/bin/python2
# otl2table.py
# convert a tab-formatted outline from VIM to tab-delimited table
#
# Copyright (c) 2004 Noel Henson All rights reserved
#
# ALPHA VERSION!!!
###########################################################################
# Basic function
#
# This program accepts text outline files and converts them
# to tab-delimited text tables.
# This:
# Test
# Dog
# Barks
# Howls
# Cat
# Meows
# Yowls
# Becomes this:
# Test Dog Barks
# Test Dog Howls
# Test Cat Meows
# Test Cat Yowls
#
# This will make searching for groups of data and report generation easier.
#
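# As a further illustration (a sketch, not part of the original header): running this
# same example with '-n -t csv' would produce roughly
#
#     Test,Dog,Barks
#     Test,Dog,Howls
#     Test,Cat,Meows
#     Test,Cat,Yowls
#
# while the default tab mode pads every record out to 10 tab-separated columns.
#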
###########################################################################
# include whatever modules we need
import sys
from string import *
#from time import *
###########################################################################
# global variables
level = 0
inputfile = ""
formatMode = "tab"
noTrailing = 0
columns = []
###########################################################################
# function definitions
# usage
# print the simplest form of help
# input: none
# output: simple command usage is printed on the console
def showUsage():
print
print "Usage:"
print "otl2table.py [options] inputfile > outputfile"
print "Options"
print " -n Don't include trailing columns."
print " -t type Specify field separator type."
print " Types:"
print " tab - separate fields with tabs (default)"
print " csv - separate fields with ,"
print " qcsv - separate fields with \",\""
print " bullets - uses HTML tags <ul> and <li>"
print "output is on STDOUT"
print
# getArgs
# Check for input arguments and set the necessary switches
# input: none
# output: possible console output for help, switch variables may be set
def getArgs():
global inputfile, debug, noTrailing, formatMode
if (len(sys.argv) == 1):
showUsage()
        sys.exit()
else:
for i in range(len(sys.argv)):
if (i != 0):
if (sys.argv[i] == "-d"): debug = 1 # test for debug flag
if (sys.argv[i] == "-n"): noTrailing = 1 # test for noTrailing flag
elif (sys.argv[i] == "-?"): # test for help flag
showUsage() # show the help
sys.exit() # exit
elif (sys.argv[i] == "--help"):
showUsage()
sys.exit()
elif (sys.argv[i] == "-h"):
showUsage()
sys.exit()
elif (sys.argv[i] == "-t"): # test for the type flag
formatMode = sys.argv[i+1] # get the type
i = i + 1 # increment the pointer
elif (sys.argv[i][0] == "-"):
print "Error! Unknown option. Aborting"
sys.exit()
else: # get the input file name
inputfile = sys.argv[i]
# getLineLevel
# get the level of the current line (count the number of tabs)
# input: linein - a single line that may or may not have tabs at the beginning
# output: returns a number 1 is the lowest
def getLineLevel(linein):
strstart = lstrip(linein) # find the start of text in line
x = find(linein,strstart) # find the text index in the line
n = count(linein,"\t",0,x) # count the tabs
return(n+1) # return the count + 1 (for level)
# getLineTextLevel
# get the level of the current line (count the number of tabs)
# input: linein - a single line that may or may not have tabs at the beginning
# output: returns a number 1 is the lowest
def getLineTextLevel(linein):
strstart = lstrip(linein) # find the start of text in line
x = find(linein,strstart) # find the text index in the line
n = count(linein,"\t",0,x) # count the tabs
n = n + count(linein," ",0,x) # count the spaces
return(n+1) # return the count + 1 (for level)
# closeLevels
# print the assembled line
# input: columns - an array of 10 lines (for 10 levels)
# level - an integer between 1 and 9 that show the current level
# (not to be confused with the level of the current line)
# noTrailing - don't print trailing, empty columns
# output: through standard out
def closeLevels():
global level,columns,noTrailing,formatMode
if noTrailing == 1 :
colcount = level
else:
colcount = 10
if formatMode == "tab":
for i in range(1,colcount+1):
print columns[i] + "\t",
print
elif formatMode == "csv":
output = ""
for i in range(1,colcount):
output = output + columns[i] + ","
output = output + columns[colcount]
print output
elif formatMode == "qcsv":
output = "\""
for i in range(1,colcount):
output = output + columns[i] + "\",\""
output = output + columns[colcount] + "\""
print output
for i in range(level+1,10):
columns[i] = ""
# processLine
# process a single line
# input: linein - a single line that may or may not have tabs at the beginning
# format - a string indicating the mode to use for formatting
# level - an integer between 1 and 9 that show the current level
# (not to be confused with the level of the current line)
# output: through standard out
def processLine(linein):
global level, noTrailing, columns
if (lstrip(linein) == ""): return
lineLevel = getLineLevel(linein)
if (lineLevel > level):
columns[lineLevel] = lstrip(rstrip(linein))
level = lineLevel
elif (lineLevel == level):
closeLevels()
columns[lineLevel] = lstrip(rstrip(linein))
else:
closeLevels()
level = lineLevel
columns[lineLevel] = lstrip(rstrip(linein))
def main():
global columns
getArgs()
file = open(inputfile,"r")
for i in range(11):
columns.append("")
linein = lstrip(rstrip(file.readline()))
while linein != "":
processLine(linein)
linein = file.readline()
closeLevels()
file.close()
main()
########NEW FILE########
__FILENAME__ = otl2tags
#!/usr/bin/python2
# otl2tags.py
# Convert an OTL file to any tags-based file using user-definable
# configuration files. HTML, OPML, XML, LATEX and
# many, many others should be easily supportable.
#
# Copyright (c) 2005-2010 Noel Henson All rights reserved
###########################################################################
# Basic function
#
# This program accepts text outline files in Vim Outliners .otl format
# and converts them to a tags-based equivalent
###########################################################################
# include whatever modules we need
import sys
from ConfigParser import ConfigParser
import re
###########################################################################
# global variables
config = ConfigParser() # configuration
linecount = 0 # outline size in lines
parents = [] # parent stack, (linenum, enum) enum is an order numer
v = {} # variable dictionary for substitution
outline = [] # line tuples (value, indent)
output = [] # output outline
escapeDict = {} # dictionary of character escape codes
debug = 0
inputfile = ""
###########################################################################
# argument, help and debug functions
# dprint
# print debug statements
# input: string
# output: string printed to standard out
def dprint(*vals):
global debug
if debug != 0:
print >> sys.stderr, vals
# usage
# print the simplest form of help
# input: none
# output: simple command usage is printed on the console
def showUsage():
print """
Usage:
    otl2tags.py [options] inputfile
Options
-c config-file
-d debug
--help show help
output filenames are based on the input file name and the config file
"""
# getArgs
# Check for input arguments and set the necessary switches
# input: none
# output: possible console output for help, switch variables may be set
def getArgs():
global inputfile, debug, noTrailing, formatMode, config
if (len(sys.argv) == 1):
showUsage()
        sys.exit()
else:
for i in range(len(sys.argv)):
if (i != 0):
if (sys.argv[i] == "-c"): # test for the type flag
config.read(sys.argv[i + 1]) # read the config
i = i + 1 # increment the pointer
elif (sys.argv[i] == "-d"):
debug = 1
elif (sys.argv[i] == "-?"): # test for help flag
showUsage() # show the help
sys.exit() # exit
elif (sys.argv[i] == "--help"):
showUsage()
sys.exit()
elif (sys.argv[i] == "-h"):
showUsage()
sys.exit()
elif (sys.argv[i][0] == "-"):
print "Error! Unknown option. Aborting"
sys.exit()
else: # get the input file name
inputfile = sys.argv[i]
# printConfig
# Debugging routine to print the parsed configuration file
# input: none
# output: configuration data printed to console
def printConfig():
global config
print >> sys.stderr, "Config ---------------------------------------------"
list = config.sections()
for i in range(len(list)):
print >> sys.stderr
print >> sys.stderr, list[i]
for x in config.options(list[i]):
if (x != "name") and (x != "__name__"):
print >> sys.stderr, x, ":", config.get(list[i], x)
print >> sys.stderr, "----------------------------------------------------"
print >> sys.stderr
###########################################################################
# low-level outline processing functions
# indentLevel
# get the level of the line specified by linenum
# input: line
# output: returns the level number, 1 is the lowest
def indentLevel(line):
strstart = line.lstrip() # find the start of text in line
x = line.find(strstart) # find the text index in the line
n = line.count("\t", 0, x) # count the tabs
n = n + line.count(" ", 0, x) # count the spaces
return(n + 1) # return the count + 1 (for level)
# stripMarker
# return a line without its marker and leading and trailing whitespace
# input: line, marker
# output: stripped line
def stripMarker(line, marker):
return line.lstrip(marker).strip()
# getLineType
# return the type of the line specified by linenum
# input: line
# output: returns text, usertext, table, preftext, etc.
def getLineType(line):
    # handle blank lines first; indexing line[0] on an empty string would raise IndexError
    if (line == ''):
        return 'blank'
    elif (line[0] == ':'):
return 'text'
elif (line[0] == ';'):
return 'preftext'
elif (line[0] == '>'):
return 'usertext'
elif (line[0] == '<'):
return 'userpreftext'
elif (line[0] == '|'):
return 'table'
elif (line[0] == '-'):
return 'bulletheading'
elif (line[0] == '+'):
return 'numberheading'
# elif (line[0] == '['):
# return 'checkboxheading'
else:
return 'heading'
# getChildren
# return a list of line numbers for children of the passed line number
# input: linenum
# output: a (possibly) empty list of children
def getChildren(linenum):
global outline, linecount
children = []
mylevel = outline[linenum][1]
childlevel = mylevel + 1
linenum = linenum + 1
while (linenum < linecount) and (outline[linenum][1] > mylevel):
if (outline[linenum][1] == childlevel):
children.append(linenum)
linenum = linenum + 1
return children
# subTags
# substitute variables in output expressions
# input: section - section from config
# input: type - object type (to look up in config)
# input: - substitution item (by name) from config array
# output: string - the substitution expression with variables inserted
def subTags(section, type):
global config, v, parents
varlist = v.keys()
pattern = config.get(section, type)
if len(parents) > 0:
v["%p"] = str(parents[len(parents) - 1])
for var in varlist:
x = ""
x = var
y = ""
y = v.get(var)
pattern = re.sub(x, y, pattern)
return pattern
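# For illustration, assuming a hypothetical config entry such as
#   leaf-heading: <h%l>%%</h%l>
# in the [Headings] section, subTags("Headings", "leaf-heading") with
# v["%l"] = "2" and v["%%"] = "Shopping" would return "<h2>Shopping</h2>";
# %p is only substituted when a parent heading is on the stack.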
#getBlock
#return a list of lines that match a mark (like : or ;)
#input: line number
#output: list of stripped lines
def getBlock(linenum, marker):
global outline, linecount
lines = []
line = outline[linenum][0]
while line[0] == marker:
lines.append(stripMarker(line, marker))
linenum = linenum + 1
if linenum == linecount:
break
line = outline[linenum][0]
return lines
#getUnstrippedBlock
#return a list of lines that match a mark (like : or ;)
#input: line number
#output: list of unstripped lines
def getUnstrippedBlock(linenum, marker):
global outline, linecount
lines = []
line = outline[linenum][0]
while line[0] == marker:
lines.append(line)
linenum = linenum + 1
if linenum == linecount:
break
line = outline[linenum][0]
return lines
###########################################################################
# embedded object processing functions
# buildEscapes
# construct the dictionary for escaping special characters
# input: config:escapes
# output: filled escapes dictionary
def buildEscapes():
escapes = config.get("Document", "escapes")
if len(escapes):
list = escapes.split(" ")
for pair in list:
key, value = pair.split(",")
escapeDict[key] = value
# charEscape
# escape special characters
# input: line
# output: modified line
def charEscape(line):
return "".join(escapeDict.get(c, c) for c in line)
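# For example, with a hypothetical escapes setting of
#   escapes = &,&amp; <,&lt; >,&gt;
# buildEscapes() fills escapeDict so that charEscape("a<b & c")
# returns "a&lt;b &amp; c".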
# getURL
# if there is a url, [url text], return the extracted link, url and value
# input: line
# output: link, url, text
def getURL(line):
tags = []
for tag in line.split("]"):
tags.append(tag.split("["))
for tag in tags:
if len(tag) > 1 and re.search(" ", tag[1]):
link = tag[1]
url, text = link.split(" ", 1)
link = "[" + tag[1] + "]"
return link, url, text
# return link.group(0), url, text
# else:
# return None, None, None
return None, None, None
def handleURL(line):
    link, url, text = getURL(line)
    if link is None:
        return line.replace("[url]", "")
    v["%u"] = url
    v["%v"] = text
    text = subTags("URLs", "url")
    line = line.replace(link, text)
    url = subTags("URLs", "url-attr")
    line = line.replace("[url]", url)
    return line
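# For illustration: a line containing "[http://example.com Example]" has the
# bracketed link replaced by the expansion of the "url" pattern from the URLs
# config section (with %u = "http://example.com" and %v = "Example"), and any
# remaining "[url]" marker is replaced by the "url-attr" expansion; the exact
# markup produced depends on the configuration file.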
###########################################################################
# outline header processing functions
# all outline object processors accept and output the following:
# input: linenum, enum
# output: print the output for each object
def handleHeading(linenum, enum):
global outline, parents
line = outline[linenum][0]
# url handling
# extract url data from line
# replace url object in line
# subTags line
# replace url attribute marker
v["%%"] = line
v["%l"] = str(outline[linenum][1])
v["%n"] = str(linenum)
v["%c"] = str(enum)
children = getChildren(linenum)
if enum == 1:
output.append(subTags("Headings", "before-headings"))
if children:
output.append(subTags("Headings", "branch-heading"))
parents.append([linenum, enum])
handleObjects(children)
parents.pop()
output.append(subTags("Headings", "after-headings"))
else:
output.append(subTags("Headings", "leaf-heading"))
def handleBulleted(linenum, enum):
global outline, parents
v["%%"] = outline[linenum][0]
v["%l"] = str(outline[linenum][1])
v["%n"] = str(linenum)
v["%c"] = str(enum)
children = getChildren(linenum)
if enum == 1:
output.append(subTags("Headings", "before-bulleted-headings"))
if children:
output.append(subTags("Headings", "bulleted-branch-heading"))
parents.append([linenum, enum])
handleObjects(children)
parents.pop()
output.append(subTags("Headings", "after-bulleted-headings"))
else:
output.append(subTags("Headings", "bulleted-leaf-heading"))
def handleNumbered(linenum, enum):
global outline, parents
v["%%"] = outline[linenum][0]
v["%l"] = str(outline[linenum][1])
v["%n"] = str(linenum)
v["%c"] = str(enum)
children = getChildren(linenum)
if enum == 1:
output.append(subTags("Headings", "before-numbered-headings"))
if children:
output.append(subTags("Headings", "numbered-branch-heading"))
parents.append([linenum, enum])
handleObjects(children)
parents.pop()
output.append(subTags("Headings", "after-numbered-headings"))
else:
output.append(subTags("Headings", "numbered-leaf-heading"))
###########################################################################
# outline text block processing functions
# all outline object processors accept and output the following:
# input: linenum, enum
# output: print the output for each object
def handleText(linenum, enum):
global outline, parents
if enum != 1:
return # only execute for first call
v["%l"] = str(outline[linenum][1])
v["%n"] = str(linenum)
v["%c"] = str(enum)
list = getBlock(linenum, ':')
output.append(subTags("Text", "before"))
lines = ""
for line in list:
if line == "":
lines = lines + config.get("Text", "paragraph-sep")
else:
lines = lines + line + config.get("Text", "line-sep")
v["%%"] = lines
output.append(subTags("Text", "text"))
output.append(subTags("Text", "after"))
def handleUserText(linenum, enum):
global outline, parents
if enum != 1:
return # only execute for first call
v["%l"] = str(outline[linenum][1])
v["%n"] = str(linenum)
v["%c"] = str(enum)
list = getBlock(linenum, '>')
output.append(subTags("UserText", "before"))
lines = ""
for line in list:
if line == "":
lines = lines + config.get("UserText", "paragraph-sep")
else:
lines = lines + line + config.get("UserText", "line-sep")
v["%%"] = lines.strip() # remove a possible extra separator
output.append(subTags("UserText", "text"))
output.append(subTags("UserText", "after"))
def handlePrefText(linenum, enum):
global outline, parents
if enum != 1:
return # only execute for first call
v["%l"] = str(outline[linenum][1])
v["%n"] = str(linenum)
v["%c"] = str(enum)
list = getBlock(linenum, ';')
output.append(subTags("PrefText", "before"))
lines = ""
for line in list:
if line == "":
lines = lines + config.get("PrefText", "paragraph-sep")
else:
lines = lines + line + config.get("PrefText", "line-sep")
v["%%"] = lines.strip() # remove a possible extra separator
output.append(subTags("PrefText", "text"))
output.append(subTags("PrefText", "after"))
def handleUserPrefText(linenum, enum):
global outline, parents
if enum != 1:
return # only execute for first call
v["%l"] = str(outline[linenum][1])
v["%n"] = str(linenum)
v["%c"] = str(enum)
list = getBlock(linenum, '<')
output.append(subTags("UserPrefText", "before"))
lines = ""
for line in list:
if line == "":
lines = lines + config.get("UserPrefText", "paragraph-sep")
else:
lines = lines + line + config.get("UserPrefText", "line-sep")
v["%%"] = lines.strip() # remove a possible extra separator
output.append(subTags("UserPrefText", "text"))
output.append(subTags("UserPrefText", "after"))
###########################################################################
# outline table processing functions
# isAlignRight
# return flag
# input: col, a string
def isAlignRight(col):
    l = len(col)
    if (col[0:2] == "  ") and (col[l - 2:l] != "  "):
        return 1
    else:
        return 0
# isAlignLeft
# return flag
# input: col, a string
def isAlignLeft(col):
    l = len(col)
    if (col[0:2] != "  ") and (col[l - 2:l] == "  "):
        return 1
    else:
        return 0
# isAlignCenter
# return flag
# input: col, a string
def isAlignCenter(col):
    l = len(col)
    if (col[0:2] == "  ") and (col[l - 2:l] == "  "):
        return 1
    else:
        return 0
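# Cell alignment is signalled by double spaces around the cell text,
# e.g. for the row "|left  |  centered  |  right|":
#   isAlignLeft("left  ")         -> 1   (trailing double space only)
#   isAlignCenter("  centered  ") -> 1   (double space on both sides)
#   isAlignRight("  right")       -> 1   (leading double space only)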
# handleHeaderRow
# process a table header row
# input: row
# output: print the output for each object
def handleHeaderRow(row):
global outline, parents
row = row.rstrip("|").lstrip("|")
columns = row.split("|")
output.append(subTags("Tables", "before-table-header"))
for col in columns:
v["%%"] = col.strip()
        if isAlignCenter(col) or isAlignLeft(col) or isAlignRight(col):
            output.append(subTags("Tables", "table-header-column-center"))
        else:
            output.append(subTags("Tables", "table-header-column"))
output.append(subTags("Tables", "after-table-header"))
# handleRow
# process a non-header table row
# input: row
# output: print the output for each object
def handleRow(row):
global outline, parents
if row[0:2] == "||":
handleHeaderRow(row)
return
row = row.rstrip("|").lstrip("|")
columns = row.split("|")
output.append(subTags("Tables", "before-table-row"))
for col in columns:
v["%%"] = col.strip()
        if isAlignCenter(col):
            output.append(subTags("Tables", "table-column-center"))
        elif isAlignLeft(col):
            output.append(subTags("Tables", "table-column-left"))
        elif isAlignRight(col):
            output.append(subTags("Tables", "table-column-right"))
        else:
            output.append(subTags("Tables", "table-column"))
output.append(subTags("Tables", "after-table-row"))
# handleTable
# process a table
# input: linenum, enum
# output: print the output for each object
def handleTable(linenum, enum):
global outline, parents
if enum != 1:
return # only execute for first call
v["%l"] = str(outline[linenum][1])
v["%n"] = str(linenum)
v["%c"] = str(enum)
list = getUnstrippedBlock(linenum, '|')
output.append(subTags("Tables", "before"))
for row in list:
handleRow(row)
output.append(subTags("Tables", "after"))
###########################################################################
# outline wrapper processing functions
# addPreamble
# create the 'header' for the output document
# input: globals
# output: standard out
def addPreamble():
global outline, v
v["%%"] = ""
output.append(subTags("Document", "preamble"))
# addPostamble
# create the 'footer' for the output document
# input: globals
# output: standard out
def addPostamble():
global outline, v
v["%%"] = ""
output.append(subTags("Document", "postamble"))
###########################################################################
# outline tree functions
# handleObject
# take an object and invoke the appropriate function to process it
# input: linenum, enum (enum is the child order number of a parent)
# output: print the output of a object
def handleObject(linenum, enum):
global outline, linecount
obj = getLineType(outline[linenum][0])
if obj == 'heading':
handleHeading(linenum, enum)
    elif obj == 'bulletheading':
        handleBulleted(linenum, enum)
    elif obj == 'numberheading':
        handleNumbered(linenum, enum)
    elif obj == 'blank':
        pass                            # nothing to output for blank lines
elif obj == 'text':
handleText(linenum, enum)
elif obj == 'usertext':
handleUserText(linenum, enum)
elif obj == 'preftext':
handlePrefText(linenum, enum)
elif obj == 'userpreftext':
handleUserPrefText(linenum, enum)
elif obj == 'table':
handleTable(linenum, enum)
else:
print
print "Error: unknown line type @ ", linenum
sys.exit(1)
# handleObjects
# take an object list and invoke the appropriate functions to process it
# input: linenum
# output: print the output of a object
def handleObjects(objs):
for i in range(len(objs)):
handleObject(objs[i], i + 1)
###########################################################################
# file functions
# readFile
# read the selected file into lines[]
# input: filename to be loaded
# output: a loaded-up lines[]
def readFile(inputfile):
global outline, linecount, config
file = open(inputfile, "r")
linein = file.readline()
while linein != "":
indent = indentLevel(linein)
line = charEscape(linein.strip())
outline.append([line, indent])
linein = file.readline()
    file.close()
outline[0][1] = 0 # set the first line to level 0
linecount = len(outline)
###########################################################################
# Main Program Loop
def main():
global outline, inputfile, linecount
# get the arguments
getArgs()
    # construct the escapes dictionary
buildEscapes()
# read the input file
readFile(inputfile)
# get the title
v["%t"] = outline[0][0].strip()
# construct the initial data
# parsing headings, text and tables
# but not parsing links or images
addPreamble()
if config.get("Document", "first-is-node") == "true":
objs = [0]
else:
objs = getChildren(0)
handleObjects(objs)
addPostamble()
    # handle embedded objects
# parsing and constructing links, images and other embedded objects
for i in range(len(output)):
output[i] = handleURL(output[i])
# output the final data
for line in output:
if line.strip() != "":
print line.strip()
main()
########NEW FILE########
__FILENAME__ = otlgrep
#!/usr/bin/python2
# otlgrep.py
# grep an outline for a regex and return the branch with all the leaves.
#
# Copyright 2005 Noel Henson All rights reserved
###########################################################################
# Basic function
#
# This program searches an outline file for a branch that contains
# a line matching the regex argument. The parent headings (branches)
# and the children (sub-branches and leaves) of the matching headings
# are returned.
#
# Examples
#
# Using this outline:
#
# Pets
# Indoor
# Cats
# Sophia
# Hillary
# Rats
# Finley
# Oliver
# Dogs
# Kirby
# Outdoor
# Dogs
# Kirby
# Hoover
# Goats
# Primrose
# Joey
#
# a grep for Sophia returns:
#
# Indoor
# Cats
# Sophia
#
# a grep for Dogs returns:
#
# Indoor
# Dogs
# Kirby
# Hoover
# Outdoor
# Dogs
# Kirby
# Hoover
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
###########################################################################
# include whatever modules we need
import sys
import re
###########################################################################
# global variables
debug = 0
ignorecase = 0
pattern = ""
inputfiles = []
###########################################################################
# function definitions
#
# print debug statements
# input: string
# output: string printed to standard out
def dprint(*vals):
global debug
if debug != 0:
print vals
# usage
# print the simplest form of help
# input: none
# output: simple command usage is printed on the console
def showUsage():
print """
Usage:
otlgrep.py [options] pattern [file...]
Options
-i Ignore case
--help Show help.
[file...] is zero or more files to search. Wildcards are supported.
if no file is specified, input is expected on stdin.
output is on STDOUT
"""
# getArgs
# Check for input arguments and set the necessary switches
# input: none
# output: possible console output for help, switch variables may be set
def getArgs():
global debug, pattern, inputfiles, ignorecase
if (len(sys.argv) == 1):
showUsage()
        sys.exit()
else:
for i in range(len(sys.argv)):
if (i != 0):
if (sys.argv[i] == "-d"):
debug = 1 # test for debug flag
elif (sys.argv[i] == "-i"):
                    ignorecase = 1  # test for the ignore-case flag
elif (sys.argv[i] == "-?"): # test for help flag
showUsage() # show the help
sys.exit() # exit
elif (sys.argv[i] == "--help"):
showUsage()
sys.exit()
elif (sys.argv[i][0] == "-"):
print "Error! Unknown option. Aborting"
sys.exit()
else: # get the input file name
if (pattern == ""):
pattern = sys.argv[i]
else:
inputfiles.append(sys.argv[i])
# getLineLevel
# get the level of the current line (count the number of tabs)
# input: linein - a single line that may or may not have tabs at the beginning
# output: returns a number; the top level is 0
def getLineLevel(linein):
strstart = linein.lstrip() # find the start of text in line
x = linein.find(strstart) # find the text index in the line
n = linein.count("\t", 0, x) # count the tabs
    return(n)                    # return the tab count (top level is 0)
# processFile
# grep an outline file for the pattern
# input: file - the filehandle of the file we are searching
# output: matching branches printed to standard out
def processFile(file):
global debug, pattern, ignorecase
parents = []
parentprinted = []
for i in range(10):
parents.append("")
parentprinted.append(0)
matchlevel = 0
line = file.readline() # read the outline title
# and discard it
line = file.readline() # read the first parent heading
while (line != ""):
level = getLineLevel(line)
parents[level] = line
parentprinted[level] = 0
if (ignorecase == 1):
linesearch = re.search(pattern, line.strip(), re.I)
else:
linesearch = re.search(pattern, line.strip())
if (linesearch is not None):
matchlevel = level
for i in range(level): # print my ancestors
if (parentprinted[i] == 0):
print parents[i][:-1]
parentprinted[i] = 1
print parents[level][:-1] # print myself
line = file.readline()
while (line != "") and (getLineLevel(line) > matchlevel):
print line[:-1]
line = file.readline()
else:
line = file.readline()
# main
# grep an outline
# input: args and input file
# output: matching branches printed to standard out
def main():
global inputfiles, debug
getArgs()
if (len(inputfiles) == 0):
processFile(sys.stdin)
else:
for i in range(len(inputfiles)):
file = open(inputfiles[i], "r")
processFile(file)
file.close()
main()
########NEW FILE########
__FILENAME__ = otlsplit
#!/usr/bin/python2
# otlsplit.py
# split an outline into several files.
#
# Copyright 2005 Noel Henson All rights reserved
###########################################################################
# Basic function
#
# This program accepts text outline files and splits them into
# several smaller files. The output file names are produced from the
# heading names of the parents.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
###########################################################################
# include whatever modules we need
import sys
import re
###########################################################################
# global variables
debug = 0
subdir = ""
level = 1
title = 0
inputfile = ""
###########################################################################
# function definitions
#
# print debug statements
# input: string
# output: string printed to standard out
def dprint(*vals):
global debug
if debug != 0:
print vals
# usage
# print the simplest form of help
# input: none
# output: simple command usage is printed on the console
def showUsage():
print """
Usage:
otlsplit.py [options] inputfile
Options
-l level The number of levels to split down to. The default is 1
    -D dir Specify a target directory for the output files
    -t Include a title line (the parent heading) in split files
-h Show help.
output is on STDOUT
"""
# getArgs
# Check for input arguments and set the necessary switches
# input: none
# output: possible console output for help, switch variables may be set
def getArgs():
global debug, level, inputfile, title, subdir
if (len(sys.argv) == 1):
showUsage()
        sys.exit()
else:
for i in range(len(sys.argv)):
if (i != 0):
if (sys.argv[i] == "-d"):
debug = 1 # test for debug flag
elif (sys.argv[i] == "-?"): # test for help flag
showUsage() # show the help
sys.exit() # exit
elif (sys.argv[i] == "-l"): # test for the level flag
level = int(sys.argv[i + 1]) # get the level
i = i + 1 # increment the pointer
elif (sys.argv[i] == "-D"): # test for the subdir flag
subdir = sys.argv[i + 1] # get the subdir
i = i + 1 # increment the pointer
elif (sys.argv[i] == "-t"):
title = 1 # test for title flag
elif (sys.argv[i] == "--help"):
showUsage()
sys.exit()
elif (sys.argv[i] == "-h"):
showUsage()
sys.exit()
elif (sys.argv[i][0] == "-"):
print "Error! Unknown option. Aborting"
sys.exit()
else: # get the input file name
inputfile = sys.argv[i]
# getLineLevel
# get the level of the current line (count the number of tabs)
# input: linein - a single line that may or may not have tabs at the beginning
# output: returns a number 1 is the lowest
def getLineLevel(linein):
strstart = linein.lstrip() # find the start of text in line
x = linein.find(strstart) # find the text index in the line
n = linein.count("\t", 0, x) # count the tabs
return(n + 1) # return the count + 1 (for level)
# convertSensitiveChars
# replace characters that are unsafe in file names with underscores
# input: line - a heading string
# output: returns a string
def convertSensitiveChars(line):
line = re.sub('\W', '_', line.strip())
return(line)
# makeFileName
# make a file name from the string array provided
# input: nameParts - a list of parent heading strings
# output: returns a string
def makeFileName(nameParts):
global debug, level, subdir
filename = ""
for i in range(level):
filename = filename + convertSensitiveChars(nameParts[i]).strip() + "-"
filename = filename[:-1] + ".otl"
if subdir != "":
filename = subdir + "/" + filename
return(filename.lower())
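# For example, with level = 2 and subdir = "out", nameparts beginning with
# ["My Projects", "Home Lab", ...] yields the file name
# "out/my_projects-home_lab.otl" (non-word characters become underscores).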
# processFile
# split an outline file
# input: file - the filehandle of the file we are splitting
# output: output files
def processFile(ifile):
global debug, level, title
nameparts = []
for i in range(10):
nameparts.append("")
outOpen = 0
line = ifile.readline() # read the outline title
# and discard it
line = ifile.readline() # read the first parent heading
dprint(level)
while (line != ""):
linelevel = getLineLevel(line)
if (linelevel < level):
if outOpen == 1:
                ofile.close()
outOpen = 0
nameparts[linelevel] = line
dprint(level, linelevel, line)
else:
if outOpen == 0:
ofile = open(makeFileName(nameparts), "w")
outOpen = 1
if title == 1:
dprint("title:", title)
ofile.write(nameparts[level - 1])
ofile.write(line[level:])
        line = ifile.readline()
# main
# split an outline
# input: args and input file
# output: output files
def main():
global inputfile, debug
getArgs()
file = open(inputfile, "r")
processFile(file)
file.close()
main()
########NEW FILE########
__FILENAME__ = freemind
#!/usr/bin/python2
'''
usage:
freemind.py -o [fmt] <files>, where fmt selects output format: {otl,mm}
freemind.py -o otl <files>:
    Read in a freemind XML .mm file and generate an outline file
    compatible with vim-outliner.
freemind.py -o mm <files>:
Read in an otl file and generate an XML mind map viewable in freemind
NOTE:
Make sure that you check that round trip on your file works.
Author: Julian Ryde
'''
import sys
import getopt
import codecs
import otl
import xml.etree.ElementTree as et
from xml.etree.ElementTree import XMLParser
debug = False
class Outline: # The target object of the parser
depth = -1
indent = '\t'
current_tag = None
def start(self, tag, attrib): # Called for each opening tag.
self.depth += 1
self.current_tag = tag
# print the indented heading
if tag == 'node' and self.depth > 1:
#if 'tab' in attrib['TEXT']:
#import pdb; pdb.set_trace()
print (self.depth - 2) * self.indent + attrib['TEXT']
def end(self, tag): # Called for each closing tag.
self.depth -= 1
self.current_tag = None
def data(self, data):
if self.current_tag == 'p':
bodyline = data.rstrip('\r\n')
bodyindent = (self.depth - 5) * self.indent + ": "
#textlines = textwrap.wrap(bodytext, width=77-len(bodyindent),
# break_on_hyphens=False)
#for line in textlines:
print bodyindent + bodyline
def close(self): # Called when all data has been parsed.
pass
def mm2otl(*arg, **kwarg):
fname = arg[0][0]
file = codecs.open(fname, 'r', encoding='utf-8')
filelines = file.readlines()
outline = Outline()
parser = XMLParser(target=outline, encoding='utf-8')
parser.feed(filelines[0].encode('utf-8'))
parser.close()
# TODO body text with manual breaks
# TODO commandline arguments for depth, maxlength etc.
# TODO do not read whole file into memory?
# TODO handle decreasing indent by more than one tab
# TODO handle body text lines sometimes not ending with space
depth = 99
def attach_note(node, textlines):
et.ElementTree
# Format should look like
#<richcontent TYPE="NOTE">
#<html>
# <head> </head>
# <body>
# %s
# </body>
#</html>
#</richcontent>
notenode = et.SubElement(node, 'richcontent')
notenode.set('TYPE', 'NOTE')
htmlnode = et.SubElement(notenode, 'html')
bodynode = et.SubElement(htmlnode, 'body')
for line in textlines:
pnode = et.SubElement(bodynode, 'p')
pnode.text = line
def otl2mm(*arg, **kwarg):
fname = arg[0][0]
# node ID should be based on the line number of line in the
# otl file for easier debugging
#for lineno, line in enumerate(open(fname)):
# enumerate starts at 0 I want to start at 1
# FIXME freemind.py|107| W806 local variable 'lineno' is assigned to but never used
lineno = 0
mapnode = et.Element('map')
mapnode.set('version', '0.9.0')
topnode = et.SubElement(mapnode, 'node')
topnode.set('TEXT', fname)
parents = [mapnode, topnode]
#left_side = True # POSITION="right"
# read otl file into memory
filelines = codecs.open(fname, 'r', encoding='utf-8')
# first handle the body texts turn it into a list of headings
# with associated body text for each one this is because the
# body text especially multi-line is what makes it awkward.
headings = []
bodytexts = []
for line in filelines:
if otl.is_heading(line):
headings.append(line)
bodytexts.append([])
else:
# TODO this ': ' removal should go in otl.py?
bodytexts[-1].append(line.lstrip()[2:] + '\n')
#import pdb; pdb.set_trace()
oldheading = ''
for heading, bodytext in zip(headings, bodytexts):
if debug:
print heading, bodytext
level = otl.level(heading)
oldlevel = otl.level(oldheading)
if level == oldlevel:
pass
elif level > oldlevel:
# about to go down in the hierarchy so add this line
# as a parent to the stack
# FIXME freemind.py|149| W802 undefined name 'node'
parents.append(node)
elif level < oldlevel:
# about to go up in the hierarchy so remove parents from the stack
leveldiff = oldlevel - level
parents = parents[:-leveldiff]
node = et.SubElement(parents[-1], 'node')
node.set('TEXT', heading.lstrip().rstrip('\r\n'))
#if len(bodytext) > 0:
attach_note(node, bodytext)
oldheading = heading
xmltree = et.ElementTree(mapnode)
xmltree.write(sys.stdout, 'utf-8')
print
def usage():
print "usage: %s -[mo] <files>" % (sys.argv[0])
def main():
args = sys.argv
try:
opts, args = getopt.getopt(sys.argv[1:], 'moh', [""])
except getopt.GetoptError, err:
usage()
print str(err)
sys.exit(2)
for o, a in opts:
if o == "-m":
otl2mm(args)
elif o == "-o":
mm2otl(args)
elif o == "-h":
usage()
sys.exit(0)
else:
usage()
assert False, "unhandled option: %s" % o
return args
if __name__ == "__main__":
main()
# vim: set noet :
########NEW FILE########
__FILENAME__ = freemind_outline
#!/usr/bin/python2
'''Converts a freemind xml .mm file to an outline file compatible with vim
outliner.
Make sure that you check that round trip on your file works.
Author: Julian Ryde
'''
import sys
from xml.etree.ElementTree import XMLParser
import textwrap
import codecs
class Outline: # The target object of the parser
depth = -1
indent = '\t'
current_tag = None
def start(self, tag, attrib): # Called for each opening tag.
self.depth += 1
self.current_tag = tag
# print the indented heading
if tag == 'node' and self.depth > 1:
#if 'tab' in attrib['TEXT']:
#import pdb; pdb.set_trace()
print (self.depth-2)*self.indent + attrib['TEXT']
def end(self, tag): # Called for each closing tag.
self.depth -= 1
self.current_tag = None
def data(self, data):
if self.current_tag == 'p':
bodyline = data.rstrip('\r\n')
bodyindent = (self.depth-5)*self.indent + ": "
#textlines = textwrap.wrap(bodytext, width=77-len(bodyindent), break_on_hyphens=False)
#for line in textlines:
print bodyindent + bodyline
def close(self): # Called when all data has been parsed.
pass
outline = Outline()
parser = XMLParser(target=outline, encoding='utf-8')
fname = sys.argv[1]
file = codecs.open(fname, 'r', encoding='utf-8')
filelines = file.readlines();
print "filelines", type(filelines[0]), filelines[0]
parser.feed(filelines[0].encode('utf-8'))
parser.close()
########NEW FILE########
__FILENAME__ = otl
# Some integer IDs
# headings are 1, 2, 3, ....
bodynowrap = -1 # ;
bodywrap = 0 # :
def level(line):
    '''Return the heading level: 1 for top-level headings and larger for deeper
    ones; 0 for wrapped body text (:) and -1 for non-wrapped body text (;).'''
    if line.lstrip().find(':') == 0: return bodywrap
    if line.lstrip().find(';') == 0: return bodynowrap
    strstart = line.lstrip()    # find the start of text in line
    x = line.find(strstart)     # find the text index in the line
    n = line.count("\t", 0, x)  # count the tabs
    return(n + 1)               # return the count + 1 (for level)
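# For example:
#   level("Top heading")         -> 1
#   level("\t\tSub-sub heading") -> 3
#   level("\t: wrapped body")    -> 0   (bodywrap)
#   level("\t; preformatted")    -> -1  (bodynowrap)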
def is_bodywrap(line):
return level(line) == bodywrap
def is_bodynowrap(line):
return level(line) == bodynowrap
def is_heading(line):
return level(line) > 0
def is_body(line):
return not is_heading(line)
########NEW FILE########
__FILENAME__ = outline_freemind
#!/usr/bin/python2
'''Read in an otl file and generate an xml mind map viewable in freemind
Make sure that you check that round trip on your file works.
Author: Julian Ryde
'''
import sys
import os
import xml.etree.ElementTree as et
import otl
import codecs
fname = sys.argv[1]
max_length = 40
depth = 99
debug = False
# TODO body text with manual breaks
# TODO commandline arguments for depth, maxlength etc.
# TODO do not read whole file into memory?
# TODO handle decreasing indent by more than one tab
# TODO handle body text lines sometimes not ending with space
otlfile = open(fname)
indent = ' '
def attach_note(node, textlines):
et.ElementTree
# Format should look like
#<richcontent TYPE="NOTE">
#<html>
# <head> </head>
# <body>
# %s
# </body>
#</html>
#</richcontent>
notenode = et.SubElement(node, 'richcontent')
notenode.set('TYPE', 'NOTE')
htmlnode = et.SubElement(notenode, 'html')
headnode = et.SubElement(htmlnode, 'head')
bodynode = et.SubElement(htmlnode, 'body')
for line in textlines:
pnode = et.SubElement(bodynode, 'p')
pnode.text = line
# node ID should be based on the line number of line in the otl file for easier
# debugging
#for lineno, line in enumerate(open(fname)):
# enumerate starts at 0 I want to start at 1
lineno = 0
mapnode = et.Element('map')
mapnode.set('version', '0.9.0')
topnode = et.SubElement(mapnode, 'node')
topnode.set('TEXT', fname)
parents = [mapnode, topnode]
#left_side = True # POSITION="right"
# read otl file into memory
filelines = codecs.open(fname, 'r', encoding='utf-8')
# remove those that are too deep or body text and pesky end of line characters
#filelines = [line.rstrip('\r\n') for line in filelines if otl.level(line) <= depth]
#filelines = [line for line in filelines if otl.is_heading(line)]
# first handle the body texts turn it into a list of headings with associated
# body text for each one this is because the body text especially multi-line is
# what makes it awkward.
headings = []
bodytexts = []
for line in filelines:
if otl.is_heading(line):
headings.append(line)
bodytexts.append([])
else:
# TODO this ': ' removal should go in otl.py?
bodytexts[-1].append(line.lstrip()[2:] + '\n')
#import pdb; pdb.set_trace()
oldheading = ''
for heading, bodytext in zip(headings, bodytexts):
if debug: print heading, bodytext
level = otl.level(heading)
oldlevel = otl.level(oldheading)
if level == oldlevel:
pass
elif level > oldlevel:
# about to go down in the hierarchy so add this line as a parent to the
# stack
parents.append(node)
elif level < oldlevel:
# about to go up in the hierarchy so remove parents from the stack
leveldiff = oldlevel - level
parents = parents[:-leveldiff]
node = et.SubElement(parents[-1], 'node')
node.set('TEXT', heading.lstrip().rstrip('\r\n'))
#if len(bodytext) > 0:
attach_note(node, bodytext)
oldheading = heading
xmltree = et.ElementTree(mapnode)
xmltree.write(sys.stdout, 'utf-8')
print
########NEW FILE########
| true
|
85feb2fc630fa3598bfd15a3debf5839b3f87571
|
Python
|
koallen/cz4071-project-1
|
/graph_analyzer/plot.py
|
UTF-8
| 5,477
| 2.671875
| 3
|
[] |
no_license
|
import sys
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import networkx as nx
import graph
import pandas as pd
import numpy as np
import os
import pickle
import math
def plot_curve(data, x_label, y_label, title, save_as, log=False, h_line=None, v_line=None):
x = list(data.keys())
y = list(data.values())
if log:
# Remove zeros for log-log plots
for k in x:
if k == 0 or data[k] == 0:
del data[k]
x = [math.log(i) for i in data.keys()]
y = [math.log(i) for i in data.values()]
plt.scatter(x, y, s=10)
if h_line:
if log:
h_line = math.log(h_line)
plt.axhline(h_line, color='r')
if v_line:
if log:
v_line = math.log(v_line)
plt.axvline(v_line, color='r')
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.savefig(save_as)
plt.show()
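# Illustrative call: plot a degree distribution on log-log axes with a
# vertical line at an (assumed) average degree of 2.1:
#   plot_curve({1: 120, 2: 45, 3: 20}, "log(k)", "log(P(k))",
#              "Degree Distribution", "degree_distribution.png",
#              log=True, v_line=2.1)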
def plot_heatmap(graph, pos, hubs, data, save_as):
dataframe = pd.DataFrame(data, columns=['value'])
    # normalise the values into a fixed range before colouring the nodes
    dataframe = dataframe.apply(lambda x: ((x - np.mean(x)) / (np.max(x) - np.min(x))) * 225)
dataframe = dataframe.reindex(graph.nodes())
# Providing a continuous color scale with cmap
node_size = []
for i in (graph.nodes()):
if i not in hubs:
node_size.append(0.6)
else:
# enlarge hub size
node_size.append(5)
opts = {
"node_color":dataframe['value'],
'node_size': node_size, #0.6,
'with_labels': False,
"pos":pos,
"cmap":plt.cm.plasma
}
nodes = nx.draw_networkx_nodes(graph, **opts)
nodes.set_norm(mcolors.SymLogNorm(linthresh=0.01, linscale=1))
edges = nx.draw_networkx_edges(graph, pos, width=0.05)
plt.colorbar(nodes)
plt.axis('off')
plt.savefig(save_as)
plt.show()
def plot_gragh(graph, save_dir):
pos = nx.random_layout(graph)
options = {
'pos': pos,
'node_color': 'black',
'node_size': 0.7,
'width': 0.05,
}
nx.draw(graph, **options)
plt.savefig(os.path.join(save_dir, 'graph.png'))
plt.show()
return pos
def draw_properties(graph, pos, hubs, degrees, save_dir):
with open(os.path.join(save_dir, "properties.pkl"), "rb") as f:
property_info_dict = pickle.load(f)
degree_corr = property_info_dict["degree_correlation"]
degree_distribution = property_info_dict["degree_distribution"]
clustering_coef = property_info_dict["clustering_coef"]
plot_curve(clustering_coef, "log(k)", "log(C(k))", "Clustering Coefficient",
save_as=os.path.join(save_dir, "clustering_coef.png"),
log=True,
h_line=property_info_dict["avg_clustering_coef"])
plot_curve(degree_corr, "log(k)", "log(knn)", "Degree Correlation",
save_as=os.path.join(save_dir, "degree_corr.png"),
log=True)
plot_curve(degree_distribution, "log(k)", "log(P(k))", "Degree Distribution",
save_as=os.path.join(save_dir, "degree_distribution.png"),
log=True,
v_line=property_info_dict["avg_degree"])
bc_values = property_info_dict["bc_values"]
cc_values = property_info_dict["closeness"]
bc_degree = {}
cc_degree = {}
for i in range(len(degrees)):
k = degrees[i]
if cc_values[i] > 5000:
continue
if k not in bc_degree:
bc_degree[k] = [bc_values[i]]
else:
bc_degree[k].append(bc_values[i])
if k not in cc_degree:
cc_degree[k] = [cc_values[i]]
else:
cc_degree[k].append(cc_values[i])
for k in bc_degree.keys():
bc_degree[k] = sum(bc_degree[k])/float(len(bc_degree[k]))
cc_degree[k] = sum(cc_degree[k])/float(len(cc_degree[k]))
plot_curve(bc_degree, "log(k)", "log(bc)", "Betweenness v.s. Degree",
log=True,
save_as=os.path.join(save_dir, "bc_degree.png"))
plot_curve(cc_degree, "log(k)", "log(cc)", "Closeness v.s. Degree",
log=True,
save_as=os.path.join(save_dir, "cc_degree.png"))
bc_cc = {}
for i in range(len(degrees)):
if cc_values[i] > 5000:
continue
bc_cc[bc_values[i]] = cc_values[i]
plot_curve(bc_cc, "log(bc)", "log(cc)", "Betweenness v.s. Closeness",
log=True,
save_as=(os.path.join(save_dir, "bc_cc.png")))
plot_heatmap(graph, pos, hubs, bc_values,
save_as=os.path.join(save_dir, 'betweenness.png'))
plot_heatmap(graph, pos, hubs, cc_values,
save_as=os.path.join(save_dir,'closeness.png'))
if __name__ == "__main__":
if len(sys.argv) < 4:
print("Usage: python plot.py /path/to/graph /path/to/analysis/result <k>")
exit()
k = int(sys.argv[3])
plt.rcParams["figure.figsize"] = (11, 7)
nx_graph = nx.Graph()
own_graph = graph.Graph(sys.argv[1])
degrees = own_graph.get_degrees()
hubs = []
matplotlib.rcParams.update({'font.size': 20})
for v in own_graph.get_vertices():
if degrees[v] > k:
hubs.append(v)
for w in own_graph.neighbor_of(v):
nx_graph.add_edge(v, w)
result_dir = sys.argv[2]
if nx_graph.nodes():
pos = plot_gragh(nx_graph, result_dir)
draw_properties(nx_graph, pos, hubs, degrees, result_dir)
else:
print("There is no node satisfying your degree threshold.")
| true
|
807fc970073592ee0b15c5a8bb9d350d3c1c3976
|
Python
|
arinanda/huffman-code-implementation-webapps
|
/storage/compressor/huffman/adaptive_huffman/compress.py
|
UTF-8
| 1,008
| 2.75
| 3
|
[] |
no_license
|
from common import util
from huffman.adaptive_huffman.adaptive_huffman import *
def encode(text):
encoded_text = str()
root = None
null = Node('null', 0)
node_list = dict()
for char in text:
if char in node_list:
encoded_text += get_code(node_list[char])
node_list[char].value += 1
else:
encoded_text += get_code(null)
encoded_text += '{0:08b}'.format(ord(char))
node_list[char] = insert_node(null, char)
if root is None:
root = null.parent
update_tree(null.parent)
return encoded_text
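# Sketch of the scheme implemented above (the code assignment itself lives in
# the imported adaptive_huffman module): the first occurrence of a character is
# emitted as the current code of the NYT ('null') node followed by its 8-bit
# value, and the character is inserted into the tree; later occurrences are
# emitted with the character's current code and its weight is incremented.
# After every symbol the tree is rebalanced via update_tree().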
def save(b, filename):
with open(filename, 'wb') as output:
output.write(b)
def compress(filename):
text = util.load_file_as_text(filename)
encoded_text = encode(text)
encoded_bytes = util.to_byte_array(encoded_text)
output = util.get_output_filename(filename)
save(encoded_bytes, output)
print(util.get_compression_ratio(encoded_bytes, text))
| true
|
9087fc34a07550700b110fb6a52c0b74c3956d2e
|
Python
|
icavrak/SmartHomeSim
|
/PricingProfile_Dual.py
|
UTF-8
| 797
| 3.0625
| 3
|
[] |
no_license
|
import datetime
from PricingProfile import PricingProfile
class PricingProfile_Dual(PricingProfile):
    # Python does not support constructor overloading, so a single __init__
    # with a default argument covers the no-argument case (1.0 / 2.0 tariff).
    def __init__(self, init_arguments="1.0, 2.0"):
        arg_list = init_arguments.split(", ")
        self.low_price = float(arg_list[0])
        self.high_price = float(arg_list[1])
        self.low_to_high = datetime.datetime.strptime("07:00:00", "%H:%M:%S").time()
        self.high_to_low = datetime.datetime.strptime("21:00:00", "%H:%M:%S").time()
def getCurrentPrice(self, time):
current_time = time.time()
if current_time < self.low_to_high:
return self.low_price
if current_time > self.high_to_low:
return self.low_price
return self.high_price
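    # For example, a profile created with init_arguments "0.10, 0.25" returns
    # 0.10 for times such as 06:30 or 22:15 and 0.25 at 12:00, since the high
    # tariff applies between 07:00 and 21:00.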
| true
|
4a8bb6e21e4ec579a42f10de91942d08bb8509ad
|
Python
|
jugal13/Python_Lab
|
/Python Lab/Programs/Program4a.py
|
UTF-8
| 146
| 3.625
| 4
|
[] |
no_license
|
def Initials(name):
return ''.join(list(map(lambda x:x[0],name.split())))
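# For example, Initials("Ada Lovelace Byron") returns "ALB".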
name=input("Enter full name: ")
print("Initials are: "+Initials(name))
| true
|
45a4ea8f7b8b928083cb873e565b513850f9f38b
|
Python
|
minhntm/algorithm-in-python
|
/datastructures/tree/challenge2_find_kth_maximum.py
|
UTF-8
| 933
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
"""
Problem Statement:
- Implement a function findKthMax(root,k) which will take a BST and any number
“k” as an input and return kth maximum number from that tree.
Output:
- Returns kth maximum value from the given tree
Sample Input:
bst = {
6 -> 4,9
4 -> 2,5
9 -> 8,12
12 -> 10,14
}
where parent -> leftChild,rightChild
k = 3
Sample Output:
- 10
"""
from Node import Node
from BinarySearchTree import BinarySearchTree
def findKthMax(root, k):
inorder_tree = list(inorder_traverse(root))
return inorder_tree[-k]
def inorder_traverse(root):
if root.leftChild:
yield from inorder_traverse(root.leftChild)
yield root.val
if root.rightChild:
yield from inorder_traverse(root.rightChild)
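# An inorder traversal of a BST yields the values in ascending order, so the
# kth maximum is the kth element counted from the end of that sequence, which
# is what findKthMax returns via inorder_tree[-k].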
if __name__ == "__main__":
BST = BinarySearchTree(6)
BST.insert(1)
BST.insert(133)
BST.insert(12)
print(findKthMax(BST.root, 3))
| true
|
5e415c04f92fd542da428d890bdf4dcaf70355da
|
Python
|
JordanRussell3030/Farm_Simulation
|
/cow_class.py
|
UTF-8
| 352
| 3.53125
| 4
|
[] |
no_license
|
from animal_class import *
class Cow(Animal):
def __init__(self):
super().__init__(1, 5, 4)
self._type = "Cow"
def grow(self, food, water):
if food >= self._food_need and water >= self._water_need:
self._weight += self._growth_rate
self._days_growing += 1
self._update_status()
| true
|
81e9a86ede649ff1a479f6061896b33b07c6a140
|
Python
|
DouglasMartins1999/WorldHunting
|
/database/Words.py
|
UTF-8
| 11,848
| 2.875
| 3
|
[] |
no_license
|
countries = []
countries.append({
"name": "França",
"categories": {
"easy": [
["Paris", "Capital francesa"],
["Euro", "Moeda em circulação na França"],
["Torre Eiffel", "Popular torre erguida na capital do país"],
["Louvre", "Museu mais popular"],
["Carrefour", "Rede de supermercados multinacional francesa"],
["Fraternidade", "Lema da Revolução Francesa: Liberdade, Igualdade e _______"],
["Guiana Francesa", "Território ultramarino na América do Sul"],
],
"medium": [
["Champs Elysee", "Popular e movimentada avenida da capital francesa"],
["Versalhes", "Berço da Revolução, cidade criada artificalmente em 1682"],
["Bonaparte", "Sobrenome do Popular Imperador Revolucionista"],
["Entente", "Triplice na qual os franceses se aliaram na I Guerra Mundial"],
["Renault", "Multinacional francesa fabricante de automóveis"],
["Triunfo", "Arco do _______, monumento na capital representando as vitórias de Napoleão"],
["Joana", "____ d’Arc, heroína e santa francesa"],
["Descartes", "Filósofo: \"Penso, logo existo\""],
["Petit Gateau", "Sobremesa composta de pequeno bolo de chocolate"],
["Macaron", "Doce arredondado em formato de biscoito"],
],
"hard": [
["Francos", "Nome como era conhecido o povo frances na antiguidade"],
["Cem Anos", "A Guerra dos _______ foi um conflito travado pela França e Inglaterra entre 1337 e 1453"],
["Luis XIV", "\"Rei Sol\", luxuoso, governou a França de 1643 a 1715"],
["Marseillaise", "La __________, hino nacional francês"],
["Bastilha", "A tomada da ________ representou o inicio da revolução"],
["Galia", "Nome pela qual a região da frança era conhecida no império Romano"],
["Monet", "Popular pintor impressionista: Claude ______"],
["Rousseau", "Jean-Jacques ________, importante filósofo"]
]
}
})
countries.append({
"name": "China",
"categories": {
"easy": [
["Grande Muralha", "A China possui a maior construção do mundo, a _______ ________"],
["Ideografia", "Utiliza a ___________ para representar palavras e conceitos abstratos"],
["Poluicao", "Segundo país com mais _________ no mundo"],
["Pequim","______, a enorme capital da China"],
["Suina", "A carne _______ é a mais consumida entre Chineses"],
["Asia", "A China é o maior país da _____ Oriental"],
["Exercito", "O maior número de soldados do _______ encontra-se na China"]
],
"medium": [
["Economia","Possui a maior ________ do mundo"],
["Comunista","O País é governado pelo partido ________"],
["Xi Jinping", "__ ______ é como se chama o presidente deste País"],
["Wang", "O nome mais popular na China é _____"],
["Taiwan", "_____ é a Reública da China"],
["Terremoto", "O ______ mais mortal da história aconteceu em 1556 e matou 830 mil pessoas"],
["Oceano","Quase um terço da poluição do ar de San Francisco, vem da China pelo ______"],
["Eletricidade", "A China gera cerca de '70%' da sua ______ em usinas de carvão"],
["Censura", "A China tem uma das políticas de ______ mais rígidas do mundo"],
["Morcego", "O _____ é um símbolo da sorte no país"],
],
"hard": [
["Pandas", "Todos os ______ do mundo são emprestados da China"],
["Mukden", "Em 1931, os japoneses invadiram o território chinês, evento conhecido como Incidente ______"],
["Guerra Civil", "A ____ _____ entre comunistas e nacionalistas foi parcialmente interrompida em 1930"],
["Nacionalistas", "A China era ocupada por diferentes nações, como Inglaterra e França, o que motivou movimentos ________"],
["BRICS", "O país é membro do grupo _____ de economias emergentes importantes"],
["Reforma Economica", "Com a ____ ______ chinesa, milhões de trabalhadores rurais do país se mudaram para as grandes cidades"],
["Carceraria", "O país também tem a segunda maior população ______ do planeta (atrás apenas dos Estados Unidos)"],
["Filho unico", "Preocupada com seu crescimento populacional, tentou implementar uma política de planejamento familiar, chamada: política do ____ _____"]
]
}
})
countries.append({
"name": "África do Sul",
"background": "",
"categories": {
"easy": [
["Africano", "A África do Sul é um país situado na extremidade sul do continente _______"],
["Nobel", "A África do Sul tem 10 prêmios _____"],
["Sul", "A África do Sul é um país que fica localizado no extremo ___ do continente africano"],
["Nelson Mandela", "Ganhador do Prêmio Nobel e ex-presidente"],
["Apartheid", "Regime de segregação racial"],
["Leis", "As ____ de trânsito são parecidas com as brasileiras"],
["Inglês", "Idioma oficial"]
],
"medium": [
["Joanesburgo", "A maior cidade"],
["Cidade do Cabo", "Dentre suas capitais estão: _______, Pretória e Bloemfontein"],
["Capoeira", "Cultura brasileira herdada dos africanos."],
["Atlantico", "Oceano que separa o continente americano do continente africano."],
["Africano", "Continente"],
["futebol", "Principais esportes do país: Rugby, Crinquete, Surf e _____"],
["Segregacao", "Mesmo após o fim do regime apartheid, a _________ racial ainda é visível neste país"],
["Casamentos", "Durante o apartheid, uma das leis era: proibição de ___________ entre brancos e negros"]
],
"hard": [
["Mao inglesa", "Regime de trânsito vigente no País, em que se trafega na mão esquerda"],
["Economica", "País considerado a maior força _______ do continente africano."],
["Desertificacao", "Fenômeno que ocorre devido ao mau uso do solo e que vem aumentando áreas de desertos na África."],
["Nilo", "Principal rio africano"],
["República", "Nome completo da África do Sul: ______ da África do Sul"],
["Brasil", "O clima do país é semelhante ao clima do sul do _____"],
["Secas", "São um grande problema para a agricultura no sul da África"],
["Lenha", "A fonte de energia mais consumida na África para fins domésticos e manufatureiros"],
["Copa", "Décima nona edição da ____ do mundo, ocorreu de 11 de junho até 11 de julho, sedido neste país."],
["Nobel", "A literatura do país conta com vários escritores renomeados, onde alguns deles já receberam homenagens do Prêmio _____ de Literatura"]
]
}
})
countries.append({
"name": "Austrália",
"background":"",
"categories": {
"easy": [
["Indico", "A Austrália é um país continental cercado pelos oceanos ______ e Pacífico"],
["Sydney", " O país é conhecido pela ______ Opera House"],
["Outback", "Deserto interior e rede de restaurante norte-americano"],
["Canguru", "_______ animal popular da Austrália"],
["Melbourne", "Segunda cidade mais populosa do país"],
["Quente", "Clima do país"]
],
"medium": [
["Ingles", "Idioma do país"],
["Desenvolvimento", "A Austrália é reconhecida por seu _______ econômico e qualidade de vida "],
["Alto", "O país possui ____ IDH"],
["Feminino", "A Austrália foi a primeira nação independente a permitir o voto _______"],
["Colonia", "A Austrália deixou de ser ______ inglesa em 1901"],
["Futebol", "A cultura do país também é destacada no _____ australiano - que exige muito condicionamento físico dos atletas"],
["Sudeste", "A Nova Zelândia localiza-se a _______ da Austrália"],
["Turismo", "Devido ao grande número de estudantes, o país é destaque no _______"],
],
"hard": [
["Aumentar", "Devido a baixa taxa de fecundidade no país, existe um grande incentivo do governo para ______ a população local"],
["Ouro", "A Austrália exporta carne, trigo, lã e minérios como bauxita, chumbo, níquel, manganês, _______ e prata"],
["Capricornio", "A Austrália é cortada pelo Trópico de _______."],
["Gratuito", "Há transportes no país que são muito eficientes, limpos e ______"],
["Camberra", "Capital da Austrália"],
["Igualdade", "Segundo país do mundo que olhou de forma crítica para a ________ de gênero "],
["Estudantes", "Terceiro país mais procurado por ________ estrangeiros"],
["Restrito", "Por conta do grande consumo de bebida alcoólica, seu consumo é muito ______ neste país"],
["Economia", "A Segunda Guerra Mundial contribuiu para grandes mudanças na _______ do país, o que resultou em sua melhoria financeira mais tarde"],
["Coala", "Espécie da fauna australiana, que habita no alto dos eucaliptos e se alimenta de suas folhas"]
]
}
})
countries.append({
"name": "Brasil",
"categories": {
"easy": [
["Cristo Redentor", "Uma das 7 maravilhas do mundo"],
["Salvador", "Primeira capital do país"],
["Independencia", "Comemorado em 7 de setembro"],
["Paulista", "Gentílico de São Paulo e principal avenida do estado"],
["Globo", "Maior conglomerado de mídia da américa latina"],
["Pele", "Conhecido como \"O rei do futebol\""],
["Isabel", "Princesa responsável pela abolição da escravidão"],
["Atlantico", "Oceano que banha o Brasil"],
],
"medium": [
["Marechal Deodoro", "Primeiro Presidente"],
["Manaus", "Local do maior polo industrial do país"],
["Embraer", "Fabricante nacional de aviões"],
["Feijoada", "Guisado de feijão muito popular"],
["Pedro I", "Primeiro Rei do Brasil"],
["Caatinga", "Bioma exclusivamente brasileiro, encontrado em boa parte do nordeste"],
["Niemeyer", "Oscar _______, renomado arquiteto modernista"],
["Ronaldinho", "Conhecido como \"O fenômeno\", futebolista que atuou como atacante"],
["Rondonia", "Estado cuja capital é Porto Velho"],
["Amazonas", "Estado que recebe o nome do maior rio do país"],
["Japoneses", "O Brasil abriga a maior côlonia de _______ fora do seu país"],
["Aparecida", "Padroeira do Brasil"],
["Planalto", "Palácio do _______, sede do poder executivo"],
["Bossa Nova", "Genero musical brasileiro, surgido ao fim dos anos 50"]
],
"hard": [
["ANTT", "Uma das agências reguladoras de transportes"],
["Machado de Assis", "Fundador da Academia Brasileira de Letras"],
["Portinari", "Pintor Brasileiro, autor de \"O Lavrador de Café\""],
["Guanabara", "Estado estinto em 1975, abrigava o antigo Distrito Federal"],
["Castelo Branco", "Primeiro presidente do regime militar brasileiro"],
["Paraguai", "Pais cujo Brasil, Argentina e Uruguai declaram guerra em 1864"],
["Tiradentes", "Mártir brasileiro, atuante na Inconfidência Mineira. Possui um feriado em sua homenagem."],
["Chui", "Cidade mais ao sul (meridional) do país"],
]
}
})
| true
|
83ad4939afc767f8886bacf66d68e931215e4bed
|
Python
|
anandi24/PythonGames
|
/src/BullsAndCows/BullsAndCows.py
|
UTF-8
| 1,604
| 3.90625
| 4
|
[] |
no_license
|
import pandas as pd
import numpy as np
import random
def main():
print("Firstly, input the length of the sequence to be stored in memory")
print("Secondly, keep guessing the number stored in memory. Note: Every guess should be of the same length sequence as declared in step 1")
print("Keep guessing till you get all the bulls right")
length = int(input('Input the length of sequence: '))
ans = generateSeq(length)
bullseye = False
count = 0
while(not bullseye):
try:
num = int(input('Enter your Guess of ' + str(length) + ' digit number:'))
numList = [int(i) for i in str(num)]
count +=1
            #print(numList)
if len(numList) != length:
bullseye = False
else:
bullseye = checkBullsEye(numList, ans)
except ValueError:
print("Sorry, I didn't understand that.")
# better try again... Return to the start of the loop
continue
print("Your guess count is : " + str(count))
print("You got it right!! Congratulations")
def checkBullsEye(num, ans):
bulls = 0
cows = 0
bullsEye = False
for i, a in zip(num, ans):
if i == a:
bulls +=1
elif i in ans:
cows +=1
print(str(bulls) + " bulls " + str(cows) + " cows")
if bulls == len(ans):
bullsEye = True
return bullsEye
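# Example: with the hidden sequence ans = [1, 2, 3, 4], the guess 1243 prints
# "2 bulls 2 cows" (1 and 2 are in the right position, 4 and 3 occur elsewhere);
# only a guess scoring 4 bulls ends the game.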
def generateSeq(n):
num = random.sample(range(0, 9), n)
if num[0] == 0:
num = generateSeq(n)
    #print(num)
return num
if __name__ == '__main__':
main()
| true
|
ff09bfde71fa3640bcebf6a19a302bed9d372adf
|
Python
|
khanhvu11/make-appointments-skill
|
/__init__.py
|
UTF-8
| 10,599
| 2.5625
| 3
|
[] |
no_license
|
from mycroft import MycroftSkill, intent_handler
from adapt.intent import IntentBuilder
from mycroft.util.parse import extract_datetime
from mycroft.util.time import now_local, default_timezone
import caldav
from caldav.elements import dav
from datetime import datetime, timedelta
import json
import pytz
from icalendar import Calendar, Event
DEFAULT_TIME = now_local().replace(hour=8, minute=0, second=0)
class MyCalendar:
def __init__(self):
self.username = "dv029"
self.password = "dv029admin123"
self.url = "https://" + self.username + ":" + self.password + \
"@next.social-robot.info/nc/remote.php/dav"
self.saved = False
self.berlin = pytz.timezone('Europe/Berlin')
def getCalendars(self):
        # access the Nextcloud calendar via its URL
client = caldav.DAVClient(self.url)
principal = client.principal()
# alle vorhandene Kalender abholen
calendars = principal.calendars()
return calendars
    def searchForAppointment(self, calendar):
        """ Find the next appointment """
        # no appointment has been found yet
apmtNotExisted = True
        # start of the search window; here: the moment the user asks for the next appointment
startOfDay = datetime.now(self.berlin)
        # search interval; here one day
nextDay = startOfDay + \
timedelta(days=1)
        # the interval is shifted forward in the loop until an appointment is found
        # as long as no appointment has been found
while(apmtNotExisted):
            # search for appointments from 'startOfDay' to 'nextDay' (one interval)
events = calendar.date_search(
start=startOfDay, end=nextDay)
            # if appointments were found in this interval
if len(events) > 0:
                # appointments exist on this day, so find the first one
                # by scanning the day in 30-minute steps
start = startOfDay
end = start + timedelta(hours=0.5)
while(apmtNotExisted):
event = calendar.date_search(start=start, end=end)
                    # if the first appointment has been found
if len(event) > 0:
                        # stop the loop and return the appointment
apmtNotExisted = False
return event
                    # otherwise advance 'start' and 'end' by 30 minutes
start = end
end += timedelta(hours=0.5)
            # otherwise advance 'startOfDay' and 'nextDay' by one day
startOfDay = nextDay
nextDay += timedelta(days=1)
    def getNextAppointmentDate(self):
        """Collect the details of the next appointment"""
        # details of the next appointment
nextAppointment = {}
        # fetch the calendars
calendars = self.getCalendars()
if len(calendars) > 0:
            # select the first calendar
calendar = calendars[0]
            # find the next appointment
event = self.searchForAppointment(calendar)
            # convert the caldav event into an ical event
nextEvent = Calendar.from_ical(event[0]._data)
for component in nextEvent.walk():
if component.name == "VEVENT":
                    # store the appointment title
nextAppointment.update(
{'Summary': component.get('summary')})
if component.get('discription') != None:
                        # store the appointment description
nextAppointment.update(
{'Discription': component.get('discription')})
                    # store the start date
nextAppointment.update(
{'Start Date': component.get('dtstart').dt.strftime('%d/%m/%Y')})
                    # store the start time
nextAppointment.update(
{'Start Time': component.get('dtstart').dt.astimezone(self.berlin).strftime('%H:%M')})
                    # store the end date
nextAppointment.update(
{'End Date': component.get('dtend').dt.strftime('%d/%m/%Y')})
                    # store the end time
nextAppointment.update(
{'End Time': component.get('dtend').dt.astimezone(self.berlin).strftime('%H:%M')})
return nextAppointment
    def saveAppointment(self, apmt, apmt_timedate):
        """Create a calendar entry"""
cal = Calendar()
event = Event()
myCal = self.getCalendars()
        # store the appointment details in the event
event.add('summary', apmt)
event.add('dtstart', apmt_timedate)
event.add('dtend', apmt_timedate + timedelta(hours=1))
        # add the event to the ical calendar
cal.add_component(event)
        # write the appointment to Nextcloud
myCal[0].save_event(cal)
self.saved = True
    def eventExisted(self, dt):
        """Check whether an appointment exists at the given time"""
calendars = self.getCalendars()
if len(calendars) > 0:
calendar = calendars[0]
            # appointments at the given time
event = calendar.date_search(
start=dt, end=(dt + timedelta(minutes=5)))
return event
    def deleteAppointment(self, dt):
        """Delete an appointment"""
calendars = self.getCalendars()
if len(calendars) > 0:
calendar = calendars[0]
            # appointments at the given time
event = calendar.date_search(
start=dt, end=(dt + timedelta(minutes=5)))
            # delete the appointment at the given time
event[0].delete()
print(event[0], 'was deleted')
class MakeAppointments(MycroftSkill):
def __init__(self):
        # create an instance of the 'MyCalendar' class
self.myCal = MyCalendar()
MycroftSkill.__init__(self)
@intent_handler('next.appointments.intent')
    def handle_next_appointment(self, message):
        """Handle the user's question and answer it by voice"""
        # look up the next appointment via 'getNextAppointmentDate' on the 'myCal' object
nextAp = self.myCal.getNextAppointmentDate()
        # appointment title
todo = nextAp['Summary']
        # appointment date
dateS = nextAp['Start Date']
        # appointment time
timeS = nextAp['Start Time']
# akustisch beantworten
self.speak_dialog(
'Your next appointment is on {} at {} and is entitled {}.'.format(dateS, timeS, todo))
@intent_handler('make.appointment.intent')
def add_new_appointment(self, msg=None):
""" Handler zum Hinzufügen eines Termins mit einem Namen zu einem bestimmten Zeitpunkt. """
# Name des Termins
appointment = msg.data.get('appointment', None)
# wenn kein Name da ist
if appointment is None:
# Rückmelden, dass kein Name gibt
return self.unnamed_appointment(msg)
# die Eingabe abholen
utterance = msg.data['utterance']
# Eine Datums- / Uhrzeitangabe wurde extrahiert
appointment_time, _ = (extract_datetime(utterance, now_local(),
self.lang,
default_time=DEFAULT_TIME) or
(None, None))
if appointment_time:
# den Kalendereintrag machen
self.myCal.saveAppointment(appointment, appointment_time)
if self.myCal.saved:
# bestätigen, dass den Eintrag gemacht wurde
self.speak_dialog('appointments.make')
else:
# wenn kein Datum gibt, rückmelden
self.speak_dialog('NoDate')
@intent_handler('unnamed.appointment.intent')
def unnamed_appointment(self, msg=None):
""" Behandelt den Fall, in dem eine Uhrzeit angegeben wurde, aber kein Terminname hinzugefügt wurde."""
# die Eingabe abholen
utterance = msg.data['utterance']
# Eine Datums- / Uhrzeitangabe wurde extrahiert
apmt_time, _ = (extract_datetime(utterance, now_local(),
self.lang,
default_time=DEFAULT_TIME) or
(None, None))
# nach den Terminname fragen
response = self.get_response('AppointmentName')
# wenn Terminname und Datum und Uhrzeit da sind
if response and apmt_time:
# den Kalendereintrag machen
self.myCal.saveAppointment(response, apmt_time)
if self.myCal.saved:
# bestätigen, dass den Eintrag gemacht wurde
self.speak_dialog('appointments.made')
@intent_handler('deleteAppointment.intent')
def remove_appointment(self, msg=None):
"""Entfernen Sie alle Termine für das angegebene Datum."""
# Eine Datums- / Uhrzeitangabe wurde extrahiert
if 'date' in msg.data:
date, _ = extract_datetime(msg.data['date'], lang=self.lang)
else:
date, _ = extract_datetime(msg.data['utterance'], lang=self.lang)
if date:
if date.time():
# schauen, ob der Termin am bestimmten Tag existiert
if self.myCal.eventExisted(date):
# wenn ja, nach der Bestätigung zum Löschen fragen
answer = self.ask_yesno(
'confirmDelete', data={'date': date.strftime("%d/%m/%Y %H:%M")})
if answer == 'yes':
# wenn die Antwort 'Ja' ist, den Entrag löschen
self.myCal.deleteAppointment(date)
self.speak_dialog('Your appointment on {} was removed.'.format(
date.strftime("%d/%m/%Y %H:%M")))
else:
# wenn kein Termin, rückmelden
self.speak_dialog('noAppointment', {
'date': date.strftime("%d/%m/%Y %H:%M")})
else:
# wenn kein Datum gibt, rückmelden
response = self.get_response('repeatDeleteDate')
if response:
self.remove_appointment(response)
def stop(self):
self.stop_beeping()
def shutdown(self):
pass
def create_skill():
return MakeAppointments()
| true
|
b965e247e6b792e94b6ee0cbd0c3a4257dedede1
|
Python
|
charankk21/KittyBank
|
/kittyTempWork.py
|
UTF-8
| 4,713
| 3.109375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 06:27:57 2018
@author: Charan
"""
import pandas as pd
transactions = pd.DataFrame()
customerProfile = pd.DataFrame()
def inititalizeTransaction_df():
global transactions
print('Initalizing the Kitty Bank')
dict2 = {'Custid':[101,102,103,104,105],
'Transactionno':[21,22,23,24,25],
'Date':['13/11/2018','13/11/2018','13/10/2018','10/11/2018','01/11/2018'],
'Txn_type':[0,1,0,1,0],
'Amount':[100000,10000,15000,5000,25000],
'Balance':[100000,10000,15000,5000,25000]
}
transactions=pd.DataFrame(data=dict2)
def initalizeCustomers_df():
global customerProfile
dict1 = {'Custid':[101,102,103,104,105,106],
'Name':['Swetha','Swecha','Susmita','Sitara','Samanvi','Shlok'],
'Age':[20,22,25,21,20,29],
'City':['Hyd','Bng','Che','Kol','Coa','Goa'],
'Gender':['F','F','F','F','F','M']
}
customerProfile=pd.DataFrame(data=dict1)
def displayCustomerProfile():
    print('displayCustomerProfile')
    customerId = int(input('please enter your customerId :'))
    print(customerId)
    for x in range(0,len(customerProfile)):
        if(customerProfile.iloc[x].loc['Custid']==customerId):
            # balance = total deposits (Txn_type 0) minus total withdrawals (Txn_type 1) for this customer
            balance = transactions.loc[(transactions.Custid == customerId) & (transactions.Txn_type == 0),'Amount'].sum() - transactions.loc[(transactions.Custid == customerId) & (transactions.Txn_type == 1),'Amount'].sum()
            print('Customer Name is : {} and his balance is : {}'.format(customerProfile.iloc[x].loc['Name'],balance))
# Unused variant: it assumes a 'Balance' column in customerProfile, which is not created above
def acceptDeposit1():
print('AcceptDeposit')
customerId = int(input('please enter your customerId :'))
amount =int(input('please enter your amount :'))
customerProfile.loc[customerProfile['Custid']==customerId ,'Balance'] = customerProfile.loc[customerProfile['Custid']==customerId ,'Balance']+ amount
print(customerProfile.loc[customerProfile['Custid']==customerId ,'Balance'])
def acceptDeposit():
print('AcceptDeposit')
customerId = int(input('please enter your customerId :'))
amount =int(input('please enter your amount :'))
txno = max(transactions.Transactionno)+1
previousbalance = transactions.loc[(transactions.Custid == customerId) & (transactions.Txn_type == 0),'Amount'].sum() - transactions.loc[(transactions.Custid == customerId) & (transactions.Txn_type == 1),'Amount'].sum()
transactions.loc[max(transactions.index)+1] = [customerId,txno,'14/11/2018',0,amount,previousbalance+amount]
def addNewCustomer():
print('Adding new customers')
#Get Customer Name
customerName = input('Please provide customer Name :')
while(customerName.isalpha() == False):
print('Please provide Valid Customer Name')
customerName = input('Please provide customer Name :')
#Get Age
age = input('Please provide your age: ')
while(age.isdigit() == False):
print('Please provide Valid Age')
age = input('Please provide your age: ')
#Get Gender
Gender = input('Please provide your gender as M for Male or F for Female: ')
while((Gender == 'M' or Gender == 'F') == False):
print('Gender can take M for Male or F for Female, Please provide Valid Gender')
Gender = input('Please provide your gender as M for Male or F for Female: ')
city = input('Please provide your city: ')
#Get City
while(city.isalpha() == False):
print('Please provide Valid City Name')
city = input('Please provide your city: ')
#Generate New customer Id
newCustomerId = max(customerProfile.Custid)+1
customerProfile.loc[max(customerProfile.index)+1] = [newCustomerId,customerName,int(age),city,Gender]
def customerPassbook():
print('printing customerPassbook')
    customerId = int(input('please enter your customerId :'))
    txDetails =transactions.loc[transactions.Custid == customerId]
print(txDetails.iloc[:,1:].to_string(index=False))
def withdrawMoney():
print('withdrawMoney')
customerId = int(input('please enter your customerId :'))
amount =int(input('please enter your amount to withdraw :'))
txno = max(transactions.Transactionno)+1
previousbalance = transactions.loc[(transactions.Custid == customerId) & (transactions.Txn_type == 0),'Amount'].sum() - transactions.loc[(transactions.Custid == customerId) & (transactions.Txn_type == 1),'Amount'].sum()
if(previousbalance >= amount):
transactions.loc[max(transactions.index)+1] = [customerId,txno,'14/11/2018',1,amount,previousbalance-amount]
inititalizeTransaction_df()
initalizeCustomers_df()
addNewCustomer()
acceptDeposit()
withdrawMoney()
customerPassbook()
| true
|
b0e67e307f81ab4589084ec4bff6ff1c3dd2477c
|
Python
|
olavobacelar/spens-data-completion
|
/src/dataset.py
|
UTF-8
| 8,944
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
# Prepare the data in __init__ so that it is easier to work with later.
#
import pandas as pd
import numpy as np
import numpy.random as rd
import random
from typing import Union
import torch
from dataclasses import dataclass, astuple
from torch.utils.data import Dataset, DataLoader
N_LETTERS = 26  # TODO: move this constant out of here; the calling module needs it too and it should not be hardcoded
LETTER_SHAPE = (64, 64)  # assumed per-letter image shape; not defined in the original snippet, adjust to the real data
@dataclass(frozen=True)
class DatasetConfig:
'''To configure a Dataset, set those parameters below.
It's not exactly necessary, but is helpful to have the configuration
as a dataclass (Google Colab still doesn't type check anything though.)
'''
batch_size: int
dataset_mode: str
n_visible_letters: Union[None, int] = None
prob_choice: Union[None, float] = None
initial_seed: int = 48
def __post_init__(self):
# With Google Colab we can't easily use type hints, so we add this
if self.dataset_mode not in ['fixed', 'mixed', 'prob'] or not isinstance(self.batch_size, int):
raise ValueError
def __iter__(self):
return iter(astuple(self))
def print_training_repr(self):
output = f'Dataset Config: batch_size: {self.batch_size} • mode: '
if self.dataset_mode == 'fixed':
print(output + f'fixed • n_visible_letters: {self.n_visible_letters}')
elif self.dataset_mode == 'mixed':
print(output + f'mixed')
elif self.dataset_mode == 'prob':
print(output + f'prob • prob_choice: {self.prob_choice}')
class FontsDataset(Dataset):
    # Is it inefficient to convert these to lists? Maybe switch to tensors instead?
def __init__(self, data, dataset_config):
        # y is the (unmasked) output. We'll need to construct the masked input
        # The mode that used to be passed in is no longer used here. Remove it?
_, dataset_mode, n_visible_letters, prob_choice, initial_seed = dataset_config
if dataset_mode not in ['fixed', 'mixed', 'prob']:
raise ValueError("the variable mode must be one of the following: "
"'fixed', 'mixed', 'prob'")
# Data input and self.letter_masks are expected to be a pandas dataframe
self.y = data.y.values.tolist()
        # Careful with the masks: could this end up being a list of Nones?
if self.y[0].shape[0] != N_LETTERS:
raise ValueError
self.n_fonts = len(self.y)
# Here we build a random number generator for each dataset, which adds to
        # the ease of reproducibility
self.init_rngs(initial_seed)
self.build_masked_input(dataset_config)
def __len__(self):
return self.n_fonts
def __getitem__(self, index):
        # Is it faster to convert to tensor and float here or inside the feature routine?
return torch.tensor(self.x[index]), torch.tensor(self.y[index]), self.letter_masks[index]
def init_rngs(self, seed):
self.rng = random.Random(seed)
self.np_rng = rd.default_rng(seed)
def get_rngs_states(self):
return self.rng.getstate(), self.np_rng.bit_generator.state
def set_rngs_states(self, states):
self.rng.setstate(states[0])
self.np_rng.bit_generator.state = states[1]
def set_seed(self, seed):
# What exactly does this do?
# Different modules are used when the probability is used vs. when a fixed n_visible_letters is used
# therefore, when we call set_seed we set both seeds.
# Torch.manual_seed will set the seed for the data loader, fixing the order the different fonts are
# loaded in shuffle mode.
self.rng.seed(seed)
self.np_rng = rd.default_rng(seed)
def new_letter_masks_with_prob(self, n_fonts, prob_choice):
# prob_choice is the probability of a certain character NOT being masked
assert isinstance(n_fonts, int) and isinstance(prob_choice, float)
letter_masks = self.random_choice_at_least_one(n_fonts, prob_choice)
letter_masks = list(letter_masks.astype(bool))
return letter_masks
def new_letter_masks_fixed(self, n_fonts, n_visible_letters):
assert isinstance(n_fonts, int) and isinstance(n_visible_letters, int)
assert 1 <= n_visible_letters <= 25
letter_masks = np.concatenate([np.ones((n_fonts, n_visible_letters), dtype=bool),
np.zeros((n_fonts, N_LETTERS-n_visible_letters), dtype=bool)],
axis=1)
for one_font_of_letter_masks in letter_masks:
self.np_rng.shuffle(one_font_of_letter_masks)
return letter_masks
def new_letter_masks_mixed(self, n_fonts):
letter_masks = []
for i in range(n_fonts):
random_n_letters = self.rng.randint(1, N_LETTERS-1)
letter_mask = np.concatenate([np.ones(random_n_letters, dtype=bool),np.zeros(N_LETTERS - random_n_letters, dtype=bool)])
self.np_rng.shuffle(letter_mask)
letter_masks.append(letter_mask)
return letter_masks
def build_masked_input(self, dataset_config, another_seed=None, letter_masks=None):
'''Generate a masked input of the letters for each font, according to
the dataset_mode chosen. Each letter is masked by a list 'letter_masks',
        which is generated by other functions. These masks are used to modify
the input, so that only the letters corresponding to a True value in the
mask are shown.
We can also pass another_seed that may be different from the one passed
in the beginning.
'''
# Describe possible modes in the doc string!
_, dataset_mode, n_visible_letters, prob_choice, _ = dataset_config
if dataset_mode not in ['fixed', 'mixed', 'prob']:
raise ValueError("the variable dataset_mode must be one of the following: "
"'fixed', 'mixed', 'prob'")
if another_seed is not None:
self.set_seed(another_seed)
# Generate letter_masks depending on the dataset_mode, or get them from a parameter
if letter_masks is None:
if dataset_mode == 'fixed':
if n_visible_letters is not None:
self.letter_masks = self.new_letter_masks_fixed(self.n_fonts, n_visible_letters)
self.n_visible_letters = n_visible_letters
else:
raise ValueError
elif dataset_mode == 'mixed':
self.letter_masks = self.new_letter_masks_mixed(self.n_fonts)
elif dataset_mode == 'prob':
if prob_choice is not None:
self.letter_masks = self.new_letter_masks_with_prob(self.n_fonts, prob_choice)
self.prob_choice = prob_choice
else:
raise ValueError
else:
self.letter_masks = letter_masks
if len(self.letter_masks) != self.n_fonts:
raise ValueError('The number of fonts and masks for them is not the same!')
# Generate a masked input, using the given letter_masks
self.x = [None]*self.n_fonts
for font_index in range(self.n_fonts):
self.x[font_index] = np.zeros((N_LETTERS, *LETTER_SHAPE), dtype=np.float32) + 0.5
for letter_index in range(N_LETTERS):
if self.letter_masks[font_index][letter_index] == True:
self.x[font_index][letter_index] = self.y[font_index][letter_index]
def random_choice_at_least_one(self, n_fonts, prob_choice):
'''Choose one letter per font randomly, and choose the others ones with
probability equal to prob_choice. Therefore, choosing prob_choice=0.0
will select only one letter per font. We do this because we want at
least one letter per font to be shown during training or testing.
'''
def choose_mask(i, random_letter, prob_choice):
if self.rng.random() < prob_choice or i == random_letter:
return True
else:
return False
letter_masks = []
for font in range(n_fonts):
random_letter = self.rng.randint(0, N_LETTERS-1)
letter_mask = np.array([choose_mask(i, random_letter, prob_choice) for i in range(N_LETTERS)])
letter_masks.append(letter_mask)
return np.array(letter_masks)
def get_letter_masks(self):
if self.letter_masks is not None:
return self.letter_masks
else:
raise Exception("No font masks were created yet!")
| true
|
4c8722eeaa5d5cb72991b78f702dac4183d5578c
|
Python
|
adityaskarnik/algorithms
|
/Bubble Sort/bubble_sort.py
|
UTF-8
| 644
| 4.3125
| 4
|
[] |
no_license
|
def bubbleSort(itemList):
moreSwaps = True
counter = 0
while moreSwaps:
counter = counter + 1
print("Iteration number",counter)
moreSwaps = False
for element in range(len(itemList)-1):
if itemList[element] > itemList[element+1]:
moreSwaps = True
temp = itemList[element]
itemList[element] = itemList[element+1]
itemList[element+1] = temp
return itemList
def testBubbleSort():
itemList = [5,2,7,1,9,3,6]
print("Input given", itemList)
sortedList = bubbleSort(itemList)
print(sortedList)
testBubbleSort()
| true
|
30e8ee351b8ebf2b38af2ef7a452eee5e8f4581c
|
Python
|
EduardoArgenti/Python
|
/CursoEmVideo/ex039.py
|
UTF-8
| 634
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
# Write a program that reads a young person's year of birth and reports,
# according to their age:
# - whether they still have to enlist for military service
# - whether it is time to enlist
# - whether the enlistment deadline has already passed
# - the program must also show how many years are left or by how many
#   years the deadline was missed
from datetime import datetime
ano = int(input('Ano de nascimento: '))
idade = datetime.now().year - ano
if idade < 18:
print('\nVocê ainda vai se alistar daqui {} anos!'.format(18 - idade))
elif idade > 18:
print('\nVocê chegou {} anos atrasado!'.format(idade - 18))
else:
print('\nJá é hora de se alistar!')
| true
|
4a4140adeaf59f48cf4dfb0f89ca1423e1f2310a
|
Python
|
nbanion/money
|
/money/category.py
|
UTF-8
| 8,029
| 3.828125
| 4
|
[] |
no_license
|
"""Utilities for categorizing transactions.
This module provides functionality for categorizing series__ of transactions
using regex-based categorization schemes and index-specific manual edits. It is
useful for categorizing transactions and for validating categorizations.
__ https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html
The module has the following high-level functions. These functions take series
and categorization instructions, and they return new series.
- :func:`money.category.categorize` applies a category to each item.
- :func:`money.category.count_candidates` counts candidate categories by item.
These high-level functions take a series of transaction descriptions, and they
use these transaction descriptions and their indices to assign categories.
The functions use two other inputs to assign cateogies. The first is a dict of
catergories. The keys in this dict are categories, and the values are lists of
regular expressions. If a description fully matches any of the regular
expressions, then it fits the category. An example category dict: ::
{"cat0": ["re0", ...], ...}
The second input is a dict of index-specific manual edits. The keys in this dict
are indices, and the values are categories to assign. For example: ::
{0: "cat0", ...}
In this example, we start by defining a series, categories, and edits. ::
import pandas as pd
import category
# Series of descriptions. Changing the index for demonstration.
series = pd.Series(["blah", "COFFEE 001", "stuff"],
index=[10, 11, 12])
# Categories with regular expressions.
categories = {"coffee": [r"COFFEE \\d+"]}
# Index-specific manual categorizations.
edits = {11: "misc", 12: "misc"}
Next, we apply categories to the series. Note how the second transaction
description is recognized as coffee, and the third transaction is hard coded as
miscellaneous. As the second transaction shows, the algorithm favors regex
matches over hard codes. ::
category.categorize(series, categories, edits=edits)
# 10 None
# 11 coffee
# 12 misc
Ideally, the algorithm shouldn't have to make a choice; each transaction should
have one and only one category. When we test this condition, we can see that
the first transaction has no categories, and the second transaction has two
categories. We might want to address these cases before analysis. ::
category.count_candidates(series, categories, edits=edits)
# 10 0
# 11 2
# 12 1
Each high-level function has a companion ``row_*`` function that applies an
algorithm to each value in the series.
:func:`money.category.row_list_candidates` does most of the categorizing work.
The high-level functions use :func:`money.category.apply_to_series_using_index`
to apply the ``row_*`` functions in a way that exposes the series index.
"""
import pandas as pd
import re
def categorize(series, categories, edits=None):
"""Assign categories for a series of transaction descriptions.
This function applies :func:`money.category.row_categorize` to every value
in a ``series``, creating a series of categories with the same index.
Arguments:
        series: Pandas Series of transaction descriptions.
categories (dict): Regex patterns for each category.
edits (dict): Index-specific manual categorizations.
Returns:
A Pandas Series with categories.
"""
return apply_to_series_using_index(row_categorize, series,
categories, edits=edits)
def count_candidates(series, categories, edits=None):
"""Count candidate categories for a series of transaction descriptions.
This function applies :func:`money.category.row_count_candidates` to every
value in a ``series``, creating a series of counts with the same index.
Arguments:
        series: Pandas Series of transaction descriptions.
categories (dict): Regex patterns for each category.
edits (dict): Index-specific manual categorizations.
Returns:
A Pandas Series with counts.
"""
return apply_to_series_using_index(row_count_candidates,
series, categories, edits=edits)
def row_categorize(row, categories, edits=None):
"""Categorize one indexed transaction "row".
The function arbitrarily returns the first candidate category assigned to
the row. It's written with the expectation that each row *should* only fit
one category. In practice, it's a good idea to test this assumption.
Arguments:
row: Length 2 iterable with an index and a description.
categories (dict): Regex patterns for each category.
edits (dict): Index-specific manual categorizations.
Returns:
str: A category for the row.
"""
candidates = row_list_candidates(row, categories, edits=edits)
if candidates:
return candidates[0]
def row_count_candidates(row, categories, edits=None):
"""Count candidate categories for one indexed transaction "row".
Arguments:
row: Length 2 iterable with an index and a description.
categories (dict): Regex patterns for each category.
edits (dict): Index-specific manual categorizations.
Returns:
        int: Number of candidate categories for the row.
"""
candidates = row_list_candidates(row, categories, edits=edits)
return len(candidates)
def row_list_candidates(row, categories, edits=None):
"""Identify candidate categories for one indexed transaction "row".
Each row has two fields. The first is the transaction index, and the second
is the transaction description. This function uses the index to assign
manual category edits to specific transactions.
Arguments:
row: Length 2 iterable with an index and a description.
categories (dict): Regex patterns for each category.
edits (dict): Index-specific manual categorizations.
Returns:
list: Candidate categories for the row.
"""
index, description = row
candidates = []
# Pattern match descriptions to categories.
for category, patterns in categories.items():
if is_match(description, patterns):
candidates.append(category)
# Apply index-specific manual categorizations.
if edits:
category = edits.get(index)
if category:
candidates.append(category)
return candidates
def apply_to_series_using_index(f, series, *args, **kwargs):
"""Apply a function to a series, making the series index available.
The function converts the ``series`` to a two-column data frame with the
series index as a column, so that function ``f`` can process the index when
doing its job. Afterward, the data frame returns to a series with the
original index intact. See `Stack Overflow`__.
__ https://stackoverflow.com/a/18316830
This function could be written as a wrapper for ``f``, but it becomes
unclear while glancing at the arguments of ``f`` whether it should take a
series or a row as its first argument. The current approach is transparent.
Arguments:
f (function): Function to apply to the series.
series: Pandas Series to have the function applied.
*args: Additional positional argmunents for ``f``.
**kwargs: Additional keyword arguments for ``f``.
Returns:
A Pandas series with the function applied.
"""
result = (series.reset_index()
.apply(f, axis=1, args=args, **kwargs))
return pd.Series(result.values, index=series.index)
def is_match(string, patterns):
"""Test if a string matches any pattern in a given list.
Arguments:
string (str): String that might match ``patterns``.
patterns (list): Patterns to match.
Returns:
bool: True for a match, otherwise false.
"""
return any([re.fullmatch(p, string) for p in patterns])
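# --- Usage sketch (not part of the original module) ---
# A small demo that mirrors the example in the module docstring; the series,
# categories, and edits below are made up for illustration.
if __name__ == '__main__':
    demo_series = pd.Series(["blah", "COFFEE 001", "stuff"], index=[10, 11, 12])
    demo_categories = {"coffee": [r"COFFEE \d+"]}
    demo_edits = {11: "misc", 12: "misc"}
    print(categorize(demo_series, demo_categories, edits=demo_edits))
    print(count_candidates(demo_series, demo_categories, edits=demo_edits))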
| true
|
bced7f810bbc828266f8465cf7a4a055db43e450
|
Python
|
msrosenberg/TaxonomyMonographBuilder
|
/TMB_Create_Graphs.py
|
UTF-8
| 11,659
| 2.984375
| 3
|
[] |
no_license
|
"""
Module containing the various graph and chart drawing algorithms (except for those related to maps)
"""
# external dependencies
from typing import Optional
import matplotlib.pyplot as mplpy
import matplotlib.ticker
from wordcloud import WordCloud
__TMP_PATH__ = "temp/"
# my approximation of the pygal color scheme
__COLOR_LIST__ = ["salmon", "royalblue", "lightseagreen", "gold", "darkorange", "mediumorchid", "deepskyblue",
"lightgreen", "sandybrown", "palevioletred", "lightskyblue", "mediumaquamarine", "lemonchiffon",
"red", "green", "blue", "yellow"]
def create_pie_chart_file(filename: str, data: dict, graph_font: Optional[str] = None) -> None:
datalist = list(data.keys())
datalist.sort()
sizes = []
for d in datalist:
sizes.append(data[d])
color_list = __COLOR_LIST__
# create a two-panel plot, one for pie, one for legend
fig, (panel1, panel2) = mplpy.subplots(1, 2, figsize=[6, 3])
# create pie chart in first panel
pie = panel1.pie(sizes, colors=color_list, startangle=90, counterclock=False)
panel1.axis("equal")
# create legend in second panel
panel2.axis("off") # hide axes in second plot
panel2.legend(pie[0], datalist, loc="center", frameon=False, ncol=2, prop={"family": graph_font})
mplpy.rcParams["svg.fonttype"] = "none"
mplpy.tight_layout()
mplpy.savefig(__TMP_PATH__ + filename, format="png", dpi=600)
mplpy.close("all")
def create_language_bar_chart_file(filename: str, lang_by_year: dict, graph_font: Optional[str] = None) -> None:
color_list = __COLOR_LIST__
langlist = list(lang_by_year.keys())
langlist.sort()
yearlist = list(lang_by_year[langlist[0]].keys())
minyear = min(yearlist)
maxyear = max(yearlist)
year_cnts = {y: 0 for y in range(minyear, maxyear+1)}
for lang in lang_by_year:
dat = lang_by_year[lang]
for y in dat:
year_cnts[y] += dat[y]
x_list = [x for x in range(minyear, maxyear+1)]
y_lists = []
for lang in langlist:
ylist = []
dat = lang_by_year[lang]
for y in range(minyear, maxyear+1):
if year_cnts[y] > 0:
ylist.append(dat[y] / year_cnts[y])
else:
ylist.append(0)
y_lists.append(ylist)
# create a three-panel plot, two for bar graphs, one for legend
fig, (panel1, panel2, panel3) = mplpy.subplots(3, 1, figsize=[6.5, 6])
split_year = 1850
split_index = x_list.index(split_year)
bottoms = [0 for _ in range(split_index)]
bars = []
for j, ylist in enumerate(y_lists):
bars.append(panel1.bar(x_list[:split_index], ylist[:split_index], bottom=bottoms, color=color_list[j],
edgecolor="black", linewidth=0.25))
for i in range(len(ylist[:split_index])):
bottoms[i] += ylist[i]
panel1.spines["right"].set_visible(False)
panel1.spines["top"].set_visible(False)
bottoms = [0 for _ in range(len(x_list) - split_index)]
for j, ylist in enumerate(y_lists):
panel2.bar(x_list[split_index:], ylist[split_index:], bottom=bottoms, color=color_list[j], edgecolor="black",
linewidth=0.25)
for i, v in enumerate(ylist[split_index:]):
bottoms[i] += v
panel2.spines["right"].set_visible(False)
panel2.spines["top"].set_visible(False)
panel3.axis("off") # hide axes in second plot
panel3.legend(bars, langlist, loc="center", frameon=False, ncol=4, prop={"family": graph_font})
mplpy.xticks(fontname=graph_font)
mplpy.yticks(fontname=graph_font)
mplpy.rcParams["svg.fonttype"] = "none"
mplpy.tight_layout()
mplpy.savefig(__TMP_PATH__ + filename, format="png", dpi=600)
mplpy.close("all")
def create_bar_chart_file(filename: str, data: list, minx: int, maxx: int, y: int,
graph_font: Optional[str] = None) -> None:
x_list = [x for x in range(minx, maxx+1)]
y_list = [d[y] for d in data]
fig, faxes = mplpy.subplots(figsize=[6.5, 2])
faxes.bar(x_list, y_list, color="blue", edgecolor="darkblue")
faxes.spines["right"].set_visible(False)
faxes.spines["top"].set_visible(False)
if maxx-minx > 200:
tick_step = 40
else:
tick_step = 20
mplpy.yticks(fontname=graph_font)
mplpy.xticks([i for i in range(minx, maxx + 1, tick_step)], fontname=graph_font)
mplpy.rcParams["svg.fonttype"] = "none"
mplpy.tight_layout()
mplpy.savefig(__TMP_PATH__ + filename, format="png", dpi=600)
mplpy.close("all")
def create_stacked_bar_chart_file(filename: str, data: list, minx: int, maxx: int, cols: list,
graph_font: Optional[str] = None) -> None:
# currently assumes only two stacked bars
x_list = [x for x in range(minx, maxx+1)]
fig, faxes = mplpy.subplots(figsize=[6.5, 2])
col_names = [c[0] for c in cols]
y_list1 = [d[cols[0][1]] for d in data]
y_list2 = [d[cols[1][1]] for d in data]
faxes.bar(x_list, y_list1, color="blue", edgecolor="darkblue")
faxes.bar(x_list, y_list2, bottom=y_list1, color="red", edgecolor="darkred")
faxes.spines["right"].set_visible(False)
faxes.spines["top"].set_visible(False)
faxes.legend(col_names, loc="upper left", frameon=False, prop={"family": graph_font})
mplpy.xticks(fontname=graph_font)
mplpy.yticks(fontname=graph_font)
mplpy.rcParams["svg.fonttype"] = "none"
mplpy.tight_layout()
mplpy.savefig(__TMP_PATH__ + filename, format="png", dpi=600)
mplpy.close("all")
def create_qual_bar_chart_file(filename: str, label_list: list, data_dict: dict, max_value: int,
graph_font: Optional[str] = None) -> None:
x_list = [x for x in range(len(label_list))]
y_list = [data_dict[x] for x in label_list]
fig, faxes = mplpy.subplots(figsize=[6.5, 2.5])
faxes.bar(x_list, y_list, color="blue", edgecolor="darkblue")
mplpy.yticks(fontname=graph_font)
mplpy.xticks(rotation="vertical", style="italic", fontname=graph_font)
faxes.set_xticks(x_list)
faxes.set_xticklabels(label_list)
faxes.spines["right"].set_visible(False)
faxes.spines["top"].set_visible(False)
mplpy.ylim(0, max_value)
mplpy.rcParams["svg.fonttype"] = "none"
mplpy.tight_layout()
mplpy.savefig(__TMP_PATH__ + filename, format="png", dpi=600)
mplpy.close("all")
def create_line_chart_file(filename: str, data: list, minx: int, maxx: int, y: int,
graph_font: Optional[str] = None) -> None:
x_list = [x for x in range(minx, maxx+1)]
y_list = [d[y] for d in data]
fig, faxes = mplpy.subplots(figsize=[6.5, 2])
faxes.plot(x_list, y_list, "blue")
faxes.spines["right"].set_visible(False)
faxes.spines["top"].set_visible(False)
if maxx-minx > 200:
tick_step = 40
else:
tick_step = 20
mplpy.yticks(fontname=graph_font)
mplpy.xticks([i for i in range(minx, maxx + 1, tick_step)], fontname=graph_font)
mplpy.rcParams["svg.fonttype"] = "none"
mplpy.tight_layout()
mplpy.savefig(__TMP_PATH__ + filename, format="png", dpi=600)
mplpy.close("all")
def create_chronology_chart_file(filename: str, miny: int, maxy: int, maxcnt: int, yearly_data: dict,
graph_font: Optional[str] = None) -> None:
y_list = []
for y in range(miny, maxy + 1):
y_list.append(float(yearly_data[y]))
x = [y for y in range(miny, maxy+1)]
fig, faxes = mplpy.subplots(figsize=[6.5, 1.5])
mplpy.ylim(-maxcnt, maxcnt)
mplpy.xlim(miny, maxy)
faxes.stackplot(x, y_list, baseline="sym", colors=["black"])
for spine in faxes.spines:
faxes.spines[spine].set_visible(False)
cur_axes = mplpy.gca()
cur_axes.axes.get_yaxis().set_visible(False)
mplpy.xticks([i for i in range(miny, maxy+1, 20)], fontname=graph_font)
mplpy.rcParams["svg.fonttype"] = "none"
mplpy.tight_layout()
mplpy.savefig(__TMP_PATH__ + filename, format="png", dpi=600)
mplpy.close("all")
def create_word_cloud_image(binomial_cnts: dict, specific_cnts: dict, font_path: Optional[str] = None) -> None:
# generate wordcloud image from binomials
wordcloud = WordCloud(width=2000, height=1500, background_color="white", max_words=1000, normalize_plurals=False,
collocations=False, font_path=font_path).generate_from_frequencies(binomial_cnts)
wordcloud.to_file(__TMP_PATH__ + "binomial_word_cloud.png")
# generate wordcloud image from specific names
wordcloud = WordCloud(width=2000, height=1500, background_color="white", max_words=1000, normalize_plurals=False,
collocations=False, font_path=font_path).generate_from_frequencies(specific_cnts)
wordcloud.to_file(__TMP_PATH__ + "specific_word_cloud.png")
if __name__ == "__main__":
# the following creates a quick chart of each type to check formatting changes
test_data = {
"English": 100,
"German": 50,
"Chinese": 20,
"Dutch": 10,
"French": 50,
"Italian": 50,
"Japanese": 50,
"Latin": 10,
"Polish": 3,
"Portuguese": 3,
"Russian": 3,
"Spanish": 3,
"Thai": 3,
"Danish": 3,
"Korean": 3,
"Vietnamese": 3
}
create_pie_chart_file("testpie.png", test_data)
create_word_cloud_image(test_data, test_data, r"C:\Windows\Fonts\NotoSerif-regular.ttf")
test_data = {
1800: 5,
1801: 4,
1802: 1,
1803: 0,
1804: 2,
1805: 2,
1806: 7,
1807: 12,
1808: 14,
1809: 10,
1810: 10
}
create_chronology_chart_file("testchron.png", 1800, 1810, 14, test_data)
test_data = [
[5],
[4],
[1],
[0],
[2],
[2],
[7],
[12],
[14],
[10],
[10]
]
create_line_chart_file("testline.png", test_data, 1800, 1810, 0)
create_bar_chart_file("testbar.png", test_data, 1800, 1810, 0)
create_stacked_bar_chart_file("teststackbar.png", test_data, 1800, 1810, [["A", 0], ["B", 0]])
test_data = {
"pugilator": 20,
"pugnax": 5,
"tangeri": 12
}
create_qual_bar_chart_file("testqualbar.png", ["pugilator", "pugnax", "tangeri"], test_data, 20)
def create_handedness_chart_file(filename: str, data: list, graph_font: Optional[str] = None) -> None:
y_list = [i for i in range(len(data))]
right_x = [d.right_cnt for d in data]
left_x = [-d.left_cnt for d in data]
max_cnt = max(right_x)
if -min(left_x) > max_cnt:
max_cnt = -min(left_x)
height = max(1.0, (len(y_list)+1)*0.2)
fig, faxes = mplpy.subplots(figsize=[6.5, height])
mplpy.xlim(-max_cnt, max_cnt)
faxes.barh(y_list, right_x)
faxes.barh(y_list, left_x)
# fix labels
xlabels = list(faxes.get_xticks())
for i, x in enumerate(xlabels):
if x < 0:
xlabels[i] = -x
for i, x in enumerate(xlabels):
xlabels[i] = int(x)
ticks_loc = faxes.get_xticks().tolist()
faxes.xaxis.set_major_locator(matplotlib.ticker.FixedLocator(ticks_loc))
faxes.set_xticklabels(xlabels)
mplpy.xlabel("Left Count / Right Count", fontname=graph_font)
faxes.spines["right"].set_visible(False)
faxes.spines["top"].set_visible(False)
faxes.spines["left"].set_visible(False)
faxes.yaxis.set_visible(False)
mplpy.rcParams["svg.fonttype"] = "none"
mplpy.tight_layout()
mplpy.savefig(filename, format="png", dpi=600)
mplpy.close("all")
| true
|
9d6a559c94c0f75967057a24a75b2c96fed76a20
|
Python
|
kiran-kotresh/Python-code
|
/pop_growth.py
|
UTF-8
| 143
| 2.890625
| 3
|
[] |
no_license
|
def nb_year(p0, percent, aug, p):
    """Return the number of years needed for a population p0, growing by
    `percent` percent plus `aug` extra people per year, to reach at least p."""
    current=p0
    n=0
    while(current<p):
        current=current+current*(percent*0.01)+aug
        n+=1
    print(n)
    return n
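# --- Example (not part of the original file) ---
# Hypothetical values: 1000 people, 2% yearly growth, 50 newcomers per year,
# target population of 1200 -> reached after 3 years.
nb_year(1000, 2, 50, 1200)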
| true
|
21f112f9fdd71c727912073487ff0a50ff7d8a22
|
Python
|
quvinh/pythoncb
|
/baitap3.py
|
UTF-8
| 296
| 3.03125
| 3
|
[] |
no_license
|
n = int(input("Nhap so phan tu:"))
lt = list()
for i in range(n):
x = int(input("Nhap phan tu thu %d :"%(i+1)))
lt.append(x)
s = 0
for i in lt:
s += i
f = open("file.txt","w")
f.write(str(lt))
f.write("tong :%d"%s)
f.close()
f = open("file.txt","r")
print(f.read())
| true
|
4866e717ad1c80707aa245618ebc9776758bf6f6
|
Python
|
zzc558/SpectralClusterNetflex
|
/clustering.py
|
UTF-8
| 2,226
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
from dataPreprocess import laplacian
import numpy as np
import scipy as sp
import scipy.sparse.linalg  # make sp.sparse.linalg available explicitly
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
def clustering(matrix, k, movieIndex):
eValue, eVector = sp.sparse.linalg.eigs(matrix, k, which='SM')
# get rid of the movie nodes
count = 0
for i in movieIndex:
eVector = np.delete(eVector, i - count, axis=0)
count += 1
eVector_real = eVector.real
eVec_row_norm = np.linalg.norm(eVector_real, ord=2, axis=1)
eVector_norm = (eVector_real.T / eVec_row_norm).T
kmeans = KMeans(n_clusters=k, random_state=1231).fit(eVector_norm)
return kmeans.labels_
#labels = kmeans.labels_
#fh = open("label.txt", "w+")
#for value in labels:
# fh.write(str(value))
#fh.close()
def label_predictor(fname, userLabel):
testFile = open(fname, 'r')
#testFile.readline()
userInfo = {}
movie_user_list = []
for line in testFile:
c = line.split(',')
movie = int(c[0])
user = int(c[1])
movie_user_list.append((movie, user))
#time = int(c[3].split('-')[0])
if(user not in userLabel):
print("Unknown user in test set:", user)
continue
if(user not in userInfo):
userInfo[user] = []
userInfo[user].append(userLabel[user])
userInfo[user].append(movie)
testFile.close()
return userInfo, movie_user_list
# get average rating for the movie from users with the same label
def get_rating(label, movie, clusterDict, ratingDict):
users = clusterDict[label]
rating = set()
for user in users:
rating.add(ratingDict[user][movie])
return int(sum(rating) / len(rating) + 0.5)
# get predict rating value for every movie in movieRating = {movie:{user: rating}}
def rating_predictor(userDict, clusterDict, ratingDict):
movieRating = {}
for user, info in userDict.items():
label = info.pop(0)
for movie in info:
if(movie not in movieRating):
movieRating[movie] = {}
movieRating[movie][user] = get_rating(label, movie, clusterDict, ratingDict)
return movieRating
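# --- Usage sketch (not part of the original module) ---
# Hypothetical example of the rating-prediction helpers, using made-up cluster
# labels and ratings instead of a real spectral-clustering run. Running this
# file still requires the repo's dataPreprocess module to be importable.
if __name__ == '__main__':
    clusterDict = {0: [1, 2], 1: [3]}                                # label -> users
    ratingDict = {1: {10: 4, 11: 2}, 2: {10: 5, 11: 3}, 3: {10: 1}}  # user -> {movie: rating}
    userDict = {1: [0, 10], 2: [0, 11]}                              # user -> [label, movie, ...]
    print(rating_predictor(userDict, clusterDict, ratingDict))       # {10: {1: 5}, 11: {2: 3}}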
| true
|
4f181325c1ac81fb9668a166d3aa1883e34f30b5
|
Python
|
rezo8/LyricClassifier
|
/song.py
|
UTF-8
| 7,021
| 2.890625
| 3
|
[] |
no_license
|
import spotifyclient
import pickle as pickle
from nltk.tokenize import word_tokenize
import os
import nltk
import numpy as np
import scipy
import scipy.spatial.distance  # needed for scipy.spatial.distance.cosine
GENRES = [
'folk',
'rap',
'rock',
'r&b',
'country',
'blues'
]
class Song(object):
"""
Object containing the lyrics to a single song
Attributes:
lyrics: string containing song lyrics
genres: list containing genres
title: string containing song title (optional)
artist: string containing primary artist (optional)
numVerses: integer containing number of verses in song
numChoruses: integer containing number of choruses in song
numLines: integer containing number of lines in song
"""
def __init__(self, lyrics,genres, title='', artist='', popularity = 0, duration_ms = 0, notfound='ignore'):
#Constructor takes in local variables and option if genre is not found thru Spotify client
self.lyrics = lyrics
self.title = title.replace('\n', '')
self.artist = artist.replace('\n', '')
        self.genres = genres if (notfound=='replace' or notfound=='add') else []
self.popularity = popularity
self.duration_ms = duration_ms
self.numVerses = 0
self.numLines = 0
self.numChoruses = 0
self.nGrams = None
self.songVector = None
self.tokenizedSentences = None
if len(genres)==0 or notfound=='add':
artistgenres = spotifyclient.getArtistProperties(self.artist, GENRES)
if artistgenres:
for g in artistgenres:
self.genres.append(g)
elif notfound == 'prompt':
            genres = input('Genres not found, please input: ').split(',')
if len(genres) > 0:
self.genres = genres
def filter(self, allowed):
#Takes in a list of allowed genres and updates self.genres
#returns a list of removed genres
removed = []
new = []
for g in self.genres:
for a in allowed:
if a not in new and a in g:
new.append(a)
else:
removed.append(g)
self.genres = new
return removed
'''
for g in self.genres:
for a in allowed:
if g == a and a not in new:
new.append(a)
else:
removed.append(g)
self.genres = new
return removed
'''
def tokens(self):
return word_tokenize(self.simpleLyrics())
    # processLyrics throws out all lyrics after a mismatched bracket;
    # it updates self.lyrics, numVerses, numChoruses and numLines in place.
def processLyrics(self):
#Removes "[Chorus]", "[Verse X]", etc., punctuation, and newlines
self.numChoruses = 0
self.numVerses = 0
self.numLines = 0
lyrics = self.lyrics.lower()
i = 0
allowedChars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ \''
simpleLyrics = ''
#print("-------- ORIGINAL LYRICS --------")
#print(self.lyrics)
        while i < len(lyrics):  # manual index loop so bracketed sections can be skipped in one pass
c = lyrics[i]
if c in allowedChars:
simpleLyrics += c
if c=='[':
if lyrics[i+1].lower() == 'v':
self.numVerses = self.numVerses + 1
elif lyrics[i+1].lower() == 'c':
self.numChoruses = self.numChoruses + 1
while i<len(lyrics) and lyrics[i]!=']':
i +=1
elif c == '\n':
self.numLines +=1
simpleLyrics += '\n'
elif c == '\t':
simpleLyrics += '\n'
i+=1
self.lyrics = simpleLyrics
#print("-------- SIMPLE LYRICS --------")
#print(self.lyrics)
def tokenFrequencies(self):
#Takes in a string of song lyrics and returns a dictionary containing
#each unique word in the lyrics and its frequency
lyrics = self.simpleLyrics()
words = word_tokenize(lyrics)
freq = {}
for word in words:
if word in freq:
freq[word] += 1
elif not word=='':
freq[word] = 1
return freq
def setnGrams(self, sents):
self.nGrams = sents
def setTokenizedSentences(self, sents):
self.tokenizedSentences = sents
def saveLyrics(self, filename):
#Saves title artist, lyrics to file at filename (creates a file if none exists)
#NOTE: To save the entire Song object, use saveSong()
f = open(filename, 'w+')
f.write(self.title+'\n')
f.write(self.artist+'\n')
f.write(self.lyrics+'\n')
f.close()
def saveSong(self, filename, subdirectory=''):
#Saves Song object to file at filename, which can include a subdirectory
if len(subdirectory) == 0:
f = open(filename, 'wb+')
else:
try:
os.mkdir(subdirectory)
except Exception:
pass
f = open(os.path.join(subdirectory, filename), 'wb+')
pickle.dump(self, f, protocol=2)
def vectorizeSong(self, model):
word_vectors = model.wv
words = []
notFound = 0
vector = np.zeros((100,), dtype=float)
for sent in self.tokenizedSentences:
for word in sent:
words.append(word)
if len(words) == 0:
return np.zeros((100,), dtype=float)
for word in words:
if word in word_vectors.vocab:
wordVector = model.wv[str(word)]
vector = np.add(vector,wordVector)
else:
notFound += 1
divisor = np.full((100,), (len(words) - notFound), dtype=float)
vector = np.divide(vector,divisor)
self.songVector = vector
return vector
def returnVectorGenre(self, wordVector, allGenreVectors):
min = 1
index = 0
for i in range(len(allGenreVectors)):
distance = scipy.spatial.distance.cosine(wordVector,allGenreVectors[i])
if (distance < min):
min = distance
index = i
return GENRES[index]
@staticmethod
def openLyrics(filename):
#Returns new Song object with title, artist, and lyric drawn from file at filename
#NOTE: To open an entire Lyric object, use openSong()
f = open(filename, 'r')
contents = f.read()
title = contents[:contents.index('\n')]
contents = contents[contents.index('\n')+1:]
artist = contents[:contents.index('\n')]
lyrics = contents[contents.index('\n')+1:]
        return Song(lyrics, [], title, artist)  # genres are looked up by the constructor
@staticmethod
def openSong(filename):
#Returns a new Song object with all data drawn from filename
f = open(filename, 'rb')
return pickle.load(f)
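# --- Usage sketch (not part of the original module) ---
# Hypothetical example; genres are passed explicitly so no Spotify lookup is
# triggered (notfound defaults to 'ignore'). Running this file still requires
# the repo's spotifyclient module and nltk to be importable.
if __name__ == '__main__':
    demo = Song("[Verse 1]\nHello world\n[Chorus]\nLa la la\n",
                genres=['rap'], title="Demo", artist="Nobody")
    demo.processLyrics()
    print(demo.numVerses, demo.numChoruses, demo.numLines)  # 1 1 4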
| true
|
73d91f5c793268e7dc0b6ccb99ce1d68ef20449c
|
Python
|
mpreddy960/pythonPROJECTnew
|
/range.py
|
UTF-8
| 34
| 2.984375
| 3
|
[] |
no_license
|
for i in range(1,23):
print(i)
| true
|
31ee8b4b28d5fa4bd2db68c4df9465274ab3dbaa
|
Python
|
apalevich/PyMentor
|
/01_reminder_dates.py
|
UTF-8
| 3,481
| 4.28125
| 4
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is an exercise maintained with my mentor Zcho05.
The exercise contains function called create_message() that returns a message about some reminder.
For example:
>>> create_message((2, 4, 6, 7,))
'Мы оповестим вас о предстоящих событиях за 2, 4, 6 и 7 дней'
"""
def create_message(dates, separator=None):
"""
This function construct a message from a tuple with dates:
>>> create_message((2, 4, 5, 7))
'Мы оповестим вас о предстоящих событиях за 2, 4, 5 и 7 дней'
You can even use a tuple with only date. Conjunction "и" ("and") will not be inputed then:
>>> create_message((7,))
'Мы оповестим вас о предстоящих событиях за 7 дней'
Also strings and lists are supported:
>>> create_message('5, 7, 2, 8, ')
'Мы оповестим вас о предстоящих событиях за 5, 7, 2 и 8 дней'
>>> create_message([5, 7, 2, 8])
'Мы оповестим вас о предстоящих событиях за 5, 7, 2 и 8 дней'
Commas are not required with strings:
>>> create_message('5 7 2 8')
'Мы оповестим вас о предстоящих событиях за 5, 7, 2 и 8 дней'
Don’t worry about non-digit symbols:
>>> create_message(' 24, 46, 745, а, рыбки?, 300 xxx ')
'Мы оповестим вас о предстоящих событиях за 24, 46, 745 и 300 дней'
    You can use an optional separator between numbers by adding it as an argument:
>>> create_message('5 7 2 8', '; ')
'Мы оповестим вас о предстоящих событиях за 5; 7; 2 и 8 дней'
    But beware of using create_message with empty input:
>>> create_message(())
Traceback (most recent call last):
...
Exception: Отсутствуют даты
    Don't pass letters either:
>>> create_message((3, 5, 6, 3, 'a', ))
Traceback (most recent call last):
...
Exception: Вместо даты использована буква
"""
# Check input argument
if not dates:
raise Exception('Отсутствуют даты')
integers = []
if isinstance(dates, str):
temp = ''
for i in dates:
if i.isdigit():
temp += i
else:
temp += ' '
integers = temp.split()
integers = list(map(int, integers))
elif isinstance(dates, (list, tuple)):
for c in dates:
try:
integers.append(int(c))
except ValueError:
raise Exception('Вместо даты использована буква')
# Create string for a return
message = ''
# Choose correct case for ending word
if str(integers[-1]).endswith('1'):
message = ' день'
elif str(integers[-1]).endswith(('2', '3', '4')):
message = ' дня'
elif str(integers[-1]).endswith(('5', '6', '7', '8', '9', '0')):
message = ' дней'
# Construct final message
if len(integers) > 1:
message = 'Мы оповестим вас о предстоящих событиях за {} и {}'.format(str(integers[0:-1])[1:-1], str(integers[-1])) + message
else:
message = 'Мы оповестим вас о предстоящих событиях за {}'.format(str(integers)[1:-1]) + message
    # Replace commas with the selected separator
if separator:
message = message.replace(', ', separator)
return message
if __name__ == '__main__':
# Test cases from docstrings
import doctest
doctest.testmod()
| true
|
de5d784f1c98723db35f425782de8236e1c66dc1
|
Python
|
nobita44/CIP2021_tkinterQuiz
|
/main.py
|
UTF-8
| 4,927
| 3.71875
| 4
|
[] |
no_license
|
# Python program to create a simple GUI
# Simple Quiz using Tkinter
# import everything from tkinter
from tkinter import *
# and import messagebox as mb from tkinter
from tkinter import messagebox as mb
# import json to use json file for data
import json
# class to define the components of the GUI
class Quiz:
# This is the first method which is called when a
# new object of the class is initialized.
def __init__(self):
self.q_no = 0
self.display_title()
self.display_question()
self.opt_selected = IntVar()
self.opts = self.radio_buttons()
self.display_options()
self.buttons()
self.data_size = len(question)
self.correct = 0
def display_result(self):
# calculates the wrong count
wrong_count = self.data_size - self.correct
correct = f"Correct: {self.correct}"
wrong = f"Wrong: {wrong_count}"
        # calculates the percentage of correct answers
score = int(self.correct / self.data_size * 100)
result = f"Score: {score}%"
# Shows a message box to display the result
mb.showinfo("Result", f"{result}\n{correct}\n{wrong}")
# This method checks the Answer after we click on Next.
def check_ans(self, q_no):
# checks for if the selected option is correct
if self.opt_selected.get() == answer[q_no]:
# if the option is correct it return true
return True
def next_btn(self):
# Check if the answer is correct
if self.check_ans(self.q_no):
# if the answer is correct it increments the correct by 1
self.correct += 1
# Moves to next Question by incrementing the q_no counter
self.q_no += 1
# checks if the q_no size is equal to the data size
if self.q_no == self.data_size:
# if it is correct then it displays the score
self.display_result()
# destroys the GUI
gui.destroy()
else:
# shows the next question
self.display_question()
self.display_options()
def buttons(self):
next_button = Button(gui, text="Next", command=self.next_btn,
width=10, bg="blue", fg="white", font=("courier", 16, "bold"))
# palcing the button on the screen
next_button.place(x=350, y=380)
# This is the second button which is used to Quit the GUI
quit_button = Button(gui, text="Quit", command=gui.destroy,
width=5, bg="black", fg="white", font=("courier", 16, " bold"))
# placing the Quit button on the screen
quit_button.place(x=700, y=50)
def display_options(self):
val = 0
# deselecting the options
self.opt_selected.set(0)
# looping over the options to be displayed for the
# text of the radio buttons.
for option in options[self.q_no]:
self.opts[val]['text'] = option
val += 1
# This method shows the current Question on the screen
def display_question(self):
        # setting the Question properties
q_no = Label(gui, text=question[self.q_no], width=60,
font=('courier', 16, 'bold'), anchor='w')
# placing the option on the screen
q_no.place(x=70, y=100)
# This method is used to Display Title
def display_title(self):
# The title to be shown
title = Label(gui, text="CODE IN PLACE 2021 QUIZ HUNT",
width=50, bg="blue", fg="white", font=("courier", 20, "bold"))
# place of the title
title.place(x=0, y=2)
def radio_buttons(self):
# initialize the list with an empty list of options
q_list = []
# position of the first option
y_pos = 150
# adding the options to the list
while len(q_list) < 4:
# setting the radio button properties
radio_btn = Radiobutton(gui, text=" ", variable=self.opt_selected,
value=len(q_list) + 1, font=("courier", 16))
# adding the button to the list
q_list.append(radio_btn)
# placing the button
radio_btn.place(x=100, y=y_pos)
# incrementing the y-axis position by 40
y_pos += 40
# return the radio buttons
return q_list
# Create a GUI Window
gui = Tk()
# set the size of the GUI Window
gui.geometry("800x480")
# set the title of the Window
gui.title("Kamal Agarawal Quiz Window")
# get the data from the json file
with open('data.json') as f:
data = json.load(f)
# set the question, options, and answer
question = (data['question'])
options = (data['options'])
answer = (data['answer'])
# create an object of the Quiz Class.
quiz = Quiz()
# Start the GUI
gui.mainloop()
# END OF THE PROGRAM
| true
|
618e229be5f7a32fc8823218e522f597e4c9e9b0
|
Python
|
peterwilliams97/blank
|
/make_page_corpus.py
|
UTF-8
| 10,290
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
"""
PDF to text conversion
"""
import os
from glob import glob
from collections import defaultdict, OrderedDict
import hashlib
from subprocess import CalledProcessError, Popen, PIPE
import re
from utils_peter import pdf_dir, summary_dir, save_json
from html_to_text import update_summary
import json
KBYTE = 1024
MBYTE = 1024 * 1024
# Settings
min_size = 1 * KBYTE
max_size = 10000 * MBYTE
permission_errors = [
'You do not have permission to extract text',
'Permission Error'
]
PDF_BOX = './pdfbox-app-2.0.7.jar'
PDF_SUMMARIZE = './pdf_page_summaries'
for path in [PDF_BOX, PDF_SUMMARIZE]:
assert os.path.exists(path), path
def run_command(cmd, raise_on_error=True):
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
so = ''
se = ''
if stdout:
print('~' * 80)
so = stdout.decode("utf-8")
print(so)
if stderr:
print('^' * 80)
se = stderr.decode("utf-8")
print(se)
if not any(pe in s for s in (so, se) for pe in permission_errors):
print('-' * 80)
print('run_command real error')
print(' '.join(cmd))
if raise_on_error:
raise CalledProcessError(p.returncode, cmd)
return p.returncode, stdout, stderr
def pdf_summarize(pdf_path):
cmd = [PDF_SUMMARIZE, pdf_path]
retcode, stdout, stderr = run_command(cmd, raise_on_error=False)
ok = retcode == 0
if not ok:
print('FAILURE: retcode=%d stderr=<%s>' % (retcode, stderr))
return ok, None
text = stdout.decode('utf-8')
summary = json.loads(text)
return ok, summary
def pdf_to_pages(pdf_path):
"""Extract pages from PDF file `pdf_path` using PdfBox
Returns: ok, text, pages
ok: Analysis succeeded. PDF is valid
text: Text of PDF in html format
pages: Pages of PDF in html format
"""
cmd = ['java', '-jar', PDF_BOX, 'ExtractText',
'-html', '-console', pdf_path]
retcode, stdout, stderr = run_command(cmd, raise_on_error=False)
ok = retcode == 0
if not ok:
print('FAILURE: retcode=%d stderr=<%s>' % (retcode, stderr))
return ok, '', []
text = stdout.decode('utf-8')
sep = '<div style="page-break-before:always; page-break-after:always">'
return ok, text, text.split(sep)[1:]
# Num Pages: 1
RE_NUMPAGES = re.compile(b'Num Pages:\s+(\d+)')
def pdf_num_pages(pdf_path):
"""Use Unidoc to count pages in PDF file `pdf_path`"""
cmd = ['./pdf_info', pdf_path]
retcode, stdout, stderr = run_command(cmd, raise_on_error=False)
ok = retcode == 0
if not ok:
return ok, 0
m = RE_NUMPAGES.search(stdout)
return ok, int(m.group(1))
xlation = {
"GraphMarkedPages": 'marked_graph',
"TextMarkedPages": 'marked_text',
}
def save_pdf_summary(pdf_path, summary_path):
"""Extract text from `pdf`, break it into pages and write the summary to 'summary_path
"""
ok, text, pages_html = pdf_to_pages(pdf_path)
if not ok:
return
print('save_pdf_summary: %s->%s' % (pdf_path, summary_path))
summary = {
'path': pdf_path,
'name': os.path.basename(pdf_path),
'n_pages': len(pages_html),
'n_chars': sum(len(page) for page in pages_html),
'pages': pages_html,
'text': text,
}
ok, pages_summary = pdf_summarize(pdf_path)
if not ok:
return
assert pages_summary['NumPages'] == summary['n_pages'], (pdf_path, pages_summary['NumPages'],
summary['n_pages'])
for k, v in pages_summary.items():
if k == 'NumPages':
continue
elif k in xlation:
summary[xlation[k]] = v
else:
summary[k] = v
# NumPages int
# Width float64
# Height float64
# TextMarkedPages []int
# GraphMarkedPages []int
update_summary(summary)
if not summary_path.endswith('.json'):
summary_path = '%s.json' % summary_path
outpath = os.path.abspath(summary_path)
save_json(outpath, summary)
def sha1_digest(path):
sha1 = hashlib.sha1()
with open(path, 'rb') as f:
while True:
data = f.read(50000)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def extract_name(path, root, whole=False):
# print(path)
# assert False
name = os.path.relpath(path, root)
while True:
head, tail = os.path.split(name)
if not (head and tail):
break
name = '_'.join((head, tail))
# if root is None:
# name = os.path.basename(path)
# else:
# name = os.path.relpath(path, start=root)
# direct = os.path.dirname(path)
# for special in ['blank_pages', 'spool', 'MOB-810', 'xarc']:
# if special in direct and special not in name:
# name = '%s_%s' % (special, name)
if whole:
return name
return os.path.splitext(name)[0]
def flatten_path(path, root):
path = os.path.relpath(path, root)
while True:
head, tail = os.path.split(path)
if not (head and tail):
break
path = '_'.join((head, tail))
path = os.path.join(root, path)
assert os.path.isfile(path), path
return path
def ascii_count(s, limit):
return len([c for c in s if ord(c) > limit])
def punc_count(s):
return len([c for c in s if not ((ord('A') <= ord(c) <= ord('Z')) or
(ord('a') <= ord(c) <= ord('z')))])
def find_keeper(paths, root):
"""Return the 1 file in of the identical files in `paths` that we will use"""
paths = sorted(paths, key=lambda x: (-len(x), x))
for path in paths:
other_paths = [p for p in paths if p != path]
if 'xarc' in path and not any('xarc' in p for p in other_paths):
print('1 %s -> %s' % (other_paths, path))
return {path}
name = extract_name(path, root)
other_names = [extract_name(p, root) for p in other_paths]
if all(name in p for p in other_names):
print('2 %s -> %s' % (other_paths, path))
return {path}
for limit in 255, 127:
if ascii_count(path, limit) < min(ascii_count(p, limit) for p in other_paths):
print('3 %s -> %s' % (other_paths, path))
return {path}
if punc_count(path) < min(punc_count(p) for p in other_paths):
print('4 %s -> %s' % (other_paths, path))
return {path}
print('5 %s -> %s' % (paths[1:], path[0]))
return {paths[0]}
def corpus_to_keepers(pdf_dir):
"""Return the unique files in `pdf_dir` that we will use"""
print('corpus_to_keepers: pdf_dir="%s"' % pdf_dir)
path_list = list(glob(os.path.join(pdf_dir, '**'), recursive=True))
print('corpus_to_keepers: %d total' % len(path_list))
path_list = [path for path in path_list if os.path.isfile(path)]
print('corpus_to_keepers: %d files' % len(path_list))
path_list = [path for path in path_list if os.path.splitext(path)[1] == '.pdf']
print('corpus_to_keepers: %d pdf files' % len(path_list))
# for i, path in enumerate(path_list):
# assert os.path.isfile(path), path
# path_list = [flatten_path(path, pdf_dir) for path in path_list]
sha1_paths = defaultdict(set)
xarc = []
for i, path in enumerate(path_list):
assert os.path.isfile(path), path
assert os.path.abspath(path) == path, (os.path.abspath(path), path)
sha1 = sha1_digest(path)
sha1_paths[sha1].add(path)
if 'xarc' in path:
xarc.append(path)
print('%d xarc files of %d (raw total: %d)' % (len(xarc), len(sha1_paths), i))
assert xarc
for sha1 in sha1_paths:
paths = sha1_paths[sha1]
if len(paths) > 1:
sha1_paths[sha1] = find_keeper(paths, pdf_dir)
keepers = []
for paths in sha1_paths.values():
assert len(paths) == 1, (len(paths), paths)
keepers.append(list(paths)[0])
keepers.sort()
return keepers
exclusions = {
'~/testdata/Year_8_Pythagoras_Booklet.pdf',
'~/testdata/missing.pdf',
'~/testdata/nsdi17-gowda.pdf',
'~/testdata/nsdi17-horn-daniel.pdf',
'~/testdata/rdp2018-03.pdf',
}
exclusions = {os.path.expanduser(path) for path in exclusions}
def corpus_to_text(pdf_dir, summary_dir):
"""Convert the unique PDF files in `pdf_dir` to file with the same name in `summary_dir`
"""
keepers = corpus_to_keepers(pdf_dir)
os.makedirs(summary_dir, exist_ok=True)
pdf_summary = OrderedDict()
summary_pdf = OrderedDict()
for i, pdf_path in enumerate(keepers):
size = os.path.getsize(pdf_path)
print('%3d: %s [%.1f]' % (i, pdf_path, size / MBYTE), end=' -> ')
assert os.path.abspath(pdf_path) == pdf_path
if min_size <= size <= max_size:
name = extract_name(pdf_path, pdf_dir)
assert not name.endswith('.json'), name
name = '%s.json' % name
summary_path = os.path.join(summary_dir, name)
assert summary_path not in summary_pdf, (pdf_path, summary_pdf[summary_path])
pdf_summary[pdf_path] = summary_path
summary_pdf[summary_path] = pdf_path
print(summary_path, end=' ')
# assert not os.path.exists(summary_path)
# save_pdf_summary(pdf_path, summary_path)
print()
print('^' * 100)
started = set()
for i, (pdf_path, summary_path) in enumerate(pdf_summary.items()):
if pdf_path in exclusions:
started.add(pdf_path)
continue
# if len(started) < len(exclusions):
# continue
print('%4d: %s -> %s' % (i, pdf_path, summary_path), flush=True)
save_pdf_summary(pdf_path, summary_path)
if __name__ == '__main__':
corpus_to_text(pdf_dir, summary_dir)
print('=' * 80)
for directory in (pdf_dir, summary_dir):
path_list = list(glob(os.path.join(directory, '**'), recursive=True))
print('%s: %d files' % (directory, len(path_list)))
| true
|
8a3dd94a7898bc65dd67c3d5912a76be4ed1abd5
|
Python
|
kotaYkw/web_scraping_samples
|
/javascriptsample.py
|
UTF-8
| 722
| 2.65625
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
url ='http://www.webscrapingfordatascience.com/simplejavascript/'
r = requests.get(url)
html_soup = BeautifulSoup(r.text, 'html.parser')
# The tags are not included here
ul_tag = html_soup.find('ul')
print(ul_tag)
# Display the JavaScript code
script_tag = html_soup.find('script', attrs={'src': None})
print(script_tag)
url = 'http://www.webscrapingfordatascience.com/simplejavascript/quotes.php'
# The cookie value must be given as a string
# Without setting the cookie, the site detects we are not using a browser and denies access
r = requests.get(url, cookies={'jsenabled': '1'})
print(r.json())
| true
|
41133b2b7a4d9cfbd1eae8832a07585cc4f604bc
|
Python
|
webclinic017/aqua
|
/src/aqua/security/stock.py
|
UTF-8
| 671
| 3.53125
| 4
|
[] |
no_license
|
"""
Defines a stock contract
"""
from aqua.security.security import Security
class Stock(Security):
"""
A stock represents a share of a company or index.
We assume that it can be uniquely defined by a symbol (ticker).
"""
def __init__(self, symbol: str) -> None:
self.symbol = symbol.upper()
def __eq__(self, o: object) -> bool:
if isinstance(o, Stock):
return self.symbol == o.symbol
if isinstance(o, str):
return self.symbol == o.upper()
return NotImplemented
def __hash__(self) -> int:
return hash(self.symbol)
def __repr__(self) -> str:
return self.symbol
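# Hedged usage sketch (added for illustration, not part of the original module);
# it only exercises the equality/hash semantics defined above and assumes the
# aqua package is importable so the module can be run as a script.
if __name__ == "__main__":
    assert Stock("aapl") == Stock("AAPL")            # symbols are normalized to upper case
    assert Stock("AAPL") == "aapl"                   # comparing against a plain string also works
    assert len({Stock("AAPL"), Stock("aapl")}) == 1  # equal stocks hash identically
    print(repr(Stock("aapl")))                       # prints: AAPL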
| true
|
4980d6a9bda50932dfb6439d0f38e7cfca3bc170
|
Python
|
cse442-at-ub/cse442-semester-project-indra-infared-remote-access
|
/RaspberryPi/util/pi_lirc.py
|
UTF-8
| 4,823
| 2.5625
| 3
|
[] |
no_license
|
from subprocess import check_output
import shutil
import dbus
import time
import os
from threading import Thread
from queue import Queue
LIRC_CONF_DIR = '/etc/lirc/lircd.conf.d'
def send_ir_signal(remote_name:str, button:str, method:str="ONCE", device:str=None) -> bool:
"""Sends an IR signal with LIRC.
Attempts to use LIRC to send the IR command associated with the provided remote_name and button.
If no device is provided then the default LIRC device will be used.
Parameters
----------
remote_name : str
Name of the remote config to use.
button : str
Name of the button to 'press'
method : str, optional
Should be "ONCE", "START", or "STOP" but will default to "ONCE"
device : str, optional
The device that LIRC will use to send the command. If None then the default device is used.
It is highly recommended that you always call this with the LIRC device that is used
for sending commands as the configured default may not be correct. (default is None)
Returns
-------
bool
True if successful, False if not.
"""
    command = ['irsend', 'SEND_' + method, remote_name, button]
    if device:
        # Assumption: this irsend build supports --device; adjust the flag if yours differs.
        command = ['irsend', '--device=' + device, 'SEND_' + method, remote_name, button]
output = None
try:
output = check_output(command)
except:
return False
return len(output) == 0
def search(brand, device):
output = check_output(['irdb-get','find', brand]).decode()
output = output.split("\n")
op1 = [i1.split('.l', 1)[0] for i1 in output[0:-1]]
op2 = [i2.split('/', 1) for i2 in op1]
res1 = [i[0] for i in op2]
res2 = [i[1] for i in op2]
final = [{'brand': f, 'device': c} for f,c in zip(res1,res2)]
result = list(filter(lambda item: device.lower() in item['device'].lower(), final))
return result
def download_lirc_config(brand: str, device: str, dst_dir=LIRC_CONF_DIR ) -> (bool, str):
lookup = brand + '/' + device + '.lircd.conf'
output = check_output(['irdb-get', 'download', lookup]).decode()
if 'Cannot' in output:
return (False, None)
filename = output.split('as')[-1].strip()
try:
resulting_location = shutil.move('./' + filename, dst_dir)
pass
except shutil.Error:
return (True, filename)
pass
output = (False, None)
if dst_dir in resulting_location and restart_lirc_service():
output = (True, filename)
return output
def restart_lirc_service():
"""
Requires authorization to interact with systemd. Therefore, run your piece of code with 'sudo' privileges.
"""
sysbus = dbus.SystemBus()
systemd = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd, 'org.freedesktop.systemd1.Manager')
output = manager.RestartUnit('lircd.service', 'fail')
time.sleep(.5)
return is_lirc_running(sysbus)
def is_lirc_running(sysbus=dbus.SystemBus()):
"""
Requires authorization to interact with systemd. Therefore, run your piece of code with 'sudo' privileges.
"""
systemd = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd, 'org.freedesktop.systemd1.Manager')
service = sysbus.get_object('org.freedesktop.systemd1', object_path=manager.GetUnit('lircd.service'))
interface = dbus.Interface(service, dbus_interface='org.freedesktop.DBus.Properties')
return interface.Get('org.freedesktop.systemd1.Unit', 'ActiveState') == 'active'
def read_lirc_config_file(filename, src_dir=LIRC_CONF_DIR):
path_to_file = src_dir + '/' + filename
output = None
if os.path.exists(path_to_file):
with open(path_to_file, 'r') as lirc_f:
try:
output = lirc_f.read()
pass
except:
output = None
pass
finally:
lirc_f.close()
return output
class IrSendDaemon(Thread):
def __init__(self, min_delay):
self.command_q = Queue(maxsize=0)
self.min_delay = min_delay
self.running = False
super().__init__()
def start(self):
self.running = True
super().setDaemon(True)
super().start()
def run(self):
current_time = round(time.monotonic() * 1000)
while self.running:
remote, button = self.command_q.get()
send_ir_signal(remote, button)
print(round(time.monotonic() * 1000) - current_time)
current_time = round(time.monotonic() * 1000)
time.sleep(.001 * self.min_delay)
def add_to_queue(self, remote, button):
self.command_q.put((remote, button), block=False)
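# Hedged usage sketch (illustration only, not part of the original module).
# The remote and button names below are placeholders and assume a matching
# config file exists under /etc/lirc/lircd.conf.d.
if __name__ == "__main__":
    daemon = IrSendDaemon(min_delay=200)           # at least 200 ms between sends
    daemon.start()
    daemon.add_to_queue("samsung_tv", "KEY_POWER")
    daemon.add_to_queue("samsung_tv", "KEY_VOLUMEUP")
    time.sleep(2)                                  # give the daemon thread time to drain the queue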
| true
|
26a055f95853e218e7ad5425bcd71f89848f0d12
|
Python
|
weiiiiweiiii/BreakSpace
|
/spacebreakerlib/ScrollbarXY.py
|
UTF-8
| 845
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Liangze Yu
"""
import tkinter as tk
class ScrollbarXY:
def __init__(self,textArea):
#pack with Window
self.__root = textArea.master
#function aligned to Text
self.__textArea = textArea
self.__xScrollbar()
self.__yScrollbar()
def __xScrollbar(self):
self.__xBar = tk.Scrollbar(self.__root,command= self.__textArea.xview, orient = tk.HORIZONTAL)
self.__textArea.config(xscrollcommand = self.__xBar.set)
self.__xBar.pack(side = 'bottom', fill = 'x')
def __yScrollbar(self):
self.__yBar = tk.Scrollbar(self.__root,command= self.__textArea.yview, orient = tk.VERTICAL)
self.__textArea.config(yscrollcommand = self.__yBar.set)
self.__yBar.pack(side = 'right', fill = 'y')
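# Hedged usage sketch (illustration only, not part of the original module):
# attach both scrollbars to a Text widget whose master is the root window.
if __name__ == "__main__":
    root = tk.Tk()
    text = tk.Text(root, wrap="none")  # wrap="none" so the horizontal scrollbar is actually useful
    ScrollbarXY(text)                  # packs the x/y scrollbars onto the root window
    text.pack(side="left", fill="both", expand=True)
    root.mainloop()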
| true
|
c6424c89aff12786212667ea046e8d8b702716dd
|
Python
|
georgetao/comp-programming
|
/beads.py
|
UTF-8
| 828
| 3.453125
| 3
|
[] |
no_license
|
"""
ID: georget2
LANG: PYTHON3
TASK: beads
"""
import sys
with open("beads.in", "r") as fin:
n = int(fin.readline())
beads = fin.readline().replace("\n", "")
def longest_beads(n, beads):
curr_char = ""
prev = 0
curr = 0
longest = 0
tail_whites = 0
newStreak = False
for _ in range(2):
for c in beads:
if c == curr_char:
curr += 1
tail_whites = 0
elif c == "w":
curr += 1
tail_whites += 1
else:
prev = curr
curr_char = c
curr = 1
newStreak = True
if prev + curr > longest:
longest = prev + curr
if newStreak:
curr += tail_whites
prev -= tail_whites
tail_whites = 0
newStreak = False
if longest >= n:
return longest
return longest
longest = longest_beads(n, beads)
with open("beads.out", "w") as fout:
fout.write(str(longest) + "\n")
| true
|
0cbe8f5588e8343644fb6af1bcc0b24c180649cd
|
Python
|
shkumagai/pyside-sandbox
|
/sample_ghost.py
|
UTF-8
| 495
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
import logging
import ghost
def main(url, output_path):
g = ghost.Ghost()
with g.start(display=True, viewport_size=(1366, 800)) as session:
res = session.open(url, timeout=30)
print(res)
if __name__ == '__main__':
url = 'http://www.google.com/'
output_path = 'capture_0.png'
logger = logging.getLogger('script')
sh = logging.StreamHandler()
    sh.setLevel(logging.DEBUG)
    logger.addHandler(sh)  # attach the handler so debug output is actually emitted
ghost.logger = logger
main(url, output_path)
| true
|
93db1c4546a65d0ed8fd0476d861886e7f65e689
|
Python
|
perikain/Pruebas
|
/upd1.py
|
UTF-8
| 454
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
#!-*- coding: utf-8 -*-
import socket
host = "192.168.56.1"
port = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((host,port))
s.settimeout(5) # adds a 5-second timeout; only then is the error message shown.
data, addr = s.recvfrom(1024) # recvfrom returns two values: first the data, second the client's address.
print "received from ", addr
print "obtained ", data
s.close()
| true
|
e9c394ec9aaacc947ee2578e1920202417963867
|
Python
|
RahulJain7/Openmodelica-Thermodynamic-Engine
|
/PythonFiles/UNIQUAC.py
|
UTF-8
| 921
| 2.640625
| 3
|
[] |
no_license
|
import csv
Compound = []
A12 = []
A21 = []
alpha = []
with open("UNIFAC.csv") as csvfile:
csvreader = csv.reader(csvfile,delimiter=',')
for row in csvreader:
Comp1 = row[2]
Comp1 = Comp1.capitalize()
Comp1 = Comp1.strip(" ")
Comp2 = row[3]
Comp2 = Comp2.capitalize()
Comp2 = Comp2.strip(" ")
if Comp1+'_'+Comp2 not in Compound:
Compound.append(Comp1+'_'+Comp2)
A12.append(row[0])
A21.append(row[1])
# alpha.append(row[4])
print Compound
print A12
no = len(Compound)
no1 = len(A12)
print no
print no1
with open('UNIQUAC_FINAL.txt','a') as txtfile:
txtfile.write("{")
for i in range(0,no):
txtfile.write('"'+Compound[i]+'",')
txtfile.write("}")
txtfile.write("\n")
txtfile.write("\n")
for i in range(0,no):
txtfile.write("{"+str(A12[i])+","+str(A21[i])+"},")
| true
|
4980500d446bc774f7c00abe2bb0543fdbe1cf66
|
Python
|
wsustcid/Udacity-Self-Driving-Car-Engineer-Nanodegree
|
/Term1/Scripts/02.[Project] Finding Lane Lines/4_color_selection.py
|
UTF-8
| 1,736
| 3.28125
| 3
|
[] |
no_license
|
'''
@Author: Shuai Wang
@Github: https://github.com/wsustcid
@Version: 1.0.0
@Date: 2020-03-26 11:45:38
@LastEditTime: 2020-04-02 11:26:00
'''
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Read in the image
image = mpimg.imread('lane.jpg')
print('This image is: ',type(image),
'with dimensions:', image.shape)
# image.shape = (height, width, channels)
# Grab the x and y size and make a copy of the image
height = image.shape[0]
width = image.shape[1]
# Note: Always make a copy of arrays or other variables in Python.
# If instead, you use "a = b" then all changes you make to "a"
# will be reflected in "b" as well!
color_select = np.copy(image)
# Define color selection criteria
###### MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
######
# answer: 200 (recognize all 4 lane lines)
# If we set to 200, can extract two lines directly in front of the vehicle.
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
# Do a boolean or with the "|" character to identify
# pixels below the thresholds
thresholds = (image[:,:,0] < rgb_threshold[0]) \
| (image[:,:,1] < rgb_threshold[1]) \
| (image[:,:,2] < rgb_threshold[2])
# thresholds is a 2-D boolean matrix,
# The matrix elements are False only when the RGB values are all above the corresponding rgb_thresholds.
color_select[thresholds] = [0,0,0]
# Uncomment the following code if you are running the code locally and wish to save the image
mpimg.imsave("lane_color_selection.png", color_select)
# Display the image
plt.imshow(color_select)
plt.show()
| true
|
f0343f86e207f2c0a7e8898faeb52d2d656288af
|
Python
|
danyontb/ProgrammingPortfolio
|
/Classes/pyramid.py
|
UTF-8
| 302
| 3.25
| 3
|
[] |
no_license
|
class pyramid:
import math
l = input('Enter in a length:')
w = input('Enter in a width:')
h = input('Enter in a height:')
L = int(l)
W = int(w)
H= int(h)
print('volume: ', (L*W*H)/3)
print('surface area: ', L*W+L*math.sqrt(((W*W)/4)+H*H)+W*math.sqrt(((L*L)/4)+H*H))
| true
|
988ef3f265aa445e72e52a39e1e08bb543e14349
|
Python
|
CXOldStar/lorawanprotocol
|
/lorawanprotocol/assert_judge.py
|
UTF-8
| 644
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
from .customer_error import AssertError
class AssertJudge:
@staticmethod
def assert_nwkskey(nwkskey):
if not (isinstance(nwkskey, bytes) and len(nwkskey) == 16):
raise AssertError('NwkSKey', 'NwkSKey must be a 16 bytes data.')
@staticmethod
def assert_appskey(appskey):
if not (isinstance(appskey, bytes) and len(appskey) == 16):
raise AssertError('AppSKey', 'AppSKey must be a 16 bytes data.')
@staticmethod
def assert_appkey(appkey):
if not (isinstance(appkey, bytes) and len(appkey) == 16):
raise AssertError('AppKey', 'AppKey must be a 16 bytes data.')
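# Hedged usage sketch (illustration only). Because of the relative import above,
# this module is meant to be used from inside the package; the package name
# below is assumed from the repository layout.
#
#     from lorawanprotocol.assert_judge import AssertJudge
#     AssertJudge.assert_appkey(bytes(16))        # passes: exactly 16 bytes
#     AssertJudge.assert_nwkskey(b"too short")    # raises AssertError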
| true
|
d9611a5577f39f2956102aea79c478f3d27aaa95
|
Python
|
abdallawi/PythonBasic
|
/python-standard-lib/WorkingWithPaths.py
|
UTF-8
| 2,802
| 4.09375
| 4
|
[] |
no_license
|
from pathlib import Path
# Makes sense:
new_line = '\n'
# When you wanna work with files and directories,
# you will need a Path object that represents where that file or directory lives.
# Here are 2 examples that show the difference between a Unix and a Windows folder hierarchy:
# Unix: /home/elliot/Desktop/password.txt
# Windows: C:\Users\Mr Robot\Desktop\password.txt
# When creating a Path instance on windows we have 2 options:
# Because Windows uses a backslash as a separator we need to use this notation:
windows_path_1 = Path('C:\\Downloads\\Secret Porn\\video.mp4')
# Here we are using a raw string, meaning \ is not an escape character, the value given is taken as is!
windows_path_2 = Path(r'C:\Downloads\Secret Porn\video.mp4')
# If we dont know where we are in the current folder hierarchy,
# we can simply make an Path instance without giving it an argument during creation:
place_in_universe = Path()
# We can also combine Path objects with strings to form a new Path instance as such:
# C:\Users\abdelmounaim\Documents\MyDesktop\python file\python_basics
# project = Path('/home') / 'eliot' / 'PycharmProjects' / 'python_basics/'
project = Path(r'C:\Users') / 'abdelmounaim' / 'Documents' / 'MyDesktop' / 'python file' / 'python_basics'
# If you want to know the current home of the user you can use the following method:
home = Path.home()
print(home)
# We will use the project variable to play around with, and see the most useful methods
# See if our path exists:
print(f"The path {project} exists: {project.exists()}{new_line}")
# Check if our instance is a file:
print(f"The path {project} is a file: {project.is_file()}{new_line}")
# Check if our instance is a directory:
print(f"The path {project} is a directory: {project.is_dir()}{new_line}")
# We can extract individual components in this path:
this_file = project / 'python-standard-lib/WorkingWithPaths.py'
print(this_file.name)  # Returns the name of the file at the end of our Path; if there is no file, it returns the last directory
# We can also lose the file extension like so:
print(this_file.stem)
# If we want only the file extension:
print(this_file.suffix)
# If we only want the parent path:
print(this_file.parent)
my_list = list(this_file.parents)
for i in range(len(my_list)):
print(my_list[i])
# What if we want to modify our name and extension and store it as a new Path, beware this will not make a new file!
this_file_modified = this_file.with_name('not_a_virus_for_sure_homeboy.exe')
print(this_file_modified)
# If you need an absolute path:
print(this_file_modified.absolute()) # Doesn't mean that the file exists, this is only a representation of a path!!
# If you want to find out more about the pathlib module go here: https://docs.python.org/3/library/pathlib.html
| true
|
df525b01a7037dc73a9e286223ef7e29977b6610
|
Python
|
chakshujain/Python-projects
|
/Whatsapp_automation.py
|
UTF-8
| 665
| 2.90625
| 3
|
[] |
no_license
|
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('https://web.whatsapp.com/')
nameslist = list(map(str,input("Enter names of users you wanna send message: ").split()))
msg = input("Enter message: ")
count = int(input("How many times you wanna send: "))
input("Enter any key after scanning code")
for name in nameslist:
user = driver.find_element_by_xpath('//span[@title = "{}" ]'.format(name))
user.click()
msg_box = driver.find_element_by_class_name('_3u328')
for i in range(count):
msg_box.send_keys(msg)
btn = driver.find_element_by_class_name('_3M-N-')
btn.click()
driver.minimize_window()
| true
|
d36258c11914ca73f7e2bb3c99e8c38c053c6fa5
|
Python
|
lolizz00/JESD
|
/ConsoleApp.py
|
UTF-8
| 8,571
| 2.953125
| 3
|
[] |
no_license
|
from JESDdriver import JESD
import sys
from version import version_logo
class ConsoleApp:
def __init__(self):
self.dev = JESD()
def outError(self):
sys.stdout.write('Wrong args!')
def handleArgs(self, argv):
        # print help, copy-pasted from the README
if argv[0] == '-h' or argv[0] == '--help':
print("'-h' или '--help' --- Вывод помощи.")
print("'-v' или '--ver' --- Вывод текущей версии. ")
print("'-l' или '--list' --- Вывод списка доступных устройств")
print("'-d [номер]' или '--device [номер]' --- Необязательный аргумент, по умолчанию 0. Выбор устройства для работы.")
print("'-st' или '--status' --- Вывод статуса устройства. Пример вызова: `start.py -d 1 -st` или `start.py --status`.")
print("'-ld [файл]' или '--load [файл]' --- Загрузка файла в устройство. Пример вызова: `start.py -d 1 -ld text.txt` или `start.py --load H:\TEST\\test.txt`.")
print("'-bl' или '--blink' --- Мигнуть светодиодом. Пример вызова: `start.py -d 0 -bl` или `start.py --blink`.")
print("'-rs' или '--reset' --- Сбросить устройство. Пример вызова: `start.py -d 0 -rs` или `start.py --reset`.")
print("'-cs' или '--clrstat' --- Сбросить статус PLL. Пример вызова: `start.py -d 0 -cs` или `start.py --clrstat`.")
print("'-rd [адрес(hex)]' или '--read' --- Считать значение регистра. Пример вызова: `start.py -d 0 -rd 0x03` или `start.py --read 0xAA`.")
print("'-wr [адрес(hex)] [значение(hex)]' или '--write [адрес(hex)] [значение(hex)]' --- Запись в регистр. Пример вызова: `start.py -d 0 -wr 0x0 0x0` или `start.py --write 0xA 0xFF`.")
return 0
        # software version from the generator
if argv[0] == '-v' or argv[0] == '--ver':
print('Version: ' + version_logo)
return 0
        # print the list of devices, OK
if argv[0] == '-l' or argv[0] == '--list':
lst = self.dev.getListStr()
for i in range(len(lst)):
sys.stdout.write(str(i) + ': ' + lst[i])
return 0
        # If needed, specify the device number
devFlg = 0
devn = 0
if argv[0] == '-d' or argv[0] == '--device':
try:
                devn = int(argv[1]) # if a number is given, shift the index of the argument we work with
devFlg = 2
except:
print('Неверный номер устрйства!')
return -1
        # check that the device is alive
try:
info = self.dev.connect(devn)
print('Выбранное устройство: ' + self.dev.getListStr()[devn])
except Exception as e:
print('Ошибка при подключении: ' + str(e))
return -1
        ## Working with devices
        # check whether arguments are present.
try:
tmp = argv[0 + devFlg]
except:
print('Неверные аргументы!')
return -1
        # print the status
if argv[0 + devFlg] == '-st' or argv[0 + devFlg] == '--status':
stat1 = self.dev.checkStatus(1)
stat2 = self.dev.checkStatus(2)
print('PLL1 Status: ' + stat1)
print('PLL2 Status: ' + stat2)
return 0
        # file loading; works the same way as in the GUI version
        # if unclear, see MainForm::parseFile
elif argv[0 + devFlg] == '-ld' or argv[0 + devFlg] == '--load':
self.dev.set4Wire()
try:
sch = 0
fname = argv[1 + devFlg]
fd = open(fname, 'r')
for line in fd:
sch = sch + 1
_line = line
line = line.replace('\n', '')
line = line.replace('0x', '')
if line == '':
continue
line = line.split('\t')
line = line[1]
line = int(line, 16)
regAddr = line >> 8
regVal = line & 0xFF
self.dev.write(regAddr, regVal)
fd.seek(0)
flg = True
for line in fd:
_line = line
line = line.replace('\n', '')
line = line.replace('0x', '')
if line == '':
continue
line = line.split('\t')
line = line[1]
line = int(line, 16)
regAddr = line >> 8
regVal = line & 0xFF
                    # skip reset registers
skip = [0x0, 0x1ffd, 0x1ffe, 0x1fff, 0x006]
if regAddr in skip:
continue
tmp = self.dev.read(regAddr)
if tmp != regVal:
print('Предупреждение: Регистр ' + hex(regAddr) + ' после записи значения ' + hex(
regVal) + ' равен ' + hex(tmp))
flg = False
fd.close()
if flg:
print('Запись прошла без ошибок!')
print('Запись завершена.')
return 0
except Exception as e:
if sch:
print('Неверный файл! ' + "Ошибка: '" + str(e) + "' на строке " + str(sch))
else:
print('Неверный файл! ' + "Ошибка: '" + str(e))
return -1
        # blink the LED
elif argv[0 + devFlg] == '-bl' or argv[0 + devFlg] == '--blink':
self.dev.LEDBlink()
print('Успешно помигали светодиодом.')
return 0
        # reset the device
elif argv[0 + devFlg] == '-rs' or argv[0 + devFlg] == '--reset':
self.dev.reset()
print('Устройство успешно сброшено.')
return 0
        # clear the status
elif argv[0 + devFlg] == '-cs' or argv[0 + devFlg] == '--clrstat':
self.dev.clearStatus()
print('Статус успешно очищен.')
return 0
        # read a register
elif argv[0 + devFlg] == '-rd' or argv[0 + devFlg] == '--read':
try:
self.dev.set4Wire()
self.dev.enableWrite()
addr = argv[1 + devFlg]
_addr = addr
addr = int(addr, 16)
val = self.dev.read(addr)
print('Значение регистра ' + hex(addr) + ' : ' + hex(val))
return 0
except:
print('Неверный номер регистра!')
return -1
        # write to a register
elif argv[0 + devFlg] == '-wr' or argv[0 + devFlg] == '--write':
try:
self.dev.set4Wire()
self.dev.enableWrite()
addr = argv[1 + devFlg]
addr = int(addr, 16)
val = argv[2 + devFlg]
val = int(val, 16)
self.dev.write(addr, val)
print("Успешно записано.")
return 0
except:
print('Неверный номер или значение регистра!')
return -1
else:
print('Неизвестный или отсутсвующий аргумент!')
return -1
| true
|
c3b9d8ddd9d30c182aea5ed3735e2d1ba95c7761
|
Python
|
duykienvp/sigspatial-2021-quantify-voi-of-trajectories
|
/pup/common/information_gain.py
|
UTF-8
| 506
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
# Calculate Information Gain
import logging
import numpy as np
logger = logging.getLogger(__name__)
def calculate_differential_entropy_norm(sigma, base=2) -> float:
""" Differential entropy of a normal distribution with standard deviation sigma is: 0.5log(2*pi*e*sigma*sigma)
:param sigma: standard deviation of the normal distribution
:param base: base of logarithm
:return: the differential entropy
"""
return (0.5 * np.log(2 * np.pi * np.e * sigma * sigma)) / np.log(base)
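# Hedged usage sketch (illustration only): for a standard normal distribution
# (sigma = 1) the differential entropy is 0.5 * log2(2*pi*e) ~= 2.05 bits.
if __name__ == "__main__":
    print(calculate_differential_entropy_norm(1.0))              # ~2.047 (bits, base 2)
    print(calculate_differential_entropy_norm(1.0, base=np.e))   # ~1.419 (nats)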
| true
|
35a59bbd5bafdd69409557d304a26fe5c3057a1e
|
Python
|
yangyu57587720/kindergarten
|
/apps/users/models.py
|
UTF-8
| 2,571
| 2.640625
| 3
|
[] |
no_license
|
"""用户相关的模型表"""
from django.db import models
from django.contrib.auth.models import AbstractUser  # import the auth user module
from datetime import datetime
class UserProfile(AbstractUser):
"""继承django模块AbstractBaseUser并扩展"""
nick_name = models.CharField(max_length=32, default="", verbose_name="昵称")
    # null lets the database column be empty; blank lets the form field be left empty
birthday = models.DateField(verbose_name="生日", null=True, blank=True)
    # this approach avoids redundant columns in the database; storing a number is better
gender_choices = ((1, "男"), (2, "女"))
gender = models.IntegerField(choices=gender_choices, default=1, verbose_name="性别")
address = models.CharField(max_length=64, default="", verbose_name="居住地址")
mobile = models.CharField(max_length=11, null=True, blank=True, verbose_name="手机号码")
    # upload_to specifies the file path (image/year/month)
image = models.ImageField(upload_to="image/%Y/%m", default="image/default.png", verbose_name="头像")
class Meta:
"""设置表名称"""
verbose_name = "用户信息"
        # verbose_name_plural sets the plural display name; without it Django appends an "s"
verbose_name_plural = verbose_name
def __str__(self):
        # friendly string representation
return self.username
class EmailVerifyRecord(models.Model):
"""邮箱验证"""
code = models.CharField(max_length=32, verbose_name="验证码")
email = models.EmailField(max_length=64, verbose_name="邮箱")
send_type_choices = ((0, "注册"), (1, "找回密码"), (2, "修改邮箱"))
send_type = models.IntegerField(choices=send_type_choices, verbose_name="类别")
send_time = models.DateField(default=datetime.now, verbose_name="发送时间")
class Meta:
"""设置表名称"""
verbose_name = "邮箱验证码"
verbose_name_plural = verbose_name
def __str__(self):
return self.email
class Banner(models.Model):
"""轮播图"""
title = models.CharField(max_length=64, verbose_name="标题")
image = models.ImageField(upload_to="banner/%Y/%m", verbose_name="轮播图")
url = models.URLField(max_length=225, verbose_name="访问地址")
index = models.IntegerField(default=99, verbose_name="顺序")
add_time = models.DateField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "轮播图"
verbose_name_plural = verbose_name
def __str__(self):
return self.title
| true
|
801e71f9964f702b756a558ecc2c0c7d57421e66
|
Python
|
singhalsrishty/pythonEthans
|
/29March_assignment.py
|
UTF-8
| 6,924
| 4.40625
| 4
|
[] |
no_license
|
'''
1. Write a program which can compute the factorial of a give numbers.
The results should be printed in a comma-separated sequence on a single line. Suppose the
following input is supplied to the program:
8
Then, the output should be:
40320
'''
def factorial(num):
fact = 1;
if num == 0:
return fact;
else:
for i in range(1, num+1):
fact *= i;
return fact;
number = int(input("Enter the number to calculate factorial: "));
print(factorial(number));
'''
3. Write a program that calculates and prints the value according to the given formula:
Q = Square root of [(2*C*D)/H]
Following are the fixed values of C and H:
C is 50 and H is 30.
D is the variable whose values should be input to your program in a comma-separated sequence.
Example: Let us assume the following comma-separated input sequence is given to the program:
100, 150, 180
The output of the program should be:
18, 22, 24
'''
import math as maths;
c=50; h=30;
def calculateSquareRoot_Formula(seq):
result = [];
for i in seq:
i = int(i);
product = (2*c*i)/h;
result.append(int(maths.pow(product, 1/2)));
return result;
inputs = input("Enter numbers: ");
seq = inputs.split(",");
#print(seq);
print(calculateSquareRoot_Formula(seq));
'''
4. Write a program that accepts a sequence of whitespace seperated words as input and print the
wprds after removing all the duplicate words and sorting them alphanumerically.
Suppose the following input is supplied to the program:
hello world and practice makes perfect and hello world again
Then, the output should be:
again and hello makes perfect practice world
'''
def removeDuplicate(sentence):
result = set();
for words in sentence:
result.add(words);
return result;
def sortLexicographically(sentenceSet):
sorted = list(sentenceSet);
sorted.sort();
return sorted;
sentence = str(input("Enter the sentence: ")).split(" ");
nonDuplicates = removeDuplicate(sentence);
resultList = sortLexicographically(nonDuplicates);
for item in resultList:
print(item, end=" ");
'''
5. Write a program which will find all such numbers between 1000 and 3000 (both included) such that
each digit of the number is an even number. The numbers obtained should be printed in a comma-separated sequence
on a single line.
'''
def printEven():
    for i in range(1000, 3001):
        # the problem asks for numbers whose every digit is even
        if all(int(digit) % 2 == 0 for digit in str(i)):
            print(i, end=", ")
printEven();
'''
6. Write a program that accepts a sentence and calculate the number of letters and digits.
Suppose the following input is supplied to the program:
hello world! 123
Then, the output should be:
LETTERS 10
DIGITS 3
'''
def countLetters(seq):
letters = 0;
for word in list(seq):
for char in word:
if str(char).isalpha():
letters += 1;
return letters;
def countDigits(seq):
digits = 0;
for word in list(seq):
for char in word:
str(char).isupper();
if str(char).isdigit():
digits += 1;
return digits;
inputs = input("Enter the input: ").split(" ");
print("LETTERS ", countLetters(inputs));
print("DIGITS ", countDigits(inputs));
'''
7. Write a program that accepts a sentence and calculate the number of upper case letters and lower case letters.
Suppose the following input is supplied to the program:
Hello world!
Then, the output should be:
UPPER CASE 1
LOWER CASE 9
'''
def calculateUpperCase(seq):
count = 0;
for word in seq:
for char in word:
if(str(char).isupper()):
count += 1;
return count;
def calculateLowerCase(seq):
count = 0;
for word in seq:
for char in word:
if(str(char).islower()):
count += 1;
return count;
inputs = input("Enter the sentence: ").split(" ");
print("UPPER CASE ", calculateUpperCase(inputs));
print("LOWER CASE ", calculateLowerCase(inputs));
'''
8. Write a program that computes the net amount of a bank account based on a transaction log from console input.
The transaction log format is shown as following:
D 100
W 200
D means deposit while W mean withdrawl.
Suppose the following input is supplied to the program:
D 300
D 300
W 200
D 100
Then, output should be:
500
'''
def processTransactions(transaction_seq):
total = 0;
for transaction in transaction_seq:
type_transaction = transaction.split(" ");
if(type_transaction[0] == "D"):
total += int(type_transaction[1]);
elif (type_transaction[0] == "W"):
total -= int(type_transaction[1]);
return total;
transactions = input("Enter the transaction data: ").splitlines();
print(processTransactions(transactions));
'''
9. A website requires the users to input username and password to register. Write a program to check the
validity of the password input by users. Following are the criteria for checking the password:
1. At least 1 letter between [a-z]
2. At least 1 number between [0-9]
3. At least 1 letter between [A-Z]
4. Minimum length of transaction password: 6
5. Maximum length of transaction password: 12
Your program should accept a sequence of comma-separated passwords and will check them according to the
above criteria. Passwords that match the criteria are to be printed, each separated by a comma.
Example: If the following passwords are given as input to the program:
ABd1234@1, aF1#, 2w3E*, 2We3345.
Then, the output of the program should be:
ABd1234@1
'''
import re as regex;
def validatePassword(passwords):
validPasswords = list();
for password in passwords:
if len(password) >=6 and len(password) <=12:
if regex.findall("[a-zA-Z0-9]", password):
validPasswords.append(password);
return validPasswords;
inputs = str(input("Enter the passwords: ")).split(", ");
validPasswords = validatePassword(inputs);
for password in validPasswords:
print(password, end=", ")
'''
10. You are required to write a program to sort the (name, age, height) tuples by ascending order where the
name is string, age and height are numbers. The tuples are input by console. The sort criteria is:
1. Sort based on name;
2. Then sort based on age;
3. Then sort by score.
The priority is that name>age>score.
If the following tuples are given as input to the program:
Tom, 19, 80
John, 20, 90
Jonny, 17, 91
Jonny, 17, 93
Json, 21, 85
Then, the output of the program should be:
[("John", "20", "90"), ("Jonny", "17", "91"), ("Jonny", "17", "93"), ("Json", "21", "85"),
("Tom", "19", "80")]
'''
from operator import itemgetter;
def sortData(dataset):
dataset.sort(key=itemgetter(2));
dataset.sort(key=itemgetter(1));
dataset.sort(key=itemgetter(0));
return dataset;
rawData = input("Enter the data: ").splitlines();
dataset = list();
for data in rawData:
data = tuple(data.split(", "));
dataset.append(data);
print(sortData(dataset));
| true
|
a30b833a2b5ea85ddaa61ccf77832a724af5dd50
|
Python
|
AdamPellot/AutomateTheBoringStuffProjects
|
/Ch16/autoUnsub.py
|
UTF-8
| 1,229
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#! python3
# autoUnsub.py - Scans through your email account, finds all the
# unsubscribe links in all your emails, and automatically
# opens them in a browser.
# Adam Pellot
import bs4
import imapclient
import pyzmail
import webbrowser
print('Enter your email address:')
myEmail = input()
print('Enter the password to your email address:')
emailPass = input()
# Connect to email.
imapObj = imapclient.IMAPClient('imap.gmail.com', ssl=True)
imapObj.login(myEmail, emailPass)
imapObj.select_folder('INBOX', readonly=True)
# Select all emails and check for each one unsub link.
UIDs = imapObj.search('ALL')
unsubLinks = []
count = 0
for ID in UIDs:
rawMessages = imapObj.fetch(ID, ['BODY[]'])
message = pyzmail.PyzMessage.factory(rawMessages[ID][b'BODY[]'])
if message.html_part:
htl = message.html_part.get_payload().decode(message.html_part.charset)
soup = bs4.BeautifulSoup(htl, 'html.parser')
linkElems = soup.select('a')
for link in linkElems:
if 'unsubscribe' in link.text.lower():
unsubLinks.append(link.get('href'))
imapObj.logout()
# Open each unsub link in webbrowser.
for link in unsubLinks:
webbrowser.open(link)
| true
|
c8903005a4f320d81e6a51d1806d4043878cd4be
|
Python
|
laxmanbudihal/Selenium
|
/open-close.py
|
UTF-8
| 544
| 2.9375
| 3
|
[] |
no_license
|
from selenium import webdriver
import os
if 'chromedriver.exe' in os.listdir():
# platform independent use os module
x = os.path.join(os.getcwd(), 'chromedriver.exe')
print(x)
driver = webdriver.Chrome(x)
else:
    # if chrome driver is not found, stop before `driver` is used below
    print('Warning : chrome binaries missing! ')
    raise SystemExit(1)
# open a url part
driver.get("https://www.google.com/")
driver.get("https://duckduckgo.com/")
driver.get("https://www.wikipedia.org/")
o = input('close it ? : ')
if o.lower()[0] == 'y':
driver.close()
| true
|
5406cbc623165068791fc63e6ec23b5802616be5
|
Python
|
deenaariff/Weave-Client
|
/RestClient/helpers/dockerCluster.py
|
UTF-8
| 3,370
| 2.546875
| 3
|
[] |
no_license
|
import dataHelper as dh
import time, sys, os
import docker
import shutil
import urllib2
from tabulate import tabulate
class Cluster:
def __init__(self, configs, docker_image):
self.docker_ip = "192.168.99.100"
self.docker_image = docker_image;
self.routes = []
self.client = docker.from_env()
self.containers = []
self.default_keys = ["IP Address","Endpoint Port","Voting Port","Heartbeat Port", "State", "Term","Last Applied Index","Commit Index","Votes Obtained"]
try:
result = self.client.images.get(self.docker_image)
print "Detected image '" + self.docker_image + "'"
except docker.errors.ImageNotFound as e:
print "Error: Image '" + self.docker_image + "' not Found"
sys.exit(1)
self.configs = configs
self.num_nodes = len(configs)
self.initialize_cluster()
self.leader = None
# Start a cluster of docker nodes
# Delete existing logs
def initialize_cluster(self):
d='./logs/'
filesToRemove = [os.path.join(d,f) for f in os.listdir( d )]
for f in filesToRemove:
os.remove(f)
count = 0
        for config in self.configs:
print "Starting Node: " + str(count+1)
port_mapping = {}
for port in config:
port_mapping[str(port)+'/tcp'] = port
result = self.client.containers.run(self.docker_image, detach=True, ports=port_mapping)
self.containers.append(result)
port = str(config[0])
url = "http://" + self.docker_ip + ":" + port
self.routes.append(url)
def find_leader(self):
if len(self.routes) > 0:
while not self.leader:
                for url in self.routes:
try:
rsp = dh.make_request(url)
if rsp['State'] == 'LEADER':
self.leader = "http://" + rsp['IP Address'] + ":" + str(rsp['Endpoint Port'])
except urllib2.HTTPError, e:
print('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
print('URLError = ' + str(e.reason))
print url + " not reachable"
except Exception, e:
print e
time.sleep(1)
else:
print "Error no routes available"
sys.exit()
def print_cluster(self):
data = []
for url in self.routes:
tmp = []
try:
rsp = dh.make_request(url)
for key in self.default_keys:
tmp.append(rsp[key])
except Exception:
tmp = ["DEAD"] * len(self.default_keys)
data.append(tmp)
print tabulate(data, headers=self.default_keys)
# Remove the cluster of docker nodes
# Add all logs to files in logs/
def remove_cluster(self):
print "Stopping " + str(len(self.containers)) + " containers"
file_index = 1
for container in self.containers:
filename = "logs/log" + str(file_index) + ".txt"
with open(filename, "w+") as f:
f.writelines(container.logs())
f.close()
container.stop()
file_index += 1
| true
|
614fa8d31eae7b19580fe23e1ac2e748ce8385a3
|
Python
|
HongbinW/learn_python
|
/learn_python/名片管理/cards_main.py
|
UTF-8
| 817
| 3.09375
| 3
|
[] |
no_license
|
import cars_tools
while True:
    # TODO show the feature menu
cars_tools.show_menu()
action_str = input("请选择要执行的操作:")
print("您选择的操作是【%s】" % action_str)
    # 1, 2, 3 are operations on business cards
    # 0 exits the system
    # anything else is an input error; prompt the user
if action_str in ["1","2","3"]:
if action_str =="1":
cars_tools.new_card()
elif action_str =="2":
cars_tools.show_all()
elif action_str =="3":
cars_tools.search_card()
        # If, during development, you don't want to write the body of a branch yet,
        # you can use the pass keyword as a placeholder to keep the code structure valid; at runtime, pass does nothing
# pass
elif action_str == "0":
print("欢迎再次使用【名片管理系统】")
break
else:
print("选择错误,请重新选择")
| true
|
d0092bd2dc93be4e4a040182a46fc8c4114814a1
|
Python
|
xczhang07/Python
|
/third_party_libs/python_memcached.py
|
UTF-8
| 2,003
| 2.9375
| 3
|
[] |
no_license
|
# concept of memcached: high-performance, distributed memory object caching system.
# official site: https://memcached.org
# how to install: on mac os: brew install memcached
# install python library (client app) to interact with memcached server: pip install pymemcache
''' after installing memcached on your device, check that the process is running with the command "ps -ef | grep memc"
(make sure your memcached process is running; only then can you use the python library to interact with the memcached server)
the result looks like the following:
501 10343 1 0 12:03AM ?? 0:00.27 /usr/local/opt/memcached/bin/memcached -l localhost <---
501 11249 11241 0 12:33AM ttys003 0:00.00 grep memc
then we use the following python code to interact with the memcached server
after running this script, you need to launch another terminal to check the memcached server data:
telnet localhost 11211
Trying ::1...
Connected to localhost.
Escape character is '^]'.
get memcached
VALUE memcached 0 11
hello world
END
'''
from pymemcache.client import base
def run_query_to_db(command):
"""in this function, you are able to use python code to interact with your backend database(mysql, mongodb, etc...),
right now, we just return an simple result
"""
print("running query on database, getting data from db...\n")
return 100
def hit_cache_function(client, key):
    """this function performs the hit-or-miss caching logic against memcached"""
    result = client.get(key)
    if result is None:
        # cache miss: query the database, then populate the cache
        result = run_query_to_db("select someting from table1 where user id equals sth")
        client.set(key, result)
    # return the value whether it came from the cache or the database
    return result
if __name__ == "__main__":
client = base.Client(('localhost', 11211)) # this is the localhost testing code, we use localhost, in production code, you may need to provide real ip and password
client.set('memcached', 'Hello World')
hit_cache_function(client, "math_score")
| true
|
22d008471f29f1c1b71643b0382d331500109143
|
Python
|
lautarianoo/LautAvito
|
/cities/models.py
|
UTF-8
| 1,120
| 2.578125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
from django.db import models
class City(models.Model):
title = models.CharField(verbose_name='Название города', max_length=100)
def __str__(self):
return self.title
class Meta:
verbose_name = 'Город'
verbose_name_plural = 'Города'
class District(models.Model):
title = models.CharField(verbose_name='Название района', max_length=150)
city = models.ForeignKey(City, verbose_name='Город', on_delete=models.CASCADE, related_name='districts')
def __str__(self):
return f"{self.title} | {self.city.title}"
class Meta:
verbose_name = 'Район'
verbose_name_plural = 'Районы'
class Street(models.Model):
title = models.CharField(verbose_name='Название улицы', max_length=150)
district = models.ForeignKey(District, verbose_name='Район', on_delete=models.CASCADE, related_name='streets')
def __str__(self):
return f"{self.title} | {self.district.title}"
class Meta:
verbose_name = 'Улица'
verbose_name_plural = 'Улицы'
| true
|
1cd2f86e9ce899d12fe48a54287633175e0b027b
|
Python
|
luzifi/CREDIT-RISK-2020B
|
/src/classwork/industry-crawler/models.py
|
UTF-8
| 4,079
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
import json
from typing import List
import requests
from bs4 import BeautifulSoup
class AbstractIndustry:
def __init__(self, title: str, children: List['AbstractIndustry']):
self.title = title
self.children = children
def __repr__(self):
return f"<{self.level}, {self.title}>"
@property
def level(self):
raise NotImplementedError
def add_child(self, child: 'AbstractIndustry'):
self.children.append(child)
def to_dict(self):
return {
"title": self.title,
"level": self.level,
"children": [
child.to_dict()
for child in self.children
]
}
@staticmethod
def from_dict(**kwargs):
raise NotImplementedError
def jsonify(self) -> str:
return json.dumps(self.to_dict(), indent=4)
class Division(AbstractIndustry):
level = "SIC Division"
@staticmethod
def from_dict(**kwargs):
return Division(
title=kwargs["title"],
children=[
MajorGroup.from_dict(**k)
for k in kwargs.get("children", [])
]
)
class MajorGroup(AbstractIndustry):
level = "SIC Major Group"
@staticmethod
def from_dict(**kwargs):
return MajorGroup(
title=kwargs["title"],
children=[
Group.from_dict(**k)
for k in kwargs.get("children", [])
]
)
@staticmethod
def from_url(url):
response = requests.get(url)
html = BeautifulSoup(response.text, "html.parser")
return MajorGroup(
title=[
elm.text
for elm in html.find_all("h2")
if elm.text.lower().startswith("major group")][0],
children=[
Group(
title=group.text,
children=[
Single(
title=single.parent.text,
children=[]
)
for single in html.find_all("a")
if single.attrs.get("href", "").startswith("sic_manual")
and single.parent.text.startswith(group.text.split(":")[0].split(" ")[-1])
]
)
for group in html.find_all("strong")
if group.text.lower().startswith("industry group")
]
)
class Group(AbstractIndustry):
level = "SIC Group"
@staticmethod
def from_dict(**kwargs):
return Group(
title=kwargs["title"],
children=[
Single.from_dict(**k)
for k in kwargs.get("children", [])
]
)
class Single(AbstractIndustry):
level = "SIC Single Industry"
@staticmethod
def from_dict(**kwargs):
return Single(title=kwargs["title"], children=[])
class SIC(AbstractIndustry):
level = "Standard Industry Classification"
@staticmethod
def from_dict(**kwargs):
return SIC(
title=kwargs["title"],
children=[
Division.from_dict(**k)
for k in kwargs.get("children", [])
]
)
@staticmethod
def from_url(url: str) -> 'SIC':
response = requests.get(url)
html = BeautifulSoup(response.text, "html.parser")
divisions = []
for element in html.find_all("a"):
href = element.attrs.get("href", "")
title = element.attrs.get("title", "")
if not href.startswith("sic_manual"):
continue
elif href.endswith("division"):
div = Division(title=title, children=[])
divisions.append(div)
elif href.endswith("group"):
major_group_url = url.replace("sic_manual.html", href)
divisions[-1].add_child(MajorGroup.from_url(url=major_group_url))
return SIC(title="SIC", children=divisions)
| true
|
0a95a75c2c44240a0588a85d1f4451c28a44bf35
|
Python
|
jawhelan/PyCharm
|
/PyLearn/Exercise Files/Treehouse/split and join.py
|
UTF-8
| 432
| 3.53125
| 4
|
[] |
no_license
|
full_name = "james Whelan"
# split full name "James Whale" into "James", "whelan"
name_list = full_name.split()
greeting_var="hello my name is tim"
#split string "hello my name is tim" to "hello", "my"," name", "is", "tim"
greeting_list = greeting_var.split()
# swap out the name tim with james
greeting_list[4] = name_list[0]
# join the split list back into one variable
greeting_list = " ".join(greeting_list)
print(greeting_list)
| true
|
4644782b7e7d940bec7d266d29d07a532fa79771
|
Python
|
michaeltrimm/python-notes
|
/memory.py
|
UTF-8
| 1,298
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/local/bin/python3
import resource
import sys
"""
List
Squaring numbers 1 to 10,000,000
Before: 7.312Mb
After: 332.539Mb
Consumed = 325.22Mb memory
Generator
Squaring numbers 1 to 10,000,000
Before: 332.543Mb
After: 332.543Mb
Consumed = 0.0Mb memory
"""
# Size of the sample set
to = 10000000 # 10M
def memory_usage_resource():
rusage_denom = 1024.
if sys.platform == 'darwin':
rusage_denom = rusage_denom * rusage_denom
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
def log_memory(text="Before"):
print(' {}: {:,}Mb'.format(text, round(memory_usage_resource(),3)))
print("List")
print("Squaring numbers 1 to {:,}".format(to))
log_memory()
mem_before = memory_usage_resource()
squared_results_small = [x*x for x in range(1,to)]
log_memory("After")
mem_after = memory_usage_resource()
mem_diff = mem_after - mem_before
print(" Consumed = {:,}Mb memory".format(round(mem_diff,2)))
print("")
print("Generator")
print("Squaring numbers 1 to {:,}".format(to))
log_memory()
mem_before = memory_usage_resource()
squared_results_large = (x*x for x in range(1,to))
mem_after = memory_usage_resource()
log_memory("After")
mem_diff = mem_after - mem_before
print(" Consumed = {:,}Mb memory".format(round(mem_diff,2)))
print("")
| true
|
68919dce6728c9371f12037f9b63f1894d9b5ff0
|
Python
|
zuxinlin/leetcode
|
/leetcode/121.BestTimeToBuyAndSellStock.py
|
UTF-8
| 996
| 3.96875
| 4
|
[] |
no_license
|
#! /usr/bin/env python
# coding: utf-8
'''
Problem: Best Time to Buy and Sell Stock https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/
Topic: array & dynamic programming
Approach:
State transition: dp[i] = max(dp[i-1], prices[i] - min_price), the maximum profit achievable up to day i
'''
class Solution(object):
def maxProfit(self, prices):
'''
:type prices: List[int]
:rtype: int
'''
        # edge cases
if prices is None or len(prices) <= 1:
return 0
dp = [0 for _ in prices]
min_price = prices[0]
max_profit = 0
for (i, price) in enumerate(prices[1:]):
dp[i] = max(dp[i-1], price - min_price)
min_price = min(price, min_price)
max_profit = max(dp[i], max_profit)
return max_profit
if __name__ == '__main__':
solution = Solution()
assert solution.maxProfit([7, 1, 5, 3, 6, 4]) == 5
assert solution.maxProfit([7, 6, 4, 3, 1]) == 0
| true
|
7fc24b3261634c344c0218a74b4b0bbbcf1dd395
|
Python
|
nikkureev/bioinformatics
|
/ДЗ 15/Task 15.3.py
|
UTF-8
| 279
| 3.078125
| 3
|
[] |
no_license
|
import re
file = 'C:/Python/2430AD.txt'
# This one will help you to obtain all a-containing words
def a_finder(inp):
with open(inp, 'r') as f:
for lines in f:
            for i in re.findall(r'\b\w*a\w*\b', lines, re.IGNORECASE):
print(i)
a_finder(file)
| true
|
b3f6c8bb6ea51dc56cee3bd5934b6f114e56cad5
|
Python
|
mudits89/Just_Analytics_Test
|
/just_analytics__linked_list.py
|
UTF-8
| 2,668
| 3.875
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 08:53:28 2018
@author: mudit
"""
class Node:
def __init__(self, data, nextNode=None):
self.data = data
self.nextNode = nextNode
def getData(self):
return self.data
def setData(self, val):
self.data = val
def getNextNode(self):
return self.nextNode
def setNextNode(self, val):
self.nextNode = val
class LinkedList:
def __init__(self, head=None):
self.head = head
self.size = 0
def getSize(self):
return self.size
def printNode(self):
curr = self.head
while curr:
print("\t" + str(curr.data))
curr = curr.getNextNode()
def addNode_start(self, data):
newNode = Node(data, self.head)
self.head = newNode
self.size += 1
print("\tAdded " + str(data))
return True
def addNode_end(self, data):
newNode = Node(data)
        if self.head is None:
            self.head = newNode
            self.size += 1
            print("\tAdded " + str(data))
            return True
last = self.head
while (last.getNextNode()):
last = last.getNextNode()
last.setNextNode(newNode)
self.size += 1
print("\tAdded " + str(data))
return True
def findNode(self, value):
curr = self.head
while curr:
if curr.getData() == value:
return True
curr = curr.getNextNode()
return False
def removeNode(self, value):
prev = None
curr = self.head
while curr:
if curr.getData() == value:
if prev:
prev.setNextNode(curr.getNextNode())
else:
self.head = curr.getNextNode()
self.size -= 1
print("\t\tRemoved" + str(value))
return True
prev = curr
curr = curr.getNextNode()
return False
myList = LinkedList()
## inserting nodes
print("Inserting")
myList.addNode_start(5)
myList.addNode_start(15)
myList.addNode_end(25)
myList.addNode_end(42)
## printing nodes
print("\n\nPrinting list order")
myList.printNode()
## removing nodes
print("\n\nRemoving")
myList.removeNode(25)
## printing nodes
print("\n\nPrinting list order")
myList.printNode()
## removing nodes
print("\n\nRemoving")
myList.removeNode(15)
myList.removeNode(5)
myList.removeNode(42)
## printing nodes
print("\n\nPrinting list order")
myList.printNode()
print("\n\nSize")
print("\t" + str(myList.getSize()))
| true
|
63207141aa5149358c23a5d065e344fc1f4d317d
|
Python
|
ShivaBasava/Letsupgrade_WeeklyCodeBattle
|
/Week13/app.py
|
UTF-8
| 3,807
| 4.40625
| 4
|
[
"MIT"
] |
permissive
|
'''
Solution to WEEKLY CODE BATTLE - WEEK 13
NOTE: This 'app.py' file, contains the following-
1] Solution:- To the WEEKLY CODE BATTLE:- WEEK 13, A Language Translator.
2] Code & Explanation:- to the written code, line-by-line as comments.
3] Example Output
1] Solution-
a] We have made use of 'googletrans' library a Google API Language Translator for Python.
Command to install this,
pip install googletrans
b] From 'googletrans' library we are importing the Class 'Translator'.
c] We have created a Class name 'langTranslate', So that it can be reused.
d] Inside this Class, we have defined two methods- langCode() and translateSentence().
e] Method langCode() - Defined for handing the Menu and returning the respective Language Code.
f] Method translateSentence() - Defined for translating the Sentence to respective language by making use of
previous steps data(i.e, Language code).
2] Code & Explanation-
'''
#importing the Class module 'Translator' from 'googletrans library
from googletrans import Translator
#Creating a Class 'langTranslate'
class langTranslate():
#This method of Class 'langTranslate', returns the language code.
def langCode(self):
print("""\nTo translate, Select the respective number from following menu,\n[ Example- To convert the entered scentance to Serbian, select 2 ]
1. English
2. Serbian
3. Spanish""")
strNumber = input()
        # We maintain a Dictionary (tempLangCode) mapping the number selected on the console to its language code
tempLangCode = {'1': 'en','2': 'sr','3': 'es'}
#Checking whether we have the respective number associated with Language code
if strNumber in tempLangCode:
#Returning the respective Language code
return tempLangCode[strNumber]
else:
            #If the user enters a number other than those in the Dictionary (tempLangCode)
print("Please restart program and select available number from menu!!")
exit()
    #This method of Class 'langTranslate' accepts two parameters - the language code 'strTolang' and the sentence to be translated -
    #and returns the sentence after translation.
def translateSentence(self, strSentence, strTolang):
self.strString = strSentence
self.strTolang = strTolang
#Creating an Object 'translator', assigning a Class Translator() to it.
translator = Translator()
translation = translator.translate(self.strString, dest=self.strTolang)
return (str(translation.text))
#Program Execution starts from here
if __name__ == "__main__":
    #Embedding the logic inside the try-except block
try:
        #To get the sentence to be translated
sentence = input("Enter your sentence to translate:\n")
# Creating an Object 'objLang' of Class 'langTranslate'
objLang = langTranslate()
#Now we are calling Method langCode() to get the Input from the user
toLang = objLang.langCode()
#Pass the Language code 'toLang' and sentence to be translated to Method - translateSentence()
strTranslatedText = objLang.translateSentence(sentence,toLang)
print("\n{}".format(strTranslatedText))
except Exception as e:
print(e)
'''
3] Example Output:
a] Run the following command in the terminal,
python3 app.py
b] Following are the series of interactive output.
Enter your sentence to translate:
Letsupgrade is Awesome <3 !!!
To translate, Select the respective number from following menu,
[ Example- To convert the entered scentance to Serbian, select 2 ]
1. English
2. Serbian
3. Spanish
2
ЛетсУпграде је сјајан <3 !!!
'''
| true
|
979ead30158ba9533187ab8976f364fb2820be3d
|
Python
|
nima-m-git/exercism-python
|
/sieve/sieve.py
|
UTF-8
| 199
| 3.171875
| 3
|
[] |
no_license
|
def primes(limit):
    numbers = list(range(2, limit+1))  # renamed from `all` to avoid shadowing the built-in
    for i in numbers:
        for x in range(2, round(limit/i)+1):
            if (i*x) in numbers:
                numbers.remove(i*x)
    return numbers
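# Hedged usage sketch (illustration only):
if __name__ == "__main__":
    print(primes(10))  # -> [2, 3, 5, 7]
    print(primes(2))   # -> [2]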
| true
|
9a185a70bed9931e4fd2ed380bbd9aacbad798e5
|
Python
|
rdotlee/hologram-python
|
/scripts/examples/example-sms-csrpsk.py
|
UTF-8
| 1,127
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#
# example-sms-csrpsk.py - Example of sending SMS via CSRPSK Authentication in the Hologram Python SDK
#
# Author: Hologram <support@hologram.io>
#
# Copyright 2016 - Hologram (Konekt, Inc.)
#
# LICENSE: Distributed under the terms of the MIT License
#
import sys
sys.path.append(".")
sys.path.append("..")
sys.path.append("../..")
from Hologram import Hologram
from Hologram.Credentials import Credentials
if __name__ == "__main__":
print ""
print ""
print "Testing Hologram Cloud class..."
print ""
print "* Note: You can obtain CSRPSK IDs and Keys from the Devices page"
print "* at https://dashboard.hologram.io"
print ""
CSRPSKID = raw_input("What is your CSRPSK ID? ")
CSRPSKKey = raw_input("What is your CSRPSK Key? ")
destination_number = raw_input("What is your destination number? ")
credentials = Credentials(CSRPSKID, CSRPSKKey)
hologram = Hologram(credentials)
print ""
recv =hologram.sendSMS(destination_number, "Hello, Python!") # Send SMS to destination number
print "DATA RECEIVED: " + str(recv)
print ""
print "Testing complete."
print ""
| true
|
ca4ee49ef2cb2621ee692e5da8a5d5cf02d63c84
|
Python
|
irisqul/cryptopals-excercise
|
/set1/base64_hex.py
|
UTF-8
| 257
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
from binascii import hexlify, unhexlify
from base64 import b64encode, b64decode
hex_string = '49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d'
b64_string = b64encode(unhexlify(hex_string))
print(b64_string)
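# Hedged sanity check (illustration only): the hex decodes to
# "I'm killing your brain like a poisonous mushroom", so the base64 form should be
# b'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t'.
assert b64decode(b64_string) == unhexlify(hex_string)  # round-trip check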
| true
|
8534a790d4d8178f1fc185c13233fcf1b1eb9fd2
|
Python
|
zyzisyz/LeetCode
|
/py/0034.py
|
UTF-8
| 377
| 3.171875
| 3
|
[] |
no_license
|
from typing import List


class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
try:
length = len(nums)
first = nums.index(target)
second = first
while second+1 < length and nums[second+1] == target:
second = second + 1
return [first, second]
except:
return [-1, -1]
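# Hedged usage sketch (illustration only):
if __name__ == "__main__":
    s = Solution()
    print(s.searchRange([5, 7, 7, 8, 8, 10], 8))  # -> [3, 4]
    print(s.searchRange([5, 7, 7, 8, 8, 10], 6))  # -> [-1, -1]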
| true
|
f7d8b61754a08da4f834782553ea3e7557ba09ff
|
Python
|
gssgch/gssgML
|
/com.ch/python/pythonCourse/chapter7/QueneTest.py
|
UTF-8
| 1,173
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/python
# encoding:utf-8
# implement a queue in Python
class queue:
def __init__(self, size=20):
self.size = size
self.queue = []
self.end = -1
def setsize(self, size):
self.size = size
def In(self, n):
if self.end < self.size - 1:
self.queue.append(n)
self.end = self.end + 1
else:
raise "Queue is full"
def Out(self):
if self.end != -1:
ret = self.queue[0]
self.queue = self.queue[1:]
self.end = self.end - 1
return ret
else:
raise "Queue is empty"
def End(self):
return self.end
def empty(self):
self.queue = []
self.end = -1
def getsize(self):
return self.end + 1
if __name__ == "__main__":
q = queue()
for i in xrange(15):
q.In(i)
print q.getsize()
for i in xrange(15):
print q.Out(),
print
q.empty()
print q.getsize()
q.setsize(100)
for i in xrange(30):
try:
q.In(i)
except:
print "Error"
else:
print str(i) + " OK"
| true
|
3a9f801a0f05ba1be1d0ebee0f333879694986e1
|
Python
|
giddy123/raspberry-pi-robotics
|
/servocontrol.py
|
UTF-8
| 2,544
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# Servo Control for Raspberry Pi with Adafruit servo control board
# Created by Dmitriy Buslovich
#-------------------------------------------------------------------------------
#### Imports ####
# Download this file from github: https://github.com/adafruit/Adafruit-Raspberry-Pi-Python-Code/blob/master/Adafruit_PWM_Servo_Driver/Adafruit_PWM_Servo_Driver.py
import time
from Adafruit_PWM_Servo_Driver import PWM
#### Constants ####
#-------------------------------------------------------------------------------
#### Servo Initialization
#I2C address is 0x40
pwm = PWM(0x40)
#Servo frequency 50Hz
pwm.setPWMFreq(50)
# these values cannot be smaller than 104 and more than 521; otherwise your servos may be damaged. Use at your own risk!
_PAN_SERVO_CHANNEL=0
_TILT_SERVO_CHANNEL=1
_PAN_SERVO_LEFT=200
_PAN_SERVO_RIGHT=520
_PAN_SERVO_CENTER=225
_TILT_SERVO_CHANNEL=1
_TILT_SERVO_UP=200
_TILT_SERVO_DOWN=520
_TILT_SERVO_CENTER=225
#### Objects ####
#-------------------------------------------------------------------------------
class ServoControl(object):
# Make sure there is only one instance of ServoControl
_instances=[]
# Initialize the object
def __init__(self):
        if len(self._instances) >= 1:  # an instance already exists
print "ERROR: One instance of ServoControl is running already."
exit(1)
self._instances.append(self)
#-------------------------------------------------------------------------------
# "Look" left
def panleft(self):
pwm.setPWM(_PAN_SERVO_CHANNEL, 0, _PAN_SERVO_LEFT)
#-------------------------------------------------------------------------------
# "Look" right
def panright(self):
pwm.setPWM(_PAN_SERVO_CHANNEL, 0, _PAN_SERVO_RIGHT)
#-------------------------------------------------------------------------------
# Position pan servo in the middle
def pancenter(self):
        pwm.setPWM(_PAN_SERVO_CHANNEL, 0, _PAN_SERVO_CENTER)
#-------------------------------------------------------------------------------
# "Look" up
def tiltup(self):
pwm.setPWM(_TILT_SERVO_CHANNEL, 0, _TILT_SERVO_UP)
#-------------------------------------------------------------------------------
# "Look" down
def tiltdown(self):
pwm.setPWM(_TILT_SERVO_CHANNEL, 0, _TILT_SERVO_DOWN)
#-------------------------------------------------------------------------------
# Position tilt servo in the middle
def tiltcenter(self):
pwm.setPWM(_TILT_SERVO_CHANNEL, 0, _TILT_SERVO_CENTER)
| true
|
b55d5f86cdec95574fa521a4bccac3a56774eabe
|
Python
|
8Michelle/edward
|
/handlers/tasks.py
|
UTF-8
| 9,189
| 2.5625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""This module contains handlers for tasks.
Task interface supports starting a new task and ending the current one,
starting a new session, and checking today's working time, the current date and all data.
"""
from aiogram import types
import datetime
import asyncio
from core import dp, States, bot, KEYBOARDS
from tools import tasks
from tools.keyboard import make_keyboard
@dp.message_handler(lambda message: message.text == "Начать дело",
state=States.TASKS)
async def begin_task_handler(message):
"""Handle the start of a new task.
Switches the state from TASKS to BEGIN_TASK.
Creates begin task keyboard. Works with free task mode.
"""
user = message.chat.id
await dp.current_state(user=user).set_state(States.BEGIN_TASK)
date = tasks.check_date(user)
buttons = list(tasks.get_tasks_list(user))
buttons.append("Назад")
await message.answer(f"Текущая дата {date.replace('-', '.')}. Чем займетесь?",
reply_markup=make_keyboard(buttons))
@dp.message_handler(lambda message: message.text == "Назад",
state=States.BEGIN_TASK)
async def begin_task_revert_handler(message):
"""Handle a return to the tasks from the beginning a new task.
Switches the state from BEGIN_TASK to TASKS.
Creates free task keyboard.
"""
await dp.current_state(user=message.chat.id).set_state(States.TASKS)
await message.answer("Сейчас вы ничем не заняты",
reply_markup=make_keyboard(KEYBOARDS["free_tasks"]))
@dp.message_handler(state=States.BEGIN_TASK)
async def begin_task_start_handler(message):
"""Handle the beginning of the new task with ``message`` name.
Switches the state from BEGIN_TASK to TASKS.
Creates busy task keyboard. Starts mode checking loop.
"""
user = message.chat.id
task_id = tasks.start_task(message.text, user)
await dp.current_state(user=user).set_state(States.TASKS)
message_text = f"Сейчас вы заняты: {message.text}"
await message.answer(message_text,
reply_markup=make_keyboard(KEYBOARDS["busy_tasks"]))
while True:
await asyncio.sleep(30 * 60)
busy = tasks.check_busy(user, task_id)
if busy == 1:
await message.answer("Вы все еще заняты?")
elif busy == 0:
print("end of task")
break
# TODO: add exception for task end error - to logger
else:
print("TASK END ERROR")
@dp.message_handler(lambda message: message.text == "Завершить",
state=States.TASKS)
async def end_task_handler(message):
"""Handle the end of the current task.
Corresponds to the TASKS state.
Creates free task keyboard. Works with busy task mode.
"""
tasks.end_task(message.chat.id)
await message.answer("Сейчас вы ничем не заняты.",
reply_markup=make_keyboard(KEYBOARDS["free_tasks"]))
@dp.message_handler(lambda message: message.text == "Добавить дело",
state=States.TASKS)
async def custom_task_handler(message):
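    """Handle a request to add a custom task.

    Switches the state from TASKS to ADD_CUSTOM_TASK.
    Creates a back-only keyboard. Works with free task mode.
    """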
user = message.chat.id
await dp.current_state(user=user).set_state(States.ADD_CUSTOM_TASK)
await message.answer("Что и в какую дату добавить?",
reply_markup=make_keyboard(["Назад"]))
@dp.message_handler(lambda message: message.text == "Назад",
state=States.ADD_CUSTOM_TASK)
async def custom_task_revert_handler(message):
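    """Handle a return to the tasks from adding a custom task.

    Switches the state from ADD_CUSTOM_TASK to TASKS.
    Creates free task keyboard.
    """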
user = message.chat.id
await dp.current_state(user=user).set_state(States.TASKS)
await message.answer("Сейчас вы ничем не заняты",
reply_markup=make_keyboard(KEYBOARDS["free_tasks"]))
@dp.message_handler(state=States.ADD_CUSTOM_TASK)
async def select_custom_task_handler(message):
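    """Handle the custom task description.

    Expects a message of the form "<task> <date> <hours>".
    Switches the state from ADD_CUSTOM_TASK to TASKS.
    Creates free task keyboard.
    """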
user = message.chat.id
await dp.current_state(user=user).set_state(States.TASKS)
task, date, time_delta = message.text.split()
tasks.add_custom_task(user, task, date.replace('.', '-'), float(time_delta) * 3600)
await message.answer(f"Добавлено {task} продолжительностью {time_delta} {date}",
reply_markup=make_keyboard(KEYBOARDS["free_tasks"]))
@dp.message_handler(lambda message: message.text == "Начать новую сессию",
state=States.TASKS)
async def new_session_handler(message):
"""Handle a new session starting.
Switches the state from TASKS to NEW_SESSION.
Creates new session keyboard. Works with free task mode.
"""
user = message.chat.id
await dp.current_state(user=user).set_state(States.NEW_SESSION)
date = tasks.check_date(user=user)
new_date = datetime.date.today().isoformat()
await message.answer(f"Текущая дата {date.replace('-', '.')}. Новая дата {new_date.replace('-', '.')}?",
reply_markup=make_keyboard(KEYBOARDS["new_session"]))
@dp.message_handler(lambda message: message.text == "Все верно",
state=States.NEW_SESSION)
async def new_session_submit_handler(message):
"""Handle session date confirmation.
Switches the state from NEW_SESSION to TASKS.
Creates free task keyboard.
"""
user = message.chat.id
await dp.current_state(user=user).set_state(States.TASKS)
date = datetime.date.today().isoformat()
tasks.new_session(user=user, date=date)
await message.answer("Начата новая сессия. Сейчас вы ничем не заняты.",
reply_markup=make_keyboard(KEYBOARDS["free_tasks"]))
@dp.message_handler(lambda message: message.text == "Назад",
state=States.NEW_SESSION)
async def new_session_revert_handler(message):
"""Handle a return to the tasks from the starting a new session.
Switches the state from NEW_SESSION to TASKS.
Creates free task keyboard.
"""
await dp.current_state(user=message.chat.id).set_state(States.TASKS)
await message.answer("Сейчас вы ничем не заняты.",
reply_markup=make_keyboard(KEYBOARDS["free_tasks"]))
@dp.message_handler(state=States.NEW_SESSION)
async def new_session_date_handler(message):
"""Handle the start of a new session with a custom date.
Switches the state from NEW_SESSION to TASKS.
Creates free tasks keyboard.
"""
user = message.chat.id
await dp.current_state(user=user).set_state(States.TASKS)
tasks.new_session(user=user, date=message.text.replace('.', '-'))
await message.answer(f"Начата новая сессия {message.text}. Сейчас вы ничем не заняты.",
reply_markup=make_keyboard(KEYBOARDS["free_tasks"]))
@dp.message_handler(lambda message: message.text == "Какое сегодня число?",
state=States.TASKS)
async def check_date_handler(message):
"""Answer a question about the current date.
Corresponds to the TASKS state.
Works with free mode.
"""
user = message.chat.id
date = tasks.check_date(user=user)
message_text = f"Активная дата: {date.replace('-', '.')}"
await message.answer(message_text)
@dp.message_handler(lambda message: message.text == "Получить данные",
state=States.TASKS)
async def download_tasks_handler(message):
"""Handle a data request.
Corresponds to the TASKS state.
Sends a .xlsx data file. Works with free mode.
"""
user = message.chat.id
if tasks.prepare_tasks_doc(user) == 0:
with open(f'{user}_tasks.xlsx', 'rb') as document:
await bot.send_document(chat_id=user, document=document)
else:
await message.answer("Судя по моим данным, у вас еще не было активности.")
@dp.message_handler(lambda message: message.text == "Сколько я сегодня поработал?",
state=States.TASKS)
async def time_today_handler(message):
"""Handle a question of today working time.
Corresponds to the TASKS state.
Sends text message with today working time. Works with free mode.
"""
user = message.chat.id
time_today = tasks.get_working_time(user)
await message.answer(f"Сегодня вы проработали {time_today[0]} часов {time_today[1]} минут")
@dp.message_handler(lambda message: message.text == "Назад",
state=States.TASKS)
async def tasks_revert_handler(message):
"""Handle a return to the skills from the tasks.
Switches the state from TASKS to SKILLS.
Creates skill keyboard. Works with both modes.
"""
await dp.current_state(user=message.chat.id).set_state(States.SKILLS)
await message.answer("Выберите функцию",
reply_markup=make_keyboard(KEYBOARDS["skills"]))
| true
|
8362128b5b0b981487a8cceeb829d0f03caa0336
|
Python
|
Hemie143/automatetheboringstuff
|
/ch17_keeping_time/ex04_convert.py
|
UTF-8
| 828
| 3.375
| 3
|
[] |
no_license
|
import datetime
oct21st = datetime.datetime(2019, 10, 21, 16, 29, 0)
print(oct21st.strftime('%Y/%m/%d %H:%M:%S')) # '2019/10/21 16:29:00'
print(oct21st.strftime('%I:%M %p')) # '04:29 PM'
print(oct21st.strftime("%B of '%y")) # "October of '19"
print(datetime.datetime.strptime('October 21, 2019', '%B %d, %Y')) # datetime.datetime(2019, 10, 21, 0, 0)
print(datetime.datetime.strptime('2019/10/21 16:29:00', '%Y/%m/%d %H:%M:%S')) # datetime.datetime(2019, 10, 21, 16, 29)
print(datetime.datetime.strptime("October of '19", "%B of '%y")) # datetime.datetime(2019, 10, 1, 0, 0)
print(datetime.datetime.strptime("November of '63", "%B of '%y")) # datetime.datetime(2063, 11, 1, 0, 0)
| true
|
0a7f75292678b205f770287a8e98dfff8553d4b0
|
Python
|
walid-shalaby/knowledge-based-dimensionality-reduction
|
/code/python/ng20/ng20_vocabulary_builder.py
|
UTF-8
| 22,250
| 3.015625
| 3
|
[] |
no_license
|
## 20ng Vocabulary Builder
# Build stemmed and lemmatized vocabularies (unigrams + bigrams) from the 20ng corpus and store them in the DB
def build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf):
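    """Tokenize the corpus with CountVectorizer, drop terms whose total
    frequency is below min_tf, and return the resulting vocabulary dict."""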
from sklearn.feature_extraction.text import CountVectorizer
from ng20_globals import max_df
# tokenize text
vectorizer = CountVectorizer(max_df=max_df,min_df=min_df,tokenizer=tokenizer,ngram_range=(1,max_ngram_size),stop_words=stop_words)
corpus_vectors = vectorizer.fit_transform(corpus)
# apply minimum term frequency threshold
term_freq = corpus_vectors.sum(axis=0) # sum on culomns to obtain term frequencies
terms_to_remove = []
for k,v in vectorizer.vocabulary_.iteritems():
if(term_freq[0,vectorizer.vocabulary_[k]]<min_tf):
terms_to_remove.append(k)
print 'removing ({0}) terms under tf threshold'.format(len(terms_to_remove))
for k in terms_to_remove:
del vectorizer.vocabulary_[k]
return vectorizer.vocabulary_
def save_vocabulary(vocabulary,tbl_name):
# save vocabulary in DB for future use
import sqlite3 as sqlitedb
from clef_globals import db_path
# in case using min_df as fractions
tbl_name = tbl_name.replace('.','_')
l = []
l.extend([i] for i in vocabulary)
con = sqlitedb.connect(db_path)
with con:
con.execute('drop table if exists {0}'.format(tbl_name))
con.execute('create table {0}(term text)'.format(tbl_name))
con.executemany('insert into {0}(term) values(?)'.format(tbl_name),l)
# build raw unigrams vocabulary
def build_all_unigrams_vocabulary(corpus):
#from ng20_globals import *
tokenizer = None
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,1,1)
# save to DB
tbl_name = 'ng20_raw_unigrams'.format()
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build unigrams vocabulary
def build_raw_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
tokenizer = None
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized unigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_lemmatized_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import RawLemmaTokenizer
tokenizer = RawLemmaTokenizer()
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_lemmas_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# In[ ]:
# build lemmatized test unigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_lemmatized_test_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import RawLemmaTokenizer
tokenizer = RawLemmaTokenizer()
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_lemmas_test_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed test unigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_stemmed_test_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import RawStemmingTokenizer
tokenizer = RawStemmingTokenizer()
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_stems_test_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized all unigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_lemmatized_all_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import RawLemmaTokenizer
tokenizer = RawLemmaTokenizer()
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_lemmas_all_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed all unigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_stemmed_all_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import RawStemmingTokenizer
tokenizer = RawStemmingTokenizer()
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_stems_all_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized unigrams vocabulary
# uses alphabetic tokenizer
def build_lemmatized_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import LemmaTokenizer
tokenizer = LemmaTokenizer()
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_lemmas_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build unigrams stopwords vocabulary
def build_unigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
tokenizer = None
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_unigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build bigrams stopwords vocabulary
def build_bigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
tokenizer = None
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_bigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build all unigrams stopwords vocabulary
def build_all_unigrams_stopwords_vocabulary(corpus,stop_words,):
tokenizer = None
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,1,1)
# save to DB
tbl_name = 'ng20_all_unigrams_stopwords'.format()
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build raw unigrams stopwords vocabulary
def build_raw_unigrams_stopwords_vocabulary(corpus,stop_words):
from ng20_globals import min_df,min_tf
tokenizer = None
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_unigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build raw bigrams stopwords vocabulary
def build_raw_bigrams_stopwords_vocabulary(corpus,stop_words):
from ng20_globals import min_df,min_tf
tokenizer = None
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_bigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build all bigrams stopwords vocabulary
def build_all_bigrams_stopwords_vocabulary(corpus,stop_words,):
#from ng20_globals import *
tokenizer = None
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,1,1)
# save to DB
tbl_name = 'ng20_all_bigrams_stopwords'.format()
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized unigrams stopwords vocabulary
# uses alphanumeric tokenizer
def build_raw_lemmatized_unigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import RawLemmaTokenizer
tokenizer = RawLemmaTokenizer()
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_lemmas_unigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized unigrams stopwords vocabulary
# uses alphabetic tokenizer
def build_lemmatized_unigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import LemmaTokenizer
tokenizer = LemmaTokenizer()
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_lemmas_unigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build raw bigrams vocabulary
def build_all_bigrams_vocabulary(corpus):
tokenizer = None
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,1,1)
# save to DB
tbl_name = 'ng20_all_bigrams'.format()
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized bigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_lemmatized_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import RawLemmaTokenizer
tokenizer = RawLemmaTokenizer()
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_lemmas_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized all bigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_lemmatized_all_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import RawLemmaTokenizer
tokenizer = RawLemmaTokenizer()
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_lemmas_all_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed all bigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_stemmed_all_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import RawStemmingTokenizer
tokenizer = RawStemmingTokenizer()
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_stems_all_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized test bigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_lemmatized_test_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import RawLemmaTokenizer
tokenizer = RawLemmaTokenizer()
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_lemmas_test_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed test bigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_stemmed_test_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import RawStemmingTokenizer
tokenizer = RawStemmingTokenizer()
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_stems_test_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build bigrams vocabulary
def build_raw_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
tokenizer = None
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized bigrams vocabulary
# uses alphabetic tokenizer
def build_lemmatized_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import LemmaTokenizer
tokenizer = LemmaTokenizer()
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_lemmas_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized bigrams stopwords vocabulary
# uses alphanumeric tokenizer
def build_raw_lemmatized_bigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import RawLemmaTokenizer
tokenizer = RawLemmaTokenizer()
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_lemmas_bigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build lemmatized bigrams stopwords vocabulary
# uses alphabetic tokenizer
def build_lemmatized_bigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
from commons.lemmatizing_tokenizer import LemmaTokenizer
tokenizer = LemmaTokenizer()
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_lemmas_bigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed unigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_stemmed_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import RawStemmingTokenizer
tokenizer = RawStemmingTokenizer()
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_stems_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed unigrams vocabulary
# uses alphabetic tokenizer
def build_stemmed_unigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import StemmingTokenizer
tokenizer = StemmingTokenizer()
stop_words = {}
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_stems_unigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed unigrams stopwords vocabulary
# uses alphanumeric tokenizer
def build_raw_stemmed_unigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import RawStemmingTokenizer
tokenizer = RawStemmingTokenizer()
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_stems_unigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed unigrams stopwords vocabulary
# uses alphabetic tokenizer
def build_stemmed_unigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import StemmingTokenizer
tokenizer = StemmingTokenizer()
max_ngram_size = 1
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_stems_unigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed bigrams vocabulary
# uses alphanumeric tokenizer
def build_raw_stemmed_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import RawStemmingTokenizer
tokenizer = RawStemmingTokenizer()
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_stems_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed bigrams vocabulary
# uses alphabetic tokenizer
def build_stemmed_bigrams_vocabulary(corpus):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import StemmingTokenizer
tokenizer = StemmingTokenizer()
stop_words = {}
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_stems_bigrams_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed bigrams stopwords vocabulary
# uses alphanumeric tokenizer
def build_raw_stemmed_bigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import RawStemmingTokenizer
tokenizer = RawStemmingTokenizer()
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_raw_stems_bigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
# build stemmed bigrams stopwords vocabulary
# uses alphabetic tokenizer
def build_stemmed_bigrams_stopwords_vocabulary(corpus,stop_words,):
from ng20_globals import min_tf,min_df
from commons.stemming_tokenizer import StemmingTokenizer
tokenizer = StemmingTokenizer()
max_ngram_size = 2
vocabulary = build_vocabulary(corpus,tokenizer,stop_words,max_ngram_size,min_df,min_tf)
# save to DB
tbl_name = 'ng20_stems_bigrams_stopwords_df{0}_tf{1}'.format(min_df,min_tf)
save_vocabulary(vocabulary,tbl_name)
print 'done '+tbl_name
def build():
from ng20_corpus_loader import load_corpus
from commons.stopwords_loader import load_inquiry_stopwords
# load 20ng docs from DB
corpus_all = load_corpus('both')
corpus = corpus_all['corpus']
# build vocabulary without stopwords removal
build_raw_unigrams_vocabulary(corpus)
#build_all_unigrams_vocabulary(corpus)
#build_lemmatized_unigrams_vocabulary(corpus)
build_raw_lemmatized_unigrams_vocabulary(corpus)
#build_lemmatized_bigrams_vocabulary(corpus)
build_raw_lemmatized_bigrams_vocabulary(corpus)
build_raw_bigrams_vocabulary(corpus)
#build_all_bigrams_vocabulary(corpus)
#build_stemmed_unigrams_vocabulary(corpus)
build_raw_stemmed_unigrams_vocabulary(corpus)
#build_stemmed_bigrams_vocabulary(corpus)
build_raw_stemmed_bigrams_vocabulary(corpus)
# load inquiry stopwords list
stop_words = load_inquiry_stopwords()
# build vocabulary with stopwords removal
#build_unigrams_stopwords_vocabulary(corpus,stop_words)
build_raw_unigrams_stopwords_vocabulary(corpus,stop_words)
#build_bigrams_stopwords_vocabulary(corpus,stop_words)
build_raw_bigrams_stopwords_vocabulary(corpus,stop_words)
#build_lemmatized_unigrams_stopwords_vocabulary(corpus,stop_words)
build_raw_lemmatized_unigrams_stopwords_vocabulary(corpus,stop_words)
#build_lemmatized_bigrams_stopwords_vocabulary(corpus,stop_words)
build_raw_lemmatized_bigrams_stopwords_vocabulary(corpus,stop_words)
#build_stemmed_unigrams_stopwords_vocabulary(corpus,stop_words)
build_raw_stemmed_unigrams_stopwords_vocabulary(corpus,stop_words)
#build_stemmed_bigrams_stopwords_vocabulary(corpus,stop_words)
build_raw_stemmed_bigrams_stopwords_vocabulary(corpus,stop_words)
#corpus_test = load_corpus(,'test')
#build_raw_lemmatized_test_unigrams_vocabulary(corpus_test['corpus'],)
#build_raw_stemmed_test_unigrams_vocabulary(corpus_test['corpus'],)
#build_raw_lemmatized_test_bigrams_vocabulary(corpus_test['corpus'],)
#build_raw_stemmed_test_bigrams_vocabulary(corpus_test['corpus'],)
#corpus = load_corpus(,'both')
#build_raw_lemmatized_all_unigrams_vocabulary(corpus['corpus'],)
#build_raw_stemmed_all_unigrams_vocabulary(corpus['corpus'],)
#build_raw_lemmatized_all_bigrams_vocabulary(corpus['corpus'],)
#build_raw_stemmed_all_bigrams_vocabulary(corpus['corpus'],)
if __name__ == "__main__" and __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
# build 20ng docs vocabulary
build()
print 'done!'
| true
|
dff71182388a15d57244452aebd9657812636943
|
Python
|
NTHU-CS50-2020/week6
|
/107062104/readability.py
|
UTF-8
| 635
| 3.734375
| 4
|
[] |
no_license
|
from cs50 import get_string
def main():
text = get_string('Text: ')
print(GradeCal(text))
def GradeCal(text):
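    # Coleman-Liau index: 0.0588 * L - 0.296 * S - 15.8,
    # where L = letters per 100 words and S = sentences per 100 words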
letters = 0
words = 1 if text else 0
sentences = 0
for letter in text:
letters += 1 if letter.isalpha() else 0
words += 1 if letter.isspace() else 0
sentences += 1 if letter == '.' or letter == '!' or letter == '?' else 0
index = round(0.0588 * (letters/words*100) - 0.296 * (sentences/words*100) - 15.8, 0)
if index < 1:
return 'Before Grade 1'
elif index > 16:
return 'Grade 16+'
else:
return 'Grade ' + str(int(index))
main()
| true
|
a3ab7b94874b2ab2fe04f47b7304eccfb3ce50ae
|
Python
|
MrCat9/Python_Note
|
/55_proxy_ip.py
|
UTF-8
| 1,031
| 2.9375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# Used together with https://github.com/qiyeboy/IPProxyPool
# The proxy IP service must be started first
import requests
import json
from random import sample
def get_proxy_ip(url_str):
"""
    Fetch a proxy IP.
    Returns a tuple (ip, port).
    :param url_str: proxy IP API endpoint
    :return: (ip, port) <class 'tuple'>
"""
r = requests.get(url_str)
    ip_list = json.loads(r.text)  # several proxy IPs are returned
    ip_port = sample(ip_list, 1)  # randomly pick 1 of them
ip = ip_port[0][0]
port = str(ip_port[0][1])
return ip, port
if __name__ == '__main__':
    # Get a proxy IP
    url_str = 'http://127.0.0.1:8000/?type=0&country=国内&count=10'  # type 0: elite (high anonymity) 1: anonymous 2: transparent
ip, port = get_proxy_ip(url_str)
# ip, port = ('114.55.236.62', '3128')
    # Use the proxy IP
proxies = {
'http': 'http://%s:%s' % (ip, port),
'https': 'http://%s:%s' % (ip, port)
}
r = requests.get('http://httpbin.org/ip', proxies=proxies)
print(r.text)
| true
|
66581051b01da95e0f0dcfe1eb4f09388060b07c
|
Python
|
qiupinghe/BTH-TE2502-MasterThesis
|
/normality_tests.py
|
UTF-8
| 53,562
| 2.546875
| 3
|
[] |
no_license
|
from itertools import islice
import statistics
import numpy
import math
import PyGnuplot as gp
from scipy.stats import sem, t
from scipy import mean, median, stats
import sys
from numpy.random import seed
from numpy.random import randn
from numpy import mean
from numpy import std
from matplotlib import pyplot
from statsmodels.graphics.gofplots import qqplot
from numpy import array
from scipy.stats import shapiro
from scipy.stats import normaltest
def read_entries_proc_status(file):
with open(file, 'r') as f:
VmHWM = list()
VmRSS = list()
roundVmHWM = list()
roundVmRSS = list()
for line in f:
l = line.split()
if(l[0] == "Round:"):
                if(len(roundVmHWM) != 0):
VmHWM.append(roundVmHWM[:])
                if(len(roundVmRSS) != 0):
VmRSS.append(roundVmRSS[:])
del roundVmHWM[:]
del roundVmRSS[:]
elif(l[0] == "VmHWM:"):
roundVmHWM.append(int(l[1]))
elif(l[0] == "VmRSS:"):
roundVmRSS.append(int(l[1]))
VmHWM.append(roundVmHWM[:])
VmRSS.append(roundVmRSS[:])
return VmHWM, VmRSS
def check_differance_rss(data):
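    """Return True if every round holds identical measurements, False otherwise."""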
for l in data:
if(len(set(l)) != 1 ):
#print("All elements are not the same")
return False
#print("All elements are equal")
return True
def process_data_rss(peakRSS, RSS):
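    """Summarize per-round RSS data: max/min of peak and regular RSS, mean of
    the round means, median of the round medians, standard deviation and 95% CI."""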
#print("Max peak RSS of all rounds: ", max(max(peakRSS, key=max)))
#print("Min peak RSS of all rounds: ", min(min(peakRSS, key=min)))
#print("Max RSS of all rounds: ", max(max(RSS, key=max)))
#print("Min RSS of all rounds: ", min(min(RSS, key=min)))
mean_list = list()
for l in RSS:
mean_list.append(mean(l))
mean_val = mean(mean_list)
#print("Mean RSS", mean(mean_val))
median_list = list()
for l in RSS:
median_list.append(median(l))
median_val = median(median_list)
#print("Median RSS", median_val)
standard_dev = stats.tstd(mean_list)
#print("Standard dev", standard_dev)
c_interval = confidence_interval(mean_list)
#print("Conf_low", c_interval[0])
#print("Conf_high", c_interval[1])
summary_dict = {
"max_peak_rss": max(max(peakRSS, key=max)),
"min_peak_rss": min(min(peakRSS, key=min)),
"max_rss": max(max(RSS, key=max)),
"min_rss": min(min(RSS, key=min)),
"mean": mean_val,
"median": median_val,
"std_dev": standard_dev,
"conf_low": c_interval[0],
"conf_high": c_interval[1]
}
return summary_dict
def process_proc_status_output(file_path, sample_size=100):
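    """Read /proc status samples, check that peak RSS and RSS are constant within
    each round, and return one RSS value per round (truncated to sample_size)."""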
peakRSS, RSS = read_entries_proc_status(file_path)
if (check_differance_rss(peakRSS) and check_differance_rss(RSS)):
# All measurements (RSS and peak) are equal in each test.
peak_list = list()
for l in peakRSS:
peak_list.append(l[0])
rss_list = list()
for l in RSS:
rss_list.append(l[0])
if (len([i for i, j in zip(peak_list, rss_list) if i != j]) == 0):
print("Peak RSS and RSS measurements are all equal (for each round) in "\
+ file_path + ". Returning new list. Setting data length to: " + str(len(rss_list[0:sample_size])))
return rss_list[0:sample_size]
else:
print("Peak RSS and RSS is not the same in all rounds.")
else:
print("Measurements are not the same in all rounds.")
def process_usrbintime_output(file_path, sample_size=100):
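    """Read one maximum-RSS value per line and return the first sample_size values."""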
maxRSS = list()
with open(file_path, 'r') as f:
for line in f:
maxRSS.append(int(line))
print("Found " + str(len(maxRSS)) + " outputs in " + file_path + \
". Setting list to length " + str(len(maxRSS[0:sample_size])))
return maxRSS[0:sample_size]
def process_usrbintime_output_special(file_path, sample_size=100):
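    """Read maximum-RSS values and average each consecutive pair of lines
    before truncating the result to sample_size."""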
maxRSS = list()
with open(file_path, 'r') as f:
for line in f:
maxRSS.append(int(line))
max_rss_combined = list()
for i in range(0, len(maxRSS), 2):
max_rss_combined.append((maxRSS[i]+maxRSS[i+1])/2)
print("Found " + str(len(maxRSS)) + " outputs in " + file_path + \
". Combining outputs into list of length " + str(len(max_rss_combined)) +\
". Setting list to length " + str(len(max_rss_combined[0:sample_size])))
return max_rss_combined[0:sample_size]
def read_entries(file, no_lines_per_entry):
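    """Split the file into entries of no_lines_per_entry stripped lines each."""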
list_entries = []
with open(file, 'r') as f:
while True:
entry = [x.strip() for x in islice(f, no_lines_per_entry)]
if(entry == []):
break
list_entries.append(entry)
#print(len(list_entries))
#print(len(list_entries[20:]))
#print(list_entries[20:])
return list_entries
def gather_cpu_clock(list_entries, column):
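    """Parse the CPU clock figure from line `column` of each perf stat entry."""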
cpu_time_list = []
for entry in list_entries:
cpu_clock = float(entry[column].split()[0].replace(',', '.'))
cpu_time_list.append(cpu_clock)
return cpu_time_list
def gather_real_time(list_entries, column):
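    """Parse the elapsed (wall clock) time from each perf stat entry and convert it to milliseconds."""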
real_time_list = []
for entry in list_entries:
real_time = float(entry[column].split()[0].replace(',', '.'))
real_time_list.append(real_time)
real_time_list = [x*1000 for x in real_time_list]
return real_time_list
def gather_cpu_cycles(list_entries, column):
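    """Parse the cycle count from line `column` of each entry, concatenating the
    digit groups that appear before the word 'cycles'."""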
cpu_cycle_list = []
for entry in list_entries:
cycles = ''
for x in entry[column].split():
if(x == 'cycles'):
break
cycles += x
cpu_cycle_list.append(int(cycles))
return cpu_cycle_list
def confidence_interval(list, interval = 0.95):
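    """Normal-approximation confidence interval around the mean, based on the
    sample standard deviation and a z critical value."""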
mean_val = mean(list)
n = len(list)
stdev = stats.tstd(list)
z = stats.norm.ppf((interval + 1)/2)
#z = stats.t.ppf((interval + 1)/2, n)
lower_bound = mean_val - z * stdev / math.sqrt(n)
upper_bound = mean_val + z *stdev / math.sqrt(n)
return lower_bound, upper_bound
def confidence_interval_t_dist(list, interval = 0.95):
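    """Confidence interval around the mean using the t distribution and the
    standard error of the mean."""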
data = list
n = len(data)
m = mean(data)
std_err = sem(data)
h = std_err * t.ppf((1 + interval) / 2, n - 1)
return m-h, m+h
def add_statistic_values(in_dict):
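    """Augment in_dict with mean, median, max, min, standard deviation and
    95% confidence bounds computed from in_dict["data"]."""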
data = in_dict["data"]
in_dict["mean"] = mean(data)
in_dict["median"] = median(data)
in_dict["max"] = max(data)
in_dict["min"] = min(data)
in_dict["standard_dev"] = stats.tstd(data)
c_interval = confidence_interval(data)
in_dict["conf_low"] = c_interval[0]
in_dict["conf_high"] = c_interval[1]
return in_dict
def process_perf_stat_output(file_path):
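    """Parse 10-line perf stat entries, discard the first 30 entries, and
    return real-time, CPU-time and CPU-cycle statistics."""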
list_entries = read_entries(file_path, 10)
#for entry in list_entries:
#print(entry)
print(str(len(list_entries)) + " perf stat outputs found in file " + file_path + "." \
+ " Removing 30 first entries from list. New length: " + str(len(list_entries[30:])))
list_entries = list_entries[30:]
real_time_values = {}
cpu_time_values = {}
cpu_cycles_values = {}
real_time_values["data"] = gather_real_time(list_entries, 8)
cpu_time_values["data"] = gather_cpu_clock(list_entries, 5)
cpu_cycles_values["data"] = gather_cpu_cycles(list_entries, 6)
add_statistic_values(real_time_values)
add_statistic_values(cpu_time_values)
add_statistic_values(cpu_cycles_values)
return real_time_values, cpu_time_values, cpu_cycles_values
def create_normality_graphs(save_file, title, data):
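    """Save a 50-bin histogram and a QQ plot of data under save_file with the given title."""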
pyplot.hist(data, bins=50)
title_obj = pyplot.title(title)
pyplot.savefig(save_file + '_histogram')
pyplot.close()
qqplot(array(data), line='s')
title_obj = pyplot.title(title)
pyplot.savefig(save_file + '_qq')
pyplot.close()
if (len(sys.argv) < 2):
print("No argument input")
sys.exit()
# generate graphs for real/cpu time, cpu cycles for sm3 and sha256
if (sys.argv[1] == 'hash'):
#openssl
sm3_real_time, sm3_cpu_time, sm3_cpu_cycles = process_perf_stat_output("output/hash/sm3_perf_o")
create_normality_graphs('normality_graphs/hash/openssl/sm3_real_time', 'SM3 Real Time (OpenSSL)', sm3_real_time["data"])
stat, p = shapiro(sm3_real_time["data"])
print(stat,p)
create_normality_graphs('normality_graphs/hash/openssl/sm3_cpu_time', 'SM3 CPU Time (OpenSSL)', sm3_cpu_time["data"])
create_normality_graphs('normality_graphs/hash/openssl/sm3_cpu_cycles', 'SM3 CPU Cycles (OpenSSL)', sm3_cpu_cycles["data"])
sha_real_time, sha_cpu_time, sha_cpu_cycles = process_perf_stat_output("output/hash/sha256_perf_o")
create_normality_graphs('normality_graphs/hash/openssl/sha_real_time', 'SHA256 Real Time (OpenSSL)', sha_real_time["data"])
stat, p = shapiro(sha_real_time["data"])
print(stat,p)
create_normality_graphs('normality_graphs/hash/openssl/sha_cpu_time', 'SHA256 CPU Time (OpenSSL)', sha_cpu_time["data"])
create_normality_graphs('normality_graphs/hash/openssl/sha_cpu_cycles', 'SHA256 CPU Cycles (OpenSSL)', sha_cpu_cycles["data"])
#botan
botan_sm3_real_time, botan_sm3_cpu_time, botan_sm3_cpu_cycles = process_perf_stat_output("output/hash/sm3_perf")
create_normality_graphs('normality_graphs/hash/botan/sm3_real_time', 'SM3 Real Time (Botan)', botan_sm3_real_time["data"])
create_normality_graphs('normality_graphs/hash/botan/sm3_cpu_time', 'SM3 CPU Time (Botan)', botan_sm3_cpu_time["data"])
create_normality_graphs('normality_graphs/hash/botan/sm3_cpu_cycles', 'SM3 CPU Cycles (Botan)', botan_sm3_cpu_cycles["data"])
botan_sha_real_time, botan_sha_cpu_time, botan_sha_cpu_cycles = process_perf_stat_output("output/hash/sha256_perf")
create_normality_graphs('normality_graphs/hash/botan/sha_real_time', 'SHA256 Real Time (Botan)', botan_sha_real_time["data"])
create_normality_graphs('normality_graphs/hash/botan/sha_cpu_time', 'SHA256 CPU Time (Botan)', botan_sha_cpu_time["data"])
create_normality_graphs('normality_graphs/hash/botan/sha_cpu_cycles', 'SHA256 CPU Cycles (Botan)', botan_sha_cpu_cycles["data"])
### RSS ###
#OpenSSL
RSS_sha = process_proc_status_output("output/hash/sha256_rss_o")
create_normality_graphs('normality_graphs/hash/openssl/sha_rss', 'SHA256 RSS (OpenSSL)', RSS_sha)
RSS_sm3 = process_proc_status_output("output/hash/sm3_rss_o")
create_normality_graphs('normality_graphs/hash/openssl/sm3_rss', 'SM3 RSS (OpenSSL)', RSS_sm3)
#Botan
RSS_sha_botan = process_proc_status_output("output/hash/sha256_rss")
create_normality_graphs('normality_graphs/hash/botan/sha_rss', 'SHA256 RSS (Botan)', RSS_sha_botan)
RSS_sm3_botan = process_proc_status_output("output/hash/sm3_rss")
create_normality_graphs('normality_graphs/hash/botan/sm3_rss', 'SM3 RSS (Botan)', RSS_sm3_botan)
elif(sys.argv[1] == 'ds'):
############ GmSSL ############
# Key generation
rsa_keygen_real_time, rsa_keygen_cpu_time, rsa_keygen_cpu_cycles = process_perf_stat_output("output/ds_perf/rsa_keygen_perf_o")
create_normality_graphs('normality_graphs/ds/gmssl/keygen/rsa_keygen_real_time', 'RSA Keygen Real Time (GmSSL)', rsa_keygen_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/keygen/rsa_keygen_cpu_time', 'RSA Keygen CPU Time (GmSSL)', rsa_keygen_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/keygen/rsa_keygen_cpu_cycles', 'RSA Keygen CPU cycles (GmSSL)', rsa_keygen_cpu_cycles["data"])
sm2_keygen_real_time, sm2_keygen_cpu_time, sm2_keygen_cpu_cycles = process_perf_stat_output("output/ds_perf/sm2_keygen_perf_o")
stat, p = shapiro(sm2_keygen_real_time["data"])
print("sm2 keygen gmssl real",stat,p)
stat, p = shapiro(sm2_keygen_cpu_time["data"])
print("sm2 keygen gmssl cpu",stat,p)
stat, p = shapiro(sm2_keygen_cpu_cycles["data"])
print("sm2 keygen gmssl cycles",stat,p)
create_normality_graphs('normality_graphs/ds/gmssl/keygen/sm2_keygen_real_time', 'SM2 Keygen Real Time (GmSSL)', sm2_keygen_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/keygen/sm2_keygen_cpu_time', 'SM2 Keygen CPU Time (GmSSL)', sm2_keygen_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/keygen/sm2_keygen_cpu_cycles', 'SM2 Keygen CPU cycles (GmSSL)', sm2_keygen_cpu_cycles["data"])
ecdsa_keygen_real_time, ecdsa_keygen_cpu_time, ecdsa_keygen_cpu_cycles = process_perf_stat_output("output/ds_perf/ecdsa_keygen_perf_o")
stat, p = shapiro(ecdsa_keygen_real_time["data"])
print("ecdsa keygen gmssl real",stat,p)
stat, p = shapiro(ecdsa_keygen_cpu_time["data"])
print("ecdsa keygen gmssl cpu",stat,p)
stat, p = shapiro(ecdsa_keygen_cpu_cycles["data"])
print("ecdsa keygen gmssl cycles",stat,p)
create_normality_graphs('normality_graphs/ds/gmssl/keygen/ecdsa_keygen_real_time', 'ECDSA Keygen Real Time (GmSSL)', ecdsa_keygen_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/keygen/ecdsa_keygen_cpu_time', 'ECDSA Keygen CPU Time (GmSSL)', ecdsa_keygen_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/keygen/ecdsa_keygen_cpu_cycles', 'ECDSA Keygen CPU cycles (GmSSL)', ecdsa_keygen_cpu_cycles["data"])
# Signing
rsa_sign_real_time, rsa_sign_cpu_time, rsa_sign_cpu_cycles = process_perf_stat_output("output/ds_perf/rsa_sign_perf_o")
create_normality_graphs('normality_graphs/ds/gmssl/sign/rsa_sign_real_time', 'RSA Sign Real Time (GmSSL)', rsa_sign_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/sign/rsa_sign_cpu_time', 'RSA Sign CPU Time (GmSSL)', rsa_sign_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/sign/rsa_sign_cpu_cycles', 'RSA Sign CPU cycles (GmSSL)', rsa_sign_cpu_cycles["data"])
sm2_sign_real_time, sm2_sign_cpu_time, sm2_sign_cpu_cycles = process_perf_stat_output("output/ds_perf/sm2_sign_perf_o")
create_normality_graphs('normality_graphs/ds/gmssl/sign/sm2_sign_real_time', 'SM2 Sign Real Time (GmSSL)', sm2_sign_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/sign/sm2_sign_cpu_time', 'SM2 Sign CPU Time (GmSSL)', sm2_sign_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/sign/sm2_sign_cpu_cycles', 'SM2 Sign CPU cycles (GmSSL)', sm2_sign_cpu_cycles["data"])
ecdsa_sign_real_time, ecdsa_sign_cpu_time, ecdsa_sign_cpu_cycles = process_perf_stat_output("output/ds_perf/ecdsa_sign_perf_o")
create_normality_graphs('normality_graphs/ds/gmssl/sign/ecdsa_sign_real_time', 'ECDSA Sign Real Time (GmSSL)', ecdsa_sign_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/sign/ecdsa_sign_cpu_time', 'ECDSA Sign CPU Time (GmSSL)', ecdsa_sign_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/sign/ecdsa_sign_cpu_cycles', 'ECDSA Sign CPU cycles (GmSSL)', ecdsa_sign_cpu_cycles["data"])
# Verifying
rsa_verify_real_time, rsa_verify_cpu_time, rsa_verify_cpu_cycles = process_perf_stat_output("output/ds_perf/rsa_verify_perf_o")
create_normality_graphs('normality_graphs/ds/gmssl/verify/rsa_verify_real_time', 'RSA Verify Real Time (GmSSL)', rsa_verify_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/verify/rsa_verify_cpu_time', 'RSA Verify CPU Time (GmSSL)', rsa_verify_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/verify/rsa_verify_cpu_cycles', 'RSA Verify CPU cycles (GmSSL)', rsa_verify_cpu_cycles["data"])
sm2_verify_real_time, sm2_verify_cpu_time, sm2_verify_cpu_cycles = process_perf_stat_output("output/ds_perf/sm2_verify_perf_o")
create_normality_graphs('normality_graphs/ds/gmssl/verify/sm2_verify_real_time', 'SM2 Verify Real Time (GmSSL)', sm2_verify_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/verify/sm2_verify_cpu_time', 'SM2 Verify CPU Time (GmSSL)', sm2_verify_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/verify/sm2_verify_cpu_cycles', 'SM2 Verify CPU cycles (GmSSL)', sm2_verify_cpu_cycles["data"])
ecdsa_verify_real_time, ecdsa_verify_cpu_time, ecdsa_verify_cpu_cycles = process_perf_stat_output("output/ds_perf/ecdsa_verify_perf_o")
create_normality_graphs('normality_graphs/ds/gmssl/verify/ecdsa_verify_real_time', 'ECDSA Verify Real Time (GmSSL)', ecdsa_verify_real_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/verify/ecdsa_verify_cpu_time', 'ECDSA Verify CPU Time (GmSSL)', ecdsa_verify_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/gmssl/verify/ecdsa_verify_cpu_cycles', 'ECDSA Verify CPU cycles (GmSSL)', ecdsa_verify_cpu_cycles["data"])
############ Botan ############
# Key generation
botan_rsa_keygen_real_time, botan_rsa_keygen_cpu_time, botan_rsa_keygen_cpu_cycles = process_perf_stat_output("output/ds_perf/rsa_keygen_perf")
stat, p = shapiro(botan_rsa_keygen_real_time["data"])
print(stat,p)
create_normality_graphs('normality_graphs/ds/botan/keygen/rsa_keygen_real_time', 'RSA Keygen Real Time (Botan)', botan_rsa_keygen_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/keygen/rsa_keygen_cpu_time', 'RSA Keygen CPU Time (Botan)', botan_rsa_keygen_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/keygen/rsa_keygen_cpu_cycles', 'RSA Keygen CPU cycles (Botan)', botan_rsa_keygen_cpu_cycles["data"])
botan_sm2_keygen_real_time, botan_sm2_keygen_cpu_time, botan_sm2_keygen_cpu_cycles = process_perf_stat_output("output/ds_perf/sm2_keygen_perf")
create_normality_graphs('normality_graphs/ds/botan/keygen/sm2_keygen_real_time', 'SM2 Keygen Real Time (Botan)', botan_sm2_keygen_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/keygen/sm2_keygen_cpu_time', 'SM2 Keygen CPU Time (Botan)', botan_sm2_keygen_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/keygen/sm2_keygen_cpu_cycles', 'SM2 Keygen CPU cycles (Botan)', botan_sm2_keygen_cpu_cycles["data"])
botan_ecdsa_keygen_real_time, botan_ecdsa_keygen_cpu_time, botan_ecdsa_keygen_cpu_cycles = process_perf_stat_output("output/ds_perf/ecdsa_keygen_perf")
create_normality_graphs('normality_graphs/ds/botan/keygen/ecdsa_keygen_real_time', 'ECDSA Keygen Real Time (Botan)', botan_ecdsa_keygen_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/keygen/ecdsa_keygen_cpu_time', 'ECDSA Keygen CPU Time (Botan)', botan_ecdsa_keygen_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/keygen/ecdsa_keygen_cpu_cycles', 'ECDSA Keygen CPU cycles (Botan)', botan_ecdsa_keygen_cpu_cycles["data"])
# Signing
botan_rsa_sign_real_time, botan_rsa_sign_cpu_time, botan_rsa_sign_cpu_cycles = process_perf_stat_output("output/ds_perf/rsa_sign_perf")
create_normality_graphs('normality_graphs/ds/botan/sign/rsa_sign_real_time', 'RSA Sign Real Time (Botan)', botan_rsa_sign_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/sign/rsa_sign_cpu_time', 'RSA Sign CPU Time (Botan)', botan_rsa_sign_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/sign/rsa_sign_cpu_cycles', 'RSA Sign CPU cycles (Botan)', botan_rsa_sign_cpu_cycles["data"])
botan_sm2_sign_real_time, botan_sm2_sign_cpu_time, botan_sm2_sign_cpu_cycles = process_perf_stat_output("output/ds_perf/sm2_sign_perf")
create_normality_graphs('normality_graphs/ds/botan/sign/sm2_sign_real_time', 'SM2 Sign Real Time (Botan)', botan_sm2_sign_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/sign/sm2_sign_cpu_time', 'SM2 Sign CPU Time (Botan)', botan_sm2_sign_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/sign/sm2_sign_cpu_cycles', 'SM2 Sign CPU cycles (Botan)', botan_sm2_sign_cpu_cycles["data"])
botan_ecdsa_sign_real_time, botan_ecdsa_sign_cpu_time, botan_ecdsa_sign_cpu_cycles = process_perf_stat_output("output/ds_perf/ecdsa_sign_perf")
create_normality_graphs('normality_graphs/ds/botan/sign/ecdsa_sign_real_time', 'ECDSA Sign Real Time (Botan)', botan_ecdsa_sign_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/sign/ecdsa_sign_cpu_time', 'ECDSA Sign CPU Time (Botan)', botan_ecdsa_sign_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/sign/ecdsa_sign_cpu_cycles', 'ECDSA Sign CPU cycles (Botan)', botan_ecdsa_sign_cpu_cycles["data"])
#Verifying
botan_rsa_verify_real_time, botan_rsa_verify_cpu_time, botan_rsa_verify_cpu_cycles = process_perf_stat_output("output/ds_perf/rsa_verify_perf")
create_normality_graphs('normality_graphs/ds/botan/verify/rsa_verify_real_time', 'RSA Verify Real Time (Botan)', botan_rsa_verify_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/verify/rsa_verify_cpu_time', 'RSA Verify CPU Time (Botan)', botan_rsa_verify_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/verify/rsa_verify_cpu_cycles', 'RSA Verify CPU cycles (Botan)', botan_rsa_verify_cpu_cycles["data"])
botan_sm2_verify_real_time, botan_sm2_verify_cpu_time, botan_sm2_verify_cpu_cycles = process_perf_stat_output("output/ds_perf/sm2_verify_perf")
create_normality_graphs('normality_graphs/ds/botan/verify/sm2_verify_real_time', 'SM2 Verify Real Time (Botan)', botan_sm2_verify_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/verify/sm2_verify_cpu_time', 'SM2 Verify CPU Time (Botan)', botan_sm2_verify_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/verify/sm2_verify_cpu_cycles', 'SM2 Verify CPU cycles (Botan)', botan_sm2_verify_cpu_cycles["data"])
botan_ecdsa_verify_real_time, botan_ecdsa_verify_cpu_time, botan_ecdsa_verify_cpu_cycles = process_perf_stat_output("output/ds_perf/ecdsa_verify_perf")
create_normality_graphs('normality_graphs/ds/botan/verify/ecdsa_verify_real_time', 'ECDSA Verify Real Time (Botan)', botan_ecdsa_verify_real_time["data"])
create_normality_graphs('normality_graphs/ds/botan/verify/ecdsa_verify_cpu_time', 'ECDSA Verify CPU Time (Botan)', botan_ecdsa_verify_cpu_time["data"])
create_normality_graphs('normality_graphs/ds/botan/verify/ecdsa_verify_cpu_cycles', 'ECDSA Verify CPU cycles (Botan)', botan_ecdsa_verify_cpu_cycles["data"])
#GmSSL RSS
ecdsa_keygen_rss = process_usrbintime_output_special('output/ds_rss/rss_ecdsa_key_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/ecdsa_keygen_gmssl', 'ECDSA Keygen RSS (GmSSL)', ecdsa_keygen_rss)
rsa_keygen_rss = process_usrbintime_output_special('output/ds_rss/rss_rsa_key_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/rsa_keygen_gmssl', 'RSA Keygen RSS (GmSSL)', rsa_keygen_rss)
sm2_keygen_rss = process_usrbintime_output_special('output/ds_rss/rss_sm2_key_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/sm2_keygen_gmssl', 'SM2 Keygen RSS (GmSSL)', sm2_keygen_rss)
ecdsa_sign_rss = process_usrbintime_output('output/ds_rss/rss_ecdsa_sign_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/ecdsa_sign_gmssl', 'ECDSA Sign RSS (GmSSL)', ecdsa_sign_rss)
rsa_sign_rss = process_usrbintime_output('output/ds_rss/rss_rsa_sign_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/rsa_sign_gmssl', 'RSA Sign RSS (GmSSL)', rsa_sign_rss)
sm2_sign_rss = process_usrbintime_output('output/ds_rss/rss_sm2_sign_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/sm2_sign_gmssl', 'SM2 Sign RSS (GmSSL)', sm2_sign_rss)
ecdsa_verify_rss = process_usrbintime_output('output/ds_rss/rss_ecdsa_verify_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/ecdsa_verify_gmssl', 'ECDSA Verify RSS (GmSSL)', ecdsa_verify_rss)
rsa_verify_rss = process_usrbintime_output('output/ds_rss/rss_rsa_verify_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/rsa_verify_gmssl', 'RSA Verify RSS (GmSSL)', rsa_verify_rss)
sm2_verify_rss = process_usrbintime_output('output/ds_rss/rss_sm2_verify_gmssl')
create_normality_graphs('normality_graphs/ds/gmssl/rss/sm2_verify_gmssl', 'SM2 Verify RSS (GmSSL)', sm2_verify_rss)
#Botan RSS
ecdsa_keygen_rss_botan = process_usrbintime_output('output/ds_rss/rss_ecdsa_key_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/ecdsa_keygen', 'ECDSA Keygen RSS (Botan)', ecdsa_keygen_rss_botan)
rsa_keygen_rss_botan = process_usrbintime_output('output/ds_rss/rss_rsa_key_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/rsa_keygen', 'RSA Keygen RSS (Botan)', rsa_keygen_rss_botan)
sm2_keygen_rss_botan = process_usrbintime_output('output/ds_rss/rss_sm2_key_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/sm2_keygen', 'SM2 Keygen RSS (Botan)', sm2_keygen_rss_botan)
ecdsa_sign_rss_botan = process_usrbintime_output('output/ds_rss/rss_ecdsa_sign_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/ecdsa_sign', 'ECDSA Sign RSS (Botan)', ecdsa_sign_rss_botan)
rsa_sign_rss_botan = process_usrbintime_output('output/ds_rss/rss_rsa_sign_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/rsa_sign', 'RSA Sign RSS (Botan)', rsa_sign_rss_botan)
sm2_sign_rss_botan = process_usrbintime_output('output/ds_rss/rss_sm2_sign_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/sm2_sign', 'SM2 Sign RSS (Botan)', sm2_sign_rss_botan)
ecdsa_verify_rss_botan = process_usrbintime_output('output/ds_rss/rss_ecdsa_verify_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/ecdsa_verify', 'ECDSA Verify RSS (Botan)', ecdsa_verify_rss_botan)
rsa_verify_rss_botan = process_usrbintime_output('output/ds_rss/rss_rsa_verify_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/rsa_verify', 'RSA Verify RSS (Botan)', rsa_verify_rss_botan)
sm2_verify_rss_botan = process_usrbintime_output('output/ds_rss/rss_sm2_verify_botan')
create_normality_graphs('normality_graphs/ds/botan/rss/sm2_verify', 'SM2 Verify RSS (Botan)', sm2_verify_rss_botan)
elif (sys.argv[1] == 'block'):
print('block')
############ OpenSSL ECB mode ############
# AES-NI Encryption
ecb_aes_ni_real_time, ecb_aes_ni_cpu_time, ecb_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ni_ecb")
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_ni_real_time_enc', 'Encryption: AES-NI ECB Real time (OpenSSL)', ecb_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_ni_cpu_time_enc', 'Encryption: AES-NI ECB CPU time (OpenSSL)', ecb_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_ni_cpu_cycles_enc', 'Encryption: AES-NI ECB CPU cycles (OpenSSL)', ecb_aes_ni_cpu_cycles["data"])
# AES-NI Decryption
dec_ecb_aes_ni_real_time, dec_ecb_aes_ni_cpu_time, dec_ecb_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ni_ecb_decrypt")
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_ni_real_time_dec', 'Decryption: AES-NI ECB Real time (OpenSSL)', dec_ecb_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_ni_cpu_time_dec', 'Decryption: AES-NI ECB CPU time (OpenSSL)', dec_ecb_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_ni_cpu_cycles_dec', 'Decryption: AES-NI ECB CPU cycles (OpenSSL)', dec_ecb_aes_ni_cpu_cycles["data"])
# AES Encryption
ecb_aes_real_time, ecb_aes_cpu_time, ecb_aes_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ecb")
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_real_time_enc', 'Encryption: AES ECB Real time (OpenSSL)', ecb_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_cpu_time_enc', 'Encryption: AES ECB CPU time (OpenSSL)', ecb_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_cpu_cycles_enc', 'Encryption: AES ECB CPU cycles (OpenSSL)', ecb_aes_cpu_cycles["data"])
# AES Decryption
dec_ecb_aes_real_time, dec_ecb_aes_cpu_time, dec_ecb_aes_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ecb_decrypt")
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_real_time_dec', 'Decryption: AES ECB Real time (OpenSSL)', dec_ecb_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_cpu_time_dec', 'Decryption: AES ECB CPU time (OpenSSL)', dec_ecb_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_aes_cpu_cycles_dec', 'Decryption: AES ECB CPU cycles (OpenSSL)', dec_ecb_aes_cpu_cycles["data"])
# SM4 Encryption
ecb_sm4_real_time, ecb_sm4_cpu_time, ecb_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/sm4_ecb")
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_sm4_real_time', 'Encryption: SM4 ECB Real time (OpenSSL)', ecb_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_sm4_cpu_time', 'Encryption: SM4 ECB CPU time (OpenSSL)', ecb_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_sm4_cpu_cycles', 'Encryption: SM4 ECB CPU cycles (OpenSSL)', ecb_sm4_cpu_cycles["data"])
# SM4 Decryption
dec_ecb_sm4_real_time, dec_ecb_sm4_cpu_time, dec_ecb_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/sm4_ecb_decrypt")
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_sm4_real_time_dec', 'Decryption: SM4 ECB Real time (OpenSSL)', dec_ecb_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_sm4_cpu_time_dec', 'Decryption: SM4 ECB CPU time (OpenSSL)', dec_ecb_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/ECB/ecb_sm4_cpu_cycles_dec', 'Decryption: SM4 ECB CPU cycles (OpenSSL)', dec_ecb_sm4_cpu_cycles["data"])
############ OpenSSL CBC mode ############
# AES-NI Encryption
cbc_aes_ni_real_time, cbc_aes_ni_cpu_time, cbc_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ni_cbc")
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_ni_real_time_enc', 'Encryption: AES-NI CBC Real time (OpenSSL)', cbc_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_ni_cpu_time_enc', 'Encryption: AES-NI CBC CPU time (OpenSSL)', cbc_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_ni_cpu_cycles_enc', 'Encryption: AES-NI CBC CPU cycles (OpenSSL)', cbc_aes_ni_cpu_cycles["data"])
# AES-NI Decryption
dec_cbc_aes_ni_real_time, dec_cbc_aes_ni_cpu_time, dec_cbc_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ni_cbc_decrypt")
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_ni_real_time_dec', 'Decryption: AES-NI CBC Real time (OpenSSL)', dec_cbc_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_ni_cpu_time_dec', 'Decryption: AES-NI CBC CPU time (OpenSSL)', dec_cbc_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_ni_cpu_cycles_dec', 'Decryption: AES-NI CBC CPU cycles (OpenSSL)', dec_cbc_aes_ni_cpu_cycles["data"])
# AES Encryption
cbc_aes_real_time, cbc_aes_cpu_time, cbc_aes_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_cbc")
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_real_time_enc', 'Encryption: AES CBC Real time (OpenSSL)', cbc_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_cpu_time_enc', 'Encryption: AES CBC CPU time (OpenSSL)', cbc_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_cpu_cycles_enc', 'Encryption: AES CBC CPU cycles (OpenSSL)', cbc_aes_cpu_cycles["data"])
# AES Decryption
dec_cbc_aes_real_time, dec_cbc_aes_cpu_time, dec_cbc_aes_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_cbc_decrypt")
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_real_time_dec', 'Decryption: AES CBC Real time (OpenSSL)', dec_cbc_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_cpu_time_dec', 'Decryption: AES CBC CPU time (OpenSSL)', dec_cbc_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_aes_cpu_cycles_dec', 'Decryption: AES CBC CPU cycles (OpenSSL)', dec_cbc_aes_cpu_cycles["data"])
# SM4 Encryption
cbc_sm4_real_time, cbc_sm4_cpu_time, cbc_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/sm4_cbc")
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_sm4_real_time', 'Encryption: SM4 CBC Real time (OpenSSL)', cbc_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_sm4_cpu_time', 'Encryption: SM4 CBC CPU time (OpenSSL)', cbc_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_sm4_cpu_cycles', 'Encryption: SM4 CBC CPU cycles (OpenSSL)', cbc_sm4_cpu_cycles["data"])
# SM4 Decryption
dec_cbc_sm4_real_time, dec_cbc_sm4_cpu_time, dec_cbc_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/sm4_cbc_decrypt")
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_sm4_real_time_dec', 'Decryption: SM4 CBC Real time (OpenSSL)', dec_cbc_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_sm4_cpu_time_dec', 'Decryption: SM4 CBC CPU time (OpenSSL)', dec_cbc_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CBC/cbc_sm4_cpu_cycles_dec', 'Decryption: SM4 CBC CPU cycles (OpenSSL)', dec_cbc_sm4_cpu_cycles["data"])
############ OpenSSL CTR mode ############
# AES-NI Encryption
ctr_aes_ni_real_time, ctr_aes_ni_cpu_time, ctr_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ni_ctr")
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_ni_real_time_enc', 'Encryption: AES-NI CTR Real time (OpenSSL)', ctr_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_ni_cpu_time_enc', 'Encryption: AES-NI CTR CPU time (OpenSSL)', ctr_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_ni_cpu_cycles_enc', 'Encryption: AES-NI CTR CPU cycles (OpenSSL)', ctr_aes_ni_cpu_cycles["data"])
# AES-NI Decryption
dec_ctr_aes_ni_real_time, dec_ctr_aes_ni_cpu_time, dec_ctr_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ni_ctr_decrypt")
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_ni_real_time_dec', 'Decryption: AES-NI CTR Real time (OpenSSL)', dec_ctr_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_ni_cpu_time_dec', 'Decryption: AES-NI CTR CPU time (OpenSSL)', dec_ctr_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_ni_cpu_cycles_dec', 'Decryption: AES-NI CTR CPU cycles (OpenSSL)', dec_ctr_aes_ni_cpu_cycles["data"])
# AES Encryption
ctr_aes_real_time, ctr_aes_cpu_time, ctr_aes_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ctr")
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_real_time_enc', 'Encryption: AES CTR Real time (OpenSSL)', ctr_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_cpu_time_enc', 'Encryption: AES CTR CPU time (OpenSSL)', ctr_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_cpu_cycles_enc', 'Encryption: AES CTR CPU cycles (OpenSSL)', ctr_aes_cpu_cycles["data"])
# AES Decryption
dec_ctr_aes_real_time, dec_ctr_aes_cpu_time, dec_ctr_aes_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/aes_ctr_decrypt")
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_real_time_dec', 'Decryption: AES CTR Real time (OpenSSL)', dec_ctr_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_cpu_time_dec', 'Decryption: AES CTR CPU time (OpenSSL)', dec_ctr_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_aes_cpu_cycles_dec', 'Decryption: AES CTR CPU cycles (OpenSSL)', dec_ctr_aes_cpu_cycles["data"])
# SM4 Encryption
ctr_sm4_real_time, ctr_sm4_cpu_time, ctr_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/sm4_ctr")
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_sm4_real_time', 'Encryption: SM4 CTR Real time (OpenSSL)', ctr_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_sm4_cpu_time', 'Encryption: SM4 CTR CPU time (OpenSSL)', ctr_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_sm4_cpu_cycles', 'Encryption: SM4 CTR CPU cycles (OpenSSL)', ctr_sm4_cpu_cycles["data"])
# SM4 Decryption
dec_ctr_sm4_real_time, dec_ctr_sm4_cpu_time, dec_ctr_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/openssl/sm4_ctr_decrypt")
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_sm4_real_time_dec', 'Decryption: SM4 CTR Real time (OpenSSL)', dec_ctr_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_sm4_cpu_time_dec', 'Decryption: SM4 CTR CPU time (OpenSSL)', dec_ctr_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/openssl/CTR/ctr_sm4_cpu_cycles_dec', 'Decryption: SM4 CTR CPU cycles (OpenSSL)', dec_ctr_sm4_cpu_cycles["data"])
###############################################################################################
############ Botan ECB mode ############
# AES-NI Encryption
botan_ecb_aes_ni_real_time, botan_ecb_aes_ni_cpu_time, botan_ecb_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ni_ecb")
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_ni_real_time_enc', 'Encryption: AES-NI ECB Real time (Botan)', botan_ecb_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_ni_cpu_time_enc', 'Encryption: AES-NI ECB CPU time (Botan)', botan_ecb_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_ni_cpu_cycles_enc', 'Encryption: AES-NI ECB CPU cycles (Botan)', botan_ecb_aes_ni_cpu_cycles["data"])
# AES-NI Decryption
botan_dec_ecb_aes_ni_real_time, botan_dec_ecb_aes_ni_cpu_time, botan_dec_ecb_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ni_ecb_decrypt")
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_ni_real_time_dec', 'Decryption: AES-NI ECB Real time (Botan)', botan_dec_ecb_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_ni_cpu_time_dec', 'Decryption: AES-NI ECB CPU time (Botan)', botan_dec_ecb_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_ni_cpu_cycles_dec', 'Decryption: AES-NI ECB CPU cycles (Botan)', botan_dec_ecb_aes_ni_cpu_cycles["data"])
# AES Encryption
botan_ecb_aes_real_time, botan_ecb_aes_cpu_time, botan_ecb_aes_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ecb")
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_real_time_enc', 'Encryption: AES ECB Real time (Botan)', botan_ecb_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_cpu_time_enc', 'Encryption: AES ECB CPU time (Botan)', botan_ecb_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_cpu_cycles_enc', 'Encryption: AES ECB CPU cycles (Botan)', botan_ecb_aes_cpu_cycles["data"])
# AES Decryption
botan_dec_ecb_aes_real_time, botan_dec_ecb_aes_cpu_time, botan_dec_ecb_aes_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ecb_decrypt")
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_real_time_dec', 'Decryption: AES ECB Real time (Botan)', botan_dec_ecb_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_cpu_time_dec', 'Decryption: AES ECB CPU time (Botan)', botan_dec_ecb_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_aes_cpu_cycles_dec', 'Decryption: AES ECB CPU cycles (Botan)', botan_dec_ecb_aes_cpu_cycles["data"])
# SM4 Encryption
botan_ecb_sm4_real_time, botan_ecb_sm4_cpu_time, botan_ecb_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_sm4_ecb")
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_sm4_real_time_enc', 'Encryption: SM4 ECB Real time (Botan)', botan_ecb_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_sm4_cpu_time_enc', 'Encryption: SM4 ECB CPU time (Botan)', botan_ecb_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_sm4_cpu_cycles_enc', 'Encryption: SM4 ECB CPU cycles (Botan)', botan_ecb_sm4_cpu_cycles["data"])
# SM4 Decryption
botan_dec_ecb_sm4_real_time, botan_dec_ecb_sm4_cpu_time, botan_dec_ecb_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_sm4_ecb_decrypt")
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_sm4_real_time_dec', 'Decryption: SM4 ECB Real time (Botan)', botan_dec_ecb_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_sm4_cpu_time_dec', 'Decryption: SM4 ECB CPU time (Botan)', botan_dec_ecb_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/ECB/ecb_sm4_cpu_cycles_dec', 'Decryption: SM4 ECB CPU cycles (Botan)', botan_dec_ecb_sm4_cpu_cycles["data"])
############ Botan CBC mode ############
# AES-NI Encryption
botan_cbc_aes_ni_real_time, botan_cbc_aes_ni_cpu_time, botan_cbc_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ni_cbc")
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_ni_real_time_enc', 'Encryption: AES-NI CBC Real time (Botan)', botan_cbc_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_ni_cpu_time_enc', 'Encryption: AES-NI CBC CPU time (Botan)', botan_cbc_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_ni_cpu_cycles_enc', 'Encryption: AES-NI CBC CPU cycles (Botan)', botan_cbc_aes_ni_cpu_cycles["data"])
# AES-NI Decryption
botan_dec_cbc_aes_ni_real_time, botan_dec_cbc_aes_ni_cpu_time, botan_dec_cbc_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ni_cbc_decrypt")
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_ni_real_time_dec', 'Decryption: AES-NI CBC Real time (Botan)', botan_dec_cbc_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_ni_cpu_time_dec', 'Decryption: AES-NI CBC CPU time (Botan)', botan_dec_cbc_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_ni_cpu_cycles_dec', 'Decryption: AES-NI CBC CPU cycles (Botan)', botan_dec_cbc_aes_ni_cpu_cycles["data"])
# AES Encryption
botan_cbc_aes_real_time, botan_cbc_aes_cpu_time, botan_cbc_aes_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_cbc")
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_real_time_enc', 'Encryption: AES CBC Real time (Botan)', botan_cbc_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_cpu_time_enc', 'Encryption: AES CBC CPU time (Botan)', botan_cbc_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_cpu_cycles_enc', 'Encryption: AES CBC CPU cycles (Botan)', botan_cbc_aes_cpu_cycles["data"])
# AES Decryption
botan_dec_cbc_aes_real_time, botan_dec_cbc_aes_cpu_time, botan_dec_cbc_aes_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_cbc_decrypt")
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_real_time_dec', 'Decryption: AES CBC Real time (Botan)', botan_dec_cbc_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_cpu_time_dec', 'Decryption: AES CBC CPU time (Botan)', botan_dec_cbc_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_aes_cpu_cycles_dec', 'Decryption: AES CBC CPU cycles (Botan)', botan_dec_cbc_aes_cpu_cycles["data"])
# SM4 Encryption
botan_cbc_sm4_real_time, botan_cbc_sm4_cpu_time, botan_cbc_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_sm4_cbc")
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_sm4_real_time_enc', 'Encryption: SM4 CBC Real time (Botan)', botan_cbc_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_sm4_cpu_time_enc', 'Encryption: SM4 CBC CPU time (Botan)', botan_cbc_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_sm4_cpu_cycles_enc', 'Encryption: SM4 CBC CPU cycles (Botan)', botan_cbc_sm4_cpu_cycles["data"])
# SM4 Decryption
botan_dec_cbc_sm4_real_time, botan_dec_cbc_sm4_cpu_time, botan_dec_cbc_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_sm4_cbc_decrypt")
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_sm4_real_time_dec', 'Decryption: SM4 CBC Real time (Botan)', botan_dec_cbc_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_sm4_cpu_time_dec', 'Decryption: SM4 CBC CPU time (Botan)', botan_dec_cbc_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CBC/cbc_sm4_cpu_cycles_dec', 'Decryption: SM4 CBC CPU cycles (Botan)', botan_dec_cbc_sm4_cpu_cycles["data"])
############ Botan CTR mode ############
# AES-NI Encryption
botan_ctr_aes_ni_real_time, botan_ctr_aes_ni_cpu_time, botan_ctr_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ni_ctr")
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_ni_real_time_enc', 'Encryption: AES-NI CTR Real time (Botan)', botan_ctr_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_ni_cpu_time_enc', 'Encryption: AES-NI CTR CPU time (Botan)', botan_ctr_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_ni_cpu_cycles_enc', 'Encryption: AES-NI CTR CPU cycles (Botan)', botan_ctr_aes_ni_cpu_cycles["data"])
# AES-NI Decryption
botan_dec_ctr_aes_ni_real_time, botan_dec_ctr_aes_ni_cpu_time, botan_dec_ctr_aes_ni_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ni_ctr_decrypt")
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_ni_real_time_dec', 'Decryption: AES-NI CTR Real time (Botan)', botan_dec_ctr_aes_ni_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_ni_cpu_time_dec', 'Decryption: AES-NI CTR CPU time (Botan)', botan_dec_ctr_aes_ni_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_ni_cpu_cycles_dec', 'Decryption: AES-NI CTR CPU cycles (Botan)', botan_dec_ctr_aes_ni_cpu_cycles["data"])
# AES Encryption
botan_ctr_aes_real_time, botan_ctr_aes_cpu_time, botan_ctr_aes_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ctr")
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_real_time_enc', 'Encryption: AES CTR Real time (Botan)', botan_ctr_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_cpu_time_enc', 'Encryption: AES CTR CPU time (Botan)', botan_ctr_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_cpu_cycles_enc', 'Encryption: AES CTR CPU cycles (Botan)', botan_ctr_aes_cpu_cycles["data"])
# AES Decryption
botan_dec_ctr_aes_real_time, botan_dec_ctr_aes_cpu_time, botan_dec_ctr_aes_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_aes_ctr_decrypt")
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_real_time_dec', 'Decryption: AES CTR Real time (Botan)', botan_dec_ctr_aes_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_cpu_time_dec', 'Decryption: AES CTR CPU time (Botan)', botan_dec_ctr_aes_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_aes_cpu_cycles_dec', 'Decryption: AES CTR CPU cycles (Botan)', botan_dec_ctr_aes_cpu_cycles["data"])
# SM4 Encryption
botan_ctr_sm4_real_time, botan_ctr_sm4_cpu_time, botan_ctr_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_sm4_ctr")
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_sm4_real_time_enc', 'Encryption: SM4 CTR Real time (Botan)', botan_ctr_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_sm4_cpu_time_enc', 'Encryption: SM4 CTR CPU time (Botan)', botan_ctr_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_sm4_cpu_cycles_enc', 'Encryption: SM4 CTR CPU cycles (Botan)', botan_ctr_sm4_cpu_cycles["data"])
# SM4 Decryption
botan_dec_ctr_sm4_real_time, botan_dec_ctr_sm4_cpu_time, botan_dec_ctr_sm4_cpu_cycles = process_perf_stat_output("output/block_perf/botan/botan_sm4_ctr_decrypt")
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_sm4_real_time_dec', 'Decryption: SM4 CTR Real time (Botan)', botan_dec_ctr_sm4_real_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_sm4_cpu_time_dec', 'Decryption: SM4 CTR CPU time (Botan)', botan_dec_ctr_sm4_cpu_time["data"])
create_normality_graphs('normality_graphs/block/botan/CTR/ctr_sm4_cpu_cycles_dec', 'Decryption: SM4 CTR CPU cycles (Botan)', botan_dec_ctr_sm4_cpu_cycles["data"])
###############################################################################################
############ RSS OpenSSL ############
# AES-NI Encryption
RSS_aes_ni = process_proc_status_output("output/block_rss/openssl/openssl_aes_ni_rss")
create_normality_graphs('normality_graphs/block_rss/openssl/rss_aes_ni', 'Encryption: AES-NI RSS (OpenSSL)', RSS_aes_ni)
# AES-NI Decryption
RSS_dec_aes_ni = process_proc_status_output("output/block_rss/openssl/openssl_aes_ni_rss_decrypt")
create_normality_graphs('normality_graphs/block_rss/openssl/rss_aes_ni_dec', 'Decryption: AES-NI RSS (OpenSSL)', RSS_dec_aes_ni)
# AES Encryption
RSS_aes = process_proc_status_output("output/block_rss/openssl/openssl_aes_rss")
create_normality_graphs('normality_graphs/block_rss/openssl/rss_aes', 'Encryption: AES RSS (OpenSSL)', RSS_aes)
# AES Decryption
RSS_dec_aes = process_proc_status_output("output/block_rss/openssl/openssl_aes_rss_decrypt")
create_normality_graphs('normality_graphs/block_rss/openssl/rss_aes_dec', 'Decryption: AES RSS (OpenSSL)', RSS_dec_aes)
# SM4 Encryption
RSS_sm4 = process_proc_status_output("output/block_rss/openssl/openssl_sm4_rss")
create_normality_graphs('normality_graphs/block_rss/openssl/rss_sm4', 'Encryption: SM4 RSS (OpenSSL)', RSS_sm4)
# SM4 Decryption
RSS_dec_sm4 = process_proc_status_output("output/block_rss/openssl/openssl_sm4_rss_decrypt")
create_normality_graphs('normality_graphs/block_rss/openssl/rss_sm4_dec', 'Decryption: SM4 RSS (OpenSSL)', RSS_dec_sm4)
############ RSS Botan ############
# AES-NI Encryption
botan_RSS_aes_ni = process_proc_status_output("output/block_rss/botan/botan_aes_ni_rss_encrypt")
create_normality_graphs('normality_graphs/block_rss/botan/rss_aes_ni', 'Encryption: AES-NI RSS (Botan)', botan_RSS_aes_ni)
# AES-NI Decryption
botan_RSS_dec_aes_ni = process_proc_status_output("output/block_rss/botan/botan_aes_ni_rss_decrypt")
create_normality_graphs('normality_graphs/block_rss/botan/rss_aes_ni_dec', 'Decryption: AES-NI RSS (Botan)', botan_RSS_dec_aes_ni)
# AES Encryption
botan_RSS_aes = process_proc_status_output("output/block_rss/botan/botan_aes_rss_encrypt")
create_normality_graphs('normality_graphs/block_rss/botan/rss_aes', 'Encryption: AES RSS (Botan)', botan_RSS_aes)
# AES Decryption
botan_RSS_dec_aes = process_proc_status_output("output/block_rss/botan/botan_aes_rss_decrypt")
create_normality_graphs('normality_graphs/block_rss/botan/rss_aes_dec', 'Decryption: AES RSS (Botan)', botan_RSS_dec_aes)
# SM4 Encryption
botan_RSS_sm4 = process_proc_status_output("output/block_rss/botan/botan_sm4_rss_encrypt")
create_normality_graphs('normality_graphs/block_rss/botan/rss_sm4', 'Encryption: SM4 RSS (Botan)', botan_RSS_sm4)
# SM4 Decryption
botan_RSS_dec_sm4 = process_proc_status_output("output/block_rss/botan/botan_sm4_rss_decrypt")
create_normality_graphs('normality_graphs/block_rss/botan/rss_sm4_dec', 'Decryption: SM4 RSS (Botan)', botan_RSS_dec_sm4)
else:
print("No valid argument input")
| true |
8094beb6206722ad86c33fb9d7ae3b52e07d7ea3 | Python | coffeemakr/python-thr | /thr/utils.py | UTF-8 | 724 | 3.34375 | 3 | ["Unlicense"] | permissive |
import hmac
EMAIL_HMAC_KEY = b'0\xa5P\x0f\xed\x97\x01\xfam\xef\xdba\x08A\x90\x0f\xeb\xb8\xe40\x88\x1fz\xd8\x16\x82bd\xec\t\xba\xd7'
PHONE_HMAC_KEY = b'\x85\xad\xf8"iS\xf3\xd9l\xfd]\t\xbf)U^\xb9U\xfc\xd8\xaa^\xc4\xf9\xfc\xd8i\xe2X7\x07#'
def _hmac_sha256_hex(key, msg):
h = hmac.new(key=key, digestmod='sha256')
h.update(msg)
return h.hexdigest()
def hash_email(email: str):
'''
Hashes an e-mail for reverse lookup.
>>> hash_email("Test@Threema.ch")
'1ea093239cc5f0e1b6ec81b866265b921f26dc4033025410063309f4d1a8ee2c'
'''
return _hmac_sha256_hex(
key=EMAIL_HMAC_KEY,
msg=email.strip().lower().encode("ascii"))
def hash_phone(phone: str):
'''
Hashes a phone number for reverse lookup.
>>> hash_phone("41791234567")
'ad398f4d7ebe63c6550a486cc6e07f9baa09bd9d8b3d8cb9d9be106d35a7fdbc'
'''
return _hmac_sha256_hex(
key=PHONE_HMAC_KEY,
msg=phone.encode("ascii"))
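# A minimal way to exercise the doctests embedded above (a sketch; this guard is
# not part of the original module):
if __name__ == '__main__':
    import doctest
    doctest.testmod()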
| true |
028530cb0de539eaf3a874ffb5197351a9e75ea8 | Python | diverse-project/varylatex | /vary/model/files/directory.py | UTF-8 | 1,529 | 2.59375 | 3 | [] | no_license |
import os
import shutil
import time
from pathlib import Path
def clear_directory(path):
"""
Removes the content of a directory without removing the directory itself
"""
for root, dirs, files in os.walk(path):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
def remove_directory(path):
"""
Removes a directory and its content
"""
shutil.rmtree(path)
def create_temporary_copy(path):
"""
Creates a working directory with a copy of the project files, that can be altered by the program
and used for the compilations.
"""
timestamp = str(time.time())
tmp_path = os.path.join(os.getcwd(), "vary/build", timestamp)
try:
shutil.copytree(path, tmp_path)
macro_path = os.path.join(os.path.split(os.path.realpath(__file__))[0], "../macros.tex")
macro_copy_path = os.path.join(tmp_path, "macros.tex")
shutil.copyfile(macro_path, macro_copy_path)
except shutil.Error:
print("Error creating the temporary copy")
return tmp_path
def create_dir(path):
"""
Creates a directory with the specified path if it does not already exists
"""
Path(path).mkdir(parents=True, exist_ok=True)
def get_secret_key(path):
"""
Reads the secret key stored at the given path; if the file does not exist yet,
generates a new 16-byte key, stores it there and returns it.
"""
if os.path.isfile(path):
with open(path, 'rb') as f:
return f.read()
else:
key = os.urandom(16)
with open(path, 'ab') as f:
f.write(key)
return key
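# Minimal usage sketch (not part of the original module): create a scratch directory,
# clear it, then remove it again. The path below is illustrative only.
if __name__ == '__main__':
    scratch = "vary/build/_scratch_example"
    create_dir(scratch)
    clear_directory(scratch)
    remove_directory(scratch)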
| true |
e900ae1c01cde3cc20cd4a387cee03532d04700c | Python | srinivasanprashant/Bachelors-degrees-women-USA | /data-analysis.py | UTF-8 | 7,994 | 3.765625 | 4 | [] | no_license |
import pandas as pd
import matplotlib.pyplot as plt
women_degrees = pd.read_csv('percent-bachelors-degrees-women-usa.csv')
# fig, ax = plt.subplots()
# ax.plot(women_degrees['Year'], women_degrees['Biology'], label='Women')
# ax.plot(women_degrees['Year'], 100-women_degrees['Biology'], label='Men')
# # customize the appearance of the ticks
# ax.tick_params(bottom="off", top="off", left="off", right="off")
# ax.set_title('Percentage of Biology Degrees Awarded By Gender')
# ax.legend(loc="upper right")
# From the plot, we can tell that Biology degrees increased steadily from 1970 and peaked in the early 2000's.
# We can also tell that the percentage has stayed above 50% since around 1987.
# Now let's generate line charts for four STEM degree categories on a grid to encourage comparison
# major_cats = ['Biology', 'Computer Science', 'Engineering', 'Math and Statistics']
# fig = plt.figure(figsize=(12, 10))
#
# for sp in range(0,4):
# ax = fig.add_subplot(2,2,sp+1)
# ax.plot(women_degrees['Year'], women_degrees[major_cats[sp]], c='blue', label='Women')
# ax.plot(women_degrees['Year'], 100-women_degrees[major_cats[sp]], c='green', label='Men')
# # Add your code here.
# # Set the x-axis limit to range from 1968 to 2011
# ax.set_xlim(1968, 2011)
# # Set the y-axis limit to range from 0 to 100
# ax.set_ylim(0, 100)
# # Hide all of the spines and tick marks
# ax.tick_params(bottom="off", top="off", left="off", right="off")
# ax.spines["right"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
# ax.spines["top"].set_visible(False)
# ax.spines["left"].set_visible(False)
# ax.set_title(major_cats[sp])
#
# # Calling pyplot.legend() here will add the legend to the last subplot that was created.
# plt.legend(loc='upper right')
# plt.suptitle('Percentage of STEM Degrees Awarded By Gender')
# plt.show()
# Computer Science and Engineering have big gender gaps while the gap in Biology and Math and Statistics is
# quite small. In addition, the first two degree categories are dominated by men while the latter degree
# categories are much more balanced.
# Now we'll focus on customizing colors, line widths, layout, and annotations to improve the ability
# for a viewer to extract insights from the charts.
# # order the charts by decreasing ending gender gap using list populated in that order
# stem_cats = ['Engineering', 'Computer Science', 'Psychology', 'Biology', 'Physical Sciences', 'Math and Statistics']
# # Make plots more color-blind friendly using colors from color-blind friendly palette
# cb_dark_blue = (0/255, 107/255, 164/255)
# cb_orange = (255/255, 128/255, 14/255)
#
# fig = plt.figure(figsize=(18, 4))
#
# for sp in range(0,6):
# ax = fig.add_subplot(1,6,sp+1)
# ax.plot(women_degrees['Year'], women_degrees[stem_cats[sp]], c=cb_dark_blue, label='Women', linewidth=3)
# ax.plot(women_degrees['Year'], 100-women_degrees[stem_cats[sp]], c=cb_orange, label='Men', linewidth=3)
# for key,spine in ax.spines.items():
# spine.set_visible(False)
# ax.set_xlim(1968, 2011)
# ax.set_ylim(0,100)
# ax.set_title(stem_cats[sp])
# ax.tick_params(bottom="off", top="off", left="off", right="off")
# # add text annotations to plot
# if sp == 0:
# # Annotating using Axes.text(x coordinate, y coordinate, string of text)
# ax.text(2005, 87, "Men")
# ax.text(2002, 8, "Women")
# elif sp == 5:
# ax.text(2005, 62, "Men")
# ax.text(2001, 35, "Women")
# legend = ax.legend()
# legend.remove()
#
# plt.suptitle('Percentage of STEM Degrees Awarded By Gender')
# Next step is to build a bigger plot.
# Because there are seventeen degrees that we need to generate line charts for, we'll use a subplot
# grid layout of 6 rows by 3 columns. We can then group the degrees into STEM, liberal arts, and other
stem_cats = ['Psychology', 'Biology', 'Math and Statistics', 'Physical Sciences', 'Computer Science', 'Engineering']
lib_arts_cats = ['Foreign Languages', 'English', 'Communications and Journalism', 'Art and Performance', 'Social Sciences and History']
other_cats = ['Health Professions', 'Public Administration', 'Education', 'Agriculture','Business', 'Architecture']
# Make plots more color-blind friendly using colors from color-blind friendly palette
cb_dark_blue = (0/255, 107/255, 164/255)
cb_orange = (255/255, 128/255, 14/255)
fig = plt.figure(figsize=(16, 20))
for sp in range(0,6):
ax = fig.add_subplot(6,3,(3*sp)+1)
ax.plot(women_degrees['Year'], women_degrees[stem_cats[sp]], c=cb_dark_blue, label='Women', linewidth=3)
ax.plot(women_degrees['Year'], 100-women_degrees[stem_cats[sp]], c=cb_orange, label='Men', linewidth=3)
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.set_xlim(1968, 2011)
ax.set_ylim(0,100)
# enable just the 0 and 100 labels to be displayed
ax.set_yticks([0,100])
# generate a horizontal line across the entire subplot
ax.axhline(50, c=(171/255, 171/255, 171/255), alpha=0.3)
ax.set_title(stem_cats[sp])
ax.tick_params(bottom="off", top="off", left="off", right="off", labelbottom='off')
# add text annotations to plot
if sp == 0:
# Annotating using Axes.text(x coordinate, y coordinate, string of text)
ax.text(2005, 85, "Women")
ax.text(2005, 10, "Men")
elif sp == 5:
ax.text(2005, 87, "Men")
ax.text(2005, 7, "Women")
ax.tick_params(labelbottom='on')
for sp in range(0,5):
ax = fig.add_subplot(6,3,(3*sp)+2)
ax.plot(women_degrees['Year'], women_degrees[lib_arts_cats[sp]], c=cb_dark_blue, label='Women', linewidth=3)
ax.plot(women_degrees['Year'], 100-women_degrees[lib_arts_cats[sp]], c=cb_orange, label='Men', linewidth=3)
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.set_xlim(1968, 2011)
ax.set_ylim(0,100)
# enable just the 0 and 100 labels to be displayed
ax.set_yticks([0,100])
# generate a horizontal line across the entire subplot
ax.axhline(50, c=(171/255, 171/255, 171/255), alpha=0.3)
ax.set_title(lib_arts_cats[sp])
ax.tick_params(bottom="off", top="off", left="off", right="off", labelbottom='off')
# add text annotations to plot
if sp == 0:
# Annotating using Axes.text(x coordinate, y coordinate, string of text)
ax.text(2005, 78, "Women")
ax.text(2005, 18, "Men")
if sp == 4:
ax.tick_params(labelbottom='on')
for sp in range(0,6):
ax = fig.add_subplot(6,3,(3*sp)+3)
ax.plot(women_degrees['Year'], women_degrees[other_cats[sp]], c=cb_dark_blue, label='Women', linewidth=3)
ax.plot(women_degrees['Year'], 100-women_degrees[other_cats[sp]], c=cb_orange, label='Men', linewidth=3)
for key,spine in ax.spines.items():
spine.set_visible(False)
ax.set_xlim(1968, 2011)
ax.set_ylim(0,100)
# enable just the 0 and 100 labels to be displayed
ax.set_yticks([0,100])
# generate a horizontal line across the entire subplot
ax.axhline(50, c=(171/255, 171/255, 171/255), alpha=0.3)
ax.set_title(other_cats[sp])
ax.tick_params(bottom="off", top="off", left="off", right="off", labelbottom='off')
# add text annotations to plot
if sp == 0:
# Annotating using Axes.text(x coordinate, y coordinate, string of text)
ax.text(2005, 90, "Women")
ax.text(2005, 5, "Men")
elif sp == 5:
ax.text(2005, 62, "Men")
ax.text(2005, 30, "Women")
ax.tick_params(labelbottom='on')
# Export the figure containing all of the line charts
plt.savefig('biology_degrees.png')
plt.show()
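# The three subplot loops above repeat the same axis styling; a helper like the sketch
# below could remove that duplication. It is not called here and only uses matplotlib
# calls already present in this script; the function name is illustrative.
def style_degree_axes(ax, title):
    # Hide the frame and ticks, fix the axis ranges, and add the 50% reference line.
    for key, spine in ax.spines.items():
        spine.set_visible(False)
    ax.set_xlim(1968, 2011)
    ax.set_ylim(0, 100)
    ax.set_yticks([0, 100])
    ax.axhline(50, c=(171/255, 171/255, 171/255), alpha=0.3)
    ax.set_title(title)
    ax.tick_params(bottom="off", top="off", left="off", right="off", labelbottom='off')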
| true |
58344b304457457eee97093ae4618173cd000088 | Python | Yaskeir/Python | /cartesianToSpherical.py | UTF-8 | 1,358 | 4.59375 | 5 | [] | no_license |
import math
# start with the cartesian input
print("Please provide the x, y and z coordinates:")
cartesianX = float(input("x: "))
cartesianY = float(input("y: "))
cartesianZ = float(input("z: "))
# define two separate recalculation functions so that they can be re-used in other code
def cartesianToSpherical(x, y, z):
#calculate distance from the origin
sphericalR = math.sqrt(x**2+y**2+z**2)
R = round(sphericalR, 2)
#calculate the theta angle and output the result in degrees,
#rounding to the 2nd decimal point
sphericalTheta = math.degrees(math.atan(y/x))
Theta = round(sphericalTheta, 2)
#do the same for the phi angle
sphericalPhi = math.degrees(math.atan(math.sqrt(x**2+y**2)/z))
Phi = round(sphericalPhi, 2)
print("R =", R, "Theta =", Theta, "Phi =", Phi)
def cartesianToCylindrical(x, y, z):
# the cylindrical radius is the distance from the z-axis, so z is not included
cylindricalR = math.sqrt(x**2+y**2)
R = round(cylindricalR, 2)
cylindricalTheta = math.degrees(math.atan(y/x))
Theta = round(cylindricalTheta, 2)
print("R =", R, "Theta =", Theta, "Z =", z)
print("Point coordinates in spherical system, rounded up to the 2nd decimal point: ")
cartesianToSpherical(cartesianX, cartesianY, cartesianZ)
print("Point coordinates in cylindrical system, rounded up to the 2nd decimal point: ")
cartesianToCylindrical(cartesianX, cartesianY, cartesianZ)
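# Worked check (not part of the original script): for the point (3, 4, 5),
#   spherical:   R = sqrt(50), approx. 7.07; Theta = atan(4/3), approx. 53.13 degrees; Phi = atan(5/5) = 45.0 degrees
#   cylindrical: R = 5.0, Theta approx. 53.13 degrees, Z = 5.0
# Uncomment to verify against the functions above:
# cartesianToSpherical(3.0, 4.0, 5.0)
# cartesianToCylindrical(3.0, 4.0, 5.0)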
| true |
f1bd97c9a0d37e14c3582e7b143dc215626bb4ce | Python | Dm1triiy/stepik | /512.Python.Advanced/3.3.1.py | UTF-8 | 394 | 2.96875 | 3 | [] | no_license |
import requests
import re
source = requests.get(input().strip())
target_url = input().strip()
urls = []
step_status = False
if source.status_code == 200:
urls = re.findall(r'href="(.+?)"', source.text)
for url in urls:
page = requests.get(url)
if page.status_code == 200:
if target_url in page.text:
step_status = True
print('Yes' if step_status else 'No')
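# A possible refinement (sketch, not part of the original solution): stop issuing
# requests once the target URL has been found.
# for url in urls:
#     page = requests.get(url)
#     if page.status_code == 200 and target_url in page.text:
#         step_status = True
#         break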
| true |