blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
737499b2738577e6229e1f88fb9cb55fb569f037 | Python | nm17/kutana | /kutana/manager/vk/environment.py | UTF-8 | 4,663 | 2.609375 | 3 | [
"MIT"
] | permissive | """Environment for :class:`.VKManager`."""
import json
import aiohttp
from kutana.environment import Environment
class VKEnvironment(Environment):
    """Environment for :class:`.VKManager`"""

    async def _upload_file_to_vk(self, upload_url, data):
        """
        POST *data* to an upload URL previously obtained from vkontakte.

        :param upload_url: URL returned by a "*.get*UploadServer" method
        :param data: :class:`aiohttp.FormData` payload with the file
        :returns: decoded JSON answer, or None on HTTP error, malformed
            JSON or an "error" field in the answer
        """

        upload_result_text = None

        async with self.manager.session.post(upload_url, data=data) as resp:
            if resp.status == 200:
                upload_result_text = await resp.text()

        if upload_result_text is None:
            # Non-200 answer. The original code fell through to
            # json.loads(None) here and crashed with a TypeError.
            return None

        try:
            upload_result = json.loads(upload_result_text)
            if "error" in upload_result:
                upload_result = None
        except ValueError:
            # json.JSONDecodeError is a ValueError subclass; the original
            # only caught RuntimeError, so bad JSON crashed the caller.
            upload_result = None

        return upload_result

    async def reply(self, message, attachment=None, **kwargs):
        """
        Reply to currently processed message.

        :param message: message to reply with
        :param attachment: optional attachment or list of attachments to
            reply with
        :param kwargs: arguments for vkontakte's "messages.send"
        :returns: list with results of sending messages
        """

        return await self.manager.send_message(
            message, self.peer_id, attachment, **kwargs
        )

    async def upload_doc(self, file, filename, **kwargs):
        """
        Upload file to be sent with :func:`.send_message`
        (or :func:`.reply`) as document. If you passed "peer_id", vkontakte's
        method "docs.getWallUploadServer" will be used.

        You can specify document's type with keyword argument "type" or
        "doctype". If you passed "graffiti" as type - method
        "docs.getWallUploadServer" will be used. Default type is "doc".

        :param file: document as file or bytes
        :param filename: name of provided file
        :param kwargs: arguments for vkontakte's methods
        :returns: :class:`.Attachment` or None
        """

        if kwargs.get("peer_id") is None:
            peer_id = self.peer_id
        else:
            peer_id = kwargs["peer_id"]

        doctype = kwargs.get("type", "") or kwargs.get("doctype", "doc")

        # Conversation documents use the "messages" upload server;
        # graffiti and peer-less uploads go through the wall server.
        if peer_id and doctype != "graffiti":
            upload_data = await self.manager.request(
                "docs.getMessagesUploadServer",
                peer_id=peer_id,
                type=doctype,
            )
        else:
            upload_data = await self.manager.request(
                "docs.getWallUploadServer",
                group_id=kwargs.get("group_id") or self.manager.group_id,
                type=doctype,
            )

        if upload_data.error:
            return None

        upload_url = upload_data.response["upload_url"]

        data = aiohttp.FormData()
        data.add_field("file", file, filename=filename)

        upload_result = await self._upload_file_to_vk(upload_url, data)

        if not upload_result:
            return None

        attachments = await self.manager.request(
            "docs.save", **upload_result
        )

        if attachments.error:
            return None

        return self.manager.create_attachment(
            attachments.response, "doc"
        )

    async def upload_photo(self, file, **kwargs):
        """
        Upload file to be sent with :func:`.send_message`
        (or :func:`.reply`) as photo. If "peer_id" was passed, file will be
        uploaded for user with "peer_id".

        :param file: photo as file or bytes
        :param kwargs: arguments for vkontakte's methods
        :returns: :class:`.Attachment` or None
        """

        if kwargs.get("peer_id") is None:
            peer_id = self.peer_id
        else:
            peer_id = kwargs.get("peer_id")

        upload_data = await self.manager.request(
            "photos.getMessagesUploadServer", peer_id=peer_id
        )

        if upload_data.error:
            return None

        upload_url = upload_data.response["upload_url"]

        data = aiohttp.FormData()
        data.add_field("photo", file, filename="image.png")

        upload_result = await self._upload_file_to_vk(upload_url, data)

        if not upload_result:
            return None

        attachments = await self.manager.request(
            "photos.saveMessagesPhoto", **upload_result
        )

        if attachments.error:
            return None

        # "photos.saveMessagesPhoto" answers with a list; the first
        # element describes the saved photo.
        return self.manager.create_attachment(
            attachments.response[0], "photo"
        )

    async def get_file_from_attachment(self, attachment):
        """Download an attachment's payload; None when it has no link."""
        if not attachment or not attachment.link:
            return None

        async with self.manager.session.get(attachment.link) as response:
            return await response.read()
| true |
bb4111cd0428ebab85ecd67d5b83a4b7e396977b | Python | anamariadem/Course5 | /domain.py | UTF-8 | 3,442 | 4 | 4 | [] | no_license | from math import gcd
class RationalNumber:
    """
    Abstract data type rational number

    Domain: {a/b where a,b integer numbers, b!=0, greatest common divisor a, b =1}
    Invariant: the fraction is stored reduced and with a positive
    denominator (so 1/-2 is stored as -1/2).
    """

    # total number of RationalNumber instances ever created
    _noOfInstances = 0

    def __init__(self, a, b=1):
        """
        Initialise a rational number.

        a, b integer numbers; raises ValueError when b == 0.
        """
        if b == 0:
            raise ValueError("Denominator cannot be 0!")
        a = int(a)
        b = int(b)
        # Normalise the sign so the denominator is always positive.
        # The original kept negative denominators, which silently broke
        # __lt__ (it multiplies across denominators without sign checks).
        if b < 0:
            a, b = -a, -b
        d = gcd(a, b)
        self._nominator = a // d
        self._denominator = b // d
        RationalNumber._noOfInstances += 1

    @property
    def Denom(self):
        """Denominator (always positive)."""
        return self._denominator

    @Denom.setter
    def Denom(self, value):
        if value == 0:
            raise ValueError("Denominator cannnot be 0!")
        self._denominator = value

    @property
    def Nom(self):
        """Nominator (carries the sign of the fraction)."""
        return self._nominator

    @Nom.setter
    def Nom(self, value):
        self._nominator = value

    def __eq__(self, other):
        '''
        tests the equality of 2 rational numbers
        :param other: a rational number
        :return: true - if the current object and other are equal
        '''
        return self._nominator == other._nominator and self._denominator == other._denominator

    def add(self, a):
        """
        add 2 rational numbers
        a is a rational number
        Return the sum of two rational numbers as an instance of rational number.
        Raise ValueError if the denominators are zero.
        """
        if self.Denom == 0 or a.Denom == 0:
            raise ValueError("0 denominator not allowed")
        return RationalNumber(self._nominator * a._denominator + self._denominator * a._nominator,
                              self._denominator * a._denominator)

    def __add__(self, other):
        '''
        plus sign; delegates to add()
        '''
        return self.add(other)

    def __lt__(self, other):
        '''
        compares 2 rational numbers
        :param other:
        :return: true - if the current object is < other
        '''
        # valid because both denominators are positive (class invariant)
        return self._nominator * other._denominator < self._denominator * other._nominator

    @staticmethod
    def noOfInstances():
        """Number of instances created so far."""
        return RationalNumber._noOfInstances

    def __str__(self):
        '''
        string representation of a rational number ("a/b" or just "a" when b == 1)
        '''
        if self._denominator == 1:
            return str(self._nominator)
        return str(self._nominator) + '/' + str(self._denominator)
def test_rational_add():
    """Adding 1/2 and 1/3 must yield 5/6."""
    total = RationalNumber(1, 2).add(RationalNumber(1, 3))
    assert total.Nom == 5
    assert total.Denom == 6
def testEqual():
    """A rational equals itself and an identically-valued instance."""
    first = RationalNumber(1, 3)
    assert first == first
    second = RationalNumber(1, 3)
    assert first == second
def testCreate():
    """Already-reduced fractions must be stored unchanged."""
    for nom, denom in ((1, 3), (4, 3)):
        number = RationalNumber(nom, denom)
        assert number.Nom == nom
        assert number.Denom == denom
def testAddOperator():
    """The + operator must behave exactly like add()."""
    result = RationalNumber(1, 3) + RationalNumber(1, 2)
    assert result == RationalNumber(5, 6)
def testCompareOperator():
    """< and > must order rationals by numeric value."""
    smaller = RationalNumber(1, 3)
    bigger = RationalNumber(2, 3)
    assert bigger > smaller
    assert smaller < bigger
if __name__ == '__main__':
    # Run the ad-hoc test suite (same order as before), then demo the class.
    for check in (testCreate, test_rational_add, testEqual,
                  testAddOperator, testCompareOperator):
        check()
    print(RationalNumber._noOfInstances)
    print(RationalNumber(5, 6))
| true |
5ab9ce3dd8ca334dcb497716bff08b48865bd08e | Python | ShuDiamonds/MachineLearning | /ML-Explainability/01.py | UTF-8 | 920 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed May 8 13:03:44 2019
@author: shuichi
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
if __name__ == '__main__':
    # Load the FIFA 2018 match statistics and derive a boolean target.
    stats = pd.read_csv('./input/fifa-2018-match-statistics/FIFA 2018 Statistics.csv')
    target = (stats['Man of the Match'] == "Yes")  # Convert from string "Yes"/"No" to binary

    # Keep only the integer-typed columns as model features.
    numeric_columns = [col for col in stats.columns if stats[col].dtype in [np.int64]]
    features = stats[numeric_columns]
    train_X, val_X, train_y, val_y = train_test_split(features, target, random_state=1)
    model = RandomForestClassifier(random_state=0).fit(train_X, train_y)

    # Permutation importance on the held-out split, rendered via eli5.
    import eli5
    from eli5.sklearn import PermutationImportance
    perm = PermutationImportance(model, random_state=1).fit(val_X, val_y)
    eli5.show_weights(perm, feature_names=val_X.columns.tolist())
| true |
c9dc85141809c284b96289f894c7f9102eb03392 | Python | Dataen/tsetlin-connect-4 | /output/find_highest_weight_conv.py | UTF-8 | 4,984 | 2.71875 | 3 | [] | no_license | import numpy as np
# Module-level state shared by the parsing loop and makeData() below.
weights = []  # entries: [weight, clause name, source file, pos_plain, pos_neg, clause state rows]
lowest = 1  # tracks the highest clause weight seen so far (despite the name)
new_lines = []  # dataset rows whose label is not "draw"
#Grab dataset
def makeData():
    """Populate the global ``new_lines`` with every non-draw row of connect-4.data.

    Rows whose last comma-separated field equals "draw" are skipped.
    """
    print("Making dataset")
    # with-statement closes the handle (the original leaked it)
    with open("connect-4.data") as handle:
        content = handle.read()
    for row in content.split("\n"):
        if row.split(",")[-1] != "draw":
            new_lines.append(row)
#Find all clauses with their weights from all clauses in the 10 splits
# Each split file holds 8-line records: "name: weight", two position lines,
# then four clause-state rows.  n counts lines within the current record.
for j in range(10):
filename = "conv_class_1_split_"+str(j+1)+".txt"
file = open(filename).read()
lines = file.split("\n")
n = 0
for i in range(len(lines)-1):
if n==0:
# First line of a record: "<clause name>:<weight>"
clauseTxt = lines[i]
clauseTxt = clauseTxt.split(":")
weight = float(clauseTxt[1])
if weight > lowest:
lowest = weight
clause_state = []
# Position flags live at fixed character offsets of the next two lines.
pos_plain_txt = lines[i+1]
pos_neg_txt = lines[i+2]
pos_plain = [[pos_plain_txt[13], pos_plain_txt[14]], [pos_plain_txt[18],pos_plain_txt[19],pos_plain_txt[20]]]
pos_neg =[[pos_neg_txt[13], pos_neg_txt[14]], [pos_neg_txt[18],pos_neg_txt[19],pos_neg_txt[20]]]
# Four whitespace-separated rows of clause literals follow.
for k in range(4):
state = lines[i+k+3]
state = state.split(" ")
clause_state.append(state)
weights.append([weight, clauseTxt[0], filename, pos_plain, pos_neg, clause_state])
n = n + 1
if n==8:
n = 0
hh=0
makeData()
# Keep only the ten highest-weight clauses (descending order).
weights.sort()
weights.reverse()
weights = weights[hh:hh+10]
results = []
def fits(dataset, clause):
    """Return True when the cells of *dataset* satisfy every literal in *clause*.

    '*' matches anything; 'b', 'x' and 'o' demand that exact symbol; the
    negated literals 'x̄' and 'ō' forbid 'x' and 'o' respectively.  Any
    other literal imposes no constraint.
    """
    for idx, literal in enumerate(clause):
        if literal == "*":
            continue
        if literal in ("b", "x", "o"):
            if dataset[idx] != literal:
                return False
        elif literal == "x̄" and dataset[idx] == "x":
            return False
        elif literal == "ō" and dataset[idx] == "o":
            return False
    return True
def find_pos(pos):
    """Map a position vector of '0'/'1' strings to a coordinate index.

    Returns 0 for the all-zero vector.  For length-2 vectors the first
    active bit wins (1 then 2); for other lengths the highest active bit
    wins (3, 2, 1).  Returns None when nothing matches.
    """
    bits = [int(flag) for flag in pos]
    if len(bits) == 2:
        if bits == [0, 0]:
            return 0
        if bits[0] == 1:
            return 1
        if bits[1] == 1:
            return 2
    else:
        if bits == [0, 0, 0]:
            return 0
        if bits[2] == 1:
            return 3
        if bits[1] == 1:
            return 2
        if bits[0] == 1:
            return 1
    return None
def does_work(x_start, x_end, y_start, y_end, data, list):
    """Scan every anchor inside the window for a 4x4 clause match.

    Returns True as soon as four consecutive rows of *data*, each sliced
    to four columns starting at the anchor, satisfy the corresponding
    rows of the clause pattern in *list*; False when no anchor matches.
    """
    for anchor_x in range(x_start, x_end + 1):
        for anchor_y in range(y_start, y_end + 1):
            # Lazy generator keeps the original short-circuit behaviour:
            # later rows are only sliced when earlier ones already fit.
            if all(fits(data[anchor_x + off][anchor_y:anchor_y + 4], list[off])
                   for off in range(4)):
                return True
    return False
#Run through dataset and all clauses to test their patterns
for clause in weights:
hh = hh + 1
win = 0
loss = 0
print(hh,clause[:-1])
rev = clause[-1]
clause[-1].reverse()
list = []
doneW = False
doneL = False
n = 0
m = 0
# Decode the clause window from the stored position flags.
pos_plain_x = clause[3][1]
pos_plain_y = clause[3][0]
pos_negat_x = clause[4][1]
pos_negat_y = clause[4][0]
x_start = find_pos(pos_plain_x)
x_end = find_pos(pos_negat_x)
y_start = find_pos(pos_plain_y)
y_end = find_pos(pos_negat_y)
# Degenerate windows cannot hold a 4x4 pattern - skip the clause.
if x_start >= x_end:
continue
if y_start >= y_end:
continue
#print("X:", x_start, x_end)
#print("Y:", y_start, y_end)
# Transpose the (reversed) clause rows into 4-column pattern rows.
for a,b,c,d in zip(*rev):
list.append([a,b,c,d])
#list = clause[-1]
# Count how many wins/losses in the dataset match this clause.
for line in new_lines:
data = line.split(",")[:-1]
data = np.reshape(data, (-1, 6))
#
#data = np.rot90(data)
if does_work(x_start, x_end, y_start, y_end, data, list):
data_label = line.split(",")[-1]
if data_label == "win":
win = win + 1
m = m + 1
# Print one example board for each outcome class.
if not doneW and m==500:
print("Example Win Match")
doneW = True
data = np.rot90(data)
data = data.tolist()
for d in data:
print(d[0], d[1], d[2], d[3], d[4], d[5], d[6])
else:
loss = loss + 1
n = n + 1
if not doneL and n==1:
print("Example Loss Match")
doneL = True
data = np.rot90(data)
data = data.tolist()
for d in data:
print(d[0], d[1], d[2], d[3], d[4], d[5], d[6])
print("Wins: ", win, " Losses: ", loss, " Total: ", win+loss)
results.append([clause[:-1], win, loss])
| true |
2701ac289f95407695e869f6884bbbb02718b657 | Python | jsdosanj/Learning-Python-1.0 | /ReadWrite.py | UTF-8 | 1,532 | 3.5 | 4 | [] | no_license | FileName = "./Files/File1.txt"
# Demonstrates several ways of writing to and reading from a text file.
# FileName is defined just above this block.
Contents_str = ""

# Overwrite the file with sample lines ("a" would append instead of "w").
with open(FileName, "w") as FileWrite:
    FileWrite.write("Line1\n")
    FileWrite.write("\n")
    FileWrite.write("\n")
    FileWrite.write("Line2\n")
    FileWrite.write("\n")
    FileWrite.write("\n")

# Read the whole file back in one go; the with-statement fixes the
# original's never-explicitly-guaranteed handle cleanup.
with open(FileName, "r") as FileRead:
    Contents_str = FileRead.read()

print(Contents_str)
print("There are " + str(len(Contents_str)) + " characters in " + FileName + ".")

# Print only the non-empty lines.
Lines = Contents_str.split("\n")
for Line in Lines:
    if len(Line) > 0:
        print(Line)

print("\n###############\n")

# Read the first two lines one at a time with readline().
with open("./Files/File1.txt", "r") as FileRead:
    Line = FileRead.readline()
    Line = Line[0:len(Line) - 1]  # Get rid of the trailing "\n"
    print(Line)
    Line = FileRead.readline()
    Line = Line[0:len(Line) - 1]  # Get rid of the trailing "\n"
    print(Line)

print("\n###############\n")

# Stream the file line by line until EOF.
with open(FileName, "r") as FileRead:
    Line = FileRead.readline()
    while Line:
        if len(Line) > 1:  # Skip lines that only contain "\n"
            print(Line[0:len(Line) - 1])
        Line = FileRead.readline()

print("\n###############\n")

# Slurp every line at once with readlines().
with open(FileName, "r") as FileRead:
    Lines = FileRead.readlines()
for Line in Lines:
    if len(Line) > 1:
        print(Line[0:len(Line) - 1])
# Typo fixed: the original printed "Therer are".
print("There are " + str(len(Lines)) + " lines in " + FileName + ".")
| true |
49a8f70c611fd8c8ce22efc9186c78403bc44733 | Python | sloganking/8-Bit-Computer | /Disassembler/disassembler.py | UTF-8 | 5,184 | 2.984375 | 3 | [
"MIT"
] | permissive | import json
import os
from os import listdir
from os.path import isfile, join
import time
# Disassembler for the 8-bit computer: turns machine-code bytes back into
# assembly text, synthesising labels for jump targets as it goes.
class disassembler:
def __init__(self):
# registers in ISA.
# index in array == machineCode
self.regs = ["A", "B", "C", "D"]
# Instruction layouts whose first / second operand is a jump target
# and therefore must be rendered as a label instead of a constant.
self.firstOperandShouldBeLabel = ["JMP_const", "JNC_const", "JC_const", "JNZ_const", "JZ_const"]
self.secondOperandShouldBeLabel = ["MOV_reg_[const]"]
# takes machineCode bytes and returns a string of assembly
# bytes[0] is the opcode; bytes[1], bytes[2] are the operands (if any).
def __bytesToInstruc(self, bytes: list):
params = self.__binaryToInstrucLayout(bytes[0])
tokenedParams = params.split("_")
instructionString = ""
instructionString = instructionString + \
self.__nameOfInstuc(self.__binaryToInstrucLayout(bytes[0]))
# x can be 1 and 2
for x in range(1, 3):
operand = ""
if len(tokenedParams) > x:
# Bracketed layout tokens (e.g. "[const]") denote memory addresses.
operandIsAddress = False
if tokenedParams[x].startswith('[') and tokenedParams[x].endswith(']'):
operandIsAddress = True
if x == 2:
instructionString = instructionString + ","
if params in self.firstOperandShouldBeLabel and x == 1:
operand = self.__getLabelFor(bytes[x])
if operandIsAddress:
operand = "[" + operand + "]"
instructionString = instructionString + " " + operand
elif params in self.secondOperandShouldBeLabel and x == 2:
operand = self.__getLabelFor(bytes[x])
if operandIsAddress:
operand = "[" + operand + "]"
instructionString = instructionString + " " + operand
elif "reg" in tokenedParams[x]:
if self.__binaryIsReg(bytes[x]):
operand = tokenedParams[x]
operand = operand.replace(
"reg", self.__binaryToReg(bytes[x]))
if operandIsAddress:
operand = "[" + operand + "]"
instructionString = instructionString + " " + operand
elif "const" in tokenedParams[x]:
operand = tokenedParams[x]
operand = operand.replace("const", str(bytes[x]))
if operandIsAddress:
operand = "[" + operand + "]"
instructionString = instructionString + " " + operand
return instructionString
# Returns the label name ("l<N>") for a byte address, creating a new
# label the first time an address is seen.
def __getLabelFor(self, number: int):
# existing label
if number in self.labelValues:
return "l" + str(self.labels[self.labelValues.index(number)])
# label does not exist
else:
self.labels.append(len(self.labels))
self.labelValues.append(number)
return "l" + str(len(self.labels) - 1)
# Maps a machine-code value to a register name; asserts when out of range.
def __binaryToReg(self, binary: int):
try:
return self.regs[binary]
except:
assert (False), f"No reg for given binary: {binary}"
# True when the byte denotes a valid register index.
def __binaryIsReg(self, binary: int):
try:
self.__binaryToReg(binary)
return True
except:
return False
# returns string of instruction's name with it's paramater(s) types
def __binaryToInstrucLayout(self, binary: int):
binary = str(binary)
return self.instrucNames[self.instrucNumbers.index(binary)]
# Strips the operand-type suffixes, leaving just the mnemonic.
def __nameOfInstuc(self, instruc: str):
tokens = instruc.split("_")
return tokens[0]
# Disassembles a whole byte sequence; returns a list of assembly lines
# (with labels inserted before jump-target addresses).
def disassemble(self, inputBytes):
self.linesToReturn = []
# initialize known label number
self.labels = []
self.labelValues = []
# create byteArray with all file bytes
byte = inputBytes
# create and load binaryToIncruc array
with open(f"./instrucToBinary.json") as json_data:
instrucDict = json.load(json_data)
self.instrucNames = list(instrucDict.keys())
self.instrucNumbers = []
for instrucName in self.instrucNames:
self.instrucNumbers.append(instrucDict[instrucName])
# First pass: group bytes into per-instruction bundles and decode them.
instructionBundles = []
i = 0
while(i < len(byte)):
instrucLayout = self.__binaryToInstrucLayout(byte[i])
layoutTokens = instrucLayout.split("_")
instructionBundle = []
for x in range(len(layoutTokens)):
instructionBundle.append(byte[i + x])
instructionBundles.append(
self.__bytesToInstruc(instructionBundle))
# sets i to location of next instruction.
i = i + len(layoutTokens)
# Second pass: emit the text, inserting labels at referenced addresses.
i = 0
for x in range(len(instructionBundles)):
if i in self.labelValues:
self.linesToReturn.append(self.__getLabelFor(i) + ":" + "\n")
self.linesToReturn.append("\t" + instructionBundles[x] + "\n")
# keep track of what bytes we're on
bundleTokens = instructionBundles[x].split(" ")
i = i + len(bundleTokens)
return self.linesToReturn
| true |
d773482d6120af4dfcc86df86fa1aefebea9823b | Python | esthompson1365/gw-data-hw | /Pandas homework/Part_2/Resume-Analysis.py | UTF-8 | 6,537 | 4.125 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Resume Analysis
# _**HARD: This is a curveball assignment. Plus, this is Python without Pandas.**_
#
# #### The objective of this assignment is for you to explain what is happening in each cell in clear, understandable language.
#
# #### _There is no need to code._ The code is there for you, and it already runs. Your task is only to explain what each line in each cell does.
#
# #### The placeholder cells should describe what happens in the cell below it.
# The cell below imports `os` as a dependency because the `os.path.join` function. Also, the `string` dependency is needed because later in the script, `string.punctuation` will be used to detect and remove punctuation symbols. Explain what the line `from collection import Counter` does.
# In[1]:
import os
import string
from collections import Counter
# In the first line of code below we are writing a path to a .md or markdown file. We then create two constants which are defined by all capital letters. These variables cannot be changed.
# In[2]:
# Paths
resume_path = os.path.join(".", 'resume.md')
# Skills to match
REQUIRED_SKILLS = {"excel", "python", "mysql", "statistics"}
DESIRED_SKILLS = {"r", "git", "html", "css", "leaflet", "modeling"}
# Bare expression: a leftover notebook-cell echo, has no effect as a script.
REQUIRED_SKILLS
# Next we create a function called load_file by using def. In this function, we're reading a file, turning all of the text to lowercase and then splitting all the words up into a list. By default the split function will separate text on whitespace.
# In[3]:
def load_file(filepath):
    """Read *filepath* and return its lower-cased, whitespace-split tokens."""
    with open(filepath, "r") as handle:
        text = handle.read()
    return text.lower().split()
# The cell below passes the path we created above into the load_file function to create a list.
# In[6]:
# Grab the text for a Resume
# word_list is the flat list of lower-cased tokens from resume.md.
word_list = load_file(resume_path)
# Replace this with your clear explanation of what happens in the cell below.
# Be sure to answer the following:
# * Why is a `set` created?
# * How are we populating the set
# * Why would it be necessary to create a `punctuation` set?
# * What does subtracting from the set do?
# * * Refer to the `resume = resume - punctuation` line
# * What does `\n` do in a string
# Sets are like lists, but unordered, so the items have no index. We use a set here because it keeps only unique words, and we can still loop through its items with a for loop.
#
# We create a punctuation set to identify all stop words and special characters. Substracting the punctuation from the resume, removes these stop words and special characters from the resume set.
#
# Note that the use of \n in a string creates a new line.
# In[12]:
# Create a set of unique words from the resume
resume = set()
# Bare expression: leftover notebook echo, has no effect as a script.
resume
# HINT: Single elements in a programming language are often referred to as tokens
for token in word_list:
resume.add(token)
print('\nWORDS BEFORE MOVING PUNCTUATION')
print(resume)
# Remove Punctuation that were read as whole words
punctuation = set(string.punctuation)
# HINT: Attributes that are in `resume` that are not in `punctuation` (difference)
resume = resume - punctuation
print('\nWORDS AFTER MOVING PUNCTUATION')
print(resume)
# In the first two paragraphs below, we use a set intersection to determine which required and desired skills this resume has.
#
# Next, we do some character cleaning. Unlike word cleaning, character cleaning removes special characters from inside words.
#
# After cleaning our words with some automated solutions, we notice there are still words that we don't want in our word_list. So we create a variable called stop_words with additional words that we want to remove. Then we overwrite our word_list excluding these stop_words using a list comprehension.
# In[15]:
# Calculate the Required Skills Match using Set Intersection
print('REQUIRED SKILLS')
print(resume & REQUIRED_SKILLS)
# Calculate the Desired Skills Match using Set Intersection
print('DESIRED SKILLS')
print(resume & DESIRED_SKILLS)
# Word Punctuation Cleaning
# Drops tokens that are themselves punctuation characters.
word_list = [word for word in word_list if word not in string.punctuation]
print('\nWORD LIST AFTER PUNCTUATION REMOVAL')
print(word_list)
# Character Punctuation Cleaning
# Strips punctuation characters embedded inside each remaining token.
word_list = [''.join(char for char in word if char not in string.punctuation) for word in word_list]
print('\nWORD LIST AFTER CHARACTER PUNCTUATION REMOVAL')
print(word_list)
# Clean Stop Words
stop_words = ["and", "with", "using", "##", "working", "in", "to"]
word_list = [word for word in word_list if word not in stop_words]
print('\nWORD LIST AFTER STOP WORDS')
print(word_list)
# * Collections.counter is optional, but explain the difference between the `for loop` and using `Counter`
#
# Below we create a dictionary named `word_count`. The keys for the dictionary are our elements in `word_list` and the values are set to 0.
#
# The for loop that we created looks at every word in our `word_list` and if those words mathc a word in our word_count dictionary, it adds 1 to the value of that respective key.
#
# You can also use counter here. Note that counter creates a series, while the for loop appends a dictionary.
# In[17]:
# Resume Word Count
# ==========================
# Initialize a dictionary with default values equal to zero
word_count = {}.fromkeys(word_list, 0)
# Loop through the word list and count each word.
for word in word_list:
word_count[word] += 1
# print(word_count)
# Bonus using collections.Counter
word_counter = Counter(word_list)
# print(word_counter)
# Comparing both word count solutions
# Counter compares equal to a plain dict with the same counts.
print(word_count == word_counter)
# Top 10 Words
print("Top 10 Words")
print("=============")
# Replace this with your clear explanation of what happens in the cell below. Which column was sorted and how? How was the top ten selected? Does that explain the significance of `[:10]`?
#
# In the for loop below, the first thing that happens is the the dictionary is sorted. By deafult the dictionary will be sorted on the values.
#
# Then we take words from the beginning to position 10 and print their key and value.
# In[24]:
# Sort words by count and print the top 10
# NOTE(review): sorted_words is assigned but never used afterwards.
sorted_words = []
for word in sorted(word_count, key=word_count.get, reverse=True)[:10]:
print(f"Token: {word:20} Count: {word_count[word]}")
| true |
f74ba85b54a0dc232d781b62da98b8170608028a | Python | xiaobaiyizhi/xlbbtest | /decorator.py | UTF-8 | 567 | 3 | 3 | [] | no_license | from time import sleep, time
from base_config import PATH
import os
def time_decorator(function):  # timing decorator: measures how long a function call takes
    """Decorator that measures a call's wall-clock time.

    The elapsed whole seconds are appended to result/result_temp.txt under
    PATH and printed.  The wrapped function's return value is now passed
    through (the original wrapper silently dropped it), and functools.wraps
    preserves the wrapped function's metadata.
    """
    from functools import wraps

    @wraps(function)
    def wrapper(*args, **kwargs):
        start_time = time()
        result = function(*args, **kwargs)
        with open(os.path.join(PATH, 'result', 'result_temp.txt'), 'a', encoding='utf-8') as f:
            f.write(str(int(time() - start_time)) + 's')
            f.write('\n')
        print('time=' + str((time() - start_time)) + 's')
        print('--------------------------')
        return result
    return wrapper
| true |
c1e1a8f435390a5b7fda6d1c4f5aa42158c9ed64 | Python | RogelioLozano/Language_statistics_twitter | /fitting_param/estimate_param.py | UTF-8 | 4,919 | 2.5625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from scipy.optimize import curve_fit
from copy import deepcopy
import os
import pandas as pd
import seaborn as sns
# Plotting style and root folder of the per-country result files.
sns.set(style='ticks')
prepath = 'Datos_todoslosPaises/'
def EvalnormCdf(x, mu, sigma):
    """Lognormal-style CDF: the normal CDF evaluated at log10(x)."""
    log_x = np.log10(x)
    return scipy.stats.norm.cdf(log_x, loc=mu, scale=sigma)
def ajuste(data_fit, funcion):
    """Fit *funcion* to the (x, y) columns of *data_fit*.

    Returns the optimal parameter vector from scipy's curve_fit; the
    covariance matrix is discarded.
    """
    xs, ys = data_fit[:, 0], data_fit[:, 1]
    params, _covariance = curve_fit(funcion, xs, ys)
    return params
def get_parametros(country, level, ngrams, timeint, totalgrams):
    """Load one rank-distance file and return its fitted (mu, sigma)."""
    filename = prepath + '{}/Level_{}/results_{}grams/{}hour_{}grams_RD.txt'.format(
        country, level, totalgrams, timeint, ngrams)
    rank_data = np.loadtxt(filename)
    return ajuste(rank_data, EvalnormCdf)
# Grid of n-gram sizes and time intervals (hours) to sweep over.
NGRAMS = [1,2,3,4,5]
TIME = [3,6,12,24,48,96]
countries = ["Mexico", "United_Kingdom","Spain","Argentina"]
totalgrams = 1000
pre = os.path.join(os.getenv("HOME"),'parametros_valores')
for country in countries:
# <--------------___!!!!!!!!
#auxiliary code <-------------------- what follows must be modified if you add countries
dist4country = { "Mexico":np.arange(0,11), "United_Kingdom":np.arange(0,10),"Spain":np.arange(0,9),"India":np.arange(0,11),'Argentina':np.arange(0,11)}
base=2
# Spatial scales: powers of two times 3 km, expressed in metres.
distancias = np.power(base, dist4country[country]).astype(float)*3*1000
SPATIAL = dist4country[country]
#ngrams variation
#geographical scale fixed
for level in SPATIAL:
scheme = dict( zip( ['ti='+str(i) for i in TIME], [ [] for k in range(len(TIME)) ]) )
muandsigma = {"mu":scheme, "sigma":deepcopy(scheme)}
# <--------------___!!!!!!!!
#what follows must grow with the number of countries: it is the number of repetitions in the muandsigma list
Parametros_ti = dict([(count,elem) for count,elem in zip(countries,[muandsigma,muandsigma,muandsigma,muandsigma])])
for timeint in TIME:
for ngram in NGRAMS:
try:
mu,sigma = get_parametros(country,level,ngram,timeint,totalgrams)
except RuntimeError:
# curve_fit failed to converge - record NaNs instead.
mu,sigma = np.nan,np.nan
Parametros_ti[country]['mu']['ti={}'.format(timeint)].append(mu)
Parametros_ti[country]['sigma']['ti={}'.format(timeint)].append(sigma)
df_mu = pd.DataFrame(Parametros_ti[country]['mu'],index=NGRAMS)
df_sigma = pd.DataFrame(Parametros_ti[country]['sigma'],index=NGRAMS)
savingpathmu = os.path.join(pre,"{}ngrams".format(totalgrams),country,'display_ngrams','mu_vs_temporal')
savingpathsigma = os.path.join(pre,'{}ngrams'.format(totalgrams),country,'display_ngrams','sigma_vs_temporal')
if not os.path.exists(savingpathmu) or not os.path.exists(savingpathsigma):
os.makedirs(savingpathmu)
os.makedirs(savingpathsigma)
df_mu.to_csv(os.path.join(savingpathmu,'{}Km'.format(distancias[level]/1000)))
df_sigma.to_csv(os.path.join(savingpathsigma,'{}Km'.format(distancias[level]/1000)))
# time interval fixed
for timeint in TIME:
tmpmu = dict( zip( dist4country[country], [[] for i in dist4country[country]] ) )
tmpsigma = deepcopy(tmpmu)
#MODIFY BELOW IF COUNTRIES ARE ADDED - they must also be added to Parametros_sc <--------------___!!!!!!!!
Parametros_sc = {"Mexico":{'mu':tmpmu ,'sigma':tmpsigma }, "United_Kingdom":{'mu':tmpmu ,'sigma':tmpsigma },'Spain':{'mu':tmpmu,'sigma':tmpsigma},'India':{'mu':tmpmu ,'sigma':tmpsigma },'Argentina':{'mu':tmpmu ,'sigma':tmpsigma}}
for level in SPATIAL:
for ngram in NGRAMS:
try:
mu,sigma=get_parametros(country,level,ngram,timeint,totalgrams)
except RuntimeError:
mu,sigma = np.nan,np.nan
Parametros_sc[country]['mu'][level].append(mu)
Parametros_sc[country]['sigma'][level].append(sigma)
df_mu = pd.DataFrame({ str(distancia): Parametros_sc[country]["mu"][i] for(distancia,i) in zip( distancias,range(len(distancias))) },index=NGRAMS)
df_sigma = pd.DataFrame({ str(distancia): Parametros_sc[country]["sigma"][i] for(distancia,i) in zip( distancias,range(len(distancias))) },index=NGRAMS)
savingpathmu = os.path.join(pre,"{}ngrams".format(totalgrams),country,'display_ngrams','mu_vs_spatial')
savingpathsigma = os.path.join(pre,'{}ngrams'.format(totalgrams),country,'display_ngrams','sigma_vs_spatial')
if not os.path.exists(savingpathmu) or not os.path.exists(savingpathsigma):
os.makedirs(savingpathmu)
os.makedirs(savingpathsigma)
df_mu.to_csv(os.path.join(savingpathmu,'{}hrs'.format(timeint)))
df_sigma.to_csv(os.path.join(savingpathsigma,'{}hrs'.format(timeint)))
| true |
fce516963d2510e97d093ab72633994841c86172 | Python | dinesh-parthasarathy/DeepLearning | /Ex04_Pytorch/data.py | UTF-8 | 1,735 | 2.609375 | 3 | [] | no_license | from torch.utils.data import Dataset
import torch
from skimage.io import imread
from skimage.color import gray2rgb
import torchvision.transforms as transforms
import pandas as pd
# Per-channel normalization statistics; the three channels are identical
# because the images start out grayscale and are replicated to RGB.
train_mean = [0.59685254, 0.59685254, 0.59685254]
train_std = [0.16043035, 0.16043035, 0.16043035]
class ChallengeDataset(Dataset):
    """Dataset over a dataframe whose first column is an image path and
    whose remaining columns are float targets."""

    def __init__(self, data: pd.DataFrame, mode: str):
        self.data_frame = data
        self.transform = None
        # data augmentation: both modes normalize, training also jitters colors
        if mode == 'train':
            steps = [
                transforms.ToPILImage(),
                # Dynamic Data Augmentation
                # transforms.RandomHorizontalFlip(p=0.5),
                # transforms.RandomVerticalFlip(p=0.5),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),
                transforms.ToTensor(),
                transforms.Normalize(train_mean, train_std),
            ]
            self.transform = transforms.Compose(steps)
        elif mode == 'val':
            steps = [
                transforms.ToPILImage(),
                transforms.ToTensor(),
                transforms.Normalize(train_mean, train_std),
            ]
            self.transform = transforms.Compose(steps)
        # TODO: Data augmentation to rotate images randomly at fixed angles (90,180,270)

    def __len__(self):
        """Number of samples in the dataframe."""
        return len(self.data_frame)

    def __getitem__(self, idx):
        """Return the (image, targets) pair for row *idx*."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img = imread('./' + self.data_frame.iloc[idx, 0])
        img = gray2rgb(img)
        if self.transform:
            img = self.transform(img)
        labels = self.data_frame.iloc[idx, 1:]
        labels = torch.tensor(labels, dtype=torch.float32)
        return (img, labels)
| true |
5eddac665c222dfba26e1afad94e0f7fc01a652b | Python | karroje/McAllister | /rate_tools/SequenceGen.py | UTF-8 | 3,155 | 2.78125 | 3 | [] | no_license | """
Copyright (c) 2015, Michael McAllister and Dr. John Karro
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import random
def weightedchoice(items): # this doesn't require the numbers to add up to 100
return random.choice("".join(x * y for x, y in items))
def changeChars(perChange, strToChange):
newString =""
for c in strToChange:
if random.random() < perChange:
choices = [("A",1),("C",1),("G",1),("T",1)]
choices.remove((c,1))
c = weightedchoice(choices)
newString +=c
return newString
def makeFastaSequence(outputFile = "testDNASeq", repeatSeq ="TTGCAATACACAAGTGATCG",
len = 500, numRepeats=25, maxChange =.5):
outFile = open(outputFile,"w")
random.seed(1)
repeats = 0
outFile.write(">" + outputFile +" \n")
for i in range(0,len):
if i % (len/numRepeats) == 0:
outString = changeChars(maxChange*repeats/numRepeats,repeatSeq)
outFile.write(outString)
repeats +=1
outFile.write("\n");
else:
choices = [("A",1),("C",1),("G",1),("T",1)]
c = weightedchoice(choices)
outFile.write(c)
def makeAlignmentSequences(outputFile = "testAlign", repeatSeq ="TTGCAATACACAAGTGATCG",
numAligns=5, maxChange =.5):
outFile = open(outputFile,"w")
random.seed(1)
alignments = 0
outFile.write("# STOCKHOLM 1.0 \n")
for i in range(0,numAligns):
outString = changeChars(maxChange*alignments/numAligns,repeatSeq)
outFile.write("Sample" + str(alignments) + " ")
outFile.write(outString)
alignments +=1
outFile.write("\n");
outFile.write("//")
makeAlignmentSequences() | true |
db548345c508a8e186531601d358b0f65492bf68 | Python | Xin128/lyft_data_challenge | /factors_active_drives.py | UTF-8 | 6,362 | 2.8125 | 3 | [] | no_license |
import pandas as pd
import datetime
import numpy as np
# Load the Pandas libraries with alias 'pd'
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', -1)
data_ride_id = pd.read_csv("drop_out_drivers 2.csv")
driver_id_table = pd.read_csv("driver_ids.csv")
data_total_table = pd.read_csv('/Users/xinhao/Downloads/lyft_data_challenge/final/erin_code.csv')
print(driver_id_table)
def compute_drives_per_day(current_table):
"""
compute the number of drives per day
:param id:
:return:
"""
date_sec = list(current_table['picked_up_at'])
date = [x[:10] for x in date_sec]
if len(set(date)) == 0:
return 0
else:
return float(len(date)) / len(set(date))
def compute_total_duration(current_table):
total_dur = sum(current_table['ride_duration']/60)
return total_dur
def compute_profit(current_table):
"""
compute Avearge Profit per drive (average earning/day)
have not considered prime time
:param current_table:
:return:
"""
total = 0
for ind,drive in current_table.iterrows():
profit_per_drive = (2+1.15*drive['ride_distance']/1609.34+0.22*drive['ride_duration']/60)
profit_after_prime = profit_per_drive+ drive['ride_prime_time']/float(drive['ride_duration'])*profit_per_drive*0.22
if profit_after_prime < 5:
profit_after_prime = 5
elif profit_after_prime >400:
profit_after_prime = 400
total += profit_after_prime
return total / float(len(current_table))
def compute_profit_per_day(current_table):
total = 0
for ind,drive in current_table.iterrows():
profit_per_drive = 2+1.15*drive['ride_distance']/1609.34+0.22*drive['ride_duration']/60+1.75
if profit_per_drive < 5:
profit_per_drive = 5
elif profit_per_drive >400:
profit_per_drive = 400
total += profit_per_drive
# print(total,len(current_table),total / float(len(current_table)))
date_sec = list(current_table['picked_up_at'])
date = [x[:10] for x in date_sec]
if len(set(date)) == 0:
return 0
else:
return total / len(set(date))
def compute_responding_time(current_table):
"""
# compute average responding time
# :param current_table:
# :return:
# """
responding_time = datetime.timedelta()
for ind,drive in current_table.iterrows():
try:
request = datetime.datetime.strptime(drive['requested_at'],'%Y-%m-%d %H:%M:%S')
pick_up = datetime.datetime.strptime(drive['picked_up_at'],'%Y-%m-%d %H:%M:%S')
time = pick_up-request
responding_time = responding_time+time
responding_time = int(responding_time.seconds)
except:
continue
return responding_time/float(len(current_table))
def compute_arrival_time(current_table):
"""
# compute average responding time
# :param current_table:
# :return:
# """
responding_time = datetime.timedelta()
for ind,drive in current_table.iterrows():
try:
request = datetime.datetime.strptime(drive['accepted_at'],'%Y-%m-%d %H:%M:%S')
pick_up = datetime.datetime.strptime(drive['arrived_at'],'%Y-%m-%d %H:%M:%S')
time = pick_up-request
responding_time = responding_time+time
responding_time = int(responding_time.seconds)
except:
continue
return responding_time/float(len(current_table))
def compute_waiting_time(current_table):
"""
# compute average responding time
# :param current_table:
# :return:
# """
responding_time = datetime.timedelta()
for ind,drive in current_table.iterrows():
try:
request = datetime.datetime.strptime(drive['picked_up_at'],'%Y-%m-%d %H:%M:%S')
pick_up = datetime.datetime.strptime(drive['arrived_at'],'%Y-%m-%d %H:%M:%S')
time = pick_up-request
responding_time = responding_time+time
responding_time = int(responding_time.seconds)
except:
continue
return responding_time/float(len(current_table))
def compute_speed(current_table):
total_dist = sum(list(current_table['ride_distance']))
total_duration = sum(list(current_table['ride_duration']))
return float(total_dist)/total_duration
def compute_pearson_coefficient(factors):
"""
compute pearson_coefficient with career length
:param x:
:param carreer_len:
:return:
"""
for col in factors.columns:
if col != 'driver_id':
print(col, factors[col].corr(factors['career_len']))
def compute_career_length(current_table):
"""
:param current_table:
:return:
"""
current_table = current_table.sort_values(['requested_at'], ascending=False)
last_ride = datetime.datetime.strptime(current_table.iloc[0]['picked_up_at'], '%Y-%m-%d %H:%M:%S')
driver_id = current_table.iloc[0]['driver']
find_onboard = driver_id_table[driver_id_table['driver_id'] == driver_id].iloc[0]['driver_onboard_date']
onboard_time = datetime.datetime.strptime(find_onboard, '%Y-%m-%d %H:%M:%S')
time = last_ride - onboard_time
career_len = int(time.days)
return career_len
factors = pd.DataFrame(columns = ['driver_id','career_len','rides_per_day','profit','responding','arrival','waiting','speed','total_duration'])
for driver_id in driver_id_table['driver_id']:
current_table = data_total_table[data_total_table['driver'] == driver_id]
if len(current_table) > 0:
num_per_day = compute_drives_per_day(current_table)
profit = compute_profit_per_day(current_table)
responding_time = compute_responding_time(current_table)
arrival_time = compute_arrival_time(current_table)
waiting_time = compute_waiting_time(current_table)
speed = compute_speed(current_table)
dur = compute_total_duration(current_table)
career_len = compute_career_length(current_table)
factors.loc[-1] = [driver_id, career_len,num_per_day, profit,responding_time,arrival_time,waiting_time,speed,dur]
factors.index = factors.index + 1
factors = factors.sort_index()
print(len(factors))
factors.to_csv('main_factors_new.csv')
# print(data_total_table)
| true |
ce3655586c1cca6c63d476f931c34bbce53dce16 | Python | Ben-Lall/AnotherTentativelyNamedRoguelike | /symbol.py | UTF-8 | 225 | 3.0625 | 3 | [] | no_license | class Symbol:
"""A more complex character that has color and displacement."""
def __init__(self, char, color, dx=0, dy=0):
self.color = color
self.char = char
self.dx = dx
self.dy = dy
| true |
d451762a64535dcbf5264e9611ea16e5d7961fc6 | Python | Somg10/PythonBasic | /M1 Start Coding - Python.py | UTF-8 | 313 | 2.90625 | 3 | [] | no_license | #{
#Driver Code Starts
#Initial Template for Python 3
# } Driver Code Ends
#User function Template for python3
def print_fun():
# Your code here
# Please follow proper indentation
#{
#Driver Code Starts.
def main():
print_fun()
if __name__ == '__main__':
main()
#} Driver Code Ends
| true |
3aab4f8203b437c6622473b8bfa05900f4179134 | Python | j00hyun/pre-education | /quiz/pre_python_09.py | UTF-8 | 525 | 4.1875 | 4 | [] | no_license | """
9. 점수 구간에 해당하는 학점이 아래와 같이 정의되어 있다.
점수를 입력했을 때 해당 학점이 출력되도록 하시오.
81~100 : A
61~80 : B
41~60 : C
21~40 : D
0~20 : F
예시
<입력>
score : 88
<출력>
A
"""
print('<입력>')
scr = input('score : ')
print('<출력>')
if int(scr) < 21:
grade = 'F'
elif int(scr) < 41:
grade = 'D'
elif int(scr) < 61:
grade = 'C'
elif int(scr) < 81:
grade = 'B'
else:
grade = 'A'
print(grade) | true |
9be9dc27fb939afa291f59aaf944a85e072cb57e | Python | Irain-LUO/Daily-DeepLearning | /05-Machine-Learning-Code/sklearn/preprocessing/Standardization.py | UTF-8 | 928 | 2.828125 | 3 | [
"MIT"
] | permissive | from sklearn import preprocessing
import numpy as np
X_train = np.array([[ 1., -1., 2.],
[ 2., 0., 0.],
[ 0., 1., -1.]])
X_scaled = preprocessing.scale(X_train)
print(X_scaled)
# [[ 0. -1.22474487 1.33630621]
# [ 1.22474487 0. -0.26726124]
# [-1.22474487 1.22474487 -1.06904497]]
print(X_scaled.mean(axis=0))
#[0. 0. 0.]
print(X_scaled.std(axis=0))
#[1. 1. 1.]
scaler = preprocessing.StandardScaler().fit(X_train)
print(scaler)
#StandardScaler(copy=True, with_mean=True, with_std=True)
print(scaler.mean_)
#[1. 0. 0.33333333]
print(scaler.scale_)
#[0.81649658 0.81649658 1.24721913]
a = scaler.transform(X_train)
print(a.mean(axis=0))
print(a.std(axis=0))
# [0. 0. 0.]
# [1. 1. 1.]
X_test = [[-1., 1., 0.]]
b = scaler.transform(X_test)
print(b.mean(axis=0))
#[-2.44948974 1.22474487 -0.26726124]
| true |
26558b8d096a76494034346543694b8ecdb43ada | Python | taniamukherjee/python-challenge | /Task1/main.py | UTF-8 | 2,033 | 3.71875 | 4 | [] | no_license |
#homework3, Task1:PyBank
# define veriables , import data
# First import the os module
# This will allow us to create file paths across operating systems
import os
csvpath = os.path.join('PyBank', 'budget_data_1.csv')
import csv
with open(csvpath, newline='') as csvfile:
# CSV reader specifies delimiter and variable that holds contents
data = csv.reader(csvfile, delimiter=',')
#1 The total number of months included in the dataset
dates, revenue, rev_change = [], [], []
i=0
for row in data:
dates.append(row[0])
revenue.append(row[1])
i=i+1
print ('-' * 50) #separationlines
print ('-' * 50)
print ("Financial Analysis")
print ('~' * 18)
print ("Total months:",(i-1) ) #print total month
#2 The total amount of revenue gained over the entire period
revenue_total=0
for j in range (1,i) :
val = revenue[j]
if type(int(val)) == int :
revenue_total = revenue_total + int(val)
if j > 1 :
rev_change.append(int(revenue[j])- int(revenue[j-1]))
# 3 The greatest increase in revenue (date and amount) over the entire period
# The greatest decrease in revenue (date and amount) over the entire period
max_change = int(rev_change[0])
min_change = int(rev_change[0])
Rev_change_total = 0
for j in range (0,i-2) :
if int(max_change) < int(rev_change[j]):
max_change = rev_change[j]
date1 = dates[j+2]
if int(min_change) > int(rev_change[j]):
min_change = rev_change[j]
date2 = dates[j+2]
Rev_change_total = Rev_change_total + rev_change[j]
# 4 The average change in revenue between months over the entire period
Average = Rev_change_total/(i-2)
#print all results
print ("Total Revenue : ","$",revenue_total, sep='')
print ("Average Revenue Change: ","$",Average)
print ("Greatest increase in Revenue: ",date1," (",'$',max_change,")", sep='')
print ("Greatest decrease in Revenue: ",date2," (","$",min_change,")", sep='')
print ('-' * 50) #separationlines
print ('-' * 50)
#the end
| true |
f792786efc759a8df542f5b394089a9b00e8f29b | Python | OhkuboSGMS/DeepLearningGroundZero | /Plots/plotTest.py | UTF-8 | 330 | 3.140625 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
def step_function(x):
return np.array(x>0,dtype =np.int)
def sigmoid(x):
return 1/(1+np.exp(-x))
def relu(x):
return np.maximum(0,x)
x =np.arange(-5.0,5.0,0.1)
# y =step_function(x)
y =sigmoid(x)
plt.plot(x,y)
plt.ylim(-0.1,1.1)#yの範囲を指定
plt.show()
| true |
b9e0bd42f765c4763875658bdfc31b6a96a3555a | Python | PeterLiao/FindTrain | /TrainLocation/utils.py | UTF-8 | 5,007 | 3.203125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
__author__ = 'peter_c_liao'
from datetime import timedelta
from django.utils.timezone import utc
import datetime
import math
class GeoDirection:
N = 0
NE = 1
E = 2
SE = 3
S = 4
SW = 5
W = 6
NW = 7
class Direction:
NORTH = 0
SOUTH = 1
OTHERS = 2
def get_utc_now():
return datetime.datetime.utcnow().replace(tzinfo=utc)
def get_local_now():
return get_utc_now() + timedelta(hours=8)
def get_dist(lat1, long1, lat2, long2):
# Convert latitude and longitude to
# spherical coordinates in radians.
degrees_to_radians = math.pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1)*degrees_to_radians
phi2 = (90.0 - lat2)*degrees_to_radians
# theta = longitude
theta1 = long1*degrees_to_radians
theta2 = long2*degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
# (1, theta, phi) and (1, theta, phi)
# cosine( arc length ) =
# sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +
math.cos(phi1)*math.cos(phi2))
arc = math.acos( cos )
# Remember to multiply arc by the radius of the earth
# in your favorite set of units to get length.
return arc * 6373.0
def get_train_direction(geo__direction):
train_direction = Direction.OTHERS
if geo__direction in [GeoDirection.N, GeoDirection.NE, GeoDirection.NW, GeoDirection.E]:
train_direction = Direction.NORTH
elif geo__direction in [GeoDirection.S, GeoDirection.SE, GeoDirection.SW, GeoDirection.W]:
train_direction = Direction.SOUTH
print 'train direction:', train_direction
return train_direction
def get_train_direction_for_taoyuan_above(geo__direction):
train_direction = Direction.OTHERS
if geo__direction in [GeoDirection.N, GeoDirection.NE, GeoDirection.E, GeoDirection.SE]:
train_direction = Direction.NORTH
elif geo__direction in [GeoDirection.S, GeoDirection.SW, GeoDirection.W, GeoDirection.NW]:
train_direction = Direction.SOUTH
print '[taoyuan above] train direction:', train_direction
return train_direction
def get_geo_direction_by_moving(lat1, long1, lat2, long2):
radians = math.atan2((long2 - long1), (lat2 - lat1))
compassReading = radians * (180 / math.pi);
coordNames = [GeoDirection.N, GeoDirection.NE, GeoDirection.E, GeoDirection.SE, GeoDirection.S, GeoDirection.SW, GeoDirection.W, GeoDirection.NW, GeoDirection.N]
coordIndex = int(round(compassReading / 45))
if coordIndex < 0:
coordIndex = coordIndex + 8
print 'geo direction by moving:', coordNames[coordIndex]
return coordNames[coordIndex]
def get_geo_direction(heading):
print 'heading:', heading
geo_direction = GeoDirection.N
if 0 <= heading <= 22.5 or (360-22.5) < heading <= 360:
geo_direction = GeoDirection.N
elif 22.5 < heading <= (45+22.5):
geo_direction = GeoDirection.NE
elif (45+22.5) < heading <= (90+22.5):
geo_direction = GeoDirection.E
elif (90+22.5) < heading <= (180-22.5):
geo_direction = GeoDirection.SE
elif (180-22.5) < heading <= (180+22.5):
geo_direction = GeoDirection.S
elif (180+22.5) < heading <= (270-22.5):
geo_direction = GeoDirection.SW
elif (270-22.5) < heading <= (270+22.5):
geo_direction = GeoDirection.W
elif (270+22.5) < heading <= (360-22.5):
geo_direction = GeoDirection.NW
print 'geo direction:', geo_direction
return geo_direction
def get_train_direction_by_moving(lat1, long1, lat2, long2):
geo_direction = get_geo_direction_by_moving(lat1, long1, lat2, long2)
return get_train_direction(geo_direction)
def show_schedule_list(schedule_list):
for item in schedule_list:
print item["train_number"], ',', item["train_station"], ',', item["arrive_time"]
def strfdelta(tdelta, fmt):
d = {"days": tdelta.days}
d["hours"], rem = divmod(tdelta.seconds, 3600)
d["minutes"], d["seconds"] = divmod(rem, 60)
return fmt.format(**d)
def get_formatted_timedelta_by_now(date):
tdelta = date - get_utc_now() - timedelta(hours=8)
if tdelta.days < 1 and tdelta.seconds < 60:
return strfdelta(tdelta, "{seconds} 秒")
elif tdelta.days < 1 and tdelta.seconds < 60*60:
return strfdelta(tdelta, "{minutes} 分鐘")
elif tdelta.days < 1:
return strfdelta(tdelta, "{hours} 小時 {minutes} 分")
return strfdelta(tdelta, "{days} 天")
def parse_datetime(datetime_str):
time_list = datetime_str.split(":")
if len(time_list) < 2:
return datetime.datetime(1982, 5, 31, 0, 0, tzinfo=utc)
d = timedelta(hours=int(time_list[0]), minutes = int(time_list[1]))
d2 = timedelta(hours=get_local_now().hour, minutes=get_local_now().minute)
today = get_local_now() - d2 + d
return today
| true |
28b827c70087a883839cd7434c455c04f2fd0f1f | Python | TOTeGuard/hassio | /hassio/host/alsa.py | UTF-8 | 4,384 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | """Host Audio support."""
import logging
import json
from pathlib import Path
from string import Template
import attr
from ..const import ATTR_INPUT, ATTR_OUTPUT, ATTR_DEVICES, ATTR_NAME, CHAN_ID, CHAN_TYPE
from ..coresys import CoreSysAttributes
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
DefaultConfig = attr.make_class("DefaultConfig", ["input", "output"])
AUDIODB_JSON: Path = Path(__file__).parents[1].joinpath("data/audiodb.json")
ASOUND_TMPL: Path = Path(__file__).parents[1].joinpath("data/asound.tmpl")
class AlsaAudio(CoreSysAttributes):
"""Handle Audio ALSA host data."""
def __init__(self, coresys):
"""Initialize ALSA audio system."""
self.coresys = coresys
self._data = {ATTR_INPUT: {}, ATTR_OUTPUT: {}}
self._cache = 0
self._default = None
@property
def input_devices(self):
"""Return list of ALSA input devices."""
self._update_device()
return self._data[ATTR_INPUT]
@property
def output_devices(self):
"""Return list of ALSA output devices."""
self._update_device()
return self._data[ATTR_OUTPUT]
def _update_device(self):
"""Update Internal device DB."""
current_id = hash(frozenset(self.sys_hardware.audio_devices))
# Need rebuild?
if current_id == self._cache:
return
# Clean old stuff
self._data[ATTR_INPUT].clear()
self._data[ATTR_OUTPUT].clear()
# Init database
_LOGGER.info("Update ALSA device list")
database = self._audio_database()
# Process devices
for dev_id, dev_data in self.sys_hardware.audio_devices.items():
for chan_info in dev_data[ATTR_DEVICES]:
chan_id = chan_info[CHAN_ID]
chan_type = chan_info[CHAN_TYPE]
alsa_id = f"{dev_id},{chan_id}"
dev_name = dev_data[ATTR_NAME]
# Lookup type
if chan_type.endswith("playback"):
key = ATTR_OUTPUT
elif chan_type.endswith("capture"):
key = ATTR_INPUT
else:
_LOGGER.warning("Unknown channel type: %s", chan_type)
continue
# Use name from DB or a generic name
self._data[key][alsa_id] = (
database.get(self.sys_machine, {})
.get(dev_name, {})
.get(alsa_id, f"{dev_name}: {chan_id}")
)
self._cache = current_id
@staticmethod
def _audio_database():
"""Read local json audio data into dict."""
try:
return json.loads(AUDIODB_JSON.read_text())
except (ValueError, OSError) as err:
_LOGGER.warning("Can't read audio DB: %s", err)
return {}
@property
def default(self):
"""Generate ALSA default setting."""
# Init defaults
if self._default is None:
database = self._audio_database()
alsa_input = database.get(self.sys_machine, {}).get(ATTR_INPUT)
alsa_output = database.get(self.sys_machine, {}).get(ATTR_OUTPUT)
self._default = DefaultConfig(alsa_input, alsa_output)
# Search exists/new output
if self._default.output is None and self.output_devices:
self._default.output = next(iter(self.output_devices))
_LOGGER.info("Detect output device %s", self._default.output)
# Search exists/new input
if self._default.input is None and self.input_devices:
self._default.input = next(iter(self.input_devices))
_LOGGER.info("Detect input device %s", self._default.input)
return self._default
def asound(self, alsa_input=None, alsa_output=None):
"""Generate an asound data."""
alsa_input = alsa_input or self.default.input
alsa_output = alsa_output or self.default.output
# Read Template
try:
asound_data = ASOUND_TMPL.read_text()
except OSError as err:
_LOGGER.error("Can't read asound.tmpl: %s", err)
return ""
# Process Template
asound_template = Template(asound_data)
return asound_template.safe_substitute(input=alsa_input, output=alsa_output)
| true |
54e76eeaf420618991f482359ae715a2b78efe45 | Python | danschaffer/aoc | /2016/day12.py | UTF-8 | 2,766 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env python
import time
class Day12:
def __init__(self, file, verbose=False):
self.verbose = verbose
self.reset()
self.instructions = {
'cpy': self.cpy,
'inc': self.inc,
'dec': self.dec,
'jnz': self.jnz
}
self.lines = open(file).read().strip().split('\n')
def cpy(self, args):
value = args[0]
if value in self.registers:
value = self.registers[value]
else:
value = int(value)
dest = args[1]
self.registers[dest] = value
if self.verbose:
print(f"{self.instruction_pointer} cpy {args[0]} {args[1]} {self.registers}")
self.instruction_pointer += 1
def inc(self, args):
register = args[0]
self.registers[register] += 1
if self.verbose:
print(f"{self.instruction_pointer} inc {register} {self.registers}")
self.instruction_pointer += 1
def dec(self, args):
register = args[0]
self.registers[register] -= 1
if self.verbose:
print(f"{self.instruction_pointer} dec {register} {self.registers}")
self.instruction_pointer += 1
def jnz(self, args):
register = args[0]
if args[0] not in self.registers:
register_value = int(args[0])
else:
register_value = self.registers[args[0]]
value = int(args[1])
if self.verbose:
print(f"{self.instruction_pointer} jnz {register} {value} {self.registers}")
if register_value != 0:
self.instruction_pointer += value
else:
self.instruction_pointer += 1
def run(self, part2=False):
if part2:
self.registers['c'] = 1
while self.instruction_pointer < len(self.lines):
parts = self.lines[self.instruction_pointer].split()
function = parts[0]
args = parts[1:]
self.instructions[function](args)
return self.registers['a']
def reset(self):
self.instruction_pointer = 0
self.registers = {'a':0, 'b':0, 'c':0, 'd':0}
def test1():
test_day12 = Day12('./day12-test.input')
assert test_day12.run() == 42
test_day12.reset()
assert test_day12.run(part2=True) == 42
def test2():
test_day12 = Day12('./day12.input')
assert test_day12.run() == 318009
test_day12.reset()
assert test_day12.run(part2=True) == 9227663
if __name__ == '__main__':
print("advent of code: day12")
day12 = Day12('./day12.input')
start = time.time()
print(f"part 1: {day12.run()} {round(time.time()-start,1)}s")
day12.reset()
print(f"part 2: {day12.run(part2=True)} {round(time.time()-start,1)}s")
| true |
49b258a904fb96b32444490ff0b2929923c9d552 | Python | fumito-miyake/kmeans | /kmeans/opt.py | UTF-8 | 20,619 | 2.625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import time as tm
import dataIO as dt
INF = float("inf")
class OPT(object):
def __init__( self, x_dim, seed=tm.time() ):
'''
parm int x_dim: データの次元数
parm int seed: 乱数のシード値
'''
self.x_dim = x_dim
np.random.seed( int(seed) )
self.MU = MLUtil()
    def GradOpt( self, mode="GD", x_0=None, T=20, ep=1e-5, pr=0.5, K_ext=None, epoch=10, ave_T=10, delta=1.0, f_info=[], prt=False, write_info=None ):
        '''
        Run the selected optimizer for several epochs, shrinking the
        smoothing parameter delta by half after every epoch.
        parm str mode: optimization method to use [ GD, SGD, MM, SMM ]
        parm array x_0: initial estimate [if None: drawn at random from K_ext]
        parm T: maximum number of iterations per epoch [if None: 20]
        parm float ep: target accuracy [if None: 1e-5]
        pram float pr: maximum error probability [if None: 0.5]
            NOTE(review): pr is currently unused inside this method.
        parm list K_ext: feasible region ("center": array, "radius": float) [if None: center=zero, radius=1.0]
        parm int epoch: maximum number of epochs [if None: 10]
        parm float delta: initial smoothing parameter [if None: 1.0]
        parm dict f_info: properties of the objective (L: Lipschitz, R: grad_lip, sig: strongly-convex, ...) [if None: []]
            NOTE(review): f_info is currently unused inside this method,
            and its mutable default ([]) is shared across calls.
        parm bool prt: whether to print progress for each epoch
        parm dict write_info: csv logging target with "path" and "name" keys [if None: no logging]
        return array x_hat: estimate after the final epoch
        return float val: loss after the final epoch
        '''
        # Open a csv log only when both a path and a file name were given.
        file = writer = None
        if type(write_info)==dict:
            if "path" in write_info and "name" in write_info:
                file, writer = dt.csv_w( write_info["path"], write_info["name"] )
        # Default feasible region: unit ball around the origin.
        if type(K_ext) != dict:
            K_ext = { "center": np.zeros(self.x_dim), "radius": 1.0 }
        # Default starting point: random point inside the feasible region.
        if type(x_0) != list and type(x_0) != np.ndarray:
            x_0 = self.MU.make_a_data_ball( dim=self.x_dim, center=K_ext["center"], rad=K_ext["radius"] )
            print("new first point")
        x_hat = x_0.copy()
        x_hat0 = x_hat.copy()   # estimate from the previous epoch, for the convergence test
        for m in range( epoch ):
            if prt:
                print(">>epoch {}<<".format(m + 1))
            if mode == "GD":
                x_hat = self.GD( x_0=x_hat, m=m, pro_info=K_ext, max_T=T, ep=ep, prt=True, writer=writer )
            elif mode == "SGD":
                # Stochastic modes re-center the feasible ball on the
                # current estimate and shrink its radius with delta.
                K_ext["center"] = x_hat.copy()
                K_ext["radius"] = self.update_radius_sgd( delta )
                ep = self.update_radius_sgd( delta )
                # NOTE(review): ave_T is hard-coded to 10 here, ignoring
                # the method's ave_T argument -- confirm this is intended.
                x_hat = self.SGD( x_0=x_hat, m=m, pro_info=K_ext, max_T=T, delta=delta, ave_T=10, ep=ep*1e3, prt=True, writer=writer )
            elif mode == "MM":
                x_hat = self.MM( x_0=x_hat, m=m, pro_info=K_ext, max_T=T, ep=ep, prt=True, writer=writer )
            elif mode == "SMM":
                K_ext["center"] = x_hat.copy()
                K_ext["radius"] = self.update_radius_sgd( delta )
                ep = self.update_radius_smm( delta )
                x_hat = self.SMM( x_0=x_hat, m=m, pro_info=K_ext, max_T=T, delta=delta, ave_T=ave_T, ep=ep, prt=True, writer=writer )
            val = self.get_val( x_hat ) # compute the current loss
            if prt:
                if val != INF:
                    print( "loss: {}\n".format(val) )
                else:
                    print( "loss: INF\n" )
            if m >= 1:
                if self.valid_err( loss_f=self.get_val, x0=x_hat0, x1=x_hat, err=1e-6 ): # stop once successive epochs barely differ
                    print("converge")
                    break
            x_hat0 = x_hat.copy()
            delta /= 2   # tighten the smoothing every epoch
        if prt and val != INF:
            print("result: {}\n".format(val))
        if type(file) != type(None):
            file.close()
        return x_hat, val
def GD( self, x_0, m, pro_info=None, max_T=10, ep=1e-5, prt=False, writer=None ):
'''
parm array x_0: 初期推定ベクトル
parm list pro_info: 射影先の範囲を指定(0:array center, 1:float rad) [if None: 射影しない]
parm int max_T: 最大ラウンド数
return array x_hat: max_Tラウンド後の推定値
need MLUtil.project: 射影する関数(in:array x_original, list pro_info; out:array x_new)
OR func get_grad: 目的関数の勾配を計算する関数(in:array x; out:array grad)
OR func get_eta: 学習率を計算する関数(in:int t; out:float eta)
'''
x_t = x_0.copy()
x_t0 = x_0.copy()
for t in range( 1, max_T + 1 ):
x_t -= self.get_eta_gd( m ) * self.get_grad( x_t )
if type(pro_info) != type(None): # 射影
x_t = self.MU.project( x_t, pro_info )
if prt: self.round_prt( t, x_t )
if type(writer) != type(None): self.round_wrt( m, t, x_t, writer=writer )
if t >= 1:
if self.valid_err( loss_f=self.get_val, x0=x_t0, x1=x_t, err=ep ): # 誤差が小さくなったら終了
break
x_t0 = x_t.copy()
return x_t
def SGD( self, x_0, m, pro_info=None, max_T=10, delta=1.0, ave_T=10, ep=1e-5, prt=False, writer=None ):
'''
parm array x_0: 初期推定ベクトル
parm list pro_info: 射影先の範囲を指定(0:array center, 1:float rad) [if None: 射影しない]
parm int max_T: 最大ラウンド数 [if None: 10]
parm float delta: smoothingパラメータ [if None: 1.0]
parm float seed: smoothing時に用いる乱数のシード値 [if None: time]
parm int ave_T: smoothing時の乱数の平均を取る回数 [if None: 10]
return array x_hat: max_Tラウンド後の推定値
need MLUtil.project: 射影する関数(in:array x_original, array center, float rad; out:array x_new)
OR func get_grad: 目的関数の勾配を計算する関数(in:array x; out:array grad)
OR func get_eta: 学習率を計算する関数(in:int t; out:float eta)
'''
x_t = x_0.copy()
x_t_l = []
suf_T = max_T // 2 # 後半のsuf_T回分のx_tの平均をとる
for t in range( 1, max_T + 1 ):
x_t -= self.get_eta_sgd( t ) * self.smooth_grad( x_t, delta=delta, ave_T=ave_T )
if type(pro_info) != type(None): # 射影
x_t = self.MU.project( x_t, pro_info )
if t > max_T - suf_T:
x_t_l += [ x_t ]
if prt: self.round_prt( t, x_t )
if type(writer) != type(None): self.round_wrt( m, t, x_t, writer=writer )
return sum( x_t_l ) / suf_T
def MM( self, x_0, m, pro_info=None, max_T=10, ep=1e-5, prt=False, writer=None ):
'''
parm array x_0: 初期推定ベクトル
parm list pro_info: 射影先の範囲を指定(0:array center, 1:float rad) [if None: 射影しない]
parm int max_T: 最大ラウンド数 [if None: 10]
return array x_hat: max_Tラウンド後の推定値
need MLUtil.project: 射影する関数(in:array x_original, array center, float rad; out:array x_new)
OR func srg_min: surrogate関数の最小解を計算する関数(in:array x_original, list info; out:array argmin_x)
'''
x_t = x_0.copy()
x_t0 = x_0.copy()
for t in range( max_T ):
x_t = self.srg_min( x_org=x_t )
if type(pro_info) != type(None): # 射影
x_t = self.MU.project( x_t, pro_info )
if prt: self.round_prt( t, x_t )
if type(writer) != type(None): self.round_wrt( m, t, x_t, writer=writer )
if t >= 1:
if self.valid_err( loss_f=self.get_val, x0=x_t0, x1=x_t, err=ep ): # 誤差が小さくなったら終了
break
x_t0 = x_t.copy()
return x_t
def SMM( self, x_0, m, pro_info=None, max_T=10, delta=1.0, ave_T=10, ep=1e-5, prt=False, writer=None ):
'''
parm array x_0: 初期推定ベクトル
parm list pro_info: 射影先の範囲を指定(0:array center, 1:float rad) [if None: 射影しない]
parm int max_T: 最大ラウンド数
return array x_hat: max_Tラウンド後の推定値
need MLUtil.project: 射影する関数(in:array x_original, array center, float rad; out:array x_new)
OR func srg_min: surrogate関数(の不偏推定)の最小解を計算する関数(in:array x_original, list info, int seed; out:array argmin_x)
'''
x_t = x_0.copy()
x_t0 = x_0.copy()
rho = 0.1
info = { "t": 0, "rho": rho }
for t in range( max_T ):
info[ "t" ] += 1
x_t = self.smooth_srg_min( x_org=x_t, info=info, delta=delta, ave_T=ave_T )
if type(pro_info) != type(None):
x_t = self.MU.project( x_t, pro_info )
if prt: self.round_prt( t, x_t )
if type(writer) != type(None): self.round_wrt( m, t, x_t, writer=writer )
if t >= 1:
if self.valid_err( loss_f=self.get_val, x0=x_t0, x1=x_t, err=ep ): # 誤差が小さくなったら終了
break
x_t0 = x_t.copy()
return x_t
def smooth_val( self, x, delta=1.0, ave_T=10 ):
'''
parm array x: smoothing後の値を返したい座標
parm float delta: smoothingの荒さを決めるパラメータ
parm int seed: smoothingの際に用いる乱数のシード値
parm int ave_T: smoothingの際,ave_T回不偏推定を求め,その平均をsmoothing後の値とする
return float smoothed_val: smoothing後の値
'''
if delta <= 0:
return self.get_val( x )
zero_vec = np.zeros_like( x, np.float )
val = 0.0
for t in range(ave_T):
u = self.MU.make_a_data_ball( dim=self.x_dim, center=zero_vec, rad=delta )
val += self.get_val( x + u )
return val / ave_T
def smooth_grad( self, x, delta=1.0, ave_T=10 ):
'''
parm array x: smoothing後の勾配を返したい座標
parm float delta: smoothingの荒さを決めるパラメータ
parm int seed: smoothingの際に用いる乱数のシード値
parm int ave_T: smoothingの際,ave_T回不偏推定を求め,その平均をsmoothing後の勾配とする
return float smoothed_val: smoothing後のfの勾配
'''
if delta <= 0:
return self.get_grad( x )
zero_vec = np.zeros_like( x, np.float )
grad = np.zeros_like( x, np.float )
for t in range(ave_T):
u = self.MU.make_a_data_ball( dim=self.x_dim, center=zero_vec, rad=delta )
grad += self.get_grad( x + u )
return grad / ave_T
def smooth_generic( self, get_smoothed, x, delta=1.0, ave_T=10 ):
'''
parm func get_smoothed: 目的関数の何かを計算する関数(in:array x; out:anything smoothed)[ex: get_val, get_grad]
parm array x: smoothing後の勾配を返したい座標
parm float delta: smoothingの荒さを決めるパラメータ
parm int ave_T: smoothingの際,ave_T回不偏推定を求め,その平均をsmoothing後の勾配とする
return float smoothed_val: smoothing後のfの勾配
'''
if delta <= 0:
return self.get_smoothed( x )
zero_vec = np.zeros_like( x, np.float )
smoothed = np.zeros_like( get_smoothed( x ), np.float )
for t in range(ave_T):
u = self.MU.make_a_data_ball( dim=self.x_dim, center=zero_vec, rad=delta )
smoothed += self.get_smoothed( x + u )
return smoothed / ave_T
def round_prt( self, t, x_t ):
'''
parm int t: 現在のラウンド
parm array x_t: 現在の解
'''
val = self.get_val( x_t )
if val != INF:
print( "round {}: {}".format(t, val ) )
else:
print( "round {}: INF".format(t) )
def round_wrt( self, m, t, x_t, writer=None ):
'''
parm int m: 現在のエポック
parm int t: 現在のラウンド
parm array x_t: 現在の解
'''
val = self.get_val( x_t )
if val != INF:
writer.writerow( [m, t, val] )
else:
writer.writerow( [m, t, "INF"] )
### Need to Override ###
def srg_min( self, x_org, info=None ):
'''
parm array x_org: 現在の予測点
parm list info: surrogate関数を作成するために必要なパラメータ等(eg. L-lipschitz)
parm int seed: 乱数を用いてsurrogate関数を作成するときのシード値 [if 0: 乱数を用いない]
return array argmin_x: 作成したsurrogate関数の最小解
'''
print("Override the function. \"srg_min\".")
return np.zeros_like( x_org )
def smooth_srg_min( self, x_org, info=None, delta=1.0, ave_T=10 ):
'''
parm array x_org: 現在の予測点
parm list info: surrogate関数を作成するために必要なパラメータ等(eg. L-lipschitz)
parm float delta: smoothingの荒さを決めるパラメータ
parm int ave_T: smoothingの際,ave_T回不偏推定を求め,その平均をsmoothing後の勾配とする
return array argmin_x: 作成したsurrogate関数の最小解
'''
print("Override the function. \"stc_srg_min\".")
return np.zeros_like( x_org )
def get_val( self, x ):
'''
parm array x: 値を求めたい座標
return array val: 関数の値
'''
print("Override the function \"get_val\".")
return 0.0
def valid_err( self, loss_f, x0, x1, err ):
'''
parm func loss_f: 誤差関数 (in: array x0; out: float val)
parm array x0: 前回の解
parm array x1: 今回の解
parm float err: 目標誤差
return bool: 前回と今回の違いが目標誤差に収まっているか否か
'''
print("Override the function \"valid_err\".")
return True
def get_w( self, t ):
'''
parm int t: 重みwを求めたい時刻
return float eta: 時刻tにおけるw
'''
print("Override the function \"get_w\".")
return 1 / t
def get_grad( self, x ):
'''
parm array x: 勾配を求めたい座標
return array grad: 関数の勾配
'''
print("Override the function \"get_grad\".")
return np.zeros_like( x )
def get_eta_gd( self, t ):
'''
parm int t: learning rateを求めたい時刻
return float eta: 時刻tにおけるlearning rate
'''
print("Override the function \"get_eta_gd\".")
return 1 / t
def get_eta_sgd( self, t ):
'''
parm int t: learning rateを求めたい時刻
return float eta: 時刻tにおけるlearning rate
'''
print("Override the function \"get_eta_sgd\".")
return 1 / t
def update_radius_sgd( self, delta ):
'''
parm float delta: 射影先の領域の半径を求めたいdelta
return float radius: 次の射影先の領域の半径
'''
print("Override the function \"update_radius_sgd\".")
return 1.5 * delta
def update_radius_smm( self, delta ):
'''
parm float delta: 射影先の領域の半径を求めたいdelta
return float radius: 次の射影先の領域の半径
'''
print("Override the function \"update_radius_smm\".")
return delta
def update_ep_sgd( self, delta ):
'''
parm float delta: エポック内誤差を求めたいdelta
return float epsilon: 次の射ポック内誤差
'''
print("Override the function \"update_radius_sgd\".")
return (delta **2) / 32
def update_ep_smm( self, delta ):
'''
parm float delta: ポック内誤差を求めたいdelta
return float epsilon: 次のポック内誤差
'''
print("Override the function \"update_radius_smm\".")
return delta * 2
class MLUtil:
    """Numerical helpers: projection onto a ball, norms, numerical
    differentiation, and uniform sampling from a Euclidean ball."""
    def __init__( self ):
        pass
    def project( self, x_org, pro_info ):
        '''
        parm array x_org: point to project
        parm dict pro_info: target region ("center": array, "radius": float)
        return array x_new: the projected point
        '''
        center = np.array( pro_info["center"].copy() )
        rad = pro_info["radius"]
        norm = MLUtil.norm2( [center, x_org] )
        if norm <= rad:
            return x_org
        # Scale the offset from the center onto the ball's surface.
        return ( x_org - center ) * rad / norm + center
    @staticmethod
    def norm2( vec_list ):
        '''
        parm list vec_list: list of one or two array-like vectors
        return float norm: 2-norm of the single vector, 2-norm of the
            difference of two vectors, or 0 for any other length
        '''
        dim = len( vec_list )
        if dim == 1:
            return np.linalg.norm( np.array(vec_list[0]) )
        elif dim == 2:
            return np.linalg.norm( np.array(vec_list[0]) - np.array(vec_list[1]) )
        else:
            return 0
    @staticmethod
    def valid_err( loss_f, x0, x1, err ):
        '''
        parm func loss_f: loss function (in: array x0; out: float val)
        parm array x0: previous solution
        parm array x1: current solution
        parm float err: target relative error
        return bool: whether the relative change is below err
        '''
        # NOTE(review): divides by loss_f(x0); a zero or negative loss would
        # break or flip this test -- confirm the loss is strictly positive.
        if ( abs( loss_f( x0 ) - loss_f( x1 ) ) / loss_f ( x0 ) ) < err:
            return True
        return False
    def numerical_gradient( self, f, x ):
        '''
        parm func f: function to differentiate (in: array x; out: float val)
        parm array x: point at which the gradient is taken
        return array grad: numerical gradient of f
        '''
        # Gradient vector, initialised to zero.
        # NOTE(review): zeros_like(x) inherits x's dtype; an integer-typed x
        # would truncate the gradient -- confirm callers pass float arrays.
        grad = np.zeros_like(x)
        for i in range( len(x) ):
            # Partial derivative with respect to the i-th variable.
            grad[i] = MLUtil.numerical_diff(f, x, i)
        # Return the assembled gradient.
        return grad
    @staticmethod
    def numerical_diff( f, x, i ):
        '''Partial derivative of f at x by central differences.
        :param func f: function to differentiate
        :param array x: point of differentiation
        :param int i: index of the variable to differentiate by
        '''
        # Step size for the central difference.
        # NOTE(review): h = 1 is large for a numerical derivative; the
        # original comment asked for a "small value" -- confirm intended.
        h = 1
        # Unit step along the i-th coordinate only.
        h_vec = np.zeros_like(x)
        h_vec[i] = h
        # Central difference quotient.
        return (f(x + h_vec) - f(x - h_vec)) / (2 * h)
    @staticmethod
    def make_a_data_ball( dim, center, rad=1.0 ):
        '''
        parm int dim: dimensionality of the generated point
        parm array center: center of the ball [if None: origin]
        parm float rad: radius of the ball [if None: 1.0]
        return: one point drawn uniformly from the ball
        '''
        # Random direction: normalise a Gaussian sample onto the sphere.
        x = np.random.randn( dim )
        r = np.linalg.norm( x )
        if r != 0.:
            x /= r
        x *= rad
        # Radial factor u^(1/dim) makes the draw uniform in volume.
        reg_r = np.power( np.random.random(), 1. / dim )
        if type(center) == type(None):
            return x * reg_r
        return x * reg_r + center
    def make_data_ball( self, dim, num, center=None, rad=1.0 ):
        '''
        parm int dim: dimensionality of each generated point
        parm int num: number of points to generate
        parm array center: center of the ball [if None: None]
        parm float rad: radius of the ball [if None: 1.0]
        return: num generated points stacked into one array
        '''
        data_l = [ MLUtil.make_a_data_ball( dim, center, rad ) for i in range(num) ]
        return np.array( data_l )
| true |
79b0da57acc916e3c8b442b04f5c3f93e58afaa1 | Python | ReignOfComputer/PPRE | /ppre/freeze/module.py | UTF-8 | 1,560 | 2.578125 | 3 | [] | no_license | """ Setup script to freeze a module with a main() as a CLI application
Invoke as `python -m ppre.freeze.module some/ppre/script.py <command>`
For creating windows executable, <command> should be bdist_msi.
Script should have a main() invoked when `__name__ == '__main__'`.
The script can optionally define a data_file_patterns list of globbed files to
include at the root of the packaged archive, eg
`data_file_patterns = ['data/defaults/*.csv']`
"""
import glob
import imp
import os
import sys

from cx_Freeze import setup, Executable

# Grab the target script path; the remaining argv is left for cx_Freeze's
# own command parsing (e.g. bdist_msi).
try:
    script = sys.argv.pop(1)
except IndexError:
    # Bug fix: was a bare `except:`, which also hid SystemExit and
    # KeyboardInterrupt.  Only a missing argument should trigger the usage.
    print('Usage: python -m ppre.freeze.module some/ppre/script.py <command>')
    exit(1)

data_file_targets = []

# Load the target script as a module so its optional data_file_patterns
# list can be inspected.
module = imp.load_source('__script__', script)
try:
    data_file_patterns = module.data_file_patterns
except AttributeError:
    # The script declares no extra data files.
    pass
else:
    for data_file_pattern in data_file_patterns:
        for match in glob.iglob(data_file_pattern):
            if os.path.isdir(match):
                # Recursively include every file below a matched directory,
                # keeping its relative path inside the archive.
                for base, dirs, files in os.walk(match):
                    for fname in files:
                        data_file_targets.append((os.path.join(base, fname),
                                                  os.path.join(base, fname)))
            elif os.path.isfile(match):
                data_file_targets.append((match, match))

setup(
    name=os.path.split(script)[1],
    executables=[Executable(script, icon='PPRE.ico')],
    options={
        'build_exe': {
            'compressed': True,
            'include_files': data_file_targets
        }
    },
)
1f2951423d22be1c4f827e8d110aff7c7dbf766a | Python | ewilkinson/CS689_FinalProj | /maps/map.py | UTF-8 | 7,719 | 3.21875 | 3 | [] | no_license | import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.colors import colorConverter
import matplotlib as mpl
def load_map(file_location):
    """Load a grayscale PNG occupancy map as a numpy array.

    Free space maps to 0 and obstacles to 1, and the image is transposed so
    the x-y axes follow the usual convention.

    :type file_location: str
    :param file_location: path to the grayscale PNG map file
    :return: numpy.ndarray of float64
    """
    def rgb2gray(rgb):
        # Luminance conversion followed by a hard threshold at 0.1.
        gray = np.dot(rgb[..., :3], [0.2989, 0.587, 0.114])
        gray[gray < 0.1] = 0.
        gray[gray > 0.1] = 1.
        return gray

    raw = mpimg.imread(file_location)
    # Invert gray so that free space is 0 and obstacles are 1.
    occupancy = 1 - rgb2gray(raw)
    # Put the x-y axes in the conventional place.
    return np.asarray(np.transpose(occupancy), dtype=np.float64)
class Point:
    """
    Data structure for dealing with (x,y) coordinates.
    """
    def __init__(self, *args):
        """
        :param args: either x, y or a single (x, y) pair.
        """
        # Bug fix: comparisons previously used `is`, which tests object
        # identity and only works for ints by CPython caching accident.
        if len(args) == 1:
            self.x, self.y = args[0]
        else:
            self.x, self.y = args

    def __getitem__(self, index):
        # Index 0 -> x, 1 -> y; any other index yields None
        # (kept for backward compatibility).
        retval = None
        if index == 0:
            retval = self.x
        elif index == 1:
            retval = self.y
        return retval

    def __setitem__(self, key, value):
        # Only keys 0 and 1 are meaningful; other keys are silently
        # ignored (kept for backward compatibility).
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value

    def distance(self, p):
        """Euclidean distance between this point and ``p``."""
        diff_x, diff_y = self.x - p.x, self.y - p.y
        return math.sqrt(pow(diff_x, 2) + pow(diff_y, 2))

    def toArray(self):
        """Return the point as a numpy array [x, y]."""
        return np.array([self.x, self.y])

    def toTuple(self):
        """Return the point as a plain (x, y) tuple."""
        return (self.x, self.y)

    def __hash__(self):
        # NOTE(review): __eq__ is not defined, so hashing is by value while
        # equality stays identity-based -- confirm this is intended.
        return hash((self.x, self.y))
class MAP2D:
    """2D occupancy-grid map with windowed sub-views, raycasting and simple
    collision queries.  Cells hold 0 (free) or 1 (obstacle); a second array
    of the same shape (`alt_map_attrib`) carries an auxiliary per-cell value.
    """
    def __init__(self, window_x, window_y):
        # Only the sub-window size is configured here; call load_map()
        # before querying the map.
        self.set_window_bounds(window_x, window_y)

    def load_map(self, file_location):
        """Load the occupancy grid from a grayscale PNG file."""
        self.filename = file_location
        self.map = load_map(file_location)
        # Auxiliary per-cell channel, initially all zero.
        self.alt_map_attrib = np.zeros(self.map.shape, dtype=np.float64)

    def get_bounds(self):
        """
        Return the min and max values for the map indicies

        :rtype : dict
        :return : Dictionary with keys 'x' and 'y'
        """
        return {'x': [0, self.map.shape[0] - 1],
                'y': [0, self.map.shape[1] - 1]}

    def show_map(self):
        """
        Displays the map. Free space is white and obstacles are black. Note that this flips the actual 0/1 values stored in map
        """
        window = np.zeros((2,) + self.map.shape, dtype=np.float64)
        window[0, :, :] = self.map[:, :]
        window[1, :, :] = self.alt_map_attrib[:, :]
        self.show_window(window)

    def _get_valid_bounds(self, x, y):
        """
        Ugly ass function. Ignore if possible
        """
        # Clamp a window centered on (x, y) to the map edges and return the
        # matching slice pairs for the window array and the map array.
        assert (isinstance(x, int))
        assert (isinstance(y, int))
        dim_x = int(self.window_x / 2)
        dim_y = int(self.window_y / 2)

        min_window_x, min_window_y = 0, 0
        max_window_x, max_window_y = self.window_x, self.window_y

        min_x, min_y = x - dim_x, y - dim_y
        max_x, max_y = x + dim_x + 1, y + dim_y + 1

        if min_x < 0:
            min_x = 0
            min_window_x = abs(x - dim_x)
        if max_x > self.map.shape[0]:
            max_window_x -= (max_x - self.map.shape[0])
            max_x = self.map.shape[0]

        if min_y < 0:
            min_y = 0
            min_window_y = abs(y - dim_y)
        if max_y > self.map.shape[1]:
            max_window_y -= (max_y - self.map.shape[1])
            max_y = self.map.shape[1]

        # NOTE(review): indexing arrays with a *list* of slices was removed
        # in modern NumPy (tuples required) -- confirm the target version.
        slice_window = [slice(min_window_x, max_window_x), slice(min_window_y, max_window_y)]
        slice_map = [slice(min_x, max_x), slice(min_y, max_y)]

        return slice_window, slice_map

    def is_collision(self, x, y):
        # A cell is considered blocked when its occupancy value exceeds 0.5.
        return self.map[x][y] > 0.5

    def raycast(self, p1, p2, check_dist=0.5):
        """
        Check if there is a collision free ray between the two provided points

        :type p1: Point
        :param p1: Start point
        :type p2: Point
        :param p2: End point
        :type check_dist: FloatX
        :param check_dist: The minimum distance to travel before checking for collision
        :return collision: bool
        """
        # compute vector from one point to the other and make it unit size
        v1, v2 = p1.toArray(), p2.toArray()
        v3 = v2 - v1
        v3 = v3 / np.linalg.norm(v3)

        # Calculate the number of steps needed to be checked
        # check every check_dist pixels traveled
        n_steps = int(math.ceil(p1.distance(p2) / check_dist))

        ray = v1
        for i in xrange(n_steps):  # (Python 2: xrange)
            ray = ray + check_dist * v3
            # NOTE(review): ray holds floats; float indexing inside
            # is_collision is rejected by modern NumPy -- confirm environment.
            if self.is_collision(ray[0], ray[1]):
                return True

        return False

    def set_window_bounds(self, window_x, window_y):
        """
        :type window_x: int
        :param window_x: Size of the window around x in pixels
        :type window_y: int
        :param window_y: size of the window around y in pixels
        :return:
        """
        assert (isinstance(window_x, int))
        assert (isinstance(window_y, int))
        self.window_x = window_x
        self.window_y = window_y

    def get_window(self, x, y):
        """
        Returns a subwindow surrounding the point provided. Includes obstacle and alt var channels

        :type x: int
        :param x: X-coordinate
        :type y: int
        :param y: Y-coordinate
        :return window: grid of surrounding window
        """
        # Dimension might extend outside of map. Fill in with obstacles for now
        # Might consider replacing unknown areas with values of 0.5 at some point
        slice_window, slice_map = self._get_valid_bounds(x, y)
        window = np.ones((2, self.window_x, self.window_y), dtype=np.float64)

        # Prepend the channel index to each slice pair before assignment.
        slice_window_0 = [0]
        slice_window_1 = [1]
        slice_window_0.extend(slice_window)
        slice_window_1.extend(slice_window)
        window[slice_window_0] = self.map[slice_map]
        window[slice_window_1] = self.alt_map_attrib[slice_map]

        return window

    def show_window(self, subwindow):
        """Display a two-channel window: occupancy plus the alt attribute
        overlaid with a progressively transparent blue colormap."""
        # Transpose back so imshow's axes match the map's x/y convention.
        obstable_img = np.transpose(subwindow[0, :, :])
        alt_var_img = np.transpose(subwindow[1, :, :])

        # generate the colors for your colormap
        color1 = colorConverter.to_rgba('white')
        color2 = colorConverter.to_rgba('blue')

        # make the colormaps
        cmap1 = mpl.colors.LinearSegmentedColormap.from_list('my_cmap', ['white', 'black'], 256)
        cmap2 = mpl.colors.LinearSegmentedColormap.from_list('my_cmap2', [color1, color2], 256)
        cmap2._init()  # create the _lut array, with rgba values

        # create your alpha array and fill the colormap with them.
        # here it is progressive, but you can create whathever you want
        alphas = np.linspace(0., 1.0, cmap2.N + 3)
        cmap2._lut[:, -1] = alphas

        plt.figure()
        img3 = plt.imshow(obstable_img, interpolation='none', vmin=0, vmax=1, cmap=cmap1, origin='lower')
        plt.hold(True)  # NOTE(review): plt.hold was removed in matplotlib >= 3.0
        img2 = plt.imshow(alt_var_img, interpolation='none', vmin=0, vmax=1, cmap=cmap2, origin='lower')
        plt.colorbar()
        plt.hold(False)
        plt.show()
if __name__ == '__main__':
    # Demo: load a map, show a window around (250, 250), cast a ray between
    # two points, and display the full map.  (Python 2 print statements.)
    file_location = './maps/squares.png'
    window_size = 83

    m = MAP2D(window_y=window_size, window_x=window_size)
    m.load_map(file_location)
    w = m.get_window(250, 250)
    m.show_window(w)

    p1 = Point(200, 250)
    p2 = Point(450, 400)

    print 'Raycast outcome : '
    print m.raycast(p1, p2)
    m.show_map()
| true |
904d7b2de044c75486ae24c150b9a5e0f4b804c1 | Python | Teaching-projects/SZE-Projektmunka2-2020-GSP-Art | /loginpage/setTokens.py | UTF-8 | 2,373 | 2.6875 | 3 | [] | no_license | import mysql.connector
import requests
import sys
def get_refresh_token():
    """Exchange the stored OAuth authorization code for Strava tokens.

    Reads the client credentials and one-time code for the user given by
    sys.argv[1], posts them to Strava's token endpoint, stores the
    resulting refresh and access tokens, and marks the code as consumed.
    """
    user_id = sys.argv[1]
    payload = {
        "client_id": get_data_from_database("client_id", user_id)[0],
        "client_secret": get_data_from_database("client_secret", user_id)[0],
        "code": get_data_from_database("code", user_id)[0],
        "grant_type": "authorization_code"
    }
    token_url = "https://www.strava.com/api/v3/oauth/token"
    token_response = requests.post(token_url, data=payload).json()

    update_database("refresh_token", token_response.get('refresh_token'), user_id)
    update_database("access_token", token_response.get('access_token'), user_id)
    # Mark the one-time code as consumed so later runs refresh instead.
    update_database("code", "setted", user_id)
def get_new_access_token():
    """Refresh the Strava access token for the user given by sys.argv[1].

    Posts the stored refresh token to Strava's token endpoint and writes
    the new access token back to the database.
    """
    user_id = sys.argv[1]
    token_url = "https://www.strava.com/api/v3/oauth/token"
    payload = {
        "client_id": get_data_from_database("client_id", user_id)[0],
        "client_secret": get_data_from_database("client_secret", user_id)[0],
        "grant_type": "refresh_token",
        "refresh_token": get_data_from_database("refresh_token", user_id)[0]
    }
    token_response = requests.post(token_url, data=payload).json()
    update_database("access_token", token_response.get('access_token'), user_id)
def update_database(key, value, userid):
    """Set column ``key`` to ``value`` for the user row ``userid``.

    The value and user id are passed as query parameters; ``key`` is a
    column name and cannot be parameterized, so it must come from trusted
    code only (all current callers pass literals).
    """
    DB = mysql.connector.connect(
        host="localhost",
        user="projekt",
        password="123Projekt123",
        database="projekt2"
    )
    cursor = DB.cursor()
    # Security fix: value and userid were previously string-concatenated
    # into the SQL statement (SQL injection risk).
    sql = "UPDATE users SET " + key + " = %s WHERE id = %s"
    cursor.execute(sql, (value, userid))
    DB.commit()
def get_data_from_database(key, userid):
    """Fetch column ``key`` of the user row ``userid``.

    :return: the first matching row as a tuple, or None if no row matches.
    """
    DB = mysql.connector.connect(
        host="localhost",
        user="projekt",
        password="123Projekt123",
        database="projekt2"
    )
    cursor = DB.cursor()
    # Security fix: userid was previously string-concatenated into the SQL
    # statement (SQL injection risk).  ``key`` is a column name and must
    # come from trusted code only.
    sql = "SELECT " + key + " FROM projekt2.users WHERE id = %s"
    cursor.execute(sql, (userid,))
    return cursor.fetchone()
def main():
    """Fetch tokens: the first run exchanges the auth code, later runs refresh."""
    code_state = get_data_from_database("code", sys.argv[1])[0]
    if code_state == "setted":
        get_new_access_token()
    else:
        get_refresh_token()

if __name__ == '__main__':
    main()
963a93d225dc404756c5ab5820b37be4282973fb | Python | iamvarada/kalman_filter_python | /kalman_filter.py | UTF-8 | 3,315 | 3.03125 | 3 | [] | no_license | #! usr/bin/python3
# Basic implementation of Kalman filter in python
# Developer : Krishna Varadarajan
import numpy as np
import numpy.linalg as lin
class BasicKalmanFilter:
    """Basic linear Kalman filter for the system x' = A x, y = C x."""

    def __init__(self, A, C):
        """Create a filter from the system matrices.

        :param A: system matrix, shape (num_states, num_states)
        :param C: output matrix, shape (num_outputs, num_states)
        """
        self.A_ = A
        self.C_ = C
        self.num_states_ = A.shape[0]
        self.num_outputs_ = C.shape[0]
        # State estimate and covariance; proper values are set by
        # initializer_filter().  Instance attributes replace the shared
        # class-level arrays of the original implementation.
        self.X_ = np.zeros((self.num_states_, 1))
        self.P_ = np.zeros((self.num_states_, self.num_states_))
        self.dt_ = 0.0
        self.t_iniital_ = 0.0  # attribute name kept for compatibility
        self.t_current_ = 0.0
        self.last_predict_step_time_ = 0.0
        self.last_udpate_step_time_ = 0.0  # attribute name kept for compatibility
        self.filter_initialized_ = False

    def initializer_filter(self, X_init, P_init, t_init, dt):
        """Initialize the filter's state, covariance and timing.

        :param X_init: initial state, shape (num_states, 1)
        :param P_init: initial estimation-error covariance
        :param t_init: start time of the filter
        :param dt: time step per predict/update call
        """
        self.X_ = X_init
        self.P_ = P_init
        self.dt_ = dt
        self.t_iniital_ = t_init
        self.t_current_ = t_init
        self.last_udpate_step_time_ = t_init
        self.last_predict_step_time_ = t_init
        self.filter_initialized_ = True
        return

    def predict_filter_step(self, Q):
        """Time update: propagate state and covariance one step forward.

        :param Q: process-noise covariance for this step
        """
        if not self.filter_initialized_:
            print("\nFilter not initialized")
            return
        self.X_ = np.dot(self.A_, self.X_)
        self.P_ = np.dot(self.A_, np.dot(self.P_, self.A_.T)) + Q
        self.t_current_ = self.t_current_ + self.dt_
        self.last_predict_step_time_ = self.t_current_
        return

    def update_filter_step(self, Y, R):
        """Measurement update with measurement ``Y`` and noise covariance ``R``.

        :param Y: received measurement, shape (num_outputs, 1)
        :param R: measurement-noise covariance for this step
        :return: the Kalman gain used for this update
        """
        if not self.filter_initialized_:
            print("\nFilter not initialized")
            return
        innovation_cov = np.dot(self.C_, np.dot(self.P_, self.C_.T)) + R
        K_gain = np.dot(self.P_, np.dot(self.C_.T, lin.inv(innovation_cov)))
        residual = Y - np.dot(self.C_, self.X_)
        self.X_ = self.X_ + np.dot(K_gain, residual)
        # Bug fix: the covariance update is P <- (I - K C) P.  The original
        # computed only I - K C (dropping P) and also called np.identity
        # with two positional ints, which is not a valid signature.
        identity = np.identity(self.num_states_)
        self.P_ = np.dot(identity - np.dot(K_gain, self.C_), self.P_)
        self.t_current_ = self.t_current_ + self.dt_
        self.last_udpate_step_time_ = self.t_current_
        return K_gain

    def get_latest_estimated_state(self):
        """Return the most recent state estimate."""
        return self.X_

    def get_current_time(self):
        """Return the filter's current simulation time."""
        return self.t_current_
| true |
c7b22e23d0e696f07ab7f55a846792ff612d24d6 | Python | alejeau/cocoma-td | /util/net/Connection.py | UTF-8 | 780 | 3.21875 | 3 | [] | no_license | import util.miscellaneous as misc
from abc import ABC, abstractmethod
class Connection(ABC):
    """Abstract base class for connections that buffer incoming messages."""

    def __init__(self):
        # Messages received by the connection are appended here.
        self.BUFFER = []

    def get_buffer_and_flush(self) -> list:
        """Return a copy of the buffered messages and empty the buffer."""
        snapshot = self.BUFFER[:]
        del self.BUFFER[:]
        return snapshot

    @abstractmethod
    def send(self, message, address):
        """Send ``message`` to ``address`` (implemented by subclasses)."""
        pass

    def to_string(self, indent_1: int, indent_2: int):
        """Print a short, indented description of the connection state."""
        indent_str_1 = misc.indent(indent_1)
        indent_str_2 = misc.indent(indent_2)
        print(indent_str_1 + "Connection:\n",
              indent_str_2 + "Number of BUFFER elements:", len(self.BUFFER),
              "\n")
| true |
cf100544d3fe11f6bbdf113481dd3254fccc8a1f | Python | absalomhr/TheoryOfComputation | /STACK.py | UTF-8 | 477 | 3.9375 | 4 | [] | no_license | class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def top(self):
return self.items[len(self.items)-1]
def size(self):
return len(self.items)
def showStack (self):
stack = ""
for char in self.items:
stack += char
return stack [::-1] | true |
class Unavailable (Exception):
    """Raised when the authentication service cannot be used."""
    pass
class InvalidCredentials (Exception):
    """Raised when the supplied credentials do not authenticate."""
    pass
class Unauthorized (Exception):
    """Raised when an authenticated user lacks the required permission."""
    pass
class Credentials(object):
    """
    Class representing a voter's credentials.
    """
    def __init__(self, user_id=None, friendly_name=None, groups=None):
        # All three arguments are effectively required; the None defaults
        # only exist so each missing argument can be reported individually.
        # (Python 2 raise syntax below.)
        if user_id == None:
            raise TypeError, "missing argument 'user_id'"
        elif friendly_name == None:
            raise TypeError, "missing argument 'friendly_name'"
        elif groups == None:
            raise TypeError, "missing argument 'groups'"
        else:
            self.user_id = user_id
            self.friendly_name = friendly_name
            self.groups = groups

    def in_group(self, group):
        # True when `group` occurs at least once in the groups list.
        return self.groups.count(group) > 0
class BaseAuth(object):
    """Base class for authentication backends."""

    def authenticate(self, user_id, password):
        """Validate credentials; must be overridden by subclasses."""
        raise NotImplementedError
| true |
073effe993fcdfe81eee2b9aee6c5809038f6070 | Python | Park-GH/DSPE_2016706018_-_10 | /Assignment1.py | UTF-8 | 418 | 2.71875 | 3 | [] | no_license | import torch
import socket
import platform
#ip_address 받아옴
ip_address = socket.gethostbyname(socket.gethostname())
#pytorch gpu버전이면 true, 아니면false
if torch.cuda.is_available():
print("cuda is available")
else:
print("cuda is not available")
print(ip_address)
#platform uname 시스템에 대한 여러가지 정보값 확인
#3.3이상버전 namedTuple리턴
print(platform.uname(), "\n") | true |
1e935d803513c31df536232c4ae86a8dc33bec67 | Python | optionalg/Cracking-the-coding-interview-Python | /8queens.py | UTF-8 | 996 | 3.9375 | 4 | [] | no_license | def solve(n):
BOARD_SIZE=8
if n == 0: return [[]] # No RECURSION if n=0.
smaller_solutions = solve(n-1) # RECURSION!!!!!!!!!!!!!!
solutions = []
for solution in smaller_solutions: # I moved this around, so it makes more sense
for column in range(1,BOARD_SIZE+1): # I changed this, so it makes more sense
# try adding a new queen to row = n, column = column
if not under_attack(column , solution):
solutions.append(solution + [(n,column)])
return solutions
def under_attack(column, existing_queens):
    """Check whether a queen placed below the existing ones is attacked.

    Assumes the new queen goes on row ``len(existing_queens) + 1``.

    :param column: candidate column for the new queen
    :param existing_queens: list of (row, column) tuples already placed
    :return: True when any existing queen shares a row, column or diagonal
    """
    row = len(existing_queens) + 1
    for r, c in existing_queens:
        same_row = r == row
        same_column = c == column
        on_diagonal = abs(column - c) == abs(row - r)
        if same_row or same_column or on_diagonal:
            return True
    return False
print solve(8)
| true |
230ed5bcc479801512ebdb4223b6cc954fc48e6f | Python | YorkUSTC/homework1 | /help.py | UTF-8 | 945 | 2.671875 | 3 | [] | no_license | from PyQt5.QtWidgets import QWidget, QLabel, QGridLayout
class Help(QWidget):
    """Simple help window showing usage instructions."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the window: a heading label and a body label in a grid."""
        self.setWindowTitle("帮助")
        grid = QGridLayout()
        grid.setSpacing(1)
        label = QLabel(self)
        head = "<h1 style=\"text-align:center;\">帮助</h1>"
        label.setText(head)
        grid.addWidget(label, 1, 1, 1, 1)
        label1 = QLabel(self)
        body = "<p>使用方法</p><p>可以在设置中更改除法类型和算数中出现的最大值,每次可以生成300道题目,可以选择保存为已排版的html格式文件,可以直接打印。(注意,每次都会覆盖掉上一次生成结果)</p><p>祝您的学生每天都写得开心!</p>"
        # Bug fix: the body text was previously set on `label`, overwriting
        # the heading while `label1` stayed empty.
        label1.setText(body)
        grid.addWidget(label1, 2, 1, 1, 1)
        self.setLayout(grid)
7e527075c4e7006a973c9ed7ae4425d94cb108a7 | Python | saraabrahamsson/ROI_detect | /Demo.py | UTF-8 | 4,729 | 2.625 | 3 | [] | no_license | # Example Script
from __future__ import division
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from numpy.random import randn, randint
from numpy import zeros, transpose, min, max, array, prod, percentile, outer
from scipy.io import loadmat
from scipy.ndimage.filters import gaussian_filter
from sys import argv
from BlockGroupLasso import gaussian_group_lasso, GetCenters, GetROI, GetActivity
from BlockLocalNMF import LocalNMF, RegionAdd
# Select the demo dataset from the command line (default: simulated 2D data).
data_source = 1 if len(argv) == 1 else int(argv[1])
plt.close('all')
# Fetch Data: choose between simulated data and two experimental datasets.
if data_source == 1:  # generate 2D model data
    T = 30  # duration of the simulation
    sz = (150, 100)  # size of image
    sig = (5, 5)  # neurons size
    foo = 0.1 * randn(*((T,) + sz))
    bar = zeros((T,) + sz)
    N = 15  # number of neurons
    lam = 1  # NOTE(review): overwritten below -- confirm which value is intended
    for i in range(N):
        # Random pixel location; that pixel fires with |N(0,1)| amplitude.
        ind = tuple([randint(x) for x in sz])
        for j in range(T):
            bar[(j,) + ind] = abs(randn())
    # Noise plus spatially smoothed point sources.
    data = foo + 10 * gaussian_filter(bar, (0,) + sig)
    TargetArea = N * prod(2 * array(sig)) / prod(sz)
    TargetRange = [TargetArea * 0.8, TargetArea * 1.2]
    NonNegative = True
    lam = 1
elif data_source == 2:  # Use experimental 2D data
    mat = loadmat('Datasets/data_exp2D')
    data = transpose(mat['data'], [2, 0, 1])
    sig = (6, 6)  # estimated neurons size
    N = 40  # estimated number of neurons
    TargetArea = N * prod(2 * array(sig)) / prod(data[0, :, :].shape)
    TargetRange = [TargetArea * 0.8, TargetArea * 1.2]
    NonNegative = True
    lam = 1
elif data_source == 3:  # Use experimental 3D data
    mat = loadmat('Datasets/data_exp3D')
    data = transpose(mat['data'], [3, 0, 1, 2])
    sig = (2, 2, 2)  # neurons size
    TargetRange = [0.005, 0.015]
    NonNegative = True
    lam = 0.001
# Run source detection algorithms: group lasso for candidate sources,
# then local NMF to refine shapes/activities.
x = gaussian_group_lasso(data, sig, lam, NonNegative=NonNegative,
                         TargetAreaRatio=TargetRange, verbose=True, adaptBias=True)
# 95th percentile over time gives a robust "max projection" image.
pic_x = percentile(x, 95, axis=0)
pic_data = percentile(data, 95, axis=0)
# centers extracted from fista output using RegionalMax
cent = GetCenters(pic_x)
# ROI around each center, using watersheding on non-zero regions
ROI = GetROI(pic_x, (array(cent)[:-1]).T)
# temporal traces of activity for each neuron, averaged over each ROI
activity = GetActivity(x, ROI)

MSE_array, shapes, activity, boxes, background = LocalNMF(
    data, (array(cent)[:-1]).T, activity, sig,
    NonNegative=NonNegative, verbose=True, adaptBias=True)

L = len(shapes)  # number of detected neurons
denoised_data = 0 * data
for ll in range(L):  # add all detected neurons
    denoised_data = RegionAdd(
        denoised_data, outer(activity[ll], shapes[ll],), boxes[ll])
pic_denoised = percentile(denoised_data, 95, axis=0)
residual = data - denoised_data - background
# Plot Results: data + detected centers, the inferred x, and the denoised
# reconstruction, followed by the NMF convergence curve.
plt.figure(figsize=(12, 4. * data.shape[1] / data.shape[2]))
ax = plt.subplot(131)
ax.scatter(cent[1], cent[0], s=7 * sig[1], marker='o', c='white')
plt.hold(True)  # NOTE(review): plt.hold was removed in matplotlib >= 3.0
ax.set_title('Data + centers')
ax.imshow(pic_data if data_source != 3 else pic_data.max(-1))
ax2 = plt.subplot(132)
ax2.scatter(cent[1], cent[0], s=7 * sig[1], marker='o', c='white')
ax2.imshow(pic_x if data_source != 3 else pic_x.max(-1))
ax2.set_title('Inferred x')
ax3 = plt.subplot(133)
ax3.scatter(cent[1], cent[0], s=7 * sig[1], marker='o', c='white')
ax3.imshow(pic_denoised if data_source != 3 else pic_denoised.max(-1))
ax3.set_title('Denoised data')
plt.show()

fig = plt.figure()
plt.plot(MSE_array)
plt.xlabel('Iteration')
plt.ylabel('MSE')
plt.show()
# Video Results: set up the three animated panels (data, residual, denoised).
fig = plt.figure(figsize=(12, 4. * data.shape[1] / data.shape[2]))
# Note: min/max here are numpy's (imported at the top), shadowing builtins.
mi = min(data)
ma = max(data)
ii = 0
ax = plt.subplot(131)
ax.scatter(cent[1], cent[0], s=7 * sig[1], marker='o', c='white')
im = ax.imshow(data[ii] if data_source != 3 else data[ii].max(-1), vmin=mi, vmax=ma)
ax.set_title('Data + centers')
ax2 = plt.subplot(132)
ax2.scatter(cent[1], cent[0], s=7 * sig[1], marker='o', c='white')
im2 = ax2.imshow(residual[ii] if data_source != 3 else residual[ii].max(-1), vmin=mi, vmax=ma)
ax2.set_title('Residual')
ax3 = plt.subplot(133)
ax3.scatter(cent[1], cent[0], s=7 * sig[1], marker='o', c='white')
im3 = ax3.imshow(denoised_data[ii] if data_source !=
                 3 else denoised_data[ii].max(-1), vmin=mi, vmax=ma)
ax3.set_title('Denoised')
def update(ii):
    # Animation callback: display frame ii in all three panels
    # (max-project the last axis for the 3D dataset).
    im.set_data(data[ii] if data_source != 3 else data[ii].max(-1))
    im2.set_data(residual[ii] if data_source != 3 else residual[ii].max(-1))
    im3.set_data(denoised_data[ii] if data_source != 3 else denoised_data[ii].max(-1))

ani = animation.FuncAnimation(fig, update, frames=len(data), blit=False, interval=30,
                              repeat=False)
plt.show()
| true |
fea747dfbf6a37aa8c964bc545aed35dcee621f6 | Python | martapanc/comp62521 | /src/comp62521/database/database.py | UTF-8 | 31,881 | 2.875 | 3 | [] | no_license | from comp62521.statistics import average
from comp62521.statistics import author_count
from comp62521.statistics import author_lastname
from operator import itemgetter
from collections import defaultdict
import itertools
import numpy as np
from xml.sax import handler, make_parser, SAXException
# Display labels indexed by the Publication type constants defined below.
PublicationType = [
    "Conference Paper", "Journal", "Book", "Book Chapter"]
class Publication:
    """A single publication: its type, title, year and author indices."""

    # Type constants (indices into the module-level PublicationType labels).
    CONFERENCE_PAPER = 0
    JOURNAL = 1
    BOOK = 2
    BOOK_CHAPTER = 3

    def __init__(self, pub_type, title, year, authors):
        self.pub_type = pub_type
        self.title = title
        # A missing/empty year is stored as -1.
        self.year = int(year) if year else -1
        self.authors = authors
class Author:
    """An author, identified by display name."""

    def __init__(self, name):
        self.name = name
class Stat:
    """Statistic selector: MEAN/MEDIAN/MODE index into parallel lists of
    display labels (STR) and averaging callables (FUNC)."""
    STR = ["Mean", "Median", "Mode"]
    # Averaging functions from comp62521.statistics.average, in the same
    # order as the STR labels.
    FUNC = [average.mean, average.median, average.mode]
    MEAN = 0
    MEDIAN = 1
    MODE = 2
class Database:
    def read(self, filename):
        """Parse a publications XML file into this database.

        Resets the stored publications/authors, parses ``filename`` with a
        SAX DocumentHandler (presumably defined elsewhere in this module --
        not visible here; it appears to fill self.publications and the
        author lists), then recomputes min_year/max_year.

        :return: True on success, False when the parser raised a SAXException
        """
        self.publications = []
        self.authors = []
        self.author_idx = {}
        self.min_year = None
        self.max_year = None
        # Note: this local shadows the xml.sax `handler` module import.
        handler = DocumentHandler(self)
        parser = make_parser()
        parser.setContentHandler(handler)
        infile = open(filename, "r")
        valid = True
        try:
            parser.parse(infile)
        except SAXException as e:
            valid = False
            print ("Error reading file (" + e.getMessage() + ")")
        infile.close()
        # Track the overall year range across all parsed publications.
        for p in self.publications:
            if self.min_year == None or p.year < self.min_year:
                self.min_year = p.year
            if self.max_year == None or p.year > self.max_year:
                self.max_year = p.year
        return valid
    def get_all_authors(self):
        """Return the names of all known authors (a dict keys view)."""
        return self.author_idx.keys()
    def get_coauthor_data(self, start_year, end_year, pub_type):
        """Tabulate each author's co-authors within a year range and type.

        :param start_year: inclusive lower bound, or None for no bound
        :param end_year: inclusive upper bound, or None for no bound
        :param pub_type: a Publication type constant, or 4 for all types
        :return: (header, data) where each data row is
                 ["Name (n_coauthors)", "coauthor1 (...), coauthor2 (...)"]
        """
        coauthors = {}
        for p in self.publications:
            if ((start_year == None or p.year >= start_year) and
                (end_year == None or p.year <= end_year) and
                (pub_type == 4 or pub_type == p.pub_type)):
                # Record every ordered pair of distinct co-authors.
                for a in p.authors:
                    for a2 in p.authors:
                        if a != a2:
                            try:
                                coauthors[a].add(a2)
                            except KeyError:
                                coauthors[a] = set([a2])
        def display(db, coauthors, author_id):
            # "Name (number of co-authors)" label for one author.
            return "%s (%d)" % (db.authors[author_id].name, len(coauthors[author_id]))
        header = ("Author", "Co-Authors")
        data = []
        for a in coauthors:
            data.append([ display(self, coauthors, a),
                ", ".join([
                    display(self, coauthors, ca) for ca in coauthors[a] ]) ])
        return (header, data)
    def get_authors_for_nw(self, author):
        """Build network data: author index -> name map plus coauthor edges.

        Edges are normalized with the smallest author index first and
        sorted by their first endpoint.
        NOTE(review): the `author` parameter and the local `coauthors_list`
        are unused -- confirm whether filtering by author was intended.

        :return: (authors, coauthors) -- dict of {index: name} and a sorted
                 list of [min_index, max_index] edges
        """
        authors = {}
        coauthors = []
        coauthors_list = {}
        for p in self.publications:
            for a in p.authors:
                authors[a] = self.authors[a].name
                for a2 in p.authors:
                    if a != a2 and not [a, a2] in coauthors and not [a2, a] in coauthors:
                        if a2 > a: # Set all edges to have the smallest index first
                            coauthors.append([a, a2])
                        else:
                            coauthors.append([a2, a])
        coauthors.sort(key=itemgetter(0)) # Sort edges by the first index
        return authors, coauthors
    def get_all_coauthors(self): # Returns a list of lists like [[0, 1], [3, 4], ...]
        """Return every distinct coauthor pair as a sorted list of edges.

        NOTE(review): unlike get_authors_for_nw, the stored edge is
        [a2, a] (reversed from first encounter) rather than normalized
        smallest-index-first -- confirm which ordering is intended; the
        commented-out append suggests this changed at some point.
        """
        coauthors = []
        for p in self.publications:
            for a in p.authors:
                for a2 in p.authors:
                    if a != a2 and not [a, a2] in coauthors and not [a2, a] in coauthors:
                        coauthors.append([a2, a])
                        #coauthors.append([a, a2])
        coauthors.sort(key=itemgetter(0, 1)) # Sort edges by the first index
        return coauthors
def get_all_coauthors_graph(self): # Returns a dictionary of sets, with the author indices as keys
coauthors = defaultdict(set)
for p in self.publications:
for a in p.authors:
for a2 in p.authors:
if a != a2 and a2 not in coauthors[a]:
coauthors[a].add(a2)
return coauthors
def get_average_authors_per_publication(self, av):
header = ("Conference Paper", "Journal", "Book", "Book Chapter", "All Publications")
auth_per_pub = [[], [], [], []]
for p in self.publications:
auth_per_pub[p.pub_type].append(len(p.authors))
func = Stat.FUNC[av]
data = [ func(auth_per_pub[i]) for i in np.arange(4) ] + [ func(list(itertools.chain(*auth_per_pub))) ]
return (header, data)
    def get_average_publications_per_author(self, av):
        """Average number of publications per author, per type and overall.

        :param av: one of Stat.MEAN / Stat.MEDIAN / Stat.MODE
        :return: (header, data) with one value per publication type plus a
                 combined value over all types
        """
        header = ("Conference Paper", "Journal", "Book", "Book Chapter", "All Publications")
        # Rows: authors, columns: the four publication types.
        pub_per_auth = np.zeros((len(self.authors), 4))
        for p in self.publications:
            for a in p.authors:
                pub_per_auth[a, p.pub_type] += 1
        func = Stat.FUNC[av]
        data = [ func(pub_per_auth[:, i]) for i in np.arange(4) ] + [ func(pub_per_auth.sum(axis=1)) ]
        return (header, data)
    def get_average_publications_in_a_year(self, av):
        """Average number of publications per year, per type and overall.

        :param av: one of Stat.MEAN / Stat.MEDIAN / Stat.MODE
        """
        header = ("Conference Paper",
            "Journal", "Book", "Book Chapter", "All Publications")
        # Rows: years from min_year to max_year, columns: publication types.
        ystats = np.zeros((int(self.max_year) - int(self.min_year) + 1, 4))
        for p in self.publications:
            ystats[p.year - self.min_year][p.pub_type] += 1
        func = Stat.FUNC[av]
        data = [ func(ystats[:, i]) for i in np.arange(4) ] + [ func(ystats.sum(axis=1)) ]
        return (header, data)
    def get_average_authors_in_a_year(self, av):
        """Average number of distinct authors active per year.

        :param av: one of Stat.MEAN / Stat.MEDIAN / Stat.MODE
        """
        header = ("Conference Paper",
            "Journal", "Book", "Book Chapter", "All Publications")
        # One list per year; each has four per-type sets plus an "all" set.
        yauth = [ [set(), set(), set(), set(), set()] for _ in range(int(self.min_year), int(self.max_year) + 1) ]
        for p in self.publications:
            for a in p.authors:
                yauth[p.year - self.min_year][p.pub_type].add(a)
                yauth[p.year - self.min_year][4].add(a)
        # Distinct-author counts per year and category.
        ystats = np.array([ [ len(S) for S in y ] for y in yauth ])
        func = Stat.FUNC[av]
        data = [ func(ystats[:, i]) for i in np.arange(5) ]
        return (header, data)
    def get_publication_summary_average(self, av):
        """Summary rows of authors-per-publication and publications-per-author.

        :param av: one of Stat.MEAN / Stat.MEDIAN / Stat.MODE; selects both
                   the row label (Stat.STR) and the function (Stat.FUNC)
        """
        header = ("Details", "Conference Paper",
            "Journal", "Book", "Book Chapter", "All Publications")

        pub_per_auth = np.zeros((len(self.authors), 4))
        auth_per_pub = [[], [], [], []]

        for p in self.publications:
            auth_per_pub[p.pub_type].append(len(p.authors))
            for a in p.authors:
                pub_per_auth[a, p.pub_type] += 1

        name = Stat.STR[av]
        func = Stat.FUNC[av]

        data = [
            [name + " authors per publication"]
                + [ func(auth_per_pub[i]) for i in np.arange(4) ]
                + [ func(list(itertools.chain(*auth_per_pub))) ],
            [name + " publications per author"]
                + [ func(pub_per_auth[:, i]) for i in np.arange(4) ]
                + [ func(pub_per_auth.sum(axis=1)) ] ]
        return (header, data)
def get_publication_summary(self):
    """Overall publication and distinct-author counts per publication type."""
    header = ("Details", "Conference Paper",
              "Journal", "Book", "Book Chapter", "Total")
    pub_counts = [0, 0, 0, 0]
    authors_by_type = [set(), set(), set(), set()]
    for pub in self.publications:
        pub_counts[pub.pub_type] += 1
        authors_by_type[pub.pub_type].update(pub.authors)
    # union of authors over every publication type
    all_authors = set().union(*authors_by_type)
    data = [
        ["No. of publications"] + pub_counts + [sum(pub_counts)],
        ["No. of authors"] + [len(s) for s in authors_by_type] + [len(all_authors)],
    ]
    return (header, data)
def get_average_authors_per_publication_by_author(self, av):
    """Per-author <stat> of co-author-list sizes, split by publication type."""
    header = ("Author", "No. of conference papers",
              "No. of journals", "Number of books",
              "No. of book chapers", "All publications")
    # per author: four lists holding len(authors) of each publication they appear in
    per_author = [[[], [], [], []] for _ in range(len(self.authors))]
    for pub in self.publications:
        for author_id in pub.authors:
            per_author[author_id][pub.pub_type].append(len(pub.authors))
    stat = Stat.FUNC[av]
    data = []
    for idx, groups in enumerate(per_author):
        row = [author_lastname.get_last_name_first(self.authors[idx].name)]
        row += [stat(g) for g in groups]
        row.append(stat(list(itertools.chain(*groups))))
        data.append(row)
    return (header, data)
def get_publications_by_author(self):
    """Per-author publication counts by type; rows carry both name renderings."""
    header = ("", "Author", "No. of conference papers",
              "No. of journals", "No. of books",
              "No. of book chapers", "Total")
    totals = [[0, 0, 0, 0] for _ in range(len(self.authors))]
    for pub in self.publications:
        for author_id in pub.authors:
            totals[author_id][pub.pub_type] += 1
    data = []
    for idx, counts in enumerate(totals):
        data.append([self.authors[idx].name]
                    + [author_lastname.get_last_name_first(self.authors[idx].name)]
                    + counts + [sum(counts)])
    return (header, data)
def get_all_details_of_authors(self):
    """Full per-author breakdown: type counts, total, co-author count, and
    first/last/sole appearance counts.

    Fixes: the original used a bare ``except:`` (hiding real errors) and made a
    second full pass over all publications just to read ``len(coauthors[a])``;
    the co-author sizes are now filled in one pass over the collected map.
    """
    header = ("Author", "Overall no. of publications", "No. of conference papers",
              "No. of journals", "No. of books",
              "No. of book chapers", "No. of co-authors", "No. of times appears first", "No. of times appears last", "No. of times appears sole")
    type_counts = [[0, 0, 0, 0] for _ in range(len(self.authors))]
    # per author: [n_coauthors, n_first, n_last, n_sole]
    extras = [[0, 0, 0, 0] for _ in range(len(self.authors))]
    coauthors = {}
    for p in self.publications:
        for a in p.authors:
            type_counts[a][p.pub_type] += 1
            extras[a][1] += author_count.appearing_first(a, p.authors)
            extras[a][2] += author_count.appearing_last(a, p.authors)
            extras[a][3] += author_count.appearing_sole(a, p.authors)
            for other in p.authors:
                if other != a:
                    coauthors.setdefault(a, set()).add(other)
    # authors with no co-authors simply keep the 0 default
    for a, partners in coauthors.items():
        extras[a][0] = len(partners)
    data = [[author_lastname.get_last_name_first(self.authors[i].name)]
            + [sum(type_counts[i])] + type_counts[i] + extras[i]
            for i in range(len(type_counts))]
    return (header, data)
def get_authors_count(self, start_year, end_year, pub_type):
    """Per-author first/last/sole appearance counts, filtered by year range and type.

    pub_type == 4 means "all publication types"; a None year bound disables
    that side of the filter.
    """
    # NOTE(review): the header declares 4 columns but each data row has 5
    # entries (plain name + last-name-first name + 3 counts) -- confirm the
    # consumer expects the extra leading name column.
    header = ("Author", "No. of times the author appears first",
              "No. of times the author appears last","No. of times the author appears sole")
    astats = [ [0, 0, 0] for _ in range(len(self.authors)) ]
    for p in self.publications:
        if ( (start_year == None or p.year >= start_year) and
             (end_year == None or p.year <= end_year) and
             (pub_type == 4 or pub_type == p.pub_type) ):
            for a in p.authors:
                astats[a][0] += author_count.appearing_first(a, p.authors)
                astats[a][1] += author_count.appearing_last(a, p.authors)
                astats[a][2] += author_count.appearing_sole(a, p.authors)
    data = [ [self.authors[i].name] + [ author_lastname.get_last_name_first(self.authors[i].name) ] + astats[i]
             for i in range(len(astats)) ]
    return (header, data)
def get_authors_count_for_one_author(self, author_name, start_year, end_year, pub_type):
    """[first, last, sole] appearance counts for one author, filtered like
    get_authors_count (pub_type == 4 means all types, None bounds disable).

    Fixes: the ``self.author_idx[author_name]`` dictionary lookup was repeated
    for every author of every publication; it is loop-invariant and is now
    resolved once up front. ``== None`` comparisons use ``is None`` (PEP 8).
    """
    target = self.author_idx[author_name]
    astats = [0, 0, 0]
    for p in self.publications:
        if ( (start_year is None or p.year >= start_year) and
             (end_year is None or p.year <= end_year) and
             (pub_type == 4 or pub_type == p.pub_type) ):
            for a in p.authors:
                if a == target:
                    astats[0] += author_count.appearing_first(a, p.authors)
                    astats[1] += author_count.appearing_last(a, p.authors)
                    astats[2] += author_count.appearing_sole(a, p.authors)
    return (astats)
def search_authors(self, author):
    """Return author names containing *author* (case-insensitive), ranked.

    Single-word queries are bucketed by which token of the name the query is a
    prefix of (surname first, then first name, then middle tokens); inside
    each bucket, exact token matches are ranked before prefix matches.
    Multi-word queries are simply sorted by surname.
    """
    authors = []
    tmp_authors_1 = []
    tmp_authors_2 = []
    tmp_authors_3 = []
    tmp_authors_4 = []
    tmp_authors_5 = []
    tmp_authors_6 = []
    ordered_authors = []
    if author == "None" or author == "":
        return ordered_authors
    # candidate set: any author whose full name contains the query substring
    for a in self.authors:
        if author.lower() in a.name.lower():
            authors.append(a.name)
    splitted_author = author.split()
    if len(splitted_author) == 1:
        len_of_author = len(author)
        # bucket 1 = query is a prefix of the last token (surname),
        # bucket 2 = of the first token, buckets 3-5 = of middle tokens,
        # bucket 6 = matched somewhere else in the name
        for a in authors:
            name_list = a.split()
            if ((len(name_list[-1]) >= len_of_author) and (author.lower() == name_list[-1][:len_of_author].lower())):
                tmp_authors_1.append(a)
            elif ((len(name_list[0]) >= len_of_author) and (author.lower() == name_list[0][:len_of_author].lower())):
                tmp_authors_2.append(a)
            elif ((len(name_list) > 2) and (len(name_list[1]) >= len_of_author) and (author.lower() == name_list[1][:len_of_author].lower())):
                tmp_authors_3.append(a)
            elif ((len(name_list) > 3) and (len(name_list[2]) >= len_of_author) and (author.lower() == name_list[2][:len_of_author].lower())):
                tmp_authors_4.append(a)
            elif ((len(name_list) > 4) and (len(name_list[3]) >= len_of_author) and (author.lower() == name_list[3][:len_of_author].lower())):
                tmp_authors_5.append(a)
            else:
                tmp_authors_6.append(a)
        # bucket 1: exact surname matches (alphabetical) before prefix matches (by surname)
        tmp_auth_1 = []
        tmp_auth_2 = []
        for a in tmp_authors_1:
            name_list = a.split()
            if(author.lower() == name_list[-1].lower()):
                tmp_auth_1.append(a)
            else:
                tmp_auth_2.append(a)
        tmp_authors_1 = sorted(tmp_auth_1, key=str.lower) + sorted(tmp_auth_2, key=lambda s: s.split()[-1])
        # bucket 2: exact first-name matches (by surname) before prefix matches (alphabetical)
        tmp_auth_1 = []
        tmp_auth_2 = []
        for a in tmp_authors_2:
            name_list = a.split()
            if(author.lower() == name_list[0].lower()):
                tmp_auth_1.append(a)
            else:
                tmp_auth_2.append(a)
        tmp_authors_2 = sorted(tmp_auth_1, key=lambda s: s.split()[-1]) + sorted(tmp_auth_2, key=str.lower)
        # buckets 3-5: exact middle-token matches (by surname) before prefix matches (by that token)
        tmp_auth_1 = []
        tmp_auth_2 = []
        for a in tmp_authors_3:
            name_list = a.split()
            if(author.lower() == name_list[1].lower()):
                tmp_auth_1.append(a)
            else:
                tmp_auth_2.append(a)
        tmp_authors_3 = sorted(tmp_auth_1, key=lambda s: s.split()[-1]) + sorted(tmp_auth_2, key=lambda s: s.split()[1])
        tmp_auth_1 = []
        tmp_auth_2 = []
        for a in tmp_authors_4:
            name_list = a.split()
            if(author.lower() == name_list[2].lower()):
                tmp_auth_1.append(a)
            else:
                tmp_auth_2.append(a)
        tmp_authors_4 = sorted(tmp_auth_1, key=lambda s: s.split()[-1]) + sorted(tmp_auth_2, key=lambda s: s.split()[2])
        tmp_auth_1 = []
        tmp_auth_2 = []
        for a in tmp_authors_5:
            name_list = a.split()
            if(author.lower() == name_list[3].lower()):
                tmp_auth_1.append(a)
            else:
                tmp_auth_2.append(a)
        tmp_authors_5 = sorted(tmp_auth_1, key=lambda s: s.split()[-1]) + sorted(tmp_auth_2, key=lambda s: s.split()[3])
        # bucket 6: plain surname sort
        tmp_authors_6 = sorted(tmp_authors_6, key=lambda s: s.split()[-1])
        ordered_authors = tmp_authors_1 + tmp_authors_2 + tmp_authors_3 + tmp_authors_4 + tmp_authors_5 + tmp_authors_6
    else:
        # multi-word query: no bucketing, just sort the hits by surname
        ordered_authors = sorted(authors, key=lambda s: s.split()[-1])
    return (ordered_authors)
def get_author_stats(self,author):
    """Aggregate stats for author(s) whose name contains *author* (case-insensitive).

    Returns (found, all_pubs, papers, journals, books, chapters, n_coauthors,
    n_first, n_last, author_name); if several names match, the last match wins.
    """
    coauthors = {}
    # counters default to 0 so a miss returns zeros with author_found False
    papernumber = journalnumber = booknumber = booksnumber = allpubnumber = coauthornumber = first = last = 0
    author_name = ''
    author_found = False
    # per author: [total, papers, journals, books, chapters, n_coauthors, first, last]
    astats = [[0, 0, 0, 0, 0, 0, 0, 0] for _ in range(len(self.authors))]
    # The overall number of publications,papers,articls,book chapters,books
    # The number of co-authors
    # The number of times
    for p in self.publications:
        for a in p.authors:
            astats[a][p.pub_type + 1] += 1
            for a2 in p.authors:
                if a != a2:
                    try:
                        coauthors[a].add(a2)
                    except KeyError:
                        coauthors[a] = set([a2])
            # NOTE(review): raises KeyError if `a` only ever appears as the
            # sole author (coauthors[a] never created) -- confirm intended.
            astats[a][5] = len(coauthors[a])
            astats[a][6] += author_count.appearing_first(a, p.authors)
            astats[a][7] += author_count.appearing_last(a, p.authors)
        # recompute the all-types total for this publication's authors
        for a in p.authors:
            astats[a][0] = astats[a][1] + astats[a][2] + astats[a][3] + astats[a][4]
    data = [ astats[i]
             for i in range(len(astats))]
    for i in range(len(data)):
        if author != "None" and author != "" and author.lower() in self.authors[i].name.lower():
            allpubnumber = data[i][0]
            papernumber = data[i][1]
            journalnumber = data[i][2]
            booknumber = data[i][3]
            booksnumber = data[i][4]
            coauthornumber = data[i][5]
            first = data[i][6]
            last = data[i][7]
            author_found = True
            author_name = self.authors[i].name
    return (author_found, allpubnumber, papernumber, journalnumber, booknumber, booksnumber,
            coauthornumber, first, last, author_name)
def get_author_stats_by_click(self,author):
    """Stats for the author whose name exactly equals *author* (UI click).

    Index 0 of each counter list is the all-types total; indices 1-4 follow
    pub_type + 1.
    """
    coauthors = {}
    author_name = ''
    author_found = False
    NoPublications = [0, 0, 0, 0, 0]
    NoFirstAuthor = [0, 0, 0, 0, 0]
    NoLastAuthor = [0, 0, 0, 0, 0]
    NoSoleAuthor = [0, 0, 0, 0, 0]
    NoCoAuthor = 0
    for p in self.publications:
        for a in p.authors:
            if str(self.authors[a].name) == author:
                author_found = True
                author_name = self.authors[a].name
                NoPublications[p.pub_type + 1] += 1
                for a2 in p.authors:
                    if a != a2:
                        try:
                            coauthors[a].add(a2)
                        except KeyError:
                            coauthors[a] = set([a2])
                # bare except kept as-is; only KeyError (sole-author case,
                # coauthors[a] never created) is expected here
                try:
                    NoCoAuthor = len(coauthors[a])
                except:
                    NoCoAuthor = 0
                NoFirstAuthor[p.pub_type + 1] += author_count.appearing_first(a, p.authors)
                NoLastAuthor[p.pub_type + 1] += author_count.appearing_last(a, p.authors)
                NoSoleAuthor[p.pub_type + 1] += author_count.appearing_sole(a, p.authors)
    # fold the per-type counters into the all-types slot 0
    NoPublications[0] = NoPublications[1] + NoPublications[2] + NoPublications[3] + NoPublications[4]
    NoFirstAuthor[0] = NoFirstAuthor[1] + NoFirstAuthor[2] + NoFirstAuthor[3] + NoFirstAuthor[4]
    NoLastAuthor[0] = NoLastAuthor[1] + NoLastAuthor[2] + NoLastAuthor[3] + NoLastAuthor[4]
    NoSoleAuthor[0] = NoSoleAuthor[1] + NoSoleAuthor[2] + NoSoleAuthor[3] + NoSoleAuthor[4]
    return (author_found, NoPublications, NoFirstAuthor, NoLastAuthor, NoSoleAuthor, NoCoAuthor, author_name)
def get_average_authors_per_publication_by_year(self, av):
    """Yearly <stat> of author-list sizes, split by publication type."""
    header = ("Year", "Conference papers",
              "Journals", "Books",
              "Book chapers", "All publications")
    by_year = {}
    for pub in self.publications:
        groups = by_year.setdefault(pub.year, [[], [], [], []])
        groups[pub.pub_type].append(len(pub.authors))
    stat = Stat.FUNC[av]
    data = []
    for year, groups in by_year.items():
        row = [year] + [stat(g) for g in groups]
        row.append(stat(list(itertools.chain(*groups))))
        data.append(row)
    return (header, data)
def get_publications_by_year(self):
    """Publication counts per year, by type plus a total column."""
    header = ("Year", "Number of conference papers",
              "Number of journals", "Number of books",
              "Number of book chapers", "Total")
    by_year = {}
    for pub in self.publications:
        # setdefault inserts the zero row on first sight of a year,
        # keeping the original first-seen ordering of the rows
        by_year.setdefault(pub.year, [0, 0, 0, 0])[pub.pub_type] += 1
    data = [[year] + counts + [sum(counts)] for year, counts in by_year.items()]
    return (header, data)
def get_average_publications_per_author_by_year(self, av):
    """Yearly <stat> of per-author publication counts, split by type."""
    header = ("Year", "Conference papers",
              "Journals", "Books",
              "Book chapers", "All publications")
    by_year = {}
    for pub in self.publications:
        if pub.year not in by_year:
            # one authors x 4 counter matrix per year, created on demand
            by_year[pub.year] = np.zeros((len(self.authors), 4))
        matrix = by_year[pub.year]
        for author_id in pub.authors:
            matrix[author_id][pub.pub_type] += 1
    stat = Stat.FUNC[av]
    data = [[year]
            + [stat(matrix[:, col]) for col in range(4)]
            + [stat(matrix.sum(axis=1))]
            for year, matrix in by_year.items()]
    return (header, data)
def get_author_totals_by_year(self):
    """Distinct-author counts per year by publication type, plus the union total."""
    header = ("Year", "No. of conference papers",
              "No. of journals", "No. of books",
              "No. of book chapers", "Total")
    by_year = {}
    for pub in self.publications:
        year_sets = by_year.setdefault(pub.year, [set(), set(), set(), set()])
        year_sets[pub.pub_type].update(pub.authors)
    data = []
    for year, year_sets in by_year.items():
        union = set().union(*year_sets)
        data.append([year] + [len(s) for s in year_sets] + [len(union)])
    return (header, data)
def add_publication(self, pub_type, title, year, authors):
    """Register one parsed publication, creating Author records for new names.

    Records missing a year or all authors are skipped with a console warning;
    a missing title is tolerated (warned) since the record is still countable.

    Fixes: ``== None`` comparisons replaced with ``is None`` (PEP 8 E711).
    """
    if year is None or len(authors) == 0:
        print ("Warning: excluding publication due to missing information")
        print ("    Publication type:", PublicationType[pub_type])
        print ("    Title:", title)
        print ("    Year:", year)
        print ("    Authors:", ",".join(authors))
        return
    if title is None:
        print ("Warning: adding publication with missing title [ %s %s (%s) ]" % (PublicationType[pub_type], year, ",".join(authors)))
    idlist = []
    for a in authors:
        try:
            idlist.append(self.author_idx[a])
        except KeyError:
            # first time this name is seen: allocate the next author id
            a_id = len(self.authors)
            self.author_idx[a] = a_id
            idlist.append(a_id)
            self.authors.append(Author(a))
    self.publications.append(
        Publication(pub_type, title, year, idlist))
    # progress heartbeat for very large DBLP dumps
    if (len(self.publications) % 100000) == 0:
        print ("Adding publication number %d (number of authors is %d)" % (len(self.publications), len(self.authors)))
    # track the year range used by the per-year statistics
    if self.min_year is None or year < self.min_year:
        self.min_year = year
    if self.max_year is None or year > self.max_year:
        self.max_year = year
def _get_collaborations(self, author_id, include_self):
data = {}
for p in self.publications:
if author_id in p.authors:
for a in p.authors:
try:
data[a] += 1
except KeyError:
data[a] = 1
if not include_self:
del data[author_id]
return data
def get_coauthor_details(self, name):
    """Return (coauthor_name, joint_publication_count) pairs for *name*,
    the author's own count included."""
    author_id = self.author_idx[name]
    collab = self._get_collaborations(author_id, True)
    return [(self.authors[a].name, count) for a, count in collab.items()]
def get_network_data(self):
    """Build (nodes, links) for the whole co-authorship graph.

    Each node is [name, degree]; each undirected link appears once as
    an (a, b) pair with a < b.
    """
    n_authors = len(self.authors)
    nodes = [[author.name, -1] for author in self.authors]
    links = set()
    for a in range(n_authors):
        collab = self._get_collaborations(a, False)
        nodes[a][1] = len(collab)
        for b in collab:
            if a < b:
                links.add((a, b))
    return (nodes, links)
def get_degrees_of_separation(self, author1, author2):
    """Degrees of separation between two author names.

    Returns 0 for direct co-authors, 'X' when the authors are unreachable,
    otherwise the separation degree found by aux_func_deg_of_sep.

    Fixes: a bare ``except:`` around the adjacency lookup (which would also
    swallow unrelated errors) is replaced by ``dict.get``.
    """
    global checked_coauthors
    coauthors = {}
    separation_list = []
    checked_coauthors = {}
    # adjacency map: author name -> set of co-author names
    for p in self.publications:
        for a in p.authors:
            for a2 in p.authors:
                if a != a2:
                    coauthors.setdefault(self.authors[a].name, set()).add(self.authors[a2].name)
    list_of_coauthors_for_author1 = coauthors.get(author1, [])
    if author2 in list_of_coauthors_for_author1:
        return 0
    if len(list_of_coauthors_for_author1) == 0:
        return 'X'
    result = self.aux_func_deg_of_sep(author1, author2, coauthors, separation_list, -1)
    # 100000 is the helper's "unreachable" sentinel
    if (result == 100000):
        return 'X'
    return result
def aux_func_deg_of_sep(self, author1, author2, coauthors, separation_list, degree):
    """Recursive helper for get_degrees_of_separation.

    Uses the module-global ``checked_coauthors`` (name -> degree first
    reached) to avoid revisiting authors at a lower degree; returns 100000
    as the "unreachable" sentinel.
    """
    global checked_coauthors
    degree += 1
    try:
        list_of_coauthors_for_author1 = coauthors[author1]
    except:
        list_of_coauthors_for_author1 = []
    # first mark every direct co-author with the current degree ...
    for a in list_of_coauthors_for_author1:
        if a in checked_coauthors:
            pass
        else:
            checked_coauthors[a] = degree
    # ... then recurse into those not already reached at a smaller degree
    for a in list_of_coauthors_for_author1:
        if a in checked_coauthors:
            if checked_coauthors[a] >= degree:
                if author2 == a:
                    sep_value = degree
                else:
                    sep_value = self.aux_func_deg_of_sep(a, author2, coauthors, separation_list, degree)
                separation_list.append(sep_value)
        else:
            # NOTE(review): unreachable after the marking loop above -- every
            # `a` is already in checked_coauthors by now; confirm intended.
            if author2 == a:
                sep_value = degree
            else:
                sep_value = self.aux_func_deg_of_sep(a, author2, coauthors, separation_list, degree)
            separation_list.append(sep_value)
    if len(separation_list) == 0:
        return 100000
    else:
        return min(separation_list)
def get_two_authors_network(self, author1, author2):
    """Sub-graph (nodes, edges) covering every shortest co-author path
    between two authors.

    Returns (-1, []) if author1 is unknown and (-2, []) if author2 is
    unknown; the node dict always contains the two endpoints.

    Fixes: ``min_len`` was initialised to the *number* of paths instead of a
    path length, so whenever every path was longer than the path count the
    shortest paths were silently dropped; it now starts from an actual
    path length.
    """
    a1 = -1
    a2 = -1
    for a in self.authors:
        if a.name == author1:
            a1 = self.authors.index(a)
        if a.name == author2:
            a2 = self.authors.index(a)
    if a1 == -1:
        return -1, []
    elif a2 == -1:
        return -2, []
    allcoauthors = self.get_all_coauthors_graph()
    authors = {a1: author1, a2: author2}  # graph always includes the endpoints
    min_coauthors = []
    if self.get_degrees_of_separation(author1, author2) != "X":
        paths = list(self.dfs_paths(allcoauthors, a1, a2))
        min_len = min(len(path) for path in paths)
        min_paths = [path for path in paths if len(path) == min_len]
        # collect the consecutive edges of every shortest path, de-duplicated
        for path in min_paths:
            for i in range(len(path) - 1):
                edge = [path[i], path[i + 1]]
                if edge not in min_coauthors:
                    min_coauthors.append(edge)
        for edge in min_coauthors:
            authors[edge[0]] = self.authors[edge[0]].name
            authors[edge[1]] = self.authors[edge[1]].name
    return authors, min_coauthors
def dfs_paths(self, graph, start, goal):
    """Yield every simple path from *start* to *goal*.

    :param graph: dict mapping node -> set of neighbour nodes
    Fixes: the loop variable shadowed the builtin ``next``.
    """
    stack = [(start, [start])]
    while stack:
        (vertex, path) = stack.pop()
        # only explore neighbours not already on the path (simple paths)
        for neighbour in graph[vertex] - set(path):
            if neighbour == goal:
                yield path + [neighbour]
            else:
                stack.append((neighbour, path + [neighbour]))
def get_single_author_network(self, author_name):
    """Nodes and edges of the immediate co-author neighbourhood of *author_name*.

    Returns (-1, []) when the name is unknown.
    """
    a_id = -1
    for idx, author in enumerate(self.authors):
        if author.name == author_name:
            a_id = idx
    if a_id == -1:
        return -1, []
    # keep only the global co-author edges that touch this author
    coauthors_edges = [edge for edge in self.get_all_coauthors()
                       if a_id == edge[0] or a_id == edge[1]]
    coauthors_nodes = {a_id: self.authors[a_id].name}
    for edge in coauthors_edges:
        coauthors_nodes[edge[0]] = self.authors[edge[0]].name
        coauthors_nodes[edge[1]] = self.authors[edge[1]].name
    return coauthors_nodes, coauthors_edges
def get_authors_as_list(self):
    """Return every author's name, in author-id order.

    Fixes: manual append loop replaced by the idiomatic list comprehension.
    """
    return [author.name for author in self.authors]
class DocumentHandler(handler.ContentHandler):
    """SAX handler that streams a DBLP XML dump into *db* via add_publication.

    Fixes: ``== None`` / ``!= None`` identity comparisons replaced by
    ``is None`` / ``is not None`` (PEP 8 E711) and ``in dict.keys()``
    membership tests replaced by ``in dict``.
    """

    # formatting tags that may appear inside a <title>; they must not reset
    # the character buffer mid-title
    TITLE_TAGS = [ "sub", "sup", "i", "tt", "ref" ]
    PUB_TYPE = {
        "inproceedings":Publication.CONFERENCE_PAPER,
        "article":Publication.JOURNAL,
        "book":Publication.BOOK,
        "incollection":Publication.BOOK_CHAPTER }

    def __init__(self, db):
        self.tag = None
        self.chrs = ""
        self.clearData()
        self.db = db

    def clearData(self):
        """Reset the per-publication accumulation state."""
        self.pub_type = None
        self.authors = []
        self.year = None
        self.title = None

    def startDocument(self):
        pass

    def endDocument(self):
        pass

    def startElement(self, name, attrs):
        if name in self.TITLE_TAGS:
            return  # keep accumulating the title text across formatting tags
        if name in DocumentHandler.PUB_TYPE:
            self.pub_type = DocumentHandler.PUB_TYPE[name]
        self.tag = name
        self.chrs = ""

    def endElement(self, name):
        if self.pub_type is None:
            return  # not inside a publication element
        if name in self.TITLE_TAGS:
            return
        d = self.chrs.strip()
        if self.tag == "author":
            self.authors.append(d)
        elif self.tag == "title":
            self.title = d
        elif self.tag == "year":
            self.year = int(d)
        elif name in DocumentHandler.PUB_TYPE:
            # closing the publication element: hand the record to the database
            self.db.add_publication(
                self.pub_type,
                self.title,
                self.year,
                self.authors)
            self.clearData()
        self.tag = None
        self.chrs = ""

    def characters(self, chrs):
        # only buffer text while inside a publication element
        if self.pub_type is not None:
            self.chrs += chrs
| true |
8a208c0b68ed9dd350a5fc9c529a09ae23702778 | Python | galahad42/PyZoom | /PyZoom.py | UTF-8 | 1,934 | 2.640625 | 3 | [] | no_license | import time
import pyautogui
import subprocess
from datetime import time as t
import datetime

# GUI automation: joins a Zoom class through an ERP web portal by clicking
# hard-coded screen coordinates, recorded for one specific desktop layout.
# Chrome zoom setting for my PC is set to 67%
# Point(x=1251, y=102)//'Maximise'
# Point(x=514, y=636)//'SFS' shortcut
# Point(x=694, y=357)//'Login' erp
# Point(x=859, y=124)//'Cross' btn
# Point(x=15, y=349)//'E-Connect'
# Point(x=931, y=244),(x=937, y=361), (x=938, y=473)//'Start'(develop better method to use span or href)
# Point(x=727, y=189)//'Open Zoom' dialog
# Point(x=898, y=603)//Video Turn off(essential)
# 15:08:24.789150

# Open Chrome in Maximise
pyautogui.hotkey('winleft')
time.sleep(4)
pyautogui.typewrite("chrome", 0.5)
pyautogui.hotkey("enter")
time.sleep(10)
pyautogui.moveTo(1251, 102)  # 'Maximise' btn
pyautogui.click()
time.sleep(3)

# 'SFS' shortcut
pyautogui.moveTo(514, 636)
pyautogui.click()
time.sleep(10)

# 'Login' erp
pyautogui.moveTo(694, 357)
pyautogui.click()
time.sleep(10)
pyautogui.moveTo(859, 124)  # 'Cross' btn
pyautogui.click()
time.sleep(2)

# 'E-Connect'
pyautogui.moveTo(15, 349)
pyautogui.click()
time.sleep(7)

# 'Start'(Current method not satifactory, developing better method to use selenium to span or href)
pyautogui.moveTo(931, 244)
pyautogui.click()
pyautogui.moveTo(937, 361)
pyautogui.click()
pyautogui.moveTo(938, 473)
pyautogui.click()
time.sleep(10)

# Working model(bugs fixed) but still under development and most probably not the best method
# slot1 = t(7, 30, 00)
# eslot1 = t(9, 00, 00)
# slot2 = time(9, 28, 00)
# eslot2 = time(10, 28, 00)
# a = datetime.datetime.now()
# if a in (slot1, eslot1):
# pyautogui.moveTo(934, 473, 0.8)
# else:
# pyautogui.moveTo(931, 244, 0.8)
#
# pyautogui.click()
# time.sleep(10)

# 'Open Zoom' dialog
pyautogui.moveTo(727, 189)
pyautogui.click()
time.sleep(10)

# Video Turn off(essential)
pyautogui.moveTo(898, 603)
pyautogui.click()
| true |
5ab432b542181173819c9449389a1f04c16ea94e | Python | shg9411/algo | /algo_py/boj/bj20943.py | UTF-8 | 541 | 3.03125 | 3 | [] | no_license | import io
import os
import math
# Fast input: slurp all of stdin once and read lines from an in-memory buffer.
# NOTE: this shadows the builtin `input`, and each call returns *bytes*.
input = io.BytesIO(os.read(0, os.fstat(0).st_size)).readline
def solve():
    """BOJ 20943: count pairs of lines a*x + b*y = c that intersect.

    Two lines intersect iff they are not parallel, so the answer is
    C(n, 2) minus C(k, 2) for every parallel class of size k.

    Fixes: the parallel-class key was the float ``-a/b``, which can collide
    for distinct slopes (and differ for equal slopes) once the coefficients
    get large; the key is now the exact gcd-reduced, sign-normalised
    integer pair (a, b).
    """
    slope_groups = dict()
    n = int(input())
    for _ in range(n):
        a, b, _ = map(int, input().split())
        if b == 0:
            key = (1, 0)  # vertical line, any a
        else:
            d = math.gcd(a, b)
            a, b = a // d, b // d
            # normalise the sign so (a, b) and (-a, -b) share one key
            if b < 0:
                a, b = -a, -b
            key = (a, b)
        slope_groups[key] = slope_groups.get(key, 0) + 1
    ans = n * (n - 1) // 2
    for count in slope_groups.values():
        ans -= count * (count - 1) // 2
    print(ans)


if __name__ == '__main__':
    solve()
| true |
c4b60fd38b76a618c0d2a73576dff31c0b804dc7 | Python | Juneel/pysonarqube | /measure.py | UTF-8 | 2,739 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/29 16:45
# @Author : Juneel
# @File : measure.py
import requests
from component import Component
from log import cls_log_handler
@cls_log_handler
class Measure(Component):
    """Read component measure (metric) values from a SonarQube server."""

    def __init__(self, ip, port, username, password):
        super().__init__(ip=ip, port=port, username=username, password=password)
        self.component_name = None
        self.component_id = None
        # metric keys to query: bugs, code_smells, coverage,
        # duplicated_lines, vulnerabilities, ...
        self.metrics = None

    def result(self):
        """Fetch the current measure values for the configured component.

        :return: the response body on HTTP 200, otherwise None
        """
        path = "/api/measures/component"
        path = path + "?"
        if self.component_id is not None:
            path = path + "componentId=" + self.component_id
        if self.metrics is not None:
            path = path + "&metricKeys=" + self.metrics
        self.logger.info("Request url is " + path)
        try:
            rsp = requests.get(url="http://{0}:{1}{2}".format(self.ip, self.port, path))
            self.logger.info("Response content is " + str(rsp.text))
            if rsp.status_code == 200:
                return rsp.text
            return None
        # BUG FIX: requests raises requests.exceptions.ConnectionError, which
        # is NOT a subclass of the builtin ConnectionError the original
        # caught, so network failures used to propagate unhandled.
        except requests.exceptions.ConnectionError:
            return None

    def history_result(self, start_time, end_time, page_num, page_size):
        """Fetch the measure history of the configured component.

        :param start_time: e.g. 2017-10-19 or 2017-10-19T13:00:00+0200
        :param end_time: same format as start_time
        :param page_num: 1-based page index
        :param page_size: page size
        :return: the response body on HTTP 200, otherwise None
        """
        path = "/api/measures/search_history"
        if self.component_name is not None:
            path = path + "?component=" + self.component_name
        if self.metrics is not None:
            # BUG FIX: the original inserted a stray space after "metrics=",
            # corrupting the query-string value
            path = path + "&metrics=" + self.metrics
        if start_time and end_time:
            path = path + "&from=" + str(start_time) + "&to=" + end_time
        if page_num and page_size:
            path = path + "&p=" + str(page_num) + "&ps=" + str(page_size)
        self.logger.info("Request url is " + path)
        try:
            rsp = requests.get(url="http://{0}:{1}{2}".format(self.ip, self.port, path))
            self.logger.info("Response content is " + str(rsp.text))
            if rsp.status_code == 200:
                return rsp.text
            return None
        except requests.exceptions.ConnectionError:
            return None
| true |
6ac96812dbe839ff73f2e07869562c21134d5600 | Python | cmaman1/Python-fundamentals | /Guia 01/ejercicio03.py | UTF-8 | 234 | 4.15625 | 4 | [] | no_license | """Ejercicio 3:
Crear un programa que pregunte al usuario su nombre y edad y luego imprima esos datos en renglones distintos."""
# Exercise 3: ask the user for their name and age, then print each value
# on its own line (prompts are intentionally in Spanish).
nombre = input('Ingrese su nombre: ')
edad = int(input('Ingrese su edad: '))
print(nombre)
print(edad)
| true |
1b2f923213894f49bd1cc576702c4653a0fa347b | Python | roisevege/OpinionDynamics | /models.py | UTF-8 | 818 | 2.921875 | 3 | [] | no_license | import networkx as nx # networkx 2.x
import random
class DeffuantModel(object):
    """Deffuant bounded-confidence opinion dynamics on a networkx graph.

    Required kwargs: mu (convergence rate), d (confidence threshold),
    strategy ('random' or 'neighbor').
    """

    def __init__(self, graph, **kwargs):
        self.G = graph
        self.nodes = list(self.G.nodes())
        self.edges = list(self.G.edges())
        self.mu = kwargs['mu']  # convergence parameter
        self.d = kwargs['d']  # confidence threshold
        self.strategy = kwargs['strategy']

    def opinionUpdate(self):
        """Pick one agent pair and move their opinions together when the
        difference is within the threshold."""
        if self.strategy == 'random':
            first = random.choice(self.nodes)
            second = first
            while second == first:
                second = random.choice(self.nodes)
        elif self.strategy == 'neighbor':
            first, second = random.choice(list(self.G.edges()))
        op1 = self.G.nodes[first]['opinion']
        op2 = self.G.nodes[second]['opinion']
        if abs(op1 - op2) <= self.d:
            self.G.nodes[first]['opinion'] += self.mu * (op2 - op1)
            self.G.nodes[second]['opinion'] += self.mu * (op1 - op2)
| true |
3c7d3a387dae001aaef2756224b0a97104a3a014 | Python | CamiloCstro/python-basic | /1.-Introduccion/multiples.py | UTF-8 | 174 | 3.546875 | 4 | [] | no_license | # Declarar multiples variables en una sola linea
# Avoid packing too many variables into a single declaration line.
name, last_name, age = 'camilo', 'castro', 24
print(name, last_name, age)
| true |
df70b2d8e163960b4feaa4d766b1eeb5d5d88b46 | Python | Vivarta/geiger | /geiger/featurizers/subjectivity.py | UTF-8 | 3,318 | 2.734375 | 3 | [] | no_license | import numpy as np
from textblob import Blobber
from textblob_aptagger import PerceptronTagger
from geiger.util.progress import Progress
class Featurizer():
    """
    Builds subjectivity features.

    Subjectivity lexicon sourced from
    <https://github.com/kuitang/Markovian-Sentiment/blob/master/data/subjclueslen1-HLTEMNLP05.tff>,
    presented in:

    Theresa Wilson, Janyce Wiebe and Paul Hoffmann (2005). Recognizing Contextual
    Polarity in Phrase-Level Sentiment Analysis. Proceedings of HLT/EMNLP 2005,
    Vancouver, Canada.

    This featurizer is largely based off Jeff Fossett's `SubjFeaturizer`, found at
    <https://github.com/Fossj117/opinion-mining/blob/master/classes/transformers/featurizers.py>
    """
    lex_path = 'data/subjclueslen1-HLTEMNLP05.tff'

    # Map NLTK POS tags to the lexicon's POS tags.
    TAG_MAP = {'NN': 'noun',
               'NNS': 'noun',
               'NNP': 'noun',
               'JJ': 'adj',
               'JJR': 'adj',
               'JJS': 'adj',
               'RB': 'adverb',
               'RBR': 'adverb',
               'RBS': 'adverb',
               'VB': 'verb',
               'VBD': 'verb',
               'VBG': 'verb',
               'VBN': 'verb',
               'VBP': 'verb',
               'VBZ': 'verb'}

    def __init__(self):
        self.lex = self.load_lexicon()
        self.blobber = Blobber(pos_tagger=PerceptronTagger())

    def load_lexicon(self):
        """
        Loads and processes the subjectivity lexicon into a
        {(word, pos): {'subjectivity', 'priorpolarity'}} mapping.
        """
        lex = {}
        with open(self.lex_path, 'r') as f:
            # iterate the file directly instead of materialising readlines()
            for line in f:
                chunks = line.strip().split(' ')
                data = dict([c.split('=') for c in chunks if '=' in c])
                lex[(data['word1'], data['pos1'])] = {
                    'subjectivity': 1 if data['type'] == 'strongsubj' else 0,
                    # TO DO should polarity be included as a feature?
                    'priorpolarity': data['priorpolarity']
                }
        return lex

    def featurize(self, comments, return_ctx=False):
        """
        Featurize every comment body, printing progress as it goes.

        :param return_ctx: also return the features as the "context" value
        """
        p = Progress('SUBJ')
        n = len(comments)
        feats = []
        for i, c in enumerate(comments):
            p.print_progress((i + 1) / n)
            feats.append(self._featurize(c.body))
        feats = np.vstack(feats)
        if return_ctx:
            return feats, feats
        return feats

    def _featurize(self, text):
        """
        Featurize a single document: [strong ratio, weak ratio, subjective count].
        """
        tagged = self.blobber(text).tags

        n_strong = 0
        n_weak = 0
        n_total = len(tagged)
        if n_total == 0:
            # BUG FIX: empty/untaggable text used to raise ZeroDivisionError
            return np.zeros(3)

        for w, tag in tagged:
            # Map the NLTK pos tag to the lexicon's pos tags.
            pos = self.TAG_MAP.get(tag)
            try:
                info = self.lex[(w, pos)]
            except KeyError:
                try:
                    info = self.lex[(w, 'anypos')]
                except KeyError:
                    continue
            if info['subjectivity'] == 1:
                n_strong += 1
            else:
                n_weak += 1

        return np.array([
            n_strong / n_total,
            n_weak / n_total,
            n_strong + n_weak
        ])
| true |
fac26d5c214ec4cc1074f60a6e35bf5bd4e428a0 | Python | MarineChap/Machine_Learning | /Association Rule Learning/Apriori.py | UTF-8 | 2,755 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Apriori in following the course "Machine learning A-Z" at Udemy
The dataset can be found here https://www.superdatascience.com/machine-learning/
Subject : Find relation in a list of products.
An association rule has two parts, an antecedent (if) and a consequent (then).
Association rules are created by analyzing data for frequent if/then patterns
and using the criteria support, confidence and lift to identify the most important relationships.
Support(M1) = User(M1) /User(dataset)
Confidence(M1, M2) = User(M1, M2) / User(M1)
Lift(M1, M2) = confidence (M1, M2)/ support(M2)
with M1 : antecedent
M2 : consequent
Created on Mon Mar 5 16:21:36 2018
@author: marinechap
"""
# Import libraries
import pandas as pd
from apyori import apriori
# This function takes as argument your results list and return a tuple list with the format:
# [(rh, lh, support, confidence, lift)]
def inspect(results):
    """Flatten apyori results into (lhs, rhs, support, confidence, lift) tuples,
    reading the first ordered statistic of each relation record."""
    rows = []
    for result in results:
        rule = result[2][0]
        rows.append((tuple(rule[0]), tuple(rule[1]), result[1], rule[2], rule[3]))
    return rows
# Parameters
name_file = 'Market_Basket_Optimisation.csv'

# Import dataset
dataset = pd.read_csv(name_file, header = None)
# Re-pack each dataframe row as a list of item names, dropping the NaN
# padding pandas produced for short baskets.
transactions =[]
for index_list in range(0, len(dataset.values)):
    transactions.append([dataset.values[index_list, index_product] for index_product in range(0, len(dataset.values[0,:])) if str(dataset.values[index_list, index_product]) != 'nan'])

"""
Apriori algorithm :
Step 1 : Set a minimum support and confidence
Step 2 : Take all the subsets in transactions having higher support than minimum support
Step 3: Take all the rules of these subsets having higher confidence than minimum confidence
Step 4: Store the rule by decreasing lift
"""
# Training Apriori on the dataset
result = list(apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2))

# this command creates a data frame to view
resultDataFrame=pd.DataFrame(inspect(result),
                             columns=['rhs','lhs','support','confidence','lift'])
resultDataFrame = resultDataFrame.sort_values(by='lift', ascending = False)

"""
5 first rules :
whole wheat pasta, mineral water -> olive oil
milk, mineral water, frozen vegetables -> soup
fromage blanc -> honey
light cream -> chicken
pasta -> escalope
"""
| true |
15b1cffbf63593e22f94fe0f7d773df2b9221f88 | Python | isaiahltupal/OpenCVSamplesPython | /open_video/open_video_color.py | UTF-8 | 367 | 2.6875 | 3 | [] | no_license | # https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
import cv2

cap = cv2.VideoCapture('paper322-2019-11-29_07.26.37.mp4')

while cap.isOpened():
    ret, frame = cap.read()
    # BUG FIX: when the video ends, read() returns (False, None) and
    # imshow would crash on the None frame -- stop the loop instead.
    if not ret:
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
| true |
dd85b32ddf9bd566b624c03f382cc5482c3aa5dd | Python | soonler/Python000-class01 | /Week_01/G20190282010059/top250/data_to_csv.py | UTF-8 | 309 | 2.953125 | 3 | [] | no_license | import csv
def data_to_csv(movies):
    """Write movie dicts to movie_data.csv.

    The file uses utf_8_sig (UTF-8 with BOM) so Excel opens it cleanly.

    :param movies: iterable of dicts keyed title/rating/comment_count/comment_top5
    Fixes: removed a leftover ``print(movies)`` debug dump of the whole list.
    """
    fieldnames = ['title', 'rating', 'comment_count', 'comment_top5']
    with open('movie_data.csv', 'w', encoding='utf_8_sig', newline='') as csv_file:
        csv_writer = csv.DictWriter(csv_file, fieldnames)
        csv_writer.writeheader()
        csv_writer.writerows(movies)
| true |
39a6158162cfd51a4188c9db59aa4b458140d4f0 | Python | alejsch/horno | /horno/datos/Configuraciones.py | UTF-8 | 2,178 | 2.875 | 3 | [
"MIT"
] | permissive | from configparser import RawConfigParser
from horno.utiles.IO import IOEscritor
#=============================================================================================
class ConfigManager ():
    """Thin wrapper around RawConfigParser bound to a project file object
    (the `archivo` objects expose Existe/Ruta/CarpetaPadre)."""

    #------------------------------------------------------------------------------------------
    def __init__(self):
        self._parser = RawConfigParser()
        self._archivo = None  # file object the configuration was last loaded from

    #------------------------------------------------------------------------------------------
    def CargarCFG(self, archivo, append=False):
        """Load *archivo*; unless append, previously loaded content is discarded."""
        self._archivo = archivo
        if not append:
            self._parser = RawConfigParser()
        if archivo.Existe():
            self._parser.read(archivo.Ruta())

    #------------------------------------------------------------------------------------------
    def GuardarCFG(self, archivo=None):
        """Write the configuration back; defaults to the file it was loaded from."""
        if archivo is None: archivo = self._archivo
        archivo.CarpetaPadre().Crear()  # make sure the parent folder exists
        with IOEscritor(archivo).Abrir(False, binario=False) as iow:
            self._parser.write(iow.Stream())

    #------------------------------------------------------------------------------------------
    def Obtener(self, seccion, clave, valor_default=None):
        """Return the value for (seccion, clave); *valor_default* when missing or empty."""
        valor = self._parser.get(seccion, clave) if self._parser.has_section(seccion) and self._parser.has_option(seccion, clave) else ''
        return valor if valor else valor_default

    #------------------------------------------------------------------------------------------
    def ObtenerTodos(self, seccion):
        """Return every (key, value) pair of *seccion*, or [] when the section is missing."""
        return self._parser.items(seccion) if self._parser.has_section(seccion) else []

    #------------------------------------------------------------------------------------------
    def AgregarSeccion(self, seccion):
        """Create *seccion* if it does not exist yet."""
        if not self._parser.has_section(seccion):
            self._parser.add_section(seccion)

    #------------------------------------------------------------------------------------------
    def Setear(self, seccion, clave, valor):
        """Set (seccion, clave) to *valor*, creating the section on demand."""
        self.AgregarSeccion(seccion)
        self._parser.set(seccion, clave, valor)
| true |
cca5ea6b6e0e341b8491f737b2a0d3ba6f151be4 | Python | CNoctis/python-testing | /notifier.py | UTF-8 | 493 | 2.625 | 3 | [] | no_license | from users import UserRepository
from mailer import Mailer
import sys
class Notifier:
    """Sends a text message to a list of users by e-mail."""
    def __init__(self, user_repository=None, mailer=None):
        # Default collaborators are created per instance instead of once at
        # function-definition time: the original defaults (UserRepository()
        # and Mailer() in the signature) were evaluated a single time and
        # shared by every Notifier ever constructed.
        self.user_repository = user_repository if user_repository is not None else UserRepository()
        self.mailer = mailer if mailer is not None else Mailer()
    def notify(self, message, usernames):
        """Look up each username's address and e-mail *message* to it.

        :param message: text used as both subject and body (original behaviour)
        :param usernames: iterable of usernames known to the user repository
        """
        for username in usernames:
            user = self.user_repository.get_user(username)
            mail = user['mail']
            self.mailer.send_mail(mail, message, message)
if __name__ == "__main__":
    # argv[1] is the message; the remaining arguments are the usernames.
    # Previously argv[1:] was passed, so the message itself was also
    # treated as a username and looked up in the repository.
    notifier = Notifier()
    notifier.notify(sys.argv[1], sys.argv[2:])
| true |
142ca30de02f28549665aca4bf3ee66948e7f1f1 | Python | mgh3326/sw_expert_academy_algorithm | /모의 SW 역량테스트/1949. [모의 SW 역량테스트] 등산로 조성/main.py | UTF-8 | 2,904 | 2.859375 | 3 | [] | no_license | dx = [1, 0, -1, 0] # Right, down, left, up
dy = [0, 1, 0, -1]
# True while recursing inside a branch that has already cut one cell
# (the problem allows at most one cut per trail).
is_cutting = False
def check_map(dir_idx: int, _peak: list):
    """Try to extend the trail from the shared cursor *_peak* one step.

    Mutates _peak in place to the neighbour cell in direction dir_idx.
    Returns True when the step was strictly downhill (caller then recurses
    and backtracks); returns None otherwise — the "cut a cell" alternative
    is explored entirely inside this function.
    """
    global map_list
    global result
    global max_result
    global is_cutting
    nx, ny = dx[dir_idx], dy[dir_idx]
    # Height of the cell we are stepping from.
    peak_value = map_list[_peak[0]][_peak[1]][0]
    origin_peak = _peak.copy()
    # Advance the shared cursor to the neighbour cell.
    _peak[0] = _peak[0] + ny
    _peak[1] = _peak[1] + nx
    if 0 <= _peak[1] < n and 0 <= _peak[0] < n:
        if map_list[_peak[0]][_peak[1]][1] == False:
            if peak_value > map_list[_peak[0]][_peak[1]][0]:
                # Strictly downhill: mark visited and extend the trail.
                map_list[_peak[0]][_peak[1]][1] = True
                result += 1
                if max_result < result:
                    max_result = result
                return True
            else:
                if is_cutting == False:
                    # Not downhill: try every cut depth (up to k) that makes
                    # the neighbour strictly lower than the current cell.
                    var_value = map_list[_peak[0]][_peak[1]][0] - peak_value
                    for cutting_value in range(var_value + 1, k + 1):
                        is_cutting = True
                        map_list[_peak[0]][_peak[1]][0] -= cutting_value
                        map_list[_peak[0]][_peak[1]][1] = True
                        result += 1
                        if max_result < result:
                            max_result = result
                        dfs(_peak)
                        # Undo the cut and the visit before the next depth.
                        is_cutting = False
                        map_list[_peak[0]][_peak[1]][0] += cutting_value
                        map_list[_peak[0]][_peak[1]][1] = False
                        result -= 1
def dfs(_peak: list):
    """Depth-first search over the four directions from the cursor *_peak*.

    check_map() moves the cursor forward one cell; after exploring a
    successful downhill step this function unmarks the cell and always
    steps the cursor back before trying the next direction.
    """
    global map_list
    global result
    global max_result
    if not 0 <= _peak[1] < n:
        return
    if not 0 <= _peak[0] < n:
        return
    for _dir_idx in range(4):
        return_temp = check_map(dir_idx=_dir_idx, _peak=_peak)
        if return_temp is not None:
            # The downhill step was taken: recurse, then undo the bookkeeping.
            dfs(_peak)
            map_list[_peak[0]][_peak[1]][1] = False
            result -= 1
        # check_map advanced the cursor one cell in every case; step it back.
        nx, ny = dx[_dir_idx], dy[_dir_idx]
        _peak[0] = _peak[0] + ny * -1
        _peak[1] = _peak[1] + nx * -1
test_case_num = int(input())
for test_case_index in range(test_case_num):
    # A single cell is always a valid trail of length 1.
    max_result = 1
    n, k = map(int, input().split())
    # The first line of each test case gives the grid size n and the
    # maximum cut depth k.
    map_list = []
    max_value = 0
    peak_list = []
    for i in range(n):
        temp_list = list(map(int, input().split()))
        temp_max_value = max(temp_list)
        if temp_max_value > max_value:
            max_value = temp_max_value
        map_list.append(temp_list)
    # Collect every highest cell (trails must start at a peak) and turn
    # each cell into a [height, visited] pair.
    for i in range(n):
        for j in range(n):
            if map_list[i][j] == max_value:
                peak_list.append([i, j])
            map_list[i][j] = [map_list[i][j], False]
    # Search from every peak independently, resetting the visited flag after.
    for peak in peak_list:
        map_list[peak[0]][peak[1]][1] = True
        result = 1
        dfs(peak)
        map_list[peak[0]][peak[1]][1] = False
    print("#%d %d" % (test_case_index + 1, max_result))
| true |
59e5bc507dc352ba211739f204ee6ba68bc4c116 | Python | elenaborisova/Python-Fundamentals | /Final Exam Preparation/dictionary.py | UTF-8 | 519 | 3.296875 | 3 | [] | no_license | dictionary = {}
# NOTE: "dictionary = {}" is initialised immediately above this block.
# First input line: "word: definition" entries separated by " | ".
words_definitions = input().split(" | ")
for word_definition in words_definitions:
    word, definition = word_definition.split(": ")
    # A word may appear several times; accumulate all of its definitions.
    if word not in dictionary:
        dictionary[word] = []
    dictionary[word] += [definition]
# Second input line: words to look up.
words = input().split(" | ")
for word in words:
    if word in dictionary:
        print(word)
        # Definitions printed longest-first, one per " -" indented line.
        print(" -" + '\n -'.join(sorted(dictionary[word], key=lambda x: -len(x))))
# Optional trailing command: "List" prints all known words alphabetically.
command = input()
if command == "List":
    print(' '.join(sorted(dictionary.keys())))
| true |
b038f6c98db12a47a8bf01f2484188ce4c446add | Python | NOTITLEUNTITLE/Baekjoon | /1002.py | UTF-8 | 353 | 3.328125 | 3 | [] | no_license | t = int(input())
# For each test case, print how many points the two circles have in common
# (t is read from the line above this block).
for i in range(t):
    x1,y1,r1,x2,y2,r2 = map(int, input().split())
    # Euclidean distance between the two centres.
    distance = ((x1-x2) ** 2 + (y1-y2) ** 2) ** 0.5
    if x1==x2 and y1==y2 and r1==r2:
        # Identical circles: infinitely many common points.
        print(-1)
    elif abs(r1-r2) == distance or r1+r2 == distance:
        # Internally or externally tangent: exactly one common point.
        # (Float equality; exact here only when the distance is integral —
        # which holds for the tangency cases this problem generates.)
        print(1)
    elif abs(r1-r2) < distance < (r1+r2):
        # Properly intersecting: two common points.
        print(2)
    else:
        # Disjoint, or one circle strictly inside the other: none.
        print(0)
| true |
4132895dc4b4e1ff7d8cbd99bb17057165eba16f | Python | Baekyeongmin/spacing | /loss.py | UTF-8 | 647 | 2.703125 | 3 | [] | no_license | import torch.nn as nn
import torch
import numpy as np
class BCELossWithLength(nn.Module):
    """Binary cross-entropy that ignores padded sequence positions.

    Each batch row i contributes only its first length[i] positions; the
    summed loss is normalised by the total number of valid positions.
    """
    def __init__(self):
        super(BCELossWithLength, self).__init__()
        # Use the GPU when available, otherwise fall back to the CPU.
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    def forward(self, output, label, length):
        assert output.size() == label.size()
        # Element-wise binary cross-entropy; output is expected in (0, 1).
        elementwise = -(label * torch.log(output) + (1 - label) * torch.log(1 - output))
        # 0/1 mask keeping only the first length[i] positions of each row.
        keep = torch.zeros(output.size()).to(self.device)
        for row, valid_len in enumerate(length):
            keep[row, :valid_len] = 1
        return torch.sum(elementwise * keep) / torch.sum(length)
| true |
3810f21e7e9e519d60fac7cdc025b15d917a7073 | Python | markmbaum/Richards | /scripts/plot_out.py | UTF-8 | 2,321 | 2.65625 | 3 | [
"MIT"
] | permissive | from os import listdir
from os.path import join, basename
from numpy import *
import matplotlib.pyplot as plt
#plt.rc('font', family='serif')
#plt.rc('text', usetex=True)
#-------------------------------------------------------------------------------
#INPUT
#directory containing the binary output files written by the model
resdir = join('..', 'out')
#snapped variables to plot (must match the file-name tokens in resdir)
snapvars = ['w', 'q', 'K', 'dpsidw']
#axis labels for the snapped variables, parallel to snapvars
snaplabs = [r'$\theta$', '$q$', '$K$', r'$\partial\psi/\partial\theta$']
#whether to also plot grid spacing in an extra panel (0/1 flag)
grid = 0
#-------------------------------------------------------------------------------
#FUNCTIONS
def isint(x):
    """Return True when int(x) succeeds, False otherwise."""
    try:
        int(x)
    except (TypeError, ValueError):
        return False
    return True
# Read a flat binary array written by the model; dtype must match the writer.
readvar = lambda resdir, fn, dtype='float64': fromfile(join(resdir, fn), dtype=dtype)
def readsnaps(resdir, varname):
    """Load every snapshot file for *varname* and stack them as columns."""
    # Candidate files contain the variable name and end in an integer index.
    snapfiles = [
        fn for fn in listdir(resdir)
        if (('_' + varname + '_' in fn) and isint(fn[-1]))
    ]
    # Order chronologically by the trailing snapshot number.
    snapfiles.sort(key=lambda fn: int(fn.split('_')[-1]))
    # One column per snapshot: rows are depth, columns are time.
    columns = [readvar(resdir, fn) for fn in snapfiles]
    return stack(columns).T
#-------------------------------------------------------------------------------
#MAIN
#plot results
assert len(snapvars) == len(snaplabs), 'different number of snap variables and labels'
n = len(snapvars)
# One subplot per snapped variable, plus an extra panel for grid spacing.
if grid:
    fig, axs = plt.subplots(1,n+1)
else:
    fig, axs = plt.subplots(1,n)
# Cell-edge (ze) and cell-center (zc) depth coordinates written by the model.
ze = readvar(resdir, 'ze')
zc = readvar(resdir, 'zc')
# Snapshot times are stored as float32 seconds (converted to hours below).
tsnap = readvar(resdir, 'richards_tsnap', dtype='float32')
S = {v:readsnaps(resdir, v) for v in snapvars}
L = dict(zip(snapvars, snaplabs))
for i,v in enumerate(S):
    s = S[v]
    # A variable lives either on cell centers or on cell edges; pick the
    # matching depth coordinate by array length.
    if s.shape[0] == len(zc):
        z = zc
    elif s.shape[0] == len(ze):
        z = ze
    else:
        raise(ValueError)
    # One curve per snapshot, labeled with its time in hours.
    for j in range(s.shape[1]):
        axs[i].plot(s[:,j], -z, label='%g hr' % (tsnap[j]/3600))
    axs[i].invert_yaxis()
    axs[i].set_xlabel(L[v])
axs[0].set_ylabel('Depth (m)')
axs[-1].legend()
#plot the grid spacing if desired
if grid:
    ze = readvar(resdir, 'ze')
    axs[-1].plot(diff(ze), -ze[:-1])
    axs[-1].invert_yaxis()
    axs[-1].set_xlabel('Cell Depth')
fig.tight_layout()
plt.show()
| true |
651410019dd9abd3c08f3b3259584b1b55a1ac2f | Python | sbibhas2008/stomble_assignment | /stomble_assignment/src/controllers/location_controller.py | UTF-8 | 2,507 | 3 | 3 | [] | no_license | import mongoengine
from stomble_assignment.src.models.spaceship_model import Spaceship
from stomble_assignment.src.models.location_model import Location
def get_all_locations():
    """Return every Location document as a plain JSON-serialisable dict."""
    formatted = []
    for loc in Location.objects():
        formatted.append({
            'id': str(loc.id),
            'city_name': loc.city_name,
            'planet_name': loc.planet_name,
            'spaceport_capacity': loc.spaceport_capacity,
            # Referenced documents are reduced to their string ids.
            'spaceships': [str(ship.id) for ship in loc.spaceships]
        })
    return formatted
def add_new_location(city_name, planet_name, spaceport_capacity):
    """Create, persist and return a new Location document."""
    location = Location()
    location.city_name = city_name
    location.planet_name = planet_name
    location.spaceport_capacity = spaceport_capacity
    location.save()
    return location
'''
Functions requiring id
'''
def get_location_by_id(id):
    """Fetch a single Location by id and return it as a dict, or None.

    Returns None when the id is unknown or malformed instead of raising.
    """
    try:
        location = Location.objects().get(id=str(id))
    except Exception:
        # get() raises DoesNotExist / ValidationError for unknown or
        # malformed ids. The previous bare "except:" also swallowed
        # SystemExit and KeyboardInterrupt, which should propagate.
        return None
    return {
        'id': str(location.id),
        'city_name': location.city_name,
        'planet_name': location.planet_name,
        'spaceport_capacity': location.spaceport_capacity,
        'spaceships': list(map(lambda x: str(x.id), location.spaceships))
    }
def delete_location_by_id(id):
    # Raises mongoengine's DoesNotExist when no Location matches, so callers
    # should validate the id first.
    Location.objects().get(id=str(id)).delete()
def get_location_ref_by_id(location_id):
    # Returns the raw Document (not a formatted dict) so it can be stored
    # in reference fields of other documents.
    location = Location.objects().get(id=str(location_id))
    return location
def check_location_capacity_by_id(location_id):
    """Return True while the location's spaceport still has a free slot."""
    location = Location.objects().get(id=str(location_id))
    return len(location.spaceships) < location.spaceport_capacity
def add_spaceship_to_location(location_id, spaceship_id):
    """Append a spaceship reference to the location's docked list."""
    location = Location.objects().get(id=str(location_id))
    # Copy the current references, then persist the extended list.
    docked = list(location.spaceships)
    docked.append(spaceship_id)
    location.update(spaceships=docked)
def remove_spaceship(location_id, spaceship_id):
    """Drop the given spaceship from the location's docked list."""
    location = Location.objects().get(id=str(location_id))
    remaining = [ship for ship in location.spaceships if str(ship.id) != str(spaceship_id)]
    location.update(spaceships=remaining)
def location_has_spaceships(id):
    """Return True when at least one spaceship is docked at the location."""
    location = Location.objects().get(id=str(id))
    return len(location.spaceships) > 0
45d55c0d7698d287f1a8299092733706fe6a7692 | Python | belapyc/colab_access | /auto_encoder.py | UTF-8 | 4,750 | 3.203125 | 3 | [] | no_license | import keras
from keras.layers import Input, Dense, Dropout
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
from keras.models import Model, Sequential
def AE_train(dataframe, hidden_layer_size, epochs_amount, batch_amount, show_progress):
    """
    Creating and training single-layer autoencoder.
    Using Keras API for tensorflow environment.
    :dataframe: which dataframe to use for training a single-layer autoencoder
    :hidden_layer_size: amount of units in the hidden layer
    :epochs_amount: amount of epochs
    :batch_amount: batch size (16, 32, 64, 128, 256)
    :show_progress: 1 - to show epochs, 0 - to ignore epochs
    :returns: (encoder, decoder) pair of trained Keras models
    :Example:
    >>> encoder, decoder = AE_train(df, 10, 100, 256, 1)
    Author: Nikita Vasilenko
    """
    input_layer_size = dataframe.shape[1]
    # Input layer of the neural network.
    input_sample = Input(shape=(input_layer_size,))
    # Hidden layer with ReLU activation.
    encoded = Dense(hidden_layer_size, activation='relu')(input_sample)
    # Output layer with logistic-sigmoid activation (input -> reconstruction).
    decoded = Dense(input_layer_size, activation='sigmoid')(encoded)
    # Full autoencoder model, mapping input to its reconstruction.
    autoencoder = Model(input_sample, decoded)
    # Encoder half: the first two layers of the network.
    encoder = Model(input_sample, encoded)
    # Decoder half: a fresh input tensor fed through the final layer.
    encoded_input = Input(shape=(hidden_layer_size,))
    decoder_layer = autoencoder.layers[-1](encoded_input)
    decoder = Model(encoded_input, decoder_layer)
    # Compile and train the autoencoder.
    autoencoder.compile(optimizer='Adadelta', loss='binary_crossentropy')
    print('... [training in process] ...')
    autoencoder.fit(dataframe, dataframe,
                    epochs=epochs_amount,
                    batch_size=batch_amount,
                    shuffle=True,
                    verbose=show_progress)
    # The former post-training encode/decode pass fed only a commented-out
    # RMSE print, so that dead computation has been removed.
    return encoder, decoder
def AE_predict(encoder, decoder, df):
    """
    Given a trained model, fit the data to it and return the hidden-layer output.
    :encoder: trained Encoder model
    :decoder: trained Decoder model (kept for interface compatibility; the
        reconstruction is not needed to extract features)
    :df: data to fit to the model
    :Example:
    >>> features = AE_predict(encoder, decoder, df)
    Author: Nikita Vasilenko
    """
    # Only the encoder is needed here: the decoder's reconstruction used to
    # be computed and immediately discarded, wasting a full forward pass.
    return encoder.predict(df)
def SAE_train(dataframe, hidden_layer_size, epochs_amount, batch_amount, depth, show_progress):
    """
    Train a series (stack) of single-layer autoencoders.
    :dataframe: which dataframe to use for training a single-layer autoencoder
    :hidden_layer_size: amount of units in the hidden layer
    :epochs_amount: amount of epochs
    :batch_amount: batch size (16, 32, 64, 128, 256)
    :depth: how many autoencoders to stack
    :show_progress: 1 - to show epochs, 0 - to ignore epochs
    :Example:
    >>> encoders, decoders = SAE_train(df, 10, 100, 256, 4, 1)
    Author: Nikita Vasilenko
    """
    # Local import: "pd" was referenced below but pandas was never imported
    # anywhere in this module, so every call with depth > 1 crashed with a
    # NameError.
    import pandas as pd
    encoders = []
    decoders = []
    # First autoencoder trains on the raw dataframe.
    print('Training AutoEncoder #1')
    encoder, decoder = AE_train(dataframe, hidden_layer_size, epochs_amount, batch_amount, show_progress)
    hidden_layer = AE_predict(encoder, decoder, dataframe)
    encoders.append(encoder)
    decoders.append(decoder)
    # Each following autoencoder trains on the previous hidden representation.
    for i in range(0, depth - 1):
        print('Training AutoEncoder #', (i + 2))
        encoder, decoder = AE_train(pd.DataFrame(hidden_layer), hidden_layer_size, epochs_amount, batch_amount, show_progress)
        hidden_layer = AE_predict(encoder, decoder, hidden_layer)
        encoders.append(encoder)
        decoders.append(decoder)
    return encoders, decoders
def SAE_predict(encoders, decoders, dataframe):
    """
    Fit data to a trained stacked autoencoder and return the deepest features.
    :encoders: a LIST of trained encoders
    :decoders: a LIST of trained decoders
    :dataframe: data to fit to the model
    :Example:
    >>> features = SAE_predict(encoders, decoders, df)
    Author: Nikita Vasilenko
    """
    # The first layer consumes the raw dataframe; each later layer consumes
    # the previous layer's hidden representation.
    features = AE_predict(encoders[0], decoders[0], dataframe)
    for layer_index in range(1, len(encoders)):
        features = AE_predict(encoders[layer_index], decoders[layer_index], features)
    return features
789edc703f66aadee21452e1a89bcd90c7670f68 | Python | sharonzhou/mab-collab | /model-agreement/collaborative_model.py | UTF-8 | 7,728 | 2.90625 | 3 | [] | no_license | import numpy as np
import random, math
"""
Collaborative Model
"""
class CollaborativeModel:
    """Bayesian multi-armed-bandit player that also maintains an estimate of
    its partner's belief, trading off immediate reward, its own expected
    information gain and the information an observable pull would give the
    partner."""
    def __init__(self, n_arms=4, T=15, alpha=.65, beta=1.05, my_observability=1, partner_observability=1):
        # Time horizon and time step
        self.T = T
        self.t = 1
        # Arms and historical reward rate
        self.n_arms = n_arms
        self.chosen_count = np.zeros((n_arms))
        self.success_count = np.zeros((n_arms))
        # Observability and observability counts
        self.my_observability = my_observability
        self.partner_observability = partner_observability
        self.reward_observability_count = np.zeros((n_arms))
        self.reward_observability_partner_count = np.zeros((n_arms))
        self.reward_observability_both_count = np.zeros((n_arms))
        # My belief: per arm, a discretized distribution over theta in [0, 1]
        self.prior_alpha = alpha
        self.prior_beta = beta
        self.prior = np.array([self._discretize_beta_pdf(self.prior_alpha, self.prior_beta) for _ in range(n_arms)])
        self.q = np.copy(self.prior)
        # Model of partner's belief
        self.A_partner = np.zeros((n_arms, T + 1))
        # Real decision and hypothetical decisions made if purely exploiting,
        # purely exploring for own info gain, purely exploring for partner's info gain (for analysis)
        self.decisions = None
        self.decision = None
        self.decision_exploit = None
        self.decision_information_gain = None
        self.decision_information_gain_partner = None
    # Discretizes Beta dist into its pdf with support [0,1], normalized to integrate to 1
    def _discretize_beta_pdf(self, alpha, beta):
        x = [i / 100. for i in range(101)]
        pdf = [0 if i == 0. or i == 1. else i**(alpha - 1.) * (1. - i)**(beta - 1.) for i in x]
        pdf = [i / sum(pdf) for i in pdf]
        return pdf
    # Performs n choose x
    def _n_choose_x(self, n, x):
        f = math.factorial
        return f(n) / f(x) / f(n-x)
    # Updates my belief (retains original copy for calculating hypothetical updates)
    # my_hypothetical_observability: value of observability, varying for hypothetical updates on non-control conditions only
    def _update_my_belief(self, belief, k, r, prior, my_hypothetical_observability=1):
        # Bayes update of arm k's discretized posterior given reward r.
        q = np.copy(belief)
        pr_observation = np.ones((self.n_arms, 101))
        pr_observation[k] = [i / 100. if r else 1 - i / 100. for i in range(101)]
        q = pr_observation * prior * my_hypothetical_observability
        q = (q.T / np.sum(q, axis=1)).T
        return q
    # Updates an estimated model of partner's belief (retains original copy for calculating hypothetical updates)
    def _update_partner_belief(self, A_partner, success_count, reward_observability_count, reward_observability_partner_count):
        # Copy of matrix of partner's belief
        A_partner = np.copy(A_partner)
        thetas = np.zeros((self.n_arms))
        # Empirical success rates; left at zero until every arm has at least
        # one observable pull.
        if 0 not in reward_observability_count:
            thetas = np.true_divide(success_count, reward_observability_count)
        for k in range(self.n_arms):
            for i in range(int(reward_observability_partner_count[k])):
                pdf_binomial = self._n_choose_x(reward_observability_partner_count[k], i) * thetas[k]**i \
                    * (1 - thetas[k])**(reward_observability_partner_count[k] - i)
                A_partner[k, i] = sum(self.q[k] * pdf_binomial)
        return A_partner
    # Estimates hypothetical future model of partner's belief
    def _update_hypothetical_partner_belief(self, k, A_partner_k, success_count_k, reward_observability_count_k, reward_observability_partner_count_k, partner_hypothetical_observability):
        A_partner_k = np.copy(A_partner_k)
        theta_k = 0
        if reward_observability_count_k != 0:
            theta_k = float(success_count_k) / float(reward_observability_count_k)
        for i in range(int(reward_observability_partner_count_k)):
            pdf_binomial = self._n_choose_x(reward_observability_partner_count_k, i) * theta_k**i \
                * (1 - theta_k)**(reward_observability_partner_count_k - i)
            A_partner_k[i] = sum(self.q[k] * pdf_binomial)
        return A_partner_k * partner_hypothetical_observability
    # Observe reward r on arm k, and update belief
    def observe(self, k, r, partner_observed):
        self.chosen_count[k] += 1
        if partner_observed:
            self.reward_observability_partner_count[k] += 1
        # If you can observe reward
        if r is not None:
            self.reward_observability_count[k] += 1
            if r == 1:
                self.success_count[k] += 1
            if partner_observed:
                self.reward_observability_both_count[k] += 1
            self.q = self._update_my_belief(self.q, k, r, self.prior)
        # Update model of partner's belief
        self.A_partner = self._update_partner_belief(self.A_partner, self.success_count, self.reward_observability_count, self.reward_observability_partner_count)
        self.t += 1
    def choose(self):
        # Information gain
        information_gain = np.zeros((self.n_arms))
        information_gain_partner = np.zeros((self.n_arms))
        for k in range(self.n_arms):
            # Expected reward rates
            expectation = sum([self.q[k, i] * i / 100. for i in range(101)])
            expectation_partner = 0
            if self.reward_observability_partner_count[k] != 0:
                expectation_partner = sum(self.A_partner[k]) / self.reward_observability_partner_count[k]
            # Hypothetical success
            q_success = self._update_my_belief(self.q, k, 1, self.prior, my_hypothetical_observability=self.my_observability)
            # Both agents have a probability of observability, here added to the hypothetical counts as probabilities
            A_success_partner = self._update_hypothetical_partner_belief(k, self.A_partner[k], self.success_count[k] + 1, self.reward_observability_count[k] + 1, self.reward_observability_partner_count[k] + 1, self.partner_observability)
            # Hypothetical failure
            q_failure = self._update_my_belief(self.q, k, 0, self.prior, my_hypothetical_observability=self.my_observability)
            # Again: Both agents have a probability of observability, here added to the hypothetical counts as probabilities
            A_failure_partner = self._update_hypothetical_partner_belief(k, self.A_partner[k], self.success_count[k], self.reward_observability_count[k] + 1, self.reward_observability_partner_count[k] + 1, self.partner_observability)
            # Max exploitative value
            max_expected_success = np.max([sum([q_success[kk, i] * i / 100. for i in range(101)]) \
                for kk in range(self.n_arms)])
            max_expected_failure = np.max([sum([q_failure[kk, i] * i / 100. for i in range(101)]) \
                for kk in range(self.n_arms)])
            information_gain[k] = np.max(max_expected_success * expectation + max_expected_failure * (1 - expectation))
            # NOTE(review): np.max over a generator of scalars indexed by kk
            # on a 1-D A_success_partner looks suspicious -- confirm that
            # indexing A_success_partner[kk] by arm (rather than by count
            # bucket) is intended.
            max_expected_success_partner = np.array(list(np.max(A_success_partner[kk] for kk in range(self.n_arms))))
            max_expected_failure_partner = np.array(list(np.max(A_failure_partner[kk] for kk in range(self.n_arms))))
            information_gain_partner[k] = np.max(max_expected_success_partner * expectation_partner \
                + max_expected_failure_partner * (1 - expectation_partner))
        # Take max of expected value now + my info gain + partner's info gain over all arms
        decisions = np.array([sum([self.q[k, i] * i / 100. for i in range(101)]) for k in range(self.n_arms)]) \
            + math.ceil((self.T - self.t - 1) / 2.) * information_gain \
            + math.floor((self.T - self.t - 1) / 2.) * information_gain_partner
        # This term here represents partner's immediate/exploitative value - tried for model agreement, but no better (slightly worse)
        # + np.sum(self.A_partner, axis=1) / self.A_partner.shape[1]
        self.decisions = decisions
        self.decision = np.argmax(decisions)
        # Decisions if exploiting, purely gaining info for self, and purely gaining info for partner (for analysis)
        self.decision_exploit = np.argmax(np.array([sum([self.q[k, i] * i / 100. for i in range(101)]) for k in range(self.n_arms)]))
        self.decision_information_gain = np.argmax(information_gain)
        self.decision_information_gain_partner = np.argmax(information_gain_partner)
        # Choose next arm
        return self.decision
| true |
69a658656376e925f05d84de9bc77ffbf5bbb57c | Python | Aasthaengg/IBMdataset | /Python_codes/p03672/s096682167.py | UTF-8 | 204 | 3.5625 | 4 | [] | no_license | s = input()
# s was read from stdin just above. Find the longest even-length proper
# prefix of s that is some string written twice in a row.
len_s = len(s)
is_ok = False
while(is_ok == False):
    # Shrink by 2 so the candidate prefix always keeps an even length.
    len_s -= 2
    word = s[:len_s]
    if word[:len_s // 2] == word[len_s // 2 :]:
        is_ok = True
    else:
        pass
# The problem constraints guarantee such a prefix exists, otherwise this
# loop would run len_s negative.
print(len_s)
6ab9a6df745e3419b086a41a4160b2bcd58973a4 | Python | USPA-Technology/cnn_age_gender | /predict.py | UTF-8 | 1,090 | 2.625 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
import cv2
model_path = "./model.h5"
model = load_model(model_path)
# NOTE(review): both paths are empty strings -- cv2.imread("") returns None
# and cv2.imwrite("") fails; fill these in before running.
output_path = ""
img_path = ""
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
pic = cv2.imread(img_path)
gray = cv2.cvtColor(pic,cv2.COLOR_BGR2GRAY)
# Haar-cascade face detection (scale factor 1.3, min 5 neighbours).
faces = face_cascade.detectMultiScale(gray,1.3,5)
age_ = []
gender_ = []
for (x,y,w,h) in faces:
    # Crop with a margin around the detected face and resize to the 200x200
    # RGB input the model expects. (Negative slice starts can wrap for
    # faces near the top/left border -- TODO confirm.)
    img = gray[y-50:y+40+h,x-10:x+10+w]
    img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
    img = cv2.resize(img,(200,200))
    # Two-headed model: predict[0] is the age value, predict[1] the gender scores.
    predict = model.predict(np.array(img).reshape(-1,200,200,3))
    age_.append(predict[0])
    gender_.append(np.argmax(predict[1]))
    gend = np.argmax(predict[1])
    if gend == 0:
        gend = 'Man'
        col = (255,0,0)
    else:
        gend = 'Woman'
        col = (203,12,255)
    # Draw the bounding box and the age/gender caption on the original image.
    cv2.rectangle(pic,(x,y),(x+w,y+h),(0,225,0),4)
    cv2.putText(pic,"Age : "+str(int(predict[0]))+" / "+str(gend),(x,y),cv2.FONT_HERSHEY_SIMPLEX,w*0.005,col,4)
# Matplotlib expects RGB while OpenCV stores BGR.
pic1 = cv2.cvtColor(pic,cv2.COLOR_BGR2RGB)
plt.imshow(pic1)
plt.show()
print(age_,gender_)
cv2.imwrite(output_path,pic)
| true |
4536883b17e1babbd342a3d635ffac63eead0df7 | Python | stackless-dev/stackless-testsuite | /stackless_testsuite/v3_1/channel/test_channel.py | UTF-8 | 10,513 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 by Anselm Kruis
# Copyright (c) 2013 by Kristjan Valur Jónsson
# Copyright (c) 2012 by Richard Tew
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, print_function, division
import unittest
import stackless
try:
import threading
withThreads = True
except ImportError:
withThreads = False
import sys
import traceback
import contextlib
from stackless_testsuite.util import StacklessTestCase, require_one_thread
@contextlib.contextmanager
def block_trap(trap=True):
    """
    Context manager that temporarily overrides the block_trap flag of the
    current tasklet for the duration of the with-block, restoring the
    previous value afterwards. Defaults to enabling the trap.
    """
    current = stackless.getcurrent()
    saved = current.block_trap
    current.block_trap = trap
    try:
        yield
    finally:
        current.block_trap = saved
class TestChannels(StacklessTestCase):
    """Behavioural tests for stackless.channel: blocking semantics, channel
    balance, cross-thread communication, exception passing and block traps."""
    def testBlockingSend(self):
        ''' Test that when a tasklet sends to a channel without waiting receivers, the tasklet is blocked. '''
        # Function to block when run in a tasklet.
        def f(testChannel):
            testChannel.send(1)
        # Get the tasklet blocked on the channel.
        channel = stackless.channel()
        tasklet = stackless.tasklet(f)(channel)
        tasklet.run()
        # The tasklet should be blocked.
        self.assertTrue(
            tasklet.blocked, "The tasklet should have been run and have blocked on the channel waiting for a corresponding receiver")
        # The channel should have a balance indicating one blocked sender.
        self.assertTrue(
            channel.balance == 1, "The channel balance should indicate one blocked sender waiting for a corresponding receiver")
    def testBlockingReceive(self):
        ''' Test that when a tasklet receives from a channel without waiting senders, the tasklet is blocked. '''
        # Function to block when run in a tasklet.
        def f(testChannel):
            testChannel.receive()
        # Get the tasklet blocked on the channel.
        channel = stackless.channel()
        tasklet = stackless.tasklet(f)(channel)
        tasklet.run()
        # The tasklet should be blocked.
        self.assertTrue(
            tasklet.blocked, "The tasklet should have been run and have blocked on the channel waiting for a corresponding sender")
        # The channel should have a balance indicating one blocked sender.
        self.assertEqual(
            channel.balance, -1, "The channel balance should indicate one blocked receiver waiting for a corresponding sender")
    def testNonBlockingSend(self):
        ''' Test that when there is a waiting receiver, we can send without blocking with normal channel behaviour. '''
        originalValue = 1
        receivedValues = []
        # Function to block when run in a tasklet.
        def f(testChannel):
            receivedValues.append(testChannel.receive())
        # Get the tasklet blocked on the channel.
        channel = stackless.channel()
        tasklet = stackless.tasklet(f)(channel)
        tasklet.run()
        # Make sure that the current tasklet cannot block when it tries to receive. We do not want
        # to exit this test having clobbered the block trapping value, so we make sure we restore
        # it.
        oldBlockTrap = stackless.getcurrent().block_trap
        try:
            stackless.getcurrent().block_trap = True
            channel.send(originalValue)
        finally:
            stackless.getcurrent().block_trap = oldBlockTrap
        self.assertTrue(len(receivedValues) == 1 and receivedValues[
            0] == originalValue, "We sent a value, but it was not the one we received. Completely unexpected.")
    def testNonBlockingReceive(self):
        ''' Test that when there is a waiting sender, we can receive without blocking with normal channel behaviour. '''
        originalValue = 1
        # Function to block when run in a tasklet.
        def f(testChannel, valueToSend):
            testChannel.send(valueToSend)
        # Get the tasklet blocked on the channel.
        channel = stackless.channel()
        tasklet = stackless.tasklet(f)(channel, originalValue)
        tasklet.run()
        # Make sure that the current tasklet cannot block when it tries to receive. We do not want
        # to exit this test having clobbered the block trapping value, so we make sure we restore
        # it.
        oldBlockTrap = stackless.getcurrent().block_trap
        try:
            stackless.getcurrent().block_trap = True
            value = channel.receive()
        finally:
            stackless.getcurrent().block_trap = oldBlockTrap
        tasklet.kill()
        self.assertEqual(
            value, originalValue, "We received a value, but it was not the one we sent. Completely unexpected.")
    @require_one_thread
    def testMainTaskletBlockingWithoutASender(self):
        ''' Test that the last runnable tasklet cannot be blocked on a channel. '''
        self.assertEqual(stackless.getruncount(
        ), 1, "Leakage from other tests, with tasklets still in the scheduler.")
        c = stackless.channel()
        self.assertRaises(RuntimeError, c.receive)
    @unittest.skipUnless(withThreads, "Compiled without threading")
    def testInterthreadCommunication(self):
        ''' Test that tasklets in different threads sending over channels to each other work. '''
        self.assertEqual(stackless.getruncount(
        ), 1, "Leakage from other tests, with tasklets still in the scheduler.")
        commandChannel = stackless.channel()
        def master_func():
            commandChannel.send("ECHO 1")
            commandChannel.send("ECHO 2")
            commandChannel.send("ECHO 3")
            commandChannel.send("QUIT")
        def slave_func():
            while 1:
                command = commandChannel.receive()
                if command == "QUIT":
                    break
        def scheduler_run(tasklet_func):
            # Keep pumping the scheduler until the tasklet finishes.
            t = stackless.tasklet(tasklet_func)()
            while t.alive:
                stackless.run()
        # Master runs in a second OS thread; slave in this one.
        thread = threading.Thread(target=scheduler_run, args=(master_func,))
        thread.start()
        scheduler_run(slave_func)
    def testSendException(self):
        # Function to send the exception
        def f(testChannel):
            testChannel.send_exception(ValueError, 1, 2, 3)
        # Get the tasklet blocked on the channel.
        channel = stackless.channel()
        tasklet = stackless.tasklet(f)(channel)
        tasklet.run()
        self.assertRaises(ValueError, channel.receive)
        # A second round checks the exception arguments arrive intact.
        tasklet = stackless.tasklet(f)(channel)
        tasklet.run()
        try:
            channel.receive()
        except ValueError as e:
            self.assertEqual(e.args, (1, 2, 3))
    def testSendThrow(self):
        # subfunction in tasklet
        def bar():
            raise ValueError(1, 2, 3)
        # Function to send the exception
        def f(testChannel):
            try:
                bar()
            except Exception:
                testChannel.send_throw(*sys.exc_info())
        # Get the tasklet blocked on the channel.
        channel = stackless.channel()
        tasklet = stackless.tasklet(f)(channel)
        tasklet.run()
        self.assertRaises(ValueError, channel.receive)
        tasklet = stackless.tasklet(f)(channel)
        tasklet.run()
        try:
            channel.receive()
        except ValueError:
            exc, val, tb = sys.exc_info()  # @UnusedVariable
            self.assertEqual(val.args, (1, 2, 3))
            # Check that the traceback is correct
            l = traceback.extract_tb(tb)
            self.assertEqual(l[-1][2], "bar")
    def testBlockTrapSend(self):
        '''Test that block trapping works when sending'''
        channel = stackless.channel()
        count = [0]
        def f():
            with block_trap():
                self.assertRaises(RuntimeError, channel.send, None)
            count[0] += 1
        # Test on main tasklet and on worker
        f()
        stackless.tasklet(f)()
        stackless.run()
        self.assertEqual(count[0], 2)
    def testBlockTrapRecv(self):
        '''Test that block trapping works when receiving'''
        channel = stackless.channel()
        count = [0]
        def f():
            with block_trap():
                self.assertRaises(RuntimeError, channel.receive)
            count[0] += 1
        f()
        stackless.tasklet(f)()
        stackless.run()
        self.assertEqual(count[0], 2)
class TestClose(StacklessTestCase):
    """Test using close semantics with channels"""
    def setUp(self):
        super(TestClose, self).setUp()
        self.c = stackless.channel()
    def testSequence(self):
        # The sender pushes 0..9, then a StopIteration that terminates the
        # receiver's for-loop, then closes the channel.
        def sender():
            self.c.send_sequence(range(10))
            self.c.send_throw(StopIteration)
            self.c.close()
        data = []
        def receiver():
            for i in self.c:
                data.append(i)
            data.append(10)
        stackless.tasklet(sender)()
        stackless.tasklet(receiver)()
        stackless.run()
        self.assertEqual(data, list(range(11)))
        self.assertTrue(self.c.closed)
    def testSender(self):
        # Sending on a closed channel must fail immediately.
        self.c.close()
        self.assertRaises(ValueError, self.c.send, None)
    def testReceiver(self):
        # Receiving on a closed channel must fail immediately.
        self.c.close()
        self.assertRaises(ValueError, self.c.receive)
    def testIterator(self):
        # Iterating a closed channel raises StopIteration straight away.
        self.c.close()
        i = iter(self.c)
        def n():
            return next(i)
        self.assertRaises(StopIteration, n)
class Subclassing(StacklessTestCase):
    """Regression test: channel subclasses work without overriding __new__."""
    def test_init(self):
        """Test that we can subclass channel without overriding __new__"""
        class myclass(stackless.channel):
            def __init__(self, name):
                super(myclass, self).__init__()
                self.name = name
        name = "bong"
        c = myclass(name)
        self.assertEqual(c.name, name)
| true |
6235fc878fa4369ff53933e83414898f5729c53b | Python | paulipotter/sasa-weather-tool | /main.py | UTF-8 | 1,413 | 2.5625 | 3 | [] | no_license | import psycopg2
import csv
#from datetime import datetime
#from pytz import timezone
from format import *
from constants import *
# Load a weather-data CSV into the PostgreSQL table `testone`, then
# normalise zero values to NULL.  Expects FILE_NAME, COLUMNS and
# UPDATE_NULL_IF_ZERO from constants.py and format_list from format.py.
# Connect to PostgreSQL Database
conn = psycopg2.connect("dbname=template1 user=postgres")
cur = conn.cursor()
print("connected to db")
# Open CSV File
with open(FILE_NAME) as csv_file:
    rw = csv.reader(csv_file)
    # Ignore first line of CSV File (Header)
    next(csv_file)
    #For every row in the CSV
    for row in rw:
        #Create a new list that discards the first 4 columns and reformats the date
        data = list(format_list(row))
        #Insert the new list to the corresponding table
        # Parameterised %s placeholders -- psycopg2 does the escaping.
        cur.execute( """ INSERT INTO testone
                    (yrmodahrmn,TEMP,MIN,MAX,DEWP,
                    DIR,SPD,GUS,PCP01,PCPXX,
                    PCP06,PCP24,SD,SKC,CLG,
                    L,M,H,SLP,STP,
                    ALT,VSB)
                    VALUES
                    (%s, %s, %s, %s, %s,
                    %s, %s, %s, %s, %s,
                    %s, %s, %s, %s, %s,
                    %s, %s, %s, %s, %s,
                    %s, %s)""", data)
# Set Zeroes to NULL
for item in COLUMNS:
    query = UPDATE_NULL_IF_ZERO.format(item)
    cur.execute(query)
# Make the changes to the database persistent
conn.commit()
# Close communication with the database
cur.close()
conn.close()
| true |
963241b1675f250841dc6415d98568c3ef982402 | Python | pstreff/AdventOfCode | /day2/part1/day2.py | UTF-8 | 1,230 | 3.109375 | 3 | [] | no_license |
def main():
    """Run the Advent of Code day-2 Intcode program read from input.txt.

    Applies the '1202 program alarm' patch, then executes opcodes
    1 (add) and 2 (multiply) until the instruction pointer hits 99,
    printing the final memory and the value at position 0.
    """
    # `with` guarantees the input file is closed (the original leaked the handle).
    with open('input.txt', 'r') as reader:
        input_list = list(map(int, reader.read().split(',')))
    pointer = 0
    restore_1202_program_alarm_state(input_list)
    print(input_list)
    while input_list[pointer] != 99:
        opcode, first_input, output_position, second_input = get_instructions(input_list, pointer)
        if opcode == 1:
            output = first_input + second_input
        elif opcode == 2:
            output = first_input * second_input
        elif opcode == 3:
            # Unsupported opcode: bail out with a distinctive exit code.
            print('Opcode 3 halting!')
            exit(3)
        elif opcode == 99:
            # Unreachable in practice (the while condition already stops on 99);
            # kept for parity with the original control flow.
            print('Opcode 99 halting!')
            print(input_list)
            exit(99)
        input_list[output_position] = output
        pointer += 4  # every handled instruction is 4 cells wide
    print(input_list)
    print(input_list[0])
def get_instructions(input_list, pointer):
    """Decode one 4-cell Intcode instruction at `pointer`.

    Returns (opcode, first_input, output_position, second_input), where the
    inputs are the *values* at the positions named by the two operand cells
    and output_position is the raw target address.
    """
    opcode = input_list[pointer]
    src_a, src_b, dest = input_list[pointer + 1:pointer + 4]
    return opcode, input_list[src_a], dest, input_list[src_b]
def restore_1202_program_alarm_state(input_list):
    """Patch the program in place to the '1202 program alarm' state
    (noun = 12 at address 1, verb = 2 at address 2)."""
    input_list[1:3] = [12, 2]
if __name__ == '__main__':
    main()  # run only when executed as a script, not on import
| true |
9672e0c37d1262395b046daa315af71f42de0b83 | Python | egaudrain/PICKA | /Install/manage_picka.py | UTF-8 | 5,869 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# Some functions to install and manage the PICKA source code. These are called
# from Matlab's setup_nl.m and setup_gb.m. In fact, the source code is included
# in base64 form into the Matlab file itself thanks to make_setup.py.
#--------------------------------------------------------------------------
# Etienne Gaudrain <etienne.gaudrain@cnrs.fr> - 2018-05-02
# CNRS UMR 5292, FR | University of Groningen, UMCG, NL
#--------------------------------------------------------------------------
import shutil, os, sys, zipfile, time, socket, fnmatch
# If we want to copy to a zip file
# If we want to copy to a zip file
class archive_zip:
    """Copy target that stores files in a compressed zip archive.

    `path_mangle` is the source-tree prefix stripped from each stored
    path so that archive entries are relative to the installation root.
    """
    def __init__(self, zip_filename, path_mangle):
        self.path_mangle = path_mangle  # prefix removed when storing files
        self._file = zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED)
    def add(self, filename):
        # Store under the path with the mangle prefix removed (first match only).
        arcname = filename.replace(self.path_mangle, '', 1)
        self._file.write(filename, arcname)
    def close(self):
        self._file.close()
# If we want to copy to a folder
# If we want to copy to a folder
class archive_folder:
    """Copy target that mirrors files into a destination directory tree.

    Presents the same add()/close() interface as archive_zip so callers
    can use either interchangeably.
    """
    def __init__(self, foldername, path_mangle):
        self.foldername = foldername
        self.path_mangle = path_mangle
    def add(self, filename):
        # Strip the source prefix (with trailing separator) to get a
        # path relative to the installation root.
        prefix = os.path.join(self.path_mangle, '')
        relative = filename.replace(prefix, '', 1)
        target = os.path.join(self.foldername, relative)
        parent = os.path.dirname(target)
        if not os.path.isdir(parent):
            os.makedirs(parent)
        # copy2 preserves file metadata (timestamps, permissions).
        shutil.copy2(filename, target)
    def close(self):
        # Nothing to release for plain directory copies.
        pass
def snapshot(install_dir, snapshot_dir, code_only=True, compress=True):
    """Archive a PICKA installation.

    install_dir: path of the installation to snapshot.
    snapshot_dir: top directory receiving the snapshot; the concrete
        target name (timestamp + hostname) is generated here.
    code_only: if True, only *.m / *.py sources are archived; otherwise
        media and docs are included too (the 'tmp' folder is excluded).
    compress: True -> zip archive, False -> plain folder copy.
    """
    stamp = "%s_%s" % (time.strftime("%Y-%m-%d_%H%M%S"), socket.gethostname())
    if compress:
        target = archive_zip(os.path.join(snapshot_dir, stamp + ".zip"), install_dir)
    else:
        target = archive_folder(os.path.join(snapshot_dir, stamp), install_dir)
    if code_only:
        copytree(install_dir, target, ['*.m', '*.py'])
    else:
        copytree(install_dir, target, ['*.m', '*.py', '*.wav', '*.png', '*.jpg', '*.md', '*.html'], ['tmp'])
def copytree(src, dst, patterns, exclude=[]):
    # patterns are included
    # Walk `src` and hand every file matching one of `patterns` to dst.add().
    # `dst` is an archive_zip / archive_folder-like object exposing
    # .path_mangle and .add(filename).  Directories whose (mangled) path
    # contains a component listed in `exclude` are skipped entirely.
    # NOTE(review): mutable default `exclude=[]` is shared across calls;
    # harmless here because it is never mutated.
    for (dirpath, dirnames, filenames) in os.walk(src):
        do_this_dirpath = True
        # Strip the destination's mangle prefix before testing path components.
        for p in dirpath.replace(dst.path_mangle, '').split(os.sep):
            if p in exclude:
                do_this_dirpath = False
                break
        if not do_this_dirpath:
            continue
        for f in filenames:
            # First matching pattern wins; each file is added at most once.
            for p in patterns:
                if fnmatch.fnmatch(f, p):
                    dst.add(os.path.join(dirpath, f))
                    break
def install(src, dst, lang):
    # Install the PICKA sources from `src` into `dst`, localised for `lang`.
    # Returns (log, errors): a list of progress messages and a list of
    # caught exceptions.  NOTE: this file uses Python-2 syntax
    # (`except Exception, e`); keep edits Python-2 compatible.
    log = []
    errors = []
    # If dst is not empty, we take a snapshot
    if os.path.isdir(dst) and len(os.listdir(dst))>0:
        log.append('The target directory "%s" already exists and is not empty, so we are taking a snapshot of it.' % dst)
        snapshot_dir = os.path.join(src, 'Snapshots')
        if not os.path.isdir(snapshot_dir):
            os.makedirs(snapshot_dir)
        try:
            log.append('Taking a snapshot of "%s" to "%s"...' % (dst, snapshot_dir))
            snapshot(dst, snapshot_dir, False, True)
            log.append('Snapshot done.')
        except Exception, e:
            # Abort the whole install if we could not preserve the old copy.
            log.append("The snapshot of \"%s\" couldn't be taken...")
            errors.append(e)
            return log, errors
    # Reuse archive_folder as the copy target for the real installation.
    dsta = archive_folder(dst, src)
    try:
        log.append("Copying files from \"%s\" to \"%s\"..." % (src,dst))
        copytree(src, dsta, ['*.m', '*.py', '*.wav', '*.png', '*.jpg', '*.md', '*.mex*'], ['tmp'])
        log.append('The copy has succeeded.')
        # Remove the language files that are not needed
    except Exception,e:
        log.append("An error occured during the copy.")
        errors.append(e)
    # Localisation runs even if the copy failed; its log/errors are merged in.
    log_l, errors_l = localize(dst, lang)
    log.extend(log_l)
    errors.extend(errors_l)
    return log, errors
#==============================================================================
def localize(dst, lang):
    # Rewrite Experiments/default_participant.m inside `dst` so the default
    # participant language is `lang`.  Returns (log, errors) like install().
    # NOTE(review): the file is opened in 'rb'/'wb' but lines are mixed with
    # str literals -- this only works under Python 2; verify before porting.
    log = []
    errors = []
    try:
        f = open(os.path.join(dst, 'Experiments', 'default_participant.m'), 'rb')
        nf = []
        for l in f:
            # Replace the assignment line, keep everything else untouched.
            if l.strip().startswith('participant.language = '):
                nf.append("    participant.language = '%s';" % lang)
            else:
                nf.append(l)
        f.close()
        open(os.path.join(dst, 'Experiments', 'default_participant.m'), 'wb').write('\n'.join(nf))
    except Exception,e:
        log.append("An error occured during the copy.")
        errors.append(e)
    return log, errors
#==============================================================================
def main(argv):
    # Command-line entry point:
    #   manage_picka.py install  SRC DST LANG   -> install(SRC, DST, LANG)
    #   manage_picka.py snapshot SRC DST        -> snapshot(SRC, DST)
    # Returns 0 on success, a nonzero code on usage or install errors.
    # Test of the functions
    # src = os.path.expanduser("~/Sources/tmp/test_picka_snapshots/src")
    # dst = os.path.expanduser("~/Sources/tmp/test_picka_snapshots/snapshots")
    # snapshot(src, dst, False, False)
    if len(argv)<3:
        print "You need to provide a command followed by two path names."
        return 1
    if argv[0] not in ['install', 'snapshot']:
        print "The valid commands are 'install' and 'snapshot'."
        return 2
    if argv[0]=='install':
        log, errors = install(argv[1], argv[2], argv[3])
        print "\n".join(log)
        if len(errors)>0:
            print "---Errors:"
            print "\n".join([str(e) for e in errors])
            return 3
    elif argv[0]=='snapshot':
        snapshot(argv[1], argv[2])
    return 0
#==============================================================================
if __name__=='__main__':
    main(sys.argv[1:])
| true |
ba7112fff530ac58f03882904ae860fb4db128fc | Python | MoiMaity/Python | /Udemy SPaul/Session 8/intro_tuple.py | UTF-8 | 220 | 2.75 | 3 | [] | no_license | # Tuple in Python
#tpl = ()
major = ('Physics', 'Chemistry', 'Mathematics', 'Music', 'Comp Sc.')
#print(len(major))
#for m in major:
# print(m)
#print(major[-1::-2])
del major[0]
print(major)
| true |
d756a1cbcfbe85c572235d41908ba54866f56978 | Python | ALuesink/Project-Blok8 | /index.py | UTF-8 | 2,087 | 2.875 | 3 | [] | no_license | from flask import Flask, request, render_template
from werkzeug.contrib.cache import SimpleCache
from Bio import Entrez
from Entrez import findArticles, getAbstracts, tabel
from textmining import openbestand, tokenize, overeenkomst, toJson
app = Flask(__name__)
cache = SimpleCache()#dit is een cache om informatie tijdelijk in op te slaan
#Geeft de template index.html, de hoofdpagina, terug.
@app.route('/')
def webintro():
return render_template('./index.html')
#Deze functie geeft de template table.html met de tabel met alle resultaten terug
#hierbij wordt het zoekwoord opgehaald, om artikelen te vinden die dit zoekwoord bevatten.
#Hierbij wordt er gebruik gemaakt van een cache, in deze cache zit een dictionary met belangrijke keywords uit de artikelen.
#Als deze cache leeg, wordt de dictionary_text toegevoegd, die opgehaald is bij getAbstracts().
@app.route('/tabel', methods=["GET"])
def tabelweergeven():
zoekwoord = request.args.get("zoekwoord")
Entrez.email = "your.email@example.com"
webintro()
ID = findArticles(zoekwoord)
keys, abstracts, auteur, datum, titel, dictionary, dictionary_text = getAbstracts(ID) #haalt van elke artikel informatie op en voegt elk soort informatie(bijv. auteurs) in een aparte lijst toe.
dic = cache.get('dictionary')
if dic is None:
dic = dictionary_text
cache.set('dictionary',dic)
return render_template('table.html', zoekwoord=zoekwoord, tabel=tabel(ID, auteur, keys, datum, titel))
#Maakt een graph aan met overeenkomstige keywords uit de artikelen.
#Deze functie maakt gebruik van een cache, waarin een dictionary zit met alle keywords.
#Deze functie geeft de template graph.html terug met de graph.
@app.route('/graph')
def graph():
dictionary = cache.get('dictionary')
terrier = openbestand()
combos = tokenize(dictionary, terrier)
overeenkomsten = overeenkomst(combos)
graph_data, json_bestand= toJson(overeenkomsten)
return render_template('graph.html', data=graph_data, json=json_bestand)
app.secret_key = 'sajfldjslafjlsdajfl;sadjfl;sjaf'
if __name__ == '__main__':
app.run()
| true |
867f6ea93b97f431fddcc87e31c02a6468897a7d | Python | hellohoo/ALgorithm | /leetcode/python/53.最大子序和.py | UTF-8 | 334 | 2.875 | 3 | [] | no_license | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
n = [0 for i in range(len(nums))]
n[0] = nums[0]
for i in range(1,len(nums)):
n[i] = max(nums[i],n[i-1]+nums[i])
return max(n)
if __name__ == '__main__':
s = Solution()
nums = eval(input())
s.maxSubArray(nums) | true |
e4843bcdb2af841972a3dc52e801d24e0cf1d5fe | Python | akgeni/nlp_aug_lexical_wordnet | /text_classification_with_aug.py | UTF-8 | 2,922 | 2.890625 | 3 | [] | no_license | import re
import unidecode
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import cross_val_score
from bs4 import BeautifulSoup
import nltk
from nltk.tag import pos_tag
from nltk import sent_tokenize
from nltk.corpus import wordnet
def get_synonym_for_word(word):
"""returns the synonym given word if found, otherwise returns the same word"""
synonyms = []
for syn in wordnet.synsets(word):
for l in syn.lemmas():
synonyms.append(l.name())
synonyms = [syn for syn in synonyms if syn!=word]
if len(synonyms) == 0:
return word
else:
return synonyms[0]
def augment_sentence_wordnet(sentence, filters=['NN', 'JJ']):
"""Augments words in sentence which are filtered by pos tags"""
pos_sent = pos_tag(sentence.split())
new_sent = []
for word,tag in pos_sent:
if tag in filters:
new_sent.append(get_synonym_for_word(word))
else:
new_sent.append(word)
return " ".join(new_sent)
def augment_data(data, target):
"""Creates augmented data using wordnet synonym imputation."""
aug_data = []
aug_target = []
for row, t in zip(data, target):
aug_row = []
row_sents = sent_tokenize(row)
#print("row_sents", row_sents)
for line in row_sents:
line = augment_sentence_wordnet(line)
aug_row.append(line)
row_sents = " ".join(aug_row)
#print(row_sents)
aug_data.append(row)
aug_data.append(row_sents)
aug_target.append(t)
aug_target.append(t)
#print(len(aug_data))
return aug_data, aug_target
def clean_data(text):
soup = BeautifulSoup(text)
html_pattern = re.compile('<.*?>')
text = html_pattern.sub(r' ', soup.text)
text = unidecode.unidecode(text)
text = re.sub('[^A-Za-z0-9.]+', ' ', text)
text = text.lower()
return text
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
twenty_train = fetch_20newsgroups(subset='train',
categories=categories,
shuffle=True,
remove=('headers', 'footers', 'quotes'),
random_state=42)
print(twenty_train.target_names)
data = twenty_train.data
data = [clean_data(txt) for txt in data]
target = twenty_train.target
aug_data, aug_target = augment_data(data, target)
count_vect = CountVectorizer()
X_train_aug_counts = count_vect.fit_transform(aug_data)
print(X_train_aug_counts.shape)
mnb_aug = MultinomialNB()
print("Mean Accuracy: {:.2}".format(cross_val_score(mnb_aug, X_train_aug_counts, aug_target, cv=5).mean()))
# I get 85% accuracy here, during my experiment.
| true |
abce4acc7081af750291898c86ec283874794ef6 | Python | VitoWang/DataMiningVito | /renrenScrawler_27/src/GetrenrenFriendList.py | UTF-8 | 1,651 | 2.5625 | 3 | [] | no_license | #! /bin/env python
# -*- coding: utf-8 -*-
__author__ = 'anonymous_ch'
import urllib,urllib2,cookielib,re
def login_func():
login_page = "http://www.renren.com/ajaxLogin/login"
data = {'email': '929431626@qq.com', 'password': 'wang4502'}
post_data = urllib.urlencode(data)
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
print u"登录人人网"
req = opener.open(login_page, post_data)
req = urllib2.urlopen("http://www.renren.com/home")
html = req.read()
uid = re.search("'ruid':'(\d+)'", html).group(1)#获取用户的uid"
print u"登陆成功"
return uid
def get_list(uid):
pagenum = 0
print u"开始解析好友列表"
while True:
page = "http://friend.renren.com/GetFriendList.do?curpage=" + str(pagenum) + "&id=" + str(uid)
res = urllib2.urlopen(page)
html = res.read()
pattern = '<a href="http://www\.renren\.com/profile\.do\?id=(\d+)"><img src="[\S]*" alt="[\S]*[\s]\((.*)\)" />'
m = re.findall(pattern, html)#查找目标
if len(m) == 0:
break#不存在
for i in range(0, len(m)):
userid = m[i][0]
uname = m[i][1]
try:
print u"账户:"+userid+u" 姓名:"+unicode(uname,'utf-8')
except:
print u"账户:"+userid+u" 姓名:",
print uname,
print " "
pagenum += 1
print u"好友列表分析完毕."
if __name__ =="__main__":
get_list(login_func()) | true |
365b598bb02f04db8603e3a1e5c30b61d1424d3d | Python | HigorSenna/python-study | /guppe/dir_e_help.py | UTF-8 | 453 | 3.71875 | 4 | [] | no_license | """
Utilitários python para auxiliar na programação
dir -> Apresenta todos atributos/propriedades e funções/métodos disponiveis
para determinado tipo de dado ou variável
dir(tipo de dado/variavel)
dir('texto')
Hhlp -> Apresenta a documentação/como utilizar os atributos/propriedades e funcções/metodos disponiveis
para determinato tipo de dado ou variavel
help(tipo de dado.propriedade)
help('texto'.lower)
num = 4
help(num.real)
""" | true |
fddcf8759b274b5da713d52759c9b4894e3013d5 | Python | JoachimIsaac/Interview-Preparation | /LinkedLists/23MergeKSortedLists.py | UTF-8 | 1,587 | 4.34375 | 4 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
"""
UMPIRE:
Understand:
--> So we are going to recieve an array of linked lists as our input?yes
--> Can we use extra space?
--> What do we return ? a sorted linked list that has all the values
--> what if the input is empty or it only has one list? return None or the list
Match:
--> we can use a heap(sorted list class)
--> then we can loop over it and create a linked list like that
Plan:
--> declare sortedcontainers class and import SortedList
--> iterate through the array and load all the values of each list into the array
--> create the list and then return it
[
1->4->5,
1->3->4,
2->6
]
[1,1,2,3,4,4,5,6]
Evaluate:
Time complexity: O(N * K) where N is the number of list and K is the number of values in each list
Space complexity O(K)
Still need to learn optimal solution with heap
"""
from sortedcontainers import SortedList
class Solution:
    def mergeKLists(self, lists: List[ListNode]) -> ListNode:
        """Merge k sorted linked lists into one sorted linked list.

        Collects every node value, sorts them with the built-in sort
        (O(N log N) over the N total values, same order as the original
        SortedList approach), then builds a fresh chain of ListNodes.
        This drops the third-party ``sortedcontainers`` dependency in
        favour of the standard library.
        """
        if len(lists) == 0:
            return None
        if len(lists) == 1:
            return lists[0]
        values = []
        for head in lists:
            node = head
            while node:
                values.append(node.val)
                node = node.next
        values.sort()
        # Dummy head simplifies appending; real head is dummy.next.
        dummy = ListNode(-1)
        tail = dummy
        for value in values:
            tail.next = ListNode(value)
            tail = tail.next
        return dummy.next
| true |
4dbe099fb2ffd10e95ac56a0bde71adacc562812 | Python | sjpanchal/heli-shooter | /heli_shooter.py | UTF-8 | 12,902 | 2.859375 | 3 | [] | no_license | import pygame,time,random
pygame.init()
pygame.mixer.init()
Aqua =( 0, 255, 255)
Black= ( 0, 0, 0)
Blue =( 0, 0, 255)
Fuchsia= (255, 0, 255)
Gray= (128, 128, 128)
Green= ( 0, 128, 0)
Lime= ( 0, 255, 0)
Maroon= (128, 0, 0)
NavyBlue= ( 0, 0, 128)
Olive =(128, 128, 0)
Purple =(128, 0, 128)
Red= (255, 0, 0)
Silver =(192, 192, 192)
Teal =( 0, 128, 128)
White= (255, 255, 255)
Yellow =(255, 255, 0)
dw=500
dh=500
screen=pygame.display.set_mode([dw,dh])
pygame.display.set_caption(" Shooter")
clock=pygame.time.Clock()
bg=pygame.image.load("bg.png")
def msg(txt,size,color,x,y):
    # Render `txt` in comicsansms at `size` points and blit it with its
    # top-left corner at (x, y) on the global `screen`.
    font=pygame.font.SysFont("comicsansms",size,bold=1)
    txtsurf=font.render(txt,True,color)
    txtrect=txtsurf.get_rect()
    txtrect.x=x
    txtrect.y=y
    screen.blit(txtsurf,txtrect)
class Player(pygame.sprite.Sprite):
    # The player's helicopter: arrow keys move it, SPACE/LSHIFT fire,
    # and two frames (i1/i2) are swapped to animate the rotor.
    def __init__(self,x,y):
        super().__init__()
        self.i1=pygame.image.load("copter.png")
        self.i1=pygame.transform.scale(self.i1,[60,50])
        self.i2=pygame.image.load("copter1.png")
        self.i2=pygame.transform.scale(self.i2,[60,50])
        self.image=self.i1
        self.rect=self.image.get_rect()
        self.rect.x=x
        self.rect.y=y
        self.vx=0
        self.vy=0
        self.last_shot=pygame.time.get_ticks()  # cooldown reference for shoot()
        self.last=pygame.time.get_ticks()       # animation-frame timestamp
        self.shot_delay=1000                     # ms between shots
    def shoot(self):
        # Fire at most one bullet per shot_delay; SPACE spawns a bullet at the
        # top edge (vx=10, vy=0), LSHIFT at the bottom edge (vx=0, vy=10).
        # Bullets are registered in the global `all_sprites`/`bullets` groups.
        now=pygame.time.get_ticks()
        keys=pygame.key.get_pressed()
        if keys[pygame.K_SPACE]:
            if now-self.last_shot>self.shot_delay:
                self.last_shot=now
                bullet=Bullet(self.rect.centerx,self.rect.top,10,0)
                all_sprites.add(bullet)
                bullets.add(bullet)
        if keys[pygame.K_LSHIFT]:
            if now-self.last_shot>self.shot_delay:
                self.last_shot=now
                bullet=Bullet(self.rect.centerx,self.rect.bottom,0,10)
                all_sprites.add(bullet)
                bullets.add(bullet)
    def update(self):
        # Swap animation frame roughly every 100 ms.
        n=pygame.time.get_ticks()
        if n-self.last>100:
            self.last=n
            self.image=self.i2
        else:
            self.image=self.i1
        # One direction at a time (elif chain), 5 px per frame.
        self.vx,self.vy=0,0
        keys=pygame.key.get_pressed()
        if keys[pygame.K_LEFT]:
            self.vx=-5
        elif keys[pygame.K_RIGHT]:
            self.vx=5
        elif keys[pygame.K_UP]:
            self.vy=-5
        elif keys[pygame.K_DOWN]:
            self.vy=5
        self.rect.x+=self.vx
        self.rect.y+=self.vy
        # Clamp to the 400x400 playable area.
        if self.rect.right>=400:
            self.rect.right=400
        if self.rect.left<=0:
            self.rect.left=0
        if self.rect.bottom>=400:
            self.rect.bottom=400
        if self.rect.top<=0:
            self.rect.top=0
class Bullet(pygame.sprite.Sprite):
    # A player projectile moving with constant velocity (vx, vy).
    def __init__(self,x,y,vx,vy):
        super().__init__()
        self.image=pygame.image.load("b.png")
        self.rect=self.image.get_rect()
        self.rect.x=x
        self.rect.y=y
        self.vx=vx
        self.vy=vy
    def update(self):
        self.rect.x+=self.vx
        self.rect.y+=self.vy
        # NOTE(review): only bullets leaving the right edge are culled;
        # vertically fired bullets (vy != 0) are never killed off-screen.
        if self.rect.right>dw+10:
            self.kill()
class Mob(pygame.sprite.Sprite):
    # A balloon enemy drifting left at a fixed 3 px/frame; respawns on the
    # right edge with a random height once it leaves the screen.
    def __init__(self):
        super().__init__()
        self.i1=pygame.image.load("loon.png")
        self.i1=pygame.transform.scale(self.i1,[30,50])
        self.image=self.i1
        self.rect=self.image.get_rect()
        x=500
        y=random.randrange(50,270)
        self.rect.x=x
        self.rect.y=y
        self.shot_delay=500
        self.last=pygame.time.get_ticks()
    def update(self):
        if self.rect.right<=0:
            self.rect.x=500
            self.rect.y=random.randrange(30,250)
            # NOTE(review): self.vx is assigned here but never used below;
            # movement is the fixed -3 on the next line.
            self.vx=random.randrange(-6,-1)
        self.rect.x+=-3
class Cloud(pygame.sprite.Sprite):
    # Decorative background cloud scrolling slowly left and wrapping around.
    def __init__(self):
        super().__init__()
        self.image=pygame.image.load("cloud.png")
        self.image=pygame.transform.scale(self.image,(200,100))
        self.rect=self.image.get_rect()
        self.rect.x=500
        self.rect.y=random.randrange(40,200)
        self.vx=-1
    def update(self):
        self.rect.x+=self.vx
        if self.rect.x<-200:
            self.rect.x=500
class Ebullet(pygame.sprite.Sprite):
    # An enemy projectile; culled once it leaves the left edge.
    def __init__(self,x,y,vx,vy):
        super().__init__()
        self.image=pygame.image.load("b.png")
        self.rect=self.image.get_rect()
        self.rect.x=x
        self.rect.y=y
        self.vx=vx
        self.vy=vy
    def update(self):
        if self.rect.right<0:
            self.kill()
        self.rect.x+=self.vx
        self.rect.y+=self.vy
class Boat(pygame.sprite.Sprite):
    # A slow boat enemy along y=300 that lobs bullets up-left every 600 ms.
    def __init__(self):
        super().__init__()
        self.image=pygame.image.load("boat.png")
        self.image=pygame.transform.scale(self.image,[60,50])
        self.rect=self.image.get_rect()
        self.rect.x=500
        self.rect.y=300
        self.vx=0
        self.vy=0
        self.last_shot=pygame.time.get_ticks()
        # NOTE(review): shot_delay is set to 400 but update() hard-codes 600.
        self.shot_delay=400
        self.vx=-0.5
        self.vy=0
    def update(self):
        self.rect.x+=self.vx
        if self.rect.left<=0:
            # Left the screen: remove and roll new enemies.
            self.kill()
            mobgen()
        now=pygame.time.get_ticks()
        if now-self.last_shot>600:
            self.last_shot=now
            ebullet=Ebullet(self.rect.centerx,self.rect.top,-10,-10)
            all_sprites.add(ebullet)
            ebullets.add(ebullet)
    class Ship(pygame.sprite.Sprite):
        pass
class Ship(pygame.sprite.Sprite):
    # A fast enemy crossing the screen at 10 px/frame.
    def __init__(self):
        super().__init__()
        self.image=pygame.image.load("ship.png")
        self.image=pygame.transform.scale(self.image,(70,50))
        self.rect=self.image.get_rect()
        self.rect.x=600
        self.rect.y=random.randrange(50,250)
        self.vx=0
    def update(self):
        self.rect.x+=-10
        if self.rect.x<-50:
            self.kill()
            mobgen()
class Ecopter(pygame.sprite.Sprite):
    # An enemy helicopter: animated like Player, advances from the right and
    # fires straight left every 600 ms.
    def __init__(self):
        super().__init__()
        self.i1=pygame.image.load("ecopter.png")
        self.i1=pygame.transform.scale(self.i1,[60,50])
        self.i2=pygame.image.load("ecopter1.png")
        self.i2=pygame.transform.scale(self.i2,[60,50])
        self.image=self.i1
        self.rect=self.image.get_rect()
        self.rect.x=510
        self.rect.y=random.randrange(50,250)
        self.last=pygame.time.get_ticks()    # shot cooldown timestamp
        self.lanim=pygame.time.get_ticks()   # animation timestamp
        self.vx=0
        self.vy=0
    def shoot(self):
        now=pygame.time.get_ticks()
        if now-self.last>600:
            self.last=now
            ebullet=Ebullet(self.rect.centerx,self.rect.top,-10,0)
            all_sprites.add(ebullet)
            ebullets.add(ebullet)
    def update(self):
        # Animation frame swap every ~100 ms.
        n=pygame.time.get_ticks()
        if n-self.lanim>100:
            self.lanim=n
            self.image=self.i2
        else:
            self.image=self.i1
        # NOTE(review): vx stays -2 once x <= 350, so the copter keeps
        # drifting left off-screen and is never culled -- verify intent.
        if self.rect.x<=510 and self.rect.x>350:
            self.vx=-2
        self.rect.x+=self.vx
        self.rect.y+=self.vy
        self.shoot()
def drawlives(x,y,lives):
    # Draw one small copter icon per remaining life, 40 px apart, at (x, y).
    for a in range(lives):
        i=pygame.image.load("copter.png")
        i=pygame.transform.scale(i,(30,25))
        irect=i.get_rect()
        irect.x=x+40*a+10
        irect.y=y
        screen.blit(i,irect)
def newmob():
    # Spawn a balloon and register it in the global sprite groups.
    mob=Mob()
    mobs.add(mob)
    all_sprites.add(mob)
def newboat():
    # Spawn a boat enemy.
    boat=Boat()
    all_sprites.add(boat)
    boats.add(boat)
def newecopter():
    # Spawn an enemy helicopter.
    ecopter=Ecopter()
    all_sprites.add(ecopter)
    ecopters.add(ecopter)
def newship():
    # Spawn a fast ship; flash a "!" warning at its row while it is still
    # off-screen to the right.
    ship=Ship()
    if ship.rect.x>500:
        msg("!",100,Red,480,ship.rect.y)
        pygame.display.update()
    all_sprites.add(ship)
    ships.add(ship)
def mobgen():
    # Randomly spawn extra enemies: ~20% ship, ~10% enemy copter, ~5% ship.
    i=random.random()
    if i>0.3 and i<0.5:
        newship()
    if i>0.6 and i<0.7:
        newecopter()
    if i>0.80 and i<0.85:
        newship()
def start():
    # Title screen: persist a new high score if earned, then block until the
    # player presses Enter (quits cleanly on the window-close event).
    global hi_score,score
    screen.blit(bg,(0,0))
    i=pygame.image.load("cloud.png")
    irect=i.get_rect()
    irect.x=250
    irect.y=250
    msg("Heli Shooter",50,Red,100,150)
    if score>hi_score:
        hscore=open("highscore.txt","w")
        hscore.write(str(score))
        hscore.close()
    msg("High Score:-"+str(hi_score),20,Red,170,50)
    msg("Press Enter to Start",30,Red,100,300)
    pygame.display.update()
    wait=True
    while wait:
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                pygame.quit()
                quit()
            if event.type==pygame.KEYDOWN:
                if event.key==pygame.K_RETURN:
                    wait=0
def pause():
    # Pause screen: Enter resumes, Escape quits.
    screen.blit(bg,[0,0])
    msg("Paused",50,Red,170,220)
    pygame.display.update()
    wait=True
    while wait:
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                pygame.quit()
                quit()
            if event.type==pygame.KEYDOWN:
                if event.key==pygame.K_RETURN:
                    wait=False
                elif event.key==pygame.K_ESCAPE:
                    pygame.quit()
                    quit()
def Score():
    # Game-over screen: persist a new high score, then wait for Enter
    # (play again) or Escape (quit).  Resets the global score.
    global intro,score,hscore
    gover=True
    if score>hi_score:
        hscore=open("highscore.txt","w")
        hscore.write(str(score))
        hscore.close()
    while gover:
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                pygame.quit()
                quit()
            if event.type==pygame.KEYDOWN:
                if event.key==pygame.K_RETURN:
                    gover=False
                elif event.key==pygame.K_ESCAPE:
                    pygame.quit()
                    quit()
        msg("High Score :"+str(hi_score),25,Blue,160,100)
        msg("Press Enter to Play Again ",25,Blue,100,175)
        msg("Game Over",30,Red,180,250)
        pygame.display.flip()
    score=0
score=0
run=True
intro=True
over=False
# NOTE(review): highscore.txt is opened here and again every frame below
# without ever being closed -- the handles leak.
hscore=open("highscore.txt","r")
hi_score=int(hscore.read())
# ---- main game loop: events, (re)initialisation, updates, collisions, draw ----
while run:
    hscore=open("highscore.txt","r")
    hi_score=int(hscore.read())
    clock.tick(50)  # cap at 50 FPS
    for event in pygame.event.get():
        if event.type==pygame.QUIT:
            run=False
        if event.type==pygame.KEYDOWN:
            player.shoot()
            if event.key==pygame.K_RETURN:
                pause()
    # First frame: show the title screen and build all sprite groups.
    if intro:
        all_sprites=pygame.sprite.Group()
        cloud=Cloud()
        start()
        player=Player(200,100)
        mobs=pygame.sprite.Group()
        bullets=pygame.sprite.Group()
        boats=pygame.sprite.Group()
        ebullets=pygame.sprite.Group()
        ships=pygame.sprite.Group()
        ecopters=pygame.sprite.Group()
        all_sprites.add(cloud)
        all_sprites.add(player)
        mob=Mob()
        mobs.add(mob)
        all_sprites.add(mob)
        boat=Boat()
        boats.add(boat)
        all_sprites.add(boat)
        score=0
        lives=3
        intro=False
    # After game over: show the score screen, then rebuild everything.
    if over:
        Score()
        all_sprites=pygame.sprite.Group()
        cloud=Cloud()
        player=Player(200,100)
        mobs=pygame.sprite.Group()
        bullets=pygame.sprite.Group()
        boats=pygame.sprite.Group()
        ebullets=pygame.sprite.Group()
        ships=pygame.sprite.Group()
        ecopters=pygame.sprite.Group()
        all_sprites.add(cloud)
        all_sprites.add(player)
        mob=Mob()
        mobs.add(mob)
        all_sprites.add(mob)
        boat=Boat()
        boats.add(boat)
        all_sprites.add(boat)
        score=0
        lives=3
        over=False
    all_sprites.update()
    last_shot=pygame.time.get_ticks()
    # ---- collisions: bullets vs enemies score points, enemies vs player cost lives ----
    hits=pygame.sprite.groupcollide(mobs,bullets,True,True)
    if hits:
        score+=10
        newmob()
        mobgen()
    hits1=pygame.sprite.spritecollide(player,mobs,True)
    if hits1:
        lives-=1
        newmob()
    hits2=pygame.sprite.groupcollide(boats,bullets,True,True)
    if hits2:
        score+=30
        mobgen()
    hits4=pygame.sprite.spritecollide(player,ebullets,True)
    if hits4:
        lives-=1
    hits5=pygame.sprite.groupcollide(bullets,ships,True,True)
    if hits5:
        score+=50
        mobgen()
    hits6=pygame.sprite.spritecollide(player,ships,True)
    if hits6:
        lives-=1
        newmob()
        mobgen()
    hits7=pygame.sprite.groupcollide(bullets,ecopters,True,True)
    if hits7:
        score+=40
        mobgen()
    hits8=pygame.sprite.spritecollide(player,ecopters,True)
    if hits8:
        lives-=1
        newmob()
        mobgen()
    hits9=pygame.sprite.spritecollide(player,boats,True)
    if hits9:
        lives-=1
        mobgen()
        newmob()
    # ---- draw the frame ----
    screen.blit(bg,(0,0))
    all_sprites.draw(screen)
    msg("Score:"+str(score),20,Red,220,15)
    drawlives(10,10,lives)
    if lives<=0:
        over=1
    pygame.display.flip()
pygame.quit()
quit()
| true |
745a166b1d1f2b2553d163086e89bbb5197c3c74 | Python | oattia/eccadv | /eccadv/model/nn/neural_net.py | UTF-8 | 1,865 | 2.84375 | 3 | [] | no_license | from enum import Enum
class Libraries(Enum):
    # Supported deep-learning backends; values are arbitrary ordinals.
    KERAS = 0
    TF = 1
    TORCH = 2
class NeuralNetModel:
    """
    Abstract Class to hide the details of the deep learning library used (Keras, TF, Pytorch).
    Subclasses must implement _build_model, compile, train_batch, predict,
    save_to, load_from and context.
    """
    def __init__(self, name, config):
        self.name = name
        self.config = config
        # Backend-specific model object, created by _build_model()/load_from().
        self.network_model = None
        # -1 / None are "not initialized yet" sentinels until initialize() runs.
        self.n_classes = -1
        self.input_shape = None
        self.output_size = -1
        self.is_trainable = True
    def initialize(self, input_shape, output_dim, n_classes, load_from=None):
        """Record problem dimensions and either load a saved model or build
        a new one.  `load_from` is a path-like object (as_posix() is called
        on it); when loading, trainability is taken from config["train"]."""
        self.input_shape = input_shape
        self.output_size = output_dim
        self.n_classes = n_classes
        if load_from:
            self.load_from(load_from.as_posix())
            self.is_trainable = self.config.get("train", False)
        else:
            self._build_model()
    def _build_model(self):
        """
        Builds a new model from scratch according to the config.
        """
        raise NotImplementedError
    def compile(self, atkr=None):
        """
        Compiles the model adding adversarial training if specified
        """
        raise NotImplementedError
    def train_batch(self, features, labels):
        """
        Train and update the parameters for one batch.
        """
        raise NotImplementedError
    def predict(self, features):
        """
        Returns the network encoding for these features.
        """
        raise NotImplementedError
    def save_to(self, path):
        """
        Dump model to path.
        """
        raise NotImplementedError
    def load_from(self, path):
        """
        Load model from path.
        """
        raise NotImplementedError
    def context(self):
        """
        Returns all information about the model necessary for attackers.
        """
        raise NotImplementedError
| true |
5ab60651a7bcae6e0f12cb359f587ccf26d6ff93 | Python | CaffNanot/Ano---Bissexto | /Validação de Ano Bissexto.py | UTF-8 | 2,200 | 3.84375 | 4 | [] | no_license | #O encontro anual da família Pereira vai acontecer e alguns eventos estão programados:
#Futebol de Casados x Solteiros da Família*
#Esconde-esconde das crianças
#Almoço
#Canastra da Família*
#Cabo de Guerra
#Bingo dos idosos*
#Os itens que estão com * são exclusivos da família Pereira.
print("Bem-vindo(a) ao Encontro Anual da Família Pereira, suas atividades seram determinadas com base nos dados informados abaixo, exceto o almoço que é livre para todos. Bom Evento!")
nome = input("Digite seu Nome: ")
idade = int(input("Digite sua Idade: "))
estadoC = input("Digite seu Estado Civil: ")
# Fixed operator precedence: the original `... and x == "Solteiro" or x == "Solteira"`
# parsed as `(... and x == "Solteiro") or (x == "Solteira")`, so any "Solteira"
# (or "Casada" in the next branch) matched regardless of age or surname.
if idade <= 10:  # Crianças: só esconde-esconde.
    print("Você participa do Esconde-Esconde das Crianças")
elif idade >= 11 and idade <= 59 and "Pereira" in nome and estadoC in ("Solteiro", "Solteira"):
    print("Você participa da Canastra da Família, do Cabo de Guerra e esta no time dos Solteiros no Futebol")
elif idade >= 11 and idade <= 59 and "Pereira" in nome and estadoC in ("Casado", "Casada"):
    print("Você participa da Canastra da Família, do Cabo de Guerra e esta no time dos Casados no Futebol")
elif idade >= 11 and idade <= 59 and "Pereira" not in nome:
    print("Você participa do Cabo de Guerra")
elif idade >= 60 and "Pereira" not in nome:
    print("Você não participa do Bingo dos Idosos")
elif "Pereira" in nome and idade >= 60:
    print("Você participa do Bingo dos Idosos")
| true |
ff7616946da7bbd3347fd0ebd5f10df9b756fe35 | Python | hi0t/Outtalent | /Leetcode/150. Evaluate Reverse Polish Notation/solution1.py | UTF-8 | 469 | 2.84375 | 3 | [
"MIT"
] | permissive | h = {
'+': lambda a, b: b + a,
'-': lambda a, b: b - a,
'*': lambda a, b: b * a,
'/': lambda a, b: abs(b) // abs(a) * (1 if (b >= 0 and a >= 0) or (b <= 0 and a <= 0) else -1)
}
class Solution:
def evalRPN(self, tokens: List[str]) -> int:
stack = []
for c in tokens:
if c in h:
stack.append(h[c](stack.pop(), stack.pop()))
else:
stack.append(int(c))
return stack[-1]
| true |
302511d39c9e2ce85a772aa8f8c1394d3b4226f6 | Python | akevinblackwell/Raspberry-Pi-Class-2017 | /multiply.py | UTF-8 | 986 | 3.8125 | 4 | [] | no_license | ##def factorial(n):return reduce(lambda x,y:x*y,[1]+range(1,n+1))
def factorial(x):
    """Return x! for a non-negative integer x (recursive definition)."""
    # Base case: 0! == 1.
    if x == 0:
        return 1
    else:
        return x*factorial(x-1)
# Removed the stray module-level `print (factorial(x))`: `x` was undefined
# at that point, so the script crashed with NameError before reading input.
right = True
x = 1
while right:
try:
x = int(input("Enter a Number"))
print (factorial(x))
right = False
except:
print("You didn't enter a number")
right = True
##invalidinput = True
##factorialnumber = 1
##while invalidinput:
## try:
## factorialnumber = int(input("Enter a positive integer between 1 and 100: "))
## invalidinput = False
## except:
## print("Invalid Input. Try again.")
## invalidinput = True
##if x > 100:
## print('Number '+str(x)+' is too big.')
##elif x >= 1:
## print('The factorial of '+str(x)+' is '+str(factorial(x)))
##elif x ==0:
## print('The factorial of '+str(x)+ ' is 1.')
##else:
## print('The factorial of '+str(x)+ ' is undefined because '+str(x)+' is negative.')
##
| true |
dc52b164a9b49d4d3fb1c27556873c3a72f18094 | Python | dineshbeniwall/P342 | /ass8/ellipsoid(A).py | UTF-8 | 667 | 3.328125 | 3 | [] | no_license | import lib
a = 1
b = 1.5
c = 2
N = 100
for i in [100, 500, 1000, 2000, 5000, 7000, 10000, 15000, 20000, 50000]:
v = lib.ellipsoid(a, b, c, i)
print(f"For N={i} volume of ellipsoid : {v[0]}")
'''
For N=100 volume of ellipsoid : 12.0
For N=500 volume of ellipsoid : 11.904
For N=1000 volume of ellipsoid : 12.888
For N=2000 volume of ellipsoid : 12.492
For N=5000 volume of ellipsoid : 12.384
For N=7000 volume of ellipsoid : 12.713142857142858
For N=10000 volume of ellipsoid : 12.3912
For N=15000 volume of ellipsoid : 12.5712
For N=20000 volume of ellipsoid : 12.4488
For N=50000 volume of ellipsoid : 12.67824
[Finished in 0.47s]
'''
| true |
400a95bd4b2a0b54f821c03c538a215d2b122df2 | Python | anjanagp/RedditCommentAnalyzer | /preprocess_data.py | UTF-8 | 747 | 3.078125 | 3 | [] | no_license | from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
porter= PorterStemmer()
stop_words = set(stopwords.words('english'))
class PreprocessData:
    """Helpers for turning raw comment text into cleaned, stemmed terms."""

    def remove_stop_words(self, comment_id):
        """
        Tokenize a comment, drop non-alphabetic tokens and stop words,
        stem what remains, and return the stems joined by single spaces.

        Args:
            comment_id: an object exposing the comment text as ``.body``
                (presumably a praw comment -- verify against callers).
        """
        tokens = word_tokenize(comment_id.body)
        stems = [
            porter.stem(token)
            for token in tokens
            if token.isalpha() and token not in stop_words
        ]
        return ' '.join(stems)
| true |
27161a033c6401bc44c95b2fce2f46f98e69902c | Python | dridon/aml2 | /Code/classifiers.py | UTF-8 | 12,160 | 3.453125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from math import log
#My implementation is not efficient! Might need to optimize when we use the real dataset.
class NaiveBayesBinary:
    """Bernoulli naive Bayes classifier for binary (0/1) features.

    Datasets are integer arrays with one sample per row and the 0-indexed
    class label in the last column.
    """

    def __init__(self, alpha=1):
        """alpha: Laplace smoothing strength; typical values ]0, 1]."""
        self.alpha = alpha

    def train(self, dataset):
        """Estimate the class priors p(y) and the per-feature Bernoulli
        parameters p(x_i = 1 | y) from a labelled dataset.

        dataset: int array, one sample per row, label in the last column.
        """
        # Sort by class so each class occupies a contiguous row range.
        dataset = dataset[dataset[:, -1].argsort()]
        label_counts = np.bincount(dataset[:, -1])  # samples per class
        self.p_ys = label_counts * 1.0 / len(dataset)  # class priors p(y)
        self.feature_count = dataset.shape[1] - 1
        self.class_count = len(label_counts)
        self.p_xi_given_ys = np.zeros((self.class_count, self.feature_count))
        start = 0
        for i in range(self.class_count):
            end = start + label_counts[i]
            # Laplace smoothing: +alpha for each of the two feature values.
            denominator = label_counts[i] + 2.0 * self.alpha
            # Times feature j equals 1 within class i, vectorized over j.
            counts = np.sum(dataset[start:end, :-1], axis=0)
            self.p_xi_given_ys[i] = (counts + self.alpha) / denominator
            start = end

    def _likelihoods(self, features):
        """Return the (n_samples, n_classes) matrix p(y) * prod_j p(x_j | y)."""
        probs = np.empty((len(features), self.class_count))
        for j in range(self.class_count):
            p = self.p_xi_given_ys[j]
            # Bernoulli likelihood: p where the feature is 1, (1 - p) where 0.
            probs[:, j] = self.p_ys[j] * np.prod(
                np.where(features == 1, p, 1.0 - p), axis=1)
        return probs

    def test(self, samples):
        """Classify labelled samples (label in the last column).

        Return (per-sample error flags 0/1, predicted classes, accuracy).
        """
        predictions = self.predict(samples)
        prediction_errors = (predictions != samples[:, -1]).astype(int)
        accuracy = 1.0 - np.sum(prediction_errors) * 1.0 / len(prediction_errors)
        return prediction_errors, predictions, accuracy

    def predict(self, samples):
        """Return the most probable class for each sample row.

        As in the original code, rows are expected to carry a trailing
        label column, which is ignored; only the first feature_count
        columns are treated as features.

        Bug fix: the previous implementation *summed* per-feature
        probabilities (via dot products) instead of multiplying them,
        which is not the naive Bayes likelihood and disagreed with test().
        """
        features = samples[:, :self.feature_count]
        return np.argmax(self._likelihoods(features), axis=1)
#My implementation is not efficient! Might need to optimize when we use the real dataset.
class NaiveBayesMultinomial:
    """Multinomial naive Bayes (word-count features), working in log space."""
    def __init__(self, alpha = 1.0):
        """
        Alpha is used for Laplace smoothing. We could cross-validate over this guy.
        Typical values ]0,1]
        """
        self.alpha = alpha
    def train(self, dataset):
        """
        Train the Naive Bayes classifier.
        Takes a dataset of ints with one sample per row,
        with the sample's label at the last column.
        The classes must be 0 indexed.
        Alpha is used for Laplace smoothing. We could cross-validate over this guy.
        """
        dataset = dataset[dataset[:,-1].argsort()] # Sort the dataset by classes.
        #print dataset
        ########
        # Compute log p(y) for all classes y.
        ########
        label_counts = np.bincount(dataset[:,-1]) # Get the number of occurrences of each class, sorted.
        self.p_ys = np.log(label_counts * 1.0 / len(dataset)) # Log-priors, so they can be *added* below.
        ########
        # Compute log p(x|y) for all x,y.
        ########
        self.feature_count = len(dataset[0]) - 1
        self.class_count = len(label_counts)
        self.p_xi_given_ys = np.zeros((self.class_count, self.feature_count)) # Initialize matrix
        start_index = 0
        for i in range(self.class_count): # Loop over each class
            end_index = start_index + label_counts[i] # end of this class index
            class_word_counts = np.sum(dataset[start_index:end_index,:-1]) # sum all words of class i
            denominator = class_word_counts + self.alpha * self.feature_count # Here we add the feature_count as Laplace smoothing
            for j in range(self.feature_count): # Loop over each feature
                single_word_count = np.sum(dataset[start_index:end_index,j]) # sum number times word j appears in class i
                numerator = single_word_count + self.alpha
                self.p_xi_given_ys[i][j] = log(numerator * 1.0 / denominator) # Compute log p(xi|y)
            start_index = end_index
    def test(self, samples):
        """
        Compute P(y|x) for each class,
        and select the class with highest probability.
        Samples carry their true label in the last column.
        Return the array of prediction errors (0:good/1:error),
        the predicted classes and the prediction accuracy."""
        prediction_errors = np.zeros(len(samples), int)
        predictions = np.zeros(len(samples), int)
        class_predictions = np.zeros(self.class_count)
        for i in range(len(samples)): # Loop over each sample
            for j in range(self.class_count): # Loop over each class
                class_predictions[j] = self.p_ys[j] # Get log p(y) for class j
                # Log-likelihood: sum of count * log p(xi|y) over the feature
                # columns only (the trailing label column is sliced off).
                class_predictions[j] += np.dot(samples[i,:-1], self.p_xi_given_ys[j])
            predictions[i] = np.argmax(class_predictions) # Prediction is class with highest probability.
            # Check if the predicted class doesn't match the true class.
            if(predictions[i] != samples[i][-1]):
                prediction_errors[i] = 1
        # Compute accuracy
        accuracy = 1.0 - (np.sum(prediction_errors) * 1.0 / len(prediction_errors))
        return prediction_errors, predictions, accuracy
    def predict(self, samples):
        """
        Compute P(y|x) for each class,
        and select the class with highest probability.
        Return the array of class predictions.

        NOTE(review): unlike test(), this uses the *whole* row
        (samples[i], no [:-1] slice), so it appears to expect unlabeled
        samples; passing labelled rows here would either mismatch
        dimensions or silently weight the label column -- confirm with callers.
        """
        predictions = np.zeros(len(samples), int)
        class_predictions = np.zeros(self.class_count)
        for i in range(len(samples)): # Loop over each sample
            for j in range(self.class_count): # Loop over each class
                class_predictions[j] = self.p_ys[j] # Get log p(y) for class j
                # Add the log-likelihood of the full feature row.
                class_predictions[j] += np.dot(samples[i], self.p_xi_given_ys[j])
            predictions[i] = np.argmax(class_predictions) # Prediction is class with highest probability.
        return predictions
class RandomForest():
    """Thin wrapper around scikit-learn's RandomForestClassifier with the
    project's common train/test/predict interface (label in last column)."""
    def __init__(self, n_trees=10, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1,
                 max_features='auto', max_leaf_nodes=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,
                 verbose=0, min_density=None, compute_importances=None):
        """
        The random forest has a LOT of parameters. This will be fun to cross-validate.
        I think scikit learn has a cross-validate function.

        NOTE(review): the arguments are forwarded *positionally*, so this
        only works with a scikit-learn version whose constructor order
        matches (min_density/compute_importances were removed from modern
        releases) -- confirm the pinned sklearn version.
        """
        self.random_forest = RandomForestClassifier(n_trees, criterion, max_depth, min_samples_split, min_samples_leaf,
                                                    max_features, max_leaf_nodes, bootstrap, oob_score, n_jobs, random_state,
                                                    verbose, min_density, compute_importances)
    def train(self, dataset):
        """
        Train the Random Forest classifier.
        Takes a dataset of ints with one sample per row,
        with the sample's label at the last column.
        """
        self.random_forest.fit(dataset[:,:-1], dataset[:,-1])
    def test(self, dataset):
        """
        Test the Random Forest classifier.
        Takes a dataset of ints with one sample per row,
        with the sample's label at the last column.
        Return the array of predicted classes for each sample
        and the prediction accuracy.
        """
        predictions = np.zeros(len(dataset), int)
        # NOTE: this runs the forest twice (once in score, once in predict).
        accuracy = self.random_forest.score(dataset[:,:-1], dataset[:,-1]) # Predict and compute accuracy.
        predictions = self.predict(dataset[:,:-1]) # Predict and return list of predictions.
        return predictions, accuracy
    def predict(self, samples):
        """
        Predict using the Random Forest classifier.
        Takes a test set of ints with one sample per row (features only).
        Return the array of predicted classes for each sample.
        """
        return self.random_forest.predict(samples)
def multiclass_toy_data():
    """Build a small fixed toy dataset for testing purposes.

    Columns 0 to 4 hold the features; column 5 holds the class label.
    """
    features = np.array([
        [0, 0, 0, 0, 4],
        [0, 0, 0, 0, 5],
        [1, 3, 0, 0, 0],
        [3, 1, 0, 0, 1],
        [0, 0, 6, 2, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 1, 7, 2],
        [0, 0, 5, 1, 5],
        [0, 0, 34, 0, 0],
        [0, 0, 3, 0, 0],
    ])
    labels = np.array([3, 3, 2, 2, 1, 0, 1, 1, 0, 0])
    # Glue the label vector onto the feature matrix as a final column.
    return np.column_stack((features, labels))
| true |
99146703d273f210555d007e6989b9db9d9ba175 | Python | TheShellLand/pies | /v3/scripts/testing/media_file_scrubber.py | UTF-8 | 2,212 | 2.9375 | 3 | [
"MIT"
] | permissive | import os
import sys
import re
import base64
import getpass
import Crypto.Hash
from Crypto import Random
from Crypto.Cipher import AES
def walking():
walk_dir = sys.argv[1]
print('walk_dir = ' + walk_dir)
# If your current working directory may change during script execution, it's recommended to
# immediately convert program arguments to an absolute path. Then the variable root below will
# be an absolute path as well. Example:
# walk_dir = os.path.abspath(walk_dir)
print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))
for root, subdirs, files in os.walk(walk_dir):
print('--\nroot = ' + root)
list_file_path = os.path.join(root, 'my-directory-list.txt')
print('list_file_path = ' + list_file_path)
with open(list_file_path, 'wb') as list_file:
for subdir in subdirs:
print('\t- subdirectory ' + subdir)
for filename in files:
file_path = os.path.join(root, filename)
print('\t- file %s (full path: %s)' % (filename, file_path))
with open(file_path, 'rb') as f:
f_content = f.read()
list_file.write(('The file %s contains:\n' % filename).encode('utf-8'))
list_file.write(f_content)
list_file.write(b'\n')
# strings
'''
U2FsdGVkX1/saKb/pXM7aiMydFzDlDkA97pnhtK4297lDDnFdmvFeRNHjC63MGH9
fUET244NFWu8mtzwpQo3dhUoDRjczuEadJ+Z4PihDMszJPXSVopSYyRpwDJH6nEh
mUPZ+O2FslTbWwOJ3mJbxyC/Ug/gNniovhFxD3OvsOij69RKaXFk8b5zd5dscg1m
qy9yPCt/+fdlgwOSHcrLk4z2SpalkY3/eEOSUWjme0OZu4mWyLgxfBkmScT+6nb+
RVFct25p1sJnccJAYlvMtuPjuQszJ05dhU3x0r290N1M/0yC5dYb44UJA7iXRgjC
P6pqtLB1V2IAKFZI4HCjV2Geb+po2VUmnWYqXCyLrKN3HB56u9wxB7aLNV1VYtad
yc8lgQBL4AjMm/hOVtip6rBtNHdH5cmo8ojPFHd0fGbDgqozwQQLgVlAWoKnhXIC
x9eKciw8iVBIo5LVOaJLox9A2oaA+RmENShZplTlpu/0UNhBl09NVN7rIwWF3dEU
MxRSLGuBCdYJG6Tyb3Y7hwHyBR3K2ZGYyZmmnCv1AXWWcQMPGsEuDyEBQki4ldPe
0WnQs6ytNKm28SrzV9ua8ZHet+1OLmjjiKf3+VNV5465S1YRy1/uhNuuiezdkZmU
ty8zFkyU/DI/MQAvIXOabmC62m+9tAEVV8gTEWNBQbQ=
'''
# Clean folder names
def cleaner():
patterns = {
' ': '_',
' - ': '_',
}
for p in patterns:
re.sub(p, folderName) # Needs finishing
# Really simple encryption
def strings_enc():
print('Input program string:')
key = input()
#progString = getpass.getpass()
iv = Random.new().read(AES.block_size)
obj = AES.new(key, AES.MODE_CBC, iv)
message = "The answer is no"
ciphertext = obj.encrypt(message)
obj2 = AES.new('This is a key123', AES.MODE_CBC, 'This is an IV456')
obj2.decrypt(ciphertext)
# 'The answer is no'
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')
# main
strings_enc()
| true |
13998033f3aca95048c6432f7ca61546c3985d43 | Python | pommier/scisoft-ui | /uk.ac.diamond.sda.exporter/scripts/Other/utils/prg_order_fix/prg_order_fix.py | UTF-8 | 8,713 | 2.765625 | 3 | [] | no_license | # 01/12/2002 Mike Miller
#
# CPython script "prg_order_fix" to check the order of single Parker 6k
# program files in a directory tree and ensure programs defined there in
# are defined BEFORE they are used elsewhere in the same file.
#
# This is needed because uploading a number of 6k controller programs to a
# single file can change the order so making programs fail once downloaded
# again.
#
# NB when uploading from the 6k controller, kill all running programs first
# with "!K" or the file may get corrupted en-route to give e.g. missing lines,
# truncated lines etc.
#
# Each entry in "treeList" has all files matched recursively from "searchList"
# and these are set from entries in turn from treeList1..n and searchList1..n
# and indexed by the mask list "lists"
#
#************************************************************************
# Change these variables to what you need
#************************************************************************
# no of each tree and search lists, i.e. 1 for treeList1, searchList1 etc.
# number suffix of each tree/search list pair to use, i.e. 1 -> treeList1, searchList1 etc.
lists = [1]
# list of directory tree roots to search in for files in searchListn
treeList1=["."]
# Unix style file match string for each directory in the treeListn
searchList1=["*.prg"]
# set 1 to display some debug messages, 2 for all messages
debug = 2
#************************************************************************
#
# Python imports
import glob
import os
import sys
import string
import traceback
#
# module level variables
# current directory list - refreshed from treeList1,2,etc in the main loop
treeList = []
# current search list - refreshed from searchList1,2,etc in the main loop
searchList = []
# comment marker searched for (NB: this name shadows the builtin hash())
hash = "#"
# returns index of substring matched in nonquoted line text
def findUnquoted (line, substring):
    """Return the index of *substring* in *line*, considering only text
    that sits outside single/double quotes; -1 when there is no match.
    (Python 2 code: print statements and the `string` module.)"""
    str = line[:]          # working copy of the raw line (shadows builtin str)
    nextIndex=0
    totalIndex=0           # position reached so far in the raw line
    foundIndex = -1
    try:
        # Fast path: no quote characters at all, so a plain find will do.
        if (string.find(line, "'") == -1) and (string.find(line, '"') == -1):
            return string.find(line, substring)
        # Walk the line one unquoted chunk at a time and search each chunk.
        while (1):
            unquoteStr, nextIndex = getUnquotedStr(str[totalIndex:])
            # if debugging on follow progress
            if debug>1:
                print "findUnquoted: totalIndex, unquoteStr, nextIndex ", totalIndex, unquoteStr, nextIndex
            # end of extracted unquoted strings
            if (unquoteStr == ""):
                break
            # update next unquoted string search
            totalIndex = totalIndex + nextIndex
            # find substring in current unquoted string
            foundIndex = string.find(unquoteStr, substring)
            if (foundIndex != -1):
                # calculate position of match in the raw line
                foundIndex = totalIndex - len(unquoteStr) + foundIndex
                break
    except:
        if debug>1: print traceback.print_exc()
        return (-1)
    return foundIndex
# Scanner used by findUnquoted: given a line, return a 2-tuple of the next
# non-quoted substring and the index at which scanning should resume.
def getUnquotedStr(line):
    """Extract the next substring of *line* lying outside single or double
    quotes, together with the index just past it. ("", idx) signals that
    nothing unquoted remains at that position."""
    startSlice=0           # start index of non-quoted string slice
    endSlice=0             # end index of non-quoted string slice
    lineLen=len(line)      # store end of raw line
    index=0                # temporary index
    dblQuote='"'           # double quotes string
    sglQuote="'"           # single quotes string
    quote=dblQuote         # temporary quote string (single or double)
    # trap empty input line string
    if (lineLen == 0):
        return ("", lineLen)
    # find string start as double or single quote
    while 1:
        if (line[endSlice] == dblQuote) or (line[endSlice] == sglQuote):
            # set quote character to the non-default single one
            if (line[endSlice] == sglQuote):
                quote = sglQuote
            # if quote is matched and no substring yet found
            if (endSlice == startSlice):
                # increment to char following 1st matched quote
                endSlice = endSlice + 1
                # if at end of string, that's all
                if (endSlice >= lineLen):
                    return ("", endSlice)
                # try to match closing quote
                index = string.find (line[endSlice:], quote)
                if (index == -1):
                    # no terminating quote found, return rest of line
                    return (line[startSlice:], lineLen)
                else:
                    # set substring search to after the quoted section
                    startSlice = index + endSlice + 1
                    endSlice = index + endSlice + 1
                    # if at end of string, that's all
                    if (endSlice >= lineLen):
                        return ("", endSlice)
            else:
                # slice already available so return it now
                return (line[startSlice:endSlice], endSlice)
        else:
            endSlice=endSlice+1
            # if at end of string, that's all
            if (endSlice >= lineLen):
                return (line[startSlice:], endSlice)
# move EOL comment lines i.e. not starting in col 1
def moveComments (lineList):
    """For each line carrying an end-of-line comment, split it: the comment
    text is inserted (starting in col 1) at the position just after the most
    recent comment/blank line, and the stripped code line is appended."""
    outList = []                 # buffer for processed file
    lastCommentLine = 0          # target index for moves of EOL comments
    nCommentsMoved = 0
    for i in range(len(lineList)):
        line = lineList[i]
        if debug>1: print line,
        # col1 comment is target for comments
        if line[0] == hash:
            if debug>1: print "comment_fix : input line %d is a comment line " % i
            outList.append(line)
            lastCommentLine = len(outList)
            continue
        # whitespace line is target for comments
        stripLine = string.strip(line)
        if len(stripLine) == 0:
            if debug>1: print "comment_fix : input line %d is a blank line " % i
            outList.append(line)
            lastCommentLine = len(outList)
            continue
        # find any hash not in col1
        startHash = findUnquoted(line, hash)
        # none found so keep the line
        if startHash == -1:
            if debug>1: print "comment_fix : input line %d is a code only line " % i
            outList.append(line)
            continue
        if debug>1: print "comment_fix : input line %d is a code + EOL comment line " % i
        if debug: print "hash found at %d" % startHash
        nCommentsMoved = nCommentsMoved + 1
        # search backwards from hash for last code char
        for j in range(startHash-1, -1, -1):
            if line[j] not in string.whitespace:
                if debug>1: print "EOL comment on line %d starts at index %d" % (i, j)
                lastCodeChar = j+1
                # append stripped code line to output buffer
                outList.append(line[:lastCodeChar] + "\n")
                # move the comment up to the target line, starting in col 1
                comment = line [lastCodeChar:]
                comment = string.lstrip(comment)
                outList.insert (lastCommentLine, comment)
                break
    print "%d end of line comments moved up and started in col 1" % nCommentsMoved
    return outList
# prints elements of a sequence, one per line
# (NB: the parameter name shadows the builtin list type)
def printSeq (list):
    for name in list:
        print name
# read a file into a line list, then write it straight back out.
# NOTE(review): the comments originally hoisted above this function mention
# moving EOL comments and removing CVS "$Id:" lines, but moveComments() and
# removeDollar() are never called here - confirm whether those steps were
# meant to run between read and save.
def processFile (foundName):
    if debug>1: print "comment_fix : matched file : %s" % foundName
    try:
        # read script/module into a list
        fptr = open (foundName, "r")
        lineList = fptr.readlines()
        fptr.close()
    except:
        print traceback.print_exc()
        try:
            fptr.close()
        except:
            pass
    # save file (will NameError if the read above failed before lineList was set)
    saveFile (foundName, lineList)
# chop lines matching cvs "$Id" string
def removeDollar (lineList):
    """Return a copy of *lineList* without the lines that contain an
    unquoted CVS "$Id" marker; dropped lines are echoed to stdout."""
    outList = []
    for i in range(len(lineList)):
        line = lineList[i]
        offset = findUnquoted(line, "$Id")
        if offset == -1:
            outList.append(line)
        else:
            print "removed : " + string.rstrip(line)
    return outList
# save the modifications back to file
def saveFile (fileName, lineList):
    """Overwrite *fileName* with the given lines; errors are printed rather
    than raised. NOTE: the handle is only explicitly closed on the error
    path - on success it is left for the interpreter to clean up."""
    try:
        fptr = open (fileName, "w")
        fptr.writelines (lineList)
    except:
        print traceback.print_exc()
        try:
            fptr.close()
        except:
            pass
# function called by os.path.walk in each directory
def visitFunc (arg, dirName, names):
    """os.path.walk callback: match each file in *dirName* against every
    pattern in the module-level searchList and process the matches.
    (*arg* and *names* are required by the walk protocol but unused.)"""
    if debug: print "\nsearching in directory : %s" % dirName
    # match each directory file with search list
    for searchStr in searchList:
        foundList = glob.glob (dirName + os.sep + searchStr)
        # carry out action on each named file
        for foundName in foundList:
            print "\nprocessing file : %s" % foundName
            processFile (foundName)
    return
# function to print string to screen. wait if pause
def writeMessage (msgStr, pause=0, exit=0):
    """Print *msgStr*; when *pause* is truthy wait for Enter, and when
    *exit* is truthy terminate the script via sys.exit().
    (NB: the parameter name `exit` shadows the builtin.)"""
    print msgStr
    if pause:
        print ">>> Press return to continue : ",
        line = sys.stdin.readline()
    if exit:
        sys.exit()
    return
# use each tree and search list pair in turn: copy treeList<n> and
# searchList<n> into the module-level defaults via exec (Python 2 statement)
for listNo in lists:
    exec "treeList = treeList%s" % (listNo)
    exec "searchList = searchList%s" % (listNo)
    # recurse each tree and search for searchList matches
    for treeName in treeList:
        os.path.walk(treeName, visitFunc, "")
# that's all folks: report, pause for Enter, then exit
writeMessage ("=== processing finished", 1, 1)
| true |
86ffe5f469c1a364c7714b84a315a8839040bbcf | Python | Boukos/CSCIE63-Big-Data-Analytics | /HW5/p4.TradeVolume.kafka.py | UTF-8 | 3,054 | 2.578125 | 3 | [] | no_license | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in UTF8 encoded, '\n' delimited text directly received from Kafka in every 2 seconds.
Usage: direct_kafka_wordcount.py <broker_list> <topic>
To run this on your local machine, you need to setup Kafka and create a producer first, see
http://kafka.apache.org/documentation.html#quickstart
and then run the example
`$ bin/spark-submit --jars \
external/kafka-assembly/target/scala-*/spark-streaming-kafka-assembly-*.jar \
examples/src/main/python/streaming/direct_kafka_wordcount.py \
localhost:9092 test`
"""
from __future__ import print_function
import sys
from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from datetime import datetime
# callback to parse each input line
def parseOrder(line):
    """Parse one CSV order line into a one-element list of dicts (so it can
    be used with flatMap); malformed lines yield an empty list instead.
    Expected fields: timestamp, orderId, clientId, symbol, amount, price,
    side ("B" buy / "S" sell).
    NOTE: long() is Python 2 only - this script targets Python 2.
    """
    s = line.split(",")
    try:
        # Any side other than buy/sell means the line is malformed.
        if s[6] != "B" and s[6] != "S":
            raise Exception('Wrong format')
        #print("Received input line (%s): " % line)
        return [{"time": datetime.strptime(s[0], "%Y-%m-%d %H:%M:%S"), "orderId": long(s[1]), "clientId": long(s[2]), "symbol": s[3], "amount": int(s[4]), "price": float(s[5]), "buy": s[6] == "B"}]
    except Exception as err:
        # Covers short lines, bad numbers, bad timestamps and the raise above.
        print("Wrong line format (%s): " % line)
        return []
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: direct_kafka_wordcount.py <broker_list> <topic>", file=sys.stderr)
        exit(-1)
    # Local 2-core Spark context, 10-second streaming batches.
    conf = SparkConf().setMaster("local[2]").setAppName("KafaTopVolume")
    sc = SparkContext(conf = conf)
    ssc = StreamingContext(sc, 10)
    brokers, topic = sys.argv[1:]
    kvs = KafkaUtils.createDirectStream(ssc, [topic], {"metadata.broker.list": brokers})
    # Each Kafka record is a (key, value) pair; keep only the value text.
    lines = kvs.map(lambda x: x[1])
    # build a flat map of parsed orders (bad lines vanish via empty lists)
    orders = lines.flatMap(parseOrder)
    # find total traded volume for each symbol in the batch
    from operator import add
    symVolumesCurrent= orders.map(lambda o: (o['symbol'], o['amount'])).reduceByKey(add);
    # sort by volume descending and keep only the top symbol
    # (the index filter is < 1, i.e. top-1 - not top-5 as originally commented)
    topVolume = symVolumesCurrent.transform(lambda rdd: rdd.sortBy(lambda x: x[1], False).zipWithIndex().filter(lambda x: x[1] < 1))
    topVolume.pprint()
    ssc.start()
    ssc.awaitTermination()
| true |
cafeff804a773db61823ba6b38233034d233b231 | Python | pradyumnkumarpandey/PythonAlgorithms | /LeetCode/0126_Word_Ladder_II.py | UTF-8 | 868 | 3.078125 | 3 | [
"MIT"
] | permissive | class Solution:
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
from collections import defaultdict
wordList = set(wordList)
list1 = []
layer = {}
layer[beginWord] = [[beginWord]]
while layer:
newlayer = defaultdict(list)
for a in layer:
if a == endWord:
list1.extend(b for b in layer[a])
else:
for i in range(len(a)):
for j in 'abcdefghijklmnopqrstuvwxyz':
new1 = a[:i]+j+a[i+1:]
if new1 in wordList:
newlayer[new1]+=[k+[new1] for k in layer[a]]
wordList -= set(newlayer.keys())
layer = newlayer
return list1 | true |
39deab5358b248fb9ecc1287b4c6267834f6063b | Python | samtron1412/code | /challenge/fb-challenge/histogram.py | UTF-8 | 695 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import sys
def main(argv=None):
    """Read the access log named by argv[1] and print, for each hour-of-day
    bucket, the number of lines with a 2xx status code, sorted by count.

    Fixes relative to the original: the `def` line was missing its colon
    (a syntax error), `itemgetter` was used but never imported, the final
    print's f-string was truncated, and an out-of-range hour bucket would
    have raised KeyError.
    """
    from operator import itemgetter  # imported locally: was missing entirely
    argv = argv or sys.argv
    # One bucket per hour of the day, all starting at zero.
    histogram = {hour: 0 for hour in range(0, 24)}
    with open(argv[1], 'r') as f:
        for line in f:
            try:
                ts, code, _ = line.split(' ', 2)
            except ValueError:
                # Malformed line (fewer than three fields): skip it.
                continue
            if not code.startswith("2"):
                continue  # only count 2xx responses
            try:
                ts_ = int(ts) // 10000  # HHMMSS-style timestamp -> hour
            except ValueError:
                continue
            if ts_ in histogram:  # guard against out-of-range hours
                histogram[ts_] += 1
    # Print buckets sorted by count (ascending), hour zero-padded.
    for item in sorted(
        histogram.items(),
        key=itemgetter(1),
    ):
        print(f'{item[0]:02d}: {item[1]}')


if __name__ == "__main__":
    main()
| true |
4935c1fd7b66b2a4673e1bf2f295adec8d0e6a97 | Python | senqtus/Algorithms-and-Data-Structures-Problems | /Intersection_of_LinkedLists.py | UTF-8 | 2,085 | 4.34375 | 4 | [] | no_license | """
You are given two singly linked lists. The lists intersect at some node. Find, and return the node.
Note: the lists are non-cyclical.
Example:
A = 1 -> 2 -> 3 -> 4
B = 6 -> 3 -> 4
This should return 3 (you may assume that any nodes with the same value are the same node).
Here is a starting point:
"""
class Node:
    """Singly linked list node: a value plus a pointer to the next node."""

    def __init__(self, value: 'int', next_element: 'Node' = None):
        self.value, self.next = value, next_element
def get_length(node: 'Node') -> 'int':
    """Count the nodes reachable from *node* (0 when node is None)."""
    count = 0
    current = node
    while current is not None:
        count += 1
        current = current.next
    return count
def traverse(node: 'Node', length: 'int'):
    """Advance up to *length* steps from *node*, stopping early at the end
    of the list; return the node reached (possibly None)."""
    remaining = length
    current = node
    while remaining > 0 and current is not None:
        current = current.next
        remaining -= 1
    return current
def are_intersected(node_a, node_b) -> 'bool':
    """True when both (non-None) lists end at the same tail node, which is
    the case exactly when they intersect somewhere."""
    tail_a = node_a
    while tail_a.next is not None:
        tail_a = tail_a.next
    tail_b = node_b
    while tail_b.next is not None:
        tail_b = tail_b.next
    return tail_a == tail_b
def intersection(first: 'Node', second: 'Node') -> 'Node':
    """Return the first node shared by two intersecting singly linked
    lists, or None when either is empty or they do not intersect."""
    if first is None or second is None or not are_intersected(first, second):
        return None
    # Skip ahead in the longer list so both cursors end up equally far
    # from the shared tail.
    len_first = get_length(first)
    len_second = get_length(second)
    if len_first > len_second:
        first = traverse(first, len_first - len_second)
    else:
        second = traverse(second, len_second - len_first)
    # Advance in lockstep until the cursors meet at the junction node.
    while first != second:
        first = first.next
        second = second.next
    return first
class Node(object):
    """Linked-list node whose str() renders the whole chain, e.g. '1->2->None'."""

    def __init__(self, val):
        self.val = val
        self.next = None

    def __str__(self):
        parts = []
        node = self
        while node is not None:
            parts.append(str(node.val) + '->')
            node = node.next
        parts.append('None')
        return ''.join(parts)
if __name__ == '__main__':
    # Build list A: 1 -> 2 -> 3 -> 4
    first = Node(1)
    first.next = Node(2)
    first.next.next = Node(3)
    first.next.next.next = Node(4)
    # Build list B: 6 -> (shares A's node 3 onward), so they intersect at 3.
    second = Node(6)
    second.next = first.next.next
    # Expected output: the chain starting at the junction, i.e. "3->4->None".
    c = intersection(first, second)
    print(c)
| true |
c3b070fafe6c404a8ad9dc3cce3dbc3118115ce8 | Python | kdlong/CSCGIF | /Utilities/OutputTools.py | UTF-8 | 304 | 3.171875 | 3 | [] | no_license | import os
import errno
def makeDirectory(path):
    '''
    Make a directory, don't crash.

    Creates *path* (including any missing parents) after expanding "~".
    An "already exists as a directory" error is swallowed; every other
    OSError is re-raised.
    '''
    full_path = os.path.expanduser(path)
    try:
        os.makedirs(full_path)
    except OSError as err:
        # Only ignore the error when the target already exists as a directory.
        already_a_dir = err.errno == errno.EEXIST and os.path.isdir(full_path)
        if not already_a_dir:
            raise
| true |
64e8d5f02460da4311e67bcba04dfaaf3185859b | Python | AliAlAali/Fusion360Scripts | /LogSpiral.py | UTF-8 | 2,746 | 2.53125 | 3 | [] | no_license | #Author-alaalial
#Description-
import adsk.core, adsk.fusion, adsk.cam, traceback
import math
def run(context):
    """Fusion 360 script entry point: greet the user, then sketch the
    logarithmic spiral. Any failure is reported in a message box."""
    ui = None
    try:
        app = adsk.core.Application.get()
        ui = app.userInterface
        ui.messageBox('Hello script')
        logSpiral()
    except:
        # ui may still be None if setup failed before it was assigned.
        if ui:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def archimedeanSpiral():
    """Sketch an Archimedean spiral (r = a + beta * theta) on the XY
    construction plane of the active Fusion 360 design."""
    ui = None
    try:
        app = adsk.core.Application.get()
        ui = app.userInterface
        des = adsk.fusion.Design.cast(app.activeProduct)
        root = des.rootComponent
        # Create a new sketch.
        sk = root.sketches.add(root.xYConstructionPlane)
        # Create a series of points along the spiral using the spiral equation.
        # r = a + (beta * theta)
        pnts = adsk.core.ObjectCollection.create()
        numTurns = 5
        pointsPerTurn = 20
        distanceBetweenTurns = 5 # beta
        theta = 0
        offset = 5 # a
        for i in range(pointsPerTurn * numTurns + 1):
            r = offset + (distanceBetweenTurns * theta)
            x = r * math.cos(theta)
            y = r * math.sin(theta)
            pnts.add(adsk.core.Point3D.create(x,y,0))
            # Advance the angle by one sample step.
            theta += (math.pi*2) / pointsPerTurn
        # Fit a single spline through all sampled points.
        sk.sketchCurves.sketchFittedSplines.add(pnts)
    except:
        if ui:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
def logSpiral():
    """Sketch a golden-ratio logarithmic spiral, r = phi**(2*theta/pi),
    on the XY construction plane, plus a second "shadow" spiral whose
    radius is offset outward by a constant 0.3.

    Fix: removed the unused (and typo'd) local `lamba`, which was computed
    but never referenced.
    """
    ui = None
    try:
        app = adsk.core.Application.get()
        ui = app.userInterface
        des = adsk.fusion.Design.cast(app.activeProduct)
        root = des.rootComponent
        # Create a new sketch on the XY construction plane.
        sketch = root.sketches.add(root.xYConstructionPlane)
        # Golden ratio: the spiral's radius grows by phi every quarter turn.
        phi = (1 + math.sqrt(5)) / 2
        points = adsk.core.ObjectCollection.create()
        pointsShadow = adsk.core.ObjectCollection.create()
        theta = 0
        # 100 samples at 50 points per revolution -> two full turns.
        for i in range(0, 100):
            theta += (math.pi * 2) / 50
            radius = phi ** (2 / math.pi * theta)
            x = radius * math.cos(theta)
            y = radius * math.sin(theta)
            points.add(adsk.core.Point3D.create(x, y, 0))
            # Shadow spiral: same angle, radius pushed out by 0.3.
            radius = phi ** (2 / math.pi * theta) + 0.3
            x = radius * math.cos(theta)
            y = radius * math.sin(theta)
            pointsShadow.add(adsk.core.Point3D.create(x, y, 0))
        sketch.sketchCurves.sketchFittedSplines.add(points)
        sketch.sketchCurves.sketchFittedSplines.add(pointsShadow)
    except:
        if ui:
            ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
| true |
fb57b1af35e9549b794d7ad3acaef110a8cc8de4 | Python | moonlmq/Tools | /Machine Learning/Computer Vision/Cameo/filters.py | UTF-8 | 6,501 | 3.25 | 3 | [] | no_license | import cv2
import numpy
import utils
def recolorRC(src,dst):
    """Simulate conversion from BGR to RC(red,cyan).
    The source and destination images must both be in BGR format.
    Blues and greens are replaced with cyans.
    Pseudocde:
    dst.b = dst.g = 0.5 *(src.b+src.g)
    dst.r = src.r
    """
    #extract source image's channels as one-dimensional arrays
    b,g,r = cv2.split(src)
    #replace the B channel's values with an average of B and G (in place)
    cv2.addWeighted(b,0.5,g,0.5,0,b)
    #replace the values in destination image with the modified channels
    #use b twice as an argument because the destination's B and G channels
    #should be equal
    cv2.merge((b,b,r),dst)
def recolorRGV(src,dst):
    """Simulate conversion from BGR to RGV(red,green,value)
    The source and destination images must both be in BGR format.
    Blues are desaturated.
    Pseudocode:
    dst.b = min(src.b,src.g,src.r)
    dst.g = src.g
    dst.r = src.r
    """
    b,g,r = cv2.split(src)
    #min() computes the per-element minimum of its first two arguments
    #and writes the result into the third; two passes give min(b, g, r)
    cv2.min(b,g,b)
    cv2.min(b,r,b)
    cv2.merge((b,g,r),dst)
def recolorCMV(src,dst):
    """Simulate conversion from BGR to CMV(cyan,magenta,value)
    The source and destination images must both be in BGR format.
    Yellows are desaturated.
    Pseudocode:
    dst.b = max(src.b,src.g,src.r)
    dst.g = src.g
    dst.r = src.r
    """
    b,g,r = cv2.split(src)
    #max() computes the per-element maximum of its first two arguments
    #and writes the result into the third; two passes give max(b, g, r)
    cv2.max(b,g,b)
    cv2.max(b,r,b)
    cv2.merge((b,g,r),dst)
class VFuncFilter(object):
    """A filter that applies a function to V(or all of BGR)."""
    def __init__(self,vFunc=None,dtype = numpy.uint8):
        # Precompute a lookup table covering every representable channel
        # value (e.g. 256 entries for uint8), so apply() is a table lookup.
        length = numpy.iinfo(dtype).max+1
        self._vLookupArray = utils.createLookupArray(vFunc,length)
    def apply(self,src,dst):
        """Apply the filter with a BGR or gray source/destination"""
        # Flat views let one table pass cover every channel at once.
        srcFlatView = utils.flatView(src)
        dstFlatView = utils.flatView(dst)
        utils.applyLookupArray(self._vLookupArray,srcFlatView,dstFlatView)
class VCurveFilter(VFuncFilter):
    """A filter that applies a curve to V(or all of BGR)."""
    def __init__(self,vPoints,dtype=numpy.uint8):
        # Turn the control points into an interpolated curve function and
        # delegate the lookup-table setup to VFuncFilter.
        VFuncFilter.__init__(self,utils.createCurveFunc(vPoints),dtype)
class BGRFuncFilter(object):
    """A filter that applies different functions to each of BGR."""
    def __init__(self,vFunc=None,bFunc=None,gFunc=None,rFunc=None,dtype=numpy.uint8):
        # One lookup table per channel: each composes the channel-specific
        # function with the overall value function vFunc.
        length = numpy.iinfo(dtype).max +1
        self._bLookupArray = utils.createLookupArray(utils.createCompositeFunc(bFunc,vFunc),length)
        self._gLookupArray = utils.createLookupArray(utils.createCompositeFunc(gFunc,vFunc),length)
        self._rLookupArray = utils.createLookupArray(utils.createCompositeFunc(rFunc,vFunc),length)
    def apply(self,src,dst):
        """Apply the filter with a BGR source/destination."""
        b,g,r = cv2.split(src)
        utils.applyLookupArray(self._bLookupArray,b,b)
        # Bug fix: the green and red channels were previously mapped through
        # the *blue* lookup table, so gFunc and rFunc were never applied.
        utils.applyLookupArray(self._gLookupArray,g,g)
        utils.applyLookupArray(self._rLookupArray,r,r)
        cv2.merge([b,g,r],dst)
class BGRCurveFilter(BGRFuncFilter):
"""A filter that applies different curves to each of BGR."""
def __init__(self,vPoints=None,bPoints=None,
gPoints=None,rPoints=None,dtype=numpy.uint8):
BGRFuncFilter.__init__(self,
utils.createCurveFunc(vPoints),
utils.createCurveFunc(bPoints),
utils.createCurveFunc(gPoints),
utils.createCurveFunc(rPoints),dtype)
class BGRPortraCurveFilter(BGRCurveFilter):
"""A filter that applies Portra-like curves to BGR."""
def __init__(self,dtype=numpy.uint8):
BGRCurveFilter.__init__(self,
vPoints=[(0,0),(23,20),(157,173),(255,255)],
bPoints=[(0,0),(41,46),(231,228),(255,255)],
gPoints=[(0,0),(52,57),(189,196),(255,255)],
rPoints=[(0,0),(69,69),(213,218),(255,255)],dtype=dtype)
class BGRProviaCurveFilter(BGRCurveFilter):
"""A filter that applies Provia-like curves to BGR."""
def __init__(self,dtype=numpy.uint8):
BGRCurveFilter.__init__(self,
bPoints=[(0,0),(35,25),(205,227),(255,255)],
gPoints=[(0,0),(27,21),(196,207),(255,255)],
rPoints=[(0,0),(59,54),(202,210),(255,255)],dtype=dtype)
class BGRVelviaCurveFilter(BGRCurveFilter):
"""A filter that applies Velvia-like curves to BGR."""
def __init__(self,dtype=numpy.uint8):
BGRCurveFilter.__init__(self,
vPoints=[(0,0),(128,118),(221,215),(255,255)],
bPoints=[(0,0),(25,21),(122,253),(165,206),(255,255)],
gPoints=[(0,0),(25,21),(95,102),(181,208),(255,255)],
rPoints=[(0,0),(41,28),(183,209),(255,255)],dtype=dtype)
class BGRCrossProcessCurveFilter(BGRCurveFilter):
"""A filter that applies cross-process-like curves to BGR."""
def __init__(self,dtype=numpy.uint8):
BGRCurveFilter.__init__(self,
bPoints=[(0,0),(255,235)],
gPoints=[(0,0),(56,39),(208,226),(255,255)],
rPoints=[(0,0),(56,22),(211,255),(255,255)],dtype=dtype)
def strokeEdges(src,dst,blurKsize=7,edgeKsize=5):
if blurKsize >=3:
blurredSrc = cv2.medianBlur(src,blurKsize)
graySrc = cv2.cvtColor(blurredSrc,cv2.COLOR_BGR2GRAY)
else:
graySrc = cv2.cvtColor(src,cv2.COLOR_BGR2GRAY)
cv2.Laplacian(graySrc,cv2.CV_8U,graySrc,ksize=edgeKsize)
nomalizedInverseAlpha = (1.0/255)*(255-graySrc)
channels = cv2.split(src)
for channel in channels:
channel[:] = channel *nomalizedInverseAlpha
cv2.merge(channels,dst)
class VConvolutionFilter(object):
"""A filter that applies a convolution to V (or all of BGR)"""
def __init__(self,kernel):
self._kernel = kernel
def apply(self,src,dst):
"""Apply the filter with a BGR or gray source/destination"""
cv2.filter2D(src,-1,self._kernel,dst)
class SharpenFilter(VConvolutionFilter):
"""A sharpen filter with a 1-pixel radius"""
def __init__(self):
kernel = numpy.array([[-1,-1,-1],
[-1,9,-1],
[-1,-1,-1]])
VConvolutionFilter.__init__(self,kernel)
class FindEdgesFilter(VConvolutionFilter):
"""A edge-finding filter with a 1-pixel radius"""
def __init__(self):
kernel = numpy.array([[-1,-1,-1],
[-1,8,-1],
[-1,-1,-1]])
VConvolutionFilter.__init__(self,kernel)
class BlurFilter(VConvolutionFilter):
"""A blur filter with a 2-pixel radius"""
def __init__(self):
kernel = numpy.array([[0.04,0.04,0.04,0.04,0.04],
[0.04,0.04,0.04,0.04,0.04],
[0.04,0.04,0.04,0.04,0.04],
[0.04,0.04,0.04,0.04,0.04],
[0.04,0.04,0.04,0.04,0.04]])
VConvolutionFilter.__init__(self,kernel)
class EmbossFilter(VConvolutionFilter):
"""An emboss filter with a 1-pixel radius"""
def __init__(self):
kernel = numpy.array([[-2,-1,0],
[-1,1,-1],
[0,1,2]])
VConvolutionFilter.__init__(self,kernel) | true |
b11ffa890f3de5c073988b94dbc7b29e4c0ef816 | Python | AvniNargwani/todoman | /todoman/ui.py | UTF-8 | 12,946 | 2.5625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"ISC"
] | permissive | import datetime
import json
from time import mktime
import click
import parsedatetime
import urwid
from dateutil.tz import tzlocal
from tabulate import tabulate
from . import widgets
_palette = [
('error', 'light red', '')
]
class EditState:
none = object()
saved = object()
class TodoEditor:
"""
The UI for a single todo entry.
"""
def __init__(self, todo, lists, formatter):
"""
:param model.Todo todo: The todo object which will be edited.
"""
self.current_list = todo.list
self.todo = todo
self.lists = list(lists)
self.formatter = formatter
self.saved = EditState.none
self._loop = None
self._msg_text = urwid.Text('')
due = formatter.format_datetime(todo.due, humanize=False) or ''
dtstart = formatter.format_datetime(todo.start, humanize=False) or ''
priority = formatter.format_priority(todo.priority)
self._summary = widgets.ExtendedEdit(parent=self,
edit_text=todo.summary)
self._description = widgets.ExtendedEdit(
parent=self,
edit_text=todo.description,
multiline=True,
)
self._location = widgets.ExtendedEdit(
parent=self,
edit_text=todo.location
)
self._due = widgets.ExtendedEdit(parent=self, edit_text=due)
self._dtstart = widgets.ExtendedEdit(parent=self, edit_text=dtstart)
self._completed = urwid.CheckBox("", state=todo.is_completed)
self._priority = widgets.ExtendedEdit(parent=self, edit_text=priority)
save_btn = urwid.Button('Save', on_press=self._save)
cancel_text = urwid.Text('Hit Ctrl-C to cancel, F1 for help.')
buttons = urwid.Columns([(8, save_btn), cancel_text], dividechars=2)
pile_items = []
for label, field in [("Summary", self._summary),
("Description", self._description),
("Location", self._location),
("Due", self._due),
("Start", self._dtstart),
("Completed", self._completed),
("Priority", self._priority),
]:
label = urwid.Text(label + ":", align='right')
column = urwid.Columns([(13, label), field], dividechars=1)
pile_items.append(('pack', column))
grid = urwid.Pile(pile_items)
spacer = urwid.Divider()
self._ui_content = items = [grid, spacer, self._msg_text, buttons]
self._ui = urwid.ListBox(items)
self._help_text = urwid.Text(
'\n\nGlobal:\n'
' F1: Toggle help\n'
' Ctrl-C: Cancel\n'
' Ctrl-S: Save (only works if not a shell shortcut already)\n\n'
'In Textfields:\n'
+ '\n'.join(' {}: {}'.format(k, v) for k, v
in widgets.ExtendedEdit.HELP)
)
def change_current_list(radio_button, new_state, new_list):
if new_state:
self.current_list = new_list
list_selector = []
for _list in self.lists:
urwid.RadioButton(list_selector, _list.name,
state=_list.name == self.current_list.name,
on_state_change=change_current_list,
user_data=_list)
items.append(urwid.Pile(list_selector))
def _toggle_help(self):
if self._ui_content[-1] is self._help_text:
self._ui_content.pop()
else:
self._ui_content.append(self._help_text)
self._loop.draw_screen()
def message(self, text):
self._msg_text.set_text(text)
def edit(self):
"""
Shows the UI for editing a given todo. Returns True if modifications
were saved.
"""
self._loop = urwid.MainLoop(
self._ui,
palette=_palette,
unhandled_input=self._keypress,
handle_mouse=False,
)
try:
self._loop.run()
except Exception:
try: # Try to leave terminal in usable state
self._loop.stop()
except Exception:
pass
raise
self._loop = None
return self.saved
def _save(self, btn=None):
try:
self._save_inner()
except Exception as e:
self.message(('error', str(e)))
else:
self.saved = EditState.saved
raise urwid.ExitMainLoop()
def _save_inner(self):
self.todo.list = self.current_list
self.todo.summary = self.summary
self.todo.description = self.description
self.todo.location = self.location
self.todo.due = self.formatter.parse_datetime(self.due)
self.todo.start = self.formatter.parse_datetime(self.dtstart)
self.todo.is_completed = self._completed.get_state()
self.todo.priority = self.formatter.parse_priority(self.priority)
# TODO: categories
# TODO: comment
# TODO: priority (0: undef. 1: max, 9: min)
# https://tools.ietf.org/html/rfc5545#section-3.8
# geo (lat, lon)
# RESOURCE: the main room
def _cancel(self, btn):
raise urwid.ExitMainLoop()
def _keypress(self, key):
if key.lower() == 'f1':
self._toggle_help()
elif key == 'ctrl s':
self._save()
@property
def summary(self):
return self._summary.edit_text
@property
def description(self):
return self._description.edit_text
@property
def location(self):
return self._location.edit_text
@property
def due(self):
return self._due.edit_text
@property
def dtstart(self):
return self._dtstart.edit_text
@property
def priority(self):
return self._priority.edit_text
class TodoFormatter:
def __init__(self, date_format, time_format, dt_separator):
self.date_format = date_format
self.time_format = time_format
self.dt_separator = dt_separator
self.datetime_format = date_format + dt_separator + time_format
self._localtimezone = tzlocal()
self.now = datetime.datetime.now().replace(tzinfo=self._localtimezone)
self.tomorrow = self.now.date() + datetime.timedelta(days=1)
# Map special dates to the special string we need to return
self.special_dates = {
self.now.date(): "Today",
self.tomorrow: "Tomorrow",
}
self._parsedatetime_calendar = parsedatetime.Calendar()
def simple_action(self, action, todo):
return '{} "{}"'.format(action, todo.summary)
def compact(self, todo):
return self.compact_multiple([todo])
def compact_multiple(self, todos):
table = []
for todo in todos:
completed = "X" if todo.is_completed else " "
percent = todo.percent_complete or ''
if percent:
percent = " ({}%)".format(percent)
priority = self.format_priority_compact(todo.priority)
due = self.format_datetime(todo.due)
if todo.due and todo.due <= self.now and not todo.is_completed:
due = click.style(due, fg='red')
table.append([
todo.id,
"[{}]".format(completed),
priority,
due,
"{} {}{}".format(
todo.summary,
self.format_database(todo.list),
percent,
),
])
return tabulate(table, tablefmt='plain')
def detailed(self, todo):
"""
Returns a detailed representation of a task.
:param Todo todo: The todo component.
"""
rv = self.compact_multiple([todo])
if todo.description:
rv = "{}\n\nDescription: {}".format(rv, todo.description)
if todo.location:
rv = "{}\n\nLocation: {}".format(rv, todo.location)
return rv
def _format_date(self, date, humanize=True):
"""
Format the date using ``date_format``
If the date is today or tomorrow, return the strings "Today" or
"Tomorrow" respectively.
If the date if ``None``, returns an empty string.
:param datetime.date date: a date object
"""
if date:
if humanize and date in self.special_dates:
rv = self.special_dates[date]
else:
rv = date.strftime(self.date_format)
return rv
else:
return ''
def _format_time(self, time):
if time:
return time.strftime(self.time_format)
else:
return ''
def format_datetime(self, dt, humanize=True):
if not dt:
date_part = None
time_part = None
else:
assert isinstance(dt, datetime.datetime)
date_part = dt.date()
time_part = dt.time()
return self.dt_separator.join(filter(bool, (
self._format_date(date_part, humanize=humanize),
self._format_time(time_part)
)))
def parse_priority(self, priority):
if priority is None or priority is '':
return None
if priority == 'low':
return 9
elif priority == 'medium':
return 5
elif priority == 'high':
return 4
elif priority == 'none':
return 0
else:
raise ValueError('Priority has to be one of low, medium,'
' high or none')
def format_priority(self, priority):
if not priority:
return ''
elif 1 <= priority <= 4:
return 'high'
elif priority == 5:
return 'medium'
elif 6 <= priority <= 9:
return 'low'
def format_priority_compact(self, priority):
if not priority:
return ''
elif 1 <= priority <= 4:
return "!!!"
elif priority == 5:
return "!!"
elif 6 <= priority <= 9:
return "!"
def parse_datetime(self, dt):
if not dt:
return None
rv = self._parse_datetime_naive(dt)
return rv.replace(tzinfo=self._localtimezone)
def _parse_datetime_naive(self, dt):
try:
return datetime.datetime.strptime(dt, self.datetime_format)
except ValueError:
pass
try:
return datetime.datetime.strptime(dt, self.date_format)
except ValueError:
pass
try:
return datetime.datetime.combine(
self.now.date(),
datetime.datetime.strptime(dt, self.time_format).time()
)
except ValueError:
pass
rv, certainty = self._parsedatetime_calendar.parse(dt)
if not certainty:
raise ValueError(
'Time description not recognized: {}' .format(dt)
)
return datetime.datetime.fromtimestamp(mktime(rv))
def format_database(self, database):
return '{}@{}'.format(database.color_ansi or '',
click.style(database.name))
class PorcelainFormatter(TodoFormatter):
def __init__(self):
pass
def _todo_as_dict(self, todo):
return dict(
completed=todo.is_completed,
due=self.format_datetime(todo.due),
id=todo.id,
list=todo.list.name,
percent=todo.percent_complete,
summary=todo.summary,
priority=todo.priority,
)
def compact(self, todo):
return json.dumps(self._todo_as_dict(todo), indent=4, sort_keys=True)
def compact_multiple(self, todos):
data = [self._todo_as_dict(todo) for todo in todos]
return json.dumps(data, indent=4, sort_keys=True)
def simple_action(self, action, todo):
return self.compact(todo)
def parse_priority(self, priority):
if priority is None:
return None
try:
if int(priority) in range(0, 10):
return int(priority)
else:
raise ValueError('Priority has to be in the range 0-9')
except ValueError as e:
raise click.BadParameter(e)
def detailed(self, todo):
return self.compact(todo)
def format_datetime(self, date, humanize=False):
if date:
return int(date.timestamp())
else:
return None
def parse_datetime(self, value):
if value:
return datetime.datetime.fromtimestamp(value)
else:
return None
| true |
586cd0d4d9002285850844e6ac329366c0dca7b4 | Python | YooSuhwa/CodingLife | /Baekjoon_Jungol/2143 두 배열의 합.py | UTF-8 | 1,272 | 3.84375 | 4 | [] | no_license | '''
@ 2019.12.28 ush
* 백준 알고리즘 - 2143 두 배열의 합 (https://www.acmicpc.net/problem/2143)
* python
* MITM
* 두 배열 A,B가 주어졌을 때, A의 부 배열의 합과 B의 부배열의 합을 더한 것이 find가 되는 경우의 수 구하기
* 부 배열은 A[i], A[i+1], …, A[j-1], A[j] (단, 1 ≤ i ≤ j ≤ n)을 말한다.
* 이러한 부 배열의 합은 A[i]+…+A[j]를 의미한다. 즉 배열의 연속된 합
* 1. a,b 배열에 대한 모든 부배열의 합을 계산하여 team1, team2에 저장
* 2. a의 각 부배열 합에 대해서 'find-a ==b'인 b의 부분합의 개수를 센다.
'''
from collections import Counter
find = int(input())
n = int(input())
a = list(map(int, input().split()))
m = int(input())
b = list(map(int, input().split()))
team1 = []
team2 = []
#부 배열은 연속된거!!!!! 따라서 bit마스크 안썼음
for i in range(n):
temp = 0
for j in range(i, n):
temp += a[j]
team1.append(temp)
for i in range(m):
temp = 0
for j in range(i, m):
temp += b[j]
team2.append(temp)
team1.sort()
team2.sort()
counter = Counter(team2)
answer = 0
for num in team1:
temp = find - num
answer += counter[temp]
print(answer) | true |
8aa93a8f29df0ee6b8f3df676cd6553bc6ac1f0f | Python | NorberMV/DjangoBlogLive | /users/views.py | UTF-8 | 4,297 | 2.609375 | 3 | [
"MIT"
] | permissive |
"""Users views."""
# Django
from django.contrib.auth import authenticate, login, logout
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import DetailView, FormView, UpdateView
from django.urls import reverse, reverse_lazy
# Models
from django.contrib.auth.models import User
from posts.models import Post
# Forms
from users.forms import ProfileForm, SignupForm
# Utilities
import pdb
class UserDetailView(LoginRequiredMixin,DetailView):
"""User detail View."""
template_name = "users/detail.html"
slug_field = 'username'#'username'
slug_url_kwarg = 'username'
queryset = User.objects.all() # Trae un queryset de todos los usernames de la base de datos
context_object_name = 'user' # Este es el nombre del query en el contexto y en el template
#pdb.set_trace()
def get_context_data(self, **kwargs): # De donde toma **kwargs?
"""Add user´s posts to context."""
context = super().get_context_data(**kwargs) # Super(). is used to give access to methods and properties of a parent or sibling class
#pdb.set_trace()
user = self.get_object() # Trae el objeto filtrado del queryset por medio del self.kwargs pasado como argumentod del template The object
context['publicaciones'] = Post.objects.filter(user=user).order_by('-created') #Agrega 'publicaciones al context y lo filtra por medio de user para traer los posts del usuario en particular
#pdb.set_trace()
return context
@login_required
def update_profile(request): # Request es una clase
""" Update a user´s profile view. """
profile = request.user.profile # creamos el objeto profile a partír de instanciar la clase profile
#pdb.set_trace()
if request.method =='POST':
form = ProfileForm(request.POST, request.FILES)
#pdb.set_trace()
if form.is_valid():
data = form.cleaned_data
#print(data)
profile.website = data['website']
profile.phone_number = data['phone_number']
profile.biography = data['biography']
profile.picture = data['picture']
profile.save()
url = reverse('users:detail',kwargs={'username':request.user.username})
return redirect(url)
else:
form = ProfileForm()
return render(
request=request,
template_name='users/update_profile.html',
context={
'profile': profile,
'user': request.user,
'form': form
}
)
def login_view(request):
"""Login view."""
if request.method == 'POST':
#pdb.set_trace()
username = request.POST['username'] # Las variables username y password que recibe nuestro servidor fueron nombradas en login.html
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user:
login(request, user)
return redirect('posts:feed')
else:
return render(request, 'users/login.html', {'error': 'Invalid username and password'})
return render(request, 'users/login.html')
class SignupView(FormView):
"""Users sign up view."""
template_name = 'users/signup.html'
form_class = SignupForm
success_url = reverse_lazy('users:login')
def form_valid(self, form):
"""Save form data."""
form.save()
return super().form_valid(form)
"""
def signup(request):
# Sign up view.
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
form.save()
return redirect('users:login')
else:
form = SignupForm()
return render(
request=request,
template_name='users/signup.html',
context={'form': form}
)
#return render(request, 'users/signup.html')
"""
@login_required
def logout_view(request):
"""Logout a user."""
logout(request)
return redirect('users:login')
| true |
1834e00b23ea2810b4bd0515f6b30e7b57248653 | Python | chuhranm/IntroPython | /Week 13 ( More on Classes)/Checkyourself.py | UTF-8 | 880 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 30 11:08:18 2020
@author: devinpowers
"""
class TestClass(object):
def __init__(self,param_str=''):
self.the_str=''
for c in param_str:
if c.isalpha():
self.the_str += c
def __add__(self,param):
if type(param)==TestClass:
the_str = self.the_str + param.the_str
return TestClass(the_str)
else:
return self
def __str__(self):
return 'Value: {}'.format(self.the_str)
inst1 = TestClass('abc')
inst2 = TestClass('123ijk')
sumInst1 = inst1 + inst2
sumInst2 = inst1 + 'xyz'
print(inst1)
print(sumInst1)
print(sumInst2)
print(isinstance(sumInst2,TestClass)) | true |
b0686e07ec7a0f2595d5b2fcc7d505aa7b5f76b2 | Python | LaurenH123/PG_LH | /Function LH.py | UTF-8 | 819 | 4.0625 | 4 | [] | no_license | def hello (name):
print ("Hello " + name + "!")
print ("How's it going?")
day = input ("Is your day looking good?")
day = day.title()
if day == "Yes":
print("That's great!")
elif day == "No":
print ("I'm sorry to hear that!")
else:
print("I didn't quite catch that.")
while True:
food = input("What's for lunch today?")
food == food.lower()
if food == "chicken nuggets":
print("One of us had a dream about that!")
elif food == "tacos":
print("Taco Tuesday!")
elif food == "barbecue chicken":
print("Corect!")
break
else:
print("I guess it could be that...")
print ("OK bye!")
hello('Lauren')
hello('Milli')
hello('Annie')
| true |
9aa216d9ae01719885ccff52619fcb678932828c | Python | Kherrisan/smallwei_redis | /smallwei-Redis/MessageStoreDBModule/MessageStoreDBModule.py | UTF-8 | 1,104 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# 聊天记录的记录模块。
from BaseProcessModule import *
from Message import *
from DatabaseSession import Session
from Sender import *
from Logger import log
class MessageStoreDBModule(BaseProcessModule):
"""该模块将所有小微可见的消息都写进数据库中。
"""
name = "MessageStoreDBModule"
@staticmethod
def process(message):
session = Session()
try:
session.add(Message.produceDBMessage(message)) # 根据Message对象生成MessageModel对象,并写入数据库中。
log(moduleName=MessageStoreDBModule.name,content=message.getContent()+" "+str(message.getGroupQQ())+" "+str(message.getPersonQQ()))
session.commit() # 数据库实际上发生变化是在这一行之后。相当于提交了插入操作。
return
except Exception as e:
if isinstance(e,Block):
raise Block()
log(moduleName=MessageStoreDBModule.name,level="error",content=e.message)
return
finally:
session.close()
| true |
8c68efe342bc748398fe5dbebd972cc852723b29 | Python | dddd1007/Parametric_MVPA_tools | /src/parametric_GLM/single_sub_GLM_parametric_analysis.py | UTF-8 | 3,299 | 2.609375 | 3 | [
"MIT"
] | permissive | # Single subject parameters analysis by GLM in fMRI
# Author: Xiaokai Xia (xia@xiaokai.me)
# Date: 2020-12-9
# This script try to analysis build a design matric for parametric analysis and excute a GLM
# to discover the relationhip between subject parameters and BOLD signal.
import numpy as np
import pandas as pd
from nipype.interfaces.spm import Level1Design, EstimateModel, EstimateContrast
from nipype.algorithms.modelgen import SpecifySPMModel
from nipype.interfaces.utility import Function, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink
from nipype import Workflow, Node
from nipype.interfaces.base import Bunch
# Retrive the nii files which will be used
def nii_selector(root_dir, sub, session_num, data_type = "Smooth_8mm"):
"""Retrive the path of nii files
Retrive the nii files path from Lingwang's pre-process directory structure.
Args:
root_dir: Root directory location, which contains all subjects data.
You will see like "sub1" and so on in this directory.
sub: The subject you want to analysis.
session_num: Total number of sessions. e.g 'session_num = 6' mean you have six sessions
data_type: You can specify "Smooth_8mm" or "Normalized" to select specific type of data.
Returns:
nii_list: All nii files for this subject.
"""
import os
import glob
session_list = ["session" + str(i) for i in range(1, session_num+1)]
# print(file_path)
nii_list = []
for s in session_list:
file_path = os.path.join(root_dir, sub, data_type, s)
nii_list.append(glob.glob(file_path + "/*.nii"))
return nii_list
# Build the relationship between onsets and parameters
def condition_generator(single_sub_data, params_name, duration = 2):
"""Build a bunch to show the relationship between each onset and parameter
Build a bunch for make a design matrix for next analysis. This bunch is for describing the relationship
between each onset and parameter.
Args:
single_sub_data: A pandas DataFrame which contains data for one subject.
It must contains the information about run, onsets, and parameters.
params_name: A list of names of parameters which you want to analysis.
The order of the names will be inherited to the design matrix next.
duration: The duration of a TR.
Returns:
subject_info: A list of bunch type which can be resolve by SpecifySPMModel interface in nipype.
"""
from nipype.interfaces.base import Bunch
run_num = set(single_sub_data.run)
subject_info = []
for i in run_num:
tmp_table = single_sub_data[single_sub_data.run == i]
tmp_onset = tmp_table.onset.values.tolist()
pmod_names = []
pmod_params = []
pmod_poly = []
for param in params_name:
pmod_params.append(tmp_table[param].values.tolist())
pmod_names.append(param)
pmod_poly.append(1)
tmp_Bunch = Bunch(conditions=["trial_onset_run"+str(i)], onsets=[tmp_onset], durations=[[duration]],
pmod=[Bunch(name = pmod_names, poly = pmod_poly, param = pmod_params)])
subject_info.append(tmp_Bunch)
return subject_info | true |
a4148f4c1828794959cbdf38e996228a518743df | Python | aronwolf/NLP_demo | /utils/tokenize_statements.py | UTF-8 | 926 | 3.25 | 3 | [] | no_license | import nltk
def tokenize(text):
token_array = []
if (isinstance(text, unicode)) or (isinstance(text, str)):
token_list = token_factory(text)
token_string = str(token_list)
token_array.append(token_string)
return token_array
else:
for line in text:
token_list = token_factory(line)
token_string = str(token_list)
token_array.append(token_string)
return token_array
def token_factory(text_line):
index_word_map = []
lemmatizer = nltk.stem.WordNetLemmatizer()
tokens = nltk.pos_tag(nltk.tokenize.word_tokenize(text_line))
for t in range(len(tokens)):
if "V" in tokens[int(t)][1]:
token = lemmatizer.lemmatize(tokens[int(t)][0], 'v')
else:
token = lemmatizer.lemmatize(tokens[int(t)][0])
index_word_map.append(token)
return index_word_map
| true |
c703cfc88d882724156ebdd2fab7d3176e925b57 | Python | ustcck/weibo_spider | /poi/poi.py | UTF-8 | 1,855 | 3.15625 | 3 | [] | no_license | # encoding:utf-8
import json
import urllib.request
import urllib.parse
import re
import csv
def read_csv(path):
with open(path, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
reader = list(reader)
columns = [t.lower() for t in reader[0]]
result = [dict(zip(columns, item)) for item in reader[1:] if item != []]
return result
def save_csv(path, datalist):
with open(path, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(list(datalist[0].keys()))
for item in datalist:
writer.writerow(list(item.values()))
def get_baidu_gps(poi):
ak = 'og6twr6AIxoj1XMARgaepweY'
host_url = 'http://api.map.baidu.com/geocoder/v2/?address=ADDRESS_NAME&output=json&ak=%s&callback=showLocation' % ak
print(poi)
address_name = poi['address']
address = urllib.parse.quote(address_name)
tmp_url = host_url.replace('ADDRESS_NAME', address)
req = urllib.request.Request(tmp_url)
try:
data = urllib.request.urlopen(req)
except:
data = urllib.request.urlopen(req)
rs = data.read()
print(rs)
text = rs.decode('utf-8')
re_pattern = re.compile('showLocation&&showLocation\((.*)\)')
match = re_pattern.search(text)
poi['gis_x'] = ''
poi['gis_y'] = ''
if match:
try:
tmp = json.loads(match.group(1))
poi['gis_x'] = tmp['result']['location']['lng']
poi['gis_y'] = tmp['result']['location']['lat']
return poi
except:
return poi
else:
return poi
def poi_gis(filename):
data = read_csv(filename)
temp = []
for addr in data:
item = get_baidu_gps(addr)
temp.append(item)
save_csv(r'result_1.csv', temp)
print('Done')
if __name__ == "__main__":
poi_gis('yixian_2.csv')
| true |
d34fd7f1625aae61a8787cab2995f15a55057f3a | Python | jkkummerfeld/jkkummerfeld.github.io | /reading-notes/old-blog/conv.py | UTF-8 | 847 | 2.90625 | 3 | [
"CC-BY-4.0"
] | permissive | import sys
def do_file(name):
to_print = []
to_print.append("---")
in_head = 0
for line in open(name + "/index.md"):
line = line.strip()
if in_head > 1:
to_print.append(line)
if line == "---":
if in_head == 1:
to_print.append('aliases: [ "/post/{}/", ]'.format(name.lower()))
to_print.append("---")
in_head += 1
elif line.startswith("title"):
to_print.append("title: {}".format(line[7:].strip()))
elif line.startswith("summary"):
to_print.append("description: {}".format(line[9:].strip()))
out = open(name + "/index.md", 'w')
print("\n".join(to_print), file=out)
out.close()
for line in sys.stdin:
for part in line.strip().split():
part = part[:-9]
do_file(part)
| true |
4564cf0d8ec5f1cd4133ed151aa434caec07bf85 | Python | ewhauser/commons | /src/python/twitter/pants/base/abbreviate_target_ids.py | UTF-8 | 2,596 | 3.296875 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | __author__ = 'Ryan Williams'
def abbreviate_target_ids(arr):
"""This method takes a list of strings (e.g. target IDs) and maps them to shortened versions of themselves. The
original strings should consist of '.'-delimited segments, and the abbreviated versions are subsequences of these
segments such that each string's subsequence is unique from others in @arr.
For example:
input: [
'com.twitter.pants.a.b',
'com.twitter.pants.a.c',
'com.twitter.pants.d'
]
might return: {
'com.twitter.pants.a.b': 'b',
'com.twitter.pants.a.c': 'c',
'com.twitter.pants.d': 'd'
}
This can be useful for debugging purposes, removing a lot of boilerplate from printed lists of target IDs.
"""
split_keys = [tuple(a.split('.')) for a in arr]
split_keys_by_subseq = {}
def subseq_map(arr, subseq_fn = None, result_cmp_fn = None):
def subseq_map_rec(remaining_arr, subseq, indent = ''):
if not remaining_arr:
if subseq_fn:
subseq_fn(arr, subseq)
return subseq
next_segment = remaining_arr.pop()
next_subseq = tuple([next_segment] + list(subseq))
skip_value = subseq_map_rec(remaining_arr, subseq, indent + '\t')
add_value = subseq_map_rec(remaining_arr, next_subseq, indent + '\t')
remaining_arr.append(next_segment)
if result_cmp_fn:
if not subseq:
# Empty subsequence should always lose.
return add_value
if result_cmp_fn(skip_value, add_value):
return skip_value
return add_value
return None
val = subseq_map_rec(list(arr), tuple())
return val
def add_subseq(arr, subseq):
if subseq not in split_keys_by_subseq:
split_keys_by_subseq[subseq] = set()
if split_key not in split_keys_by_subseq[subseq]:
split_keys_by_subseq[subseq].add(arr)
for split_key in split_keys:
subseq_map(split_key, add_subseq)
def return_min_subseqs(subseq1, subseq2):
collisions1 = split_keys_by_subseq[subseq1]
collisions2 = split_keys_by_subseq[subseq2]
if (len(collisions1) < len(collisions2) or
(len(collisions1) == len(collisions2) and
len(subseq1) <= len(subseq2))):
return True
return False
min_subseq_by_key = {}
for split_key in split_keys:
min_subseq = subseq_map(split_key, result_cmp_fn=return_min_subseqs)
if not min_subseq:
raise Exception("No min subseq found for %s: %s" % (str(split_key), str(min_subseq)))
min_subseq_by_key['.'.join([str(segment) for segment in split_key])] = '.'.join(min_subseq)
return min_subseq_by_key
| true |
e4dbf1c6c11e4ce42902dd11e944d6dcefc5672f | Python | garethsion/SpecialResonatorCalcs | /preprocess.py | UTF-8 | 2,088 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import csv
from vacuum_flucs import CPW
from process_data import SetParams
from ssh_command import SSHCommand
import Plotting
import os
# Define geometry of the superconductor
setp = SetParams.SetParams()
params = setp.set_params()
w = params["w"]
t = params["t"]
l = params["l"]
pen = params["pen"]
# Define resonator params
omega = params["omega"]
Z = params["Z"]
# Define the 'mesh'
x = np.linspace(-w, w, int(1e04))
# Instantiate Special CPW object
cpw = CPW.CPW(x,l,w,t,pen,Z,omega)
Js = cpw.J() #s Current density - not normalised
Jnorm = cpw.normalize_J() # Normalise
I = cpw.current(norm='no') # Current
E = cpw.E() # Electric field
sigma = cpw.conductivity() # Conductivity
# Generate a parameter list for COMSOL modelling
paramlist = setp.param_list(x,I,Jnorm) # Generate COMSOL parameter list
# Save data to csv file
currentDensityFile = str(os.getcwd() + "/data_preprocess/current_density.csv")
np.savetxt(currentDensityFile, np.column_stack((x,Jnorm)), delimiter=",")
currentFile = str(os.getcwd() + "/data_preprocess/current.csv")
np.savetxt(currentFile, np.column_stack((x,I)), delimiter=",")
eFile = str(os.getcwd() + "/data_preprocess/electric_field.csv")
np.savetxt(eFile, np.column_stack((x,E)), delimiter=",")
condFile = str(os.getcwd() + "/data_preprocess/conductivity.csv")
np.savetxt(condFile, np.column_stack((x,sigma)), delimiter=",")
# Plot data - can decide to show or not, and save or not
plts = Plotting.Plotting()
plt0 = plts.plot(x*1e6,I,colr = 'b',xlab='x ($\mu$m)',
ylab='$Current (A/m)$',filename='current.eps',
show='no',save='yes')
plt1 = plts.plot(x*1e6,Jnorm*1e6,colr='r',xlab='x ($\mu$m)',
ylab='Current density\n (MAm$^{-2}$)',
filename='current_density.eps',show='no',save='yes')
plt2 = plts.plot(x*1e6,sigma,colr='g',xlab='x ($\mu$m)',
ylab='Conductivity\n ($S/m}$)',
filename='conductivity.eps',show='no',save='yes')
plt3 = plts.plot(x*1e6,E,colr='b',xlab='x ($\mu$m)',
ylab='Electric Field\n ($V/m$)',
filename='e_files.eps',show='no',save='yes')
| true |
38f2996ccb549ac17ddb81bd24af348f03d58224 | Python | andromedakepecs/npuzzle | /puzzle.py | UTF-8 | 5,040 | 3.40625 | 3 | [] | no_license | import copy
# Andromeda Kepecs
# Redmond Block C
# Read txt file and return a tuple state
def LoadFromFile(filepath):
    """Read an n-puzzle from a tab-separated text file.

    The first line of the file holds the board dimension n; each
    following line holds one row of tiles separated by tabs, with '*'
    marking the hole.  The hole is stored internally as the string '0'.

    Returns a tuple of row-tuples of tile strings, or None (after
    printing a diagnostic) if the file contains no hole.
    """
    state = []
    n = 0
    with open(filepath) as f:
        count = 0
        contains_hole = False
        for line in f:
            l = []
            if count == 0:
                # BUG FIX: the original assigned the file object (n = f);
                # the first line actually carries the board dimension.
                n = int(line.strip())
            else:
                row = line.strip().split("\t")
                for i in row:
                    if i == '*':
                        l.append('0')  # represent the hole as '0'
                        contains_hole = True
                    else:
                        l.append(i)
                state.append(tuple(l))
            count += 1
        if contains_hole == False:
            print("Puzzle does not contain hole")
            return None
    return tuple(state)
def DebugPrint(state):
    """Print the board, one row per line, tiles separated (and trailed) by tabs."""
    for row in state:
        print("\t".join(row) + "\t")
# Compute and return neighboring numbers
def ComputeNeighbors(state):
    """Return [tile_label, successor_state] pairs for every legal move.

    A move slides a tile orthogonally adjacent to the hole into the hole.
    Candidate cells are examined in the order left, right, above, below.
    """
    size = len(state)
    row, col = FindZero(state)
    candidates = [(row, col - 1), (row, col + 1), (row - 1, col), (row + 1, col)]
    neighbors = []
    for r, c in candidates:
        if 0 <= r < size and 0 <= c < size:
            neighbors.append([state[r][c], Swap(state, (row, col), (r, c))])
    return neighbors
def FindZero(state):
    """Return (row, col) of the hole tile '0', or None if the board has no hole."""
    size = len(state)
    for r in range(size):
        for c in range(size):
            if state[r][c] == "0":
                return (r, c)
def Swap(state, zero_coordinates, swap):
    """Return a new board with the hole and the tile at *swap* exchanged.

    The input tuple-of-tuples is never mutated; a fresh tuple is returned.
    """
    grid = [list(row) for row in state]
    zr, zc = zero_coordinates
    sr, sc = swap
    grid[zr][zc], grid[sr][sc] = grid[sr][sc], grid[zr][zc]
    return tuple(tuple(row) for row in grid)
def IsGoal(state):
    """Return True iff tiles read 1..n*n-1 in row-major order, hole last.

    The final (hole) cell is never inspected: once every numbered tile is
    in place the hole can only be in the last cell, so True is returned
    at the second-to-last cell of the last row.
    """
    index = 0
    n = len(state)
    for i in range(n):
        for j in range(n):
            # BUG FIX: the original wrote int(state[i][j] == index + 1),
            # which compares a *string* tile to an int (always False) and
            # therefore rejected every board.  Convert the tile first.
            if int(state[i][j]) != index + 1:
                return False
            if i == n - 1 and j == n - 2:
                return True
            index += 1
def FindGoal(n):
    """Return the solved n x n board: tiles '1'..'n*n-1' row-major, '0' last."""
    tiles = [str(v) for v in range(1, n * n)] + ['0']
    return tuple(tuple(tiles[r * n:(r + 1) * n]) for r in range(n))
# Breadth First Search
def BFS(state):
    """Breadth-first search from *state* to the solved board.

    Frontier entries are (tile_label, state) pairs; `parents` maps each
    such pair to the pair it was expanded from, so the move sequence can
    be reconstructed by walking parent links back to the start.

    Returns the list of tile labels to slide, in order, or None if the
    goal is unreachable.
    """
    frontier = [(0, state)]
    # BUG FIX: the original wrote set(state), which seeds the visited set
    # with the board's *rows* rather than the board itself (DFS below
    # correctly uses set([state])).
    discovered = {state}
    parents = {(0, state): None}
    path = []
    while frontier:
        current = frontier.pop(0)
        discovered.add(current[1])
        if IsGoal(current[1]):
            # Walk parent links back to the start, collecting moves.
            while parents.get((current[0], current[1])) is not None:
                path.insert(0, current[0])
                current = parents.get((current[0], current[1]))
            return path
        for neighbor in ComputeNeighbors(current[1]):
            if neighbor[1] not in discovered:
                frontier.append(neighbor)
                discovered.add(neighbor[1])
                parents.update({(neighbor[0], neighbor[1]): current})
    print("Failed")
    return None
# Depth First Search
def DFS(state):
    """Depth-first search from *state* to the solved board.

    Identical bookkeeping to BFS, except new nodes are pushed onto the
    *front* of the frontier so the deepest node is expanded next.

    Returns the list of tile labels to slide, in order, or None on failure.
    """
    frontier = [(0, state)]
    discovered = {state}
    parents = {(0, state): None}
    moves = []
    while frontier:
        node = frontier.pop(0)
        discovered.add(node[1])
        if IsGoal(node[1]):
            # Rebuild the move sequence by following parent links.
            while parents.get((node[0], node[1])) is not None:
                moves.insert(0, node[0])
                node = parents.get((node[0], node[1]))
            return moves
        for neighbor in ComputeNeighbors(node[1]):
            if neighbor[1] not in discovered:
                frontier.insert(0, neighbor)
                discovered.add(neighbor[1])
                parents[(neighbor[0], neighbor[1])] = node
    print("Failed")
    return None
# Bidirectional Search
def BidirectionalSearch(state):
    """Search simultaneously from *state* and from the solved board.

    Expands one node from each frontier per iteration and stops when the
    two discovered sets intersect.  `parents1`/`parents2` map each state
    to the list of moves that reached it from the respective root, so the
    full path is the forward move list plus the reversed goal-side list
    (sliding a tile is its own inverse, so no move translation is needed).

    Returns the list of tile labels to slide, or None on failure.
    """
    goal = FindGoal(len(state))
    frontier1 = [(0, state)]
    frontier2 = [(0, goal)]
    discovered1 = set([state])
    discovered2 = set([goal])
    parents1 = {state: []}
    parents2 = {goal: []}
    # BUG FIX: the original looped while *either* frontier was non-empty
    # but unconditionally popped from both, raising IndexError as soon as
    # one side was exhausted.  Require both to be non-empty instead.
    while len(frontier1) != 0 and len(frontier2) != 0:
        current_state = frontier1.pop(0)
        current_end_state = frontier2.pop(0)
        discovered1.add(tuple(current_state[1]))
        discovered2.add(tuple(current_end_state[1]))
        intersection = list(discovered2.intersection(discovered1))
        if len(intersection) > 0:
            intersection_point = intersection[0]
            forward_path = parents1[intersection_point]
            backwards_path = list(reversed(parents2[intersection_point]))
            return forward_path + backwards_path
        for neighbor in ComputeNeighbors(current_state[1]):
            if neighbor[1] not in discovered1:
                frontier1.append(neighbor)
                discovered1.add(neighbor[1])
                parents1.update({neighbor[1]: parents1[current_state[1]] + [neighbor[0]]})
        for neighbor in ComputeNeighbors(current_end_state[1]):
            if neighbor[1] not in discovered2:
                frontier2.append(neighbor)
                discovered2.add(neighbor[1])
                parents2.update({neighbor[1]: parents2[current_end_state[1]] + [neighbor[0]]})
    print("Failed")
    return None
def AStar(state):
    """Placeholder for A* search -- not yet implemented (returns None)."""
    pass
def main():
    """Entry point: load the sample puzzle 'easy4.txt', echo it, and print a solution."""
    board = LoadFromFile('easy4.txt')
    print(board)
    print(BidirectionalSearch(board))
if __name__ == "__main__":
main() | true |
ee1d147515637e1282de5609d36d2c33b5732270 | Python | erantanen/pytoys | /gui_based/old_junk/bw_mov_pixel_with_keypress.py | UTF-8 | 1,555 | 3.25 | 3 | [] | no_license | #
# Had to use a circle for this, pixel to small
# 0:0 starts at top left
from raylibpy import *
def main():
    # Animation driver: a green dot starts at the bottom of the window and
    # climbs upward, weaving horizontally along a precomputed offset curve,
    # until it reaches the top edge.  P toggles pause.
    screen_width: int = 800
    screen_height: int = 450
    current_x = 300   # base x position of the dot
    current_y = 0
    decr_y = 10       # NOTE(review): unused -- candidate for removal
    pause = False
    pos_x = 0
    # Horizontal offsets applied per frame; cycling through the list traces
    # a smooth bump (ramps up to 10 and back down to 0).
    c_list = [0, 1, 3, 4, 6, 7, 8, 9, 9, 10, 9, 9, 8, 7, 6, 4, 3, 1, 0]
    init_window(screen_width, screen_height, "the moving fox?")
    shifter_y = 0       # how far the dot has risen from the bottom
    shifter_curve = 0   # current index into c_list
    set_target_fps(40)
    while not window_should_close():
        if is_key_pressed(KEY_P):
            # Toggle the pause flag on each P keypress.
            if pause:
                pause = False
            else:
                pause = True
        #
        # window contents start
        begin_drawing()
        clear_background(BLACK)
        draw_text("press p to pause", 0, 0, 20, LIGHTGRAY)
        # y grows downward (0:0 is top-left), so subtracting the shifter
        # moves the dot up the screen.
        current_y = screen_height - shifter_y
        if pause is False:
            # shifts position of x by c_list increment
            if shifter_y < screen_height:
                pos_x = current_x + c_list[shifter_curve]
                print(str(pos_x) + " : " + str(c_list[shifter_curve]) + " : " + str(current_y))
                draw_circle(pos_x, current_y, 2, GREEN)
                # Advance through the offset curve, wrapping at the end.
                if shifter_curve < (len(c_list) - 1):
                    shifter_curve += 1
                else:
                    shifter_curve = 0
                shifter_y += 1
            else:
                # Dot has left the top of the window; show a final message.
                draw_text(" Reached the edge of space", 0, screen_height-20, 20, LIGHTGRAY)
        end_drawing()
    close_window()
if __name__ == '__main__':
main()
| true |
46935859243311f72fa1ac172a3de82362107e01 | Python | jim7762000/ETF_Data | /model_20180512.py | UTF-8 | 3,465 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import tensorflow as tf
import numpy as np
import pandas as pd
import keras
from sklearn import preprocessing
import csv
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
# Algorithm parameters
TIME_STEPS = 3    # length of each input window (rows per sample)
INPUT_SIZE = 28
BATCH_SIZE = 50
#BATCH_INDEX = 1
OUTPUT_SIZE = 10
CELL_SIZE = 50 # how many hidden units in hidden layer
LR = 0.001        # NOTE(review): defined but unused below (Adam defaults used)
Train_rate = 0.9  # fraction of windowed samples used for training
data_list = []
foxconndf= pd.read_csv('0050.csv', index_col=0 )
# Drop the row which contains NA value, how='any' if there's at least one blank, how='all' if all data values are blank
foxconndf.dropna(how='any',inplace=True)
# Drop the non-numeric Date column so every remaining column can be scaled.
foxconndf = foxconndf.drop(['Date'], axis = 1)
# Initiate
def normalize(df):
    """Min-max scale every column of *df* into [0, 1]; returns a new DataFrame."""
    scaler = preprocessing.MinMaxScaler()
    scaled = df.copy()
    for column in df.columns:
        scaled[column] = scaler.fit_transform(df[column].values.reshape(-1, 1))
    return scaled
foxconndf_norm = normalize(foxconndf)  # all columns scaled into [0, 1]
#print(foxconndf_norm)
# Split the data into training and testing data set
def data_helper(df, time_frame, train_rate=None):
    """Slice *df* into overlapping windows and split into train/test sets.

    Each sample is *time_frame* consecutive rows of all features; its
    label is the last column of the row immediately following the window.

    df         -- normalised DataFrame with rows in time order
    time_frame -- number of time steps per input sample
    train_rate -- fraction of samples used for training; defaults to the
                  module-level Train_rate for backward compatibility

    Returns [x_train, y_train, x_test, y_test].
    """
    if train_rate is None:
        train_rate = Train_rate
    number_features = len(df.columns)
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0;
    # .values is the drop-in replacement.
    datavalue = df.values
    result = []
    # Each window spans time_frame rows plus one extra row for the label.
    for index in range(len(datavalue) - (time_frame + 1)):
        result.append(datavalue[index: index + (time_frame + 1)])
    result = np.array(result)
    # number_train = training data numbers
    number_train = round(train_rate * result.shape[0])
    x_train = result[:int(number_train), :-1]
    y_train = result[:int(number_train), -1][:, -1]
    x_test = result[int(number_train):, :-1]
    y_test = result[int(number_train):, -1][:, -1]
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], number_features))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], number_features))
    return [x_train, y_train, x_test, y_test]
X_train, y_train, X_test, y_test = data_helper(foxconndf_norm, TIME_STEPS)
def build_model(input_length, input_dim):
    """Build a stacked-LSTM regressor: 2x256 LSTM layers with dropout,
    a 16-unit ReLU dense layer and a single linear output, compiled
    with MSE loss and the Adam optimizer."""
    dropout_rate = 0.5
    layers = [
        LSTM(256, input_shape=(input_length, input_dim), return_sequences=True),
        Dropout(dropout_rate),
        LSTM(256, input_shape=(input_length, input_dim), return_sequences=False),
        Dropout(dropout_rate),
        Dense(16, kernel_initializer="uniform", activation='relu'),
        Dense(1, kernel_initializer="uniform", activation='linear'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    return model
# NOTE(review): the model is built with input_dim=67 while INPUT_SIZE above
# is 28 -- presumably 67 matches the CSV's actual feature count; confirm.
model = build_model(TIME_STEPS, 67 )
model.fit( X_train, y_train, batch_size=BATCH_SIZE, epochs=500, validation_split=0.1, verbose=1)
def denormalize(df, norm_value):
    """Map 0-1 scaled values back to the original ClosePrice scale of *df*."""
    close_prices = df['ClosePrice'].values.reshape(-1, 1)
    scaled = norm_value.reshape(-1, 1)
    scaler = preprocessing.MinMaxScaler()
    # Fit on the true price column so inverse_transform recovers its range.
    scaler.fit_transform(close_prices)
    return scaler.inverse_transform(scaled)
# Predict on the held-out windows and map predictions and labels back to
# the original price scale before plotting them against each other.
pred = model.predict(X_test)
denorm_pred = denormalize(foxconndf, pred)
denorm_ytest = denormalize(foxconndf, y_test)
import matplotlib.pyplot as plt
#%matplotlib inline
plt.plot(denorm_pred,color='red', label='Prediction')
plt.plot(denorm_ytest,color='blue', label='Answer')
plt.legend(loc='best')
plt.show()
9eae7a88f8a8652d5540594ba4a7f11941413977 | Python | Aasthaengg/IBMdataset | /Python_codes/p02995/s912616195.py | UTF-8 | 195 | 2.90625 | 3 | [] | no_license | from math import gcd
def count_indivs(N, c=None, d=None, lcm=None):
    """Count integers in [1, N] divisible by neither c nor d.

    By inclusion-exclusion, N//c + N//d - N//lcm numbers in [1, N] ARE
    divisible by c or d; the remainder is returned.  The divisors default
    to the module-level globals C, D and L (their LCM) so the original
    zero-argument call sites keep working.
    """
    if c is None:
        c = C
    if d is None:
        d = D
    if lcm is None:
        lcm = L
    return N - (N // c + N // d - N // lcm)
# Read the query range [A, B] and the two divisors C, D from stdin.
A, B, C, D = map(int, input().split())
L = C * D // gcd(C, D)  # least common multiple of C and D
# Numbers in [A, B] divisible by neither C nor D, via prefix counts.
print(count_indivs(B) - count_indivs(A - 1))
| true |