blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
253cfceb918477ab57dff1229e91eeb5fc5ae053 | Python | arshk123/twitter-scraper | /src/db.py | UTF-8 | 472 | 2.75 | 3 | [] | no_license | import psycopg2
import json
""" function to connect to db using credentials specified in config """
def getDBConnection():
    """Open a psycopg2 connection using credentials from ../config/config.json.

    The config file must contain a 'database_credentials' object with
    'dbname', 'user', 'host' and 'password' keys.

    Returns:
        An open psycopg2 connection on success. On connection failure the
        process prints an error and terminates with exit code 1.
    """
    with open('../config/config.json') as json_data:
        data = json.load(json_data)
    # Only the credentials section is needed for the connection.
    creds = data['database_credentials']
    try:
        conn = psycopg2.connect(dbname=creds['dbname'], user=creds['user'],
                                host=creds['host'], password=creds['password'])
    except psycopg2.Error:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
        # programming errors (e.g. a KeyError from a malformed config) are
        # not silently reported as a DB failure.
        print("DB connection failed")
        exit(1)
    return conn
| true |
3af06a38231aaada5f9947a5ce658282de9c5817 | Python | rogeriosilva-ifpi/teaching-tds-course | /programacao_estruturada/20192_186/cap5_condicionais/dojo-condicionais-master/dojo_10_condicional.py | UTF-8 | 580 | 4 | 4 | [] | no_license | # Exercícios by Nick Parlante (CodingBat)
# Troca Letras
# seja uma string s
# se s tiver tamanho <= 1 retorna ela mesma
# caso contrário troca a primeira e última letra
# troca('code') -> 'eodc'
# troca('a') -> 'a'
# troca('ab') -> 'ba'
from Testes import teste, msg_sucesso, msg_inicio
def troca(s):
    """Return `s` with its first and last characters swapped.

    Strings of length 0 or 1 are returned unchanged, e.g.
    troca('code') -> 'eodc', troca('ab') -> 'ba', troca('a') -> 'a'.
    """
    # The original stub was `pass`; the contract is fully specified by the
    # `teste(...)` calls below this definition.
    if len(s) <= 1:
        return s
    return s[-1] + s[1:-1] + s[0]
# Ad-hoc test harness: `teste` compares an actual value against the
# expected one and `msg_inicio`/`msg_sucesso` print banners (all three
# come from the local `Testes` module imported above).
msg_inicio('Troca Letras')
teste(troca('code'), 'eodc')
teste(troca('a'), 'a')
teste(troca('ab'), 'ba')
teste(troca('abc'), 'cba')
teste(troca(''), '')
teste(troca('Chocolate'), 'ehocolatC')
teste(troca('nythoP'), 'Python')
teste(troca('hello'), 'oellh')
msg_sucesso()
| true |
c8625319dfc76f1ab40104cc25c6c9128eb3141b | Python | ashwin-ramachandran/Sudoku | /sudoku.py | UTF-8 | 3,644 | 3.984375 | 4 | [] | no_license | # A sample board (all boards must be 2D lists)
# 9x9 Sudoku puzzle; a value of 0 marks an empty cell to be filled by solve().
board = [
    [7,8,0,4,0,0,1,2,0],
    [6,0,0,0,7,5,0,0,9],
    [0,0,0,6,0,1,0,7,8],
    [0,0,7,0,4,0,2,6,0],
    [0,0,1,0,5,0,9,3,0],
    [9,0,4,0,6,0,0,0,5],
    [0,7,0,3,0,0,0,1,2],
    [1,2,0,0,0,7,4,0,0],
    [0,4,9,2,0,6,0,0,7]
]
# Function to print the board, so user may see a before and after being solved
def print_board(board):
    """Pretty-print a 9x9 Sudoku board to stdout.

    Horizontal and vertical divider lines are drawn between the 3x3 boxes so
    the grid is easy to read. `board` is a 2D list of ints (0 = empty cell).
    """
    # Access the outer elements indexes
    for i in range(len(board)):
        # Print a horizontal divider line after every third row
        if i % 3 == 0 and i != 0:
            print("- - - - - - - - - - - -")
        # Access the inner elements indexes
        for j in range(len(board[i])):
            # Print a vertical divider line after every third column
            if j % 3 == 0 and j != 0:
                print(" | ", end = "")
            # Specific conditional formatting to make the board print out symmetric and in a visually appealing manner
            # (last column ends the line; first/middle columns differ only in surrounding spaces)
            if j == 8:
                print(board[i][j])
            else:
                if j == 0:
                    print(" " + str(board[i][j]) + " ", end = "")
                elif j == 2 or j == 5:
                    print(str(board[i][j]), end = "")
                else:
                    print(str(board[i][j]) + " ", end = "")
# Function for locating an empty spot on the board (indicated by a value of 0), returning that location in a (row, col) fashion
def find_empty_spot(board):
    """Locate the next empty cell (value 0), scanning row by row.

    Returns a (row, col) tuple for the first empty cell found, or None when
    no empty cells remain (i.e. the board is fully filled).
    """
    for row_idx, row in enumerate(board):
        for col_idx, value in enumerate(row):
            if value == 0:
                return (row_idx, col_idx)
    return None
# Function for checking if the attempted entry is valid (if it will work according to the rules of Sudoku)
def valid(board, num, pos):
    """Check whether placing `num` at `pos` (row, col) obeys Sudoku rules.

    Returns False if `num` already appears elsewhere in the same row, column,
    or 3x3 box (the cell at `pos` itself is ignored), otherwise True.
    """
    row, col = pos
    # Row check: does another cell in this row already hold num?
    for c in range(len(board[0])):
        if c != col and board[row][c] == num:
            return False
    # Column check: does another cell in this column already hold num?
    for r in range(len(board)):
        if r != row and board[r][col] == num:
            return False
    # Box check: top-left corner of the 3x3 box containing pos.
    box_row = (row // 3) * 3
    box_col = (col // 3) * 3
    for r in range(box_row, box_row + 3):
        for c in range(box_col, box_col + 3):
            if (r, c) != pos and board[r][c] == num:
                return False
    # All three constraints passed.
    return True
# Recursive function that implements the other functions as well to actually do the solving of the board
def solve(board):
    """Solve the Sudoku board in place via backtracking.

    Returns True once the board is completely (and validly) filled, False if
    the current partial board admits no solution.
    """
    spot = find_empty_spot(board)
    # Base case: no empty cells remain, so the board is solved.
    if spot is None:
        return True
    row, col = spot
    # Try every legal digit in this cell; recurse on the rest of the board.
    for candidate in range(1, 10):
        if not valid(board, candidate, (row, col)):
            continue
        board[row][col] = candidate
        if solve(board):
            return True
        # Dead end further down the board: undo the entry (backtrack) and
        # move on to the next candidate digit.
        board[row][col] = 0
    return False
# Demo: print the puzzle, solve it in place, then print the solution.
print_board(board)
solve(board)
print_board(board)
| true |
665cad7f8b0d4d15209339fba3295631952caadd | Python | Muhodari/python-complete-crash-Course | /P.2D Lists/2dList.py | UTF-8 | 142 | 3.484375 | 3 | [] | no_license | myList = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
# 2D indexing is [row][column]: row 0, column 1 -> 2.
print(myList[0][1])
# Nested loops visit every element in row-major order.
for lists in myList:
    for row in lists:
        print(row)
| true |
8299556cbe3e5dc43696580f77b9f0df79da58d6 | Python | ItzFrostbite/BMI_Calculator | /BMI_Calculator.py | UTF-8 | 540 | 3.65625 | 4 | [] | no_license | from time import sleep
# Interactive BMI calculator using the imperial formula BMI = lbs/in^2 * 703.
height = float(input("Enter Height In Inchs: "))
lbs = float(input("Enter Weight In Pounds: "))
BMI = round(lbs / (height * height) * 703, 2)
print("Your BMI Is:", BMI)
# Chained comparison replaces `BMI <= 24.9 and BMI >= 18.5`; the three
# original branches partition the whole range, so `else` covers BMI < 18.5.
if 18.5 <= BMI <= 24.9:
    print("You Are Healthy")
elif BMI > 24.9:
    print("You Are Overweight")
else:
    print("You Are Underweight")
# The pause-and-exit tail was duplicated in every branch; it is also hoisted
# here without binding the prompt result to `exit` (which shadowed the builtin).
sleep(3)
input("Press Enter To Exit: ")
| true |
268a046b8cc41c000f40b1eea70cb5818dde9ce4 | Python | Tianyijian/pytorch-tutorial | /AIChallenger2018/HAN-AI18/data_utils.py | UTF-8 | 11,978 | 2.640625 | 3 | [] | no_license | import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
import numpy as np
from collections import Counter
import jieba
from tqdm import tqdm
import pandas as pd
import itertools
import os
import json
import gensim
import logging
import time
import re
def read_csv(csv_folder, split, sentence_limit, word_limit):
    """
    Read CSVs containing raw training data, clean documents and labels, and do a word-count.
    :param csv_folder: folder containing the CSV
    :param split: train or test CSV?
    :param sentence_limit: truncate long documents to these many sentences
    :param word_limit: truncate long sentences to these many words
    :return: (ids, documents, labels, word-count Counter)
    """
    assert split in {'sentiment_analysis_trainingset',
                     'sentiment_analysis_validationset'}
    docs = []
    labels = []
    ids = []
    word_counter = Counter()
    data = pd.read_csv(os.path.join(csv_folder, split + '.csv'))
    # Corpus statistics, gathered before truncation: total sentences, docs
    # with fewer than 20 sentences, total words, sentences under 60 words.
    sent_num = 0
    s_l_20 = 0
    word_num = 0
    w_l_60 = 0
    for i in tqdm(range(data.shape[0])):
        # for i in range(1000):
        row = list(data.loc[i, :])
        # NOTE(review): assumes row[0] = id, row[1] = review text,
        # row[2:] = per-aspect labels — confirm against the CSV schema.
        # sentences = cut_sent(row[1])
        sentences = splitsentence(row[1].replace("\n", ""))
        sent_num += len(sentences)
        if len(sentences) < 20:
            s_l_20 += 1
        words = list()
        for s in sentences[:sentence_limit]:
            # for s in sentences:
            # jieba segments the Chinese sentence into word tokens.
            w = list(jieba.cut(s))
            word_num += len(w)
            if len(w) < 60:
                w_l_60 += 1
            w = w[:word_limit]
            # If sentence is empty (due to removing punctuation, digits, etc.)
            if len(w) == 0:
                continue
            words.append(w)
            word_counter.update(w)
        # If all sentences were empty
        if len(words) == 0:
            continue
        # Shift raw label values by +2 so they become non-negative class ids
        # (presumably AI-Challenger labels in [-2, 1] — TODO confirm range).
        labels.append([int(v) + 2 for v in row[2:]])
        ids.append(row[0])
        docs.append(words)
    print("sent_num:{}, avg_sent:{}, s<20:{}, word_num:{}, avg_word:{}, w<60:{}".format(sent_num, float(sent_num) /
        data.shape[0], float(s_l_20) / data.shape[0], word_num, float(word_num) / sent_num, float(w_l_60) / sent_num))
    return ids, docs, labels, word_counter
def cut_sent(para):
    """Split a Chinese paragraph into sentences.

    Inserts "\\n" after sentence-ending punctuation (rules adapted from
    https://blog.csdn.net/blmoistawinde/article/details/82379256), then
    splits on those newlines. A closing quote after a terminator keeps the
    quote attached to its sentence; semicolons/dashes are deliberately not
    treated as boundaries.
    """
    # Each rule marks a boundary by inserting "\n" between the terminator
    # (group 1) and the character that follows it (group 2).
    boundary_rules = (
        r'([。!?\?])([^”’])',            # single-character terminators
        r'(\.{6})([^”’])',                # English ellipsis ("......")
        r'(\…{2})([^”’])',                # Chinese ellipsis ("……")
        r'([。!?\?][”’])([^,。!?\?])',  # terminator followed by a closing quote
    )
    for pattern in boundary_rules:
        para = re.sub(pattern, r'\1\n\2', para)
    # Strip the tail so split() does not produce an empty final element.
    return para.rstrip().split("\n")
# Sentence-terminator pattern used by splitsentence(): a fullwidth/halfwidth
# terminator optionally followed by up to two closing quotes, or a fullwidth
# colon that precedes an opening quote (or end of string).
resentencesp = re.compile('([﹒﹔﹖﹗.;。!?]["’”」』]{0,2}|:(?=["‘“「『]{1,2}|$))')
def splitsentence(sentence):
    """Split `sentence` into sentences, keeping each terminator attached.

    Adapted from https://github.com/fxsjy/jieba/issues/575: re.split with a
    capturing group yields alternating [text, terminator, text, ...] pieces;
    each terminator piece is glued back onto the preceding text piece.
    """
    terminator = re.compile('([﹒﹔﹖﹗.;。!?]["’”」』]{0,2}|:(?=["‘“「『]{1,2}|$))')
    pieces = []
    for part in terminator.split(sentence):
        if terminator.match(part) and pieces:
            # This piece is a captured terminator: append it to the
            # sentence it ends.
            pieces[-1] += part
        elif part:
            # Non-empty text piece starts a new sentence.
            pieces.append(part)
    return pieces
def create_input_files(csv_folder, output_folder, sentence_limit, word_limit, min_word_count=5):
    """
    Create data files to be used for training the model.

    Documents are encoded as fixed-size int tensors: every sentence is padded
    to `word_limit` words and every document to `sentence_limit` sentences,
    with the true lengths saved alongside for masking.

    :param csv_folder: folder where the CSVs with the raw data are located
    :param output_folder: folder where files must be created
    :param sentence_limit: truncate long documents to these many sentences
    :param word_limit: truncate long sentences to these many words
    :param min_word_count: discard rare words which occur fewer times than this number
    """
    # Read training data
    print('\nReading and preprocessing training data...\n')
    train_ids, train_docs, train_labels, word_counter = read_csv(
        csv_folder, 'sentiment_analysis_trainingset', sentence_limit, word_limit)

    # Create word map. '<pad>' must map to 0 because padded positions are
    # filled with 0 below; rare words fall back to '<unk>'.
    word_map = dict()
    word_map['<pad>'] = 0
    for word, count in word_counter.items():
        if count >= min_word_count:
            word_map[word] = len(word_map)
    word_map['<unk>'] = len(word_map)
    print('\nDiscarding words with counts less than %d, the size of the vocabulary is %d.\n' % (
        min_word_count, len(word_map)))

    with open(os.path.join(output_folder, 'word_map.json'), 'w', encoding="utf-8") as j:
        json.dump(word_map, j, ensure_ascii=False, indent=4)
    print('Word map saved to %s.\n' % os.path.abspath(output_folder))

    # Encode and pad: map each word to its id (or '<unk>'), right-pad each
    # sentence to word_limit, then right-pad each doc with all-zero sentences.
    print('Encoding and padding training data...\n')
    encoded_train_docs = list(map(lambda doc: list(
        map(lambda s: list(map(lambda w: word_map.get(w, word_map['<unk>']), s)) + [0] * (word_limit - len(s)),
            doc)) + [[0] * word_limit] * (sentence_limit - len(doc)), train_docs))
    # True (unpadded) lengths, needed to mask out the padding at train time.
    sentences_per_train_document = list(map(lambda doc: len(doc), train_docs))
    words_per_train_sentence = list(
        map(lambda doc: list(map(lambda s: len(s), doc)) + [0] * (sentence_limit - len(doc)), train_docs))

    # Save
    print('Saving...\n')
    assert len(encoded_train_docs) == len(train_labels) == len(sentences_per_train_document) == len(
        words_per_train_sentence)
    # Because of the large data, saving as a JSON can be very slow
    torch.save({'ids': train_ids,
                'docs': encoded_train_docs,
                'labels': train_labels,
                'sentences_per_document': sentences_per_train_document,
                'words_per_sentence': words_per_train_sentence},
               os.path.join(output_folder, 'TRAIN_data.tar'))
    print('Encoded, padded training data saved to %s.\n' %
          os.path.abspath(output_folder))

    # Free some memory
    del train_docs, encoded_train_docs, train_labels, sentences_per_train_document, words_per_train_sentence

    # Read test data (note: the word map from the TRAIN split is reused, so
    # unseen test words become '<unk>').
    print('Reading and preprocessing test data...\n')
    test_ids, test_docs, test_labels, _ = read_csv(
        csv_folder, 'sentiment_analysis_validationset', sentence_limit, word_limit)

    # Encode and pad
    print('\nEncoding and padding test data...\n')
    encoded_test_docs = list(map(lambda doc: list(
        map(lambda s: list(map(lambda w: word_map.get(w, word_map['<unk>']), s)) + [0] * (word_limit - len(s)),
            doc)) + [[0] * word_limit] * (sentence_limit - len(doc)), test_docs))
    sentences_per_test_document = list(map(lambda doc: len(doc), test_docs))
    words_per_test_sentence = list(
        map(lambda doc: list(map(lambda s: len(s), doc)) + [0] * (sentence_limit - len(doc)), test_docs))

    # Save
    print('Saving...\n')
    assert len(encoded_test_docs) == len(test_labels) == len(sentences_per_test_document) == len(
        words_per_test_sentence)
    torch.save({'ids': test_ids,
                'docs': encoded_test_docs,
                'labels': test_labels,
                'sentences_per_document': sentences_per_test_document,
                'words_per_sentence': words_per_test_sentence},
               os.path.join(output_folder, 'TEST_data.tar'))
    print('Encoded, padded test data saved to %s.\n' %
          os.path.abspath(output_folder))

    print('All done!\n')
def init_embedding(input_embedding):
    """Fill `input_embedding` in place with Uniform(-b, b) values.

    The bound b = sqrt(3 / embedding_dim) makes the variance of each entry
    equal to 1 / embedding_dim (variance of U(-b, b) is b^2 / 3).

    :param input_embedding: 2D embedding tensor, shape (vocab, dim)
    """
    bound = np.sqrt(3.0 / input_embedding.size(1))
    nn.init.uniform_(input_embedding, -bound, bound)
def load_word2vec_embeddings(word2vec_file, word_map):
    """
    Load pre-trained embeddings for words in the word map.

    Words in `word_map` that are missing from the word2vec vocabulary keep
    the random uniform initialization from init_embedding().

    :param word2vec_file: location of the trained word2vec model
    :param word_map: word map
    :return: embeddings for words in the word map, embedding size
    """
    # Load word2vec model into memory (mmap='r' shares the weights read-only).
    w2v = gensim.models.KeyedVectors.load(word2vec_file, mmap='r')

    print("\nEmbedding length is %d.\n" % w2v.vector_size)

    # Create tensor to hold embeddings for words that are in-corpus
    embeddings = torch.FloatTensor(len(word_map), w2v.vector_size)
    init_embedding(embeddings)

    # Read embedding file; cnt tracks the word-map coverage of the w2v vocab.
    print("Loading embeddings...")
    cnt = 0
    for word in word_map:
        if word in w2v.vocab:
            embeddings[word_map[word]] = torch.FloatTensor(w2v[word])
            cnt += 1
    print("Embedding vocabulary: %d, in w2v: %d(%.4f).\n" %
          (len(word_map), cnt, float(cnt) / len(word_map)))
    return embeddings, w2v.vector_size
def clip_gradient(optimizer, grad_clip):
    """Clamp every parameter gradient to [-grad_clip, grad_clip] in place.

    This is element-wise value clipping (not norm clipping) over all of the
    optimizer's parameter groups; parameters without a gradient are skipped.

    :param optimizer: optimizer holding the parameters whose grads to clip
    :param grad_clip: absolute bound for each gradient element
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            grad = param.grad
            if grad is not None:
                grad.data.clamp_(-grad_clip, grad_clip)
def save_checkpoint(epoch, model, optimizer, word_map):
    """
    Save a training checkpoint to ./logs/checkpoint_<epoch>.pth.tar.

    (The previous docstring documented parameters — best_acc,
    epochs_since_improvement, is_best — that this function never had.)

    :param epoch: epoch number (also embedded in the output filename)
    :param model: model to serialize
    :param optimizer: optimizer to serialize (so training can resume)
    :param word_map: word map used by the model
    """
    state = {'epoch': epoch,
             'model': model,
             'optimizer': optimizer,
             'word_map': word_map}
    # torch.save does not create missing directories; make sure ./logs exists.
    os.makedirs('./logs', exist_ok=True)
    filename = './logs/checkpoint_{}.pth.tar'.format(epoch)
    torch.save(state, filename)
class AverageMeter(object):
    """Tracks the latest value, running sum, count, and mean of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero all tracked statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running mean
        self.sum = 0    # running (weighted) sum
        self.count = 0  # number of observations

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, scale_factor):
    """Multiply every parameter group's learning rate by `scale_factor`.

    :param optimizer: optimizer whose learning rates must be decayed
    :param scale_factor: multiplicative factor (e.g. 0.1 to shrink 10x)
    """
    print("\nDECAYING learning rate.")
    for group in optimizer.param_groups:
        group['lr'] *= scale_factor
    # Report the first group's new rate (all groups are scaled identically).
    print("The new learning rate is %f\n" % (optimizer.param_groups[0]['lr'],))
class WarmupLinearSchedule(LambdaLR):
    """Linear warmup followed by linear decay.

    The LR multiplier rises linearly from 0 to 1 over `warmup_steps` steps,
    then falls linearly from 1 back to 0 over the remaining
    `t_total - warmup_steps` steps (clamped at 0 beyond t_total).
    """

    def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        """Return the LR multiplier for global step `step`."""
        if step < self.warmup_steps:
            # Warmup phase: fraction of warmup completed (max guards /0).
            return float(step) / float(max(1, self.warmup_steps))
        remaining = float(self.t_total - step)
        decay_span = float(max(1.0, self.t_total - self.warmup_steps))
        # Decay phase: never drop below zero once t_total is exceeded.
        return max(0.0, remaining / decay_span)
if __name__ == '__main__':
    # Preprocess the CSVs in ./data2 into padded, encoded tensors:
    # at most 20 sentences per document and 60 words per sentence.
    create_input_files(csv_folder='./data2',
                       output_folder='./data2',
                       sentence_limit=20,
                       word_limit=60,
                       min_word_count=5)
| true |
e45fd1c58499e5cb728472c66038afde2097cf92 | Python | nddsg/HDTM | /scripts/article2hlda.py | UTF-8 | 538 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python3
# This program convert raw document-word file to Graphlab's LDA format
import sys
with open(sys.argv[2], "w+") as fout:
    with open(sys.argv[1]) as f:
        for line in f:
            raw_data = [int(x) for x in line.split()]
            # Guard against blank lines, which would make raw_data[0] raise.
            if not raw_data:
                continue
            # First token is written through unchanged (presumably the doc
            # id — TODO confirm); the rest are word ids to be counted.
            word_set = dict()
            for x in raw_data[1:]:
                word_set[x] = word_set.get(x, 0) + 1
            fout.write(str(raw_data[0]) + " ")
            # BUG FIX: the original wrote str() — an empty string — so no
            # word counts were ever emitted. Emit sparse "word:count" pairs,
            # one document per line (LDA-style format — TODO confirm this
            # matches what the downstream consumer expects).
            for k in word_set:
                fout.write("{}:{} ".format(k, word_set[k]))
            fout.write("\n")
| true |
6ef44aae11e55be7a2575b65c7aec230f94b6f71 | Python | dba-base/python-homework | /Day05/re模块.py | UTF-8 | 990 | 3.34375 | 3 | [] | no_license | import re
print(re.search('(abc){2}',"abcacabcasdf") )
print(re.search('\A[0-9]+[a-z]+\Z','1234asd')) #1234asd
print(re.search('\A\w+\Z','1234asd')) #同上
print(re.search("(?P<id>[0-9]+)(?P<name>[a-zA-Z]+)","asdfg12345asdf@13").groupdict()) #{'id': '12345', 'name': 'asdf'}
print(re.search("(?P<province>[0-9]{4})(?P<city>[0-9]{2})(?P<birthday>[0-9]{4})","371481199306143242").groupdict("city"))
'''
输出:
{'province': '3714', 'city': '81', 'birthday': '1993'}
'''
print(re.split("[0-9]+","abc123f23GH")) #['abc', 'f', 'GH'] 按数字分隔
#替换
print(re.sub("[0-9]+","...","abc123f23GH23aa23bb")) #abc...f...GH...aa...bb
print(re.sub("[0-9]+","...","abc123f23GH23aa23bb",2)) # 只替换前两个abc...f...GH23aa23bb
#re.I(re.IGNORECASE): 忽略大小写(括号内是完整写法,下同)
res = re.search("[a-z]+","asdASD",flags=re.IGNORECASE) #asdASD
print(res)
str = '''
abcd
\nbdsd
\nasdf
'''
res1 = re.search("[a-z]+d$",str,flags=re.MULTILINE)
print(res1) | true |
25c16516725c262adea8aa125fd28b7305d2ac4f | Python | zhujixiang1997/6969 | /7月11日/作业02.py | UTF-8 | 392 | 3.921875 | 4 | [] | no_license | '''
2.写一段代码判断一个人的体重是否合格:
公式:(身高-108)*2=体重,可以有10斤左右的浮动
'''
weight = eval(input('请输入体重(单位斤):'))
height = eval(input('请输入身高(单位cm):'))
if (height - 108)*2 >= weight - 10 and (height - 108)*2 <= weight+10:
print('恭喜你的体重合格!')
else:
print('兄dei,不行呀!') | true |
a2b9392b71737c3f064b6f0451b913fdcbc18a3c | Python | tensorflow/datasets | /tensorflow_datasets/text/wsc273/wsc273.py | UTF-8 | 5,676 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WSC273 Dataset."""
from __future__ import annotations
import numpy as np
import tensorflow_datasets.public_api as tfds
_CITATION = """
@inproceedings{levesque2012winograd,
title={The winograd schema challenge},
author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
year={2012},
organization={Citeseer}
}
"""
_DESCRIPTION = """
WSC273 is a common sense reasoning benchmark that requires the system to read a sentence with an ambiguous pronoun and select the referent of that pronoun from two choices.
It contains the first 273 examples from the Winograd Schema Challenge.
A Winograd schema is a pair of sentences that differ in only one or two words and that contain an ambiguity that is resolved in opposite ways in the two sentences and requires the use of world knowledge and reasoning for its resolution.
The schema takes its name from a well-known example by Terry Winograd: ``The city councilmen refused the demonstrators a permit because they [feared/advocated] violence.''
If the word is ``feared'', then ``they'' presumably refers to the city council; if it is ``advocated'' then ``they'' presumably refers to the demonstrators.
"""
_HOMEPAGE_URL = (
"https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html"
)
_DOWNLOAD_URL = (
"https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WSCollection.xml"
)
class Wsc273(tfds.core.GeneratorBasedBuilder):
"""The WSC273 dataset."""
VERSION = tfds.core.Version("1.0.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"text": tfds.features.Text(),
"option1": tfds.features.Text(),
"option1_normalized": tfds.features.Text(),
"option2": tfds.features.Text(),
"option2_normalized": tfds.features.Text(),
"pronoun_start": np.int32,
"pronoun_end": np.int32,
"pronoun_text": tfds.features.Text(),
"label": np.int32,
"idx": np.int32,
}),
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
file_path = dl_manager.download(_DOWNLOAD_URL)
return {tfds.Split.TEST: self._generate_examples(file_path)}
def _generate_examples(self, file_path):
"""Yields Examples.
Args:
file_path: Path of the test xml file.
Yields:
The next examples
"""
examples = parse_wsc273_xml(file_path.read_text())
for e in examples:
yield e["idx"], e
def normalize_text(text):
text = text.strip()
# Correct a misspell.
text = text.replace("recieved", "received")
text = text.replace("\n", " ")
text = text.replace(" ", " ")
return text
def normalize_cap(option, pron):
"""Normalize the capitalization of the option according to the pronoun."""
cap_tuples = [
("The", "the"),
("His", "his"),
("My", "my"),
("Her", "her"),
("Their", "their"),
("An", "an"),
("A", "a"),
]
uncap_dict = dict(cap_tuples)
cap_dict = dict([(t[1], t[0]) for t in cap_tuples])
words = option.split(" ")
first_word = words[0]
if pron[0].islower():
first_word = uncap_dict.get(first_word, first_word)
else:
first_word = cap_dict.get(first_word, first_word)
words[0] = first_word
option = " ".join(words)
return option
def parse_wsc273_xml(xml_data):
"""Parse the XML file containing WSC273 examples."""
soup = tfds.core.lazy_imports.bs4.BeautifulSoup(xml_data, "lxml")
schemas = soup.find_all("schema")
# Only the first 273 examples are included in WSC273.
for i, schema in enumerate(schemas[:273]):
txt1 = schema.find_all("txt1")[0].get_text()
txt1 = normalize_text(txt1)
txt2 = schema.find_all("txt2")[0].get_text()
txt2 = normalize_text(txt2)
pron = schema.find_all("pron")[0].get_text()
pron = normalize_text(pron)
answers = [ans.get_text().strip() for ans in schema.find_all("answer")]
normalized_answers = [normalize_cap(ans, pron) for ans in answers]
assert len(answers) == 2
choice = schema.find_all("correctanswer")[0].get_text().strip()
label = {"A": 0, "B": 1}[choice[0]]
if len(txt2) == 1:
# If there is only one punctuation left after the pronoun,
# then no space should be inserted.
text = f"{txt1} {pron}{txt2}"
else:
text = f"{txt1} {pron} {txt2}"
pronoun_text = pron
pronoun_start = len(txt1) + 1
pronoun_end = len(txt1) + len(pron) + 1
example = dict(
text=text,
pronoun_text=pronoun_text,
pronoun_start=pronoun_start,
pronoun_end=pronoun_end,
option1=answers[0],
option2=answers[1],
option1_normalized=normalized_answers[0],
option2_normalized=normalized_answers[1],
label=label,
idx=i,
)
assert text[pronoun_start:pronoun_end] == pronoun_text
yield example
| true |
68812f8f540cdcfb70f803d3370b799b5172707f | Python | rishavanand/little-python-scripts | /rss_fossbytes.py | UTF-8 | 996 | 3.046875 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import json
def get_fossbyte_feed():
    """
    Extracts all news from FossBytes' RSS feed,
    including their title, link, date and categories.

    Returns a dict keyed by the item's position in the feed; each value is a
    dict with 'title', 'link', 'date' (strings) and 'category' (list of
    category-name strings).
    """
    # Fetch feed (a browser-like User-Agent avoids trivial bot blocking).
    url = 'https://fossbytes.com/feed/'
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'}
    html = requests.get(url, headers=headers)

    # Extract news: RSS is XML, with one <item> element per article.
    soup = BeautifulSoup(html.text, 'xml')
    items = soup.findAll('item')
    feeds = {}
    for number, item in enumerate(items):
        feed = {}
        feed['title'] = item.find('title').text
        feed['link'] = item.find('link').text
        feed['date'] = item.find('pubDate').text
        # 'category' temporarily holds the tag objects, then is replaced by
        # their plain-text names so the result is JSON-serializable.
        feed['category'] = item.findAll('category')
        cats = []
        for category in feed['category']:
            cats.append(category.text)
        feed['category'] = cats
        feeds[number] = feed
    return feeds
# Save feeds to file
feeds = get_fossbyte_feed()
# Use a context manager so the file handle is closed even if the write
# fails (the original open/write/close sequence leaked it on error).
with open('rss_fossbytes.json', 'w') as my_file:
    my_file.write(json.dumps(feeds))
my_file.close() | true |
eaba0535613dc0a9db671b443dd1e23b8bf8f1d7 | Python | weiyuyan/LeetCode | /剑指offer/面试题9.用两个栈实现队列.py | UTF-8 | 1,216 | 3.9375 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:ShidongDu time:2020/3/7
'''
用两个栈实现一个队列。队列的声明如下,请实现它的两个函数 appendTail 和 deleteHead ,
分别完成在队列尾部插入整数和在队列头部删除整数的功能。(若队列中没有元素,deleteHead 操作返回 -1 )
示例 1:
输入:
["CQueue","appendTail","deleteHead","deleteHead"]
[[],[3],[],[]]
输出:[null,null,3,-1]
示例 2:
输入:
["CQueue","deleteHead","appendTail","appendTail","deleteHead","deleteHead"]
[[],[],[5],[2],[],[]]
输出:[null,-1,null,null,5,2]
提示:
1 <= values <= 10000
最多会对 appendTail、deleteHead 进行 10000 次调用
'''
class CQueue:
    """Queue implemented with two stacks.

    stack1 receives pushes; stack2 serves pops. Elements move from stack1 to
    stack2 (reversing their order into FIFO order) only when stack2 runs dry,
    so both operations are O(1) amortized. The original shuffled every
    element through both stacks on each appendTail (O(n) per push) and
    returned the internal list from appendTail, contradicting both its
    `-> None` annotation and the documented expected output (`null`).
    """

    def __init__(self):
        self.stack1 = []  # in-stack: newest element on top
        self.stack2 = []  # out-stack: oldest element on top

    def appendTail(self, value: int) -> None:
        """Append `value` at the tail of the queue."""
        self.stack1.append(value)

    def deleteHead(self) -> int:
        """Remove and return the head of the queue, or -1 if it is empty."""
        if not self.stack2:
            # Refill the out-stack lazily; this reversal restores FIFO order.
            while self.stack1:
                self.stack2.append(self.stack1.pop())
        if not self.stack2:
            return -1
        return self.stack2.pop()
871edbbcc4e7b1a6b00ad520e588fda59ddef16d | Python | Jeffery12138/Python- | /第5章 if语句/动手试一试/5-4 alien_color2.py | UTF-8 | 173 | 3.625 | 4 | [] | no_license | # alien_color = 'green'
alien_color = 'yellow'
# Green aliens are worth 5 points; any other colour is worth 10.
points_message = ("You will get 5 points for shooting the alien"
                  if alien_color == 'green'
                  else "You will get 10 points")
print(points_message)
1d0a01ed93c2cf898989b358d436caa833407c81 | Python | Liubasara/pratice_code | /滑动窗口的最大值.py | UTF-8 | 460 | 3.078125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
class Solution:
    def maxInWindows(self, num, size):
        """Return the maximum of each sliding window of width `size` in `num`.

        :param num: list of comparable values
        :param size: window width
        :return: [] when size == 0 (and also when size > len(num)); None when
                 `num` is empty (preserved from the original for backward
                 compatibility); otherwise one maximum per window position.
        """
        if size == 0:
            return []
        if not num:
            return None
        # Use the builtin max() on each window instead of copying and
        # sorting it: O(n*k) rather than the original O(n*k*log k), and no
        # per-window list allocation beyond the slice itself.
        return [max(num[i:i + size]) for i in range(len(num) - size + 1)]
# Ad-hoc smoke check. NOTE: this is a Python 2 print statement; expected
# output for this input is [7, 9, 11, 13, 15].
a = Solution()
print a.maxInWindows([1,3,5,7,9,11,13,15],4)
3af79d0f6cbf2843503cf0b6bd9315ce3ac3023c | Python | ericgreveson/projecteuler | /p_050_059/problem57.py | UTF-8 | 495 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | from fractions import Fraction
from digit_tools import num_digits
def main():
"""
Entry point
"""
denom = 2
heavy_num_count = 0
for i in range(1000):
root_2_approx = 1 + Fraction(1, denom)
denom = 2 + Fraction(1, denom)
if num_digits(root_2_approx.numerator) > num_digits(root_2_approx.denominator):
heavy_num_count += 1
print(f"Number of top-heavy approximations: {heavy_num_count}")
if __name__ == "__main__":
main()
| true |
3f98e8e4d2d2c471807872d4ba516247a7bd2dcd | Python | emanuelgsouza/ping-analyzer | /ping-analyser.py | UTF-8 | 1,022 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 7 12:00:48 2018
@author: emanuel
"""
import pandas as pd
getValuesFromLine = lambda line : line.split(' ')[5:8]
def getValFromColumn(val):
name, val = tuple(val.split('='))
if name in ['icmp_seq', 'ttl']:
val = int(val)
else:
val = float(val)
return (name, val)
getValuesFromColumn = lambda line : dict(list(map(getValFromColumn, line)))
with open('./ping.file.txt', 'r') as file:
lines = list(filter(lambda x : '64 bytes' in x, list(file.readlines())))
clearedLines = list(map(getValuesFromLine, lines))
clearedColumns = list(map(getValuesFromColumn, clearedLines))
# create a DataFrame from values
df = pd.DataFrame(clearedColumns, columns = ['icmp_seq', 'ttl', 'time'])
# save to CSV
#df.to_csv('ping.csv', index = False)
# show column types
df.dtypes
# describe
df.describe()
# show graphic
df.plot.scatter(x='icmp_seq', y='time')
| true |
0f9ebdd6dc2ce6e9a93d82434e8298d9dbac2b5e | Python | adubowski/big-data-ip-hosting | /get_indeces.py | UTF-8 | 2,017 | 2.75 | 3 | [] | no_license | import pickle
import requests
import json
import csv
import time
import multiprocessing as mp
url_list = []
cc = "http://index.commoncrawl.org/CC-MAIN-2020-50-index"
with open("/home/s2017873/top-1m.csv") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
i = 0
for row in csv_reader:
i += 1
url_list.append(row[1])
def get_indeces_from_url(tup):
url, queue = tup
get_string = cc + "/?url=" + url + "/&output=json"
code = 0
while code != 200:
r = requests.get(get_string)
code = r.status_code
if r.status_code == 200:
json_obj_list = r.text.split("\n")
json_obj_list.pop()
l = set()
for json_obj in json_obj_list:
j = json.loads(json_obj)
if '/warc/' in j['filename']:
l.add(j['filename'])
queue.put(l)
break
elif r.status_code == 404:
queue.put(set())
break
elif r.status_code == 503:
time.sleep(0.5)
else :
print(r.status_code)
time.sleep(0.5)
# Partition urls to 1000 workers that each do 1000 urls
def write_to_urls(queue):
url_set = set()
for i in range(top_k):
new_urls = queue.get()
print("{0} / {1}".format(i, top_k))
to_write = new_urls.difference(url_set)
url_set = url_set.union(new_urls)
with open("url_file.txt", "a") as f:
for url in to_write:
f.write(url + "\n")
print("Done")
p = mp.Pool(4)
m = mp.Manager()
q = m.Queue()
top_k = 100
url_list = url_list[:top_k]
url_list = map(lambda url : (url, q), url_list)
writer = mp.Process(target=write_to_urls, args=(q, ))
writer.start()
p.map(get_indeces_from_url, url_list)
p.join()
writer.join()
| true |
8d44e40f3063d47792ab80f3aa438c3d71a7d3a8 | Python | J-sephB-lt-n/exploring_statistics | /recsys_alg_implementation_code/user_item_networkx.py | UTF-8 | 15,003 | 3 | 3 | [] | no_license | import sys
import networkx as nx
import walker # for generating random walks quickly
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import pandas as pd
## simulate user/item data ## ---------------------------------------------------------------------
sys.path.append("..")
from recsys_simulation import recsys_data_simulator
"""
I simulate implicit ratings data:
I expose each simulated user to every item in the simulated item catalogue..
..in order to obtain a buy probability (p) for that user/item combination
Then, each user purchases each item at random, with probability (p)
In order to train the model:
* I use the observed buying behaviour as positive examples (y=1)
* For each positive example (y=1), I create a matching random negative example (y=0)..
..by including a random item that the user didn't purchase (in the same context as the positive example)
For simplicity here, users without any item purchases are removed from model training and evaluation
(since these users complicate the user embedding part of the model)
(in practice, these users will need to be addressed, either within the model or by a different model)
"""
sim_n_users = 5_000
sim_n_items = 200
min_buy_prob = 0
max_buy_prob = 0.1
sim_obj = recsys_data_simulator(
n_users=sim_n_users,
n_items=sim_n_items,
n_user_types=5,
n_item_attr_pref_mutations_per_user=5,
n_additional_context_modifiers_per_user=1,
potential_item_attr={
"colour": [
"red",
"green",
"blue",
"black",
"white",
"purple",
"yellow",
"pink",
],
"size": ["small", "medium", "large"],
"material": ["metal", "wood", "cotton", "plastic", "wool", "stone", "glass"],
"style": [
"industrial",
"warm",
"loud",
"gothic",
"tech",
"sport",
"floral",
"pastel",
"chic",
"beach",
],
},
potential_user_attr={
"location": [
"cape_town",
"london",
"dubai",
"new_york",
"rotterdam",
"porto",
"tokyo",
],
"age_group": ["infant", "child", "teenager", "youth", "middle_aged", "elderly"],
"affluence": ["very_low", "low", "middle", "high", "very_high"],
"main_device": [
"laptop",
"desktop_computer",
"phone",
"tablet",
"postal_service",
],
},
potential_context_attr={
"time_of_day": ["morning", "afternoon", "night"],
"day_of_week": [
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
],
"social_context": [
"public_space",
"public_transport",
"private_space",
"private_transport",
],
"user_group_recommendation": [
"user_alone",
"small_user_group",
"large_user_group",
],
},
rating_range={
"min": min_buy_prob,
"max": max_buy_prob,
}, # I use this "rating" as "item purchase probability"
rating_trunc_norm_std_dev=0.01,
n_context_effects=1,
context_effect_abs_size=5,
)
pd.set_option("display.max_rows", sim_n_items)
pd.set_option("display.max_columns", 50)
# expose each user to the entire item catalogue:
# i.e. get the "item purchase probability" for each user/item combination
# (each exposure in a random context)
sim_obj.expose_each_user_to_k_items(
min_k=sim_n_items,
max_k=sim_n_items,
ignore_context=True,
add_noise_to_rating=False, # this speeds up the function (and makes the patterns easier to model)
)
# decide (at random) which items each user buys (using item purchase probability):
for u in sim_obj.user_dict:
for x in sim_obj.user_dict[u]["item_exposure_history"]:
if np.random.uniform() < x["true_affinity_to_item"]:
x["bought_item"] = 1
else:
x["bought_item"] = 0
# delete users without any item purchases:
users_to_delete_list = []
for u in sim_obj.user_dict:
n_items_bought = sum(
[x["bought_item"] for x in sim_obj.user_dict[u]["item_exposure_history"]]
)
if n_items_bought == 0:
users_to_delete_list.append(u)
for u in users_to_delete_list:
del sim_obj.user_dict[u]
print(f"{len(users_to_delete_list)} users deleted (no item purchases)")
# create a user-item graph, where an edge exists between a user and an item if the user bought that item
user_item_graph = nx.Graph()
for u in tqdm(sim_obj.user_dict, desc="creating user/item graph"):
user_node_name = f"u{u}"
user_item_graph.add_node(user_node_name, node_type="user")
for x in sim_obj.user_dict[u]["item_exposure_history"]:
item_node_name = f"i{x['item_ID']}"
if item_node_name not in user_item_graph:
user_item_graph.add_node(item_node_name, node_type="item")
if x["bought_item"] == 1:
user_item_graph.add_edge(user_node_name, item_node_name)
# plot the user/item graph:
# (plot is only readable if you have a small number of users and items)
if sim_n_users < 200:
plt.figure(figsize=(20, 15))
node_types_list = [
user_item_graph.nodes[node]["node_type"]
for node in list(user_item_graph.nodes())
]
node_colours = [2 if x == "user" else 1 for x in node_types_list]
nx.draw_networkx(user_item_graph, node_color=node_colours, cmap="Set1")
# in order to user the [walker](https://github.com/kerighan/graph-walker) library..
# ..nodes must have integer IDs:
user_item_graph_w_integer_labels = nx.convert_node_labels_to_integers(
G=user_item_graph,
label_attribute="str_node_name", # store the original node name as an attribute of the node
)
# make a dictionary in order to be able to match users and items to their integer node IDs:
int_node_label_ref_dict = {}
for node_id in user_item_graph_w_integer_labels.nodes:
str_node_name = user_item_graph_w_integer_labels.nodes[node_id]
int_node_label_ref_dict[node_id] = str_node_name
int_node_label_ref_dict[str_node_name["str_node_name"]] = node_id
# generate item recommendations for a random user by using random walks directly --------------------------------------------------------------------
n_item_recs_per_user = 20
random_user_ID = np.random.choice(list(sim_obj.user_dict.keys()))
random_user_node_name = f"u{random_user_ID}"
random_user_int_node_ID = int_node_label_ref_dict[random_user_node_name]
items_already_bought_df = pd.DataFrame(
{
"user_ID": random_user_ID,
"item_ID": [
x["item_ID"]
for x in sim_obj.user_dict[random_user_ID]["item_exposure_history"]
if x["bought_item"] == 1
],
"bought_item": 1,
}
)
true_pref_df_all_items = pd.DataFrame(
{
"user_ID": random_user_ID,
"item_ID": list(sim_obj.item_dict.keys()),
"true_buy_prob": [
sim_obj.calc_user_preference_for_item(
user_id=random_user_ID,
item_id=i,
recommend_context=sim_obj.generate_random_context(),
)["rating_ignore_context"]
for i in list(sim_obj.item_dict.keys())
],
}
).merge(items_already_bought_df, on=["user_ID", "item_ID"], how="left")
true_pref_df_all_items.loc[
true_pref_df_all_items["bought_item"].isna(), "bought_item"
] = 0
true_pref_df_all_items["bought_item"] = true_pref_df_all_items["bought_item"].astype(
int
)
random_walks = walker.random_walks(
user_item_graph, n_walks=10_000, walk_len=4, start_nodes=[random_user_int_node_ID]
)
items_collected_on_random_walks = np.unique(
np.array(
[random_walks[:, j] for j in range(1, random_walks.shape[1], 2)]
).flatten(),
return_counts=True,
)
items_to_recommend_df = pd.DataFrame(
{
"user_ID": random_user_ID,
"item_int_node_ID": items_collected_on_random_walks[0],
"item_ID": [
int(int_node_label_ref_dict[i]["str_node_name"][1:])
for i in items_collected_on_random_walks[0]
],
"n_times_visited_random_walk": items_collected_on_random_walks[1],
}
)
true_pref_df_all_items = true_pref_df_all_items.merge(
items_to_recommend_df[["user_ID", "item_ID", "n_times_visited_random_walk"]],
on=["user_ID", "item_ID"],
how="left",
)
true_pref_df_all_items["random_seed"] = np.random.uniform(
size=len(true_pref_df_all_items)
)
true_pref_df_all_items.sort_values(
["n_times_visited_random_walk", "random_seed"], ascending=False, inplace=True
)
new_items_to_recommend = true_pref_df_all_items.query("bought_item==0").head(
n_item_recs_per_user
)[["user_ID", "item_ID"]]
new_items_to_recommend["randomWalk_recommend_item"] = 1
true_pref_df_all_items = true_pref_df_all_items.merge(
new_items_to_recommend, on=["user_ID", "item_ID"], how="left"
)
true_pref_df_all_items.loc[
true_pref_df_all_items["randomWalk_recommend_item"].isna(),
"randomWalk_recommend_item",
] = 0
true_pref_df_all_items["randomWalk_recommend_item"] = true_pref_df_all_items[
"randomWalk_recommend_item"
].astype(int)
fig, axs = plt.subplots(
nrows=2, ncols=1, figsize=(10, 5)
) # note: figsize is the size of the global plot
axs[0].hist(
x=true_pref_df_all_items.query(
"bought_item==0 & randomWalk_recommend_item==1"
).true_buy_prob,
range=[min_buy_prob, max_buy_prob],
bins=20,
)
axs[0].set_xlabel("true buy probability (unknown to model)")
axs[0].set_ylabel("n items")
axs[0].set_title(f"Recommended Items ({n_item_recs_per_user} items)")
axs[1].hist(
x=true_pref_df_all_items.query(
"bought_item==0 & randomWalk_recommend_item==0"
).true_buy_prob,
range=[min_buy_prob, max_buy_prob],
bins=20,
)
axs[1].set_xlabel("true buy probability (unknown to model)")
axs[1].set_ylabel("n items")
axs[1].set_title(
f"Items NOT Recommended ({len(true_pref_df_all_items.query('bought_item==0 & randomWalk_recommend_item==0'))} items)"
)
fig.suptitle(
f"Item Recommendations for user_ID={random_user_ID}\n(based on random walk on user/item graph)\n(only considering items not already bought)"
)
fig.tight_layout()
# generate item recommendations for the same random user using their user-neighbourbood defined on the graph ------------------------------------------------
n_users_in_neighHood = 100
random_walks = walker.random_walks(
user_item_graph, n_walks=10_000, walk_len=5, start_nodes=[random_user_int_node_ID]
)
users_collected_on_random_walks = np.unique(
np.array(
[random_walks[:, j] for j in range(0, random_walks.shape[1], 2)]
).flatten(),
return_counts=True,
)
collected_potential_neighbours_df = pd.DataFrame(
{
"user_ID": random_user_ID,
"neighb_user_int_node_ID": users_collected_on_random_walks[0],
"neighb_user_ID": [
# (int_node_label_ref_dict[u]["str_node_name"], int_node_label_ref_dict[u]["str_node_name"][1:])
int(int_node_label_ref_dict[u]["str_node_name"][1:])
for u in users_collected_on_random_walks[0]
],
"n_visits_on_random_walk": users_collected_on_random_walks[1],
"random_seed": np.random.uniform(size=len(users_collected_on_random_walks[0])),
}
).query("user_ID!=neighb_user_ID")
neighbours_df = collected_potential_neighbours_df.sort_values(
["n_visits_on_random_walk", "random_seed"], ascending=False
).head(n_users_in_neighHood)
neighbours_df["neighb_weight"] = (
neighbours_df.n_visits_on_random_walk / neighbours_df.n_visits_on_random_walk.sum()
)
# get bought/not_bought status of each item, for every neighbour:
store_pd_df_list = []
for neighb_user_ID in tqdm(
neighbours_df.neighb_user_ID, desc="neighbour true item buy probs -> pd.DataFrame"
):
for x in sim_obj.user_dict[neighb_user_ID]["item_exposure_history"]:
store_pd_df_list.append(
pd.DataFrame(
{
"neighb_user_ID": [neighb_user_ID],
"item_ID": [x["item_ID"]],
"bought_item": [x["bought_item"]],
}
)
)
neighbours_item_buy_hist_df = pd.concat(store_pd_df_list, axis=0)
neighbours_item_buy_hist_df = neighbours_item_buy_hist_df.merge(
neighbours_df[["neighb_user_ID", "n_visits_on_random_walk", "neighb_weight"]],
on="neighb_user_ID",
how="left",
)
neighbours_item_buy_hist_df["buy_vote"] = (
neighbours_item_buy_hist_df["bought_item"]
* neighbours_item_buy_hist_df["neighb_weight"]
)
neighb_recommend_calc_df = (
neighbours_item_buy_hist_df.groupby("item_ID")
.agg(
unweighted_neighb_vote=("bought_item", "mean"),
weighted_neighb_vote=("buy_vote", "sum"),
)
.sort_values("weighted_neighb_vote", ascending=False)
)
true_pref_df_all_items = true_pref_df_all_items.merge(
neighb_recommend_calc_df.reset_index(), on="item_ID", how="left"
)
new_items_to_recommend = (
true_pref_df_all_items.query("bought_item==0")
.sort_values(["weighted_neighb_vote", "random_seed"], ascending=False)
.head(n_item_recs_per_user)
)[["user_ID", "item_ID"]]
new_items_to_recommend["wtd_neighHood_recommend_item"] = 1
true_pref_df_all_items = true_pref_df_all_items.merge(
new_items_to_recommend, on=["user_ID", "item_ID"], how="left"
)
true_pref_df_all_items.loc[
true_pref_df_all_items["wtd_neighHood_recommend_item"].isna(),
"wtd_neighHood_recommend_item",
] = 0
true_pref_df_all_items["wtd_neighHood_recommend_item"] = true_pref_df_all_items[
"wtd_neighHood_recommend_item"
].astype(int)
fig, axs = plt.subplots(
nrows=2, ncols=1, figsize=(10, 5)
) # note: figsize is the size of the global plot
axs[0].hist(
x=true_pref_df_all_items.query(
"bought_item==0 & wtd_neighHood_recommend_item==1"
).true_buy_prob,
range=[min_buy_prob, max_buy_prob],
bins=20,
color="green",
)
axs[0].set_xlabel("true buy probability (unknown to model)")
axs[0].set_ylabel("n items")
axs[0].set_title(f"Recommended Items ({n_item_recs_per_user} items)")
axs[1].hist(
x=true_pref_df_all_items.query(
"bought_item==0 & wtd_neighHood_recommend_item==0"
).true_buy_prob,
range=[min_buy_prob, max_buy_prob],
bins=20,
color="green",
)
axs[1].set_xlabel("true buy probability (unknown to model)")
axs[1].set_ylabel("n items")
axs[1].set_title(
f"Items NOT Recommended ({len(true_pref_df_all_items.query('bought_item==0 & randomWalk_recommend_item==0'))} items)"
)
fig.suptitle(
f"Item Recommendations for user_ID={random_user_ID}\n(based on user neighbourhood on user/item graph - weighted vote of {n_users_in_neighHood} neighbours)\n(only considering items not already bought)"
)
fig.tight_layout()
| true |
782f5962dfda8f524238ce26871a3097eaeed567 | Python | larsga/fhdb | /reports/chartlib.py | UTF-8 | 851 | 3.125 | 3 | [] | no_license |
from PIL import Image
def combine_charts(images, per_row, filename):
    '''images: file names as strings
    per_row: number of images per row
    filename: output filename

    Pastes the images onto one RGB canvas in row-major order, using a grid
    cell sized to the largest image.  Requires Pillow (PIL).'''
    images = [Image.open(image) for image in images]
    widths, heights = zip(*(i.size for i in images))
    # Grid cell size = maximum width/height over all images.
    width = int(max(widths)) * per_row
    # Row count: float division plus min(1, remainder), then int() truncation.
    # Equivalent to ceil(len(images) / per_row).
    rows = int((len(images) / per_row) + min(1, len(images) % per_row))
    height = int(max(heights)) * rows
    new_im = Image.new('RGB', (width, height))
    y_offset = 0
    x_offset = 0
    for ix in range(len(images)):
        if ix == 0:
            # First image goes at the top-left corner.
            pass
        elif ix % per_row == 0:
            # Start of a new row: move down one cell, back to the left edge.
            y_offset += max(heights)
            x_offset = 0
        else:
            # Next column within the current row.
            x_offset += max(widths)
        #print ix, (x_offset, y_offset), images[ix]
        new_im.paste(images[ix], (x_offset, y_offset))
    new_im.save(filename)
| true |
e24e089bf5383c13adb42e098888735336ab7c9d | Python | xkjyeah/MRT-and-LRT-Stations | /pull_bus_stops2.py | UTF-8 | 2,284 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
import onemap
import urllib
import httplib2
import json
import gzip
import cPickle
import re
import pandas as pd
import pyproj
import data_mall
import numpy as np
url = onemap.GetURL('RestServiceUrl')
svy21 = pyproj.Proj(init='epsg:3414')
def download_stuff():
    """
    Bus stop codes are 5-digit.
    This searches all bus stop codes from 00000
    to 99999

    NOTE: Python 2 code (print statements, urllib.quote_plus).  For each
    two-digit prefix it pages through OneMap search results (rset is the
    page number) until a page with a single element comes back, which is
    treated as "no more results" — presumably the first element is a header
    row, hence the [1:] slice; TODO confirm against the OneMap API.
    """
    data = []
    for start in range(0,100):
        rset = 1  # OneMap result pages are 1-based
        while True:
            rurl = url + \
                urllib.quote_plus('SEARCHVAL LIKE \'' + '{0:02d}'.format(start) + '$BUS STOP$\'') + \
                '&' + urllib.urlencode({
                    'rset': str(rset),
                    'otptFlds': 'POSTALCODE,CATEGORY',
                })
            print rurl
            # httplib2 caches responses on disk under .cache
            h = httplib2.Http('.cache')
            _,content = h.request(rurl)
            obj = json.loads(content)
            if len(obj['SearchResults']) == 1:
                # Only the header element left: this prefix is exhausted.
                break
            data = data + obj['SearchResults'][1:]
            print len(obj['SearchResults'])
            rset += 1
    return data
# This is the one map data, but it is missing the Malaysian bus stops
data = download_stuff()
# Download stops from data mall
data_mall.authenticate('1hMEqSwhQWWRo88SupMVzQ==', '6b2ffab0-5916-4a20-a0d7-8f9824627d7b')
dm_data = data_mall.get('BusStops', 0)
dm_data = [d for d in dm_data if d['Longitude'] != 0]
dm_data = [d for d in dm_data if re.match('[0-9]{5}', d['BusStopCode']) != None]
for d in dm_data:
X,Y = svy21(d['Longitude'], d['Latitude'])
data.append({
'SEARCHVAL': '%s (BUS STOP)' % (d['BusStopCode']),
'X': X,
'Y': Y
})
# Save the pickle file
cPickle.dump(data, gzip.open('busstops2.pickle.gz', 'w'))
# Output the CSV
def bus_stop_code(d):
    """Extract the 5-digit bus stop code from a record's SEARCHVAL field.

    Expects values shaped like ``"12345 (BUS STOP)"``; raises ValueError
    when the value does not match.
    """
    m = re.match(r'([0-9]{5}) \(BUS STOP\)', d['SEARCHVAL'])
    if m is None:
        raise ValueError
    return m.group(1)
def make_row(d):
    """Turn one raw search record into a flat dict for the output DataFrame.

    Converts the SVY21 (EPSG:3414) X/Y coordinates back to WGS84 via the
    module-level pyproj transformer; pyproj returns (lon, lat).
    """
    lnglat = svy21(float(d['X']), float(d['Y']), inverse=True)
    return {
        'Name': bus_stop_code(d),
        'X': d['X'],
        'Y': d['Y'],
        'Latitude': lnglat[1],
        'Longitude': lnglat[0],
    }
df_data = [make_row(r) for r in data]
df = pd.DataFrame(df_data)
df.to_csv('./bus_stops.csv')
| true |
027df683d0f4d75f9517a03621bf8b1e4051d3b4 | Python | davidt99/spotipy | /spotipy/util.py | UTF-8 | 6,025 | 2.71875 | 3 | [
"MIT"
] | permissive | import contextlib
import socket
import time
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from urllib import parse
from urllib.parse import parse_qs
from urllib.parse import urlparse
import requests
from spotipy import exceptions
from spotipy import auth
PORT = 8080
REDIRECT_ADDRESS = "http://localhost"
REDIRECT_URI = "{}:{}".format(REDIRECT_ADDRESS, PORT)
def prompt_user_for_authorization_code_provider(
    client_id: str,
    client_secret: str,
    redirect_uri: str = None,
    scope: str = None,
    state: str = None,
    show_dialog=False,
    persist_file_path: str = None,
    requests_session: requests.Session = None,
    deploy_local_server=False,
) -> auth.AuthorizationCode:
    """Prompt the user to log in (if necessary) and return an
    AuthorizationCode provider suitable for the spotipy.Spotify constructor.

    Parameters:
     - client_id - the client id of your app
     - client_secret - the client secret of your app
     - redirect_uri - The URI to redirect to after the user grants/denies
       permission; required unless deploy_local_server is True
     - scope - the desired scope of the request
     - state - opaque value echoed back by Spotify (CSRF protection)
     - show_dialog - force the Spotify approval dialog even if already approved
     - persist_file_path - path to location to save tokens
     - requests_session - a requests.Session object
     - deploy_local_server - if true, deploys a local server to capture the
       authorization code automatically

    Raises ValueError when no redirect URI can be determined or when a
    manually pasted redirect URL carries no query string.
    """
    # With a local capture server we can fall back to the default localhost
    # redirect URI; otherwise the caller must supply one explicitly.
    redirect_uri = redirect_uri if not deploy_local_server else redirect_uri or REDIRECT_URI
    if not redirect_uri:
        # NOTE(review): message typo — "of" should read "if".
        raise ValueError("redirect_uri must be supplied of deploy_local_server is false")
    # Query parameters for Spotify's /authorize endpoint.
    params = {"client_id": client_id, "response_type": "code", "redirect_uri": redirect_uri}
    if scope:
        params["scope"] = scope
    if state:
        params["state"] = state
    if show_dialog is not None:
        params["show_dialog"] = show_dialog
    auth_url = "{}?{}".format("https://accounts.spotify.com/authorize", parse.urlencode(params))
    print(
        """
        User authentication requires interaction with your
        web browser. You will be prompted to enter your
        credentials and give authorization.
        """
    )
    code = None
    if deploy_local_server:
        # Capture the ?code=... redirect automatically on localhost.
        with local_server(redirect_uri) as httpd:
            _open_browser(auth_url)
            code = get_authentication_code(httpd)
    if not code:
        # Manual flow: the user pastes the redirect URL they landed on.
        _open_browser(auth_url)
        url = input("Please paste the url you were redirect to:")
        parsed_url = parse.urlparse(url)
        if not parsed_url.query:
            raise ValueError("invalid url")
        code = parse_qs(parsed_url.query)["code"][0]
    # Exchange the one-time code for access + refresh tokens.
    payload = {"code": code, "grant_type": "authorization_code", "redirect_uri": redirect_uri}
    now = int(time.time())
    token_info = auth.request_token(payload, client_id, client_secret, requests_session)
    refresh_token = token_info["refresh_token"]
    access_token = token_info["access_token"]
    # expires_in is relative (seconds); store an absolute epoch timestamp.
    access_token_expires_at = token_info["expires_in"] + now
    auth_provider = auth.AuthorizationCode(
        client_id,
        client_secret,
        refresh_token,
        access_token,
        access_token_expires_at,
        persist_file_path,
        requests_session,
    )
    if persist_file_path:
        auth_provider.save()
    return auth_provider
def _open_browser(auth_url):
    """Best-effort: open *auth_url* in the default browser; on any failure
    fall back to printing the URL for the user to navigate to manually."""
    import webbrowser

    try:
        webbrowser.open(auth_url)
        print(f"Opened {auth_url} in your browser")
    except Exception:
        print(f"Please navigate here: {auth_url}")
def assert_port_available(port):
    """
    Assert a given network port is available.
    Raises SpotifyError if the port cannot be bound.
    :param port: network port to check
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("", port))
    except socket.error:
        raise exceptions.SpotifyError(
            "Port {} is not available. If you are currently running a server, "
            "please halt it for a min.".format(port)
        )
    finally:
        # Always release the probe socket, whether or not the bind worked.
        probe.close()
@contextlib.contextmanager
def local_server(redirect_uri: str):
    """Context manager yielding a temporary redirect-capture server.

    Yields a MicroServer bound to the host part of REDIRECT_ADDRESS on PORT
    when *redirect_uri* is the default local URI; for any other redirect URI
    it yields None (nothing to serve locally).
    """
    if redirect_uri != REDIRECT_URI:
        # External redirect URI — no local server needed.
        yield
        return
    assert_port_available(PORT)
    # e.g. ("localhost", 8080); CustomHandler records the redirect query.
    httpd = MicroServer((REDIRECT_ADDRESS.split("://")[1], PORT), CustomHandler)
    # NOTE(review): server_close() is skipped if the body raises — consider
    # wrapping the yield in try/finally.
    yield httpd
    httpd.server_close()
def get_authentication_code(httpd):
    """
    Serve requests on the temporary http server until Spotify's redirect
    arrives, then return the authorization code it carried.

    :param httpd: MicroServer whose handler stores the parsed query string
        in ``latest_query_components``
    :return: the authentication code
    :raises exceptions.SpotifyError: when the redirect reports an error or
        carries no ``code`` parameter
    """
    # Handle one request at a time; CustomHandler.do_GET sets
    # latest_query_components once the redirect lands.
    while not httpd.latest_query_components:
        httpd.handle_request()
    if "error" in httpd.latest_query_components:
        if httpd.latest_query_components["error"][0] == "access_denied":
            raise exceptions.SpotifyError("The user rejected Spotify access")
        else:
            raise exceptions.SpotifyError(
                "Unknown error from Spotify authentication server: {}".format(httpd.latest_query_components["error"][0])
            )
    if "code" in httpd.latest_query_components:
        # parse_qs yields lists; take the single code value.
        code = httpd.latest_query_components["code"][0]
    else:
        raise exceptions.SpotifyError(
            "Unknown response from Spotify authentication server: {}".format(httpd.latest_query_components)
        )
    return code
class CustomHandler(BaseHTTPRequestHandler):
    """Handles the single OAuth redirect request: replies with a small
    self-closing HTML page and stashes the parsed query string on the owning
    server (MicroServer.latest_query_components)."""

    def do_GET(self):
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        # Expose code/state/error query parameters to get_authentication_code().
        self.server.latest_query_components = parse_qs(urlparse(self.path).query)
        self.wfile.write(
            b"""<html>
            <body>
            <p>This tab will be close in 3 seconds</p>
            <script>
            setTimeout(window.close,3000)
            </script>
            </body>
            </html>"""
        )
class MicroServer(HTTPServer):
    """HTTPServer that records the query string of the last handled request
    (set by CustomHandler.do_GET) so the OAuth redirect can be inspected."""

    def __init__(self, server_address, RequestHandlerClass):
        # None until the first request has been handled.
        self.latest_query_components = None
        super().__init__(server_address, RequestHandlerClass)
| true |
a0a67dfcae15650490a7aee0486a1538b4c3177b | Python | aLonelySquidNamedBob/Random-Projects | /NeuralNetworks/Seb_Lague/program.py | UTF-8 | 407 | 2.6875 | 3 | [] | no_license | import neuralnetwork as nn
import numpy as np
import time as t
start_time = t.time()
with np.load('mnist.npz') as data:
training_images = data['training_images']
training_labels = data['training_labels']
layer_sizes = (784, 20, 20, 10)
net = nn.NeuralNetwork(layer_sizes)
net.print_accuracy(training_images, training_labels)
elapsed_time = t.time() - start_time
print(f"{round(elapsed_time, 3)} s")
| true |
6f26806d561589588d9576a385fcd7160a847f2e | Python | radomirbrkovic/algorithms | /hackerrank/other/save-the-prisoner.py | UTF-8 | 326 | 3.421875 | 3 | [] | no_license | # Save the Prisoner! https://www.hackerrank.com/challenges/save-the-prisoner/problem
def saveThePrisoner(n, m, s):
    """HackerRank "Save the Prisoner!": seats are 1..n in a circle; counting
    starts at seat s and hands out m sweets.  Return the seat that receives
    the last sweet."""
    offset = (s + m - 1) % n
    # A zero offset means we wrapped around exactly onto seat n.
    return offset if offset else n
print saveThePrisoner(5, 2, 1)
print saveThePrisoner(5, 2, 2)
print saveThePrisoner(7, 19, 2)
print saveThePrisoner(3, 7, 3) | true |
4d65f48d8eca1deddcd08ef8141b11f5cbe6a4e5 | Python | CseTitan/CseTitanHotel | /organize_menu.py | UTF-8 | 319 | 3.09375 | 3 | [] | no_license | SCREEN_WIDTH = 100
def print_center(s):
    """Print *s* shifted to the horizontal middle of the screen."""
    print(" " * (SCREEN_WIDTH // 2), s)
def print_bar():
    # Horizontal separator rule, 100 characters wide (matches SCREEN_WIDTH).
    print("=" * 100)
def print_bar_ln():
    # Separator rule followed by an empty line.
    print_bar()
    print()
def input_center(s):
    """Show prompt *s* centred on screen (no trailing newline), then read
    and return one line of user input."""
    print(" " * (SCREEN_WIDTH // 2), s, end='')
    return input()
| true |
1d6203bd06ebd1cdc75ca35ad228ec1b3f90bd58 | Python | ShruthaKashyap/Linear-Regression-Python-Spark | /linreg.py | UTF-8 | 2,283 | 3.59375 | 4 | [] | no_license | # linreg.py
#
# Standalone Python/Spark program to perform linear regression.
# Performs linear regression by computing the summation form of the
# closed form expression for the ordinary least squares estimate of beta.
#
# TODO: Write this.
#
# Takes the yx file as input, where on each line y is the first element
# and the remaining elements constitute the x.
#
# Usage: spark-submit linreg.py <inputdatafile>
# Example usage: spark-submit linreg.py yxlin.csv
#
#
import sys
import numpy as np
import fileinput
from numpy.linalg import inv
from pyspark import SparkContext
#function to compute X.XT
def generateA(Xi):
    """Return x_i * x_i^T for one observation.

    Xi is a sequence of feature values (strings or numbers).  A leading 1 is
    prepended for the intercept term, so the result is (d+1) x (d+1).
    """
    # Row vector [1, x_1, ..., x_d].
    row = np.insert(np.matrix(Xi, dtype=float), 0, 1, axis=1)
    col = row.T
    # Outer product x x^T.
    return np.dot(col, row)
#function to compute XY
def generateB(Yi, Xi):
    """Return x_i * y_i for one observation: a (d+1) x 1 column vector, with
    a leading 1 prepended to x_i for the intercept term."""
    row = np.insert(np.matrix(Xi, dtype=float), 0, 1, axis=1)
    y = np.matrix(Yi, dtype=float)
    return np.dot(row.T, y)
if __name__ == "__main__":
    # NOTE: this driver is Python 2 code (print statements); the helper
    # functions above are version-agnostic.
    if len(sys.argv) !=2:
        print >> sys.stderr, "Usage: linreg <datafile>"
        exit(-1)
    sc = SparkContext(appName="LinearRegression")
    # Input yx file has y_i as the first element of each line
    # and the remaining elements constitute x_i
    yxinputFile = sc.textFile(sys.argv[1])
    yxlines = yxinputFile.map(lambda line: line.split(','))
    yxfirstline = yxlines.first()
    # 1 (for y) + number of features = length of beta including intercept.
    yxlength = len(yxfirstline)
    #print "yxlength: ", yxlength
    # dummy floating point array for beta to illustrate desired output format
    beta = np.zeros(yxlength, dtype=float)
    #compute X.XT for each line
    A=yxlines.map(lambda line: generateA(line[1:]))
    #compute XY for each line
    B=yxlines.map(lambda line: generateB(line[0],line[1:]))
    #print A.first()
    #print B.first()
    A_sum=A.reduce(lambda Xi, Xj: np.add(Xi,Xj)) #summation of X.Xt
    B_sum=B.reduce(lambda Xi, Yi: np.add(Xi,Yi)) #summation of XY
    #print A_sum
    #print B_sum
    #Finally, compute beta using the formula A(inverse)*B
    # Ordinary least squares closed form: beta = (sum x x^T)^-1 (sum x y).
    beta=np.dot(np.linalg.inv(A_sum),B_sum)
    #save the output onto a text file
    np.savetxt('yxlinoutput.txt',beta)
    # print the linear regression coefficients in desired output format
    print "beta: "
    for coeff in beta:
        print coeff
    sc.stop()
| true |
2f388675cbbae8e4cae1dabafd9c642808cd3a42 | Python | Peng-YM/pymoo | /tests/test_interface.py | UTF-8 | 1,360 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | import unittest
import numpy as np
from pymoo.algorithms.so_genetic_algorithm import GA
from pymoo.interface import AskAndTell
class InterfaceTest(unittest.TestCase):
    """Integration test for pymoo's ask-and-tell interface.

    Minimises F = sum(x_i^2) subject to G = 1 - |x_0| <= 0 (i.e. |x_0| >= 1),
    so the feasible optimum has F == 1 — which the final assertion checks.
    """

    def test_ask_and_tell(self):
        # set the random seed for this test
        np.random.seed(1)

        # create the algorithm object to be used
        algorithm = GA(pop_size=100, eliminate_duplicates=True)

        # create the ask and tell interface object
        api = AskAndTell(algorithm, n_var=2, n_obj=1, n_constr=1, xl=-10, xu=10)

        # this loop runs always one step of the algorithm
        for gen in range(200):

            # ask the algorithm for values to be evaluated
            X = api.ask()

            # evaluate the values - here just some easy calculations
            F = np.sum(np.square(X), axis=1)[:, None]
            G = 1 - np.abs(X[:, 0])[:, None]

            # let the api objects know the objective and constraint values
            api.tell(F, G=G)

            print(api.get_population().get("F").min())

        # retrieve the results form the api - here the whole population of the algorithm
        X, F, CV = api.result(only_optimum=False, return_values_of=["X", "F", "CV"])

        # the whole population should be feasible and near the optimum F = 1
        self.assertTrue(np.allclose(CV, 0))
        self.assertTrue(np.allclose(F[:10], 1, atol=1.e-3))
if __name__ == '__main__':
unittest.main()
| true |
30c645a082fb9a188b40fa8f2726a5a3e5be0ed0 | Python | Kitware/vtk-examples | /src/Python/Tutorial/Tutorial_Step2.py | UTF-8 | 2,451 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
"""
=========================================================================
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
"""
# First access the VTK module (and any other needed modules) by importing them.
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkFiltersSources import vtkConeSource
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderer
)
def main(argv):
    """Build the classic VTK cone pipeline, attach a StartEvent observer to
    the renderer, and spin the camera through 360 one-degree steps — each
    Render() call fires the observer, which prints the camera position."""
    colors = vtkNamedColors()

    #
    # The pipeline creation is documented in Tutorial_Step1.
    #
    cone = vtkConeSource()
    cone.SetHeight(3.0)
    cone.SetRadius(1.0)
    cone.SetResolution(10)
    coneMapper = vtkPolyDataMapper()
    coneMapper.SetInputConnection(cone.GetOutputPort())
    coneActor = vtkActor()
    coneActor.SetMapper(coneMapper)
    coneActor.GetProperty().SetColor(colors.GetColor3d('MistyRose'))

    ren1 = vtkRenderer()
    ren1.AddActor(coneActor)
    ren1.SetBackground(colors.GetColor3d('MidnightBlue'))
    ren1.ResetCamera()

    renWin = vtkRenderWindow()
    renWin.AddRenderer(ren1)
    renWin.SetSize(300, 300)
    renWin.SetWindowName('Tutorial_Step2')

    # Here is where we setup the observer.
    mo1 = vtkMyCallback(ren1)
    ren1.AddObserver('StartEvent', mo1)

    #
    # Now we loop over 360 degrees and render the cone each time.
    #
    for i in range(0, 360):
        # Render the image.
        renWin.Render()
        # Rotate the active camera by one degree.
        ren1.GetActiveCamera().Azimuth(1)
class vtkMyCallback(object):
    """Observer invoked on the renderer's StartEvent; prints the active
    camera's position as ``(x, y, z)`` with two decimal places."""

    def __init__(self, renderer):
        # Renderer whose active camera is inspected on each event.
        self.renderer = renderer

    def __call__(self, caller, ev):
        x, y, z = self.renderer.GetActiveCamera().GetPosition()
        print('({:5.2f}, {:5.2f}, {:5.2f})'.format(x, y, z))
if __name__ == '__main__':
import sys
main(sys.argv)
| true |
4ff87c42b818777bd9558766a401cc4736df2111 | Python | slamice/invested | /trademanager/stocks/robinhood_stock.py | UTF-8 | 823 | 2.796875 | 3 | [] | no_license | class RobinHoodStock:
    def __init__(self, last_trade: dict):
        # Raw quote payload from the Robinhood API; parsed lazily by the
        # properties below.
        self.raw_stock_info = last_trade
    @property
    def code(self):
        """Ticker symbol (the payload's 'symbol' field), or None if absent."""
        return self.raw_stock_info.get('symbol')
    @property
    def adjusted_previous_close(self):
        """Payload's 'adjusted_previous_close' field, coerced to float."""
        return float(self.raw_stock_info.get('adjusted_previous_close'))
    @property
    def last_trade_price(self):
        """Payload's 'last_trade_price' field, coerced to float."""
        return float(self.raw_stock_info.get('last_trade_price'))
    @property
    def bid_price(self):
        """Payload's 'bid_price' field, coerced to float."""
        return float(self.raw_stock_info.get('bid_price'))
    @property
    def previous_close(self):
        """Payload's 'previous_close' field, coerced to float."""
        return float(self.raw_stock_info.get('previous_close'))
    @property
    def buying_price(self):
        # Alias of bid_price — reads the same 'bid_price' payload field.
        return float(self.raw_stock_info.get('bid_price'))
    def __str__(self):
        # e.g. "AAPL 123.45"
        return '{} {}'.format(self.code, self.buying_price)
| true |
e33555f71727656ff5a126923ca2d10f900cdb37 | Python | Ruzzan/Anime-Downloader | /anime_dl.py | UTF-8 | 2,358 | 2.796875 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
# import PySimpleGUI
import re, os, sys, time
ABS_PATH = os.path.abspath(__file__)
BASE_DIR = os.path.dirname(ABS_PATH)
OUTPUTS = os.path.join(BASE_DIR, 'Downloads')
def get_file_size(file_path):
    """Return the size of *file_path* in bytes."""
    return os.path.getsize(file_path)
anime_name_input = str(input("Anime Name:"))
episode_range = str(input("Episode num/range:"))
anime_name = anime_name_input.lower().replace(" ", "-")
if '-' in episode_range:
start = int(episode_range.split("-")[0])
end = int(episode_range.split("-")[-1])
else:
start = int(episode_range)
end = start
for episode_no in range(start, end+1):
site_url = f"http://www.anime1.com/watch/{anime_name}/episode-{episode_no}"
source = requests.get(site_url).text
status = requests.get(site_url).status_code
if status != 200:
print(f"\nNo anime named {anime_name.upper()} found.")
break
ANIME_FOLDER = os.path.join(OUTPUTS, str(anime_name))
os.makedirs(ANIME_FOLDER, exist_ok=True)
soup = BeautifulSoup(source, 'html.parser')
pattern = re.compile("file:.*")
result = re.findall(pattern, source)[0]
video_link = str(result).split('"')[1].replace(" ", "%20")
video_name = os.path.join(ANIME_FOLDER, f"{anime_name}-{episode_no}.mp4")
start_time = time.time()
with open(video_name, 'wb') as f:
response = requests.get(video_link, stream=True)
total_length = response.headers.get('content-length')
if total_length is None:
f.write(response.content)
else:
dl = 0
total_length = int(total_length)
print(f"Downloading {anime_name} episode {episode_no}")
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / total_length)
sys.stdout.write("\r[%s%s]" % ('█' * done, ' ' * (50 - done)))
sys.stdout.flush()
end_time = time.time()
total_time = end_time - start_time
print(f"\nDownloaded {video_name}")
downloaded_file_size = get_file_size(video_name)
file_size_mb = downloaded_file_size / 1024 / 1024
print(f"File Size: {file_size_mb:.4f} MB")
print(f"Download Time: {total_time:.4f} seconds.")
| true |
fcb46ece8b4be90574513a941251ec83d6988c08 | Python | idaohang/SoftGNSS-python | /geoFunctions/deg2dms.py | UTF-8 | 1,117 | 3.3125 | 3 | [] | no_license | import numpy as np
# deg2dms.m
def deg2dms(deg=None, *args, **kwargs):
    """Convert decimal degrees to packed deg/min/sec form.

    The dms encoding is: degrees * 100 + minutes + seconds / 100.
    Ported from Kai Borre's deg2dms.m (updated by Darius Plausinaitis).
    """
    # Work on the magnitude only; the sign is restored at the end.
    negative = deg < 0
    magnitude = -deg if negative else deg

    # Split into whole degrees, whole minutes and residual seconds.
    whole_deg = np.floor(magnitude)
    minutes_float = (magnitude - whole_deg) * 60
    whole_min = np.floor(minutes_float)
    seconds = (minutes_float - whole_min) * 60

    # Handle rounding overflow at the seconds/minutes boundaries.
    if seconds == 60.0:
        whole_min = whole_min + 1
        seconds = 0.0
    if whole_min == 60.0:
        whole_deg = whole_deg + 1
        whole_min = 0.0

    # Pack the three fields into a single number and restore the sign.
    packed = whole_deg * 100 + whole_min + seconds / 100
    return -packed if negative else packed
################### end deg2dms.m ################
| true |
8f60e5f61d87e20c14591933b91a55e898b8d445 | Python | SushiMaster8/Ahrens_S_RPS_Fall2020 | /gameComponents/winLose.py | UTF-8 | 935 | 3.40625 | 3 | [
"MIT"
] | permissive | from gameComponents import gameVars
#define a win/lose function and refer to it (invoke it) in our game loop
def winorlose(status):
    """Announce the round result and ask the player whether to play again.

    status: "won" when the player won, anything else counts as a loss.
    Resets the shared gameVars state on "Y", exits the program on "N".

    Bug fix: the original re-prompted only once on invalid input and then
    silently returned without acting on the answer; now it keeps prompting
    until a valid Y/N answer is given.
    """
    if status == "won":
        pre_message = "THERE'S NO WAY YOU WON, WHAT, HOW? YOU MUST BE CHEATING! ADMIT IT YOU CHEATER! I NEVER LOSE!!!!!"
    else:
        pre_message = "YOU LOST HAHAHAHAHA! YOU MUST BE SUCH A DISSAPOINTMENT TO YOUR FAMILY, PLAY ME AGAIN IF YOU FEEL LIKE LOSING!\n"
    print(pre_message + "WOULD YOU LIKE TO PLAY AGAIN?\n")
    choice = input("Y / N")
    while True:
        if choice == "Y" or choice == "y":
            # reset the game state and start over again
            gameVars.player_lives = 3
            gameVars.computer_lives = 3
            gameVars.player = False
            return
        elif choice == "N" or choice == "n":
            # exit message and quit
            print("AWWWW, TOO TOUGH? DON'T WORRY I UNDERSTAND, GO CRY ALONE YOU SORRY EXCUSE FOR A HUMAN.")
            exit()
        else:
            print("DID I STUTTER??? -- TYPE Y or N")
            choice = input("Y / N")
ff4498f415b0dffc113d8bac45ee1d841ad3661d | Python | milindgaikwad13/AWS-BigData | /PythonBootcamp/Lessons/8_Error_Handling/pylintTest.py | UTF-8 | 123 | 2.703125 | 3 | [] | no_license |
## pylint creates a report for all possible errors and missing details such as docstring
# Demo module for running pylint: the checker should flag the missing module
# docstring, the one-letter variable names, and the undefined name below.
a = 1
b = 2
print(a)
# NOTE: `B` is intentionally undefined (wrong capitalisation of `b`) so that
# pylint reports an undefined-variable error; do not "fix" it.
print(B)
05920dcbdea3e073a74045c99add145e62948dbf | Python | Kylmalcolm/Diabetes_Analysis | /app.py | UTF-8 | 4,201 | 2.96875 | 3 | [] | no_license | from flask import Flask, render_template, request, send_file
from flask_bootstrap import Bootstrap
import random
from model import diabetesPrediction
class User:
    """Patient record whose unknown vitals are filled with random values.

    When ``generate`` is falsy, the caller-supplied fields are used and the
    remaining vitals are randomised within plausible ranges; otherwise a
    fully random sample user is produced.  ``prediction`` is the output of
    the external diabetesPrediction model.
    """

    def __init__(self, name, age, weight, height, glucose, active, skin, generate):
        if generate == False:
            self.name = name
            self.weight = float(weight)
            self.height = float(height)
            self.age = int(age)
            self.BMI = self.calculateBMI()
            # Blood pressure is always randomised.
            self.bloodPressure = self.randomInRange(62, 122)
            # High-sugar users get the high glucose / low insulin ranges.
            # (Each line draws exactly one random value, keeping the draw
            # order glucose -> insulin -> active -> skin.)
            self.glucose = self.randomInRange(118, 199) if glucose else self.randomInRange(62, 117)
            self.insulin = self.randomInRange(0, 30.5) if glucose else self.randomInRange(31, 846)
            # Sporty users get the higher activity range.
            self.active = self.randomInRange(73, 122) if active else self.randomInRange(30, 72)
            # "Thick skinned" flag selects the skin-fold range.
            self.skin = self.randomInRange(0, 23) if skin else self.randomInRange(24, 99)
            self.diabetesPedigree = self.diabetesPedigreeCalculate()
            self.prediction = self.predict()
        else:
            self.generate()

    def diabetesPedigreeCalculate(self):
        """Random pedigree score within the dataset's observed range."""
        return self.randomInRange(0.078, 2.42)

    def calculateBMI(self):
        """Body-mass index: 703 * weight / height^2 (imperial constant 703
        suggests weight in lb and height in inches -- TODO confirm)."""
        return 703 * (float(self.weight) / (float(self.height) ** 2))

    def generate(self):
        """Fill every field with a random but plausible value."""
        self.name = "sample user"
        self.weight = float(self.randomInRange(120, 300))
        self.height = float(self.randomInRange(60, 96))
        self.age = int(self.randomInRange(12, 102))
        self.BMI = self.calculateBMI()
        self.glucose = self.randomInRange(70, 250)
        self.insulin = self.randomInRange(31, 846)
        self.active = self.randomInRange(30, 120)
        self.bloodPressure = self.randomInRange(62, 122)
        self.skin = self.randomInRange(20, 70)
        self.diabetesPedigree = self.diabetesPedigreeCalculate()
        self.prediction = self.predict()

    def predict(self):
        """Run the diabetes model on this user's vitals."""
        return diabetesPrediction(0, self.glucose, self.bloodPressure, self.skin,
                                  self.insulin, self.BMI, self.diabetesPedigree, self.age)

    def randomInRange(self, x, y):
        """Uniform random value in [x, y], rounded to 3 decimals."""
        return round(random.uniform(x, y), 3)
# Application wiring: Flask app instance with Bootstrap styling enabled.
app=Flask(__name__)
Bootstrap(app)
# Module-level scratch storage; not referenced by the routes below.
objects = {}
@app.route("/")
def index():
print("Running index function")
return render_template("index.html")
@app.route("/about")
def about():
return send_file("Resources/about_section.txt")
@app.route("/userUpload", methods=['GET','POST'])
def userUpload():
user = User(request.form.get('userName',''),
request.form.get('age',''),
request.form.get('weight',''),
request.form.get('height',''),
request.form.get('sugar',''),
request.form.get('active',''),
request.form.get('skinThickness',''), False)
return user.name +' Weight: ' + str(user.weight) + ' Height: ' + str(user.height) \
+ ' Glucose: ' + str(user.glucose) + ' Is Active: '+ str(user.active) \
+ ' Skin Thickness: ' + str(user.skin) \
+ ' Is Likely To Have Diabetes: ' + ('Yes' if (user.prediction > 0) else 'No')
@app.route("/userGenerate", methods=['GET','POST'])
def userGenerate():
user = User('',0,0,0,0,False,False,True)
rtn_msg = user.name +' Weight: ' + str(user.weight) + ' Height: ' + str(user.height) \
+ ' Glucose: ' + str(user.glucose) + ' Is Active: '+ str(user.active) \
+ ' Skin Thickness: ' + str(user.skin) \
+ ' Is Likely To Have Diabetes: ' + ('Yes' if (user.prediction > 0) else 'No')
return {'msg':rtn_msg,
'name': user.name,
'weight': user.weight,
'height': user.height,
'age': user.age,
'BMI': user.BMI,
'glucose': user.glucose,
'active': user.active,
'skinthick': user.skin,
'prediction': float(user.prediction[0])}
if __name__ == "__main__":
app.run(debug=True)
| true |
0e93d470b889c0f808fc70354185d5ff60759527 | Python | upple/BOJ | /src/2000/2294.py3.py | UTF-8 | 444 | 2.8125 | 3 | [
"MIT"
] | permissive | import queue
# BOJ 2294: minimum number of coins that sum to k (BFS over remaining value).
n, k=map(int, input().split())
# v[x] is truthy once remaining value x has been enqueued.
v=[False for i in range(k+1)]
# The n available coin denominations.
m=[int(input()) for i in range(n)]
Q = queue.Queue()
v[k]=1
cnt=0
Q.put(k)
# Breadth-first search: after draining level `cnt`, every enqueued value is
# reachable by spending exactly cnt coins.
while Q.qsize():
    size=Q.qsize()
    # Drain exactly one BFS level before incrementing the coin count.
    while size:
        size-=1
        cur=Q.get()
        if cur==0:
            # Reached zero: cnt coins suffice.
            print(cnt)
            exit(0)
        for i in m:
            if cur-i>=0 and not v[cur-i]:
                v[cur-i]=True
                Q.put(cur-i)
    cnt+=1
# Queue exhausted without reaching 0: k cannot be formed from the coins.
print(-1)
| true |
3e19f2f00be0bd2d8039ddf980f4ba4b0f62cf86 | Python | ravanagagan/DataStructures-Python | /DataStructures/3 - HashMap/Hash Map- 1st adn 2nd Exercise.py | UTF-8 | 1,280 | 4.4375 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# nyc_weather.csv contains new york city weather for first few days in the month of January. Write a program that can answer following,
# What was the average temperature in first week of Jan
# What was the maximum temperature in first 10 days of Jan
# Figure out data structure that is best for this problem
# Exercise 1: a plain list suffices, because the questions are positional
# (first 7 days, first 10 days) rather than keyed by date.
data = []
with open('nyc_weather.csv', 'r') as f:
    for line in f:
        tokens = line.split(',')
        # Column 1 holds the temperature; column 0 holds the date label.
        # NOTE(review): assumes the CSV has no header row -- confirm.
        data.append(float(tokens[1]))
print(data)
# What was the average temperature in first week of Jan
print(sum(data[0:7])/(len(data[0:7])))
# What was the maximum temperature in first 10 days of Jan
print(data[0:10])
print(max(data[0:10]))
# (2) nyc_weather.csv contains new york city weather for first few days in the month of January. Write a program that can answer following,
# (a) What was the temperature on Jan 9?
# (b) What was the temperature on Jan 4? Figure out data structure that is best for this problem
# Exercise 2: date-keyed lookups call for a dict (hash map) instead.
data = {}
with open('nyc_weather.csv', 'r') as f:
    for line in f:
        tokens = line.split(',')
        data[tokens[0]] = float(tokens[1])
print(data)
# #### What was the temperature on Jan 9
print(data['Jan-09'])
# #### What was the temperature on Jan 4
print(data['Jan-04'])
| true |
ddd01cb9447cfcd3824c1d4a19bbb2c089c7a602 | Python | ekim197711/python-tutorial | /variablesAndDatatypes/my_conversion.py | UTF-8 | 258 | 3.078125 | 3 | [] | no_license |
# Demonstration of Python's built-in type-conversion functions.
my_float = float(5)
print(my_float)
my_int = int(10.6)  # int() truncates toward zero, so this prints 10
print(my_int)
my_string = str(25)
print("" + str(25))
# print(my_string)
my_set = set([100,200,300,100])  # duplicates collapse; iteration order is arbitrary
print(my_set)
my_tuple = tuple({15,26,37})  # element order comes from the set, not the literal
print(my_tuple)
my_list= list('hello')  # a string converts to a list of its characters
print(my_list)
| true |
ec8fbdb7c2e657c868b5de05b92dfa1dd4082585 | Python | ralfleistad/Kattis | /baconeggsandspam.py | UTF-8 | 731 | 3.4375 | 3 | [] | no_license | import sys
import math
### Read list of numbers into a list and convert to integers
# nums = list(map(int, input().split(' ')))
### Sort dictionary by key
# for i in sorted (key_value.keys()) :
while True:
    # Each test case starts with the number of people; 0 terminates input.
    n = int(input())
    if n == 0:
        break
    # Map each dish to the list of people who can prepare it.
    repo = {}
    for _ in range(n):
        tokens = input().split(' ')
        name = ''.join(tokens[:1])
        for word in tokens[1:]:
            repo.setdefault(word, []).append(name)
    # One line per dish (alphabetical), every name followed by a space.
    for dish in sorted(repo.keys()):
        print(' '.join([dish] + sorted(repo[dish])) + ' ')
    # Blank line between test cases.
    print()
| true |
f335c25867e03f6d2412241be96876f3e6a1b522 | Python | mysqlplus163/aboutPython | /20170213.py | UTF-8 | 350 | 3.015625 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Q1mi"
# Date: 2017/2/13
"""
如何在Pycharm中执行某个文件的某一部分代码
"""
# print("Start...")
#
# name = input("Please input your name: ")
# print("Hello ", name)
#
# print("End...")
exit_flag = False
a = 1
l = [1, ]
while not exit_flag:
l.append(a)
a = 2
| true |
96d56cae1b104c508440692a12150d23ef3c834d | Python | lukas-ke/faint-graphics-editor | /help/example_py/scripting_intro_squares.py | UTF-8 | 165 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | from faint import line
#start
for i in range(100):
line( (0, i * 10), (640, i * 10) ) # Horizontal lines
line( (i * 10, 0), (i * 10, 480) ) # Vertical lines
| true |
3573d527254520d07bc526f4a5771e30225da24e | Python | estercs/snakegame | /snake.py | UTF-8 | 1,067 | 3.40625 | 3 | [
"MIT"
] | permissive | import pygame
import sys
# Initial snake position in screen coordinates.
x_cord= 300
y_cord= 200
# Current per-frame movement; updated by arrow-key events in the main loop.
x_change= 0
y_change= 0
class snake():
    """A 10x10 block that can draw itself onto a pygame surface."""

    def __init__(self):
        # Fixed block dimensions in pixels.
        self.height = 10
        self.width = 10

    def draw(self, screen, x, y):
        """Draw the block as a blue rectangle at (x, y) on *screen*."""
        self.shape = pygame.Rect((x, y), (self.height, self.width))
        pygame.draw.rect(screen, pygame.Color("blue"), self.shape)
if __name__ == "__main__":
pygame.init()
clock = pygame.time.Clock()
pygame.display.set_caption ("cobrinha")
screen= pygame.display.set_mode((600, 400))
snake = snake()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
x_change= 0
y_change= -10
if event.key == pygame.K_DOWN:
x_change= 0
y_change= +10
if event.key == pygame.K_LEFT:
x_change= -10
y_change= 0
if event.key == pygame.K_RIGHT:
x_change= +10
y_change= 0
x_cord += x_change
y_cord += y_change
snake.draw(screen, x_cord, y_cord)
pygame.display.update()
screen.fill(pygame.Color("black"))
clock.tick(15)
| true |
e34f08d1681af47448cf4c5f8d675f3ac9eaa9eb | Python | mauriziokovacic/ACME | /ACME/math/vec2quat.py | UTF-8 | 399 | 2.796875 | 3 | [
"MIT"
] | permissive | from .cart2homo import *
def vec2quat(V, dim=-1):
    """
    Converts a vector in quaternion form

    Parameters
    ----------
    V : Tensor
        a (3,) or (N,3,) vector tensor
    dim : int (optional)
        the dimension along which the vectors should be converted

    Returns
    -------
    Tensor
        the (4,) or (N,4,) quaternion tensor
    """
    # A pure quaternion has zero scalar part: delegate to cart2homo with
    # w=0 so the extra component appended along `dim` is the zero scalar.
    # NOTE(review): the component order (w first vs. last) is decided by
    # cart2homo -- confirm against its implementation.
    return cart2homo(V, w=0, dim=dim)
| true |
70bb7193962cccc29612b994e1cbb2bc12b14033 | Python | gvnaakhilsurya/20186087_CSPP-1 | /cspp1-assignments/m22/assignment1/assignment1/test.py | UTF-8 | 243 | 2.96875 | 3 | [] | no_license |
def main():
    """Read a line count from stdin, then echo that many lines back out."""
    count = int(input())
    buffered = [input() for _ in range(count)]
    for line in buffered:
        print(line)

if __name__ == '__main__':
    main()
# (Stray artifact from the original file listing, kept as comments so the
# module remains valid Python.)
# read_input.py
# Displaying read_input.py.
fe047a93a149d7a0f9adf9945c0879b9bde200b7 | Python | islamariful/playfair-cipher | /playfairencription.py | UTF-8 | 3,104 | 3.859375 | 4 | [
"MIT"
] | permissive | #Initializing variables a and b for use in recurson in #filling other characters
# `a` flags whether 'I' has been added during the alphabet fill below;
# `b` indexes the key stream while filling the 5x5 cipher matrix.
a = b = 0
# Read the key and plaintext; spaces are stripped and everything is
# upper-cased, since the Playfair cipher works on a 25-letter alphabet.
key = input("Enter key: ")
key = key.replace(" ", "")
key = key.upper()
plaintext = input("Plain text: ")
plaintext = plaintext.replace(" ", "")
plaintext = plaintext.upper()
#The function matrix that creates a nested list recursively
#The nested list mimics a 5 by 5 grid, where each element in the master list
#is a list containing 5 elements itself
def matrix(x, y, initial):
    """Return a y-row by x-column nested list filled with *initial*."""
    return [[initial] * x for _ in range(y)]
#Note to users: you can see the final generated matrix by writing in:
#print(ciphermatrix)
#at the end of the code.
# Seed the grid with the key's letters, first occurrence only; 'J' is
# folded into 'I' (classic 25-letter Playfair alphabet).
keyintomatrix=list()
for c in key:
    if c not in keyintomatrix:
        if c=='J':
            keyintomatrix.append('I')
        else:
            keyintomatrix.append(c)
# Append the unused letters A-Z (ASCII 65-90), again merging I and J.
for i in range(65,91):
    if chr(i) not in keyintomatrix:
        if i==73 and chr(74) not in keyintomatrix:
            # 'J' (chr(74)) is never stored, so the second check is always
            # true and 'I' is appended whenever it is still missing.
            keyintomatrix.append("I")
            a=1
        elif a==0 and i==73 or i==74:
            # Skips 'J'.  NOTE(review): this parses as
            # (a==0 and i==73) or i==74; the i==73 half is unreachable
            # given the branch above -- confirm before simplifying.
            pass
        else:
            keyintomatrix.append(chr(i))
# Copy the 25-letter stream row-by-row into the 5x5 cipher matrix.
ciphermatrix=matrix(5,5,0)
for i in range(0,5):
    for j in range(0,5):
        ciphermatrix[i][j]=keyintomatrix[b]
        b+=1
#indexlocator is a function that locates the index of a certain character
def indexlocator(x):
    """Return [row, col] of character *x* in the global ciphermatrix.

    'J' is looked up as 'I' because the 5x5 Playfair grid merges I/J.
    Returns an empty list if the character is absent from the grid.
    """
    lst = list()
    if x == 'J':
        # Bug fix: this was `x == 'I'` (a no-op comparison), so 'J' was
        # never found in the grid; it must be an assignment.
        x = 'I'
    for i, row in enumerate(ciphermatrix):
        for k, cell in enumerate(row):
            if x == cell:
                lst.append(i)
                lst.append(k)
    return lst
#encription takes in the plaintext and encripts it using the row, rectangle, or column method
#for the playfair cipher encryption method
def encription(text):
    # Playfair digraph encryption against the global `ciphermatrix`.
    # First the text is prepared: identical letters within a pair are
    # separated by 'X' and odd-length text is padded with a trailing 'X';
    # then the text is encrypted two letters at a time.
    i=0
    for s in range(0,len(text)+1,2):
        if s<len(text)-1:
            if text[s]==text[s+1]:
                # Insert 'X' between two identical letters of a pair.
                text=text[:s+1]+'X'+text[s+1:]
    if len(text)%2!=0:
        # Pad odd-length text with a trailing 'X'.
        text=text[:]+'X'
    print("Cipher Text: ", end=' ')
    while i < len(text):
        # Grid coordinates [row, col] of both letters of the digraph.
        # NOTE(review): a character absent from the grid yields an empty
        # list here and raises IndexError below -- confirm that the input
        # is letters only.
        lst = list()
        lst = indexlocator(text[i])
        lon = list()
        lon = indexlocator(text[i + 1])
        if lst[1] == lon[1]:
            # Same column: take the letter below each (wrapping).
            print(f"{ciphermatrix[(lst[0] + 1) %5][lst[1]]}{ciphermatrix[(lon[0] + 1) %5][lon[1]]}", end=' ')
        elif lst[0] == lon[0]:
            # Same row: take the letter to the right of each (wrapping).
            print(f"{ciphermatrix[lst[0]][(lst[1] + 1) %5]}{ciphermatrix[lon[0]][(lon[1] + 1) %5]}",end=' ')
        else:
            # Rectangle rule: swap the column coordinates.
            print(f"{ciphermatrix[lst[0]][lon[1]]}{ciphermatrix[lon[0]][lst[1]]}",end=' ')
        i += 2
encription(plaintext)
#read more at https://en.wikipedia.org/wiki/Playfair_cipher
e1bcfa2677d03df66531d02ec4fac76610e1aab0 | Python | mathildaMa/concourse-http-resource | /test/test_in.py | UTF-8 | 1,319 | 2.875 | 3 | [
"MIT"
] | permissive | from helpers import cmd
def test_in(httpbin, tmpdir):
    """Test downloading versioned file."""
    # Template URI: the resource expands {version} before fetching.
    source = {
        'uri': httpbin + '/range/{version}',
    }
    in_dir = tmpdir.mkdir('work_dir')
    # Run the concourse `in` script against the local httpbin fixture.
    output = cmd('in', source, [str(in_dir)], {'version': '9'})
    assert output['version'] == {'version': '9'}
    assert {'name': 'url', 'value': httpbin + '/range/9'} in output['metadata']
    assert {'name': 'Content-Type', 'value': 'application/octet-stream'} in output['metadata']
    # /range/9 returns exactly 9 bytes; the default filename is the version.
    assert in_dir.join('9').exists()
    assert len(in_dir.join('9').read()) == 9
    # The resource also records the fetched version in a `version` file.
    assert in_dir.join('version').exists()
    assert in_dir.join('version').read() == '9'
def test_in_filename(httpbin, tmpdir):
    """Test downloading versioned file with predetermined filename."""
    # `filename` overrides the default version-named output file.
    source = {
        'uri': httpbin + '/range/{version}',
        'filename': 'filename_{version}',
    }
    in_dir = tmpdir.mkdir('work_dir')
    output = cmd('in', source, [str(in_dir)], {'version': '9'})
    assert output['version'] == {'version': '9'}
    assert {'name': 'url', 'value': httpbin + '/range/9'} in output['metadata']
    assert {'name': 'Content-Type', 'value': 'application/octet-stream'} in output['metadata']
    # The {version} placeholder is expanded inside the filename as well.
    assert in_dir.join('filename_9').exists()
    assert len(in_dir.join('filename_9').read()) == 9
| true |
48b8005a978fa796702bd43c7b8186c64d8aa6ba | Python | jadecodespy/python | /Code/COTD5.py | UTF-8 | 261 | 3.140625 | 3 | [] | no_license |
def comb_srt(first_word, second_word):
    """Interleave *second_word* into *first_word* letter by letter.

    comb_srt("Lion", "Tiger") -> "LTiiogner".  Characters of the second
    word are inserted at positions 1, 3, 5, ...; once the insert position
    passes the end of the list, remaining characters are appended.
    """
    string_one = list(first_word)
    string_two = list(second_word)
    comb = 1
    for ch in string_two:
        string_one.insert(comb, ch)
        # Bug fix: was `a=+ 2` (assigning +2 to an unused variable), so the
        # insert position never advanced and the result was wrong.
        comb += 2
    return "".join(string_one)

# Bug fix: the arguments were the undefined names Lion and Tiger, which
# raised NameError; they must be string literals.
print(comb_srt("Lion", "Tiger"))
6fc7fe6e4fc9c838201c27b637c0f56b378bae3a | Python | alexhsamuel/ntab | /ntab/lib/memo.py | UTF-8 | 392 | 2.546875 | 3 | [
"MIT"
] | permissive | import functools
#-------------------------------------------------------------------------------
def lazy_property(fn):
    """Property decorator that computes *fn* once per instance.

    The first access stores the result in the instance __dict__ under the
    function's name; later accesses return the cached value without
    calling *fn* again.
    """
    attr = fn.__name__
    @functools.wraps(fn)
    def getter(self):
        cache = self.__dict__
        if attr not in cache:
            cache[attr] = fn(self)
        return cache[attr]
    return property(getter)
| true |
2558bf608fea9c623866b7e55b87adb4df999c22 | Python | ali-sefidmouy/HamrahChallenge | /academy/academyapp/views.py | UTF-8 | 783 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | from django.db import models
from django.http import JsonResponse
from rest_framework import status
from rest_framework.decorators import api_view
def sum(a, b):
    # Coerce both query-string values to int before adding; raises
    # ValueError for non-integer input (handled by the view below).
    # NOTE(review): this shadows the builtin `sum` within this module.
    return int(a) + int(b)
@api_view(['GET',])
def SumView(request):
    """Return {"sum": a+b} for integer query parameters ``a`` and ``b``;
    missing or non-integer parameters yield a 400 JSON error."""
    if request.method != 'GET':
        # Non-GET verbs are already rejected by @api_view; nothing to do.
        return None
    a = request.query_params.get('a', None)
    b = request.query_params.get('b', None)
    if a is None or b is None:
        return JsonResponse({"error": "a or b is undefined!"}, status=status.HTTP_400_BAD_REQUEST)
    try:
        return JsonResponse({"sum": sum(a, b)}, status=status.HTTP_200_OK)
    except ValueError:
        return JsonResponse({"error": "Invalid input! Please provide integer number for a & b"}, status=status.HTTP_400_BAD_REQUEST)
| true |
d28b2f608d0743b65fd7e9dd9409bf8a6e626612 | Python | bokveizen/leetcode | /1022_Sum of Root To Leaf Binary Numbers.py | UTF-8 | 642 | 3.328125 | 3 | [] | no_license | # https://leetcode-cn.com/problems/sum-of-root-to-leaf-binary-numbers/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sumRootToLeaf(self, root: "TreeNode") -> int:
        """Sum the binary numbers spelled by every root-to-leaf path.

        Each node holds a bit (0/1); a path is read as a binary number
        with the most significant bit at the root.

        Bug fix: the TreeNode annotation is quoted because the TreeNode
        class above is commented out, so evaluating the bare name at
        import time raised NameError.
        """
        ans = 0

        def dfs(node, acc):
            # Depth-first walk carrying the path value accumulated so far.
            if node:
                acc = (acc << 1) + node.val
                if not node.left and not node.right:  # leaf: commit the path
                    nonlocal ans
                    ans += acc
                else:
                    dfs(node.left, acc)
                    dfs(node.right, acc)

        dfs(root, 0)
        return ans
| true |
89ab7a55b709e8802405dba26e9a6c08787463b8 | Python | ramakrishna00/py-coding | /cross_bridge.py | UTF-8 | 351 | 2.84375 | 3 | [] | no_license | def get_time(a, b, c, d):
return a+b+c+d+min(a,b,c,d)
class GetTime{
public static int main(String args[]){
Scanner sc = new Scanner(System.in);
int a = sc.nextInt();
int b = sc.nextInt();
int c = sc.nextInt();
int d = sc.nextInt();
return a+b+c+d+Math.min(a,Math.min(b,Math.min(c,d)));
}
} | true |
f873e605721b22cf4ae07aaa28c89aad4e2f1230 | Python | 3StarAnchovy/PythonTest | /test18.py | UTF-8 | 144 | 3 | 3 | [] | no_license | score = list(map(int, input().split()))
sum = 0
for i in score:
sum += i
avg = sum/len(score)
print(int(avg))
#print(sum(score)/len(score)) | true |
961f253507f72d9a7aefc0b6cafd3eb169242b6a | Python | Ast3risk-ops/My-Stuff | /My_website.py | UTF-8 | 355 | 3 | 3 | [] | no_license | # A simple web application.
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
return render_template('index.html')
@app.route("/hello/<name>")
def greet(name='Stranger'):
return render_template("greeting.html", name=name)
@app.route("/slash")
def slash():
return render_template("slash.html")
| true |
39ae64169b222e579f6bf2763b557b6ab31e0a2d | Python | amiribr/airLUSI | /src/Rayleigh_direct_transmittance.py | UTF-8 | 3,460 | 2.625 | 3 | [] | no_license | # Amir Ibrahim :: amir.ibrahim@nasa.gov :: April 2020
# These are imports necessary to run the script
import os
import re
import glob
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import numpy.matlib as npm
from scipy.interpolate import interp1d
from matplotlib.legend_handler import HandlerLine2D
import time
def BodhaineROT(Ps = 1013.25, Ts = 288.15, CO2 = 360, λ = np.arange(250,890,5), z = 0, ϕ = 45):
    """Rayleigh optical thickness following Bodhaine et al. (1999).

    Parameters
    ----------
    Ps : pressure in mbar
    Ts : temperature in K
    CO2 : CO2 concentration in ppm
    λ : wavelengths in nm
    z : altitude in meters
    ϕ : latitude in degrees

    Returns
    -------
    λ (nm), τr Rayleigh optical thickness, σ_air scattering cross section,
    ρpol depolarization factor.
    """
    # Wavelength in micrometers as a column vector, so every spectral
    # quantity broadcasts to shape (len(λ), 1).
    lam = (λ / 1e3)[:, np.newaxis]

    # Constants from Bodhaine et al., 1999.
    T0 = 273.15                          # reference temperature (K)
    ma = 15.0556 * CO2 / 1e6 + 28.9595   # molecular weight of dry air vs CO2
    A = 6.0221367e23                     # Avogadro's number (molecules/mol)
    mmr_N2, mmr_O2, mmr_Ar = 78.084, 20.946, 0.934

    # Refractive index of air (288.15 K, 1013.25 mb), corrected for CO2.
    n300_1 = 1e-8*(8060.51 + (2480990/(132.274 - 1/(lam**2))) + (17455.7/(39.32957 - 1/(lam**2))))
    n = n300_1*(1 + 0.54*(CO2*1e-6 - 0.0003)) + 1

    # Spectral King factors of the major constituents.
    F_N2 = 1.034 + 3.17e-4 * (1/(lam**2))
    F_O2 = 1.096 + 1.385e-3 * (1/(lam**2)) + 1.448e-4 * (1/(lam**4))
    F_Ar = 1.00
    F_CO2 = 1.15
    F_air = (78.084*F_N2 + 20.946*F_O2 + 0.934*F_Ar + CO2*1e-4*F_CO2) / (mmr_N2 + mmr_O2 + mmr_Ar + CO2*1e-4)

    # Molecular number density scaled to the layer temperature.
    Ns = 1e-3*(A/22.4141)*(T0/Ts)

    # Rayleigh scattering cross section per molecule.
    σ_air = ((24*np.pi**3*(n**2 - 1)**2) / ((lam*1e-4)**4*Ns**2*(n**2 + 2)**2)) * F_air

    # Gravity at latitude ϕ and altitude z.
    cos2ϕ = np.cos(2*np.deg2rad(ϕ))
    g0 = 980.6160*(1 - 0.0026373*cos2ϕ) + 0.0000059*cos2ϕ**2
    g = g0 - (3.085462e-4 + 2.27e-7*cos2ϕ)*z \
        + (7.254e-11 + 1e-13*cos2ϕ)*z**2 - \
        (1.517e-17 + 6e-20*cos2ϕ)*z**3

    # Column optical thickness above pressure level Ps.
    τr = σ_air * (1e3*Ps*A)/(ma*g)
    # Depolarization factor implied by the King factor.
    ρpol = 6*(F_air - 1)/(7*F_air + 3)
    return λ, τr, σ_air, ρpol
### Calculate the Rayleigh optical depth at the sea level
λ, τr_sl, _, _ = BodhaineROT()
# Read in atmosphere profile.  As used below, the first three columns are
# altitude (km), pressure (mbar) and temperature (K) -- presumably a
# McClatchey-style standard-atmosphere table; confirm against the file.
ProfPath = '../data/atmmod/mcclams.dat'
profile = np.loadtxt(ProfPath)
P = profile[:,1]
T = profile[:,2]
H = profile[:,0]
altitude = 21e3 # in meters
# Interpolate pressure/temperature at the aircraft altitude (profile in km).
P_alt = interp1d(H, P)(altitude/1e3)
T_alt = interp1d(H, T)(altitude/1e3)
### Calculate the Rayleigh optical depth at the ER-s aircraft altitude of 21 km
## if you know P_alt (pressure in mbar at aircraft altitude) directly from the aircraft
# then just plug it in. Same for T_alt (temperature in Kelvin at aircraft altitude)
λ, τr_alt, _, _ = BodhaineROT(Ps=P_alt, Ts=T_alt, z=altitude)
### Now we can calculate the direct transmittance of the Rayleigh atmosphere
# above the aircraft given the lunar zenith and view zenith
lunar_zenith = 90 - 65
T_ray = np.exp(-τr_alt/np.cos(np.deg2rad(lunar_zenith)))
| true |
5b1b315a08b6e9afa64074b2a61509d144f82015 | Python | ryokugyu/Finding-Sister-City-with-NLP-approach | /algo/skcriteriaVersion.py | UTF-8 | 1,693 | 2.75 | 3 | [] | no_license | import sys
import numpy as np
import math
# Minimize for items with positive weights, maximize for items with negative weights
from skcriteria import Data, MIN, MAX
from skcriteria.madm import closeness
import sharedFormulas as form
from collections import OrderedDict
# Create a dictionary from the city_with_data file
city_dict = form.make_city_dict("city_with_data.csv")
# Get the user's input (reference city name plus per-criterion weights)
city_name, weights = form.get_user_input(city_dict)
# Get the corresponding input city
input_city = city_dict[city_name]
# Calculate distances and absval differences from input city
distance_dict = form.calc_city_dict_distance(city_dict, input_city, True)
# Normalize the dictionary
normalized_dict = form.normalize_city_dict(distance_dict)
# Normalize the weights
criteria, weights = form.normalize_weights(weights)
# NOTE: THIS SHOULD BE UPDATED IF POSSIBLE
# Split the 4 weights into eight criteria: the 2nd weight is spread over
# four features and the 4th over two, so the totals are preserved.
criteria_8 = [criteria[0], criteria[1], criteria[1], criteria[1], criteria[1], criteria[2], criteria[3], criteria[3]]
weights_8 = [weights[0], weights[1] / 4, weights[1] / 4, weights[1] / 4, weights[1] / 4, weights[2], weights[3] / 2, weights[3] / 2]
# Print candidate cities whose first normalized feature is close to 1
# (but not the input city itself, which would be exactly 1.0).
for city in normalized_dict:
    if normalized_dict[city][0] > .80 and normalized_dict[city][0] < 1.0:
        print (city, distance_dict[city])
# Perform analysis (TOPSIS ranking, currently disabled):
# data = Data(list(distance_dict.values()), criteria_8, weights_8, anames=list(distance_dict.keys()))
# print( "Normalized weights: ", str(data.weights))
# print( "Criteria: ", data.criteria)
# dm = closeness.TOPSIS()
# dec = dm.decide(data)
# print( dec.e_.ideal)
# for i in dec.rank_[:10]:
#     print( list(distance_dict.keys())[i], list( distance_dict.values() )[i] )
| true |
d69e4b13b28c78134201bdb06fab7ab3130812d6 | Python | thorbenwiese/ML | /Assignment3/assignment3.py | UTF-8 | 13,678 | 3.34375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import scipy.io as sio
from sklearn.neighbors import KNeighborsClassifier as classifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix
'''
Assignment 1
Generated datasets based on known distributions are often the best way to test and understand
new algorithms. Numpy offers a wide range of functions to generate and work with random
numbers.
1a. Read the documentation for the numpy.random functions.
Create arrays of n ∈ [100, 1000, 10000, 100 000] random numbers with uniform distribution.
Plot the raw data, then generate and plot histograms with 10 bins. How do the mean,
minimum and maximum values of the bins (occupation counts) behave?
'''
def uniform(k):
    """Draw *k* samples from the uniform distribution on [0, 1)."""
    return np.random.uniform(0.0, 1.0, k)
def plot(func, input_set, *args):
    """For each sample size in *input_set*, generate data with *func* and
    show a histogram, a raw line plot and a scatter plot side by side.
    Extra *args* are forwarded to *func* unchanged."""
    fig = plt.figure(figsize=(10, 10))
    fig.canvas.set_window_title(func.__name__)
    # One outer grid cell per sample size, arranged in two columns.
    outer = gridspec.GridSpec(len(input_set)//2+1, 2, wspace=0.2, hspace=0.2)
    for i in range(len(input_set)):
        # Split each cell into histogram / raw / scatter panels.
        inner = gridspec.GridSpecFromSubplotSpec(1, 3,
                subplot_spec=outer[i], wspace=0.1, hspace=0.1)
        s= func(input_set[i], *args)
        title = input_set[i]
        ax = plt.Subplot(fig, inner[0])
        ax.set_title('n = {0:d}'.format(title))
        ax.hist(s, bins=10)
        fig.add_subplot(ax)
        ax1 = plt.Subplot(fig, inner[1])
        #ax1.set_title('k = {0:d}'.format(k))
        ax1.plot(s)
        fig.add_subplot(ax1)
        ax2 = plt.Subplot(fig, inner[2])
        #ax2.set_title('k = {0:d}'.format(k))
        ax2.scatter(s, range(len(s)))
        fig.add_subplot(ax2)
# plot(uniform, ([100, 1000, 10000, 100000]))
'''
1b. Create random numbers from a Gaussian distribution with mean μ and variance σ2. Plot
the raw data, then generate and plot histograms.
'''
def gauss(k, args):
    """Draw *k* Gaussian samples; *args* carries (mu, sigma)."""
    return np.random.normal(args[0], args[1], k)
# plot(gauss, [100, 1000, 10000, 100000], (0, 0.1))
'''
1c. As before, but using the Binomial distribution with parameters n and p.
'''
def binomial(k, args):
    """Draw *k* samples from Binomial(n, p); *args* carries (n, p)."""
    return np.random.binomial(args[0], args[1], k)
'''
1d. Maybe combining multiple random numbers is even better than using single ones?
Use numpy to generate new random numbers from a sum of individual numbers,
s_i = sum_{j=1}^{M} r_j, where the r_j are generated from a uniform distribution.
Plot scatter plots and histograms of the resulting data sets for M ∈ [2, 3, 5, 10, 20].
'''
def individual(k, args):
    """Elementwise sum of *k* independent uniform[0,1) draws of shape *args*."""
    total = np.zeros(args)
    for _ in range(k):
        total = total + np.random.uniform(low=0.0, high=1.0, size=args)
    return total
def individual2(k, args):
    """Like individual(), but each draw is sorted before accumulation."""
    total = np.zeros(args)
    for _ in range(k):
        total = total + np.sort(np.random.uniform(low=0.0, high=1.0, size=args))
    return total
# Which version is correct?
# Version 1: the uniform draws are summed unsorted, so the sums approximate
#            a normal distribution.
# Version 2: each array is sorted before summing, so a flat (uniform-looking)
#            histogram may emerge faster -- compare uniform with k=20000
#            against individual with n=20 (i.e. 20 draws of k=1000).
'''
1e. Generate random numbers with a uniform distribution in a circle of radius r.
(Recent versions of numpy actually have a function for this, but the goal here
is to understand the issue first and then to come up with your own solution.)
'''
def plotRandomCircle(radius):
    """Scatter 1000 random points lying ON the circle of the given radius."""
    angles = [np.random.uniform(0, 1) * 2 * np.pi for _ in range(1000)]
    x = [np.cos(a) * radius for a in angles]
    y = [np.sin(a) * radius for a in angles]
    fig = plt.figure()
    plt.scatter(x, y)
    plt.axes().set_aspect('equal', 'datalim')
    plt.title('Circle of random numbers with radius ' + str(radius))
    fig.canvas.set_window_title('circle')
def plotRandomCircle2(radius):
    """Scatter 1000 random points INSIDE the circle, using a uniformly
    random scale factor for the radius (this is denser near the centre)."""
    x = []
    y = []
    for _ in range(1000):
        # Keep the original draw order: angle first, then the radius factor.
        a = np.random.uniform(0, 1) * 2 * np.pi
        r = np.random.uniform(0, 1)
        x.append(np.cos(a) * radius * r)
        y.append(np.sin(a) * radius * r)
    fig = plt.figure()
    plt.scatter(x, y)
    plt.axes().set_aspect('equal', 'datalim')
    plt.title('Circle of random numbers with radius ' + str(radius))
    fig.canvas.set_window_title('circle')
# # On the radius vs. within the radius -> points on the circle's outline
# # versus points anywhere inside the circle.
'''
Assignment 2
Load the data from Adot.mat. Each column of matrix X represents on data point.
2a. Use the function scipy.io.loadmat to parse and load the Matlab/Octave.mat
data file, then access the array(s) inside the data structures.
'''
def loadMatFile():
    """Load Adot.mat, echo its metadata, and return the matrix X."""
    contents = sio.loadmat('Adot.mat')
    for label, key in (('X: ', 'X'),
                       ('Version: ', '__version__'),
                       ('Header: ', '__header__'),
                       ('Globals: ', '__globals__')):
        print(label, contents[key])
    return contents['X']
'''
2b. Create a numpy matrix for the linear mapping V :
theta = pi/3
V = [[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]
Apply the linear mapping on X to get Y = V X. Plot both X and Y in the same
figure. What does the linear mapping V do?
2c. Now apply the transpose of the linear mapping on Y to get Z = V^t*Y.
Plot Z and describe what the linear mapping V^t*V does.
2d. What do the linear mappings D1 = [[2, 0], [0, 2]] and D2 = [[2, 0], [0, 1]]
do? Apply them on X and plot the results.
2e. What does the linear mapping A = V t ∗ D2 ∗ V do? Apply it on X and plot
the result.
'''
def createLinearMapping(X):
    """Apply the assignment-2 linear mappings (rotation V, its transpose,
    scalings D1/D2 and the composite A = Vt*D2*V) to the 2xN data X and
    plot each result in its own figure."""
    # b)
    theta = np.pi / 3
    # 2x2 rotation matrix for angle theta.
    V = [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]
    Vt = np.transpose(V)
    Y = np.dot(V,X)
    fig = plt.figure()
    plt.title('X and Y with linear mapping V')
    fig.canvas.set_window_title('Linear Mapping of X and Y')
    plt.plot(X,Y)
    # What does V do? -> it rotates the vectors to the left and right
    # c)
    fig = plt.figure()
    plt.title('Z with linear mapping V')
    fig.canvas.set_window_title('Linear Mapping of Z')
    Z = np.dot(Vt,Y)
    plt.plot(Z)
    # what does V^tV do? --> [[ 1.  0.]
    # [ 0.  1.]]
    # ==> Identity matrix. Multiplying any matrix with it will result in the
    # original matrix
    # d)
    D1 = [[2,0],[0,2]]
    D2 = [[2,0],[0,1]]
    fig = plt.figure()
    fig.canvas.set_window_title('Linear Mapping of X with D1 and D2')
    plt.subplot(2,1,1)
    plt.title('X with linear mapping D1')
    plt.plot(X,np.dot(D1,X))
    plt.subplot(2,1,2)
    plt.title('X with linear mapping D2')
    plt.plot(X,np.dot(D2,X))
    # Interpretation: what does D1 and D2 do?
    # --> D1 doubles the values in the matrix
    # --> D2 doubles the first half and keeps the second half
    # e)
    A = np.dot(np.dot(Vt,D2),V)
    fig = plt.figure()
    fig.canvas.set_window_title('Linear Mapping of X with A')
    plt.title('X with linear mapping A')
    plt.plot(X,np.dot(A,X))
    # What does A do? --> it rotates the first and second half in different
    # directions
'''
3.
In this exercise, we use a kNN classifier to classify handwritten digits from the USPS data-set. You
can reuse your kNN classifier from Assignment 2 or use libraries from Scikit. The USPS data-set
contains grayscale handwritten digit images scanned from envelopes by the U.S. Postal Service.
The images are of size 16 × 16 (256 pixel) with pixel values in the range 0 to 255. We have 10
classes {1, 2, ..., 9, 0}. The training data has 10000 images, stored in a 10000 × 256 Matlab matrix
(usps train.mat). The training label is a 10000 × 1 vector revealing the labels for the training data.
There are 1000 test images for evaluating your algorithm in the test data (usps test.mat).
'''
'''
a. First, we want to classify digit 2 versus digit 3. Prepare the training data: Load the train
data (scipy.io.loadmat) and prepare the training set for classes 2 and 3. We need to convert
the data type from uint8 (8-bit unsigned integer) to double. Do the same for the test data.
'''
def loadData():
    """Load the USPS digit matrices and return them cast to float.

    Returns (test_data, test_label, train_data, train_label).
    """
    train_mat = sio.loadmat('usps/usps_train.mat')
    test_mat = sio.loadmat('usps/usps_test.mat')
    return (test_mat['test_data'].astype(float),
            test_mat['test_label'].astype(float),
            train_mat['train_data'].astype(float),
            train_mat['train_label'].astype(float))
def trainModel(num1, num2, train_data, train_label, test_data, test_label, k):
    """Train a k-NN classifier to separate digits *num1* and *num2*.

    Filters train and test sets down to the two requested classes, fits a
    k-nearest-neighbour model, prints predictions and class probabilities on
    the filtered test set, and returns (train_score, test_score).

    NOTE: input scaling was tried by the original author and removed because
    it lowered the scores.
    """
    def _select_two_classes(data, labels):
        # Keep only samples whose label is num1 or num2.
        values, kept = [], []
        for i, lab in enumerate(labels):
            if lab == [num1] or lab == [num2]:
                values.append(data[i])
                kept.append(lab)
        return values, np.ravel(kept)

    train_values, train_labels = _select_two_classes(train_data, train_label)
    test_values, test_labels = _select_two_classes(test_data, test_label)

    knn = classifier(n_neighbors=k)
    knn.fit(train_values, train_labels)

    # Bug fix: predict on the filtered two-class test set. The original code
    # predicted on the *full* test_data (all ten digits), which is meaningless
    # for a binary model and inconsistent with the scores computed below.
    predict = knn.predict(test_values)
    print ('Prediction:\n', predict, '\n')
    print ('Prediction probabilities:\n', knn.predict_proba(test_values))
    train_score = knn.score(train_values, train_labels)
    test_score = knn.score(test_values, test_labels)
    print ('Train score: ', train_score)
    print ('Test score: ', test_score)
    return train_score, test_score
'''
b. Plot a few example images using matplotlib.pyplot.imshow and the grayscale colormap
(cmap=’grey’ ). Use reshape to convert the image vectors into 16 × 16 images.
'''
def get_image(data, label, index):
    """Return the sample at *index* as a 16x16 image together with its label."""
    img = data[index].reshape(16, 16)
    return img, label[index]
def show_single_image(data, label, index):
    """Dump one sample to stdout and display it as a 16x16 greyscale image."""
    img, lab = get_image(data, label, index)
    print(img)
    plt.title('Interpretation: ' + str(lab))
    plt.imshow(img, cmap='Greys')
    plt.show()
# def plot_images(images, labels):
# fig, axes = plt.subplots(nrows=4, ncols=4)
# fig.tight_layout() # Or equivalently, "plt.tight_layout()"
# fig.canvas.set_window_title('Images')
#
# for i in range(len(images)):
# ax = plt.subplot(fig)
# ax.set_title('n = {0:f}'.format(labels[i][0]))
# ax.imshow(images[i], cmap='Greys')
# fig.add_subplot(ax)
# plt.show()
def plot_images(images, labels):
    """Show *images* in one figure, one subplot per image, titled with its label."""
    fig = plt.figure(figsize=(10, 10))
    fig.canvas.set_window_title('Images')
    # Two columns; enough rows to hold all images.
    outer = gridspec.GridSpec(len(images)//2+1, 2)
    outer.update(hspace=0.5)
    for i in range(len(images)):
        # One 1x1 inner grid per image so titles don't collide.
        inner = gridspec.GridSpecFromSubplotSpec(1, 1,
                subplot_spec=outer[i], wspace=0, hspace=0)
        ax = plt.Subplot(fig, inner[0])
        # labels[i] is assumed to be a length-1 array/list holding the digit
        # (as loaded from the USPS label matrix) — TODO confirm with caller.
        ax.set_title('n = {0:d}'.format(int(labels[i][0])))
        ax.imshow(images[i], cmap='Greys')
        fig.add_subplot(ax)
    plt.show()
def show_random_images(data, label, number, singleplots):
    """Display *number* distinct, randomly chosen samples.

    With singleplots=True every image gets its own figure; otherwise all
    images are rendered into one grid figure via plot_images().
    """
    assert len(data) > number > 0
    chosen = np.random.choice(range(len(data)), number, replace=False)
    if singleplots:
        for idx in chosen:
            show_single_image(data, label, idx)
        return
    imgs, labs = [], []
    for idx in chosen:
        img, lab = get_image(data, label, idx)
        imgs.append(img)
        labs.append(lab)
    plot_images(imgs, labs)
'''
c. Evaluate the performance of your classifier: Test your classifier with different values k =
1, 3, 5, 7, 10, 15 and plot the training and the test errors.
d. Now you can classify other digits. Run your algorithm to classify digit 3 from 8 and compare
its performance with results from digit 2 versus 3.
'''
def main():
    """Run all assignment exercises: distribution plots, circle sampling,
    linear mappings, and the USPS kNN digit-classification experiments."""
    # 1. Law-of-large-numbers / CLT style histograms.
    plot(uniform, ([100, 1000, 10000, 100000]))
    plot(gauss, [100, 1000, 10000, 100000], (0, 0.1)) # (0,0.1) -> (mean, variance)
    plot(binomial, [100, 1000, 10000, 100000], (10, 0.5)) # ((10, 0.5) -> (n, p)
    plot(individual, [2, 3, 5, 10, 20], 1000) # 1000 -> size of the uniform distributions that get summed up
    # 2. Random points on/in a circle (two sampling strategies).
    plotRandomCircle(5)
    plotRandomCircle2(5)
    # Linear-mapping exercise on the .mat data.
    X = loadMatFile()
    createLinearMapping(X)
    # 3. kNN classification of USPS digits.
    test_data, test_label, train_data, train_label = loadData()
    show_random_images(test_data, test_label, 10, False)
    trainModel(2, 3, train_data, train_label, test_data, test_label, 5)
    train_scores = []
    test_scores = []
    train_scores2 = []
    test_scores2 = []
    # c/d: sweep k for 2-vs-3 and 3-vs-8 and record both scores.
    for k in [1, 3, 5, 7, 10, 15]:
        train_score, test_score = trainModel(2, 3, train_data, train_label, test_data, test_label, k)
        train_scores.append(train_score)
        test_scores.append(test_score)
        train_score2, test_score2 = trainModel(3, 8, train_data, train_label, test_data, test_label, k)
        train_scores2.append(train_score2)
        test_scores2.append(test_score2)
    # Plot train vs. test score curves for both digit pairs.
    fig = plt.figure()
    plt.title('Train vs. Test Score (2 and 3) for different k')
    plt.xlabel('k')
    plt.ylabel('score in %')
    x_labels = [1,3,5,7,10,15]
    plt.xticks([0,1,2,3,4,5], x_labels)
    plt.plot(train_scores, label='train score')
    plt.plot(test_scores, label='test score')
    plt.legend()
    fig = plt.figure()
    plt.title('Train vs. Test Score (3 and 8) for different k')
    plt.xlabel('k')
    plt.ylabel('score in %')
    x_labels = [1,3,5,7,10,15]
    plt.xticks([0,1,2,3,4,5], x_labels)
    plt.plot(train_scores2, label='train score')
    plt.plot(test_scores2, label='test score')
    plt.legend()
    plt.show()
# Script entry point: run all exercises when executed directly.
if __name__ == "__main__":
    main()
| true |
c945317bfdda80d544be8256a9792cedfe8d60b8 | Python | ritesh-deshmukh/Algorithms-and-Data-Structures | /Practice/fizzbuzz.py | UTF-8 | 519 | 3.953125 | 4 | [] | no_license | # arr = [a for a in range(1,21)]
# arr2 = [] * len(arr)
# print(arr)
# Classic FizzBuzz for 1..20, printed on a single line separated by spaces.
# (Dead commented-out list-building code removed.)
for num in range(1, 21):
    if num % 3 == 0 and num % 5 == 0:
        print("FizzBuzz", end=" ")
    elif num % 3 == 0:
        print("Fizz", end=" ")
    elif num % 5 == 0:
        print("Buzz", end=" ")
    else:
        print(num, end=" ")
# print(arr2)
# Encapsulation
# polymorphism
# garbage collector
# linkedlist
# hashtable
| true |
78517cec2e7fe5c10e7897159e6af60a7f4b4669 | Python | sclaughl/biblequiz-helps | /load_romans.py | UTF-8 | 2,916 | 2.859375 | 3 | [] | no_license | import logging
import urllib2
import os
import sqlite3
from BeautifulSoup import BeautifulSoup
# configure logging
log = logging.getLogger('romans_loader')
log.setLevel(logging.DEBUG)
log.addHandler(logging.StreamHandler())  # write log to stderr

# configure database -- delete it if it exists
DB_FILE = os.path.join(os.getcwd(), 'romans.db')
# Bug fix: the old check `os.path.exists(DB_FILE) and open(DB_FILE)` opened the
# file just to test it and never closed it, leaking a file handle (and the open
# handle can make the following os.remove fail on some platforms). The
# existence check alone is sufficient.
if os.path.exists(DB_FILE):
    os.remove(DB_FILE)

# Book-code -> display name; keys are used in the youversion URLs.
BOOKS = {
    'rom': 'Romans',
}

conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
c.execute('create table verses (book text, chapter int, verse int, verse_text text)')
conn.commit()
c.close()
def get_html_for_scripture(book, chapter):
    """Download one chapter page from youversion (NIV) and return it as soup.

    *book* is a short book code (e.g. 'rom'), *chapter* an int chapter number.
    """
    log.debug("about to dl page")
    f = urllib2.urlopen("http://www.youversion.com/bible/chapter/niv/%s/%s" % (book,chapter))
    # Convert HTML entities so verse text comes back as plain characters.
    return BeautifulSoup(f, convertEntities=BeautifulSoup.HTML_ENTITIES)
def process_chapter(book, chapter, soup):
    """
    Narrows the soup from whole page down to chapter content.
    Gets a list of verse spans.
    For each verse, find the verse spans for this verse (usually one span but not always).
    For each verse span, find children spans of class 'content' and accumulate the contents.
    Stops at the first verse number for which no spans exist.
    """
    soup = soup.find('div', attrs={"id": "version_primary"})
    log.debug("soup narrowed to %s" % soup)
    verse_spans = soup.findAll('span', 'verse')
    log.debug("found %r verse spans" % len(verse_spans))
    log.debug("evaluating verse spans...")
    verse = 1
    while True:
        verse_spans_for_this_verse = get_verse_spans_for_verse(verse, verse_spans)
        if not verse_spans_for_this_verse:
            # No spans for this verse number -> chapter is finished.
            log.debug("%s %s ends with verse %s", book, chapter, verse-1)
            break
        log.debug(verse_spans_for_this_verse)
        process_verse_contents(book, chapter, verse, verse_spans_for_this_verse)
        verse += 1
def process_verse_contents(book, chapter, verse_num, verse_spans):
    """Concatenate the 'content' span texts of one verse and insert the verse
    into the module-level sqlite connection `conn` (table `verses`)."""
    ess = ""
    for v in verse_spans:
        content_spans = v.findAll('span', 'content')
        for span in content_spans:
            # NOTE(review): span.string is None for spans with nested markup,
            # which would raise a TypeError here — TODO confirm inputs.
            ess += span.string
    log.debug("FINAL: %s", ess.strip())
    c = conn.cursor()
    c.execute("insert into verses values (?, ?, ?, ?)", (BOOKS[book], chapter, verse_num, ess.strip()))
    conn.commit()
    c.close()
def get_verse_spans_for_verse(verse_num, verse_spans):
    """Out of verse_spans, return the list of spans belonging to verse_num."""
    log.debug('finding verses for verse %d', verse_num)
    wanted_class = 'verse v%d' % verse_num
    collected = []
    for span in verse_spans:
        if span['class'] == wanted_class:
            collected.append(span)
        elif collected:
            # Spans for one verse are contiguous: the first non-matching span
            # after at least one match means the verse is complete.
            break
    return collected
# Driver: fetch and store chapters 9..15 of every configured book.
for book_key in BOOKS.keys():
    for chapter in range(9,16):
        soup = get_html_for_scripture(book_key, chapter)
        process_chapter(book_key, chapter, soup)
| true |
4a47617e9cbcf6b7de20ff82a41aefea917ccda3 | Python | czardien/beautiful-bundlifier | /lib/models/notification.py | UTF-8 | 964 | 3.046875 | 3 | [] | no_license | from typing import List
from datetime import datetime
class Notification:
    """One friend-notification row parsed from a CSV line."""

    # Timestamp column format, e.g. "2019-09-10 12:34:56".
    _TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S"

    def __init__(self, timestamp: str, user_id: str, friend_id: str, friend_name: str):
        self.timestamp = datetime.strptime(timestamp, self._TIMESTAMP_FORMAT)
        self.user_id = user_id
        self.friend_id = friend_id
        self.friend_name = friend_name

    @staticmethod
    def _split_line(line: str, csv_delimiter: str) -> List[str]:
        """Split a raw CSV line, stripping whitespace and newlines from each field."""
        return [element.strip().replace("\n", "") for element in line.split(csv_delimiter)]

    @classmethod
    def from_line(cls, line: str, csv_headers: List[str], csv_delimiter: str = ','):
        """Build a Notification from one CSV line, zipping csv_headers onto fields."""
        csv_line = cls._split_line(line, csv_delimiter)
        return cls(**dict(zip(csv_headers, csv_line)))

    def __str__(self):
        # Bug fix: this class is Notification, not Event — the old string was a
        # stale leftover from a rename and mislabeled every log line.
        return f"Notification(timestamp='{self.timestamp}',user_id='{self.user_id}',friend_id='{self.friend_id}'," \
               f"friend_name='{self.friend_name}')"
| true |
cfa123412a16a766e5c70ef9bb64c063ea4ed597 | Python | kyosukekita/ROSALIND | /Bioinformatics textbook track/find_substrings_of_a_genome_encoding_a_given_amino_acids_string.py | UTF-8 | 1,327 | 3.09375 | 3 | [] | no_license | def translate(seq):
decoded=""
string = """TTT F CTT L ATT I GTT V
TTC F CTC L ATC I GTC V
TTA L CTA L ATA I GTA V
TTG L CTG L ATG M GTG V
TCT S CCT P ACT T GCT A
TCC S CCC P ACC T GCC A
TCA S CCA P ACA T GCA A
TCG S CCG P ACG T GCG A
TAT Y CAT H AAT N GAT D
TAC Y CAC H AAC N GAC D
TAA * CAA Q AAA K GAA E
TAG * CAG Q AAG K GAG E
TGT C CGT R AGT S GGT G
TGC C CGC R AGC S GGC G
TGA * CGA R AGA R GGA G
TGG W CGG R AGG R GGG G"""
temp=string.split()
codon_map=dict(zip(temp[0::2], temp[1::2]))
for i in range(0, len(seq), 3):
decoded += codon_map[seq[i:i+3]]
return decoded
def reverse_complement(sequence):
    """Return the reverse complement of a DNA string (A<->T, G<->C).

    Bug fix: the original placeholder trick (replace 'A'->'x' then 'x'->'T',
    likewise for G/C) silently corrupted any input that already contained an
    'x'; str.translate maps every base in a single pass instead. Also
    generalized to complement lowercase bases — uppercase-only inputs behave
    exactly as before.
    """
    complement = str.maketrans("ACGTacgt", "TGCAtgca")
    return sequence.translate(complement)[::-1]
# Paste the genome between the triple quotes (left empty here); newlines are
# stripped so the scan works on one continuous string.
dna=""""""
dna=dna.replace("\n","")
peptide="FHVLQGMAWD"
# Each amino acid is encoded by 3 bases, so the matching substring length is fixed.
subst_len=len(peptide)*3
# Slide a window over the genome; print substrings whose forward translation
# or whose reverse-complement translation equals the target peptide.
for i in range(0,len(dna)-subst_len+1):
    subst=dna[i:i+subst_len]
    if translate(subst)==peptide:
        print(subst)
    elif translate(reverse_complement(subst))==peptide:
        print(subst)
| true |
3236b0c0fe85e530ef6100f6d1b4e1e02485ddcc | Python | stacywebb/ledypi | /src/patterns/equation.py | UTF-8 | 3,479 | 3.15625 | 3 | [] | no_license | import logging
from Equation import Expression
from patterns.default import Default
from utils.color import scale
from utils.modifier import Modifier
pattern_logger = logging.getLogger("pattern_logger")
class Equation(Default):
    """
    Use user-defined functions for the rgb values. Each function may depend on:
        - the pixel position in the led strip 'idx'
        - the current timestep 't', which cycles in a predefined allowed range
        - both
        - none
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.pattern_name = "Equation"
        # Maps 'r_fn'/'g_fn'/'b_fn' to compiled Expression objects.
        self.fns = {}
        # r,g,b functions in string form; on_change compiles them lazily.
        self.red_equation = Modifier('red equation', "cos(t)", on_change=self.on_change_red)
        self.green_equation = Modifier('green equation', "idx", on_change=self.on_change_green)
        self.blue_equation = Modifier('blue equation', "sin(t)", on_change=self.on_change_blue)
        # time step
        self.t = 1
        # max range for the cycling timestep
        self.max_range = self.strip_length * 1000
        self.modifiers = dict(
            red_equation=self.red_equation,
            green_equation=self.green_equation,
            blue_equation=self.blue_equation,
        )

    def on_change_red(self, value):
        # NOTE(review): the warning call is the assert *message*, so it only
        # fires when the assert fails; asserts vanish under `python -O`.
        assert isinstance(value, str), pattern_logger.warning("The equation value is not a string")
        self.fns['r_fn'] = Expression(value, ["t", "idx"])

    def on_change_green(self, value):
        assert isinstance(value, str), pattern_logger.warning("The equation value is not a string")
        self.fns['g_fn'] = Expression(value, ["t", "idx"])

    def on_change_blue(self, value):
        assert isinstance(value, str), pattern_logger.warning("The equation value is not a string")
        self.fns['b_fn'] = Expression(value, ["t", "idx"])

    def fill(self):
        """Evaluate the three equations for every pixel and write the colors."""
        # cycle the timestep back to 1 before it exceeds the allowed range
        if self.strip_length + self.t >= self.max_range:
            self.t = 1
        # timestep window covered by this execution (one t per pixel)
        rng = range(self.t, self.strip_length + self.t)
        try:
            # evaluate each channel over the current window
            rs = [self.fns['r_fn'](t=t, idx=idx) for idx, t in enumerate(rng, start=1)]
            gs = [self.fns['g_fn'](t=t, idx=idx) for idx, t in enumerate(rng, start=1)]
            bs = [self.fns['b_fn'](t=t, idx=idx) for idx, t in enumerate(rng, start=1)]
            # rescale each channel into 0..255 ints
            rs, gs, bs = self.scale(rs, gs, bs)
            # write RGBA values (alpha fixed at 255)
            for idx in range(self.strip_length):
                self.pixels[idx]['color'] = (rs[idx], gs[idx], bs[idx], 255)
        except Exception as e:
            # A bad user equation must not kill the pattern loop.
            pattern_logger.warning(f"One of the equation failed to execute, please change it\nError: {e}")
        # advance the timestep
        self.t += 1

    @staticmethod
    def scale(rs, gs, bs):
        """
        Scale each channel into 0..255 and convert the values to int.

        NOTE(review): if a channel is constant (min == max) the behavior
        depends on utils.color.scale, which is not visible here — presumably
        it divides by (max - min); confirm it handles that case.
        """
        # per-channel extrema
        r_min = min(rs)
        r_max = max(rs)
        g_min = min(gs)
        g_max = max(gs)
        b_min = min(bs)
        b_max = max(bs)
        # rescale into 0..255
        rs = [scale(r, 0, 255, r_min, r_max) for r in rs]
        gs = [scale(g, 0, 255, g_min, g_max) for g in gs]
        bs = [scale(b, 0, 255, b_min, b_max) for b in bs]
        # convert to int
        rs = [int(elem) for elem in rs]
        gs = [int(elem) for elem in gs]
        bs = [int(elem) for elem in bs]
        return rs, gs, bs
| true |
529637da17b5c86d1c2db73fb15f742e4c314a4d | Python | xuewanqi/incubator-singa | /python/singa/data.py | UTF-8 | 7,254 | 2.921875 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
'''
This module includes classes for loading and prefetching data batches.
Example usage::
import image_tool
from PIL import Image
tool = image_tool.ImageTool()
def image_transform(img_path):
global tool
return tool.load(img_path).resize_by_range(
(112, 128)).random_crop(
(96, 96)).flip().get()
data = ImageBatchIter('train.txt', 3,
image_transform, shuffle=True, delimiter=',',
image_folder='images/',
capacity=10)
data.start()
# imgs is a numpy array for a batch of images,
# shape: batch_size, 3 (RGB), height, width
imgs, labels = data.next()
# convert numpy array back into images
for idx in range(imgs.shape[0]):
img = Image.fromarray(imgs[idx].astype(np.uint8).transpose(1, 2, 0),
'RGB')
img.save('img%d.png' % idx)
data.end()
'''
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
import os
import random
import time
from multiprocessing import Process, Queue
import numpy as np
class ImageBatchIter(object):
    '''Utility for iterating over an image dataset to get mini-batches.

    Args:
        img_list_file(str): name of the file containing image meta data; each
                            line consists of image_path_suffix delimiter meta_info,
                            where meta info could be label index or label strings, etc.
                            meta_info should not contain the delimiter. If the meta_info
                            of each image is just the label index, then we will parse the
                            label index into a numpy array with length=batchsize
                            (for compatibility); otherwise, we return a list of meta_info;
                            if meta info is available, we return a list of None.
        batch_size(int): num of samples in one mini-batch
        image_transform: a function for image augmentation; it accepts the full
                        image path and outputs a list of augmented images.
        shuffle(boolean): True for shuffling images in the list
        delimiter(char): delimiter between image_path_suffix and label, e.g.,
                         space or comma
        image_folder(boolean): prefix of the image path
        capacity(int): the max num of mini-batches in the internal queue.
    '''

    def __init__(self, img_list_file, batch_size, image_transform,
                 shuffle=True, delimiter=' ', image_folder=None, capacity=10):
        self.img_list_file = img_list_file
        self.queue = Queue(capacity)
        self.batch_size = batch_size
        self.image_transform = image_transform
        self.shuffle = shuffle
        self.delimiter = delimiter
        self.image_folder = image_folder
        # NOTE(review): this boolean instance attribute shadows the stop()
        # method defined below, so obj.stop() raises TypeError on instances;
        # callers must use end() instead.
        self.stop = False
        self.p = None  # worker Process, created by start()
        with open(img_list_file, 'r') as fd:
            self.num_samples = len(fd.readlines())

    def start(self):
        """Spawn the producer process that fills the queue with mini-batches."""
        self.p = Process(target=self.run)
        self.p.start()
        return

    def __next__(self):
        """Block until a mini-batch is available, then return (x, y)."""
        assert self.p is not None, 'call start before next'
        while self.queue.empty():
            time.sleep(0.1)
        x, y = self.queue.get()  # dequeue one mini-batch
        return x, y

    def stop(self):
        # NOTE(review): unreachable on instances — shadowed by the self.stop
        # attribute set in __init__ (see note there).
        self.end();

    def end(self):
        """Terminate the producer process, if one was started."""
        if self.p is not None:
            # NOTE(review): run() executes in a *separate process*, so this
            # flag is presumably not seen by the child; terminate() below is
            # what actually stops it — confirm intent.
            self.stop = True
            time.sleep(0.1)
            self.p.terminate()

    def run(self):
        """Producer loop (runs in the child process): read the list file,
        optionally shuffle, and keep enqueuing mini-batches until stopped."""
        img_list = []
        is_label_index = True
        for line in open(self.img_list_file, 'r'):
            item = line.strip('\n').split(self.delimiter)
            if len(item) < 2:
                # No meta info at all on this line.
                is_label_index = False
                img_list.append((item[0].strip(), None))
            else:
                if not item[1].strip().isdigit():
                    # the meta info is not label index
                    is_label_index = False
                img_list.append((item[0].strip(), item[1].strip()))
        index = 0  # index for the image
        if self.shuffle:
            random.shuffle(img_list)
        while not self.stop:
            if not self.queue.full():
                x, y = [], []
                i = 0
                while i < self.batch_size:
                    img_path, img_meta = img_list[index]
                    aug_images = self.image_transform(
                        os.path.join(self.image_folder, img_path))
                    assert i + len(aug_images) <= self.batch_size, \
                        'too many images (%d) in a batch (%d)' % \
                        (i + len(aug_images), self.batch_size)
                    for img in aug_images:
                        # PIL image -> float32 array in CHW (channel-first) layout.
                        ary = np.asarray(img.convert('RGB'), dtype=np.float32)
                        x.append(ary.transpose(2, 0, 1))
                        if is_label_index:
                            y.append(int(img_meta))
                        else:
                            y.append(img_meta)
                        i += 1
                    index += 1
                    if index == self.num_samples:
                        index = 0  # reset to the first image
                        if self.shuffle:
                            random.shuffle(img_list)
                # enqueue one mini-batch
                if is_label_index:
                    self.queue.put((np.asarray(x), np.asarray(y, dtype=np.int32)))
                else:
                    self.queue.put((np.asarray(x), y))
            else:
                # Queue is full: back off briefly before retrying.
                time.sleep(0.1)
        return
if __name__ == '__main__':
    # Demo: stream 3-image mini-batches from train.txt and save the first batch.
    # NOTE(review): the relative import below fails when this file is run as a
    # plain script (no package context) — presumably run via `python -m`.
    from . import image_tool
    from PIL import Image
    tool = image_tool.ImageTool()

    def image_transform(img_path):
        # Resize into the range [112, 128], random-crop to 96x96, random flip.
        global tool
        return tool.load(img_path).resize_by_range(
            (112, 128)).random_crop(
            (96, 96)).flip().get()

    data = ImageBatchIter('train.txt', 3,
                          image_transform, shuffle=False, delimiter=',',
                          image_folder='images/',
                          capacity=10)
    data.start()
    imgs, labels = next(data)
    print(labels)
    for idx in range(imgs.shape[0]):
        # channel-first float array -> HWC uint8 -> PNG file
        img = Image.fromarray(imgs[idx].astype(np.uint8).transpose(1, 2, 0),
                              'RGB')
        img.save('img%d.png' % idx)
    data.end()
acd1e4cfefe4ffddbe25a4fec134dda4ce7073cc | Python | danielkza/desafios-2015 | /aval1/chat-online/run.py | UTF-8 | 542 | 2.96875 | 3 | [] | no_license | from sys import stdin
from itertools import chain
def _read_ints():
    """Read one whitespace-separated line of integers from stdin."""
    return [int(tok) for tok in stdin.readline().rstrip().split()]

# p = X's intervals, q = Z's intervals, [l, r] = candidate wake-up times.
p, q, l, r = _read_ints()

# All minutes (inclusive) at which X is online.
x_times = set()
for _ in range(p):
    s, e = _read_ints()
    x_times.update(range(s, e + 1))

# Z's schedule as raw (start, end) offsets, shifted by the wake-up time later.
z_times = [tuple(_read_ints()) for _ in range(q)]

count = 0
for t in range(l, r + 1):
    # t overlaps X if any shifted minute of Z's schedule is in x_times.
    if any(t + m in x_times
           for s, e in z_times
           for m in range(s, e + 1)):
        count += 1

print(count)
| true |
53c97193c723cd7a00ab36d0e5d6b38cdb0b6db5 | Python | NERSC/dayabay-data-conversion | /roottools.py | UTF-8 | 8,828 | 2.78125 | 3 | [] | no_license | # roottools by peter sadowski
import ROOT
import array
import numpy as np
import itertools
class RootTree():
    """Wrap a ROOT TChain, binding the requested branches to reusable buffers.

    Scalar branches are bound to single-element array.array buffers; vector
    branches to ROOT std::vector buffers. After loadentry(i) the buffers hold
    entry i's values and are exposed through a lazy Entry dict.
    (Python 2 code: uses print statements and xrange.)
    """

    def __init__(self,filename, treename, intbranches=[], floatbranches=[],ivectorbranches=[],fvectorbranches=[]):
        # NOTE(review): mutable-list defaults are shared across calls; they are
        # only read here (never mutated), so this works but is fragile.
        ch = ROOT.TChain(treename)
        status = ch.Add(filename)
        #if status == 1:
        #    raise ValueError('Error: File %s does not have tree %s' % (filename, treename))
        branchPointers = {}
        branchDict = {}
        ch.SetMakeClass(1)
        # Fixed-size buffers: 'I' = unsigned int, 'f' = float.
        for branchname in intbranches:
            branchPointers[branchname] = array.array('I', [0])
        for branchname in floatbranches:
            branchPointers[branchname] = array.array('f', [0])
        for branchname in fvectorbranches:
            branchPointers[branchname] = ROOT.std.vector('float')()
        for branchname in ivectorbranches:
            branchPointers[branchname] = ROOT.std.vector('int')()
        branches = intbranches + floatbranches + ivectorbranches + fvectorbranches
        # Disable all branches, then re-enable only the requested ones (speed).
        ch.SetBranchStatus("*",0)
        [ ch.SetBranchStatus(branchname, 1) for branchname in branches ]
        for branchname in branches:
            branchDict[branchname] = ch.GetBranch(branchname)
            ch.SetBranchAddress(branchname, branchPointers[branchname])
        #return ch, branchDict, branchPointers, branches
        self.filename = filename
        self.treename = treename
        self.ch = ch
        self.branchDict = branchDict
        self.branchPointers = branchPointers
        self.branches = branches
        self.intbranches = intbranches
        self.floatbranches = floatbranches
        self.fvectorbranches = fvectorbranches
        self.ivectorbranches = ivectorbranches
        self.current = {} # Dict containing data for current entry.

    def loadentry(self, i):
        """Load entry i into the bound buffers and wrap them in a lazy Entry."""
        self.ch.LoadTree(i)
        self.ch.GetEntry(i)
        self.current = Entry(self, True)
        return self.current

    def getentries(self):
        ''' Return generator for all sequential entries.'''
        nEntries= self.ch.GetEntries()
        for i in xrange(nEntries):
            if i%1000==0:
                print "Processing event nr. %i of %i" % (i,nEntries)
            current = self.loadentry(i)
            yield current

    def numEntries(self):
        """Total number of entries in the chain."""
        return self.ch.GetEntries()

    def find_trigger(self, detector, triggerNumber, startidx=0):
        '''
        Iterate over events quickly to find trigger.
        Only the 'detector' and 'triggerNumber' branches are read per event,
        which is much faster than loading full entries.
        requirements = list of (branchname, value) pairs, eg. ('detector', 0)
        '''
        startidx = int(startidx)
        for i in xrange(startidx, self.ch.GetEntries()):
            self.ch.LoadTree(i)
            self.branchDict['detector'].GetEntry(i)
            if not self.branchPointers['detector'][0] == detector:
                continue
            self.branchDict['triggerNumber'].GetEntry(i)
            if not self.branchPointers['triggerNumber'][0] == triggerNumber:
                continue
            return i
        raise Exception('Could not find d=%d tn=%d, biggest tn is %d' % (int(detector), int(triggerNumber), self.branchPointers['triggerNumber'][0]))
        # Unreachable (kept from original).
        return None
class Entry(dict):
    '''This class stores the information in a TTree entry.
    NOTE: It constructs each numpy array separately in a lazy fashion so
    they are only built if needed.
    '''
    def __init__(self, parent_tree, lazy=True):
        # NOTE(review): dict.__init__ is passed *self* (an empty dict at this
        # point), so the call is effectively a no-op.
        super(Entry, self).__init__(self)
        self.parent = parent_tree
        if lazy:
            pass
        else:
            self.unlazyconstruct()

    def unlazyconstruct(self):
        """Touch every branch so __getitem__ materialises all values now."""
        parent = self.parent
        for branchname in parent.branches:
            self[branchname]

    def __getitem__(self, key):
        '''Fetch the item if it already exists, else construct it from the
        parent tree's bound branch buffers and cache it.'''
        parent = self.parent
        if key in self:
            # Already materialised.
            return dict.__getitem__(self, key)
        elif key in parent.intbranches:
            val = parent.branchPointers[key][0]
            dict.__setitem__(self, key, val)
            return val
        elif key in parent.floatbranches:
            val = parent.branchPointers[key][0]
            dict.__setitem__(self, key, val)
            return val
        elif key in parent.fvectorbranches:
            # Copy the vector buffer into a numpy array (the buffer is reused
            # by the next loadentry).
            val_orig = parent.branchPointers[key]
            val = np.array(val_orig, dtype='float32')
            dict.__setitem__(self, key, val)
            return val
        elif key in parent.ivectorbranches:
            val_orig = parent.branchPointers[key]
            val = np.array(val_orig, dtype='int')
            dict.__setitem__(self, key, val)
            return val
        else: # This will likely raise a KeyError
            return dict.__getitem__(self, key)
def getChargesTime(entry, preprocess_flag=True, dtype='float32'):
    ''' This function takes a readout entry and extracts the charge and time.

    Returns two (8, 24) arrays indexed by (ring, column). When the same PMT
    was hit more than once, the deposit inside the time window
    (-1650, -1250) is preferred (presumably ns relative to the trigger —
    TODO confirm units); among in-window deposits the earlier one wins.
    If preprocess_flag is set, the charge image is log-compressed.
    '''
    charge = np.zeros((8, 24), dtype=dtype)
    time = np.zeros((8, 24), dtype=dtype)
    nHitsAD = entry['nHitsAD']
    chargeAD = entry['chargeAD']
    timeAD = entry['timeAD']
    ring = entry['ring'] - 1 # Convert to 0-idx
    column = entry['column'] - 1
    for hit in range(nHitsAD):
        if charge[ring[hit], column[hit]] != 0.0:
            # Second charge deposit in same PMT observed!
            time_orig = time[ring[hit], column[hit]]
            time_new = timeAD[hit]
            orig_in_window = (time_orig > -1650) and (time_orig < -1250)
            new_in_window = (time_new > -1650) and (time_new < -1250)
            if (new_in_window and not orig_in_window) or \
               (new_in_window and (time_new < time_orig)):
                # Use new deposit: fall through to the assignments below.
                pass
            else:
                # Keep the existing deposit.
                continue
        charge[ring[hit], column[hit]] = chargeAD[hit]
        time[ring[hit], column[hit]] = timeAD[hit]
    if preprocess_flag:
        charge = preprocess(charge)
    return charge, time
def preprocess(X):
    ''' Preprocess a charge image: clamp negatives to zero, then log-scale.'''
    prelog = 1.0
    scale = 10.0  # log(500000) ~= 10, so outputs land roughly in [0, 1]
    clamped = np.maximum(X, 0.0)
    return np.log(clamped + prelog) / scale
def isflasher(entry):
    ''' Is this entry a flasher according to Yasu's cut?

    An event is a flasher when it fails any of the three "clean" criteria
    (charge-ratio, timing PSD, 2-inch PMT charge) while still being below
    the muon charge threshold.
    '''
    eps = 10**-10
    max_q = entry['MaxQ']
    quadrant = entry['Quadrant']
    t_psd = entry['time_PSD']
    t_psd1 = entry['time_PSD1']
    max_q_2inch = entry['MaxQ_2inchPMT']
    nominal = entry['NominalCharge']
    charge_ok = np.log10(quadrant**2 + max_q**2/0.45/0.45 + eps) < 0.0
    timing_ok = np.log10(4.0 * (1.0-t_psd)**2 + 1.8 * (1.0-t_psd1)**2 + eps) < 0.0
    clean = charge_ok and timing_ok and max_q_2inch < 100.0
    return (not clean) and (nominal <= 3000.0)
def ismuon(entry):
    ''' Is this entry a muon according to Yasu's cut?

    Anything with nominal charge above 3000 is tagged as a muon.
    '''
    return entry['NominalCharge'] > 3000.0
def get_num_entries(filename):
    """Alias for get_num_readout_entries (kept for backward compatibility)."""
    return get_num_readout_entries(filename)
def get_num_readout_entries(filename):
    """Number of entries in the CalibReadout tree of *filename*."""
    t1 = makeCalibReadoutTree(filename)
    return t1.numEntries()
def get_num_stat_entries(filename):
    """Number of entries in the CalibStats tree of *filename*."""
    t2 = makeCalibStatsTree(filename)
    return t2.numEntries()
def makeCalibReadoutTree(filename):
    """Build a RootTree over the CalibReadout header branches of *filename*."""
    treename = '/Event/CalibReadout/CalibReadoutHeader'
    intbranches = ['nHitsAD','triggerNumber', 'detector']
    floatbranches = []
    ivectorbranches = ["ring","column","wallNumber"] #,"wallspot"]
    fvectorbranches = ["timeAD","chargeAD", "timePool", "chargePool", "wallSpot"]
    t1 = RootTree(filename, treename, intbranches=intbranches, floatbranches=floatbranches, ivectorbranches=ivectorbranches, fvectorbranches=fvectorbranches)
    return t1
def makeCalibStatsTree(filename):
    """Build a RootTree over the CalibStats branches of *filename*
    (the scalar quantities used by the flasher/muon cuts)."""
    treename = '/Event/Data/CalibStats'
    floatbranches = ['MaxQ', 'Quadrant', 'time_PSD', 'time_PSD1', 'MaxQ_2inchPMT', 'NominalCharge'] #, 'dtLast_AD1_ms', 'dtLast_AD2_ms', 'dtLast_AD3_ms', 'dtLast_AD4_ms']
    intbranches = ['triggerNumber']#["detector","triggerNumber"] #,"triggerType","triggerTimeSec","triggerTimeNanoSec","nHitsAD","nHitsPool"]
    ivectorbranches = []
    fvectorbranches = []
    t2 = RootTree(filename, treename, intbranches=intbranches, floatbranches=floatbranches, ivectorbranches=ivectorbranches, fvectorbranches=fvectorbranches)
    return t2
def rootfileiter(filename):
    """Generator over merged entries: CalibReadout fields updated with the
    matching CalibStats fields (trees iterated in lockstep; Python 2 izip)."""
    t2 = makeCalibStatsTree(filename)
    t1 = makeCalibReadoutTree(filename)
    for entry1, entry2 in itertools.izip(t1.getentries(), t2.getentries()):
        entry1.update(entry2)
        yield entry1
def calibReadoutIter(filename):
    """Generator over all CalibReadout entries of *filename*."""
    t1 = makeCalibReadoutTree(filename)
    for entry in t1.getentries():
        yield entry
def calibStatsIter(filename):
    """Generator over all CalibStats entries of *filename*."""
    t2 = makeCalibStatsTree(filename)
    for entry in t2.getentries():
        yield entry
| true |
c8ae29d53aa917e4f1924dea558f34d8703e3256 | Python | jigi-33/async_python_beginner_21 | /realpyth_samples/ext1_async_await_syntax_and_coroutines.py | UTF-8 | 1,005 | 3.640625 | 4 | [] | no_license | import asyncio
"""
Coroutine - a function that can suspend its execution before reaching return
and it can indirectly pass control to another coroutine for some time.
countasync.py in original article - http://realpython.com/async-io-python
"""
async def count():  # a native coroutine
    """Print "One", yield control to the event loop for 5 s, then print "Two"."""
    print("One")
    await asyncio.sleep(5)  # asyncio has its own sleep replacing time.sleep — use it inside coroutines!
    print("Two")
async def main():  # main code block
    """Run three count() coroutines concurrently on a single event loop."""
    await asyncio.gather(  # starts a group of coroutine functions
        count(),  # the order of this output is the heart of async IO.
        count(),  # talking to each of the calls of count() is a single event loop, or Coordinator.
        count()
    )
# Time the whole async run when executed as a script.
if __name__ == "__main__":
    import time
    s = time.perf_counter()
    asyncio.run(main())  # the actual execution of the whole async program
    elapsed = time.perf_counter() - s  # calculate execution time
    print(f"{__file__} executed in {elapsed:0.2f} seconds\n")
| true |
3ff90dabed90d6577540e70ca19bc536e15b311f | Python | rkuo2000/homebot | /RPi/uart.py | UTF-8 | 158 | 3.4375 | 3 | [] | no_license | import serial
# Open the serial link (Windows-style port name, 9600 baud).
ser = serial.Serial('COM6', 9600)
# NOTE(review): payload appears space-padded — confirm the exact padding the
# peer firmware expects (possibly 8 bytes to match b'readline').
ser.write(b'start ')
# Poll forever: request a line, then echo the raw bytes received.
while 1:
    ser.write(b'readline')
    line = ser.readline()
    print(line)
| true |
24620f047444d14fb1287c75fb65c5dff2edd54b | Python | QiTai/python | /processManage/fig18_09.py | UTF-8 | 898 | 3.515625 | 4 | [] | no_license | #Demonstrating popen and popen2
import os
#determine operating system, then set directory-listing and reverse-sort commands
if os.name =="nt" or os.name == "dos":
fileList = "dir /B"
sortReverse = "sort /R"
elif os.name =="posix":
fileList = "ls -l"
sortReverse = "sort -r"
else:
sys.exit("OS not supported by this program")
#obtain stdout of directory-listing commands
dirOut = os.popen(fileList,"r")
#obtain stdin, stdout of reverse-sort commands
sortIn,sortOut = os.popen2(sortReverse)
filenames = dirOut.read()
#display output from directory-listing commands
print "Before sending to sort"
print "(Output from '%s'):" % fileList
print filenames
sortIn.write(filenames) #send to stdin of sort commands
dirOut.close()
sortIn.close()
#display output from sort commands
print "After sending to sort"
print "(Output from '%s'):" % sortReverse
print sortOut.read()
sortOut.close()
| true |
3445de8ffa23b2046a099196f63f10caa33946f7 | Python | 24mu13/concierge | /tests/test_intent_toggl.py | UTF-8 | 1,386 | 2.546875 | 3 | [
"MIT"
] | permissive | import datetime
def test_empty_summary(summary_intent):
    """A date with no Toggl entries yields the 'no entries' summary text."""
    entities = {
        "since": "2019-09-11",
        "until": "2019-09-11",
    }
    summary = summary_intent.execute("random-execution-id", entities)
    assert "title" in summary
    # assert summary["title"] == "Toggl Summary for Tuesday, September 10th 2019"
    assert "text" in summary
    # NOTE(review): this f-string has no placeholders; a plain string would do.
    assert summary["text"] == f"There are no entries for this date."
    # assert "attachments" in summary
def test_summary(summary_intent):
    """A date with entries produces a summary with title and text keys."""
    entities = {
        "since": "2019-09-10",
        "until": "2019-09-10",
    }
    summary = summary_intent.execute("random-execution-id", entities)
    assert "title" in summary
    # assert summary["title"] == "Toggl Summary for Tuesday, September 10th 2019"
    assert "text" in summary
    # assert "attachments" in summary
def test_find_previous_business_day__simple(summary_intent):
    """Friday's previous business day is Thursday (no weekend involved)."""
    thursday = datetime.date(2019, 9, 12)
    friday = datetime.date(2019, 9, 13)
    previous_business_day = summary_intent._find_previous_business_day(friday)
    assert previous_business_day == thursday
def test_find_previous_business_day__cross_weekend(summary_intent):
    """Monday skips the weekend back to the preceding Friday."""
    monday = datetime.date(2019, 9, 16)
    assert summary_intent._find_previous_business_day(monday) == datetime.date(2019, 9, 13)
| true |
0c3700880ff956fc7c37f8eea993e57a179928e0 | Python | bpinapinon/bpina_Homework5 | /bpina_Pyber.py | UTF-8 | 6,205 | 3.890625 | 4 | [] | no_license | # # Option 1: Pyber
# 
# The ride sharing bonanza continues! Seeing the success of notable players like Uber and Lyft, you've decided to join a fledgling ride sharing company of your own. In your latest capacity, you'll be acting as Chief Data Strategist for the company. In this role, you'll be expected to offer data-backed guidance on new opportunities for market differentiation.
# You've since been given access to the company's complete recordset of rides. This contains information about every active driver and historic ride, including details like city, driver count, individual fares, and city type.
# Your objective is to build a [Bubble Plot](https://en.wikipedia.org/wiki/Bubble_chart) that showcases the relationship between four key variables:
# * Average Fare ($) Per City
# * Total Number of Rides Per City
# * Total Number of Drivers Per City
# * City Type (Urban, Suburban, Rural)
# In addition, you will be expected to produce the following three pie charts:
# * % of Total Fares by City Type
# * % of Total Rides by City Type
# * % of Total Drivers by City Type
# As final considerations:
# * You must use the Pandas Library and the Jupyter Notebook.
# * You must use the Matplotlib library.
# * You must include a written description of three observable trends based on the data.
# * You must use proper labeling of your plots, including aspects like: Plot Titles, Axes Labels, Legend Labels, Wedge Percentages, and Wedge Labels.
# * Remember when making your plots to consider aesthetics!
# * You must stick to the Pyber color scheme (Gold, Light Sky Blue, and Light Coral) in producing your plot and pie charts.
# * When making your Bubble Plot, experiment with effects like `alpha`, `edgecolor`, and `linewidths`.
# * When making your Pie Chart, experiment with effects like `shadow`, `startangle`, and `explosion`.
# * See [Starter Workbook](Pyber/pyber_starter.ipynb) for a reference on expected format.
# Include this line to make plots interactive
# %matplotlib notebook
# Import our dependencies
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# File Paths (relative to the notebook's working directory)
CityFilePath = "data/city_data.csv"
RideFilePath = "data/ride_data.csv"
# Read data into Data Frames
DF_City = pd.read_csv(CityFilePath)
DF_Ride = pd.read_csv(RideFilePath)
# Combine data sets into one frame: one row per ride, joined on "city"
DF_Combined = pd.merge(DF_City , DF_Ride , on = "city")
print(DF_Combined.head())
print(DF_Combined.dtypes)
# Ensure fares are numeric before the aggregations below.
DF_Combined["fare"] = pd.to_numeric(DF_Combined["fare"] )
###################
# Bubble Plot
###################
# Split the combined frame into one sub-frame per city type.
UrbanDF = DF_Combined[DF_Combined["type"]=="Urban"]
SuburbanDF = DF_Combined[DF_Combined["type"]=="Suburban"]
RuralDF = DF_Combined[DF_Combined["type"]=="Rural"]
# Circle size correlates to driver count per city (each circle is one city)
# Rides per city: count of ride_id rows within each city.
UrbanRides = UrbanDF.groupby(["city"]).count()["ride_id"] #giving us the count of rides based on the ride id
SuburbanRides = SuburbanDF.groupby(["city"]).count()["ride_id"]
RuralRides = RuralDF.groupby(["city"]).count()["ride_id"]
# Average fare per city.
RuralAvgFare = RuralDF.groupby(["city"]).mean()["fare"]
SuburbanAvgFare = SuburbanDF.groupby(["city"]).mean()["fare"]
UrbanAvgFare = UrbanDF.groupby(["city"]).mean()["fare"]
# Driver count per city; mean() works because driver_count is constant per city.
RuralDriverCount = RuralDF.groupby(["city"]).mean()["driver_count"] #we still use mean function because we want to use the driver count number(38) not add them
SuburbanDriverCount = SuburbanDF.groupby(["city"]).mean()["driver_count"]
UrbanDriverCount = UrbanDF.groupby(["city"]).mean()["driver_count"]
# Build the scatter plots for each city type; marker area scales with driver count.
plt.scatter(UrbanRides,UrbanAvgFare , s = 8*UrbanDriverCount , c = "gold" , marker = "o" , label = "Urban" , edgecolors = "black" )
plt.scatter(SuburbanRides,SuburbanAvgFare , s = 8*SuburbanDriverCount , c = "lightskyblue" , marker = "o", label = "Suburban", edgecolors = "black")
plt.scatter(RuralRides,RuralAvgFare , s = 8*RuralDriverCount , c = "lightcoral" , marker = "o" , label = "Rural" , edgecolors = "black" )
# Label Axes and Title
plt.xlabel("Ride Sharing Bubble (Scatter) Plot")
plt.ylabel("Average Fare")
plt.title("Pyber: City Type Average Fare")
# Add grid
plt.grid(True)
# Create Legend
plt.legend(loc = "best" , title = "City Type")
# Save and show the plot.
plt.savefig("Output/bpina_Output_BubblePlot")
plt.show()
#############################
# TOTAL FARES PER CITY TYPE
#############################
# "% of total fares" is a share of fare DOLLARS, so the fares must be summed
# per city type. The original used .count(), which tallies the number of
# rides instead (the author flagged those numbers as WRONG).
CombinedTotalFaresPerType = DF_Combined.groupby("type").sum()["fare"]
print(CombinedTotalFaresPerType)
# Generate List with Keys (Labels)
CityTypeKeys = CombinedTotalFaresPerType.keys()
print(CityTypeKeys)
# Create a Pie Chart
plt.pie(CombinedTotalFaresPerType , labels = CityTypeKeys , autopct = "%1.1f%%" , shadow = True , startangle = 90)
# Set Title
plt.title("Percent of Total Fares by City Type")
# Keep the pie circular; this must be applied before the figure is rendered/saved.
plt.axis("equal")
# Save and show plot
plt.savefig("Output/bp_output_PieChart_TotalFARESByCityType.png")
plt.show()
#############################
# TOTAL RIDES PER CITY TYPE
#############################
# Each row of DF_Combined is one ride, so counting ride_id per type
# correctly yields total rides per city type.
CombinedTotalRidesPerType = DF_Combined.groupby("type").count()["ride_id"]
print(CombinedTotalRidesPerType)
# Create a Pie Chart
plt.pie(CombinedTotalRidesPerType , labels = CityTypeKeys , autopct = "%1.1f%%" , shadow = True , startangle = 90)
# Set Title
plt.title("Percent of Total Rides by City Type")
# Save and show plot
plt.savefig("Output/bp_output_PieChart_TotalRIDESByCityType.png")
plt.show()
# NOTE(review): axis("equal") after show() does not affect the saved image;
# it likely belongs before savefig -- confirm intended output.
plt.axis("equal")
#############################
# TOTAL DRIVERS PER CITY TYPE
#############################
# DF_Combined repeats each city's driver_count once PER RIDE, so summing or
# counting it there over-weights busy cities (the author flagged the result
# as WRONG). Each city appears exactly once in DF_City, so summing its
# driver_count by type gives the true driver totals.
CombinedTotalDriversPerType = DF_City.groupby("type").sum()["driver_count"]
print(CombinedTotalDriversPerType)
# Create a Pie Chart
plt.pie(CombinedTotalDriversPerType , labels = CityTypeKeys , autopct = "%1.1f%%" , shadow = True , startangle = 90)
# Set Title
plt.title("Percent of Total Drivers by City Type")
# Keep the pie circular; this must be applied before the figure is rendered/saved.
plt.axis("equal")
# Save and show plot
plt.savefig("Output/bp_output_PieChart_TotalDRIVERSByCityType.png")
plt.show()
5431e6b6741d45f58729bd070a765bdf9ea8d85f | Python | HTY2003/CEP-Stuff | /CEP Y3/Unit 2.7 Hash Tables/HengTengYi_PS3/chainhash.py | UTF-8 | 11,348 | 3.46875 | 3 | [] | no_license | import sys
import math
import ctypes
from collections import deque
if sys.version_info[0] == 3:
_get_byte = lambda c: c
else:
_get_byte = ord
class HashTable:
    """
    Hash table that resolves collisions by separate chaining.

    Keys are hashed with the FNV-1 algorithm into a fixed-size array; each
    occupied slot holds a deque of (key, value) pairs, so colliding entries
    are chained rather than probed.
    """

    def __init__(self, size):
        """Create a table with `size` slots (prime sizes recommended).

        Example:
        >>> a = HashTable(37)
        """
        self._size = size
        self._taken = 0  # number of non-empty chains (slots in use)
        PyArrayType = ctypes.py_object * self._size
        self._data = PyArrayType(*([None] * self._size))

    def __len__(self):
        """Return the slot count (capacity) of the backing array. O(1)."""
        return self._size

    def __str__(self):
        """Return a dump of the table: one line per slot; extra chain
        entries are indented to align under the first pair, e.g.::

            3     Jerry : Tom
                  Tom : Jerry
        """
        lines = []
        for index in range(self._size):
            label = str(index).ljust(6)
            chain = self._data[index]
            if chain is None:
                lines.append(label + "None")
            else:
                pairs = [str(k).strip("\n") + " : " + str(v).strip("\n")
                         for k, v in chain]
                lines.append(label + ("\n" + " " * 6).join(pairs))
        return "\n".join(lines) + "\n"

    __repr__ = __str__

    def __iter__(self):
        """Iterate over all stored values (not keys); empty slots skipped. O(n)."""
        values = deque()
        for chain in self._data:
            if chain is not None:
                for _key, value in chain:
                    values.append(value)
        return iter(values)

    def __setitem__(self, key, value):
        "Same as self.add(key, value)."
        return self.add(key, value)

    def __getitem__(self, key):
        "Same as self.get(key)."
        return self.get(key)

    def __delitem__(self, key):
        "Same as self.remove(key)."
        return self.remove(key)

    def __fnv1(self, data):
        """(hidden) Hash `data` to a slot index via FNV-1 folded to 16 bits.

        Bug fix: the original converted ints with bytes(n), which produces n
        zero bytes (so same-length ints hashed poorly) and raises for
        negative ints. All keys are now hashed through their string form;
        bucket placement changes, but the external mapping is unaffected.
        """
        payload = bytes(str(data), "utf-8")
        hval = 0x811c9dc5
        for byte in payload:
            hval = (hval * 0x01000193) % (2 ** 16)
            hval = hval ^ byte
        return hval % self._size

    def __chain(self, key, value):
        """(hidden) Append (key, value) to the chain at key's slot. O(1)."""
        index = self.__fnv1(key)
        if self._data[index] is None:
            self._taken += 1
            self._data[index] = deque()
        self._data[index].append((key, value))

    def __search(self, key):
        """(hidden) Return (slot index, value) for key, or (None, None)."""
        index = self.__fnv1(key)
        if self._data[index] is not None:
            for stored_key, stored_value in self._data[index]:
                # Bug fix: the original compared with `is` (identity), which
                # misses equal-but-distinct objects such as runtime-built
                # strings; equality is the correct test here.
                if stored_key == key:
                    return index, stored_value
        return None, None

    def add(self, key, value):
        """Map key -> value. Duplicate keys are allowed; lookups return the
        first matching pair until it is removed. O(1)."""
        self.__chain(key, value)

    def get(self, key):
        """Return the value mapped to key, or None if the key is absent."""
        return self.__search(key)[1]

    def remove(self, key):
        """Remove key's first mapping; return (slot index, value), or None.

        Bug fix: a missing key used to index the array with None and crash
        with TypeError; it now returns None as documented.
        """
        index, value = self.__search(key)
        if index is None:
            return None
        try:
            self._data[index].remove((key, value))
            if len(self._data[index]) == 0:
                self._taken -= 1  # the chain just became empty
            return index, value
        except ValueError:
            return None

    def used(self):
        """Return the number of non-empty chains (slots in use) -- NOT the
        number of key/value pairs. O(1)."""
        return self._taken

    def checkrehash(self, max=80, min=20, multiplier=2, divisor=2):
        """Resize when occupancy leaves [min%, max%]: grow by `multiplier`
        above max, shrink by `divisor` below min.

        Note: `max`/`min` shadow builtins but are kept for backward
        compatibility with existing keyword-argument callers.
        """
        assert min <= max, "Minimum cannot exceed maximum"
        if self._taken >= (self._size * max / 100):
            self.rehash(int(self._size * multiplier))
        elif self._taken <= (self._size * min / 100):
            self.rehash(int(self._size / divisor))

    def rehash(self, size):
        """Resize the backing array to `size` and re-insert every pair."""
        self._size = size
        self._taken = 0
        old_data = self._data
        PyArrayType = ctypes.py_object * self._size
        self._data = PyArrayType(*([None] * self._size))
        for chain in old_data:
            if chain is not None:
                for key, value in chain:
                    self.add(key, value)
| true |
e895978865d8cbd235ef6c5c78ebc5b1192fb09b | Python | j-csy/temperature-check | /temperature_check_cmdline.py | UTF-8 | 3,010 | 3.890625 | 4 | [] | no_license | import requests
def continue_question(question = "[y/n]", strict = True):
    """Ask `question` and return True for 'yes'/'y', False for 'no'/'n'.

    With strict=False, several colloquial variants of yes/no are also
    accepted. Any other reply re-prompts the user with the same question.
    """
    question = question.strip(" ")
    reply = input("%s " % question)
    reply = reply.lower()
    if (reply == "yes") or (reply == "y") or (not strict and reply in ["yep", "yeah", "sure", "okay", "ok", "definitely", "certainly", "of course"]):
        return True
    elif (reply == "no") or (reply == "n") or (not strict and reply in ["nope", "no way", "definitely not", "certainly not", "nah", "of course not"]):
        return False
    else:
        # Bug fix: re-ask with the ORIGINAL question; the previous code
        # dropped the `question` argument and fell back to "[y/n]".
        return continue_question(question, strict = strict)
def get_weather():
    """Fetch Sydney's current temperature from OpenWeatherMap, rounded to whole Celsius."""
    API_key = "12549cfffc93587bf8216ce33e6739c5"
    base_url = "http://api.openweathermap.org/data/2.5/weather?"
    city_name = "Sydney"
    request_url = "{}appid={}&q={}".format(base_url, API_key, city_name)
    weather_data = requests.get(request_url).json()
    # The API reports Kelvin; convert to Celsius and round to the nearest integer.
    return round(weather_data['main']['temp'] - 273.15)
def threshold_check(temp):
    """Print whether `temp` (int-convertible) is below 30C, above 90C, or in range."""
    low, high = 30, 90
    value = int(temp)
    if value < low:
        print("WARNING : Temperature below a threshold (30C) !!! ")
    elif value > high:
        print("WARNING : Temperature is above a threshold (90C) !!! ")
    else:
        print(">>>>> Temperature entered within range 30C - 90C <<<<<")
def dec_inc_check(newtemp, prevtemp):
    """Compare the new reading with the previous one and warn on swings over 20C."""
    print(f"\nCurent Temperature : {newtemp}C || Previous Temperature : {prevtemp}C")
    delta = newtemp - prevtemp
    if delta < -20:
        print("########## ALARM!! : DECREASE > 20C !!! ##########")
    elif delta > 20:
        print("########## ALARM!! : INCREASE > 20C !!! ##########")
    else:
        print(">>>>> ACCEPTABLE Increase/Decrease : No more than 20C <<<<<")
def process_temp():
    """Interactive loop: read temperatures from the user, check each against
    the 30-90C threshold, and compare consecutive readings for >20C swings."""
    curTemp = 0
    again = True
    items = []
    inputTemp = int(input("Please enter initial temperature: "))
    items = [inputTemp]
    print ("Initial Temperature entered: " + str(items[0]) + "C")
    print ("Actual Temperature : " + str(get_weather()) + "C")
    # Verify initial inputTemp threshold
    threshold_check(inputTemp)
    while again:
        curTemp = int(input("\nEnter comparison temperature: "))
        print ("Comparison Temperature entered: " + str(curTemp) + "C")
        items.append(curTemp)
        # Verify threshold
        threshold_check(curTemp)
        # Verify increase/decrease from previous temperature.
        # NOTE(review): an entry of exactly 0 skips the comparison --
        # presumably a leftover sentinel check; confirm intended behavior.
        if curTemp != 0:
            dec_inc_check(curTemp, items[-2])
        # Prompt user to continue?
        again = continue_question("\nWould you like to try again? ", strict=False)
process_temp()
| true |
1d32bcfc6695e5085068227dec552a0d4088cf5a | Python | ericdddddd/NTUST_Information-Retrieval | /Hw1/Hw1.py | UTF-8 | 4,794 | 2.578125 | 3 | [] | no_license | #%%
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
#%%
import os
# Absolute paths to the NTUST IR 2020 query and document collections.
queriesPath = 'C:\\Users\\User\\Desktop\\IR\\data\\ntust-ir-2020\\queries'
docsPath = 'C:\\Users\\User\\Desktop\\IR\\data\\ntust-ir-2020\\docs'
# Sorted listings so queries/docs line up with the output ordering below.
queriesList = os.listdir(queriesPath)
queriesList.sort()
docsList = os.listdir(docsPath)
docsList.sort()
# Raw text of every query and every document, in listing order.
queriesContext = []
docsContext = []
for query in queriesList:
    path = os.path.join(queriesPath,query)
    f = open(path)
    context = f.read()
    queriesContext.append(context)
for doc in docsList:
    path = os.path.join(docsPath,doc)
    f = open(path)
    context = f.read()
    docsContext.append(context)
# NOTE(review): file handles are never closed; `with open(path) as f:`
# would release them deterministically.
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#%%
vector = [] # all distinct words across the queries: the axes of the vector space model
for query in queriesContext:
    words = query.split( )
    print(words)
    for word in words:
        # Keep only the first occurrence, preserving encounter order.
        if word not in vector:
            vector.append(word)
#print(vector)
#%%
def cal_df(term, df):
    """Fill df[i] with the document frequency of term[i].

    A term's document frequency is the number of documents in the global
    ``docsContext`` whose whitespace-separated tokens include the term.
    Prints "term:count" for each term as a progress trace.
    """
    # The unused `store` array and `colcount` counter from the original
    # implementation have been dropped; behavior is otherwise unchanged.
    for i, word in enumerate(term):
        num = 0
        for doc in docsContext:
            # Tokenize so e.g. "ab" does not match inside "abc".
            if word in doc.split():
                num += 1
        print(word + ':' + str(num))
        df[i] = num
def cal_doc_tf(vector, tf):
    """Populate tf[doc][term] with raw term counts for every document.

    Rows follow the order of the global ``docsContext``; columns follow
    the order of ``vector``.
    """
    for row, doc in enumerate(docsContext):
        tokens = doc.split()
        for col, word in enumerate(vector):
            tf[row][col] = tokens.count(word)
def cal_Query_tf(query, vector, tf):
    """Fill tf[i] with the number of times vector[i] occurs in `query`.

    `query` is a list of tokens; `vector` is the vocabulary; `tf` is the
    output array, filled in place.
    """
    # Bug fix: the original assigned into the module-global `querytf`
    # instead of the `tf` parameter, leaving the parameter unused.
    for index, word in enumerate(vector):
        tf[index] = query.count(word)
def cal_TermWeight2(doc_tw, query_tw, doc_tf, query_tf, df):
    """Fill a term-weight array in place, for either documents or the query.

    Weights follow (1 + tf) * log10(N / df); cells with zero tf or zero df
    get weight 0. Call with query_tw=None to weight the documents, or with
    doc_tw=None to weight the query. Uses the module-global document count N.
    """
    if query_tw is None:
        # Document mode: weight every (doc, term) cell.
        for row in range(N):
            for col in range(df.shape[0]):
                if df[col] == 0 or doc_tf[row][col] == 0:
                    doc_tw[row][col] = 0
                else:
                    doc_tw[row][col] = (1 + doc_tf[row][col]) * np.log10(N / df[col])
    elif doc_tw is None:
        # Query mode: weight each term of the single query vector.
        for col in range(df.shape[0]):
            if df[col] == 0 or query_tf[col] == 0:
                query_tw[col] = 0
            else:
                query_tw[col] = (1 + query_tf[col]) * np.log10(N / df[col])
def cal_cosSimilarity2(doc_tw, query_tw):
    """Return cosine similarity between the query vector and each document.

    Uses the module-global document count N; documents (or queries) with a
    zero-length weight vector get similarity 0 instead of dividing by zero.
    """
    sim = np.zeros(N)
    q_norm = np.sqrt(np.dot(query_tw, query_tw))
    for i in range(N):
        d_norm = np.sqrt(np.dot(doc_tw[i], doc_tw[i]))
        denom = d_norm * q_norm
        if denom != 0:
            sim[i] = np.dot(query_tw, doc_tw[i]) / denom
        # sim[i] stays 0 when either vector has zero length.
    return sim
#%%
index = 0
N = 4191 # total number of documents in the collection
count = 0
sim = np.zeros([len(queriesList),N], dtype = float)
df = np.zeros(len(vector),dtype=int)
cal_df(vector,df)
#%%
index = 0
for query in queriesContext:
    print(index)
    termw = query.split( ) # tokens of the current query
    # Allocate the per-query working arrays.
    doctf = np.zeros([N,len(vector)])
    querytf = np.zeros(len(vector))
    doc_tw = np.zeros([N,len(vector)], dtype = float) # doc term-weight
    query_tw = np.zeros(len(vector), dtype = float)
    cal_doc_tf(vector,doctf) # document term frequencies
    #print(doctf)
    cal_Query_tf(termw,vector,querytf)# query term frequencies
    #print(querytf)
    cal_TermWeight2(doc_tw,None,doctf,None,df) # document term weights
    #print(doc_tw)
    cal_TermWeight2(None,query_tw,None,querytf,df) # query term weights
    #print(query_tw)
    sim[index] = cal_cosSimilarity2(doc_tw,query_tw)
    #print(sim[index])
    index += 1
#%%
# Write one ranked result line per query: "<query-id>,<doc-ids by similarity desc>".
res = {}
save_file_name = 'res.txt'
fp = open(save_file_name, "w")
fp.seek(0)
fp.write("Query,RetrievedDocuments2\n")
for loop in range (len(queriesList)):
    # [0:-4] strips the ".txt" extension from the file names.
    write_string = queriesList[loop][0:-4] + ","
    for i,j in zip(docsList,sim[loop]):
        res[i[0:-4]] = j
    sorted_x = sorted(res.items(), key=lambda kv: kv[1],reverse = True)
    print(sorted_x[:15])
    for doc in sorted_x:
        write_string += doc[0] + " "
    write_string += "\n"
    fp.write(write_string)
    res.clear()
    sorted_x.clear()
    fp.truncate()
fp.close()
| true |
ede40b126fd6d5f1e296833a82a10819f0e425e7 | Python | IsCoelacanth/NeurIPS-2018-Adversarial-Vision-Challenge-Targeted-Attack | /localmainBA.py | UTF-8 | 2,305 | 2.578125 | 3 | [
"MIT"
] | permissive | import numpy as np
from foolbox2.criteria import TargetClass
from foolbox2.attacks.boundary_attack import BoundaryAttack
# from adversarial_vision_challenge import load_model
# from adversarial_vision_challenge import read_images
# from adversarial_vision_challenge import store_adversarial
# from adversarial_vision_challenge import attack_complete
from fmodel import create_fmodel as create_fmodel_18
from tiny_imagenet_loader import TinyImageNetLoader
import os, csv
from scipy.misc import imread, imsave
import PIL.Image
def run_attack(loader, model, image, target_class):
    """Run a targeted boundary attack for one image; return the adversarial
    image or None when no starting point of the target class is found."""
    assert image.dtype == np.float32
    assert image.min() >= 0
    assert image.max() <= 255
    starting_point, calls, is_adv = loader.get_target_class_image(
        target_class, model)
    if not is_adv:
        print('could not find a starting point')
        return None
    original_label = model(image)
    # Budget the remaining model calls (1000 total) across iterations,
    # with 10 directions per step at roughly double cost each.
    iterations = (1000 - calls - 1) // 10 // 2
    attack = BoundaryAttack(model, TargetClass(target_class))
    return attack(image, original_label, iterations=iterations,
                  max_directions=10, tune_batch_size=False,
                  starting_point=starting_point)
def read_images():
    """Yield (file_name, float32 RGB array, target_class) triples listed in
    flower/target_class.csv, resolved relative to this script."""
    data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "flower")
    with open(os.path.join(data_dir, "target_class.csv")) as csvfile:
        for row in csv.reader(csvfile):
            img = PIL.Image.open(os.path.join(data_dir, row[1])).convert("RGB")
            yield (row[0], np.array(img).astype(np.float32), int(row[2]))
def attack_complete():
    # No-op stand-in for the challenge API's attack_complete(); the real
    # implementation signals the grader that the attack run is finished.
    pass
def store_adversarial(file_name, adversarial):
    # Save the adversarial image as a PNG into ./output next to this script.
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; this only
    # runs on older SciPy (or needs imageio.imwrite as a replacement).
    out_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
    imsave(os.path.join(out_dir, file_name + ".png"), adversarial, format="png")
def load_model():
    # Thin wrapper: build the ResNet-18 foolbox model provided by fmodel.
    return create_fmodel_18()
def main():
    # Attack every (image, target) pair from flower/target_class.csv and
    # store each resulting adversarial image in ./output.
    loader = TinyImageNetLoader()
    model = load_model()
    for (file_name, image, label) in read_images():
        adversarial = run_attack(loader, model, image, label)
        store_adversarial(file_name, adversarial)
    # Announce that the attack is complete
    # NOTE: In the absence of this call, your submission will timeout
    # while being graded.
    attack_complete()
if __name__ == '__main__':
    main()
| true |
9b73f1106f5267c2476af8f07eaa15518803b051 | Python | slancast/Immuno | /most_abundant_CDR3_w_functions.py | UTF-8 | 3,130 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
#slancast@scg4.stanford.edu:/srv/gsfs0/projects/snyder/slancast/repertoire/
#I want this program to find the change from 1 to 2 and then from 2 to 3 to see if more antibodies stick around from 2 to 3.
import sys
import numpy as np
from collections import Counter
from repertoire_counting import openfile,mostcommonrep
from repertoire_counting import mostcommon_vdj_in_CDR3
from repertoire_counting import overlap_rep
print("###################")
print("###################")
print("###################")
print(sys.argv[4])
print("###################")
print("###################")
print("###################")
##################################################################################
# sys.argv[1] should be the timepoint that you want to count the CDR3 sequences
##################################################################################
split = openfile(sys.argv[1])
##################################################################################
# creating lists of total CDR and VD recombinations, and then finding the most common members
##################################################################################
most_common = mostcommonrep(split)
##################################################################################
# finding the most common vdj sequences in the CDR3 sequences.
##################################################################################
mc_vdj_output = mostcommon_vdj_in_CDR3(split,most_common[0])
#Don't need to save for now
#output = np.array(output)
#np.savetxt("./vdj_most_common_CDR3.csv", output, delimiter=",", fmt="%s")
##################################################################################
# opening T2 to find the overlap in CDR3 and VDJ sequences
##################################################################################
split = openfile(sys.argv[2])
overlap_rep(mc_vdj_output, split, most_common[0], most_common[1])
##########################################
# now this is going to redo it from 2 to 3
##########################################
##################################################################################
# creating lists of total CDR and VD recombinations, and then finding the most common members
##################################################################################
most_common = mostcommonrep(split)
##################################################################################
# finding the most common vdj sequences in the CDR3 sequences.
##################################################################################
mc_vdj_output = mostcommon_vdj_in_CDR3(split,most_common[0])
##################################################################################
# opening T3 to find the overlap in CDR3 and VDJ sequences
##################################################################################
split = openfile(sys.argv[3])
#In the second timepoint finding the sequences that correspond to each of the most common vdj sequence
#for each of the most common CDR3 sequences.
overlap_rep(mc_vdj_output, split, most_common[0], most_common[1]) | true |
822b8bde8eee33e4866d5e6d3a477bef3765f15f | Python | just-lana-l/module12 | /task_8.py | UTF-8 | 315 | 4.34375 | 4 | [] | no_license | print('Задача 8. НОД')
# Task: write a function that computes the greatest common divisor of two numbers.
x = int(input('x: '))
y = int(input('y: '))
def nod(x, y):
    """Print and return the greatest common divisor of x and y.

    Uses the subtraction-free (modulo) form of Euclid's algorithm: when the
    loop ends one of the two is 0, so x + y is the surviving GCD.
    The return value is new but backward-compatible (was implicitly None).
    """
    while x != 0 and y != 0:
        if x > y:
            x = x % y
        else:
            y = y % x
    gcd = x + y
    print(gcd)
    return gcd
nod(x, y) | true |
770abbab22d387831532215c4688cea93b719497 | Python | ionut23carp/Training | /PythonAdvance/python-mar2020/exercises_solutions/string_format_exercise.py | UTF-8 | 779 | 3.15625 | 3 | [] | no_license | persons = [
{'first_name': 'John', 'last_name': 'Cornwell', 'net_worth': 2632.345},
{'first_name': 'Emily', 'last_name': 'Alton', 'net_worth': -4578.234},
{'first_name': 'James', 'last_name': 'Bond', 'net_worth': 1000.07},
]
print('-' * 35)
# One table row per person: last name left-aligned in 15 columns, first
# initial, then net worth right-aligned with an explicit sign and 2 decimals.
for p in persons:
    print(f'| {p["last_name"]:<15} {p["first_name"]:.1}. | {p["net_worth"]:>+10.2f} |')
print('-' * 35)
| true |
98c2e3ced3db1add653ec2a5cd9ba8c95c39c991 | Python | yuseungwoo/baekjoon | /1371.py | UTF-8 | 420 | 3.109375 | 3 | [] | no_license | # coding: utf-8
import sys
# Frequency table for every lowercase letter a-z.
dic = {chr(x):0 for x in range(97, 123)}
# Read all of stdin as one string and drop the newlines.
s = sys.stdin.readlines()
s = ''.join(s)
s = s.replace('\n', '')
# Count only lowercase letters; every other character is ignored.
for char in s:
    if char in dic:
        dic[char] += 1
# Pair each letter with its count so we can sort by frequency.
temp = []
for key in dic.keys():
    temp.append((key, dic[key]))
temp = sorted(temp, key=lambda x: x[1], reverse=True)
# ans is the highest frequency; print every letter that reaches it,
# in alphabetical order, joined into one string.
ans = temp[0][1]
result = [x[0] for x in temp if x[1] == ans]
result.sort()
print(''.join(result))
| true |
0f392192d2acc07e432e61261d4e71338efc4917 | Python | Terorras/pystuff | /prac20.py | UTF-8 | 478 | 3.25 | 3 | [] | no_license | #for i in range(1, 101):
# Python 2 practice script (`print` statement syntax).
# Earlier exercises kept for reference:
#    print i*'*', i*'#'
#for i in range(1, 501):
#    if i % 3 == 0 and i % 5 == 0:
#        print 'fizzbuzz'
#    elif i % 3 == 0:
#        print 'fizz'
#    elif i % 5 == 0:
#        print 'buzz'
#    else:
#        print i
# Print the first 100 multiples of 21, right-aligned by digit count.
# NOTE(review): i*21 never exceeds 2100, so the >= 10000 branches are
# unreachable; string formatting ('%6d') would do this in one line.
for i in range(1, 101):
    if i * 21 < 100:
        print '    ', i * 21
    elif i * 21 < 1000:
        print '   ', i * 21
    elif i * 21 < 10000:
        print '  ', i * 21
    elif i * 21 < 100000:
        print '', i * 21
| true |
c26d5ea54fd3177c2e9d0d9ef2c081eb513bf327 | Python | csev/class2go | /main/courses/management/commands/exam_edit.py | UTF-8 | 5,438 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | from optparse import make_option
from textwrap import wrap
from collections import namedtuple
from pprint import pprint
import sys
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from c2g.models import Exam
class Command(BaseCommand):
    """
    Define the edit_exam management command: bulk updates of exam settings.
    """
    # Instantiate a dummy exam so we can inspect its instance attributes.
    testexam=Exam()
    # All public, non-callable Exam attributes: the editable column names.
    exam_attrs = [a for a in vars(testexam) \
                  if not callable(getattr(testexam,a)) \
                  and not a.startswith('_')]
    # Valid exam types, taken from the model's choice tuples.
    exam_types = [t[0] for t in Exam.EXAM_TYPE_CHOICES]

    help = "Make bulk exam changes. With the -u option update the database. " \
           " PLEASE BE CAREFUL." \
           "\n\nSelect which exams to change with one or more of " \
           "-e, -c, and -t. At least one of -e or -c must be used." \
           "\n\nThe list of Exam columns are:\n%s" % "\n".join(wrap(", ".join(sorted(exam_attrs))))

    option_list = (
        # Select
        make_option("-e", "--examids", dest="exam_ids", type="string",
                    help="Select by comma-separated list of exam ID's"),
        make_option("-c", "--courseid", dest="course_id", type="int",
                    help="Select by course. If only this option is chosen, all exams " \
                         "for that course will be selected."),
        make_option("-t", "--type", dest="examtype", type="string",
                    help="Select by type, valid values are: %s" \
                          % ", ".join(sorted(exam_types))),
        # Change
        make_option("-s", "--set", action="append", dest="setlist",
                    default=[], metavar="NAME=\"VAL\"",
                    help="Set this to that for every exam that matches your search. " \
                         "Specify this multiple times to update multiple columns. " \
                         "The quotes around the value are optional."),
        # Do It!
        make_option("-u", "--update", action="store_false", dest="dryrun", default=True,
                    help="actually update database (default is dry run)."),
        ) + BaseCommand.option_list
def validate_selector(self, options):
"""
Make sure we have a valid set of things to select on, and if we do,
return a named tuple like this:
Selector(exam_ids=[840, 841], course_id=11, type='survey')
"""
if not (options['exam_ids'] or options['course_id']):
raise CommandError("At least one of exam_ids (-e) or course_id (-c) is required.")
Selector = namedtuple('Selector', 'exam_ids, course_id, examtype')
result_exam_id_list = []
if options['exam_ids']:
exid_strings = options['exam_ids'].split(',')
for exid_str in exid_strings:
errstr = None
try:
exid = int(exid_str)
if exid == 0:
errstr = "exam id \"%s\" is invalid"
except ValueError as e:
errstr = e
if errstr:
raiseCommandError("Exam ID parsing error, %s" % errstr)
result_exam_id_list.append(exid)
if options['examtype']:
if options['examtype'] not in self.exam_types:
raise CommandError("Invalid exam type \"%s\" given, allowed types are: %s"
% (options['examtype'], ", ".join(sorted(self.exam_types))))
return Selector(exam_ids=result_exam_id_list,
course_id=options['course_id'],
examtype=options['examtype'])
def validate_setters(self, options):
"""
Decide what we're going to set for each of the exams we select. Returns
a dict with columns and settings for each.
"""
resultdict = {}
if not options['setlist']:
raise CommandError("you must specify at least one set (-s) command")
for cmd in options['setlist']:
splitcmd = cmd.split('=')
if len(splitcmd) != 2:
raise CommandError("cannot parse \"%s\", commands must be of the form NAME=VAL"
% cmd)
(name, val) = splitcmd
if name not in self.exam_attrs:
raise CommandError("value \"%s\" isn't a valid property of Exam, valid values are %s"
% (splitcmd[0], self.exam_attrs))
resultdict[name] = val
return resultdict
def handle(self, *args, **options):
"""The actual exam_edit command"""
selector = self.validate_selector(options)
pprint(selector)
setter_dict = self.validate_setters(options)
sys.stdout.write("Setters = ")
pprint(setter_dict)
exams = Exam.objects.all()
if selector.course_id:
exams = exams.filter(course=selector.course_id)
if selector.exam_ids:
exams = exams.filter(id__in=selector.exam_ids)
if selector.examtype:
exams = exams.filter(exam_type=selector.examtype)
if options['dryrun']:
matches = len(exams)
print "dryrun matches = %d" % matches
else:
updates = exams.update(**setter_dict)
print "updated = %d" % updates
for exam in exams:
sys.stdout.write("%d: " % exam.id)
pprint(exam)
| true |
e008de4c9f0ae4f36cd608c9058d4ac3432ee210 | Python | shuvamoy1983/PythonWebProgramming | /Chap1.py | UTF-8 | 2,552 | 3.046875 | 3 | [] | no_license | import urllib.request
from urllib.error import URLError,HTTPError,ContentTooShortError
print("Step 1) Downloading a web page")
def download(url):
    """Download `url` and return the response body decoded as UTF-8."""
    # Fix: use a context manager so the HTTP response is always closed
    # (the original left the socket open until garbage collection).
    with urllib.request.urlopen(url) as response:
        return response.read().decode("utf-8")
print("______________________________________________________________")
print("Step 2) Downloading a web page but what happens if we have error")
print("from urllib.error, import URLError,HTTPError,ContentTooShortError")
print("Exception handiling if ")
def download_Error_content(url):
    """Download `url`; return the decoded body, or None on a download error."""
    print("Downloading Started")
    try:
        # Fix: context manager closes the response even if read()/decode() fails.
        with urllib.request.urlopen(url) as response:
            data = response.read().decode("utf-8")
    except (URLError, HTTPError, ContentTooShortError) as e:
        print("Download Error", e.reason)
        data = None
    return data
print("______________________________________________________________")
print("Step 3) Downloading a web page but what happens if we have error")
print("This below function is for retrying to download data from Web")
def retrying_downloads(url, attempt):
    """Download `url`, retrying up to `attempt` more times on HTTP 5xx errors.

    Returns the decoded body, or None if the download ultimately failed.
    """
    print("Downloading:")
    try:
        # Fix: close the response deterministically via a context manager.
        with urllib.request.urlopen(url) as response:
            dataset = response.read().decode("utf-8")
    except (URLError, HTTPError, ContentTooShortError) as e:
        print("download Error", e.reason)
        dataset = None
        # Server-side (5xx) errors are transient, so retry while attempts remain.
        if attempt > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                return retrying_downloads(url, attempt - 1)
    return dataset
print("______________________________________________________________")
print("Step 4) Setting an user Agent")
def setting_an_user_agent(url, user_agent, attempt):
    """Download `url` sending a custom User-Agent header, retrying on HTTP 5xx.

    Returns the decoded body, or None if the download ultimately failed.
    """
    print("downloading", url)
    request = urllib.request.Request(url)
    # Fix: the header name is 'User-agent' -- the original 'User_agent'
    # (underscore) is not the HTTP User-Agent header and never overrode
    # urllib's default agent string.
    request.add_header('User-agent', user_agent)
    try:
        # Fixes: open the Request object (the original opened the bare URL,
        # so the custom header was never sent); decode with the valid codec
        # name "utf-8" (was "utf=8", a LookupError on every successful
        # download); close the response via a context manager.
        with urllib.request.urlopen(request) as response:
            load = response.read().decode("utf-8")
    except (URLError, HTTPError, ContentTooShortError) as e:
        print("Download Error", e.reason)
        load = None
        if attempt > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # recursive retry with one fewer attempt remaining
                return setting_an_user_agent(url, user_agent, attempt - 1)
    return load
if __name__ == '__main__':
    # Demo driver: comment/uncomment the call you want to exercise.
    # NOTE: every call hits a live external URL, so output depends on the
    # network and on those services still being up.
    rslt = download("http://carlofontanos.com/api/tutorials.php?data=all")
    #print(rslt)
    data1 = download_Error_content("http://httpstat.us/500")
    #print(data1)
    #final_data = retrying_downloads("http://httpstat.us/500",10)
    #print(final_data)
    data_with_agent = setting_an_user_agent("http://www.facebook.com","wswp",5)
    print(data_with_agent)
7678782ac065a40b49a6618fe954c17b78cb4c36 | Python | 218478/VariousMLTforOCR | /src/DatasetReader.py | UTF-8 | 11,838 | 2.953125 | 3 | [] | no_license | import argparse
import cv2
import keras
import os
import sys
import numpy as np
from keras import backend as K
from NearestNeighbor import NearestNeighbor
def image_invalid(image):
    """Return True when `image` contains any NaN or +/-Inf entries."""
    # isfinite is False exactly for NaN and the two infinities, so a single
    # pass covers both of the original checks.
    return np.logical_not(np.isfinite(image)).any()
def print_image_array(img):
    """Write `img` to stdout as rows of space-terminated integers."""
    for row in img:
        line = "".join("%d " % cell for cell in row)
        sys.stdout.write(line + "\n")
class DatasetReader:
    """Reads the Chars74K character dataset (62 classes: 0-9, A-Z, a-z),
    splits it into train/test sets and reshapes the arrays for k-NN, MLP or
    CNN models."""
    def set_filepaths(self, filepath, class_no):
        """
        Reads Chars74K dataset and stores a 2D array of filepaths: the first
        index is the class no (derived from the directory name) and the second
        one indexes the files of that class. The reader assumes that
        the images read have white background and black font.
        """
        dirs = os.listdir(filepath)
        print(("Read " + str(len(dirs)) + " classes"))
        self.classNo = class_no
        # [[]] * n aliases one empty list n times, but every populated slot is
        # re-assigned (never mutated in place) below, so the sharing is safe.
        filepaths = [[]] * self.classNo  # hard coded
        for root, dirs, files in os.walk(filepath):
            path = root.split(os.sep)
            filepaths_for_class = []
            for file in files:
                file = os.path.join(root, file)
                filepaths_for_class.append(file)
            # Fix: "!= 0" instead of "is not 0" -- identity comparison with an
            # int literal only works by accident of CPython's small-int cache
            # and is a SyntaxWarning from Python 3.8 on.
            if len(filepaths_for_class) != 0:
                # directory names end with the 1-based class number
                current_class = int(path[-1][-3:]) - 1  # because the number is starting from 1
                filepaths[current_class] = filepaths_for_class
        self.filepaths = filepaths
        self.create_readable_labels()
    def create_readable_labels(self):
        """
        Maps each class number to its human-readable character:
        0-9 -> '0'..'9', 10-35 -> 'A'..'Z', 36-61 -> 'a'..'z'.
        Specific to the Chars74K dataset (assumes self.classNo == 62).
        """
        self.readableLabels = [[]] * self.classNo
        for i in range(0, 10):
            self.readableLabels[i] = str(i)
        for i in range(65, 91):
            self.readableLabels[i - 55] = chr(i)
        for i in range(97, 123):
            self.readableLabels[i - 61] = chr(i)
    def load_images_into_memory(self, train_to_test_proportion, maxsize):
        """
        Reads the images into self.train_set / self.test_set (plus labels).
        train_to_test_proportion must be between (0; 1.0): the fraction of
        each class used for training; the remainder becomes the test set.
        Color inversion happens on the fly and every image is resized to
        maxsize (rows, cols). Prints a one-character-per-class progress bar.
        """
        counts = np.empty((len(self.filepaths), 1))
        for idx, val in enumerate(self.filepaths):  # counting images in every class
            counts[idx] = len(val)  # TODO: use this value in the for loop below and use list comprehension
        print(("Filenames array size: " + str(
            (sys.getsizeof(self.filepaths[0]) + sys.getsizeof(self.filepaths[1])) * self.classNo / 1024) + " kB"))
        print("len(self.filepaths[1]) = " + str(len(self.filepaths[1])))
        print("len(self.filepaths) = " + str(len(self.filepaths)))
        print(("Read: " + str(len(self.filepaths[1]) * len(self.filepaths))))
        print("Reading images into memory")
        print(("I have %d classes" % self.classNo))
        toolbar_width = self.classNo - 1
        self.trainCountPerClass = np.ceil(counts * train_to_test_proportion).astype(int)
        self.testCountPerClass = (counts - self.trainCountPerClass).astype(int)
        self.train_set = np.empty((sum(self.trainCountPerClass)[0], maxsize[0], maxsize[1]))
        self.train_labels = np.empty((sum(self.trainCountPerClass)[0]))
        self.test_set = np.empty((sum(self.testCountPerClass)[0], maxsize[0], maxsize[1]))
        self.test_labels = np.empty((sum(self.testCountPerClass)[0]))
        print(("Shape of trainDataset before reading: " + str(self.train_set.shape)))
        print(("Shape of testDataset before reading: " + str(self.test_set.shape)))
        sys.stdout.write("[%s]" % (" " * toolbar_width))
        sys.stdout.flush()
        sys.stdout.write("\b" * (toolbar_width + 1))  # return to start of line, after '['
        # NOTE(review): range(0, classNo - 1) skips the last class entirely,
        # leaving the tail of the np.empty() arrays uninitialised -- this looks
        # like an off-by-one (the progress-bar width is derived the same way);
        # confirm before changing.
        for imgClass in range(0, self.classNo - 1):
            for idx, filepath in enumerate(self.filepaths[imgClass]):
                image = cv2.imread(filepath, flags=cv2.IMREAD_GRAYSCALE)
                # IMPORTANT!!! EXPECTING BLACK FONT WITH WHITE BACKGROUND
                _, image = cv2.threshold(image, 150, 255, cv2.THRESH_BINARY_INV)
                # cv2.resize takes (width, height) == (cols, rows)
                image = cv2.resize(image, (maxsize[1], maxsize[0]), interpolation=cv2.INTER_AREA)
                image = np.array(image)
                if image_invalid(image):
                    # NOTE(review): invalid (NaN/Inf) images are detected here
                    # but silently kept.
                    pass
                # NOTE: the destination index assumes every class holds the
                # same number of train/test images.
                if idx < self.trainCountPerClass[imgClass]:
                    self.train_set[imgClass * self.trainCountPerClass[imgClass] + idx] = image
                    self.train_labels[imgClass * self.trainCountPerClass[imgClass] + idx] = imgClass
                else:
                    self.test_set[
                        imgClass * self.testCountPerClass[imgClass] + idx - self.trainCountPerClass[imgClass]] = image
                    self.test_labels[imgClass * self.testCountPerClass[imgClass] + idx - self.trainCountPerClass[
                        imgClass]] = imgClass
                # (the original's "idx += 1" was removed: enumerate() rebinds
                # idx on every iteration, so the increment had no effect)
            sys.stdout.write("-")
            sys.stdout.flush()
        sys.stdout.write("\n")
        print(("Shape of read trainDataset: " + str(self.train_set.shape)))
        print(("Shape of read testDataset: " + str(self.test_set.shape)))
    def save_array_to_file(self, outfile):
        """Save the four train/test arrays into a single .npy file."""
        # NOTE(review): the four arrays have different shapes, so this builds
        # an object array; loading it back needs allow_pickle=True on
        # NumPy >= 1.16.3 -- confirm against the NumPy version in use.
        for_saving = np.array((self.train_labels, self.train_set, self.test_labels, self.test_set))
        outfile = open(outfile, 'wb')
        np.save(outfile, for_saving)
        outfile.close()
    def load_arrays_from_file(self, infile):
        """Restore the four train/test arrays written by save_array_to_file."""
        infile = open(infile, 'rb')
        self.train_labels, self.train_set, self.test_labels, self.test_set = np.load(infile)
        print("Loaded")
        print(("Length of training set: " + str(len(self.train_set))))
        print(("Length of test set: " + str(len(self.test_set))))
    def reshape_data_for_mlp(self, maxsize):
        """Flatten images to vectors, scale to [0, 1] and one-hot the labels."""
        self.train_set = self.train_set.reshape(len(self.train_set), maxsize[0] * maxsize[1])
        self.test_set = self.test_set.reshape(len(self.test_set), maxsize[0] * maxsize[1])
        self.train_set = self.train_set.astype('float32')
        self.test_set = self.test_set.astype('float32')
        self.train_set /= 255
        self.test_set /= 255
        self.train_labels = keras.utils.to_categorical(self.train_labels, self.classNo)
        self.test_labels = keras.utils.to_categorical(self.test_labels, self.classNo)
    def reshape_data_for_knn(self, maxsize):
        """Flatten images to float32 vectors and labels to (n, 1) columns."""
        img_rows, img_cols = maxsize
        self.train_set = self.train_set.reshape(self.train_set.shape[0], img_rows * img_cols)
        self.test_set = self.test_set.reshape(self.test_set.shape[0], img_rows * img_cols)
        self.train_set = self.train_set.astype(np.float32)
        self.test_set = self.test_set.astype(np.float32)
        self.train_labels = self.train_labels.astype(np.float32)
        self.test_labels = self.test_labels.astype(np.float32)
        self.train_labels = self.train_labels.reshape(self.train_labels.shape[0], 1)
        self.test_labels = self.test_labels.reshape(self.test_labels.shape[0], 1)
        print("self.train_labels[0] = " + str(self.train_labels[0]))
        print(('self.train_labels shape:', self.train_labels.shape))
        print(('self.test_labels shape:', self.test_labels.shape))
        print(('self.train_set shape:', self.train_set.shape))
        print((self.train_set.shape[0], 'train samples'))
        print(('self.test_set shape:', self.test_set.shape))
        print((self.test_set.shape[0], 'test samples'))
    def reshape_data_for_cnn(self, maxsize):
        """Add the channel axis (ordering per Keras backend), scale to [0, 1]
        and one-hot the labels."""
        img_rows, img_cols = maxsize
        if K.image_data_format() == 'channels_first':
            self.train_set = self.train_set.reshape(self.train_set.shape[0], 1, img_rows, img_cols)
            self.test_set = self.test_set.reshape(self.test_set.shape[0], 1, img_rows, img_cols)
        else:
            self.train_set = self.train_set.reshape(self.train_set.shape[0], img_rows, img_cols, 1)
            self.test_set = self.test_set.reshape(self.test_set.shape[0], img_rows, img_cols, 1)
        self.train_set = self.train_set.astype('float32')
        self.test_set = self.test_set.astype('float32')
        print_image_array(self.test_set[0])
        self.train_set /= 255
        self.test_set /= 255
        print(('self.train_set shape:', self.train_set.shape))
        print((self.train_set.shape[0], 'train samples'))
        print(('self.test_set shape:', self.test_set.shape))
        print((self.test_set.shape[0], 'test samples'))
        self.train_labels = keras.utils.to_categorical(self.train_labels, self.classNo)
        self.test_labels = keras.utils.to_categorical(self.test_labels, self.classNo)
    def test_letter_from_test_set_nn(self, model, n):
        """Show test image n and compare the NN model's prediction to the
        one-hot label (labels must already be categorical)."""
        image = self.test_set[n]
        cv2.imshow("test", image)
        cv2.waitKey()
        cv2.destroyAllWindows()
        values = model.predict(image, using_training_set=True)
        print("Predicted: " + str(values))
        print("Should be: " + str((self.test_labels[n].argmax())))
    def test_letter_from_test_set_knn(self, model, n):
        """Compare the k-NN model's prediction for test image n with its
        label, then display the (upscaled) image."""
        image = self.test_set[n]
        values = model.predict(image, using_training_set=True)
        print("Predicted: " + str(values))
        print("Should be: " + str(self.test_labels[n]))
        image = cv2.resize(image, (64, 64))
        cv2.imshow("test", image)
        cv2.waitKey()
        cv2.destroyAllWindows()
def main(filepath):
    """Demo driver: load the Chars74K data below `filepath`, train an 11-NN
    model (k selected by the commented-out grid search) and spot-check it on
    a few test images."""
    # batch_size/epochs are only used by the commented-out Keras models below.
    batch_size = 128
    epochs = 5
    maxsize = (64, 64)
    class_no = 62
    r = DatasetReader()
    r.set_filepaths(filepath, class_no)
    # 90% of each class for training, the rest for testing
    r.load_images_into_memory(0.9, maxsize)
    outfile = "temp_to_save_np_array.temp"
    r.save_array_to_file(outfile)
    # r.load_arrays_from_file(outfile)
    r.reshape_data_for_knn(maxsize)
    # r.reshape_data_for_cnn(maxsize)
    # r.reshape_data_for_mlp(maxsize)
    # Grid search over k that selected k=11, kept for reference:
    # bestK = 1
    # prevAcc = 0
    # for k in range(1,14):
    #     model = NearestNeighbor(maxsize, k)
    #     model.train(r.train_set, r.train_labels)
    #     acc = model.accuracy(r.test_set, r.test_labels)
    #     if acc > prevAcc:
    #         bestK = k
    #         prevAcc = acc
    # print("best acc (" + str(prevAcc) + ") for " + str(bestK) + " neighbors")
    # best acc (91.45161290322581) for 11 neighbors
    model = NearestNeighbor(maxsize, 11)
    model.train(r.train_set, r.train_labels)
    # cache the training data on disk and show what was written
    np.savez('knn_data.npz', train_set=r.train_set, train_labels=r.train_labels)
    with np.load('knn_data.npz') as data:
        print(data.files)
    # model = CNN(maxsize, class_no,"cnn_model_for_my_dataset_64x64_2.h5")
    # model = MLP(maxsize, class_no)#, "mlp_model_for_my_dataset.h5")
    # model.fit(r.train_set, r.test_set, r.train_labels, r.test_labels, batch_size, epochs)
    # model.saveKerasModel("cnn_model_for_my_dataset_64x64_2.h5")
    r.test_letter_from_test_set_knn(model, 281)
    r.test_letter_from_test_set_knn(model, 28)
    r.test_letter_from_test_set_knn(model, 57)
if __name__ == '__main__':
    # Parse the dataset directory from the command line and run the demo.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("pathToDatasets", help="Directory to stored datasets")
    cli_args = arg_parser.parse_args()
    main(cli_args.pathToDatasets)
| true |
839c261c9f05a44c488376443d9bd0b45c5dbd47 | Python | fei-zhang-bb/Lessons | /Machine_Learning_and_Parallel_Computing/FINAL_MLP_MAXMIN/mlp.py | UTF-8 | 4,124 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | import tensorflow as tf
import numpy as np
import scipy.io as scio
import math as ma
from tensorflow.contrib import layers
import time
def next_batch(data, label, batch_size):
    """Sample a random mini-batch (without replacement) from data/label.

    Fix: index over len(data) instead of the hard-coded 7293 (the row count
    of one particular training set), so the helper works for any dataset
    size instead of crashing or silently sampling a fixed prefix.
    """
    index = np.arange(len(data))
    np.random.shuffle(index)
    index = index[0:batch_size]
    batch_data = data[index]
    batch_label = label[index]
    return batch_data, batch_label
def onehot(labels, units):
    """Turn a column of integer class ids (shape (n, 1)) into one-hot rows
    of width `units`."""
    encoded = np.zeros([len(labels), units])
    for row, lab in enumerate(labels):
        encoded[row][lab[0]] = 1
    return encoded
def normalize(data, base):
    """Rescale each row of `data` in place so that, per column, the min/max
    range observed in `base` maps onto [-1, 1]."""
    base = np.array(base)
    col_min = np.array([min(base[:, i]) for i in range(len(base[0]))])
    col_max = np.array([max(base[:, i]) for i in range(len(base[0]))])
    # column centres and half-ranges of the reference data
    center = (col_max + col_min) * 1.0 / 2
    half_range = (col_max - col_min) * 1.0 / 2
    for i in range(len(data)):
        data[i] = (np.array(data[i]) - center) / half_range
def main():
    """Train a one-hidden-layer MLP (310 -> 40 -> 4, sigmoid + softmax) on
    EEG features from ./data.mat and log loss, accuracies and a 4x4
    confusion table into time-stamped text files."""
    # One log file per metric, named after the current wall-clock time.
    # NOTE(review): none of these handles (nor the TF session) is ever
    # closed explicitly.
    output_file = time.strftime("%H-%M-%S",time.localtime()) + '.txt'
    loss_out = open('./loss_' + output_file, 'w')
    acc_train_out = open('./acc_train_' + output_file, 'w')
    acc_test_out = open('./acc_test_' + output_file, 'w')
    pred_out = open('./predict_' + output_file, 'w')
    data_file = './data.mat'
    data = scio.loadmat(data_file)
    sess = tf.InteractiveSession()
    # network and training hyper-parameters
    in_units = 310
    h1_units = 40
    out_units = 4
    learning_rate = 0.0001
    regular_ratio = 0.9
    batch_num = 300
    batch_size = 100
    train_data = data['train_de']
    train_label = onehot(data['train_label_eeg'], out_units)
    test_data = data['test_de']
    test_label = onehot(data['test_label_eeg'], out_units)
    # NOTE(review): both splits are scaled with the TEST set's min/max
    # statistics -- confirm this is intentional (train stats are more usual).
    normalize(train_data, test_data)
    normalize(test_data, test_data)
    len_train_data = len(train_data)
    len_test_data = len(test_data)
    # weights/biases for the hidden and output layers
    W1 = tf.Variable(tf.truncated_normal([in_units, h1_units], stddev=0.1))
    b1 = tf.Variable(tf.zeros([h1_units]))
    W5 = tf.Variable(tf.truncated_normal([h1_units, out_units], stddev=0.1))
    b5 = tf.Variable(tf.zeros([out_units]))
    x = tf.placeholder(tf.float32, [None, in_units])
    hidden1 = tf.nn.sigmoid(tf.matmul(x,W1) + b1)
    y = tf.nn.softmax(tf.matmul(hidden1, W5) + b5)
    y_ = tf.placeholder(tf.float32, [None, out_units])
    # L2 penalty on both weight matrices
    regular = layers.l2_regularizer(.5)(W1) + layers.l2_regularizer(.5)(W5) #+ layers.l2_regularizer(.5)(W5)
    # cross-entropy loss plus the weighted regulariser
    loss = -tf.reduce_sum(y_ * tf.log(y)) + regular_ratio * regular
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    tf.global_variables_initializer().run()
    begin = time.time()
    # 200 epochs of batch_num mini-batch SGD steps each
    for j in range(0,200):
        for i in range(0, batch_num):
            batch_x, batch_y = next_batch(train_data, train_label, batch_size)
            train_step.run({x:batch_x, y_:batch_y})
        print('Iter:' + str(j))
        total_cross_entropy = sess.run(loss, feed_dict={x:train_data, y_:train_label})
        print('loss: ' + str(total_cross_entropy))
        loss_out.write(str(total_cross_entropy) + '\n')
        correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
        result = tf.argmax(y,1)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        train_accur = accuracy.eval({x:train_data, y_:train_label})
        test_accur = accuracy.eval({x:test_data, y_:test_label})
        print(train_accur)
        print(test_accur)
        acc_train_out.write(str(train_accur) + '\n')
        acc_test_out.write(str(test_accur) + '\n')
    end = time.time()
    print((end - begin))
    prediction = (sess.run(result, feed_dict = {x:test_data}))
    # Feeding the one-hot labels straight into y makes argmax(y) recover the
    # true class indices.
    real = (sess.run(result, feed_dict = {y:test_label}))
    print(prediction)
    print(real)
    # 4x4 confusion counts: rows = true class, columns = predicted class
    prediction_static = []
    for i in range(4):
        prediction_static += [[0,0,0,0]]
    for i in range(0,len(real)):
        prediction_static[real[i]][prediction[i]] += 1
    for i in range(4):
        print(prediction_static[i])
    pred_out.write(str(prediction_static))
main()
| true |
8cf3abcc32891e224ecaf2f226bb18158e9fa5c5 | Python | snowood1/SPN-project | /RandomGrowth.py | UTF-8 | 1,510 | 3.015625 | 3 | [] | no_license | import numpy as np
import random
from SumProductNets import *
class RandomGenerator(object):
    """Randomly grows a sum-product network over `num_feature` features.

    NOTE(review): this class looks unfinished; several apparent defects are
    flagged inline but deliberately not fixed, since the intended structure
    cannot be inferred from this file alone.
    """
    def __init__(self, num_feature, rv_list):
        # feature indices 0..num_feature-1 and their random variables
        self.features = list(range(num_feature))
        self.rv_list = rv_list
    def generate(self):
        # NOTE(review): self._create_node is not defined anywhere in this
        # class, so calling generate() raises AttributeError.
        return self._create_node(self.features, True)
    def node_selection(self, node_list):
        # pick a uniformly random index into node_list
        return random.randint(0, len(node_list)-1)
    def create_prod_node(self, node_list):
        # NOTE(review): `children`/`temp_children` are built but never used,
        # the function returns on the first pass of the while loop, and
        # node_list is never drained -- the random sampling below has no
        # effect on the ProductNode that is returned.
        children = []
        while node_list != []:
            temp_children = []
            num_nodes = random.randint(2, 5)
            for i in range(num_nodes):
                if node_list == []:
                    break
                s = self.node_selection(node_list)
                temp_children.append(node_list[s])
            return ProductNode(node_list)
    def create_sum_node(self, node_list):
        # Build a SumNode over 2-5 ProductNode children, all sharing node_list.
        children = []
        num_nodes = random.randint(2,5)
        for i in range(num_nodes):
            children.append(ProductNode(node_list))
        return SumNode(children)
    def create_sub_tree(self, node_list):
        # NOTE(review): `temp_node_list` is collected but never passed on,
        # create_sum_node() is called without its required node_list argument
        # (TypeError at runtime), and the method never returns top_nodes.
        top_nodes = []
        while node_list != []:
            temp_node_list = []
            num_nodes = random.randint(8, 16)
            for i in range(num_nodes):
                if node_list == []:
                    break
                s = self.node_selection(node_list)
                temp_node_list.append(node_list[s])
                node_list.pop(s)
            top_nodes.append(self.create_sum_node())
| true |
08f9cdac0bdb4b159194c4bb5aaaf49b9a124a35 | Python | CmPunk96/LR4 | /LR4.py | UTF-8 | 2,835 | 2.953125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
# Silence pandas' SettingWithCopy warnings and widen console output so the
# DataFrame prints fit on one screen.
pd.options.mode.chained_assignment = None
pd.set_option('display.max_columns', 30)
pd.set_option('display.width', 1000)
# Google Play Store apps dataset (Kaggle CSV); must sit next to the script.
google_play = pd.read_csv('googleplaystore.csv', encoding='latin-1')
# Figure 1: bar chart of the most installed / best rated apps.
bar_graph_plot = plt.figure(figsize=(15, 8))
bar_graph = bar_graph_plot.add_subplot()
bar_graph.set_xlabel('название приложения')
bar_graph.set_ylabel('рейтинг')
bar_graph.set_title('топ приложения google play')
best_rating = google_play.sort_values('Rating', ascending=False).sort_values('Installs', ascending=False).head(7)
# [1:] skips the first row -- presumably a corrupt/outlier record; confirm.
labels = list(best_rating['App'])[1:]
labels_rating = list(best_rating['Rating'])[1:]
bar_graph.bar(labels, labels_rating)
print('Первый график показывает, что самым популярным приложением в google play является', labels[0])
# Figure 2: pie chart of the most frequent app categories.
pie_graph_plot = plt.figure(figsize=(15, 8))
pie_graph = pie_graph_plot.add_subplot()
pie_graph.set_title('самые популярные категории приложений')
popular_categories = dict(google_play.value_counts('Category').head(8))
pie_graph.pie(list(popular_categories.values()), labels=list(popular_categories.keys()), autopct='%1.1f%%')
print('Второй график показывает, что самыми популярными категориями являются Family, Game и Tools')
# Figure 3: pie chart of the most frequent content ratings.
pie_graph_content_rating_plot = plt.figure(figsize=(15, 8))
pie_graph_content_rating = pie_graph_content_rating_plot.add_subplot()
pie_graph_content_rating.set_title('самые популярные возрастные ограничения приложений')
popular_categories = dict(google_play.value_counts('Content Rating').head(4))
pie_graph_content_rating.pie(list(popular_categories.values()), labels=list(popular_categories.keys()),
                             autopct='%1.1f%%')
print('Третий график показывает, что самыми популярными возрастными ограничениями являются Everyone и Teen')
# Figure 4: horizontal bars comparing counts of free vs. paid apps.
barh_graph_plot = plt.figure(figsize=(15, 8))
barh_graph = barh_graph_plot.add_subplot()
barh_graph.set_xlabel('количество приложений')
barh_graph.set_ylabel('тип приложения(бесплатное/платное)')
barh_graph.set_title('количество платных и бесплатных популярных приложений в google play')
free_paid = dict(google_play.value_counts('Type'))
# [:-1] drops the last Type bucket -- presumably a malformed value; confirm.
barh_graph.barh(list(free_paid.keys())[:-1], list(free_paid.values())[:-1])
print('Четверный график показывает, что популярных бесплатных приложений гораздо больше, чем популярных платных')
plt.show()
| true |
bdd490856ee9c4c5bf02f3b1b77f62caaf8f8bc9 | Python | lihuacai168/python_learning | /TestView/quicksort.py | UTF-8 | 1,379 | 4.0625 | 4 | [] | no_license | def quick_sort(list,first,last):
    # Base case: a slice of length <= 1 is already sorted.
    if first >= last:
        return
    # low/high cursors into the slice
    low = first
    high = last
    # take the first element of the slice as the pivot
    mid_value = list[first]
    while low < high:
        # Move the high cursor one step left when its value is >= the pivot
        # (one step per outer-loop pass).
        if low < high and list[high] >= mid_value:
            high -= 1
        # Copy the smaller value into the low slot (a harmless
        # self-assignment once the cursors have met).
        list[low] = list[high]
        # Move the low cursor one step right when its value is < the pivot.
        if low < high and list[low] < mid_value:
            low += 1
        list[high] = list[low]
        # Park the pivot at the low cursor; it is overwritten on the next
        # pass if the cursors have not met yet. When the loop exits,
        # low == high is the pivot's final position.
        list[low] = mid_value
    # NOTE(review): the left recursion starts at 0 instead of `first`. The
    # result is still correct (everything left of `first` is already in its
    # final position), but it re-sorts the prefix; `quick_sort(list, first,
    # low - 1)` would avoid the wasted work. The parameter name also shadows
    # the builtin `list`.
    quick_sort(list,0,low-1)
    quick_sort(list,low+1,last)
# Quick sanity check: sort a small list in place and show the result.
sample = [4,7,1,-2,6,3,2,3,90,2,4]
quick_sort(sample,0,len(sample)-1)
print(sample)
3d8b4e1f9053bd7d54cd5f0b6c43107f079e1d53 | Python | axura/shadow_alice | /demo03.py | UTF-8 | 96 | 2.8125 | 3 | [] | no_license | #!/usr/bin/python
import sys
# Read all of standard input, then echo it three times.
# NOTE: Python 2 syntax ("print" statement); run with a Python 2 interpreter.
string = sys.stdin.read()
for i in range(0, 3):
    print string
| true |
0ddb4857badd13ea9c710024f08bcfb6ea7256f4 | Python | geethayedida/codeacademy_python | /multiplying.py | UTF-8 | 535 | 3.90625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 03 21:00:01 2016
@author: Geetha Yedida
"""
class Animal(object):
    """Makes cute animals."""
    # Class-level defaults shared by every instance.
    is_alive = True
    health = "good"
    def __init__(self, name, age):
        self.name = name
        self.age = age
    # Print the animal's name and age (Python 2 print statements).
    def description(self):
        print self.name
        print self.age
# Create a few Animal instances and exercise the class.
hippo = Animal("Hippopotamus", 98)
sloth = Animal("Sloth",3)
ocelot = Animal("Ocelot", 2)
hippo.description()
# `health` is a class attribute, so all three share the same default value.
print hippo.health
print sloth.health
print ocelot.health
ae429a9829aa93a26ce05b5a5ab1d95dd0c21b8f | Python | mariuszdrynda/kryptografia | /Lista1/zad1.py | UTF-8 | 2,611 | 3.875 | 4 | [] | no_license | from functools import reduce
import math
from random import randint
class prng_lcg:
    """Linear congruential generator: state -> (state * m + c) mod n."""
    def __init__(self, seed, m, c, n):
        self.state = seed  # current value (initially the seed)
        self.m = m         # multiplier
        self.c = c         # increment
        self.n = n         # modulus
    def next(self):
        """Advance the generator and return the next value in [0, n)."""
        self.state = (self.m * self.state + self.c) % self.n
        return self.state
# Known: multiplier m and modulus n; unknown: increment c.
# Two consecutive outputs give s1 = s0*m + c (mod n) => c = s1 - s0*m (mod n).
def crack_unknown_increment(states, modulus, multiplier):
    increment = (states[1] - multiplier * states[0]) % modulus
    return modulus, multiplier, increment
# Known: modulus n only; unknown: m and c.
# Consecutive differences satisfy
#   s_2 - s_1 = m*(s_1 - s_0) (mod n)  =>  m = (s_2 - s_1)/(s_1 - s_0) (mod n)
def crack_unknown_multiplier(states, modulus):
    step01 = states[1] - states[0]
    step12 = states[2] - states[1]
    multiplier = step12 * modinv(step01, modulus) % modulus
    return crack_unknown_increment(states, modulus, multiplier)
# Extended Euclidean algorithm.
def egcd(a, b):
    """Return (g, x, y) such that a*x + b*y == g == gcd(a, b)."""
    if a == 0:
        return (b, 0, 1)
    g, x1, y1 = egcd(b % a, a)
    # back-substitute the Bezout coefficients of the smaller subproblem
    return (g, y1 - (b // a) * x1, x1)
# Modular inverse: find x with (b * x) % n == gcd(b, n) % n, i.e. the
# multiplicative inverse of b mod n whenever b and n are coprime.
def modinv(b, n):
    g, inv, _ = egcd(b, n)
    return inv % n
# Nothing known. Combinations of consecutive output differences of an LCG,
# t2*t0 - t1*t1, are guaranteed multiples of the modulus; with high
# probability the gcd of a handful of them is exactly the modulus.
def crack_unknown_modulus(states):
    # differences of consecutive generated values
    deltas = [b - a for a, b in zip(states, states[1:])]
    n_multiples = [t2 * t0 - t1 * t1
                   for t0, t1, t2 in zip(deltas, deltas[1:], deltas[2:])]
    modulus = abs(reduce(math.gcd, n_multiples))
    return crack_unknown_multiplier(states, modulus)
# Original author's note (translated from Polish): beware how Python 3
# displays very large numbers -- endings may appear rounded when printed.
def test():
    """Crack a randomly-parameterised LCG and count successful predictions."""
    nosucc = 0
    # A prime modulus, so any nonzero difference (x_1 - x_0) is invertible mod m.
    modulus = 1000000007
    gen = prng_lcg(randint(1, modulus), randint(1, modulus), randint(1, modulus), modulus)
    samples = list()
    for _ in range(0, 10):
        samples.append(gen.next())
    print(samples)
    for _ in range(0, 10):
        s = gen.next()
        n, m, c = crack_unknown_modulus(samples)
        print("przewidziane\t", m, "\t", c, n, "\t")
        print("rzeczywiste\t", gen.m, "\t", gen.c, gen.n, "\t")
        gen2 = prng_lcg(s, m, c, n)
        # success when the cracked generator predicts the next real output
        if gen.next() == gen2.next(): nosucc += 1
    print("nosucc", nosucc)
test()
ace2446edd644493cc7468248e2b548e39e45b77 | Python | balujaashish/NOW_PDF_Reader | /test2.py | UTF-8 | 675 | 3.203125 | 3 | [] | no_license | from PDF_OCR_Reader.String_compare import String_Compare
def is_sublist(a, b):
    """Return True if sequence `a` occurs as a contiguous slice of `b`."""
    if not a:
        return True
    if not b:
        return False
    # either `a` is a prefix of `b`, or it appears somewhere in the tail
    return a == b[:len(a)] or is_sublist(a, b[1:])
def is_substring(str1, str2):
    """Return True if the token sequence of `str2` occurs contiguously inside
    the token sequence of `str1`.

    Both strings are stripped of punctuation and tokenised by the project's
    String_Compare helper; prepare_phrase presumably returns a list of
    words -- TODO confirm against String_Compare's implementation.
    """
    sc = String_Compare()
    str1 = sc.strip_punctuations(str1)
    w1 = sc.prepare_phrase(str1,0)
    str2 = sc.strip_punctuations(str2)
    w2 = sc.prepare_phrase(str2,0)
    # debug output of both token lists
    print(w1)
    print(w2)
    if is_sublist(w2, w1):
        return True
    else: return False
if __name__ == "__main__":
    # Smoke test: a noisy OCR extraction vs. the clean phrase it contains.
    str1 = 'hs Light Trucks\n(0 - 10,000 Lbs.\n\nG.V.W.)'
    str2 = 'Light Trucks (0 - 10,000 Lbs. G.V.W.)'
    # Fix: this module defines is_substring(); the original called the
    # undefined name str_compare_with_tokenize(), raising NameError.
    print(is_substring(str1, str2))
| true |
ef869593f924de2e89f6a1fec105c7867a053b78 | Python | kmkmkkball/iot | /SAKS-tutorials/nightlight/main.py | UTF-8 | 3,281 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 NXEZ.COM.
# http://www.nxez.com
#
# Licensed under the GNU General Public License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tutorials url: http://shumeipai.nxez.com/2015/10/24/saks-diy-tutorials-nightlight.html
__author__ = 'Spoony'
__license__ = 'Copyright (c) 2015 NXEZ.COM'
from sakshat import SAKSHAT
from sakspins import SAKSPins as PINS
import time
#Declare the SAKS Board
SAKS = SAKSHAT()
# current on/off state of the night-light
__light_status = False
# Callback: invoked by the SAKS board whenever the DIP-switch state changes.
def dip_switch_status_changed_handler(status):
    '''
    Called by the SAKS library while the status of the DIP switch changes.
    :param status: current status (indexable by DIP-switch position)
    :return: void
    '''
    global __light_status
    # Only react while the night-light is currently switched on.
    if __light_status:
        # DIP-switch bit 1 ON -> light the 3rd LED, otherwise switch it off.
        if status[0]:
            SAKS.ledrow.on_for_index(2)
        else:
            SAKS.ledrow.off_for_index(2)
        # DIP-switch bit 2 ON -> light the 4th LED, otherwise switch it off.
        if status[1]:
            SAKS.ledrow.on_for_index(3)
        else:
            SAKS.ledrow.off_for_index(3)
    print(status)
# Callback: invoked by the SAKS board whenever a tact (push) switch fires.
def tact_event_handler(pin, status):
    '''
    Called by the SAKS library while the status of a tact switch changes.
    :param pin: pin number whose tact-switch status changed
    :param status: current status (True on press)
    :return: void
    '''
    global __light_status
    # Only the right-hand tact switch, and only on press (not on release).
    if pin == PINS.TACT_RIGHT and status == True:
        # Light currently off -> turn LEDs 1-2 on and mirror the DIP switch
        # onto LEDs 3-4; light currently on -> turn everything off.
        if not __light_status:
            SAKS.ledrow.on_for_index(0)
            SAKS.ledrow.on_for_index(1)
            # DIP-switch bit 1 ON -> also light the 3rd LED
            if SAKS.dip_switch.is_on[0] == True:
                SAKS.ledrow.on_for_index(2)
            # DIP-switch bit 2 ON -> also light the 4th LED
            if SAKS.dip_switch.is_on[1] == True:
                SAKS.ledrow.on_for_index(3)
        else:
            SAKS.ledrow.off_for_index(0)
            SAKS.ledrow.off_for_index(1)
            SAKS.ledrow.off_for_index(2)
            SAKS.ledrow.off_for_index(3)
        __light_status = not __light_status
    print("%d - %s" % (pin, status))
if __name__ == "__main__":
    # Register the DIP-switch callback with the SAKS board.
    SAKS.dip_switch_status_changed_handler = dip_switch_status_changed_handler
    # Register the tact-switch callback.
    SAKS.tact_event_handler = tact_event_handler
    # Block until the user presses Enter (Python 2's raw_input).
    raw_input("Enter any keys to exit...")
| true |
4e9475e756744dfc74bdf54c15191d5450f7abc7 | Python | jasonmoofang/codelab | /camp0_reference/turtle.py | UTF-8 | 668 | 3.109375 | 3 | [] | no_license | from ch.aplu.turtle import *
# Draw a red square, a blue triangle, a green hexagon and three star shapes
# with one turtle; each figure starts where the previous one left off.
ann = Turtle()
ann.setPenColor("red")
# square: four sides with three explicit 90-degree turns
ann.forward(100)
ann.right(90)
ann.forward(100)
ann.right(90)
ann.forward(100)
ann.right(90)
ann.forward(100)
# triangle (120-degree exterior turns)
ann.setPenColor("blue")
ann.forward(100)
ann.right(120)
ann.forward(100)
ann.right(120)
ann.forward(100)
# hexagon (60-degree exterior turns)
ann.setPenColor("green")
for i in range(6):
    ann.forward(100)
    ann.right(60)
# five-pointed star (144-degree turns close after 5 strokes)
for i in range(5):
    ann.forward(100)
    ann.right(144)
# spiralling star: stroke length grows with every step
for i in range(20):
    ann.forward(i * 10)
    ann.right(144)
# intricate star: 123 degrees does not divide 360, giving a dense rosette
for i in range(50):
    ann.forward(100)
    ann.right(123)
78a556658eea5d03ebbdd76ecf3e7d2888f3860c | Python | rafa761/leetcode-coding-challenges | /2020 - august/008 - non-overlapping intervals.py | UTF-8 | 1,500 | 3.921875 | 4 | [] | no_license | """
Given a collection of intervals, find the minimum number of intervals you need to remove to make the rest of the intervals non-overlapping.
Example 1:
Input: [[1,2],[2,3],[3,4],[1,3]]
Output: 1
Explanation: [1,3] can be removed and the rest of intervals are non-overlapping.
Example 2:
Input: [[1,2],[1,2],[1,2]]
Output: 2
Explanation: You need to remove two [1,2] to make the rest of intervals non-overlapping.
Example 3:
Input: [[1,2],[2,3]]
Output: 0
Explanation: You don't need to remove any of the intervals since they're already non-overlapping.
Note:
You may assume the interval's end point is always bigger than its start point.
Intervals like [1,2] and [2,3] have borders "touching" but they don't overlap each other.
"""
from typing import List
class Solution:
    """Greedy strategy: sort by start and, whenever two intervals overlap,
    keep the one with the smaller right endpoint."""
    def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
        """Return the minimum number of intervals to remove so the rest are
        pairwise non-overlapping (touching borders like [1,2]/[2,3] do not
        count as an overlap). Sorts `intervals` in place."""
        if len(intervals) < 2:
            return 0
        intervals.sort()
        removed = 0
        kept_end = intervals[0][1]  # right endpoint of the last kept interval
        for start, end in intervals[1:]:
            if start < kept_end:
                # Overlap: drop one of the pair; keeping the smaller right
                # endpoint leaves the most room for later intervals.
                removed += 1
                kept_end = min(kept_end, end)
            else:
                kept_end = end
        return removed
# Smoke-test the solver against the LeetCode examples; the number in each
# label is the expected answer.
if __name__ == '__main__':
    solver = Solution()
    cases = [
        ('1', [[1, 2], [2, 3], [3, 4], [1, 3]]),
        ('2', [[1, 2], [1, 2], [1, 2]]),
        ('0', [[1, 2], [2, 3]]),
        ('2', [[1, 100], [11, 22], [1, 11], [2, 12]]),
    ]
    for expected, intervals in cases:
        print('-> {}: '.format(expected), solver.eraseOverlapIntervals(intervals))
| true |
e3f33f64f607dbab28e475652a619185deb8d652 | Python | evgenii-malov/interviews | /interview_21.08.2016/task1_next_biggest_number/next_biggest_number.py | UTF-8 | 2,191 | 3.546875 | 4 | [] | no_license | # coding: utf8
import sys
def nbn(n):
"""
Logic:
We must increase digit in minimum possible digit index (from right to left)
We can encrease it only by digits from right tail
It is inpossible to increase digits from left part,
because it make number smaller (smaller digit goes left)
So we need to start from [-1] index and move down to [0] index,
Mark this current position index as I.
At every iteration we must check if bigger digit present in
tail [I+1,-1] (right part from I index)
If such digit found we must exchange it with digit at I position
and sort tail in desc order.
Proof:
Where is no smaller digit index to increase
because we move from left possible index and check it increse possibility.
Time complexity:
n - steps to walk from last downto first (worst case)
n*log(n) - for comparsion sort (can be optimized via radix sort to O(N)
total: O(n+n*log(n)) time, with radix sort - O(n)!
SPACE complexity:
O(N)+O(9) for digit tail_set, so total O(N)
"""
if not n:
return 0
try:
n = int(n)
except ValueError:
return 0
finally:
n = list(str(n))
# str = list(str)
tail_set = set()
for i in range(len(n) - 2, -1, -1):
tail_set.add(n[i + 1])
found = any(e > n[i] for e in tail_set)
# print(i, ':', str[i],':',tail_set)
if found:
bigger_alts = (e for e in tail_set if e > n[i])
d = min(bigger_alts)
# print(d)
first_match = next(z for z in range(i + 1, len(n)) if n[z] == d)
# print(first_match, '=', str[first_match])
n[i], n[first_match] = n[first_match], n[i]
n[i + 1:len(n)] = sorted(n[i + 1:len(n)])
return int("".join(n))
return 0
if __name__ == "__main__":
try:
datafile = sys.argv[1]
except IndexError:
exit(
"please specify input file as command line arg (the only),"
" for example: ./next_biggest_number.py datafile.txt")
with open(datafile) as f:
for case in f.readlines():
print('-----{}'.format(nbn(case)))
| true |
f0a7565559095ef9e6c1f93c435b63632195b58c | Python | brooke-zhou/MovieLens-Visualization | /ImplicitImplementation.py | UTF-8 | 4,202 | 3.15625 | 3 | [] | no_license | import implicit
import pandas as pd
from scipy.sparse import csr_matrix
import numpy as np
from statistics import mean
from matrixFactorMethods import get_err, centerUV, calculateProjection
def implicitModel(movieLensDataTrainPath='train_clean.txt', movieLensDataTestPath='test_clean.txt'):
    ''' Fit an implicit-feedback ALS model on the MovieLens training data.

    Loads tab-separated (user, movie, rating) triples, densifies them into an
    M x N ratings matrix, and fits implicit's AlternatingLeastSquares on its
    sparse form. Returns (item_factors, user_factors) from the fitted model.
    NOTE(review): user ids are shifted to 0-based (i - 1) but movie ids are
    used as-is with N = max_id + 1 - presumably movie ids are 0-based or
    column 0 is unused; confirm against the data files.
    '''
    # Load in training and testing data
    dfTrain = pd.read_csv(movieLensDataTrainPath, sep="\t", header=None)
    dfTrain.columns = ["User Id", "Movie Id", "Rating"]
    dfTest = pd.read_csv(movieLensDataTestPath, sep="\t", header=None)
    dfTest.columns = ["User Id", "Movie Id", "Rating"]
    test = dfTest.to_numpy()
    train = dfTrain.to_numpy()
    # Initialize a model based on the implicit model
    model = implicit.als.AlternatingLeastSquares(factors=25, iterations=400, regularization=0.01)
    # Declare M (users) and N (movies) from the largest ids in either split
    M = max(max(train[:, 0]), max(test[:, 0])).astype(int)
    N = max(max(train[:, 1]), max(test[:, 1])).astype(int) + 1
    # We need a matrix to store all values of Y since it
    # expects an actual M x N matrix with each i, j
    # entry containing Y_ij.
    newTrains = np.zeros((M, N))
    for y in train:
        i, j, yij = y
        i = i - 1
        j = j
        newTrains[i][j] = yij
    newTrains = np.array(newTrains)
    # Convert to a format accepted by
    # implicit.
    train = csr_matrix(newTrains)
    # Train the model on a sparse matrix of movie/user/confidence weights
    model.fit(train)
    # These are our corresponding U and V matrices
    return model.item_factors, model.user_factors
# Without this, the error goes up to around 6. Don't know why.
'''def get_err2(U, V, Y, reg=0.0):
"""
Takes as input a matrix Y of triples (i, j, Y_ij) where i is the index of a user,
j is the index of a movie, and Y_ij is user i's rating of movie j and
user/movie matrices U and V.
Returns the mean regularized squared-error of predictions made by
estimating Y_{ij} as the dot product of the ith row of U and the jth column of V^T.
"""
totalLength = len(Y)
sumOfSqs = 0
#meanYs = mean(Y[:, 2])
for y in Y:
#print(y)
i = int(y[0])
j = int(y[1])
yij = y[2]
i = i - 1
j = j
sumOfSqs = sumOfSqs + ((yij - np.dot(U[i], V[j])) ** 2)
normSum = (np.linalg.norm(U, ord='fro') ** 2 + np.linalg.norm(V, ord='fro') ** 2)
return ((reg * normSum) + sumOfSqs) / (2 * totalLength)'''
def Vtrain(Y, max_epochs=400):
    ''' Factorize an arbitrary dense matrix Y with implicit's ALS.

    Used by SVDofV to re-factor the item matrix V itself; returns the
    fitted (item_factors, user_factors) pair and prints their shapes.
    NOTE(review): Y is treated as a confidence matrix even when it is a
    factor matrix - this is an approximation, not a true SVD.
    '''
    model = implicit.als.AlternatingLeastSquares(factors=25, iterations=max_epochs, regularization=0.01)
    # Train here
    newTrains = np.array(Y)
    train = csr_matrix(newTrains)
    #print(train.shape)
    # train the model on a sparse matrix of item/user/confidence weights
    model.fit(train)
    print(model.item_factors.shape)
    # print(len(model.item_factors))
    print(model.user_factors.shape)
    # print(len(model.user_factors))
    return model.item_factors, model.user_factors
def SVDofV(oldV):
    ''' Approximate an SVD-style factorization of V by re-running ALS on it.

    Returns the (A, B) factor pair produced by Vtrain with 300 iterations.
    '''
    # Use to compute Ein and Eout
    A, B = Vtrain(oldV, max_epochs=300)
    return A, B
def tryThis():
    ''' End-to-end experiment: fit the implicit ALS model, center U and V,
    re-factor V to get a projection basis A, project U/V to 2-D, and print
    the test error of both the full and projected factorizations.
    Returns (projU, projV).
    NOTE(review): implicitModel defaults to 'train_clean.txt'/'test_clean.txt'
    while this function reads '../data/test_clean.txt' - the two paths look
    inconsistent; confirm the intended working directory. '''
    U, V = implicitModel()
    U = np.float64(U)
    V = np.float64(V)
    U = U.T
    V = V.T
    # Center U and V.
    U, V = centerUV(U, V)
    # SVD of V!
    A, B = SVDofV(V)
    A = A.T
    # Use the first 2 cols for 2 dimensional projection.
    projU, projV = calculateProjection(A, U, V)
    dfTest = pd.read_csv('../data/test_clean.txt', sep="\t", header=None)
    dfTest.columns = ["User Id", "Movie Id", "Rating"]
    # Calculate error.
    Y_test = dfTest.to_numpy()
    print(get_err(U.T, V.T, Y_test))
    print(get_err(projU.T, projV.T, Y_test))
    return projU, projV
# Runs the whole experiment at import/execution time (no __main__ guard).
tryThis()
9ee1a9b07cb5fb4256de4d766f43f8ccb74fb6d4 | Python | LHKze/Notes | /flask-test-login.py | UTF-8 | 2,499 | 2.578125 | 3 | [] | no_license | from flask import Flask, request, render_template, redirect, url_for, make_response
from datetime import datetime, timedelta
app = Flask(__name__)
class UserLogin(object):
@classmethod
def check_cookie(cls):
tmp = request.cookies.get('login')
if tmp == 'you are login in!':
return True
@classmethod
def login_in(cls, username, password):
response = make_response(render_template('homepage.html'))
if check_info(username, password):
response.set_cookie('login', 'you are login in!', expires=datetime.today() + timedelta(days=30))
return response
else:
return render_template('login.html')
@classmethod
def login_out(cls):
tmp = request.cookies.get('login')
if tmp == 'you are login in!':
response = make_response(redirect(url_for('index')))
response.delete_cookie('login')
return response
return redirect(url_for('index'))
def check_info(name, pas):
    # Credential-check stub: accepts every username/password pair.
    # TODO(review): replace with a real lookup before any production use.
    return True
@app.route('/', methods=['GET', 'POST'])
def index():
    """Login/home page.

    POST: read form fields 'p' (username) and 'q' (password) and let
    UserLogin.login_in choose between homepage-with-cookie and login page.
    GET: homepage when the login cookie is valid, else the login form.
    The trailing triple-quoted string is dead code from an earlier draft.
    """
    if request.method == 'POST':
        username = request.form.get('p')
        password = request.form.get('q')
        response = UserLogin.login_in(username, password)
        return response
    elif request.method == 'GET':
        if UserLogin.check_cookie():
            return render_template('homepage.html')
        else:
            return render_template('login.html')
    """
    response = make_response(render_template('homepage.html'))
    if request.method == 'POST':
        name = request.form.get('p')
        password = request.form.get('q')
        response.set_cookie('login', 'you are login in!', expires=datetime.today()+timedelta(days=30))
        return response
    if request.method == 'GET':
        tmp = request.cookies.get('login')
        if tmp == 'you are login in!':
            return response
        return render_template('login.html')"""
@app.route('/loginout', methods=['POST'])
def login_out():
    """Log the user out (POST only) via UserLogin.login_out, which clears
    the cookie and redirects to the index page.
    The trailing triple-quoted string is dead code from an earlier draft.
    """
    if request.method == 'POST':
        response = UserLogin.login_out()
        return response
    """
    if request.method == 'POST':
        tmp = request.cookies.get('login')
        if tmp == 'you are login in!':
            response = make_response(redirect(url_for('index')))
            response.delete_cookie('login')
            return response
        return redirect(url_for('index'))"""
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| true |
9dfa7556370bd84cb1d28ead67c9e3abf6f19a53 | Python | eggypesela/freecodecamp-exercise-data-analysis | /Exercise Files/boilerplate-mean-variance-standard-deviation-calculator/mean_var_std.py | UTF-8 | 2,201 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env python
# created by Regina Citra Pesela (reginapasela@gmail.com)
import numpy as np
def calculate(list):
# first, we should check if the list length is 9
# if it's not 9 then raise ValueError and
# return "List must contain nine numbers"
if len(list) != 9:
raise ValueError('List must contain nine numbers.')
# generate 3 x 3 matrix from the list using numpy
array_list = np.array(list).reshape([3,3])
# calculate mean, variance, standard deviation, max, min and sum
# by axis 1, axis 2 and flattened respectively
list_mean = [
array_list.mean(axis = 0).tolist(),
array_list.mean(axis = 1).tolist(),
array_list.mean().tolist()
]
list_variance = [
array_list.var(axis = 0).tolist(),
array_list.var(axis = 1).tolist(),
array_list.var().tolist()
]
list_sd = [
array_list.std(axis = 0).tolist(),
array_list.std(axis = 1).tolist(),
array_list.std().tolist()
]
list_max = [
array_list.max(axis = 0).tolist(),
array_list.max(axis = 1).tolist(),
array_list.max().tolist()
]
list_min = [
array_list.min(axis = 0).tolist(),
array_list.min(axis = 1).tolist(),
array_list.min().tolist()
]
list_sum = [
array_list.sum(axis = 0).tolist(),
array_list.sum(axis = 1).tolist(),
array_list.sum().tolist()
]
# return calculations as dictionary that contains mean,
# variance, standard deviation, max, min, and sum respectively
calculations = {
'mean' : list_mean,
'variance' : list_variance,
'standard deviation' : list_sd,
'max' : list_max,
'min' : list_min,
'sum' : list_sum
}
return calculations | true |
a1943b54a9e2cea5c20089c8801b861544e5efe2 | Python | Rykkata/marsrover | /Python/cards.py | UTF-8 | 3,886 | 3.25 | 3 | [] | no_license | import random
class Card(object):
    """A single playing card: a face value plus a one-character suit glyph."""

    def __init__(self, value, suite):
        super(Card, self).__init__()
        self.Value = value
        self.Suite = suite

    def __str__(self):
        # Face value immediately followed by the suit character, e.g. 'A\x03'.
        return ("%s%c" % (self.Value, self.Suite))


class Deck(object):
    """A standard 52-card deck built from four suits of thirteen ranks.

    Bug fix: the original built the number cards with xrange(50, 57)
    (characters '2'..'8' only), silently omitting all four nines and
    yielding a 48-card deck.  The range now runs through chr(57) == '9',
    and plain range() keeps the code working on both Python 2 and 3.
    """

    # Suit glyphs used by the original implementation (code points 3-6).
    SUITES = (chr(3), chr(4), chr(5), chr(6))
    # 'A', '2'..'9' (chr(50)..chr(57)), '10', then the face cards - the same
    # value ordering the hand-unrolled original produced, with '9' restored.
    VALUES = ('A',) + tuple(chr(v) for v in range(50, 58)) + ('10', 'J', 'Q', 'K')

    def __init__(self):
        super(Deck, self).__init__()
        # Cards grouped by value, each value in suit order.
        self.cards = [Card(value, suite)
                      for value in self.VALUES
                      for suite in self.SUITES]

    def __str__(self):
        # One card per line.
        retval = ""
        for card in self.cards:
            retval = retval + str(card) + "\n"
        return retval

    def shuffle(self):
        """Randomize the deck order in place."""
        random.shuffle(self.cards)

    def draw(self):
        """Deal (remove and return) the card from the end of the list."""
        return self.cards.pop()
class Player(object):
    """A blackjack-style hand that can total its cards.

    ``calcHand`` returns a ``(low, high)`` tuple: every ace counts 1 in the
    low total and 11 in the high total; all other cards contribute the same
    points to both totals. Unknown values contribute 0, matching the
    original if/elif chain's fall-through behaviour.
    """

    # Fixed card points; aces are handled separately (1 or 11).
    _POINTS = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
               '9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10}

    def __init__(self):
        super(Player, self).__init__()
        self.cards = []

    def hit(self, card):
        """Add a dealt card to the hand."""
        self.cards.append(card)

    def __str__(self):
        # Space-prefixed card strings, e.g. " A\x03 K\x06".
        retval = ""
        for card in self.cards:
            retval = retval + " " + str(card)
        return retval

    def calcHand(self):
        """Return (low_total, high_total) for the current hand."""
        low = 0
        high = 0
        for card in self.cards:
            if card.Value == 'A':
                low += 1
                high += 11
            else:
                # Table lookup replaces the original 13-branch if/elif chain.
                points = self._POINTS.get(card.Value, 0)
                low += points
                high += points
        return (low, high)
0d442f6cfab000650b8e1b971475b292f22047dd | Python | chellya/tfPlayground | /ut.py | UTF-8 | 2,197 | 2.640625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
from IPython.display import clear_output, Image, display, HTML
import webbrowser, os
import importlib
import pandas as pd
from pandas import DataFrame
def print_df(df,max_row = 500,max_cols = 100):
    """Pretty-print a pandas DataFrame in a notebook, temporarily raising the
    display row/column limits so wide frames are not truncated."""
    with pd.option_context('display.max_rows', max_row, 'display.max_columns', max_cols):
        display(df)
def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def.

    Returns a copy of the TensorFlow GraphDef where every Const node whose
    tensor payload exceeds ``max_const_size`` bytes is replaced by a short
    "<stripped N bytes>" placeholder - keeps visualizations small.
    """
    strip_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                # Fine for display; the stripped graph is no longer runnable.
                tensor.tensor_content = "<stripped %d bytes>"%size
    return strip_def
def show_graph_def(graph_def,save_file=False, max_const_size=32):
    """Visualize TensorFlow graph.

    Embeds the (constant-stripped) GraphDef in the TensorBoard graph widget
    inside an iframe; either displays it inline (notebook) or, when
    save_file is True, returns after the caller writes it to tf_graph.html.
    """
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    # NOTE(review): replace('"', '"') is a no-op as written; presumably the
    # replacement was '&quot;' (HTML-escaping for srcdoc) and got mangled -
    # confirm against the upstream snippet.
    iframe = """
        <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '"'))
    if save_file:
        with open('tf_graph.html', 'w') as f:
            f.write(HTML(iframe).data)
    else:
        display(HTML(iframe))
''' For Jupyter Notebook'''
def show_graph(graph=None):
    """Render the given (or default) TF graph inline in a Jupyter notebook."""
    if graph is None:
        graph = tf.get_default_graph()
    show_graph_def(graph)
''' For none-Jupyter scenarios'''
def show_graph_local(graph=None):
    """Render the given (or default) TF graph outside a notebook: write
    tf_graph.html to the working directory and open it in the browser."""
    if graph is None:
        graph = tf.get_default_graph()
    show_graph_def(graph,save_file=True)
    webbrowser.open('file://' + os.path.realpath('tf_graph.html'))
795229e3ea995a24f87919d4a796c50c0acb5325 | Python | ChetverikovPavel/Python | /lesson10/task3.py | UTF-8 | 1,065 | 3.84375 | 4 | [] | no_license | class Cell:
def __init__(self, numb):
try:
self.numb = int(numb)
except ValueError:
print('None')
self.numb = None
def __str__(self):
return str(self.numb)
def __add__(self, other):
return Cell(self.numb + other.numb)
def __sub__(self, other):
if self.numb > other.numb:
return Cell(self.numb - other.numb)
else:
print('Вычитание невозможно')
def __mul__(self, other):
return Cell(self.numb * other.numb)
def __floordiv__(self, other):
return Cell(self.numb // other.numb)
def make_order(self, row):
result = ''
for el in range(self.numb):
if el % row == 0 and el != 0:
result += '\n'
result += '*'
return result
# Quick demo of Cell arithmetic and the star-grid rendering.
my_cell_1 = Cell(30)
my_cell_2 = Cell(5)
my_cell_3 = Cell(8)  # NOTE(review): created but never used below
print(my_cell_1 + my_cell_2)
print(my_cell_1 - my_cell_2)
print(my_cell_1 * my_cell_2)
print(my_cell_1 // my_cell_2)
print(my_cell_1.make_order(7))
a2f7dac90c0fb95cfa32c8b47335de45dd019e83 | Python | dbradf/dlgo | /src/dlgo/agent/monte_carlo.py | UTF-8 | 3,846 | 3.0625 | 3 | [] | no_license | from __future__ import annotations
import math
import random
from structlog import get_logger
from dlgo.agent.base import Agent
from dlgo.agent.naive import RandomBot
from dlgo.goboard_fast import Move, GameState
from dlgo.gotypes import Player
from dlgo.scoring import compute_game_result
LOGGER = get_logger(__name__)
def uct_score(
    parent_rollouts: int, child_rollouts: int, win_pct: float, temperature: float
) -> float:
    """UCT (UCB1) value of a child node: the exploitation term (win_pct)
    plus a temperature-weighted exploration bonus that shrinks as the child
    accumulates rollouts relative to its parent."""
    exploration_bonus = temperature * math.sqrt(
        math.log(parent_rollouts) / child_rollouts
    )
    return win_pct + exploration_bonus
class MCTSNode(object):
    """One node of the Monte Carlo search tree.

    Holds the game state it represents, the move that produced it, per-player
    win counts, the rollout count, expanded children, and the legal moves not
    yet expanded into children.
    """
    def __init__(self, game_state: GameState, parent: MCTSNode = None, move: Move = None):
        self.game_state = game_state
        self.parent = parent  # None for the root node
        self.move = move  # move leading from parent to this node
        self.win_counts = {
            Player.black: 0,
            Player.white: 0
        }
        self.num_rollouts = 0
        self.children = []
        # Legal moves that have not been expanded into child nodes yet.
        self.unvisited_moves = game_state.legal_moves()
    def add_random_child(self) -> MCTSNode:
        # Expand one randomly chosen unvisited move into a new child node.
        index = random.randint(0, len(self.unvisited_moves) - 1)
        new_move = self.unvisited_moves.pop(index)
        new_game_state = self.game_state.apply_move(new_move)
        new_node = MCTSNode(new_game_state, self, new_move)
        self.children.append(new_node)
        return new_node
    def record_win(self, winner: Player):
        # Credit one finished rollout to `winner`.
        self.win_counts[winner] += 1
        self.num_rollouts += 1
    def can_add_child(self):
        # True while there are unexpanded legal moves.
        return len(self.unvisited_moves) > 0
    def is_terminal(self):
        return self.game_state.is_over()
    def winning_pct(self, player: Player) -> float:
        # Fraction of rollouts through this node that `player` won; raises
        # ZeroDivisionError if called before any rollout was recorded.
        return float(self.win_counts[player]) / float(self.num_rollouts)
class MCTSBot(Agent):
    """Go agent choosing moves by Monte Carlo tree search with the UCT
    selection rule and uniform-random playouts."""
    def __init__(self, num_rounds: int = 100, temperature: float = 1.5):
        super(MCTSBot, self).__init__()
        self.num_rounds = num_rounds  # MCTS rounds per move decision
        self.temperature = temperature  # exploration weight fed to uct_score
    def select_move(self, game_state: GameState):
        """Run `num_rounds` MCTS rounds from `game_state` and return the move
        whose child has the best win rate for the player to move."""
        root = MCTSNode(game_state)
        for i in range(self.num_rounds):
            LOGGER.debug("round", i=i)
            self._perform_round(root)
        best_move = None
        best_pct = -1.0
        for child in root.children:
            child_pct = child.winning_pct(game_state.next_player)
            if child_pct > best_pct:
                best_pct = child_pct
                best_move = child.move
        LOGGER.info("best move", best_move=best_move, win_pct=best_pct)
        return best_move
    def _perform_round(self, root: MCTSNode):
        # One round: select down the tree, expand one node, simulate a random
        # playout, then propagate the result back up to the root.
        node = root
        while not node.can_add_child() and not node.is_terminal():
            node = self.select_child(node)
        if node.can_add_child():
            node = node.add_random_child()
        winner = self.simulate_random_game(node.game_state)
        while node is not None:
            node.record_win(winner)
            node = node.parent
    def select_child(self, node: MCTSNode):
        """Return the child of `node` with the highest UCT score."""
        total_rollouts = sum(child.num_rollouts for child in node.children)
        best_score = -1
        best_child = None
        for child in node.children:
            score = uct_score(total_rollouts, child.num_rollouts,
                              child.winning_pct(node.game_state.next_player),
                              self.temperature)
            if score > best_score:
                best_score = score
                best_child = child
        return best_child
    @staticmethod
    def simulate_random_game(game_state: GameState) -> Player:
        """Play the position out with two RandomBots and return the winner."""
        bots = {
            Player.black: RandomBot(),
            Player.white: RandomBot(),
        }
        while not game_state.is_over():
            bot_move = bots[game_state.next_player].select_move(game_state)
            game_state = game_state.apply_move(bot_move)
        return compute_game_result(game_state).winner
| true |
a32e401ad176761340cc17f4290b91cdf37eca27 | Python | charleshefer/cbiotools | /ncbi/cfasta_from_ncbi.py | UTF-8 | 1,316 | 2.578125 | 3 | [] | no_license | ###############################################################################
#Download a fasta entry from the NCBI using EUTILS
#@requires:biopython
#@author:charles.hefer@agresearch.co.nz
#@version:0.1
###############################################################################
import optparse
from Bio import Entrez, SeqIO
Entrez.email="charles.hefer@gmail.com"
def __main__():
    """Parse command-line options, fetch the requested record from NCBI via
    EUtils efetch in FASTA format, and write it to the output file.

    Required options: -a/--accession, -d/--database, -o/--output.
    """
    parser = optparse.OptionParser()
    parser.add_option("-a", "--accession", default=None, dest="accession",
                      help="The accession to retrieve")
    parser.add_option("-d", "--database", default=None, dest="database",
                      help = "The database to retrieve from")
    parser.add_option("-o", "--output", default=None, dest="output",
                      help="The output file")
    (options, args) = parser.parse_args()
    # optparse has no `required` flag, so enforce the mandatory options here.
    if not options.accession:
        parser.error("Need to specify the input file")
    if not options.database:
        parser.error("Need to specify the database")
    if not options.output:
        parser.error("Need to specify the output file")
    handle = Entrez.efetch(db=options.database,
                           id = options.accession,
                           rettype="fasta")
    records = handle.read()
    #This is not a fasta object....
    with open(options.output, "w") as outhandle:
        outhandle.write(records)
if __name__ == "__main__":
    __main__()
| true |
0b94a4488c0e8839661b26438b690420f6421dcc | Python | Innerface/qarobot | /model/nlp_stanford_by_nltk_model.py | UTF-8 | 2,003 | 2.671875 | 3 | [] | no_license | # Author: YuYuE (1019303381@qq.com) 2018.01.18
from nltk.parse.stanford import StanfordParser
from nltk.tokenize import StanfordSegmenter
import os
java_path = "C:/Program Files (x86)/Java/jdk1.8.0_144/bin/java.exe"
os.environ['JAVAHOME'] = java_path
def generate_stanford_parser(sentence, path='D:/NLP/stanford/stanford-corenlp-full-2017-06-09/'):
    """Parse a Chinese sentence with the Stanford PCFG parser via NLTK.

    `path` must point at a local stanford-parser distribution; returns the
    iterator produced by StanfordParser.raw_parse (one Tree per parse).
    """
    stanford_parser_dir = path
    # eng_model_path = "edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz"
    zh_model_path = "edu/stanford/nlp/models/lexparser/chinesePCFG.ser.gz"
    my_path_to_models_jar = stanford_parser_dir + "stanford-parser-3.8.0-models.jar"
    my_path_to_jar = stanford_parser_dir + "stanford-parser.jar"
    parser = StanfordParser(model_path=zh_model_path, path_to_models_jar=my_path_to_models_jar,
                            path_to_jar=my_path_to_jar)
    result = parser.raw_parse(sentence)
    return result
def generate_stanford_segmenter(sentence, path="D:/NLP/stanford/stanford-corenlp-full-2017-06-09/"):
    """Segment a Chinese sentence with the Stanford segmenter (PKU model);
    returns whatever StanfordSegmenter.segment yields (segmented text)."""
    segmenter = StanfordSegmenter(
        path_to_jar=path + "stanford-segmenter-3.8.0.jar",
        path_to_slf4j=path + "slf4j-api.jar",
        path_to_sihan_corpora_dict=path + "segmenter/data",
        path_to_model=path + "segmenter/data/pku.gz",
        path_to_dict=path + "segmenter/data/dict-chris6.ser.gz")
    result = segmenter.segment(sentence)
    return result
if __name__ == "__main__":
sentence = "长城钻石信用卡 与 长城世界之极信用卡,重塑 奢华 定义,再创 顶级之作。八 大 极致 尊荣 服务,只 为 给 您 最 极致 的 礼遇,与 您 共同 镌刻 一生 的 回忆 与 经历。"
result = generate_stanford_parser(sentence)
# sentence = "长城钻石信用卡与长城世界之极信用卡,重塑奢华定义,再创顶级之作。八大极致尊荣服务,只为给您最极致的礼遇,与您共同镌刻一生的回忆与经历。"
# result = generate_stanford_segmenter(sentence)
for res in result:
print(res)
| true |