blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
50c228cf9df7e858ade3d624572e25d6c0cee3af
|
Python
|
WayneHartigan/Data-Application-Development-Labs
|
/ca_2_2/filtering/onlineRetailReducer.py
|
UTF-8
| 382
| 3.09375
| 3
|
[] |
no_license
|
import sys
import heapq

# MapReduce reducer: keep only the `limit` highest-priced records seen on stdin.
# Each input line is CSV with the price in the second field.
topFifty = []
limit = 50
for line in sys.stdin:
    line = line.strip()
    data = line.split(",")
    try:
        prices = float(data[1])
    # IndexError guard added: a line with no comma would previously crash
    # the reducer instead of being skipped like a malformed price.
    except (ValueError, IndexError):
        continue
    topFifty.append((prices, line))
# Select the top `limit` once at the end instead of re-sorting and truncating
# on every input line (the original did an O(k log k) sort per record).
# heapq.nlargest sorts descending by the full (price, line) tuple, matching
# the original sort(reverse=True)[:limit] ordering exactly.
topFifty = heapq.nlargest(limit, topFifty)
for (price, lines) in topFifty:
    print(price)
| true
|
bdfb3909f7d3ca43d27f7c09773c011db9147bca
|
Python
|
noeljeremydiatta/Python
|
/exo4.py
|
UTF-8
| 203
| 3.6875
| 4
|
[] |
no_license
|
from math import *

# Read a real number x and an integer exponent n, then display x ** n.
valeur = float(input("Entrer la valeur du réel x: "))
exposant = int(input("Entrer la valeur de l’entier n: "))
puissance = float(valeur ** exposant)
print("le résultat de la puissance est: ", puissance)
| true
|
0a7b353f45e011c6e22cb492a03fa911d0265480
|
Python
|
JackDraak/Python_Fifteen_Game
|
/AI_QtMCTS_controller.py
|
UTF-8
| 5,138
| 3.296875
| 3
|
[] |
no_license
|
'''
This module contains the AI_QtMCTS Controller class, which is responsible for handling AI input and updating the console (for now).
'''
# AI_QtMCTS_controller.py
from time import sleep
from console_controller import Controller as cc
from Game import Game
import random
from typing import Union, Tuple
import numpy as np
def uct(node):
    """Upper Confidence bound for Trees: average reward plus exploration bonus.

    Assumes node.visits > 0 and node.parent.visits > 1 — TODO confirm callers
    guarantee this (division by zero / log(0) otherwise).
    """
    exploitation = node.total_reward / node.visits
    exploration = np.sqrt(2 * np.log(node.parent.visits) / node.visits)
    return exploitation + exploration
class Node:
    """One node of the MCTS tree: a game state plus search statistics."""

    def __init__(self, game_state, parent=None):
        # Tree structure.
        self.parent = parent
        self.children = []
        self.game_state = game_state
        # MCTS statistics, updated during backpropagation.
        self.visits = 0
        self.total_reward = 0
class Controller:
    """MCTS-driven controller: selects moves for the fifteen game via
    Monte-Carlo Tree Search and applies them through the console controller.
    """

    def __init__(self, game: Game):
        self.game = game
        self.console_controller = cc(game)

    def command_check(self, command: str) -> Union[str, Tuple[int, int]]:
        """Delegate command validation to the console controller."""
        return self.console_controller.command_check(command)

    def input_shuffle(self, game: Game) -> None:
        """Shuffle the board a fixed number of times."""
        self.console_controller.game.shuffle(50)  # TODO: Until further notice, this method will always shuffle 50(?) times, for simplicity.

    def input_turn(self, game: Game) -> None:
        """Play one uniformly-random valid move."""
        move_set = self.console_controller.game.get_valid_moves()
        self.console_controller.process_turn(self.game, random.choice(move_set))

    # metadata required for ML algorithms includes the following:
    # - game state, represented by a 2D array of integers, the tile labels
    def get_game_state(self) -> list:
        # Fixed: read from self.game; this previously referenced the
        # module-global `game`, breaking any use outside the __main__ script.
        game_labels_as_matrix = self.game.get_labels_as_matrix()
        return game_labels_as_matrix

    # - distance pairings, represented by a 2D array of paired integers, the label & distance from each tile to its goal position
    def get_distance_scores(self) -> list:
        # Fixed: was the module-global `game` (see get_game_state).
        game_distance_scores = self.game.get_distance_scores()
        return game_distance_scores

    def select(self, node):
        # MCTS Selection step: walk down to a leaf, always following the
        # child with the highest UCT score.
        while len(node.children) > 0:
            node = max(node.children, key=uct)
        return node

    def expand(self, node):
        """
        Expand a leaf node of the game tree.
        """
        valid_moves = self.game.get_valid_moves()
        # NOTE(review): node.children holds Node objects, not moves, so this
        # membership test never filters anything — verify intended semantics.
        untried_moves = [move for move in valid_moves if move not in node.children]
        if len(untried_moves) == 0:
            return None
        move = random.choice(untried_moves)
        new_game_state = node.game_state.copy()
        # Slide the tile with the chosen move (tile label)
        new_game_state.slide_tile(move)
        child_node = Node(new_game_state, node)
        node.children.append(child_node)
        return child_node

    def simulate(self, node):
        # MCTS Simulation step: random playout until the puzzle is solved.
        simulation_game_state = node.game_state.copy()
        last_move = None
        while not simulation_game_state.is_solved():
            moves = simulation_game_state.get_valid_moves()
            # Exclude the last move (inverse move) from the list of valid moves
            if last_move is not None:
                moves = [move for move in moves if move != last_move]
            random_move = random.choice(moves)
            # NOTE(review): expand() calls slide_tile(); confirm move_tile is
            # the intended API here.
            simulation_game_state.move_tile(random_move)
            last_move = (random_move[1], random_move[0])  # Invert the move (row, col) -> (col, row)
            # Fixed: print the controller's own game, not the module-global.
            print(self.game)
            sleep(0.15)
        return 10  # Reward granted for reaching the goal state.

    def backpropagate(self, node, reward):
        # MCTS Backpropagation step: credit the reward up to the root.
        while node is not None:
            node.visits += 1
            node.total_reward += reward
            node = node.parent

    def mcts_search(self, root, iterations):
        # NOTE(review): expand() may return None (no untried moves), which
        # would crash simulate(); confirm this cannot happen in practice.
        for _ in range(iterations):
            selected_node = self.select(root)
            expanded_node = self.expand(selected_node)
            reward = self.simulate(expanded_node)
            self.backpropagate(expanded_node, reward)
        best_child = max(root.children, key=lambda child: child.visits)
        return best_child.game_state

    def play(self) -> None:
        """Run MCTS move selection in a loop until the puzzle is solved."""
        moves = [0]  # leading placeholder, popped after the first real move
        while not self.game.is_solved():
            # Fixed: was the module-global `game`.
            print(self.game)
            move_set = self.game.get_valid_moves()
            no_move = True
            while no_move:
                root = Node(self.game)
                # NOTE(review): mcts_search returns a game_state, yet it is
                # compared against and stored as a move below — verify.
                best_move = self.mcts_search(root, iterations=100)  # You can adjust the number of iterations
                if not best_move == moves[-1]:
                    if moves[-1] == 0:
                        moves.pop()  # remove leading placeholder 0 from moves list
                    self.console_controller.process_turn(self.game, str(best_move))  # Convert best_move to string
                    moves.append(best_move)
                    no_move = False
                sleep(0.05)
        print(self.game)  # Fixed: was the module-global `game`.
        print("*** Congratulations, you solved the puzzle! ***\n")
        print(f"Total moves: {len(moves)}")
if __name__ == '__main__':
    game_size = 4  # TODO extend this so AIs can play any size game
    # NOTE(review): `game` is module-global and several Controller methods
    # appear to reference it directly instead of self.game — confirm before
    # renaming or moving this setup.
    game = Game(game_size, True)
    controller = Controller(game)
    controller.play()
| true
|
db7280be61ee0b2bb8421d89cdb18e1685c63b1d
|
Python
|
aroraenterprise/brewhacks
|
/backend/api/models/base_model.py
|
UTF-8
| 4,945
| 2.90625
| 3
|
[] |
no_license
|
"""
Project: backend
Author: Saj Arora
Description:
"""
from datetime import date
from google.appengine.ext import ndb
import pydash as _
class Base(ndb.Expando):
    """Base model class, it should always be extended

    Attributes:
        created (ndb.DateTimeProperty): DateTime when model instance was created
        modified (ndb.DateTimeProperty): DateTime when model instance was last time modified
        version (ndb.IntegerProperty): Version of app
        PUBLIC_PROPERTIES (list): list of properties, which are accessible for public, meaning non-logged
            users. Every extending class should define public properties, if there are some
        PRIVATE_PROPERTIES (list): list of properties accessible by admin or authrorized user
    """
    created = ndb.DateTimeProperty(auto_now_add=True)
    modified = ndb.DateTimeProperty(auto_now=True)
    PUBLIC_PROPERTIES = ['key', 'version', 'created', 'modified']
    PRIVATE_PROPERTIES = []

    def to_dict(self, include=None):
        """Return a dict containing the entity's property values, so it can be passed to client

        Args:
            include (list, optional): Set of property names to include, default all properties
        """
        _MODEL = type(self)
        repr_dict = {}
        if include is None:
            return super(Base, self).to_dict(include=include)
        for name in include:
            # process name eg. email.private becomes email and include becomes private
            # include can be public, private
            # check if this property is even allowed to be public
            # or has a value set
            if not hasattr(self, name):
                continue
            value = getattr(self, name)
            if type(getattr(_MODEL, name)) == ndb.StructuredProperty:
                # Serialize nested entities (single or repeated) recursively,
                # restricted to their own public properties.
                if isinstance(value, list):
                    items = []
                    for item in value:
                        items.append(item.to_dict(include=item.get_public_properties()))
                    repr_dict[name] = items
                else:
                    repr_dict[name] = value.to_dict(include=value.get_public_properties())
            elif isinstance(value, date):
                repr_dict[name] = value.isoformat()
            elif isinstance(value, ndb.Key):
                repr_dict[name] = value.urlsafe()
            else:
                repr_dict[name] = value
        if self._key:
            repr_dict['id'] = self.get_id()
        return repr_dict

    def populate(self, **kwargs):
        """Extended ndb.Model populate method, so it can ignore properties, which are not
        defined in model class without throwing error
        """
        kwargs = _.omit(kwargs, Base.PUBLIC_PROPERTIES + ['key', 'id'])  # We don't want to populate those properties
        kwargs = _.pick(kwargs, _.keys(self._properties))  # We want to populate only real model properties
        super(Base, self).populate(**kwargs)

    @classmethod
    def get_by(cls, name, value, keys_only=None):
        """Gets model instance by given property name and value

        :param name:
        :param value:
        :param keys_only:
        """
        return cls.query(getattr(cls, name) == value).get(keys_only=keys_only)

    @classmethod
    def fetch_by(cls, name, value, keys_only=None, cursor=None, limit=None):
        """Gets model instances by given property name and value

        :param name:
        :param value:
        :param keys_only:
        :param cursor:
        :param limit:
        """
        return cls.query(getattr(cls, name) == value)\
            .fetch(keys_only=keys_only, cursor=cursor, limit=limit)

    @classmethod
    def get_public_properties(cls):
        """Public properties consist of this class public properties
        plus extending class public properties"""
        return cls.PUBLIC_PROPERTIES + Base.PUBLIC_PROPERTIES

    @classmethod
    def get_private_properties(cls):
        """Gets private properties defined by extending class"""
        public_properties = cls.get_public_properties()
        for item in cls.PRIVATE_PROPERTIES:
            try:
                name = item.split('.')
                public_properties.remove(name[0])  # private overrides public
            except ValueError:
                # Fixed: was a bare `except` — list.remove raises ValueError
                # when the name is not public; anything else should propagate.
                pass
        props = cls.PRIVATE_PROPERTIES + Base.PRIVATE_PROPERTIES + public_properties
        return props

    @classmethod
    def get_all_properties(cls):
        """Gets all model's ndb properties"""
        return ['key', 'id'] + _.keys(cls._properties)

    def get_id(self):
        """Return the entity key's id."""
        return self.key.id()

    def get_key(self):
        """Return the entity key as a urlsafe string."""
        return self.key.urlsafe()

    @classmethod
    def is_valid(self, model):
        # Default validation hook: always valid. Extending classes override.
        return True, {}

    def get_rsvp_message(self):
        return None  # default none

    def get_name(self):
        """Return the entity's `name` attribute, or the class name as fallback."""
        if hasattr(self, 'name'):
            return self.name
        else:
            return type(self).__name__
| true
|
f569335619959ba55ee827dc334ac6470cbae407
|
Python
|
jintgeorge/NeuralNets_PrimeNumbers
|
/checkPrime.py
|
UTF-8
| 3,498
| 3.625
| 4
|
[] |
no_license
|
# Check/Test for Prime Number in Tensorflow!
# I got approximately 75% accuracy. Feel free to let me know if you find anything wrong
# or ways the performance can be improved
#Inspired by Joel Grus (http://joelgrus.com/2016/05/23/fizz-buzz-in-tensorflow/)
import numpy as np
import tensorflow as tf
from math import sqrt
from itertools import count, islice
NUM_DIGITS = 10
# Represent each input by an array of its binary digits.
# Represent each input by an array of its binary digits.
def binary_encode(i, num_digits):
    """Return *i* as a little-endian numpy array of `num_digits` binary digits."""
    bits = [(i >> shift) & 1 for shift in range(num_digits)]
    return np.array(bits)
def isPrime(n):
    """Trial-division primality test: True for primes, False for n <= 1 and composites."""
    if n <= 1:
        return False
    # Checking divisors up to int(sqrt(n)) inclusive is sufficient.
    return all(n % d for d in range(2, int(sqrt(n)) + 1))

# One-hot encode the desired outputs: [number, "prime"]
def encodeIsPrime(n):
    """One-hot label for n: [0, 1] if prime, [1, 0] otherwise."""
    if isPrime(n):
        return np.array([0, 1])
    return np.array([1, 0])
# Produce synthetic training data for numbers from 101 to 1024;
# numbers 1-100 are held out as the test set at the bottom of the file.
trX = np.array([binary_encode(i, NUM_DIGITS) for i in range(101, 2 ** NUM_DIGITS)])
trY = np.array([encodeIsPrime(i) for i in range(101, 2 ** NUM_DIGITS)])
# Randomly initialize weights.
def init_weights(shape):
    """Return a tf.Variable of the given shape, initialized ~N(0, 0.01)."""
    return tf.Variable(tf.random_normal(shape, stddev=0.01))
# Our model is a standard 1-hidden-layer multi-layer-perceptron with ReLU
# activation. The softmax (which turns arbitrary real-valued outputs into
# probabilities) gets applied in the cost function.
def model(X, w_h, w_o):
    """Return the pre-softmax logits of a one-hidden-layer ReLU MLP."""
    h = tf.nn.relu(tf.matmul(X, w_h))
    return tf.matmul(h, w_o)
# Output width: one-hot over [not-prime, prime].
TARGET_SIZE = 2
# Our variables (TF1 graph API). The input has width NUM_DIGITS, and the
# output has width 2 (Prime or NotPrime).
X = tf.placeholder("float", [None, NUM_DIGITS])
Y = tf.placeholder("float", [None, TARGET_SIZE])
# How many units in the hidden layer.
NUM_HIDDEN = 100
# Initialize the weights.
w_h = init_weights([NUM_DIGITS, NUM_HIDDEN])
w_o = init_weights([NUM_HIDDEN, TARGET_SIZE])
# Predict y given x using the model (raw logits; softmax lives in the cost).
py_x = model(X, w_h, w_o)
# We'll train our model by minimizing a cost function.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
# And we'll make predictions by choosing the largest output.
predict_op = tf.argmax(py_x, 1)
# Finally, we need a way to turn a prediction (and an original number)
# into human-readable output.
def Prime(i, prediction):
    """Render prediction 1 as the literal string "Prime", 0 as str(i)."""
    if prediction:
        return "Prime"
    return str(i)
BATCH_SIZE = 128
# Launch the graph in a session
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for epoch in range(10000):
        # Shuffle the data before each training iteration.
        p = np.random.permutation(range(len(trX)))
        trX, trY = trX[p], trY[p]
        # Train in batches of 128 inputs.
        for start in range(0, len(trX), BATCH_SIZE):
            end = start + BATCH_SIZE
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})
        # And print the current accuracy on the training data.
        print(epoch, np.mean(np.argmax(trY, axis=1) ==
                             sess.run(predict_op, feed_dict={X: trX, Y: trY})))
    # And now for real test (Test for number from 1 - 100)
    numbers = np.arange(1, 101)
    # binary_encode broadcasts over the numbers array; transpose gives one
    # row per number.
    teX = np.transpose(binary_encode(numbers, NUM_DIGITS))  # testX
    teY = sess.run(predict_op, feed_dict={X: teX})  # testY
    output = np.vectorize(Prime)(numbers, teY)
    y1 = np.array([1 if i == "Prime" else 0 for i in output])
    y2 = np.array([1 if isPrime(i) else 0 for i in numbers])
    print(output)
    # The match count doubles as a percentage because there are exactly
    # 100 test numbers.
    print('Accuracy = ', np.sum(y1==y2), '%')
| true
|
e551dbd37f65d92da0f489ebdae59b61539a64e1
|
Python
|
alexanderad/pony-standup-bot
|
/pony/dictionary.py
|
UTF-8
| 6,325
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
# coding=utf-8
import string
from datetime import datetime
class Dictionary(object):
    """Collection of phrases, picked pseudo-randomly but stably per user/day."""

    PLEASE_REPORT = (
        "Hey, just wanted to ask your current status. How it is going?",
        "Psst. I know you don't like it. But I have to ask. "
        "What is your status? Anything you want to share with the team? "
        "Few words.",
        "Hi. Ponies don't have to report. However, people made us "
        "to ask other people to. How are you doing today? Give me few words "
        "to share with the team.",
        "Amazing day, dear. How is it going on your side today? "
        "Just a few words.",
        "Dear, I'm here to ask you about your status for the team. Could "
        "you be so kind to share few words on what you are working on now?",
        "Hello, it's me again. How are you doing today? Your team will be "
        "excited to hear. I need just a few words from you.",
        "Heya. Just asked all the team members. You are the last one. How's "
        "your day? Anything you want to share with the team?",
        "Good morning! Just noticed you are online, decided to ask you "
        "your current status for the team. Few words to share?",
        "Dear, I apologize for the inconvenience. Would you mind sharing "
        "your status with the team? Few words.",
        "Good morning. Your beloved Pony is here again to ask your daily "
        "status for the team. How are you doing today, anything to share?",
        "Hello, dear. Pony here. What's your story today? Anything to share "
        "with the team?",
        "Good morning! That's a Standup Pony, your best friend. How "
        "are you doing today? Asking for the team.",
        "Buongiorno. Busy day, eh? May I ask you to spend few seconds to tell "
        "me your current status? Just a few words to share with the team.",
        "Hello there. I'm asking you the same thing each day. Because of the "
        "team. Feels a bit like a date to me. Oh well, what's your today's "
        "status?",
        "Another day, another question. Oh, wait, the question is the same. "
        "Your status is all I need to know. It's not me, it is for the team.",
        "Can't stop being bossy and asking people on the team their daily "
        "status. What do you have to say?",
        "Hi. That's a team check in. How it is going today?",
    )
    PLEASE_REPORT_LAST_CALL = (
        "This is the final boarding call for developers reporting to daily "
        "standup. Everything is about to happen! :runner:",
        "Pssst! I know you are busy. This happens to me as well. In few "
        "minutes I'm going to report daily status. Wanna be part of it?",
        "Busy day, eh? Maybe you have a few seconds to report your daily "
        "status, I'm about sending the final version of it! :clock430:",
        "You know that feeling when you ask somebody something "
        "but don't get any response back? That's awful. Wuuuf. Anyway I'm "
        "going to report daily status in a few minutes, would like to see you "
        "a part of it. ",
        "Ladies and gentlemen, captain speaking. We are about to report "
        "daily status, this is a kindly reminder for ya! :helicopter:",
        "Dear, this is just a kindly reminder for you to report your daily "
        "status! :bee:",
        "Everything is awesome, but you totally forgot about me! "
        "I'm reporting daily status in few moments, wanna join the "
        "crowd? :family:",
        "Busy like a bee? Just another question: wanna be a part of daily "
        "summary? One is soon to be sent out! :timer_clock:",
    )
    THANKS = (
        "Thanks! :+1:",
        "Thank you so much. Really appreciate it :+1:",
        "Glad to hear that!",
        "Thanks a lot. I'm happy about that.",
        "Great, this is on my notes now! :notes:",
        "Thank you, dear! :star2:",
        "Right, noted that!",
        "Thank you, I will report that to your boss.",
        "Many thanks. You :guitar:!",
        "You are so kind. Thanks :+1:",
        "Okay, will report that. Thanks.",
        "Ah, I see. ",
        "You are so hardworking today. Thanks.",
        "Love that. Thanks :+1:",
        "Nice, I bet Alexandru would give it a :yellow_heart:",
        "Oh nice! Great work you do!",
        "I see. That's intense! Thanks.",
        "Ah, okay. Thanks a lot. <3",
        "Whoa! :rocket:",
        "Sounds good. Thank you.",
        "Lovely, thanks!",
        "Alright, noted that.",
        "Wonderful :sparkles: thank you for your report!",
        "That's a lot. I do not envy you. Thanks, anyway! :+1:",
        "Oh, man. Okay, thanks! :+1:",
        "Sounds great! :muscle:",
        "Okay",
        "No way, that's a lot!",
        "Working hard day after day!",
        "Noted that :white_check_mark:",
        "Terrific! (terrific is a new awesome promoted by Woody Allen)",
        "Sounds good. Thanks!",
        ":sparkling_heart: Nice!",
        "Thank you!",
        ":heavy_check_mark: Gotcha.",
        "Fantastic! :fire:",
        "Good",
        "Awesome, thanks! :cake:",
        "Thanks for sharing! :star2:",
        "Love that! <3",
        "Amazing. What else I can say? Thanks.",
        "Ty!",
        "That's great! :+1:",
        "Nice, let me write that down :pencil:",
        "Fascinating :sparkles:",
        "I see. Thank you!",
        "Noted that :bow:",
        "Supercalifragilisticexpialidocious! :dancer: ",
        "Thanks! :tropical_fish:",
        "Nice! :muscle:",
        "Great, thanks.",
        "Is it Friday already? I wish it was Friday. Noted your status!",
        "OK",
        "Foarte bine!",
        "You are doing great!",
        "Incredible. Thanks.",
    )

    @staticmethod
    def initial_seed(user_id):
        # Slack IDs look like U023BECGF, U04B1CDVB, U04RVVBAY, etc
        # Fixed: string.letters is Python 2-only; string.ascii_letters has the
        # identical ordering (a-z then A-Z) and works on Python 2 and 3.
        digits = [
            string.ascii_letters.index(x) if not x.isdigit() else int(x)
            for x in user_id
        ]
        return sum(digits)

    @classmethod
    def pick(cls, phrases, user_id):
        # we want random phrases to be more predictable: the choice is a pure
        # function of the user id and the day of the year.
        seed = cls.initial_seed(user_id)
        day_of_year = datetime.utcnow().timetuple().tm_yday
        return phrases[(seed + day_of_year) % len(phrases)]
| true
|
8770966b5104e50763feefc4643c00762fe95c96
|
Python
|
cash2one/Swin
|
/reptile/List.py
|
UTF-8
| 6,003
| 2.671875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
######################## BEGIN LICENSE BLOCK ########################
# The Initial Developer of the Original Code is
# Chunwei from China Agricual University
# Portions created by the Initial Developer are Copyright (C) 2012
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Chunwei Mail: superjom@gmail.com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import Queue as Q
class List(list):
    'the runtime list for all the url list (kept sorted by hash(url)); Python 2 code'
    def find(self, url):
        '''
        Binary-search for url by hash. If absent, INSERT it at the sorted
        position and return False; return True only when already present.

        Usage:
            li.find('./index.php')
        '''
        l = len(self)
        first = 0
        end = l - 1
        mid = 0
        if l == 0:
            self.insert(0,url)
            return False
        while first < end:
            # Python 2 integer division keeps mid an int.
            mid = (first + end)/2
            if hash(url) > hash(self[mid]):
                first = mid + 1
            elif hash(url) < hash(self[mid]):
                end = mid - 1
            else:
                break
        if first == end:
            # Converged on one slot: insert on either side, or report found.
            if hash(self[first]) > hash(url):
                self.insert(first, url)
                return False
            elif hash(self[first]) < hash(url):
                self.insert(first + 1, url)
                return False
            else:
                return True
        elif first > end:
            # Pointers crossed: url belongs at `first`.
            self.insert(first, url)
            return False
        else:
            # Loop broke on an exact hash match.
            return True
    def show(self):
        # Debug dump: hash and url of every entry (Python 2 print statements).
        print '-'*50
        print 'list-'*10
        for i in range(len(self)):
            url = self[i]
            print hash(url),'__',url
    def getAll(self):
        '''
        Return the whole list (handy when interrupting a crawl).
        '''
        return self
class Urlist:
def __init__(self, siteNum):
self.siteNum = siteNum
self.list = []
for i in range(siteNum):
self.list.append(List())
def find(self, siteID, url):
'''
find url in list
'''
return self.list[siteID].find(url)
def show(self):
print 'show list'
for i in range(self.siteNum):
print '-'*50
print self.list[i].show()
def getAll(self):
return self.list
# get() timeout, in seconds.
# NOTE(review): UrlQueue.get() currently hard-codes timeout=2 instead of
# using this constant — confirm which value is intended.
TIMEOUT = 3
class Queue(Q.Queue):
    '''
    URL queue for a single site.
    '''
    def __init__(self):
        '''
        Storage format:
            siteID
            home_url
        A relative address uniquely identifies a url.
        '''
        Q.Queue.__init__(self)
        self.siteID = -1

    def init(self, siteID):
        # Fixed: this previously assigned the name-mangled self.__siteID,
        # which getAll() never read — the reported siteID stayed -1 forever.
        self.siteID = siteID

    def getAll(self):
        '''
        Drain the queue and return everything:
        [
            siteID,
            [
                ['title', 'url'],
                ['title', 'url'],
                ...
            ]
        ]
        '''
        res = []
        urls = []
        res.append(self.siteID)
        res.append(urls)
        # Fixed: previously dequeued at most one item despite the docstring
        # promising all of them; drain until the queue reports Empty.
        while True:
            try:
                urls.append(self.get_nowait())
            except Q.Empty:
                break
        return res
MAX = 100
class UrlQueue:
    """A Queue of pending (title, path) pairs per site (Python 2 code)."""
    def __init__(self, siteNum):
        self.siteNum = siteNum
        self.queue = []
        # record the length of every Queue in one place
        self.qsize = []
        # scan pointer: start checking from this Queue for suitability
        self.__index = 0
        for i in range(self.siteNum):
            q = Queue()
            q.init(i)
            self.queue.append(q)
    def getSize(self, siteID):
        # Number of pending urls for one site.
        return self.queue[siteID].qsize()
    def getAll(self):
        '''
        Pull all information out of every queue (for shutdown/persistence).
        '''
        res = []
        for queue in self.queue:
            res.append(queue.getAll())
        return res
    def __get_right_siteID(self):
        '''
        Pick the site with the largest url backlog; returns (siteID, size).
        '''
        maxn = 0
        max_index = 0
        size = 0
        for i,q in enumerate(self.queue):
            size = q.qsize()
            if size > maxn:
                maxn = size
                max_index = i
        return (max_index, maxn)
    def put(self, siteID, title, path):
        self.queue[siteID].put([title, path])
    def get(self, siteID):
        '''
        Raises Queue.Empty if the pool stays empty past the timeout.
        NOTE(review): docstring said 3 s but the call passes timeout=2 and
        the module-level TIMEOUT constant is unused — confirm intent.
        '''
        return self.queue[siteID].get(timeout = 2)
    def getUrlList(self, maxsize):
        '''
        Pick the most suitable (fullest) queue and take up to maxsize urls
        from it; returns {'siteID': ..., 'urls': [...]} or False when empty.
        '''
        qinfo = self.__get_right_siteID()
        idx = qinfo[0]
        size = qinfo[1]
        print 'find the right idx:',idx
        if size == 0:
            return False
        if size > maxsize:
            size = maxsize
        ulist = []
        print 'get size',size
        for i in range(size):
            ulist.append(self.get(idx))
        res = {}
        res['siteID'] = idx
        res['urls'] = ulist
        return res
    def show(self):
        # Debug dump; NOTE: this consumes the queues via q.get().
        print 'show queue'
        size = 0
        for q in self.queue:
            print '-'*50
            size = q.qsize()
            for i in range(size):
                u = q.get()
                print u[0],u[1]
| true
|
797039ade0d6080faf9b0ba302e099823bf447ab
|
Python
|
abhijeet0401/chatbot
|
/appointments/create_event.py
|
UTF-8
| 982
| 2.828125
| 3
|
[] |
no_license
|
from datetime import datetime, timedelta
from cal_setup import get_calendar_service
def create_event(start, end, summary='no summary', description='no description'):
    """Insert an event into the user's primary Google Calendar.

    start/end are RFC3339 datetime strings; returns the created event's id.
    """
    # authentication
    service = get_calendar_service()
    # assemble the event payload
    body = {
        "summary": summary,
        "description": description,
        "start": {"dateTime": start, "timeZone": 'Etc/GMT+1'},
        "end": {"dateTime": end, "timeZone": 'Etc/GMT+1'},
    }
    event_result = service.events().insert(calendarId='primary', body=body).execute()
    # echo the created event's fields
    print("created event")
    print("id: ", event_result['id'])
    print("summary: ", event_result['summary'])
    print("starts at: ", event_result['start']['dateTime'])
    print("ends at: ", event_result['end']['dateTime'])
    return event_result['id']
if __name__ == '__main__':
    # for test: create a one-hour event.
    # Fixed: the `end` argument was the integer 1, which is not a valid
    # RFC3339 dateTime and would be rejected by the Calendar API.
    create_event('2020-10-13T14:30:00+01:00', '2020-10-13T15:30:00+01:00',
                 'not default summary', 'not default description')
| true
|
5cb5b8f03e49586b71b3e5ea74b920fb95b9caa1
|
Python
|
carlosfernandez9/ReservaHotelesMinTic
|
/db/user_db.py
|
UTF-8
| 915
| 2.625
| 3
|
[] |
no_license
|
from typing import Dict
from pydantic import BaseModel
class UserInDB(BaseModel):
    """Pydantic model for a stored user record."""
    username: str
    # NOTE(review): password is stored and compared in plain text — consider hashing.
    password: str
    RewardPoints: int
# In-memory stand-in for a real user database, keyed by username.
# Fixed: the original first bound `database_users` to the typing alias
# Dict[str, UserInDB] (a no-op immediately overwritten); it is now a
# proper variable annotation instead.
database_users: Dict[str, UserInDB] = {
    "camilo24": UserInDB(**{"username": "camilo24",
                            "password": "root",
                            "RewardPoints": 20000}),
    "andres18": UserInDB(**{"username": "andres18",
                            "password": "hola",
                            "RewardPoints": 35000}),
    "guest": UserInDB(**{"username": "guest",
                         "password": "guest",
                         "RewardPoints": 0}),
}
def get_user(username: str):
    """Look up a user by name, falling back to the guest account."""
    return database_users.get(username, database_users["guest"])
def update_user(user_in_db: UserInDB):
    """Insert or replace the stored record for this user; return the stored value."""
    key = user_in_db.username
    database_users[key] = user_in_db
    return database_users[key]
| true
|
cc7a99e42302c448bfd8d59d0e2c2b38670dbeae
|
Python
|
linhuiyangcdns/leetcodepython
|
/两个数组的交集 II.py
|
UTF-8
| 982
| 4.0625
| 4
|
[] |
no_license
|
"""
给定两个数组,写一个方法来计算它们的交集。
例如:
给定 nums1 = [1, 2, 2, 1], nums2 = [2, 2], 返回 [2, 2].
注意:
输出结果中每个元素出现的次数,应与元素在两个数组中出现的次数一致。
我们可以不考虑输出结果的顺序。
跟进:
如果给定的数组已经排好序呢?你将如何优化你的算法?
如果 nums1 的大小比 nums2 小很多,哪种方法更优?
如果nums2的元素存储在磁盘上,内存是有限的,你不能一次加载所有的元素到内存中,你该怎么办?
"""
class Solution:
    def intersect(self, nums1, nums2):
        """
        Intersection of two arrays II: each element appears in the result
        as many times as it appears in BOTH arrays.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        from collections import Counter
        # Fixed: the original `if i in nums2` test ignored multiplicities,
        # e.g. intersect([1, 2, 2, 1], [2]) wrongly returned [2, 2].
        remaining = Counter(nums2)
        result = []
        for value in nums1:
            if remaining[value] > 0:
                remaining[value] -= 1
                result.append(value)
        return result
if __name__ == "__main__":
a = Solution()
nums = a.intersect([1,2,2,1],[2])
print(nums)
| true
|
6d8898b0a0b530aad7b70a5c4b81c0888f13b4eb
|
Python
|
MikimotoH/firmadyne
|
/scripts/shellutils.py
|
UTF-8
| 996
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess
from os import path
import sys
def shell(cmd):
    """Run *cmd* through the shell, echoing its output live.

    Environment variables in *cmd* are expanded first. stderr is merged into
    stdout so the live echo and the captured transcript stay in order.
    Returns a tuple ``(exit_code, combined_output)``.
    """
    cmd = path.expandvars(cmd)
    # universal_newlines=True makes the pipes text streams, so no manual
    # .decode() is needed (the original called .decode('utf8') on an
    # already-decoded str, which raises AttributeError on Python 3).
    proc = subprocess.Popen(cmd, shell=True, bufsize=1,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            universal_newlines=True)
    chunks = []
    for line in proc.stdout:
        # Echo as we go; end='' because the line keeps its own newline
        # (the original's bare print(s) doubled every newline).
        print(line, end='', flush=True)
        chunks.append(line)
    ret = proc.wait()
    cmdout = ''.join(chunks)
    if ret != 0:
        print('''\'%s\' returns %d''' % (cmd, ret), file=sys.stderr)
    return ret, cmdout
| true
|
42ae32cd63acf3f0c78e4bee4e62ed1e55783662
|
Python
|
ethanpasta/holberton-system_engineering-devops
|
/0x16-api_advanced/0-subs.py
|
UTF-8
| 524
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/python3
""" Module for task 0 """
import requests
def number_of_subscribers(subreddit):
    """Return the subscriber count of *subreddit*, or 0 on any failure."""
    # A browser-like User-Agent avoids reddit's rate-limit response for
    # default client strings.
    headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/76.0.3809.132 Safari/537.36')
    }
    url = "https://www.reddit.com/r/{}/about.json".format(subreddit)
    response = requests.get(url, headers=headers)
    try:
        data = response.json()["data"]
        return data["subscribers"]
    except Exception:
        # Invalid subreddit, non-JSON body, missing keys: report zero.
        return 0
| true
|
aa6f08d7f62645c41f77f36f1591f2327d08c6fd
|
Python
|
iraytrace/Adafruit_CircuitPython_Debouncer
|
/adafruit_debouncer.py
|
UTF-8
| 4,182
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Dave Astels for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_debouncer`
====================================================
Debounces an arbitrary predicate function (typically created as a lambda) of 0 arguments.
Since a very common use is debouncing a digital input pin, the initializer accepts a pin number
instead of a lambda.
* Author(s): Dave Astels
Implementation Notes
--------------------
**Hardware:**
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
# imports
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Debouncer.git"
import time
import digitalio
from micropython import const
import touchio
# Bit masks for Debouncer.state:
_DEBOUNCED_STATE = const(0x01)  # last stable (debounced) input value
_UNSTABLE_STATE = const(0x02)   # most recent raw reading
_CHANGED_STATE = const(0x04)    # debounced value toggled on the last update()
class Debouncer(object):
    """Debounce an input pin or an arbitrary predicate"""

    def __init__(self, io_or_predicate, interval=0.010):
        """Make an instance.
        :param DigitalInOut/function io_or_predicate: the pin (from board) to debounce
        :param int interval: bounce threshold in seconds (default is 0.010, i.e. 10 milliseconds)
        """
        self.state = 0x00
        # Wrap hardware inputs so both pins and predicates are read by call.
        if isinstance(io_or_predicate, (digitalio.DigitalInOut, touchio.TouchIn)):
            self.function = lambda: io_or_predicate.value
        else:
            self.function = io_or_predicate
        # Seed both the debounced and raw bits from the current reading.
        if self.function():
            self._set_state(_DEBOUNCED_STATE | _UNSTABLE_STATE)
        self.previous_time = 0
        self.interval = interval

    def _set_state(self, bits):
        # Turn the given flag bits on.
        self.state |= bits

    def _unset_state(self, bits):
        # Turn the given flag bits off.
        self.state &= ~bits

    def _toggle_state(self, bits):
        # Flip the given flag bits.
        self.state ^= bits

    def _get_state(self, bits):
        # True if any of the given flag bits is set.
        return (self.state & bits) != 0

    def update(self):
        """Update the debouncer state. MUST be called frequently"""
        now = time.monotonic()
        self._unset_state(_CHANGED_STATE)
        current_state = self.function()
        if current_state != self._get_state(_UNSTABLE_STATE):
            # Raw input flipped: restart the stability timer.
            self.previous_time = now
            self._toggle_state(_UNSTABLE_STATE)
        else:
            # Raw input stable for at least `interval`: commit the change.
            if now - self.previous_time >= self.interval:
                if current_state != self._get_state(_DEBOUNCED_STATE):
                    self.previous_time = now
                    self._toggle_state(_DEBOUNCED_STATE)
                    self._set_state(_CHANGED_STATE)

    @property
    def value(self):
        """Return the current debounced value."""
        return self._get_state(_DEBOUNCED_STATE)

    @property
    def rose(self):
        """Return whether the debounced value went from low to high at the most recent update."""
        return self._get_state(_DEBOUNCED_STATE) and self._get_state(_CHANGED_STATE)

    @property
    def fell(self):
        """Return whether the debounced value went from high to low at the most recent update."""
        return (not self._get_state(_DEBOUNCED_STATE)) and self._get_state(_CHANGED_STATE)
| true
|
52289ffbd49d895979c340f7843de75e9fbccff3
|
Python
|
thewtex/dwl-multidop-l2-viewer
|
/source/fileparsing/dwl_multidop_tw.py
|
UTF-8
| 2,164
| 2.875
| 3
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
import numpy
import os
class TW:
    """Process a DWL Multidop L2 *.TW? file (Python 2 code)

    Arguments:
      filepath: path to the *.TW? file
      prf: Pulse repetition frequency
      doppler_freq_1 Doppler Frequency of Channel 1
      doppler_freq_2 Doppler Frequency of Channel 2

    After initialization, will have 'chan1' and 'chan2' which are numpy 1D arrays
    with the peak velocity for each channel
    """

    def __parse_data(self):
        # Read the interleaved two-channel int16 stream and convert raw
        # samples to velocities, updating the Qt progress bar per segment.
        print 'Reading ', self._filepath
        f = open(self._filepath, 'rb')
        samp_per_segment = 64
        bytes_per_sample = 2
        channels = 2
        tcd_dtype= 'int16'
        f_size = os.path.getsize(self._filepath)
        # Python 2 integer division: any trailing partial segment is ignored.
        segments = f_size / ( samp_per_segment * bytes_per_sample * channels )
        self._progress_bar.setMinimum(0)
        self._progress_bar.setMaximum(segments)
        self._value = 0
        self._progress_bar.setValue(self._value)
        chan1 = numpy.array([], dtype=tcd_dtype)
        chan2 = numpy.array([], dtype=tcd_dtype)
        data = numpy.zeros((samp_per_segment), dtype=tcd_dtype)
        for seg in xrange(segments):
            self._value = self._value + 1
            self._progress_bar.setValue(self._value)
            # Channels are stored segment-interleaved: 64 samples of channel
            # 1 followed by 64 samples of channel 2.
            data = numpy.fromfile(f, dtype=tcd_dtype, count=samp_per_segment)
            chan1 = numpy.concatenate((chan1, data.copy()) )
            data = numpy.fromfile(f, dtype=tcd_dtype, count=samp_per_segment)
            chan2 = numpy.concatenate((chan2, data.copy()) )
        f.close()
        # Scale raw 12-bit samples to velocity; presumably 154000.0 is the
        # speed of sound in tissue in cm/s — TODO confirm against the
        # instrument documentation.
        chan1 = chan1.astype(float) / 2.0**11 * self._prf/2.0 *154000.0 / self._doppler_freq_1/10**3
        chan2 = chan2.astype(float) / 2.0**11 * self._prf/2.0 *154000.0 / self._doppler_freq_2/ 10**3
        self.chan1 = chan1
        self.chan2 = chan2

    def __init__(self, filepath, prf, doppler_freq_1, doppler_freq_2,
            progress_bar):
        self._filepath = filepath
        self._prf = float(prf)
        self._doppler_freq_1 = float(doppler_freq_1)
        self._doppler_freq_2 = float(doppler_freq_2)
        self._progress_bar = progress_bar
        # Parsing happens eagerly at construction time.
        self.__parse_data()
| true
|
72035140810715d9539ae7f2534099ec64ba6870
|
Python
|
hernandez-jesus/Harvard-REU-2017
|
/motor_init.py
|
UTF-8
| 2,753
| 3.03125
| 3
|
[] |
no_license
|
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
import atexit
# From Adafruit MotorHat example code:
# create a default MotorHAT object, no changes to I2C address or frequency.
mh = Adafruit_MotorHAT(addr=0x60)
# Motor channel assignments for the "LITTLE BLUE" rig.
# NOTE(review): the left/right comments disagree with the pair grouping
# used below (SetAndDriveRight drives myMotor1+myMotor3 = channels 1 and 2,
# while myMotor2 here is channel 3 labelled "left") -- verify wiring.
myMotor1 = mh.getMotor(1) # right motor
myMotor2 = mh.getMotor(3) # left motor
myMotor3 = mh.getMotor(2) # right motor
myMotor4 = mh.getMotor(4) # left motor
print('motors set')
# get motor values between 0 and 255
def getMotorValue(percent):
    """Convert a throttle fraction (0.0-1.0) to an 8-bit motor value (0-255)."""
    return int(percent * 255)
#########################
####### Are we picking the center line of the depth stream, rgb stream, or transformed rgb-depth stream?
##########################
def isCorrectionNeeded(x):
    """Return True when a steering correction is needed.

    x: horizontal pixel coordinate of the cone's center of mass.

    No correction is needed while the center of mass sits inside the
    +/-5 px dead-band around the 320 px frame centerline.

    Fixes vs. original: the window test compared the already-centered
    `error` against the 315-325 pixel range (impossible to satisfy), and
    the result was written to a misspelled variable `need_to_correct`,
    so the function always returned True.
    """
    centerLineOfFrame = 320
    error = x - centerLineOfFrame
    # Window of acceptable values: within 5 px of the centerline.
    return not (-5 < error < 5)
def getError(x):
    """Signed pixel offset of x from the 320 px depth-stream centerline."""
    centerline = 320  # centerline of the depth stream
    return int(x - centerline)
def getCorrection(error, previous_error, dt):
    """PD steering correction from the current and previous centerline error.

    error:          current pixel error (see getError)
    previous_error: pixel error from the previous frame
    dt:             time between the two samples (must be non-zero)

    Returns p*error + d*(error - previous_error)/dt.
    (The original computed the value but never returned it.)
    """
    p = 0  # proportional gain
    d = 5  # derivative gain
    return (p * error) + d * ((error - previous_error) / dt)
# get motor values between 0 and 255
def getMotorValue(percent):
    """Convert a throttle fraction (0.0-1.0) to an 8-bit motor value (0-255).

    NOTE(review): duplicate of the definition above (copy-paste).
    """
    return int(percent * 255)
# used to set speed and direction of Right Motor Pairs
def SetAndDriveRight(speed=0, forward=True, MV=0):
    """Set speed and direction of the right motor pair (myMotor1, myMotor3).

    speed:   throttle fraction 0.0-1.0; when non-zero it overrides MV.
    forward: True drives forward, False backward.
    MV:      raw 8-bit motor value used when speed == 0.

    Fix vs. original: the def line was missing the ':' after the
    parameter list, a SyntaxError.
    """
    if speed != 0:
        MV = getMotorValue(speed)
    MV = abs(MV)
    print("SENDING MOTOR VALUE: " + str(MV))
    myMotor1.setSpeed(MV)
    myMotor3.setSpeed(MV)
    if forward:
        myMotor1.run(Adafruit_MotorHAT.FORWARD)
        myMotor3.run(Adafruit_MotorHAT.FORWARD)
    else:
        myMotor1.run(Adafruit_MotorHAT.BACKWARD)
        myMotor3.run(Adafruit_MotorHAT.BACKWARD)
# used to set speed and direction of Left Motor Pairs
def SetAndDriveLeft(speed=0, forward=True, MV=0):
    """Set speed and direction of the left motor pair (myMotor2, myMotor4).

    speed:   throttle fraction 0.0-1.0; when non-zero it overrides MV.
    forward: True drives forward, False backward.
    MV:      raw 8-bit motor value used when speed == 0.

    Fix vs. original: the def line was missing the ':' after the
    parameter list, a SyntaxError.
    """
    if speed != 0:
        MV = getMotorValue(speed)
    MV = abs(MV)
    myMotor2.setSpeed(MV)
    myMotor4.setSpeed(MV)
    if forward:
        myMotor2.run(Adafruit_MotorHAT.FORWARD)
        myMotor4.run(Adafruit_MotorHAT.FORWARD)
    else:
        myMotor2.run(Adafruit_MotorHAT.BACKWARD)
        myMotor4.run(Adafruit_MotorHAT.BACKWARD)
# fuction used to disable motors on shutdown
def turnOffMotors():
    """Release all four motor channels so they stop when the program exits."""
    for channel in (1, 2, 3, 4):
        mh.getMotor(channel).run(Adafruit_MotorHAT.RELEASE)
atexit.register(turnOffMotors)
| true
|
b186b80b777fea51a378e2222dd9caa0249b2f26
|
Python
|
todddeluca/vanvactor_mirna
|
/python/flybaseutil.py
|
UTF-8
| 1,310
| 2.71875
| 3
|
[] |
no_license
|
def select_flybase_gene_ids(gene_conversion_table):
    '''
    Return a sorted list of unique flybase gene ids from the gene
    conversion table downloaded from flybase, skipping ids that did
    not convert.
    '''
    genes = set()
    with open(gene_conversion_table) as table:
        for raw in table:
            row = raw.strip()
            # Skip comments and blank lines.
            if not row or row.startswith('#'):
                continue
            # Fields: Submitted ID, Current ID, mystery field,
            # Converted ID, Related record.  Failed conversions are
            # marked 'unknown ID' (col 2) or '-' (col 4).
            fields = row.split("\t")
            if fields[1] == 'unknown ID' or fields[3] == '-':
                continue
            # Column 4 holds the converted flybase gene id.
            gene = fields[3]
            assert gene.startswith("FBgn")
            genes.add(gene)
    return sorted(genes)
| true
|
8dbf93ee1773af72ca7bce7da55cbfaf2ffb64ba
|
Python
|
Control-xl/game
|
/state_display.py
|
UTF-8
| 1,013
| 2.953125
| 3
|
[] |
no_license
|
import pygame
class StateDisplay():
    """On-screen display of the hero's state: health (heart icons) and mana."""
    def __init__(self, screen, settings):
        # screen: pygame surface to draw on; settings supplies the hero's
        # initial blood (health) and magic (mana) values.
        self.settings = settings
        self.screen = screen
        # Displayed health, with one heart icon per health point.
        self.blood = settings.hero_init_blood
        self.blood_ico = pygame.image.load('images/heart.ico')
        self.blood_ico.convert()
        self.blood_rect = self.blood_ico.get_rect()
        self.blood_ico_list = []
        for i in range(self.blood):
            self.blood_ico_list.append(self.blood_ico)
        # Mana -- tracked here but not drawn by blitme.
        self.magic = settings.hero_init_magic
    def update(self, hero):
        # Sync with the hero's current health, growing the icon list when
        # health has increased (the list is never shrunk here).
        self.blood = hero.blood
        length = len(self.blood_ico_list)
        if self.blood > length:
            for i in range(self.blood - length):
                self.blood_ico_list.append(self.blood_ico)
    def blitme(self):
        # Draw one heart per health point in a row along the top edge.
        for i in range(self.blood):
            self.screen.blit(self.blood_ico_list[i],
                             (i * self.blood_rect.width, 0))
| true
|
9160393f4e9420ea953a4d65d66d4145b3d33ae2
|
Python
|
nubok/project_euler
|
/problem_28.py
|
UTF-8
| 738
| 3.75
| 4
|
[] |
no_license
|
# Project Euler #28: sum of the numbers on both diagonals of an
# n x n number spiral (here 1001 x 1001), built by walking
# right/down/left/up outward from the centre.
# NOTE(review): Python 2 source (print statement; '/' is integer division).
spiral_size = 1001
spiral = { }
"""direction:
0: right
1: down
2: left
3: up
"""
direction = 0
# Start in the centre cell.
row = (spiral_size-1)/2
col = (spiral_size-1)/2
delta_row = [0, 1, 0, -1]
delta_col = [1, 0, -1, 0]
current_len = 1
current_number = 1
# Walk outward writing consecutive integers; the run length grows by one
# after every second turn (after moving down and after moving up).
while current_number != (spiral_size*spiral_size+1):
    for j in range(current_len):
        spiral[(row, col)] = current_number
        current_number += 1
        row += delta_row[direction]
        col += delta_col[direction]
    if direction % 2 == 1:
        current_len += 1
    direction = (direction + 1) % 4
result = -1 # The number in the middle is 1 - it lies on both diagonals, so start at -1 to cancel the double count
for i in range(spiral_size):
    result += spiral[(i, i)]
    result += spiral[(spiral_size-1-i, i)]
print result
| true
|
e0ea79884035a5ab8c26db7d29efec8ad8717db1
|
Python
|
Andres-Hernandez-Mata/Scripts-Python
|
/src/01_Lucky.py
|
UTF-8
| 563
| 3.15625
| 3
|
[] |
no_license
|
"""
Uso: Google search
Creador: Andrés Hernández Mata
Version: 1.0.0
Python: 3.9.1
Fecha: 06 Junio 2021
"""
import os, time, random
try:
from googlesearch import search
except ImportError:
os.system('pip install google')
print('Installing google... Ejecute de nuevo')
exit()
# to search
query = input("Búsqueda: ")
print("Buscando...")
time.sleep(2)
selec = random.randint(0,14)
valor = 0
for enlace in search(query, tld="com", num=15, stop=15, pause=5):
print (enlace)
if valor == selec:
print(selec,enlace)
break
valor += 1
| true
|
c5b59b3512b9e388a155c64742bf7f708d6dfcb3
|
Python
|
itbullet/python_projects
|
/Stack_20190722/stack_homework2.py
|
UTF-8
| 576
| 3.921875
| 4
|
[] |
no_license
|
# Push a list onto a stack, then pop everything back into the list --
# LIFO order reverses it.  Depends on the local `stack_class` module.
import stack_class

number_stack = stack_class.Stack()
number_list = [1, 2, 3, 4, 5]
print(number_list)
"""Version 1
for i in range(len(number_list)):
    num = number_list[i]
    #print(str(i) + " " + str(num))
    number_stack.push(num)
"""
#Version 2: push every element, then empty the source list.
for item in number_list:
    #print(item)
    number_stack.push(item)
number_list.clear()
"""
print("**********")
print(number_list)
print(number_stack.peek())
print("**********")
"""
# Pop everything back; range() is evaluated once, so exactly the initial
# stack size is popped even though the stack shrinks while iterating.
for i in range(number_stack.size()):
    num = number_stack.pop()
    #print(num)
    number_list.append(num)
print(number_list)
| true
|
78b16b72ec4490ac58b5fa41e202da3a776b44c1
|
Python
|
doc22940/twint-utils
|
/link_counter.py
|
UTF-8
| 3,211
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#This code takes a list of twitter usernames, iterates over them to find tweets where they shared links,
#and then sums up the base URLs of everyones links combined and turns it into a matplotlib graph.
#I put a bunch of code documentation in and it really will help you use this.
#the code does take a bit to run depending on your tweet limit and how many accounts you pull
import pandas as pd
import re
from urllib.parse import urlparse
from urllib.request import urlopen
import csv
import twint #you may need to install this first if you haven't!
import matplotlib.pyplot as plt; plt.rcdefaults()
import numpy as np
import matplotlib.pyplot as plt
import csv
import os
#this prevents async problems/ runtime errors
#https://markhneedham.com/blog/2019/05/10/jupyter-runtimeerror-this-event-loop-is-already-running/
import nest_asyncio
nest_asyncio.apply()
#put accounts in between the brackets, comma seperated, without the @sign. ie ["jack", "realDonaldtrump", "Blacksocialists"]
sourceAccounts= ["PUT YOUR ACCOUNTS HERE" , "DIRECTIONS ABOVE"]
if not os.path.isfile('all_urls.csv'):
with open('all_urls.csv', 'wb') as f:
pass
for username in sourceAccounts:
c = twint.Config()
print("pulling tweets for " + str(username) + "...")
c.Username = username
c.Hide_output = True #makes the command line less noisy
c.Limit = 500 #maximum number of tweets to pull per account
c.Store_object = True
#only selects tweets that have links
c.Links = "include"
baseURLs = []
twint.run.Search(c)
tweets = twint.output.tweets_list
for tweet in tweets:
#urls is a class in the twint tweet objects to see all classes: dir(tweet)
for URL in tweet.urls:
parsed_uri = urlparse(URL)
baseURL = str('{uri.netloc}'.format(uri=parsed_uri)) #gets the base URL
if baseURL[:7] == 'twitter': #ignores RTs as links
pass
elif baseURL[:4] == "www.": #strips www for a e s t h e t i c
baseURLs.append([username, baseURL[4:]])
else:
baseURLs.append([username, baseURL])
# I added this in case it gets slow in pulling the list so you can stop at any point and then just
#edit your sourceAccounts list to get rid of the one's you've already done.
with open('all_urls.csv','a', newline='') as f:
for baseURL in baseURLs:
writer = csv.writer(f)
writer.writerow(baseURL)
all_urls = pd.read_csv('all_urls.csv', names = ['username','URL'])
print("total tweets pulled: " + str(len(all_urls)))
labels = ['Base URL', 'Frequency']
countedURLs = all_urls['URL'].value_counts()
countedURLs.to_csv('countedURLs.csv')
top_urls = countedURLs.iloc[:10]
top_urls = top_urls[::-1] #makes it descending
y_pos = np.arange(len(top_urls))
performance = top_urls
print(performance)
baseURLs = top_urls.index
print(baseURLs)
plt.barh(y_pos, performance, align='center', alpha=0.5)
plt.yticks(y_pos, baseURLs)
plt.xlabel('Frequency of Links')
plt.title('Most Frequent External Links of all Handles Tested')
plt.show()
| true
|
b8ce7bc195186f971cfb5cb0785b840e34ecef0c
|
Python
|
Lisa-Apple/myInterface
|
/api_keyword/key_myOperations.py
|
UTF-8
| 1,157
| 2.8125
| 3
|
[] |
no_license
|
'''
title: 对接口响应体(不仅仅)进行分析的方法
time: 2020.12.12
auth: wanglisha
'''
import json, jsonpath
class OperateFunctions():
    """Helpers for serializing request data and extracting validation
    fields from interface response bodies via jsonpath."""
    # Serialize request parameters to a JSON string.
    def json_dumps(self, data):
        return json.dumps(data)
    # Parse a JSON response string into Python objects.
    def json_loads(self, data):
        return json.loads(data)
    # Fetch the value(s) of a validation field named `key` from the
    # response text `res`.
    def get_text(self, res, key):
        if res is not None:
            try:
                # Parse res as JSON and locate every occurrence of `key`
                # anywhere in the document via a recursive jsonpath query.
                text = json.loads(res)
                value = jsonpath.jsonpath(text, '$..{0}'.format(key))
                # jsonpath returns a list of matches, or False on failure.
                if value:
                    # Unwrap a single-element match list to the bare value.
                    if len(value) == 1:
                        return value[0]
                    # else:
                    #     return value
                # else:
                # Multi-element match lists (and the False failure value)
                # are returned as-is.
                return value
            except Exception as e:
                # NOTE(review): parse errors are returned as the exception
                # object rather than raised -- callers must check for it.
                return e
        else:
            return None
| true
|
6507b4d898bda0f3430bf62821ad6d8105d0f1c8
|
Python
|
VineetMakharia/LeetCode
|
/463-Island-Perimeter.py
|
UTF-8
| 684
| 3.21875
| 3
|
[] |
no_license
|
class Solution:
    def islandPerimeter(self, grid):
        """Return the perimeter of the island formed by 1-cells in `grid`.

        Every land cell starts with 4 exposed edges; each horizontally or
        vertically adjacent pair of land cells hides two of them.
        """
        if not grid:
            return 0
        rows, cols = len(grid), len(grid[0])
        land = 0
        shared = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 1:
                    land += 1
                    # Count each adjacent pair once: look right and down.
                    if r + 1 < rows and grid[r + 1][c] == 1:
                        shared += 1
                    if c + 1 < cols and grid[r][c + 1] == 1:
                        shared += 1
        return 4 * land - 2 * shared
obj = Solution()
print(obj.islandPerimeter([[0,1,0,0],[1,1,1,0],[0,1,0,0],[1,1,0,0]]))
| true
|
b729d0cb8e150953ee1c53f2b43feff8f8edc108
|
Python
|
charlesjavelona/coding-the-matrix
|
/chapter2/quiz_2_10_6.py
|
UTF-8
| 312
| 2.953125
| 3
|
[] |
no_license
|
from module.vec import Vec
def list2vec(L):
    """
    Input: List L of field elements
    Output: an instance of Vec with domain {0, 1, 2, ..., len(L)-1} such
            that v[i] = L[i] for each integer i in the domain.
    Example: [10, 20, 30] -> Vec({0, 1, 2}, {0: 10, 1: 20, 2: 30})

    Fix vs. original: the original built the domain from the *values* of
    L and passed an empty function dictionary, so v[i] was never L[i]
    (contradicting its own docstring).
    """
    return Vec(set(range(len(L))), {i: x for i, x in enumerate(L)})
| true
|
6c3a750b749214686d719fd8d207efa88605d900
|
Python
|
stjordanis/evolutionary_ensembles
|
/utils/load.py
|
UTF-8
| 2,825
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import utils.dictionary as d
def load_labels(dataset='RSDataset', step='validation', fold=1):
    """Loads ground truth labels from a particular dataset, step (validation or test) and fold number.

    Args:
        dataset (str): Dataset's identifier.
        step (str): Whether it should load from validation or test.
        fold (int): Number of fold to be loaded.

    Returns:
        A numpy array holding the loaded ground truth labels.
    """
    # Map every categorical label in the file through the dataset's dictionary.
    mapping = d.create_dictionary(dataset)
    path = f'data/{dataset}/{step}/ground_{fold}.txt'
    with open(path, 'r') as fh:
        labels = [mapping[row.strip()] for row in fh]
    return np.asarray(labels)
def load_predictions(dataset='RSDataset', step='validation', fold=1):
    """Loads predictions from a particular dataset, step (validation or test) and fold number.

    Args:
        dataset (str): Dataset's identifier.
        step (str): Whether it should load from validation or test.
        fold (int): Number of fold to be loaded.

    Returns:
        A numpy array holding the predicted labels.
    """
    # Each line holds whitespace-separated categorical predictions; map
    # every token through the dataset's dictionary.
    mapping = d.create_dictionary(dataset)
    path = f'data/{dataset}/{step}/pred_{fold}.txt'
    with open(path, 'r') as fh:
        preds = [[mapping[token] for token in row.split()] for row in fh]
    return np.asarray(preds)
def load_candidates(dataset='RSDataset', step='validation', fold=1):
    """Loads candidates from a particular dataset, step (validation or test) and fold number.

    Args:
        dataset (str): Dataset's identifier.
        step (str): Whether it should load from validation or test.
        fold (int): Number of fold to be loaded.

    Returns:
        Numpy arrays holding the predicted and ground truth labels.
    """
    preds = load_predictions(dataset, step, fold)
    labels = load_labels(dataset, step, fold)
    # Both files must describe the same number of samples.
    if labels.shape[0] != preds.shape[0]:
        raise RuntimeError(
            'Amount of ground truth labels differ from predictions.')
    return preds, labels
| true
|
5865525201d3b9d803e9d207239d30cae32d66de
|
Python
|
case2012/html_parse
|
/fetch_test.py
|
UTF-8
| 1,181
| 2.71875
| 3
|
[] |
no_license
|
#!/usr/bin/python
import re
# Load the HTML document to parse.
# Fixes vs. original: the file handle was never closed (and the content
# was accumulated with += in a loop); read it in one go instead.
with open('/home/chen/test.html', 'r') as fp:
    html_text = fp.read()
# Tuple-index constants for the parsed-tag record layout.
tag_name = 0
tag_attr = 1
tag_end = 2
tag_text = 3
tag_child = 4
tag_parent = 5
# Tag delimiters.
tag_ss = '<'
tag_ee = '>'
tag_es = '</'
# Fix vs. original: gen_taglist(6) was called here, before that function
# is defined further down -- a NameError at import time.  Build the
# six-slot record inline instead (same value).
html_list = ['' for _ in range(6)]
con_nu = html_text.find(tag_ss)
def fetch_tag(test_string):
    # Scan test_string for '<...>' tag substrings and collect their text.
    # NOTE(review): this function appears unfinished and is broken as
    # written -- it appends to `tag_list_text`, which is never defined
    # (NameError on the first tag found), and the while-condition joins
    # its exit tests with `or` where `and` was most likely intended, so
    # the loop does not terminate the way the tests suggest.
    tag_ss = '<'
    tag_ee = '>'
    tag_es = '</'
    s_cont = 0
    e_cont = 0
    while(s_cont != len(test_string) or e_cont != len(test_string) or e_cont != -1 or s_cont != -1):
        tmp_cont = s_cont
        tag_tmp_text = ''
        # Drop everything before the previous tag end and re-search.
        test_string = test_string[e_cont:]
        s_cont = test_string.find(tag_ss)
        e_cont = test_string.find(tag_ee)
        if tmp_cont > e_cont:
            tag_tmp_string = test_string[s_cont:e_cont + 1]
            for tmp_char in tag_tmp_string:
                tag_tmp_text += tmp_char
            tag_list_text.append(tag_tmp_text)
        tag_tmp_string = test_string[s_cont:e_cont + 1]
        for tmp_char in tag_tmp_string:
            tag_tmp_text += tmp_char
def gen_taglist(range_num):
    """Return a list of `range_num` empty strings."""
    return ['' for _ in range(range_num)]
| true
|
8bb392c1ddfde10079e19a15f09a7aba21657e66
|
Python
|
isobelfc/eng84_python_oop
|
/python.py
|
UTF-8
| 665
| 3.90625
| 4
|
[] |
no_license
|
# Create python class inheriting from snake
from snake import Snake
class Python(Snake):
    """A python: a large, non-venomous constrictor, inheriting from Snake."""
    def __init__(self):
        super().__init__()
        self.large = True
        self.two_lungs = True
        # Polymorphism: overrides the venom attribute inherited from Snake.
        self.venom = False
    def climb(self):
        # Behaviour specific to Python (not on the Snake base class).
        return "up we go"
    def swallow(self):
        # Constrictors swallow prey whole.
        return "can't be bothered to chew"
python_object = Python()
print(python_object.breathe()) # breathe() from Animal class
print(python_object.hunt()) # hunt() from Reptile class
print(python_object.scent_with_tongue()) # scent_with_tongue() from Snake class
print(python_object.climb()) # climb() from Python class
| true
|
6ac4e6670c16f9e986cd87d5bbd255e227b65f6b
|
Python
|
qmisky/python_fishc
|
/6-2猜随机数(gui界面版).py
|
UTF-8
| 1,663
| 3.078125
| 3
|
[] |
no_license
|
import easygui as g
import sys
import random
g.multpasswordbox(msg="请输入您的信息:",title="猜数字游戏",fields=("用户名","密码"),values="")
choice=("简单","中级","难","超级难")
g.buttonbox(msg="请选择游戏等级:",title="游戏等级",choices=choice)
# b=100
# if g.buttonbox(choices=choice(o)):
# b=10
# elif g.buttonbox(choices=choice(1)):
# b=50
# elif g.buttonbox(choices=choice(2)):
# b=100
# elif g.buttonbox(choices=choice(3)):
# b=1000
secret=random.randint(0,1000)
while True:
while True:
temp=g.integerbox(msg="请输入一个数字: ",title="猜数字",default="",lowerbound=0,upperbound=1000)
guess=int(temp)
if guess > 1000 or guess < 0:
g.msgbox(msg="输入不合法。请重新输入!",title="输入有误",ok_button="确定")
else:
break
if guess == secret:
g.msgbox(msg="太厉害啦,你居然猜对啦!",title="congratulations!",ok_button="你好棒!")
g.choicebox(msg="要不要再玩一次?",title="继续游戏",choices=("好呀好呀","不用啦,谢谢!"))
if g.ccbox():
pass
else:
g.multchoicebox(msg="退出的理由是什么?(可多选)", title="请选择退出理由:",choices=("纠结症犯了", "生无可恋", "就是要退出", "你管理由是啥", "不退出整个世界都不好了"))
g.msgbox("goodbye,I will miss you!")
sys.exit(0)
else:
if guess > secret:
g.msgbox(msg="猜的数字太大啦!",title="",ok_button="确定")
else :
g.msgbox(msg="猜的数字太小啦!", title="", ok_button="确定")
| true
|
55907450f1734a47509a123fd648cbbb163362d1
|
Python
|
MOHAMMAD-FATHA/Python_Programs
|
/Data Structures/Tuples/CheckEleinTuple.py
|
UTF-8
| 286
| 3.5
| 4
|
[] |
no_license
|
"""
* @Author: Mohammad Fatha
* @Date: 2021-09-26 19:20
* @Last Modified by: Mohammad Fatha
* @Last Modified time: 2021-09-26 19:20
* @Title: :Python program to check whether an element exists within a tuple
"""
#create a tuple
tuple1 = 2, 4, 5, 6, 2, 3, 4, 4, 7
print(2 in tuple1)
print(5 in tuple1)
| true
|
6b4cfa672163dd5fbdac271e14f19a6d3ff7c27b
|
Python
|
zhaoyinsheng/helloworld
|
/exercises 1-3.py
|
UTF-8
| 541
| 4.375
| 4
|
[] |
no_license
|
### LESSON 1: if elif else (guess-the-number comparisons, kept as notes)
# if guess == num:
#     print("Current! \nBut no any prize!")
# elif guess > num:
#     print("maybe a little BIGGER")
# else:
#     print("maybe a little SMALLER")
# print("DONE!")

### LESSON 2: while/else (re-prompt until the guess matches, kept as notes)
# guess = int(input("Enter a number:"))
# while guess != num:
#     print("BIGGER!\n" if guess > num else "SMALL!\n")
#     print("you should try again!")
#     guess = int(input("Enter a number again:"))
# else:
#     print("Current! it's DONE!")

### LESSON 3: for loop -- the only lesson that actually runs.
for number in range(1, 5):
    print("The num is {}".format(number))
| true
|
4c434be66cc55d253a3f485b40495e4f489869ef
|
Python
|
SusanLovely/Test
|
/testing/test_demo.py
|
UTF-8
| 1,377
| 2.5625
| 3
|
[] |
no_license
|
from appium import webdriver
import pytest
class TestXueQiu:
    """Appium UI test for the Xueqiu stock app's search flow."""
    def setup(self):
        # Desired capabilities for the attached Android device.
        # noReset keeps the app's login state between runs.
        desire_cap = {
            "platformName": "android",
            # "platformVersion": "5.1.1",
            # "deviceName": "T3QDU15B04000723",
            "deviceName": "66J5T19110001875",
            "appPackage": "com.xueqiu.android",
            "appActivity": ".view.WelcomeActivityAlias",
            "noReset": True
        }
        self.driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desire_cap)
        self.driver.implicitly_wait(10)
    def teardown(self):
        self.driver.quit()
    def test_search(self):
        print("搜索测试用例")
        '''
        1. Open the Xueqiu app
        2. Tap the search input box
        3. Type "阿里巴巴" (Alibaba) and tap the matching result
        4. Read Alibaba's price and assert it is above 200
        '''
        self.driver.find_element_by_id("com.xueqiu.android:id/home_search").click()
        self.driver.find_element_by_id("com.xueqiu.android:id/search_input_text").send_keys("阿里巴巴")
        self.driver.find_element_by_xpath("//*[@resource-id='com.xueqiu.android:id/name' and @text='阿里巴巴']").click()
        # NOTE(review): asserting an absolute share price makes this test
        # depend on the live market value -- it will flake as prices move.
        current_price = float(self.driver.find_element_by_id("com.xueqiu.android:id/current_price").text)
        print(current_price)
        assert current_price > 200
| true
|
542a74ca0c0a29dfdf9fc9ae7c316b46962aa169
|
Python
|
fodisi/ByteAcademy-Bootcamp
|
/w3/d1/schema.py
|
UTF-8
| 369
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
def create_table(ticker_symbol):
    """Create a price-history table named after `ticker_symbol` in master.db.

    The table has an autoincrementing integer primary key `pk` and a
    `last_price` float column.  Returns True on success.

    Fixes vs. original: missing `import sqlite3`, an unbalanced
    parenthesis on the execute() call (SyntaxError), a stray
    argument-less cursor.execute(), and the connection was closed
    without committing.
    """
    import sqlite3  # local import: the original module never imported it
    # NOTE(review): the table name is interpolated into the SQL because
    # placeholders cannot name identifiers; reject anything that is not a
    # plain identifier so callers cannot inject SQL.
    if not ticker_symbol.isidentifier():
        raise ValueError('invalid table name: {0!r}'.format(ticker_symbol))
    connection = sqlite3.connect('master.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        cursor.execute(
            'create table {0} (pk integer primary key autoincrement, '
            'last_price float)'.format(ticker_symbol))
        connection.commit()
    finally:
        connection.close()
    return True
if __name__ == '__main__':
create_table('nke')
| true
|
478f73e97a5555db0b8d70c6f713d1cb1b741628
|
Python
|
eldridgejm/dsc80-sp21
|
/projects/04/project04.py
|
UTF-8
| 11,123
| 3.5625
| 4
|
[] |
no_license
|
import os
import pandas as pd
import numpy as np
import requests
import time
import re
# ---------------------------------------------------------------------
# Question #1
# ---------------------------------------------------------------------
def get_book(url):
"""
get_book that takes in the url of a 'Plain Text UTF-8' book and
returns a string containing the contents of the book.
The function should satisfy the following conditions:
- The contents of the book consist of everything between
Project Gutenberg's START and END comments.
- The contents will include title/author/table of contents.
- You should also transform any Windows new-lines (\r\n) with
standard new-lines (\n).
- If the function is called twice in succession, it should not
violate the robots.txt policy.
:Example: (note '\n' don't need to be escaped in notebooks!)
>>> url = 'http://www.gutenberg.org/files/57988/57988-0.txt'
>>> book_string = get_book(url)
>>> book_string[:20] == '\\n\\n\\n\\n\\nProduced by Chu'
True
"""
return ...
# ---------------------------------------------------------------------
# Question #2
# ---------------------------------------------------------------------
def tokenize(book_string):
"""
tokenize takes in book_string and outputs a list of tokens
satisfying the following conditions:
- The start of any paragraph should be represented in the
list with the single character \x02 (standing for START).
- The end of any paragraph should be represented in the list
with the single character \x03 (standing for STOP).
- Tokens in the sequence of words are split
apart at 'word boundaries' (see the regex lecture).
- Tokens should include no whitespace.
:Example:
>>> test_fp = os.path.join('data', 'test.txt')
>>> test = open(test_fp, encoding='utf-8').read()
>>> tokens = tokenize(test)
>>> tokens[0] == '\x02'
True
>>> tokens[9] == 'dead'
True
>>> sum([x == '\x03' for x in tokens]) == 4
True
>>> '(' in tokens
True
"""
return ...
# ---------------------------------------------------------------------
# Question #3
# ---------------------------------------------------------------------
class UniformLM(object):
"""
Uniform Language Model class.
"""
def __init__(self, tokens):
"""
Initializes a Uniform languange model using a
list of tokens. It trains the language model
using `train` and saves it to an attribute
self.mdl.
"""
self.mdl = self.train(tokens)
def train(self, tokens):
"""
Trains a uniform language model given a list of tokens.
The output is a series indexed on distinct tokens, and
values giving the (uniform) probability of a token occuring
in the language.
:Example:
>>> tokens = tuple('one one two three one two four'.split())
>>> unif = UniformLM(tokens)
>>> isinstance(unif.mdl, pd.Series)
True
>>> set(unif.mdl.index) == set('one two three four'.split())
True
>>> (unif.mdl == 0.25).all()
True
"""
return ...
def probability(self, words):
"""
probability gives the probabiliy a sequence of words
appears under the language model.
:param: words: a tuple of tokens
:returns: the probability `words` appears under the language
model.
:Example:
>>> tokens = tuple('one one two three one two four'.split())
>>> unif = UniformLM(tokens)
>>> unif.probability(('five',))
0
>>> unif.probability(('one', 'two')) == 0.0625
True
"""
return ...
def sample(self, M):
"""
sample selects tokens from the language model of length M, returning
a string of tokens.
:Example:
>>> tokens = tuple('one one two three one two four'.split())
>>> unif = UniformLM(tokens)
>>> samp = unif.sample(1000)
>>> isinstance(samp, str)
True
>>> len(samp.split()) == 1000
True
>>> s = pd.Series(samp.split()).value_counts(normalize=True)
>>> np.isclose(s, 0.25, atol=0.05).all()
True
"""
return ...
# ---------------------------------------------------------------------
# Question #4
# ---------------------------------------------------------------------
class UnigramLM(object):
def __init__(self, tokens):
"""
Initializes a Unigram languange model using a
list of tokens. It trains the language model
using `train` and saves it to an attribute
self.mdl.
"""
self.mdl = self.train(tokens)
def train(self, tokens):
"""
Trains a unigram language model given a list of tokens.
The output is a series indexed on distinct tokens, and
values giving the probability of a token occuring
in the language.
:Example:
>>> tokens = tuple('one one two three one two four'.split())
>>> unig = UnigramLM(tokens)
>>> isinstance(unig.mdl, pd.Series)
True
>>> set(unig.mdl.index) == set('one two three four'.split())
True
>>> unig.mdl.loc['one'] == 3 / 7
True
"""
return ...
def probability(self, words):
"""
probability gives the probabiliy a sequence of words
appears under the language model.
:param: words: a tuple of tokens
:returns: the probability `words` appears under the language
model.
:Example:
>>> tokens = tuple('one one two three one two four'.split())
>>> unig = UnigramLM(tokens)
>>> unig.probability(('five',))
0
>>> p = unig.probability(('one', 'two'))
>>> np.isclose(p, 0.12244897959, atol=0.0001)
True
"""
return ...
def sample(self, M):
"""
sample selects tokens from the language model of length M, returning
a string of tokens.
>>> tokens = tuple('one one two three one two four'.split())
>>> unig = UnigramLM(tokens)
>>> samp = unig.sample(1000)
>>> isinstance(samp, str)
True
>>> len(samp.split()) == 1000
True
>>> s = pd.Series(samp.split()).value_counts(normalize=True).loc['one']
>>> np.isclose(s, 0.41, atol=0.05).all()
True
"""
return ...
# ---------------------------------------------------------------------
# Question #5,6,7,8
# ---------------------------------------------------------------------
class NGramLM(object):
def __init__(self, N, tokens):
"""
Initializes a N-gram languange model using a
list of tokens. It trains the language model
using `train` and saves it to an attribute
self.mdl.
"""
self.N = N
ngrams = self.create_ngrams(tokens)
self.ngrams = ngrams
self.mdl = self.train(ngrams)
if N < 2:
raise Exception('N must be greater than 1')
elif N == 2:
self.prev_mdl = UnigramLM(tokens)
else:
mdl = NGramLM(N-1, tokens)
self.prev_mdl = mdl
def create_ngrams(self, tokens):
"""
create_ngrams takes in a list of tokens and returns a list of N-grams.
The START/STOP tokens in the N-grams should be handled as
explained in the notebook.
:Example:
>>> tokens = tuple('\x02 one two three one four \x03'.split())
>>> bigrams = NGramLM(2, [])
>>> out = bigrams.create_ngrams(tokens)
>>> isinstance(out[0], tuple)
True
>>> out[0]
('\\x02', 'one')
>>> out[2]
('two', 'three')
"""
return ...
def train(self, ngrams):
"""
Trains a n-gram language model given a list of tokens.
The output is a dataframe with three columns (ngram, n1gram, prob).
:Example:
>>> tokens = tuple('\x02 one two three one four \x03'.split())
>>> bigrams = NGramLM(2, tokens)
>>> set(bigrams.mdl.columns) == set('ngram n1gram prob'.split())
True
>>> bigrams.mdl.shape == (6, 3)
True
>>> bigrams.mdl['prob'].min() == 0.5
True
"""
# ngram counts C(w_1, ..., w_n)
...
# n-1 gram counts C(w_1, ..., w_(n-1))
...
# Create the conditional probabilities
...
# Put it all together
...
return ...
def probability(self, words):
"""
probability gives the probabiliy a sequence of words
appears under the language model.
:param: words: a tuple of tokens
:returns: the probability `words` appears under the language
model.
:Example:
>>> tokens = tuple('\x02 one two one three one two \x03'.split())
>>> bigrams = NGramLM(2, tokens)
>>> p = bigrams.probability('two one three'.split())
>>> np.isclose(p, (1/4)*(1/2)*(1/3))
True
>>> bigrams.probability('one two five'.split()) == 0
True
"""
return ...
def sample(self, M):
"""
sample selects tokens from the language model of length M, returning
a string of tokens.
:Example:
>>> tokens = tuple('\x02 one two three one four \x03'.split())
>>> bigrams = NGramLM(2, tokens)
>>> samp = bigrams.sample(3)
>>> len(samp.split()) == 4 # don't count the initial START token.
True
>>> samp[:2] == '\\x02 '
True
>>> set(samp.split()) <= {'\\x02', '\\x03', 'one', 'two', 'three', 'four'}
True
"""
# Use a helper function to generate sample tokens of length `length`
...
# Transform the tokens to strings
...
return ...
# ---------------------------------------------------------------------
# DO NOT TOUCH BELOW THIS LINE
# IT'S FOR YOUR OWN BENEFIT!
# ---------------------------------------------------------------------
# Graded functions names! DO NOT CHANGE!
# This dictionary provides your doctests with
# a check that all of the questions being graded
# exist in your code!
GRADED_FUNCTIONS = {
'q01': ['get_book'],
'q02': ['tokenize'],
'q03': ['UniformLM'],
'q04': ['UnigramLM'],
'q05': ['NGramLM']
}
def check_for_graded_elements():
"""
>>> check_for_graded_elements()
True
"""
for q, elts in GRADED_FUNCTIONS.items():
for elt in elts:
if elt not in globals():
stmt = "YOU CHANGED A QUESTION THAT SHOULDN'T CHANGE! \
In %s, part %s is missing" %(q, elt)
raise Exception(stmt)
return True
| true
|
2a59366899aeb9e7dfa4af31b7e35f1a025fc481
|
Python
|
liseyko/CtCI
|
/Chapter 3 - Stacks and Queues/s0307.py
|
UTF-8
| 2,110
| 3.515625
| 4
|
[] |
no_license
|
from queue import Queue
class Animal():
    """Base shelter animal with class-level arrival bookkeeping.

    Class attributes:
        animals -- per-type instance counts, used to build unique ids.
        cntr    -- global arrival counter; a lower `priority` means the
                   animal arrived earlier.
    """
    animals = {}
    cntr = 0

    def __init__(self, id=None):
        # Subclasses set self.animal_type before calling super().__init__().
        # Fix vs. original: the attribute was read unconditionally, so
        # instantiating Animal directly raised AttributeError and the
        # "unspecified" default was unreachable.
        if not getattr(self, 'animal_type', None):
            self.animal_type = "unspecified"
        if self.animal_type in Animal.animals:
            Animal.animals[self.animal_type] += 1
        else:
            Animal.animals[self.animal_type] = 1
        # Unique id: type name plus per-type sequence number.
        self.id = self.animal_type + str(Animal.animals[self.animal_type])
        # Arrival order, used by the shelter to find the oldest animal.
        self.priority = Animal.cntr
        Animal.cntr += 1
class Cat(Animal):
    """A cat; tags its type before the shared Animal bookkeeping runs."""
    def __init__(self):
        # Must be set before super().__init__() reads it.
        self.animal_type = "cat"
        super().__init__()
class Dog(Animal):
    """A dog; tags its type before the shared Animal bookkeeping runs."""
    def __init__(self):
        # Must be set before super().__init__() reads it.
        self.animal_type = "dog"
        super().__init__()
class AnimalNode():
    """Linked-list node wrapping an animal.

    NOTE(review): nothing in this module actually links these nodes -- the
    shelter delegates to Queue instead, and the only construction site
    discards the node immediately.
    """
    def __init__(self, animal):
        self.animal = animal
        self.next = None
        # Dynamic per-type link, e.g. `next_cat` or `next_dog`.
        setattr(self, 'next_' + animal.animal_type, None)
class AnimalShelter():
    """FIFO animal shelter backed by one queue per species.

    NOTE(review): `Queue` must be a project queue exposing
    enqueue/dequeue/peek/__len__/__iter__ -- the stdlib queue.Queue has a
    different API (put/get), so this module relies on a local `queue`
    module shadowing it.
    """

    def __init__(self):
        self.cats = Queue()
        self.dogs = Queue()

    def enqueue(self, animal):
        """Admit `animal` into the queue matching its type.

        Returns the queue's enqueue result, or False for unknown types.
        (Fix vs. original: an AnimalNode was constructed here and never
        used -- dead code, removed.)
        """
        if animal.animal_type == "cat":
            return self.cats.enqueue(animal)
        elif animal.animal_type == "dog":
            return self.dogs.enqueue(animal)
        else:
            return False

    def dequeueAny(self):
        """Release the longest-waiting animal of either type.

        Returns False when the shelter is empty.
        """
        dogs_cnt = len(self.dogs)
        cats_cnt = len(self.cats)
        if dogs_cnt + cats_cnt == 0:
            return False
        if cats_cnt and dogs_cnt:
            # Both queues non-empty: lower priority == arrived earlier.
            if self.cats.peek().priority < self.dogs.peek().priority:
                return self.cats.dequeue()
            return self.dogs.dequeue()
        if cats_cnt == 0:
            return self.dogs.dequeue()
        return self.cats.dequeue()

    def dequeueDog(self):
        """Release the longest-waiting dog."""
        return self.dogs.dequeue()

    def dequeueCat(self):
        """Release the longest-waiting cat."""
        return self.cats.dequeue()

    def show(self):
        """Print both queues' animal ids, for debugging."""
        print("cats:")
        for a in self.cats:
            print(a.data.id, end='->')
        print("\ndogs:")
        for a in self.dogs:
            print(a.data.id, end='->')
        print("\n\n")
| true
|
d1a9d444db56467ee661b12a4c15036d8d0742ed
|
Python
|
Tanych/CodeTracking
|
/164-Maximum-Gap/solution.py
|
UTF-8
| 2,012
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
class Solution(object):
    def maximumGap(self, nums):
        """Return the maximum gap between successive elements of sorted *nums*.

        Bucket-sort / pigeonhole approach: with minimum A, maximum B and N
        values, the answer is at least ceil((B-A)/(N-1)), so the maximum gap
        can only occur *between* buckets of that width — only each bucket's
        min and max need to be tracked. Runs in O(N) time and space.

        BUG FIX: ported from Python 2 — `/` on ints now yields floats (which
        broke list indexing) and `xrange` no longer exists; uses `//`/`range`.

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        if n < 2:
            return 0
        lo, hi = min(nums), max(nums)
        if lo == hi:
            return 0   # all values equal: every gap is 0
        # ceil((hi-lo)/(n-1)) without math.ceil, clamped to at least 1
        bucket_range = max(1, (hi - lo - 1) // (n - 1) + 1)
        bucket_len = (hi - lo) // bucket_range + 1
        buckets = [None] * bucket_len
        # Drop each number into its bucket, keeping only per-bucket min/max.
        for num in nums:
            pos = (num - lo) // bucket_range
            b = buckets[pos]
            if b is None:
                buckets[pos] = {'min': num, 'max': num}
            else:
                b['min'] = min(b['min'], num)
                b['max'] = max(b['max'], num)
        # The answer is the largest (next bucket's min) - (previous bucket's max)
        # over adjacent non-empty buckets.
        res = 0
        prev_max = None
        for b in buckets:
            if b is None:
                continue
            if prev_max is not None:
                res = max(res, b['min'] - prev_max)
            prev_max = b['max']
        return res
| true
|
129f4c5b0a25efac6209e81c38f9a4e9959e8fe9
|
Python
|
FedericoV/SysBio_Modeling
|
/measurement/timecourse_measurement.py
|
UTF-8
| 1,923
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
__author__ = 'Federico Vaggi'
from .abstract_measurement import MeasurementABC
class TimecourseMeasurement(MeasurementABC):
    """
    A series of measured values, with their associated timepoints and standard deviations (optional).

    :param variable_name: The name of the measured variable
    :type: string
    :param measurement_value: An (n,) dimensional array containing measurements of the variable_name
    :type: numpy.array
    :param measurement_time: An (n,) dimensional array containing the times at which measurements were carried out
    :type: numpy.array
    :param measurement_std: An (n,) dimensional array indicating the uncertainties in the measurements (optional)
    :type: numpy.array
    """

    def __init__(self, variable_name, measurement_value, measurement_time, measurement_std=None):
        super(TimecourseMeasurement, self).__init__(variable_name, measurement_value, measurement_std)
        if not (len(measurement_value) == len(measurement_time)):
            # BUG FIX: the old message blamed the standard-deviation array,
            # but this check compares the values array against the timepoints.
            raise ValueError('Length of Measurement Values Not Equal to Length of Timepoints')
        self.timepoints = measurement_time

    def drop_timepoint_zero(self):
        """Remove all t == 0 samples in place.

        Timepoints must be filtered *last*: the first two lines still use
        them as the boolean mask.
        """
        self.values = self.values[self.timepoints != 0]
        self.std = self.std[self.timepoints != 0]
        self.timepoints = self.timepoints[self.timepoints != 0]

    def get_nonzero_measurements(self):
        """Return (values, std, timepoints) restricted to t != 0, without mutating self."""
        values = self.values[self.timepoints != 0]
        std = self.std[self.timepoints != 0]
        timepoints = self.timepoints[self.timepoints != 0]
        return values, std, timepoints

    def plot_measurement(self, ax=None, **kwargs):
        """Errorbar-plot the timecourse on *ax*, creating a fresh figure when ax is None."""
        import matplotlib.pyplot as plt
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111)
        if len(kwargs) == 0:
            kwargs = {'marker': 'o', 'linestyle': '--'}
        ax.errorbar(self.timepoints, self.values, self.std, **kwargs)
        return ax
| true
|
2050d90a96e5addc52b30ccb71f422c4ed8ed876
|
Python
|
Sen2k9/Algorithm-and-Problem-Solving
|
/leetcode_problems/953_Verifying_an_Alien_Dictionary.py
|
UTF-8
| 4,314
| 4.125
| 4
|
[] |
no_license
|
"""
In an alien language, surprisingly they also use english lowercase letters, but possibly in a different order. The order of the alphabet is some permutation of lowercase letters.
Given a sequence of words written in the alien language, and the order of the alphabet, return true if and only if the given words are sorted lexicographicaly in this alien language.
Example 1:
Input: words = ["hello","leetcode"], order = "hlabcdefgijkmnopqrstuvwxyz"
Output: true
Explanation: As 'h' comes before 'l' in this language, then the sequence is sorted.
Example 2:
Input: words = ["word","world","row"], order = "worldabcefghijkmnpqstuvxyz"
Output: false
Explanation: As 'd' comes after 'l' in this language, then words[0] > words[1], hence the sequence is unsorted.
Example 3:
Input: words = ["apple","app"], order = "abcdefghijklmnopqrstuvwxyz"
Output: false
Explanation: The first three characters "app" match, and the second string is shorter (in size.) According to lexicographical rules "apple" > "app", because 'l' > '∅', where '∅' is defined as the blank character which is less than any other character (More info).
Constraints:
1 <= words.length <= 100
1 <= words[i].length <= 20
order.length == 26
All characters in words[i] and order are English lowercase letters.
"""
class Solution:
    def isAlienSorted(self, words, order):
        """Return True iff *words* is non-decreasing under the alphabet *order*.

        Each word is translated into its list of alphabet ranks; Python's
        built-in list comparison is exactly lexicographic order (a proper
        prefix sorts before any extension), so adjacent pairs can be
        compared directly.
        """
        rank = {letter: position for position, letter in enumerate(order)}
        keys = [[rank[letter] for letter in word] for word in words]
        return all(left <= right for left, right in zip(keys, keys[1:]))
# Quick manual check (corner case 2 from the notes below): expect True.
sol = Solution()
words = ["kuvp", "q"]
order = "ngxlkthsjuoqcpavbfdermiywz"
print(sol.isAlienSorted(words, order))
"""
corner cases:
1. one words only
2. ["kuvp","q"]
"ngxlkthsjuoqcpavbfdermiywz"
3. words = ["fxasxpc", "dfbdrifhp", "nwzgs", "cmwqriv", "ebulyfyve",
"miracx", "sxckdwzv", "dtijzluhts", "wwbmnge", "qmjwymmyox"]
order = "zkgwaverfimqxbnctdplsjyohu"
4. words = ["apple", "app"]
order = "abcdefghijklmnopqrstuvwxyz"
"""
| true
|
ed75b6743caa7a6bc4da4e2a829a7bcf3d72ed3c
|
Python
|
chuck2kill/CoursPython
|
/chapitre_6/racine.py
|
UTF-8
| 511
| 4.40625
| 4
|
[] |
no_license
|
# program 4, page 56
# Ask the user for a number, then either print its square root
# or a message saying the square root cannot be computed
# (only strictly negative numbers have no real square root).

# module import
from math import *

# ask for the number
chiffre = int(input("Veuillez entrer un chiffre :"))

# choose what to print
# BUG FIX: the original tested `chiffre <= 0`, wrongly rejecting 0
# even though sqrt(0) is perfectly defined (= 0.0).
if chiffre < 0:
    print("Impossible de calculer la racine carrée de", chiffre)
else:
    print("La racine carrée de", chiffre, "est", sqrt(chiffre))
| true
|
a70bfab27e0b04715f653006665d8161159cd34b
|
Python
|
CathyZhou0120/pipelines
|
/pull_from_psql.py
|
UTF-8
| 1,412
| 2.71875
| 3
|
[] |
no_license
|
import psycopg2
import csv
import os
#conn_string = """dbname='exampledb' user='cathyzhou@cathydb2' host='cathydb2.postgres.database.azure.com' password='3.14159Zyr' port='5432' sslmode='require'"""
# Construct connection string
def get_data(host, user, dbname, password, port, sslmode):
    """Fetch every row of the ``iris`` table and dump it to ``data.csv``.

    BUG FIX: the original never closed the cursor or the connection
    (resource leak on every call); both are now released in ``finally``
    blocks even if the query fails. The CSV file is opened with
    ``newline=''`` as the csv module requires, avoiding blank lines on
    Windows.
    """
    conn = psycopg2.connect(
        host=host,
        database=dbname,
        user=user,
        password=password,
        port=port,
        sslmode=sslmode
    )
    try:
        cursor = conn.cursor()
        try:
            cursor.execute("SELECT * FROM iris;")
            rows = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        conn.close()
    with open('data.csv', 'w', newline='') as f:
        # NOTE(review): 'peta_length' looks like a typo for 'petal_length',
        # but it is the file's runtime header — confirm downstream consumers
        # before renaming.
        fieldnames = ['sepal_length', 'sepal_width', 'peta_length', 'petal_width', 'class']
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for i in rows:
            writer.writerow({'sepal_length': i[0], 'sepal_width': i[1], 'peta_length': i[2],
                             'petal_width': i[3], 'class': i[4]})
def main():
    """Entry point: read DB connection settings from the environment and export.

    Required environment variables: DBNAME, DBUSER, DBHOST, DBPASSWORD,
    DBPORT, DBSSL. Raises KeyError if any of them is missing.
    """
    dbname=os.environ['DBNAME']
    user=os.environ['DBUSER']
    host=os.environ['DBHOST']
    password=os.environ['DBPASSWORD']
    port=os.environ['DBPORT']
    sslmode=os.environ['DBSSL']
    #tenent_id=os.environ['TENANT_ID']
    #conn_string="""host={0} user={1} dbname={2} password={3} port={4} sslmode={5}""".format(host, user, dbname, password, port, sslmode)
    get_data(host,user,dbname,password,port,sslmode)
    #print(conn_string, tenent_id)

if __name__ == '__main__':
    main()
| true
|
43076f235bcf03af08c954c5ab8c181ddb3a6fed
|
Python
|
nelo81/code2word
|
/converter.py
|
UTF-8
| 2,505
| 2.8125
| 3
|
[] |
no_license
|
import os
import codecs
from docx import Document
doc = Document()
errorlist = []
def convert(dir, mode='flat', title=None, include=None, exclude=None, encoding='utf-8'):
    """Copy source files under *dir* into the module-level Word document.

    include/exclude are '|'-separated extension lists; mode is 'flat'
    (bottom-up walk, one heading per sub-directory) or 'deep' (recursive,
    one heading level per depth).
    """
    print('copy from diretory: ' + dir)
    if title is not None:
        doc.add_heading(title, 1)
    inc = include.split('|') if include is not None else None
    exc = exclude.split('|') if exclude is not None else None
    if mode == 'flat':
        walkflat(dir, inc, exc, encoding)
    elif mode == 'deep':
        walkdeep(dir, 2, inc, exc, encoding)
    else:
        print('mode is invaild')
def walkflat(dir, inc, exc, encoding):
    """Walk *dir* bottom-up and copy every matching file into the document.

    Files are grouped under a level-2 heading per sub-directory; each file
    becomes a level-3 heading followed by its full text and a page break.
    inc/exc are extension lists (without the dot) used as include/exclude
    filters; either may be None. Unreadable files are recorded in the
    module-level `errorlist` and skipped.

    BUG FIX: removed a leftover debug trap (`if file == 'pom.xml': print(1)`).
    """
    currentdir = ''
    for root, dirs, files in os.walk(dir, False):
        for file in files:
            if (inc is None or os.path.splitext(file)[1][1:] in inc) and (exc is None or os.path.splitext(file)[1][1:] not in exc):
                filepath = os.path.join(root, file).replace('\\', '/')
                try:
                    with codecs.open(filepath, encoding=encoding) as f:
                        content = f.read()
                        thisdir = filepath[len(dir)+1:filepath.rfind('/')]
                        # emit a directory heading only when the directory changes
                        if currentdir != thisdir:
                            currentdir = thisdir
                            doc.add_heading(thisdir, 2)
                            print('into directory '+thisdir)
                        doc.add_heading(filepath[filepath.rfind('/')+1:], 3)
                        doc.add_paragraph(content)
                        doc.add_page_break()
                        print('copied '+filepath[filepath.rfind('/')+1:])
                except Exception as e:
                    # unreadable file (encoding/permissions): record and continue
                    errorlist.append(filepath)
                    print('read ' + filepath + ' error')
                    print(str(e))
def walkdeep(root, level, inc, exc, encoding):
    """Recursively copy files under *root* into the document.

    Each directory gets a heading at the current *level*; recursion bumps the
    level by one per depth. inc/exc are extension lists (without the dot)
    used as include/exclude filters; either may be None. Unreadable files
    are recorded in the module-level `errorlist` and skipped.
    """
    for file in os.listdir(root):
        filepath = os.path.join(root,file).replace('\\','/')
        if os.path.isfile(filepath):
            if (inc is None or os.path.splitext(file)[1][1:] in inc) and (exc is None or os.path.splitext(file)[1][1:] not in exc):
                try:
                    with codecs.open(filepath,encoding=encoding) as f:
                        content = f.read()
                        # file name as a heading, full file text as a paragraph
                        doc.add_heading(filepath[filepath.rfind('/')+1:], level)
                        doc.add_paragraph(content)
                        doc.add_page_break()
                        print('copied '+filepath[filepath.rfind('/')+1:])
                except Exception as e:
                    # unreadable file (encoding/permissions): record and continue
                    errorlist.append(filepath)
                    print('read ' + filepath + ' error')
                    print(str(e))
        else:
            doc.add_heading(file, level)
            print('into directory '+file)
            walkdeep(filepath, level+1, inc, exc, encoding)
| true
|
c6274340edfb073b70ae0a384a445f70502ce67b
|
Python
|
sharmakajal0/codechef_problems
|
/previous_problems/BEGINNER/ONP.sol.py
|
UTF-8
| 659
| 3.9375
| 4
|
[] |
no_license
|
#!/usr/bin/env python
'''module for transformation of infix to postfix'''
def infix_topostfix(infix_exp):
    """Convert a fully parenthesised infix expression to postfix (RPN).

    Operands are lowercase letters; anything that is not a letter or a
    parenthesis is treated as an operator. Because the input is fully
    bracketed, no precedence handling is needed: every ')' pops its
    operators back to the matching '('.
    """
    op_stack = []
    output = []
    for symbol in infix_exp:
        if symbol == '(':
            op_stack.append(symbol)
        elif 'a' <= symbol <= 'z':
            output.append(symbol)
        elif symbol == ')':
            while op_stack[-1] != '(':
                output.append(op_stack.pop())
            op_stack.pop()   # discard the matching '('
        else:
            op_stack.append(symbol)
    return ''.join(output)
# Read T test cases; each subsequent line is one fully parenthesised expression.
T = int(input())
for _ in range(0, T):
    expr = list(input())
    print(infix_topostfix(expr))
| true
|
2c569fd0d64171e926e6d10aaaac6aeb618448e0
|
Python
|
lahsivvishal/algorithms-in-python
|
/Easy/Nth_fib.py
|
UTF-8
| 654
| 3.953125
| 4
|
[] |
no_license
|
# General
"""
if n == 2:
return 1
elif n == 1:
return 0
elif:
return fib(n-1)+fib(n-2)
"""
# Memoize
"""
def getNthFib(n, memoize = {1:0, 2:1}):
if n in memoize:
return memoize[n]
else:
memoize[n] = getNthFib(n-1, memoize) + getNthFib(n-2, memoize)
return memoize[n]
print(getNthFib(6))
"""
#Iterative method
# Iterative method
def getNthfib(n):
    """Return the n-th Fibonacci number, 1-indexed: fib(1)=0, fib(2)=1, fib(3)=1, ...

    For n <= 1 the first value (0) is returned. O(n) time, O(1) space.
    """
    lasttwo = [0, 1]
    counter = 3
    # BUG FIX: the loop condition was `counter <= 3`, so it ran at most once
    # and every n > 3 returned fib(3).
    while counter <= n:
        nextFib = lasttwo[0] + lasttwo[1]
        lasttwo[0] = lasttwo[1]
        lasttwo[1] = nextFib
        counter += 1
    return lasttwo[1] if n > 1 else lasttwo[0]

# BUG FIX: the original line was missing its closing parenthesis (SyntaxError).
print(getNthfib(0))
| true
|
1bd728c6e2f90adc43e6706b57df1e3a55028932
|
Python
|
fiso0/my_python
|
/sanitize.py
|
UTF-8
| 304
| 3.46875
| 3
|
[] |
no_license
|
def sanitize(time_string):
    """Normalise a race time so minutes and seconds are separated by '.'.

    Accepts '-' or ':' as the incoming separator; any other string is
    returned unchanged.
    """
    for separator in ('-', ':'):
        if separator in time_string:
            mins, secs = time_string.split(separator)
            return mins + '.' + secs
    return time_string
time_string="2-21"
print(sanitize(time_string))
print(sanitize("2:10"))
print(sanitize("3.3"))
input()
| true
|
038d386b6ba71ccf2690c9207c97c9ab833ef24a
|
Python
|
rizkyramadhana26/TubesDaspro
|
/riwayatGadget.py
|
UTF-8
| 5,465
| 2.78125
| 3
|
[] |
no_license
|
import validasi, variabelGlobal
from datetime import datetime
def cetakRiwayatPinjam(count,sortedriwayat,panjang): # prints the borrow history
    """Print the borrow-history list five entries at a time, recursing for more.

    count: number of entries already printed; panjang: entries still unprinted.
    Borrower and gadget names are resolved from the variabelGlobal tables.
    Record layout (from the indexing below): [0]=loan id, [1]=user id,
    [2]=gadget id, [3]=borrow date, [4]=quantity.
    """
    if panjang > 5 : # more than five entries are still unprinted
        for i in range(count,count + 5): # print the next batch of five
            print("\nID Peminjaman :", sortedriwayat[i][0])
            for j in range(len(variabelGlobal.user['data'])):
                if sortedriwayat[i][1] == variabelGlobal.user['data'][j][0]:
                    print("Nama Pengambil :", variabelGlobal.user['data'][j][1])
                    break
            for k in range(len(variabelGlobal.gadget['data'])):
                if sortedriwayat[i][2] == variabelGlobal.gadget['data'][k][0]:
                    print("Nama Gadget :", variabelGlobal.gadget['data'][k][1])
                    break
            print("Tanggal Peminjaman :", sortedriwayat[i][3])
            print("Jumlah :", sortedriwayat[i][4])
        count += 5 # bump the number of entries already printed
        pil = input("\nApakah Anda ingin melihat data riwayat lainnya?(y/n)")
        if pil == "y" :
            return cetakRiwayatPinjam(count,sortedriwayat,panjang-5) # recurse with five fewer entries left
        else :
            return
    elif panjang == 0: # no borrow history at all
        print("Tidak terdapat data riwayat peminjaman.")
        return
    else : # five or fewer entries left: print the remainder
        for i in range(count,len(sortedriwayat)):
            print("\nID Peminjaman :", sortedriwayat[i][0])
            for j in range(len(variabelGlobal.user['data'])):
                if sortedriwayat[i][1] == variabelGlobal.user['data'][j][0]:
                    print("Nama Pengambil :", variabelGlobal.user['data'][j][1])
                    break
            for k in range(len(variabelGlobal.gadget['data'])):
                if sortedriwayat[i][2] == variabelGlobal.gadget['data'][k][0]:
                    print("Nama Gadget :", variabelGlobal.gadget['data'][k][1])
                    break
            print("Tanggal Peminjaman :", sortedriwayat[i][3])
            print("Jumlah :", sortedriwayat[i][4])
        return
def cetakRiwayatKembali(count,sortedriwayatPinjam,sortedriwayatKembali,panjang):
    """Print the return-history list five entries at a time, recursing for more.

    count: number of entries already printed; panjang: entries still unprinted.
    Borrower and gadget names are resolved from the variabelGlobal tables;
    the parallel borrow record supplies the original quantity and the
    fully-returned flag.

    BUG FIX: the tail branch printed the gadget name from
    variabelGlobal.consumable while matching ids against variabelGlobal.gadget;
    now consistent with the lookup (and with cetakRiwayatPinjam).
    """
    if panjang > 5 : # more than five entries are still unprinted
        for i in range(count,count + 5):
            print("\nID Pengembalian :", sortedriwayatKembali[i][0])
            for j in range(len(variabelGlobal.user['data'])):
                if sortedriwayatKembali[i][1] == variabelGlobal.user['data'][j][0]:
                    print("Nama Pengambil :", variabelGlobal.user['data'][j][1])
                    break
            for k in range(len(variabelGlobal.gadget['data'])):
                if sortedriwayatKembali[i][2] == variabelGlobal.gadget['data'][k][0]:
                    print("Nama Gadget :", variabelGlobal.gadget['data'][k][1])
                    break
            # NOTE(review): index [2] is used both as the gadget id above and
            # as the return date here — verify the record layout.
            print("Tanggal Pengembalian :", sortedriwayatKembali[i][2])
            if sortedriwayatPinjam[i][5] == 'y':
                print("Status : Sudah dikembalikan semua")
            else:
                sisa = int(sortedriwayatPinjam[i][4]) - int(sortedriwayatKembali[i][3])
                print("Status : Belum dikembalikan semua")
                print("Sisa : {}".format(sisa))
        count += 5 # bump the number of entries already printed
        pil = input("\nApakah Anda ingin melihat data riwayat lainnya?(y/n)")
        if pil == "y" :
            return cetakRiwayatKembali(count,sortedriwayatPinjam,sortedriwayatKembali,panjang-5)
        else :
            return
    elif panjang == 0: # no return history at all
        print("Tidak terdapat data riwayat pengembalian.")
        return
    else : # five or fewer entries left: print the remainder
        for i in range(count,len(sortedriwayatKembali)):
            print("\nID Pengembalian :", sortedriwayatKembali[i][0])
            for j in range(len(variabelGlobal.user['data'])):
                if sortedriwayatKembali[i][1] == variabelGlobal.user['data'][j][0]:
                    print("Nama Pengambil :", variabelGlobal.user['data'][j][1])
                    break
            for k in range(len(variabelGlobal.gadget['data'])):
                if sortedriwayatKembali[i][2] == variabelGlobal.gadget['data'][k][0]:
                    print("Nama Gadget :", variabelGlobal.gadget['data'][k][1])
                    break
            print("Tanggal Pengembalian :", sortedriwayatKembali[i][2])
            if sortedriwayatPinjam[i][5] == 'y':
                print("Status : Sudah dikembalikan semua")
            else:
                sisa = int(sortedriwayatPinjam[i][4]) - int(sortedriwayatKembali[i][3])
                print("Status : Belum dikembalikan semua")
                print("Sisa : {}".format(sisa))
        return
| true
|
f697b8c275cd103732ff50c8121ae5e7e5fe4148
|
Python
|
TaumarT/python
|
/Quinto_exercicio.py
|
UTF-8
| 194
| 3.8125
| 4
|
[] |
no_license
|
print("---converte metros em centimetros-----")
metros = int(input("digite o numero a ser convertido : "))
cent = metros * 100
print("{} metro equivale a {} centimetros".format( metros,cent))
| true
|
4a8d1ec0d98f0c9e6f81cafb5cf32916e1db74b5
|
Python
|
kwangminini/Algorhitm
|
/CodeUp/CodeUp1091.py
|
UTF-8
| 237
| 3.15625
| 3
|
[] |
no_license
|
# Read a, m, d and n from one line, then print the n-th term of the
# sequence t(1) = a, t(k+1) = t(k) * m + d.
tokens = input().split()
a, m, d, n = (int(t) for t in tokens[:4])
term = a
for _ in range(n - 1):
    term = term * m + d
print(term)
| true
|
0ae43f1faf4c628530c8b49f6f96836fbf01fd1c
|
Python
|
Arrrrrr/Hoth
|
/Python/seuss01.py
|
UTF-8
| 711
| 2.90625
| 3
|
[] |
no_license
|
#! /usr/bin/python
# ========== SET UP ===========
# import libraries we need
import pprint
import re
import csv
import os
from _csv import reader
# create a file called seuss.csv
# create a file called seuss.csv
with open('seuss.csv', 'w') as csvfile:
    # fieldnames are the headings for each column
    fieldnames = ['character', 'habitat']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    # add content: header first, then one row per (character, habitat) pair
    writer.writeheader()
    characters = [
        ('Horton', 'Jungle of Nool'),
        ('Sneeches', 'Beaches'),
        ('Cindi Loo Who', 'Whoville'),
        ('The Lorax', 'Truffla Trees'),
    ]
    for who, where in characters:
        writer.writerow({'character': who, 'habitat': where})
| true
|
bfe876ce37abad96ed78d627fd9310d34c11148a
|
Python
|
EmersonDove/Beale
|
/Scripts/Ciphers/Vigenere.py
|
UTF-8
| 471
| 3.140625
| 3
|
[] |
no_license
|
class Vigenere:
    """Vigenère-cipher decrypter for alphabetic text (output is lowercase a-z)."""

    def __init__(self, decryptKey):
        # BUG FIX: the key used to live in a module-level global, so every
        # instance shared (and clobbered) the same key; store it per instance.
        self.key = decryptKey

    def decrypt(self, text):
        """Return *text* decrypted with the instance key.

        Each character is shifted back by the corresponding key character
        (both case-folded); the key repeats cyclically over the text.
        """
        key = self.key
        out = []
        key_index = 0
        for ch in text:
            out.append(chr((ord(ch.lower()) - ord(key[key_index].lower())) % 26 + 97))
            key_index += 1
            # BUG FIX: was `key_index is len(key)` — identity comparison on
            # ints, which only worked by accident for CPython's small-int
            # cache and breaks for keys longer than 256 characters.
            if key_index == len(key):
                key_index = 0
        return ''.join(out)
| true
|
913044a3b47839425b8167679ea98bd8f80a9918
|
Python
|
chokoryu/atcoder
|
/problems/abc182_c.py
|
UTF-8
| 877
| 2.875
| 3
|
[] |
no_license
|
from fractions import gcd
from collections import Counter, deque, defaultdict
from heapq import heappush, heappop, heappushpop, heapify, heapreplace, merge
from bisect import bisect_left, bisect_right, bisect, insort_left, insort_right, insort
from itertools import accumulate, product, permutations, combinations
def main():
    """Read N; print the minimum number of digits to erase so the remaining
    number is divisible by 3, or -1 if impossible (0 if N already is)."""
    N = input()
    if int(N) % 3 == 0:
        print(0)
        return
    total = len(N)
    best = total  # sentinel: no valid non-empty subset found yet
    # Enumerate every non-empty subset of digit positions to KEEP.
    for mask in range(1, 1 << total):
        digit_sum = 0
        kept = 0
        for j in range(total):
            if (mask >> j) & 1:
                digit_sum += int(N[j])
                kept += 1
        if digit_sum % 3 == 0:
            best = min(best, total - kept)
    # best == total is unreachable by any kept subset, so it means "impossible".
    print(-1) if best == total else print(best)

if __name__ == '__main__':
    main()
| true
|
33eb372eabd1512234d6ce5232dbeb392aa8ab24
|
Python
|
josephborrego/doom
|
/frames.py
|
UTF-8
| 3,874
| 2.953125
| 3
|
[] |
no_license
|
# I was inspired to emabrk on this journey with the help from Thomas Simonini
# # https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/master/Deep%20Q%20Learning/Doom/Deep%20Q%20learning%20with%20Doom.ipynb
import numpy as np
from skimage import transform
import skimage.transform
from collections import deque
import cv2
from PIL import Image
stack_size = 4
# https://towardsdatascience.com/image-pre-processing-c1aec0be3edf
# Preprocessing is an important step, because we want to reduce the complexity of our states to reduce the computation time
# needed for training.
def preprocess_frame(frame):
    """Grayscale, crop, normalise, resize and blur one raw game frame.

    Returns the (5,5)-Gaussian-blurred float32 image `x` (a ~60%-scaled
    crop). Everything from `blur` downward computes a watershed-style
    segmentation whose result is discarded — only `x` is returned; the
    segmentation code is kept as-is (see the commented `return markers`).
    """
    # converts to gray scale
    src = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype('uint8')
    #cropped_frame = frame[30:-10,30:-30]
    #normalized_frame = cropped_frame/255.0
    cropped_frame = src[30:-10, 30:-30]
    normalized_frame = cropped_frame/255.0
    #preprocessed_frame = skimage.transform.resize(normalized_frame, (84,84))
    # NOTE(review): resizes to 60% of the cropped size, not 84x84 — the
    # zero-frames in stack_frames assume (84,84); confirm the shapes used.
    width = int(normalized_frame.shape[1] * 60 / 100)
    height = int(normalized_frame.shape[0] * 60 / 100)
    dim = (width, height)
    # resize the frame
    preprocessed_frame = cv2.resize(normalized_frame, dim, interpolation=cv2.INTER_LINEAR)
    preprocessed_frame = preprocessed_frame.astype(np.float32)
    # remove gaussian noise
    #x = cv2.blur(preprocessed_frame, (5,5))
    x = cv2.GaussianBlur(preprocessed_frame, (5, 5), 0).astype('float32')
    blur = cv2.GaussianBlur(preprocessed_frame, (1, 9), 0).astype('uint8')
    # segmentation & morphology
    # segment - separating background from foreground objects & more noise removal
    # otsus binarization
    ret, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # apply another blur to improve the looks
    # further noise removal
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
    # sure background area
    sure_bg = cv2.dilate(opening, kernel, iterations=3)
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
    # finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    # seperate different objects in the image with markers
    ret, markers = cv2.connectedComponents(sure_fg)
    #markers = markers + 1
    #markers[unknown == 255] == 0
    #markers = cv2.watershed(frame, markers)
    #cv2.imshow('yo', sure_fg)
    return x
    #return markers
#return markers
# https://pdfs.semanticscholar.org/74c3/5bb13e71cdd8b5a553a7e65d9ed125ce958e.pdf
# stack frames is used for the experience replay buffer
# Stacking frames is really important because it helps us to give have a sense of motion to our NN
# For the first frame, we feed 4 frames
# At each timestep, we add the new frame to deque and then we stack them to form a new stacked frame
#And so onstack
# If we're done, we create a new stack with 4 new frames (because we are in a new episode).
def stack_frames(stacked_frames, state, is_new_episode):
    """Preprocess *state* and push it onto the 4-frame rolling stack.

    Returns (stacked_state, stacked_frames): an H x W x 4 array for the
    network plus the updated deque. On a new episode the deque is re-seeded
    with four copies of the current frame (the zero frames created below are
    all evicted thanks to maxlen=4).

    BUG FIX: the seed deque used `dtype=np.int`, an alias removed in
    NumPy >= 1.24 — replaced with the plain builtin `int`.
    """
    frame = preprocess_frame(state)
    if is_new_episode:
        # Clear our stacked_frames
        stacked_frames = deque([np.zeros((84, 84), dtype=int) for i in range(stack_size)], maxlen=4)
        # Because we're in a new episode, copy the same frame 4x
        stacked_frames.append(frame)
        stacked_frames.append(frame)
        stacked_frames.append(frame)
        stacked_frames.append(frame)
        # Stack the frames
        stacked_state = np.stack(stacked_frames, axis=2)
    else:
        # Append frame to deque, automatically removes the oldest frame
        stacked_frames.append(frame)
        # Build the stacked state (third axis indexes the different frames)
        stacked_state = np.stack(stacked_frames, axis=2)
    return stacked_state, stacked_frames
| true
|
1df44f2489163d4743665d4d0ef41e431671efd8
|
Python
|
anillava1999/Innomatics-Intership-Task
|
/Task5/Task6.py
|
UTF-8
| 454
| 3.46875
| 3
|
[] |
no_license
|
# Regex Substitution in Python - Hacker Rank Solution
# Python 3
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Regex Substitution in Python - Hacker Rank Solution START
import re
def change(match):
    """re.sub callback: translate a matched '&&' into 'and', '||' into 'or'."""
    return 'and' if match.group(1) == '&&' else 'or'
for _ in range(int(input())):
print(re.sub(r"(?<= )(\|\||&&)(?= )", change,input()))
# Regex Substitution in Python - Hacker Rank Solution END
| true
|
89844d00405e8637e8d81fcf7ef1e61b7252e004
|
Python
|
Gustavo-835-tp555/tp555-machine-learning
|
/misc/holdout.py
|
UTF-8
| 1,351
| 3.078125
| 3
|
[] |
no_license
|
# Import all the necessary libraries.
import numpy as np
import timeit
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# data set size.
M = 100

# Create target function and its noisy version.
x = 6*np.random.rand(M, 1) - 3
y = 0.5*x**2 + x + 2
y_noisy = y + np.random.randn(M, 1)

# Split the whole set into random training and validation set.
x_train, x_val, y_train, y_val = train_test_split(x, y_noisy, test_size=0.3, random_state=10)

mean_vec = []
std_vec = []  # NOTE(review): never filled below — kept for compatibility
for d in range(1, 13):
    # Instantiate a polynomial feature expansion.
    # BUG FIX: `include_bias=include_bias` referenced an undefined name
    # (NameError at runtime). False is used here because LinearRegression
    # fits its own intercept and a constant column carries no information
    # after StandardScaler — TODO confirm against the course material.
    poly_features = PolynomialFeatures(degree=d, include_bias=False)
    # Instantiate a scaler.
    std_scaler = StandardScaler()
    # Instantiate a linear regressor.
    lin_reg = LinearRegression()
    # Create a pipeline of actions.
    polynomial_regression = Pipeline([
        ("poly_features", poly_features),
        ("std_scaler", std_scaler),
        ("lin_reg", lin_reg),
    ])
    polynomial_regression.fit(x_train, y_train)
    y_val_predict = polynomial_regression.predict(x_val)
    # validation RMSE for this polynomial degree
    mean_vec.append(np.sqrt(mean_squared_error(y_val, y_val_predict)))
|
38e00dc0d4b4550025f59d56c4b72ef597bd1511
|
Python
|
wangqi/deuces
|
/deuces/round.py
|
UTF-8
| 2,335
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
from .card import Card
from .deck import Deck
import os
STATUS_FILE = "round.state"
class Round:
    """One round of a card game: per-player hole cards plus shared flop cards.

    Card strings are comma-separated; state can be persisted to and restored
    from the STATUS_FILE text file (format: first line = num_player, then one
    'playerId|playerName=cards' line per player, then an optional flop line).
    """
    def __init__(self, num_player=0):
        self.num_player = num_player
        # "playerId|playerName" -> comma-separated card strings
        self.players = {}
        # insertion order of the player keys (preserved for save_status)
        self.player_keys = []
        self.flop_card_strs = ""

    def add_player_cards(self, player_id, player_name, card_strs):
        """Register *card_strs* (already comma-separated) for the player."""
        key = str(player_id) + "|" + player_name
        if self.players.get(key) is None:
            self.player_keys.append(key)
        self.players[key] = card_strs

    def add_player_card_ints(self, player_id, player_name, card_ints):
        """Same as add_player_cards, but converts Card ints to strings first."""
        key = str(player_id) + "|" + player_name
        card_strs = ""
        for card_int in card_ints:
            card_strs += Card.int_to_str(card_int) + ","
        card_strs = card_strs[0:-1]  # drop the trailing comma
        if self.players.get(key) is None:
            self.player_keys.append(key)
        self.players[key] = card_strs

    def get_player_cards(self, player_id, player_name):
        """Return the player's card string, or "" if unknown."""
        key = str(player_id) + "|" + player_name
        return self.players.get(key, "")

    def set_flop_cards(self, flop_card_strs):
        self.flop_card_strs = flop_card_strs

    def get_flop_cards(self):
        return self.flop_card_strs

    def add_flop_card(self, card_int):
        """Append one flop card (given as a Card int) to the comma-separated list."""
        card_str = Card.int_to_str(card_int)
        if len(self.flop_card_strs) == 0:
            self.flop_card_strs = card_str
        else:
            self.flop_card_strs += "," + card_str

    def save_status(self):
        """Write num_player, one 'key=cards' line per player, then the flop line."""
        lines = []
        lines.append(str(self.num_player))
        for player_key in self.player_keys:
            lines.append(player_key + "=" + self.players.get(player_key, ""))
        if self.flop_card_strs is not None and len(self.flop_card_strs) > 0:
            lines.append(self.flop_card_strs)
        with open(STATUS_FILE, 'w') as f:
            for line in lines:
                if len(line) > 0:  # empty lines are silently skipped
                    f.write(line)
                    f.write("\n")

    @staticmethod
    def read_status():
        """Rebuild a Round from STATUS_FILE.

        Returns None when the file does not exist or is empty.
        """
        lines = []
        if not os.path.exists(STATUS_FILE):
            return
        with open(STATUS_FILE, 'r') as f:
            lines = f.readlines()
        if len(lines) > 0:
            num_players = int(lines[0])
            round = Round(num_player=num_players)  # NOTE: shadows the builtin round()
            for line_idx in range(1, num_players+1, 1):
                line = lines[line_idx]
                fields = line.split("=")
                player_key = fields[0]
                player_cards = fields[1].strip()
                player_fields = player_key.split("|")
                round.add_player_cards(player_id=player_fields[0], player_name=player_fields[1],
                                       card_strs=player_cards)
            # any extra trailing line holds the flop cards
            if len(lines) > num_players+1:
                round.set_flop_cards(lines[-1].strip())
            return round
| true
|
74b154f411e136b36f22b1a1aab1d83082de8361
|
Python
|
GeorgeGio/python_programming
|
/class-notes/class13/opening.py
|
UTF-8
| 149
| 2.5625
| 3
|
[] |
no_license
|
a_file = open("new_text.txt")
file_contents = a_file.read()
second_file = open("new_file2.txt","w")
second_file.write(file_contents)
a_file.read()
| true
|
9c952e4a3f8422efd35797d9482bbd18f208a204
|
Python
|
harinathreddy224/data-mining-project
|
/bull_vs_bear.py
|
UTF-8
| 834
| 2.75
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab
import numpy as np
from peakdetect import peakdetect
folderPath = "./data/"
# Process S&P 500
dfSP500 = pd.read_csv('./dataset/SP500.csv')
dfSP500['Date'] = pd.to_datetime(dfSP500['Date'])
dfSP500 = dfSP500[['Date', 'Close']]
print(dfSP500.describe())
plt.plot(dfSP500['Date'], dfSP500['Close'])
# pylab.show()
minOrMax = peakdetect(dfSP500['Close'].as_matrix(), lookahead=50)[0]
print(minOrMax)
labels = []
for index, peak in enumerate(minOrMax[:-1]):
peakValue = peak[1]
lastPeakValue = minOrMax[index + 1][1]
delta = peakValue - lastPeakValue
if delta > 0:
labels.append("bull")
else:
if abs((delta / lastPeakValue) * 100 ) > 20:
labels.append("bear")
print("Bull:", labels.count("bull"))
print("Bear:", labels.count("bear"))
| true
|
90d59cb2a2fd930d41fc1c0ce1726d18808a08aa
|
Python
|
vishwasks32/python3-learning
|
/myp3basics/exers/exer2.py
|
UTF-8
| 1,066
| 4.21875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python3
#
# Author : Vishwas K Singh
# Email : vishwasks32@gmail.com
#
# Script to convert Celcius to Farenheit and vice versa
# Formula F = (C x 9/5) + 32
# C = (F - 32) x 5/9
import os
import sys
os.system('clear')

print("Menu: ")
print("1. Celcius to Farenheit")
print("2. Farenheit to Celcius")
print("q to Quit")

# Loop forever: read a menu choice, convert one value, repeat until 'q'.
while True:
    chice = input("Enter Choice(1/2/q): ")
    if chice == '1':
        chic_num = 1
    elif chice == '2':
        chic_num = 2
    elif chice == 'q':
        sys.exit(0)
    else:
        print("Invalid Input")
        # BUG FIX: without this `continue`, execution fell through to the
        # conversion below with chic_num undefined (NameError on the first
        # pass) or stale from a previous iteration.
        continue
    if chic_num == 1:
        celci = float(input("Enter value in degree celcius: "))
        Fheit = (celci * (9 /5)) + 32
        print("%.2f degree celcius is %.2f degree farenheit"%(celci,Fheit))
    elif chic_num == 2:
        Fheit = float(input("Enter value in degree farenheit: "))
        celci = (Fheit - 32 ) * (5/9)
        print("%.2f degree farenheit is %.2f degree celcius"%(Fheit,celci))
| true
|
2871145931f7d904c9b2ac1d349daf8b621cd6b6
|
Python
|
rbuckley-git/AdventOfCode2019
|
/day19.py
|
UTF-8
| 2,668
| 3.71875
| 4
|
[] |
no_license
|
# https://adventofcode.com/
# 19/12/2019
# Day 19
#
# This had me puzzled for ages. Turned out to be an out by one error. 100 cells are contained in 99 coordinate changes. Algorithm was sound.
#
import intcode
prog = intcode.get_program("19.input.txt")
grid = {}
def render_grid():
maxx = max(x for x,y in grid)
maxy = max(y for x,y in grid)
xstart = 0
ystart = 0
print()
for y in range(ystart,maxy+1):
chars = []
for x in range(xstart,maxx+1):
if (x,y) in grid:
if grid[(x,y)] == 1:
chars.append( '#' )
else:
chars.append('X')
else:
chars.append( '.' )
print("".join(chars))
def is_being_pulled(x,y):
ic = intcode.computer(prog)
pulled = 0
ic.add_input(x)
ic.add_input(y)
while not ic.is_stopped():
output = ic.run()
if output == None:
continue
if output == 1:
pulled+=1
return pulled
def is_square_pulled(top_right_corner,square_size):
tr = top_right_corner
tl = (tr[0]-square_size+1,tr[1])
br = (tr[0],tr[1]+square_size-1)
bl = (tr[0]-square_size+1,tr[1]+square_size-1)
if not is_being_pulled(tl[0],tl[1]):
return None
if not is_being_pulled(br[0],br[1]):
return None
if not is_being_pulled(bl[0],bl[1]):
return None
grid[tr] = 2
grid[tl] = 2
grid[bl] = 2
grid[br] = 2
return tl
def calc_affected_points(grid_size):
global grid
pulled = 0
for x in range(grid_size):
for y in range(grid_size):
if is_being_pulled(x,y):
pulled += 1
grid[(x,y)] = 1
return pulled
def calc_santa_position(square_size):
global grid
# assume solution is greater than 500,500
y = 30
x1 = 10
while not is_being_pulled(x1,y):
x1+=1
x2 = x1
while is_being_pulled(x1,y):
grid[(x1,y)] = 1
x1+=1
# cell before was being pulled
x1-=1
found = (0,0)
while found == (0,0):
y+=1 # next row
# move on while there is traction
while is_being_pulled(x1,y):
grid[(x1,y)] = 1
x1+=1
x1-=1
while not is_being_pulled(x2,y):
x2+=1
grid[(x2,y)] = 1
tl = is_square_pulled((x1,y),square_size)
if tl != None:
return tl
return found
if __name__ == '__main__':
print("Part 1, affected points",calc_affected_points(50))
render_grid()
(x,y) = calc_santa_position(100)
print("Part 2, answer",x*10000+y)
| true
|
afb0ad4de9c558d53a4a7f7b3320923bd41aa919
|
Python
|
konishis/python_training
|
/wwwproject/tests/test_practice2/test_q3_3.py
|
UTF-8
| 1,262
| 3.25
| 3
|
[] |
no_license
|
"""
q3_3【難】
借金返済計画を立てるプログラムを作りたい.
簡単のため,利子は無しとする.
まず,借金の総額と,ひと月に返済する金額を入力すると,
返済にかかる年数を表示し,
さらに,毎年のボーナスから返済する金額を入力すると,
返済完了が何年早まるかを表示し,
その次に返済を完了したい年数を入力すると,
ボーナスからいくら返せばよいかを表示するプログラムを作成せよ.
"""
# from wwwproject.practice2 import q3_3
# def test_1():
# q3_3.debtperson.debt = 500000
# q3_3.debtperson.monthrepayament = 10000
# result = q3_3.repaymentyears()
# assert result == 50
# def test_2():
# q3_3.debtperson.debt = 500000
# q3_3.debtperson.monthrepayament = 10000
# q3_3.debtperson.bonus = 50000
# result = q3_3.bonusrepaymentboost()
# assert result == (500000 / (10000 * 12)) - (500000 / ((10000 * 12) + 50000))
# def test_3():
# q3_3.debtperson.debt = 500000
# q3_3.debtperson.monthrepayament = 10000
# q3_3.debtperson.targetyear = 2
# result = q3_3.repaymentinbonus()
# assert result == 500000 / 24 - 10000
| true
|
91798fa11ae78596dea453c86c4fcde6cbc2b512
|
Python
|
neuroquant/skmediate
|
/skmediate/conditional_independence.py
|
UTF-8
| 11,292
| 2.828125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
"""Classes for computations of conditional independence."""
import numpy as np
import warnings
from collections.abc import Sequence
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.covariance import (
EmpiricalCovariance,
GraphicalLasso,
GraphicalLassoCV,
LedoitWolf,
MinCovDet,
OAS,
ShrunkCovariance,
)
from sklearn.utils import shuffle
from tqdm.auto import trange
COV_ESTIMATORS = {
"empirical": EmpiricalCovariance(),
"graphical_lasso": GraphicalLasso(),
"graphical_lasso_cv": GraphicalLassoCV(),
"ledoit_wolf": LedoitWolf(),
"min_cov_det": MinCovDet(),
"oas": OAS(),
"shrunk": ShrunkCovariance(),
}
def _quacks_like_estimator(instance):
"""Return True if the instance quacks like an sklearn estimator."""
required_attrs = [
hasattr(instance, "fit"),
hasattr(instance, "predict"),
hasattr(instance, "get_params"),
]
return all(required_attrs)
class ConditionalCrossCovariance(object):
"""Conditional dependence testing between multivariate quantities."""
def __init__(
self,
regression_estimator=None,
covariance_estimator=None,
precision_estimator=None,
residualized=False,
estimate_p_value=False,
n_shuffle=1000,
show_progress=True,
):
"""
Initialize ConditionalCrossCovariance with base estimators.
Parameters
----------
regression_estimator : sklearn estimator class or sequence.
This class will be used to fit Y=f(X) and X=f(X) and to generate
residuals for covariance estimation.
Default: :class:`sklearn.linear_model.LinearRegression`
covariance_estimator : sklearn covariance estimator class.
This class will be used to compute the covariance between the
residuals of f(X) and the Y, Z. This may also be a string, one of
["empirical", "graphical_lasso", "graphical_lasso_cv",
"ledoit_wolf", "min_cov_det", "oas", "shrunk"], to select one of the
covariance classes from sklearn.covariance.
Default: :class:`sklearn.covariance.EmpiricalCovariance`
precision_estimator : sklearn covariance estimator class.
This class will be used to compute the precision of the residualized
Y and Z. This may also be a string, one of ["empirical",
"graphical_lasso", "graphical_lasso_cv", "ledoit_wolf",
"min_cov_det", "oas", "shrunk"], to select one of the covariance
classes from sklearn.covariance.
Default: :class:`sklearn.covariance.EmpiricalCovariance`
residualized: bool
If True, assume that ``Y`` and ``Z`` have already been residualized
on ``X``.
Default: False
estimate_p_value: bool
If True, perform repeated permutation tests on ``Y`` to estimate the
p-value for the residual_cross_covariance_ score.
Default: False
n_shuffle: int
The number of permutation tests to perform to estimate the p-value.
Default: 1000
show_progress: bool
If True, show a progress bar while estimating the p-value
Default: True
Attributes
----------
regression_estimator_{xz, xy} : sklearn estimator class
The fitted regression estimators
residualized_{Y,Z}_ : numpy.ndarray
The residualized ``X``, ``Y``, and ``Z`` matrices
covfit_{yy,zy,zz}_ : sklearn covariance estimator class
The fitted covariance estimator for Y, Z, and YZ.
cov_zy_ : numpy.ndarray
The cross-covariance matrix for Y and Z
prec_{yy,zz}_ : numpy.ndarray
The precision matrix for Y and Z, respectively
residual_crosscovariance_ : float
The residualized cross-covariance
residual_crosscovariance_wherry_corrected_ : float
The residualized cross-covariance with bias corrected using the
Wherry formula. This may or may not be appropriate depending on the
type of covariance and precision estimators.
null_distribution_ : numpy.ndarray
Array of simulated null distribution values
rcc_p_value_ : float
Estimated p value for the ``residual_crosscovariance_`` point estimate
Notes
-----
.. [1] Wim Van der Elst, Ariel Abad Alonso, Helena Geys, Paul Meyvisch,
Luc Bijnens, Rudradev Sengupta & Geert Molenberghs (2019)
Univariate Versus Multivariate Surrogates in the Single-Trial
Setting, Statistics in Biopharmaceutical Research, 11:3,
301-310, DOI: 10.1080/19466315.2019.1575276
"""
if regression_estimator is None:
self.regression_estimator_xz = LinearRegression()
self.regression_estimator_xy = LinearRegression()
elif isinstance(regression_estimator, Sequence):
if not all(_quacks_like_estimator(r) for r in regression_estimator):
raise ValueError(
"regression_estimator must have a 'fit,' 'predict,' and "
"'get_params' methods. The recommended way to do that is "
"to wrap your regressor in a class that inherits from"
"sklearn.base.RegressorMixin."
)
self.regression_estimator_xz = regression_estimator[0]
self.regression_estimator_xy = regression_estimator[1]
else:
if not _quacks_like_estimator(regression_estimator):
raise ValueError(
"regression_estimator must have a 'fit,' 'predict,' and "
"'get_params' methods. The recommended way to do that is "
"to wrap your regressor in a class that inherits from"
"sklearn.base.RegressorMixin."
)
self.regression_estimator_xz = clone(regression_estimator)
self.regression_estimator_xy = clone(regression_estimator)
if covariance_estimator is None:
covariance_estimator = EmpiricalCovariance(assume_centered=True)
if precision_estimator is None:
precision_estimator = EmpiricalCovariance(assume_centered=True)
if isinstance(covariance_estimator, str):
if covariance_estimator not in COV_ESTIMATORS.keys():
raise ValueError(
f"If covariance_estimator is a string, it must be one of "
f"{COV_ESTIMATORS.keys()}. Got {covariance_estimator} "
f"instead."
)
self.covariance_estimator = clone(COV_ESTIMATORS[covariance_estimator])
else:
self.covariance_estimator = covariance_estimator
if isinstance(precision_estimator, str):
if precision_estimator not in COV_ESTIMATORS.keys():
raise ValueError(
f"If precision_estimator is a string, it must be one of "
f"{COV_ESTIMATORS.keys()}. Got {precision_estimator} "
f"instead."
)
self.precision_estimator = clone(COV_ESTIMATORS[precision_estimator])
else:
self.precision_estimator = precision_estimator
self.residualized = residualized
self.estimate_p_value = estimate_p_value
self.n_shuffle = n_shuffle
self.show_progress = show_progress
def fit(self, Z, Y, X=None, estimate_p_value=False):
"""
Fits a conditional covariance matrix.
Parameters
----------
Z, Y, X : ndarray
Input data matrices. ``Z``, ``Y``, and ``X`` must have the same
number of samples. That is, the shapes must be ``(n, p)``, ``(n, q)``,
and ``(n, r)``, where `n` is the number of samples, `p` and
`q` are the number of dimensions of ``Z`` and ``Y`` respectively.
Returns
-------
self : object
"""
# Step 1: Residualize with regression
# TODO: Check regression type for supporting single or multi-output regression
if not self.residualized:
regfit_xz = self.regression_estimator_xz.fit(X, Z)
regfit_xy = self.regression_estimator_xy.fit(X, Y)
# Compute residualized Zs and Ys.
self.residualized_Z_ = Z - regfit_xz.predict(X)
self.residualized_Y_ = Y - regfit_xy.predict(X)
else:
self.residualized_Z_ = np.copy(Z)
self.residualized_Y_ = np.copy(Y)
if X is not None:
warnings.warn(
"You supplied `X` to the fit method but specified "
"`residualized=True` on init. This method will not use the "
"`X` argument that you provided."
)
# Step 2: Covariance estimation
# Step 2a: Estimate covariance of Y,Z
W = np.concatenate((self.residualized_Y_, self.residualized_Z_), axis=1)
self.covfit_zy_ = self.covariance_estimator.fit(W)
cols_Y = self.residualized_Y_.shape[1]
self.cov_zy_ = self.covfit_zy_.covariance_[:cols_Y, cols_Y:]
# Step 2b: Estimate precision of Z
self.covfit_zz_ = self.precision_estimator.fit(self.residualized_Z_)
self.prec_zz_ = self.covfit_zz_.precision_
# Step 2c: Estimate precision of Y
self.covfit_yy_ = self.precision_estimator.fit(self.residualized_Y_)
self.prec_yy_ = self.covfit_yy_.precision_
# Step 2d: Calculate residual cross-covariance
self.residual_crosscovariance_ = np.diag(
((self.cov_zy_ @ self.prec_zz_) @ self.cov_zy_.T) @ self.prec_yy_
).flatten()
n, k = Z.shape
self.residual_crosscovariance_wherry_corrected_ = 1 - (
1 - self.residual_crosscovariance_
) * ((n - 1) / (n - k - 1))
if self.estimate_p_value:
rcc_shuffle = []
shuffle_cov_est = clone(self.covariance_estimator)
shuffle_prec_est = clone(self.precision_estimator)
if self.show_progress:
shuffle_range = trange(self.n_shuffle)
else:
shuffle_range = range(self.n_shuffle)
for n in shuffle_range:
shuffle_Y = shuffle(self.residualized_Y_)
shuffle_W = np.concatenate((shuffle_Y, self.residualized_Z_), axis=1)
shuffle_covfit_zy_ = shuffle_cov_est.fit(shuffle_W)
shuffle_cov_zy_ = shuffle_covfit_zy_.covariance_[:cols_Y, cols_Y:]
shuffle_prec_yy_ = shuffle_prec_est.fit(shuffle_Y).precision_
rcc_shuffle.append(
np.diag(
((shuffle_cov_zy_ @ self.prec_zz_) @ shuffle_cov_zy_.T)
@ shuffle_prec_yy_
).flatten()
)
self.null_distribution_ = np.array(rcc_shuffle)
self.rcc_p_value_ = (
np.sum(rcc_shuffle >= self.residual_crosscovariance_, axis=0)
/ self.n_shuffle
)
return self
| true
|
d94afd4f8844ccdec796e2100fa1c597d331eb95
|
Python
|
jaqquery/BigSmallDice
|
/BigSmallDice/BigSmallDice.py
|
UTF-8
| 576
| 3.515625
| 4
|
[] |
no_license
|
import os
import sys
import random
import time
print("Big or Small Dice Dame")
print("Press any key to start")
system = False
while system == False:
keyInput = input()
diceA = random.randint(0,6)
diceB = random.randint(0,6)
diceC = random.randint(0,6)
result = diceA + diceB + diceC
time.sleep(3)
if result > 10:
print(result)
print("BIG")
else:
print(result)
print("SMALL")
print("Press any key to continue")
strNext = input()
diceA = diceB = diceC = 0
os.system("cls")
| true
|
41b6fd89e39cf40330f423534f56cceb25af1b68
|
Python
|
VadimVovk/VadimWork
|
/HomeWork6.py
|
UTF-8
| 2,439
| 3.28125
| 3
|
[] |
no_license
|
# my_list = ["ab", "cd", "ef", "gh"]
# result=[]
# for index,item in enumerate(my_list):
# if index%2 == 0:
# result.append(item)
# else:
# result.append(item[::-1])
# print(result)
# #2##########
# my_list = ["aba", "cad", "aef", "gh", "aaa"]
# result=[]
# for str_a in (my_list):
# if str_a[0] == "a":
# result.append(str_a)
# print(result)
# 3#########
# my_list = ["ab", "cad", "aef", "gh", "aaa"]
# result=[]
# for str_1 in (my_list):
# if "a" in str_1:
# result.append(str_1)
# print(result)
# #4###########
# my_list = ["aab", "cad", 222, "gh", "aaa", 555]
# result=[]
# for item in (my_list):
# if type(item) == str:
# result.append(item)
# print(result)
#5#########
my_str=("112334556788")
my_list=[]
my_str_1=(set(my_str))
for symbol in my_str_1:
if my_str.count(symbol)==1:
my_list.append(symbol)
print(my_list)
#6#######
my_str_1=("12345568a")
my_str_2 = ("222344778a")
my_str_3 = my_str_1+my_str_2
my_list_f=[]
my_list=set(my_str_3)
for symbol in my_list:
if symbol in my_str_1 and symbol in my_str_2:
my_list_f.append(symbol)
print(my_list_f)
# #7#########
my_str_1=("12345568")
my_str_2 = ("222344778")
my_str_3 = my_str_1+my_str_2
my_list_f=[]
my_list=list(set(my_str_3))
for symbol in my_list:
if my_str_1.count(symbol)==1 and my_str_2.count(symbol) == 1:
my_list_f.append(symbol)
print(set(my_list_f))
# #8###########
# person = {"Фамилия" : "Вовк",
# "Имя": "Вадим",
# "Возраст": "43",
# "Адрес": {
# "Страна": "Украина",
# "Город": "Днепр",
# "Улица": "Победы"},
# "Работа": {
# "Наличие": "нет",
# "Должность": ""}
# }
# print(person ['Адрес'])
# #9############
# cake = {"Коржи":{
# "Мука": "1000",
# "Молоко": "500",
# "Масло": "200",
# "Яйца": "6"},
# "Крем": {
# "Сахар": "300",
# "Масло": "200",
# "Ваниль": "10",
# "Сметана": "300"},
# "Глазурь":{
# "Какао": "300",
# "Сахар": "100",
# "Масло": "150"}
# }
# print(cake["Глазурь"])
| true
|
16950d2b18262680be0315db1eb10a4f15701158
|
Python
|
TheElk205/RotorTestingBench
|
/python/plotSerialData.py
|
UTF-8
| 1,322
| 3.109375
| 3
|
[] |
no_license
|
import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
import numpy as np
from classes.SerialReader import SerialReader
threads = []
# Create new threads
thread1 = SerialReader(1, "Thread-1", 1)
# Start new Threads
thread1.start()
# Add threads to thread list
threads.append(thread1)
fig = plt.figure()
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1, 3, 3)
xReadValues = []
yReadValues = []
yMeanValues = []
valuesRead = 0
valueSum = 0
ser = serial.Serial('/dev/ttyACM0', 9600)
print(ser.name)
def animate(i):
values1 = np.array(thread1.values[1])
values2 = np.array(thread1.values[2])
values3 = np.array(thread1.values[3])
ax1.clear()
ax1.plot(values1[:, 0], values1[:, 1])
ax2.clear()
ax2.plot(values2[:, 0], values2[:, 1])
ax3.clear()
ax3.plot(values3[:, 0], values3[:, 1])
ani = animation.FuncAnimation(fig, animate, interval=100)
plt.show()
for t in threads:
t.exit()
t.join()
print("read " + str(len(t.values[0])) + " values for sensor 0")
print("read " + str(len(t.values[1])) + " values for sensor 1")
print("read " + str(len(t.values[2])) + " values for sensor 2")
print("read " + str(len(t.values[3])) + " values for sensor 3")
print ("Exiting Main Thread")
| true
|
622c960c508043c181d1611e223e29e9965e8970
|
Python
|
geyunxiang/mmdps
|
/mmdps/vis/heatmap.py
|
UTF-8
| 6,742
| 2.921875
| 3
|
[] |
no_license
|
"""
Plot network heatmap.
"""
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm
from mmdps.util import path
class HeatmapPlot:
"""The heatmap plot."""
def __init__(self, net, title, outfilepath, valuerange=(-1.0, 1.0)):
"""Init the heatmap.
net, the network.
title, the image titile.
outfilepath, the output image path.
valuerange, the valuerange of the net.
"""
self.net = net
self.atlasobj = self.net.atlasobj
self.count = self.atlasobj.count
self.title = title
self.outfilepath = outfilepath
self.valuerange = valuerange
self.cmap = self.get_cmap()
self.title_font_size = 36
self.show_ticks = True
self.show_colorbar = True
def set_title_font_size(self, font_size):
self.title_font_size = font_size
def set_show_ticks(self, is_showing):
self.show_ticks = is_showing
def set_show_colorbar(self, is_showing):
self.show_colorbar = is_showing
def get_cmap(self):
"""Get default cmap use valuerange.
If all positive, use Greys.
If have negative, use coolwarm.
"""
if self.valuerange[0] >= 0:
return matplotlib.cm.Greys
else:
return matplotlib.cm.coolwarm
def set_cmap(self, cmap):
"""
cmap should be one of matplotlib.cm.xxx
see https://matplotlib.org/gallery/color/colormap_reference.html for a list of cmaps
"""
self.cmap = cmap
def plot(self):
"""Do the plot."""
fig = plt.figure(figsize=(20, 20))
netdata_adjusted = self.atlasobj.adjust_mat(self.net.data)
netdata_adjusted = np.nan_to_num(netdata_adjusted)
axim = plt.imshow(netdata_adjusted, interpolation='none', cmap=self.cmap,
vmin=self.valuerange[0], vmax=self.valuerange[1])
nrow, ncol = netdata_adjusted.shape
ax = fig.gca()
ax.set_xlim(-0.5, ncol-0.5)
ax.set_ylim(nrow-0.5, -0.5)
# set ticks
if self.show_ticks:
ax.set_xticks(range(self.count))
ax.set_xticklabels(self.atlasobj.ticks_adjusted, rotation=90)
ax.set_yticks(range(self.count))
ax.set_yticklabels(self.atlasobj.ticks_adjusted)
else:
ax.set_xticks([])
ax.set_yticks([])
# set colorbar
if self.show_colorbar:
cbar = fig.colorbar(axim, fraction=0.046, pad=0.04)
# change colorbar ticks font size
# see https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.tick_params
# and https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.tick_params.html#matplotlib.axes.Axes.tick_params
cbar.ax.tick_params(labelsize = 25, length = 5, width = 5)
# save fig
plt.title(self.title, fontsize = self.title_font_size)
path.makedirs_file(self.outfilepath)
plt.savefig(self.outfilepath, dpi = 200)
plt.close()
def plotRSN(self):
"""
Similar as plot(), but plot the heatmap according to RSN config file
and use black lines to separate RSNs
"""
fig = plt.figure(figsize=(20, 20))
netdata_adjusted = self.atlasobj.adjust_mat_RSN(self.net.data)
netdata_adjusted = np.nan_to_num(netdata_adjusted)
axim = plt.imshow(netdata_adjusted, interpolation='none', cmap=self.cmap,
vmin=self.valuerange[0], vmax=self.valuerange[1])
nrow, ncol = netdata_adjusted.shape
ax = fig.gca()
ax.set_xlim(-0.5, ncol-0.5)
ax.set_ylim(nrow-0.5, -0.5)
# set ticks
ticks_adjusted, nodeCount = self.atlasobj.adjust_ticks_RSN()
if self.show_ticks:
ax.set_xticks(range(self.count))
ax.set_xticklabels(ticks_adjusted, rotation=90)
ax.set_yticks(range(self.count))
ax.set_yticklabels(ticks_adjusted)
else:
ax.set_xticks([])
ax.set_yticks([])
# plot horizontal and vertical lines
plotIdx = -0.5
ax.vlines(plotIdx, -0.5, self.count, linewidths = 5) # vposition, start, end
ax.hlines(plotIdx, -0.5, self.count, linewidths = 5) # hposition, start, end
border = [0] # the border of each RSN, including 0 and max
for idx in range(len(nodeCount)):
netCount = nodeCount[idx]
plotIdx += netCount
ax.vlines(plotIdx, -0.5, self.count, linewidths = 5) # vposition, start, end
ax.hlines(plotIdx, -0.5, self.count, linewidths = 5) # hposition, start, end
border.append(border[-1] + netCount)
# add ticks for RSN
minorTicks = []
for idx in range(len(border) - 1):
minorTicks.append((border[idx] + border[idx+1])/2.0)
ax.set_xticks(minorTicks, minor=True)
ax.set_yticks(minorTicks, minor=True)
ax.tick_params(which="minor", bottom=False, left=False, pad=35, labelsize = 30) # make minor ticks invisible
ax.set_xticklabels(self.atlasobj.get_RSN_list(), minor = True)
ax.set_yticklabels(self.atlasobj.get_RSN_list(), minor = True)
# set colorbar
if self.show_colorbar:
cbar = fig.colorbar(axim, fraction=0.046, pad=0.04)
# change colorbar ticks font size
# see https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.tick_params
# and https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.tick_params.html#matplotlib.axes.Axes.tick_params
cbar.ax.tick_params(labelsize = 25, length = 5, width = 5)
# save fig
plt.title(self.title, fontsize = self.title_font_size)
path.makedirs_file(self.outfilepath)
plt.savefig(self.outfilepath, dpi=200)
plt.close()
class HeatmapPlotRows:
"""The heatmap rows plot."""
def __init__(self, atlasobj, rowsmat, rowsticks, title, outfilepath, valuerange=(-1.0, 1.0)):
"""Init the heatmap rows.
atlasobj, the atlas object.
rowsmat, the rows matrix, each row acts as an attribute.
rowsticks, the rows ticks, each row's y tick label.
title, the image titile.
outfilepath, the output image path.
valuerange, the valuerange of the net.
"""
self.rowsmat = rowsmat
self.rowsticks = rowsticks
self.atlasobj = atlasobj
self.count = self.atlasobj.count
self.title = title
self.outfilepath = outfilepath
self.valuerange = valuerange
def get_cmap(self):
"""Get the color map."""
if self.valuerange[0] >= 0:
return matplotlib.cm.Greys
else:
return matplotlib.cm.coolwarm
def plot(self):
"""Do the plot."""
fig = plt.figure(figsize=(20, 8))
netdata_adjusted = self.atlasobj.adjust_mat_col(self.rowsmat)
netdata_adjusted = np.nan_to_num(netdata_adjusted)
axim = plt.imshow(netdata_adjusted, interpolation='none', cmap=self.get_cmap(),
vmin=self.valuerange[0], vmax=self.valuerange[1])
nrow = self.rowsmat.shape[0]
_, ncol = netdata_adjusted.shape
ax = fig.gca()
ax.set_xticks(range(self.count))
ax.set_xticklabels(self.atlasobj.ticks_adjusted, rotation=90)
ax.set_yticks(range(nrow))
ax.set_yticklabels(self.rowsticks)
ax.set_xlim(-0.5, ncol-0.5)
ax.set_ylim(nrow-0.5, -0.5)
#fig.colorbar(axim)
plt.title(self.title, fontsize=24)
path.makedirs_file(self.outfilepath)
plt.savefig(self.outfilepath, dpi=100)
plt.close()
def plot_net_RSN(net, title, outfilepath):
plotter = HeatmapPlot(net, title, outfilepath)
plotter.plotRSN()
| true
|
e125753c3ddb12c1a4dcec2277bd0f7837b153d2
|
Python
|
nbro/ands
|
/ands/algorithms/numerical/horner.py
|
UTF-8
| 4,956
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Meta-info
Author: Nelson Brochado
Created: 30/09/2017
Updated: 30/09/2017
# Description
## Polynomials
The most common way of expressing a polynomial p: R → R of degree at most u is
to use the monomial basis {1, x, x², ..., xᵘ} and to write p as
p(x) = aᵤ * xᵘ + aᵤ₋₁ * xᵘ⁻¹ + ... + a₁ * x + a₀ = ∑ᵢ₌₀ᶦ⁼ᵘ aᵢ * xᶦ
with coefficients a₀, a₁, ..., aᵤ₋₁, aᵤ ∈ R. Using this representation, one can
show that:
p'(x) = u * aᵤ * xᵘ⁻¹ + (u − 1) * aᵤ₋₁ * xᵘ⁻² + ... + 2 * a₂ * x + a₁ =
= ∑ᵢ₌₀ᶦ⁼ᵘ⁻¹ (i + 1) * aᵢ₊₁ * xᶦ,
and
p⁽ᶦ⁾(0) = i! * aᵢ, for i = 0, ..., u,
and
p⁽ᵘ⁺¹⁾(x) = 0.
## Horner's method to compute polynomials
Horner's method (a.k.a. Horner scheme or Horner's rule) is an algorithm for
calculating polynomials. It consists of transforming the monomial form of p into
a computationally efficient form.
Suppose we want to evaluate the polynomial p at a specific value of x, say x₀.
We now transform the monomial (usual) form of p into an equivalent form, which
allows us to efficiently evaluate p at x₀:
p(x) = aᵤ * xᵘ + aᵤ₋₁ * xᵘ⁻¹ + ... + a₁ * x + a₀ <=>
p(x) = (aᵤ * xᵘ⁻¹ + aᵤ₋₁ * xᵘ⁻² + ... + a₁) * x + a₀ <=>
p(x) = ((aᵤ * xᵘ⁻² + aᵤ₋₁ * xᵘ⁻³ + ... + a₂) * x + a₁) * x + a₀ <=>
If we continue this process, we end up with the following formula:
p(x) = (((aᵤ₋₁ + aᵤ * x) * x + ... + a₂) * x + a₁) * x + a₀
We now calculate p at x₀ by replacing x with x₀ in the general form
p(x₀) = (((aᵤ₋₁ + aᵤ * x₀) * x₀ + ... + a₂) * x₀ + a₁) * x₀ + a₀
### Why would this allow us to evaluate p at x₀ efficiently?
If, for simplicity, we perform the following changes of variables
bᵤ := aᵤ
bᵤ₋₁ := aᵤ₋₁ + bᵤ * x₀
.
.
.
b₀ := a₀ + b₁ * x₀
And replace these new variables (or alias) in the evaluation of p at x₀, that is
p(x₀) = (((aᵤ₋₁ + bᵤ * x₀) * x₀ + ... + a₂) * x₀ + a₁) * x₀ + a₀ <=>
p(x₀) = (((bᵤ₋₁) * x₀ + ... + a₂) * x₀ + a₁) * x₀ + a₀ <=>
.
.
.
p(x₀) = a₀ + b₁ * x₀ <=>
p(x₀) = b₀
We see that we end up, at the end, to discover that the result of p(x₀) is b₀.
### How many changes of variables do we perform?
This can easily be seen from the subscripts of the variables b. We have u
changes of variables, where u is the original degree of the polynomial p.
### In each change of variable, how many additions and multiplications do we
perform?
Excluding bᵤ := aᵤ, which we assume to be a constant-time operation, all other u
changes of variables perform one addition and one multiplication.
### How many operations have we performed in total?
So, we have u additions and u multiplications, plus a constant-time operation.
### Notes
- When evaluating p(x₀) with the changes of variables, we are only performing
the operations in the changes of variables.
### Optimality of Horner's method
Horner's method is optimal, in the sense that any algorithm to evaluate an
arbitrary polynomial must use at least as many operations.
## Computing polynomials by implementing Horner's method
p(x₀), a u-degree polynomial, can computed efficiently using Horner's scheme, in
O(u) operations, as follows
function HORNER({a₀, a₁, ..., aᵤ₋₁, aᵤ}, x₀):
p := aᵤ
for i from u − 1 to 0 by −1 do:
p := p * x₀ + aᵢ
return p
From the previous pseudo-code, we can easily see that this is a O(u) algorithm,
since we have u iterations of the for loop and provided that multiplications and
additions can be performed in O(1), w.r.t. u.
### Notes
- HORNER basically implements the changes of variables explained above.
# TODO
- Add example of how Horner's method works in practice.
- Implement the slightly optimized version using explicit fused
Multiply–accumulate operation.
# References
- Dr. prof. Kai Hormann's notes for the Numerical Algorithms course, fall, 2017.
- https://en.wikipedia.org/wiki/Horner%27s_method
"""
__all__ = ["horner"]
def horner(x0: float, coefficients: list) -> float:
"""A function that implements the Horner's method for evaluating a
polynomial, with coefficients, at x = x0.
Time complexity: O(n), where n = len(coefficients)."""
assert isinstance(coefficients, list)
assert all(isinstance(x, float) or isinstance(x, int) for x in coefficients)
assert isinstance(x0, float) or isinstance(x0, int)
p = 0
for c in reversed(coefficients):
p = p * x0 + c
return p
| true
|
e07f231fc81a6f7e92eb0f2192ce80cff0bbdb5c
|
Python
|
acganesh/euler
|
/545/545.py
|
UTF-8
| 2,889
| 3.5625
| 4
|
[] |
no_license
|
from Euler import prime_sieve, factor
import itertools
from bisect import bisect
import random
def prime_sieve(l):
s = [True] * (l + 1)
s[0:2] = [False, False]
for x in xrange(2, l):
if s[x]:
s[x ** 2::x] = [False] * ((l - x ** 2) / x + 1)
primes = [x for x in xrange(lim) if s[x]]
return s, primes
def miller_rabin(n):
"""
Check n for primality: Example:
>miller_rabin(162259276829213363391578010288127) #Mersenne prime #11
True
Algorithm & Python source:
http://en.literateprograms.org/Miller-Rabin_primality_test_(Python)
"""
d = n - 1
s = 0
while d % 2 == 0:
d >>= 1
s += 1
for repeat in range(20):
a = 0
while a == 0:
a = random.randrange(n)
if not miller_rabin_pass(a, s, d, n):
return False
return True
def miller_rabin_pass(a, s, d, n):
a_to_power = pow(a, d, n)
if a_to_power == 1:
return True
for i in range(s-1):
if a_to_power == n - 1:
return True
a_to_power = (a_to_power * a_to_power) % n
return a_to_power == n - 1
lim = 10**7
sieve, primes = prime_sieve(lim)
print 'done sieving'
# From http://stackoverflow.com/a/171784
def get_divisors(n):
factors = factor(n)
nfactors = len(factors)
f = [0] * nfactors
while True:
yield reduce(lambda x, y: x*y, [factors[x][0]**f[x] for x in range(nfactors)], 1)
i = 0
while True:
f[i] += 1
if f[i] <= factors[i][1]:
break
f[i] = 0
i += 1
if i >= nfactors:
return
def D(k):
target = 20010
prod = 1
divisors = get_divisors(k)
for d in divisors:
if is_prime(d+1):
prod *= (d+1)
if prod > target:
break
return prod
def is_prime(n):
if n < lim:
return sieve[n]
return miller_rabin(n)
# Test if n satisfies D(n) = 20010
def is_valid(n):
return D(n) == 20010
#@profile
def F(m):
# For composite multiples m, pre_check m
# to see that m is a product of primes that are valid
def pre_check(n):
if n == 1 or is_prime(n):
return True
divisors = get_divisors(n)
for f in divisors:
if not f in valid and f < n:
return False
return True
count = 0
base = 308
factor = 1
valid = set([])
while count < m:
num = base*factor
if pre_check(factor):
if is_valid(num):
valid.add(factor)
# Progress:
if count % 1000 == 0: print count
count += 1
factor += 1
return num
def tests():
assert D(4) == 30
assert D(308) == 20010
assert F(1) == 308
assert F(10) == 96404
tests()
print F(100000)
| true
|
bf4698b4f2772427eee6b02d8c3fe6fa52b06f4a
|
Python
|
quekyufei/lottery-env
|
/environment/plotpoint.py
|
UTF-8
| 418
| 2.640625
| 3
|
[] |
no_license
|
from .constants import LOTTERY_RESULTS
class PlotPoint():
def __init__(self, winnings, tier_idx, bet, won, game_step):
self.winnings = winnings
self.tier = LOTTERY_RESULTS[tier_idx] # loss, small win, med win, large win, jackpot
self.bet = bet
self.won = won
self.game_step = game_step
@classmethod
def beginning(cls):
return cls(None, 0, None, None, 0)
| true
|
c2d8191dae4b13a50ee6ee3854b87f3d83f2f09c
|
Python
|
kate-gordon/python_GameofThrones
|
/game_of_thrones_starter/got_demo.py
|
UTF-8
| 1,920
| 3.8125
| 4
|
[] |
no_license
|
from pprint import pprint
from characters import characters
from houses import houses
# ## Characters with names starting with "A "
# namesA = 0
# for character in characters:
# if character['name'][0] == 'A':
# namesA += 1
# print(namesA)
# ## Characters with names starting with "Z"
# namesZ = 0
# for character in characters:
# if character['name'].startswith('Z') == True:
# namesZ += 1
# print(namesZ)
# ## Number of characters who died
# index = 0
# for character in characters:
# if character['died'] != '':
# index +=1
# print(index)
# ## Character with the most titles
# amtofTitles = 0
# charName = ''
# for character in characters:
# if len(character['titles']) > amtofTitles:
# amtofTitles = len(character['titles'])
# charName = character['name']
# print(amtofTitles)
# print(charName)
# ## Number of Valyrian characters
# numVal = 0
# for character in characters:
# if character['culture'] == 'Valyrian':
# numVal += 1
# print(numVal)
# ## Name of actor who plays Hot Pie
# for character in characters:
# if character['aliases'] == 'Hot Pie' or character['name'] == 'Hot Pie':
# print(character['playedBy'])
# ## Number of characters from the books who are in the series
# inSeries = 0
# for character in characters:
# if character['tvSeries'][0] != '':
# inSeries += 1
# print(inSeries)
# ## List of Targaryens
# for character in characters:
# if 'Targaryen' in character['name']:
# print(character['name'])
## Houses & Number of Allegiances to each
members_by_house = {}
for character in characters:
for url in character['allegiances']:
houseName = houses[url]
if houseName in members_by_house:
members_by_house[houseName] += 1
else:
members_by_house[houseName] = 1
pprint(members_by_house)
| true
|
216c564a66269f26a7014c1159cbadc83636d39d
|
Python
|
DeshErBojhaa/sports_programming
|
/leetcode/833. Find And Replace in String.py
|
UTF-8
| 680
| 3.25
| 3
|
[] |
no_license
|
# 833. Find And Replace in String
class Solution:
def findReplaceString(self, S: str, indexes: List[int], sources: List[str], targets: List[str]) -> str:
ans, instructions = [], {}
for i, s, r in zip(indexes, sources, targets):
instructions[i] = (s, r, len(s))
i = 0
while i < len(S):
if i not in instructions:
ans.append(S[i])
elif instructions[i][0] == S[i:i+instructions[i][2]]:
ans.append(instructions[i][1])
i = i + instructions[i][2] - 1
else:
ans.append(S[i])
i += 1
return ''.join(ans)
| true
|
2893a34288d5a4bca591e9980da36b05c9c69831
|
Python
|
hjazcarate/empleado
|
/applications/departamento/models.py
|
UTF-8
| 740
| 2.53125
| 3
|
[] |
no_license
|
from django.db import models
# Create your models here, blank_True -> el campo permite espacios o null=True
# str(self.id) el id es entero str permite un string
# editable=False -> bloquea el uso de ese campo
class Departamento(models.Model):
    # Human-readable department name; optional (blank and null allowed).
    name = models.CharField('Nombre', max_length=50, blank=True, null=True)
    # Short unique department code.
    # NOTE(review): 'shor_name' looks like a typo for 'short_name'; renaming
    # would require a schema migration, so it is documented here instead.
    shor_name = models.CharField('Nombre Corto', max_length=20, unique=True)
    # Soft-delete flag: True marks the row as cancelled without removing it.
    anulate = models.BooleanField('Anulado', default=False)

    class Meta:
        verbose_name = 'Mi Departamento'
        verbose_name_plural = 'Areas de la empresa'
        ordering = ['-name']
        unique_together = ('name', 'shor_name')

    def __str__(self):
        # str(self.id) because the integer primary key cannot be concatenated directly.
        return str(self.id) + ' ' + self.name + '-' + self.shor_name
| true
|
34d0ea514b0b34f05f8fd7a5ff5c13b13f2e25bb
|
Python
|
sittinginmiami/practice-projects
|
/Quadraticpolynomialssumofdigitstothe4thpower.py
|
UTF-8
| 456
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
# mensa bulletin Aug 2021 quadratic polynomials
#
# this program will find the three 4-digit numbers that are the sum of their digits to the 4th power
#
# no import math
# Brute force: check every candidate (range kept from the original; for the
# 3-digit value 999 the missing thousands digit contributes 0 either way).
for candidate in range(999, 9999):
    # A hit is a number equal to the sum of the 4th powers of its digits.
    if candidate == sum(int(digit) ** 4 for digit in str(candidate)):
        print(candidate)
| true
|
afd761d234ae6fbb2d1f37650c8645f0e630e2ff
|
Python
|
yycho0108/MobileNet
|
/voc_utils.py
|
UTF-8
| 4,776
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
import pandas as pd
import os
from bs4 import BeautifulSoup
from more_itertools import unique_everseen
import numpy as np
import matplotlib.pyplot as plt
import skimage
from skimage import io
root_dir = os.environ["VOC_ROOT"]
img_dir = os.path.join(root_dir, 'JPEGImages/')
ann_dir = os.path.join(root_dir, 'Annotations')
set_dir = os.path.join(root_dir, 'ImageSets', 'Main')
def ann2bbox(ann, categories):
    """Extract normalised bounding boxes and labels from one VOC annotation.

    ann: BeautifulSoup-parsed VOC XML annotation.
    categories: ordered class-name list; label = index into this list.
    Returns (bbox, labels): bbox is (N, 4) float32 rows of
    [y_min, x_min, y_max, x_max] normalised by image size; labels is (N,) int32.
    """
    width = int(ann.findChild('width').contents[0])
    height = int(ann.findChild('height').contents[0])
    objs = ann.findAll('object')
    bbox = []
    labels = []
    for obj in objs:
        label = categories.index(obj.findChild('name').contents[0])
        labels.append(label)
        box = obj.findChild('bndbox')
        # Normalise pixel coordinates to [0, 1].
        y_min = float(box.findChild('ymin').contents[0]) / height
        x_min = float(box.findChild('xmin').contents[0]) / width
        y_max = float(box.findChild('ymax').contents[0]) / height
        x_max = float(box.findChild('xmax').contents[0]) / width
        bbox.append([y_min, x_min, y_max, x_max])
    return np.asarray(bbox, dtype=np.float32), np.asarray(labels, dtype=np.int32)
class VOCLoader(object):
    """Helper around a Pascal-VOC directory tree
    (JPEGImages/, Annotations/, ImageSets/Main/)."""

    def __init__(self, root_dir):
        self.root_dir = root_dir
        self.img_dir = os.path.join(root_dir, 'JPEGImages/')
        self.ann_dir = os.path.join(root_dir, 'Annotations/')
        self.set_dir = os.path.join(root_dir, 'ImageSets', 'Main')

    def list_image_sets(self):
        # The 20 Pascal-VOC object categories, in canonical label order.
        return [
            'aeroplane', 'bicycle', 'bird', 'boat',
            'bottle', 'bus', 'car', 'cat', 'chair',
            'cow', 'diningtable', 'dog', 'horse',
            'motorbike', 'person', 'pottedplant',
            'sheep', 'sofa', 'train',
            'tvmonitor']

    def list_types(self):
        # Names of the standard dataset splits.
        return ['train', 'val', 'trainval', 'test']

    def imgs_from_category(self, cat_name, dataset, as_list=False):
        """Read the <category>_<dataset>.txt split index.

        Returns a DataFrame of (filename, true) pairs, or only the positive
        filenames as an array when as_list is True.
        """
        filename = os.path.join(self.set_dir, cat_name + "_" + dataset + ".txt")
        df = pd.read_csv(
            filename,
            delim_whitespace=True,
            header=None,
            names=['filename', 'true'])
        if(as_list):
            # Keep only images actually containing the category.
            df = df[df['true'] == 1]
            return df['filename'].values
        return df

    def annotation_file_from_img(self, img_name):
        # Annotation XML path for a given image basename (no extension).
        return os.path.join(self.ann_dir, img_name) + '.xml'

    def img_from_annotation(self, annot):
        # Full JPEG path recorded inside a parsed annotation.
        img_file = annot.findChild('filename').contents[0]
        return os.path.join(self.img_dir, img_file)

    def grab(self, basename):
        """Return (image path, bounding boxes, labels) for one sample."""
        img_name = os.path.join(self.img_dir, basename + '.jpg')
        xml = ""
        with open(os.path.join(self.ann_dir, basename + '.xml')) as f:
            xml = f.readlines()
        # Tabs confuse the XML parser; strip them before parsing.
        xml = ''.join([line.strip('\t') for line in xml])
        ann = BeautifulSoup(xml)
        box, lbl = ann2bbox(ann, self.list_image_sets())
        return img_name, box, lbl

    def load_annotation(self, img_filename):
        """
        Load annotation file for a given image.

        Args:
            img_name (string): string of the image name, relative to
                the image directory.

        Returns:
            BeautifulSoup structure: the annotation labels loaded as a
                BeautifulSoup data structure
        """
        xml = ""
        with open(self.annotation_file_from_img(img_filename)) as f:
            xml = f.readlines()
        xml = ''.join([line.strip('\t') for line in xml])
        return BeautifulSoup(xml)

    def load_img(self, img_filename, path_only=False):
        """
        Load image from the filename. Default is to load in color if
        possible.

        Args:
            img_name (string): string of the image name, relative to
                the image directory.

        Returns:
            np array of float32: an image as a numpy array of float32
        """
        img_filename = os.path.join(self.img_dir, img_filename + '.jpg')
        if path_only:
            return img_filename
        img = skimage.img_as_float(io.imread(
            img_filename)).astype(np.float32)
        # Promote grayscale to HxWx1 and drop any alpha channel.
        if img.ndim == 2:
            img = img[:, :, np.newaxis]
        elif img.shape[2] == 4:
            img = img[:, :, :3]
        return img

    def list_all(self):
        ## CUSTOM FUNCTION -- DO NOT USE
        # Reads basenames from a custom list.txt at the dataset root.
        with open(os.path.join(self.root_dir, 'list.txt')) as f:
            return [l.strip() for l in f.readlines()]

    def annotations(self):
        # Generator over every parsed annotation in the Annotations directory.
        for fn in os.listdir(self.ann_dir):
            filepath = os.path.join(self.ann_dir, fn)
            if os.path.isfile(filepath):
                xml = ""
                with open(filepath) as f:
                    xml = f.readlines()
                xml = ''.join([line.strip('\t') for line in xml])
                yield BeautifulSoup(xml)
| true
|
ec3263487861ff9805d665972532227326a2791c
|
Python
|
marcial2020/python_1
|
/tuples.py
|
UTF-8
| 145
| 3.328125
| 3
|
[] |
no_license
|
# Tuples are immutable: once created their elements cannot be reassigned.
coordinates = (4, 5)
# Attempting coordinates[1] = 10 would raise a TypeError.
x, y = coordinates  # tuple unpacking
print(x)
| true
|
50bda7d89d046f58e4e3e893a362e174dbd7f403
|
Python
|
TrellixVulnTeam/allPythonPractice_R8XZ
|
/2019/05/0520多进程服务器/05-单进程非阻塞多客端server.py
|
UTF-8
| 744
| 3.140625
| 3
|
[] |
no_license
|
# Lesson snippet: a single-process non-blocking TCP server handling many
# clients. NOTE(review): this is pseudo-code — 'socket(.....)' is a
# placeholder and the file does not run as-is.
tcp_server_socket = socket(.....)
tcp_server_socket.setblocking(False)  # put the listening socket in non-blocking mode
client_socket_list = list()
while True:
    try:
        new_socket, new_addr = tcp_server_socket.accept()
    except Exception as ret:
        print('----没有新客户端到来----')
    else:
        print('----只要没有产生异常,那么就表示来了一个新客户端')
        # NOTE(review): 'serblocking' looks like a typo for 'setblocking' — confirm.
        new_socket.serblocking(False)
        client_socket_list.append(new_socket)
    for client_socket in client_socket_list:
        try:
            # NOTE(review): recv() normally requires a buffer size, e.g. recv(1024).
            client_socket.recv()
        except Exception as ret:
            print('----这个客户端没有发送数据----')
        else:
            print('-----客户端发送过来新数据----')
| true
|
3c9bab4ffcd9224fed8921e9a1a3c62452490dea
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_2/354.py
|
UTF-8
| 2,148
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sys
def min_from_hm(hm):
    """Convert an "HH:MM" string to minutes since midnight."""
    hours, minutes = hm.split(':')
    return int(hours) * 60 + int(minutes)
def first(arr):
    """Return the head of *arr*, or the sentinel 99999 when it is empty."""
    return arr[0] if len(arr) else 99999
def calc_requirements(in_a, in_b, out_a, out_b):
    """Count extra trains needed at stations A and B.

    in_a / in_b: sorted times (minutes) at which trains become available
    at A / B; out_a / out_b: sorted departure times from A / B.
    Returns (req_a, req_b): departures that could not reuse an arrived train.
    """
    # Work on copies so the caller's lists are left untouched, matching the
    # original slice-based consumption.
    arr_a, arr_b = list(in_a), list(in_b)
    dep_a, dep_b = list(out_a), list(out_b)
    req_a = req_b = 0
    avail_a = avail_b = 0

    def head(queue):
        # Inlined sentinel helper: empty queues never win the min().
        return queue[0] if queue else 99999

    while arr_a or arr_b or dep_a or dep_b:
        tick = min(head(arr_a), head(arr_b), head(dep_a), head(dep_b))
        # Arrivals are consumed before departures at the same minute.
        if tick in arr_a:
            arr_a.pop(0)
            avail_a += 1
        elif tick in arr_b:
            arr_b.pop(0)
            avail_b += 1
        elif tick in dep_a:
            if avail_a == 0:
                req_a += 1  # no train available: a new one is required
            else:
                avail_a -= 1
            dep_a.pop(0)
        elif tick in dep_b:
            if avail_b == 0:
                req_b += 1
            else:
                avail_b -= 1
            dep_b.pop(0)
    return req_a, req_b
if __name__ == '__main__':
    # Input format: number of cases; per case a turnaround time, the counts
    # of A->B and B->A trips, then one "HH:MM HH:MM" depart/arrive pair per trip.
    f = open(sys.argv[1])
    n_cases = int(f.readline())
    for case in range(n_cases):
        turnaround = int(f.readline())
        line = f.readline().split(' ')
        n_a, n_b = line
        n_a = int(n_a)
        n_b = int(n_b)
        sched_a = {}
        sched_b = {}
        in_a = []
        in_b = []
        out_a = []
        out_b = []
        for j in range(n_a):
            line = f.readline().split(' ')
            time_from, time_to = line
            sched_a[min_from_hm(time_from)] = min_from_hm(time_to)
            out_a.append(min_from_hm(time_from))
            # A train departing A becomes available at B after the turnaround.
            in_b.append(min_from_hm(time_to) + turnaround)
        for j in range(n_b):
            time_from, time_to = [min_from_hm(hm) for hm in f.readline().split(' ')]
            sched_b[time_from] = time_to
            out_b.append(time_from)
            in_a.append(time_to+ turnaround)
        in_a.sort()
        in_b.sort()
        out_a.sort()
        out_b.sort()
        #print in_a, in_b
        #print out_a, out_b
        req_a, req_b = calc_requirements(in_a, in_b, out_a, out_b)
        # NOTE: Python 2 print statement — this file predates Python 3.
        print 'Case #%d: %d %d' % (case + 1, req_a, req_b)
| true
|
c26e4d8e0aee614c0c3a8a53aa7261ae932291b7
|
Python
|
pauljxtan/imgtag
|
/imgtag/state.py
|
UTF-8
| 796
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
"""Provides a class for storing and passing around globally shared state.
There should ideally be as little in this module as possible.
"""
from PySide2.QtCore import QStringListModel
from PySide2.QtWidgets import QCompleter
from .data import get_all_tags
class GlobalState(object):
    """Stores all global state not handled by Qt."""
    # TODO: Refactor settings into here

    def __init__(self):
        self._tag_completer = QCompleter()
        # Sort by descending file count so the most-used tags complete first.
        tagnames = [tag[0] for tag in sorted(get_all_tags(), key=lambda t: -t[1])]
        self._tag_completer.setModel(QStringListModel(tagnames))

    @property
    def tag_completer(self) -> QCompleter:
        """A dropdown completer used for any tag entry widget."""
        return self._tag_completer
| true
|
def8acc5723f20722daa7bd54544aa800aa1b111
|
Python
|
yangnaGitHub/LearningProcess
|
/python/pop3.py
|
UTF-8
| 463
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Python ships the poplib module, which implements the POP3 protocol and can
# be used directly to fetch mail.
# POP3 retrieves the raw text of a message, not a readable email:
# step 1: download the raw message text with poplib;
# step 2: parse the raw text with the email package to rebuild the message.
import poplib

# Collect the account credentials and server address interactively.
email = input("Email: ")
password = input("Password: ")
pop3_server = input("POP3 server: ")
| true
|
05972f0cd3102fcda53c5b65c2db9fa2e84bcb4f
|
Python
|
magiob/drln_tennis
|
/MaDDPG.py
|
UTF-8
| 4,444
| 2.984375
| 3
|
[] |
no_license
|
import numpy as np
import random
import copy
from collections import namedtuple, deque
from model import Actor, Critic
from DDPG_agent import Agent
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 512 # minibatch size
UPDATE_EVERY = 10 # how often to update the network
GAMMA = 0.99 # discount factor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class MaDDPGAgent():
    """Multi-agent interacts with and learns from the environment."""

    def __init__(self, state_size, action_size, warmup, random_seed, nb_agents):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            nb_agents (int): number of agents
            warmup (int): number of iterations to warm-up before using policy
            random_seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(random_seed)
        self.nb_agents = nb_agents
        self.warmup = warmup
        # One independent DDPG agent per player; each seeded with its index.
        self.agents = [Agent(state_size=self.state_size, action_size=self.action_size, warmup=self.warmup, random_seed=i) for i in range(self.nb_agents)]
        # Replay memory, shared by all agents.
        self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

    def reset(self):
        """Reset every agent noise."""
        for agent in self.agents:
            agent.reset()

    def step(self, states, actions, rewards, next_states, dones):
        """Add experience to shared ReplayBuffer and take step for each agent"""
        for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
            self.memory.add(state, action, reward, next_state, done)
        # Learn every UPDATE_EVERY time steps.
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        if self.t_step == 0:
            # Learn, if enough samples are available in memory
            if len(self.memory) > BATCH_SIZE:
                for agent in self.agents:
                    # Each agent trains on its own independently drawn minibatch.
                    experiences = self.memory.sample()
                    agent.learn(experiences, GAMMA)

    def act(self, states, timestep, add_noise=True):
        """Act for each agent.

        NOTE(review): add_noise is accepted but never forwarded to agent.act —
        confirm whether the underlying Agent handles exploration noise itself.
        """
        return [agent.act(np.expand_dims(state, axis=0), timestep) for agent, state in zip(self.agents, states)]
class ReplayBuffer:
    """Fixed-capacity experience store shared by all agents."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        """Create the buffer.

        Params
        ======
            action_size (int): dimension of each action
            buffer_size (int): maximum number of stored experiences
            batch_size (int): experiences per sampled minibatch
            seed (int): seed for the random module
        """
        self.action_size = action_size
        self.memory = deque(maxlen=buffer_size)  # oldest entries fall off automatically
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        self.seed = random.seed(seed)

    def add(self, state, action, reward, next_state, done):
        """Store a single transition."""
        self.memory.append(self.experience(state, action, reward, next_state, done))

    def sample(self):
        """Draw a random minibatch and convert each field to a torch tensor on `device`."""
        batch = [e for e in random.sample(self.memory, k=self.batch_size) if e is not None]
        states = torch.from_numpy(np.vstack([e.state for e in batch])).float().to(device)
        actions = torch.from_numpy(np.vstack([e.action for e in batch])).float().to(device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in batch])).float().to(device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in batch])).float().to(device)
        # Booleans become 0/1 floats for the TD-target computation.
        dones = torch.from_numpy(np.vstack([e.done for e in batch]).astype(np.uint8)).float().to(device)
        return (states, actions, rewards, next_states, dones)

    def __len__(self):
        """Number of experiences currently stored."""
        return len(self.memory)
| true
|
7cfe15f8aea63fde9abe3ea85d5bd940722f0d56
|
Python
|
RAVIKANT431/dummy.project
|
/ravikant.py
|
UTF-8
| 379
| 3.875
| 4
|
[] |
no_license
|
# Read three values from the user and convert them to floats.
num1= input("enter first value:" )
num2= input("enter second value:" )
num3= input("enter third value:" )
num1=float(num1)
num2=float(num2)
num3=float(num3)
def max_num(num1, num2, num3):
    """Return the largest of the three values (built-in max does the comparison)."""
    return max(num1, num2, num3)
# Print the largest of the three user-supplied numbers.
print(max_num(num1,num2,num3))
| true
|
cc20de0fd27f9bcf8232eff2b5a26de830a2f670
|
Python
|
statistics-exercises/hypothesis-testing-13
|
/test_main.py
|
UTF-8
| 453
| 2.859375
| 3
|
[] |
no_license
|
import unittest
from main import *
class UnitTests(unittest.TestCase) :
    def test_statPower(self) :
        """Check statisticalPower against a hand-computed normal-CDF value.

        NOTE(review): `scipy`, `np`, `sample` and `statisticalPower` come from
        `main` via the star import — confirm they are all defined there.
        """
        psi4 = scipy.stats.norm.ppf(0.05)  # 5% lower critical value of N(0, 1)
        mdiff = 20 - sample
        for i in range(10) :
            # Power of a one-sided z-test with sigma=2 and n=i+1 observations.
            xv = mdiff / ( 2 / np.sqrt(i+1) ) + psi4
            myval = scipy.stats.norm.cdf(xv)
            self.assertTrue( np.abs(statisticalPower(20, 2, sample, i+1)-myval)<1e-7, "Your statistical power function is not working" )
| true
|
dbdeee88b347899da91128207330bc4b3af2f893
|
Python
|
eliben/code-for-blog
|
/2016/readline-samples/python/readline-complete-simple.py
|
UTF-8
| 1,073
| 3.296875
| 3
|
[
"Unlicense",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# Simple completion with the readline module.
#
# Tested with Python 3.4
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import readline
def make_completer(vocabulary):
    """Build a readline completer closed over *vocabulary*.

    readline calls the completer repeatedly with an increasing *state*
    index; returning None ends the completion session.
    """
    def completer(text, state):
        # A trailing space is appended to each hit because the Python
        # readline binding does not add one after a full completion.
        matches = [word + ' ' for word in vocabulary if word.startswith(text)]
        matches.append(None)  # sentinel terminating the session
        return matches[state]
    return completer
def main():
    """Interactive loop: echo each input line, with tab-completion enabled."""
    words = {'cat', 'dog', 'canary', 'cow', 'hamster'}
    readline.parse_and_bind('tab: complete')
    readline.set_completer(make_completer(words))
    try:
        while True:
            line = input('>> ').strip()
            print('[{0}]'.format(line))
    except (EOFError, KeyboardInterrupt) as e:
        # Ctrl-D / Ctrl-C ends the session cleanly.
        print('\nShutting down...')

if __name__ == '__main__':
    main()
| true
|
6765b089aee26e37c9c4f2a4f3636cce9f2b8f19
|
Python
|
kimmj8205/Python
|
/Study/countdown.py
|
UTF-8
| 157
| 3.34375
| 3
|
[] |
no_license
|
import time
def countdown(n):
    """Print n, n-1, ..., 1 with a 0.3 s pause after each, then "Go !"."""
    for remaining in range(n, 0, -1):
        print(remaining)
        time.sleep(0.3)
    print("Go !")
# Prompt for a number of seconds and start the countdown immediately.
countdown(int(input("Insert sec. :")))
| true
|
bdac38b8a12d14f54d3de45712f6e98aeb5a7502
|
Python
|
roarkemc/StatTools
|
/stattools/optimization/base.py
|
UTF-8
| 468
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
"""Defines the Optimizer abstract base class."""
import abc
class Optimizer(metaclass=abc.ABCMeta):
    """Abstract base class for function optimization.

    Concrete subclasses configure the algorithm's parameters in `__init__`
    and implement `optimize`, which receives an objective function, an
    initial guess, and any optional keyword parameters.
    """

    @abc.abstractmethod
    def optimize(self, *args, **kwargs):
        pass
| true
|
b1408635522f1a4b6873c393f66fc73778f3bbaf
|
Python
|
MrKolbaskin/insurance_company
|
/interface/layouts/layout_main.py
|
UTF-8
| 2,816
| 2.515625
| 3
|
[] |
no_license
|
import PySimpleGUI as sg
from interface.contracts import contracts
# Keys used to look up widgets in the window's element dictionary.
COMPANY_INFO = '-COMPANY_INFO-'
LOGS = '-LOGS-'
CONTRACTS_INFO = '-CONTRACTS_INFO-'
CONTRACTS = '-CONTRACTS-'
CURRENT_DEMAND = '-CURRENT_DEMAND-'

# Two rows of control buttons: next month / run-to-end, restart / edit terms.
buttons = [
    [
        sg.Button('Следующий месяц', button_color=('black', 'green'), size=(16, 1), font=('default', 13)),
        sg.Button('Симуляция до конца', size=(16, 1), font=('default', 13), button_color=('black', 'orange'))
    ],
    [
        sg.Button('Начать заново', button_color=('black', 'yellow'), size=(16, 1), font=('default', 13)),
        sg.Button('Изменить условия', size=(16, 1), button_color=('black', 'blue'), font=('default', 13))
    ]
]

# Table headers (Russian UI labels).
headers_contracts = ['Тип контракта', "Продолжительность контракта", "Максимальная сумма возмещенения", "Размер взноса", "Коэф-ты повреждения по страховым случаям"]
headers_events = ['Коэфиц-т повреждения']
#headers_actions = ['Последние действия']
headers_cond_contracts = ['Тип контракта', "Продолж-ть контракта", "Макс. сумма возмещенения", "Размер взноса"]

# Main window layout: contract terms + action log on top, company status and
# controls in the middle row, active-contracts table at the bottom.
layout_main = [
    [
        sg.Column(
            [
                [sg.Text('Условия контрактов', text_color='white', font=('default', 20))],
                [sg.Table(contracts(), headings=headers_cond_contracts, font=('default', 15), size=(None, 5), max_col_width=15, justification='center', key=CONTRACTS_INFO, hide_vertical_scroll=True)]
            ]
        ),
        sg.Column(
            [
                [sg.Text('Последние действия', text_color='white', font=('default', 20))],
                [sg.Table([[""]], font=('default', 15), size=(20, 5), auto_size_columns=False, def_col_width=52, max_col_width=30, justification='center', key=LOGS)]
            ]
        )
    ],
    [
        sg.Text('', text_color='white', size=(37, 6), font=('default', 15), justification='left', key=COMPANY_INFO),
        sg.Text('', text_color='white', size=(30, 6), font=('default', 15), justification='left', key=CURRENT_DEMAND),
        sg.Column(buttons, pad=((75, 0), None)),
        sg.Column([[sg.Button('Выход', size=(16, 1), button_color=('black', 'red'), font=('default', 13))]])
    ],
    [
        sg.Column(
            [
                [sg.Text('Действующие контракты компании', text_color='white', font=('default', 20))],
                [sg.Table([["", "", "", "", ""]], headings=headers_contracts, font=('default', 15), max_col_width=15, justification='center', key=CONTRACTS)]
            ]
        )
    ]
]
| true
|
a97dfb679136198d4895074771e8d04fa9f3edbc
|
Python
|
rosariomgomez/udacity_prog_foundations
|
/programming_foundations/lesson1/take_a_break.py
|
UTF-8
| 254
| 3.265625
| 3
|
[] |
no_license
|
import time
import webbrowser
num_breaks = 1
total_breaks = 3
print("This program started on "+ time.ctime())
# Every 10 seconds, open a "break" video in the default browser, three times.
while num_breaks <= total_breaks:
    time.sleep(10)
    webbrowser.open('http://www.youtube.com/watch?v=dQw4w9WgXcQ')
    num_breaks = num_breaks + 1
| true
|
c3f6ec6d42a0654aed417045691636bc1647416c
|
Python
|
HuDunYu/031902106
|
/test.py
|
UTF-8
| 836
| 3.046875
| 3
|
[] |
no_license
|
import unittest
from function import edit_text, count_keyword, count_switch, count_if_else
# Fixture: the keyword counters are exercised against the lines of c.txt.
with open("c.txt") as file_object:
    read_lines = file_object.readlines()
lines = edit_text(read_lines)


class MyTestCase(unittest.TestCase):
    def test_something1(self):
        # Total keyword count expected in the fixture file.
        total_num = count_keyword(lines)
        self.assertEqual(total_num, 35)  # add assertion here

    def test_something2(self):
        # Two switch statements, with 3 and 2 cases respectively.
        switch_num, case_num = count_switch(lines)
        self.assertEqual(switch_num, 2)  # add assertion here
        self.assertEqual(case_num, [3, 2])

    def test_something3(self):
        if_else_num, if_elseif_else_num = count_if_else(lines)
        self.assertEqual(if_else_num, 2)  # add assertion here
        self.assertEqual(if_elseif_else_num, 2)  # add assertion here


if __name__ == '__main__':
    unittest.main()
| true
|
b940173b53d8ba8585989fc7b5d9409018c49464
|
Python
|
NeonNihon/Pynet
|
/Class1/exercise7.py
|
UTF-8
| 449
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
import yaml
import json
def open_file(f):
    """Load and parse *f*, choosing YAML or JSON by substring of its name.

    NOTE(review): yaml.load without an explicit Loader is deprecated and
    unsafe for untrusted input; yaml.safe_load is the usual replacement.
    """
    with open(f, 'r') as handle:
        if 'yml' in f:
            return yaml.load(handle)
        if 'json' in f:
            return json.load(handle)
def print_list(lst):
    """Print each element of *lst* on its own line."""
    for item in lst:
        print(item)
# Load the same data from both serialisation formats and display each list.
new_yaml = open_file('exercise6.yml')
new_json = open_file('exercise6.json')
print("YAML")
print("=" * 8)
print_list(new_yaml)
print("JSON")
print("=" * 8)
print_list(new_json)
| true
|
b8bc78cf1050dda74d33c63d771fb8cf6eeac6f8
|
Python
|
AnshChoudhary/ZIRA---The-Virual-Assistant
|
/lyrics finder.py
|
UTF-8
| 231
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
import webbrowser
# Read a search phrase and URL-encode spaces by hand, one character at a time.
a = input("Search for lyrics: ")
L = list(a)
i = 0
while i < len(L):
    if L[i] == ' ':
        L[i] = '%20'
    i+= 1
searchTerm = ''.join(L)
# NOTE(review): urllib.parse.quote would handle all special characters, not just spaces.
webbrowser.open("https://genius.com/search?q="+searchTerm)
| true
|
fe9bf09fd3ea7d48fd662bae9eba7fa7db8c1817
|
Python
|
Brewgarten/c4-utils
|
/c4/utils/command.py
|
UTF-8
| 6,323
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
"""
Copyright (c) IBM 2015-2017. All Rights Reserved.
Project name: c4-utils
This project is licensed under the MIT License, see LICENSE
This library contains methods for executing commands, capturing their output
and raising exceptions accordingly.
Functionality
-------------
"""
import logging
import os
import shlex
import subprocess
import multiprocessing
import threading
from pwd import getpwuid
from os import geteuid
log = logging.getLogger(__name__)
class CommandException(Exception):
    """Raised when an executed command fails.

    :param command: the command array
    :type command: [str]
    :param returnCode: process exit status
    :type returnCode: int
    :param output: captured stdout
    :type output: str
    :param error: captured stderr
    :type error: str
    :param message: optional human-readable context, prefixed to the text
    :type message: str
    """
    def __init__(self, command, returnCode, output=None, error=None, message=None):
        self.command = command
        self.returnCode = returnCode
        self.output = output
        self.error = error
        self.message = message

    def __str__(self):
        text = "Command '{0}' returned non-zero exit status {1}".format(
            " ".join(self.command), self.returnCode)
        if self.message:
            text = "{0}: {1}".format(self.message, text)
        # stdout first, then stderr, each on its own line when present.
        parts = [text]
        if self.output:
            parts.append(self.output)
        if self.error:
            parts.append(self.error)
        return "\n".join(parts)
def execute(command, errorMessage=None, finallyClause=None, user=None):
    """
    Execute command, e.g.:

    .. code-block:: python

        execute(["/usr/lpp/mmfs/bin/mmstartup", "-a"], "Could not startup GPFS")

    :param command: the command array
    :type command: [str]
    :param errorMessage: the message to be displayed in case of an error
    :type errorMessage: str
    :param finallyClause: the function executed in the finally clause in case of an error
    :type finallyClause: func
    :param user: run the command through sudo as this user (skipped when it
        is already the effective user)
    :type user: str
    :returns: output
    :raises: :class:`CommandException`
    """
    try:
        # Drop the sudo indirection when we already are the requested user.
        if user and getpwuid(geteuid()).pw_name == user:
            user = None
        if user:
            log.debug("Executing: %s as %s" % (" ".join(command), user))
            command = ["/usr/bin/sudo", "-u", user] + command
        else:
            log.debug("Executing: %s" % " ".join(command))
        # kick off process
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # retrieve output and errors
        output, error = process.communicate()
        output = output.rstrip()
        error = error.rstrip()
        # check return code
        returnCode = process.poll()
        if returnCode:
            raise CommandException(command, returnCode, output, error, errorMessage)
        if output:
            log.debug(output)
        return output
    except OSError as e:
        # e.g. executable not found — normalise to a CommandException.
        raise CommandException(command, -1, error=str(e), message=errorMessage)
    except Exception as e:
        # NOTE(review): 'raise e' rewrites the traceback origin; a bare
        # 'raise' would preserve it.
        raise e
    finally:
        # Runs on success as well, despite the parameter description above.
        if finallyClause:
            finallyClause()
def run(command, workingDirectory=None):
    """
    Run command using the current or specified working directory

    :param command: command
    :type command: str
    :param workingDirectory: working directory (defaults to the current one)
    :type workingDirectory: str
    :returns: tuple of stdout, stderr and return code
    :rtype: (stdout, stderr, status)
    """
    cwd = workingDirectory or os.getcwd()
    if not os.path.exists(cwd):
        # Mirror a failed command: empty stdout, message on stderr, status 1.
        return "", "Path '{path}' does not exist".format(path=cwd), 1
    log.debug("Running '%s' on '%s'", command, cwd)
    proc = subprocess.Popen(
        shlex.split(command),
        cwd=cwd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    # retrieve output and errors, then the exit status
    out, err = proc.communicate()
    return out.rstrip(), err.rstrip(), proc.poll()
def executeNforget(command, process_level=2):
    """
    Execute a command as a detached process not caring about return status.
    Don't leave zombies behind

    :param command: The command array or the command string. If string then shell is assumed to be true
    :type command: [str] or str
    :param process_level: functions internal recursion related value. Do not change!
    :type process_level: int
    """
    log.debug("Executing: '%s', process_level: %d", " ".join(command), process_level)
    if process_level == 0:
        if not isinstance(command, list):
            command = command.split()
        try:
            # execute(command) # Fails to "forget" because opens stdin
            proc = subprocess.Popen(command, shell=False,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # NOTE: returncode is None here because Popen does not wait, so
            # log it with %s (the old %d raised a logging format error).
            log.debug("returncode = %s , command = '%s'", proc.returncode, " ".join(command))
        except Exception as e:  # fixed: 'except Exception, e' is Python-2-only syntax
            log.error('executeNforget exception: %s', e)
    else:
        # Double-fork-like trick via multiprocessing: the intermediate process
        # is joined so the final child is reparented and never left a zombie.
        p = multiprocessing.Process(target=executeNforget,
                                    args=(command,),
                                    kwargs={'process_level': (process_level - 1)})
        p.start()
        if process_level == 2:
            p.join()
def executeRemotely_callbackExecutor(ssh, cmd, callback):
    """Run the ssh-wrapped command and feed its output to *callback*."""
    return callback(execute(ssh + cmd))
def executeRemotely(cmd, host, user='root', callback=None):
    """
    Execute command on a remote machine/node via SSH connection.
    This is an fire and forget implementation - no status is returned.

    :param cmd: Array of strings defining command and its arguments
    :type cmd: [str]
    :param host: Target host to execute command on
    :type host: string
    :param user: User used for SSH authorization on remote host (default: root)
    :type user: string
    :param callback: optional callable that receives the command output
    """
    ssh_prefix = ['/usr/bin/ssh',
                  '-o', 'PasswordAuthentication=no',  # '-o', 'StrictHostKeyChecking=no',
                  '-l', user, host]
    if hasattr(callback, '__call__'):
        # Run the command in a thread so the callback can consume its output.
        return threading.Thread(target=executeRemotely_callbackExecutor,
                                args=(ssh_prefix, cmd, callback)).start()
    executeNforget(ssh_prefix + cmd)
    return None
| true
|
341053ae0faf77e62ec655c3f13736461b8ba723
|
Python
|
Krasniy23/Hillel_Krasnoshchok
|
/Lesson_10/HW10_1.py
|
UTF-8
| 194
| 3.640625
| 4
|
[] |
no_license
|
# Ask for a file name, then copy typed lines into it until a blank line.
file_name = input('Cоздать новый файл: ')
with open(file_name, 'w') as file:
    while True:
        s = input()
        if s == '':
            break
        file.write(s + '\n')
| true
|
43b1ad0b09aace400da18d8cd4da55acd2096ac0
|
Python
|
Mi7ai/EI1022
|
/L2/L2Ex14.py
|
UTF-8
| 325
| 3.265625
| 3
|
[] |
no_license
|
from L2.L2Ex11 import first
from L2.L2Ex13 import take_while
def squares():
    """Yield the perfect squares 1, 4, 9, ... indefinitely."""
    k = 1
    while True:
        yield k * k
        k += 1
def escapicua(n):
    """Return True when the decimal digits of *n* read the same backwards."""
    digits = str(n)
    return digits == digits[::-1]
# First 100 squares, squares below 10, and the first 10 palindromic squares.
a = first(100,squares())
b = take_while(lambda n: n<10 ,squares())
c = first(10, filter(escapicua,squares()))
print(list(c))
| true
|
7e6c76e9797b83bc3218479cab07df0ae10fa6ac
|
Python
|
godiatima/Gui_apps
|
/spinner_1.py
|
UTF-8
| 2,553
| 3.015625
| 3
|
[] |
no_license
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib
class SpinnerWindow(Gtk.Window):
    """Main window: a spinner plus a start/stop countdown timer.

    Fixes over the original: the "clicked" handler method now matches the
    connected name ('on_buttonStart_clicked'), the stop button is created
    with Gtk.Button (was 'Gtk.buttonStop'), and the 'buttnStop' /
    'set_senstive' typos are corrected.
    """

    def __init__(self, *args, **kwargs):
        Gtk.Window.__init__(self, title="Musify")
        self.set_border_width(10)

        mainBox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
        self.add(mainBox)

        self.spinner = Gtk.Spinner()
        mainBox.pack_start(self.spinner, True, True, 0)

        self.label = Gtk.Label()
        mainBox.pack_start(self.label, True, True, 0)

        # Seconds to count down, editable by the user.
        self.entry = Gtk.Entry()
        self.entry.set_text('10')
        mainBox.pack_start(self.entry, True, True, 0)

        self.buttonStart = Gtk.Button("Start Timer")
        self.buttonStart.connect("clicked", self.on_buttonStart_clicked)
        mainBox.pack_start(self.buttonStart, True, True, 0)

        # BUG FIX: was Gtk.buttonStop(...) and pack_start(self.buttnStop, ...).
        self.buttonStop = Gtk.Button("Stop Timer")
        self.buttonStop.set_sensitive(False)
        self.buttonStop.connect("clicked", self.on_buttonStop_clicked)
        mainBox.pack_start(self.buttonStop, True, True, 0)

        self.timeout_id = None  # GLib timeout source id while the timer runs
        self.connect("destroy", self.on_SpinnerWindow_destroy)

    def on_buttonStart_clicked(self, widget, *args):
        """
        Handles clicked event of buttonStart
        """
        # BUG FIX: this method was named 'on_buttonStart', so the signal
        # connection in __init__ raised AttributeError.
        self.start_timer()

    def on_buttonStop_clicked(self, widget, *args):
        """
        Handles clicked event of buttonStop
        """
        self.stop_timer('Stopped from button')

    def on_SpinnerWindow_destroy(self, widget, *args):
        """
        Handles destroy event of main window: cancel the timer and quit.
        """
        if self.timeout_id:
            GLib.source_remove(self.timeout_id)
            self.timeout_id = None
        Gtk.main_quit()

    def on_timeout(self, *args, **kwargs):
        """ a timeout function.
        Return False to stop it.
        This is not a precise timer since next timeout is recalculated based on the current time."""
        self.counter -= 1
        if self.counter <= 0:
            self.stop_timer('Reached time out')
            return False
        # counter ticks 4 times per second, so divide by 4 for whole seconds
        self.label.set_label('Remaining: ' + str(int(self.counter / 4)))
        return True

    def start_timer(self):
        """ Start the timer. """
        # BUG FIX: was 'set_senstive' (typo) on both buttons.
        self.buttonStart.set_sensitive(False)
        self.buttonStop.set_sensitive(True)
        # time out will check every 250 milliseconds
        self.counter = 4 * int(self.entry.get_text())
        self.label.set_label('Remaining: ' + str(int(self.counter / 4)))
        self.spinner.start()
        self.timeout_id = GLib.timeout_add(250, self.on_timeout, None)

    def stop_timer(self, alabeltext):
        """Stop spinner and timer, restore button states and show *alabeltext*."""
        if self.timeout_id:
            GLib.source_remove(self.timeout_id)
            self.timeout_id = None
        self.spinner.stop()
        self.buttonStart.set_sensitive(True)
        self.buttonStop.set_sensitive(False)
        self.label.set_label(alabeltext)
# Build and show the window, then enter the GTK main loop.
win = SpinnerWindow()
win.show_all()
Gtk.main()
| true
|
76d08bc5473701a91b53841e0bc93007fb414999
|
Python
|
ChangMQ267/VOC2COCO
|
/findPhoto.py
|
UTF-8
| 1,343
| 2.59375
| 3
|
[] |
no_license
|
import os
import shutil
def findPhoto(PHOTOPATH, filename, SAVE_PATH):
    """Move the JPEG matching annotation *filename* from PHOTOPATH to SAVE_PATH.

    Prints the annotation name when no matching photo exists.

    BUG FIX: str.strip(".xml") removes *characters* from both ends, not the
    suffix — e.g. "exam.xml" became "exa". os.path.splitext removes only the
    extension.
    """
    base = os.path.splitext(str(filename))[0]
    photourl = PHOTOPATH + base + ".jpg"
    if os.path.exists(photourl):
        shutil.move(photourl, SAVE_PATH)
    else:
        print(filename)
def findXML(PATH, XMLPATH):
    """Split annotation basenames under PATH 80/20 into trainval.txt / test.txt.

    BUG FIX: the original used str.strip(".xml"), which strips the characters
    '.', 'x', 'm', 'l' from both ends and mangled names such as
    "exam.xml" -> "exa"; os.path.splitext removes only the extension.
    """
    i = 0
    train = 0.8  # fraction of each directory assigned to trainval
    for (dirpath, dirnames, filenames) in os.walk(PATH):
        print(len(filenames))
        with open((XMLPATH + "trainval.txt"), 'w+') as tr:
            with open((XMLPATH + "test.txt"), 'w+') as te:
                for filename in filenames:
                    base = os.path.splitext(str(filename))[0]
                    if i < len(filenames) * train:
                        tr.write(base + "\n")
                    else:
                        te.write(base + "\n")
                    i += 1
if __name__ == '__main__':
    # Dataset locations (adjust to the local Fish-PascalVOC-export checkout).
    PATH = "C:/Users/chang/Desktop/Fish-PascalVOC-export/Annotations/"
    XMLPATH = "C:/Users/chang/Desktop/Fish-PascalVOC-export/ImageSets/Main/"
    SAVE_PATH = "C:/Users/chang/Desktop/Fish-PascalVOC-export/images/"
    PHOTOPATH = "C:/Users/chang/Desktop/Fish-PascalVOC-export/JPEGImages/"
    # for (dirpath, dirnames, filenames) in os.walk(PATH):
    #     for filename in filenames:
    #         findPhoto(PHOTOPATH,filename,SAVE_PATH)
    findXML(PATH, XMLPATH)
| true
|
ec945b4e4ec4387e7486f2b473005fdcf83c7347
|
Python
|
BIAOXYZ/variousCodes
|
/_CodeTopics/LeetCode/601-800/000738/000738.py
|
UTF-8
| 1,616
| 3.15625
| 3
|
[] |
no_license
|
class Solution(object):
    def monotoneIncreasingDigits(self, N):
        """
        :type N: int
        :rtype: int

        Largest number <= N whose digits are monotonically non-decreasing.
        Strategy: at the first descent, repeatedly decrement the left digit
        and turn everything to its right into 9s.

        BUG FIX: int_to_list used 'N /= 10', which is float division on
        Python 3 and looped forever; '//=' keeps integer semantics on both
        Python 2 and 3.
        """
        def has_increasing_digits(N):
            # True when digits are already non-decreasing left to right.
            lis = int_to_list(N)
            return lis == sorted(lis)

        def int_to_list(N):
            # Most-significant digit first; N == 0 yields [].
            lis = []
            while N > 0:
                lis.insert(0, N % 10)
                N //= 10
            return lis

        def list_to_int(lis):
            num = lis[0]
            for digit in lis[1:]:
                num = num * 10 + digit
            return num

        if has_increasing_digits(N):
            return N
        lis = int_to_list(N)
        length = len(lis)
        replaceLeftNumsFlag = False
        for i in range(1, length):
            if replaceLeftNumsFlag:
                lis[i] = 9  # everything right of the fixed position becomes 9
                continue
            if lis[i] < lis[i - 1]:
                replaceLeftNumsFlag = True
                # Walk left while the decrement keeps breaking monotonicity.
                while i > 0 and lis[i] < lis[i - 1]:
                    lis[i] = 9
                    lis[i - 1] -= 1
                    i -= 1
        if lis[0] == 0:
            lis.pop(0)  # a leading digit may have been decremented to 0
        return list_to_int(lis)
"""
https://leetcode-cn.com/submissions/detail/131334698/
302 / 303 个通过测试用例
状态:通过
执行用时: 28 ms
内存消耗: 13 MB
执行用时:28 ms, 在所有 Python 提交中击败了25.00%的用户
内存消耗:13 MB, 在所有 Python 提交中击败了23.26%的用户
"""
| true
|
696cfb1b8ff8333d5f89a1315b2b15a3d88b3d36
|
Python
|
ergoregion/Rota-Program
|
/Rota_System/Appointments.py
|
UTF-8
| 2,006
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
__author__ = 'Neil Butcher'
from PyQt4.QtCore import pyqtSignal, QObject
class AppointmentAbstract(QObject):
    """Base class for appointments: a role to be filled plus a free-text
    note and a disabled flag. Emits ``changed`` on every mutation.

    Subclasses must implement ``vacate()`` (called when disabling).
    """

    # Emitted whenever the note or the disabled state is mutated.
    changed = pyqtSignal()

    def __init__(self, parent, role):
        QObject.__init__(self, parent)
        self.role = role
        self._note = ''
        self._disabled = False

    @property
    def note(self):
        """Free-text note attached to this appointment."""
        return self._note

    @note.setter
    def note(self, value):
        self._note = value
        self.changed.emit()  # notify observers of the edit

    @property
    def disabled(self):
        """True when this appointment must not be filled."""
        return self._disabled

    @disabled.setter
    def disabled(self, value):
        # Disabling vacates first so nobody stays assigned to a slot that
        # can no longer be filled; note this also vacates when re-setting
        # True on an already-disabled appointment.
        if value:
            self.vacate()
        self._disabled = value
        self.changed.emit()
class Appointment(AppointmentAbstract):
    """A concrete appointment: one role at one event, optionally filled
    by a person."""

    # Emitted after the assigned person has been removed.
    vacated = pyqtSignal()
    # Emitted with the person after the appointment has been filled.
    filled = pyqtSignal(QObject)

    def __init__(self, parent, role, event):
        AppointmentAbstract.__init__(self, parent, role)
        self._event = event
        self._person = None  # nobody appointed yet

    @property
    def event(self):
        """The event this appointment belongs to."""
        return self._event

    @property
    def date(self):
        """Date of the underlying event."""
        return self._event.date

    @property
    def time(self):
        """Time of the underlying event."""
        return self._event.time

    def datetime(self):
        # NOTE(review): plain method (not a property), unlike date/time —
        # callers must invoke it; confirm the asymmetry is intentional.
        return self._event.datetime()

    @property
    def person(self):
        """The person currently appointed, or None when vacant."""
        return self._person

    def vacate(self):
        """Remove the assigned person, emitting ``vacated`` then ``changed``.

        Does nothing (and emits nothing) when already vacant.
        """
        if self._person is None:
            # NOTE(review): returns self here but None on the other path —
            # callers should not rely on the return value.
            return self
        self._person = None
        self.vacated.emit()
        self.changed.emit()

    def appoint(self, person):
        """Assign ``person``, emitting ``filled`` then ``changed``."""
        self._person = person
        self.filled.emit(person)
        self.changed.emit()

    def is_filled(self):
        """Return True when a person is assigned."""
        return self._person is not None
class AppointmentPrototype(AppointmentAbstract):
    """Template for a role's appointment: stamps out one concrete
    Appointment per event, copying this prototype's note and disabled
    state onto each copy."""

    def __init__(self, parent, role):
        AppointmentAbstract.__init__(self, parent, role)

    def create_in(self, event):
        """Build a concrete Appointment for ``event`` mirroring this prototype."""
        appointment = Appointment(event, self.role, event)
        appointment.note = self.note
        appointment.disabled = self.disabled
        return appointment

    def vacate(self):
        """Prototypes never hold a person, so vacating is a no-op."""
        pass

    def is_filled(self):
        """A prototype is never filled."""
        return False
| true
|
a7481bf74f4228fe90435afecd4dd471ea705573
|
Python
|
Fracappo87/ML
|
/logisticregression/test/test_mylogisticmodel.py
|
UTF-8
| 5,863
| 2.984375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 13 18:23:38 2017
Author: Francesco Capponi <capponi.francesco87@gmail.com>
License: BSD 3 clause
"""
import unittest
import numpy as np
import numpy.testing as npt
from ..mylogisticmodel import MyLogisticRegressionClassifier
class MyLogisticModelClassifierTest(unittest.TestCase):
def test_init(self):
    """Testing class initialization"""
    print("Testing initialization of logistic model class")
    # Each dictionary carries at least one invalid constructor argument
    # (misspelled optimizer name, non-string flag, numeric start value),
    # so construction must raise ValueError for every entry.
    test_dictionaries = [{'minibatch_size': 3, 'optimization': 'GadentDscent', 'start': 'random', 'offset': 0},
                         {'minibatch_size': 3, 'optimization': False, 'start': 'pabolo', 'offset': 0},
                         {'minibatch_size': 3, 'optimization': 'adam', 'start': 'pabolo', 'offset': 0},
                         {'minibatch_size': 3, 'optimization': 'adam', 'start': .9, 'offset': 0}]
    for dictionary in test_dictionaries:
        self.assertRaises(ValueError, MyLogisticRegressionClassifier, **dictionary)
    # A valid construction must expose the documented defaults.
    logit_model = MyLogisticRegressionClassifier(minibatch_size=3)
    self.assertEqual(logit_model.minibatch_size,3,'Testing minibatch_size attribute')
    self.assertEqual(logit_model.learning_type,'training_based')
    self.assertEqual(logit_model.optimization,None,'Testing optimization flag')
    self.assertEqual(logit_model.start,'random','Testing parameters initialization flag')
    self.assertEqual(logit_model.offset,0.,'Testing offset attribute')
    self.assertEqual(logit_model.n_iterations, 10, 'Testing n_iterations attribute')
    self.assertEqual(logit_model.learning_rate, .5, 'Testing learning_rate attribute')
def test_initialize(self):
    """Testing weights initialization"""
    print("Testing initialization of logistic model weights")
    offset = 0.00000345
    test_dimensions = [3, 7]
    # Fixed seed makes the 'random' initialization deterministic; the
    # expected arrays below assume _initialize draws W first, then b,
    # for each dimension in order — TODO confirm against _initialize.
    np.random.seed(1)
    w_check_values = [np.array([[4.170220e-01, 7.203245e-01, 1.143748e-04]]), np.array([[0.14675589, 0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673,0.41919451]])]
    b_check_values = [0.302333, 0.68522]
    for dimension, w_value, b_value in zip(test_dimensions, w_check_values, b_check_values):
        logit_model = MyLogisticRegressionClassifier()
        logit_model._initialize(dimension)
        # Weights are a (1, n_features) row vector; bias is (1, 1).
        self.assertSequenceEqual((1, dimension), logit_model.W.shape, 'Testing shape of weights array for random initialization')
        self.assertSequenceEqual((1, 1), logit_model.b.shape, 'Testing shape of bias array for random initialization')
        npt.assert_array_almost_equal(logit_model.W, w_value, err_msg='Testing weights array random initialization')
        npt.assert_array_almost_equal(logit_model.b, b_value, err_msg='Testing bias random initialization')
    # 'uniform' start fills every weight and the bias with the offset.
    logit_model = MyLogisticRegressionClassifier(start = 'uniform', offset = offset)
    logit_model._initialize(30)
    self.assertSequenceEqual((1, 30), logit_model.W.shape, 'Testing shape of weights array for uniform initialization')
    self.assertSequenceEqual((1, 1), logit_model.b.shape, 'Testing shape of bias array for uniform initialization')
    npt.assert_array_almost_equal(logit_model.W, offset, err_msg='Testing weights array uniform initialization')
    npt.assert_array_almost_equal(logit_model.b, offset, err_msg='Testing bias uniform initialization')
def test_forward_prop(self):
    """Testing forward propagation"""
    print("Testing forward propagation")
    # Uniform start with offset 1. makes all weights and the bias exactly
    # 1, so the expected activations/costs below are deterministic.
    logit_model = MyLogisticRegressionClassifier(offset = 1., start = 'uniform')
    # Cases: one sample / four features, two samples / two features,
    # three samples / three features (rows are samples — TODO confirm).
    X_trains = [np.array([[.5, -.4, .7, -.1]]), np.array([[0.5, 1.],[-0.5, 0.4]]), np.array([[0.5, 0., 0.],[0.5, 0., 0.],[0.5, 0., 0.]])]
    Y_trains = [np.array([[0]]), np.array([[0],[1]]), np.array([[1],[1],[1]])]
    expected_activations = [np.array([[ 0.8455347]]), np.array([[0.9241418, 0.7109495]]), np.array([[0.8175744, 0.8175744, 0.8175744]])]
    expected_costs = [1.8677858, 1.4600217, 0.2014133]
    for X_train, Y_train, expected_activation, expected_cost in zip(X_trains, Y_trains, expected_activations, expected_costs):
        logit_model._initialize(X_train.shape[1])
        activation, cost = logit_model._forward_prop(X_train, Y_train)
        np.testing.assert_array_almost_equal(activation, expected_activation, err_msg = 'Testing activation function values')
        np.testing.assert_array_almost_equal(cost, expected_cost, err_msg = 'Testing computation of the cost function')
def test_backward_prop(self):
"""Testing backward propagation"""
print("Testing backward propagation")
# Trivial case, learning rate set to zero
logit_model = MyLogisticRegressionClassifier(offset = 1., start = 'uniform', learning_rate=0.)
X_train = np.array([[.5, -.4, .7, -.1]])
Y_train = np.array([[0]])
logit_model._initialize(X_train.shape[1])
activation, cost = logit_model._forward_prop(X_train, Y_train)
logit_model._back_prop(activation, X_train, Y_train)
np.testing.assert_array_equal(logit_model.W, 1., "Testing weights array after backprop, zero learning rate")
np.testing.assert_array_equal(logit_model.b, 1., "Testing bias array after backprop, zero learning rate")
# Non trivial case, learning rate = 0.5, as default
logit_model = MyLogisticRegressionClassifier(offset = 1., start = 'uniform')
X_trains = [np.array([[.5, -.4, .7, -.1]]), np.array([[0.5, 1.],[-0.5, 0.4]]), np.array([[0.5, 0., 0.],[0.5, 0., 0.],[0.5, 0., 0.]])]
Y_trains = [np.array([[0]]), np.array([[0],[1]]), np.array([[1],[1],[1]])]
| true
|
419a335aac1bb48636ae2ba54a383b77a41acf00
|
Python
|
jamie-g/wardrobe-mix
|
/polyvore_main.py
|
UTF-8
| 1,969
| 2.609375
| 3
|
[] |
no_license
|
from random import choice
from flask import Flask, render_template, request
import polyvore
import os

app = Flask(__name__)

import logging
import requests
import google_scrape

# Route handlers log through the Flask app logger.
logger = app.logger

# Google Shopping product-search endpoint, restricted to US results.
GOOGLE_URL = "https://www.googleapis.com/shopping/search/v1/public/products?country=US"
# SECURITY: hard-coded API key committed to source control; move it to an
# environment variable / secrets store and rotate the key.
GOOGLE_KEY = "AIzaSyDYSIyGTRNGRvv2XDaGplJ7cp5kB0lJzbQ"

# BARCODE_WEB
def is_int(string, default=0):
    """Parse ``string`` as an int, returning ``default`` when it cannot be.

    Bug fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; only conversion errors are caught now.
    """
    try:
        return int(string)
    except (ValueError, TypeError):
        return default
def is_barcode(terms):
    """Return True when ``terms`` looks like a UPC-A (12) or EAN-13 (13)
    barcode: the right length and all digits."""
    terms = terms.strip()
    if len(terms) not in (12, 13):
        logger.debug("Terms is %d long"%(len(terms)))
        return False
    # Bug fix: the original tested `not is_int(terms)`, but is_int returns
    # 0 both on failure AND for the valid all-zero barcode "000000000000",
    # so that barcode was wrongly rejected. Parse directly instead.
    try:
        int(terms)
    except (ValueError, TypeError):
        logger.debug("Terms is not an integer")
        return False
    return True
# def scan():
# http://zxing.appspot.com/scan?ret=http://foo.com/products/{CODE}/description&SCAN_FORMATS=UPC_A,EAN_13
def get_title_from_google(barcode):
    """Resolve ``barcode`` via the Google Shopping API and return the
    first product title, falling back to the barcode itself when the
    lookup returns no items."""
    r = requests.get(GOOGLE_URL, params = {"q": barcode, "key": GOOGLE_KEY})
    # Bug fix: in requests >= 1.0 Response.json is a *method*; the bare
    # attribute access returned the bound method, so the subscription
    # below raised TypeError. (If this project pins requests < 1.0,
    # where .json was a property, revert this call.)
    results = r.json()
    num_results = results['totalItems']
    if num_results > 0:
        terms = results['items'][0]['product']['title']
    else:
        terms = barcode
    return terms
@app.route('/')
def index_page():
    """Landing page with the search form."""
    return render_template('index.html')
@app.route('/search')
def search():
    """Search route: look up query ``q`` and paginate three sets per page ``p``."""
    terms = request.args.get("q")
    p = is_int(request.args.get("p"), 1)  # page number, defaulting to 1
    logger.debug("Searching for %s"%terms)
    if is_barcode(terms):
        logger.debug("%s is a barcode"%terms)
        # Barcode path: resolve it to a product title first, then search
        # Polyvore through the Google scraper.
        new_terms = get_title_from_google(terms)
        logger.debug("Our new terms are %s from google"%new_terms)
        results = google_scrape.get_polyvore_from_google(new_terms)
    else:
        # Free-text path: query Polyvore directly.
        results = polyvore.PolyvoreSet.search(terms)
    logger.debug("Result set is %d items"%(len(results)))
    if len(results) > 0:
        # Slice out page p (three sets per page); `more` drives the
        # "next page" link in the template.
        start = (p-1)*3
        end = 3*p
        more = end < len(results)
        return render_template("results.html", sets=results[start:end], terms=terms, p=p, more=more)
    else:
        return render_template("search_again.html")
if __name__ == '__main__':
    # Bind on all interfaces; honour the PORT env var (Heroku-style deploy).
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| true
|
ab07a78a7283a49a5575c5cc4b9062069c830664
|
Python
|
trevorkt/learnpython
|
/MIT.OCW/ps1a.v2.py
|
UTF-8
| 352
| 3.78125
| 4
|
[] |
no_license
|
# Problem Set 1, Problem 1
# Trevor T
import math # for sqrt()
def isprime(x):
    """Return True when abs(int(x)) is a prime number.

    Negative inputs are folded to their absolute value, matching the
    original behaviour. Works under both Python 2 and 3.

    Bug fix: the original trial-division bound was range(3, int(sqrt(x)), 2),
    which *excludes* the square root itself, so perfect odd squares
    (9, 25, 49, ...) were wrongly reported as prime. The bound is now
    sqrt(x) inclusive.
    """
    x = abs(int(x))
    if x < 2:
        return False
    if x == 2:
        return True
    # Direct parity test replaces the original (x/2)*2 == ((x*1.0)/2)*2 trick.
    if x % 2 == 0:
        return False
    for div in range(3, int(math.sqrt(x)) + 1, 2):
        if x % div == 0:
            return False
    return True
# Python 2 entry point (raw_input and print-statement syntax).
x = int(raw_input('Enter a positive integer: '))
print isprime(x)
| true
|
57c0764b33f8a8784d59334585693a0287b0b886
|
Python
|
trams/top100movies
|
/test_application.py
|
UTF-8
| 596
| 2.65625
| 3
|
[] |
no_license
|
import application

# Module-level fixture: one State instance shared by every test below,
# loaded from the checked-in sample movie catalogue.
state = application.State("test_data/movies.json")
def test_not_existing_one():
    """Unknown terms yield an empty result list."""
    assert state.naive_get("abracadabra") == []
    # Repeated identical query: presumably guards against a caching layer
    # mutating or caching stale results between calls — TODO confirm intent.
    assert state.naive_get("abracadabra") == []
def test_empty_query():
    """An empty query matches nothing in either implementation."""
    assert state.naive_get("") == []
    assert state.get("") == []
def test_simple_query():
    """naive_get and get agree on a single-word query."""
    assert state.naive_get("garcia") == ["City Lights"]
    assert state.get("garcia") == ["City Lights"]
def test_repeated_word():
    """Repeating a word in the query must not duplicate (or drop) results."""
    assert state.naive_get("garcia garcia") == ["City Lights"]
    assert state.get("garcia garcia") == ["City Lights"]
| true
|
40dc1b5c6b7b44aeb3da9248ea1558a0021982a0
|
Python
|
dack/text-based-atk
|
/game/enemies.py
|
UTF-8
| 2,544
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
import random
class Enemy:
    """Base enemy: name, hit points, damage, crit chance, plus two random
    dietary flags (gluten-free / sugar-free) rolled at construction."""

    def __init__(self, name, hp, damage, critChance):
        self.name = name
        # 50/50 random dietary flags.
        self.gf = bool(random.getrandbits(1))
        self.sf = bool(random.getrandbits(1))
        self.hp = hp
        # Effective damage adds a random crit component scaled by critChance.
        self.damage = damage + random.randint(1, 5) * critChance
        self.critChance = critChance

    def is_alive(self):
        """Return True while hit points remain."""
        return self.hp > 0

    def is_gf(self):
        """Return the "Gluten Free" tag, or "" when not gluten-free.

        Bug fix: the original fell through to ``return null`` — an
        undefined name that raised NameError (``null`` is not Python).
        """
        return "Gluten Free" if self.gf else ""

    def is_sf(self):
        """Return the "Sugar Free" tag, or "" when not sugar-free.

        Bug fix: same ``return null`` NameError as is_gf.
        """
        return "Sugar Free" if self.sf else ""
class CrinkleCookie(Enemy):
    """High-HP cookie boss enemy."""

    def __init__(self):
        # Run the base initializer first: self.gf/self.sf only exist after
        # Enemy.__init__. The original interpolated the *bound methods*
        # is_gf/is_sf (never called) into the name, producing
        # "<bound method ...>" garbage instead of the dietary tags.
        Enemy.__init__(self, name="Crinkle Cookie Behemoth",
                       hp=5000,
                       damage=10,
                       critChance=.3)
        tags = [label for flag, label in ((self.gf, "Gluten Free"),
                                          (self.sf, "Sugar Free")) if flag]
        self.name = " ".join(tags + [self.name])
class BananaBread(Enemy):
    """Mid-tier bread enemy with a high crit chance."""

    def __init__(self):
        # Base init first: self.gf/self.sf only exist afterwards; the
        # original formatted the unbound is_gf/is_sf methods into the name.
        Enemy.__init__(self, name="Banana Bread Berserker",
                       hp=1000,
                       damage=5,
                       critChance=.5)
        tags = [label for flag, label in ((self.gf, "Gluten Free"),
                                          (self.sf, "Sugar Free")) if flag]
        self.name = " ".join(tags + [self.name])
class Turkey(Enemy):
    """Tanky poultry enemy."""

    def __init__(self):
        # Base init first: self.gf/self.sf only exist afterwards; the
        # original formatted the unbound is_gf/is_sf methods into the name.
        Enemy.__init__(self, name="Infernal Heritage Turkey",
                       hp=3000,
                       damage=4,
                       critChance=.2)
        tags = [label for flag, label in ((self.gf, "Gluten Free"),
                                          (self.sf, "Sugar Free")) if flag]
        self.name = " ".join(tags + [self.name])
class Yogurt(Enemy):
    """Low-threat dairy enemy."""

    def __init__(self):
        # Base init first: self.gf/self.sf only exist afterwards; the
        # original formatted the unbound is_gf/is_sf methods into the name.
        Enemy.__init__(self, name="Yogurt Brute",
                       hp=700,
                       damage=3,
                       critChance=.1)
        tags = [label for flag, label in ((self.gf, "Gluten Free"),
                                          (self.sf, "Sugar Free")) if flag]
        self.name = " ".join(tags + [self.name])
class Jam(Enemy):
    """Weak enemy whose name carries a randomly chosen jam flavour."""

    def __init__(self):
        # "Rasberry" spelling kept from the original data.
        jamFlavors = ["Rasberry", "Cherry", "Gooseberry", "Indiscriminate",
                      "Strange", "Old", "Blueberry", "Rhubarb", "Orange",
                      "Strawberry"]
        # random.choice is the idiomatic equivalent of indexing with
        # randint(0, len-1).
        flavor = random.choice(jamFlavors)
        # Base init first: self.gf/self.sf only exist afterwards; the
        # original formatted the unbound is_gf/is_sf methods into the name.
        Enemy.__init__(self, name="{} Chu Jam".format(flavor),
                       hp=500,
                       damage=3,
                       critChance=.1)
        tags = [label for flag, label in ((self.gf, "Gluten Free"),
                                          (self.sf, "Sugar Free")) if flag]
        self.name = " ".join(tags + [self.name])
class Cornbread(Enemy):
    """Minor bread enemy."""

    def __init__(self):
        # Base init first: self.gf/self.sf only exist afterwards; the
        # original formatted the unbound is_gf/is_sf methods into the name.
        Enemy.__init__(self, name="Cornbread Fiend",
                       hp=300,
                       damage=5,
                       critChance=.1)
        tags = [label for flag, label in ((self.gf, "Gluten Free"),
                                          (self.sf, "Sugar Free")) if flag]
        self.name = " ".join(tags + [self.name])
class Coleslaw(Enemy):
    """Weakest enemy in the roster."""

    def __init__(self):
        # Base init first: self.gf/self.sf only exist afterwards; the
        # original formatted the unbound is_gf/is_sf methods into the name.
        Enemy.__init__(self, name="Coleslaw Vermin",
                       hp=100,
                       damage=1,
                       critChance=.05)
        tags = [label for flag, label in ((self.gf, "Gluten Free"),
                                          (self.sf, "Sugar Free")) if flag]
        self.name = " ".join(tags + [self.name])
| true
|
81e2623852489b20aa6c050c383013e02966fbc8
|
Python
|
EduardoMSA/Proyectos_ISC_ITESM
|
/Programas Python/Password.py
|
UTF-8
| 492
| 3.359375
| 3
|
[] |
no_license
|
# coding: utf-8
# In[ ]:
def Suffix(t, s):
    """Return True when t matches the start of s.

    NOTE(review): despite the name, this tests a *prefix* (t == s[:len(t)]);
    the Preffix function tests the suffix — the two names appear swapped.
    """
    return s.startswith(t)
def Preffix(t, s):
    """Return True when t matches the end of s.

    NOTE(review): name/behaviour swapped with Suffix above.
    The original compared t == s[-len(t):]; when t is empty that slice is
    the whole string, so an empty t only matches an empty s — that quirk
    is preserved here.
    """
    if t:
        return s.endswith(t)
    return s == ""
def Obelix(t, s):
    """Return True when t occurs in the interior of s, i.e. within the
    region left after trimming len(t) characters from each end (so the
    interior occurrence cannot overlap the prefix or suffix copies)."""
    interior = s[len(t):-len(t)]
    return t in interior
def Password(s):
    """Find a proper prefix of s that is also a suffix and also occurs
    strictly inside s; return "Just a legend" when none exists.

    NOTE(review): candidates are scanned shortest-first and the FIRST
    match wins, and the interior occurrence may not overlap the ends —
    confirm the intended spec does not require the longest such string.
    """
    for length in range(len(s) // 2):
        candidate = s[:length]
        if Suffix(candidate, s) and Preffix(candidate, s) and Obelix(candidate, s):
            return candidate
    return "Just a legend"
# Read the candidate string from stdin and report the password
# (or "Just a legend" when none exists).
s = input()
print(Password(s))
| true
|