blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bfc449afac945454eec512f803501c963c9ea1de
|
Python
|
aidanhayter/spike_train_analysis
|
/histogram_data.py
|
UTF-8
| 1,587
| 3.109375
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
from spike_train import create_spikes
"""histogram_data calls the function from spike_train to create random data,
as a Poisson model or MMPP model.
This code then creates statistical histograms from the data."""
plt.switch_backend('Qt5Agg')
model = 'MMPP'
spike_train = create_spikes(model)
# Spike Times
spike_times = np.where(spike_train)
# Spike Intervals
spike_intervals = np.diff(spike_times[0])
# Plotting PSTH
psth_array = [1] * len(spike_times[0])
plt.title('Peristimulus Time Histogram')
plt.ylabel('Spikes')
plt.xlabel('Time')
y, bin_edges = np.histogram(spike_times, bins=100)
bin_centers = (bin_edges - np.diff(bin_edges)[0]/2)[1:]
plt.plot(bin_centers, y)
# Train of spikes
plt.title('Spike Train')
plt.ylabel('Spikes')
plt.xlabel('Time')
plt.yticks([])
plt.scatter(spike_times[0], psth_array, marker='|')
# Plotting IIH
iih_array = [1] * len(spike_intervals)
plt.title('Interspike Interval Histogram')
plt.ylabel('Spikes')
plt.xlabel('Time')
y, bin_edges = np.histogram(spike_intervals, bins=100)
bin_centres = (bin_edges - np.diff(bin_edges)[0]/2)[1:]
plt.plot(bin_centres, y)
# Plotting IDC (FF)
np.var(spike_train) / np.mean(spike_train)
tau_min = 1
tau_max = 1000 # CORRECT: This is in time bins, need in ms
idc = [0] * tau_max
num_samples = 100000
for tau in range(tau_min, tau_max):
sptr_tau, t_tau = np.histogram(spike_times, bins=int(num_samples/tau))
idc[tau] = np.var(sptr_tau) / np.mean(sptr_tau)
plt.title('IDC (FF)')
plt.xlabel('Bin Width (ms/10)')
plt.ylabel('IDC')
plt.plot(idc[1:])
| true
|
407743641565f7e79bd71ac24e0dd744941f39b2
|
Python
|
huub8/tol-revolve
|
/tol/manage/robot.py
|
UTF-8
| 5,092
| 2.90625
| 3
|
[] |
no_license
|
import random
from sdfbuilder.math import Vector3
from revolve.util import Time
from revolve.angle import Robot as RvRobot
class Robot(RvRobot):
    """
    Class to manage a single robot
    """
    def __init__(self, conf, name, tree, robot, position, time, battery_level=0.0, parents=None):
        """
        :param conf: experiment configuration (timing, thresholds, etc.)
        :param name:
        :param tree:
        :param robot: Protobuf robot
        :param position:
        :type position: Vector3
        :param time:
        :type time: Time
        :param parents:
        :type parents: tuple(Robot, Robot)
        :param battery_level: Battery charge for this robot
        :type battery_level: float
        :return:
        """
        # Number of pose samples retained for speed estimation.
        speed_window = int(conf.evaluation_time * conf.pose_update_frequency)
        super(Robot, self).__init__(name=name, tree=tree, robot=robot, position=position, time=time,
                                    battery_level=battery_level, speed_window=speed_window,
                                    warmup_time=conf.warmup_time, parents=parents)
        # Set of robots this bot has mated with (name -> mating count)
        self.mated_with = {}
        self.last_mate = None
        self.conf = conf
        self.size = len(tree)
        self.battery_level = battery_level
        self.initial_charge = battery_level
    def will_mate_with(self, other):
        """
        Decides whether or not to mate with the other given robot
        based on its position and speed.
        :param other:
        :type other: Robot
        :return:
        """
        if self.age() < self.conf.warmup_time:
            # Don't mate within the warmup time
            return False
        mate_count = self.mated_with.get(other.name, 0)
        # NOTE(review): strict ">" permits max_pair_children + 1 pairings;
        # confirm whether ">=" was intended.
        if mate_count > self.conf.max_pair_children:
            # Maximum number of children with this other parent
            # has been reached
            return False
        if self.last_mate is not None and \
           float(self.last_update - self.last_mate) < self.conf.gestation_period:
            # Don't mate within the cooldown window
            return False
        if self.distance_to(other.last_position) > self.conf.mating_distance_threshold:
            return False
        my_fitness = self.fitness()
        other_fitness = other.fitness()
        # Only mate with robots with nonzero fitness, check for self zero-fitness
        # to prevent division by zero.
        return other_fitness > 0 and (
            my_fitness == 0 or
            (other_fitness / my_fitness) >= self.conf.mating_fitness_threshold
        )
    def distance_to(self, vec, planar=True):
        """
        Calculates the Euclidean distance from this robot to
        the given vector position.
        :param vec:
        :type vec: Vector3
        :param planar: If true, only x/y coordinates are considered.
        :return:
        """
        diff = self.last_position - vec
        if planar:
            diff.z = 0
        return diff.norm()
    @staticmethod
    def header():
        """
        Column names matching the rows emitted by write_robot().
        :return:
        """
        return ['run', 'id', 't_birth', 'parent1', 'parent2', 'charge', 'nparts', 'x', 'y', 'z']
    def write_robot(self, world, details_file, csv_writer):
        """
        Serialize this robot's protobuf to *details_file* and append a
        summary row (see header()) to *csv_writer*.
        :param world:
        :param details_file:
        :param csv_writer:
        :return:
        """
        # NOTE(review): SerializeToString() yields bytes, but the file is
        # opened in text mode -- fine on Python 2 (which this code's
        # super(Robot, self) style suggests), would need 'wb' on Python 3.
        with open(details_file, 'w') as f:
            f.write(self.robot.SerializeToString())
        row = [getattr(world, 'current_run', 0), self.robot.id,
               world.age()]
        # Two empty cells keep the column layout when parents are unknown.
        row += [parent.robot.id for parent in self.parents] if self.parents else ['', '']
        row += [self.initial_charge, self.size, self.last_position.x,
                self.last_position.y, self.last_position.z]
        csv_writer.writerow(row)
    def fitness(self):
        """
        Fitness is proportional to both the displacement and absolute
        velocity of the center of mass of the robot, in the formula:
        5 dS + S
        Where dS is the displacement over a direct line between the
        start and end points of the robot, and S is the distance that
        the robot has moved.
        Since we use an active speed window, we use this formula
        in context of velocities instead.
        :return:
        """
        age = self.age()
        if age < (0.25 * self.conf.evaluation_time) or age < self.conf.warmup_time:
            # We want at least some data
            return 0.0
        return 5.0 * self.displacement_velocity() + self.velocity()
    def charge(self):
        """
        Returns the remaining battery charge of this robot.
        Drain grows with both age and robot size.
        :return:
        """
        return self.initial_charge - (float(self.age()) * self.size)
    def did_mate_with(self, other):
        """
        Called when this robot mated with another robot successfully.
        Records the time and bumps the per-partner mating count.
        :param other:
        :type other: Robot
        :return:
        """
        self.last_mate = self.last_update
        if other.name in self.mated_with:
            self.mated_with[other.name] += 1
        else:
            self.mated_with[other.name] = 1
| true
|
77e517b8db446f966a27a6b94e4e66a693c7b7be
|
Python
|
falcone-gk/Battery
|
/decorators.py
|
UTF-8
| 633
| 3.1875
| 3
|
[] |
no_license
|
"""
Decoradores para los scripts del archivo battery.py
"""
def is_allowed(func):
    """Decorator for battery.py scripts: validates the conservation-mode
    parameter before calling *func* and reports the outcome.

    The first positional argument must be 0 (disable conservation mode)
    or 1 (enable it); any other value is rejected and *func* is not
    called.

    Bug fix: the original wrapper discarded *func*'s return value
    (always returning None); it now returns the wrapped call's status,
    and returns None explicitly when the parameter is rejected.
    """
    def wrapper(*args, **kwargs):
        param = args[0]
        if param == 0:
            print('Se desactivará el modo de conservación de bateria')
        elif param == 1:
            print('Se activará el modo de conservación de bateria')
        else:
            # Invalid parameter: report and skip the wrapped call.
            print('Parámetro no permitido: {}'.format(param))
            return None
        status = func(*args, **kwargs)
        # Status conventions from the shell commands used by battery.py:
        # 0 == success, 256 == failure.
        if status == 0:
            print('Realizado exitosamente!')
        elif status == 256:
            print('Inténtalo de nuevo! :(')
        return status
    return wrapper
| true
|
cd6e415f93f06a8070057820434dd42b11f1ff14
|
Python
|
jedrzejkozal/AdventOfCode
|
/2017/day12/GraphParsing_test.py
|
UTF-8
| 2,808
| 2.609375
| 3
|
[] |
no_license
|
import pytest
from GraphTestBase import *
class TestGraphParsing(GraphTestBase):
    """Tests for the graph parser.

    ``self.sut`` (system under test) and the ``*_graph`` fixtures are
    provided by GraphTestBase, which lives outside this file.
    """
    def test_parse_graph_with_no_node_graph_is_empty(self):
        # A single blank line parses to an empty graph.
        arg = [""]
        self.sut.parseGraph(arg)
        assert self.sut.empty() == True
    def test_parse_graph_with_one_node_graph_is_not_empty(self, one_node_graph):
        self.sut.parseGraph(one_node_graph)
        assert self.sut.empty() == False
    def test_parse_graph_with_one_node_graph_contains_vertcies(self, one_node_graph):
        self.sut.parseGraph(one_node_graph)
        assert 0 in self.sut
    def test_parse_graph_with_one_node_graph_contains_edges(self, one_node_graph):
        # The single node is connected to itself.
        self.sut.parseGraph(one_node_graph)
        assert self.sut.contains_edge((0, [0]))
    def test_parse_graph_with_two_nodes_graph_is_not_empty(self, two_nodes_graph):
        self.sut.parseGraph(two_nodes_graph)
        assert self.sut.empty() == False
    def test_parse_graph_with_two_nodes_graph_contains_verticies(self, two_nodes_graph):
        self.sut.parseGraph(two_nodes_graph)
        assert 0 in self.sut
        assert 1 in self.sut
    def test_parse_graph_with_two_nodes_graph_contains_edges(self, two_nodes_graph):
        # Edges are stored in both directions.
        self.sut.parseGraph(two_nodes_graph)
        assert self.sut.contains_edge((0, [1]))
        assert self.sut.contains_edge((1, [0]))
    def test_parse_graph_with_two_nodes_and_uneacesseary_connections_edges_are_added_once(self, two_nodes_graph_with_repeated_edge):
        self.sut.parseGraph(two_nodes_graph_with_repeated_edge)
        assert self.sut.contains_edge((0, [1]))
        assert self.sut.contains_edge((1, [0, 0]))
    def test_parse_graph_with_three_nodes_graph_single_line_is_not_empty(self, three_nodes_graph_single_line):
        self.sut.parseGraph(three_nodes_graph_single_line)
        assert self.sut.empty() == False
    def test_parse_graph_with_three_nodes_graph_single_line_contains_verticies(self, three_nodes_graph_single_line):
        self.sut.parseGraph(three_nodes_graph_single_line)
        assert 0 in self.sut
        assert 1 in self.sut
        assert 2 in self.sut
    def test_parse_graph_with_three_nodes_graph_is_not_empty(self, three_nodes_graph):
        self.sut.parseGraph(three_nodes_graph)
        assert self.sut.empty() == False
    def test_parse_graph_with_three_nodes_graph_contains_verticies(self, three_nodes_graph):
        self.sut.parseGraph(three_nodes_graph)
        assert 0 in self.sut
        assert 1 in self.sut
        assert 2 in self.sut
    def test_parse_graph_with_three_nodes_graph_contains_edges(self, three_nodes_graph):
        self.sut.parseGraph(three_nodes_graph)
        assert self.sut.contains_edge((0, [1, 2]))
        assert self.sut.contains_edge((1, [0]))
        assert self.sut.contains_edge((2, [0]))
| true
|
582f6842db3b3fb3457c152c6054f6272763652c
|
Python
|
bgruening/galaxytools
|
/tools/text_processing/column_arrange_by_header/column_arrange.py
|
UTF-8
| 996
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import argparse
# Rearrange the columns of a tab-separated file so the requested headers
# come first (in the requested order); remaining columns are either kept
# in their original order or dropped with --discard.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="Tabular Input File Name")
parser.add_argument("-o", "--output", help="Tabular Output File")
parser.add_argument("-c", "--columns", nargs="+", help="Column Headers to Sort By")
parser.add_argument(
    "-d", "--discard", action="store_true", help="Discard remaining columns"
)
args = parser.parse_args()

with open(args.input) as data:
    header_fields = next(data).rstrip("\n").split("\t")
    # Positions of the requested columns, in the requested order.
    order = [header_fields.index(name) for name in args.columns]
    if not args.discard:
        # Append every remaining column, preserving the original order.
        order.extend(i for i in range(len(header_fields)) if i not in order)

    def arrange(fields):
        # Re-emit the fields in the computed column order.
        return "\t".join(fields[i] for i in order)

    with open(args.output, "w") as out:
        out.write(arrange(header_fields) + "\n")
        for line in data:
            out.write(arrange(line.rstrip("\n").split("\t")) + "\n")
| true
|
760a088888488732b17ebf3a310a7a7d5ce4226b
|
Python
|
rinkeigun/linux_module
|
/python_source/windowscontrol.py
|
UTF-8
| 1,133
| 2.609375
| 3
|
[] |
no_license
|
# coding: utf-8
from datetime import datetime
from pywinauto import application
# Launch a fresh Notepad instance driven via UI Automation (uia backend).
app = application.Application(backend="uia").start("notepad.exe")
# Attach to the untitled Notepad window (Japanese UI title: "Untitled - Notepad").
dlg_spec = app['無題 - メモ帳']
dlg_spec.draw_outline()
# Find the Notepad window and type the current date/time (disabled experiments):
#dlg_spec.Edit1.SetText(datetime.now())
#dlg_spec.Edit1.SetText("test")
#dialog = app[u"ファイル->名前を付けて保存"]
# Select the menu item: File -> Save As.
#app.Notepad.MenuSelect(u"ファイル->名前を付けて保存")
dlg_spec.MenuSelect(u"ファイル->名前を付けて保存")
# Find the "Save As" dialog window.
dialog = app[u"名前を付けて保存"]
# Set the file name.
dialog.Edit1.SetText(u"datetime.txt")
# Click the Save button.
dialog.Button1.Click()
# If the file already exists, an overwrite-confirmation dialog appears.
confirm = app[u"名前を付けて保存の確認"]
if confirm.Exists(): # was a confirmation requested?
    confirm.Button1.Click() # confirm the overwrite
# Exit Notepad via keystrokes: Alt+F, X (File -> Exit).
app.Notepad.TypeKeys("%FX")
| true
|
2f3cfe3728aefadd286a8e3bd37783fe9e5714cc
|
Python
|
BusraOzer/InternetofThingsExamples
|
/PythonBolumu/regexEx.py
|
UTF-8
| 710
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/python
import re
mail = "tolgabuyuktanir@.com";
#regular expression
searchObj = re.search( r'(.*)@(.*\.)(.*)', mail)
if searchObj:
print "searchObj.group() : ", searchObj.group()
print "searchObj.group(1) : ", searchObj.group(1)
print "searchObj.group(2) : ", searchObj.group(2)
print "searchObj.group(3) : ", searchObj.group(3)
else:
print "Nothing found!!"
#mailin valid olup olmadigi
#eger mail adresi formata uygun ise yazar degilse bos gosterir
searchObj=re.findall(r"\w+@\w+\.(?:com|in)",mail)
print "mail : ", searchObj
deneme="tolga Buyuktanir.lkngolnobg norgbourtnhgou rtguo hrtuort.uhg.ruhtgu.rthgu hyuhrtyuhgyygygygryegfygrfgy?rggefr"
d=re.search(r'(Bi*)',deneme)
print d.group()
| true
|
a38d7094d40552433881942568be32d743359f71
|
Python
|
vijayaramanp/Guvi-pack1
|
/coun_spaces.py
|
UTF-8
| 73
| 3.171875
| 3
|
[] |
no_license
|
# count spaces
def count_spaces(text):
    """Return the number of space characters in *text*.

    Bug fix: the original computed ``len(text.split()) - 1``, which counts
    word gaps, not spaces -- wrong for consecutive, leading or trailing
    spaces, and it returns -1 for an empty string.
    """
    return text.count(' ')


if __name__ == "__main__":
    # Read one line from stdin and print its space count.
    print(count_spaces(input()))
| true
|
16cd61901876435beb788106e8f06c02ae0e83db
|
Python
|
1reddymallik1/ETrade-Trading
|
/Tests/test_EntryMoneyMaker_CalculateStockItemOrders.py
|
UTF-8
| 4,358
| 2.734375
| 3
|
[] |
no_license
|
import os
import sys
import unittest
from pathlib import Path
from typing import List
sys.path.append(str(Path(os.getcwd()).parent))
from BusinessModels.ActiveStockItem import ActiveStockItem
from BusinessModels.OrderInfo import OrderInfo
from BusinessModels.PortfolioResponse import PortfolioPosition
from BusinessModels.PriceCoordinate import BuyDeltaType, PriceCoordinate
from BusinessModels.SellDeltaType import SellDeltaType
from BusinessModels.Settings import Settings
from Entries.EntryHelpers.ManageOrdersHelpers import ManageOrdersHelpers
from Entries.EntryInputs.EntryMoneyMakerInput import EntryMoneyMakerInput
from Entries.EntryMoneyMaker import MoneyMaker
from Entries.ActiveStocks import ActiveStockItems
class tests_ManageOrdersHelpers_GeneratePossibleLimitOrders(unittest.TestCase):
    """ Tests related to GeneratePossibleLimitOrders method of ManagerOrdersHelpers class.
    """
    def setUp(self):
        # Initialize the object.
        self.manageOrdersHelpers:ManageOrdersHelpers = ManageOrdersHelpers(Settings())
        entryInput = EntryMoneyMakerInput()
        self.moneyMaker = MoneyMaker(entryInput, self.manageOrdersHelpers)
    def tearDown(self):
        # No per-test cleanup required.
        pass
    def test_CalculateStockItemOrders(self):
        """ CalculateStockItemOrders
        Symbol = "XXXX"
        FLAT Buy pricing.
        SellStepType = SellDeltaType.FIXED
        StartPrice = 20.55
        quantity = 2
        BuyStepSize = 1
        SellStepSize = 2
        QuantityMultiplier = 1
        MaxActiveBuy = 2
        Portfolio quantity = 9
        Expected Cancel Orders =
        [
            OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),
            OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),
            OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),
            OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),
            OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),
            OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),
            OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)
        ]
        Expected Place Orders =
        [
        ]
        """
        symbol = "XXXX"
        # Create ActiveStockItem
        activeStockItem = ActiveStockItem(symbol=symbol)
        quantity = 2
        buyStepSize = 1
        activeStockItem.SellStepSize = 2
        activeStockItem.SellStepType = SellDeltaType.FIXED
        activeStockItem.StartPrice = 20.55
        activeStockItem.QuantityMultiplier = 1
        activeStockItem.MaxActiveBuy = 2
        # FLAT pricing: a single coordinate with a fixed buy delta covers
        # every price from 0 upward.
        priceCoordinates:List[PriceCoordinate] = []
        priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity,
                                buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))
        activeStockItem.PriceCoordinates = priceCoordinates
        # Create PortfolioPosition
        portfolioPosition = PortfolioPosition(symbol=symbol)
        portfolioPosition.Quantity = 9
        expectedLimitOrders:List[OrderInfo] = [
            OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),
            OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),
            OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),
            OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),
            OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),
            OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),
            OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)
        ]
        possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)
        self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)
        # NOTE(review): the CalculateStockItemOrders results are only
        # printed, never asserted -- the expected place/cancel orders in the
        # docstring are not actually verified.
        placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)
        print(placeOrders)
        print(cancelOrders)
        for activeStockItem in ActiveStockItems:
            print(activeStockItem.Symbol)
if __name__ == '__main__':
    unittest.main()
| true
|
6a4556cf51d831e43cc1a2dd41892922170a5210
|
Python
|
guoyu07/pynotes
|
/tools/douban.py
|
UTF-8
| 1,903
| 3.421875
| 3
|
[] |
no_license
|
import urllib.request
import re
import time
def movie(movieTag):
    """Fetch the page at *movieTag* (a full Douban tag URL) and return its
    HTML decoded as UTF-8.

    Bug fix: the original ignored its parameter and read the module-level
    global ``url`` instead; it only worked because the caller happened to
    pass that same global in.
    """
    tagUrl = urllib.request.urlopen(movieTag)
    tagUrl_read = tagUrl.read().decode('utf-8')
    return tagUrl_read
def subject(tagUrl_read):
    '''
    Extract (link+name, score, review-count) tuples from a Douban tag page
    and append them to the module-level list ``newlist``.

    Known issues / TODOs (translated from the original Chinese notes):
    1. Only each single page is sorted, not the movies across all pages.
    2. Next update: add movie links; consider adding movie posters too.
    3. The list needs to be appended to.
    4. Export the results to a local txt or excel file.
    5. When matching movie names, could the link, name, score and review
       count be captured together as one group?
    '''
    # Regexes matching the movie name (with link), rating and review count.
    nameURL = re.findall(r'(http://movie.douban.com/subject/[0-9.]+)\/"\s+title="(.+)"',tagUrl_read)
    scoreURL = re.findall(r'<span\s+class="rating_nums">([0-9.]+)<\/span>',tagUrl_read)
    evaluateURL = re.findall(r'<span\s+class="pl">\((\w+)人评价\)<\/span>',tagUrl_read)
    movieLists = list(zip(nameURL,scoreURL,evaluateURL))
    # NOTE(review): extends the global `newlist`, so this function
    # accumulates state across calls rather than being pure.
    newlist.extend(movieLists)
    return newlist
# quote() percent-encodes special (Chinese) characters in the tag name.
movie_type = urllib.request.quote(input('请输入电影类型(如剧情、喜剧、悬疑):'))
page_end=int(input('请输入搜索结束时的页码:'))
num_end=page_end*20  # Douban lists 20 movies per page
num=0
page_num=1
newlist=[]  # accumulated by subject() across pages
while num<num_end:
    url=r'http://movie.douban.com/tag/%s?start=%d'%(movie_type,num)
    movie_url = movie(url)
    subject_url=subject(movie_url)
    num=page_num*20
    page_num+=1
else:
    # Runs when the while loop finishes normally: sort the accumulated
    # movies by the score field (index 1).  NOTE(review): scores are
    # strings, so this is a lexicographic sort, and reverse=True puts the
    # "largest" strings first.
    movieLIST = sorted(newlist, key=lambda movieList : movieList[1],reverse = True)
    # NOTE(review): the loop variable shadows the movie() function above.
    for movie in movieLIST:
        print(movie)
        time.sleep(3)
print('结束')
| true
|
396023c24a469d9c323197b3c26d3dd29153b81a
|
Python
|
disha111/Python_Beginners
|
/Assignment 1.2/A1.2.py
|
UTF-8
| 341
| 3.890625
| 4
|
[] |
no_license
|
def fizz_buzz(no1):
    """Classic FizzBuzz for a single number.

    Returns "Fizz" for multiples of 3, "Buzz" for multiples of 5,
    "FizzBuzz" for multiples of both, and the number itself otherwise.

    Bug fix: the original initialised the two label parts to a single
    space, so it returned "Fizz " and " Buzz" with stray spaces.
    """
    if no1 % 3 == 0 or no1 % 5 == 0:
        op1 = ""
        op2 = ""
        if no1 % 3 == 0:
            op1 = "Fizz"
        if no1 % 5 == 0:
            op2 = "Buzz"
        return op1 + op2
    # Not a multiple of 3 or 5: echo the number back unchanged.
    return no1
# Script entry: read a number from stdin and print its FizzBuzz result.
no1 = int(input("Enter The Number : "))
result = fizz_buzz(no1)
print(result)
| true
|
0444631b4625cd8aaae4a9fc7aefac009400f6d1
|
Python
|
kutsevol/technorely_etl
|
/etl/encounter.py
|
UTF-8
| 1,073
| 2.734375
| 3
|
[] |
no_license
|
import pandas as pd
from schemas import encounter_schema
from utils.stats import get_stats_of_popular_days_of_week
from etl.base import BaseETL
class EncounterETL(BaseETL):
    """ETL pipeline for FHIR Encounter resources."""
    # Resource name used by the base ETL machinery.
    name = "Encounter"
    def __init__(self, url, session):
        """Configure the base ETL with the Encounter schema, source file
        and target table."""
        super().__init__(url, session)
        self.schema = encounter_schema
        self.json_file_name = "Encounter.ndjson"
        self.table_name = "encounter"
    def refinement_dataframe(self):
        """Flatten the raw Encounter dataframe into scalar columns and
        return it.

        NOTE(review): assumes self.df holds parsed FHIR Encounter records
        (subject.reference like "Patient/<id>", a period dict, and a
        type/coding structure) -- confirm against the source NDJSON.
        """
        # Keep the original resource id under source_id.
        self.df.rename(
            columns={"id": "source_id"},
            inplace=True
        )
        # subject.reference is "Patient/<id>"; take the part after "/".
        self.df["patient_id"] = self.df.subject.apply(
            pd.Series).reference.str.partition("/")[2]
        # Expand the period dict into start/end columns.
        self.df[["start_date", "end_date"]] = self.df.period.apply(pd.Series)
        # Drill into type[0].coding[0] for the code and code system.
        self.df[["type_code", "type_code_system"]] = self.df.type.apply(
            pd.Series)[0].apply(pd.Series).coding.apply(pd.Series)[0].apply(
            pd.Series)[["code", "system"]]
        return self.df
    def get_the_most_and_least_popular_days_by_col(self, col):
        """Delegate weekday-popularity stats for *col* to the shared helper."""
        return get_stats_of_popular_days_of_week(self.df, col)
| true
|
98d02f9aaae3fd251511340d5f14fad4acc2fd0e
|
Python
|
santi1991/tic-python
|
/semana1/clase1/HolaMundo.py
|
UTF-8
| 127
| 3.296875
| 3
|
[] |
no_license
|
# Tiny demo script: print a greeting, then the value of a boolean expression.
hola = 'hola'
print(hola)
# print(“Hello, I’m “, 21, “ years old”)
# True + False == 0  ->  1 == 0  ->  False, so this prints "False".
print(f'{True + False == 0}')
| true
|
696ec36152825ee0bbdc6fabfee85e806940d5ff
|
Python
|
nakaken0629/atcoder
|
/abc157/bingo.py
|
UTF-8
| 600
| 3.15625
| 3
|
[] |
no_license
|
# AtCoder ABC157 "Bingo": read a 3x3 card, N called numbers, and decide
# whether any row, column or diagonal is fully marked.
A = []
for _ in range(3):
    A.append(list(map(int, input().split())))
N = int(input())
b = []
for _ in range(N):
    b.append(int(input()))
# Mark called numbers on the card by overwriting them with -1.
for num in b:
    for x in range(3):
        for y in range(3):
            if A[x][y] == num:
                A[x][y] = -1
# A line is complete when its maximum is -1 (every cell marked).
amount = 0
for x in range(3):
    if max(A[x]) == -1:
        amount += 1
for y in range(3):
    if max([A[0][y], A[1][y], A[2][y]]) == -1:
        amount += 1
if max([A[0][0], A[1][1], A[2][2]]) == -1:
    amount += 1
if max([A[2][0], A[1][1], A[0][2]]) == -1:
    amount += 1
# Bingo if at least one complete line exists.
if amount > 0:
    print("Yes")
else:
    print("No")
| true
|
ea600784265d557c13059ac44dc20dbc96942eb6
|
Python
|
setazer/aoc
|
/aoc2020/d5/solve.py
|
UTF-8
| 1,304
| 3.203125
| 3
|
[] |
no_license
|
from itertools import product
from aocframework import AoCFramework
def decipher(board_pass):
    """Decode an AoC 2020 day-5 boarding pass into a (row, seat) pair.

    The first seven characters encode the row in binary (B=1, F=0);
    the last three encode the seat within the row (R=1, L=0).
    """
    row_part, seat_part = board_pass[:-3], board_pass[-3:]
    row = int(row_part.translate(str.maketrans("BF", "10")), 2)
    seat = int(seat_part.translate(str.maketrans("RL", "10")), 2)
    return row, seat
def next_and_prev(seat):
    """Return the seats immediately after and before *seat*.

    Seats are (row, column) tuples with columns 0..7; stepping past
    either end of a row wraps into the neighbouring row.
    """
    row, col = seat
    nxt = (row + 1, 0) if col == 7 else (row, col + 1)
    prv = (row - 1, 7) if col == 0 else (row, col - 1)
    return nxt, prv
class DayPart1(AoCFramework):
    """Part 1: the answer is the highest seat ID, where ID = row * 8 + seat."""
    test_cases = (
        ("""""", ),
    )
    # Expected answer for the real puzzle input (regression check).
    known_result = 901
    def go(self):
        raw_split = self.linesplitted  # one boarding pass per line
        # Decode every pass and take the maximum seat ID.
        return max(map(lambda x: x[0] * 8 + x[1],map(decipher, raw_split)))
# Instantiating runs the framework for this part.
DayPart1()
class DayPart2(AoCFramework):
    """Part 2: find the single empty seat whose neighbours are both
    occupied -- that is our seat; return its ID."""
    test_cases = (
        ("""""", ),
    )
    # Expected answer for the real puzzle input (regression check).
    known_result = 661
    def go(self):
        raw_split = self.linesplitted
        seats = set(map(decipher,raw_split))
        # Every possible (row, column) position on the plane.
        all_seats = set(product(range(128), range(8)))
        difference = all_seats - seats
        for seat in difference:
            n, p = next_and_prev(seat)
            # Our seat is the vacancy sandwiched between two taken seats.
            if n in seats and p in seats:
                return seat[0]*8+seat[1]
DayPart2()
| true
|
1a0d8de36e4c816b0d98031803369b6e042e8ea8
|
Python
|
tcouch/platform-logs-query
|
/statsGUI.py
|
UTF-8
| 9,231
| 2.734375
| 3
|
[] |
no_license
|
from tkinter import *
import datetime as dt
import statsFactory as sf
class MainMenu(Tk):
    """Tk window collecting platform, date range, node type and statistics
    selections; clicking OK validates the form and runs the query."""
    def __init__(self,parent):
        Tk.__init__(self,parent)
        self.parent = parent
        self.initialize()
    def initialize(self):
        """Build all widgets: platform selector, date fields, node menu,
        one checkbox per Statistic subclass, and the OK button."""
        self.grid()
        self.initializePlatformSelector()
        self.initializeDateSection()
        #node type selection
        nodeSelectLabel = Label(self,
                                text="Node type:",
                                fg="white",
                                bg="blue",
                                width="12",
                                anchor="e").grid(column=0,row=4)
        self.nodeVar = StringVar()
        self.nodeVar.set('all')
        nodeList = list(sf.nodeDict.keys())
        nodeList.sort()
        nodeList.insert(0,"all")
        self.nodeSelector = OptionMenu(self, self.nodeVar, *nodeList)
        self.nodeSelector.grid(row=4,column=1)
        #Checkboxes to select statistics
        # One IntVar-backed checkbox per Statistic subclass in statsFactory.
        self.stats={}
        rowCount = 5
        for statistic in sf.Statistic.__subclasses__():
            self.stats[statistic.__name__] = IntVar()
            Checkbutton(self,text=statistic.__name__,
                        variable=self.stats[statistic.__name__]) \
                .grid(column=0,row=rowCount,sticky="W")
            rowCount += 1
        #Add OK button
        # NOTE(review): .grid() returns None, so self.OK holds None rather
        # than the Button widget.
        self.OK = Button(self, text="OK", command=self.OnOKClick) \
            .grid(column=0,row=rowCount)
    def initializePlatformSelector(self):
        """Drop-down of platforms known to statsFactory."""
        #platform selector
        platformSelectLabel = Label(self,
                                    text="Platform:",
                                    fg="white",
                                    bg="blue",
                                    width="12",
                                    anchor="e").grid(column=0,row=0)
        self.platformVar = StringVar()
        self.platformVar.set('Legion')
        platformList = list(sf.platform2database.keys())
        self.platformSelector = OptionMenu(self, self.platformVar, *platformList)
        self.platformSelector.grid(row=0,column=1)
    def initializeDateSection(self):
        """Month/year Entry widgets for the start and end dates."""
        #Top labels
        self.MonthLabel = Label(self,
                                text="Month (MM)",
                                width="10")
        self.MonthLabel.grid(column=1,row=1)
        self.YearLabel = Label(self,
                               text="Year (YYYY)",
                               width="10")
        self.YearLabel.grid(column=2,row=1)
        #Start date bits
        self.startDateLabel = Label(self,
                                    text="Start Month:",
                                    fg="white",
                                    bg="blue",
                                    width="12",
                                    anchor="e")
        self.startDateLabel.grid(column=0,row=2)
        self.startMonth = StringVar()
        self.startMonthEntry = Entry(
            self,textvariable=self.startMonth,width="2")
        self.startMonthEntry.grid(column=1,row=2)
        self.startYear = StringVar()
        self.startYearEntry = Entry(
            self,textvariable=self.startYear,width="4")
        self.startYearEntry.grid(column=2,row=2)
        #End date bits
        self.endDateLabel = Label(self,
                                  text="End Month:",
                                  fg="white",
                                  bg="blue",
                                  width="12",
                                  anchor="e")
        self.endDateLabel.grid(column=0,row=3)
        self.endMonth = StringVar()
        self.endMonthEntry = Entry(
            self,textvariable=self.endMonth,width="2")
        self.endMonthEntry.grid(column=1,row=3)
        self.endYear = StringVar()
        self.endYearEntry = Entry(
            self,textvariable=self.endYear,width="4")
        self.endYearEntry.grid(column=2,row=3)
    def OnOKClick(self):
        """Validate the form and, if valid, run the requested statistics."""
        if self.validateInput():
            platform = self.platformVar.get()
            startYear = int(self.startYear.get())
            startMonth = int(self.startMonth.get())
            endYear = int(self.endYear.get())
            endMonth = int(self.endMonth.get())
            startDate = dt.datetime(startYear,startMonth,1)
            endDate = dt.datetime(endYear,endMonth,1)
            node = self.nodeVar.get()
            # Collect the names of every checked statistic.
            stats = []
            for statistic in sf.Statistic.__subclasses__():
                if self.stats[statistic.__name__].get() == 1:
                    stats.append(statistic.__name__)
            kwargs = {
                "startDate":startDate,
                "endDate":endDate,
                "node":node,
                "stats":stats,
                "db":sf.platform2database[platform]
            }
            getRequestedStats(**kwargs)
    def validateInput(self):
        """Return True when all four date fields and their ordering are valid."""
        if not self.validateMonth(self.startMonthEntry,self.startMonth.get()):
            return False
        if not self.validateMonth(self.endMonthEntry,self.endMonth.get()):
            return False
        if not self.validateYear(self.startYearEntry,self.startYear.get()):
            return False
        if not self.validateYear(self.endYearEntry,self.endYear.get()):
            return False
        if not self.validateDates():
            return False
        return True
    def validateMonth(self,widget,month):
        """Validate a month entry; on failure show an error and refocus *widget*."""
        valid = True
        try:
            month = int(month)
        except ValueError:
            # NOTE(review): `messagebox` is never imported -- "from tkinter
            # import *" does not pull in the tkinter.messagebox submodule,
            # so every showerror call in this class raises NameError.
            # The file needs "from tkinter import messagebox".
            messagebox.showerror("Input error!",
                                 "You must enter an integer value.")
            valid = False
        # NOTE(review): if int() failed above, `month` is still a str here
        # and this comparison raises TypeError on Python 3.
        if month < 1 or month > 12:
            messagebox.showerror("Input error!",
                                 "You must enter a value from 1 to 12.")
            valid = False
        if valid == False:
            widget.focus_set()
            widget.selection_range(0, END)
        return valid
    def validateYear(self,widget,year):
        """Validate a year entry (2011-2050); same caveats as validateMonth."""
        valid = True
        try:
            year = int(year)
        except ValueError:
            messagebox.showerror("Input error!",
                                 "You must enter an integer value")
            valid = False
        # NOTE(review): same str-vs-int comparison problem as validateMonth.
        if year < 2011 or year > 2050:
            messagebox.showerror("Input error!",
                                 "You must enter a value from 2011 to 2050.")
            valid = False
        if valid == False:
            widget.focus_set()
            widget.selection_range(0, END)
        return valid
    def validateDates(self):
        """Check that the start date is not after the end date.

        NOTE(review): this compares concatenated strings, so unpadded
        single-digit months compare incorrectly (e.g. "2" sorts after
        "10"); comparing (year, month) integer tuples would be safe.
        """
        startDate = self.startYear.get() + self.startMonth.get()
        endDate = self.endYear.get() + self.endMonth.get()
        if startDate > endDate:
            messagebox.showerror("Input error!",
                                 "The start date must be earlier than or" \
                                 +" equal to the end date.")
            self.startMonthEntry.focus_set()
            self.startMonthEntry.selection_range(0, END)
            return False
        else:
            return True
def getRequestedStats(**kwargs):
    """Gather the requested statistics for each month in the range and
    print them.

    Expected kwargs: startDate/endDate (datetime), node (str), stats
    (list of Statistic subclass names), db (database handle).
    """
    months = []
    for interval in getMonthIntervals(kwargs["startDate"], kwargs["endDate"]):
        # Narrow the kwargs to this single month before querying.
        kwargs["startDate"] = interval["startDate"]
        kwargs["endDate"] = interval["endDate"]
        row = {
            name: sf.statFactory(name, **kwargs).getResult()
            for name in kwargs["stats"]
        }
        row["Start Date"] = kwargs["startDate"].strftime('%d-%m-%Y')
        row["End Date"] = kwargs["endDate"].strftime('%d-%m-%Y')
        row["Node Type"] = kwargs["node"]
        months.append(flatten(row))
    printStats(months)
def printStats(collection):
    """Pretty-print a list of per-month statistics dicts and return 0.

    The three header fields (Start Date, End Date, Node Type) are printed
    first for each month, followed by the remaining statistics in sorted
    key order.

    Bug fix: the original merely aliased the input (``tempCollection =
    collection``) and then ``del``-eted the header keys, silently mutating
    the caller's dictionaries.  This version leaves the input untouched.
    """
    header_keys = ("Start Date", "End Date", "Node Type")
    for month in collection:
        print("Start Date: {}".format(month["Start Date"]))
        print("End Date: {}".format(month["End Date"]))
        print("Node Type: {}".format(month["Node Type"]))
        for key in sorted(k for k in month if k not in header_keys):
            print("{0}: {1}".format(key, month[key]))
    return 0
def flatten(statsDict):
    """Flatten one level of nested dicts inside *statsDict*, in place.

    Every value that is itself a dict is removed and replaced by
    "outer-inner" keys holding the inner values.  Returns the (mutated)
    dict for convenience.
    """
    nested_keys = [k for k, v in statsDict.items() if type(v) is dict]
    flattened = {}
    for outer in nested_keys:
        for inner, value in statsDict[outer].items():
            flattened["{0}-{1}".format(outer, inner)] = value
    for outer in nested_keys:
        del statsDict[outer]
    statsDict.update(flattened)
    return statsDict
def getMonthIntervals(startDate, endDate):
    """Split the inclusive range startDate..endDate into one-month chunks.

    Both arguments are datetimes on the first day of a month; each chunk
    is a dict with "startDate" and "endDate" one month apart.
    """
    intervals = []
    cursor = startDate
    stop = add_months(endDate, 1)
    while cursor != stop:
        intervals.append({
            "startDate": cursor,
            "endDate": add_months(cursor, 1),
        })
        cursor = add_months(cursor, 1)
    return intervals
def add_months(sourcedate, months):
    """Return the first day of the month *months* after *sourcedate*.

    *months* may be negative; the day component is always reset to 1.
    """
    total = sourcedate.month - 1 + months
    return dt.datetime(sourcedate.year + total // 12, total % 12 + 1, 1)
if __name__=="__main__":
app = MainMenu(None)
app.title('my application')
app.mainloop()
| true
|
e4ee9509253b00cbd90d5fcf8fc4051d8ee2e5fb
|
Python
|
YouEbr/nginx_loadbalancer
|
/app-scalable/app.py
|
UTF-8
| 432
| 2.75
| 3
|
[] |
no_license
|
import flask
import socket

app1 = flask.Flask(__name__)

@app1.route('/')
def hello_world():
    """Root endpoint: report the serving host and IP (used to demonstrate
    which replica answered behind the load balancer)."""
    hostname, ip = get_hostname_and_ip()
    return 'This web application is running on host:\"{}\" with ip:\"{}\"'.format(hostname, ip)

def get_hostname_and_ip():
    """Return this machine's hostname and the address it resolves to."""
    hostname = socket.gethostname()
    ip = socket.gethostbyname(hostname)
    return hostname, ip

if __name__ == '__main__':
    # Bind to all interfaces so the container is reachable externally.
    app1.run(debug=True, host='0.0.0.0')
| true
|
d16fe61b8e703b2ebf748ca86bec7010d8d21f31
|
Python
|
SandhyaDotel/spiral
|
/spiral/homework.py
|
UTF-8
| 338
| 3.53125
| 4
|
[] |
no_license
|
def spiralize(number):
    """Sum the diagonal values of a *number* x *number* number spiral.

    Walks the corner values 1, 3, 5, 7, 9, 13, ... -- the step between
    corners grows by 2 after every four corners (one full ring) -- and
    sums all values not exceeding number**2.
    """
    limit = number ** 2
    total = 0
    value = 1
    step = 2
    corners_seen = 0
    while value <= limit:
        total += value
        value += step
        corners_seen += 1
        if corners_seen == 4:
            # A full ring is done; the next ring's corners are spaced wider.
            corners_seen = 0
            step += 2
    return total


if __name__ == "__main__":
    print(spiralize(501))
| true
|
400c63daf19c164aea1deec9b50778b12ca2c0c5
|
Python
|
freddycervantes/projectZ
|
/update_database.py
|
UTF-8
| 1,678
| 2.8125
| 3
|
[] |
no_license
|
"""
"""
class Update:
    """Refresh the local price database for every whitelisted stock,
    using lazily-imported fetch/write collaborators."""
    def __init__(self):
        # Collaborators and cached lookups, built via the lazy helpers below.
        self.__F = self.__fetch()
        self.__W = self.__write()
        self.__intra_dates = self.__valid_dates_intra()
        self.__stock_list = self.__valid_stocks()
    def __fetch(self):
        # Deferred import: remote data-fetching layer.
        import fetch_data
        return fetch_data.Fetch()
    def __write(self):
        # Deferred import: database-writing layer.
        import write_dat
        return write_dat.Write()
    def __valid_stocks(self):
        # Approved (whitelisted) stock symbols.
        import whitelist
        return whitelist.GetItems().approved_stocks()
    def __valid_dates_intra(self):
        # Trading dates for which intraday data exists.
        return self.__F.list_valid_intraday_dates()
    def update_intra_single(self, name):
        """
        Store intraday average prices for *name*, newest date first,
        stopping at the first date already present in the database.
        :param name:
        :return: False if not a valid stock, else true
        >>> U = Update()
        >>> U.update_intra_single("SPY")
        True
        >>> U.update_intra_single("SLDKHF")
        False
        """
        if name not in self.__stock_list:
            return False
        for i in reversed(self.__intra_dates):
            if self.__W.day_in_database(i, name):
                return True
            print(self.__W.store3(name, i, self.__F.intraday_average_price(name, i)))
            print(name)
        return True
    def update_5_single(self, name):
        # Store the "store5" dataset for *name*; False for unknown symbols.
        if name not in self.__stock_list:
            return False
        self.__W.store5(name)
        return True
    def update_all(self):
        """
        Run both updates for every whitelisted stock.
        >>> U = Update()
        >>> U.update_all()
        """
        for i in self.__stock_list:
            # XRX is deliberately skipped; the reason is not recorded here.
            if i == 'XRX':
                continue
            self.update_intra_single(i)
            self.update_5_single(i)
if __name__=="__main__":
    # Running the module directly executes the docstring doctests
    # (which require live data access).
    import doctest
    doctest.testmod()
| true
|
1eff8ed58fb2b57b02b72f3c2a265119567a95e2
|
Python
|
pyy1988/pyy_test1
|
/pyy1/.pycharm_helpers/python_stubs/-1550516950/_dbus_bindings/Dictionary.py
|
UTF-8
| 2,836
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# encoding: utf-8
# module _dbus_bindings
# from /usr/lib/python3/dist-packages/_dbus_bindings.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
"""
Low-level Python bindings for libdbus. Don't use this module directly -
the public API is provided by the `dbus`, `dbus.service`, `dbus.mainloop`
and `dbus.mainloop.glib` modules, with a lower-level API provided by the
`dbus.lowlevel` module.
"""
# imports
import dbus.lowlevel as __dbus_lowlevel
from .dict import dict
class Dictionary(dict):
    """
    An mapping whose keys are similar and whose values are similar,
    implemented as a subtype of dict.
    As currently implemented, a Dictionary behaves just like a dict, but
    with the addition of a ``signature`` property set by the constructor;
    conversion of its items to D-Bus types is only done when it's sent in
    a Message. This may change in future so validation is done earlier.
    Constructor::
    Dictionary(mapping_or_iterable=(), signature=None, variant_level=0)
    ``variant_level`` must be non-negative; the default is 0.
    ``signature`` is either a string or None. If a string, it must consist
    of exactly two complete type signatures, representing the 'key' type
    (which must be a primitive type, i.e. one of "bdginoqstuxy")
    and the 'value' type. The signature of the whole Dictionary will be
    ``a{xx}`` where ``xx`` is replaced by the given signature.
    If it is None (the default), when the Dictionary is sent over
    D-Bus, the key and value signatures will be guessed from an arbitrary
    element of the Dictionary.
    :IVariables:
    `variant_level` : int
    Indicates how many nested Variant containers this object
    is contained in: if a message's wire format has a variant containing a
    variant containing an array of DICT_ENTRY, this is represented in
    Python by a Dictionary with variant_level==2.
    """
    # Auto-generated IDE stub: method bodies are placeholders; the real
    # implementation lives in the _dbus_bindings C extension.
    def __init__(self, mapping_or_iterable=(), signature=None, variant_level=0): # real signature unknown; restored from __doc__
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    signature = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The D-Bus signature of each key in this Dictionary, followed by that of each value in this Dictionary, as a Signature instance."""
    variant_level = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The number of nested variants wrapping the real data. 0 if not in a variant."""
| true
|
afe888690d11a9b3712e14b730e9fecc18175131
|
Python
|
pzengseu/leetcode
|
/3Sum.py
|
UTF-8
| 672
| 2.78125
| 3
|
[] |
no_license
|
class Solution(object):
    def threeSum(self, nums):
        """Return all unique triplets in nums that sum to zero.

        :type nums: List[int]
        :rtype: List[List[int]]

        Fixes vs. original: Python-2-only ``xrange`` replaced by ``range``;
        duplicates are skipped while scanning instead of being filtered out
        afterwards with a dict, and both pointers advance on a hit.
        """
        if not nums:
            return []
        nums = sorted(nums)
        res = []
        for i in range(len(nums) - 2):
            # Skip duplicate anchor values so each triplet appears once.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            j, k = i + 1, len(nums) - 1
            while j < k:
                s = nums[i] + nums[j] + nums[k]
                if s < 0:
                    j += 1
                elif s > 0:
                    k -= 1
                else:
                    res.append([nums[i], nums[j], nums[k]])
                    j += 1
                    k -= 1
                    # Skip duplicate middle values after recording a hit.
                    while j < k and nums[j] == nums[j - 1]:
                        j += 1
        return res
# Fixed: the original used the Python-2 print *statement*, a SyntaxError on
# Python 3; print() with a single argument behaves identically on both.
print(Solution().threeSum([12,-14,-5,12,-2,9,0,9,-3,-3,-14,-6,-4,13,-11,-8,0,5,-7]))
| true
|
ec7dca5a02323e36316101f48e95b8e99bd74f76
|
Python
|
PinmanHuang/CVDL_2020
|
/Hw2/hw2.py
|
UTF-8
| 14,434
| 2.578125
| 3
|
[] |
no_license
|
"""
Author : Joyce
Date : 2020-12-15
"""
from UI.hw2_ui import Ui_MainWindow
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
import cv2
import numpy as np
# Q3
import cv2.aruco as aruco
from matplotlib import pyplot as plt
# Q4
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
class PyMainWindow(QMainWindow, Ui_MainWindow):
    """Main window: wires each push button to its OpenCV demo handler."""

    def __init__(self):
        super(PyMainWindow, self).__init__()
        self.setupUi(self)
        # === push button clicked action ===
        # Button -> handler pairs kept in one table; connected in the same
        # order as the original one-line-per-button version.
        wiring = (
            (self.pushButton, self.bg_sub),
            (self.pushButton_2, self.preprocessing),
            (self.pushButton_3, self.tracking),
            (self.pushButton_4, self.transform),
            (self.pushButton_5, self.reconstruction),
            (self.pushButton_6, self.error),
        )
        for button, handler in wiring:
            button.clicked.connect(handler)

    def bg_sub(self):
        """Run the background-subtraction demo (Q1.1)."""
        OpenCv().Q1_1()

    def preprocessing(self):
        """Run the blob-detection preprocessing demo (Q2.1)."""
        OpenCv().Q2_1()

    def tracking(self):
        """Run the optical-flow tracking demo (Q2.2)."""
        OpenCv().Q2_2()

    def transform(self):
        """Run the perspective-transform demo (Q3.1)."""
        OpenCv().Q3_1()

    def reconstruction(self):
        """Run the PCA image-reconstruction demo (Q4.1)."""
        OpenCv().Q4_1()

    def error(self):
        """Run the reconstruction-error demo (Q4.2)."""
        OpenCv().Q4_2()
class OpenCv(object):
    """Collection of OpenCV demo routines backing the GUI buttons.

    Each Q*_* method is self-contained: it reads its own input from a
    relative ./Q*_Image/ path, opens its own windows and releases its
    capture/windows before returning.
    """
    def __init__(self):
        super(OpenCv, self).__init__()
    def Q1_1(self):
        """Background subtraction: model each pixel from the first 50 frames
        (mean/std), then flag pixels deviating by more than 5 sigma."""
        i = mean = std = 0
        frames = []
        cv2.namedWindow('1.1 Original Video', cv2.WINDOW_NORMAL)
        cv2.namedWindow('1.1 Subtraction Video', cv2.WINDOW_NORMAL)
        # capture video and get fps
        cap = cv2.VideoCapture('./Q1_Image/bgSub.mp4')
        fps = cap.get(cv2.CAP_PROP_FPS)
        # print("fps: {}".format(fps))
        while(cap.isOpened()):
            i = i + 1
            # get frames of video and convert to gray
            # frame shape: (176, 320), type: ndarray
            ret, frame = cap.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            result = np.zeros_like(gray)
            # process according to frame
            if i < 50:
                # 1st to 49th frame
                frames.append(gray)
            elif i == 50:
                # 50th frame: finalize the per-pixel background model
                frames.append(gray)
                all_frames = np.array(frames)
                # mean and standard deviation
                mean = np.mean(all_frames, axis=0)
                std = np.std(all_frames, axis=0)
                # print("type: {}, shape: {}, mean: {}, std: {}".format(
                #     type(all_frames), all_frames.shape, mean.shape, std.shape))
                # if standard deviation is less then 5, set to 5
                std[std < 5] = 5
            else:
                # after 51th frame: mark foreground where |gray - mean| > 5*std
                # subtract
                diff = np.subtract(gray, mean)
                diff = np.absolute(diff)
                result[diff > 5*std] = 255
                # print("type: {}, shape: {}, diff_type:{}, diff_shape: {}, mean: {}, std: {}".format(
                #     type(gray), gray.shape, type(diff), diff.shape, mean.shape, std.shape))
            cv2.imshow('1.1 Original Video', frame)
            cv2.imshow('1.1 Subtraction Video', result)
            if cv2.waitKey(int(1000/fps)) & 0xFF == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
    def Q2_1(self):
        """Detect circular blobs in the first video frame and mark each with
        a small red rectangle and crosshair."""
        cv2.namedWindow('2.1 Preprocessing Video', cv2.WINDOW_NORMAL)
        # Set up the detector with parameters
        params = cv2.SimpleBlobDetector_Params()
        params.minThreshold = 10
        params.maxThreshold = 200
        params.filterByArea = True
        params.minArea = 35
        params.filterByCircularity = True
        params.minCircularity = 0.8
        params.maxCircularity = 0.9
        params.filterByConvexity = True
        params.minConvexity = 0.5
        params.filterByInertia = True
        params.minInertiaRatio = 0.5
        detector = cv2.SimpleBlobDetector_create(params)
        # capture video
        cap = cv2.VideoCapture('./Q2_Image/opticalFlow.mp4')
        # get 1st frame of video
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect blobs
        keypoints = detector.detect(gray)
        # Draw rectangle and line (11x11 box plus crosshair at each keypoint)
        for kp in keypoints:
            x, y = (kp.pt)
            x = int(x)
            y = int(y)
            frame = cv2.rectangle(frame, (x-5, y-5), (x+5, y+5), (0, 0, 255), 1)
            frame = cv2.line(frame, (x, y-5), (x, y+5), (0, 0, 255), 1)
            frame = cv2.line(frame, (x-5, y), (x+5, y), (0, 0, 255), 1)
        cv2.imshow('2.1 Preprocessing Video', frame)
        cv2.waitKey(0)
        cap.release()
        cv2.destroyAllWindows()
    def Q2_2(self):
        """Track the blobs detected in the first frame across the video with
        Lucas-Kanade optical flow, drawing the trajectories."""
        cv2.namedWindow('2.2 Video tracking', cv2.WINDOW_NORMAL)
        lk_params = dict(
            winSize = (15,15),
            maxLevel = 2,
            criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        # Create some random colors
        color = np.random.randint(0,255,(100,3))
        # Set up the detector with parameters
        params = cv2.SimpleBlobDetector_Params()
        params.filterByArea = True
        params.minArea = 35
        params.filterByCircularity = True
        params.minCircularity = 0.8
        params.maxCircularity = 0.9
        params.filterByConvexity = True
        params.minConvexity = 0.5
        params.filterByInertia = True
        params.minInertiaRatio = 0.5
        detector = cv2.SimpleBlobDetector_create(params)
        # capture video and get fps
        cap = cv2.VideoCapture('./Q2_Image/opticalFlow.mp4')
        fps = cap.get(cv2.CAP_PROP_FPS)
        # get 1st frame of video
        ret, frame = cap.read()
        gray_1 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Detect blobs
        keypoints = detector.detect(gray_1)
        p0 = np.array([[[kp.pt[0], kp.pt[1]]] for kp in keypoints]).astype(np.float32)
        mask = np.zeros_like(frame)
        while(cap.isOpened()):
            # get frames of video and convert to gray
            # frame shape: (176, 320), type: ndarray
            ret, frame = cap.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p1, st, err = cv2.calcOpticalFlowPyrLK(gray_1, gray, p0, None, **lk_params)
            # Select good points
            good_new = p1[st==1]
            good_old = p0[st==1]
            # draw the tracks
            for i,(new,old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                # NOTE(review): a,b,c,d are floats here; recent OpenCV
                # versions require integer point coordinates — confirm the
                # cv2 version this targets accepts floats.
                mask = cv2.line(mask, (a, b), (c, d), (0, 0, 255), 2)
                frame = cv2.circle(frame, (a, b), 3, (0, 0, 255), -1)
            result = cv2.add(frame, mask)
            cv2.imshow('2.2 Video tracking', result)
            if cv2.waitKey(int(1000/fps)) & 0xFF == ord('q'):
                break
            # Current frame/points become the reference for the next step.
            gray_1 = gray.copy()
            p0 = good_new.reshape(-1, 1, 2)
        cap.release()
        cv2.destroyAllWindows()
    def Q3_1(self):
        """Warp a still image onto the quad spanned by four ArUco markers in
        each video frame and write the composited video to disk."""
        cv2.namedWindow('3.1 Perspective Transform', cv2.WINDOW_NORMAL)
        # read image
        im_src = cv2.imread("./Q3_Image/rl.jpg")
        size = im_src.shape
        # Source quad = full extent of the overlay image.
        pts_src = np.array([
            [0, 0],
            [size[1], 0],
            [size[1], size[0]],
            [0, size[0]]
        ], dtype=float)
        # capture video and get fps
        cap = cv2.VideoCapture('./Q3_Image/test4perspective.mp4')
        fps = cap.get(cv2.CAP_PROP_FPS)
        # get 1st frame of video
        ret, frame = cap.read()
        # Define the codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter('./Q3_Result/3_1_result.mp4', fourcc, 20.0, (frame.shape[1], frame.shape[0]))
        while(cap.isOpened()):
            # get frames of video and convert to gray
            ret, frame = cap.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # reading the four code and get its idx and corner
            aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
            arucoParameters = aruco.DetectorParameters_create()
            corners, ids, rejectedImgPoints = aruco.detectMarkers(
                gray,
                aruco_dict,
                parameters = arucoParameters
            )
            # doesn't have four corners: skip frames with partial detections
            if len(corners) != 4:
                continue
            # have four corners
            if np.all(ids != None):
                # index of each markers (IDs 23/25/30/33 mark the quad)
                idx_23 = np.where(ids==23)[0][0]
                idx_25 = np.where(ids==25)[0][0]
                idx_30 = np.where(ids==30)[0][0]
                idx_33 = np.where(ids==33)[0][0]
                # get four point location
                p1 = (corners[idx_25][0][1][0], corners[idx_25][0][1][1])
                p2 = (corners[idx_33][0][2][0], corners[idx_33][0][2][1])
                p3 = (corners[idx_30][0][0][0], corners[idx_30][0][0][1])
                p4 = (corners[idx_23][0][0][0], corners[idx_23][0][0][1])
                # calculate distance and scale the point location
                # (pad the quad outward by 2% of the p1-p2 diagonal)
                distance = np.linalg.norm(np.subtract(p1, p2))
                scaling = round(0.02 * distance)
                pts_dst = np.array([
                    [p1[0] - scaling, p1[1] - scaling],
                    [p2[0] + scaling, p2[1] - scaling],
                    [p3[0] + scaling, p3[1] + scaling],
                    [p4[0] - scaling, p4[1] + scaling]
                ])
                # frame image
                im_dst = frame
                # find homography and warp perspective
                h, status = cv2.findHomography(pts_src, pts_dst)
                temp = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]))
                # draw: black out the quad, then add the warped overlay
                cv2.fillConvexPoly(im_dst, pts_dst.astype(int), 0, 16)
                im_dst = im_dst + temp
                out.write(im_dst)
                cv2.imshow('3.1 Perspective Transform', im_dst)
            if cv2.waitKey(int(1000/fps)) & 0xFF == ord('q'):
                break
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    def norm(self, img):
        """Min-max normalize img to [0, 255] and cast to uint8.
        NOTE(review): currently unused (only referenced from commented-out
        code in reconstruction)."""
        img = img.copy()
        img -= np.min(img)
        img /= np.max(img)
        img *= 255.
        return np.uint8(img)
    def reconstruction(self, img):
        """Reconstruct a BGR image from its first 25 PCA components,
        applied to each color channel independently; returns uint8."""
        # split 3 channels
        b, g, r = cv2.split(img)
        # PCA
        pca = PCA(25)
        # b
        # b_norm = normalize(b)
        lower_dimension_b = pca.fit_transform(b)
        approximation_b = pca.inverse_transform(lower_dimension_b)
        # g
        # g_norm = normalize(g)
        lower_dimension_g = pca.fit_transform(g)
        approximation_g = pca.inverse_transform(lower_dimension_g)
        # r
        # r_norm = normalize(r)
        lower_dimension_r = pca.fit_transform(r)
        approximation_r = pca.inverse_transform(lower_dimension_r)
        # clip to the valid 8-bit range before the uint8 cast
        clip_b = np.clip(approximation_b, a_min = 0, a_max = 255)
        clip_g = np.clip(approximation_g, a_min = 0, a_max = 255)
        clip_r = np.clip(approximation_r, a_min = 0, a_max = 255)
        # merge
        n_img = (cv2.merge([clip_b, clip_g, clip_r])).astype(np.uint8)
        # n_img = (np.dstack((approximation_b, approximation_g, approximation_r))).astype(np.uint8)
        # n_img = (cv2.merge([approximation_b, approximation_g, approximation_r])).astype(int)
        # n_img = cv2.normalize(n_img, None, 0, 255, cv2.NORM_MINMAX)
        # np_img = n_img.astype(int)
        # n_img = norm(np.float32(n_img))
        '''
        X = img.reshape((100, -1))
        pca = PCA(25)
        lower_dimension = pca.fit_transform(X)
        approximation = pca.inverse_transform(lower_dimension)
        approximation = approximation.reshape(100, 100, 3)
        n_img = norm_image(approximation)
        '''
        return n_img
    def Q4_1(self):
        """Show 34 images and their PCA reconstructions in a 4x17 grid
        (rows: originals 1-17, reconstructions 1-17, originals 18-34,
        reconstructions 18-34)."""
        fig = plt.figure(figsize=(18, 4))
        for idx in range(1, 18, 1):
            # ====== i ======
            # [:, :, ::-1] converts BGR (cv2) to RGB (matplotlib)
            img = cv2.imread('./Q4_Image/'+str(idx)+'.jpg')[:, :, ::-1]
            n_img = self.reconstruction(img)
            # plot
            # origin image
            plt.subplot(4, 17, idx)
            plt.xticks([])
            plt.yticks([])
            plt.imshow(img)
            # reconstruction image
            plt.subplot(4, 17, idx + 17)
            plt.xticks([])
            plt.yticks([])
            plt.imshow(n_img)
            # ===============
            # ====== i+17 ======
            img = cv2.imread('./Q4_Image/'+str(idx + 17)+'.jpg')[:, :, ::-1]
            n_img = self.reconstruction(img)
            # plot
            # origin image
            plt.subplot(4, 17, idx + 34)
            plt.xticks([])
            plt.yticks([])
            plt.imshow(img)
            # reconstruction image
            plt.subplot(4, 17, idx + 51)
            plt.xticks([])
            plt.yticks([])
            plt.imshow(n_img)
            # ==================
        fig.text(0, 0.9, 'Original', va='center', rotation='vertical')
        fig.text(0, 0.65, 'Reconstruction', va='center', rotation='vertical')
        fig.text(0, 0.4, 'Original', va='center', rotation='vertical')
        fig.text(0, 0.15, 'Reconstruction', va='center', rotation='vertical')
        plt.tight_layout(pad=0.5)
        plt.show()
    def Q4_2(self):
        """Print the grayscale reconstruction error for each of the 34
        images."""
        for idx in range(1, 35, 1):
            img = cv2.imread('./Q4_Image/'+str(idx)+'.jpg')
            n_img = self.reconstruction(img)
            # convert to gray
            img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            n_img_gray = cv2.cvtColor(n_img, cv2.COLOR_BGR2GRAY)
            n_img_gray = cv2.normalize(n_img_gray, None, 0, 255, cv2.NORM_MINMAX)
            # NOTE(review): uint8 subtraction wraps around before abs();
            # cv2.absdiff or a cast to int may be intended — confirm.
            error = np.sum(np.absolute(img_gray-n_img_gray))
            print('{}: {}'.format(idx, error))
if __name__ == '__main__':
    # QApplication must exist before any widget is created.
    app = QApplication(sys.argv)
    # Fixed: PyMainWindow.__init__ already calls setupUi(self), so the
    # original's extra bare QMainWindow plus a second setupUi(window) call
    # rebuilt every widget a second time on a window that was never shown.
    ui = PyMainWindow()
    ui.show()
    sys.exit(app.exec_())
| true
|
0b5e283f629bdcf2a174a465bf8b04c87524976f
|
Python
|
Erniess/pythontutor
|
/Занятие 4 - Цикл for /Практика/Сумма десяти чисел.py
|
UTF-8
| 308
| 3.390625
| 3
|
[] |
no_license
|
# Task: "Sum of ten numbers"
# Statement:
# Ten integers are given (one per line). Compute their sum. Write a program
# that uses as few variables as possible.
print(sum([int(input()) for _ in range(10)]))
| true
|
f0aaaf75c316fd0cc8cfbac06dbeba2f31df5f42
|
Python
|
Cougar/pyxy
|
/test/test_xy.py
|
UTF-8
| 2,834
| 2.921875
| 3
|
[] |
no_license
|
import unittest
from xy import Area, XY
class AreaTests(unittest.TestCase):
    """Unit tests for the Area rectangle helper."""

    def test_area(self):
        # Constructor should normalise coordinates so min <= max
        # regardless of the argument order.
        ordered = Area(-1, -2, 1, 2, 'name1')
        self.assertEqual(ordered.xmin, -1)
        self.assertEqual(ordered.ymin, -2)
        self.assertEqual(ordered.xmax, 1)
        self.assertEqual(ordered.ymax, 2)
        self.assertEqual(ordered.name, 'name1')
        flipped = Area(10, 20, -10, -20, 'name2')
        self.assertEqual(flipped.xmin, -10)
        self.assertEqual(flipped.ymin, -20)
        self.assertEqual(flipped.xmax, 10)
        self.assertEqual(flipped.ymax, 20)
        self.assertEqual(flipped.name, 'name2')

    def test_area_thin(self):
        # Zero-width and zero-height rectangles are rejected.
        for args in ((0, 1, 0, 2), (1, 0, 2, 0)):
            self.assertRaises(Exception, Area, *args, None)

    def test_area_exact(self):
        same_a = Area(-10, -10, 10, 10, 'name1')
        same_b = Area(-10, -10, 10, 10, 'name2')
        bigger = Area(-15, -15, 15, 15, 'name2')
        self.assertTrue(same_a.is_exact(same_b))
        self.assertTrue(same_b.is_exact(same_a))
        self.assertFalse(same_a.is_exact(bigger))
        self.assertFalse(bigger.is_exact(same_a))

    def test_area_iside_outside(self):
        # (method name typo kept: unittest discovers tests by name)
        middle = Area(-10, -10, 10, 10, 'name1')
        inner = Area(-5, -5, 5, 5, 'name2')
        outer = Area(-15, -15, 15, 15, 'name2')
        self.assertTrue(inner.is_inside(middle))
        self.assertFalse(outer.is_inside(middle))
        self.assertFalse(inner.is_around(middle))
        self.assertTrue(outer.is_around(middle))

    def test_area_no_overlap(self):
        # Touching at a corner does not count as overlap.
        base = Area(-10, -10, 10, 10, 'name1')
        corner = Area(-15, -15, -10, -10, 'name2')
        self.assertFalse(base.is_overlap(corner))
        self.assertFalse(corner.is_overlap(base))
        # Sharing an edge does not count as overlap either.
        base = Area(-10, -10, 10, 10, 'name1')
        edge = Area(-15, -10, -10, 0, 'name2')
        self.assertFalse(base.is_overlap(edge))
        self.assertFalse(edge.is_overlap(base))

    def test_area_overlap(self):
        base = Area(-10, -10, 10, 10, 'name1')
        intruder = Area(-15, -15, -9, -9, 'name2')
        self.assertTrue(base.is_overlap(intruder))
        self.assertTrue(intruder.is_overlap(base))
class XYTests(unittest.TestCase):
    """Unit tests for the XY map container."""

    def setUp(self):
        # A fresh 200x200 map centred on the origin for every test.
        self.m = XY(-100, -100, 100, 100)

    def test_empty_map(self):
        self.assertEqual(len(self.m.areas), 1)
        self.assertEqual(self.m.get_area_count(), (0, 1))
        self.assertEqual(self.m.find_area(0, 0).name, None)

    def test_one_area_replace(self):
        # An area covering the whole map replaces the single unnamed area.
        self.m.add_area(-100, -100, 100, 100, 'test')
        self.assertEqual(len(self.m.areas), 1)
        self.assertEqual(self.m.get_area_count(), (1, 0))
        self.assertEqual(self.m.find_area(0, 0).name, 'test')

    def test_one_area(self):
        # Consistency fix: use the shared fixture from setUp instead of
        # building an identical local XY(-100, -100, 100, 100) as before.
        self.m.add_area(10, 20, -30, -40, 'test')
        self.assertEqual(len(self.m.areas), 5)
        self.assertEqual(self.m.get_area_count(), (1, 4))
        self.assertEqual(self.m.find_area(0, 0).name, 'test')
        self.assertEqual(self.m.find_area(20, 10).name, None)

    def test_area_overlaps(self):
        # Adding any area overlapping an existing named area must raise.
        self.m.add_area(-20, -20, 20, 20, 'test')
        self.assertRaises(Exception, self.m.add_area, -10, -10, 10, 10, 'test2')
        self.assertRaises(Exception, self.m.add_area, -30, -30, 30, 30, 'test3')
        self.assertRaises(Exception, self.m.add_area, -30, -30, 10, 10, 'test4')
| true
|
b9b1162c9cf6e3d52b1d0f0ce2e25ee2a91a6e6a
|
Python
|
DHaythem/Competitive-Programming-Solutions
|
/Codeforces/1200/A. IQ Test.py
|
UTF-8
| 611
| 3.09375
| 3
|
[] |
no_license
|
#https://codeforces.com/problemset/problem/287/A
# The board is 4x4. The answer is YES iff some 2x2 square contains at least
# three equal cells ('#' or '.'), since then repainting at most one cell
# makes that square uniform.
#
# Fixed: the original duplicated the same scanning loop three times (for
# row pairs a-b, b-c and c-d) with a manual `k` found-flag; one 3x3 scan
# over all 2x2 squares is equivalent and prints exactly one verdict.
rows = [input() for _ in range(4)]
found = False
for r in range(3):
    for c in range(3):
        square = [rows[r][c], rows[r][c + 1], rows[r + 1][c], rows[r + 1][c + 1]]
        if square.count('#') >= 3 or square.count('.') >= 3:
            found = True
print('YES' if found else 'NO')
| true
|
78021570f9c7c95ee3401495603322d711ad340b
|
Python
|
furixturi/CTCI
|
/Round 3/01 arrays and strings/05-oneAway.py
|
UTF-8
| 1,015
| 3.8125
| 4
|
[] |
no_license
|
# One Away: There are three types of edits that can be performed on
# strings: insert a character, remove a character, or replace a
# character. Given two strings, write a function to check if they are
# one edit (or zero edits) away.
def oneAway(s1, s2):
    """Return True if s1 and s2 are zero or one edit apart.

    An edit is inserting, removing or replacing a single character.
    """
    lenDiff = abs(len(s1) - len(s2))
    if lenDiff == 0:
        return oneReplaceAway(s1, s2)
    if lenDiff == 1:
        # Order the pair so the shorter string comes first.
        shorter, longer = (s1, s2) if len(s1) < len(s2) else (s2, s1)
        return oneInsertAway(shorter, longer)
    # Length difference >= 2 always needs more than one edit.
    return False

def oneReplaceAway(s1, s2):
    """Equal-length strings: True iff they differ in at most one position."""
    diff = 0
    for a, b in zip(s1, s2):
        if a != b:
            diff += 1
            if diff > 1:
                return False
    return True

def oneInsertAway(ss, sl):
    """len(sl) == len(ss) + 1: True iff inserting one char into ss gives sl.

    Fixes an IndexError in the original when the extra character sits at
    the end of sl (e.g. 'ab' vs 'abc'): after the pointers diverged, the
    old code re-indexed ss/sl without checking the bounds.
    """
    i1 = i2 = 0
    while i1 < len(ss) and i2 < len(sl):
        if ss[i1] != sl[i2]:
            # A second divergence means more than one insert is needed.
            if i1 != i2:
                return False
            i2 += 1  # skip the inserted character in the longer string
        else:
            i1 += 1
            i2 += 1
    return True
if __name__ == '__main__':
    # Exercise the same four sample pairs as before, in the same order.
    cases = [('aple', 'apple'), ('pale', 'ple'), ('pale', 'bae'), ('apple', 'appde')]
    for left, right in cases:
        print(oneAway(left, right))
| true
|
d99e613c6f6000c123b63eecab83e3b6fbcc885a
|
Python
|
zheugene/pdu
|
/test_xpt2046.py
|
UTF-8
| 2,284
| 2.640625
| 3
|
[] |
no_license
|
# Copyright 2012 Matthew Lowden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Python proof of concept for interfacing an XPT2046 Touch Screen Controller
# to a Raspberry Pi using SPI (via bit-banged GPIO).
# This sample uses the SPI pins on the Raspberry Pi Expansion header.
# (With the intention that no wiring changes should be required to use SPI
# drivers, rather than bit-banged GPIO).
# More information on Raspberry Pi GPIO can be found here:
# http://elinux.org/RPi_Low-level_peripherals
# This sample code is dependent on the RPi.GPIO library available here:
# http://pypi.python.org/pypi/RPi.GPIO/
import time
from sys import stdout
from XPT2046 import XPT2046
import Adafruit_GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
# SPI wiring: hardware port 0, chip-select 1; no interrupt line is used
# (the loop polls instead).
IRQ = None
SPI_PORT = 0
SPI_DEVICE = 1
try:
    touch = XPT2046(irq=IRQ, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=500000))
    print ("Start")
    # Poll every channel of the controller forever, overwriting one status
    # line on the terminal ('\r' + no newline) per sample.
    while True:
        startTime = time.time()
        x = touch.readX()
        y = touch.readY()
        z1 = touch.readZ1()
        z2 = touch.readZ2()
        pressure = round(touch.readTouchPressure(),2)
        temp0 = touch.readTemperature0()
        temp1 = touch.readTemperature1()
        vbatt = touch.readBatteryVoltage()
        aux = touch.readAuxiliary()
        duration = round((time.time() - startTime) * 1000, 2)
        print ("\rX: %s  " % x + " Y: %s" % y + " Z1: %s" % z1 + " Z2: %s" % z2 + " Pressure: %s" % pressure + " Temp0: %s" % temp0 + " Temp1: %s" % temp1 + " VBatt: %s" % vbatt + " Aux: %s" % aux + " SampleTime: %s ms" % duration +"   ")
        # print ("\rtemp0: %s " % temp0 + " %s" % temp1)
        stdout.flush ()
except KeyboardInterrupt:
    # Ctrl-C is the normal way to stop the polling loop.
    print ("keyInt")
    print ("\n")
except Exception:
    # Anything else is unexpected: report and re-raise with the traceback.
    print ("except")
    raise
| true
|
7be0f9da17498364e2dca6bb8deeeeab61bd2ceb
|
Python
|
n2dah/MATH-2305-Final-Project
|
/functions.py
|
UTF-8
| 3,528
| 3.453125
| 3
|
[
"Unlicense"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 6 20:55:28 2020
@authors: Jeremiah Neuneker, Julia Ramirez, Warda Qadeer, Noah V.
"""
import networkx as nx
import os
def V(graph):
    """Return the set of vertices (nodes) of *graph*.

    Parameters
    ----------
    graph : NetworkX graph

    Returns
    -------
    set
        The graph's nodes as a set.
    """
    return set(graph.nodes())
def E(graph):
    """Return the set of edges of *graph*.

    Parameters
    ----------
    graph : NetworkX graph

    Returns
    -------
    set
        The graph's edges as a set of (u, v) tuples.
    """
    return set(graph.edges())
def prims_initialize(graph , v):
    """Create the initial single-vertex tree for Prim's algorithm.

    Parameters
    ----------
    graph : NetworkX graph
    v : starting vertex/node

    Returns
    -------
    networkx.Graph containing only `v`, or the string
    'Error vertex not found' when `v` is not a vertex of `graph`.
    """
    if v not in V(graph):
        # NOTE(review): returns an error *string* instead of raising, so
        # callers must check the return type explicitly.
        return 'Error vertex not found'
    T = nx.Graph()
    T.add_node(v)
    return T
def is_spanning(graph, subgraph):
    """Report whether *subgraph* touches every vertex of *graph*.

    A subgraph is spanning exactly when its vertex set equals the
    full graph's vertex set.
    """
    full_vertices = V(graph)
    covered_vertices = V(subgraph)
    return covered_vertices == full_vertices
def cost(G, e):
    """Return the weight of edge *e* in graph *G*.

    Parameters
    ----------
    G : NetworkX graph (any nested mapping supporting G[u][v]['weight'])
    e : edge given as a (u, v) pair

    Returns
    -------
    The edge's 'weight' attribute.
    """
    u, v = e
    return G[u][v]['weight']
def possible_edges(G, T):
    """List the edges of G that could extend the tree T.

    Parameters
    ----------
    G : NetworkX graph
    T : current partial tree (NetworkX graph)

    Returns
    -------
    list of edges incident to T with at least one endpoint outside T.
    """
    tree_nodes = V(T)
    frontier = []
    for edge in G.edges(tree_nodes):
        # Discard edges whose endpoints are both already in the tree.
        if not (edge[0] in tree_nodes and edge[1] in tree_nodes):
            frontier.append(edge)
    return frontier
def min_cost_edge(G, T):
    """Return the cheapest edge that can grow the tree T.

    Parameters
    ----------
    G : NetworkX graph
    T : current partial tree (NetworkX graph)

    Returns
    -------
    The lowest-weight candidate edge. Equivalent to sorting the candidates
    by weight and taking the first; min() with a key avoids the full sort.
    """
    return min(possible_edges(G, T), key=lambda edge: cost(G, edge))
def FileSelector(directory):
    """Prompt the user for a graph file inside *directory* and return its path.

    Parameters
    ----------
    directory : Directory to look for graphs (must end with a path separator,
        since the result is built by simple concatenation).

    Returns
    -------
    directory + the file name the user entered (with '.txt' appended when
    missing).
    """
    entries = os.listdir(directory)
    entries = [x.upper() for x in entries]
    print(entries)
    inputFile = input('Please enter the desired graph file name to solve: ')
    # Bug fix: the suffix test is now case-insensitive, so entering
    # 'GRAPH.TXT' no longer becomes 'GRAPH.TXT.txt'.
    if not inputFile.lower().endswith('.txt'):
        inputFile += '.txt'
    while inputFile.upper() not in entries:
        print(entries)
        inputFile = input('The graph file was not found. Please enter the desired graph file name to solve: ')
        if not inputFile.lower().endswith('.txt'):
            inputFile += '.txt'
    return directory + inputFile
def InputStartingPoint(G):
    """Prompt until the user enters a numeric vertex present in *G*.

    Parameters
    ----------
    G : NetworkX graph

    Returns
    -------
    int
        The vertex the user selected.
    """
    prompt = 'Please enter a vertex listed for the graph: '
    while True:
        print(G.nodes)
        entry = input(prompt)
        if entry.isnumeric() and int(entry) in G.nodes:
            return int(entry)
        # Any non-numeric or unknown vertex re-enters the loop.
        prompt = 'Invalid entry. Please enter a valid vertex listed for the graph: '
| true
|
00c5d33b468e64ce07c0bf3ce563650304ebecb7
|
Python
|
AndreyPankov89/python-glo
|
/lesson9/task5.py
|
UTF-8
| 81
| 2.96875
| 3
|
[] |
no_license
|
# Print a 7-row by 6-column grid of the character '2'; each row keeps the
# trailing space that the original end=' ' printing produced.
for _ in range(7):
    print('2 ' * 6)
| true
|
1a65736239eb4c1b19bdb49ebfdfa0574a10f342
|
Python
|
Hash-It-Out/Fake-News-Suspector
|
/wce/scrapeUrl.py
|
UTF-8
| 1,063
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
from bs4 import BeautifulSoup
from newspaper import Article
import csv
class url_query():
    """Fetch a news article by URL and print its title.

    NOTE(review): url_news is defined without `self` and is used like a
    plain function through the class namespace; it likely should be a
    @staticmethod — confirm intent.
    """
    def url_news(sstring):
        # Download and parse the article; on any failure the exception is
        # printed and swallowed (best-effort scraping).
        url= sstring
        print(url)
        # data[0] = placeholder label, data[1] = article title
        data=[None]*2
        try:
            article = Article(url)
            article.download()
            article.html
            article.parse()
            data[0]='100'
            data[1]=article.title
            print(*data)
            # with open('real4.csv','a') as w:
            #     writer = csv.writer(w)
            #     writer.writerow(data)
            #     w.close()
        except Exception as e:
            print(e)
            pass
# Bug fix: at module level the bare name `url_news` is undefined (it lives
# in the class namespace), so the original call raised NameError; reach it
# through the class instead.
url_query.url_news('https://www.yahoo.com/news/gop-lawmaker-knocks-trump-putin-call-refuses-distance-president-135204872.html')
| true
|
7eb8bb25546f929a4fdf04c419bc4e5ee3c795f4
|
Python
|
kziolkowska/PyEMMA
|
/pyemma/msm/estimation/dense/tmatrix_sampler_jwrapper_test.py
|
UTF-8
| 2,909
| 2.671875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
'''
Created on Jun 6, 2014
@author: marscher
'''
import unittest
import numpy as np
from tmatrix_sampler_jwrapper import ITransitionMatrixSampler
def assertSampler2x2(sampler, C, nsample, errtol):
    """
    same function as in stallone.test.mc.MarkovModelFactoryTest

    Draws `nsample` 2x2 transition matrices from `sampler` and asserts that
    the sample mean and variance of T[0,1] and T[1,0] match the analytic
    posterior moments implied by count matrix `C` (Dirichlet prior with
    pseudo-count 1), within relative tolerance `errtol`.
    """
    c1 = float(np.sum(C[0]))
    c2 = float(np.sum(C[1]))
    n = C.shape[0]
    samplesT12 = np.empty(nsample)
    samplesT21 = np.empty(nsample)
    # Py3 fix: the original used the Python-2-only builtin `xrange`;
    # `range` behaves identically here.
    for i in range(nsample):
        T = sampler.sample(100)
        samplesT12[i] = T[0, 1]
        samplesT21[i] = T[1, 0]
    # check means
    true_meanT12 = float(C[0, 1] + 1) / float(c1 + n)
    sample_meanT12 = np.mean(samplesT12)
    err_T12 = np.abs(true_meanT12 - sample_meanT12) / true_meanT12
    assert float(err_T12) < errtol
    true_meanT21 = float(C[1, 0] + 1) / float(c2 + n)
    sample_meanT21 = np.mean(samplesT21)
    err_T21 = np.abs(true_meanT21 - sample_meanT21) / true_meanT21
    assert float(err_T21) < errtol
    # check variance
    true_varT12 = true_meanT12 * (1.0 - true_meanT12) / float(c1 + n + 1)
    sample_varT12 = np.var(samplesT12)
    err_varT12 = np.abs(true_varT12 - sample_varT12) / true_varT12
    assert float(err_varT12) < errtol
    true_varT21 = true_meanT21 * (1.0 - true_meanT21) / float(c2 + n + 1)
    sample_varT21 = np.var(samplesT21)
    err_varT21 = np.abs(true_varT21 - sample_varT21) / true_varT21
    assert float(err_varT21) < errtol
class TestJavaTransitionMatrixSampler(unittest.TestCase):
    """
    note this is very slow, since it calls nsample times jni methods.
    """
    def setUp(self):
        # Small 2-state count matrix shared by all tests.
        self.C = np.array([[5, 2 ],
                           [1, 10]])
        self.errtol = 1e-2
        self.nsample = 100000
    @unittest.SkipTest
    def testSamplerRev(self):
        """Reversible sampler: check posterior moments (skipped: slow JNI)."""
        sampler_rev = ITransitionMatrixSampler(self.C, reversible=True)
        assertSampler2x2(sampler_rev, self.C, self.nsample, self.errtol)
    @unittest.SkipTest
    def testSamplerNonRev(self):
        """Non-reversible sampler: check posterior moments (skipped: slow JNI)."""
        sampler_nonrev = ITransitionMatrixSampler(self.C, reversible=False)
        assertSampler2x2(sampler_nonrev, self.C, self.nsample, self.errtol)
    # @unittest.SkipTest
    def testSamplerRevPiFix(self):
        """Reversible sampler with fixed stationary distribution: sampled
        mean must lie within one std-dev of the mean transition matrix."""
        """Mean transition matrix"""
        mP=(self.C+1.0)/(self.C+1.0).sum(axis=1)[:,np.newaxis]
        """Stationary distribution of mean-transition matrix"""
        mu=np.array([6.0/19, 13.0/19])
        sampler_rev_pi = ITransitionMatrixSampler(self.C, reversible=True, mu=mu, Tinit=mP)
        # sampler_rev_pi = ITransitionMatrixSampler(self.C, reversible=True)
        T=np.zeros((self.nsample, 2, 2))
        for i in range(self.nsample):
            T[i,:,:]=sampler_rev_pi.sample(10)
        mT=np.mean(T, axis=0)
        dT=np.sqrt(np.var(T, axis=0))
        self.assertTrue(np.all(np.abs(mT-mP)<dT))
if __name__ == "__main__":
unittest.main()
| true
|
e1684604ac5625757d83f240c02b4297e3056e20
|
Python
|
FredrikBakken/family-tree
|
/scrape/scrape.py
|
UTF-8
| 3,164
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
import re
import time
import random
import requests
from bs4 import BeautifulSoup
def getContents(url):
    """Fetch *url* and return the response body, or None on a non-200 status.

    Sleeps two seconds before every request as crude rate limiting.
    """
    time.sleep(2)  # Avoid DDoS (crude politeness delay between requests)
    # Fix: without a timeout, requests.get can hang the scraper forever on
    # a stalled connection.
    page = requests.get(url, timeout=30)
    if (page.status_code == 200):
        return page.content
    return None  # explicit: non-200 responses yield no content
def website(url):
    """Scrape a Geni profile page and extract the person's data and parents.

    Returns (person_dict, parent_url_list) on success.
    NOTE(review): when the page cannot be fetched this falls through to the
    final else and implicitly returns None — callers that unpack the result
    will crash; confirm intended.
    """
    contents = getContents(url)
    if (contents != None):
        # Parse information table
        soup = BeautifulSoup(contents, 'html.parser')
        table = soup.find('table', {"class": "data_table lengthy"})
        table_rows = table.findAll('tr')
        # Initialize parent variables
        person = {}
        parent_urls = []
        # Loop through each row in table
        for table_row in table_rows:
            if (not table_row.find('td', {"class": "spacer"})):
                head_cells = table_row.findAll('th')
                data_cells = table_row.findAll('td')
                # Some rows has both th and td, others have only td
                if (len(head_cells) + len(data_cells) == 1):
                    person["Name"] = data_cells[0].text.strip().split("\n")[0]
                elif (len(head_cells) + len(data_cells) == 2):
                    if (len(head_cells) > 0):
                        key = head_cells[0].text.strip().replace(":", "")
                        value = data_cells[0].text.strip()
                    else:
                        key = data_cells[0].text.strip().replace(":", "")
                        value = re.sub(' +', ' ', data_cells[1].text.strip().replace('\n', ''))
                    # Parse parents from row related to 'Immediate Family'
                    if (key == 'Immediate Family'):
                        family_entities = data_cells[0].find('p')
                        if (family_entities.text.startswith("Son of") or family_entities.text.startswith("Daughter of")):
                            parents_text = family_entities.text.split("\n")[0]
                            all_links = family_entities.findAll('a')
                            # Remove link text from row related to parents (since it may include the 'and' word)
                            # Removing this may cause a infinite loop
                            for all_link in all_links:
                                parents_text = parents_text.replace(all_link.text, "")
                            # Find the number of parents that exists
                            if ("and" in parents_text):
                                number_of_parents = 2
                            else:
                                number_of_parents = 1
                            # Fetch the links to the parents
                            parent_links = family_entities.findAll('a')
                            for parent_link in parent_links[:number_of_parents]:
                                parent_urls.append(parent_link['href'])
                        else:
                            parent_urls = []
                    # Add each row to dictionary
                    person[key] = value
        # Return results
        return person, parent_urls
    else:
        print("No contents received from Geni...")
| true
|
d9ad6c1c6ca611740773639c414594d305378b4a
|
Python
|
zhou11616/myproject
|
/day1/c.py
|
UTF-8
| 585
| 3.078125
| 3
|
[] |
no_license
|
def ling(n):
    """Print a diamond of '*' characters with 2*n - 1 rows.

    Each star and each padding unit is two characters wide, matching the
    original end=' ' printing (rows keep their trailing space).
    """
    # Top half (including the widest row): row i has n - i leading pads
    # and 2*i - 1 stars.
    for i in range(1, n + 1):
        print("  " * (n - i) + "* " * (2 * i - 1))
    # Bottom half: row i has i leading pads and 2*(n - i) - 1 stars.
    for i in range(1, n):
        print("  " * i + "* " * (2 * (n - i) - 1))
def huan():
    """Print the lower triangle of the 9x9 multiplication table.

    Cell format matches the original exactly: '%dX%d=%-2d ' per product
    plus the extra space the original's end=' ' appended.
    """
    for row in range(1, 10):
        cells = ["%dX%d=%-2d " % (col, row, row * col) for col in range(1, row + 1)]
        print("".join(cell + " " for cell in cells))
if __name__ == '__main__':
    # Demo: print a 9-row diamond; huan() is defined but not invoked here.
    ling(9)
|
98ae47f15e089f31c8abddd6f4acfb4e51807c89
|
Python
|
RohanAnandPandit/graphite
|
/src/string_formatting.py
|
UTF-8
| 11,035
| 3.140625
| 3
|
[] |
no_license
|
from maths.math_constants import *
from maths.math_functions import *
from utils import invert
# Special mathematical constants recognised in user input.
constants = ['π', 'φ']
# Superscript numbers,variables and operator characters
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']  # Regular numbers
super_script = ['⁰', '¹', '²', '³', '⁴', '⁵', '⁶', '⁷', '⁸', '⁹', 'ᵃ', 'ᵇ', 'ᶜ', 'ᵈ', 'ᵉ', 'ˢ',
                'ˣ', 'ʸ', 'ᶻ', 'ᵗ', 'ᵒ', '⁽', '⁾', '⁻', '⁺', '^', 'ᵍ', 'ᵐ', 'ʰ', 'ᶦ', 'ⁿ', '˚', 'ʳ']
variables = ['a', 'b', 'c', 'd', 'e', 't', 'x', 'y', 'z', 'r']  # Regular variables
# Bug fix: the original dict listed the key '8' twice ('8': '⁸' was
# immediately shadowed by '8': '⁹'), so '8' wrongly mapped to '⁹' and '9'
# had no mapping at all; the second entry is now keyed by '9'. The
# duplicate '*' entry ('*': '*' shadowed by '*': '˚') is collapsed to its
# effective value '˚'.
to_super_script = {'0': '⁰', '1': '¹', '2': '²', '3': '³', '4': '⁴', '5': '⁵', '6': '⁶',
                   '7': '⁷', '8': '⁸', '9': '⁹', 'a': 'ᵃ', 'b': 'ᵇ', 'c': 'ᶜ', 'd': 'ᵈ',
                   'o': 'ᵒ', 's': 'ˢ', 'e': 'ᵉ', 'x': 'ˣ', 'y': 'ʸ', 'z': 'ᶻ', 't': 'ᵗ',
                   'g': 'ᵍ', 'm': 'ᵐ', 'h': 'ʰ', 'i': 'ᶦ', 'n': 'ⁿ', 'r': 'ʳ', '(': '⁽',
                   ')': '⁾', '-': '⁻', '/': '/', ' ': ' ', '+': '⁺', '*': '˚',
                   "'": "'", ',': ','}
# One-character internal identifiers for each supported function name.
identifiers = {'cos': '[', 'sin': ']', 'tan': '@', 'sec': '~', 'cosec': '#', 'cot': '$',
               'gamma': ';', 'asin': '%', 'acos': '_', 'atan': '&', 'sqrt': '¬',
               'cbrt': "\\", 'abs': '{', 'sigma': 'Σ', 'integral': '∫', 'fact': '£',
               'mandelbrot': '?', 'derivative': 'α', 'newtonRhapson': 'β', 'C': 'δ'}
characters = ['[', ']', '@', '~', '#', '$', ';', '%', '_', '&', '¬', "\\", '{', '}', 'Σ',
              '∫', '£', '?', 'α', 'β', 'δ']
def syntax_correction(equation, replace_functions=True):
    """Normalise a user-typed equation string into evaluable syntax.

    Successive passes: strip spaces, rewrite superscript/special characters,
    insert fact(...) for '!', map function names to one-char identifiers
    (and back if replace_functions), insert implicit '*' signs, expand the
    constants π/φ, and turn |...| into abs(...).

    NOTE(review): relies on module-level names (`operators`, `functions`,
    `key`, `invert`) supplied by the star imports at the top of the file —
    confirm they are defined there.
    """
    # This section removes all superfluous spaces from the string
    equation = list(equation)
    i = 0
    l = len(equation)
    while i < l:
        char = equation[i]
        if char == ' ':
            del equation[i]
            l -= 1
        else:
            i += 1
    # This section replaces each special character with its syntax representation
    i = 0
    l = len(equation)
    while i < l:
        char = equation[i]
        if i < l - 1:
            # This indicates that the base is being raised to the power of the
            # expression with the superscript brackets
            if char not in super_script and equation[i + 1] == '⁽':
                equation[i + 1] = '**('
        if char in operators + constants + super_script:
            # Characters without a mapping are silently left unchanged.
            try:
                equation[i] = key[char]
            except:
                pass
        i = i + 1
    # This section inserts the factorial function in the correct position
    # whenever the '!' symbol is encountered
    i = 0
    equation = list(''.join(equation))
    l = len(equation)
    while i < l:
        if equation[i] == '!':
            equation[i] = ')'
            if i == 0:
                equation.insert(0, 'fact(')
            else:
                # Walk left to the start of the factorial's operand.
                j = i - 1
                while equation[j] not in operators and j >= 1:
                    j -= 1
                equation.insert(j + 1, 'fact(')
            i += 2
            l += 1
        else:
            i += 1
    # This section replaces each mathematical function with its special
    # identifier
    equation = list(''.join(equation))
    i = 0
    l = len(equation)
    while i < l:
        for a in range(3, 11):
            if i + a < l:
                if ''.join(equation[i:i + a]) in functions:
                    # NOTE(review): `equation` is truncated on the next line
                    # before equation[i:i + a] is re-read below, so that
                    # slice is empty at that point — looks like a bug;
                    # confirm intended behaviour.
                    equation = equation[0:i]
                    equation += [identifiers[''.join(equation[i:i + a])]]
                    equation += equation[i + a: l]
                    l = l - (a - 1)
        i = i + 1
    # This section inserts a '*' sign between expression which have been
    # implicitly multiplied
    equation = list(''.join(equation))
    for i in range(1, len(equation)):
        char = equation[i]
        if char in variables and equation[i - 1] not in operators + characters:
            equation[i] = '*' + equation[i]
        elif char == '(' and equation[i - 1] == ')':
            equation[i] = '*' + equation[i]
        elif char not in variables + operators + ['i'] and equation[i - 1] in variables:
            equation[i] = '*' + equation[i]
        elif ((char not in variables + operators + constants + numbers + ['.'])
              and (equation[i - 1] not in variables + operators + constants + numbers + ['.'])
              or (char not in variables + operators and equation[i - 1] == ')')):
            equation[i] = '*' + equation[i]
    # This section replaces special constants with their variable representation
    equation = list(''.join(equation))
    for i in range(0, len(equation)):
        if equation[i] == 'π':
            if i != 0:
                if equation[i - 1] in operators:
                    equation[i] = 'pi'
                else:
                    # Implicit multiplication, e.g. '2π' -> '2*pi'.
                    equation[i] = '*pi'
            else:
                equation[i] = 'pi'
        elif equation[i] == 'φ':
            if i != 0:
                if equation[i - 1] in operators:
                    equation[i] = 'phi'
                else:
                    equation[i] = '*phi'
            else:
                equation[i] = 'phi'
    # This replaces the identifier characters with the original function
    if replace_functions:
        equation = list(''.join(equation))
        for i in range(len(equation)):
            char = equation[i]
            for function in functions:
                if identifiers[function] == char:
                    equation[i] = function
    # This section inserts the absolute value function whenever the modulus
    # sign is encountered
    i = 0
    equation = list(''.join(equation))
    l = len(equation)
    tracker = True
    for i in range(len(equation)):
        if equation[i] == '|':
            if tracker:
                equation[i] = 'abs('
            else:
                equation[i] = ')'
        elif equation[i] in operators:
            # A tracker has to be maintained to determine whether the modulus
            # sign is closing or opening the function
            # as the open and close modulus signs both are represented by '|'
            # NOTE(review): the tracker flips on *operators*, not on '|';
            # confirm this matches the intended open/close pairing.
            tracker = invert(tracker)
    return ''.join(equation)
# Formats mathematical statements entered into a form
def entry_formatter(entry):
    """Live-format the text of a tkinter Entry widget as the user types.

    Rewrites shorthand ('^', 'sqrt', 'cbrt', 'pi', 'phi', 'sum', 'sigma',
    'inte') into the corresponding mathematical symbols and keeps characters
    typed after a superscript open bracket in superscript.  Mutates *entry*
    in place via its delete/insert/icursor methods; returns None.
    Relies on the module-level tables super_script and to_super_script.
    """
    l = len(entry.get())
    i = 0
    while i < l:
        if i != 0:
            # If the character before the current character is in super_script
            # then the current character should also be in super_script
            if entry.get()[i - 1] in super_script and entry.get()[i] not in super_script:
                s = entry.get()[i]
                entry.delete(i)
                entry.insert(i, to_super_script[s])
            # If the user wants to raise something to a power, '^' is
            # replaced by the superscript open bracket
            if entry.get()[i] == '^' and entry.get()[i - 1] not in super_script:
                entry.delete(i, i + 1)
                entry.insert(i, '⁽')
                entry.icursor(i + 2)
        if i + 4 < len(entry.get()) + 1:  # Ensures there are at least 4 characters left
            # If the user types sqrt it will be replaced by the square-root symbol
            if entry.get()[i:i + 4] == 'sqrt':
                entry.delete(i, i + 4)
                entry.insert(i, '√()')
                entry.icursor(i + 2)
                l = l - 1  # The length of the string is reduced by one
            # If the user types cbrt it will be replaced by the cube-root symbol
            elif entry.get()[i:i + 4] == 'cbrt':
                entry.delete(i, i + 4)
                entry.insert(i, '∛()')
                entry.icursor(i + 2)
                l = l - 1  # The length of the string is reduced by one
        if i + 2 < len(entry.get()) + 1:  # Ensures there are at least 2 characters left
            # If the user types pi it will be replaced by π
            if entry.get()[i:i + 2] == 'pi':
                entry.delete(i, i + 2)
                entry.insert(i, 'π')
                l = l - 1  # The length of the string is reduced by one
        # Ensures there are at least 3 characters left
        if i + 3 < len(entry.get()) + 1:
            # If the user types 'sum' it will be replaced by the greek letter sigma
            if entry.get()[i:i + 3] == 'sum':
                entry.delete(i, i + 3)
                entry.insert(i, 'Σ()')
                entry.icursor(i + 2)
                # l = l - 0  # The length of the string is unchanged ('sum' -> 'Σ()')
            # If the user types phi it will be replaced by the greek letter phi
            if entry.get()[i:i + 3] == 'phi':
                entry.delete(i, i + 3)
                entry.insert(i, 'φ')
                l = l - 2  # The length of the string is reduced by two
        if i + 5 < len(entry.get()) + 1:  # Ensures there are at least 5 characters left
            # If the user types sigma it will be replaced by the greek letter sigma
            if entry.get()[i:i + 5] == 'sigma':
                entry.delete(i, i + 5)
                entry.insert(i, 'Σ()')
                entry.icursor(i + 2)
                l = l - 2  # The length of the string is reduced by two
        if i + 4 < len(entry.get()) + 1:  # Ensures there are at least 4 characters left
            # If the user types inte it will be replaced by the integral symbol
            if entry.get()[i:i + 4] == 'inte':
                entry.delete(i, i + 4)
                entry.insert(i, '∫()')
                entry.icursor(i + 2)
                l = l - 1  # The length of the string is reduced by one
        i += 1
    return
def substitute_values(obj, equation):
    """Return *equation* with variables a/b/c/d replaced by the matching
    attribute values of *obj*, rounded to 3 decimal places.

    Function names are temporarily collapsed to their single-character
    identifier tokens so their letters are not mistaken for variables,
    then expanded back before returning.  Relies on the module-level
    tables functions, identifiers, key and numbers.
    """
    i = 0
    equation = list(equation)
    l = len(equation)
    # BUG FIX: as in syntax_correction, the original truncated the list
    # (`equation = equation[0:i]`) *before* re-reading equation[i:i + a],
    # so identifiers[''] raised KeyError whenever a function name occurred.
    while i < l:
        for a in range(3, 6):
            if i + a < l:
                name = ''.join(equation[i:i + a])
                if name in functions:
                    equation = equation[:i] + [identifiers[name]] + equation[i + a:l]
                    l = l - (a - 1)
        i = i + 1
    equation = list(equation)
    for i in range(0, len(equation)):
        char = equation[i]
        if char in ['a', 'b', 'c', 'd']:
            # getattr is the safe, equivalent spelling of eval('obj.' + char).
            equation[i] = str(round(getattr(obj, char), 3))
        elif char in numbers:
            pass
        else:
            # `char` may have no entry in `key`; ignore it in that case.
            try:
                if key[char] in ['a', 'b', 'c', 'd']:
                    equation[i] = str(round(getattr(obj, key[char]), 3))
            except KeyError:
                pass
            try:
                if key[char] in numbers:
                    equation[i] = str(key[char])
            except KeyError:
                pass
    # Expand the single-character identifier tokens back into function names.
    equation = list(''.join(equation))
    for i in range(len(equation)):
        char = equation[i]
        for function in functions:
            if identifiers[function] == char:
                equation[i] = function
    return ''.join(equation)
| true
|
6789c47dd25c8726ed4a62cbf39d57c8e7202513
|
Python
|
arunajasti/python_programs
|
/TypesOfVariablesInOOPS/Static Variables/Employee1.py
|
UTF-8
| 1,367
| 4
| 4
|
[] |
no_license
|
class Employee1:
    """Demonstrates the different places a static (class-level) variable can
    be declared: in the class body, in the constructor, in an instance
    method, in a @classmethod and in a @staticmethod.

    NOTE(review): variables declared inside a method only exist after that
    method has actually been called at least once.
    """
    company = 'Amazon' #declare static variable outside method/constructor
    def __init__(self,name):
        self.name = name  # instance variable (lives in the instance __dict__)
        Employee1.empLocation = 'DENMARK'# declare static variable inside constructor using className
    def showDetails(self):
        Employee1.salary = 10000 #declare static variable inside Instance Method using className
    @classmethod
    def companyLocation(cls):
        Employee1.comLocation = 'USA'#declare static variable inside @classmethod using className or cls
        print("call static variables in classmethod by using cls or className:",cls.company)
        #or
        #cls.comLocation = 'USA'
    @staticmethod
    def employeeId():
        Employee1.empId = 3003 #declare static variable inside @staticmethod using className
e1 = Employee1('aruna')
# Only instance variables appear in the instance __dict__.
print(e1.__dict__)
print("call static variables outside class using className or obj reference:",Employee1.empLocation)
print()
Employee1.companyLocation() #calling class method
Employee1.employeeId() #calling static method
e1.showDetails()
print(Employee1.__dict__) #using Employee1 (the class name) to retrieve static variables
print()
print("call static variables outside class using className or obj reference:",Employee1.salary)
print()
# Class and static methods can also be called through an instance.
e1.companyLocation()
e1.employeeId()
print(Employee1.__dict__)
| true
|
5806ea7b2e853f50ead7a72cb4eacebd259122c3
|
Python
|
aseemm/crawlers
|
/src/example.py
|
UTF-8
| 3,543
| 2.75
| 3
|
[] |
no_license
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
class test():
    """Small Selenium demo crawler using headless Chrome.

    Each method drives one site: a Hacker News login attempt, a Google
    search, and scraping the pinned repositories of a GitHub profile.
    FIX: the find_element_by_* / find_elements_by_* helpers were deprecated
    in Selenium 3 and removed in Selenium 4; all lookups now use
    find_element(By.<strategy>, ...) (By is imported at the top of the file).
    """
    def __init__(self):
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("--disable-dev-shm-usage")
        chrome_options.add_argument("--disable-gpu")
        self.driver = webdriver.Chrome(options=chrome_options)

    def login_hackernews(self):
        """Try to log in to Hacker News with dummy credentials and report
        success by probing for the logout link."""
        self.driver.set_window_size(1280, 720)
        self.driver.get("https://news.ycombinator.com/login")
        # NOTE(review): the bare "//input" assumes the first input on the
        # page is the username field - confirm against the page markup.
        self.driver.find_element(By.XPATH, "//input").send_keys('hello')
        self.driver.find_element(By.XPATH, "//input[@type='password']").send_keys('world')
        self.driver.find_element(By.XPATH, "//input[@value='login']").click()
        try:
            self.driver.find_element(By.ID, "logout")
            print('Successfully logged in')
        except NoSuchElementException:
            print('Incorrect login/password')
        # take a screenshot
        self.driver.save_screenshot('screenshot.png')

    def search_google(self):
        """Search Google for 'cat' and switch to the image results."""
        self.driver.set_window_size(1280, 720)
        self.driver.get("https://www.google.com/?gws_rd=ssl")
        search_box = self.driver.find_element(By.NAME, "q")
        search_box.click()
        search_box.clear()
        search_box.send_keys("cat")
        self.driver.find_element(By.ID, "tsf").submit()
        self.driver.find_element(By.LINK_TEXT, "Images").click()

    def access_github(self):
        """Scrape names and languages of the pinned repos of one profile."""
        self.driver.set_window_size(1280, 720)
        self.driver.get("https://github.com/TheDancerCodes")
        print(self.driver.title)
        print(self.driver.current_url)
        # Titles of the pinned repositories.
        titles_element = self.driver.find_elements(By.XPATH, "//a[@class='text-bold flex-auto min-width-0 ']")
        titles = [x.text for x in titles_element]
        print('titles:')
        print(titles, '\n')
        language_element = self.driver.find_elements(By.XPATH, "//p[@class='mb-0 f6 text-gray']")
        languages = [x.text for x in language_element]
        print("languages:")
        print(languages, '\n')
        print("RepoName : Language")
        for title, language in zip(titles, languages):
            print(title + ": " + language)
automate = test()
# Only the GitHub scrape runs by default; uncomment the others to try them.
# automate.search_google()
# automate.login_hackernews()
automate.access_github()
| true
|
b2fa8cb8fa9a9159c9dc2150cdcfd5aae040a4b7
|
Python
|
xinyuewang1/hackerrankTests
|
/makingAnagrams.py
|
UTF-8
| 634
| 3.125
| 3
|
[] |
no_license
|
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
# Complete the makeAnagram function below.
def makeAnagram(a, b):
    """Return the minimum number of character deletions required so the
    remaining letters of *a* and *b* can be rearranged into each other."""
    count_a = Counter(a)
    count_b = Counter(b)
    deletions = 0
    for ch in set(count_a) | set(count_b):
        # Each string must drop its surplus of this character.
        deletions += abs(count_a[ch] - count_b[ch])
    return deletions
if __name__ == '__main__':
    # Read the two strings from stdin and print the deletion count.
    # The commented lines are the original HackerRank file-output harness.
    # fptr = open(os.environ['OUTPUT_PATH'], 'w')
    a = input()
    b = input()
    res = makeAnagram(a, b)
    print(res)
    # fptr.write(str(res) + '\n')
    #
    # fptr.close()
| true
|
59d202fefeb20b76f0d2f6ef7e9874aa0ed51114
|
Python
|
piotroramus/Computational-Geometry-2015
|
/lab1/matplotlib/main.py
|
UTF-8
| 960
| 3.25
| 3
|
[] |
no_license
|
__author__ = 'piotr'
from matplotlib import pyplot as plt

# Suffix selecting which points file / output image to use (A-D).
currentPoints = "D"
# Load semicolon-separated "x;y" pairs, one per line.
# NOTE(review): x and y are kept as strings here - confirm matplotlib
# interprets them as intended (it may treat them as categorical values).
with open('../points'+currentPoints+'.txt') as inputFile:
    points = []
    for line in inputFile:
        [x, y] = line.strip().split(';')
        points.append([x, y])
plt.plot(*zip(*points), marker='.', color='r', ls='', markersize=1)
# Endpoints of an optional reference line y = 0.05*x + 0.05.
minx = -1000
maxx = 1000
linex1 = minx
liney1 = 0.05*minx+0.05
linex2 = maxx
liney2 = 0.05*maxx+0.05
# Captions describing the four datasets (only dDesc is used below).
aDesc = "10^5 points of [-100;100]"
bDesc = "10^5 points of [-10^14;10^14]"
cDesc = '1000 points on circle of (X0,Y0)=(0,0) and R=100'
dDesc = "1000 points on the line going through [-1.0;0.0] and [1.0;0.1]"
plt.xlabel('X')
plt.ylabel('Y')
plt.title(dDesc)
# Toggle drawing the reference line; the output filename reflects it.
withLine = False
if withLine:
    plt.plot([linex1, linex2], [liney1, liney2], 'b-', lw=1)
    plt.savefig('/home/piotr/Projects/go/lab1/images/points'+currentPoints+'WithLine.png')
else:
    plt.savefig('/home/piotr/Projects/go/lab1/images/points'+currentPoints+'.png')
| true
|
c143d7553a633c5dc08ba671430a6426a6a66309
|
Python
|
saatvikgulati/turtle-python
|
/race.py
|
UTF-8
| 1,246
| 3.15625
| 3
|
[] |
no_license
|
import turtle
import time
from random import randint

# --- Scene setup -----------------------------------------------------------
window=turtle.Screen()
window.bgcolor("brown")
# `tur` stamps the checkered finish line and is then hidden.
tur=turtle.Pen()
tur.speed(0)
tur.up()
stamp_size=20
square_size=15
finish_line=200
tur.color("black")
tur.shape("square")
tur.shapesize(square_size/stamp_size)
tur.up()
# Two vertically offset columns of square stamps form the checker pattern.
for i in range(10):
    tur.setpos(finish_line,(150-(i*square_size*2)))
    tur.stamp()
for j in range(10):
    tur.setpos(finish_line+square_size,((150-square_size)-(j*square_size*2)))
    tur.stamp()
tur.hideturtle()
# --- Racers p1-p4, plus `d` which draws the start and lane lines -----------
p1=turtle.Pen()
p2=turtle.Pen()
p3=turtle.Pen()
p4=turtle.Pen()
d=turtle.Pen()
p1.shape("turtle")
p2.shape("turtle")
p3.shape("turtle")
p4.shape("turtle")
p1.speed(0)
p1.up()
p2.speed(0)
p2.up()
p3.speed(0)
p3.up()
p4.speed(0)
p4.up()
# Vertical starting line at x = -300.
d.speed(0)
d.up()
d.setpos(-300,-190)
d.down()
d.left(90)
d.forward(400)
d.right(90)
d.up()
d.hideturtle()
# Place each racer on its own lane at the starting line.
p1.setpos(-300,0)
p2.setpos(-300,-50)
p3.setpos(-300,50)
p4.setpos(-300,100)
# Horizontal lane lines.
d.setpos(-300,0)
d.down()
d.forward(2000)
d.up()
d.setpos(-300,-50)
d.down()
d.forward(2000)
d.up()
d.setpos(-300,50)
d.down()
d.forward(2000)
d.up()
d.setpos(-300,100)
d.down()
d.forward(2000)
d.up()
# The race: each turtle advances a random 1-5 steps per round for a fixed
# 145 rounds.  NOTE(review): `import time` is unused and no winner is
# detected - the race simply runs its fixed number of rounds.
for i in range(145):
    p2.forward(randint(1,5))
    p1.forward(randint(1,5))
    p3.forward(randint(1,5))
    p4.forward(randint(1,5))
|
57f242252ea28ad0a95e39b2eeff06110cba06c7
|
Python
|
xiaoli777/ecs289g_tsne_pres
|
/build.py
|
UTF-8
| 1,893
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/python3
import argparse
import os
import shutil
def handleAutoComplete():
    """Register bash long-option completion for this script on macOS.

    Appends a `complete -F _longopt <script>` line to ~/.bashrc if it is
    not already present; does nothing on other platforms.
    """
    # BUG FIX: the original compared sys.platform to 'Mac OS X', which is
    # never a value sys.platform takes (macOS reports 'darwin'), and `sys`
    # was never imported, so calling this function raised NameError.
    import sys
    if sys.platform == 'darwin':
        complete_cmd = 'complete -F _longopt {}'.format(os.path.basename(__file__))
        bashrc_path = os.path.expanduser('~/.bashrc')
        with open(bashrc_path) as f:
            if complete_cmd not in f.read():
                os.system('echo "{}" >> {}'.format(complete_cmd, bashrc_path))
class BuildDirectory():
    """Resolves source and output paths relative to this script's folder."""

    def __init__(self):
        here = os.path.abspath(os.path.dirname(__file__))
        self.script_folder = here
        self.build_root = os.path.join(here, 'build')

    def sourceFilename(self, base_filename_tex):
        """Absolute path of the given .tex file next to this script."""
        return os.path.join(self.script_folder, base_filename_tex)

    def outputFilename(self, base_filename_tex):
        """Absolute path of the .pdf produced from the given .tex file."""
        stem, _ = os.path.splitext(base_filename_tex)
        return os.path.join(self.script_folder, stem + '.pdf')
def runBuild(dirs, tex_filename):
    """Compile the LaTeX source with pdflatex, aborting on a non-zero status."""
    os.makedirs(dirs.build_root, exist_ok=True)
    status = os.system("pdflatex " + dirs.sourceFilename(tex_filename))
    if status != 0:
        quit(1)
def runPreview(dirs, tex_filename):
    """Open the generated PDF with the evince viewer."""
    os.system("evince " + dirs.outputFilename(tex_filename))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--clean', action='store_true', help='Clean build folder')
    # parser.add_argument('filename', nargs=1, help="Name of the target LaTeX file")
    parser.add_argument('--preview', action='store_true', help='Open output file with pdf viewer')
    args = parser.parse_args()
    dirs = BuildDirectory()
    # The build target is currently hard-coded (the filename argument above
    # is commented out).
    target_name = 'pres.tex'
    if args.clean:
        # --clean removes the build folder and exits without building.
        shutil.rmtree(dirs.build_root, ignore_errors=True)
        quit()
    runBuild(dirs, target_name)
    if args.preview:
        runPreview(dirs, target_name)
    print("finished")
| true
|
35c02d229b85b1a2a92a9eadf207612f93f61322
|
Python
|
LiMichael1/Space_Invaders
|
/PlayScreen.py
|
UTF-8
| 12,890
| 2.5625
| 3
|
[] |
no_license
|
import sys
from time import sleep
import pygame
from bullet import Bullet
from Alien1 import Alien1
from Alien2 import Alien2
from Alien3 import Alien3
from pygame.sprite import Group
from high_score import High_Score
from game_stats import GameStats
from scoreboard import Scoreboard
from ship import Ship
from UFO import UFO
from startScreen import StartScreen
from Bunker import Bunker
from gameover import GameOver
import random
clock = pygame.time.Clock()  # module-level clock (not referenced by PlayScreen below)
class PlayScreen:
    """Main controller for the Space Invaders clone: owns all sprites,
    screens, sounds, input handling and the game loop."""

    def __init__(self, ai_settings, screen):
        self.ai_settings = ai_settings
        self.screen = screen
        self.display_start = True
        self.display_high_score = False
        self.start_screen = StartScreen(ai_settings=ai_settings, screen=screen)
        self.stats = GameStats(ai_settings=self.ai_settings)
        self.sb = Scoreboard(ai_settings=self.ai_settings, screen=self.screen, stats=self.stats)
        self.ship = Ship(ai_settings=self.ai_settings, screen=self.screen)
        self.high_score = High_Score(self.ai_settings, self.screen)
        self.gameover = GameOver(self.ai_settings, self.screen)
        self.quit = False
        # One template alien per type; used for fleet size calculations.
        self.alien1 = Alien1(ai_settings=self.ai_settings, screen=self.screen)
        self.alien2 = Alien2(ai_settings=self.ai_settings, screen=self.screen)
        self.alien3 = Alien3(ai_settings=self.ai_settings, screen=self.screen)
        self.aliens = [self.alien1, self.alien2, self.alien3]
        self.UFO = UFO(ai_settings=self.ai_settings, screen=self.screen)
        self.faster = False  # True once the sped-up track is playing
        self.bullets = Group()
        self.enemy_bullets = Group()
        self.alien_group = Group()
        self.play_music = 'sounds/play.mp3'
        self.play_music_faster = 'sounds/play-faster.mp3'
        self.missile_sound = pygame.mixer.Sound('sounds/missile.wav')
        self.bunker = Group()
        self.create_bunker()

    def create_bunker(self):
        """Place the three defensive bunkers near the bottom of the screen."""
        for x in range(3):
            bunk = Bunker(ai_settings=self.ai_settings, screen=self.screen,
                          centerx=x * 400 + 200, centery=self.ai_settings.screen_height - 100)
            self.bunker.add(bunk)

    def check_events(self):
        """Dispatch pygame events to the key handlers."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.KEYDOWN:
                self.check_keydown_events(event)
            elif event.type == pygame.KEYUP:
                self.check_keyup_events(event)

    def check_keyup_events(self, event):
        """Stop moving when an arrow key is released."""
        if event.key == pygame.K_RIGHT:
            self.ship.moving_right = False
        elif event.key == pygame.K_LEFT:
            self.ship.moving_left = False

    def check_keydown_events(self, event):
        """Start moving, fire, or quit on key press."""
        if event.key == pygame.K_RIGHT:
            self.ship.moving_right = True
        elif event.key == pygame.K_LEFT:
            self.ship.moving_left = True
        elif event.key == pygame.K_SPACE:
            self.fire_bullet()
        elif event.key == pygame.K_q:
            sys.exit()

    def new_game(self):
        """Reset stats, fleet, bunkers and ship for a fresh game."""
        self.ai_settings.initialize_dynamic_settings()
        pygame.mouse.set_visible(False)
        self.stats.reset_stats()
        self.stats.game_active = True
        self.sb.prep_score()
        self.sb.prep_ships()
        self.alien_group.empty()
        self.bullets.empty()
        self.enemy_bullets.empty()
        # create fleet
        self.create_fleet()
        self.create_bunker()
        self.ship.center_ship()

    def fire_bullet(self):
        """Fire a player bullet if the on-screen limit allows it."""
        if len(self.bullets) < self.ai_settings.bullets_allowed:
            new_bullet = Bullet(ai_settings=self.ai_settings, screen=self.screen, ship=self.ship)
            self.bullets.add(new_bullet)
            self.missile_sound.play()

    def fire_enemy_bullet(self):
        """Randomly let one alien fire, subject to the enemy bullet limit."""
        if random.random() < self.ai_settings.probability_to_fire:
            if len(self.enemy_bullets) < self.ai_settings.enemy_bullets_allowed:
                alien = random.choice(list(self.alien_group.spritedict.keys()))
                new_bullet = Bullet(ai_settings=self.ai_settings, screen=self.screen, ship=alien, enemy=True)
                self.enemy_bullets.add(new_bullet)
                self.missile_sound.play()

    def get_number_aliens_x(self):
        """Number of aliens that fit in one row."""
        alien_width = self.aliens[0].rect.width
        available_space_x = self.ai_settings.screen_width - (2 * alien_width)
        number_aliens_x = int(available_space_x / (2 * alien_width))
        return number_aliens_x

    def get_number_rows(self):
        """Number of alien rows that fit above the ship."""
        alien_height = self.aliens[0].rect.height
        available_space_y = (self.ai_settings.screen_height -
                             (3 * alien_height) - self.ship.rect.height)
        number_rows = int(available_space_y / (2 * alien_height))
        return number_rows

    def create_alien(self, which_alien, row_number, alien_number):
        """Create one alien of the given type at its grid position."""
        one_alien = None
        if which_alien == 0:
            one_alien = Alien1(ai_settings=self.ai_settings, screen=self.screen)
        elif which_alien == 1:
            one_alien = Alien2(ai_settings=self.ai_settings, screen=self.screen)
        elif which_alien == 2:
            one_alien = Alien3(ai_settings=self.ai_settings, screen=self.screen)
        alien_width = one_alien.rect.width
        one_alien.x = alien_width + 2 * alien_width * alien_number
        one_alien.rect.x = one_alien.x
        one_alien.rect.y = one_alien.rect.height + 2 * one_alien.rect.height * row_number
        self.alien_group.add(one_alien)

    def create_fleet(self):
        """Fill the screen with rows of aliens; the type depends on the row."""
        rows = self.get_number_rows()
        aliens_per_row = self.get_number_aliens_x()
        for row_number in range(rows):
            which_alien = int(row_number / 2)
            for alien_number in range(aliens_per_row):
                self.create_alien(which_alien=which_alien, row_number=row_number, alien_number=alien_number)

    def check_bullet_alien_collisions(self):
        """Resolve bullet hits on aliens, the UFO and the bunkers, and
        advance to the next level once the fleet is cleared."""
        collisions = pygame.sprite.groupcollide(self.bullets, self.alien_group, True, True)
        # Aliens that reach a bunker destroy it.
        pygame.sprite.groupcollide(self.alien_group, self.bunker, False, True)
        if self.UFO.alive:
            if pygame.sprite.spritecollideany(self.UFO, self.bullets):
                self.UFO.explode()
                self.UFO.dead()
                self.stats.score += self.ai_settings.ufo_points
                self.sb.prep_score()
        if collisions:
            for aliens in collisions.values():
                for alien in aliens:
                    # Score depends on which alien type was hit.
                    for x in range(len(self.aliens)):
                        if type(alien) == type(self.aliens[x]):
                            self.stats.score += self.ai_settings.alien_points[x]
                            self.sb.prep_score()
                    alien.explode()
                    sleep(0.05)
                    alien.explosion_timer.reset()
        # Speed the music up when few aliens remain.
        if len(self.alien_group) < 11 and not self.faster:
            self.play_game_music(faster=True)
        if len(self.alien_group) == 0 and not self.UFO.alive:
            # Level cleared: speed up, advance the level and rebuild.
            self.bullets.empty()
            self.ai_settings.increase_speed()
            self.stats.level += 1
            self.sb.prep_level()
            self.create_fleet()
            self.UFO.reset()
            self.play_game_music(faster=False)
        # Enemy bullets chip away at the bunkers (the bullets are destroyed).
        # FIX: the result was previously bound to an unused local variable.
        pygame.sprite.groupcollide(groupa=self.bunker, groupb=self.enemy_bullets,
                                   dokilla=False, dokillb=True,
                                   collided=pygame.sprite.collide_rect_ratio(2))

    def ship_hit(self):
        """Handle the player's ship being hit; end the game on the last life.
        (A leftover debug print of ships_left was removed.)"""
        self.stats.ships_left -= 1
        if self.stats.ships_left > 0:
            # Update the scoreboard
            self.sb.prep_ships()
            # Empty list of aliens and bullets
            self.alien_group.empty()
            self.bullets.empty()
            self.enemy_bullets.empty()
            # create_fleet
            self.create_fleet()
            self.ship.center_ship()
            self.create_bunker()
            # Pause
            sleep(0.5)
        else:
            # Play the ship's explosion animation, then end the game.
            for i in range(6):
                self.ship.explode()
                sleep(0.3)
                self.ship.explosion_timer.reset()
            self.stats.game_active = False
            pygame.mouse.set_visible(True)
            self.high_score.update_score(self.stats.score)

    def update_screen(self):
        """Redraw everything and flip the display."""
        self.screen.fill(self.ai_settings.bg_color)
        self.bullets.draw(self.screen)
        self.enemy_bullets.draw(self.screen)
        self.ship.blitme()
        for alien in self.alien_group:
            alien.blitme()
        if self.UFO.alive:
            self.UFO.blitme()
        for bunker in self.bunker:
            bunker.draw()
        self.sb.show_score()
        pygame.display.flip()

    def start_screen_play(self):
        """Run the start menu (and high-score view) until Play is pressed."""
        self.start_screen.play_music()
        while self.display_start:
            for e in pygame.event.get():
                if e.type == pygame.QUIT:
                    sys.exit()
                elif e.type == pygame.MOUSEBUTTONDOWN:
                    _mouse_x, _mouse_y = pygame.mouse.get_pos()
                    if self.start_screen.check_play_button(_mouse_x, _mouse_y):
                        self.display_start = False
                        continue
                    if self.start_screen.check_high_score_button(_mouse_x, _mouse_y):
                        self.display_high_score = True
                elif e.type == pygame.KEYDOWN:
                    # Space returns from the high-score view to the menu.
                    if e.key == pygame.K_SPACE:
                        self.display_high_score = False
            if self.display_high_score:
                self.high_score.draw()
            else:
                self.start_screen.draw()
            pygame.display.flip()
        pygame.mixer.music.stop()

    def gameOver_play(self):
        """Show the game-over screen and react to its Play/Quit buttons."""
        self.gameover.draw()
        for e in pygame.event.get():
            # BUG FIX: the QUIT check used to be nested inside the KEYDOWN
            # branch (an event cannot have both types), so closing the
            # window on the game-over screen did nothing.
            if e.type == pygame.QUIT:
                sys.exit()
            if e.type == pygame.MOUSEBUTTONDOWN:
                _mouse_x, _mouse_y = pygame.mouse.get_pos()
                if self.gameover.check_play_button(_mouse_x=_mouse_x, _mouse_y=_mouse_y):
                    self.new_game()
                    self.stats.game_active = True
                elif self.gameover.check_quit_button(_mouse_x=_mouse_x, _mouse_y=_mouse_y):
                    self.quit = True

    def play_game_music(self, faster):
        """Loop either the normal or the sped-up background track."""
        pygame.mixer.music.stop()
        if faster:
            pygame.mixer.music.load(self.play_music_faster)
            self.faster = True
        else:
            pygame.mixer.music.load(self.play_music)
            self.faster = False
        pygame.mixer.music.play(-1, 0.0)

    def update_bullets(self):
        """Move all bullets, cull off-screen ones and resolve collisions."""
        self.bullets.update()
        self.enemy_bullets.update()
        for bullet in self.bullets.copy():
            if bullet.rect.bottom <= 0:
                self.bullets.remove(bullet)
        for bullet in self.enemy_bullets.copy():
            if bullet.rect.top >= self.ai_settings.screen_height:
                self.enemy_bullets.remove(bullet)
        self.fire_enemy_bullet()
        if pygame.sprite.spritecollideany(self.ship, self.enemy_bullets):
            self.ship_hit()
        self.check_bullet_alien_collisions()

    def change_fleet_direction(self):
        """Drop the fleet one step and reverse its horizontal direction."""
        for alien in self.alien_group:
            alien.rect.y += self.ai_settings.fleet_drop_speed
        self.ai_settings.fleet_direction *= -1

    def check_fleet_edges(self):
        """Reverse the fleet when any alien touches a screen edge."""
        for alien in self.alien_group:
            if alien.check_edges():
                self.change_fleet_direction()
                break

    def check_aliens_bottom(self):
        """Treat an alien reaching the bottom like a ship hit."""
        screen_rect = self.screen.get_rect()
        for alien in self.alien_group:
            if alien.rect.bottom >= screen_rect.bottom:
                self.ship_hit()
                break

    def update_aliens(self):
        """Move the fleet (and UFO) and check alien/ship collisions."""
        self.check_fleet_edges()
        self.alien_group.update()
        if pygame.sprite.spritecollideany(self.ship, self.alien_group):
            self.ship_hit()
        if self.UFO.alive:
            self.update_UFO()
        self.check_aliens_bottom()

    def update_UFO(self):
        """Bounce the UFO between the screen edges."""
        if self.UFO.check_edges():
            self.ai_settings.UFO_direction *= -1
        self.UFO.update()

    def play(self):
        """Run the whole game: start menu, game loop, game-over screen."""
        self.start_screen_play()
        self.new_game()
        self.play_game_music(faster=False)
        while not self.quit:
            if self.stats.game_active:
                self.check_events()
                self.ship.update()
                self.update_bullets()
                self.update_aliens()
                self.update_screen()
            else:
                self.gameOver_play()
                pygame.display.flip()
| true
|
221e71fa44fc9f71b5c2c61f854db74dfc91d2d1
|
Python
|
hxl163630/practice
|
/31. Next Permutation.py
|
UTF-8
| 538
| 3
| 3
|
[] |
no_license
|
class Solution:
    def nextPermutation(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Rearranges nums into the lexicographically next permutation, or
        into the smallest one if nums is already the largest permutation.
        """
        if not nums or len(nums) == 1: return
        # Find the rightmost index whose element is smaller than its successor.
        i = len(nums) - 2
        while i >= 0 and nums[i + 1] <= nums[i]: i -= 1
        if i >= 0:
            # Swap it with the smallest element to its right that is larger.
            j = len(nums) - 1
            while j > i and nums[j] <= nums[i]: j -= 1
            nums[i], nums[j] = nums[j], nums[i]
        # BUG FIX: the original did `nums = nums[::-1]`, which reversed the
        # WHOLE list and rebound the local name, so the caller's list was
        # never modified.  Only the suffix after i must be reversed, and it
        # must be reversed in place via slice assignment.
        nums[i + 1:] = nums[i + 1:][::-1]
# Quick manual check; nextPermutation mutates its argument in place.
sol = Solution()
sol.nextPermutation([1,3,2])
| true
|
24e2252193be0424394912a38d072c232cc4d852
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_135/1028.py
|
UTF-8
| 949
| 3.234375
| 3
|
[] |
no_license
|
import sys
if __name__ == "__main__":
    # Code Jam "Magic Trick": for each of T cases read a chosen row number
    # and a 4x4 grid, twice; the card is the intersection of the two rows.
    # Answers are written to the file "a.out".
    T = int(sys.stdin.readline())
    f = open("a.out", 'w')
    for i in range(T):
        r1 = int(sys.stdin.readline())
        for j in range(1, 5):
            if j == r1:
                row = sys.stdin.readline().split()
                r1_set = set(row)
            else:
                sys.stdin.readline()  # skip the rows that were not chosen
        r2 = int(sys.stdin.readline())
        for j in range(1,5):
            if j == r2:
                row = sys.stdin.readline().split()
                r2_set = set(row)
            else:
                sys.stdin.readline()
        intersect = r1_set.intersection(r2_set)
        # 0 common cards -> cheat; exactly 1 -> the answer; >1 -> ambiguous.
        if len(intersect) == 0:
            f.write("Case #%d: Volunteer cheated!\n" %(i+1))
        elif len(intersect) == 1:
            el = intersect.pop()
            f.write("Case #%d: %s\n" %(i+1, el))
        else:
            f.write("Case #%d: Bad magician!\n" %(i+1))
    f.close()
| true
|
b5faf9773c21e394401a5b724c92de8c642f690d
|
Python
|
harshitbhat/Data-Structures-and-Algorithms
|
/GeeksForGeeks/DS-Course/002-Recursion/017.print-1-to-n-without-using-loops.py
|
UTF-8
| 232
| 3.078125
| 3
|
[] |
no_license
|
class Solution:
    # Prints 1..N using recursion in place of an explicit loop.
    def printNos(self, N):
        """Print the numbers 1 to N (inclusive), each followed by a space."""
        if N == 0:
            return
        # Emit everything below N first so the output comes out ascending.
        self.printNos(N - 1)
        print(N, end=' ')
| true
|
db4fb8983298740fcf0bd85ed1bb48c417e372d4
|
Python
|
Nitishkumar-S/Cryptocurrency-sentiment-analysis
|
/cryptocurrency sentiment analysis.py
|
UTF-8
| 3,690
| 3.4375
| 3
|
[] |
no_license
|
#Import libraries
import tweepy
from textblob import TextBlob
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
#Function that cleans tweets
def cleanTwt(twt):
    """Return *twt* with hashtag marks, newlines, links and '@' signs removed."""
    without_tags = twt.replace('#', '')          # drop the hash of every hashtag
    single_line = without_tags.replace('\n', '')  # join onto a single line
    no_links = re.sub(r'https?://\S+', '', single_line)  # strip hyperlinks
    return no_links.replace('@', '')             # drop every at-sign
#Function to get Subjectivity
def getSubjectivity(twt):
    """Return the subjectivity score TextBlob computes for the given text."""
    return TextBlob(twt).sentiment.subjectivity
#Function to get Polarity
def getPolarity(twt):
    """Return the polarity score TextBlob computes for the given text
    (negative values -> negative sentiment; see getSentiment below)."""
    return TextBlob(twt).sentiment.polarity
#function to get text sentiment
def getSentiment(score):
    """Translate a polarity score into 'Negative' / 'Neutral' / 'Positive'."""
    if score < 0:
        return 'Negative'
    if score == 0:
        return 'Neutral'
    return 'Positive'
if __name__=="__main__":
    login=pd.read_csv("login.csv") #The api credentials were saved in a file named 'login.csv' which had four keys: api key, api consumer secret, access token key, access token secret
    #format of login.csv file: column names are key names and entries are the key values
    #Get twitter API credentials from login file
    consumerKey=''.join(login['apikey'])
    consumerSecret=''.join(login['apisecretkey'])
    accessToken=''.join(login['accesstoken'])
    accessTokenSecret=''.join(login['accesstokensecret'])
    #Create authentication object
    authenticate= tweepy.OAuthHandler(consumerKey,consumerSecret)
    #Set access token and access token secret
    authenticate.set_access_token(accessToken,accessTokenSecret)
    #Create API object
    api = tweepy.API(authenticate, wait_on_rate_limit=True)
    #Gather 2000 tweets about Dogecoin and filter out any retweets
    search_term = "#Dogecoin -filter:retweets"
    #Create a cursor object
    # NOTE(review): api.search and the `since` argument belong to older
    # tweepy versions - confirm against the installed tweepy release.
    tweets= tweepy.Cursor(api.search, q=search_term, lang='en', since='2021-05-07',tweet_mode='extended').items(2000)
    #Store tweets in variable and get full texts
    all_tweets = [tweet.full_text for tweet in tweets]
    #Create a dataframe with the column name "Tweets" to store tweets
    df = pd.DataFrame(all_tweets, columns=['Tweets'])
    print("Tweets after pulling from twitter")
    #First 5 rows of data
    print(df.head())
    print()
    #to show dimensions (NOTE: this bare expression has no visible effect)
    df.shape
    #Clean tweets
    df['Cleaned_tweets']=df['Tweets'].apply(cleanTwt)
    print("\nAfter cleaning\n")
    #show first 5 rows
    print(df.head(5))
    #create two new columns to store 'subjectivity' and 'polarity' by calling the functions defined above
    df['Subjectivity']=df['Cleaned_tweets'].apply(getSubjectivity)
    df['Polarity']=df['Cleaned_tweets'].apply(getPolarity)
    print("Subjectivity and polarity added\n")
    #show first 5 rows (NOTE: result is not printed here)
    df.head(5)
    #create column to store text sentiment
    df['Sentiment']=df['Polarity'].apply(getSentiment)
    print("Sentiment column added\n")
    #show first 5 rows
    print(df.head())
    # create a scatter plot to show the subjectivity and the polarity
    plt.figure(figsize=(8,6))
    for i in range(df.shape[0]):
        plt.scatter(df["Polarity"][i], df["Subjectivity"][i], color="Purple")
    plt.title("Sentiment Analysis Scatter Plot")
    plt.xlabel('Polarity')
    plt.ylabel('Subjectivity')
    plt.show()
    #create a bar plot to show count of positive, neutral and negative tweets
    df['Sentiment'].value_counts().plot(kind='bar')
    plt.title('Sentiment Analysis Bar Plot')
    plt.xlabel('Sentiment')
    plt.ylabel('Count')
    plt.show()
| true
|
97f045c216b79fefd9248bb7797772a0ed4f2232
|
Python
|
astsu-dev/exapi1
|
/exapi/channel_creators/binance/spot/market_data/interface.py
|
UTF-8
| 2,186
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
from typing import Protocol, Optional
class IBinanceSpotMarketDataChannelCreator(Protocol):
    """Binance spot market data socket channel creator.

    A structural interface (typing.Protocol): implementations build the
    websocket channel/stream names used to subscribe to Binance spot
    market data.  Every method returns the channel name as a string; the
    bodies here consist only of docstrings.
    """

    def create_agg_trades_channel(self, symbol: str) -> str:
        """Creates agg trade channel.

        Args:
            symbol (str)

        Returns:
            str
        """

    def create_trades_channel(self, symbol: str) -> str:
        """Creates trade channel.

        Args:
            symbol (str)

        Returns:
            str
        """

    def create_candles_channel(self, symbol: str) -> str:
        """Creates candles channel.

        Args:
            symbol (str)

        Returns:
            str
        """

    def create_mini_ticker_channel(self, symbol: str) -> str:
        """Creates symbol mini ticker channel.

        Args:
            symbol (str)

        Returns:
            str
        """

    def create_mini_tickers_channel(self) -> str:
        """Creates all symbol mini tickers channel.

        Returns:
            str
        """

    def create_ticker_channel(self, symbol: str) -> str:
        """Creates individual symbol ticker channel.

        Args:
            symbol (str)

        Returns:
            str
        """

    def create_tickers_channel(self) -> str:
        """Creates all symbol tickers channel (takes no symbol argument).

        Returns:
            str
        """

    def create_book_ticker_channel(self, symbol: str) -> str:
        """Creates symbol book ticker channel.

        Args:
            symbol (str)

        Returns:
            str
        """

    def create_book_tickers_channel(self) -> str:
        """Creates all symbol book tickers channel.

        Returns:
            str
        """

    def create_order_book_channel(self, symbol: str,
                                  levels: Optional[int] = None,
                                  update_speed: Optional[int] = None) -> str:
        """Creates depth channel.

        Args:
            symbol (str)
            levels (Optional[int]): count of bid and ask orders.
            update_speed (Optional[int])

        Returns:
            str
        """
| true
|
e92a58a55a7229f0e0ac5cb64ca8613aba8a8773
|
Python
|
Sheepp96/ngoquangtruong-fundamentals-c4e17
|
/Session4/while_ex.py
|
UTF-8
| 141
| 3.84375
| 4
|
[] |
no_license
|
# Prints "Hi" three times using a counter-controlled while loop.
n = 0
while n < 3: #the expression after `while` is a boolean - True / False
    #equivalent to: for i in range(3):
    print("Hi")
    n += 1
| true
|
8ebf3969c1ef9a5d48df719b81b5f0f30c3f4ec6
|
Python
|
metulburr/random
|
/pygame_/resizable_surface.py
|
UTF-8
| 3,399
| 3.046875
| 3
|
[] |
no_license
|
import pygame as pg
import random
class Ball:
    """A red 50x50 square that bounces horizontally across the screen."""
    def __init__(self, screen_rect):
        self.screen_rect = screen_rect
        self.image = pg.Surface([50,50]).convert()
        self.image.fill((255,0,0))
        self.rect = self.image.get_rect()
        self.speed_init = 10
        self.speed = self.speed_init
        self.set_ball()
    def set_ball(self):
        """Center the ball and give it a random horizontal direction."""
        # Vertical velocity stays 0; only horizontal movement is used.
        self.vel = [random.choice([-1,1]), 0]
        self.rect.center = self.screen_rect.center
        # Position kept as a separate list so movement isn't quantized by
        # the integer-only rect.
        self.true_pos = list(self.rect.center)
        self.speed = self.speed_init
    def move(self):
        # Reverse direction when hitting the left or right screen edge.
        if self.rect.left <= 0:
            self.vel[0] *= -1
        elif self.rect.right >= self.screen_rect.right:
            self.vel[0] *= -1
        self.true_pos[0] += self.vel[0] * self.speed
        #self.true_pos[1] += self.vel[1] * self.speed
        self.rect.center = self.true_pos
    def update(self, screen_rect):
        # Called once per frame; screen_rect is the current render-surface rect.
        self.screen_rect = screen_rect
        self.move()
    def draw(self, surf):
        surf.blit(self.image, self.rect)
class Control:
    """Main loop: draws at a fixed full resolution, then scales the result
    to the current window; drag-resizes snap to preset resolutions."""
    def __init__(self):
        # Allowed window sizes, smallest to largest.
        self.resolutions = [(300,200), (600,400),(800, 600), (928, 696)]
        self.render_size = self.resolutions[-1] #largest
        self.screen = pg.display.set_mode(self.resolutions[-1], pg.RESIZABLE)
        self.screen_rect = self.screen.get_rect()
        # Everything is drawn on this surface at full resolution.
        self.render_surf = pg.Surface(self.render_size).convert()
        #pg.event.clear(pg.VIDEORESIZE)
        self.clock = pg.time.Clock()
        self.done = False
        self.fps = 60
        self.ball = Ball(self.screen_rect)
    def event_loop(self):
        for event in pg.event.get():
            if event.type == pg.QUIT:
                self.done = True
            elif event.type == pg.VIDEORESIZE:
                self.on_resize(event.size)
                #pg.event.clear(pg.VIDEORESIZE)
    def on_resize(self, size):
        """Snap a drag-resize to the next preset resolution up or down."""
        if size == self.screen_rect.size:
            return
        res_index = self.resolutions.index(self.screen_rect.size)
        # NOTE(review): tuples compare lexicographically, so in practice the
        # width decides which direction the resize goes.
        adjust = 1 if size > self.screen_rect.size else -1
        if 0 <= res_index+adjust < len(self.resolutions):
            new_size = self.resolutions[res_index+adjust]
        else:
            new_size = self.screen_rect.size
        self.screen = pg.display.set_mode(new_size, pg.RESIZABLE)
        self.screen_rect.size = new_size
        self.set_scale()
    def set_scale(self):
        # Render-to-window size ratios; stored but not read anywhere in
        # this class.
        w_ratio = self.render_size[0]/float(self.screen_rect.w)
        h_ratio = self.render_size[1]/float(self.screen_rect.h)
        self.scale = (w_ratio, h_ratio)
    def update(self):
        self.ball.update(self.render_surf.get_rect()) #give obj updated screen size
    def render(self):
        # Smooth-scale the full-resolution surface onto the window unless
        # the window is already at full resolution.
        if self.render_size != self.screen_rect.size:
            scale_args = (self.render_surf, self.screen_rect.size, self.screen)
            pg.transform.smoothscale(*scale_args)
        else:
            self.screen.blit(self.render_surf, (0, 0))
        # Clear and redraw the render surface for the next frame.
        self.render_surf.fill((255,255,255))
        self.ball.draw(self.render_surf)
    def game_loop(self):
        while not self.done:
            self.event_loop()
            self.update()
            self.render()
            pg.display.update()
            self.clock.tick(self.fps)
# Script entry point: run the demo until the window is closed.
pg.init()
app = Control()
app.game_loop()
pg.quit()
| true
|
917530577706bb6c58f0c159fc8561ac4e0dbb57
|
Python
|
nashid/magicomplete
|
/oneshot.py
|
UTF-8
| 11,423
| 2.578125
| 3
|
[] |
no_license
|
# One-shot learning algorithms.
import random
import numpy as np
import torch
import torch.nn.functional as F
from util import batched, Progress
from user import User
from data import augment
class OneShotLearner:
    """Abstract interface for one-shot learners.

    BUG FIX: the abstract methods previously did `raise NotImplemented()`.
    `NotImplemented` is a non-callable singleton, so calling it raised a
    confusing TypeError instead of the intended NotImplementedError.
    """
    def name(self):
        """Return a human-readable identifier for this learner."""
        raise NotImplementedError
    def learn(self, example):
        """Consume one (encoded, original) training example. No-op by default."""
        pass
    def test(self, examples):
        """Predict the original strings for a batch of encoded examples."""
        raise NotImplementedError
class OneShotEvaluator:
    """Abstract interface for one-shot evaluation protocols.

    BUG FIX: `raise NotImplemented()` called the non-callable NotImplemented
    singleton (TypeError); raise NotImplementedError instead.
    """
    def name(self):
        """Return a human-readable identifier for this evaluator."""
        raise NotImplementedError
    def evaluate(self, learner, dataset, save_examples=False):
        """Run the evaluation protocol and return a metrics dictionary."""
        raise NotImplementedError
class LearnEvalIterate(OneShotEvaluator):
    """Interleaved protocol: learn each convention from one example, then
    immediately test on that convention's held-out positives/negatives."""
    def __init__(self, batch_size=64):
        # Batch size used when querying the learner.
        self.batch_size = batch_size
    def name(self):
        return "LearnEvalIterate"
    def evaluate(self, learner, dataset, save_examples=False):
        """Return aggregate and per-example accuracies for the dataset.

        Each dataset row defines one (string, abbreviation) convention plus
        positive/negative example sentences.
        """
        u = User(conventions_queue=[(row['string'], row['abbreviation']) for row in dataset])
        accuracies = []
        accuracies_positive, accuracies_negative = [], []
        examples = []
        # NOTE(review): Progress object is constructed but never advanced here.
        p = Progress(len(dataset))
        for row in dataset:
            u.add_next_convention()
            # Learn from exactly one positive example of the convention.
            training_example = row['positive_examples'][0]
            learner.learn((u.encode(training_example), training_example))
            # Held-out sets; the training example is excluded from positives.
            test_positive = list(set(s for s in row['positive_examples'] if s != training_example))
            test_negative = list(set(row['negative_examples']))
            correct_positive = []
            for batch in batched(test_positive, self.batch_size):
                encoded_batch = [u.encode(s) for s in batch]
                learner_prediction = learner.test(encoded_batch)
                # Correct when the learner recovers the original string.
                correct_positive.extend([int(p == s) for p, s in zip(batch, learner_prediction)])
                if save_examples:
                    examples.extend([{'long': l, 'short': s, 'prediction': p}
                                     for l, s, p in zip(batch, encoded_batch, learner_prediction)])
            correct_negative = []
            for batch in batched(test_negative, self.batch_size):
                encoded_batch = [u.encode(s) for s in batch]
                learner_prediction = learner.test(encoded_batch)
                correct_negative.extend([int(p == s) for p, s in zip(batch, learner_prediction)])
                if save_examples:
                    examples.extend([{'long': l, 'short': s, 'prediction': p}
                                     for l, s, p in zip(batch, encoded_batch, learner_prediction)])
            accuracies.append(np.mean(correct_positive + correct_negative))
            accuracies_positive.append(np.mean(correct_positive))
            accuracies_negative.append(np.mean(correct_negative))
        return {
            'accuracy': np.mean(accuracies),
            'accuracy_positive': np.mean(accuracies_positive),
            'accuracy_negative': np.mean(accuracies_negative),
            'per_example_accuracy': accuracies,
            'per_example_accuracy_positive': accuracies_positive,
            'per_example_accuracy_negative': accuracies_negative,
            'examples': examples,
        }
class LearnAllThenEval(OneShotEvaluator):
    """Batch protocol: first learn every convention (one example each),
    then evaluate all conventions on their held-out examples."""
    def __init__(self, batch_size=64):
        # Batch size used when querying the learner.
        self.batch_size = batch_size
    def name(self):
        return "LearnAllThenEval"
    def evaluate(self, learner, dataset, save_examples=False):
        """Return aggregate and per-example accuracies for the dataset."""
        u = User(conventions_queue=[(row['string'], row['abbreviation']) for row in dataset])
        accuracies = []
        accuracies_positive, accuracies_negative = [], []
        train = []
        examples = []
        # NOTE(review): Progress object is constructed but never advanced here.
        p = Progress(len(dataset))
        # Phase 1: teach every convention from a single positive example.
        for row in dataset:
            u.add_next_convention()
            training_example = row['positive_examples'][0]
            train.append(training_example)
            learner.learn((u.encode(training_example), training_example))
        # Phase 2: evaluate each convention on its held-out examples.
        for row, training_example in zip(dataset, train):
            test_positive = list(set(s for s in row['positive_examples'] if s != training_example))
            test_negative = list(set(row['negative_examples']))
            correct_positive = []
            for batch in batched(test_positive, self.batch_size):
                encoded_batch = [u.encode(s) for s in batch]
                learner_prediction = learner.test(encoded_batch)
                correct_positive.extend([int(p == s) for p, s in zip(batch, learner_prediction)])
                if save_examples:
                    examples.extend([{'long': l, 'short': s, 'prediction': p}
                                     for l, s, p in zip(batch, encoded_batch, learner_prediction)])
            correct_negative = []
            for batch in batched(test_negative, self.batch_size):
                encoded_batch = [u.encode(s) for s in batch]
                learner_prediction = learner.test(encoded_batch)
                correct_negative.extend([int(p == s) for p, s in zip(batch, learner_prediction)])
                if save_examples:
                    examples.extend([{'long': l, 'short': s, 'prediction': p}
                                     for l, s, p in zip(batch, encoded_batch, learner_prediction)])
            accuracies.append(np.mean(correct_positive + correct_negative))
            accuracies_positive.append(np.mean(correct_positive))
            accuracies_negative.append(np.mean(correct_negative))
        return {
            'accuracy': np.mean(accuracies),
            'accuracy_positive': np.mean(accuracies_positive),
            'accuracy_negative': np.mean(accuracies_negative),
            'per_example_accuracy': accuracies,
            'per_example_accuracy_positive': accuracies_positive,
            'per_example_accuracy_negative': accuracies_negative,
            'examples': examples,
        }
class PriorBaseline(OneShotLearner):
    """Baseline that decodes with the frozen prior model and never adapts."""

    def __init__(self, prior_decoder, alphabet, parameters={}):
        self.alphabet = alphabet
        self.decoder = prior_decoder.clone(alphabet)
        self.decoder.eval()
        # Falsy/missing batch_size falls back to 64.
        self.batch_size = parameters.get('batch_size') or 64

    def name(self):
        return 'PriorBaseline'

    def learn(self, example):
        # The prior is fixed: learning is deliberately a no-op.
        pass

    def test(self, examples):
        """Decode each batch with the frozen prior and return all predictions."""
        predictions = []
        for chunk in batched(examples, self.batch_size):
            predictions += list(self.decoder(chunk, self.alphabet))
        return predictions
class KGradientSteps(OneShotLearner):
    """Learner that takes exactly K SGD steps on each new example."""
    def __init__(self, prior_decoder, alphabet, parameters={}):
        self.decoder = prior_decoder.clone(alphabet)
        self.alphabet = alphabet
        self.decoder.eval()
        # Falsy/missing parameters fall back to the defaults on the right.
        self.batch_size = parameters.get('batch_size') or 64
        self.learning_rate = parameters.get('learning_rate') or 1e-2
        self.k = parameters.get('k') or 1
        self.optimizer = torch.optim.SGD(self.decoder.parameters(), lr=self.learning_rate)
    def name(self):
        return 'KGradientSteps(K={}, lr={})'.format(self.k, self.learning_rate)
    def learn(self, example):
        """Take k SGD steps on the single (short, long) example."""
        self.decoder.train()
        short, long = example
        for i in range(self.k):
            self.optimizer.zero_grad()
            # Decoder returns per-example losses when targets are supplied
            # (presumably — TODO confirm against the decoder implementation).
            loss = self.decoder([short], self.alphabet, [long]).mean()
            loss.backward()
            self.optimizer.step()
    def test(self, examples):
        """Decode each batch with the adapted model and return all predictions."""
        self.decoder.eval()
        results = []
        for batch in batched(examples, self.batch_size):
            results.extend(self.decoder(batch, self.alphabet))
        return results
def infer_abbreviation(short, long):
# FIXME: This doesn't really disambiguate between all the possible abbreviations...
# That doesn't seem possible to do deterministically with a single example.
for prefix in range(len(short)):
if short[prefix] != long[prefix]:
break
prefix += 1
for suffix in range(len(short)):
if short[-1 - suffix] != long[-1 - suffix]:
break
suffix += 1
return (short[prefix:-suffix], long[prefix:-suffix])
class StepUntilCorrect(OneShotLearner):
    """Fine-tunes the decoder on each new example, taking SGD steps until the
    decoder reproduces it (plus optional extra steps), with optional data
    augmentation and rehearsal of previously seen examples.
    """
    def __init__(self, prior_decoder, alphabet, parameters={}, augmentation_dataset=[]):
        self.decoder = prior_decoder.clone(alphabet)
        self.alphabet = alphabet
        # Falsy/missing parameters fall back to the defaults on the right.
        self.batch_size = parameters.get('batch_size') or 64
        self.learning_rate = parameters.get('learning_rate') or 1e-2
        self.max_steps = parameters.get('max_steps') or 8
        self.extra_steps = parameters.get('extra_steps') or 0
        self.data_augmentation = parameters.get('data_augmentation') or None
        self.rehearsal_examples = parameters.get('rehearsal_examples') or 0
        self.augmentation_dataset = augmentation_dataset
        self.optimizer = torch.optim.SGD(self.decoder.parameters(), lr=self.learning_rate)
        # Every example seen so far; sampled for rehearsal during learning.
        self.past_examples = []

    def name(self):
        # Typo fixed in the label: 'reheasal_examples' -> 'rehearsal_examples'.
        return ('StepUntilCorrect(lr={}, max={}, extra={}, data_augmentation={}, rehearsal_examples={})'
                .format(self.learning_rate,
                        self.max_steps,
                        self.extra_steps,
                        self.data_augmentation or 'no',
                        self.rehearsal_examples))

    def fetch_augmentation_examples(self, short, long):
        """Build extra (short, long) pairs from the first dataset row whose
        positive examples contain `long`.

        BUG FIX: previously returned None when no row matched, which made the
        caller's `[example] + ...` raise TypeError; return [] instead.
        (Also dropped an unused infer_abbreviation() call.)
        """
        for row in self.augmentation_dataset:
            if long in row['positive_examples']:
                return ([(s.replace(row['string'], row['abbreviation']), s)
                         for s in row['positive_examples_train']] +
                        [(s, s) for s in row['negative_examples_train']])
        return []

    def trim_examples(self, batch):
        """Keep the first (real) example plus a random sample of the rest,
        capped at batch_size examples total."""
        return [batch[0]] + random.sample(batch[1:], min(len(batch) - 1, self.batch_size - 1))

    def learn(self, example):
        short, long = example
        if self.data_augmentation is None:
            batch = [example]
        elif self.data_augmentation in ('ast_only_short', 'ast_all'):
            # BUG FIX: the flag was compared against 'only_short', which can
            # never equal either value admitted by this branch, so
            # only_shortened was always False. Compare against 'ast_only_short'.
            batch = augment(short, long, only_shortened=(self.data_augmentation == 'ast_only_short'))
        elif self.data_augmentation == 'fetch_examples':
            batch = [example] + self.fetch_augmentation_examples(short, long)
        else:
            # Previously an unknown mode fell through to a NameError on
            # `batch`; fail with a clear message instead.
            raise ValueError('Unknown data_augmentation mode: {!r}'.format(self.data_augmentation))
        # Step until the decoder reproduces the example, then take
        # `extra_steps` more steps, never exceeding max_steps in total.
        correct_since = self.max_steps
        for i in range(self.max_steps):
            self.decoder.eval()
            prediction = self.decoder([short], self.alphabet)[0]
            if prediction == long:
                correct_since = i
            if i >= correct_since + self.extra_steps:
                break
            # Mix in a rehearsal sample of previously seen examples so the
            # decoder does not forget old conventions.
            rehearsal_batch = random.sample(self.past_examples,
                                            min(len(self.past_examples), self.rehearsal_examples))
            rehearsal_short, rehearsal_long = (zip(*rehearsal_batch)
                                               if len(rehearsal_batch)
                                               else ((), ()))
            self.decoder.train()
            self.optimizer.zero_grad()
            batch_short, batch_long = zip(*self.trim_examples(batch))
            loss = self.decoder(batch_short + rehearsal_short,
                                self.alphabet,
                                batch_long + rehearsal_long).mean()
            loss.backward()
            self.optimizer.step()
        self.past_examples.extend(batch)

    def test(self, examples):
        """Decode each batch with the adapted model and return all predictions."""
        self.decoder.eval()
        results = []
        for batch in batched(examples, self.batch_size):
            results.extend(self.decoder(batch, self.alphabet))
        return results
| true
|
31e94203b38d4983d907364176e7f3ed256b5166
|
Python
|
mukeshkrishna/datastructures
|
/merge_two_sorted_list.py
|
UTF-8
| 1,697
| 4.4375
| 4
|
[] |
no_license
|
"""Problem Statement #
Implement a function that merges two sorted lists of m and n elements respectively, into another sorted list. Name it merge_lists(lst1, lst2).
Input #
Two sorted lists.
Output #
A merged and sorted list consisting of all elements of both input lists.
Sample Input #
list1 = [1,3,4,5]
list2 = [2,6,7,8]
Sample Output #
arr = [1,2,3,4,5,6,7,8]
"""
def merge_lists(list1, list2):
    """Merge two sorted lists into a new sorted list with ALL elements.

    BUG FIX: the original equal-element branch appended only list1[i] while
    advancing both indices, silently dropping list2[j]. Using `<=` keeps
    every element of both inputs, as the problem statement requires.

    Args:
        list1, list2: sorted lists (not mutated).
    Returns:
        A new sorted list of length len(list1) + len(list2).
    """
    merged = []
    i = j = 0
    n, m = len(list1), len(list2)
    while i < n or j < m:
        if i == n:                      # list1 exhausted
            merged.append(list2[j])
            j += 1
        elif j == m:                    # list2 exhausted
            merged.append(list1[i])
            i += 1
        elif list1[i] <= list2[j]:      # <= keeps duplicates from both lists
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    return merged
def merge_lists2(list1, list2):
    """Merge two sorted lists (two-pointer variant with tail extension).

    BUG FIX: when list1[i] == list2[j] neither original branch advanced an
    index, so the while loop spun forever. The `<=` comparison advances
    list1's pointer on ties and keeps both duplicates.

    Args:
        list1, list2: sorted lists (not mutated).
    Returns:
        A new sorted list of length len(list1) + len(list2).
    """
    merged = []
    i = j = 0
    n, m = len(list1), len(list2)
    while i < n and j < m:
        if list1[i] <= list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # At most one of these is non-empty once the loop exits.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
def merge_lists3(list1,list2):
    """Concatenate list2 onto list1 in place and print the result.

    NOTE: unlike merge_lists/merge_lists2 this does NOT keep the result
    sorted; it mutates list1 and returns None.
    """
    list1.extend(list2)
    print(list1)
if(__name__ == "__main__"):
    # Demo with the sample input from the problem statement.
    list1 = [1,3,4,5]
    list2 = [2,6,7,8]
    print(merge_lists(list1,list2))
    print(merge_lists2(list1,list2))
    # NOTE: merge_lists3 mutates list1 (appends list2 onto it).
    merge_lists3(list1,list2)
| true
|
7349268d79c050a6a7b7a33368195e810bdc483a
|
Python
|
kimha1030/AirBnB_clone_v2
|
/web_flask/9-states.py
|
UTF-8
| 967
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/python3
"""Flask app listing State objects (AirBnB clone, task 9)."""
from flask import Flask
from flask import render_template
from models import storage
from models.state import State
from models.city import City
app = Flask(__name__)
@app.route('/states', strict_slashes=False)
def states():
    """Render the page listing every State object."""
    states = storage.all(State).values()
    return render_template('7-states_list.html', states=states)
@app.route('/states/<id>', strict_slashes=False)
def list_states(id):
    """Render a single State looked up by its exact id.

    BUG FIX: the original used `if id in state.id`, a substring test that
    could match a partial/ambiguous id; exact equality is intended.
    `ban` is True when a match was found; `name_st` is the matching State
    (template variable names kept unchanged for 9-states.html).
    """
    ban = False
    name_st = ""
    for state in storage.all(State).values():
        if state.id == id:
            ban = True
            name_st = state
            break
    return render_template('9-states.html', name_st=name_st, ban=ban)
@app.teardown_appcontext
def close_session(db):
    """Close the storage session after each request."""
    storage.close()
if __name__ == "__main__":
    # Listen on all interfaces, default Flask port.
    app.run(host='0.0.0.0', port=5000)
| true
|
b3d913e4fcba86cbd07f96a39ce7cf53fbcd9aeb
|
Python
|
microsoft/SparseSC
|
/src/SparseSC/utils/misc.py
|
UTF-8
| 3,069
| 2.71875
| 3
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
# Allow capturing output
# Modified (to not capture stderr too) from https://stackoverflow.com/questions/5136611/
import contextlib
import sys
from .print_progress import it_progressbar, it_progressmsg
@contextlib.contextmanager
def capture():
    """Temporarily replace sys.stdout with a sink that discards all writes."""
    saved_stdout = sys.stdout
    try:
        sys.stdout = DummyFile()
        yield
    finally:
        sys.stdout = saved_stdout
class DummyFile(object):
    """File-like sink: write() accepts anything and discards it."""

    def write(self, x):
        # Intentionally do nothing; returns None like a plain statement body.
        return None
@contextlib.contextmanager
def capture_all():
    """Silence both sys.stdout and sys.stderr for the duration of the block."""
    saved_out, saved_err = sys.stdout, sys.stderr
    try:
        sys.stdout = DummyFile()
        sys.stderr = DummyFile()
        yield
    finally:
        sys.stdout, sys.stderr = saved_out, saved_err
def par_map(part_fn, it, F, loop_verbose, n_multi=0, header="LOOP"):
    """Map part_fn over iterable `it` (F = expected item count), optionally
    in parallel and with progress reporting.

    loop_verbose: 0 = silent, 1 = progress bar, 2 = progress messages.
    n_multi: >0 runs in a multiprocessing Pool with that many workers.
    Returns the list of results in input order.
    """
    if n_multi>0:
        from multiprocessing import Pool
        with Pool(n_multi) as p:
            #p.map evals the it so can't use it_progressbar(it)
            if loop_verbose==1:
                rets = []
                print(header + ":")
                # imap keeps the results lazy so the progress bar can tick.
                for ret in it_progressbar(p.imap(part_fn, it), count=F):
                    rets.append(ret)
            elif loop_verbose==2:
                rets = []
                for ret in it_progressmsg(p.imap(part_fn, it), prefix=header, count=F):
                    rets.append(ret)
            else:
                rets = p.map(part_fn, it)
    else:
        # Serial path: wrap the iterator with the requested progress reporter.
        if loop_verbose==1:
            print(header + ":")
            it = it_progressbar(it, count=F)
        elif loop_verbose==2:
            it = it_progressmsg(it, prefix=header, count=F)
        rets = list(map(part_fn, it))
    return rets
class PreDemeanScaler:
    """Row-wise demeaning by the mean of the "pre" period.

    Rows are units; columns are "pre"/"post" separated. fit() on the
    pre-period columns stores per-row means, transform() subtracts them,
    inverse_transform() adds them back.
    """
    # maybe fit should just take Y and T0 (in init())?
    # Try in sklearn.pipeline with fit() for that and predict (on default Y_post)
    # might want wrappers around fit to make that work fine with pipeline (given its standard arguments).
    # maybe call the vars X rather than Y?

    def __init__(self):
        # Per-row means; populated by fit().
        self.means = None

    def fit(self, Y):
        """Store the per-row mean of Y (e.g. fit(Y.iloc[:, 0:T0]))."""
        import numpy as np
        self.means = np.mean(Y, axis=1)

    def transform(self, Y):
        """Subtract the fitted row means from every column of Y."""
        demeaned = Y.T - self.means
        return demeaned.T

    def inverse_transform(self, Y):
        """Add the fitted row means back onto every column of Y."""
        restored = Y.T + self.means
        return restored.T
def _ensure_good_donor_pool(custom_donor_pool, control_units):
N0 = custom_donor_pool.shape[1]
custom_donor_pool_c = custom_donor_pool[control_units, :]
for i in range(N0):
custom_donor_pool_c[i, i] = False
custom_donor_pool[control_units, :] = custom_donor_pool_c
return custom_donor_pool
def _get_fit_units(model_type, control_units, treated_units, N):
if model_type == "retrospective":
return control_units
elif model_type == "prospective":
return range(N)
elif model_type == "prospective-restricted:":
return treated_units
# model_type=="full":
return range(N) # same as control_units
| true
|
c922c9040e1b760db28a07a981f21c40e5155588
|
Python
|
osmaralg/mywebpage
|
/sampleapp/ML_scripts/predict_bankrrupcy/Final_project.py
|
UTF-8
| 5,569
| 2.5625
| 3
|
[] |
no_license
|
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.metrics import f1_score
from sklearn.utils import resample
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from style_functions import *
#credits https://towardsdatascience.com/predicting-bankruptcy-f4611afe8d2c
# --- Load the bankruptcy dataset and normalize labels / missing values. ---
d =pd.read_csv("bankruptcy.csv")
#d = d.iloc[:, :-1]
cols = pd.read_csv("ColumnNames.csv")
#cols = cols.iloc[:, :]
#d.columns = cols.columns
label_column = "class"
# Labels arrive as the literal strings "b'0'"/"b'1'"; map them to 0/1.
d[label_column].replace("b'0'", 0b0, inplace=True)
d[label_column].replace("b'1'", 0b1, inplace=True)
# Treat "?", empty, and whitespace-only cells as missing.
d.replace("?", np.nan, inplace=True)
d.replace(r'', np.nan, inplace=True)
d = d.replace(r'^\s+$', np.nan, regex=True)
#d = (d.drop(cols.columns, axis=1).join(d.apply(pd.to_numeric, errors='coerce')))
d = d.astype(float)
feature_columns = [c for c in d.columns if c != label_column]
X = d[feature_columns].values
y = d[label_column].values
'''
from scipy import stats
z = (d - d.mean())/d.std(ddof=0)
print(z)
z_row = np.nanmax(z, axis=1)
X = X[z_row < 5]
y = y[z_row < 5]
'''
print("", X.shape)
#Pie chart
# Visualize the class imbalance (bankrupt vs not bankrupt).
x = d['class'].value_counts()
labels = x
sizes = x
explode = (0, 0.2)
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=90)
ax1.axis('equal')
plt.title("Pie chart showing imbalance in Dataset. \nOrange: Bankrupt , Blue: Not Bankrupt\n")
print(d.describe().T.style.render())
#d.style.apply(highlight_max, color='darkorange', axis=None)
# Styled describe() table rendered to HTML for the web page.
describe_table = d.describe().T.style.apply(highlight_max, color='red', axis=0).render()
save_str_to_file(describe_table, 'machine_code.html')
# Showing only partial data
# Correlation heat map rendered to HTML (magnify() adds hover styling).
cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)
corr_map_str = d.corr()\
    .style.background_gradient(cmap, axis=None)\
    .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\
    .set_caption("Hover to magnify")\
    .set_precision(2)\
    .set_table_styles(magnify()).render()
save_str_to_file(corr_map_str, 'C:/Users/osmaralg/Desktop/mycompany/deploying-django-master/sampledeploy/sampleapp/templates/machine_corr_table.html')
# Impute missing values with each column's most frequent value.
imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imp.fit(X)
X = imp.transform(X)
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
print("Original feature size")
print("X.shape", X.shape)
# L1-penalized linear SVC drives weak feature weights to zero;
# SelectFromModel then keeps only features with nonzero weight.
select_features = 1
if select_features:
    lsvc = LinearSVC(penalty="l1", dual=False, C=0.0025, max_iter=10000).fit(X, y)
    model = SelectFromModel(lsvc, prefit=True)
    X = model.transform(X) # select only new features
print("Feature size after regularization")
print("X.shape", X.shape)
# Candidate classifiers (names list kept for reference; the lists below are
# defined but only LogisticRegression is actually fit in this script).
names = ["Logistic Regession with Lasso", "Logistic Regression with Ridge",
         "Logistic Regression Elastic Net", "KNeighbors Classifier",
         "Decision Tree", "AdaBoost"]
C = 0.7
classifiers = [
    LogisticRegression(multi_class='ovr', max_iter=5000000, solver='saga'),
    LogisticRegression(C=C, penalty='l2', tol=0.01, solver='saga'),
    LogisticRegression(C=C, penalty='elasticnet', solver='saga', l1_ratio=0.5, tol=0.01),
    KNeighborsClassifier(10),
    DecisionTreeClassifier(max_depth=200, class_weight='balanced'),
    AdaBoostClassifier()]
scores = []
#data split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Baseline logistic regression with default parameters.
clf = LogisticRegression().fit(X_train,y_train)
y_pred = clf.predict(X_test)
# Model Evaluation metrics
from sklearn.metrics import accuracy_score,recall_score,precision_score,f1_score
print('Accuracy Score : ' + str(accuracy_score(y_test,y_pred)))
print('Precision Score : ' + str(precision_score(y_test,y_pred)))
print('Recall Score : ' + str(recall_score(y_test,y_pred)))
print('F1 Score : ' + str(f1_score(y_test,y_pred)))
#Logistic Regression Classifier Confusion matrix
from sklearn.metrics import confusion_matrix
print('Confusion Matrix : \n' + str(confusion_matrix(y_test,y_pred)))
# Grid-search logistic regression hyperparameters, optimizing recall.
from sklearn.model_selection import GridSearchCV
clf = LogisticRegression(max_iter=500000, solver='saga')
grid_values = {'penalty': ['l1', 'l2'], 'C':[0.01,.09,1,5,9,10,11,25]}
grid_clf_acc = GridSearchCV(clf, param_grid = grid_values,scoring = 'recall')
grid_clf_acc.fit(X_train, y_train)
#Predict values based on new parameters
y_pred_acc = grid_clf_acc.predict(X_test)
# New Model Evaluation metrics
print('Accuracy Score : ' + str(accuracy_score(y_test,y_pred_acc)))
print('Precision Score : ' + str(precision_score(y_test,y_pred_acc)))
print('Recall Score : ' + str(recall_score(y_test,y_pred_acc)))
print('F1 Score : ' + str(f1_score(y_test,y_pred_acc)))
#Logistic Regression (Grid Search) Confusion matrix
# NOTE(review): this confusion matrix result is computed but discarded.
confusion_matrix(y_test,y_pred_acc)
# Final model: scaler + AdaBoost fit on the full (selected-feature) data.
model = make_pipeline(
    StandardScaler(),
    AdaBoostClassifier()
).fit(X,y)
| true
|
3abf8d7f27d317c01b1bb5cef4190d15c5993453
|
Python
|
IgnacioPardo/PythonGalore
|
/scripts/style-transfer.py
|
UTF-8
| 1,530
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
import tensorflow as tf
import numpy as np
import tensorflow_hub as hub
from PIL import Image
from argparse import ArgumentParser
# Load the Magenta arbitrary-image-stylization model once at import time.
hub_module = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/1')
def style_transfer(content_image: np.ndarray,
                   style_image: np.ndarray, *,
                   resize: bool = True) -> np.ndarray:
    """Apply the style of `style_image` to `content_image`.

    Inputs are RGB image arrays (uint8, per the CLI below — confirm for
    other callers); the result is a uint8 RGB array. When `resize` is True
    the style image is resized to 256x256, the hub model's native size.
    """
    # Model expects float32 batches scaled to [0, 1].
    content_image = content_image.astype(np.float32)[np.newaxis, ...] / 255
    style_image = style_image.astype(np.float32)[np.newaxis, ...] / 255
    if resize:
        style_image = tf.image.resize(style_image, (256, 256))
    result = hub_module(tf.constant(content_image), tf.constant(style_image))[0]
    # Back to uint8 pixels; [0] drops the batch dimension.
    result = result * 255
    result = np.array(result, dtype=np.uint8)
    return result[0]
if __name__ == "__main__":
    # CLI: apply the style of --style onto the input image and save it.
    # NOTE(review): the description text looks copy-pasted from a cropping tool.
    parser = ArgumentParser(description="Tool that crops an image to a target size")
    parser.add_argument("input", type=str, help="Input image's filename")
    parser.add_argument("--style", "-s", type=str, help="Style image's filename")
    parser.add_argument("--output", '-o', type=str, default="output.jpg", help="Output image's filename")
    parser.add_argument("--no-resize", help="Disable style image resize", action="store_true", dest="no_resize")
    args = parser.parse_args()
    img = np.array(Image.open(args.input).convert("RGB"))
    style = np.array(Image.open(args.style).convert("RGB"))
    result = style_transfer(img, style, resize=not args.no_resize)
    Image.fromarray(result).save(args.output)
| true
|
327e1616ddac8f982f25e04cce486c77830a36c9
|
Python
|
felipehv/iic2233
|
/Actividades/AC14/test_ac14.py
|
UTF-8
| 2,097
| 2.8125
| 3
|
[] |
no_license
|
from main import *
import pytest
class TestSistema():
    """pytest tests for the course-enrolment system defined in main.py.

    NOTE(review): pytest passes the test *instance* as the first argument of
    these methods; here it is named `cls` instead of the conventional
    `self`. Behavior is unaffected.
    """
    def setup_method(cls, method):
        # Fresh database and student before every test; the student starts
        # with two courses already taken.
        cls.base = Base()
        cls.ramos = list()
        cls.alumno = Alumno(cls.base, 0, "Felipe")
        cls.alumno.tomar_ramo("ICS3902") # 30 credits
        cls.alumno.tomar_ramo("ICT2213") # 10 credits, 10 remaining
    def test_vacantes(cls):
        # No vacancies left: enrolment must fail.
        cls.base.db[0].vacantes = 0
        sigla = cls.base.db[0].sigla
        assert not cls.base.inscribir(sigla, cls.alumno)
    def test_vacantes2(cls):
        # Vacancies available: enrolment succeeds.
        cls.base.db[0].vacantes = 19
        sigla = cls.base.db[0].sigla
        assert cls.base.inscribir(sigla, cls.alumno)
    def test_tomar_repetido(cls):
        # Already enrolled in this course.
        assert not cls.alumno.tomar_ramo("ICT2213")
    def test_tomar_exceso_creditos(cls):
        assert not cls.alumno.tomar_ramo("ICT3442") # False because of credits
    def test_tomar_repeticion_creditos(cls):
        # Fails due to both repetition and credits.
        assert not cls.alumno.tomar_ramo("ICS3902")
    def test_tomar_true(cls):
        assert cls.alumno.tomar_ramo("ICS2523") # Passes
    def test_botar_ramo_no_tomado(cls):
        # False because the course was never taken.
        assert not cls.alumno.botar_ramo("ICT3352")
    def test_botar_ramo_tomado(cls):
        # Passes because the course is currently taken.
        assert cls.alumno.botar_ramo("ICS3902")
    def test_botar_ramo_botado(cls):
        cls.alumno.botar_ramo("ICS3902")
        assert not cls.alumno.botar_ramo("ICS3902") # Already dropped
    def test_creditos(cls):
        # Dropping a course must free its vacancy back to 30.
        cls.alumno.botar_ramo("ICS3902")
        for ramodb in cls.base.db:
            if ramodb.sigla == "ICS3902":
                ramo = ramodb
                break
        assert ramo.vacantes == 30
    def test_repeticion_repetido(cls):
        cls.alumno.agregar_ramo("ICS3902") # Force-enrol a course
        assert not cls.alumno.chequear_repeticion(
            "ICS3902") # The course is now there twice
    def test_repeticion_no_repetido(cls):
        assert cls.alumno.chequear_repeticion(
            "IIC2233") # The course is not there
if __name__ == "__main__":
    # Tests are run via pytest; nothing to do when executed directly.
    pass
| true
|
96f81075b0694e5033e28e4066cf966cd4225881
|
Python
|
cs20-1/cursor-MasonShenner
|
/Masons_curser/Masons_curser.pyde
|
UTF-8
| 278
| 3.078125
| 3
|
[] |
no_license
|
# Mason Shenner
# Computer Science
# October 2
# Circle with cross
def setup():
    # 500x500 px canvas (Processing.py built-in).
    size(500,500)
def draw():
    # Red circle offset from the mouse position by (250, 250).
    fill(255,0,0)
    ellipse(mouseX + 250, mouseY + 250, 400, 400)
    # Black cross centered on the circle.
    fill(0,0,0)
    rect(mouseX + 225, mouseY + 50, 50, 400)
    rect(mouseX + 50, mouseY + 225, 400, 50)
| true
|
538775e59fae3ed7d10996580b7e229c881ae0b2
|
Python
|
DeanFujimoto/python
|
/test.py
|
UTF-8
| 1,750
| 2.671875
| 3
|
[] |
no_license
|
from scipy.io import loadmat
import numpy as num
import math
# Load the 16-node state table and the equilibrium weight vectors.
data = loadmat('16_state.mat')
data1 = loadmat('exo0.5_endo1.0023_h1.mat')
rows, cols = (16, 16)
# Pairwise mutual-information accumulator (only the upper triangle is filled).
mut = [[0 for i in range(cols)] for j in range(rows)]
state = data.get('STATE')     # one row per global state, one column per node
eqADD = data1.get('eqADD')    # per-state weights summed below
QADD = data1.get('QADD')
eqPROD = data1.get('eqPROD')
def pairwise(num1, num2, s1, s2):
    """Sum of eqADD weights over global states where node num1 is in state
    s1 AND node num2 is in state s2 (their joint weight).

    Generalized: the loop bound is taken from the loaded `state` array
    instead of the previous hard-coded 65536 (= 2**16 rows for 16 binary
    nodes), so the code follows the data.
    """
    total = 0
    for i in range(len(state)):
        if state[i, num1] == s1 and state[i, num2] == s2:
            total += eqADD[i]
    return total
pairs = num.zeros((16,16,4))
# Joint weights for every node pair and the four (s1, s2) combinations
# (0,0), (0,1), (1,0), (1,1); only the upper triangle (col >= row) is filled.
for row in range (0,16):
    for col in range (row,16):
        pairs[row,col,0] =pairwise(row, col, 0,0)
        pairs[row,col, 1] =pairwise(row, col, 0,1)
        pairs[row,col, 2] =pairwise(row, col, 1,0)
        pairs[row,col, 3] =pairwise(row, col, 1,1)
        ##print(pairs[row,col,0])
        ##print(pairs[row,col,1])
        ##print(pairs[row,col,2])
        ##print(pairs[row,col,3])
def margins(num, s):
    """Sum of eqADD weights over global states where node `num` is in state
    `s` (the node's marginal weight for that state).

    Generalized: the loop bound is taken from the loaded `state` array
    instead of the previous hard-coded 65536 (= 2**16 rows for 16 binary
    nodes).
    """
    total = 0
    for i in range(len(state)):
        if state[i, num] == s:
            total += eqADD[i]
    return total
marginARR=[]
# marginARR[k] holds the marginal weight of node k being in state 0
# (presumably a probability if eqADD is normalized — TODO confirm).
for i in range (0,16):
    marginARR.append(margins(i,0))
print(marginARR)
# Pairwise mutual information over the upper triangle; the diagonal is
# skipped. (1 - marginARR[k]) is the weight of node k being in state 1.
for row in range(0,16):
    for col in range(row,16):
        mutsum = 0
        if(row != col):
            mutsum += pairs[row][col][0] * num.log2((pairs[row][col][0])/(marginARR[row]*marginARR[col]))
            mutsum += pairs[row][col][1] * num.log2((pairs[row][col][1])/(marginARR[row]*(1-marginARR[col])))
            mutsum += pairs[row][col][2] * num.log2((pairs[row][col][2])/((1-marginARR[row])*marginARR[col]))
            mutsum += pairs[row][col][3] * num.log2((pairs[row][col][3])/((1-marginARR[row])*(1-marginARR[col])))
        mut[row][col] = mutsum
##print(mut)
| true
|
28eddf2ac9a45555fc8fafd356e8211e7b8f0918
|
Python
|
HiAwesome/python-algorithm
|
/c05/p185.py
|
UTF-8
| 1,048
| 4.0625
| 4
|
[
"Apache-2.0"
] |
permissive
|
def quicksort(alist):
    """Sort alist in place by delegating to the recursive helper."""
    quicksortHelper(alist, 0, len(alist) - 1)
def quicksortHelper(alist, first, last):
    """Recursively quicksort alist[first:last + 1] in place.

    Uses the first element of the range as the pivot with a two-pointer
    Hoare-style partition, then recurses on both sides of the pivot.
    """
    if first >= last:
        return
    pivot = alist[first]
    lo, hi = first + 1, last
    while True:
        # Advance lo past elements <= pivot, retreat hi past elements >= pivot.
        while lo <= hi and alist[lo] <= pivot:
            lo += 1
        while lo <= hi and alist[hi] >= pivot:
            hi -= 1
        if hi < lo:
            break
        alist[lo], alist[hi] = alist[hi], alist[lo]
    # Place the pivot at its final position and sort each side.
    alist[first], alist[hi] = alist[hi], alist[first]
    quicksortHelper(alist, first, hi - 1)
    quicksortHelper(alist, hi + 1, last)
if __name__ == '__main__':
    # Demo: print the sample list before and after sorting in place.
    alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    print(alist)
    quicksort(alist)
    print(alist)
"""
[54, 26, 93, 17, 77, 31, 44, 55, 20]
[17, 20, 26, 31, 44, 54, 55, 77, 93]
"""
| true
|
565c21c9784beb95aea35945a4aa994812ffe0e7
|
Python
|
JakeStubbs4/MTHE-493
|
/eigenfaces.py
|
UTF-8
| 5,689
| 3.125
| 3
|
[] |
no_license
|
# MTHE-493 Facial Recognition Project
# EigenFaces Implementation
# Prepared by Jake Stubbs
from matplotlib import pyplot as plt
import numpy as np
import os
from utilities import euclideanDistance, importDataSet, FaceImage, EigenPair, KNearestNeighbors
# Computes the vector representation of the average face of all of the faces in the provided dataset.
def averageVector(face_images):
    """Return the mean face of the dataset as a column vector (shape (-1, 1))."""
    stacked = [face.image_array for face in face_images]
    return np.mean(stacked, axis=0).reshape(-1, 1)
# Computes the standard deviation of each face image and returns an array of deviation vectors.
def standardDeviation(face_images, average_face):
    """Return each face's deviation vector (image_vector - average_face).

    Note: despite its name this returns raw deviation vectors, not a
    standard deviation; the name is kept for interface compatibility.
    """
    return [face.image_vector - average_face for face in face_images]
# Computes the eigenvectors of the provided empirical covariance matrix A.
def covarianceEigenvectors(face_deviations, A):
    """Eigen-decompose L = A^T A, the small surrogate of the covariance C = A A^T.

    `face_deviations` is unused here but kept for interface compatibility.
    Returns the (eigenvalues, eigenvectors) pair from np.linalg.eig.
    """
    surrogate = np.dot(np.transpose(A), A)
    return np.linalg.eig(surrogate)
# Converts eigen vector to face images to be displayed.
def getEigenFace(eigen_vector, A, shape=(150, 150)):
    """Map a low-dimensional eigenvector back to image space.

    Generalized: `shape` controls the output image dimensions. The default
    (150, 150) preserves the original hard-coded behavior, so existing
    callers are unaffected while other resolutions are now supported.
    """
    eigen_face = np.dot(A, eigen_vector).reshape(shape)
    return eigen_face
# Projects newly introduced face image onto predetermined low dimensional image space.
def projectImage(face_image, eigen_pairs, average_face, A):
    """Project a face onto the low-dimensional eigenface space.

    Returns one weight per eigen pair: omega_k = (A @ v_k) . (face - mean).
    """
    deviation = face_image - average_face
    return [np.dot(np.dot(A, pair.eigen_vector), deviation)
            for pair in eigen_pairs]
# Classify unidentified face image projection based on the projections of the identified K nearest neighbors.
def classifyImage(corresponding_faces, new_face_projection):
    """Pick the best-matching identity among the K nearest neighbors.

    Faces sharing an identity are merged by iteratively averaging their
    OMEGA_k projections (a running pairwise mean, as in the original);
    the identity whose merged projection is closest (Euclidean) to the
    new face's projection wins.
    """
    merged = dict()
    for face in corresponding_faces:
        if face.identity not in merged:
            merged[face.identity] = face
        else:
            kept = merged[face.identity]
            kept.OMEGA_k = np.mean([kept.OMEGA_k, face.OMEGA_k], axis=0)
    ranked = [(face, euclideanDistance(new_face_projection, face.OMEGA_k))
              for face in merged.values()]
    ranked.sort(key=lambda pair: pair[1])
    return ranked[0][0]
def identify(face_images, ms_eigen_pairs, OPTIMAL_K, average_face, A, unidentified_image=None):
    """Classify one face against the trained eigenface subspace.

    When unidentified_image is None, the filename is read interactively and
    the query/match images are displayed with matplotlib; otherwise the given
    FaceImage is classified silently and 1/0 is returned for a
    correct/incorrect identity match.
    """
    if (unidentified_image == None):
        # Introduce new face and classify
        new_face_file = input("Enter the filename of an image to be classified: ")
        new_face = FaceImage(new_face_file, None)
    else:
        new_face = unidentified_image
    # Project the new face onto the subspace, then vote among the K nearest
    # training projections.
    new_face_projection = projectImage(new_face.image_vector, ms_eigen_pairs, average_face, A)
    corresponding_faces = KNearestNeighbors(face_images, new_face_projection, OPTIMAL_K)
    for face in corresponding_faces:
        print(face.identity)
    corresponding_face = classifyImage(corresponding_faces, new_face_projection)
    if (unidentified_image == None):
        # Interactive mode: show the query image and the best match side by side.
        plt.figure(2)
        plt.title("Unidentified")
        new_face.displayImage()
        plt.figure(3)
        plt.title("Possible Match")
        corresponding_face.displayImage()
        plt.show()
    else:
        print(f"Corresponding Face: {corresponding_face.identity}")
        print(f"Unidentified Face: {new_face.identity}")
        # Batch mode: 1 for a correct identification, 0 otherwise.
        if (corresponding_face.identity == new_face.identity):
            return 1
        else:
            return 0
def main():
    '''IMPORT DATA SET AND TRAIN'''
    # Import training data set.
    face_images = importDataSet()
    # Compute the average of all of the imported face images.
    average_face = averageVector(face_images)
    # Compute the deviation of all of the face images.
    face_deviations = standardDeviation(face_images, average_face)
    # Calculate A matrix, impirical covariance matrix is given by C = A*AT
    A = np.concatenate(face_deviations, axis=1)
    # Calculate eigen vectors and values from the impirical covariance matrix.
    eigen_values, eigen_vectors = covarianceEigenvectors(face_deviations, A)
    # Pair the eigenvectors and eigenvalues then order pairs by decreasing eigenvalue magnitude.
    eigen_pairs = []
    for i in range(len(eigen_values)):
        # NOTE(review): eigen_vectors[i] indexes the i-th ROW, but
        # numpy.linalg.eig returns eigenvectors as COLUMNS
        # (eigen_vectors[:, i]) — confirm EigenPair expects a row here.
        eigen_pairs.append(EigenPair(eigen_values[i], eigen_vectors[i]))
    eigen_pairs.sort(key=lambda x: x.magnitude, reverse=True)
    # Optimal dimension for accuracy of recognition.
    OPTIMAL_DIM = 7
    # Optimal nearest neighbors to consider for accuracy of recognition.
    OPTIMAL_K = 3
    # Choose a subset of eigenpairs corresponding to DIM largest eigenvalues.
    ms_eigen_pairs = []
    for k in range(OPTIMAL_DIM):
        ms_eigen_pairs.append(eigen_pairs[k])
    # Residual error: the eigenvalue mass left out of the chosen subspace.
    error = 0
    for k in range(OPTIMAL_DIM + 1, len(eigen_pairs) - 1):
        error += eigen_pairs[k].magnitude
    print(f"Residual error of eigenfaces is: {error}")
    # Classify the given training dataset based on the chosen subspace.
    for face in face_images:
        face.OMEGA_k = projectImage(face.image_vector, ms_eigen_pairs, average_face, A)
        print(face.OMEGA_k)
    # Evaluate recognition accuracy over the held-out "unidentified" images.
    unidentified_images = importDataSet(os.getcwd() + "/Face_Images/unidentified", True)
    performance_vector = []
    for unidentified_image in unidentified_images:
        performance_vector.append(identify(face_images, ms_eigen_pairs, OPTIMAL_K, average_face, A, unidentified_image))
    print(f"The resulting algorithm achieves {(sum(performance_vector)/len(performance_vector))*100}% recognition accuracy.")
# Script entry point: train on the dataset, then evaluate on unidentified images.
if __name__ == "__main__":
    main()
| true
|
4b05faeee9cc5e70aa951331a8da1d7f8d7b03ae
|
Python
|
nickvirden/deep-learning-portfolio
|
/Fundamentals/Data Preprocessing/data-preprocessing.py
|
UTF-8
| 3,619
| 3.796875
| 4
|
[] |
no_license
|
# Data-preprocessing walkthrough (Python 2): impute missing values, encode
# categoricals, split train/test, and standardize features.
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Suppresses automatic format guessing by numpy
# I set this option because it changes numbers into scientific notation after they get disproportionately large
np.set_printoptions(suppress=True)
# Import the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
# Take care of missing data
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn 0.22
# (replaced by SimpleImputer) — this script assumes an old, pinned sklearn.
from sklearn.preprocessing import Imputer
# Imputer by default replaces NaN values with the mean of the column
imputer = Imputer()
# Fits Imputer object to Matrix X
imputer = imputer.fit(X[:, 1:3])
# Replace the data in the second and third columns with the mean of their column values
X[:, 1:3] = imputer.transform(X[:, 1:3])
# View the filled in variables
# print X[:, 1:3]
# Encoding categorial data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# Create new instance of label encoder for the independent variables
label_encoder_X = LabelEncoder()
# Encodes categorical variables in the first column
# In this case, the first column is the countries
# The only problem here is that if we just do encoding on one column, the program will think, for example, France (1) is greater than Germany (0), which makes no sense, so we have to use dummy variables
X[:, 0] = label_encoder_X.fit_transform(X[:, 0])
# Print out (incorrect) encoded Matrix X
# print X
# OneHotEncoder will treat all the columns as categorical variables by default, so we pass an array with the indices into the categorical_features paramter to encode only that column
one_hot_encoder = OneHotEncoder(categorical_features=[0])
# Returns three categorical columns - one for each country - as a numpy array (matrix)
X = one_hot_encoder.fit_transform(X).toarray()
# Print variables rounded to 2 decimal places
# print np.around(X[:, :-1], decimals=2)
# Create new instance of label encoder for the dependent variable
label_encoder_y = LabelEncoder()
# Label encode dependent variable "Purchased"
y = label_encoder_y.fit_transform(y)
# Split the dataset into the Training and Test sets
# NOTE(review): sklearn.cross_validation was removed; modern sklearn exposes
# train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
# Create Training and Test sets that are associated with each other
# As a general rule of thumb, the test size should be somewhere between 20% and 30% of the dataset, and certainly no more than 40%
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# print np.around(X_test, decimals=2)
# print np.around(X_train, decimals=2)
# print np.around(y_test, decimals=2)
# print np.around(y_train, decimals=2)
# The last step in data preprocessing is normalizing the data so no one feature dominates the fit of the dataset
# For example, salary is in $1,000s, whereas age is in 10s, so we need to bring the values to an equal scale in order to get a proper fit
# TWO METHODS OF FEATURE SCALING
# Standardization => (x - mean(data)) / st-dev(data)
# Normalization => (x - min(data)) / (max(data) - min(data))
from sklearn.preprocessing import StandardScaler
# Scale X
sc_X = StandardScaler()
# Fit and transform Training set
X_train = sc_X.fit_transform(X_train)
# Only need to transform Test set because we already fit the X Matrix
X_test = sc_X.transform(X_test)
# Sometimes y will need to be fitted, but in this case, it's only 0 and 1, so we don't need to scale it
# Python 2 print statements (this whole script targets Python 2).
print np.around(X_test, decimals=3)
print np.around(X_train, decimals=3)
print np.around(y_test, decimals=3)
print np.around(y_train, decimals=3)
| true
|
459fd6c77aa5058435f5aa6b418c12fb0cb6e582
|
Python
|
dreadlordow/Softuni-Python-Advanced
|
/02.Tuples_and_Sets-Exrecise/7.Battle_of_names.py
|
UTF-8
| 650
| 3.609375
| 4
|
[] |
no_license
|
# Read N names; the "score" of the i-th name (1-based) is the sum of its
# character codes, integer-divided by i.  Even scores and odd scores are
# collected into separate sets.
n = int(input())
even_set = set()
odd_set = set()
for i in range(1, n + 1):
    name = input()
    summed = sum([ord(x) for x in name]) // i
    if summed % 2 == 0:
        even_set.add(summed)
    else:
        odd_set.add(summed)
even_sum = sum(even_set)
odd_sum = sum(odd_set)
# Compare the two sets' totals and print the winning combination:
#  - odd wins  -> values only in the odd set (difference)
#  - even wins -> values in exactly one of the sets (symmetric difference)
#  - tie       -> all values (union)
if odd_sum > even_sum:
    difference_values = odd_set.difference(even_set)
    print(', '.join([str(x) for x in difference_values]))
elif even_sum > odd_sum:
    sym_values = odd_set.symmetric_difference(even_set)
    print(', '.join([str(x) for x in sym_values]))
else:
    union_values = odd_set.union(even_set)
    print(', '.join([str(x) for x in union_values]))
| true
|
f37c8d222d222916a066337a44e933bf61f5f6db
|
Python
|
ysachinj99/PythonFile
|
/Frame.py
|
UTF-8
| 84
| 2.75
| 3
|
[] |
no_license
|
# Minimal Tkinter example: open a root window holding one (empty) Frame and
# run the event loop until the window is closed.
from tkinter import *
root=Tk()
frame=Frame(root)
frame.pack()
root.mainloop()
| true
|
7c275ced5bd26fa0000aaf344f9b113475596ce6
|
Python
|
YuanbenWang/learngit
|
/android/spy_360.py
|
UTF-8
| 1,261
| 2.734375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 12 11:30:11 2017
@author: GXW
used
"""
import re
import urllib
import os
# response=urllib.urlopen('http://zhushou.360.cn/list/index/cid/1?page=1')
# html=response.read()
# link_list=re.findall(r"(?<=&url=).*?apk",html)
# for url in link_list:
# print url
class testClass:
    """Crawler for the 360 app store (Python 2): builds listing-page URLs,
    scrapes .apk links from each page and downloads every apk to a numbered
    file under a hard-coded local directory."""

    def __init__(self):
        self.urllist = []   # queue of listing-page URLs still to crawl
        self.k = 1          # running counter used to name downloaded apks
        self.baseurl = 'http://zhushou.360.cn/list/index/cid/1?page='

    def geturl(self, pageindex):
        # Build the listing-page URLs for pages 1..pageindex.
        for i in range(1, pageindex + 1):
            self.urllist.append(self.baseurl + str(i))

    def spider(self):
        # Consume the URL queue front-to-back: fetch each page, extract its
        # apk links, and save every apk as "<k>.apk".
        for i in range(len(self.urllist)):
            response = urllib.urlopen(self.urllist[0])
            html = response.read()
            # Grab everything between "&url=" and the next ".apk".
            link_list = re.findall(r"(?<=&url=).*?apk", html)
            for url in link_list:
                file_name = "%d.apk" % (self.k)
                self.k = self.k + 1
                file_path = os.path.join("/home/huu/Downloads/apk_360", file_name)
                urllib.urlretrieve(url, file_path)
            # Pop the processed URL off the front of the queue.
            del self.urllist[0]
            print (i)

    def start(self):
        # Crawl the first 50 listing pages.
        self.geturl(50)
        self.spider()
# Run the crawl immediately when the module is executed.
a = testClass()
a.start()
| true
|
0de9972676b2c6b5e3e69a48ff5573bfdd788c56
|
Python
|
kmarkley-ksu/HumanEmotions
|
/Emotion_Detection_CNN/Source/Emotion_CNN.py
|
UTF-8
| 6,600
| 2.625
| 3
|
[] |
no_license
|
#<editor-fold> Import Statements
import matplotlib.pyplot as plot
#%matplotlib inline
import numpy
#import tensorflow
import tensorflow.compat.v1 as tensorflow
tensorflow.disable_v2_behavior()
#</editor-fold> Import Statements
#<editor-fold> Unzipping the file and getting each picture.
# Directory holding the extracted CIFAR-10 python-version batch files.
CIFAR_DIR = "./cifar-10-batches-py/"
def unpickle(file):
    """Load one pickled CIFAR batch file and return its dict (bytes-keyed)."""
    import pickle
    with open(file, "rb") as handle:
        return pickle.load(handle, encoding="bytes")
#</editor-fold> Unzipping the file and getting each picture.
#<editor-fold> Initial Set Up
# Batch file names inside CIFAR_DIR: metadata, five training batches, one test batch.
dirs = ["batches.meta", "data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", "data_batch_5", "test_batch"]
all_data = [0, 1, 2, 3, 4, 5, 6]
# Replace each placeholder index with the unpickled dict for that batch file.
for i, direc in zip(all_data, dirs):
    all_data[i] = unpickle(CIFAR_DIR + direc)
batch_meta = all_data[0]
data_batch1 = all_data[1]
data_batch2 = all_data[2]
data_batch3 = all_data[3]
data_batch4 = all_data[4]
data_batch5 = all_data[5]
test_batch = all_data[6]
#print(CIFAR_DIR + direc)
#print(batch_meta)
X = data_batch1[b"data"]
#Take the data of 10,000 pictures of 3 colors of size 32 by 32 and
#reshape the X array so that it now holds 10,000 pictures that are 32 by 32 with colors.
#Also limit the data type in the array to 8 bit integers.
X = X.reshape(10000, 3, 32, 32).transpose(0, 3, 2, 1).astype("uint8")
# Sanity prints: raw pixel max (255) and normalized max (1.0).
print(X[0].max())
print((X[0] / 255).max())
#</editor-fold> Initial Set Up
#<editor-fold> Helper Functions
def one_hot_encode(vector, values = 10):
    """
    One-hot encode integer class labels.

    Returns a (len(vector), values) float array with a single 1 per row at
    the label's index — lets the CNN compare against a per-class output.
    """
    count = len(vector)
    encoded = numpy.zeros((count, values))
    encoded[numpy.arange(count), vector] = 1
    return encoded
def init_weights(shape):
    # Weights drawn from a truncated normal (stddev 0.1) to break symmetry.
    init_random_dist = tensorflow.truncated_normal(shape, stddev = 0.1)
    return tensorflow.Variable(init_random_dist)

def init_bias(shape):
    # Small positive bias so ReLU units start active.
    init_bias_values = tensorflow.constant(0.1, shape = shape)
    return tensorflow.Variable(init_bias_values)

def conv2d(x, W):
    # 2-D convolution, stride 1, zero padding that preserves spatial size.
    return tensorflow.nn.conv2d(x, W, strides = [1, 1, 1, 1], padding = "SAME" )

def max_pool_2by2(x):
    # 2x2 max pooling with stride 2: halves the spatial dimensions.
    return tensorflow.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = "SAME")

def convolutional_layer(input_x, shape):
    # Convolution + bias followed by a ReLU nonlinearity.
    W = init_weights(shape)
    b = init_bias( [shape[3]] )
    return tensorflow.nn.relu(conv2d(input_x, W) + b)

def normal_full_layer(input_layer, size):
    # Fully connected layer: input @ W + b (no activation applied here).
    input_size = int( input_layer.get_shape()[1] )
    W = init_weights( [input_size, size] )
    b = init_bias( [size] )
    return tensorflow.matmul(input_layer, W) + b
#</editor-fold> Helper Functions
#<editor-fold> CIFAR Class
class CIFARHelper():
    """Loads the unpickled CIFAR batches into normalized image arrays with
    one-hot labels and serves sequential training mini-batches."""

    def __init__(self):
        self.i = 0  # cursor into the training set used by next_batch
        self.all_training_batches = [data_batch1, data_batch2, data_batch3, data_batch4, data_batch5]
        self.test_batch = [test_batch]
        self.training_images = None
        self.training_labels = None
        self.test_images = None
        self.test_labels = None

    def set_up_images(self):
        """Reshape all batches to (N, 32, 32, 3), scale pixels to [0, 1]
        and one-hot encode the labels."""
        print("Setting up the training images and labels!")
        self.training_images = numpy.vstack( [ d[b"data"] for d in self.all_training_batches ] )
        training_images_length = len(self.training_images)
        self.training_images = self.training_images.reshape(training_images_length, 3, 32, 32).transpose(0, 3, 2, 1) / 255
        self.training_labels = one_hot_encode(numpy.hstack( [ d[b"labels"] for d in self.all_training_batches ] ), 10)
        print("Setting up the testing images and labels!")
        self.test_images = numpy.vstack( [ d[b"data"] for d in self.test_batch ] )
        testing_images_length = len(self.test_images)
        self.test_images = self.test_images.reshape(testing_images_length, 3, 32, 32).transpose(0, 3, 2, 1) / 255
        #CNN will give the labels for the testing batch.
        self.test_labels = one_hot_encode(numpy.hstack( [ d[b"labels"] for d in self.test_batch ] ), 10)

    def next_batch(self, batch_size):
        """Return the next (images, labels) slice, wrapping at the end.

        NOTE(review): the reshape hard-codes 100, so this only works when
        batch_size == 100 — confirm callers never pass another size.
        """
        x = self.training_images[self.i: self.i + batch_size].reshape(100, 32, 32, 3)
        y = self.training_labels[self.i: self.i + batch_size]
        self.i = (self.i + batch_size) % len(self.training_images)
        return x, y
#</editor-fold> CIFAR Class
#<editor-fold> Main Program
# Build a small two-conv-layer CNN on CIFAR-10 and train it for 500 steps
# (TensorFlow 1.x graph mode via the compat.v1 shim).
ch = CIFARHelper()
ch.set_up_images()
# Placeholders: input images, true one-hot labels, dropout keep-probability.
x = tensorflow.placeholder( tensorflow.float32, shape = [None, 32, 32, 3] )
y_true = tensorflow.placeholder( tensorflow.float32, shape = [None, 10] )
hold_prob = tensorflow.placeholder(tensorflow.float32)
#Create the convolutional layers
convo_1 = convolutional_layer(x, shape = [4, 4, 3, 32])
convo_1_pooling = max_pool_2by2(convo_1)
convo_2 = convolutional_layer(convo_1_pooling, shape = [4, 4, 32, 64])
convo_2_pooling = max_pool_2by2(convo_2)
#Done creating the convolutional layers.
#Flatten the output to a 1D vector.
# After two 2x2 poolings a 32x32 image is 8x8 with 64 channels.
convo_2_flat = tensorflow.reshape(convo_2_pooling, [-1, 8 * 8 * 64])
#Make the fully connected layers.
full_layer_one = tensorflow.nn.relu(normal_full_layer(convo_2_flat, 1024))
full_one_dropout = tensorflow.nn.dropout(full_layer_one, keep_prob = hold_prob)
#Done making the fully connected layers.
#Set up output
y_pred = normal_full_layer(full_one_dropout, 10)
#Apply loss function
cross_entropy = tensorflow.reduce_mean(tensorflow.nn.softmax_cross_entropy_with_logits(labels = y_true, logits = y_pred))
#Create optimizer
optimizer = tensorflow.train.AdamOptimizer(learning_rate = 0.001)
train = optimizer.minimize(cross_entropy)
#Create a variable to initalize all the global tensorflow variables.
init = tensorflow.global_variables_initializer()
#Run the CNN by using a graph session.
with tensorflow.Session() as sess:
    sess.run(tensorflow.global_variables_initializer())
    for i in range(500):
        batch = ch.next_batch(100)
        # Dropout keeps 50% of units during training.
        sess.run(train, feed_dict = {x: batch[0], y_true: batch[1], hold_prob: 0.5 })
        #Print out a message every 100 steps.
        if i % 100 == 0:
            print("Currently on step {}".format(i))
            print("Accuracy is: ")
            #Test the train model
            matches = tensorflow.equal(tensorflow.argmax(y_pred, 1), tensorflow.argmax(y_true, 1))
            acc = tensorflow.reduce_mean(tensorflow.cast(matches, tensorflow.float32))
            # Evaluate on the full test set with dropout disabled (keep prob 1.0).
            print( sess.run(acc, feed_dict = {x: ch.test_images, y_true: ch.test_labels, hold_prob: 1.0}) )
            print("\n")
#Displaying an image from our batch.
# plot.imshow(X[0])
# plot.show()
#plot.close()
#</editor-fold> Main Program
| true
|
ac3434b7717f2bcff37ef0c056f0993c70b4c521
|
Python
|
videoturingtest/FALCON2018
|
/src/train_test/main.py
|
UTF-8
| 2,792
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
"""
FALCON: FAst and Lightweight CONvolution
Authors:
- Chun Quan (quanchun@snu.ac.kr)
- U Kang (ukang@snu.ac.kr)
- Data Mining Lab. at Seoul National University.
File: train_test/train_test.py
- receive arguments and train_test/test the model.
Version: 1.0
This software is free of charge under research purposes.
For commercial purposes, please contact the authors.
"""
import sys
sys.path.append('../')
from train_test.train import train
from train_test.test import test
from models.model_standardConv import StandardConvModel
from models.model_MobileConv import MobileConvModel
from models.model_FALCON import FALCONModel
from models.model_MobileConv_rank import RankMobileConvModel
from models.model_FALCON_rank import RankFALCONModel
from utils.default_param import get_default_param
from utils.save_restore import save_model
from utils.save_restore import load_model
from utils.compression_cal import cr_crr
def main(args):
    """Build the requested FALCON/MobileConv model, then train+test or
    load+benchmark it depending on args.is_train.

    args comes from utils.default_param.get_default_param(); results are
    printed and (when training) the best model is saved to disk.
    """
    # Map dataset name to its class count.
    if args.datasets == "cifar10":
        num_classes = 10
    elif args.datasets == "cifar100":
        num_classes = 100
    # elif args.datasets == "imagenet":
    #     num_classes = 1000
    else:
        pass
    # Instantiate the requested convolution variant.
    if args.convolution == "MobileConv":
        net = MobileConvModel(num_classes=num_classes, which=args.model)
    elif args.convolution == "FALCON":
        net = FALCONModel(num_classes=num_classes, which=args.model)
    elif args.convolution == "RankMobileConv":
        net = RankMobileConvModel(rank=args.rank, alpha=args.alpha, num_classes=num_classes, which=args.model)
    elif args.convolution == "RankFALCON":
        net = RankFALCONModel(rank=args.rank, alpha=args.alpha, num_classes=num_classes, which=args.model)
    elif args.convolution == "StandardConv":
        net = StandardConvModel(num_classes=num_classes, which=args.model)
    else:
        pass
    # NOTE(review): an unrecognized dataset/convolution falls through the bare
    # `else: pass` branches and fails later with NameError — consider
    # validating args explicitly.
    net = net.cuda()  # requires a CUDA-capable device
    if args.is_train:
        # training
        best = train(net,
                     lr=args.learning_rate,
                     optimizer_option=args.optimizer,
                     epochs=args.epochs,
                     batch_size=args.batch_size,
                     n_lr_decay=args.no_learning_rate_dacay,
                     is_train=args.is_train,
                     data=args.datasets)
        save_model(best, args)
        test(net, batch_size=args.batch_size, data=args.datasets)
        cr_crr(args)
    else:
        # testing: report inference time averaged over 10 full test passes
        load_model(net, args)
        inference_time = 0
        for i in range(10):
            inference_time += test(net, batch_size=args.batch_size, data=args.datasets)
        # FIX: output typo "Averate" -> "Average"; the redundant float(10)
        # casts are replaced by a float literal (same value printed).
        print("Average Inference Time: %fs" % (inference_time / 10.0))
        # cr_crr(args)
# CLI entry point: build the argument parser, echo the parsed arguments and
# hand them to main().
if __name__ == "__main__":
    parser = get_default_param()
    args = parser.parse_args()
    print(args)
    main(args)
| true
|
bf4b655da24f20bb96859a6276c73ad9e954df79
|
Python
|
Shafin-Thiyam/Sound_Content_Test
|
/baseXmlHelper.py
|
UTF-8
| 9,870
| 2.71875
| 3
|
[] |
no_license
|
# Copyright: 2018, Ableton AG, Berlin. All rights reserved.
#****************************************************************************************
#
# base xml parse functionality
#
#****************************************************************************************
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import fnmatch, re, os
from cStringIO import StringIO
import gzip
import hashlib
#----------------------------------------------------------------------------------------
class xml_helper:
def __init__(self, filepath=None, tree=None):
self.tree = tree
self.mFilePath = filepath
self.ParentMap = None
self.mReplaceNewline = True
#----------------------------------------------------------------------------------------
def load_xml(self, filepath=None, escapeNewline=True, maxSize=0, createMap=True):
"""
load xml from filepath
"""
if filepath != None:
self.mFilePath = filepath
self.mReplaceNewline = escapeNewline
if not os.path.exists(str(self.mFilePath)):
print "Warning: The filepath '%s' does not exist. Please make sure to pass the right path as load_xml('foo/bar')" %filepath
return False
if not escapeNewline:
try:
input = StringIO(gzip.open(self.mFilePath, "r").read())
except IOError:
input = StringIO(open(self.mFilePath, "r").read())
else:
# replace Live's newline string with a dummy ###newline_escape###
# we will revert this back on writing.
# using the escapeNewline is slow on large documents
try:
file = gzip.open(self.mFilePath, "r").read()
except IOError:
file = open(self.mFilePath, "r").read()
input = StringIO(re.sub(r"�[DA];", "###newline_escape###", file))
del(file) # save memory
if maxSize:
maxSize = maxSize*1048576 # in MB
if len(input.getvalue()) > maxSize:
print "Warning: Large Document - skipping %s" %filepath
return False
self.tree = ET.ElementTree(file=input)
input.close()
if createMap:
self.child_to_parent_dict()
return True
def getroot(self):
return self.tree.getroot()
#----------------------------------------------------------------------------------------
def new_xml(self, root_name):
"""
create a new (empty) xml tree
"""
self.tree = ET.ElementTree(ET.fromstring('<?xml version="1.0" encoding="UTF-8"?><%s></%s>'%(
root_name, root_name)))
return self.tree.getroot()
#----------------------------------------------------------------------------------------
def return_tree(self):
"""
return tree and dict with ParentMap
"""
return self.tree, self.ParentMap
#----------------------------------------------------------------------------------------
def fn_to_reg(self, searchItems):
""" return a list of RegEx from searchItems"""
return [re.compile(fnmatch.translate(s)) for s in searchItems]
#----------------------------------------------------------------------------------------
def find_nodes(self, searchItems, root=None, sortByDepth=False):
if root == False:
print "Hey, you are passing 'False' as the root to find_nodes. Fix your script."
ListOfSearchItems = list(searchItems)
if root == None:
Parent = ListOfSearchItems.pop(0)
Out = [x for x in self.ParentMap.keys() if x.tag == Parent]
else:
Out = [root]
while len(ListOfSearchItems) > 0:
Parent = ListOfSearchItems.pop(0)
Out = [x for root in Out for x in root.getiterator(Parent)]
if sortByDepth == False: return Out
TDict = dict((x, len(self.get_path_to_node(x))) for x in Out)
return [o[0] for o in sorted(TDict.items(),key=lambda x:x[1])]
#----------------------------------------------------------------------------------------
def find_node(self, searchItems, root=None):
try:
return self.find_nodes(searchItems, root, sortByDepth=True)[0]
except IndexError:
return False
#----------------------------------------------------------------------------------------
def _refind_nodes(self, reSearchItems, root=None, sortByDepth=False):
"""
Recursive Search for nodes. Same as find_allnodes, except input is a regex
"""
reListOfSearchItems = list(reSearchItems)
if root == None:
ReParent = reListOfSearchItems.pop(0)
Out = [x for x in self.ParentMap.keys() if ReParent.match(x.tag)]
else:
Out = [root]
while len(reListOfSearchItems) > 0:
ReParent = reListOfSearchItems.pop(0)
Out = [x for root in Out for x in root.iter() if ReParent.match(x.tag)]
if sortByDepth == False: return Out
TDict = dict((x, len(self.get_path_to_node(x))) for x in Out)
return [o[0] for o in sorted(TDict.items(),key=lambda x:x[1])]
#----------------------------------------------------------------------------------------
def child_to_parent_dict(self,):
"""create a dict to map child to parent for each node.
"""
self.ParentMap = dict((c, p) for p in self.tree.iter() for c in p)
self.ParentMap[self.tree.getroot()] = None
return self.ParentMap
#----------------------------------------------------------------------------------------
def get_parent_node(self, node):
# much more efficient:
try: return self.ParentMap[node]
except (KeyError, IndexError, TypeError): self.child_to_parent_dict()
return self.ParentMap[node]
#----------------------------------------------------------------------------------------
def get_path_to_node(self, node):
Path = []
Parent = self.get_parent_node(node)
while Parent != None:
Path.insert(0, Parent)
Parent = self.get_parent_node(Parent)
return Path
#----------------------------------------------------------------------------------------
def set_nodevalue(self, node, value, V="Value", Conditional=False):
"""
update the value of an xml node
if Conditional, then check value first and return None if
the old value is already the same as the new value
"""
if self.mReplaceNewline:
value = re.sub("\n", "###newline_escape###",value)
if Conditional:
if self.get_nodevalue(node, V) == value: return None
node.set(V, value)
return node
#----------------------------------------------------------------------------------------
def add_node(self, parent_node, new_node_name, attributes={}, position=0):
"""
add a new xml node below parent_node
attributes is a dict {"Value":"0.0"}
"""
for key in attributes:
attributes[key] = format(attributes[key])
if position == -1:
count_children = len(list(parent_node))
position = count_children
new_node = ET.Element(new_node_name, attributes)
parent_node.insert(position, new_node)
return new_node
#----------------------------------------------------------------------------------------
def get_nodevalue(self, node, V="Value"):
"""
get node value
"""
if node == False:
return node
return node.get(V)
#----------------------------------------------------------------------------------------
def get_parametervalue(self, nodename=None, node=None, root=None):
"""
return FloatEvent, BoolEvent or EnumEvent of a parameter node
"""
if nodename != None:
node = self.find_node(nodename, root)
node_event = self._refind_nodes(self.fn_to_reg(["Manual"]), root=node)[0]
node_event_value = self.get_nodevalue(node_event)
return node_event_value
#----------------------------------------------------------------------------------------
def set_parametervalue(self, Value, nodename=None, node=None, root=None, ):
"""
return FloatEvent, BoolEvent or EnumEvent of a parameter node
"""
if nodename != None:
node = self.find_node(nodename, root)
node_event = self._refind_nodes(self.fn_to_reg(["Manual"]), root=node)[0]
if node_event != False:
self.set_nodevalue(node_event, Value)
return True
return False
#----------------------------------------------------------------------------------------
def indent(self, elem, level=0):
"""proper indent out, copied from http://effbot.org/zone/element-lib.htm"""
i = "\n" + level*"\t"
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + "\t"
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#----------------------------------------------------------------------------------------
def write_xml(self, filepath=None, escapeNewline=True, indent=False):
"""
write new xml out
if escapeNewline then we write to a string buffer and replace
###newline_escape### to 
 to enforce correct line breaks """
if not filepath:
filepath = self.mFilePath
if indent:
self.indent(self.tree.getroot())
output = StringIO()
self.tree.write(output, encoding="UTF-8")
outFile = open(filepath, "w")
if escapeNewline:
# we need to make sure newline 
 is written correctly
print >> outFile, re.sub("###newline_escape###", "
", output.getvalue())
else:
print >> outFile, output.getvalue()
outFile.close
#========================================================================================
# end of file
#========================================================================================
| true
|
d3a094adf868249681683a5afa54538f249a36f2
|
Python
|
Xiaoran807/robot-play
|
/src/beginner_tutorials/scripts/tempTopic/temperatureClass.py
|
UTF-8
| 948
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
class TemperatureSensor:
    """ROS helper: holds a temperature reading and publishes it on /temperature."""

    def read_temperature_sensor_data(self):
        # Here you read the data from your sensor
        # And you return the real value
        # (placeholder: stores a constant instead of querying hardware)
        self.temperature = 30.0

    def __init__(self):
        # Create a ROS publisher
        self.temperature_publisher = rospy.Publisher("/temperature", Float64, queue_size=1)
        # Initialize temperature data
        self.temperature = 0

    def publish_temperature(self):
        # Wrap the current reading in a Float64 message and publish it.
        msg = Float64()
        msg.data = self.temperature
        self.temperature_publisher.publish(msg)
# Node entry point: read and publish the temperature at 10 Hz until shutdown.
if __name__ == '__main__':
    rospy.init_node("your_sensor_node")
    # Create an instance of Temperature sensor
    ts = TemperatureSensor()
    # Create a rate
    rate = rospy.Rate(10)  # 10 Hz publish loop
    while not rospy.is_shutdown():
        ts.read_temperature_sensor_data()
        ts.publish_temperature()
        rate.sleep()
| true
|
684b2b564cd2f21801893326e18a72aa8cf56e7a
|
Python
|
michelwandermaas/scheduler_translator
|
/script_scheduler_writer.py
|
UTF-8
| 32,922
| 2.671875
| 3
|
[] |
no_license
|
'''
Author: Michel Wan Der Maas Soares (mwandermaassoares@lbl.gov)
This class is supposed to serve as a mechanism for the developer to write scripts that will work in multiple schedulers.
The project was first developed to work with Univa Grid Engine(UGE) and Simple Linux Utility for Resource Management(SLURM),
but it should be quite easy to implement functionaly with other schedulers.
This is an open-source project, and any development you make to the file will be reviewed and appreciated.
If you want guidelines to include support for other scheduler, do not hesitate to contact me.
Functions whose names start with underscore should not be directly accessed by the user.
'''
import sys
class script_scheduler_writer:
def __init__(self, scheduler, name = "Name"):
    # Per-job bookkeeping, filled by addJob()/addLineParsed().
    self.jobsType = []
    self.dependencies = []
    self.dependenciesType = [] #Types: NONE, JOB_NUM, JOB_ID as well as SINGLE or ARRAY.
    self.header = ""            # shebang line, set via setShell()
    self.commands = []          # one launch-command string per job added
    self.name = name
    self.shellSet = False       # whether a shebang line has been set yet
    self.schedulersSupported = ["UGE","SLURM",""] # "" means run standard syntax, it wont parse the commands.
    self.defaultCommands = ""
    self.defaultHeader = ""     # scheduler directives emitted after the shebang
    self.modules = ""           # "module load ..." lines, deduplicated
    self._setScheduler(scheduler)
    self.ignoreUnkown = True # ignore unknown commands? if not, delete them.
    self.originalScript = None
    # list of all commands accepted. it will be used to delete unknown commands, and to translate.
    # Dependecies/commands are described in the functions responsible for parsing them.
    self.dependenciesAvail = ["LAST_ADDED","LAST_ADDED_ARRAY", "LAST", "LAST_ID", "LAST_ID_ARRAY", "JOBS_LIST",
                              "JOBS_LIST_ARRAY","ALL_ADDED", "ALL_ADDED_ARRAY"]
    self.commandsAvail = ["JOB_NAME", "RESOURCE_EXCLUSIVE", "RESOURCE_NODES", "RESOURCE_IB", "JOB_ARRAY", "TASK_ID",
                          "RESOURCE_MEM", "RESOURCE_CCM", "RESOURCE_PRIOR_QUEUE"]
def getJobsSize(self):
    '''
    :return: the amount of jobs appended so far
    '''
    # len() is the idiomatic way to get a sequence's length; calling
    # __len__() directly is discouraged.
    return len(self.jobsType)
def getSchedulers(self):
    '''
    :return: the list of schedulers currently supported ("" means pass-through,
        i.e. commands are emitted without translation)
    '''
    return self.schedulersSupported
def getScript(self):
    '''
    :return: The script string ready to be written to a file
        (shebang + scheduler directives + module lines + job commands)
    '''
    string = ""
    # Ensure a shebang exists; setShell("") falls back to /bin/bash -l.
    if (not self.shellSet):
        self.setShell("")
    string += self.header
    string += self.defaultHeader +"\n"
    string += self.modules +"\n"
    for x in self.commands:
        string += x
    return string
def clearCommands(self):
    '''
    Clean all the commands set so far (jobs, dependencies and default commands;
    the header, shell and scheduler choice are kept).
    '''
    self.commands = []
    self.jobsType = []
    self.dependencies = []
    self.dependenciesType = []
    self.defaultCommands = ""
def clearAll(self):
    '''
    Clean all variables (resets the object to an unconfigured pass-through
    scheduler with the default name).
    '''
    self.defaultHeader = ""
    self.clearCommands()
    self.header = ""
    self.name = "Name"
    self._setScheduler("")
    self.ignoreUnkown = True
def _setName(self, name):
    '''
    Set name a name for the object. The name will be used when creating JobID holders in the script.
    '''
    self.name = name
def _setScheduler(self, type):
    '''
    Sets a scheduler for the script. See this file header for the names of schedulers supported.
    Exits the process on an unsupported scheduler name.
    '''
    if (type not in self.schedulersSupported):
        sys.stderr.write("Scheduler not supported.\n")
        sys.exit(1)
    self.scheduler = type
    if(type == "UGE"):
        # UGE environments need the scheduler module loaded inside the script.
        self.addModule("module load uge\n")
def setShell(self,shell):
    '''
    Set shell used by the script (prepends a shebang line to the header).
    An empty string selects the default "/bin/bash -l".
    '''
    if (shell != ""):
        shell = shell.replace("\n", "") # removes any newline characters
        shell = shell.replace("#!", "") # removes any #! characters
        self.header = "#!"+shell+"\n" + self.header
    else:
        self.header = "#!/bin/bash -l\n" + self.header
    self.shellSet = True
def addModule(self,module):
    # Register a "module ..." line, deduplicated; "purge" lines are moved to
    # the front so they run before any load.
    if (module == ""):
        return
    module = module.replace("module ","")   # normalize: strip the keyword
    module = module.split("\n")[0]          # keep only the first line
    if (self.modules.find(module) >= 0):
        return  # already registered
    if (module.find("purge") >= 0):
        self.modules = ("module "+module+"\n") + self.modules
        return
    module = "module "+module+"\n"
    self.modules += module
def addJob(self, command, dependency = "", dependencyType = "", translate = False):
    '''
    Add command to launch a job to the script
    :param command: command to run if first word of command is RUN or launch otherwise (see parseCommand() and getLauncher())
    :param dependency: "LAST_ADDED", "LAST n" (n is a number), "JOB_ID n" (n is the id) (see parseDependency())
    :return: True for success and False for failure
    '''
    if (self.scheduler not in self.schedulersSupported):
        sys.stderr.write("Scheduler not supported.\n")
        sys.exit(1)
    self.jobsType.append([])
    command = command.replace("\n","") #removes any newline characters
    # _getCommandString is defined later in this class (outside this view).
    self.commands.append(self._getCommandString(command, dependency, dependencyType, translate))
    def addLineParsed(self, command):
        '''
        Add a line after translating it with _parseCommand(). Used in special
        cases when one wants keyword substitution but no job submission.
        Exits the process when no supported scheduler has been selected.
        '''
        if (self.scheduler not in self.schedulersSupported):
            sys.stderr.write("Scheduler not supported.\n")
            sys.exit(1)
        # NOTE(review): this also increments the job counter (jobsType) even
        # though no job is launched -- confirm that is intended.
        self.jobsType.append([])
        self.commands.append(self._parseCommand(command) + "\n")
    def setDefaultConfig(self, command):
        '''
        Translate *command* with _parseCommand() and store the result as a
        default scheduler directive ("#SBATCH ..." / "#$ ...") in
        self.defaultHeader. Exits the process when no supported scheduler
        has been selected.
        '''
        if (self.scheduler not in self.schedulersSupported):
            sys.stderr.write("Scheduler not supported.\n")
            sys.exit(1)
        command = self._parseCommand(command)
        if (command == ""):
            return
        # NOTE(review): defaults accumulate in self.defaultHeader, but
        # unsetDefaultConfig() resets self.defaultCommands -- verify which
        # attribute the script generator actually consumes.
        if (self.scheduler == "SLURM"):
            self.defaultHeader += "#SBATCH "+ command +"\n"
        elif (self.scheduler == "UGE"):
            self.defaultHeader += "#$ " + command + "\n"
def unsetDefaultConfig(self):
self.defaultCommands = ""
    def _getLauncher(self, command):
        '''
        Get launch command based on the type of scheduler. UGE with the command RUN maps to a local run, as well as not specified schedulers.
        :return: (launcher, command) tuple -- launcher is "" for local commands
        NOTE(review): an empty *command* raises IndexError on commandList[0];
        callers appear to always pass non-empty strings.
        '''
        commandList = command.split()
        launcher = ""
        if (commandList[0] == "RUN"):
            # RUN: execute in-place; only SLURM gets an explicit "srun " prefix.
            if (self.scheduler == "SLURM"):
                launcher = "srun "
                command = command.replace("RUN "," ",1)
            else:
                command = command.replace("RUN ", " ", 1)
        else:
            if (commandList[0] == "LAUNCH" or command.find("LAUNCH ") < 0): #if the first command is LAUNCH or there`s no launch
                command = command.replace("LAUNCH "," ")
                if (self.scheduler == "UGE"):
                    launcher = "qsub "
                elif (self.scheduler == "SLURM"):
                    launcher = "sbatch "
            elif (command.find("LAUNCH ") >= 0): #otherwise, look for a LAUNCH, and replace it with the appropriate launcher.
                # Anything before LAUNCH (e.g. environment assignments) is kept
                # as a prefix of the launcher itself.
                if (self.scheduler == "UGE"):
                    launcher = command.split("LAUNCH")[0]+"qsub "
                    command = command.split("LAUNCH")[1]
                elif (self.scheduler == "SLURM"):
                    launcher = command.split("LAUNCH")[0]+"sbatch "
                    command = command.split("LAUNCH")[1]
        return (launcher,command)
    def addEmail(self, email, type):
        '''
        Add email notification configuration for the jobs in the script.
        :param email: destination address; empty or None silently disables the call
        :param type: "END", "START", "ABORT", "SUSPENDED", "ALWAYS", "NEVER"/"", or a combination separated by a whitespace.
        Combination of opposites will result in error. ("Always" and "Never")
        NOTE(review): the parameter `type` and the local `set` shadow builtins;
        kept unchanged in this documentation-only pass.
        '''
        if (self.scheduler not in self.schedulersSupported):
            sys.stderr.write("Scheduler not supported.\n")
            sys.exit(1)
        if (email == "" or email == None):
            return
        if (self.scheduler == "UGE"):
            self.header += "#$ -M "+email+"\n"
            if (type == ""):
                return  # address recorded, but no notification events requested
            self.header += "#$ -m "
            if(type == "NEVER"):
                self.header += "n\n"
                return
            type = type.split()
            if ("ALWAYS" in type and "NEVER" in type):
                print("Conflicting email types.\n")
                sys.exit()
            if ("ALWAYS" in type):
                self.header += "beas\n"  # begin, end, abort, suspend
                return
            # Build the -m event string from the individual flags.
            if ("START" in type):
                self.header += "b"
            if ("END" in type):
                self.header += "e"
            if ("ABORT" in type):
                self.header += "a"
            if ("SUSPENDED" in type):
                self.header += "s"
            self.header += "\n"
        elif (self.scheduler == "SLURM"):
            self.header += "#SBATCH --mail-user="+email+"\n"
            self.header += "#SBATCH --mail-type="
            if(type == "NEVER" or type == ""):
                self.header += "NONE\n"
                return
            type = type.split()
            set = False  # True once at least one mail-type has been emitted (comma separator)
            if ("ALWAYS" in type and "NEVER" in type):
                print("Conflicting email types.\n")
                sys.exit()
            if ("ALWAYS" in type):
                self.header += "ALL\n"
                return
            if ("START" in type):
                self.header += "BEGIN"
                set = True
            if ("END" in type):
                if (set):
                    self.header += ","
                self.header += "END"
                set = True
            # SLURM has no separate suspend event; both map to FAIL here.
            if ("ABORT" in type or "SUSPENDED" in type):
                if (set):
                    self.header += ","
                self.header += "FAIL"
                set = True
            self.header += "\n"
def addLineHeader(self, line):
'''
Add a line to the header
'''
line = line.replace("#", "", 1) # removes any shebang characters
self.header += "#"+line
def addLine(self, line, newline = False):
'''
Add a line to the script. By default it does not add a newline character.
For script translation the default is to add a newline.
'''
'''
if (line.find("module") >= 0):
list = line.split("module ")
for x in range(1,list.__len__()):
self.addModule(list[x])
line = list[0]
'''
if (line == "" or line.replace(" ","") == ""):
return
if (newline):
self.commands.append(line+"\n")
else:
self.commands.append(line)
def addComment(self, comment):
'''
Add a comment to the script
'''
comment = comment.replace("\n", "") # removes any newline characters
comment = comment.replace("#", "") # removes any shebang characters
self.commands.append("#"+comment+"\n")
    def _getCommandString(self, command, dependency, dependencyType, translate = False):
        '''
        Build the two shell lines that submit one job and capture its id.
        Line 1 runs the launcher (qsub/sbatch/srun/none) inside backticks and
        stores the raw submission output in <name>_JOB_<k>; line 2 pipes that
        output through awk to keep only the first run of digits (the job id).
        NOTE(review): *translate* is accepted but never used here -- confirm
        whether it should be forwarded to _parseCommand().
        '''
        command = self._getLauncher(command)
        ret_string = self.name+"_JOB_"+str(self.jobsType.__len__()) + "=`" + command[0] + self._getDependencyString(dependency, dependencyType) + " " + self._parseCommand(command[1]).replace("\n", "") + "`\n"
        ret_string += self.name+"_JOB_"+str(self.jobsType.__len__()) + "=`echo $" + self.name + "_JOB_" + str(self.jobsType.__len__()) + " | awk 'match($0,/[0-9]+/){print substr($0, RSTART, RLENGTH)}'`\n"
        return ret_string
    def _getDependencyString(self, dependency, dependencyType):
        '''
        Parse *dependency* (via _parseDependency()) and return the matching
        scheduler flag string.
        :param dependency: dependency specification (see _parseDependency())
        :param dependencyType: SLURM-only flavour: "OKAY", "NOT OKAY", "START";
                               anything else maps to "afterany"
        :return: flag string ("-hold_jid ...", "--dependency=...") or ""
        '''
        dependencyType = dependencyType.replace(" ","")
        # NOTE(review): the line above strips spaces, so the "NOT OKAY"
        # comparison below can never match -- only "NOTOKAY" could.
        self._parseDependency(dependency)
        ret = ""
        if (dependency == ""):
            return ret
        if (self.dependencies[self.jobsType.__len__()-1].__len__()==0):
            return ret
        if(self.scheduler=="UGE"):
            # Array-aware hold (-hold_jid_ad) vs plain hold (-hold_jid).
            if (self.dependenciesType[self.jobsType.__len__()-1][1] == "SINGLE"):
                ret = "-hold_jid "
            else:
                ret = "-hold_jid_ad "
            bool = False  # NOTE(review): shadows the builtin; acts as "need a separator"
            for n in self.dependencies[self.jobsType.__len__()-1]:
                if (bool):
                    ret += ","
                if (self.dependenciesType[self.jobsType.__len__()-1][0] == "JOB_NUM"):
                    # Reference the shell variable that captured the job id.
                    ret += "$"+self.name+"_JOB_"+str(n)
                else:
                    ret += str(n)
                bool = True
            ret += " "
        elif (self.scheduler=="SLURM"):
            if (dependencyType == "OKAY"):
                ret = "--dependency=afterok:"
            elif (dependencyType == "NOT OKAY"):
                ret = "--dependency=afternotok:"
            elif (dependencyType == "START"):
                ret = "--dependency=after:"
            else:
                ret = "--dependency=afterany:"
            bool = False
            for n in self.dependencies[self.jobsType.__len__()-1]:
                if (bool):
                    ret += ":"
                if (self.dependenciesType[self.jobsType.__len__()-1][0] == "JOB_NUM"):
                    ret += "$"+self.name+"_JOB_"+str(n)
                else:
                    ret += str(n)
                bool = True
            ret += " "
        return ret
    def _parseDependency(self, dependency):
        '''
        Parse *dependency* into self.dependencies / self.dependenciesType
        (one entry per job, aligned with self.jobsType), to be turned into
        a flag string later by _getDependencyString().
        :param dependency: string to be parsed
        '''
        '''
        List of constants: (N stands for a number) (More than one kind of dependency will probably cause errors)
        LAST_ADDED : depends on the last job added
        LAST_ADDED_ARRAY : depends on the last job added, and the current job is an array
        LAST N : depends on the last N jobs added
        LAST_ARRAY N : depends on the last N jobs added, and the current job is an array
        JOB_ID N1 N2 N3 N3 : depends on the jobs of ids N1, N2 and so on
        JOB_ID_ARRAY N1 N2 N3 : depends on the jobs of ids N1, N2... and the current job is an array
        JOBS_LIST N1 N2 N3 : depends on the N1th job added, N2th job added, and so on. Jobs index start at 1.
        JOBS_LIST_ARRAY N1 N2 N3 : depends on the N1th job added, N2th job added, and so on. Jobs index start at and the current job is an array.
        ALL_ADDED : depends on all jobs added so far.
        ALL_ADDED_ARRAY : depends on all jobs added so far. The current job must be an array.
        '''
        if (dependency == ""):
            # No dependency: record empty placeholders so the per-job lists
            # stay index-aligned with self.jobsType.
            self.dependencies.append([])
            self.dependenciesType.append(("NONE","NONE"))
        else:
            #SINGLE DEPENDENCY
            if (dependency.replace(" ","") == "LAST_ADDED"):
                if (self.jobsType.__len__() == 0):
                    sys.stderr.write("The first job added cannot depend on the last one added.\n")
                    sys.exit()
                else:
                    self.dependenciesType.append(("JOB_NUM", "SINGLE"))
                    self.dependencies.append([self.jobsType.__len__() - 1])
                    return
            if (dependency.replace(" ","") == "LAST_ADDED_ARRAY"):
                if (self.jobsType.__len__() == 0):
                    sys.stderr.write("The first job added cannot depend on the last one added.\n")
                    sys.exit()
                else:
                    self.dependenciesType.append(("JOB_NUM", "ARRAY"))
                    self.dependencies.append([self.jobsType.__len__() - 1])
                    return
            # From here on the spec is tokenised; membership tests below are
            # exact token matches (so "JOBS_LIST_ARRAY" does NOT match "JOBS_LIST").
            dependency = dependency.split()
            if ("LAST" in dependency):
                pos_dependency = dependency.index("LAST")+1
                num_dependency = int(dependency[pos_dependency])
                self.dependenciesType.append(("JOB_NUM","SINGLE"))
                self.dependencies.append([])
                for x in range(self.jobsType.__len__()-num_dependency, self.jobsType.__len__()):
                    self.dependencies[self.dependencies.__len__()-1].append(x)
            elif ("JOB_ID" in dependency):
                self.dependenciesType.append(("JOB_ID","SINGLE"))
                self.dependencies.append([])
                for x in range(1,dependency.__len__()):
                    if (dependency[x].isdigit()):
                        num_dependency = int(dependency[x])
                    else:
                        num_dependency = dependency[x]
                    self.dependencies[self.dependencies.__len__()-1].append(num_dependency)
            elif ("JOBS_LIST" in dependency or "JOB_LIST" in dependency):
                self.dependencies.append([])
                self.dependenciesType.append(("JOB_NUM","SINGLE"))
                for x in range(1,dependency.__len__()):
                    num_dependency = int(dependency[x])
                    self.dependencies[self.dependencies.__len__()-1].append(num_dependency)
            elif ("ALL_ADDED" in dependency):
                self.dependenciesType.append(("JOB_NUM","SINGLE"))
                self.dependencies.append([])
                for x in range(1, self.jobsType.__len__()):
                    self.dependencies[self.dependencies.__len__()-1].append(x)
            #JOB ARRAYS DEPENDENCY
            elif ("LAST_ARRAY" in dependency):
                pos_dependency = dependency.index("LAST_ARRAY")+1
                num_dependency = int(dependency[pos_dependency])
                self.dependenciesType.append(("JOB_NUM","ARRAY"))
                self.dependencies.append([])
                for x in range(self.jobsType.__len__()-num_dependency, self.jobsType.__len__()):
                    self.dependencies[self.dependencies.__len__()-1].append(x)
            elif ("JOB_ID_ARRAY" in dependency):
                self.dependenciesType.append(("JOB_ID","ARRAY"))
                self.dependencies.append([])
                for x in range(1,dependency.__len__()):
                    num_dependency = int(dependency[x])
                    self.dependencies[self.dependencies.__len__()-1].append(num_dependency)
            # NOTE(review): the "JOB_LIST" alternative below is unreachable --
            # it is already consumed by the JOBS_LIST branch above.
            elif ("JOBS_LIST_ARRAY" in dependency or "JOB_LIST" in dependency):
                self.dependencies.append([])
                self.dependenciesType.append(("JOB_NUM","ARRAY"))
                for x in range(1,dependency.__len__()):
                    num_dependency = int(dependency[x])
                    self.dependencies[self.dependencies.__len__()-1].append(num_dependency)
            elif ("ALL_ADDED_ARRAY" in dependency):
                self.dependenciesType.append(("JOB_NUM","ARRAY"))
                self.dependencies.append([])
                for x in range(1, self.jobsType.__len__()):
                    self.dependencies[self.dependencies.__len__()-1].append(x)
            else:
                sys.stderr.write("Dependency \""+str(dependency)+"\" not supported.\n")
                sys.exit(1)
def _deleteOtherOccurances(self, command, occurance, num_args=0):
if (num_args == 0):
command = command.replace(occurance, "")
else:
# delete additional occurances
args = 0
list = command.split()
auxCommand = ""
for x in list:
if x == occurance: # delete occurrance
args = num_args
elif args > 0: # delete argument of the additional occurrance
args -= 1
else:
auxCommand += x+" "
command = auxCommand
return command
def _parseCommand(self, command): #TODO: ignore unknown commands option (this is currently turned on)
#TODO: add other mem and nodes options
'''
Parses the input command looking for certain keywords and replacing them with the appropriate string
:param command: command to be parsed
:return: string to be written in the script
'''
'''
List of constants: (N (N1,N2...) stands for a number, and "" stands for a string)
JOB_NAME "" : sets the name of the job to ""
RESOURCE_EXCLUSIVE : requests for exclusive nodes
RESOURCE_NODES N : requests for N nodes
RESOURCE_IB : requests infiniband resource
JOB_ARRAY N1:N2 : sets a job array from N1 to N2. (N1 > 0)
TASK_ID : substituted to the variable set by the scheduler that tells the task id
RESOURCE_MEM N : requests N amount of memory. Specify either MB or GB right after N. e.g. "RESOURCE_MEM 200MB"
RESOURCE_CCM : request ccm capabality
RESOURCE_PRIOR_QUEUE "": requests a certain priority queue
OUTPUT_CURRENT_DIR : write jobs output to the current directory
'''
if (self.scheduler == "UGE"):
command = command.replace("JOB_NAME ","-N ", 1) # replace the first occurrance
#delete additional occurances
command = self._deleteOtherOccurances(command, "JOB_NAME", 1)
elif (self.scheduler == "SLURM"):
command = command.replace("JOB_NAME ", "--job-name=", 1)
command = self._deleteOtherOccurances(command, "JOB_NAME", 1)
if (self.scheduler == "UGE"):
command = command.replace("RESOURCE_NODES ","-pe pe_slots ", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_NODES", 1)
elif (self.scheduler == "SLURM"):
command = command.replace("RESOURCE_NODES ","-N ", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_NODES", 1)
if (self.scheduler == "UGE"):
command = command.replace("RESOURCE_IB","-l infiniband.c=1", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_IB")
elif (self.scheduler == "SLURM"):
if (command.find("RESOURCE_IB") >= 0):
sys.stderr.write("Command RESOURCE_IB not available for SLURM. Ignored.\n")
command = command.replace("RESOURCE_IB", "")
if (self.scheduler == "UGE"):
command = command.replace("RESOURCE_EXCLUSIVE","-l exclusive.c", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_EXCLUSIVE")
elif (self.scheduler == "SLURM"):
command = command.replace("RESOURCE_EXCLUSIVE","--exclusive", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_EXCLUSIVE")
if (self.scheduler == "UGE"):
command = command.replace("JOB_ARRAY ","-t ", 1)
command = self._deleteOtherOccurances(command, "JOB_ARRAY", 1)
elif (self.scheduler == "SLURM"):
list = command.split()
auxCommand = ""
found = False
for x in list:
if x == "JOB_ARRAY":
auxCommand += "--array="
found = True
elif found:
auxCommand += x.replace(":","-",1)
break
else:
auxCommand += x+" "
command = auxCommand
command = self._deleteOtherOccurances(command, "JOB_ARRAY", 1)
if (self.scheduler == "UGE"):
command = command.replace("TASK_ID","$SGE_TASK_ID")
elif (self.scheduler == "SLURM"):
command = command.replace("TASK_ID","$SLURM_TASK_ID")
# TW: No support for "-l h_rt"
# TW: This seems not to work...
if (self.scheduler == "UGE"):
command = command.replace("RESOURCE_MEM ", "-l ram.c=", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_MEM", 1)
if (command.find("-l ram.c=") >= 0):
list = command.split("-l ram.c=")
listAux = list[1].split(" ")
listAux[0] = listAux[0].replace("MB","M")
listAux[0] = listAux[0].replace("GB","G")
list[1] = " ".join(listAux)
command = "-l ram.c=".join(list)
elif (self.scheduler == "SLURM"):
command = command.replace("RESOURCE_MEM ", "--mem=", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_MEM", 1)
if (self.scheduler == "UGE"):
if (command.find("RESOURCE_CCM") >= 0):
sys.stderr.write("Command RESOURCE_CCM not available for UGE.\n")
sys.exit(1)
elif (self.scheduler == "SLURM"):
command = command.replace("RESOURCE_CCM", "-ccm", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_CCM")
if (self.scheduler == "UGE"):
command = command.replace("RESOURCE_PRIOR", "-l", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_PRIOR", 1)
elif (self.scheduler == "SLURM"):
command = command.replace("RESOURCE_PRIOR ", "--qos=", 1)
command = self._deleteOtherOccurances(command, "RESOURCE_PRIOR", 1)
if (self.scheduler == "UGE"):
command = command.replace("OUTPUT_CURRENT_DIR", "-cwd", 1)
command = self._deleteOtherOccurances(command, "OUTPUT_CURRENT_DIR", 1)
elif (self.scheduler == "SLURM"):
#ignore, this is the default in SLURM
command = command.replace("OUTPUT_CURRENT_DIR", "", 1)
command = self._deleteOtherOccurances(command, "OUTPUT_CURRENT_DIR", 1)
# TW: doesn't support UGE "-v environment_variable=value"
return command
def unitTest(self): #TODO: automatize the result checking, instead of prinnting it
'''
Test if the returned script is correct.
This functiom should be used to test the implementation of other scheduler following the pattern set here.
:return: True if results are correct, False otherwise
'''
print "--------------------------------------------------"
print "Test 1"
print "--------------------------------------------------"
# TEST AS LIBRARY
#---------------------------------------FIRST TEST-----------------------------------------------
#For reference only, it does not map to anything. The dependencies might not make sense.
firstExampleScript = \
""""#/bin/bash\n
#EMAIL mwandermaassoares@lbl.gov
#EMAIL_TYPE START END
JOB_NAME First_Job RESOURCE_NODES 8 ./job1.sh\n
DEPEND LAST_ADDED JOB_NAME Second_Job JOB_ARRAY 1:10 ./job3.sh\n
DEPEND LAST 2 JOB_NAME Third_Job JOB_ARRAY 3:25 RESOURCE_IB ./job3.sh\n"""
#---------------------------------------UGE FIRST TEST___________________________________________
firstUGEScript = \
""" #/bin/bash -l\n
#$ -M mwandermaassoares@lbl.gov\n
#$ -m be\n
qsub -N First_Job -pe pe_slots 8 ./job1.sh\n
qsub -hold_jid First_Job -N Second_Job -t 1:10 ./job2.sh\n
qsub -hold_jid First_Job Second_Job -N Third_Job -t 3:25 -l infiniband.c=1 ./job3.sh\n"""
self._setScheduler("UGE")
self.setShell("/bin/bash -l")
self.addJob("JOB_NAME First_Job RESOURCE_NODES 8 JOB_NAME RANDOM_JOB ./job1.sh ")
self.addJob("JOB_NAME Second_Job JOB_ARRAY 1:10 ./job2.sh","LAST_ADDED")
self.addJob("JOB_NAME Third_Job JOB_ARRAY 3:25 RESOURCE_IB RESOURCE_IB ./job3.sh ", "LAST 2")
self.addEmail("mwandermaassoares@lbl.gov","START END")
returnScript = self.getScript()
print (returnScript)
print "--------------------------------------------------"
self.clearAll()
#---------------------------------------SLURM FIRST TEST---------------------------------------
firstSLURMScript = \
""""#/bin/bash -l\n
#SBATCH --mail-user=mwandermaassoares@lbl.gov
#SBATCH --mail-type=BEGIN,END
$Name_JOB_1=`sbatch ./job1.sh -N First_Job -pe pe_slots 8`\n
$Name_JOB_2=`sbatch ./job2.sh -d afterany:$Name_JOB_1 -N Second_Job --array=1-10`\n
$Name_JOB_3=`sbatch ./job3.sh -d afterany:$Name_JOB_1:$Name_JOB_2 -N Third_Job -l infiniband.c=1`\n"""
self._setScheduler("SLURM")
self.setShell("/bin/bash -l")
self.addJob("JOB_NAME First_Job RESOURCE_NODES 8 ./job1.sh ")
self.addJob("JOB_NAME Second_Job JOB_ARRAY 1:10 ./job2.sh ","LAST_ADDED")
self.addJob("JOB_NAME Third_Job JOB_ARRAY 3:25 RESOURCE_IB ./job3.sh ", "LAST 2")
self.addEmail("mwandermaassoares@lbl.gov","START END")
returnScript = self.getScript()
print (returnScript)
print "--------------------------------------------------"
self.clearAll()
print "Test 2"
print "--------------------------------------------------"
#---------------------------------------SECOND TEST-----------------------------------------------
#For reference only, it does not map to anything. The dependencies might not make sense.
secondExampleScript = \
""""#/bin/bash\n
#EMAIL mwandermaassoares@lbl.gov\n
#EMAIL_TYPE END ABORT START\n
JOB_NAME First_Job RESOURCE_NODES 8 RESOURCE_EXCLUSIVE ./job1.sh \n
LAST_ADDED JOB_NAME Second_Job JOB_ARRAY 1:10 ./job2.sh \n
LAST_ADDED_ARRAY JOB_NAME Third_Job JOB_ARRAY 3:25 RESOURCE_IB ./job3.sh \n
LAST_ARRAY 2 JOB_NAME Fourth_Job JOB_ARRAY 1:1000 RESOURCE_NODES 16 ./job4.sh """
#---------------------------------------UGE SECOND TEST___________________________________________
secondUGEScript = \
""""#/bin/bash -l\n
#$ -M mwandermaassoares@lbl.gov\n
#$ -m eab\n
$Name_JOB_1=`qsub -N First_Job -pe pe_slots 8 -l exclusive.c ./job1.sh `\n
$Name_JOB_2=`qsub -hold_jid $Name_JOB_1 -N Second_Job -t 1:10 ./job2.sh `\n
$Name_JOB_3=`qsub -hold_jid_ad $Name_JOB_2 -N Third_Job -t 3:25 -l infiniband.c=1 ./job3.sh `\n
$Name_JOB_4=`qsub -hold_jid_ad $Name_JOB_2,$Name_JOB_3 -N Fourth_Job -t 1:1000 -pe pe_slots 16 ./job4.sh `"""
self._setScheduler("UGE")
self.setShell("/bin/bash -l")
self.setDefaultConfig("RESOURCE_MEM 200MB RESOURCE_NODES 1")
self.addJob("JOB_NAME First_Job RESOURCE_NODES 8 RESOURCE_EXCLUSIVE ./job1.sh ")
self.addJob("JOB_NAME Second_Job JOB_ARRAY 1:10 ./job2.sh ","LAST_ADDED")
self.addJob("JOB_NAME Third_Job JOB_ARRAY 3:25 RESOURCE_IB ./job3.sh ", "LAST_ADDED_ARRAY")
self.addJob("JOB_NAME Fourth_Job JOB_ARRAY 1:1000 RESOURCE_NODES 16 ./job4.sh ", "LAST_ARRAY 2")
self.addEmail("mwandermaassoares@lbl.gov","END ABORT START")
returnScript = self.getScript()
print (returnScript)
print "--------------------------------------------------"
self.clearAll()
#---------------------------------------SLURM SECOND TEST___________________________________________
secondSLURMcript = \
""""#/bin/bash -l\n
#$ -M mwandermaassoares@lbl.gov\n
#$ -m eab\n
$Name_JOB_1=`sbatch --job-name=First_Job -ntasks-per-node=8 -l exclusive.c ./job1.sh `\n
$Name_JOB_2=`sbatch -d afterany:$Name_JOB_1 --job-name=Second_Job --array=1-10 ./job2.sh `\n
$Name_JOB_3=`sbtach -d afterany:$Name_JOB_2 -job-name=Third_Job --array:3-25 --contraint=IB ./job3.sh `\n
$Name_JOB_4=`sbatch -d afterany:$Name_JOB_2:$Name_JOB_3 --job-name=Fourth_Job --array=1-1000 -pntasks-per-node=16 ./job4.sh `"""
self._setScheduler("SLURM")
self.setShell("/bin/bash -l")
self.setDefaultConfig("RESOURCE_MEM 200MB RESOURCE_NODES 1")
self.addJob("JOB_NAME First_Job RESOURCE_NODES 8 RESOURCE_EXCLUSIVE ./job1.sh ")
self.addJob("JOB_NAME Second_Job JOB_ARRAY 1:10 ./job2.sh ","LAST_ADDED")
self.addJob("JOB_NAME Third_Job JOB_ARRAY 3:25 RESOURCE_IB ./job3.sh ", "LAST_ADDED_ARRAY")
self.addJob("JOB_NAME Fourth_Job JOB_ARRAY 1:1000 RESOURCE_NODES 16 ./job4.sh ", "LAST_ARRAY 2")
self.addEmail("mwandermaassoares@lbl.gov","END ABORT START")
returnScript = self.getScript()
print (returnScript)
print "--------------------------------------------------"
self.clearAll()
return True
if __name__ == "__main__":
    # Self-test: exercise the writer against every supported scheduler.
    script_scheduler_writer("").unitTest()
| true
|
5cc0b0934270492e92dbb799aa2417d716e570aa
|
Python
|
ray-project/ray
|
/python/ray/tune/search/bohb/bohb_search.py
|
UTF-8
| 13,731
| 2.609375
| 3
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
"""BOHB (Bayesian Optimization with HyperBand)"""
import copy
import logging
import math
# use cloudpickle instead of pickle to make BOHB obj
# pickleable
from ray import cloudpickle
from typing import Dict, List, Optional, Union
from ray.tune.result import DEFAULT_METRIC
from ray.tune.search.sample import (
Categorical,
Domain,
Float,
Integer,
LogUniform,
Normal,
Quantized,
Uniform,
)
from ray.tune.search import (
UNRESOLVED_SEARCH_SPACE,
UNDEFINED_METRIC_MODE,
UNDEFINED_SEARCH_SPACE,
Searcher,
)
from ray.tune.search.variant_generator import parse_spec_vars
from ray.tune.utils.util import flatten_dict, unflatten_list_dict
try:
import ConfigSpace
from hpbandster.optimizers.config_generators.bohb import BOHB
except ImportError:
BOHB = ConfigSpace = None
logger = logging.getLogger(__name__)
class _BOHBJobWrapper:
"""Mock object for HpBandSter to process."""
def __init__(self, loss: float, budget: float, config: Dict):
self.result = {"loss": loss}
self.kwargs = {"budget": budget, "config": config.copy()}
self.exception = None
class TuneBOHB(Searcher):
    """BOHB suggestion component.
    Requires HpBandSter and ConfigSpace to be installed. You can install
    HpBandSter and ConfigSpace with: ``pip install hpbandster ConfigSpace``.
    This should be used in conjunction with HyperBandForBOHB.
    Args:
        space: Continuous ConfigSpace search space.
            Parameters will be sampled from this space which will be used
            to run trials.
        bohb_config: configuration for HpBandSter BOHB algorithm
        metric: The training result objective value attribute. If None
            but a mode was passed, the anonymous metric `_metric` will be used
            per default.
        mode: One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        points_to_evaluate: Initial parameter suggestions to be run
            first. This is for when you already have some good parameters
            you want to run first to help the algorithm make better suggestions
            for future parameters. Needs to be a list of dicts containing the
            configurations.
        seed: Optional random seed to initialize the random number
            generator. Setting this should lead to identical initial
            configurations at each run.
        max_concurrent: Number of maximum concurrent trials.
            If this Searcher is used in a ``ConcurrencyLimiter``, the
            ``max_concurrent`` value passed to it will override the
            value passed here. Set to <= 0 for no limit on concurrency.
    Tune automatically converts search spaces to TuneBOHB's format:
    .. code-block:: python
        config = {
            "width": tune.uniform(0, 20),
            "height": tune.uniform(-100, 100),
            "activation": tune.choice(["relu", "tanh"])
        }
        algo = TuneBOHB(metric="mean_loss", mode="min")
        bohb = HyperBandForBOHB(
            time_attr="training_iteration",
            metric="mean_loss",
            mode="min",
            max_t=100)
        run(my_trainable, config=config, scheduler=bohb, search_alg=algo)
    If you would like to pass the search space manually, the code would
    look like this:
    .. code-block:: python
        import ConfigSpace as CS
        config_space = CS.ConfigurationSpace()
        config_space.add_hyperparameter(
            CS.UniformFloatHyperparameter("width", lower=0, upper=20))
        config_space.add_hyperparameter(
            CS.UniformFloatHyperparameter("height", lower=-100, upper=100))
        config_space.add_hyperparameter(
            CS.CategoricalHyperparameter(
                name="activation", choices=["relu", "tanh"]))
        algo = TuneBOHB(
            config_space, metric="mean_loss", mode="min")
        bohb = HyperBandForBOHB(
            time_attr="training_iteration",
            metric="mean_loss",
            mode="min",
            max_t=100)
        run(my_trainable, scheduler=bohb, search_alg=algo)
    """
    def __init__(
        self,
        space: Optional[Union[Dict, "ConfigSpace.ConfigurationSpace"]] = None,
        bohb_config: Optional[Dict] = None,
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        points_to_evaluate: Optional[List[Dict]] = None,
        seed: Optional[int] = None,
        max_concurrent: int = 0,
    ):
        assert (
            BOHB is not None
        ), """HpBandSter must be installed!
            You can install HpBandSter with the command:
            `pip install hpbandster ConfigSpace`."""
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
        # Maps trial_id -> the exact flattened config suggested for it.
        self.trial_to_params = {}
        self._metric = metric
        self._bohb_config = bohb_config
        # A Tune-style dict space is converted to a ConfigurationSpace here;
        # an already-built ConfigurationSpace is used as-is.
        if isinstance(space, dict) and space:
            resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
            if domain_vars or grid_vars:
                logger.warning(
                    UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self))
                )
            space = self.convert_search_space(space)
        self._space = space
        self._seed = seed
        self.running = set()
        self.paused = set()
        self._max_concurrent = max_concurrent
        self._points_to_evaluate = points_to_evaluate
        super(TuneBOHB, self).__init__(
            metric=self._metric,
            mode=mode,
        )
        # Defer BOHB construction until a space is known (it may arrive
        # later via set_search_properties()).
        if self._space:
            self._setup_bohb()
    def set_max_concurrency(self, max_concurrent: int) -> bool:
        """Update the concurrency cap (called by ``ConcurrencyLimiter``)."""
        self._max_concurrent = max_concurrent
        return True
    def _setup_bohb(self):
        """Instantiate the underlying HpBandSter BOHB config generator.

        Resolves the anonymous metric, fixes the sign convention in
        ``_metric_op`` and seeds the ConfigSpace when a seed was given.
        """
        from hpbandster.optimizers.config_generators.bohb import BOHB
        if self._metric is None and self._mode:
            # If only a mode was passed, use anonymous metric
            self._metric = DEFAULT_METRIC
        # BOHB minimizes its "loss": negate for max-mode metrics.
        if self._mode == "max":
            self._metric_op = -1.0
        elif self._mode == "min":
            self._metric_op = 1.0
        if self._seed is not None:
            self._space.seed(self._seed)
        self.running = set()
        self.paused = set()
        bohb_config = self._bohb_config or {}
        self.bohber = BOHB(self._space, **bohb_config)
    def set_search_properties(
        self, metric: Optional[str], mode: Optional[str], config: Dict, **spec
    ) -> bool:
        """Late-bind search space / metric / mode; returns False when a
        space was already configured."""
        if self._space:
            return False
        space = self.convert_search_space(config)
        self._space = space
        if metric:
            self._metric = metric
        if mode:
            self._mode = mode
        self._setup_bohb()
        return True
    def suggest(self, trial_id: str) -> Optional[Dict]:
        """Return the next suggested config for *trial_id*, or None when
        the concurrency cap has been reached."""
        if not self._space:
            raise RuntimeError(
                UNDEFINED_SEARCH_SPACE.format(
                    cls=self.__class__.__name__, space="space"
                )
            )
        if not self._metric or not self._mode:
            raise RuntimeError(
                UNDEFINED_METRIC_MODE.format(
                    cls=self.__class__.__name__, metric=self._metric, mode=self._mode
                )
            )
        max_concurrent = (
            self._max_concurrent if self._max_concurrent > 0 else float("inf")
        )
        if len(self.running) >= max_concurrent:
            return None
        # User-provided initial points take priority over BOHB's sampler.
        if self._points_to_evaluate:
            config = self._points_to_evaluate.pop(0)
        else:
            # This parameter is not used in hpbandster implementation.
            config, _ = self.bohber.get_config(None)
        self.trial_to_params[trial_id] = copy.deepcopy(config)
        self.running.add(trial_id)
        return unflatten_list_dict(config)
    def on_trial_result(self, trial_id: str, result: Dict):
        """Feed an intermediate result (with HyperBand budget info) to BOHB."""
        if trial_id not in self.paused:
            self.running.add(trial_id)
        if "hyperband_info" not in result:
            logger.warning(
                "BOHB Info not detected in result. Are you using "
                "HyperBandForBOHB as a scheduler?"
            )
        elif "budget" in result.get("hyperband_info", {}):
            hbs_wrapper = self.to_wrapper(trial_id, result)
            self.bohber.new_result(hbs_wrapper)
    def on_trial_complete(
        self, trial_id: str, result: Optional[Dict] = None, error: bool = False
    ):
        """Drop all bookkeeping for a finished or errored trial."""
        del self.trial_to_params[trial_id]
        self.paused.discard(trial_id)
        self.running.discard(trial_id)
    def to_wrapper(self, trial_id: str, result: Dict) -> _BOHBJobWrapper:
        """Wrap a Tune result into the job object HpBandSter expects,
        applying the min/max sign convention to the metric."""
        return _BOHBJobWrapper(
            self._metric_op * result[self.metric],
            result["hyperband_info"]["budget"],
            self.trial_to_params[trial_id],
        )
    # BOHB Specific.
    # TODO(team-ml): Refactor alongside HyperBandForBOHB
    def on_pause(self, trial_id: str):
        """Mark *trial_id* paused so it no longer counts against concurrency."""
        self.paused.add(trial_id)
        self.running.discard(trial_id)
    def on_unpause(self, trial_id: str):
        """Resume a paused trial."""
        self.paused.discard(trial_id)
        self.running.add(trial_id)
    @staticmethod
    def convert_search_space(spec: Dict) -> "ConfigSpace.ConfigurationSpace":
        """Convert a (nested) Tune search-space dict into a flat
        ``ConfigSpace.ConfigurationSpace``; grid-search params are rejected."""
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to a TuneBOHB search space."
            )
        # Flatten and resolve again after checking for grid search.
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
        def resolve_value(
            par: str, domain: Domain
        ) -> ConfigSpace.hyperparameters.Hyperparameter:
            # Unwrap a Quantized sampler to recover the step size.
            quantize = None
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                quantize = sampler.q
                sampler = sampler.sampler
            if isinstance(domain, Float):
                if isinstance(sampler, LogUniform):
                    lower = domain.lower
                    upper = domain.upper
                    if quantize:
                        # Snap the bounds inward onto the quantization grid.
                        lower = math.ceil(domain.lower / quantize) * quantize
                        upper = math.floor(domain.upper / quantize) * quantize
                    return ConfigSpace.UniformFloatHyperparameter(
                        par, lower=lower, upper=upper, q=quantize, log=True
                    )
                elif isinstance(sampler, Uniform):
                    lower = domain.lower
                    upper = domain.upper
                    if quantize:
                        lower = math.ceil(domain.lower / quantize) * quantize
                        upper = math.floor(domain.upper / quantize) * quantize
                    return ConfigSpace.UniformFloatHyperparameter(
                        par, lower=lower, upper=upper, q=quantize, log=False
                    )
                elif isinstance(sampler, Normal):
                    return ConfigSpace.hyperparameters.NormalFloatHyperparameter(
                        par, mu=sampler.mean, sigma=sampler.sd, q=quantize, log=False
                    )
            elif isinstance(domain, Integer):
                if isinstance(sampler, LogUniform):
                    lower = domain.lower
                    upper = domain.upper
                    if quantize:
                        lower = math.ceil(domain.lower / quantize) * quantize
                        upper = math.floor(domain.upper / quantize) * quantize
                    else:
                        # Tune search space integers are exclusive
                        upper -= 1
                    return ConfigSpace.UniformIntegerHyperparameter(
                        par, lower=lower, upper=upper, q=quantize, log=True
                    )
                elif isinstance(sampler, Uniform):
                    lower = domain.lower
                    upper = domain.upper
                    if quantize:
                        lower = math.ceil(domain.lower / quantize) * quantize
                        upper = math.floor(domain.upper / quantize) * quantize
                    else:
                        # Tune search space integers are exclusive
                        upper -= 1
                    return ConfigSpace.UniformIntegerHyperparameter(
                        par, lower=lower, upper=upper, q=quantize, log=False
                    )
            elif isinstance(domain, Categorical):
                if isinstance(sampler, Uniform):
                    return ConfigSpace.CategoricalHyperparameter(
                        par, choices=domain.categories
                    )
            raise ValueError(
                "TuneBOHB does not support parameters of type "
                "`{}` with samplers of type `{}`".format(
                    type(domain).__name__, type(domain.sampler).__name__
                )
            )
        cs = ConfigSpace.ConfigurationSpace()
        for path, domain in domain_vars:
            # Nested keys are joined with "/" to form the flat parameter name.
            par = "/".join(str(p) for p in path)
            value = resolve_value(par, domain)
            cs.add_hyperparameter(value)
        return cs
    def save(self, checkpoint_path: str):
        """Pickle the full searcher state (cloudpickle handles the BOHB obj)."""
        save_object = self.__dict__
        with open(checkpoint_path, "wb") as outputFile:
            cloudpickle.dump(save_object, outputFile)
    def restore(self, checkpoint_path: str):
        """Restore searcher state previously written by save()."""
        with open(checkpoint_path, "rb") as inputFile:
            save_object = cloudpickle.load(inputFile)
        self.__dict__.update(save_object)
| true
|
04bf2ac55986bea800a619467034cd9a61e32141
|
Python
|
fosua/Lab_Python_06
|
/Lab06.py
|
UTF-8
| 3,056
| 3.421875
| 3
|
[] |
no_license
|
class Player:
    """A football player with a name, an optional team and a list of match scores."""

    def __init__(self, firstname, lastname, team=None):
        self.first_name = firstname
        self.last_name = lastname
        self.score = []  # one entry per recorded match
        self.team = team

    def add_score(self, date, score):
        """Record *score* and return the full score list.

        The *date* argument is accepted for API compatibility but is not
        stored by the current implementation.
        """
        self.score.append(score)
        return self.score

    def total_score(self):
        """Compute, print and return the sum of all recorded scores."""
        # Fix: use sum() instead of a manual index loop.
        self.total = sum(self.score)
        print(self.total)
        return self.total

    def average_score(self):
        """Compute, print and return the mean score.

        Fixes vs. the original: no longer requires total_score() to have
        been called first, and an empty score list yields 0.0 instead of
        raising ZeroDivisionError.
        """
        count = len(self.score)
        self.average = sum(self.score) / count if count else 0.0
        print(self.average)
        return self.average
# Demo: record a series of scores for one player and show the totals.
# Fix: the original used Python 2 print statements (SyntaxError on py3)
# and an index-based loop.
torres = Player('Fernando', 'Torres', 'Chelsea')
new_score = [0, 0, 1, 0, 1]
for s in new_score:
    torres_score = torres.add_score('011/07/09', s)
    print(torres_score)
torres.total_score()
torres.average_score()
print('')
print('')
#part III
class Team:
    """A team with a name, league, manager, points tally and a player roster."""

    def __init__(self, name, league, manager_name, points):
        self.name = name
        self.league = league
        # Fix of the original misspelling "manger_name"; the old attribute
        # is kept as an alias so any existing callers keep working.
        self.manager_name = manager_name
        self.manger_name = manager_name
        self.points = points
        self.players = []

    def add_player(self, player):
        """Append *player* to the roster and print the updated roster."""
        self.players.append(player)
        print(self.players)

    def __str__(self):
        return 'the name of the team is ' + self.name
# Demo: create two teams and two players.
# Fix: Python 2 print statements converted to the print() function.
spain = Team('spain', 'la liga', 'esta', 51)
print(spain)
portugal = Team('portugal', 'epl', 'lizzy', 46)
print(portugal)
torres = Player('Fernando', 'Torres', spain)
ronaldo = Player('Christiano', 'Ronaldo', portugal)
#print torres.first_name
#print ronaldo.team
# NOTE(review): bare strings (not Player objects) are appended here, and to
# the opposite teams of the players created above — confirm the intent.
spain.add_player('ronaldo')
portugal.add_player('torres')
class Match:
    """A single match between two teams, tracking per-player goal tallies."""

    def __init__(self, home_team, away_team, date):
        self.home_team = home_team
        self.away_team = away_team
        self.date = date
        self.home_scores = {}
        self.away_scores = {}

    def home_score(self):
        """Total goals scored by the home side."""
        return sum(self.home_scores.values())

    def away_score(self):
        """Total goals scored by the away side."""
        return sum(self.away_scores.values())

    def winner(self):
        """Name of the winning team, or 'draw' on equal scores."""
        home, away = self.home_score(), self.away_score()
        if home > away:
            return self.home_team.name
        if home < away:
            return self.away_team.name
        return 'draw'

    def add_score(self, player, score):
        """Credit *score* goals to *player* on whichever side he plays for.

        A player belonging to neither team is silently ignored.
        """
        if player.team == self.home_team:
            tally = self.home_scores
        elif player.team == self.away_team:
            tally = self.away_scores
        else:
            return
        tally[player.last_name] = tally.get(player.last_name, 0) + score
# Demo: play the final and print the winner.
# Fix: Python 2 print statement converted to the print() function.
euro_match_final = Match(spain, portugal, '2012/06/27')
euro_match_final.add_score(torres, 1)
euro_match_final.add_score(ronaldo, 1)
euro_match_final.add_score(torres, 1)
print(euro_match_final.winner())
| true
|
4c83d580d3ed5334b9afad9ab0943a1f5d1fa335
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02937/s449980375.py
|
UTF-8
| 867
| 2.8125
| 3
|
[] |
no_license
|
def main():
import sys
import copy
from bisect import bisect_right
from collections import defaultdict
input = sys.stdin.readline
s, t = input().strip(), input().strip()
lens = len(s)
index_list = defaultdict(list)
leng = defaultdict(int)
for i, ss in enumerate(s):
index_list[ss].append(i)
for s, i in index_list.items():
if i:
leng[s] = len(index_list[s])
prev = -1
ans = 0
for tt in t:
if tt not in leng:
print(-1)
return False
p = bisect_right(index_list[tt], prev)
# print('tt={}, prev={}, p={}, ans={}'.format(tt, prev, p, ans))
if p < leng[tt]:
prev = index_list[tt][p]
else:
prev = index_list[tt][0]
ans += lens
print(ans+prev+1)
if __name__ == '__main__':
main()
| true
|
19632b07fbb4f05c419c7f47636a8e4aa96848a8
|
Python
|
rcasas37/PUPS
|
/ROV/test_uart.py
|
UTF-8
| 1,695
| 3.21875
| 3
|
[] |
no_license
|
# Import code modules
import time
import serial
import os
import test_uart
#def main():
# Open serial port communication on the Pi's primary UART, 8N1 @ 9600 baud.
# timeout=1 makes ser.read() return after at most 1 s even with no data.
ser = serial.Serial(port='/dev/ttyS0', baudrate=9600, parity=serial.PARITY_NONE,
            stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=1) # (physical port, baudrate, timeout interval)
# Discard any stale bytes already sitting in the input buffer.
ser.flushInput()
def write_ser(user):
    """Encode the string *user* to bytes and transmit it on the port."""
    ser.write(user.encode())
def read_ser():
    """Read and return a single byte from the serial port (or b'' on timeout)."""
    return ser.read(1)
def read_ser_port(size=None, eol=b';'):
    """Read from the serial port until *eol* is seen, *size* bytes have
    arrived, or a single-byte read times out; return the collected bytes.

    Bug fix: *eol* is now bytes. The original default was the str ';',
    which can never compare equal to a bytearray slice in Python 3, so
    the terminator check silently never matched and every read ran until
    timeout.
    """
    if isinstance(eol, str):
        eol = eol.encode()  # tolerate callers that still pass a str
    len_eol = len(eol)
    line_str = bytearray()
    while True:
        char = ser.read(1)  # blocks for at most the port timeout (1 s)
        if not char:
            break  # timeout: hand back whatever arrived so far
        line_str += char
        if line_str[-len_eol:] == eol:
            break  # terminator received
        if size is not None and len(line_str) >= size:
            break  # caller-imposed length cap reached
    return bytes(line_str)
while 1:
    # Test command in the ROV protocol: start char 'S', seven
    # comma-separated fields, ';' terminator.
    str_input = "S,1,2,3,4,5,6,7;"
    line = read_ser_port()
    write_ser(str_input)
    #time.sleep(.16)
    print("This is the received char: ", line)
#main()
| true
|
862cba6c0ff1a1a973be4a53de4beba31595b78c
|
Python
|
tan506/wordvector
|
/main.py
|
UTF-8
| 929
| 3.109375
| 3
|
[] |
no_license
|
# Derive morpheme embeddings from an interlinear corpus: each record is
# three lines (latin text, gloss, translation) followed by a blank line.
from itertools import chain


def chunk(s):
    """Split *s* on whitespace and then on '-', yielding morpheme chunks."""
    return chain(*(c.split("-") for c in s.split()))


def process(latin, gloss, trans):
    """Pair each morpheme chunk of *latin* with the matching gloss chunk.

    The *trans* line is accepted but not yet used (work in progress).
    """
    chunks = zip(chunk(latin), chunk(gloss))
    return chunks


def main():
    """Stream the corpus file and feed each interlinear block to process().

    Fixes vs. the original: the Python 2 file method .next() is replaced
    by the built-in next(); chunk() and process() are defined before
    first use (the original raised NameError at import); and the file
    I/O is guarded so importing this module has no side effects.
    """
    with open("browncorpus.txt") as model_document:
        try:
            while True:
                latin = next(model_document).strip()
                gloss = next(model_document).strip()
                trans = next(model_document).strip()
                process(latin, gloss, trans)
                next(model_document)  # skip the blank separator line
        except StopIteration:
            pass  # reached end of file


if __name__ == "__main__":
    main()
| true
|
cc24c1b611c91cc683704352bb128042497179bb
|
Python
|
tommasorea/forest_fires_cause_predictions
|
/src/main.py
|
UTF-8
| 171
| 2.671875
| 3
|
[] |
no_license
|
import pandas as pd
# Training data for the forest-fire cause predictor.
filepath = "../data/fires2015_train.csv"
# Load the CSV into a DataFrame.
# NOTE(review): parse_dates=True only parses a date-like index; to parse
# data columns, pass a list of column names — confirm which was intended.
df = pd.read_csv(filepath, parse_dates=True)
# Quick sanity check: show the first ten rows.
print(df.head(10))
| true
|
73d5c7ff2861adb81ff3effe699cf2c032d5cd96
|
Python
|
pletzer/firstStepsInViz
|
/scene/coneWithLight.py
|
UTF-8
| 958
| 2.796875
| 3
|
[] |
no_license
|
import vtk
# create a rendering window and renderer (mid-grey background, 640x480)
ren = vtk.vtkRenderer()
ren.SetBackground(0.5, 0.5, 0.5)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(640, 480)
# create lights: a dim warm (orange) light from one side
light = vtk.vtkLight()
light.SetPosition(4, 3, 2)
light.SetColor(1, 0.5, 0.3)
light.SetIntensity(0.3)
# create another light: a magenta fill light from the opposite side
light2 = vtk.vtkLight()
light2.SetPosition(2, 3, 4)
light2.SetColor(1, 0, 1.0)
light2.SetIntensity(0.4)
# create a renderwindowinteractor for mouse/keyboard camera control
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create cone geometry (60 facets around the axis)
cone = vtk.vtkConeSource()
cone.SetResolution(60)
# mapper: turns the cone's polydata into renderable primitives
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
# actor: places the mapped geometry in the scene
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
# assign actor and both lights to the renderer
ren.AddActor(coneActor)
ren.AddLight(light)
ren.AddLight(light2)
# enable user interface interactor; Start() blocks until the window closes
iren.Initialize()
renWin.Render()
iren.Start()
| true
|
9068842e42ed47adbc77be0374f7050ab70f517b
|
Python
|
iTeam-co/pytglib
|
/pytglib/api/types/file_part.py
|
UTF-8
| 522
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
from ..utils import Object
class FilePart(Object):
    """
    Contains a part of a file

    Attributes:
        ID (:obj:`str`): ``FilePart``

    Args:
        data (:obj:`bytes`):
            File bytes

    Returns:
        FilePart

    Raises:
        :class:`telegram.Error`
    """
    ID = "filePart"

    def __init__(self, data, **kwargs):
        # Raw bytes of this file part; extra kwargs are ignored.
        self.data = data

    @staticmethod
    def read(q: dict, *args) -> "FilePart":
        payload = q.get('data')
        return FilePart(payload)
| true
|
724adca8ea7b9884cc7d91687f8a213b32f0623e
|
Python
|
agi1512/ConnectFourAI
|
/screenreader.py
|
UTF-8
| 5,337
| 2.765625
| 3
|
[] |
no_license
|
import win32gui, win32api, win32con, time, colorsys
from win32api import GetSystemMetrics
import ImageGrab
from connectfour import *
from minimax import *
from book import *
import threading
## TODO HANDLE WHEN TWO STRIPES ARE THE SAME COLOR
## MAKE METHOD FOR FINDING KEYS
# Screen coordinates of the board's top-left cell; cells are 135 px apart.
start = [785, 135]
# Number of browser tabs the worker threads share.
NUM_TABS = 20
# tabs[i] is True while tab i is free to be claimed by a worker.
tabs = [True] * NUM_TABS
# Global lock guarding tabs, bk.book and all screen interaction.
lock = threading.Lock()
class grabber:
    """Screen-scraping helper: captures the screen, reads board cells by
    pixel colour, and drives the Connect Four web UI with synthetic clicks.
    """
    def __init__(self):
        # Full-screen snapshot used for all subsequent pixel reads.
        self.image = ImageGrab.grab()
    def capture(self):
        """Refresh the cached screenshot."""
        self.image = ImageGrab.grab()
    def get_pixel_color(self, i_x, i_y):
        """Return the (R, G, B) tuple at screen coordinate (i_x, i_y)."""
        return self.image.getpixel((i_x, i_y))
    def click(self, x,y):
        """Move the cursor to (x, y) and perform a left click."""
        win32api.SetCursorPos((x,y))
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
    def get_board(self):
        """Return a 7-element list: number of pieces in each column.

        A cell is counted as occupied when it is not pure white.
        """
        columns = [0,0,0,0,0,0,0]
        for i in range(0, 6):
            for j in range(0, 7):
                if self.get_pixel_color(start[0] + 135 * j, start[1] + 135 * i) != (255, 255, 255):
                    columns[j] += 1
        return columns
    def enter_board_string(self, board):
        """Replay a move string (column digits) by clicking each column."""
        for i in board:
            self.click(start[0] + 135 * int(i), 135)
    def click_back(self):
        """Click the UI's undo/back button once."""
        self.click(475, 780)
    def reset(self):
        """Undo up to 20 moves, returning the UI to an empty board."""
        for i in range(0, 20):
            self.click_back()
    def enter_next_move(self):
        """Click the site's "next move" button.

        The button lives at one of two positions; the pixel colour probe
        decides which one is currently shown. Double-click is intentional.
        """
        if self.get_pixel_color(585, 875) == (166, 165, 201):
            self.click(585, 875)
            self.click(585, 875)
        else:
            self.click(585, 975)
            self.click(585, 975)
    def get_best_move(self, board_string):
        """Return the column the site plays after *board_string*, or -1.

        Diffs the board before and after triggering the site's move.
        """
        self.reset()
        self.enter_board_string(board_string)
        time.sleep(0.7)
        first = self.get_board()
        self.enter_next_move()
        time.sleep(0.7)
        second = self.get_board()
        for i in range(0, 7):
            if first[i] != second[i]:
                return i
        return -1
    def get_tab(self):
        """Block until a browser tab is free, claim it and return its index."""
        while True:
            lock.acquire()
            for i in range(len(tabs)):
                if tabs[i]:
                    tabs[i] = False
                    lock.release()
                    return i
            lock.release()
            time.sleep(0.1)
    def release_tab(self, tab):
        """Mark tab *tab* as free again."""
        lock.acquire()
        tabs[tab] = True
        lock.release()
    def switch_tab(self, tab):
        """Click the browser tab strip to focus tab *tab*."""
        self.click(50 + 83*tab, 15)
        time.sleep(0.05)
# Hashes of positions already enqueued, to skip transpositions.
transpose = []
bk = book()
# Start from an empty opening book; filled in by generate()/worker().
bk.book = {}
# Count of unique positions queued for evaluation.
found = 0
def generate(node, max_depth, printer=False):
    """Recursively enumerate opening positions up to *max_depth* plies,
    queueing each new, non-losing position in bk.book for the workers.
    """
    global found
    if max_depth <= 0:
        return
    if node.gethash() in transpose:
        #print("already have a transpose")
        return
    if bk.inBook(node.line):
        #print("already have symmertry")
        return
    # if this position isnt useful, give up
    checker = Search(node)
    checker.clear_history()
    checker.allowed_time = 5
    checker.start_time = time.time()
    # A shallow minimax score beyond +/-5000 means a forced result; skip it.
    if abs(checker.minimax(node, 4, -999999, 999999)) > 5000:
        #print("Aborting line due to bad score")
        return
    transpose.append(node.gethash())
    # get the best move for this position
    #bk.book[node.line] = str(foo.get_best_move(node.line))
    lock.acquire()
    # NOTE(review): membership is tested on bk.opening_book but entries are
    # written to bk.book — confirm these are meant to be different dicts.
    if node.line not in bk.opening_book:
        print(found)
        found += 1
        bk.book[node.line] = ""
    lock.release()
    for move in node.legal_moves():
        if printer:
            print(move)
        child = node.export()
        child.make_move(move)
        generate(child, max_depth - 1)
def do_sleep():
    """Sleep ~1.1 s with the global lock released.

    The caller MUST hold the lock; it is released for the duration of the
    sleep so other workers can use the screen, then reacquired.
    """
    lock.release()
    time.sleep(1.1)
    lock.acquire()
def worker():
    """Worker thread: claim a tab, grab an unevaluated position from
    bk.book, replay it in the browser, trigger the site's move, and store
    the column the site played as that position's book answer.
    """
    foo = grabber()
    while True:
        my_tab = foo.get_tab()
        my_key = ""
        # Poll the book for a position nobody has claimed yet ("" value).
        while my_key == "":
            lock.acquire()
            for key in bk.book:
                if bk.book[key] == "":
                    my_key = key
                    bk.book[key] = "taken"
                    break
            lock.release()
            time.sleep(0.05)
        # Replay the position in our tab and snapshot the board.
        lock.acquire()
        foo.switch_tab(my_tab)
        foo.reset()
        foo.enter_board_string(my_key)
        do_sleep()
        foo.switch_tab(my_tab)
        time.sleep(0.1)
        foo.capture()
        lock.release()
        first = foo.get_board()
        # Let the site make its move, then snapshot again.
        lock.acquire()
        foo.switch_tab(my_tab)
        foo.enter_next_move()
        do_sleep()
        foo.switch_tab(my_tab)
        time.sleep(0.1)
        foo.capture()
        lock.release()
        second = foo.get_board()
        # The column that changed is the site's reply.
        # NOTE(review): if no column differs (e.g. a mis-read screen), the
        # key stays marked "taken" forever and is never retried — confirm.
        for i in range(0, 7):
            if first[i] != second[i]:
                lock.acquire()
                bk.book[my_key] = str(i)
                lock.release()
        foo.release_tab(my_tab)
# Spin up one daemon worker per browser tab.
threads = []
for i in range(0, NUM_TABS):
    threads.append(threading.Thread(target=worker))
    threads[i].daemon = True
    threads[i].start()
startt = time.time()
# Enumerate all opening positions to 7 plies; workers fill in answers.
generate(Root_Node(), 7, True)
# Wait until every queued position has been answered.
while True:
    time.sleep(1)
    lock.acquire()
    done = True
    counter = 0
    for key in bk.book:
        # NOTE(review): `and` binds tighter than `or`, so this parses as
        # (== "") or (== "taken" and key != "") — confirm the intended
        # grouping was ((== "") or (== "taken")) and key != "".
        if bk.book[key] == "" or bk.book[key] == "taken" and key != "":
            counter += 1
            done = False
    lock.release()
    print(counter)
    if done:
        break
print(time.time() - startt)
print(len(bk.book))
# Persist the finished book as a repr'd dict.
filer = open("output.txt", "w")
filer.write(str(bk.book))
filer.close()
| true
|
19543e5f564d2a360d140ea427900d80d832b50e
|
Python
|
ivombi/Introduction-to-Programming
|
/assignment7.py
|
UTF-8
| 408
| 2.84375
| 3
|
[] |
no_license
|
import sys
# Ratings passed on the command line (argv[0] is the script name).
rating = sys.argv[1:]
# NOTE(review): number_list is never used below — possibly leftover.
number_list = [1,2,3,4,5,6,7,8,9]
def catapproval(rating):
    """Return *rating* with its first and last characters removed.

    Fixes vs. the original: the loop index was compared against the
    strings "0"/str(len-1) (always False for an int), characters were
    "called" with rating(i) instead of indexed with rating[i], and the
    inner helper was never invoked nor returned anything.
    """
    def rating_splitting(rating):
        new_rating = ""
        for i in range(len(rating)):
            # Skip the first and last positions.
            if i == 0 or i == len(rating) - 1:
                continue
            new_rating += rating[i]
        return new_rating
    return rating_splitting(rating)
| true
|
46f537a00838287b6a647200f1f09aa0d51e8bcd
|
Python
|
JosephMcGrath/tidyclipper
|
/src/tidyclipper/feed_entry.py
|
UTF-8
| 3,507
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
"""
Tools to manage individual entries from RSS feeds.
"""
import datetime
import re
from urllib import parse
import feedparser
from bs4 import BeautifulSoup
from .templates import ENTRY
def sanitise_html(html: str) -> str:
    """
    Removes a set of tags from a HTML string.

    Intended to return HTML that can be embedded in a larger document.
    """
    if html is None:
        return ""
    soup = BeautifulSoup(html, "lxml")
    # Strip tags we never want to carry over.
    for unwanted in ("img", "script", "embed", "iframe", "hr"):
        for node in soup.findAll(unwanted):
            node.extract()
    # Drop every attribute except hyperlink targets.
    for node in soup.recursiveChildGenerator():
        if hasattr(node, "attrs"):
            node.attrs = {
                key: value for key, value in node.attrs.items() if key == "href"
            }
    # Prune elements that contain no visible text at all.
    for node in soup.find_all():
        if not node.get_text(strip=True):
            node.extract()
    output = soup.prettify()
    # Remove structural wrapper tags entirely (opening and closing forms).
    for tag in ("html", "body", "div", "span"):
        output = re.sub(r"<\/?" + tag + ">", "", output)
    output = re.sub(r"(\s)+", r"\1", output)
    # Demote top-level headings so the snippet nests under a host page.
    for heading in ("h1", "h2"):
        output = re.sub(r"<(\/?)" + heading + ">", r"<\1h3>", output)
    return output.strip()
def sanitise_url(url: str) -> str:
    """
    Cleans up a url by removing the query parameter.
    """
    parts = list(parse.urlparse(url))
    parts[4] = ""  # index 4 of the 6-tuple is the query string
    return parse.urlunparse(parts)
class FeedEntry:
    """
    A single entry from an RSS feed.

    Identity (hashing) is based solely on the sanitised link.
    """
    @classmethod
    def from_rss(
        cls, entry: feedparser.FeedParserDict, feed: feedparser.FeedParserDict
    ) -> "FeedEntry":
        """
        Converts a feedparser entry / feed into a FeedEntry.

        Falls back to "now" when the entry lacks a parsable publish date.
        """
        try:
            time = datetime.datetime(*entry.published_parsed[:6]).isoformat()
        except (AttributeError, TypeError):
            time = datetime.datetime.now().isoformat()
        return cls(
            title=entry.get("title"),
            summary=entry.get("summary"),
            link=sanitise_url(entry.get("link")),
            time=time,
            feed=feed.feed.get("title"),
            source=feed.get("href"),
        )
    def __init__(
        self, title: str, summary: str, link: str, time: str, feed: str, source: str
    ):
        # A missing title becomes the placeholder "-".
        if title is None:
            self.title: str = "-"
        else:
            self.title = title.strip()
        self.summary = summary
        self.link = link
        self.time = time
        self.feed = feed
        self.source = source
        try:
            self.summary = sanitise_html(self.summary)
        except TypeError:
            # summary was not a string; keep it as given.
            pass
    def __hash__(self) -> int:
        return hash(self.link)
    def __repr__(self) -> str:
        return f"<Feed Entry : {self.title[:50]}>"
    def as_markdown(self) -> str:
        """
        Convert the feed entry to a simple markdown output format.
        """
        output = f"## {self.title}\n\n"
        output += f"* {self.time}\n* {self.feed}\n* {self.link}\n\n"
        output += f"{self.summary}\n\n---"
        return output
    def as_html(self) -> str:
        """
        Formats the feed entry to a snippet of HTML.

        NOTE(review): re-sanitises and mutates self.summary on every call.
        """
        self.summary = sanitise_html(self.summary)
        return ENTRY.render(entry=self)
| true
|
2a4f9986bb02606a2946d2e746869a068a000e9f
|
Python
|
cu-swe4s-fall-2019/hash-tables-evangallagher
|
/hash_tables.py
|
UTF-8
| 3,024
| 3.03125
| 3
|
[] |
no_license
|
import hash_functions
import sys
import time
import random
class LinearProbe:
    """Open-addressing hash table resolving collisions by linear probing.

    Attributes:
        hash_function: callable (key, N) -> slot index in [0, N)
        N: number of slots in the table
        T: slot array holding (key, value) tuples or None
        M: number of occupied slots
    """

    def __init__(self, N, hash_function):
        self.hash_function = hash_function
        self.N = N
        self.T = [None for i in range(N)]
        self.M = 0

    def add(self, key, value):
        """Insert (key, value); return True on success, False if full.

        Bug fixes vs. the original: calls self.hash_function (self.hash
        did not exist) and keeps probing past occupied slots instead of
        giving up at the first collision.
        """
        start_hash = self.hash_function(key, self.N)
        for i in range(self.N):
            test_slot = (start_hash + i) % self.N
            if self.T[test_slot] is None:
                self.T[test_slot] = (key, value)
                self.M += 1
                return True
        return False  # table is full

    def search(self, key):
        """Return the value stored for *key*, or None if absent.

        Bug fix: continues along the probe chain instead of returning
        None at the first non-matching occupied slot.
        """
        start_hash = self.hash_function(key, self.N)
        for i in range(self.N):
            test_slot = (start_hash + i) % self.N
            if self.T[test_slot] is None:
                return None  # empty slot terminates the probe chain
            if self.T[test_slot][0] == key:
                return self.T[test_slot][1]
        return None
class ChainedHash:
    """Hash table resolving collisions by chaining (a list per slot)."""

    def __init__(self, N, hash_function):
        self.hash_function = hash_function
        self.N = N
        self.T = [[] for i in range(N)]
        self.M = 0

    def add(self, key, value):
        """Append (key, value) to the chain at the key's slot.

        Bug fix: the original replaced the whole bucket list with a call
        to the nonexistent self.hash instead of appending the pair.
        """
        start_hash = self.hash_function(key, self.N)
        self.T[start_hash].append((key, value))
        self.M += 1
        return True

    def search(self, key):
        """Return the first value stored for *key*, or None if absent."""
        start_hash = self.hash_function(key, self.N)
        for k, v in self.T[start_hash]:
            if key == k:
                return v
        return None
if __name__ == '__main__':
    # CLI: table_size hash_alg(ascii|rolling|python) strategy(linear|chain)
    #      data_file keys_to_add
    N = int(sys.argv[1])
    hash_alg = sys.argv[2]
    collision_strategy = sys.argv[3]
    data_file_name = sys.argv[4]  # fixed typo: was data_file_namem
    keys_to_add = int(sys.argv[5])

    # Map CLI choices to implementations. Fixes vs. the original: it
    # referenced the nonexistent LPHashTable/ChainHashTable classes
    # (defined above as LinearProbe/ChainedHash).
    hash_funcs = {
        'ascii': hash_functions.h_ascii_sum,
        'rolling': hash_functions.h_polynomial_rolling,
        'python': hash_functions.h_python,
    }
    tables = {'linear': LinearProbe, 'chain': ChainedHash}
    ht = None
    if hash_alg in hash_funcs and collision_strategy in tables:
        ht = tables[collision_strategy](N, hash_funcs[hash_alg])

    keys_to_search = 100
    V = []
    for l in open(data_file_name):
        # NOTE(review): reservoir_sampling is not defined in this module —
        # it presumably lives in a helper module; confirm the import.
        reservoir_sampling(l, keys_to_search, V)
        t0 = time.time()
        ht.add(l, l)  # fixed: the tables expose add(), not insert()
        t1 = time.time()
        print('insert', ht.M/ht.N, t1 - t0)
        if ht.M == keys_to_add:
            sys.exit()  # fixed: bare `sys.exit` never called the function
| true
|
581112f826749be80ac55fca91ed0457280231d9
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p03327/s827607125.py
|
UTF-8
| 63
| 3.078125
| 3
|
[] |
no_license
|
# AtCoder: contests numbered up to 999 are titled "ABC"; later ones "ABD".
N = int(input())
ans = "ABC" if N <= 999 else "ABD"
print(ans)
| true
|
b675edd995d0532ca10f820b01faaee8fb22cf47
|
Python
|
ringof/pyembc
|
/pyembc/_pyembc.py
|
UTF-8
| 22,202
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
import sys
import ctypes
import struct
from enum import Enum, auto
from typing import Type, Any, Iterable, Dict, Optional, Mapping
__all__ = [
"pyembc_struct",
"pyembc_union"
]
# save the system's endianness (True on little-endian hosts)
_SYS_ENDIANNESS_IS_LITTLE = sys.byteorder == "little"
# attribute names used on generated classes to hold pyembc fields and endianness
_FIELDS = "__pyembc_fields__"
_ENDIAN = "__pyembc_endian__"
# name of the field in ctypes instances that hold the struct char
_CTYPES_TYPE_ATTR = "_type_"
# name of the field in ctypes Structure/Union instances that hold the fields
_CTYPES_FIELDS_ATTR = "_fields_"
# name of the field in ctypes Structure/Union instances that hold packing value
_CTYPES_PACK_ATTR = "_pack_"
# name of the field in ctypes Structure instances that are non-native-byteorder
_CTYPES_SWAPPED_ATTR = "_swappedbytes_"
class PyembcFieldType:
    """
    Class for holding information about the fields
    """
    # noinspection PyProtectedMember
    def __init__(self, _type, bit_size: int, bit_offset: int):
        # _type is a ctypes type; bit_size/bit_offset are None for
        # non-bitfield members.
        self.base_type = _type
        self.bit_size = bit_size
        self.bit_offset = bit_offset
    @property
    def is_bitfield(self) -> bool:
        """True if this field was declared with a (type, bits) tuple."""
        return self.bit_size is not None
    @property
    def is_ctypes_type(self) -> bool:
        """True if base_type is any supported ctypes type."""
        # noinspection PyProtectedMember
        return issubclass(
            self.base_type, (ctypes._SimpleCData, ctypes.Structure, ctypes.Union, ctypes.Array)
        )
    @property
    def is_ctypes_simple_type(self) -> bool:
        """True if base_type is a scalar ctypes type (c_uint8, c_float, ...)."""
        # noinspection PyProtectedMember
        return issubclass(self.base_type, ctypes._SimpleCData)
    @property
    def is_structure(self) -> bool:
        """True if base_type is a ctypes Structure subclass."""
        return issubclass(self.base_type, ctypes.Structure)
class _PyembcTarget(Enum):
    """
    Target type for pyembc class creation
    """
    # Generate a ctypes Structure subclass.
    STRUCT = auto()
    # Generate a ctypes Union subclass.
    UNION = auto()
def _check_value_for_type(field_type: PyembcFieldType, value: Any):
    """
    Checks whether a value can be assigned to a field.

    :param field_type: type class of the field.
    :param value: value to be written
    :raises: ValueError if the value does not fit the field,
        TypeError if the field is not a scalar ctypes type
    """
    if field_type.is_ctypes_simple_type:
        # check for ctypes types, that have the _type_ attribute, containing a struct char.
        struct_char = getattr(field_type.base_type, _CTYPES_TYPE_ATTR)
        # lowercase struct chars denote signed types
        is_signed = struct_char.islower()
        # noinspection PyProtectedMember
        if isinstance(value, ctypes._SimpleCData):
            _value = value.value
        else:
            _value = value
        try:
            # struct.pack enforces the type's range/representation for us
            struct.pack(struct_char, _value)
        except struct.error as e:
            raise ValueError(
                f'{value} cannot be set for {field_type.base_type.__name__} ({repr(e)})!'
            ) from None
        if field_type.is_bitfield:
            # bitfields are narrower than the base type; re-check the range
            if is_signed:
                max_raw = 2 ** (field_type.bit_size - 1) - 1
                min_raw = -(2 ** (field_type.bit_size - 1))
            else:
                max_raw = 2 ** field_type.bit_size - 1
                min_raw = 0
            if not min_raw <= _value <= max_raw:
                raise ValueError(f"Cannot set {_value} for this bitfield")
    else:
        raise TypeError('Got non-ctypes type!')
def _is_little_endian(obj: ctypes.Structure) -> bool:
    """
    Checks whether a Structure instance/class is little endian.

    A byte-swapped structure has the opposite endianness of the host, so
    the result is "is swapped" XOR "host is little endian".

    :param obj: Structure instance/class
    :return: True if little endian
    """
    swapped = hasattr(obj, _CTYPES_SWAPPED_ATTR)
    return swapped != _SYS_ENDIANNESS_IS_LITTLE
def _is_pyembc_type(instance: PyembcFieldType) -> bool:
    """
    Checks if an object/field is a pyembc instance by checking if it has the __pyembc_fields__ attribute

    :param instance: instance to check
    :return: True if pyembc instance
    """
    # Only classes produced by _generate_class() carry the _FIELDS attribute.
    return hasattr(instance.base_type, _FIELDS)
# noinspection PyProtectedMember
def _short_type_name(typeobj: PyembcFieldType) -> str:
    """
    Returns a short type name for a basic type, like u8, s16, etc...

    :param typeobj: pyembc type object
    :return: short name for the type
    """
    # noinspection PyUnresolvedReferences
    bit_size = struct.calcsize(typeobj.base_type._type_) * 8
    # noinspection PyUnresolvedReferences
    if issubclass(typeobj.base_type, (ctypes.c_float, ctypes.c_double)):
        prefix = 'f'
    elif typeobj.base_type._type_.isupper():
        prefix = 'u'  # uppercase struct chars are unsigned
    else:
        prefix = 's'
    return f"{prefix}{bit_size}"
# noinspection PyProtectedMember
def _c_type_name(typeobj: PyembcFieldType) -> str:
    """
    Returns an ANSI c type name for a basic type, like unsigned char, signed short, etc...

    :param typeobj: pyembc type object
    :return: c type name for the type
    """
    # noinspection PyUnresolvedReferences
    byte_size = struct.calcsize(typeobj.base_type._type_)
    if issubclass(typeobj.base_type, (ctypes.c_float, ctypes.c_double)):
        if byte_size == 4:
            return "float"
        if byte_size == 8:
            return "double"
        raise ValueError("invalid length for float")
    # Integer types: map width to the C keyword, then prepend signedness.
    names = {1: "char", 2: "short", 4: "int", 8: "long"}
    if byte_size not in names:
        raise ValueError("invalid length")
    # noinspection PyUnresolvedReferences
    signed = "unsigned" if typeobj.base_type._type_.isupper() else "signed"
    return f"{signed} {names[byte_size]}"
def __len_for_union(self):
    """
    Monkypatch __len__() method for ctypes.Union
    """
    # len(union_instance) reports the union's byte size, mirroring the
    # __len__ that _generate_class adds to generated Structure classes.
    return ctypes.sizeof(self)
def _print_field_value(field, typeobj):
if issubclass(typeobj.base_type, (ctypes.c_float, ctypes.c_double)):
return f"{field:.6f}"
else:
return f"0x{field:X}"
def __repr_for_union(self):
    """
    Monkypatch __repr__() method for ctypes.Union

    Renders ClassName(field:typeN=value, ...), recursing into nested
    pyembc members.
    """
    _fields = getattr(self, _FIELDS)
    field_count = len(_fields)
    s = f'{self.__class__.__name__}('
    for i, (field_name, field_type) in enumerate(_fields.items()):
        _field = getattr(self, field_name)
        if _is_pyembc_type(field_type):
            # nested struct/union: delegate to its own repr
            s += f"{field_name}={repr(_field)}"
        else:
            if field_type.is_bitfield:
                bitfield_info = field_type.bit_size
            else:
                bitfield_info = ''
            s += f"{field_name}:{_short_type_name(field_type)}{bitfield_info}={_print_field_value(_field, field_type)}"
        if i < field_count - 1:
            s += ", "
    s += ')'
    return s
# Monkypatch ctypes.Union: it only works like this, because Union is a metaclass,
# and the method with exec/setattr does not work for it, as described here:
# https://stackoverflow.com/questions/53563561/monkey-patching-class-derived-from-ctypes-union-doesnt-work
# However, it only seems to be needed for __len__ and __repr__.
# Note: this patches ctypes.Union globally, affecting every Union subclass
# in the process, not just pyembc-generated ones.
ctypes.Union.__len__ = __len_for_union
ctypes.Union.__repr__ = __repr_for_union
def _add_method(
        cls: Type,
        name: str,
        args: Iterable[str],
        body: str,
        return_type: Any,
        docstring="",
        _globals: Optional[Dict[str, Any]] = None,
        _locals: Optional[Mapping] = None,
        class_method=False
):
    """
    Magic for adding methods dynamically to a class. Yes, it uses exec(). I know. Sorry about that.

    :param cls: class to extend
    :param name: name of the method to add
    :param args: arguments of the method
    :param body: body code of the method
    :param return_type: return type of the method
    :param docstring: optional docstring for the method
    :param _globals: globals for the method
    :param _locals: locals for the method
    :param class_method: if True, generates a classmethod
    """
    # default locals
    __locals = dict()
    # the return annotation resolves against _return_type in __locals
    __locals["_return_type"] = return_type
    return_annotation = "->_return_type"
    # default globals: everything the generated method bodies may reference
    __globals = {
        "cls": cls,
        "ctypes": ctypes,
        "struct": struct,
        "_is_pyembc_type": _is_pyembc_type,
        "_short_type_name": _short_type_name,
        "_c_type_name": _c_type_name,
        "_is_little_endian": _is_little_endian,
        "_check_value_for_type": _check_value_for_type,
        "_print_field_value": _print_field_value
    }
    # update globals and locals
    if _globals is not None:
        __globals.update(_globals)
    if _locals is not None:
        __locals.update(_locals)
    # final code
    args = ','.join(args)
    code = f"def {name}({args}){return_annotation}:\n{body}"
    # execute it and save to the class
    exec(code, __globals, __locals)
    # exec left the compiled function in __locals under its name
    method = __locals[name]
    method.__doc__ = docstring
    if class_method:
        method = classmethod(method)
    setattr(cls, name, method)
def _generate_class(_cls, target: _PyembcTarget, endian=sys.byteorder, pack=4):
    """
    Generates a new class based on the decorated one that we gen in the _cls parameter.
    Adds methods, sets bases, etc.

    :param _cls: class to work on
    :param target: union/struct
    :param endian: endianness for structures. Default is the system's byteorder.
    :param pack: packing for structures
    :return: generated class
    """
    # get the original class' annotations, we will parse these and generate the fields from these.
    cls_annotations = _cls.__dict__.get('__annotations__', {})
    # ctypes currently does not implement the BigEndianUnion and LittleEndianUnion despite its documentation
    # says so. Therefore, we use simple Union for now. Details:
    # https://stackoverflow.com/questions/49524952/bigendianunion-is-not-part-of-pythons-ctypes
    # https://bugs.python.org/issue33178
    if endian == "little":
        _bases = {
            _PyembcTarget.STRUCT: ctypes.LittleEndianStructure,
            _PyembcTarget.UNION: ctypes.Union
        }
    elif endian == "big":
        _bases = {
            _PyembcTarget.STRUCT: ctypes.BigEndianStructure,
            _PyembcTarget.UNION: ctypes.Union
        }
    else:
        raise ValueError("Invalid endianness")
    # create the new class; the user's class serves only as a declaration template
    cls = type(_cls.__name__, (_bases[target], ), {})
    # set our special attribute to save fields
    setattr(cls, _FIELDS, {})
    _fields = getattr(cls, _FIELDS)
    # go through the annotations and create fields
    # (a (ctype, nbits) tuple annotation declares a bitfield member; runs of
    # bitfields must share one base type and fill it exactly)
    _ctypes_fields = []
    _first_endian = None
    _bitfield_counter = 0
    _bitfield_basetype_bitsize = 0
    _bitfield_basetype = None
    bit_offset = None
    for field_cnt, (field_name, _field_type) in enumerate(cls_annotations.items()):
        if isinstance(_field_type, tuple):
            __field_type, bit_size = _field_type
            if _bitfield_counter == 0:
                # first member of a new bitfield run
                _bitfield_counter = bit_size
                _bitfield_basetype_bitsize = struct.calcsize(__field_type._type_) * 8
                _bitfield_basetype = __field_type
                bit_offset = 0
            else:
                _bitfield_counter += bit_size
                if __field_type != _bitfield_basetype:
                    raise SyntaxError("Bitfields must be of same type!")
                if _bitfield_counter > _bitfield_basetype_bitsize:
                    raise SyntaxError("Bitfield overflow!")
                if _bitfield_counter == _bitfield_basetype_bitsize:
                    # full bitfield
                    _bitfield_counter = 0
                    # NOTE(review): _bitfield_offset is assigned but never
                    # read anywhere — possibly meant bit_offset; confirm.
                    _bitfield_offset = 0
                    _bitfield_basetype_bitsize = 0
                    _bitfield_basetype = None
                bit_offset = (_bitfield_counter - bit_size)
        else:
            if _bitfield_counter > 0:
                raise SyntaxError("Incomplete bitfield definition!")
            __field_type = _field_type
            bit_size = None
        if field_cnt == len(cls_annotations) - 1:
            if _bitfield_counter > 0:
                raise SyntaxError("Incomplete bitfield definition!")
        field_type = PyembcFieldType(_type=__field_type, bit_size=bit_size, bit_offset=bit_offset)
        # noinspection PyProtectedMember
        if not field_type.is_ctypes_type:
            raise TypeError(
                f'Invalid type for field "{field_name}". Only ctypes types can be used!'
            )
        if target is _PyembcTarget.UNION:
            # for unions, check if all sub-struct has the same endianness.
            if field_type.is_structure:
                if _first_endian is None:
                    _first_endian = _is_little_endian(field_type.base_type)
                else:
                    _endian = _is_little_endian(field_type.base_type)
                    if _endian != _first_endian:
                        raise TypeError('Only the same endianness is supported in a Union!')
        # save the field to our special attribute, and also for the ctypes _fields_ attribute
        _fields[field_name] = field_type
        if bit_size is None:
            _ctypes_fields.append((field_name, field_type.base_type))
        else:
            _ctypes_fields.append((field_name, field_type.base_type, bit_size))
    # set the ctypes special attributes, note, _pack_ must be set before _fields_!
    setattr(cls, _CTYPES_PACK_ATTR, pack)
    setattr(cls, _CTYPES_FIELDS_ATTR, _ctypes_fields)
    # save the endianness to us, because union streaming/building will need this
    setattr(cls, _ENDIAN, endian)
    # Add the generated methods
    # (each body below is source text exec'd into a real method by _add_method)
    # ---------------------------------------------------
    # __init__
    # ---------------------------------------------------
    docstring = "init method for the class"
    body = f"""
    fields = getattr(self, '{_FIELDS}')
    if args:
        if kwargs:
            raise TypeError('Either positional arguments, or keyword arguments must be given!')
        if len(args) == len(fields):
            for arg_val, field_name in zip(args, fields):
                setattr(self, field_name, arg_val)
        else:
            raise TypeError('Invalid number of arguments!')
    if kwargs:
        if args:
            raise TypeError('Either positional arguments, or keyword arguments must be given!')
        if len(kwargs) == len(fields):
            for field_name in fields:
                try:
                    arg_val = kwargs[field_name]
                except KeyError:
                    raise TypeError(f'Keyword argument {{field_name}} not specified!')
                setattr(self, field_name, arg_val)
        else:
            raise TypeError('Invalid number of keyword arguments!')
    """
    _add_method(
        cls=cls,
        name="__init__",
        args=('self', '*args', '**kwargs',),
        body=body,
        docstring=docstring,
        return_type=None
    )
    # ---------------------------------------------------
    # __len__
    # ---------------------------------------------------
    docstring = "Gets the byte length of the structure/union"
    body = f"""
    return ctypes.sizeof(self)
    """
    _add_method(
        cls=cls,
        name="__len__",
        args=('self',),
        body=body,
        docstring=docstring,
        return_type=int
    )
    # ---------------------------------------------------
    # stream()
    # ---------------------------------------------------
    docstring = "gets the bytestream of the instance"
    if issubclass(cls, ctypes.Union):
        body = f"""
    if cls.__pyembc_endian__ == sys.byteorder:
        return bytes(self)
    else:
        _bytearray = bytearray(self)
        _bytearray.reverse()
        return bytes(_bytearray)
    """
    else:
        body = f"""
    return bytes(self)
    """
    _add_method(
        cls=cls,
        name="stream",
        args=('self',),
        body=body,
        docstring=docstring,
        return_type=bytes,
        _globals={"sys": sys}
    )
    # ---------------------------------------------------
    # parse()
    # ---------------------------------------------------
    docstring = "parses the instance values from a bytestream"
    body = f"""
    if not isinstance(stream, bytes):
        raise TypeError("bytes required")
    ctypes.memmove(ctypes.addressof(self), stream, len(stream))
    """
    _add_method(
        cls=cls,
        name="parse",
        args=("self", "stream"),
        body=body,
        docstring=docstring,
        return_type=None
    )
    # ---------------------------------------------------
    # ccode()
    # ---------------------------------------------------
    docstring = "Generates the c representation of the instance. Returns a list of the c code lines."
    body = f"""
    code = []
    _typename = 'struct' if issubclass(cls, ctypes.Structure) else 'union'
    code.append(f"typedef {{_typename}} _tag_{{cls.__name__}} {{{{")
    print(' ')
    for field_name, field_type in cls.{_FIELDS}.items():
        _field = getattr(cls, field_name)
        if _is_pyembc_type(field_type):
            subcode = field_type.base_type.ccode()
            code = subcode + code
            code.append(f" {{field_type.base_type.__name__}} {{field_name}};")
        else:
            if field_type.is_bitfield:
                code.append(f" {{_c_type_name(field_type)}} {{field_name}} : {{field_type.bit_size}};")
            else:
                code.append(f" {{_c_type_name(field_type)}} {{field_name}};")
    code.append(f"}}}} {{cls.__name__}};")
    return code
    """
    _add_method(
        cls=cls,
        name="ccode",
        args=("cls",),
        body=body,
        docstring=docstring,
        return_type=Iterable[str],
        class_method=True
    )
    # ---------------------------------------------------
    # print_ccode()
    # ---------------------------------------------------
    docstring = "Generates the c representation of the instance and prints it to the stdout."
    body = """
    print('\\n'.join(cls.ccode()))
    """
    _add_method(
        cls=cls,
        name="print_ccode",
        args=("cls",),
        body=body,
        docstring=docstring,
        return_type=None,
        class_method=True
    )
    # ---------------------------------------------------
    # __repr__
    # ---------------------------------------------------
    docstring = "repr method for the instance"
    body = f"""
    field_count = len(self.{_FIELDS})
    s = f'{{cls.__name__}}('
    for i, (field_name, field_type) in enumerate(self.{_FIELDS}.items()):
        _field = getattr(self, field_name)
        if _is_pyembc_type(field_type):
            s += f'{{field_name}}={{repr(_field)}}'
        else:
            if field_type.is_bitfield:
                bitfield_info = f"@{{field_type.bit_size}}"
            else:
                bitfield_info = ''
            s += f'{{field_name}}:{{_short_type_name(field_type)}}{{bitfield_info}}={{_print_field_value(_field, field_type)}}'
        if i < field_count - 1:
            s += ', '
    s += ')'
    return s
    """
    _add_method(
        cls=cls,
        name="__repr__",
        args=('self',),
        body=body,
        docstring=docstring,
        return_type=str
    )
    # ---------------------------------------------------
    # __setattr__
    # ---------------------------------------------------
    docstring = "Attribute setter. Checks values."
    body = f"""
    field = self.__getattribute__(field_name)
    field_type = self.{_FIELDS}[field_name]
    if _is_pyembc_type(field_type):
        if not isinstance(value, field_type):
            raise TypeError(
                f'invalid value for field "{{field_name}}"! Must be of type {{field_type}}!'
            )
        super(cls, self).__setattr__(field_name, value)
    else:
        _check_value_for_type(field_type, value)
        if isinstance(value, ctypes._SimpleCData):
            value = value.value
        super(cls, self).__setattr__(field_name, value)
    """
    _add_method(
        cls=cls,
        name="__setattr__",
        args=('self', 'field_name', 'value',),
        body=body,
        docstring=docstring,
        return_type=None
    )
    return cls
def pyembc_struct(_cls=None, *, endian=sys.byteorder, pack: int = 4):
    """Decorator that turns an annotated class into a pyembc struct.

    Works both with parentheses (``@pyembc_struct(...)``) and without
    (``@pyembc_struct``).

    :param _cls: set automatically when the decorator is applied bare
    :param endian: endianness. "little" or "big"
    :param pack: packing of the fields.
    :return: the decorator (paren form) or the generated class (bare form)
    """
    def decorate(klass):
        return _generate_class(klass, _PyembcTarget.STRUCT, endian, pack)
    # bare usage hands us the class directly; parenthesised usage must
    # return the decorator itself
    return decorate if _cls is None else decorate(_cls)
def pyembc_union(_cls=None, *, endian=sys.byteorder):
    """Decorator that turns an annotated class into a pyembc union.

    Works both with parentheses (``@pyembc_union(...)``) and without
    (``@pyembc_union``).

    :param _cls: set automatically when the decorator is applied bare
    :param endian: endianness. "little" or "big"
    :return: the decorator (paren form) or the generated class (bare form)
    """
    # Non-native byte order is rejected up front: ctypes has no working
    # BigEndianUnion / LittleEndianUnion implementation.
    if endian != sys.byteorder:
        raise NotImplementedError(
            f"{endian} endian byteorder is currently not supported for Unions."
            f"This is because ctypes does not implement the BigEndianUnion and LittleEndianUnion despite its "
            f"documentation says so. Details:"
            f"https://stackoverflow.com/questions/49524952/bigendianunion-is-not-part-of-pythons-ctypes, "
            f"https://bugs.python.org/issue33178"
        )
    def decorate(klass):
        return _generate_class(klass, _PyembcTarget.UNION, endian)
    return decorate if _cls is None else decorate(_cls)
| true
|
954fe6d3523e3f7ca0a815a0770a6f1aeba61823
|
Python
|
wangyum/Anaconda
|
/lib/python2.7/site-packages/networkx/generators/tests/test_degree_seq.py
|
UTF-8
| 5,734
| 2.75
| 3
|
[
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
#!/usr/bin/env python
# Nose tests for networkx degree-sequence graph generators (legacy API:
# G.degree() returning a dict is pre-networkx-2.0 behaviour).
from nose.tools import *
import networkx
from networkx import *
from networkx.generators.degree_seq import *
from networkx.utils import uniform_sequence,powerlaw_sequence
# NOTE(review): several tests below use the name `nx`, which none of the
# imports above bind -- presumably resolved elsewhere at collection time;
# verify before running standalone.
def test_configuration_model_empty():
    # empty graph has empty degree sequence
    deg_seq=[]
    G=configuration_model(deg_seq)
    assert_equal(G.degree(), {})
def test_configuration_model():
    # the generated multigraph must realise the requested degree sequence
    deg_seq=[5,3,3,3,3,2,2,2,1,1,1]
    G=configuration_model(deg_seq,seed=12345678)
    assert_equal(sorted(G.degree().values(),reverse=True),
                 [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
    assert_equal(sorted(G.degree(range(len(deg_seq))).values(),
                        reverse=True),
                 [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1])
    # test that fixed seed delivers the same graph
    deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
    G1=configuration_model(deg_seq,seed=1000)
    G2=configuration_model(deg_seq,seed=1000)
    assert_true(is_isomorphic(G1,G2))
    G1=configuration_model(deg_seq,seed=10)
    G2=configuration_model(deg_seq,seed=10)
    assert_true(is_isomorphic(G1,G2))
@raises(NetworkXError)
def test_configuation_raise():
    # directed create_using is rejected
    z=[5,3,3,3,3,2,2,2,1,1,1]
    G = configuration_model(z, create_using=DiGraph())
@raises(NetworkXError)
def test_configuation_raise_odd():
    # odd degree sum is not graphical
    z=[5,3,3,3,3,2,2,2,1,1]
    G = configuration_model(z, create_using=DiGraph())
@raises(NetworkXError)
def test_directed_configuation_raise_unequal():
    # sum(in degrees) must equal sum(out degrees)
    zin = [5,3,3,3,3,2,2,2,1,1]
    zout = [5,3,3,3,3,2,2,2,1,2]
    G = directed_configuration_model(zin, zout)
def test_directed_configuation_mode():
    G = directed_configuration_model([],[],seed=0)
    assert_equal(len(G),0)
def test_expected_degree_graph_empty():
    # empty graph has empty degree sequence
    deg_seq=[]
    G=expected_degree_graph(deg_seq)
    assert_equal(G.degree(), {})
def test_expected_degree_graph():
    # test that fixed seed delivers the same graph
    deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
    G1=expected_degree_graph(deg_seq,seed=1000)
    G2=expected_degree_graph(deg_seq,seed=1000)
    assert_true(is_isomorphic(G1,G2))
    G1=expected_degree_graph(deg_seq,seed=10)
    G2=expected_degree_graph(deg_seq,seed=10)
    assert_true(is_isomorphic(G1,G2))
def test_expected_degree_graph_selfloops():
    deg_seq=[3,3,3,3,3,3,3,3,3,3,3,3]
    G1=expected_degree_graph(deg_seq,seed=1000, selfloops=False)
    G2=expected_degree_graph(deg_seq,seed=1000, selfloops=False)
    assert_true(is_isomorphic(G1,G2))
def test_expected_degree_graph_skew():
    deg_seq=[10,2,2,2,2]
    G1=expected_degree_graph(deg_seq,seed=1000)
    G2=expected_degree_graph(deg_seq,seed=1000)
    assert_true(is_isomorphic(G1,G2))
def test_havel_hakimi_construction():
    # empty input gives empty graph
    G = havel_hakimi_graph([])
    assert_equal(len(G),0)
    # non-graphical and non-integer sequences must raise
    z=[1000,3,3,3,3,2,2,2,1,1,1]
    assert_raises(networkx.exception.NetworkXError,
                  havel_hakimi_graph, z)
    z=["A",3,3,3,3,2,2,2,1,1,1]
    assert_raises(networkx.exception.NetworkXError,
                  havel_hakimi_graph, z)
    z=[5,4,3,3,3,2,2,2]
    G=havel_hakimi_graph(z)
    G=configuration_model(z)
    z=[6,5,4,4,2,1,1,1]
    assert_raises(networkx.exception.NetworkXError,
                  havel_hakimi_graph, z)
    z=[10,3,3,3,3,2,2,2,2,2,2]
    G=havel_hakimi_graph(z)
    assert_raises(networkx.exception.NetworkXError,
                  havel_hakimi_graph, z, create_using=DiGraph())
def test_directed_havel_hakimi():
    # Test range of valid directed degree sequences
    n, r = 100, 10
    p = 1.0 / r
    for i in range(r):
        G1 = nx.erdos_renyi_graph(n,p*(i+1),None,True)
        din = list(G1.in_degree().values())
        dout = list(G1.out_degree().values())
        G2 = nx.directed_havel_hakimi_graph(din, dout)
        assert_true(din == list(G2.in_degree().values()))
        assert_true(dout == list(G2.out_degree().values()))
    # Test non-graphical sequence
    dout = [1000,3,3,3,3,2,2,2,1,1,1]
    din=[103,102,102,102,102,102,102,102,102,102]
    assert_raises(nx.exception.NetworkXError,
                  nx.directed_havel_hakimi_graph, din, dout)
    # Test valid sequences
    dout=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    din=[2, 2, 2, 2, 2, 2, 2, 2, 0, 2]
    G2 = nx.directed_havel_hakimi_graph(din, dout)
    assert_true(din == list(G2.in_degree().values()))
    assert_true(dout == list(G2.out_degree().values()))
    # Test unequal sums
    din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
    assert_raises(nx.exception.NetworkXError,
                  nx.directed_havel_hakimi_graph, din, dout)
    # Test for negative values
    din=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, -2]
    assert_raises(nx.exception.NetworkXError,
                  nx.directed_havel_hakimi_graph, din, dout)
def test_degree_sequence_tree():
    z=[1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    G=degree_sequence_tree(z)
    assert_true(len(G.nodes())==len(z))
    # a tree has sum(degrees)/2 == n-1 edges
    assert_true(len(G.edges())==sum(z)/2)
    assert_raises(networkx.exception.NetworkXError,
                  degree_sequence_tree, z, create_using=DiGraph())
    z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    assert_raises(networkx.exception.NetworkXError,
                  degree_sequence_tree, z)
def test_random_degree_sequence_graph():
    d=[1,2,2,3]
    G = nx.random_degree_sequence_graph(d)
    assert_equal(d, list(G.degree().values()))
def test_random_degree_sequence_graph_raise():
    z=[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    assert_raises(networkx.exception.NetworkXUnfeasible,
                  random_degree_sequence_graph, z)
def test_random_degree_sequence_large():
    G = nx.fast_gnp_random_graph(100,0.1)
    d = G.degree().values()
    G = nx.random_degree_sequence_graph(d, seed=0)
    assert_equal(sorted(d), sorted(list(G.degree().values())))
| true
|
4386612ab017678dc3ef7880c2f3b2dbc7b4f2de
|
Python
|
BC-SECURITY/Empire
|
/empire/scripts/sync_starkiller.py
|
UTF-8
| 1,446
| 2.53125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
import logging
import subprocess
from pathlib import Path
from typing import Dict
log = logging.getLogger(__name__)
def sync_starkiller(empire_config):
    """
    Syncs the starkiller directory with what is in the config.
    Using dict access because this script should be able to run with minimal packages,
    not just within empire server.

    empire_config["starkiller"] is expected to provide: "directory", "repo",
    "ref" and optionally "auto_update".
    """
    starkiller_config = empire_config["starkiller"]
    starkiller_dir = starkiller_config["directory"]
    # first run: clone if the checkout does not exist yet
    if not Path(starkiller_dir).exists():
        log.info("Starkiller: directory not found. Cloning Starkiller")
        _clone_starkiller(starkiller_config, starkiller_dir)
    # optionally track the configured ref on every start
    if starkiller_config.get("auto_update"):
        log.info("Starkiller: Autoupdate on. Pulling latest ref.")
        _fetch_checkout_pull(
            starkiller_config["repo"], starkiller_config["ref"], starkiller_dir
        )
def _clone_starkiller(starkiller_config: Dict, starkiller_dir: str):
    """Clone the configured Starkiller repository into *starkiller_dir*."""
    clone_cmd = ["git", "clone", starkiller_config["repo"], starkiller_dir]
    # check=True: a failed clone must abort the sync
    subprocess.run(clone_cmd, check=True)
def _fetch_checkout_pull(remote_repo, ref, cwd):
    """Point origin at *remote_repo*, then fetch, check out and pull *ref*.

    Every git command runs inside *cwd* and raises
    ``subprocess.CalledProcessError`` on failure.
    """
    subprocess.run(
        ["git", "remote", "set-url", "origin", remote_repo],
        cwd=cwd,
        check=True,
    )
    subprocess.run(["git", "fetch"], cwd=cwd, check=True)
    subprocess.run(
        ["git", "checkout", ref],
        cwd=cwd,
        check=True,
    )
    # check=True was missing here: a failing pull was silently ignored while
    # every other git step in this helper aborts on error.
    subprocess.run(["git", "pull", "origin", ref], cwd=cwd, check=True)
| true
|
c23fdb43ac4ea21d0ecb205eca9b837296535837
|
Python
|
sgt-nagisa/AtCoder
|
/ABC006D.py
|
UTF-8
| 337
| 3.203125
| 3
|
[] |
no_license
|
# ABC 006 D
# Answer = N minus the length of the Longest Increasing Subsequence,
# computed with the patience-sorting / binary-search LIS technique.
from bisect import bisect_left
N = int(input())
A = [int(input()) for k in range(N)]
# Longest Increasing Subsequence
INF = float("inf")
# L[k] = smallest possible tail value of an increasing subsequence of length k
L = [INF for k in range(N+2)]
L[0] = -INF
for k in range(N):
    # replace the leftmost tail that is >= A[k]
    t = bisect_left(L,A[k])
    L[t] = A[k]
# the first index k still holding INF means the LIS has length k-1,
# so the minimum number of moves is N-(k-1) == N-k+1
for k in range(1,N+2):
    if L[k] == INF:
        print(N-k+1)
        exit(0)
| true
|
52060db05a736251f12c76a34f1c7d0c6d12fcb2
|
Python
|
Mimik1/study
|
/pec/lab7/process_raw.py
|
UTF-8
| 1,055
| 2.625
| 3
|
[] |
no_license
|
import matplotlib
matplotlib.use('Agg')  # headless backend: render straight to file
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from usbtmc import UsbtmcDevice
import sys
PREAMBULE_BYTES = 11                     # waveform preamble bytes to skip
DEVICE_PATH = sys.argv[1]                # path to the usbtmc device node
PARAMS = sys.stdin.read().splitlines()   # scale/offset factors, one per line
RAW_DATA_PATH = 'data.raw'
COMMAND = 'wav:data?'                    # SCPI waveform-data query
if __name__ == '__main__':
    print('PARAMS: {0}'.format(PARAMS))
    print('Obtaining data from device...')
    # dump the raw instrument response to disk
    with open(RAW_DATA_PATH, 'wb') as file:
        dev = UsbtmcDevice(DEVICE_PATH)
        file.write(dev.ask(COMMAND))
    print('Processing data...')
    with open(RAW_DATA_PATH, 'rb') as file:
        file.seek(PREAMBULE_BYTES)
        data = bytearray(file.read())
    # PARAMS[0]/[1] look like vertical origin/reference and PARAMS[2] the
    # vertical increment, PARAMS[3] the horizontal increment -- TODO confirm
    # against the instrument's preamble documentation.
    samples = [(x - float(PARAMS[0]) - float(PARAMS[1])) * float(PARAMS[2]) for x in data]
    plt.plot(samples)
    axes = plt.gca()
    axes.get_xaxis().set_major_formatter(
        FuncFormatter(lambda x, p: format(float(x * float(PARAMS[3]) / 100), ','))
    )
    axes.set_ylim([min(samples) - 50, max(samples) + 50])
    plt.savefig('{0}.png'.format(RAW_DATA_PATH))
    print('Plot saved to {0}.png'.format(RAW_DATA_PATH))
| true
|
25d2b9e50e85c95c6c1d1b4006cbb8f453c81786
|
Python
|
ryanbeales/logfiledurationprocessor
|
/processor.py
|
UTF-8
| 1,560
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
from collections import defaultdict, namedtuple
from glob import glob
from datetime import datetime
import re
filenames = r'*.log'    # glob pattern of the log files to process
start_string = 'Start'  # substring marking the start line of a request
end_string = 'End'      # substring marking the end line of a request
uuid_pattern = r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
# uuid -> {'start', 'end', 'duration'}; defaultdict with no factory
# behaves like a plain dict here
uuid_times = defaultdict()
def process_line(line):
    """Record the start/end timestamp of the UUID found on one log line.

    Lines are pipe-delimited with the timestamp in field 1; durations are
    stored in the module-level uuid_times dict once both ends are seen.
    """
    fields = line.split('|')
    timestamp = fields[1].strip()
    match = re.search(uuid_pattern, line)
    if not match:
        return
    identifer = '{uuid}'.format(uuid=match.group(0))
    if start_string in line:
        uuid_times[identifer] = {'start': timestamp, 'end': None, 'duration': None}
    elif end_string in line:
        if identifer not in uuid_times.keys():
            # This uuid has not seen a start time, probably in
            # a previous log file we don't have.
            return
        uuid_times[identifer]['end'] = timestamp
        fmt = '%H:%M:%S.%f'
        started = datetime.strptime(uuid_times[identifer]['start'], fmt)
        ended = datetime.strptime(uuid_times[identifer]['end'], fmt)
        uuid_times[identifer]['duration'] = ended - started
def process_files(filenames):
    """Feed every line of every file matching *filenames* to process_line."""
    for path in glob(filenames):
        with open(path, 'r') as handle:
            for line in handle.readlines():
                process_line(line)
if __name__ == '__main__':
    # Process the log files:
    process_files(filenames)
    # Print out the duration of each uuid:
    # NOTE(review): Python 2 print statement below -- this module cannot
    # even be parsed by Python 3 as written.
    for uuid in uuid_times:
        print uuid, uuid_times[uuid]['duration']
| true
|
f90b3acbfd92cc770a4dd58a33f1826141eb0e16
|
Python
|
rustylocks79/Figgie
|
/agent/pricers/custom_faded_pricer.py
|
UTF-8
| 6,314
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
from typing import Optional
import numpy as np
from agent.modular_agent import Pricer
from figgie import Figgie, Suit
class CustomFadedPricer(Pricer):
    """Pricer that fades its quotes between the market price and our utility.

    Bids live in [best_bid + 1, util] and asks in [util, best_ask - 1] (or
    util * 2 with no standing ask); a last traded price strictly inside the
    window tightens the bound.  The original nested decision trees returned
    the identical expression in every leaf of each utility band, so they are
    collapsed here to one equivalent fraction per band.
    """

    def get_bidding_price(self, figgie: Figgie, suit: Suit, utils: np.ndarray) -> Optional[int]:
        """Return the bid for *suit* given the current market state."""
        market = figgie.markets[suit.value]
        return self.get_bidding_price_internal(market.buying_price, utils[suit.value], market.last_price)

    def get_bidding_price_internal(self, market_buying_price, util, last_price):
        """Compute a bid between (best bid + 1) and our utility.

        With no standing bid the fraction of the window added above the
        minimum shrinks as utility grows; with a standing bid we always add
        a quarter of the window.
        """
        minimum = market_buying_price + 1 if market_buying_price is not None else 1
        maximum = util
        if last_price is not None and minimum < last_price < maximum:
            maximum = last_price
        assert minimum <= maximum
        if market_buying_price is None:
            # utility bands from the original decision tree
            if util <= 48.5:
                divisor = 4
            elif util <= 130.5:
                divisor = 8
            elif util <= 306.5:
                divisor = 12
            elif util <= 555.5:
                divisor = 16
            else:
                divisor = 32
        else:
            divisor = 4
        return (maximum - minimum) // divisor + minimum

    def get_asking_price(self, figgie: Figgie, suit: Suit, utils: np.ndarray) -> Optional[int]:
        """Return the ask for *suit* given the current market state."""
        market = figgie.markets[suit.value]
        return self.get_asking_price_internal(market.selling_price, utils[suit.value], market.last_price)

    def get_asking_price_internal(self, market_asking_price, util, last_price):
        """Compute an ask between our utility and (best ask - 1).

        NOTE: a falsy (zero) market ask is treated like "no ask" when
        picking the window ceiling, mirroring the original truthiness test.
        """
        minimum = util
        maximum = market_asking_price - 1 if market_asking_price else util * 2
        if last_price is not None and minimum < last_price < maximum:
            minimum = last_price
        assert minimum <= maximum
        if market_asking_price is None:
            # utility bands from the original decision tree
            if util <= 54.5:
                numerator, denominator = 3, 4
            elif util <= 316.5:
                numerator, denominator = 7, 8
            elif util <= 570.5:
                numerator, denominator = 11, 12
            else:
                numerator, denominator = 11, 16
        else:
            numerator, denominator = 1, 4
        return maximum - (numerator * (maximum - minimum)) // denominator

    def get_at_price(self, figgie: Figgie, suit: Suit, utils: np.ndarray) -> Optional[tuple]:
        """Return the (bid, ask) pair for *suit*."""
        market = figgie.markets[suit.value]
        return self.get_at_price_internal(market.buying_price, market.selling_price, utils[suit.value], market.last_price)

    def get_at_price_internal(self, market_bidding_price, market_asking_price, util, last_price) -> Optional[tuple]:
        """Quote both sides, separating them by one tick when they collide.

        Bug fix: the original guard ``bidding_price > market_bidding_price
        if market_bidding_price is not None else 1`` parsed as a conditional
        expression whose else-arm was the constant 1, so with no market bid
        the bid was always decremented.  The parentheses below apply the
        intended floor/ceiling comparison.
        """
        bidding_price = self.get_bidding_price_internal(market_bidding_price, util, last_price)
        asking_price = self.get_asking_price_internal(market_asking_price, util, last_price)
        if asking_price == bidding_price:
            bid_floor = market_bidding_price if market_bidding_price is not None else 1
            ask_ceiling = market_asking_price if market_asking_price is not None else util * 2
            if bidding_price > bid_floor:
                bidding_price -= 1
            elif asking_price < ask_ceiling:
                asking_price += 1
            else:
                assert False, 'Market chooser should not select action where min util and max equal '
        return bidding_price, asking_price
| true
|
8c15d73da8753cabe93e311099007ee4aa764313
|
Python
|
debanjan611/Library-Management-System
|
/Python_prEdit.py
|
UTF-8
| 2,076
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
def edit():
    """Interactively edit one field (name, author or stock) of a book.

    Prompts on stdin, updates the ``Main`` table of Library.sqlite and
    prints the table before and after the change.  An invalid menu choice
    previously crashed with NameError on ``sql_update_query``; it now exits
    cleanly without touching the database.
    """
    import sqlite3
    from tabulate import tabulate
    con=sqlite3.connect('Library.sqlite')
    cur=con.cursor()
    print("\n\n**********  PRINTING THE CURRENT LIBRARY DATABASE ***********\n\n")
    cur.execute(" SELECT Book_Name, Author, Quantity, Book_id FROM Main ")
    curr_lib=cur.fetchall()
    print(tabulate(curr_lib,headers=['Book Name', 'Author', 'In Stock', 'Book ID'],tablefmt='psql'))
    BID=int(input("Enter the Book ID for which the details are to be altered: "))
    # parameterised query instead of string concatenation
    cur.execute("SELECT * FROM Main WHERE Book_id = ?", (BID,))
    curr_lib=cur.fetchall()
    print("\n")
    print(tabulate(curr_lib,headers=['Book Name', 'Author', 'In Stock', 'Book ID'],tablefmt='psql'))
    print("\n")
    print("Enter which field you would like to Edit: \n")
    print("1. Book Name")
    print("2. Author")
    print("3. Stock")
    ch=int(input("\nEnter your choice from above options: "))
    if ch==1:
        Bname=input("Enter the new Name of the Book: ")
        sql_update_query = """UPDATE Main SET Book_Name = ? where Book_id = ?"""
        data=(Bname,BID)
    elif ch==2:
        Bauth=input("Enter the new authour of the book: ")
        sql_update_query = """UPDATE Main SET Author = ? where Book_id = ?"""
        data = (Bauth, BID)
    elif ch==3:
        Bquant=int(input("Enter the new quantity of the Book: "))
        sql_update_query = """UPDATE Main SET Quantity = ? where Book_id = ?"""
        data=(Bquant,BID)
    else:
        # invalid choice: bail out instead of crashing on an undefined query
        print("\nInvalid choice, no changes made.")
        con.close()
        return
    cur.execute(sql_update_query, data)
    con.commit()
    print("\n Record Updated successfully")
    print("\n\n*********  PRINTING THE UPDATED LIBRARY **********\n\n")
    cur.execute(" SELECT Book_name, Quantity, Author, Book_id FROM Main ")
    curr_lib=cur.fetchall()
    print(tabulate(curr_lib,headers=['Book Name','In Stock','Author','Book ID'],tablefmt='psql'))
    con.close()
| true
|
8306f28381c1b9433b9273dc2101ad2acd9de478
|
Python
|
marlboromoo/basinboa
|
/basinboa/d20/level.py
|
UTF-8
| 1,772
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
"""
Level system
"""
import random
EXP_BASE = 10  # exp needed to go from level 1 to level 2
EXP_STEP = 15  # per-level increment multiplier (see Level.require_exp table)
class Level(object):
    """Experience-driven level: the level is always derived from self.exp."""
    def __init__(self):
        super(Level, self).__init__()
        # accumulated experience points; there is no stored level attribute
        self.exp = 0
    def increase(self):
        """Raise the level by one.

        Bug fix: the original did ``self.level += 1`` but no ``self.level``
        attribute ever exists, so it always raised AttributeError.  Since the
        level is derived from exp, granting the next level's exp is the
        consistent way to level up.
        """
        self.set(self.get() + 1)
    def get(self):
        """Return the level implied by the current exp (minimum 1)."""
        level = 1
        while self.exp >= self.require_exp(level):
            level += 1
        return level - 1 if level > 1 else 1
    def set(self, level):
        """Set exp to exactly the amount required for *level*."""
        self.exp = self.require_exp(level)
    def increase_exp(self, value):
        """Add *value* experience points."""
        self.exp += value
    def decrease_exp(self, value):
        """Remove *value* experience points."""
        self.exp -= value
    def require_exp(self, level):
        """Exp required to reach *level* (closed form of the original loop).

        level experience points
        2     10
        3     40
        4     85
        5     145
        ...
        20    2845
        """
        if level == 1:
            return 0
        # EXP_BASE + EXP_STEP * (2 + 3 + ... + (level - 1))
        return EXP_BASE + EXP_STEP * (level * (level - 1) // 2 - 1)
if __name__ == '__main__':
    # NOTE(review): Python 2 print statements below -- this module cannot
    # even be parsed by Python 3 as written.
    level = Level()
    i = 1
    while i <= 20:
        print "%s: %s" % (i, level.require_exp(i))
        i += 1
    level.increase_exp(random.randint(1, 2845))
    print "exp:%s = level:%s" % (level.exp, level.get())
| true
|
a9891ed84a3a7d1940f5933dcb61b7105b36262a
|
Python
|
KomissarovSV/Algorithms
|
/Map/task1.py
|
UTF-8
| 2,374
| 3.6875
| 4
|
[] |
no_license
|
class Node(object):
    """One key/value entry stored in a hash-table slot."""

    def __init__(self, key, value):
        self.key = key
        self.value = value

    def getKey(self):
        """Return the stored key."""
        return self.key

    def getValue(self):
        """Return the stored value."""
        return self.value
class MapIterator():
    """Iterator over the values in a Map's slot array, skipping empty slots."""

    def __init__(self, mas):
        self.mas = mas
        self.current = 0
        self.size = len(mas)

    def __next__(self):
        # advance past empty (None) slots; loop replaces the original
        # recursion with identical external behaviour
        while self.current < self.size:
            slot = self.mas[self.current]
            self.current += 1
            if slot is not None:
                return slot.getValue()
        raise StopIteration
class Map(object):
    """Open-addressing (linear probing) hash map of Node entries.

    Fixes over the original:
    - key comparisons use ``==`` instead of ``is`` (identity comparison
      broke lookups for strings and large ints),
    - ``put`` updates an existing key in place instead of inserting a
      duplicate node,
    - ``resize`` rehashes with ``hash(key)`` and probes for collisions
      (it previously indexed with the raw key and silently overwrote nodes),
    - ``delete`` of a missing key no longer corrupts ``count``, and after a
      removal the rest of the probe cluster is re-inserted so later lookups
      still find entries that had collided past the freed slot.
    """
    def __init__(self, size):
        self.mas = [None] * size  # backing slot array
        self.size = size
        self.count = 0            # number of occupied slots
    def put(self, key, value):
        """Insert or update *key*, growing the table above 0.7 load factor."""
        if self.count / self.size > 0.7:
            self.resize()
        idx = hash(key) % self.size
        while self.mas[idx] is not None:
            if self.mas[idx].getKey() == key:
                # existing key: overwrite in place, count unchanged
                self.mas[idx] = Node(key, value)
                return
            idx = (idx + 1) % self.size
        self.mas[idx] = Node(key, value)
        self.count += 1
    def delete(self, key):
        """Remove *key* if present; repairs the probe chain afterwards."""
        idx = hash(key) % self.size
        probed = 0
        while self.mas[idx] is not None and probed < self.size:
            if self.mas[idx].getKey() == key:
                self.mas[idx] = None
                self.count -= 1
                # re-insert the remainder of the cluster so linear probing
                # can still reach entries that collided past the hole
                idx = (idx + 1) % self.size
                while self.mas[idx] is not None:
                    node = self.mas[idx]
                    self.mas[idx] = None
                    self.count -= 1
                    self.put(node.getKey(), node.getValue())
                    idx = (idx + 1) % self.size
                return
            idx = (idx + 1) % self.size
            probed += 1
        # key absent: nothing to do
    def get(self, key):
        """Return the value stored for *key*, or None when absent."""
        idx = hash(key) % self.size
        probed = 0
        while self.mas[idx] is not None and probed < self.size:
            if self.mas[idx].getKey() == key:
                return self.mas[idx].getValue()
            idx = (idx + 1) % self.size
            probed += 1
        return None
    def __iter__(self):
        return MapIterator(self.mas)
    def resize(self):
        """Double the table and re-probe every node with hash(key)."""
        newSize = self.size * 2
        newMas = [None] * newSize
        for node in self.mas:
            if node is not None:
                idx = hash(node.getKey()) % newSize
                while newMas[idx] is not None:
                    idx = (idx + 1) % newSize
                newMas[idx] = node
        self.mas = newMas
        self.size = newSize
# demo: exercise put/get/delete and iteration
# NOTE(review): the name `map` shadows the builtin map() from here on.
map = Map(1)
map.put(3,"string 3")
map.put(6,"string 6")
map.put(7,"string 7")
map.put(10,"string 10")
print(map.get(3))
print(map.get(6))
map.delete(7)
print(map.get(7))  # deleted key -> None
print(map.get(10))
for item in map:
    print(item)
| true
|
fd103e8213de34dbad57ae9b84185e75be660280
|
Python
|
AndreiKorzhun/CS50x
|
/pset6_Python/cash/cash.py
|
UTF-8
| 634
| 4.09375
| 4
|
[] |
no_license
|
from cs50 import get_float
from math import floor
def main():
    """Print the minimum number of US coins needed to make the change owed."""
    # dollars -> whole cents; round() guards against float representation error
    cents = round(money() * 100)
    coins = 0
    # greedy change-making: quarters, dimes, nickels, pennies
    for denomination in [25, 10, 5, 1]:
        used, cents = divmod(cents, denomination)
        coins += used
    # print minimum number of coins
    print(coins)
# getting a positive number from the user
def money():
    """Prompt repeatedly until a positive dollar amount is entered."""
    while True:
        dollars = get_float("Change owed: ")
        if dollars > 0:
            return dollars
main()
| true
|
9e6ee55e9af00bd85696f7f4ce601ff73dccb609
|
Python
|
Rohankhadka33/practiceclass
|
/class(d2,p1).py
|
UTF-8
| 418
| 3.453125
| 3
|
[] |
no_license
|
''' The cost of the house is $1M. If the buyer has good credit, they need to put down 10% otherwise they need to put
down 20%. print the down payment.'''


def down_payment(cost, good_credit):
    """Return the required down payment: 10% of *cost* with good credit, else 20%."""
    rate = 10 / 100 if good_credit else 20 / 100
    return cost * rate


cost_of_house = 1000000
credit = True
# Bug fix: the original printed cost minus the down payment (900000.0),
# although the task asks for the down payment itself.
print(down_payment(cost_of_house, credit))
| true
|
d6c57e2b0e9d3ae3fec291953f6e3c5dbf2a0c89
|
Python
|
Shimpa11/PythonTutorial
|
/linkedlist.py
|
UTF-8
| 1,658
| 4.21875
| 4
|
[] |
no_license
|
"""
1.Think of an object
song:title,artist,duration
"""
# 2. create its class
class Song:
    """A node in a circular doubly linked playlist: title, artist, duration."""
    def __init__(self,title,artist,duration):
        self.title=title
        self.artist = artist
        self.duration=duration
    # class-level defaults; each instance is re-linked later via attribute
    # assignment (song1.nextSong = song2, ...)
    nextSong=None
    previousSong=None
    def showSong(self):
        """Print the song as a !!!title!!! artist!!!duration!!! banner."""
        print("!!!{}!!! {}!!!{}!!!".format(self.title,self.artist,self.duration))
# from the class create real objects
# NOTE(review): durations are mixed floats ("2.56") and strings ("2:56").
song1=Song("Duniyaa", "Akhil", 2.56)
song2=Song("Pachtaoge" , "Arijit Singh", 2.56)
song3=Song("Tera Ban Jaunga" ,"Kabir Singh", "2:56")
song4=Song("Vaaste" , "Dhvani", "2:56")
song5=Song("Tum Hi Aana" , "Payal Dev ","2:56")
songs=[song1,song2,song3,song4,song5]
for song in songs:
    print(song)
# reference copy operation: wire the five songs into a circular
# doubly linked list (song5.next wraps back to song1)
song1.nextSong=song2
song2.nextSong=song3
song3.nextSong=song4
song4.nextSong=song5
song5.nextSong=song1
song1.previousSong=song5
song2.previousSong=song1
song3.previousSong=song2
song4.previousSong=song3
song5.previousSong=song4
print("Iterating Forward")
# walk forward until the link wraps back to the starting song
temp=song1
while temp.nextSong!=song1:
    print("------------")
    temp.showSong()
    temp=temp.nextSong
print("--------------------")
# showing the last song
temp.showSong()
print("-----------------------")
print("Iterating backward")
# walk backward until the link wraps back to the starting song
current=song5
while current.previousSong!=song5:
    print("-------------")
    current.showSong()
    current=current.previousSong
print("--------------")
current.showSong()
print("--------------")
|
866f891befdaa67b4096bdc4a6e64a8bf315d2d6
|
Python
|
mlrequest/sklearn-json
|
/test/test_classification.py
|
UTF-8
| 7,605
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
from sklearn.datasets import make_classification
from sklearn.feature_extraction import FeatureHasher
from sklearn import svm, discriminant_analysis
from sklearn.linear_model import LogisticRegression, Perceptron
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import BernoulliNB, GaussianNB, MultinomialNB, ComplementNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
import unittest
import random
import numpy as np
from numpy import testing
import sklearn_json as skljson
class TestAPI(unittest.TestCase):
    """Round-trip tests for sklearn_json: fit a classifier, serialize it
    (to dict or to a JSON file), deserialize, and assert the deserialized
    model predicts exactly like the original -- for dense and sparse data."""
    def setUp(self):
        # dense 3-feature / 3-class problem
        self.X, self.y = make_classification(n_samples=50, n_features=3, n_classes=3, n_informative=3, n_redundant=0, random_state=0, shuffle=False)
        # hashed sparse variant of the same dimensionality
        feature_hasher = FeatureHasher(n_features=3)
        features = []
        for i in range(0, 100):
            features.append({'a': random.randint(0, 2), 'b': random.randint(3, 5), 'c': random.randint(6, 8)})
        self.y_sparse = [random.randint(0, 2) for i in range(0, 100)]
        self.X_sparse = feature_hasher.transform(features)
    def check_model(self, model, abs=False):
        # Given: abs=True for estimators requiring non-negative inputs
        if abs:
            model.fit(np.absolute(self.X), self.y)
        else:
            model.fit(self.X, self.y)
        # When
        serialized_model = skljson.to_dict(model)
        deserialized_model = skljson.from_dict(serialized_model)
        # Then
        expected_predictions = model.predict(self.X)
        actual_predictions = deserialized_model.predict(self.X)
        testing.assert_array_equal(expected_predictions, actual_predictions)
    def check_sparse_model(self, model, abs=False):
        # Given: fit on the sparse matrix
        if abs:
            model.fit(np.absolute(self.X_sparse), self.y_sparse)
        else:
            model.fit(self.X_sparse, self.y_sparse)
        # When
        serialized_model = skljson.to_dict(model)
        deserialized_model = skljson.from_dict(serialized_model)
        # Then
        # NOTE(review): prediction is done on the dense self.X even though the
        # fit used self.X_sparse -- works dimensionally, but confirm whether
        # predicting on self.X_sparse was intended.
        expected_predictions = model.predict(self.X)
        actual_predictions = deserialized_model.predict(self.X)
        testing.assert_array_equal(expected_predictions, actual_predictions)
    def check_model_json(self, model, model_name, abs=False):
        # Given
        if abs:
            model.fit(np.absolute(self.X), self.y)
        else:
            model.fit(self.X, self.y)
        # When: round-trip through a JSON file on disk
        serialized_model = skljson.to_json(model, model_name)
        deserialized_model = skljson.from_json(model_name)
        # Then
        expected_predictions = model.predict(self.X)
        actual_predictions = deserialized_model.predict(self.X)
        testing.assert_array_equal(expected_predictions, actual_predictions)
    def check_sparse_model_json(self, model, model_name, abs=False):
        # Given
        if abs:
            model.fit(np.absolute(self.X_sparse), self.y_sparse)
        else:
            model.fit(self.X_sparse, self.y_sparse)
        # When: round-trip through a JSON file on disk
        serialized_model = skljson.to_json(model, model_name)
        deserialized_model = skljson.from_json(model_name)
        # Then (see NOTE in check_sparse_model about dense predict input)
        expected_predictions = model.predict(self.X)
        actual_predictions = deserialized_model.predict(self.X)
        testing.assert_array_equal(expected_predictions, actual_predictions)
    def test_bernoulli_nb(self):
        self.check_model(BernoulliNB())
        self.check_sparse_model(BernoulliNB())
        model_name = 'bernoulli-nb.json'
        self.check_model_json(BernoulliNB(), model_name)
        self.check_sparse_model_json(BernoulliNB(), model_name)
    def test_guassian_nb(self):
        self.check_model(GaussianNB())
        model_name = 'gaussian-nb.json'
        self.check_model_json(GaussianNB(), model_name)
        # No sklearn implementation for sparse matrix
    def test_multinomial_nb(self):
        # abs=True: multinomial NB requires non-negative features
        self.check_model(MultinomialNB(), abs=True)
        self.check_sparse_model(MultinomialNB(), abs=True)
        model_name = 'multinomial-nb.json'
        self.check_model_json(MultinomialNB(), model_name, abs=True)
        self.check_sparse_model_json(MultinomialNB(), model_name, abs=True)
    def test_complement_nb(self):
        self.check_model(ComplementNB(), abs=True)
        model_name = 'complement-nb.json'
        self.check_model_json(ComplementNB(), model_name, abs=True)
        # No sklearn implementation for sparse matrix
    def test_logistic_regression(self):
        self.check_model(LogisticRegression())
        self.check_sparse_model(LogisticRegression())
        model_name = 'lr.json'
        self.check_model_json(LogisticRegression(), model_name)
        self.check_sparse_model_json(LogisticRegression(), model_name)
    def test_lda(self):
        self.check_model(discriminant_analysis.LinearDiscriminantAnalysis())
        model_name = 'lda.json'
        self.check_model_json(discriminant_analysis.LinearDiscriminantAnalysis(), model_name)
        # No sklearn implementation for sparse matrix
    def test_qda(self):
        self.check_model(discriminant_analysis.QuadraticDiscriminantAnalysis())
        model_name = 'qda.json'
        self.check_model_json(discriminant_analysis.QuadraticDiscriminantAnalysis(), model_name)
        # No sklearn implementation for sparse matrix
    def test_svm(self):
        self.check_model(svm.SVC(gamma=0.001, C=100., kernel='linear'))
        self.check_sparse_model(svm.SVC(gamma=0.001, C=100., kernel='linear'))
        model_name = 'svm.json'
        self.check_model_json(svm.SVC(), model_name)
        self.check_sparse_model_json(svm.SVC(), model_name)
    def test_decision_tree(self):
        self.check_model(DecisionTreeClassifier())
        self.check_sparse_model(DecisionTreeClassifier())
        model_name = 'dt.json'
        self.check_model_json(DecisionTreeClassifier(), model_name)
        self.check_sparse_model_json(DecisionTreeClassifier(), model_name)
    def test_gradient_boosting(self):
        self.check_model(GradientBoostingClassifier(n_estimators=25, learning_rate=1.0))
        self.check_sparse_model(GradientBoostingClassifier(n_estimators=25, learning_rate=1.0))
        model_name = 'gb.json'
        self.check_model_json(GradientBoostingClassifier(), model_name)
        self.check_sparse_model_json(GradientBoostingClassifier(), model_name)
    def test_random_forest(self):
        self.check_model(RandomForestClassifier(n_estimators=10, max_depth=5, random_state=0))
        self.check_sparse_model(RandomForestClassifier(n_estimators=10, max_depth=5, random_state=0))
        model_name = 'rf.json'
        self.check_model_json(RandomForestClassifier(), model_name)
        self.check_sparse_model_json(RandomForestClassifier(), model_name)
    def test_perceptron(self):
        self.check_model(Perceptron())
        self.check_sparse_model(Perceptron())
        model_name = 'perceptron.json'
        self.check_model_json(Perceptron(), model_name)
        self.check_sparse_model_json(Perceptron(), model_name)
    def test_mlp(self):
        self.check_model(MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1))
        self.check_sparse_model(MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1))
        model_name = 'mlp.json'
        self.check_model_json(MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1), model_name)
        self.check_sparse_model_json(MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1), model_name)
|
88d9f2f8f4f6123c571c7ce43b4a1f7c4ebde0c0
|
Python
|
rafalacerda1530/Python
|
/atividades/Seção 8/Atividade 2.py
|
UTF-8
| 637
| 4.09375
| 4
|
[] |
no_license
|
"""
Uma função que recebe a data atual (dia, mês, ano) e exiba na tela
em formato de texto
"""
meses = {1: 'Janeiro', 2: 'Fevereiro', 3: 'Março,', 4: 'Abril', 5: 'Maio',
6: 'Junho', 7: 'Julho', 8: 'Agosto', 9: 'Setembro', 10: 'Outubro',
11: 'Novembro', 12: 'Dezembro'}
def data(di, me, an):
global meses
if me in meses:
return f'{di} de {meses.get(me)} de {an}'
# Nessa chave estou pesquisando no dicionario meses na parte de chave o que foi digitado em "me"
d = input('Digite o dia: ')
m = int(input('Digite o mês: '))
a = int(input('Digite o ano: '))
print(data(di=d, me=m, an=a))
| true
|
33d7b59891ebbb3da0d110b90840409b31856c5e
|
Python
|
marina90/Distributed-System-Programming-project
|
/vectors_pair_mapper.py
|
UTF-8
| 961
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sys
import ast
data=[]
def main():
with open ("input3.txt") as file:
for line in file:
if not line == "" and not line == '\t\n' and not line == "\n" and not line == " " :
line=line.strip()
data.append(line.split('\t'))
mapper()
def mapper():
    """Read word/vector lines from stdin and emit joined records for pairs in `data`.

    Each stdin line is expected to be '<word>\\t<python-list-literal>'.
    Emits '<w1>\\t<w2>\\t<extra>\\t<sorted-vector>' for every record in
    `data` whose first or second field equals the word.
    """
    for line in sys.stdin:
        # Guard clause: skip blank / whitespace-only separator lines.
        if line in ("", "\t\n", "\n", " "):
            continue
        split_line = line.split('\t')
        word_value = split_line[0]
        # Parse the textual list safely (no eval of arbitrary code).
        words_vector = ast.literal_eval(split_line[1])
        # NOTE(review): assumes the literal holds at least 4 sortable
        # sub-lists — confirm against the upstream record format.
        for i in range(4):
            words_vector[i].sort()
        for item in data:
            if item[0] == word_value or item[1] == word_value:
                result = [item[0], item[1], item[2].strip(), str(words_vector)]
                # Fix: was a Python-2-only `print` statement; the
                # single-argument call form is identical on Python 2 and 3.
                print('\t'.join(result))
# Script entry point: build the lookup table, then stream-map stdin.
if __name__ == '__main__':
    main()
| true
|
fe546601da6402694b4ae780b766d51bd6510d70
|
Python
|
Nordenbox/Nordenbox_Python_Fundmental
|
/ListenAndSpell.py
|
UTF-8
| 1,533
| 3.453125
| 3
|
[] |
no_license
|
"""Listening-and-spelling practice program (made for my daughter)."""
import os
import random
import subprocess

# Folder holding the US-pronunciation mp3 files to play.
file_path = (
    r"//Users/nordenbox/Documents/GitHub/NordenboxPython/Nordenbox_Python_Fundmental/Speech_US"
)
# Collect the full path of every file in the folder, then shuffle so each
# run presents the words in a different order.
file_list = [os.path.join(file_path, name) for name in os.listdir(file_path)]
random.shuffle(file_list)
print("---------\n我们开始吧!\n--------------")
print("########### LITSEN & SPELL #############")
for track in file_list:
    # Play the word once up front via the macOS `afplay` command.
    subprocess.call(["afplay", track])
    # The expected spelling is the file name without directory or extension.
    expected = os.path.basename(os.path.splitext(track)[0])
    while True:
        answer = input("输入听到的单词:\n")
        if answer == expected:
            print("Yes! 下一个")
            break
        # Wrong spelling: tell her and replay the word, then ask again.
        print("no!!! 再来一遍!")
        subprocess.call(["afplay", track])
print("-----------\nOK! We Finished\n-------------")
| true
|
4e002114ea420b2980ee9f0b815144126173c2c0
|
Python
|
bunnymonster/personal-code-bits
|
/python/learningExercises/ControlFlowLoops.py
|
UTF-8
| 1,795
| 4.3125
| 4
|
[] |
no_license
|
#
# For loops
#
# A for loop iterates over the items of a sequence in order.
words = ['fox','flop','number','hungry','listing']
for w in words:
    print(w,len(w))
# To modify a sequence while iterating over it, first make a copy via a slice.
for w in words[:]: #loop over a slice copy of the entire list
    if len(w) > 6:
        words.insert(0,w) #insert words longer than 6 chars at the front of the list
print(words)
#
# Range function
#
# Basic range usage: 0 up to (but not including) 5.
for i in range(5):
    print(i)
print(range(5,10)) #range(a,b) includes a but excludes b
print(range(0,10,3)) #an optional 3rd argument sets the step size
print(range(-10,-100,-30)) #the step may be negative
# To iterate over the indices of a sequence, combine range and len
# (enumerate is the more idiomatic alternative).
a = ['Mar','Bez','lop','Kinlin','blarg']
for i in range(len(a)):
    #expected output:
    #0 Mar
    #1 Bez
    #2 lop
    #3 Kinlin
    #4 blarg
    print(i,a[i])
# Printing a range object shows 'range(10)', not the numbers themselves.
print(range(10))
list(range(5)) # materialize range(5) into a list
#
# Break, Continue, Pass
#
# break exits the innermost enclosing loop.
for n in range(2,25):
    for x in range(2,n):
        if n % x == 0:
            print(n, 'equals', x, '*', n//x)
            break
    else: #note: this else belongs to the for loop, not the if statement;
        #it runs only when the loop completed without hitting break,
        #i.e. no factor was found
        print(n, 'is a prime number')
# continue skips straight to the next iteration of the loop.
for num in range(2,25):
    if num % 2 == 0:
        print("Found an even number", num)
        continue
    print("Found a number", num)
#
# Pass
#
# WARNING: this loop never terminates (busy-wait until Ctrl-C) — intentional demo.
while True:
    pass #busy wait for ctrl-c
#pass does nothing.
#It is used when a statement is syntactically required but no action is needed,
#e.g. as a placeholder for functions that are not yet implemented.
| true
|
28331afc437a6b63c8210e46934c6658db167e54
|
Python
|
TomAndy/python_qa
|
/lesson4/task2.py
|
UTF-8
| 345
| 2.53125
| 3
|
[] |
no_license
|
import json
from pprint import pprint
# Input file with the bug records and the output file for the rewritten copy.
filename = 'bugs.json'
new_filename = 'bugs_new.json'


def main():
    """Read the bug list from `filename`, set every bug's Owner to 'qa5',
    and write the result to `new_filename`.

    Assumes the input JSON is a list of objects that each have (or accept)
    an 'Owner' key.
    """
    with open(filename) as data_file:
        bugs = json.load(data_file)
    for bug in bugs:  # descriptive name instead of the opaque 'aaa'
        bug['Owner'] = 'qa5'
    with open(new_filename, 'w') as out_file:
        # json.dump streams directly to the file; same output as dumps()+write().
        json.dump(bugs, out_file)
# Run the owner rewrite when executed as a script.
if __name__ == '__main__':
    main()
| true
|