blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
16b60954ed5364a8f89003a9df00db7e15d58325 | Python | AshwinCS/Game | /modules/button.py | UTF-8 | 4,725 | 3.140625 | 3 | [] | no_license | """Module for the button class and the button set."""
import logging
import pygame as pg
from . import screen as sc
class ButtonSet(object):
    """Drives a collection of Button objects as a single unit."""

    def __init__(self, buttons):
        """Store the buttons and assign each one a sequential id."""
        self.buttons = buttons
        self.highlighted_id = None
        self.active = True
        for index, button in enumerate(self.buttons):
            button.id = index
        # Next id that would be handed out if another button were added.
        self.next_id = len(self.buttons)

    def __iter__(self):
        """Iterate over the wrapped buttons."""
        return iter(self.buttons)

    @property
    def highlighted_button(self):
        """The currently highlighted button, or the set itself when none is."""
        if self.highlighted_id is None:
            return self
        return self.buttons[self.highlighted_id]

    def toggle(self):
        """Flip the set's active flag and propagate it to every button."""
        self.active = not self.active
        for button in self.buttons:
            button.active = self.active

    def scale(self, multiplier):
        """Scale every button in the set by `multiplier`."""
        for button in self.buttons:
            button.scale(multiplier)

    def draw(self, scroll):
        """Draw each active button, marking the highlighted one."""
        highlighted = self.highlighted_button
        for button in self.buttons:
            if not button.active:
                continue
            button.draw(scroll, highlighted=(button is highlighted))

    def press(self):
        """Press the button under the cursor, or clear the highlight if none."""
        target = self.check()
        if target is None:
            self.highlighted_id = None
        else:
            self.buttons[target].press()

    def highlight(self):
        """Remember which button is currently under the cursor, if any."""
        target = self.check()
        if target is not None:
            self.highlighted_id = target

    def check(self):
        """Return the id of the first active button under the mouse, else None."""
        for button in self.buttons:
            if button.active and button.check(pg.mouse.get_pos()):
                return button.id
        return None

    def clear(self):
        """Ask every button to clear itself from the screen."""
        for button in self.buttons:
            button.clear()
class Button(object):
    """A clickable rectangular button rendered through the screen draw queue."""
    def __init__(self, rect, text, strategy):
        """Set instance variables.

        rect -- pygame Rect giving position and size.
        text -- label rendered on the button surface.
        strategy -- callable invoked when the button is pressed.
        """
        self.text_color = (1, 1, 1)
        self.bg_color = (255, 255, 255)
        self.highlight_color = (0, 0, 255)
        self.rect = rect
        self.text = text
        self.press = strategy
        # Pos is set in the draw function.
        self.pos = (-1000, -1000)
        self.id = 0
        self.active = True
        self.font = None
        self.surf = None
        self.render()
    def render(self):
        """Pick a font size from the rect and pre-render the button surface."""
        font_size = int(max(self.rect.width / 6, self.rect.height / 3))
        self.font = pg.font.Font(None, font_size)
        text_surf = self.font.render(
            self.text, 0, self.text_color, self.bg_color)
        # Centre the label on the button surface.
        text_pos = (self.rect.width/2 - text_surf.get_width()/2,
                    self.rect.height/2 - text_surf.get_height()/2)
        self.surf = pg.Surface(self.rect.size)
        self.surf.fill(self.bg_color)
        self.surf.blit(text_surf, text_pos)
        # Thin border in the highlight colour around the whole button.
        pg.draw.rect(self.surf, self.highlight_color,
                     pg.Rect((0, 0), self.rect.size), 1)
    def scale(self, multiplier):
        """Scale the button's position and size by `multiplier`, then re-render."""
        self.rect.x *= multiplier
        self.rect.y *= multiplier
        self.rect.width *= multiplier
        self.rect.height *= multiplier
        self.render()
    def draw(self, scroll, highlighted=False):
        """
        Queue 'self.surf' for drawing at the button's scrolled position.
        If 'highlighted' is true a blue border is queued on top.
        """
        self.pos = (self.rect.x, self.rect.y + scroll)
        sc.draw_queue.append(dict(layer=10, surf=self.surf, pos=self.pos))
        if highlighted:
            # NOTE(review): the highlight border uses self.rect (no scroll
            # offset) while the surface uses self.pos — verify that's intended.
            sc.draw_queue.append(dict(
                layer=11, func=pg.draw.rect,
                args=(sc.screen, self.highlight_color, self.rect, 2)))
    def clear(self):
        """Queue a background-coloured rect over the button's last drawn area."""
        clear_rect = pg.Rect(
            self.pos, (self.rect.width + 2, self.rect.height + 2))
        sc.draw_queue.append(dict(
            layer=2, func=pg.draw.rect,
            args=(sc.screen, self.bg_color, clear_rect)))
    def check(self, pos):
        """Check if pos overlaps 'self.rect'. Return bool."""
        return self.rect.collidepoint(pos)
| true |
8b43662b1e26a5cab30e732e15d718f11978ba2b | Python | NimraSadaqat/events_calendar | /calender_app/models.py | UTF-8 | 841 | 2.609375 | 3 | [] | no_license | from djongo import models
# Create your models here.
class Event(models.Model):
    """Calendar event with denormalized year/month/day fields for easy lookups."""
    title = models.CharField(max_length=200)
    # description = models.TextField()
    start_time = models.DateField()
    # year/month/day are derived from start_time in save() when left blank.
    year = models.CharField(max_length=5, blank=True)
    month = models.CharField(max_length=5, blank=True)
    day = models.CharField(max_length=5, blank=True)
    def __str__(self):
        return f"ID={self.id} Title={self.title}"
    def save(self, *args, **kwargs):
        """Fill any blank year/month/day field from start_time before saving."""
        # print("time=",self.start_time.strftime('%Y'),"year=",self.year)
        if not self.year:
            self.year = self.start_time.strftime('%Y')
        if not self.month:
            self.month = self.start_time.strftime('%m')
        if not self.day:
            self.day = self.start_time.strftime('%d')
        super(Event, self).save(*args, **kwargs)
| true |
4c727349ce73e8509bd6816d21e5df3a79b912e5 | Python | nsabine/ose_scripts | /docker_list_images.py | UTF-8 | 553 | 3.046875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
import json
def get_jsonparsed_data(url):
    """Receive the content of ``url``, parse it as JSON and return the
    object.
    """
    response = urlopen(url)
    try:
        # .read() returns bytes on Python 3; decode it instead of calling
        # str(), which would produce the literal "b'...'" representation and
        # break json.loads.
        data = response.read().decode("utf-8")
    finally:
        response.close()
    return json.loads(data)
# Query the Red Hat container registry for every image and print each name.
url = 'https://registry.access.redhat.com/v1/search?q=*'
data = get_jsonparsed_data(url)
for x in data['results']:
    print(x['name'])
| true |
647c5180eb8a58114f7293f24d12940d766518a7 | Python | Dmitry-15/10_laba | /Zadaniy/zadanie2.py | UTF-8 | 750 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def cylinder():
    """Interactively compute the lateral or total surface area of a cylinder.

    Prompts (in Russian) for the radius and height, then prints either the
    lateral surface area (choice "1") or the total surface area (anything else).
    """
    radius = float(input("Введите радиус: "))
    height = float(input("Введите высоту: "))
    choice = input("Хотите получить 1) площадь боковой поверхности цилиндра,"
                   " или 2) полную площадь цилиндра? - ")
    lateral = 2 * math.pi * radius * height
    if choice == "1":
        print(f"Площадь боковой поверхности: {lateral}")
    else:
        # Total area = lateral surface plus the two circular end caps.
        total = lateral + 2 * (math.pi * radius ** 2)
        print(f"Полная площадь: {total}")


if __name__ == '__main__':
    cylinder()
| true |
afdebff1ebcc5aa8b3302ad9c1a32ee7c5283dc7 | Python | openGDA/gda-diamond | /configurations/i20-config/scripts/xes/setOffsets.py | UTF-8 | 2,460 | 2.5625 | 3 | [] | no_license | from BeamlineParameters import JythonNameSpaceMapping
#
# This script will change the offsets for the motors in the spectrometer, based on supplied values from the user.
#
def setFromExpectedValues(expectedValuesDict):
    """
    Using the supplied dictionary of expected motor positions, this calculates the required offsets and sets them on the GDA Scannables.
    """
    # Fail fast if any name does not resolve to a spectrometer group member.
    _checkDictNames(expectedValuesDict)
    jython_mapper = JythonNameSpaceMapping()
    # NOTE(review): 'spectrometer' is unused in this function.
    spectrometer = jython_mapper.spectrometer
    offsetsDict = {}
    # Compute the offset required for each motor to read back its expected value.
    for name in expectedValuesDict.keys():
        expected = expectedValuesDict[name]
        print "\t %s %f" % (name,expected)
        newOffset = _calcOffset(name,expected)
        offsetsDict[name] = newOffset
    print offsetsDict
    _setFromDict(offsetsDict)
    # Persist the newly applied offsets to the default XML store.
    from xes import offsetsStore
    offsetsStore.write()
def _setFromDict(offsetsDict):
    """
    Sets the supplied dictionary of offsets (scannable name -> offset value)
    on the GDA Scannables belonging to the spectrometer group.
    """
    # Validate all names before touching any hardware.
    _checkDictNames(offsetsDict)
    jython_mapper = JythonNameSpaceMapping()
    spectrometer = jython_mapper.spectrometer
    print "Setting the spectrometer offsets:"
    for name in offsetsDict.keys():
        offset = offsetsDict[name]
        print "\t %20s offset -> %.9f" % (name,offset)
        spectrometer.getGroupMember(name).setOffset([offset])
def _checkDictNames(valuesDict):
    """Raise ValueError unless every key names a spectrometer group member."""
    spectrometer = JythonNameSpaceMapping().spectrometer
    for name in valuesDict.keys():
        if spectrometer.getGroupMember(name) == None:
            raise ValueError(
                "scannable " + name + " could not be found. Will not apply offsets")
def _calcOffset(name,expectedReadback):
    """Return the offset that would make scannable `name` read `expectedReadback`."""
    scannable = JythonNameSpaceMapping().spectrometer.getGroupMember(name)
    if scannable == None:
        raise ValueError("scannable '{}' could not be found. Will not apply offsets".format(name))
    readback = scannable()
    offsets = scannable.getOffset()
    # A scannable with no offset configured reports None; treat it as zero.
    if offsets == None:
        offsets = [0]
    # Raw position = readback - current offset; the new offset shifts the raw
    # position onto the expected readback.
    return expectedReadback - (readback - offsets[0])
| true |
e1b80ed0979e41efd603abb1e7b073bb7f6ee16d | Python | H-H2648/Deep-Dream | /VGG.py | UTF-8 | 968 | 2.65625 | 3 | [] | no_license | from collections import namedtuple
import torch
import torch.nn as nn
from torchvision import models
# These correspond to layers conv1_1, conv2_1, conv3_1, conv4_1, conv5_1 in
# VGG-19's feature stack (indices as strings, matching enumerate() below).
focusConv = ['0', '5', '10', '19', '28']
# Use the GPU only when one is actually present.
# BUG FIX: is_available must be *called* — the bare function object is always
# truthy, which silently forced "cuda:0" even on CPU-only machines.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class VGGModel(nn.Module):
    """VGG-19 feature extractor returning activations from selected conv layers."""
    def __init__(self):
        super(VGGModel, self).__init__()
        # String indices of the conv layers whose activations are collected.
        self.focusConv = focusConv
        # we go up to 29 because we don't actually need the linear section (no need for actual prediction)
        self.model= (models.vgg19(pretrained=True).features[:29]).to(device)
    def forward(self, x):
        # Activations of the selected layers, in network order.
        features = []
        # run x through each layer up to the final convolutional layer
        for layer_num, layer in enumerate(self.model):
            x = layer(x)
            # store the output of each desired convolutional layer
            if str(layer_num) in self.focusConv:
                features.append(x)
        return features
698c7984b18132f442885026bfddbe79c757c0d9 | Python | acaciooneto/cursoemvideo | /aula-15-pratica.py | UTF-8 | 484 | 3.875 | 4 | [] | no_license | cont = soma = 0
# Print 0..10 on one line, separated by "-> ".
while cont <= 10:
    print(cont, '-> ', end='')
    cont += 1
print('Acabou')
# Keep summing user input until a 0 is entered.
while True:
    num = int(input('Digite um número: '))
    if num == 0:
        break
    soma += num
#print('A soma vale {}.'.format(soma))
print(f'A soma vale {soma}') # f-string, introduced in Python 3.6, replaces .format
nome = 'josé'
idade = 33
salário = 1300
print(f'O {nome} tem {idade} anos e ganha R${salário} por mês, o que dá cerca de R${salário/30:.2f} por dia.')
de1cfee24a8a79f5249d8b2b79dbb94375e7838b | Python | ESA-PhiLab/hypernet | /beetles/scripts/multispectral/run_multispectral_experiments.py | UTF-8 | 3,466 | 2.8125 | 3 | [
"MIT"
] | permissive | import clize
import pandas as pd
from ml_intuition.data.io import save_ml_report
from scripts.multispectral.train_classifier import train_and_eval_classifier
from scripts.multispectral.train_regression import train_and_eval_regression
def run_experiments(*,
                    dataframe_path: str,
                    label_name: str,
                    output_dir_path: str,
                    model_name: str,
                    train_fraction: float,
                    seed: int,
                    verbose: int,
                    n_jobs: int) -> None:
    """
    Train and evaluate a model on a multispectral dataset and save the report.

    :param dataframe_path: Path to a CSV design matrix. Rows are observations,
        columns are explanatory variables plus one target column. For
        classification the target holds raw nominal labels (one-hot encoding
        happens inside the pipeline); for regression it is numeric.
    :param label_name: Name of the target column; all remaining columns are
        used as input features.
    :param output_dir_path: Destination directory for the report.
    :param model_name: Model identifier. Names ending in "_reg" run the
        regression pipeline; anything else (e.g. a "_clf" suffix such as
        "decision_tree_clf") runs the classification pipeline.
    :param train_fraction: Fraction of samples used for training. The split is
        stratified for classification and purely random for regression.
    :param seed: Seed used for reproducing the experiments.
    :param verbose: Verbosity mode.
    :param n_jobs: Number of jobs used for parallel computing.
    :return: None.
    """
    dataframe = pd.read_csv(dataframe_path)
    # Dispatch on the model-name suffix; both pipelines share the same
    # keyword interface, so only the callable differs.
    is_regression = model_name.split('_')[-1] == 'reg'
    train_and_eval = (train_and_eval_regression if is_regression
                      else train_and_eval_classifier)
    test_report, best_params = train_and_eval(
        dataframe=dataframe,
        label_name=label_name,
        train_fraction=train_fraction,
        model_name=model_name,
        seed=seed,
        verbose=verbose,
        n_jobs=n_jobs)
    save_ml_report(output_dir_path=output_dir_path,
                   model_name=model_name,
                   test_report=test_report,
                   best_params=best_params,
                   train_fraction=train_fraction)


if __name__ == '__main__':
    clize.run(run_experiments)
| true |
30930b547b83c5cc4f118e0f1977fad0946f03d2 | Python | Ihyatt/fandor_challenge | /server.py | UTF-8 | 2,611 | 2.875 | 3 | [] | no_license | """Fandor Challenge"""
import os
from jinja2 import StrictUndefined
import psycopg2
from model import Movie, Ratings, connect_to_db, db
from flask import Flask, render_template, redirect, request, flash, session, jsonify
from flask_debugtoolbar import DebugToolbarExtension
from operator import attrgetter
import operator
app = Flask(__name__)
# NOTE(review): hard-coded secret key — fine for a demo, but it should come
# from configuration/environment in production.
app.secret_key = "ABC"
# Raise an error on undefined template variables instead of failing silently.
app.jinja_env.undefined = StrictUndefined
@app.route('/')
def main_page():
    """Render the home page with popular, newest and oldest movie previews."""
    movies = Movie.query.all()
    # Each preview strip shows half of the catalogue.
    # BUG FIX: use integer division — on Python 3, '/' yields a float and the
    # slice bounds below would raise TypeError.
    preview = len(movies) // 2
    new_releases = sorted(movies, key=attrgetter('year'))
    old_school = new_releases[:preview]
    new_releases = new_releases[::-1][:preview]
    # Pair each movie with its vote score, then keep the highest-scored half.
    popular = [[movie, movie.rating_count()] for movie in movies]
    popular = sorted(popular, key=operator.itemgetter(1))[::-1][:preview]
    return render_template("home.html", movies=movies, popular=popular, new_releases=new_releases, old_school=old_school)
@app.route("/add-vote.json", methods=['POST'])
def rate_movie():
"""Rate movie"""
voted_item = request.form.get("voted_item")
movie_id = request.form.get("movie_id")
movie = Movie.query.get(int(movie_id))
vote_added = None
if voted_item == "up":
vote_added = Ratings(movie_id=int(movie_id), up_vote=True, down_vote=False)
else:
vote_added = Ratings(movie_id=int(movie_id), up_vote=False, down_vote=True)
db.session.add(vote_added)
db.session.commit()
result = {'vote': movie.rating_count(), "movie_id": movie_id}
return jsonify(result)
@app.route('/search')
def search_page():
    """Search page where user queries for specific movies."""
    # The page's queries are served by the /movies_search.json endpoint below.
    return render_template("search.html")
@app.route('/movies_search.json',methods=['GET'])
def return_search():
    """Return movies whose title or description matches any word of the query."""
    query = {}
    search_list = []
    # NOTE(review): a request without "search_item" makes `search` None and
    # .split() raise — confirm the client always sends the parameter.
    search = request.args.get("search_item")
    splitted_search = search.split(" ")
    for word in splitted_search:
        # The LIKE values are bound as parameters by SQLAlchemy, so user input
        # is not interpolated into the SQL text itself.
        search_match_title = Movie.query.filter(Movie.title.like('%' + word + '%') ).all()
        search_match_description = Movie.query.filter(Movie.description.like('%' + word + '%') ).all()
        search_list.extend(search_match_title)
        search_list.extend(search_match_description)
    # Keying by movie_id de-duplicates movies matched by several words.
    for match in search_list:
        query[match.movie_id] = [match.title, match.description, match.year]
    return jsonify(query)
if __name__ == "__main__":
# We have to set debug=True here, since it has to be True at the point
# that we invoke the DebugToolbarExtension
# Do not debug for demo
app.debug = False
connect_to_db(app)
# Use the DebugToolbar
DebugToolbarExtension(app)
app.run() | true |
d07090a1fd16de9966221b178fe9d6ada3536198 | Python | KardeslerSporSalonuUygulamasi/SporSalonu | /PythonScripts/main.py | UTF-8 | 1,057 | 3.171875 | 3 | [
"BSL-1.0"
] | permissive | import sys
class Uyeler:
    """Gym member record for the 'Kardeşler Spor Salonu' application."""
    # Class-level gym name shared by every member.
    salonAdi="Kardeşler Spor Salonu"
    def __init__(self, Id, adSoyad, yas, kilo, dogumTarihi):
        """Store the member's id, name, age, weight and birth date."""
        self.Id = Id
        self.adSoyad = adSoyad
        self.yas = yas
        self.kilo = kilo
        self.dogumTarihi = dogumTarihi
    def yazma(self):
        """Write the member's fields, tab separated, to uyeTablo.txt."""
        liste = [str(self.Id),"\t", self.adSoyad,"\t", self.yas,"\t", self.kilo,"\t", str(self.dogumTarihi)]
        with open("uyeTablo.txt", 'w', encoding = 'utf-8') as dosya:
            dosya.writelines(liste)
    def uyeBilgileri(self):
        """Print the member's details together with the gym name."""
        # BUG FIX: read salonAdi via self rather than the module-level 'uye'
        # instance, so the method works on any Uyeler object.
        print("Salon Adı: ",self.salonAdi,"\nTC :",self.Id,
              "\nİsim Soyisim :",self.adSoyad,
              "\nYaş : ",self.yas,
              "\nKilo : ",self.kilo,
              "\nDogum Tarihi: ",self.dogumTarihi)
uye=Uyeler("1","Yasin Işıktaş","24","70","07.01.1997")
uye.yazma()
for arg in sys.argv:
print(arg)
| true |
31ad1ea1fc59feee866627878bc151894583a79b | Python | cl-conway/AGN-code | /make_sinusoid_data.py | UTF-8 | 4,572 | 2.65625 | 3 | [] | no_license | """
Description:
File to produce data files of sinusoids that have the same time stamps of PG1302-102
with different error bars. This is to be used be julia CARMA in order to test whether
periodic can be found.
"""
import math
import pandas as pd
import numpy as np
# Which machine the script runs on ('N' or 'C'); selects the hard-coded paths below.
User= 'C'
# False: reuse each object's real photometric errors; True: sweep 20 synthetic error levels.
using_fake_errors= False
if User == 'N':
    Graham_IDs_path = 'C:/Users/User/Documents/University/Year 4/Project/Julia_Working_Directory/Graham_Periods_Medians.txt'
elif User == 'C':
    Graham_IDs_path = 'C:/Users/Christopher/Documents/UNI/Year 4/Project/AGN-code/Julia_Working_Directory/Graham_Periods_Medians.txt'
#Load in all the Graham object data
All_Graham_Objects = pd.read_table(Graham_IDs_path, sep='\t', header=None, index_col=0)
Object_Names_Text =open('Sinusoids_Obj_Names.txt','w')
# NOTE(review): this variable is unused — the loop builds MCMC_Output_Path instead.
MCMC_outputs_path = 'C:/Users/Christopher/Documents/UNI/Year 4/Project/AGN-code/MCMC_output_value.txt'
for j in range(len(All_Graham_Objects)):
    Object_Name = All_Graham_Objects.iloc[j,0]
    if User == 'N':
        Path_to_Data = 'C:/Users/User/Documents/University/Year 4/Project/Julia_Working_Directory/Grahams_Clipped_Data/Clipped_Data_' + Object_Name +'.txt'
    elif User == 'C':
        Path_to_Data = 'C:/Users/Christopher/Documents/UNI/Year 4/Project/AGN-code/Julia_Working_Directory/Grahams_Clipped_Data/Clipped_Data_' + Object_Name +'.txt'
    #Read the data
    MCMC_Output_Path = 'C:/Users/Christopher/Documents/UNI/Year 4/Project/AGN-code/MCMC_output_values.txt'
    MCMCdf = pd.read_table(MCMC_Output_Path, sep =';', header=0)
    Data_for_obj = pd.read_table(Path_to_Data, sep=',', header=0)
    # Light-curve epochs and magnitudes (as_matrix is the old .values API).
    Times = Data_for_obj[['MJD']].as_matrix().ravel()
    Mag_True = Data_for_obj[['Mag']].as_matrix().ravel()
    #Period = (Times.max()-Times.min())/3
    Period = All_Graham_Objects.iloc[j,2]
    if 'PG 1302' in Object_Name:
        Amplitude = MCMCdf.Amplitude.iloc[j]
    else:
        # Half the peak-to-peak magnitude range of the real light curve.
        Amplitude = (np.max(Mag_True)-np.min(Mag_True))/2
    Median = All_Graham_Objects.iloc[j,1]
    Phase = np.pi
    # Noise-free sinusoid sampled at the object's real epochs.
    Mag = Median + Amplitude*np.sin(2*np.pi*Times/Period + Phase)
    #using Fake Errors
    if using_fake_errors == True:
        # Write 20 versions of the light curve with increasing synthetic error bars.
        for k in range(20):
            Magerr = np.ones_like(Times)*(k+1)/10
            sigma = str((k+1)/20)
            Mag = Mag + Magerr * np.random.randn(Mag.size)
            Object_Names_Text.write('Data_Magerr_' + sigma + '\n')
            if User == 'N':
                sinusoids_path = 'C:/Users/User/Documents/University/Year 4/Project/Julia_Working_Directory/Sinusoids_Data/' + Object_Name + '_Data_Magerr_' + sigma + '.txt'
            elif User == 'C':
                sinusoids_path = 'C:/Users/Christopher/Documents/UNI/Year 4/Project/AGN-code/Julia_Working_Directory/Sinusoids_Data/' + Object_Name + '_Data_Magerr_' + sigma + '.txt'
            d = {'MJD' : pd.Series(Times),
                 'Mag': pd.Series(Mag),
                 'Magerr': pd.Series(Magerr)}
            df = pd.DataFrame(d)
            df.to_csv(sinusoids_path, index=False, sep = ' ',header = None)
    else:
        # Find a phase (in 1/720-turn steps) whose first model sample lies
        # within the median error bar of the first observed magnitude.
        Found_Phase = False
        Magerr_True= Data_for_obj[['Magerr']].as_matrix().ravel()
        for l in range(1,721):
            Phase = l*2*np.pi/720
            if abs(Amplitude*np.sin(Phase)+Median-Mag_True[0]) < np.median(Magerr_True):
                Found_Phase = True
                break
            if l == 720 and Found_Phase == False:
                print(j," No phase for object ", Object_Name)
        # Sinusoid with the object's real errors added as gaussian noise.
        Mag = Median + Amplitude*np.sin(2*np.pi*Times/Period + Phase) + Magerr_True*np.random.randn(Mag.size)
        Object_Names_Text.write('Data_'+ Object_Name+ '_Magerr_True \n')
        if User == 'N':
            sinusoids_path = 'C:/Users/User/Documents/University/Year 4/Project/Julia_Working_Directory/Sinusoids_Data/Data_' + Object_Name + '_Magerr_True.txt'
        elif User == 'C':
            sinusoids_path = 'C:/Users/Christopher/Documents/UNI/Year 4/Project/AGN-code/Julia_Working_Directory/Sinusoids_Data/Data_' + Object_Name + '_Magerr_True.txt'
        d = {'MJD' : pd.Series(Times),
             'Mag': pd.Series(Mag),
             'Magerr': pd.Series(Magerr_True)}
        df = pd.DataFrame(d)
        df.to_csv(sinusoids_path, index=False, sep = ' ',header = None)
Object_Names_Text.close()
| true |
6ea633f37f5d48e9ca0d78f9c1ac948dbf532424 | Python | TTwelves/Data-structure-and-algorithm | /14.最长公共前缀.py | UTF-8 | 709 | 3.3125 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=14 lang=python3
#
# [14] 最长公共前缀
#
# @lc code=start
from typing import List


class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest common prefix of `strs` ("" when there is none)."""
        # min()/max() reject empty sequences, so handle the empty list first.
        # (`if not strs:` also covers `strs is None`.)
        if not strs:
            return ""
        # The lexicographically smallest and largest strings differ the most;
        # any prefix they share is shared by every string in between.
        # (Smallest/largest here is by lexicographic order, not by length.)
        min_str = min(strs)
        max_str = max(strs)
        for x in range(len(min_str)):
            if min_str[x] != max_str[x]:
                return min_str[:x]
        return min_str
# @lc code=end
| true |
9ef18c7de5bbac0c73a9a7542fe627945a232bfe | Python | Yashg19/enrique | /testcases/tet_gui.py | UTF-8 | 7,174 | 2.734375 | 3 | [] | no_license | from PyQt4.QtGui import *
from PyQt4.QtCore import *
import pandas as pd
from functools import partial
import magellan as mg
from collections import OrderedDict
class DataModel():
    """Thin holder for the dataframe a table view renders."""

    def __init__(self, df):
        # The dataframe currently backing the view.
        self.dataframe = df

    def setDataFrame(self, df):
        """Point the model at a new dataframe.

        Args:
            df: the dataframe the model will use from now on.
        """
        self.dataframe = df

    def getDataFrame(self):
        """Return a reference to the dataframe currently held by the model."""
        return self.dataframe
class MTableViewWithLabel(QWidget):
    """An MTableView with a 'Metrics' label stacked above it."""
    def __init__(self, model):
        super(MTableViewWithLabel, self).__init__()
        self.mv = MTableView(model)
        self.init_gui()
    def init_gui(self):
        """Lay out the label above the wrapped table view."""
        lbl = QLabel('Metrics')
        layout = QVBoxLayout()
        layout.addWidget(lbl)
        layout.addWidget(self.mv)
        self.setLayout(layout)
class MTableView(QTableWidget):
def __init__(self, model, *args):
super(MTableView, self).__init__(*args)
self.set_model(model)
self.paint_gui()
def set_model(self, model):
self._model = model
def paint_gui(self):
# enable sorting.
self.setSortingEnabled(True)
self.setColumnCount(len(self._model.getDataFrame().columns) + 2)
self.setRowCount(len(self._model.getDataFrame()))
headers = ['Show', 'Debug']
headers.extend(list(self._model.getDataFrame().columns.values))
self.setHorizontalHeaderLabels(headers)
# self.setColumnWidth(0, 35)
self.verticalHeader().setVisible(True)
self.horizontalHeader().setStretchLastSection(True)
for i in range(len(self._model.getDataFrame().index)):
for j in range(len(self._model.getDataFrame().columns) + 2) :
if j == 0:
button = QPushButton('Show', self)
self.setCellWidget(i, j, button)
button.clicked.connect(partial(self.handle_show_button, i))
elif j == 1:
button = QPushButton('Debug', self)
self.setCellWidget(i, j, button)
button.clicked.connect(partial(self.handle_debug_button, i))
else:
# self.setItem(i, j, QTableWidgetItem("pp"))
if pd.isnull(self._model.getDataFrame().iloc(i, j - 2)):
self.setItem(i, j, QTableWidgetItem(""))
else:
self.setItem(i, j, QTableWidgetItem(
str(self._model.getDataFrame().iloc[i, j - 2])))
def handle_debug_button(self, index):
y = 10
print 'Debug button clicked : ' + str(index)
d = mg.load_dataset('table_B')
model = DataModel(d)
view = MTableView(model)
view.show()
def handle_show_button(self, index):
x = 20
print 'show button clicked : ' + str(index)
class MetricWidget(QTableWidget):
    """One-column table showing metric name/value pairs plus a selector row."""
    def __init__(self, model, *args):
        # model: ordered mapping of metric name -> value (Python 2 dict API).
        super(MetricWidget, self).__init__(*args)
        self.set_model(model)
        self.paint_gui()
    def set_model(self, model):
        self._model = model
    def paint_gui(self):
        """Fill one row per metric, then a combobox row to pick what to show."""
        self.setSortingEnabled(True)
        self.setColumnCount(1)
        # One row per metric plus the trailing 'Show' selector row.
        self.setRowCount(len(self._model.keys())+1)
        headers = ['Value']
        #headers.extend(list(self._model.getDataFrame().columns.values))
        self.setHorizontalHeaderLabels(headers)
        self.verticalHeader().setVisible(True)
        # Python 2: keys() returns a list, so append works in place.
        h = self._model.keys()
        h.append('Show')
        self.setVerticalHeaderLabels(h)
        self.horizontalHeader().setStretchLastSection(True)
        idx = 0
        for k, v in self._model.iteritems():
            self.setItem(idx, 0, QTableWidgetItem(str(v)))
            idx += 1
        # Last row hosts the False Positives / False Negatives selector.
        b = QComboBox()
        b.addItems(['False Positives', 'False Negatives'])
        b.activated[str].connect(self.onActivated)
        self.setCellWidget(idx, 0, b)
    def onActivated(self, text):
        print text + " is on"
class MCombobox(QComboBox):
    """Combobox preloaded with the false-positive/false-negative choices."""
    def __init__(self):
        super(MCombobox, self).__init__()
        self.initUI()
    def initUI(self):
        print 'inside init'
        self.addItems(['False Positives', 'False Negatives'])
        # NOTE(review): this class defines no onActivated method, so this
        # connect raises AttributeError unless a subclass provides one.
        self.activated[str].connect(self.onActivated)
class MurWidget(QWidget):
    """Debug window: metrics table and dataframe view side by side in a splitter."""
    def __init__(self, d):
        super(MurWidget, self).__init__()
        self.model = DataModel(d)
        self.view = MTableViewWithLabel(self.model)
        self.setWindowTitle("Debug - Mtable")
        # Hard-coded demo metrics shown in the left-hand pane.
        metric_data = OrderedDict()
        metric_data['Precision'] = 0.95
        metric_data['Recall'] = 0.93
        metric_data['F1'] = 0.94
        metric_data['Num. False Positives'] = 5
        metric_data['Num. False Negatives'] = 6
        metric_table = MetricWidget(metric_data)
        hbox = QVBoxLayout(self)
        splitter2 = QSplitter(Qt.Horizontal)
        splitter2.addWidget(metric_table)
        splitter2.addWidget(self.view)
        hbox.addWidget(splitter2)
        self.setLayout(hbox)
class MWidget(QWidget):
    """Debug window variant: metrics table stacked above the dataframe view."""
    def __init__(self, d):
        super(MWidget, self).__init__()
        self.model = DataModel(d)
        self.view = MTableView(self.model)
        # # set window size
        # width = min((len(self.model.getDataFrame().columns) + 2)*105, mg._viewapp.desktop().screenGeometry().width() - 50)
        # height = min((len(self.model.getDataFrame()) + 2)*41, mg._viewapp.desktop().screenGeometry().width() - 100)
        # self.resize(width, height)
        # set window title
        self.setWindowTitle("Debug - Mtable")
        # change_btn = QPushButton('Change', self)
        # change_btn.clicked.connect(self.change_table_contents)
        # Hard-coded demo metrics shown above the table.
        metric_data = OrderedDict()
        metric_data['Precision'] = 0.95
        metric_data['Recall'] = 0.93
        metric_data['F1'] = 0.94
        metric_data['Num. False Positives'] = 5
        metric_data['Num. False Negatives'] = 6
        metric_table = MetricWidget(metric_data)
        combobox = MCombobox()
        hlayout = QVBoxLayout()
        hlayout.addWidget(metric_table)
        # hlayout.addWidget(combobox)
        # layout = QGridLayout()
        # layout.addWidget(change_btn, 0, 0)
        layout = QVBoxLayout()
        # layout.addWidget(metric_table, 0, 0)
        # layout.addWidget(self.view, 1, 0)
        # layout.addWidget(metric_table)
        layout.addLayout(hlayout)
        layout.addWidget(self.view)
        self.setLayout(layout)
    # QApplication.setStyle(QStyleFactory.create('motif'))
    def change_table_contents(self):
        """Swap the model to 'table_B' and repaint the table in place."""
        d = mg.load_dataset('table_B')
        self.model = DataModel(d)
        self.view.set_model(self.model)
        self.view.paint_gui()
        # model = DataModel(d)
        # view = MTableView(model)
        # view.show()
import magellan as mg
import sys
# Demo entry point: show the debug widget for 'table_A' inside magellan's Qt app.
app = mg._viewapp
d = mg.load_dataset('table_A')
# w = MWidget(d)
w = MurWidget(d)
w.show()
(app.exec_())
| true |
cf1be284f3e04209ac8cdea7b5e51551df512b5a | Python | noegodinho/EC | /TTP/CycleCross.py | UTF-8 | 2,586 | 3.4375 | 3 | [] | no_license | import random
def cycle_cross(indiv_1, indiv_2, prob_cross):
    """Cycle crossover (CX) between two permutation individuals.

    Each individual is ``[genome, fitness]``. With probability ``prob_cross``
    the genomes are decomposed into cycles and the children alternate which
    parent supplies each cycle; otherwise (or when only one cycle exists)
    the parents themselves are returned.
    """
    size = len(indiv_1[0])
    if not random.random() < prob_cross:
        # No crossover this time: the children are the parents themselves.
        return indiv_1, indiv_2
    visited = [0] * size
    cycles = []
    while sum(visited) < size:
        # Start a new cycle at the first position not yet assigned to one.
        position = getUnocupied(visited)
        cycle = []
        while True:
            visited[position] = 1
            cycle.append(position)
            # Follow the mapping: the index in parent 1 holding the value that
            # parent 2 has at the current position.
            position = indiv_1[0].index(indiv_2[0][position])
            if position in cycle:
                cycles.append(cycle)
                break
    if len(cycles) < 2:
        # A single cycle would just clone the parents.
        return indiv_1, indiv_2
    # Build the two children with complementary parent-selection patterns.
    child1 = mountIndividual(indiv_1, indiv_2, getDecision(len(cycles)), cycles, size)
    child2 = mountIndividual(indiv_1, indiv_2, getDecision(len(cycles), True), cycles, size)
    return child1, child2


def getDecision(cycles_num, inverse=False):
    """Alternating parent-selection pattern, one entry per cycle.

    ``inverse=False`` yields [2, 1, 2, 1, ...]; ``inverse=True`` the opposite.
    """
    decision = []
    if inverse == False:
        for index in range(cycles_num):
            decision.append(1 if index % 2 else 2)
    elif inverse == True:
        for index in range(cycles_num):
            decision.append(2 if index % 2 else 1)
    return decision


def mountIndividual(indiv_1, indiv_2, structure, cycles, size):
    """Assemble a child: cycle k copies its positions from parent structure[k]."""
    genome = [0] * size
    for index, cycle in enumerate(cycles):
        parent_choice = structure[index]
        for position in cycle:
            if parent_choice == 1:
                genome[position] = indiv_1[0][position]
            elif parent_choice == 2:
                genome[position] = indiv_2[0][position]
    # Fitness is reset to 0 for the new individual.
    return [genome, 0]


def getUnocupied(positions):
    """Index of the first slot still marked 0 (ValueError if none remain)."""
    return positions.index(0)
if __name__ == '__main__':
    # Ad-hoc smoke test: prob_cross=1 forces the crossover path.
    individual1 = [[1,2,3,4,5,6,7,8],0]
    individual2 = [[2,5,3,4,1,8,6,7],0]
    #print(individual2[0][1])
    #cycles=[[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15]]
    #structure=[1,1,1,1,1]
    """
    for ind,i in enumerate(cycles):
        print(i)
        for j in i:
            print(j)
    """
    new_individual1, new_individual2 = cycle_cross(individual1,individual2,1)
    print(new_individual1)
    print(new_individual2)
53af02d96030a8c733174cd01d92512c6c6a35e3 | Python | pstreich/Gesichtserkennung | /Raspberry Pi Quellcode/prozess2.py | UTF-8 | 2,472 | 2.859375 | 3 | [] | no_license | #Bibliotheken einbinden
import RPi.GPIO as GPIO
import sys
import time
import datetime
import numpy as np
import gspread
import oauth2client.client
import json # zum Einlesen der Google Zugangsdaten aus entsprechender Datei
import cPickle
# JSON file holding the Google service-account credentials
JSON_FILENAME = 'pitest-c7f0752a993d.json'
# Name of the Google spreadsheet document
GSHEET_NAME = 'pi'
# Load the credentials from the JSON file and open the document for writing
# NOTE(review): the handle from open() is never closed.
json_key = json.load(open(JSON_FILENAME))
creds = oauth2client.client.SignedJwtAssertionCredentials(json_key['client_email'],
                                                          json_key['private_key'],
                                                          ['https://spreadsheets.google.com/feeds'])
client_inst = gspread.authorize(creds)
gsheet = client_inst.open(GSHEET_NAME).sheet1
# GPIO numbering mode (Broadcom channel numbers)
GPIO.setmode(GPIO.BCM)
# GPIO pin assignment for the ultrasonic sensor
GPIO_TRIGGER = 6
GPIO_ECHO = 12
# Configure the pin directions (output trigger / input echo)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
# Rolling buffer of distance samples and its write index
liste = np.zeros(60)
i = 0
def distanz():
    """Measure the distance in cm with the ultrasonic sensor (pulse + echo timing)."""
    # set trigger HIGH
    GPIO.output(GPIO_TRIGGER, True)
    # set trigger back to LOW after 0.01 ms
    time.sleep(0.00001)
    GPIO.output(GPIO_TRIGGER, False)
    StartZeit = time.time()
    StopZeit = time.time()
    # record the start time (last moment the echo line is still low)
    # NOTE(review): these busy-wait loops hang forever if the sensor never responds.
    while GPIO.input(GPIO_ECHO) == 0:
        StartZeit = time.time()
    # record the arrival time (last moment the echo line is still high)
    while GPIO.input(GPIO_ECHO) == 1:
        StopZeit = time.time()
    # time difference between start and arrival
    TimeElapsed = StopZeit - StartZeit
    # multiply by the speed of sound (34300 cm/s)
    # and divide by 2, because the pulse travels there and back
    distanz = (TimeElapsed * 34300) / 2
    return distanz
try:
    while True:
        # Measure the distance and collect it for the running average.
        abstand = distanz()
        print ("Gemessene Entfernung = %.1f cm" % abstand)
        liste[i]=abstand
        i+=1
        time.sleep(1)
        # After roughly 60 seconds (55 one-second samples)
        if i == 55:
            # Open the pickle file and read the viewer's position.
            # NOTE(review): fp is never closed.
            fp = open("shared.pk1", "rb")
            shared = cPickle.load(fp)
            # Compute the average distance.
            durschnitt = liste.mean()
            print str(durschnitt)
            print shared["xs"]
            print shared["ys"]
            curr_time = datetime.datetime.now()
            # Append a new row to the Google sheet: date, distance, X and Y position.
            gsheet.append_row((curr_time, str(int(durschnitt))+" cm", shared["xs"], shared["ys"]))
            # Reset the sample index (the buffer is overwritten in place).
            i=0
    # Exit cleanly when interrupted with Ctrl+C
except KeyboardInterrupt:
    GPIO.cleanup()
    sys.exit(0)
6545b45b926c8d154a7c39e617a00dc8e7d131d2 | Python | ReWKing/StarttoPython | /操作列表/创建数值列表/动手试一试/4-6 奇数.py | UTF-8 | 158 | 3.1875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Author:William Gin
# Odd numbers from 1 to 19: range(start=1, stop=21, step=2).
single_numbers = list(range(1, 21, 2))
print(single_numbers)
# Print each odd number on its own line.
for i in single_numbers:
    print(i)
| true |
2700a64075fd3999e6b7f1f2a4f4309adef2a04b | Python | charlierkj/Gradient-Health-Project | /train.py | UTF-8 | 2,154 | 2.546875 | 3 | [] | no_license | import tensorflow as tf
import tensorflow_datasets as tfds
def preprocess(feature):
    """Resize-then-center-crop an example's image to 224x224 and one-hot its label.

    `feature` is assumed to be a TFDS example dict with "image" and "label"
    keys — TODO confirm against the covid_ct dataset spec.
    """
    image, label = feature["image"], feature["label"]
    image = tf.cast(image, tf.float32)
    # scale pixel values into [0, 1]
    image = image / 255
    shape = tf.shape(image)
    h, w = shape[0], shape[1]
    ratio = w / h
    if ratio >= 1:
        # landscape: fix height at 224, then crop the horizontal overflow symmetrically
        image = tf.image.resize(image, (224, tf.cast(tf.math.round(224 * ratio), dtype=tf.int32)))
        w_offset = tf.cast(tf.math.round(224 * (ratio - 1)/ 2), dtype=tf.int32)
        image = tf.image.crop_to_bounding_box(image, 0, w_offset, 224, 224)
    else:
        # portrait: fix width at 224, then crop the vertical overflow symmetrically
        image = tf.image.resize(image, (tf.cast(tf.math.round(224 / ratio), dtype=tf.int32), 224))
        h_offset = tf.cast(tf.math.round(224 * (1/ratio - 1) / 2), dtype=tf.int32)
        image = tf.image.crop_to_bounding_box(image, h_offset, 0, 224, 224)
    # two classes: COVID / non-COVID
    label = tf.one_hot(label, 2)
    return image, label
if __name__ == "__main__":
    # ---- load data ----
    trainset, info = tfds.load(name="covid_ct", split="train", shuffle_files=True, with_info=True)
    print(info)
    valset = tfds.load(name="covid_ct", split="validation", shuffle_files=False)
    testset = tfds.load(name="covid_ct", split="test", shuffle_files=False)

    train_ds = trainset.map(preprocess)
    val_ds = valset.map(preprocess)
    test_ds = testset.map(preprocess)

    train_batches = train_ds.batch(5)
    val_batches = val_ds.batch(2)
    test_batches = test_ds.batch(1)

    # ---- specify model ----
    # DenseNet169 backbone (ImageNet weights) followed by a 2-way softmax head.
    model = tf.keras.Sequential([
        tf.keras.applications.DenseNet169(include_top=True, weights='imagenet'),
        tf.keras.layers.Dense(2, activation="softmax")
    ])

    # Bug fix: the final layer already applies softmax, so the loss must be
    # computed from probabilities. from_logits=True here would apply a second
    # (implicit) softmax inside the loss and silently degrade training.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
                  loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
                  metrics=[tf.keras.metrics.categorical_accuracy])

    print("Fitting model on training data")
    history = model.fit(train_batches.repeat(), epochs=5, steps_per_epoch=85,
                        validation_data=val_batches.repeat(), validation_steps=59)

    print("Evaluating model on the test data")
    results = model.evaluate(test_batches, steps=203)
    print("Test Acc: ", results[1])

    # ---- save model ----
    model.save("densenet_covid.h5")
| true |
d1e4515dc066a8e1bf27395801317e579217a88e | Python | SpatialDays/csvs-enso-server | /src/ensoserver/domain/services.py | UTF-8 | 2,046 | 2.796875 | 3 | [] | no_license | import logging
from datetime import datetime
from typing import Tuple, List
import requests
from ensoserver.config import enso_invalid_value, LOG_LEVEL, LOG_FORMAT
from urllib3 import HTTPResponse
# Module-level logger, configured from application settings.
logging.basicConfig(level=LOG_LEVEL, format=LOG_FORMAT)
logger = logging.getLogger(__name__)
def download_enso_values(url: str) -> HTTPResponse:
    """Stream the raw ENSO data file from `url`.

    Returns the raw urllib3 response object so callers can iterate it
    line by line.

    Raises:
        DownloadError: when the server answers with an HTTP error status.
    """
    try:
        r = requests.get(url, stream=True)
        r.raise_for_status()
    except requests.exceptions.HTTPError as err:
        logger.error(f'HTTP error occurred trying to download enso'
                     f' values: {err}')
        # Chain the original HTTPError so the root cause stays visible
        # in tracebacks (was a bare `raise DownloadError`).
        raise DownloadError from err
    return r.raw
class DownloadError(Exception):
    """Raised when the ENSO data file cannot be downloaded."""
    pass
def decode_enso_line(line: bytes) -> List[str]:
    """
    Decode one UTF-8 line of the ENSO data file and split it into fields.

    Runs of whitespace are collapsed first, so the returned list contains
    every single value for that year, the year being the first element.
    """
    text = line.decode('utf-8')
    collapsed = " ".join(text.split())
    return collapsed.split(' ')
def get_enso_interval(data: list) -> Tuple[str, str]:
    """Return the (start_year, end_year) pair from the file's header row."""
    header = data[0]
    start_year, end_year = header[0], header[1]
    return start_year, end_year
def obtain_enso_data_from_url(url: str) -> Tuple[datetime, float]:
    """Yield (ISO-8601 timestamp, MEI v2 value) pairs parsed from the ENSO file.

    Generator: despite the annotation, each yielded item is actually a
    (str, float) tuple because the datetime is serialised with isoformat().
    Invalid values (== enso_invalid_value) are skipped; download failures
    are logged and produce no items.
    """
    try:
        enso_data = [decode_enso_line(line) for line in download_enso_values(url)]
        enso_start_year, enso_end_year = get_enso_interval(enso_data)
        for row in enso_data[1:]:
            year = row[0]
            # NOTE(review): this compares years as *strings*, which only works
            # while all years have the same number of digits — confirm.
            if enso_start_year <= year <= enso_end_year:
                for month, meiv2_value in enumerate(row[1:]):
                    if meiv2_value != enso_invalid_value:
                        yield datetime(year=int(year),
                                       month=month+1,
                                       day=1, hour=0, minute=0, second=0).isoformat(),\
                            float(meiv2_value)
    except DownloadError:
        logger.error('ENSO data could not be obtained.')
| true |
de168f084fcd182b2aff9f6c606c13fdce7ccb04 | Python | RLBat/CMEECourseWork | /Week2/Code/using_name.py | UTF-8 | 499 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python3
""" Shows how to distinguish between the module being run directly or called from another module """
__author__ = 'Rachel Bates r.bates18@imperial.ac.uk'
__version__ = '0.0.1'
## IMPORTS ##
# None
## CONSTANTS ##
# None
## FUNCTIONS ##
# None
###############
if __name__ != '__main__':
    # Imported: __name__ holds the module's own name, not '__main__'.
    print ('I am being imported from another module')
else:
    # Executed directly as a script.
    print ('This program is being run by itself')
| true |
43cdc856f2f80c5de08850d9db2307a823ea43de | Python | slowrunner/Carl | /Examples/imu/di_BNO055/di_code/di_easy_inertial_measurement_unit.py | UTF-8 | 9,491 | 2.828125 | 3 | [] | no_license | # https://www.dexterindustries.com
#
# Copyright (c) 2018 Dexter Industries
# Released under the MIT license (http://choosealicense.com/licenses/mit/).
# For more information see https://github.com/DexterInd/DI_Sensors/blob/master/LICENSE.md
#
# EASIER WRAPPERS FOR:
# IMU SENSOR,
# LIGHT AND COLOR SENSOR
# TEMPERATURE, HUMIDITY and PRESSURE SENSOR
# MUTEX SUPPORT WHEN NEEDED
from di_sensors import inertial_measurement_unit
from di_sensors import BNO055
from math import atan2, pi
from time import sleep
'''
MUTEX HANDLING
'''
from di_sensors.easy_mutex import ifMutexAcquire, ifMutexRelease
'''
PORT TRANSLATION
'''
# Map user-facing GoPiGo3 port names to the bus identifiers used by the
# DI sensor drivers; any other name falls back to "RPI_1SW" (software I2C).
ports = {
    "AD1": "GPG3_AD1",
    "AD2": "GPG3_AD2"
}
class EasyIMUSensor(inertial_measurement_unit.InertialMeasurementUnit):
    '''
    Class for interfacing with the `InertialMeasurementUnit Sensor`_.
    This class compared to :py:class:`~di_sensors.inertial_measurement_unit.InertialMeasurementUnit` uses mutexes that allows a given
    object to be accessed simultaneously from multiple threads/processes.
    Apart from this difference, there may
    also be functions that are more user-friendly than the latter.
    '''

    def __init__(self, port="AD1", use_mutex=False):
        """
        Constructor for initializing link with the `InertialMeasurementUnit Sensor`_.

        :param str port = "AD1": The port to which the IMU sensor gets connected to. Can also be connected to port ``"AD2"`` of a `GoPiGo3`_ robot or to any ``"I2C"`` port of any of our platforms. If you're passing an **invalid port**, then the sensor resorts to an ``"I2C"`` connection. Check the :ref:`hardware specs <hardware-interface-section>` for more information about the ports.
        :param bool use_mutex = False: When using multiple threads/processes that access the same resource/device, mutexes should be enabled.
        :raises RuntimeError: When the chip ID is incorrect. This happens when we have a device pointing to the same address, but it's not a `InertialMeasurementUnit Sensor`_.
        :raises ~exceptions.OSError: When the `InertialMeasurementUnit Sensor`_ is not reachable.

        """
        self.use_mutex = use_mutex

        # Unknown port names fall back to the Raspberry Pi software-I2C bus.
        try:
            bus = ports[port]
        except KeyError:
            bus = "RPI_1SW"

        ifMutexAcquire(self.use_mutex)
        try:
            # print("INSTANTIATING ON PORT {} OR BUS {} WITH MUTEX {}".format(port, bus, use_mutex))
            # NOTE(review): super(self.__class__, ...) recurses infinitely if this
            # class is ever subclassed — standard CPython caveat; prefer super(EasyIMUSensor, self).
            super(self.__class__, self).__init__(bus = bus)

            # on GPG3 we ask that the IMU be at the back of the robot, facing outward
            # We do not support the IMU on GPG2 but leaving the if statement in case
            if bus != "RPI_1SW":
                self.BNO055.set_axis_remap( BNO055.AXIS_REMAP_X,
                                BNO055.AXIS_REMAP_Z,
                                BNO055.AXIS_REMAP_Y,
                                BNO055.AXIS_REMAP_POSITIVE,
                                BNO055.AXIS_REMAP_NEGATIVE,
                                BNO055.AXIS_REMAP_POSITIVE)
        except Exception as e:
            print("Initiating error: "+str(e))
            raise
        finally:
            sleep(0.1)  # add a delay to let the IMU stabilize before control panel can pull from it
            ifMutexRelease(self.use_mutex)

    def reconfig_bus(self):
        """
        Use this method when the `InertialMeasurementUnit Sensor`_ becomes unresponsive but it's still plugged into the board.
        There will be times when due to improper electrical contacts, the link between the sensor and the board gets disrupted - using this method restablishes the connection.

        .. note::

           Sometimes the sensor won't work just by calling this method - in this case, switching the port will do the job. This is something that happens
           very rarely, so there's no need to worry much about this scenario.


        """

        ifMutexAcquire(self.use_mutex)
        self.BNO055.i2c_bus.reconfig_bus()
        ifMutexRelease(self.use_mutex)

    def safe_calibrate(self):
        """
        Once called, the method returns when the magnetometer of the `InertialMeasurementUnit Sensor`_ gets fully calibrated. Rotate the sensor in the air to help the sensor calibrate faster.

        .. note::

           Also, this method is not used to trigger the process of calibrating the sensor (the IMU does that automatically),
           but its purpose is to block a given script until the sensor reports it has fully calibrated.

           If you wish to block your code until the sensor calibrates and still have control over your script, use
           :py:meth:`~di_sensors.easy_inertial_measurement_unit.EasyIMUSensor.safe_calibration_status` method along with a ``while`` loop to continuously check it.

        """

        # Busy-poll the calibration level until the magnetometer reports 3 (fully
        # calibrated); read errors are mapped to -1 so polling simply continues.
        status = -1
        while status < 3:
            ifMutexAcquire(self.use_mutex)
            try:
                new_status = self.BNO055.get_calibration_status()[3]
            except:
                new_status = -1
            finally:
                ifMutexRelease(self.use_mutex)
            if new_status != status:
                status = new_status

    def safe_calibration_status(self):
        """
        Returns the calibration level of the magnetometer of the `InertialMeasurementUnit Sensor`_.

        :returns: Calibration level of the magnetometer. Range is **0-3** and **-1** is returned when the sensor can't be accessed.
        :rtype: int

        """
        ifMutexAcquire(self.use_mutex)
        try:
            status = self.BNO055.get_calibration_status()[3]
        except Exception as e:
            status = -1
        finally:
            ifMutexRelease(self.use_mutex)
        return status

    def convert_heading(self, in_heading):
        """
        This method takes in a heading in degrees and return the name of the corresponding heading.
        :param float in_heading: the value in degree that needs to be converted to a string.
        :return: The heading of the sensor as a string.
        :rtype: str

        The possible strings that can be returned are: ``"North"``, ``"North East"``, ``"East"``,
        ``"South East"``, ``"South"``, ``"South West"``, ``"West"``, ``"North West"``, ``"North"``.

        .. note::

           First use :py:meth:`~di_sensors.easy_inertial_measurement_unit.EasyIMUSensor.safe_calibrate` or :py:meth:`~di_sensors.easy_inertial_measurement_unit.EasyIMUSensor.safe_calibration_status`
           methods to determine if the magnetometer sensor is fully calibrated.

        """

        headings = ["North", "North East",
                    "East", "South East",
                    "South", "South West",
                    "West", "North West",
                    "North"]

        nb_headings = len(headings)-1 # North is listed twice
        # Round the angle to the nearest 45-degree sector.
        heading_index = int(round(in_heading/(360.0/nb_headings),0))

        # sometimes the IMU will return a in_heading of -1000 and higher.
        if heading_index < 0:
            heading_index = 0
        # print("heading {} index {}".format(in_heading, heading_index))
        # print(" {} ".format( headings[heading_index]))

        return(headings[heading_index])

    def safe_read_euler(self):
        """
        Read the absolute orientation.

        :returns: Tuple of euler angles in degrees of *heading*, *roll* and *pitch*.
        :rtype: (float,float,float)
        :raises ~exceptions.OSError: When the sensor is not reachable.

        """
        ifMutexAcquire(self.use_mutex)
        try:
            x, y, z = self.read_euler()
        except Exception as e:
            # print("safe read euler: {}".format(str(e)))
            # x, y, z = 0, 0, 0
            raise
        finally:
            ifMutexRelease(self.use_mutex)
        return x,y,z

    def safe_read_magnetometer(self):
        """
        Read the magnetometer values.

        :returns: Tuple containing X, Y, Z values in *micro-Teslas* units. You can check the X, Y, Z axes on the sensor itself.
        :rtype: (float,float,float)

        .. note::

           In case of an exception occurring within this method, a tuple of 3 elements where all values are set to **0** is returned.

        """
        ifMutexAcquire(self.use_mutex)
        try:
            x, y, z = self.read_magnetometer()
        except Exception as e:
            x, y, z = 0, 0, 0
        finally:
            ifMutexRelease(self.use_mutex)
        return x,y,z

    def safe_north_point(self):
        """
        Determines the heading of the north point.
        This function doesn't take into account the declination.

        :return: The heading of the north point measured in degrees. The north point is found at **0** degrees.
        :rtype: int

        .. note::

           In case of an exception occurring within this method, **0** is returned.

        """
        ifMutexAcquire(self.use_mutex)
        try:
            x, y, z = self.read_magnetometer()
        except:
            x, y, z = 0,0,0
        finally:
            ifMutexRelease(self.use_mutex)

        # using the x and z axis because the sensor is mounted vertically
        # the sensor's top face is oriented towards the front of the robot
        heading = -atan2(-x, z) * 180 / pi

        # adjust it to 360 degrees range
        if heading < 0:
            heading += 360
        elif heading > 360:
            heading -= 360

        return heading
| true |
55682f5562b715c8b8481602d3c89eecee3e1078 | Python | joaojunior/data_structures_and_algorithms | /python_implementations/tests/algorithms/test_insert_sort.py | UTF-8 | 585 | 3.359375 | 3 | [
"MIT"
] | permissive | import random
import pytest
from algorithms.sorting.insert_sort import InsertSort
@pytest.fixture
def insert_sort():
    """Provide a fresh InsertSort instance for each test."""
    return InsertSort()
def test_array_already_sorted_asc(insert_sort):
    """An already ascending array stays sorted."""
    items = list(range(5))
    insert_sort.sort(items)
    assert items == [0, 1, 2, 3, 4]
def test_array_sorted_desc(insert_sort):
    """A strictly descending array ends up ascending."""
    items = list(range(4, -1, -1))
    insert_sort.sort(items)
    assert items == [0, 1, 2, 3, 4]
def test_array_sorted_random(insert_sort):
    """A randomly permuted array ends up ascending."""
    items = random.sample(range(5), 5)
    insert_sort.sort(items)
    assert items == [0, 1, 2, 3, 4]
| true |
601ecb3cd29091911203fba9daaf77e7840dd589 | Python | xuruyi136/py1 | /HttpRequest.py | UTF-8 | 1,472 | 2.921875 | 3 | [] | no_license | import requests
import abc
'''
请求方法抽象类
'''
class AbsMethod:
    """Abstract base class for HTTP request methods."""
    @abc.abstractmethod
    def request(self, url, attach):
        """Perform the HTTP request and return the response."""
        pass
# GET method
class Get(AbsMethod):
    """Performs an HTTP GET request."""
    def request(self, url, attach) -> requests.Response:
        """Send a GET request with `attach` as the query params.

        Bug fix: this class previously issued requests.post(), so a
        caller asking for GET silently performed a POST.

        Raises requests.exceptions.HTTPError for non-2xx responses.
        """
        res = requests.get(url, attach)
        if not res.ok:
            return res.raise_for_status()
        return res
# POST method
class Post(AbsMethod):
    """Performs an HTTP POST request."""
    def request(self, url, attach) -> requests.Response:
        """Send a POST request with `attach` as the form body.

        Bug fix: this class previously issued requests.get(), so a
        caller asking for POST silently performed a GET.

        Raises requests.exceptions.HTTPError for non-2xx responses.
        """
        res = requests.post(url, attach)
        if not res.ok:
            return res.raise_for_status()
        return res
# Method factory
class MethodFactory:
    """Creates request-method objects by name."""
    def create(self, method: str) -> AbsMethod:
        """Return a new instance of the named method class ('Get' or 'Post').

        Security fix: dispatch through an explicit mapping instead of
        eval() — eval on a caller-supplied string allows arbitrary code
        execution. Unknown names now raise KeyError (was NameError).
        """
        methods = {'Get': Get, 'Post': Post}
        return methods[method]()
# HTTP request facade
class HttpReuqest:
    """Static helper for sending HTTP requests."""
    @staticmethod
    def send(url, attach=None, method='Get') -> requests.Response:
        """Send the request and return the response.

        `attach` still defaults to an empty dict for callers; using None
        as the declared default fixes the shared-mutable-default pitfall.
        """
        if attach is None:
            attach = {}
        factory = MethodFactory()
        target = factory.create(method)
        return target.request(url, attach)
from typing import List, Tuple, Dict
def add(a:int, string:str, f:float, b:bool) -> Tuple[List, Tuple, Dict, bool]:
list1 = list(range(a))
tup = (string, string, string)
d = {"a":f}
bl = b
return list1, tup, d,bl
print(add(5,"hhhh", 2.3, False))
from typing import List
def func(a, string):
list1 = []
list1.append(a)
list1.append(string)
return list1
print(list1)
| true |
1ba84d888a60206c1484f3490d28bc7f6588a6f6 | Python | iamani123/ML1819--task-104--team-15 | /Phase-2/Normalisation_Standardisation.py | UTF-8 | 830 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 29 04:50:52 2018
@author: advancerajat
"""
import numpy as np
# Output matrix: 30000 samples x 23 features (hard-coded to the dataset size).
newX=np.zeros((30000,23))
# Load feature columns 1-23 of the credit-card CSV (column 0 is skipped).
X=np.genfromtxt('credit_card.csv', delimiter = ',',usecols=(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23))
#min1=np.min(X, axis=0)
##max1=np.max(X, axis=0)
#for i in range(max1.size):
#    if((max1[i] - min1[i]) != 0) :
#        newX[:,i]=(X[:,i] - min1[i])/(max1[i] - min1[i])
#    else :
#        newX[:,i]=X[:,i]
# Column-wise mean and standard deviation for z-score standardisation.
mean1=np.mean(X, axis=0)
print(mean1)
std1=np.std(X, axis=0)
print(std1)
# Standardise each column; columns with zero variance are copied unchanged.
for i in range(23):
    if((std1[i]) != 0) :
        newX[:,i]=(X[:,i] - mean1[i])/(std1[i])
    else :
        newX[:,i]=X[:,i]
# Sanity check: standardised columns should have (near-)zero mean.
print(newX.mean(axis=0))
np.savetxt("updated_creditcard_dataset_standadrise.csv", newX, delimiter=",")
a07f46dbc91965e2fa88319d943faf391c89944f | Python | ZSerhii/Beetroot.Academy | /Homeworks/HW7.py | UTF-8 | 1,246 | 4.8125 | 5 | [] | no_license | print('Task 1. Dict comprehension exercise.\n')
print('''Make a program that given a whole sentence (a string) will make a dict
containing all unique words as keys and the number of occurrences as values.
''')
print('Result 1:\n')

vSentence = 'Verbs like Put - Put - Put no change'
# Count each word's occurrences; duplicate keys simply rewrite the same count.
words = vSentence.split()
vDict = {word: words.count(word) for word in words}
print('Input sentence:\n', vSentence, '\n', sep='')
print('Result dict:\n', vDict, sep='')

print('\nTask 2. List comprehension exercise I.\n')
print('''Consider the following list: a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100].
Now, make a program (no longer than one line) that makes a new list
containing all the values in a but no even numbers.
''')
print('Result 2:\n')

vList = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print('Original list:\n', vList, '\n', sep='')
# Keep only the odd values.
print('Result list:\n', [value for value in vList if value % 2 == 1], sep='')

print('\nTask 3. List comprehension exercise II.\n')
print('''Use a list comprehension to make a list containing tuples (i, j) where i
goes from 1 to 10 and j is corresponding i squared.
''')
print('Result 3:\n')
# Pair each base 1..10 with its square.
print('Result list:\n', [(base, base ** 2) for base in range(1, 11)], sep='')

print('\nThat\'s all Folks!')
39607dc92e188ce36fc1fd6f7f67065d86d1d5e1 | Python | cooperbaerseth/STAM_exp1 | /exp1_mnist_stam.py | UTF-8 | 8,005 | 2.671875 | 3 | [] | no_license | from __future__ import print_function
import random
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sn
import pandas as pd
# Enable matplotlib interactive mode so figures update without blocking.
plt.interactive(True)
def accuracy_eval(progress):
    """Return the fraction of the first `progress` training samples whose
    assigned cluster index matches the true MNIST label."""
    correct = 0.0
    for sample in range(progress):
        if x_clusterInd[sample] == y_train[sample]:
            correct += 1
    return correct / progress
def accuracy_eval_perDigit(progress):
    """Print per-class accuracy over the first `progress` samples, record the
    datapoints for later plotting, and refresh the confusion matrix."""
    accu = np.zeros((3, 10)) #1st row: # correct (per class)
                             #2nd row: # seen (per class)
                             #3rd row: accuracy (per class)
                             #class indexed by position

    #get correct / total
    for i in range(0,progress):
        accu[1, y_train[i]] = accu[1, y_train[i]] + 1.0
        if x_clusterInd[i] == y_train[i]:
            accu[0, y_train[i]] = accu[0, y_train[i]] + 1.0

    #get accuracy per class
    for i in range(0, 10):
        accu[2,i] = accu[0,i] / accu[1,i]

    #print accuracy per class
    print("Per Class Accuracy: ")
    for i in range(0, 10):
        print(str(i) + ": " + str(round(accu[2,i], 3)) + "\t\t", end='')
    print("\n\n")

    #add to accuracy graph datapoints to plot later
    # NOTE(review): progress/div relies on Python 2 integer division
    # (this script uses raw_input below) — it would be a float index on Python 3.
    accuGraph_points[progress/div - 1, :] = accu[2, :]

    #show confusion matrix
    conf_mat(progress)
    return
def show_AccuGraph():
    """Plot the recorded per-class accuracy over training iterations
    (digits 0-4 solid, 5-9 dashed) with the legend outside the axes."""
    fig = plt.figure(1)
    ax = plt.subplot(111)
    for i in range(0, 5):
        ax.plot(np.arange(0, x_train.shape[0]-1, div), accuGraph_points[:, i], linewidth=2.0, label=str(i))
    for i in range(5, 10):
        ax.plot(np.arange(0, x_train.shape[0]-1, div), accuGraph_points[:, i], '--', linewidth=2.0, label=str(i))
    # leave room on the right for the legend
    plt.xlim(xmax=x_train.shape[0]+3000)
    plt.title('Accuracy Per Class Over Time', fontweight='bold', fontsize=20)
    plt.ylabel('Accuracy', fontweight='bold')
    plt.xlabel('Total Iterations', fontweight='bold')
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.95, box.height])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show(fig)
    return
def conf_mat(n):
    """Render a 10x10 confusion-matrix heatmap (rows = assigned cluster,
    columns = true label) over the first `n` samples."""
    confMat = np.zeros((10, 10)) #holds the values of the confusion matrix

    #populate the matrix
    for i in range(0, n):
        confMat[x_clusterInd[i], y_train[i]] = confMat[x_clusterInd[i], y_train[i]] + 1

    #show confusion matrix
    confMat_dFrame = pd.DataFrame(confMat, range(confMat.shape[0]), range(confMat.shape[1]))
    fig = plt.figure(2)
    fig.clear()
    plt.title("True Labels", fontweight='bold')
    sn.heatmap(confMat_dFrame, annot=True, fmt='g')
    return
def initCents_firstCome():
    """Initialize each digit's centroid with the first training example of
    that digit ("first come, first served")."""
    for i in range(0, 10):
        j = 0
        while (y_train[j] != i):
            j = j + 1
        centroids[:, i] = x_train[j].flatten()
        centroidIndexs[i] = j
        x_clusterInd[j] = -1  # -1 indicates it is a centroid, not to be used in training
        print("index: " + str(j) + "\n digit: " + str(y_train[j]))
    return
def showCentroids(centroids):
    """Display all 10 centroids as 28x28 images in a 5x2 subplot grid."""
    # NOTE(review): the -1 sits *inside* len() (element-wise array subtraction),
    # so it has no effect on the loop bound — range still covers all 10 columns.
    for i in range(0,len(centroids[0,:]-1)):
        plt.subplot(5, 2, i+1)
        plt.imshow(centroids[:,i].reshape(28,28))
        plt.pause(0.005)
    plt.show()
    return
def initCents_pickRands():
    """Initialize each digit's centroid with a randomly chosen training
    example of that digit (the pick'th occurrence within the class)."""
    n = 1
    cent_picks = np.zeros((10, n)) #will hold the random pick'th instance in sample
    tr_stat = plt.hist(y_train) #get number of instances in each class

    #get indicies for random selection from train
    for i in range(0, 10):
        pick = random.randint(1, int(tr_stat[0][i]))
        # re-draw until the pick is unique across classes
        while pick in cent_picks:
            pick = random.randint(1, int(tr_stat[0][i]))
        cent_picks[i, n-1] = pick

    #populate centroids
    for i in range(0, 10):
        count = 0
        j = 0
        # scan forward until the pick'th instance of digit i is found
        while count != cent_picks[i, n-1]:
            if y_train[j] == i:
                count = count + 1
            j = j + 1
        centroids[:, i] = x_train[j-1].flatten()
        centroidIndexs[i] = j-1
    return
def initCents_rands_alt():
    """Alternative random initialization: draw uniformly from the whole
    training set until every digit has exactly one (unique) centroid."""
    filled = 0
    while filled != centroidIndexs.size:
        pick = random.randint(0, y_train.size-1)
        # accept only if this digit has no centroid yet and the index is unused
        if centroidIndexs[y_train[pick]] == -1 and pick not in centroidIndexs:
            centroidIndexs[y_train[pick]] = pick
            centroids[:, y_train[pick]] = x_train[pick].flatten()
            filled = filled + 1
    return
def initCents_avg(n):  # if n = float("inf"), average all examples together
    """Initialize each centroid as the average of `n` random examples of its
    digit, or of *all* examples of that digit when n is infinity."""
    global centroids
    if n == float("inf"):
        avgCents_full = np.zeros((28*28, 10))
        cent_count = np.zeros(10)

        #sum
        for i in range(0, x_train.shape[0]):
            avgCents_full[:, y_train[i]] = avgCents_full[:, y_train[i]] + x_train[i].flatten()
            cent_count[y_train[i]] = cent_count[y_train[i]] + 1

        #divide
        centroids = avgCents_full / cent_count[None, :]
    else:
        avgCents = np.zeros((28 * 28, 10, n))
        avgCents_ind = np.full((10, n), -1)

        #fill the matrix in order to average
        for i in range(0, n):
            filled = 0
            while filled != 10:
                pick = random.randint(0, y_train.size-1)
                if avgCents_ind[y_train[pick], i] == -1 and pick not in avgCents_ind:
                    avgCents[:, y_train[pick], i] = x_train[pick].flatten()
                    avgCents_ind[y_train[pick], i] = pick
                    filled = filled + 1

        #average by 3rd dimension
        temp = avgCents.mean(axis=2)
        for i in range(0,10):
            centroids[:, i] = temp[:, i]
    return
def initCents_close2avg():
    """Initialize each centroid with the real training example that lies
    closest (Euclidean distance) to its digit's global average image."""
    global centroids
    temp_cents = np.zeros(centroids.shape)
    best_dists = np.full((10, 1), float("inf"))

    #populate centroids with averages of all instances in training set
    initCents_avg(float("inf"))

    #pick instances that are closest to global averages per class
    for i in range(0, x_train.shape[0]):
        xi = x_train[i].flatten()
        if np.linalg.norm(xi - centroids[:, y_train[i]]) < best_dists[y_train[i]]:
            temp_cents[:, y_train[i]] = xi
            best_dists[y_train[i]] = np.linalg.norm(xi - centroids[:, y_train[i]])
            centroidIndexs[y_train[i]] = i
    centroids = temp_cents
    return
# Learning rate for the online centroid update.
alpha = 0.005
# Evaluate accuracy every `div` training samples.
div = 10000

#load mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

centroids = np.zeros((28*28, 10))
centroidIndexs = np.full((10, 1), -1)
x_clusterInd = np.zeros(x_train.shape[0]).astype(int)
accuGraph_points = np.zeros((x_train.shape[0]/div, 10)) #will hold all the datapoints over time for the accuracy of each class

###INITIALIZE CENTROIDS
#first of each class in train is centroid (first come first serve)
#initCents_firstCome()
#random of each class
#initCents_pickRands()
#initCents_rands_alt()
#average of n examples
#initCents_avg(3)
initCents_avg(float("inf"))
#closest to global average
#initCents_close2avg()

plt.figure(3)
showCentroids(centroids)
#cluster (online assignment + centroid update; Python 2 script — see raw_input below)
for i in range(0, x_train.shape[0]): #over all instances in training set (60k)
    #don't use centroids
    if i in centroidIndexs:
        continue

    xi = x_train[i].flatten()

    # find closest centroid
    smallest = float("inf") #holds distance through iterations
    for j in range(0,10):
        if np.linalg.norm(xi - centroids[:, j]) < smallest:
            smallest = np.linalg.norm(xi - centroids[:, j])
            x_clusterInd[i] = j

    #adjust centroid according to instance
    #centroids[:, x_clusterInd[i]] = centroids[:, x_clusterInd[i]] + (alpha*xi)
    # exponential moving average toward the new instance
    centroids[:, x_clusterInd[i]] = (1-alpha) * centroids[:, x_clusterInd[i]] + (alpha * xi)

    #evaluate accuracy occasionally
    if (i+1) % div == 0:
        accu = accuracy_eval(i)
        print("Overall Accuracy: " + str(round(accu*100, 3)) + "%")
        accuracy_eval_perDigit(i)
        plt.figure(4)
        showCentroids(centroids)
        raw_input('Press Enter to exit')

plt.figure(5)
showCentroids(centroids)
show_AccuGraph()
raw_input('Press Enter to exit')
print("donezo")
c88f457ca71e8697fbf8cb664e4e98d395203736 | Python | caro-01/leccion-03-python-tipos-variables-expresiones | /practica.py | UTF-8 | 86 | 3.171875 | 3 | [] | no_license | # Hilera "Hola mundo"
# Print the greeting strings, one per line.
for saludo in ('Hola Mundo', 'Hola América'):
    print(saludo)
print("Hola Costa Rica") | true |
5bba5d5605c57c7349e335c0bb58a5ad3d53b152 | Python | Vinceeee/mypy | /mutiltasking/multiprocessing_sample.py | UTF-8 | 1,528 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Advantages of multiprocessing
# - avoids the global interpreter lock; processes can be controlled individually
# Disadvantages of multiprocessing
# - consumes more memory; cross-process access is more cumbersome
import multiprocessing
from random import randint
import time
from os import getpid
def run(instance,func_name):
    """
    Look up `func_name` on `instance` and call it.

    This helper must live at module top level so it can be pickled for
    multiprocessing; bound instance methods cannot be pickled directly
    (on Python 2), so the object method cannot be passed to the pool itself.
    """
    func = getattr(instance,func_name)
    func()
class BadGirl(object):
    """Demo worker object whose skr() method is invoked via run() in a pool."""
    def __init__(self):
        pass

    def skr(self):
        # print a timestamp plus a demo message, then simulate work
        # (the Chinese string is runtime output and is left untouched)
        print(time.time())
        print(u"这个没混直接发")
        time.sleep(2.3)
def worker(procnum):
    """Report this worker's number and pid, sleep, and return the pid.
    (Python 2 statement-form print below.)"""
    print 'I am number %d in process %d' % (procnum, getpid())
    time.sleep(3.5)
    return getpid()
def foo(n,queue):
    """Sleep a random 0.5-1.0 s, print `n`, and push n**2 onto `queue`."""
    time.sleep(randint(500,1000)*0.001)
    print(n)
    queue.put(n**2)
def sample_pool():
    """Map worker() over 0..4 with a 5-process pool and print the results."""
    pool = multiprocessing.Pool(processes = 5)
    print(pool.map(worker, xrange(5)))# get the result in order
def sample_queue():
queue = multiprocessing.Queue(maxsize=100)
result = []
jobs = []
for i in xrange(1,25):
p = multiprocessing.Process(target=foo,args=(i,queue))
jobs.append(p)
p.start()
for p in jobs:
result.append(queue.get())
queue.join()
print result
def main():
    """Fire 10 asynchronous BadGirl().skr() calls on a 3-process pool and wait."""
    pool = multiprocessing.Pool(processes = 3)
    for i in xrange(10):
        # run() is the picklable top-level trampoline for the instance method
        pool.apply_async(run,args=(BadGirl(),"skr"))
    pool.close()
    pool.join()
if __name__ == '__main__':
    # entry-point guard: required for multiprocessing on spawn-based platforms
    main()
| true |
3981e979d7950cf734766af2f4c24aafc9245d68 | Python | shreya-chow/FinalProject | /finalproj_test.py | UTF-8 | 3,001 | 2.625 | 3 | [] | no_license | from finalproj import *
import unittest
class GetDataTests(unittest.TestCase):
    """Integration tests for fetching Yelp/Google data (zip code 48104)."""
    def testYelpGetData(self):
        table()
        g_id = getgoogledata("48104")
        y_data = getyelpdata(g_id)
        self.assertEqual(type(y_data), list)
        self.assertEqual(type(y_data[8]), dict)
        self.assertTrue("price" in y_data[5])
        self.assertTrue("name" in y_data[67])
        self.assertTrue(len(y_data) > 90)
    def testGoogleGetData(self):
        table()
        g_id = getgoogledata("48104")
        # Bug fix: assertTrue(type(x), T) treats T as the failure *message*
        # and always passes (a type object is truthy). Use assertIsInstance.
        self.assertIsInstance(g_id, int)
        statement1 = "SELECT lat,lng FROM Google WHERE zipcode = ?"
        cur.execute(statement1, ("48104",))
        lat,lng = cur.fetchone()
        self.assertIsInstance(lat, float)
        self.assertIsInstance(lng, float)
class StoreDataTests(unittest.TestCase):
    """Integration tests for the Yelp/Google database tables."""
    def testyelpdb(self):
        table()
        g_id = getgoogledata("48104")
        y_data = getyelpdata(g_id)
        statement = "SELECT * FROM Yelp"
        cur.execute(statement)
        results = cur.fetchall()
        self.assertEqual(len(results), 100)
        # Bug fix: assertTrue(type(x), T) treats T as the failure *message*
        # and always passes. Use assertIsInstance for a real check.
        self.assertIsInstance(results, list)
        self.assertEqual(type(results[0][0]), str)
        self.assertEqual(type(results[0][1]), float)
        self.assertEqual(type(results[0][2]), float)
        # rating must fall in Yelp's 0-5 range
        self.assertTrue(results[0][1] > 0 and results[0][1] <= 5)
    def testgoogledb(self):
        table()
        g_id = getgoogledata("48104")
        statement = "SELECT * FROM Google"
        cur.execute(statement)
        results = cur.fetchall()
        self.assertIsInstance(results, list)
        self.assertEqual(len(results[0]), 6)
        self.assertTrue(results[0][0].isnumeric())
class ProcessDataTests(unittest.TestCase):
    """Integration tests for the data-processing / visualization helpers.

    Bug fix throughout: assertTrue(type(x), T) treats T as the failure
    *message* and always passes (a type object is truthy); these vacuous
    assertions were replaced with assertIsInstance.
    """
    def testvisualization1(self):
        table()
        g_id = getgoogledata("48104")
        y_data = getyelpdata(g_id)
        results = getdistance(g_id, showViz = False)
        self.assertIsInstance(results, list)
        for x in results:
            self.assertEqual(type(x), float)
        self.assertTrue(len(results) > 10)
    def testvisualization2(self):
        table()
        g_id = getgoogledata("48104")
        y_data = getyelpdata(g_id)
        results = getprice(g_id, showViz = False)
        self.assertIsInstance(results, list)
        for x in results:
            # Yelp price levels run from 1 to 4
            self.assertTrue(x <= 4)
    def testvisualization3(self):
        table()
        g_id = getgoogledata("48104")
        y_data = getyelpdata(g_id)
        results = category(g_id, showViz = False)
        self.assertIsInstance(results, dict)
        self.assertTrue(len(results.keys()) > 10)
        self.assertTrue("Sandwiches" in results)
    def testvisualization4(self):
        table()
        g_id = getgoogledata("48104")
        y_data = getyelpdata(g_id)
        results = groupedbar(g_id, showViz = False)
        self.assertIsInstance(results, list)
        for sublist in results:
            self.assertIsInstance(sublist, list)
# Discover and run all test cases in this module.
unittest.main()
| true |
2d54d6ef6fd87412443137c292d002e2cf220e06 | Python | rxharja/taxonomic_protein_analysis | /app/tools.py | UTF-8 | 7,688 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python3
import subprocess,os
from app.ld_json import Ld_json
from app.splitter import Splitter
#This is our swiss army knife class. I went back and forth on structuring these methods defined in this class but decided on just putting them all into one class called tools because that would have been too many tiny files, maybe in the future I'll refactor it to be that way
class Tools:
#import our outputs from the json file
out = Ld_json().tools
    def __init__(self,path="./outputs/"):
        """Initialise all output-file path slots and create the output directory.

        Each attribute below holds the pathway (string) of a file once the
        corresponding pipeline step has produced it; None means "not yet".
        """
        #store pathways of each file as they're generated as strings. Make output directory if it does not exist upon initialising
        if not os.path.isdir(path): os.mkdir(path)
        self.fasta = None
        self.alignment_file = None
        self.consensus = None
        self.db = None
        self.blast_file = None
        self.top_250 = None
        self.list_of_acc = None
        self.plot_file = None
        self.motifs_file = None
        self.tree_file = None
        self.path = path
        self.splitter = Splitter()
        # default upper bound on sequences kept when filtering
        self.bb = 1000
    @staticmethod
    def throw_err(outp):
        """Print the error message `outp` and terminate the program."""
        #some reusable code to print an error the screen and then exit the program
        print(outp)
        exit()
@staticmethod
def check_file(*argv):
#takes each file passed into function as a list, evaluates if they exist and appends them to a list. If they all exist, returns true, otherwise false.
bools = []
for arg in argv:
bools += [os.path.isfile(arg)]
if False in bools:
return False
return True
    @staticmethod
    def run(inp):
        """Run the command string `inp` through the shell; return its exit status.

        NOTE(review): shell=True with interpolated filenames is injection-prone
        if any path ever contains shell metacharacters.
        """
        #Just made it easier to run suprocess.call to shell
        return subprocess.call(inp,shell=True)
    #this takes our dataset, takes the accessions, and writes them all into a file called taxon_protein_accs.fasta
    def write(self,inp_f,p="protein",t="taxon",alt=""):
        """Write every accession in `inp_f` ({species: [accessions]}) to a file.

        The default filename is "<taxon>_<protein>_accs.fasta"; pass `alt` to
        override it. The resulting path is stored on self.list_of_acc.
        """
        p = p.replace(" ","_")
        t = t.replace(" ","_")
        title = self.path
        if alt == "":
            title +="{}_{}_accs.fasta".format(t,p)
        else:
            title += alt #branch here if someone decided to include a different title
        with open(title,"w+") as f: #write all of our accessions into the file
            for species in inp_f.keys():
                for acc in inp_f[species]:
                    f.write(acc+"\n")
        self.list_of_acc = title #assign our list_of_acc file to our state
    def align(self,fasta,title="alignment.fasta"):
        """Align `fasta` with clustalo; store the result path on self.alignment_file.

        Exits the program (via throw_err) when the fasta file does not exist.
        """
        #calls clustalo to align the fasta file and outputs it to default alignment.fasta in outputs folder
        title = self.path + title.replace(" ","_") #handle any spaces in our title
        self.fasta = fasta #assign the fasta input here as the fasta for this class
        if self.check_file(self.fasta): #make sure the file exists
            self.run("clustalo -i {} -t protein --threads 12 --force -o {}".format(self.fasta,title))
            self.alignment_file = title #then we assign our title as our alignment_file location
        else:
            self.throw_err(self.out['alignment_err'])
    def cons(self,title="consensus.fasta"):
        """Build a consensus sequence of the alignment with EMBOSS `cons`.

        Requires align() to have run first; exits otherwise. The result path
        is stored on self.consensus.
        """
        #calls cons from emboss tools to create a conensus sequence of our aligned sequences, outputs as default consensus.fasta to outputs folder
        title = self.path + title.replace(" ","_") #handle spaces in title, make sure our file exists, then run cons on our alignment file. handle lack of alignment file
        if self.check_file(self.alignment_file):
            self.run("cons -sprotein1 {} -outseq {} -auto Y".format(self.alignment_file,title))
            self.consensus = title #assign location of consensus sequence to state
        else:
            self.throw_err(self.out['consensus_err'])
    def blast(self,db_file="output_db",b_file="blastp.out"):
        """Build a protein BLAST database from the fasta, then blastp the
        consensus against it (tabular output, max 1 HSP per hit).

        Requires both the consensus and fasta files to exist; exits otherwise.
        Result paths are stored on self.db and self.blast_file.
        """
        #runs two processes, first creates a blast database given fasta file to output_db
        if self.check_file(self.consensus,self.fasta): #make sure we have the necessary files, fasta and consensus.
            db_file = self.path + db_file.replace(" ","_") #handle irregularities in title
            b_file = self.path + b_file.replace(" ","_")
            self.run("makeblastdb -in {} -dbtype prot -out {}".format(self.fasta,db_file)) #make the db
            self.db = db_file #set it to state
            self.run("blastp -db {} -query {} -max_hsps 1 -outfmt 6 > {}".format(self.db,self.consensus,b_file)) #run the blast
            self.blast_file = b_file #set the file to the state
        else:
            self.throw_err(self.out['blast_err'])
    #runs plotcon with the alignment file and some predefined variables. Will take another alignment file as an input but it defaults to the one saved to the state if none provided
    def plot(self,algn_file="",winsize='4',graph='svg',title="plotcon"):
        """Run EMBOSS plotcon on the alignment and display the resulting image.

        Falls back to self.alignment_file when `algn_file` is empty; exits if
        neither is available. The plot path is stored on self.plot_file.
        """
        if not algn_file:
            if self.alignment_file:
                algn_file = self.alignment_file
            else:
                self.throw_err(self.out['plot_err'])
        title = title.replace(" ","_")
        self.run("plotcon {} -winsize {} -graph {} -gdirectory {} -goutfile {} -auto Y".format(algn_file,winsize,graph,self.path,title))
        self.run("(display './outputs/{}.{}' &)".format(title,graph)) #run the display in a subshell as to not stop the script from running
        self.plot_file =self.path+ title + '.svg' #save pathway of plot to state
def motifs(self, title, acc="", align=""):
    """Run motif scanning via self.splitter.process_motifs and return the report text.

    Prefers the accession list and alignment stored on the instance; otherwise
    uses the `acc` / `align` arguments. Reports an error via self.throw_err
    when neither source is available. Stores the report path in
    self.motifs_file.
    """
    title = title.replace(" ", "_")
    if self.check_file(self.list_of_acc, self.alignment_file):
        acc = self.list_of_acc
        align = self.alignment_file
    elif acc == "" or align == "":
        # BUG FIX: the original called the bare name `throw_err`, which raises
        # NameError at runtime -- it is a method on this class.
        self.throw_err(self.out['motif_err'])
    self.motifs_file = self.splitter.process_motifs(align, acc, title)
    # BUG FIX: close the report file instead of leaking the handle.
    with open(self.motifs_file, 'r') as report:
        return report.read()
def filter(self, max_seq, title="filtered_alignment.fasta"):
    """Keep only the top `max_seq` hits and subset the alignment to them.

    Reads the BLAST tabular output (or, failing that, the stored accession
    list), writes the first `max_seq` accessions to a file, then runs pullseq
    to extract those sequences from the current alignment. Updates
    self.alignment_file and self.list_of_acc to point at the new files.
    """
    counter = 0
    title = title.replace(" ","_")
    outf = self.path + "accessions_{}_".format(max_seq) + title
    filtered = self.path + title + "filtered.fasta"
    # Prefer the BLAST output when a previous blast() run produced one.
    file_to_process = self.list_of_acc
    if self.blast_file: file_to_process = self.blast_file
    with open(file_to_process,'r') as bf:
        # NOTE(review): append mode means re-running with the same title keeps
        # old accessions in the file -- confirm this is intended.
        with open(outf,"a") as out:
            for line in bf:
                if counter >= max_seq: break #counter limit defined by max_seq
                counter += 1
                # Second whitespace-separated field of each line (presumably
                # the hit accession -- verify against the blastp -outfmt 6 output).
                out.write(line.split()[1]+"\n")
    self.run("/localdisk/data/BPSM/Assignment2/pullseq -i {} -n {} > {}".format(self.alignment_file,outf,filtered))
    # From now on the filtered files are the working alignment/accession list.
    self.alignment_file,self.list_of_acc = filtered,outf
def filter_redundant(self, fasta, data, title):
    """Remove redundant sequences via self.splitter.process_redundant.

    Writes the de-duplicated fasta to '<path><title>_no_redundant.fasta',
    stores that path in self.fasta, and returns (raw fasta text, path).
    """
    out_path = self.path + title.replace(" ", "_") + "_no_redundant.fasta"
    raw_fasta, self.fasta = self.splitter.process_redundant(fasta, data, out_path)
    return raw_fasta, self.fasta
def set_bb(self):
    """Prompt for the phylogenetic-tree bootstrap value and store it in self.bb.

    Only integers >= 1000 are accepted (the stated minimum); invalid input
    leaves self.bb unchanged and prints an explanation.
    """
    val = input("4. Phylogenetic Tree Boostrap Value(min 1000): ")
    try:
        number = int(val)
    except ValueError:
        # Narrowed from a bare `except:` so unrelated errors are not swallowed.
        print("Your value must be an integer")
        return
    if number >= 1000:
        self.bb = number
    else:
        # Message fixed: 1000 itself is accepted, so "at least", not "greater than".
        print("Your value must be at least 1000")
def tree(self):
    """Build a phylogenetic tree with IQ-TREE from the current alignment and print the report.

    Uses the bootstrap value stored in self.bb (see set_bb) for both -alrt and
    -bb, and records the report path in self.tree_file.
    """
    subprocess.call('iqtree -s {} -m MFP -nt AUTO -alrt {} -bb {} -bnni'.format(self.alignment_file, self.bb, self.bb), shell=True)
    self.tree_file = self.alignment_file + '.iqtree'
    # BUG FIX: use a context manager so the report handle is closed
    # (the original `print(open(...).read())` leaked it).
    with open(self.tree_file, 'r') as report:
        print(report.read())
| true |
aca05b63fa7f5048ecd6348315034b7f1bc98a30 | Python | joshuahonguyen/algorithm-datastructure-practice | /list_temple.py | UTF-8 | 491 | 3.734375 | 4 | [] | no_license | list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Print every prefix of the module-level `list` twice per line: first reversed,
# then in original order. Pass 1 grows the prefix from one element to the full
# list; pass 2 shrinks it from the full list back down to one element.
# (Relies on `list` being defined earlier in the file -- it shadows the builtin.)
for size in range(1, len(list) + 1):
    prefix = list[:size]
    print(prefix[::-1], prefix)
for size in range(len(list), 0, -1):
    prefix = list[:size]
    print(prefix[::-1], prefix)
0954a0177a4f2c14607724195de90f423139ed2f | Python | XiaoxiaoLiu/pyLAR | /core/ialm.py | UTF-8 | 4,745 | 3.09375 | 3 | [] | no_license | """ialm.py
Implements the inexact Lagrangian multiplier approach (IALM) to solve
the matrix deconvolution problem
\min_{P,C} ||P||_* + \gamma ||C||_1, s.t. ||M-P-C||_{fro} < eps
that was proposed as an approach to solve Candes et al.'s robust
PCA formulation, cf.,
[1] Candes et al., "Robust Principal Component Analysis?",
In: Journal of the ACM, Vol. 58, No. 3, 2011
Reference for the IALM algorithm:
[2] Lin et al., "The Augmented Lagrangian Multiplier Method
for Exact Recovery of Corrupted Low-Rank Matrices", 2011
    (available online on arXiv).
"""
__license__ = "Apache License, Version 2.0"
__author__ = "Roland Kwitt, Kitware Inc., 2013"
__email__ = "E-Mail: roland.kwitt@kitware.com"
__status__ = "Development"
import os
import sys
import time
import numpy as np
from optparse import OptionParser
import warnings
def main(argv=None):
    """Functionality to test the module from the command line.

    Parses -i/--dat (input matrix file), -g/--gam (gamma weight) and
    -s/--sav (optional output path), runs recover() on the loaded matrix,
    and optionally writes the rounded low-rank part to disk.
    """
    if argv is None:
        argv = sys.argv
    parser = OptionParser()
    parser.add_option("-i", "--dat", help="data file")
    parser.add_option("-g", "--gam", help="gamma value", type="float")
    parser.add_option("-s", "--sav", help="save to")
    (options, args) = parser.parse_args()
    t0 = time.clock()  # NOTE: time.clock() was removed in Python 3.8; perf_counter() is the modern equivalent.
    X = np.genfromtxt(options.dat)
    t1 = time.clock()
    print("data loading took %.2g [sec]" % (t1 - t0))
    m, n = X.shape
    print("data (%d x %d) loaded in %.2g [sec]" % (m, n, (t1 - t0)))
    # BUG FIX: recover() returns a 6-tuple; the original unpacked it into
    # three names, which raises ValueError. Only the first three are used here.
    low_rank, sparse, n_iter = recover(X, options.gam)[:3]
    if options.sav is not None:
        # save rounded low-rank result - usefull for images
        np.savetxt(options.sav, np.round(low_rank), delimiter=' ')
def recover(D, gamma=None):
    """Recover low-rank and sparse part via inexact ALM (Alg. 5 of [2]).

    Paramters
    ---------
    D : numpy ndarray, shape (N, D)
        Input data matrix.
    gamma : float, default = None
        Weight on sparse component. If 'None', then gamma = 1/sqrt(max(D,N))
        as shown in [1] to be the optimal choice under a set of suitable
        assumptions.

    Returns
    -------
    (A_hat, E_hat, k, rank_est, non_zero, abs_sum_sparse):
        A_hat : low-rank part of the data (same shape as D)
        E_hat : sparse part of the data (same shape as D)
        k : number of iterations performed
        rank_est : rank of A_hat at the last 10-iteration checkpoint
        non_zero : count of non-zero entries of E_hat at that checkpoint
        abs_sum_sparse : sum of |E_hat| at that checkpoint
    """
    m, n = D.shape
    Y=D
    if gamma is None:
        gamma = 1/np.sqrt(np.amax([m, n]))
    tol = 1e-07
    max_iter = 1000
    # l2n is the (matrix) 2-norm, i.e., the max singular value
    l2n = np.linalg.norm(Y, ord=2)
    # l2i is the 'inf' norm, i.e., the max. abs. value of Y
    l2i = np.linalg.norm(np.asarray(Y).ravel(), ord=np.inf)
    # computes J(Y) = max(||Y||_2,\gamma^{-1}||Y||_{\inf}), cf. [2], eq. (10)
    dual_norm = np.amax([l2n, l2i/gamma])
    # computes line 1 in Alg. 5 of [2]
    Y = Y/dual_norm
    A_hat = np.zeros(D.shape)
    E_hat = np.zeros(D.shape)
    rank_est = -1
    non_zero = -1
    # cf. section "Choosing Parameters" of [2]
    # NOTE: from here on `m` is REUSED as the penalty parameter mu (the row
    # count read from D.shape above is no longer needed).
    m = 1.25/l2n
    m_b = m*1e07
    rho = 1.5
    sv = 10
    # Frobenius norm of the original matrix
    D_fro = np.linalg.norm(D, ord='fro')
    k=0
    total_svd=0
    converged=False
    while not converged:
        # start timer for k-th iteration
        # NOTE(review): t0/t1 are measured but never reported; time.clock()
        # is also removed in Python 3.8+.
        t0 = time.clock()
        # part of line 4 of Alg. 5 (soft-thresholding for the sparse part)
        tmp = D-A_hat+(1/m)*Y
        E_hat = np.maximum(np.asmatrix(tmp)-gamma/m,0)
        E_hat = E_hat + np.minimum(np.asmatrix(tmp)+gamma/m,0)
        # line 4 of Alg. 5 (singular-value thresholding for the low-rank part)
        U, S, V = np.linalg.svd(D-E_hat+(1/m)*Y, full_matrices=False)
        # number of singular values above the 1/mu threshold
        svp = len(np.where(S>1/m)[0])
        if svp < sv:
            sv = np.amin([svp+1, n])
        else:
            sv = np.amin([svp+np.round(0.05*n), n])
        # NOTE(review): np.mat/np.asmatrix are deprecated in modern NumPy --
        # consider np.asarray with '@' if this file is ever ported.
        A_hat = np.mat(U[:,0:svp]) * np.diag(S[0:svp]-1/m) * np.mat(V[0:svp,:])
        total_svd = total_svd+1
        # line 8 of Alg. 5 (dual variable update on the residual)
        Z = D-A_hat-E_hat
        Y = Y+m*Z
        # lno. (9) of Alg. 5 in [2] (increase mu, capped at m_b)
        m = np.amin([m*rho, m_b])
        t1 = time.clock()
        # eval. stopping crit. (relative Frobenius residual)
        conv_crit = np.linalg.norm(Z, ord='fro')/D_fro
        if conv_crit < tol:
            converged = True
        # Progress report every 10 iterations; since k starts at 0 this also
        # guarantees abs_sum_sparse is defined before the final return.
        if k % 10 == 0:
            rank_est = np.linalg.matrix_rank(A_hat)
            non_zero = len(np.where(np.asarray(np.abs(E_hat)).ravel()>0)[0])
            abs_sum_sparse =np.sum(np.abs(E_hat))
            print ("[iter: %.4d]: rank(P) = %.4d, |C|_0 = %.4d, crit=%.10f, total sparse =%.4d" %
                   (k, rank_est, non_zero, conv_crit,abs_sum_sparse))
        # handle non-convergence
        k = k+1
        if not converged and k>max_iter:
            warnings.warn("terminate after max. iter.", UserWarning)
            converged = True
    return (A_hat, E_hat, k, rank_est,non_zero, abs_sum_sparse)
# Script entry point: `python ialm.py -i <data file> -g <gamma> [-s <out>]`.
if __name__ == "__main__":
    main()
| true |
b01c1d9d1eb9716771cfb739b424d1f46146e71d | Python | 0xB9/MyBBscan | /version_check.py | UTF-8 | 734 | 2.609375 | 3 | [] | no_license | from huepy import *
import requests
import json
# Version of this copy of MyBBscan; compared against the newest release tag.
currentVersion = "v3.1.1"
# GitHub API endpoint returning the latest published release as JSON.
repo = "https://api.github.com/repos/0xB9/MyBBscan/releases/latest"
# NOTE(review): this network call runs at import time; when the request fails,
# the json parsing / "tag_name" lookup below may raise -- confirm intended.
response = requests.get(repo)
release = json.loads(response.text or response.content)
latestVersion = release["tag_name"]
def checkVersion():
    """Print whether this copy of MyBBscan matches the latest GitHub release.

    Compares the module-level `currentVersion` against `latestVersion` fetched
    at import time; falls back to a warning when the release lookup failed.
    """
    if not response.ok:
        # Release lookup failed; there is nothing to compare against.
        print (info(yellow(f"Failed to check for updates, try again later.")))
        print ("-"*60)
        return
    if latestVersion == currentVersion:
        print (good(lightgreen("MyBBscan is up to date")))
        print ("-"*60)
        return
    print (bad(red(f"MyBBscan out of date. The latest version is {latestVersion}")))
    print (info(white("Visit https://github.com/0xB9/MyBBscan/releases/latest to download the latest version.")))
    print ("-"*95)
| true |
5d834c66a34746d4ff747112b55455e7e449e82d | Python | Shaonianlan/Python_exercise | /python_test/xml_parse/test.py | UTF-8 | 682 | 3.265625 | 3 | [] | no_license | from xml.sax.handler import ContentHandler
from xml.sax import parse
class HeadlineHandler(ContentHandler):
    """SAX handler that collects the text of every <h1> element.

    Completed headlines are appended to the list passed to the constructor.
    """

    # Class-level default: not currently inside an <h1> element.
    in_handline = False

    def __init__(self, headlines):
        ContentHandler.__init__(self)
        self.headlines = headlines
        self.data = []  # character chunks of the headline being read

    def startElement(self, name, attrs):
        """Begin buffering character data when an <h1> opens."""
        if name == 'h1':
            self.in_handline = True

    def characters(self, string):
        """Buffer text, but only while inside an <h1> element."""
        if not self.in_handline:
            return
        self.data.append(string)

    def endElement(self, name):
        """On </h1>, flush the buffered chunks as one headline string."""
        if name != 'h1':
            return
        text = ''.join(self.data)
        self.data = []
        self.in_handline = False
        self.headlines.append(text)
# Parse website.xml with the handler above and print every <h1> headline found.
headlines = []
parse('website.xml',HeadlineHandler(headlines))
print("the following<h1>:")
for h in headlines:
    print(h)
| true |
03679184a023e639884230ab4c8d064a055a29dd | Python | lkhamsurenl-zz/research | /HolyImpl/src/model/grid.py | UTF-8 | 16,915 | 3.265625 | 3 | [] | no_license | import copy
from src.model.edge import Edge
from src.model.graph import Graph
from src.model.vertex import Vertex
from src.model.weight import Weight
from src.algorithms.traversal import bfs
__author__ = 'Luvsandondov Lkhamsuren'
class Grid(Graph):
"""
Grid is subclass of Graph with grid structure.
"""
def __init__(self, g, m, n):
"""
Create m x n grid graph with genus g.
:return: Nothing.
"""
self.width = m
self.height = n
graph = __g1_grid_graph__(m, n) if g == 1 else __g2_grid_graph__(m, n)
self.vertices = graph.vertices
self.faces = graph.faces
self.genus = graph.genus
##################### Specific grid graphs #############################
def __g1_grid_graph__(m, n):
"""
Create m x n grid graph with genus 1.
1st row and last column are the homology cycles.
Bottom left face is the MSSP face with top right vertex of this face being the first source.
:param m:
:param n:
:return:
"""
# Generate vertices and faces.
vs = [[Vertex((i, j)) for j in range(n)] for i in range(m)]
fs = [[Vertex((i, j)) for j in range(n)] for i in range(m)]
# Generate all edges and its duals.
for i in range(m):
for j in range(n):
v = vs[i][j]
# Dart (i, j) -> (i + 1, j), its reverse, dual, dual reverse.
neighbor = vs[(i + 1) % m][j] # Compute left and right face coordinates.
left = fs[i][j]
right = fs[i][(j - 1) % n]
if i == m - 1:
Edge(v, neighbor, Weight(1, [0, -1], 0), left, right)
else:
Edge(v, neighbor, Weight(1, [0, 0], 0), left, right)
# Dart (i, j) -> (i, j + 1), its reverse, dual, dual reverse.
neighbor = vs[i][(j + 1) % n] # Compute left and right face coordinates.
left = fs[(i - 1) % m][j]
right = fs[i][j]
if j == n - 1:
Edge(v, neighbor, Weight(1, [1, 0], 0), left, right)
else:
Edge(v, neighbor, Weight(1, [0, 0], 0), left, right)
graph = Graph(vertices=sum(vs, []), faces=sum(fs, []), genus=1)
# Num Edge(spanning tree) + Num edges(dual spanning tree) + 2 * g
# Spanning tree in original graph. Note that we first remove non-trivial homology edges from graph prior to
# computing spanning tree.
# TODO(lkhamsurenl): Figure out a way to find Spanning tree without explicit copy of the entire graph.
c_st = copy.deepcopy(graph)
for i in range(m):
c_u = c_st.get_vertex((i, n - 1))
c_v = c_st.get_vertex((i, 0))
__remove_edge__(c_u, c_v)
for j in range(n):
c_u = c_st.get_vertex((m - 1, j))
c_v = c_st.get_vertex((0, j))
__remove_edge__(c_u, c_v)
# Get spanning tree by computing BFS, starting at the current root.
# NOTE(lkhamsurenl): Assume root is at (1, 1).
spanning_tree = bfs(c_st.get_vertex((1, 1)))
# Dual spanning tree. Note that we remove all edges in spanning tree and 2g auxiliary edges (which in our case:
# (0, n-1) -> (0, 0) and (m-1, 0) -> (0, 0)).
c_g = copy.deepcopy(graph)
for v_name in spanning_tree:
u_name = spanning_tree[v_name]
if u_name != None:
__remove_edge__(c_g.get_vertex(u_name), c_g.get_vertex(v_name))
__remove_edge__(c_g.get_vertex((0, n - 1)), c_g.get_vertex((0, 0)))
__remove_edge__(c_g.get_vertex((m - 1, 0)), c_g.get_vertex((0, 0)))
# Compute dual spanning tree by computing BFS rooted at (0, 0) face.
# NOTE(lkhamsurenl): Assume root is at face (0, 0).
dual_spanning_tree = bfs(c_g.get_face((0, 0)))
leafmost = __compute_leafmost__(dual_spanning_tree)
for (u_name, v_name) in leafmost:
if u_name != None:
u = graph.get_face(u_name)
v = graph.get_face(v_name)
dart = u.neighbors[v]
dart.weight = Weight(dart.weight.length, dart.weight.homology, leafmost[(u_name, v_name)])
# Create reverse, dual, dual reverse respectively with corresponding leafmost terms.
Edge(dart.tail, dart.head, dart.weight, dart.dual.tail, dart.dual.head)
return graph
def __g2_grid_graph__(m, n):
"""
Manually create m by n grid graph with genus 2.
:return: Graph.
"""
assert m == 6 and n == 6, "We only support m = 6, n = 6 option for now."
# Build vertices and faces.
vertices = [[Vertex((i, j)) for j in range(6)] for i in range(6)]
faces = [[Vertex((i, j)) for j in range(6)] for i in range(6)]
# Build darts, its reverse, dual and dual reverse respectively.
Edge(vertices[0][0], vertices[0][1], Weight(1, [0, 0, 0, 0], 0), faces[5][3], faces[0][0])
Edge(vertices[0][0], vertices[1][0], Weight(1, [0, 0, 0, 0], 0), faces[0][0], faces[3][5])
Edge(vertices[1][0], vertices[1][1], Weight(1, [0, 0, 0, 0], 0), faces[0][0], faces[1][0])
Edge(vertices[1][0], vertices[2][0], Weight(1, [0, 0, 0, 0], 0), faces[1][0], faces[4][5])
Edge(vertices[2][0], vertices[2][1], Weight(1, [0, 0, 0, 0], 0), faces[1][0], faces[2][0])
Edge(vertices[2][0], vertices[0][0], Weight(1, [0, -1, 0, 0], 0), faces[2][0], faces[5][5])
Edge(vertices[0][0], vertices[3][1], Weight(1, [0, 1, 0, 0], 0), faces[2][0], faces[3][0])
Edge(vertices[0][0], vertices[4][0], Weight(1, [0, 0, 0, 0], 0), faces[3][0], faces[0][5])
Edge(vertices[4][0], vertices[4][1], Weight(1, [0, 1, 0, 0], 0), faces[3][0], faces[4][0])
Edge(vertices[4][0], vertices[5][0], Weight(1, [0, 0, 0, 0], 0), faces[4][0], faces[1][5])
Edge(vertices[5][0], vertices[5][1], Weight(1, [0, 1, 0, 0], 0), faces[4][0], faces[5][0])
Edge(vertices[5][0], vertices[0][0], Weight(1, [0, 0, -1, 0], 0), faces[5][0], faces[2][5])
Edge(vertices[0][1], vertices[0][2], Weight(1, [0, 0, 0, 0], 0), faces[5][4], faces[0][1])
Edge(vertices[0][1], vertices[1][1], Weight(1, [0, 0, 0, 0], 0), faces[0][1], faces[0][0])
Edge(vertices[1][1], vertices[1][2], Weight(1, [0, 0, 0, 0], 0), faces[0][1], faces[1][1])
Edge(vertices[1][1], vertices[2][1], Weight(1, [0, 0, 0, 0], 0), faces[1][1], faces[1][0])
Edge(vertices[2][1], vertices[2][2], Weight(1, [0, 0, 0, 0], 0), faces[1][1], faces[2][1])
Edge(vertices[2][1], vertices[3][1], Weight(1, [0, 0, 0, 0], 0), faces[2][1], faces[2][0])
Edge(vertices[3][1], vertices[3][2], Weight(1, [0, 0, 0, 0], 0), faces[2][1], faces[3][1])
Edge(vertices[3][1], vertices[4][1], Weight(1, [0, 0, 0, 0], 0), faces[3][1], faces[3][0])
Edge(vertices[4][1], vertices[4][2], Weight(1, [0, 0, 0, 0], 0), faces[3][1], faces[4][1])
Edge(vertices[4][1], vertices[5][1], Weight(1, [0, 0, 0, 0], 0), faces[4][1], faces[4][0])
Edge(vertices[5][1], vertices[5][2], Weight(1, [0, 0, 0, 0], 0), faces[4][1], faces[5][1])
Edge(vertices[5][1], vertices[0][4], Weight(1, [0, -1, -1, 0], 0), faces[5][1], faces[5][0])
Edge(vertices[0][2], vertices[0][0], Weight(1, [1, 0, 0, 0], 0), faces[5][5], faces[0][2])
Edge(vertices[0][2], vertices[1][2], Weight(1, [0, 0, 0, 0], 0), faces[0][2], faces[0][1])
Edge(vertices[1][2], vertices[1][3], Weight(1, [0, 0, 0, 0], 0), faces[0][2], faces[1][2])
Edge(vertices[1][2], vertices[2][2], Weight(1, [0, 0, 0, 0], 0), faces[1][2], faces[1][1])
Edge(vertices[2][2], vertices[2][3], Weight(1, [0, 0, 0, 0], 0), faces[1][2], faces[2][2])
Edge(vertices[2][2], vertices[3][2], Weight(1, [0, 0, 0, 0], 0), faces[2][2], faces[2][1])
Edge(vertices[3][2], vertices[3][3], Weight(1, [0, 0, 0, 0], 0), faces[2][2], faces[3][2])
Edge(vertices[3][2], vertices[4][2], Weight(1, [0, 0, 0, 0], 0), faces[3][2], faces[3][1])
Edge(vertices[4][2], vertices[4][3], Weight(1, [0, 0, 0, 0], 0), faces[3][2], faces[4][2])
Edge(vertices[4][2], vertices[5][2], Weight(1, [0, 0, 0, 0], 0), faces[4][2], faces[4][1])
Edge(vertices[5][2], vertices[5][3], Weight(1, [0, 0, 0, 0], 0), faces[4][2], faces[5][2])
Edge(vertices[5][2], vertices[0][5], Weight(1, [0, -1, -1, 0], 0), faces[5][2], faces[5][1])
Edge(vertices[0][0], vertices[0][4], Weight(1, [0, 0, 0, 0], 0), faces[5][0], faces[0][3])
Edge(vertices[0][0], vertices[1][3], Weight(1, [-1, 0, 0, 0], 0), faces[0][3], faces[0][2])
Edge(vertices[1][3], vertices[1][4], Weight(1, [0, 0, 0, 0], 0), faces[0][3], faces[1][3])
Edge(vertices[1][3], vertices[2][3], Weight(1, [0, 0, 0, 0], 0), faces[1][3], faces[1][2])
Edge(vertices[2][3], vertices[2][4], Weight(1, [0, 0, 0, 0], 0), faces[1][3], faces[2][3])
Edge(vertices[2][3], vertices[3][3], Weight(1, [0, 0, 0, 0], 0), faces[2][3], faces[2][2])
Edge(vertices[3][3], vertices[3][4], Weight(1, [0, 0, 0, 0], 0), faces[2][3], faces[3][3])
Edge(vertices[3][3], vertices[4][3], Weight(1, [0, 0, 0, 0], 0), faces[3][3], faces[3][2])
Edge(vertices[4][3], vertices[4][4], Weight(1, [0, 0, 0, 0], 0), faces[3][3], faces[4][3])
Edge(vertices[4][3], vertices[5][3], Weight(1, [0, 0, 0, 0], 0), faces[4][3], faces[4][2])
Edge(vertices[5][3], vertices[5][4], Weight(1, [0, 0, 0, 0], 0), faces[4][3], faces[5][3])
Edge(vertices[5][3], vertices[0][0], Weight(1, [0, -1, -1, 1], 0), faces[5][3], faces[5][2])
Edge(vertices[0][4], vertices[0][5], Weight(1, [0, 0, 0, 0], 0), faces[5][1], faces[0][4])
Edge(vertices[0][4], vertices[1][4], Weight(1, [-1, 0, 0, 0], 0), faces[0][4], faces[0][3])
Edge(vertices[1][4], vertices[1][5], Weight(1, [0, 0, 0, 0], 0), faces[0][4], faces[1][4])
Edge(vertices[1][4], vertices[2][4], Weight(1, [0, 0, 0, 0], 0), faces[1][4], faces[1][3])
Edge(vertices[2][4], vertices[2][5], Weight(1, [0, 0, 0, 0], 0), faces[1][4], faces[2][4])
Edge(vertices[2][4], vertices[3][4], Weight(1, [0, 0, 0, 0], 0), faces[2][4], faces[2][3])
Edge(vertices[3][4], vertices[3][5], Weight(1, [0, 0, 0, 0], 0), faces[2][4], faces[3][4])
Edge(vertices[3][4], vertices[4][4], Weight(1, [0, 0, 0, 0], 0), faces[3][4], faces[3][3])
Edge(vertices[4][4], vertices[4][5], Weight(1, [0, 0, 0, 0], 0), faces[3][4], faces[4][4])
Edge(vertices[4][4], vertices[5][4], Weight(1, [0, 0, 0, 0], 0), faces[4][4], faces[4][3])
Edge(vertices[5][4], vertices[5][5], Weight(1, [0, 0, 0, 0], 0), faces[4][4], faces[5][4])
Edge(vertices[5][4], vertices[0][1], Weight(1, [0, -1, -1, 1], 0), faces[5][4], faces[5][3])
Edge(vertices[0][5], vertices[0][0], Weight(1, [0, 0, 0, 1], 0), faces[5][2], faces[0][5])
Edge(vertices[0][5], vertices[1][5], Weight(1, [-1, 0, 0, 0], 0), faces[0][5], faces[0][4])
Edge(vertices[1][5], vertices[4][0], Weight(1, [1, 0, 0, 1], 0), faces[0][5], faces[1][5])
Edge(vertices[1][5], vertices[2][5], Weight(1, [0, 0, 0, 0], 0), faces[1][5], faces[1][4])
Edge(vertices[2][5], vertices[5][0], Weight(1, [1, 0, 0, 1], 0), faces[1][5], faces[2][5])
Edge(vertices[2][5], vertices[3][5], Weight(1, [0, 0, 0, 0], 0), faces[2][5], faces[2][4])
Edge(vertices[3][5], vertices[0][0], Weight(1, [1, 0, -1, 1], 0), faces[2][5], faces[3][5])
Edge(vertices[3][5], vertices[4][5], Weight(1, [0, 0, 0, 0], 0), faces[3][5], faces[3][4])
Edge(vertices[4][5], vertices[1][0], Weight(1, [1, 0, -1, 1], 0), faces[3][5], faces[4][5])
Edge(vertices[4][5], vertices[5][5], Weight(1, [0, 0, 0, 0], 0), faces[4][5], faces[4][4])
Edge(vertices[5][5], vertices[2][0], Weight(1, [1, 0, -1, 1], 0), faces[4][5], faces[5][5])
Edge(vertices[5][5], vertices[0][2], Weight(1, [0, -1, -1, 1], 0), faces[5][5], faces[5][4])
graph = Graph(vertices=sum(vertices, []), faces=sum(faces, []), genus=2)
# Num Edge(spanning tree) + Num edges(dual spanning tree) + 2 * g
# Spanning tree in original graph.
# TODO(lkhamsurenl): Figure out a way to find Spanning tree without explicit copy of the entire graph.
c_st = copy.deepcopy(graph)
# Get spanning tree by computing BFS, starting at the current root.
# NOTE(lkhamsurenl): Assume root is at (1, 1).
spanning_tree = bfs(c_st.get_vertex((1, 1)))
# Dual spanning tree.
c_g = copy.deepcopy(graph)
for v_name in spanning_tree:
u_name = spanning_tree[v_name]
if u_name != None:
__remove_edge__(c_g.get_vertex(u_name), c_g.get_vertex(v_name))
# Compute dual spanning tree by computing BFS rooted at (0, 0) face.
# NOTE(lkhamsurenl): Assume root is at face (0, 0).
dual_spanning_tree = bfs(c_g.get_face((0, 0)))
leafmost = __compute_leafmost__(dual_spanning_tree)
for (u_name, v_name) in leafmost:
if u_name != None:
u = graph.get_face(u_name)
v = graph.get_face(v_name)
dart = u.neighbors[v]
dart.weight = Weight(dart.weight.length, dart.weight.homology, leafmost[(u_name, v_name)])
# Create reverse, dual, dual reverse respectively with corresponding leafmost terms.
Edge(dart.tail, dart.head, dart.weight, dart.dual.tail, dart.dual.head)
return graph
def g1():
"""
Manually create 3 by 3 grid graph with genus 1.
:return: Graph.
"""
# Build vertices and faces.
vertices = [[Vertex((i, j)) for j in range(3)] for i in range(3)]
faces = [[Vertex((i, j)) for j in range(3)] for i in range(3)]
# Build darts, its reverse, dual and dual reverse respectively.
Edge(vertices[0][0], vertices[0][2], Weight(1, [-1, 0], 0), faces[0][2], faces[2][2])
Edge(vertices[1][0], vertices[1][2], Weight(1, [-1, 0], 6), faces[1][2], faces[0][2])
Edge(vertices[2][0], vertices[2][2], Weight(1, [-1, 0], 4), faces[2][2], faces[1][2])
Edge(vertices[2][0], vertices[0][0], Weight(1, [0, -1], 0), faces[2][0], faces[2][2])
Edge(vertices[2][2], vertices[0][2], Weight(1, [0, -1], -3), faces[2][2], faces[2][1])
Edge(vertices[2][1], vertices[0][1], Weight(1, [0, -1], -1), faces[2][1], faces[2][0])
Edge(vertices[0][0], vertices[1][0], Weight(1, [0, 0], -8), faces[0][0], faces[0][2])
Edge(vertices[1][0], vertices[2][0], Weight(1, [0, 0], 1), faces[1][0], faces[1][2])
Edge(vertices[0][2], vertices[1][2], Weight(1, [0, 0], -1), faces[0][2], faces[0][1])
Edge(vertices[1][2], vertices[2][2], Weight(1, [0, 0], 0), faces[1][2], faces[1][1])
Edge(vertices[0][1], vertices[1][1], Weight(1, [0, 0], 0), faces[0][1], faces[0][0])
Edge(vertices[1][1], vertices[2][1], Weight(1, [0, 0], 0), faces[1][1], faces[1][0])
Edge(vertices[0][2], vertices[0][1], Weight(1, [0, 0], 0), faces[0][1], faces[2][1])
Edge(vertices[1][2], vertices[1][1], Weight(1, [0, 0], 0), faces[1][1], faces[0][1])
Edge(vertices[2][2], vertices[2][1], Weight(1, [0, 0], -1), faces[2][1], faces[1][1])
Edge(vertices[0][1], vertices[0][0], Weight(1, [0, 0], 0), faces[0][0], faces[2][0])
Edge(vertices[1][1], vertices[1][0], Weight(1, [0, 0], 0), faces[1][0], faces[0][0])
Edge(vertices[2][1], vertices[2][0], Weight(1, [0, 0], 0), faces[2][0], faces[1][0])
return Graph(vertices=sum(vertices, []), faces=sum(faces, []), genus=1)
##################### HELPER METHODS #############################
def __remove_edge__(u, v):
"""
Remove all the edges in u <-> v.
:param u: tail vertex.
:param v: head vertex.
:return: nothing
"""
du = u.neighbors[v].dual.head
dv = u.neighbors[v].dual.tail
u.remove_dart(v)
v.remove_dart(u)
du.remove_dart(dv)
dv.remove_dart(du)
def __print_spanning_tree__(pred):
print("-------------------")
for v in pred:
print("{} -> {}".format(pred[v], v))
print("-------------------")
def __compute_leafmost__(spanning_tree):
"""
Return learfmost term for each edge in following format:
(source_name, destination_name): leafmost_term
:param spanning_tree: dict of {src_name: dst_name}
:return:
"""
leafmost = {}
num_children = {}
for v_name in spanning_tree.keys():
num_children[v_name] = 0
while len(spanning_tree) != 0:
count = {}
for v_name in spanning_tree.keys():
u_name = spanning_tree[v_name]
count[v_name] = count[v_name] + 1 if v_name in count else 1
count[u_name] = count[u_name] + 1 if u_name in count else 1
# Find which ones are the leaf
for v_name in count.keys():
if count[v_name] == 1 and v_name != None:
# TODO(lkhamsurenl): Current leafmost assignment points away from the root face.
# NOTE(lkhamsurenl): Negating would reverse the direction.
leafmost[(spanning_tree[v_name], v_name)] = num_children[v_name] + 1
num_children[spanning_tree[v_name]] = num_children[spanning_tree[v_name]] + num_children[v_name] + 1 \
if spanning_tree[v_name] != None else 0
del spanning_tree[v_name]
return leafmost
| true |
79e39d933042f28a788332acbb14710547eed3f9 | Python | bucketzxm/pyquark | /src/io/aio.py | UTF-8 | 631 | 2.578125 | 3 | [
"MIT"
] | permissive | from galileo_config import AIO_MAPPINGS
'''
Aio represents analog IO of Galileo board
'''
class Aio(object):
def __init__(self, arduino_id):
if isinstance(arduino_id, int):
arduino_id = "A%d" % arduino_id
pin = AIO_MAPPINGS[arduino_id]
pin.select()
self.arduino_id = arduino_id
self.linux_id = pin.linux_id
def value(self):
# FIXME consider to open the file once in init, and seek to beginning to read every time
with open("/sys/bus/iio/devices/iio:device0/in_voltage%d_raw" % self.linux_id, 'r') as f:
return f.readline().rstrip('\r\n')
| true |
3d7b6c3525ebb38fb260e0fd8d8037b850f04dec | Python | ericwgz/TCSS554A_HW1 | /TCSS554A/processing.py | UTF-8 | 1,107 | 2.9375 | 3 | [] | no_license | from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize.treebank import TreebankWordDetokenizer
from nltk.stem.snowball import SnowballStemmer
import glob
read_files = glob.glob(".\\transcripts\\transcripts\\*.txt")
# Combine all corpus to single txt file and change characters to lower case
with open("dump.txt", "wb") as outfile:
for txtfile in read_files:
with open(txtfile, "rb") as infile:
outfile.write(infile.read().lower())
f = open('.\\dump.txt', encoding='utf-8')
# remove special characters in sentences
tokenizer = RegexpTokenizer(r'\w+')
word_tokens = tokenizer.tokenize(f.read())
f.close()
# remove stopwords from sentences
stop_words = set(stopwords.words('english'))
filtered_sentence = [w for w in word_tokens if not w in stop_words]
# stemming each word to the root
stemmer = SnowballStemmer("english")
stemmed_sentence = [stemmer.stem(w) for w in filtered_sentence]
f = open('.\\result.txt', 'w', encoding='utf-8')
f.write(TreebankWordDetokenizer().detokenize(stemmed_sentence)) | true |
9bffe8ab58f0b949ad7bf10a964a50e848f5b913 | Python | philipptrenz/climate-keywords | /scripts/script_pre_assign_files.py | UTF-8 | 2,374 | 2.609375 | 3 | [] | no_license | import argparse
import os
import pandas as pd
def main():
    """Fill in missing keyword labels from the gold-standard annotation file.

    For every evaluation CSV in the input directory (excluding bookkeeping and
    already-processed files), rows whose Label_1/Label_2 are NaN are looked up
    in the annotated-keywords file, and the result is written back out as
    '<name>_ov.csv'.
    """
    parser = argparse.ArgumentParser(description='Extracts annotations of annotated files')
    parser.add_argument('-i', '--in', help='in directory', default="data/evaluation")
    # NOTE(review): help text duplicates '-i'; this is the gold-standard label file.
    parser.add_argument('-s', '--standard', help='in directory', default="data/evaluation/annotated_keywords.csv")
    args = vars(parser.parse_args())
    directory = args['in']
    keyword_list_file = args['standard']
    # Bookkeeping files that must never be treated as evaluation inputs.
    exclude = ['precision.csv', 'annotated_keywords.csv', 'unannotated_keywords.csv', 'unannotated_keywords_sample.csv',
               'unannotated_keywords_sample_pt.csv']
    dir_files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
    # Keep plain .csv files only; skip annotated (_an) and already-overridden (_ov) outputs.
    dir_files = [file for file in dir_files if file not in exclude and not file.endswith('_an.csv')
                 and file.endswith('.csv') and not file.endswith('_ov.csv')]
    print(dir_files)
    # Gold-standard mapping: keyword -> label.
    label_df = pd.read_csv(keyword_list_file)
    labels = {row['Keyword']: row['Label'] for i, row in label_df.iterrows()}
    for file in dir_files:
        path = os.path.join(directory, file)
        df = pd.read_csv(path)
        # df = pd.read_csv(path, dtype={'Word_1': np.str, 'Score_1': 'float64', 'Label_1': 'Int32',
        #                               'Word_2': np.str, 'Score_2': 'float64', 'Label_2': 'Int32'})
        # df.columns = ['Word_1', 'Score_1', 'Label_1', 'Word_2', 'Score_2', 'Label_2']
        override = []
        for i, row in df.iterrows():
            label_1 = row['Label_1']
            label_2 = row['Label_2']
            # Missing labels read back as NaN; fill them from the gold standard.
            if str(label_1).lower() == "nan":
                label_1 = labels.get(row['Word_1'])
                if label_1:
                    label_1 = int(label_1)
            if str(label_2).lower() == "nan":
                label_2 = labels.get(row['Word_2'])
                if label_2:
                    # NOTE(review): goes through float (tolerating strings like
                    # "1.0"); asymmetric with the plain int() used for label_1
                    # -- confirm both columns have the same source format.
                    label_2 = int(float(str(label_2)))
            override.append((row['Word_1'], row['Score_1'], label_1, row['Word_2'], row['Score_2'], label_2))
        override_df = pd.DataFrame(override, columns=df.columns)
        # override_df[["Label_1", "Label_2"]] = df[["Label_1", "Label_2"]].astype(pd.Int64Dtype())
        override_df.to_csv(os.path.join(directory, f"{file.replace('.csv', '_ov.csv')}"), index=False)
        print(override_df)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| true |
389ff1c4c2fd51e2762a93598814cc279908992c | Python | sprax/1337 | /python3/test_l0152_maximum_product_subarray.py | UTF-8 | 266 | 2.71875 | 3 | [] | no_license | import unittest
from l0152_maximum_product_subarray import Solution
class Test(unittest.TestCase):
    """Unit tests for Solution.maxProduct (maximum product subarray)."""

    def test_solution(self):
        # (input array, expected maximum subarray product)
        cases = [
            ([2, 3, -2, 4], 6),
            ([-2, 0, -1], 0),
        ]
        for nums, expected in cases:
            self.assertEqual(expected, Solution().maxProduct(nums))
407a01688a3b84768d4e60139984d28881d799dd | Python | sym170030/ML | /Assignment5.py | UTF-8 | 5,725 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[51]:
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 19:27:26 2018
@author: Siddharth Mudbidri
"""
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import numpy as np
df_path = "C:\ASM exam\cds_spread5y_2001_2016.dta"
data = pd.io.stata.read_stata("C:\ASM exam\cds_spread5y_2001_2016.dta")
data.to_csv('my_stata_file.csv')
# In[52]:
cdsdata = pd.read_csv('my_stata_file.csv',
low_memory = False)
cdsdata
# In[53]:
crspdata = pd.read_csv('crsp.csv')
#d3 = pd.merge(df, df1, on='gvkey') #merging based on gvkey
crspdata
# In[54]:
cdsdata['Date'] = pd.to_datetime(cdsdata['mdate'])
cdsdata['Month']= cdsdata['Date'].dt.month
cdsdata['Year']=cdsdata['Date'].dt.year
cdsdata['quarter']='4'
# In[55]:
cdsdata
# In[56]:
cdsdata.loc[cdsdata['Month']>9,"quarter"]=4
# In[57]:
cdsdata.loc[(cdsdata['Month']>6) & (cdsdata['Month']<=9),"quarter"]=3
# In[58]:
cdsdata.loc[(cdsdata['Month']>3) & (cdsdata['Month']<=6),"quarter"]=2
# In[59]:
#cdsdata[(cdsdata['Month'])<=3,"quarter"]=1
cdsdata['gvkey'] = cdsdata['gvkey'].astype(float)
cdsdata['quarter'] = cdsdata['quarter'].astype(float)
cdsdata['Year'] = cdsdata['Year'].astype(float)
# In[60]:
cdsdata.loc[(cdsdata['Month']<=3),"quarter"]=1
# In[61]:
crspdata=crspdata.rename(columns = {'GVKEY':'gvkey'})
crspdata=crspdata.rename(columns = {'datadate':'mdate'})
crspdata
# In[62]:
crspdata
# In[76]:
crspdata['Date'] = pd.to_datetime(crspdata['mdate'])
crspdata['Month']= crspdata['Date'].dt.month
crspdata['Year']=crspdata['Date'].dt.year
crspdata['quarter']='4'
# In[77]:
crspdata['mdate'].unique()
# In[65]:
crspdata.loc[crspdata['Month']>9,"quarter"]=4
crspdata.loc[(crspdata['Month']>6) & (crspdata['Month']<=9),"quarter"]=3
crspdata.loc[(crspdata['Month']>3) & (crspdata['Month']<=6),"quarter"]=2
crspdata.loc[(crspdata['Month'])<=3,"quarter"]=1
crspdata['gvkey'] = crspdata['gvkey'].astype(float)
crspdata['quarter'] = crspdata['quarter'].astype(float)
crspdata['Year'] = crspdata['Year'].astype(float)
# In[73]:
merge = pd.merge(cdsdata[1:200], crspdata[1:200], on=['gvkey', 'quarter', 'Year'])
# In[78]:
crspdata
# In[2]:
# Keep only numeric columns for modelling.
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
merge = merge.select_dtypes(include=numerics)
# FIX: was merge.fillna(data.median()) -- `data` is never defined anywhere in
# this file; impute with the merged frame's own column medians.
merge = merge.fillna(merge.median())
merge = merge.dropna(axis=1, how='any')
#Split the dataset
TestData = merge[(merge['Year'] >= 2010) & (merge['Year'] <= 2018)]
#Initialze X and Y
Xt = TestData.drop('spread5y', axis=1)
yt = TestData['spread5y']
# Drop the bookkeeping/key columns that must not leak into the features.
Xt = Xt.drop(['Month_x', 'Month_y', 'quarter', 'Year', 'gvkey'], axis=1)
# FIX: was data[(data['Year'] < 2011)] -- `data` is never defined; the
# training split comes from the same merged frame.
TrainData = merge[(merge['Year'] < 2011)]
#splitting x and y for test data
X_train = TrainData.drop('spread5y', axis=1)
y_train = TrainData['spread5y']
X_train = X_train.drop(['Month_x', 'Month_y', 'quarter', 'Year', 'gvkey'], axis=1)
# In[3]:
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import numpy as np
# Baseline random forest on all numeric features.
randforst = RandomForestRegressor(n_estimators=50)
randforst.fit(X_train, y_train)
#randforst.score(X_test, y_test)
randforst_Pred = randforst.predict(Xt)
# FIX: the bare feature_importances_ array assigned to F_imp was dead code --
# it was immediately overwritten by the DataFrame below.
F_imp = pd.DataFrame(randforst.feature_importances_, index=X_train.columns,
                     columns=['imp']).sort_values('imp', ascending=False)
# Keep the names of the 50 most important features for the reduced models.
newfeatures = F_imp.iloc[:50, :]
newfeatures = newfeatures.index.tolist()
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE), expressed in percent."""
    actual, predicted = np.asarray(y_true), np.asarray(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return np.mean(relative_errors) * 100
# FIX: StandardScaler was used below without being imported anywhere in the file.
from sklearn.preprocessing import StandardScaler
mean_squared_error(yt, randforst_Pred)
mean_absolute_percentage_error(yt, randforst_Pred)
X_train_NewF = X_train[newfeatures]
# FIX: was X_test[newfeatures] -- `X_test` is never defined; the held-out
# split built above is Xt.
X_test_NewF = Xt[newfeatures]
# Standardise the reduced feature set (fit on train only, then transform both).
scaler = StandardScaler()
scaler.fit(X_train_NewF)
X_train_NewFt = scaler.transform(X_train_NewF)
X_test_NewFt = scaler.transform(X_test_NewF)
# In[4]:
newtrain_x = X_train[list(newfeatures)]
# FIX: was X_test (undefined); the held-out split is Xt / yt.
newtest_x = Xt[list(newfeatures)]
# Random forest on the reduced feature set, 100 trees.
regressor_100 = RandomForestRegressor(n_estimators = 100,max_depth = 3)
regressor_100.fit(newtrain_x,y_train)
pred_100 = regressor_100.predict(newtest_x)
print('Mean Accuracy at 100:', regressor_100.score(newtest_x,yt))
errors_100 = abs(pred_100 - yt)
mape_100 = 100 * (errors_100 / yt)
accuracy_100 = 100 - np.mean(mape_100)
print('Accuracy_100:', round(accuracy_100, 2), '%.')
# In[5]:
# Same model with 500 trees.
regressor_500 = RandomForestRegressor(n_estimators = 500,max_depth = 3)
regressor_500.fit(newtrain_x,y_train)
pred_500 = regressor_500.predict(newtest_x)
print('Mean Accuracy_500:', regressor_500.score(newtest_x,yt))
errors_500 = abs(pred_500 - yt)
# FIX: was 500 * (...) -- MAPE is a percentage, so the factor is always 100;
# the 500 was a copy-paste of the tree count.
mape_500 = 100 * (errors_500 / yt)
# Calculate and display accuracy
# FIX: was 500 - np.mean(mape_500) for the same copy-paste reason.
accuracy_500 = 100 - np.mean(mape_500)
print('Accuracy_500:', round(accuracy_500, 2), '%.')
# In[6]:
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import ensemble
from sklearn.metrics import mean_squared_error
import xgboost
# In[7]:
# Gradient boosting on the reduced feature set, 100 and 200 estimators.
GB_100 = ensemble.GradientBoostingRegressor(n_estimators = 100, max_depth = 3)
GB_100.fit(newtrain_x, y_train)
# FIX: was mean_squared_error(y_test, ...) -- `y_test` is never defined; the
# held-out target is yt.
mse_100 = mean_squared_error(yt, GB_100.predict(newtest_x))
print("MSE_100: %.4f" % mse_100)
# In[ ]:
GB_200 = ensemble.GradientBoostingRegressor(n_estimators = 200, max_depth = 3)
GB_200.fit(newtrain_x, y_train)
mse_200 = mean_squared_error(yt, GB_200.predict(newtest_x))
print("MSE_200: %.4f" % mse_200)
| true |
49368324baec718a4ad75409fdf4da810a7a55a7 | Python | kisho-stack/Hackathon_semana_7 | /controllers/profesores_controller.py | UTF-8 | 8,285 | 3.296875 | 3 | [] | no_license | from classes.profesor import Profesor
from classes.curso import Curso
from classes.profesor_curso import Profesor_curso
from helpers.menu import Menu
from helpers.helper import print_table, input_data, pregunta
from classes.salon import Salon
from classes.profesor_salon import Profesor_salon
class Profesores_controller:
    """Console controller for the teacher (profesor) CRUD menu.

    Wires the menu/table UI helpers to the Profesor/Curso/Salon models and
    their join tables (profesor_curso, profesor_salon).
    """

    def __init__(self):
        # Model/repository objects used by the menu actions.
        self.profesor = Profesor()
        self.curso = Curso()
        self.profesor_curso = Profesor_curso()
        self.salon = Salon()
        self.profesor_salon = Profesor_salon()
        # Set to True when the user chooses to leave this submenu.
        self.salir = False

    def menu(self):
        """Main loop: show the teacher menu until the user picks 'Salir'."""
        while True:
            try:
                print('''
                ===================
                    Profesores
                ===================
                ''')
                menu = ['Listar profesores', 'Buscar profesor', "Nuevo profesor", "Salir"]
                respuesta = Menu(menu).show()
                if respuesta == 1:
                    self.listar_profesores()
                elif respuesta == 2:
                    self.buscar_profesor()
                elif respuesta == 3:
                    self.insertar_profesor()
                else:
                    self.salir = True
                    break
            except Exception as e:
                print(f'{str(e)}')

    def listar_profesores(self):
        """Print a table with every teacher, ordered by id."""
        print('''
        ===========================
            Lista de Profesores
        ===========================
        ''')
        profesores = self.profesor.obtener_profesores('profesor_id')
        print(print_table(profesores, ['ID', 'Nombre', 'Edad', 'Correo']))
        input("\nPresione una tecla para continuar...")

    def buscar_profesor(self):
        """Look a teacher up by id and offer the maintenance sub-actions."""
        print('''
        ===========================
            Buscar Profesor
        ===========================
        ''')
        try:
            id_profesor = input_data("Ingrese el ID del profesor >> ", "int")
            profesor = self.profesor.obtener_profesor({'profesor_id': id_profesor})
            print(print_table(profesor, ['ID', 'Nombre', 'Edad', 'Correo']))
            if profesor:
                if pregunta(f"¿Deseas dar mantenimiento al registro profesor '{profesor[1]}'?"):
                    opciones = ['Asignar curso', 'Asignar salon', 'Editar profesor', 'Eliminar profesor', 'Salir']
                    respuesta = Menu(opciones).show()
                    if respuesta == 1:
                        self.asignar_curso(id_profesor, profesor)
                    elif respuesta == 2:
                        self.asignar_salon(id_profesor, profesor)
                    elif respuesta == 3:
                        self.editar_profesor(id_profesor)
                    elif respuesta == 4:
                        self.eliminar_profesor(id_profesor)
        except Exception as e:
            print(f'{str(e)}')
        input("\nPresione una tecla para continuar...")

    def insertar_profesor(self):
        """Prompt for the teacher fields, persist the record and relist."""
        nombre = input_data("Ingrese el nombre del profesor >> ")
        edad = input_data("Ingrese la edad del profesor >> ")
        correo = input_data("Ingrese el correo del profesor >> ")
        self.profesor.guardar_profesor({
            'nombres': nombre,
            'edad': edad,
            'correo': correo
        })
        print('''
        =================================
            Nuevo Profesor agregado !
        =================================
        ''')
        self.listar_profesores()

    def editar_profesor(self, id_profesor):
        """Prompt for new field values and update the given teacher."""
        nombre = input_data("Ingrese el nuevo nombre del profesor >> ")
        edad = input_data("Ingrese la nueva edad del profesor >> ")
        correo = input_data("Ingrese el nuevo correo del profesor >> ")
        self.profesor.modificar_profesor({
            'profesor_id': id_profesor
        }, {
            'nombres': nombre,
            'edad': edad,
            'correo': correo
        })
        print('''
        ==========================
            Profesor Editado !
        ==================
        ''')

    def eliminar_profesor(self, id_profesor):
        """Delete the teacher record with the given id."""
        self.profesor.eliminar_profesor({
            'profesor_id': id_profesor
        })
        print('''
        ===========================
            Profesor Eliminado !
        ===========================
        ''')

    def asignar_curso(self, id_profesor, profesor):
        """List the courses not yet assigned to the teacher and assign one."""
        print(f'\n Asignación de cursos para el profesor : {profesor[1]}')
        print('''
        ============================
            Curso disponibles
        ============================
        ''')
        cursos = self.curso.obtener_cursos('curso_id')
        cursos_disponibles = []
        if cursos:
            # Keep only the courses the teacher does not already have.
            for curso in cursos:
                id_curso = curso[0]
                nombre_curso = curso[1]
                cursos_profesor = self.profesor_curso.buscar_profesor_cursos({
                    'id_profesor': id_profesor,
                    'id_curso': id_curso
                })
                if not cursos_profesor:
                    cursos_disponibles.append({
                        'id': id_curso,
                        'Cursos disponibles': nombre_curso
                    })
        print(print_table(cursos_disponibles))
        curso_seleccionado = input_data(f'\nSeleccione el ID del curso a asignar al profesor: {profesor[1]} >> ', 'int')
        buscar_curso = self.curso.obtener_curso({'curso_id': curso_seleccionado})
        if not buscar_curso:
            print('\nEste curso no existe !')
            return
        # Guard against assigning the same course twice.
        cursos_profesor = self.profesor_curso.buscar_profesor_cursos({
            'id_profesor': id_profesor,
            'id_curso': curso_seleccionado
        })
        if cursos_profesor:
            print('\nEste curso ya esta asignado al profesor !')
            return
        self.profesor_curso.guardar_profesor_curso({
            'id_profesor': id_profesor,
            'id_curso': curso_seleccionado
        })
        print('''
        ==============================
            Nuevo curso asignado !
        ==============================
        ''')

    def asignar_salon(self, id_profesor, profesor):
        """List the classrooms not yet assigned to the teacher and assign one."""
        print(f'\n Asignación de salones para el profesor : {profesor[1]}')
        print('''
        ============================
            Salones disponibles
        ============================
        ''')
        salon = self.salon.obtener_salones('id_salon')
        salon_disponibles = []
        if salon:
            # Keep only the classrooms the teacher does not already have.
            for salones in salon:
                id_salon = salones[0]
                grado_salon = salones[1]
                nombre_salon = salones[2]
                salon_profesor = self.profesor_salon.buscar_profesor_salones({
                    'id_profesor': id_profesor,
                    'id_salon': id_salon
                })
                if not salon_profesor:
                    salon_disponibles.append({
                        'id': id_salon,
                        'Salones disponibles': grado_salon,
                        'Nombre Salon' : nombre_salon
                    })
        print(print_table(salon_disponibles))
        salon_seleccionado = input_data(f'\nSeleccione el ID del salon a asignar al profesor: {profesor[1]} >> ', 'int')
        buscar_salon = self.salon.obtener_salon({'id_salon': salon_seleccionado})
        if not buscar_salon:
            # FIX: message said 'curso' (copy-paste from asignar_curso).
            print('\nEste salon no existe !')
            return
        salon_profesor = self.profesor_salon.buscar_profesor_salones({
            'id_profesor': id_profesor,
            'id_salon': salon_seleccionado
        })
        if salon_profesor:
            # FIX: message said 'curso' (copy-paste from asignar_curso).
            print('\nEste salon ya esta asignado al profesor !')
            return
        self.profesor_salon.guardar_profesor_salon({
            'id_profesor': id_profesor,
            'id_salon': salon_seleccionado
        })
        print('''
        ==============================
            Nuevo salon asignado !
        ==============================
        ''')
| true |
2648263fecc606af858550f2715997977e79b91c | Python | lastlegion/SimpleCV-Experiments | /Affine.py | UTF-8 | 798 | 2.6875 | 3 | [] | no_license | import SimpleCV
import math
from PIL import Image
def ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None,expand=False):
    """Rotate *image* by *angle* degrees about *center*, optionally moving the
    pivot to *new_center* and scaling by *(sx, sy)*.

    Builds the six inverse-mapping coefficients (a..f) expected by PIL's
    Image.transform(..., Image.AFFINE, ...) and applies them with bicubic
    resampling.  With no *center* it falls back to Image.rotate.

    NOTE(review): the *expand* parameter is accepted but never used -- confirm
    whether expanding the output canvas was intended.
    """
    if center is None:
        return image.rotate(angle)
    # PIL's AFFINE transform maps output pixels back to input pixels, hence
    # the negated angle (converted to radians).
    angle = -angle/180.0*math.pi
    nx,ny = x,y = center
    sx=sy=1.0
    if new_center:
        (nx,ny) = new_center
    if scale:
        (sx,sy) = scale
    cosine = math.cos(angle)
    sine = math.sin(angle)
    # Rotation/scale part of the inverse matrix, plus the translation terms
    # (c, f) that map new_center back onto center.
    a = cosine/sx
    b = sine/sx
    c = x-nx*a-ny*b
    d = -sine/sy
    e = cosine/sy
    f = y-nx*d-ny*e
    return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=Image.BICUBIC)
# Demo (Python 2 syntax): rotate 1.jpg by 45 degrees with PIL's built-in
# rotate, then with the affine helper pivoting about the image centre.
angle = 45
I = Image.open("1.jpg")
I.rotate(angle,Image.BICUBIC,0).show()
a = raw_input("yo")  # pause so both preview windows can be compared
print I.size
ScaleRotateTranslate(I, angle, [I.size[0]/2,I.size[1]/2]).show()
| true |
c5406cc3ee4bbce5780f051e6b2640859fca10b5 | Python | BurakYyurt/Neural_Nets | /input_iris.py | UTF-8 | 980 | 2.5625 | 3 | [] | no_license | from model import NN
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
# Load the Iris dataset (no header row) and name the columns; the four
# numeric columns are the features, "class" is the species label.
df = pd.read_csv("iris.data", header=None)
df.columns = [1, 2, 3, 4, "class"]
# Encode the three species labels as integers 0..2.
mapping = {"Iris-setosa": 0, "Iris-versicolor": 1,"Iris-virginica":2}
df = df.replace({"class": mapping})
X = df[[1, 2, 3, 4]].to_numpy()
Y_values = df[["class"]].to_numpy()
# One-hot encode the class labels (3 output units).
Y = np.array([[1 if j == i else 0 for j in range(3)]for i in Y_values])
# Architecture: 4 inputs -> 4 hidden -> 3 outputs.
nn_model = NN([4, 4, 3]) # 10, 10, 10, 10, 1])
# Split 70/20/10 into train/cross-validation/test with feature normalisation.
nn_model.import_set(X, Y, normalization=True, ratio=[7, 2, 1])
# NOTE(review): (3, 1) is presumably (learning rate, regularisation) or
# similar hyper-parameters -- confirm against model.NN.train_model.
theta, cost_hist = nn_model.train_model((3, 1), max_iter=10000)
training_score = nn_model.cost_function(theta, nn_model.sets['train'])
cv_score = nn_model.cost_function(theta, nn_model.sets['cv'])
test_score = nn_model.cost_function(theta, nn_model.sets['test'])
print("training cost:", training_score)
print("cross validation cost:", cv_score)
print("test cost:", test_score)
# plt.plot(cost_hist[1:])
# plt.show()
| true |
410d1bb9d398735965e6e7cc370342c7f52c6c68 | Python | Rubber-Conquest/dfgboat | /food-master/mytestsd(fixed).py | UTF-8 | 4,813 | 2.828125 | 3 | [] | no_license | import telebot
from telebot import types
bot = telebot.TeleBot("944485905:AAHrw7jtHjnAVxqU7GsPS_xrhPPO6fUdiqU")
@bot.message_handler(commands=["start"])
def start(m):
    """Handle /start: greet the user and show the main reply keyboard."""
    msg = bot.send_message(m.chat.id, "Hello")
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
    keyboard.add(*[types.KeyboardButton(name) for name in ['Меню', 'Акции']])
    keyboard.add(*[types.KeyboardButton(name) for name in ['Поиск по ингредиентам', 'Расчёт калорий']])
    keyboard.add(*[types.KeyboardButton(name) for name in ['Корзина']])
    bot.send_message(m.chat.id, 'Выберите нужный пункт меню', reply_markup=keyboard)
    # The user's next message in this chat is routed to the name() handler below.
    bot.register_next_step_handler(msg, name)
def name(m):
    """Dispatch the user's main-menu choice and show the matching sub-keyboard.

    Each branch compares the incoming message text against one of the main
    keyboard button labels sent by start().
    """
    if m.text == 'Меню':
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.add(*[types.KeyboardButton(advert1) for advert1 in ['Еда']])
        keyboard.add(*[types.KeyboardButton(advert1) for advert1 in ['Напитки']])
        keyboard.add(*[types.KeyboardButton(advert0) for advert0 in ['Назад']])
        bot.send_message(m.chat.id, 'Еда или напитки?', reply_markup=keyboard)
    elif m.text == 'Акции':
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.add(*[types.KeyboardButton(advert2) for advert2 in ['В разработке']])
        keyboard.add(*[types.KeyboardButton(advert0) for advert0 in ['Назад']])
        bot.send_message(m.chat.id, 'Акций к сожалению нет :(', reply_markup=keyboard)
    elif m.text == 'Поиск по ингредиентам': ##### building the ingredient search
        msg = bot.send_message(m.chat.id, "Отлично")
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.add(*[types.KeyboardButton(advert3) for advert3 in ['Пицца', 'Суши']])
        keyboard.add(*[types.KeyboardButton(advert3) for advert3 in ['Первое блюдо', 'Второе блюдо']])
        keyboard.add(*[types.KeyboardButton(advert3) for advert3 in ['Салат']])
        keyboard.add(*[types.KeyboardButton(advert0) for advert0 in ['Назад']])
        bot.send_message(m.chat.id, 'Что будем искать?', reply_markup=keyboard)
        # advert3 here resolves to the handler function defined below, not the
        # comprehension variables above (those are scoped to the comprehensions).
        bot.register_next_step_handler(msg, advert3)
    elif m.text == 'Расчёт калорий':
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.add(*[types.KeyboardButton(advert4) for advert4 in ['Выбрать категорию']])
        keyboard.add(*[types.KeyboardButton(advert4) for advert4 in ['Выбрать блюдо']])
        keyboard.add(*[types.KeyboardButton(advert4) for advert4 in ['Рассчитать и вывести']])
        keyboard.add(*[types.KeyboardButton(advert0) for advert0 in ['Назад']])
        bot.send_message(m.chat.id, 'Что нужно сделать?', reply_markup=keyboard)
    elif m.text == 'Корзина':
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.add(*[types.KeyboardButton(advert5) for advert5 in ['Оформить заказ']])
        keyboard.add(*[types.KeyboardButton(advert5) for advert5 in ['Убрать некоторую еду из корзины']])
        keyboard.add(*[types.KeyboardButton(advert0) for advert0 in ['Назад']])
        bot.send_message(m.chat.id, 'Что нужно сделать?', reply_markup=keyboard)
#####################################################################################################
# building the ingredient search
def advert3(m):
    """Handle the dish-category choice of the ingredient search.

    NOTE(review): only the 'Пицца' option is implemented; the other category
    buttons shown by name() fall through silently -- confirm this is intended.
    """
    if m.text == 'Пицца':
        msg = bot.send_message(m.chat.id, "Отлично")
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
        keyboard.add(*[types.KeyboardButton(advert33) for advert33 in ['Которые входят в состав']])
        keyboard.add(*[types.KeyboardButton(advert33) for advert33 in ['Без этих ингредиентов']])
        keyboard.add(*[types.KeyboardButton(advert0) for advert0 in ['Назад']])
        bot.send_message(m.chat.id, 'Какие ингредиенты ищем?', reply_markup=keyboard)
        # The next message goes to advert33() below.
        bot.register_next_step_handler(msg, advert33)
def advert33(m):
    """Handle the include/exclude ingredient-search mode (search itself is a stub)."""
    if m.text == 'Которые входят в состав':
        bot.send_message(m.chat.id,'Провожу поиск...')
    if m.text == 'Без этих ингредиентов':
        bot.send_message(m.chat.id,'Провожу поиск...')
#####################################################################################################
bot.polling()
| true |
e8182751ca51bbd98ac24f02c9c0374217a4f478 | Python | zwhubuntu/CTF-chal-code | /geek_localhost.py | UTF-8 | 198 | 2.75 | 3 | [] | no_license | f = open('d:/ip_table2.txt', 'wb')
# (Python 2) Enumerate every address in the 169.254.0.0/16 link-local range
# and append each to the file opened above.
for i in range(0, 256):
    for j in range(0, 256):
        # NOTE(review): chr(13) is a bare carriage return (CR).  If one
        # address per line was intended for non-Windows tooling, '\n' (or
        # '\r\n') is the usual separator -- confirm the consumer of this file.
        strr = "169.254." + str(i) + "." +str(j) + chr(13)
        print strr
        f.write(strr)
f.close()
| true |
af4ea4cc20f4acaaee928a1885eca5f5ec355c12 | Python | mfojtak/deco | /deco/tokenizers/sentencepiece.py | UTF-8 | 7,732 | 2.78125 | 3 | [
"MIT"
] | permissive | import sentencepiece as spm
import collections
import tensorflow as tf
import codecs
import numpy as np
class SentencepieceTokenizer(object):
    """End-to-end tokenization backed by a trained SentencePiece model.

    Combines a SentencePiece processor with a BERT-style vocabulary file and
    provides plain sub-word tokenization, BERT input building ([CLS]/[SEP]
    framing, segment ids, padding) and masked-LM pre-training batch creation.
    """

    def __init__(self, vocab_file, model_file, do_lower_case=True):
        """Load *vocab_file* (one token per line) and the SentencePiece *model_file*.

        NOTE(review): *do_lower_case* is accepted but never used -- confirm
        whether lower-casing was supposed to happen before encoding.
        """
        self.vocab = self.load_vocab(vocab_file)
        # Reverse mapping id -> token for decoding.
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.sp = spm.SentencePieceProcessor()
        self.sp.Load(model_file)
        self.unk_token = "[UNK]"
        self.cls_id = self.vocab["[CLS]"]
        self.sep_id = self.vocab["[SEP]"]

    def load_vocab(self, vocab_file):
        """Load a vocabulary file into an ordered token -> id dictionary.

        Each line contributes its first whitespace-separated field.  The BERT
        special tokens are appended afterwards when the file does not already
        contain them.
        """
        res = collections.OrderedDict()
        index = 0
        with codecs.open(vocab_file, 'r', 'utf8') as reader:
            while True:
                token = reader.readline()
                if not token:
                    break
                # NOTE(review): an all-whitespace line would raise IndexError
                # here -- confirm vocab files never contain blank lines.
                token = token.strip().split()[0]
                res[token] = index
                index += 1
        for special_token in ['[PAD]','[UNK]','[CLS]','[SEP]','[MASK]']:
            token = special_token
            if token not in res:
                res[token] = index
                index += 1
        return res

    def special_tokens(self):
        """Tokens that must never be masked during pre-training."""
        return ['[PAD]','[UNK]','[CLS]','[SEP]','[MASK]', "<unk>", "<s>", "</s>"]

    def convert_by_vocab(self, vocab, items):
        """Converts a sequence of [tokens|ids] using the given mapping."""
        return [vocab[item] for item in items]

    def tokenize_batch(self, batch):
        """Tokenize every text in *batch*; returns a list of token lists."""
        return [self.tokenize(text) for text in batch]

    def tokenize(self, text):
        """Split *text* into SentencePiece sub-word tokens.

        Pieces the model maps to id 0 are reported as the [UNK] token.
        """
        output_ids = self.sp.EncodeAsIds(text)
        output_tokens = [self.sp.IdToPiece(i)
                         if i != 0 else self.unk_token
                         for i in output_ids]
        return output_tokens

    def tokenize_bert(self, text_a, text_b=None):
        """Tokenize one or two texts with BERT framing: [CLS] a [SEP] (b [SEP])."""
        tokens = ["[CLS]"] + self.tokenize(text_a) + ["[SEP]"]
        if text_b:
            tokens = tokens + self.tokenize(text_b) + ["[SEP]"]
        return tokens

    def encode_bert(self, text_a, text_b=None, max_len=512):
        """Encode one or two texts into fixed-length BERT inputs.

        Returns (ids, segment_ids), each of length *max_len*; ids are padded
        with 0 and truncated sequences keep [SEP] as their final token.
        """
        first_tokens = self.tokenize(text_a)
        first_ids = [self.cls_id] + self.tokens_to_ids(first_tokens) + [self.sep_id]
        segments = [0] * len(first_ids)
        ids = first_ids
        if text_b:
            second_tokens = self.tokenize(text_b)
            second_ids = self.tokens_to_ids(second_tokens) + [self.sep_id]
            segments += [1] * len(second_ids)
            ids += second_ids
        if len(ids) > max_len:
            # Truncate and force the final position to be [SEP].
            ids = ids[:max_len]
            segments = segments[:max_len]
            ids[max_len-1] = self.sep_id
        pad = max_len - len(ids)
        segments += [0] * pad
        ids += [0] * pad
        return ids, segments

    def encode_bert_batch(self, texts_a, texts_b=None, max_len=512):
        """Apply encode_bert over parallel text lists; returns (ids, segments) lists."""
        indices = []
        segments = []
        if texts_b:
            for pair in zip(texts_a, texts_b):
                i, s = self.encode_bert(pair[0], pair[1], max_len=max_len)
                indices.append(i)
                segments.append(s)
        else:
            for text in texts_a:
                i, s = self.encode_bert(text, max_len=max_len)
                indices.append(i)
                segments.append(s)
        return indices, segments

    def tokens_to_ids(self, tokens):
        """Map tokens to vocabulary ids."""
        return self.convert_by_vocab(self.vocab, tokens)

    def ids_to_tokens(self, ids):
        """Map vocabulary ids back to tokens."""
        return self.convert_by_vocab(self.inv_vocab, ids)

    def create_training(self, documents, seq_len=519):
        """Yield token sequences of at most *seq_len*, packing consecutive
        sentences of each document behind a leading [CLS] token."""
        for document in documents:
            # FIX: was the literal "CLS", which is not a vocabulary entry;
            # the BERT classification token used everywhere else is "[CLS]".
            current_sample = ["[CLS]"]
            for sentence in document:
                sent_tokens = self.tokenize(sentence)
                if len(current_sample) + len(sent_tokens) <= seq_len:
                    current_sample += sent_tokens
                else:
                    if len(current_sample) > 1 and len(current_sample) <= seq_len:
                        yield current_sample
                    current_sample = ["[CLS]"] + sent_tokens
            if len(current_sample) > 1 and len(current_sample) <= seq_len:
                yield current_sample

    def bert_create_pretrained(self, sentence_pairs,
                               seq_len=512,
                               mask_rate=0.15,
                               mask_mask_rate=0.8,
                               mask_random_rate=0.1,
                               swap_sentence_rate=0.5,
                               force_mask=True):
        """Generate a batch of masked-LM / next-sentence-prediction training data.

        :param sentence_pairs: A list of pairs containing lists of tokens.
        :param seq_len: Length of the sequence.
        :param mask_rate: The rate of choosing a token for prediction.
        :param mask_mask_rate: The rate of replacing the token with `[MASK]`.
        :param mask_random_rate: The rate of replacing the token with a random word.
        :param swap_sentence_rate: The rate of swapping the second sentences.
        :param force_mask: At least one position will be masked.
        :return: ([token_inputs, segment_inputs, masked_inputs],
                  [mlm_outputs, nsp_outputs]) as numpy arrays.
        """
        batch_size = len(sentence_pairs)
        unknown_index = self.vocab[self.unk_token]
        spec_tokens = self.special_tokens()
        token_list = list(self.vocab.keys())
        # NSP negatives: randomly pair some first sentences with another
        # sample's second sentence and label them 1.
        nsp_outputs = np.zeros((batch_size,))
        mapping = {}
        if swap_sentence_rate > 0.0:
            indices = [index for index in range(batch_size) if np.random.random() < swap_sentence_rate]
            mapped = indices[:]
            np.random.shuffle(mapped)
            for i in range(len(mapped)):
                if indices[i] != mapped[i]:
                    nsp_outputs[indices[i]] = 1.0
            mapping = {indices[i]: mapped[i] for i in range(len(indices))}
        # Masked-LM inputs/targets.
        token_inputs, segment_inputs, masked_inputs = [], [], []
        mlm_outputs = []
        for i in range(batch_size):
            first, second = sentence_pairs[i][0], sentence_pairs[mapping.get(i, i)][1]
            segment_inputs.append(([0] * (len(first) + 2) + [1] * (seq_len - (len(first) + 2)))[:seq_len])
            tokens = ["[CLS]"] + first + ["[SEP]"] + second + ["[SEP]"]
            tokens = tokens[:seq_len]
            # NOTE(review): padding uses "<unk>" rather than "[PAD]" --
            # confirm this matches how the model was trained.
            tokens += ["<unk>"] * (seq_len - len(tokens))
            token_input, masked_input, mlm_output = [], [], []
            has_mask = False
            for token in tokens:
                mlm_output.append(self.vocab.get(token, unknown_index))
                if token not in spec_tokens and np.random.random() < mask_rate:
                    has_mask = True
                    masked_input.append(1)
                    r = np.random.random()
                    if r < mask_mask_rate:
                        token_input.append(self.vocab["[MASK]"])
                    elif r < mask_mask_rate + mask_random_rate:
                        # Replace with a random non-special token.
                        while True:
                            token = np.random.choice(token_list)
                            if token not in spec_tokens:
                                token_input.append(self.vocab[token])
                                break
                    else:
                        # Keep the original token (but still predict it).
                        token_input.append(self.vocab.get(token, unknown_index))
                else:
                    masked_input.append(0)
                    token_input.append(self.vocab.get(token, unknown_index))
            if force_mask and not has_mask:
                masked_input[1] = 1
            token_inputs.append(token_input)
            masked_inputs.append(masked_input)
            mlm_outputs.append(mlm_output)
        inputs = [np.asarray(x) for x in [token_inputs, segment_inputs, masked_inputs]]
        outputs = [np.asarray(np.expand_dims(x, axis=-1)) for x in [mlm_outputs, nsp_outputs]]
        return inputs, outputs
#tok = SentencepieceTokenizer(vocab_file="/data/BioNLP/BERT/sp_bpe.vocab", model_file="/data/BioNLP/BERT/sp_bpe.model")
#tokens = tok.tokenize("hypertension problem")
#ids = tok.convert_tokens_to_ids(tokens)
#print(tokens, ids)
| true |
5b73bbc626d610712b752f5bde0633fc3f2e2248 | Python | dimk00z/3_bars | /bars.py | UTF-8 | 2,943 | 3.6875 | 4 | [] | no_license | import json
from os import path
from math import radians, cos, sin, asin, sqrt
def load_bars_from_json(json_file_name):
    """Load the bars dataset from *json_file_name*.

    Returns the parsed JSON content, or None when the file does not exist.
    The dataset is cp1251-encoded, hence the explicit encoding.
    """
    if not path.exists(json_file_name):
        return None
    with open(json_file_name, 'r', encoding='cp1251') as source:
        return json.load(source)
def get_biggest_bar(bars):
    """Return the bar entry with the greatest number of seats."""
    def seats(bar):
        return bar['SeatsCount']
    return max(bars, key=seats)
def get_smallest_bar(bars):
    """Return the bar entry with the fewest seats."""
    def seats(bar):
        return bar['SeatsCount']
    return min(bars, key=seats)
def get_distance(longitude1, latitude1, longitude2, latitude2):
    """Great-circle (haversine) distance in kilometres between two points
    given in decimal degrees."""
    earth_radius_km = 6371
    lon1, lat1, lon2, lat2 = (radians(value) for value in
                              (longitude1, latitude1, longitude2, latitude2))
    half_dlat = (lat2 - lat1) / 2
    half_dlon = (lon2 - lon1) / 2
    # Haversine of the central angle between the two points.
    haversine = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    central_angle = 2 * asin(sqrt(haversine))
    return central_angle * earth_radius_km
def get_closest_bar(bars, longitude, latitude):
    """Return the bar whose stored coordinates are nearest to the given point."""
    def distance_to_bar(bar):
        bar_lon, bar_lat = bar['geoData']['coordinates']
        return get_distance(latitude, longitude, bar_lat, bar_lon)
    return min(bars, key=distance_to_bar)
def get_string_bar(bar):
    """Return a human-readable (Russian) one-line description of *bar*.

    NOTE(review): coordinates[0] is labelled 'широта' (latitude) here, but
    get_closest_bar treats coordinates[0] as longitude -- GeoJSON order is
    usually [lon, lat]; verify which label is correct.
    """
    string_bar = "{} с {} количеством посадочных мест по адресу {} \n \
    широта: {}, долгота {}"
    return (string_bar.format(bar['Name'], bar["SeatsCount"], bar["Address"],
                              str(bar['geoData']['coordinates'][0]),
                              str(bar['geoData']['coordinates'][1])))
def input_user_coordinates():
    """Ask the user for a longitude and a latitude.

    Returns the (longitude, latitude) pair, or None when either value
    cannot be parsed as a float.
    """
    print("Поиск ближайшего бара:")
    try:
        longitude = float(input("Введите долготу: "))
        latitude = float(input("Введите широту: "))
    except ValueError:
        print("Ввод некорректен")
        return None
    return longitude, latitude
if __name__ == '__main__':
    # CLI entry point: load the dataset, report the biggest/smallest bar and
    # the one closest to user-supplied coordinates.
    print("Добро пожаловать!")
    json_file_name = input("Введите путь к json-файлу\n")
    bars = load_bars_from_json(json_file_name)
    # NOTE(review): this "file not found" message also fires for an existing
    # but empty JSON list, since both are falsy.
    if not bars:
        print("Файл не найден {}".format(json_file_name))
        exit()
    print("Самый большой бар : " + get_string_bar(get_biggest_bar(bars)))
    print("Самый маленький бар : " + get_string_bar(get_smallest_bar(bars)))
    user_coordinates = input_user_coordinates()
    if not user_coordinates:
        exit()
    closest_bar = get_closest_bar(bars, user_coordinates[0],
                                  user_coordinates[1])
    print("Ближайший бар :" + get_string_bar(closest_bar))
| true |
8b44fc698202dca5ac57ab229d0f3a06f2f611be | Python | mn113/projecteuler-solutions | /076-100/euler081.py | UTF-8 | 1,743 | 3.703125 | 4 | [] | no_license | #! /usr/bin/env python
# Project Euler problem 081: find minimal sum traversing 80x80 matrix
from itertools import *
f = open('euler081_mini.txt')
# Retrieve lines:
rows = f.readlines()
# Build matrix:
matrix = {}
# Iterate over stored lines:
for i in range(len(rows)):
row = rows[i]
if row:
nums = row.split(',')
for j in range(len(nums)):
matrix[(i,j)] = int(nums[j])
print matrix
# Navigate a path:
x,y = 0,0
mx,my = 4,4
total = 0
# Scout ahead within 3x3 box:
#paths = ['AADD', 'ADDA', 'ADAD', 'DAAD', 'DADA', 'DDAA']
paths = list(combinations('AD', 4))
print paths
# Repeat until we hit last square:
while x < mx or y < my:
# Reset the values for the box to be scouted:
boxtotal = 0
boxmin = 5000
print '@', x, y, matrix[(x,y)]
# Try every path through the 3x3:
for path in paths:
print path
dx,dy = 0,0
boxtotal = matrix[(x,y)]
# Take one step:
for step in path:
# Edge cases:
if x+dx == mx:
step = 'A'
elif y+dy == my:
step = 'D'
# Follow step cases:
if step == 'D':
dx += 1
elif step == 'A':
dy += 1
# Add new square:
print dx, dy, matrix[(x+dx,y+dy)]
boxtotal += matrix[(x+dx,y+dy)]
print path, boxtotal
if boxtotal < boxmin:
boxmin = boxtotal
boxminpath = path
print 'min:', boxmin, '(', boxminpath, ')'
# Move 1 place:
if boxminpath[0] == 'D':
x += 1
elif boxminpath[0] == 'A':
y += 1
print 'step to (', x, ',', y, ')'
| true |
d067570712e743b5227adb3bccc6688eb1c8e370 | Python | dawidoberda/work_repo | /MPN&HS&ECN_organizer/main.py | UTF-8 | 674 | 2.640625 | 3 | [] | no_license | #ENG:ESD data
from Parser_csv import ParserCsv
import datebase_manager
def main():
    """Compare the previous and current MPN CSV exports and load the result
    into the database via datebase_manager."""
    first_file = 'MPN_previous (copy).csv'
    second_file = 'MPN_today (copy).csv'
    mpn_compare_file = 'MPN_compare.csv'
    csv_parser_mpn = ParserCsv(first_file, second_file, mpn_compare_file)
    # indicators[0]/indicators[1] report whether each input file exists.
    # (The dead `indicators = []` pre-assignment and the `== True`
    # comparisons were removed.)
    indicators = csv_parser_mpn.check_if_exist()
    print(indicators)
    if indicators[0]:
        csv_parser_mpn.create_first_temp()
    if indicators[1]:
        csv_parser_mpn.create_second_temp()
    csv_parser_mpn.compare_csv()
    datebase_manager.create_mpn_table()
    datebase_manager.fill_mpn_tabel()


if __name__=='__main__':
    main()
| true |
b0c7cfe50da2d8fe1972cd00d4dcb8c9bca48f3f | Python | daansteraan/Random-Code | /project_mortgage_calculator.py | UTF-8 | 1,142 | 4.0625 | 4 | [] | no_license | """
Mortgage Calculator -
Calculate the monthly payments of a fixed term mortgage over given Nth terms \
at a given interest rate. Also figure out how long it will take the user to \
pay back the loan. For added complexity, add an option for users to select \
the compounding interval
"""
name = '*** Mortgage Calculator ***'
print
print name
print '-' * len(name)
# input variables
print "INPUTS"
print 'Cash details:'
purchase_price = float(raw_input('Enter purchase price: R '))
deposit = float(raw_input('Enter deposit amount: R '))
print
print 'Bond details:'
interest_rate = float(raw_input('Enter interest rate (annual): '))
bond_term = float(raw_input('Enter bond term (years): '))
# calculations
loan_amount = purchase_price - deposit
interest_rate /= 1200
bond_term *= 12
monthly_payment = loan_amount*(interest_rate*(1+interest_rate)**bond_term) \
/ (((1+interest_rate)**bond_term) - 1)
total_cost = monthly_payment * bond_term
# results
print
print '-' * len(name)
print 'RESULTS'
print 'Monthly repayment: R %.2f' % monthly_payment
print 'Total Cost: R %.2f' % total_cost
print
print '-' * len(name) | true |
592504b3b70f8fecd6f6a660eb89fde162294664 | Python | Qqwy/python-multiple_indexed_collection | /multi_indexed_collection.py | UTF-8 | 15,202 | 3.6875 | 4 | [
"MIT"
] | permissive |
# These names are skipped during __setattr__ wrapping; they do not cause the containers to be updated.
# (Used by AutoUpdatingItem.__setattr__ below to avoid re-indexing on
# internal/bookkeeping attribute writes.)
_RESTRICTED_NAMES = [
    '_multi_indexed_collections', '__setattr__',
    '__dict__', '__class__'
]
class AutoUpdatingItem():
    """When mixing in this class
    all changes to properties on the object cause the `MultiIndexedCollection`s it is contained in
    to automatically re-compute the keys of the object, so manually calling `collection.update(obj)` is no longer required.

    This is implemented by wrapping `__setattr__` with a custom implementation that calls `collection.update(obj)` every time a property changes.

    >>>
    >>> class AutoUser(AutoUpdatingItem):
    ...     def __init__(self, name, user_id):
    ...         self.name = name
    ...         self.user_id = user_id
    >>> autojohn = AutoUser('John', 1)
    >>> autopete = AutoUser('Pete', 2)
    >>> autolara = AutoUser('Lara', 3)
    >>>
    >>>
    >>> mic = MultiIndexedCollection({'user_id', 'name'})
    >>> mic.add(autojohn)
    >>> mic.add(autopete)
    >>> len(mic)
    2
    >>> mic.find('name', 'John') == autojohn
    True
    >>> autojohn.name = 'Johnny'
    >>> mic.get('name', 'Johnny', False) == autojohn
    True
    >>> mic.get('name', 'John', False)
    False
    """
    def __new__(cls, *args, **kwargs):
        # Allocate the instance and seed the registry of collections tracking
        # it.  This must bypass our own __setattr__ below, which would
        # otherwise read the not-yet-existing registry attribute.
        inst = super(AutoUpdatingItem, cls).__new__(cls)
        # Skips below implementation of __setattr__ here.
        super(AutoUpdatingItem, inst).__setattr__('_multi_indexed_collections', [])
        return inst
    def __setattr__(self, name, value):
        # Perform the normal attribute write first, then re-index the object
        # in every collection that contains it.
        super().__setattr__(name, value)
        # Internal/bookkeeping names never trigger re-indexing.
        if name not in _RESTRICTED_NAMES:
            for collection in self._multi_indexed_collections:
                collection.update_item(self)
class MultiIndexedCollection():
"""
A collection type for arbitrary objects, that indexes them based on (a subset of) their properties.
Which properties to look for is specified during the initialization of the MultiIndexedCollection;
any hashable objects can be stored inside.
removing/updating objects is also supported.
The MultiIndexedCollection will _not_ automatically know when an object is changed, so calling `update` manually is necessary in that case.
Optionally, custom dictionary-types can be used, which is nice if you have some special requirements for your dictionary types.
>>> # Most example code uses the following class and data as example:
>>> class User():
... def __init__(self, name, user_id):
... self.name = name
... self.user_id = user_id
>>>
>>> john = User('John', 1)
>>> pete = User('Pete', 2)
>>> lara = User('Lara', 3)
>>>
>>>
>>> mic = MultiIndexedCollection({'user_id', 'name'})
>>> mic.add(john)
>>> mic.add(pete)
>>> len(mic)
2
>>> mic.find('name', 'John') == john
True
>>> mic['name', 'Pete'] == pete
True
>>> mic['name', 'NotInThere']
Traceback (most recent call last):
...
KeyError: '`name`=`NotInThere`'
>>> ('name', 'John') in mic
True
>>> ('user_id', 2) in mic
True
>>> ('user_id', 42) in mic
False
"""
def __init__(self, properties, dict_type=dict, auto_update=False):
"""Initializes the MultiIndexedCollection with the given `properties`.
properties -- a set (or iteratable sequence convertable to one) of string names of properties to index.
dict_type -- Optional; type to use under the hood to store objects in.
"""
self._properties = set(properties)
for property in self._properties:
if not isinstance(property, str):
raise ValueError("{} constructor expects `properties` argument to be a sequence of strings; `{}` is not a string property".format(self.__class__.__name__, property))
self._dict_type = dict_type
if (not hasattr(dict_type, '__getitem__')
or not hasattr(dict_type, '__setitem__')
or not hasattr(dict_type, '__len__')
or not hasattr(dict_type, '__delitem__')
or not hasattr(dict_type, '__iter__')
or not hasattr(dict_type, '__contains__')
or not hasattr(dict_type, 'get')
):
raise Valueerror("{} constructor expects `dict_type` argument to be a mapping, but not all required mapping methods are implemented by {}.".format(self.__class_.__name__, dict_type))
# Per property, we have a collection
self._dicts = self._dict_type([(prop, self._dict_type()) for prop in properties])
# Contains for all objects a dict of (property->value)s,
# so we can keep track of the per-object tracked properties (and their values).
self._propdict = self._dict_type()
self._auto_update = auto_update
def add(self, obj):
    """Add `obj` to the collection so it can later be found under all of
    its indexed properties' values.

    Adding an object that is already contained is a no-op.  Raises
    DuplicateIndexError when one of `obj`'s property values is already
    occupied by another contained object.
    """
    if obj in self._propdict:
        # Already indexed; nothing to do.
        return
    if isinstance(obj, AutoUpdatingItem):
        obj._multi_indexed_collections.append(self)
    # Snapshot the (property -> value) pairs this object exposes.
    prop_results = {}
    for prop in self._properties:
        if hasattr(obj, prop):
            prop_results[prop] = getattr(obj, prop)
    # First pass: verify no key collides, so state is never altered partially.
    for prop, val in prop_results.items():
        if val in self._dicts[prop].keys():
            raise DuplicateIndexError("Collection already contains an element with `{}`=`{}`".format(prop, val))
    # Second pass: actually index the object under each of its values.
    for prop, val in prop_results.items():
        self._dicts[prop][val] = obj
    self._propdict[obj] = prop_results
def find(self, prop, value):
    """Return the object whose indexed property `prop` equals `value`.

    Raises KeyError (with a `prop`=`value` message) when no such object
    is stored.
    """
    try:
        return self._dicts[prop][value]
    except KeyError as missing:
        # Re-raise with a friendlier message naming the property.
        missing.args = ('`{}`=`{}`'.format(prop, missing.args[0]),)
        raise
def __getitem__(self, propval_tuple):
"""Finds the object whose indexed property `prop` has value `value`.
Returns `KeyError` if this cannot be found.
propval_tuple -- A tuple (pair) where `prop` is the first item and `value` the second.
>>> mic = MultiIndexedCollection({'user_id', 'name'})
>>> mic.add(john)
>>> mic.add(pete)
>>>
>>> mic['name', 'John'] == john
True
>>> mic['user_id', 2] == pete
True
>>> mic['name', 'NotInThere']
Traceback (most recent call last):
...
KeyError: '`name`=`NotInThere`'
"""
prop, val = propval_tuple
return self.find(prop, val)
def remove(self, obj):
    """Remove `obj` from this collection so it is no longer indexed or found.

    Raises KeyError when `obj` is not contained in the collection.
    """
    if obj not in self._propdict:
        raise KeyError(obj)
    if isinstance(obj, AutoUpdatingItem):
        obj._multi_indexed_collections.remove(self)
    # Drop every per-property index entry that points at this object.
    for prop, val in self._propdict[obj].items():
        del self._dicts[prop][val]
    del self._propdict[obj]
def discard(self, obj):
    """Remove `obj` from this collection if present; absent objects are ignored."""
    try:
        self.remove(obj)
    except KeyError:
        # Mirrors set.discard(): no error when the object is not contained.
        pass
def __len__(self):
"""The amount of items in the collection.
>>> mic = MultiIndexedCollection({'user_id', 'name'})
>>> len(mic)
0
>>> mic.add(john)
>>> mic.add(pete)
>>> len(mic)
2
"""
return len(self._propdict)
def __length_hint__(self):
return self._propdict.__length_hint__()
def __contains__(self, propval_tuple):
"""True if there exists an item under the key `value` for `prop`.
`propval_tuple` -- a tuple (pair) whose first item identifies the `prop` and the second item the `value` to look for.
>>> mic = MultiIndexedCollection({'user_id', 'name'})
>>> len(mic)
0
>>> mic.add(john)
>>> mic.add(pete)
>>> ('name', 'John') in mic
True
>>> ('user_id', 2) in mic
True
"""
prop, val = propval_tuple
return val in self._dicts[prop]
def update_item(self, obj):
    """Re-index `obj` using its current property values.

    Must be called manually whenever `obj` was altered; objects inheriting
    from AutoUpdatingItem call this automatically for each collection they
    belong to.  Raises DuplicateIndexError (a KeyError subclass) when an
    updated value is already occupied by a different object; raises
    KeyError when `obj` is not contained.
    """
    current = {(prop, getattr(obj, prop))
               for prop in self._properties if hasattr(obj, prop)}
    previous = set(self._propdict[obj].items())
    stale = previous - current   # pairs that no longer hold for this object
    fresh = current - previous   # pairs that appeared or changed
    # Refuse the update when a new key is already taken by another object.
    for prop, val in fresh:
        if val in self._dicts[prop].keys() and self._dicts[prop][val] != obj:
            raise DuplicateIndexError("Collection already contains an element with `{}`=`{}`".format(prop, val))
    # Remove outdated index entries, then insert the fresh ones.
    for prop, val in stale:
        del self._dicts[prop][val]
    for prop, val in fresh:
        self._dicts[prop][val] = obj
    # Extra indirection is necessary because not all dict types can be
    # constructed directly from a set of pairs.
    self._propdict[obj] = self._dict_type(dict(current))
def clear(self):
    """Reset this MultiIndexedCollection to a completely empty state."""
    fresh_indexes = {prop: self._dict_type() for prop in self._properties}
    self._dicts = self._dict_type(fresh_indexes)
    self._propdict = self._dict_type()
def __copy__(self):
"""Creates a shallow copy of this MultiIndexedCollection.
(The items contained are not copied but instead referenced)"""
other = self.__class__(self._properties, dict_type=self._dict_type)
other._propdict = self._propdict.copy()
other._dicts = self.dicts.copy()
def copy(self):
    """Return a shallow copy of this MultiIndexedCollection.

    (The items contained are not copied but instead referenced.)

    BUG FIX: the original called ``self.__copy__()`` but discarded its
    result, so every caller received ``None``.
    """
    return self.__copy__()
def values(self, property_name=None):
    """Return the stored objects.

    When `property_name` is given, only objects indexed under that
    property are returned; otherwise every contained object is returned.
    """
    if not property_name:
        return self._propdict.keys()
    return self._dicts[property_name].values()
def items(self, property_name):
    """Return the (value, object) pairs indexed under `property_name`."""
    index = self._dicts[property_name]
    return index.items()
def keys(self, property_name=None):
    """Return all indexed values of the given property.

    When `property_name` is omitted, the per-object property dictionaries
    are returned instead.
    NOTE(review): the original docstring promised "an iterator of all
    property names" for the no-argument form, but the implementation
    returns ``self._propdict.values()`` (per-object property mappings) —
    confirm which contract callers rely on before changing either.
    """
    if not property_name:
        return self._propdict.values()
    return self._dicts[property_name].keys()
def items_props(self):
    """Iterate over (item, {property name -> property value}) tuples for all contained items."""
    return self._propdict.items()
def properties(self):
    """Return the set of property names this MultiIndexedCollection indexes on."""
    return self._properties
def __iter__(self):
"""Maybe somewhat surprisingly, iterates over all objects inside the MultiIndexedCollection"""
return self._propdict.keys()
def get(self, property_name, value, default=None):
    """Return the item whose `property_name` equals `value`, or `default` when absent."""
    index = self._dicts[property_name]
    return index.get(value, default)
class DuplicateIndexError(KeyError):
    """Raised when adding or updating an item would store it under a
    property value already occupied by another contained item, which
    would make that item inaccessible."""
| true |
b906887703e83e0ab1074473b8214b940fbf82d1 | Python | vinaykshirsagar/footyball-analysis | /Continuous ProRel back.py | UTF-8 | 4,056 | 3.484375 | 3 | [] | no_license |
#Dictionaries associates team with the year of changed league and point tally in that league.
#There is a dictionary for promoted teams and a dictionary for relegated teams.
#Code intended for promotion and relegation from first league
from parse_text import Data

years = sorted(list(Data.keys()))
toBePromoted = {}
toBeRelegated = {}
#teamsEncountered = {}
returnedInOneYearCount = 0
returnedInThreeYearCount = 0
returnedInFiveYearCount = 0
returnedInTenYearCount = 0
riseCount = 0  #total number of promoted teams (for fraction)
survivedOneYearCount = 0
survivedThreeYearCount = 0
survivedFiveYearCount = 0
survivedTenYearCount = 0
dropCount = 0
yearCount = 0  #this count ensures that we don't count years without seasons in the survivorship or return tallies
lastYear = set()  #who was in the prem last year (initially empty)

for year in years:
    print("\tYear: ", year)
    #if(len(lastYear) == 0): #if this the first year we don't have any promotion/relegation info to compare against
    thisYear = set()
    # Collect the names of every team in the top division this year.
    for pos in Data[year][1]:
        if pos not in ['home goals', 'away goals']:
            ''' this code is likley unnecessary
            if Data[year][1][pos]["name"] not in teamsEncountered:
                toBeRelegated[Data[year][1][pos]["name"]] = {}
                #The year this team was promoted (here they were a team to start in the highest division)
                toBeRelegated[Data[year][1][pos]["name"]]["Year"] = yearCount
                #The point tally of this team the year it was promoted (-1 to indicate they started in the first division)
                toBeRelegated[Data[year][1][pos]["name"]]["Points"] = -1
                teamsEcountered.append(Data[year][1][pos]["name"])
            '''
            thisYear.add(Data[year][1][pos]["name"])
    # Set differences against last season give this season's movers.
    promoted = thisYear - lastYear
    relegated = lastYear - thisYear
    for team in promoted:
        if team in toBePromoted:
            returnTime = yearCount - toBePromoted[team]["Year"]
            # BUG FIX: originally printed `surviveTime`, which is undefined
            # at this point and raised a NameError.
            print("Return time: ", returnTime)
            if returnTime == 1:
                returnedInOneYearCount += 1
            if returnTime <= 3:
                returnedInThreeYearCount += 1
            if returnTime <= 5:
                returnedInFiveYearCount += 1
            if returnTime <= 10:
                returnedInTenYearCount += 1
            toBePromoted.pop(team)
        riseCount += 1
        #add team to toBeRelegated
        toBeRelegated[team] = {}
        toBeRelegated[team]["Year"] = yearCount
        print(team, "was promoted in ", yearCount)
        #toBeRelegated[team]["Points"] = #more code will be needed to get the points, figure this out once the other stuff works
    for team in relegated:
        if team in toBeRelegated:
            surviveTime = yearCount - toBeRelegated[team]["Year"]
            print("Survivetime: ", surviveTime)
            if surviveTime >= 10:
                survivedTenYearCount += 1
            if surviveTime >= 5:
                survivedFiveYearCount += 1
            if surviveTime >= 3:
                survivedThreeYearCount += 1
            if surviveTime > 1:
                survivedOneYearCount += 1
                print("Survived one year")
            toBeRelegated.pop(team)
        dropCount += 1
        toBePromoted[team] = {}
        toBePromoted[team]["Year"] = yearCount
        print(team, "was relegated in ", yearCount)
    lastYear = thisYear
    yearCount += 1

# NOTE(review): these divisions raise ZeroDivisionError when no team was ever
# relegated/promoted in the data set — confirm the input always has movers.
print("Fraction of teams to survive at least")
print("\tOne year: {:.10f}".format(survivedOneYearCount/ float(dropCount)))
print("\tThree years: {:.10f}".format(survivedThreeYearCount/ float(dropCount)))
print("\tFive years: {:f}".format(survivedFiveYearCount/ float(dropCount)))
print("\tTen years: {:f}".format(survivedTenYearCount/ float(dropCount)))
print("Fraction of teams to return in at most")
print("\tOne year: {:f}".format(returnedInOneYearCount/ float(riseCount)))
print("\tThree years: {:f}".format(returnedInThreeYearCount/ float(riseCount)))
print("\tFive years: {:f}".format(returnedInFiveYearCount/ float(riseCount)))
print("\tTen years: {:f}".format(returnedInTenYearCount/ float(riseCount)))

#when team changes league, increase the appropriate counters representing how long they stayed in the league
#, increment the counter showing that they are team that changed league (to take find the percentage of teams)
# that stayed 1, 3, 5, 10 years
# add point tally to likeliness-to-change-league analysis later
f249980d06aada69655e0601027b7d050f320bb2 | Python | Anseik/algorithm | /study/백준/boj_1050_물약.py | UTF-8 | 1,429 | 2.71875 | 3 | [] | no_license | import sys
from collections import defaultdict

# Redirect stdin to the sample input file (typical local-testing setup for
# Baekjoon Online Judge problems).
sys.stdin = open('boj_1050_물약.txt')

# N: number of base ingredients with known costs; M: number of recipe lines.
N, M = map(int, input().split())
mat_dict = defaultdict(int)  # material/potion name -> known cost
for i in range(N):
    name, cost = input().split()
    cost = int(cost)
    mat_dict[name] = cost

# Parse each recipe "NAME=1A2BC..."; remember the LOVE recipe for the end.
target = ''
for j in range(M):
    tmp = input()
    idx = tmp.index('=')
    key = tmp[:idx]        # potion being defined
    met = tmp[idx + 1:]    # its recipe string
    if key == 'LOVE':
        target = tmp
    else:
        # Evaluate the recipe's cost from already-known ingredient prices.
        val = 0
        multi = 0  # multiplier digit preceding the current ingredient name
        ma = ''    # ingredient name accumulated so far
        for m in range(len(met)):
            if '1' <= met[m] <= '9':
                multi = int(met[m])
            elif 'A' <= met[m] <= 'Z':
                ma += met[m]
                if ma in mat_dict:
                    val += (multi * mat_dict[ma])
            elif met[m] == '+':
                # '+' separates terms: reset multiplier and name buffer.
                multi = 0
                ma = ''
        mat_dict[key] = val
# print(mat_dict)
# print(target)

# Evaluate the LOVE recipe itself; -1 when an ingredient is unknown.
idx = target.index('=')
key = target[:idx]
met = target[idx + 1:]
result = 0
multi = 0
ma = ''
for m in range(len(met)):
    if '1' <= met[m] <= '9':
        multi = int(met[m])
    elif 'A' <= met[m] <= 'Z':
        ma += met[m]
        if ma in mat_dict:
            result += (multi * mat_dict[ma])
    # End of a term (either a '+' or the last character of the recipe).
    if met[m] == '+' or m == len(met) - 1:
        if ma not in mat_dict:
            result = -1
            break
        else:
            multi = 0
            ma = ''

# Costs above one billion are clamped per the problem statement.
if result > 1000000000:
    print(1000000001)
else:
    print(result)
| true |
d51ab0eaec9ed8dec40bc1772a2d5f6889951b66 | Python | oconnorb/sndrizpipe | /sndrizpipe/mkrefcat.py | UTF-8 | 7,406 | 2.703125 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python
# S.Rodney 2014.05.06
def writeDS9reg( catalog, regfile, color='green', shape='diamond',
                 linewidth=1 ):
    """ Write out a DS9 region file from the given catalog
    catalog may be a file name or an astropy table object.
    """
    from astropy.coordinates import ICRS
    from astropy import units as u
    from astropy.io import ascii
    if isinstance(catalog, str):
        catalog = ascii.read(catalog)
    with open(regfile, 'w') as fout:
        fout.write("# Region file format: DS9 version 5.4\n")
        fout.write('global color=%s font="times 16 bold" select=1 edit=1 move=0 delete=1 include=1 fixed=0 width=%i\n' % (color, linewidth))
        for row in catalog:
            ra_val = row['RA']
            dec_val = row['DEC']
            # Sexagesimal strings contain ':'; otherwise assume decimal degrees.
            if ':' in str(ra_val):
                coord = ICRS(ra=ra_val, dec=dec_val, unit=(u.hour, u.degree))
            else:
                coord = ICRS(ra=ra_val, dec=dec_val, unit=(u.degree, u.degree))
            xstr = coord.ra.to_string(unit=u.hour, decimal=False, pad=True, sep=':', precision=2)
            ystr = coord.dec.to_string(unit=u.degree, decimal=False, pad=True, alwayssign=True, sep=':', precision=1)
            fout.write('point %s %s # point=%s\n' % (xstr, ystr, shape))
def convertToRefcat( incatfile, refcatfile, fluxcol=None, magcol=None,
                     trimctr=None, trimrad=None,
                     ds9regfile=False, clobber=False, verbose=False ):
    """ Read in the catalog incatfile and write out as a tweakreg reference
    catalog called refcatfile.

    The file incatfile may be in any of the formats recognized by the
    astropy.io.ascii library (e.g. ascii text, sextractor), but it must
    have a list of column names, including at least the RA and Dec columns.

    The argument fluxcol can be used to give the column name for a single
    column of measured source fluxes to be written out as a third column
    in the output tweakreg reference catalog file.
    """
    import os
    from astropy.io import ascii
    from astropy import table
    import numpy as np

    incat = ascii.read( incatfile )
    if os.path.exists( refcatfile ) and not clobber :
        print(("Tweakreg reference catalog %s exists. Not clobbering."%refcatfile))
        return
    if verbose :
        print(("Converting input catalog %s into tweakreg ref cat %s"%(incatfile, refcatfile)))

    # Locate the RA column under any of its common catalog names.
    gotracol = False
    for racol in ['X_WORLD','RA','ra','R.A.','ALPHA_J2000','ALPHA', 'Ra']:
        if racol in incat.colnames :
            gotracol = True
            break
    # Locate the Dec column under any of its common catalog names.
    gotdeccol = False
    for deccol in ['Y_WORLD','DEC','dec','Dec','Decl','DELTA_J2000','DELTA']:
        if deccol in incat.colnames :
            gotdeccol = True
            break
    if not (gotracol and gotdeccol) :
        raise RuntimeError(
            "Can't read RA, Dec columns from catalog %s"%incatfile )

    if fluxcol :
        if fluxcol not in incat.colnames :
            raise RuntimeError(
                "There is no column %s in %s."%(fluxcol, incatfile) )
        savecolnames = [racol,deccol,fluxcol]
        outcolnames = ['RA','DEC','FLUX']
    elif magcol :
        if magcol not in incat.colnames :
            raise RuntimeError(
                "There is no column %s in %s."%(magcol, incatfile) )
        savecolnames = [racol,deccol,magcol]
        # convert from mags to flux using an arbitrary zpt = 25
        # (only entries with plausible magnitudes, -9 < m < 99, are converted;
        # note this mutates the magcol column of incat in place)
        igoodmags = np.where( (incat[magcol]>-9) & (incat[magcol]<99) )
        incat[magcol][igoodmags] = 10**(-0.4*(incat[magcol][igoodmags]-25))
        outcolnames = ['RA','DEC','FLUX']
    else :
        savecolnames = [racol,deccol]
        outcolnames = ['RA','DEC']

    # Assemble the output table with tweakreg's expected column names.
    outcoldat = [ incat[colname] for colname in savecolnames ]
    outcat = table.Table( data= outcoldat, names=outcolnames )
    if trimctr and trimrad :
        if verbose : print(('Trimming to %.1f arcsec around %s'%(trimrad,trimctr)))
        trimra,trimdec = trimctr.split(',')
        outcat = trimcat( outcat, float(trimra), float(trimdec), trimrad )
    outcat.write( refcatfile, format='ascii.commented_header' )
    if ds9regfile :
        writeDS9reg( outcat, ds9regfile )
def trimcat( incat, ra, dec, radius, outcatfile=None):
    """Trim the input catalog, excluding any sources more than
    <radius> arcsec from the given RA,Dec position.

    Assumes the catalog has position columns 'RA' and 'DEC' in decimal
    degrees.

    incat -- a catalog object or a filename.
    ra, dec -- trimming center, in decimal degrees.
    radius -- trimming radius, in arcsec (float).
    outcatfile -- optional; when given, the trimmed catalog is also
        written to that file.

    Returns the trimmed catalog (rows are removed in place).

    BUG FIX: the small-angle separation is sqrt((cos(dec)*dRA)^2 + dDec^2);
    the original multiplied cos(dec) against the *squared* RA difference,
    under-weighting the RA term at all declinations.
    """
    from numpy import cos, where, sqrt
    if isinstance(incat, str):
        # Only needed when reading from a file path.
        from astropy.io import ascii
        incat = ascii.read(incat)
    racat = incat['RA']
    deccat = incat['DEC']
    dec_rad = 0.0174533 * dec  # degrees -> radians for cos()
    darcsec = sqrt((cos(dec_rad) * (ra - racat))**2 + (dec - deccat)**2) * 3600
    ifar = where(darcsec > radius)[0]
    incat.remove_rows(ifar)
    if outcatfile:
        incat.write(outcatfile, format='ascii.commented_header')
    return incat
def main():
    """Command-line entry point: parse arguments and run convertToRefcat."""
    import argparse
    argparser = argparse.ArgumentParser(
        description='Convert a source catalog into the format required '
                    'for use as a tweakreg reference catalog.'
                    '(Requires astropy)' )
    # Required positional arguments
    argparser.add_argument('incatfile', help='Input catalog. May be in any format'
                           'recognized by astropy.')
    argparser.add_argument('refcatfile', help='Output reference catalog file name')
    # Optional column selectors and trimming/output controls
    argparser.add_argument('--fluxcol', default=None,
                           help='Name of the input catalog column containing '
                                'fluxes, to be used by tweakreg '
                                'for limiting detected source lists.')
    argparser.add_argument('--magcol', default=None,
                           help='Name of the input catalog column containing '
                                'magnitudes, to be converted to fluxes and then'
                                'used by tweakreg for limiting source lists.')
    argparser.add_argument('--ds9reg', type=str, metavar='X.reg',
                           help='Write out a ds9 region file.' )
    argparser.add_argument('--clobber', default=False, action='store_true',
                           help='Clobber existing reference catalog if it exists. [False]')
    argparser.add_argument('--verbose', default=False, action='store_true',
                           help='Turn on verbose output. [False]')
    argparser.add_argument('--trimctr', type=str, metavar='RA,DEC', default=None,
                           help='Center point for catalog trimming, in decimal deg.')
    argparser.add_argument('--trimrad', type=float, metavar='[arcsec]', default=None,
                           help='Radius for catalog trimming, in arcsec.')
    args = argparser.parse_args()
    convertToRefcat( args.incatfile, args.refcatfile, fluxcol=args.fluxcol,
                     magcol=args.magcol, ds9regfile=args.ds9reg,
                     trimctr=args.trimctr, trimrad=args.trimrad,
                     clobber=args.clobber, verbose=args.verbose )


if __name__=='__main__' :
    main()
| true |
90eb6e2066072ca67715466c85e635c9f9a38b65 | Python | segelmark/udacity-full-stack-projects | /projects/02_trivia_api/backend/flaskr/__init__.py | UTF-8 | 6,438 | 2.734375 | 3 | [] | no_license | import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
import random
from models import setup_db, Question, Category
# Number of questions returned per page by all paginated endpoints.
ENTRIES_PER_PAGE=10
def format_entities(entities):
    """Serialize each entity via its `.format()` method, preserving order."""
    return [item.format() for item in entities]
def paginate(entries, page, entries_per_page):
    """Return the slice of `entries` for the 1-indexed `page`, given `entries_per_page` per page."""
    first = (page - 1) * entries_per_page
    return entries[first:first + entries_per_page]
def paginate_questions(request, selection):
    """Format `selection` and return the page requested via the `page` query arg (default 1)."""
    page_number = request.args.get('page', 1, type=int)
    formatted = format_entities(selection)
    return paginate(formatted, page_number, ENTRIES_PER_PAGE)
def create_app(test_config=None):
    """ Create and configure the app """
    app = Flask(__name__)
    setup_db(app)
    # Allow cross-origin requests from any origin on every route.
    CORS(app, resources={r"/*": {"origins": "*"}})

    # CORS Headers - Setting access control allow
    @app.after_request
    def after_request(response):
        response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')
        response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
        return response

    @app.route('/categories')
    def retrieve_categories():
        """Endpoint to handle GET requests for all available categories"""
        try:
            categories = Category.query.order_by(Category.id).all()
        except:
            abort(422)
        #Make sure we got some categories
        if not categories:
            abort(404)
        #Return the categories
        return jsonify({
            'success': True,
            'categories': format_entities(categories),
        })

    @app.route('/questions')
    def retrieve_questions():
        """ Endpoint to handle GET requests for all questions paginated (10 questions) """
        try:
            questions = Question.query.order_by(Question.id).all()
            categories = Category.query.order_by(Category.id).all()
        except:
            abort(422)
        # Paginate list of questions and make sure it is a valid page
        questions_paginated = paginate_questions(request, questions)
        if not questions_paginated:
            abort(404)
        return jsonify({
            'success': True,
            'questions': questions_paginated,
            'categories': format_entities(categories),
            'total_questions': len(questions),
            'current_category': None
        })

    @app.route('/questions/<int:question_id>', methods=['DELETE'])
    def delete_question(question_id):
        """ Endpoint to DELETE question using a question ID. """
        try:
            question = Question.query.get(question_id)
        except:
            abort(422)
        #Make sure the question we want to delete exists
        if not question:
            abort(404)
        try:
            question.delete()
        except:
            abort(422)
        return jsonify({
            'success': True,
            'deleted': question_id
        })

    @app.route('/questions', methods=['POST'])
    def create_question():
        """ Endpoint to POST a new question """
        body = request.get_json()
        # Check that we are getting the required fields
        if not ('question' in body and 'answer' in body and 'difficulty' in body and 'category' in body):
            abort(422)
        new_question = body.get('question', None)
        new_answer = body.get('answer', None)
        new_difficulty = body.get('difficulty', None)
        new_category = body.get('category', None)
        try:
            question = Question(question=new_question, answer=new_answer,
                                difficulty=new_difficulty, category=new_category)
            question.insert()
            return jsonify({
                'success': True,
                'created': question.id,
            })
        except:
            abort(422)

    @app.route('/questions/search', methods=['POST'])
    def search_questions():
        """ Endpoint to get questions based on a search term. """
        try:
            body = request.get_json()
            search_term = body.get('searchTerm', None)
            if not search_term:
                abort(404)
            # Case-insensitive substring match on the question text.
            search_result = Question.query.filter(
                Question.question.ilike(f'%{search_term}%')
            ).all()
            return jsonify({
                'success': True,
                'questions': paginate_questions(request, search_result),
                'total_questions': len(search_result),
                'current_category': None
            })
        except:
            abort(404)

    @app.route('/categories/<int:category_id>/questions')
    def retrieve_questions_by_category(category_id):
        """ Endpoint to handle GET requests for questions in a certain category """
        questions = Question.query.filter_by(category=str(category_id)).order_by(Question.id).all()
        categories = Category.query.order_by(Category.type).all()
        if not questions:
            abort(404)
        return jsonify({
            'success': True,
            'questions': paginate_questions(request, questions),
            'categories': format_entities(categories),
            'total_questions': len(questions),
            'current_category': category_id
        })

    @app.route('/quizzes', methods=['POST'])
    def play_quiz():
        """ Endpoint to get questions to play the quiz """
        try:
            body = request.get_json()
            if not ('quiz_category' in body and 'previous_questions' in body):
                abort(422)
            category = body.get('quiz_category')
            previous_questions = body.get('previous_questions')
            # A numeric, non-zero category id restricts the pool to that
            # category; id '0' (or a non-numeric id) means "all categories".
            if str(category['id']).isnumeric() and str(category['id'])!='0':
                new_question = Question.query.filter_by(
                    category=(int(category['id']))
                ).filter(Question.id.notin_((previous_questions))).all()
            else:
                new_question = Question.query.filter(
                    Question.id.notin_((previous_questions))
                ).all()
        except:
            abort(422)
        # Pick a random unseen question; None signals the quiz is over.
        return jsonify({
            'success': True,
            'question': random.choice(new_question).format() if new_question else None,
        })

    #Error handlers for all expected errors
    @app.errorhandler(400)
    def error_bad_request(error):
        return jsonify({
            "success": False,
            "error": 400,
            "message": "Bad Request"
        }), 400

    @app.errorhandler(404)
    def error_not_found(error):
        return jsonify({
            "success": False,
            "error": 404,
            "message": "Resource Not Found"
        }), 404

    @app.errorhandler(422)
    def error_unprocessable(error):
        return jsonify({
            "success": False,
            "error": 422,
            "message": "Unprocessable Entity"
        }), 422

    return app
| true |
2307e654f7ff70cb518688e0ccfafc3e9178d83f | Python | zcmarine/continued_cs | /continued_cs/algorithms/island_count/test_module.py | UTF-8 | 2,407 | 3.1875 | 3 | [] | no_license | import logging
import pytest
from continued_cs.algorithms import island_count
# Enable debug logging for the module under test so failures show its internals.
logger = logging.getLogger('continued_cs.algorithms.island_count')
logger.setLevel(logging.DEBUG)
def test_initialize_tracker_grid():
    """A 3x5 tracker grid has 3 rows of 5 cells each."""
    grid = island_count.initialize_tracker_grid(nrows=3, ncols=5)
    assert len(grid) == 3
    assert len(grid[0]) == 5
def test_initialize_tracker_grid_rows_not_referencing_same_object():
    """Mutating one row must not affect the others.

    Guards against the classic bug of creating a single row list and
    replicating references to it (the old approach built one
    `zeroed_col` and repeated it for every row), so writing to one cell
    appeared in every row.
    """
    grid = island_count.initialize_tracker_grid(nrows=3, ncols=5)
    grid[0][0] = 'foo'
    assert grid[1][0] != 'foo'
@pytest.mark.parametrize('row, col, nrows, ncols, expected', [
    # A 1x1 grid has no neighbors at all.
    (0, 0, 1, 1, set()),
    # Corner cell of a 2x2 grid: only the right and down neighbors exist.
    (0, 0, 2, 2, set([(0, 1, island_count.FROM_NEIGHBOR), (1, 0, island_count.FROM_NEIGHBOR)])),
    # Center cell of a 3x3 grid: all four orthogonal neighbors exist.
    (1, 1, 3, 3, set([(0, 1, island_count.FROM_NEIGHBOR), (2, 1, island_count.FROM_NEIGHBOR),
                      (1, 0, island_count.FROM_NEIGHBOR), (1, 2, island_count.FROM_NEIGHBOR)])),
])
def test_get_neighbors(row, col, nrows, ncols, expected):
    # Neighbors are returned as (row, col, origin-marker) triples.
    assert island_count.get_neighbors(row, col, nrows, ncols) == expected
def test_one_island():
    """A single land cell forms exactly one island."""
    assert island_count.count_islands([[1]]) == 1
def test_one_island_non_square():
    """One land cell in a non-square (2x3) grid is a single island."""
    grid = [[int(cell) for cell in row] for row in ('000', '010')]
    assert island_count.count_islands(grid) == 1
def test_one_multiblock_island_non_square():
    """Adjacent land cells merge into one island, even in a non-square grid."""
    grid = [[int(cell) for cell in row] for row in ('010', '110')]
    assert island_count.count_islands(grid) == 1
def test_two_islands():
    """Diagonal contact does not join islands: two separate islands here."""
    grid = [[int(cell) for cell in row] for row in ('010', '110', '001')]
    assert island_count.count_islands(grid) == 2
def test_two_islands_non_square():
    """Two disconnected land masses in a 3x5 grid count as two islands."""
    grid = [[int(cell) for cell in row] for row in ('01000', '11010', '00010')]
    assert island_count.count_islands(grid) == 2
def test_multiple_islands_bigger_example():
    """A larger 5x6 map with five distinct islands."""
    grid = [[int(cell) for cell in row]
            for row in ('010001',
                        '110100',
                        '000100',
                        '010100',
                        '011011')]
    assert island_count.count_islands(grid) == 5
| true |
c776c8c5ddf3188a59c067c24a075d59c3fec450 | Python | FelSiq/antiderivative-solution-insertion-on-images | /symbol-recognition/runall.py | UTF-8 | 1,167 | 2.515625 | 3 | [
"MIT"
] | permissive | """Run all scripts in this subrepository."""
import sys
import sklearn
import balancing
import augmentation
import preprocessing
import symbol_recog
if __name__ == "__main__":
    # Skip flags: "b" skips balancing, "a" skips balancing+augmentation,
    # "p" skips balancing+augmentation+preprocessing.
    if len(sys.argv) > 1:
        print("Skip flags: {}".format(sys.argv[1:]))

    skip_balancing = not {"b", "a", "p"}.isdisjoint(sys.argv)
    if skip_balancing:
        print("Skipped data balancing.")
    else:
        print("Balancing classes...")
        balancing.balance_classes("./data-original", random_seed=1234)

    skip_augmenting = not {"a", "p"}.isdisjoint(sys.argv)
    if skip_augmenting:
        print("Skipped data augmenting.")
    else:
        print("Augmenting data...")
        augmentation.augment_data("./data-balanced", random_seed=2805)

    skip_preprocessing = "p" in sys.argv
    if skip_preprocessing:
        print("Skipped data preprocessing.")
    else:
        print("Preprocessing augmented data...")
        preprocessing.preprocess("./data-augmented")

    print("Getting data...")
    X, y = symbol_recog.get_data("./data-augmented-preprocessed")
    y = sklearn.preprocessing.LabelEncoder().fit_transform(y)
    print("Got all data (total of {} instances).".format(y.shape[0]))

    symbol_recog.train_models([3, 16, 25, 26, 27])
    print("Process finished.")
| true |
11354d0f4f93b2696ab8b8ec35014b158417f49c | Python | zarina494/fisrt_git_lesson | /5/5.4.py | UTF-8 | 1,767 | 3.140625 | 3 | [
"MIT"
] | permissive | product_list = ['bread','cheese','egg','meat']
# Recipes (indices into product_list):
#   buterbrod  = bread + cheese                (0+1)
#   biphsteks  = meat + egg                    (2+3)
#   gamburger  = bread + egg + meat            (0+2+3)
#   chizburger = bread + cheese + egg + meat   (0+1+2+3)
product_list = ['bread', 'cheese', 'egg', 'meat']

cook_list = []
print('U vas imeyutsya takie produkty:', product_list)
product = input('Vozmte product:')
i = 0
# Collect products until the user enters '0' or the plate is exhausted.
while product != '0' and i <= len(product_list):
    if product in product_list:
        cook_list.append(product)
    else:
        print('Ispolzuyte producty s tarelki!')
    product = input('Vozmit product:')
    i += 1

# BUG FIX: the original dispatch compared only list lengths (two branches
# were the always-true `len(cook_list) == len(cook_list)`), checked the
# wrong ingredient list inside each branch, and `ham`/`ch_ham` contained a
# stray leading space in ' bread' so they could never match.  Comparing
# ingredient *sets* recognizes each dish correctly (butter and biff have
# the same length, so length alone can never distinguish them).
butter = ['bread', 'cheese']
biff = ['meat', 'egg']
ham = ['bread', 'egg', 'meat']
ch_ham = ['bread', 'cheese', 'egg', 'meat']

taken = set(cook_list)
if taken == set(butter):
    print('Vy mojete prigotovit buterbrod')
elif taken == set(biff):
    print('Vy mojete prigotovit biphsteks')
elif taken == set(ham):
    print('Vy mojete prigotovit gamburger')
elif taken == set(ch_ham):
    print('Vy mojete prigotovit chizburger')
else:
    print('vy dali mne ne vernyi product:')
cc677c71f06b744dc739000d0cee678131ef629f | Python | MAlexa315/Practice | /Python_Basic_(Part -I).py | UTF-8 | 3,220 | 4.25 | 4 | [] | no_license | # https://www.w3resource.com/python-exercises/python-basic-exercises.php
# Solutions to w3resource basic Python exercises 1-12.
# (Imports gathered at the top; `list`/`tuple` variables renamed so they no
# longer shadow the built-in types; `calendar.month` now receives an int for
# its lines-per-week argument; "The are" output typo fixed to "The area".)
import calendar
import datetime
import math
import sys

# 1. Print the "Twinkle, twinkle" rhyme in a specific multi-line format.
print(
    "Twinkle, twinkle, little star,\n\t How I wonder what you are! \n\t\t Up above the world so high, \n\t\t Like a "
    "diamond in the sky. \nTwinkle, twinkle, little star, \n\t How I wonder what you are")

# 2. Show the Python version in use.
vers = sys.version
print('Python version: ' + vers)
print('Version info: ')
print(sys.version_info)

# 3. Display the current date and time.
now = datetime.datetime.now()
print('Current date and time : ' + now.strftime('%Y - %m - %d %H:%M:%S'))

# 4. Compute the area of a circle from a user-supplied radius.
r = float(input('Radius of the circle: '))
print('The area of the circle with radius ' + str(r) + ' is: ' + str(math.pi * r**2))

# 5. Read first and last name and print them in reverse order.
first_name = input('First name: ')
last_name = input('Last name: ')
print(last_name + ' ' + first_name)

# 6. Turn comma-separated user input into a list and a tuple.
numbers = input('Write some comma separated number here: ')
num_list = numbers.split(",")
num_tuple = tuple(num_list)
print('List: ', num_list)
print('Tuple: ', num_tuple)

# 7. Print the extension of a user-supplied filename.
file_name = input('Filename: ')
file_extension = file_name.split('.')
print('The file extension is: ' + file_extension[-1])

# 8. Display the first and last colors from the list (two formatting styles).
color_list = ["Red", "Green", "White", "Black"]
print(color_list[0] + ' ' + color_list[-1])
print('%s %s' % (color_list[0], color_list[-1]))

# 9. Display the examination schedule extracted from a date tuple.
exam_st_date = (11, 12, 2014)
print('The examination will start from: %i / %i /%i'% exam_st_date)

# 10. For an integer n, compute n + nn + nnn (digit repetition).
integer = int(input('Type an integer: '))
n1 = int('%s' % integer)
n2 = int('%s%s' % (integer, integer))
n3 = int('%s%s%s' % (integer, integer, integer))
print(n1 + n2 + n3)

# 11. Print the documentation of built-in functions.
print(abs.__doc__)
print(all.__doc__)

# 12. Print the calendar of a given month and year.
month = int(input('month: '))
year = int(input('year: '))
# `l` (lines per week) must be an int; the original passed 0.9, which the
# calendar module silently clamped to 1 via max(1, l).
cal_text = calendar.month(year, month, w=4, l=1)
print(cal_text)
| true |
e4fed73a697534c18a9a9bc7c10a37648f6ccd09 | Python | Yuchizz12/py2_samples | /Chapter2/list0202_1.py | UTF-8 | 822 | 3.296875 | 3 | [] | no_license | import tkinter
import math
def hit_check_circle():
    """Return True when the two circles (globals x1,y1,r1 and x2,y2,r2) overlap."""
    # The circles collide when the distance between their centres does not
    # exceed the sum of their radii; math.hypot is the Euclidean distance.
    return math.hypot(x1 - x2, y1 - y2) <= r1 + r2
def mouse_move(e):
    """Move the draggable circle to the mouse position and redraw it,
    switching its colour to lime while it overlaps the fixed circle."""
    global x1, y1
    x1 = e.x
    y1 = e.y
    # Plain truthiness instead of the original `== True` comparison.
    col = "lime" if hit_check_circle() else "green"
    canvas.delete("CIR1")
    canvas.create_oval(x1-r1, y1-r1, x1+r1, y1+r1, fill=col, tag="CIR1")
# Build the window and canvas; every mouse movement triggers mouse_move().
root = tkinter.Tk()
root.title("円によるヒットチェック")
canvas = tkinter.Canvas(width=600, height=400, bg="white")
canvas.pack()
canvas.bind("<Motion>", mouse_move)
# Movable circle (follows the mouse); tagged "CIR1" so it can be redrawn.
x1 = 50
y1 = 50
r1 = 40
canvas.create_oval(x1-r1, y1-r1, x1+r1, y1+r1, fill="green", tag="CIR1")
# Fixed circle used as the collision target.
x2 = 300
y2 = 200
r2 = 80
canvas.create_oval(x2-r2, y2-r2, x2+r2, y2+r2, fill="orange")
root.mainloop()
| true |
799497d996505edd2cea28608b3e6d5d2a90ea73 | Python | whglamrock/leetcode_series | /leetcode1002 Find Common Characters.py | UTF-8 | 702 | 3.359375 | 3 | [] | no_license |
from collections import Counter
class Solution(object):
    def commonChars(self, A):
        """Return the multiset of characters common to every word in A.

        :type A: List[str]
        :rtype: List[str]

        Intersecting Counters (`&`) keeps, for every letter, the minimum
        count across all words -- exactly the common characters with their
        multiplicity.  The original fixed-alphabet scan relied on the
        Python-2-only `xrange` and, with an empty input, appended each
        letter 2**31-1 times because its sentinel minimum was never lowered.
        """
        if not A:
            return []
        common = Counter(A[0])
        for word in A[1:]:
            common &= Counter(word)
        # elements() repeats each character according to its remaining count.
        return list(common.elements())
# Parenthesised call form: the original Python 2 print statement is a
# SyntaxError on Python 3, while print(...) works on both.
print(Solution().commonChars(A=["bella", "val", "roller"]))
| true |
d4052a129349e4237be0dabe75248bce50dcd8bb | Python | nandy23/python-belajar | /Studi Kasus/ganjilgenap.py | UTF-8 | 133 | 3.53125 | 4 | [] | no_license | bil = int(input("Masukan Bilangan : "))
# Classify `bil` as even ("genap") or odd ("ganjil") and report it.
if bil % 2 != 0:
    print(bil, "Bilangan Ganjil")
else:
    print(bil, "Bilangan genap")
| true |
31889fc34db741cf320daddb315a503a4b2da44a | Python | taoranzhishang/Python_codes_for_learning | /study_code/Day_25/时间模块/04取更精确的当前时间time.clock().py | UTF-8 | 116 | 2.96875 | 3 | [] | no_license | import time
# Time a tiny summation loop.  time.clock() was deprecated since Python 3.3
# and removed in 3.8; time.perf_counter() is the documented replacement for
# measuring short elapsed intervals.
start = time.perf_counter()
num = 0
for i in range(10000):
    num += i
end = time.perf_counter()
print(end - start)
| true |
14bf1b2da9b4e80767900e106bb1de54831d62c3 | Python | anyboby/A3C_CartPole | /src/optimizer.py | UTF-8 | 966 | 2.859375 | 3 | [] | no_license | import threading
import constants as Constants
"""
The optimizer calls MasterNetwork.optimize() endlessly in a loop, possibly from multiple threads
"""
class Optimizer(threading.Thread):
    """Worker thread that calls MasterNetwork.optimize() in a loop.

    Several Optimizer instances may run concurrently; they coordinate via
    the two class-level flags below, shared by all instances.
    NOTE(review): the flags are read/written without locks -- presumably
    relying on simple attribute assignment being atomic under the GIL;
    confirm if this is ever ported off CPython.
    """
    # Shared across all optimizer threads (class attributes).
    stop_signal = False
    write_summaries = False
    def __init__(self, master_network):
        """Bind this worker thread to the shared master network."""
        threading.Thread.__init__(self)
        self.master_network = master_network
    def run (self):
        """Optimize until stop() is called; every Constants.SUMMARY_STEPS
        successful steps, raise the shared write_summaries flag so the next
        optimize() call records summaries."""
        trainings = 0
        trained = False
        while not self.stop_signal:
            if Optimizer.write_summaries:
                trained = self.master_network.optimize(writesummaries=True)
                # Keep requesting summaries until an optimize() call succeeds.
                Optimizer.write_summaries = not trained
            else:
                trained = self.master_network.optimize()
            if trained:
                trainings += 1
                if trainings % Constants.SUMMARY_STEPS == 0:
                    Optimizer.write_summaries = True
    def stop(self):
        """Request loop termination (takes effect on the next iteration)."""
        # Assigning on the instance shadows the class attribute; run() reads
        # `self.stop_signal`, so the instance value takes effect.
        self.stop_signal = True
| true |
a3b04085d613fa14e97a2475166335138ad02a51 | Python | kissisland/loganalyzer | /guanjia.py | UTF-8 | 833 | 2.578125 | 3 | [] | no_license | import requests, csv
from lxml import html
from multiprocessing.dummy import Pool
all_data = []
def getHtml(page_num):
    """Scrape listing page `page_num` and append each entry's
    title/link/tags dict to the shared module-level `all_data` list
    (this function is run concurrently by the thread pool below)."""
    res = requests.get("http://www.zizhiguanjia.com/zs/pn{}".format(page_num))
    selector = html.fromstring(res.content)
    # One <div> per catalogue entry inside the page's result list.
    for info in selector.xpath("//div[@class='zsbase-r']/ul/li/div[@class='kn-time']/div"):
        title = info.xpath("a/text()")[0]
        link = info.xpath("a/@href")[0]
        # Join the entry's tag links into a single pipe-separated string.
        tags = '|'.join(info.xpath("span[3]/a/text()"))
        all_data.append({
            'title' : title,
            'link' : link,
            'tags' : tags,
        })
        print(title, link, tags)
# Fan the page fetches out over 10 worker threads (multiprocessing.dummy).
pool = Pool(10)
pool.map(getHtml, range(1, 622))
# `with` guarantees the CSV file is flushed and closed; the original left
# the handle returned by open() dangling, risking lost buffered rows.
with open("guanjia.csv", 'w', newline='', encoding='utf-8') as csv_file:
    writer = csv.DictWriter(f=csv_file, fieldnames=['title', 'link', 'tags'])
    writer.writerows(all_data)
| true |
ddb9898ee0841ea859e820a7a245956252d1acf6 | Python | th00tames1/- | /prep_sum.py | UTF-8 | 858 | 2.765625 | 3 | [] | no_license | f=open('stn_summer.csv','r')
# Aggregate summer precipitation per station and year from stn_summer.csv and
# write one "station,year,total," line per station/year to prep_sum.csv.
# (The original never closed the input file and relied on the Python-2-only
# `d.keys()` returning a list that supports .sort().)
d = {}
with open('stn_summer.csv', 'r') as f:
    for line in f:
        data = line.split(',')
        station = int(data[0])
        year = int(data[1])
        month = int(data[2])
        # Column 7 holds the daily precipitation; empty means none recorded.
        prep = float(data[7]) if data[7] != '' else 0
        # Nested mapping: station -> year -> month -> list of daily amounts.
        d.setdefault(station, {}).setdefault(year, {}).setdefault(month, []).append(prep)
with open('prep_sum.csv', 'w') as f1:
    for stn in sorted(d.keys()):
        for yr in d[stn].keys():
            # Total over all months of the station/year pair.
            sum_prep = sum(sum(days) for days in d[stn][yr].values())
            f1.write(str(stn) + ',' + str(yr) + ',' + str(sum_prep) + ',' + '\n')
| true |
b00e6e877588e3a2a71c3367888beec19d305712 | Python | ehudkr/expected-risk-frequencies | /expected_frequencies/expected_frequencies.py | UTF-8 | 14,926 | 2.796875 | 3 | [] | no_license | import math
import warnings
import altair as alt
# from typing import List
from .risk_conversions import calculate_exposed_absolute_risk
# SVG path for a single "person" pictogram -- the default icon rendered in
# the isotype (icon-array) charts below via mark_point(shape=...).
PERSON_SHAPE = (
    "M1.7 -1.7h-0.8c0.3 -0.2 0.6 -0.5 0.6 -0.9c0 -0.6 "
    "-0.4 -1 -1 -1c-0.6 0 -1 0.4 -1 1c0 0.4 0.2 0.7 0.6 "
    "0.9h-0.8c-0.4 0 -0.7 0.3 -0.7 0.6v1.9c0 0.3 0.3 0.6 "
    "0.6 0.6h0.2c0 0 0 0.1 0 0.1v1.9c0 0.3 0.2 0.6 0.3 "
    "0.6h1.3c0.2 0 0.3 -0.3 0.3 -0.6v-1.8c0 0 0 -0.1 0 "
    "-0.1h0.2c0.3 0 0.6 -0.3 0.6 -0.6v-2c0.2 -0.3 -0.1 "
    "-0.6 -0.4 -0.6z"
)

# SVG path for the diagonal stroke drawn over icons that represent a risk
# *reduction* (people no longer expected to have the outcome).
CROSS_SHAPE = "M -1.7 -2.5 L 2.5 3.5"
def plot_expected_frequencies(baseline_risk, added_risk, added_risk_type, population_size=100,
                              title="", configure_chart=True,
                              icon_shape=PERSON_SHAPE, icon_size=75, stroke_color="black", stroke_width=1.3,
                              cross_shape=CROSS_SHAPE, cross_width=None,
                              chart_width=350, chart_height=400):
    """Plot an icon array (isotype grid) of expected frequencies.

    The association measure is converted to an absolute risk, both risks are
    scaled to expected counts in a hypothetical population, and the counts
    are rendered as a grid of icons.

    Parameters
    ----------
    baseline_risk : float
        Fraction ([0.0, 1.0]) of the control group (WITHOUT the risk-factor)
        that also had the outcome (event) of interest.
    added_risk : float
        Value of the association measure (e.g., the odds-ratio or hazard-ratio).
    added_risk_type : {'odds_ratio', 'hazard_ratio', 'risk_ratio', 'percentage_change'}
        The type of association measure provided.
    population_size : int, optional
        Size of the hypothetical population on which to calculate the expectancy.
    title : str, optional
        Chart title.
    configure_chart : bool, optional
        Apply chart-level aesthetics; pass False if the chart will later be
        concatenated (e.g., `alt.vconcat()`).
    icon_shape : str, optional
        SVG path of the icon to plot (defaults to a person icon).
    icon_size : float or int, optional
        Icon size; consider adjusting `chart_width`/`chart_height` with it.
    stroke_color : str or bool, optional
        Contour color around the icon; pass False to draw no contour.
    stroke_width : float, optional
        Thickness of the icon contour.
    cross_shape : str, optional
        SVG path of the cross mark overlaid on "reduced" icons.
    cross_width : float, optional
        Stroke thickness of the cross; a reasonable value is derived if None.
    chart_width : int, optional
    chart_height : int, optional

    Returns
    -------
    alt.Chart
        Icon array of expected frequencies.
    """
    unexposed_count, exposed_count = _calculate_expected_frequencies(
        baseline_risk, added_risk, added_risk_type, population_size
    )
    return _plot_isotype_array(
        unexposed_count, exposed_count, population_size,
        title=title, configure_chart=configure_chart,
        icon_shape=icon_shape, icon_size=icon_size,
        stroke_color=stroke_color, stroke_width=stroke_width,
        cross_shape=cross_shape, cross_width=cross_width,
        chart_width=chart_width, chart_height=chart_height,
    )
def phrase_expected_frequencies(baseline_risk, added_risk, added_risk_type,
                                population_name, event_name, risk_factor_name,
                                followup_duration="", population_size=100, precision=0):
    """Generate a textual phrase conveying the baseline and exposed risk.

    Parameters
    ----------
    baseline_risk : float
        Fraction ([0.0, 1.0]) of the control group (WITHOUT the risk-factor)
        that also had the outcome (event) of interest.
    added_risk : float
        Value of the association measure (e.g., the odds-ratio or hazard-ratio).
    added_risk_type : {'odds_ratio', 'hazard_ratio', 'risk_ratio', 'percentage_change'}
        The type of association measure provided.
    population_name : str
        Description of the study population (e.g., 'hospitalized men between ages 45 to 64').
    event_name : str
        The measured outcome of the analysis (e.g., 'acute respiratory disorder').
    risk_factor_name : str
        The risk-factor or treatment tested (e.g., 'go through surgery').
    followup_duration : str, optional
        The followup time (e.g., '3 years').
    population_size : int, optional
        Size of the hypothetical population on which to calculate the expectancy.
    precision : int, optional
        How many decimals to round the expected frequencies (default 0).

    Returns
    -------
    str
        Textual phrasing of the expected frequency in both risk groups.
    """
    unexposed_count, exposed_count = _calculate_expected_frequencies(
        baseline_risk, added_risk, added_risk_type, population_size
    )
    return _generate_text(
        unexposed_count, exposed_count,
        population_size, precision,
        population_name, event_name, risk_factor_name, followup_duration
    )
def expected_frequencies(baseline_risk, added_risk, added_risk_type,
                         population_size=100, precision=0,
                         population_name="", event_name="", risk_factor_name="", followup_duration="",
                         plot_kwargs=None, plot_text=False):
    """Calculates expected frequencies, plots them as an icon-array (isotype-grid),
    and generates a textual phrase conveying the expected frequencies.

    Parameters
    ----------
    baseline_risk : float
        The fraction ([0.0, 1.0]) of samples in the control group (i.e. WITHOUT the risk-factor)
        That also had the outcome (or event) of interest.
    added_risk : float
        The value of the association measure (e.g., the odds-ratio or hazard-ratio).
    added_risk_type : {'odds_ratio', 'hazard_ratio', 'risk_ratio', 'percentage_change'}
        The type of association measure provided.
    population_size : int, optional
        Size of the hypothetical population on which to calculate the expectancy.
    precision : int, optional
        How many decimals to round the expected frequencies (the default is 0).
    population_name : str, optional
        A description of the study population. (e.g., 'hospitalized men between ages 45 to 64')
    event_name : str, optional
        What is the measured outcome of the analysis. (e.g., 'acute respiratory disorder')
    risk_factor_name : str, optional
        What is the risk-factor or treatment group tested. (e.g., 'go through surgery')
    followup_duration : str, optional
        What was the followup time (e.g., '3 years').
    plot_kwargs : dict, optional
        Keyword arguments for plotting an icon array.
        See the documentation of `plot_expected_frequencies` for details.
    plot_text : bool, optional
        Whether to add the generated text as a title to the chart.

    Returns
    -------
    ExpectedFrequencies
        A result object containing:
        * `baseline_expected_frequencies` - expected frequencies without the risk factor
        * `exposed_expected_frequencies` - expected frequencies with the risk factor
        * `chart` - Altair Chart of an icon array depicting the expected frequencies
        * `text` - textual phrasing conveying the expected frequencies.
    """
    # Work on a shallow copy so the caller's dict is never mutated when a
    # title is injected below (the original wrote into the caller's dict).
    plot_kwargs = dict(plot_kwargs) if plot_kwargs else {}

    baseline_ef, exposed_ef = (
        _calculate_expected_frequencies(baseline_risk, added_risk, added_risk_type, population_size)
    )

    text = _generate_text(
        baseline_ef, exposed_ef,
        population_size, precision,
        population_name, event_name, risk_factor_name, followup_duration
    )
    # Altair renders a list of strings as a multi-line title; break at commas.
    plot_title = text.replace(",", ",\n").split("\n") if plot_text else None
    if "title" not in plot_kwargs:  # A different title not already provided
        plot_kwargs["title"] = plot_title

    chart = _plot_isotype_array(
        baseline_ef, exposed_ef,
        population_size, **plot_kwargs
    )

    result = ExpectedFrequencies(baseline_ef, exposed_ef,
                                 chart, text)
    return result
def _calculate_expected_frequencies(baseline_risk, added_risk, added_risk_type, population_size):
    """Convert the association measure into an absolute exposed risk and
    scale both risks to expected counts in a `population_size` population."""
    absolute_exposed_risk = calculate_exposed_absolute_risk(
        baseline_risk, added_risk, added_risk_type
    )
    return population_size * baseline_risk, population_size * absolute_exposed_risk
def _generate_text(baseline_ef, exposed_ef, population_size, precision,
population_name, event_name, risk_factor_name, followup_duration=""):
base_text = (
f"Out of {population_size:d} {population_name} who did {{exposed}}{risk_factor_name}, "
f"we should expect {{ef:.{precision}f}} of them to also have {event_name}"
f"{f' over {followup_duration}' if followup_duration else ''}.\n"
)
text = ""
for ef, exposed in zip([baseline_ef, exposed_ef], [False, True]):
text += base_text.format(ef=ef, exposed="" if exposed else "not ")
return text
def _plot_isotype_array(baseline_ef, exposed_ef, population_size=100, title="", configure_chart=True,
                        icon_shape=PERSON_SHAPE, icon_size=75, stroke_color="black", stroke_width=1.3,
                        cross_shape=CROSS_SHAPE, cross_width=None,
                        chart_width=350, chart_height=400):
    """Render baseline vs. exposed expected frequencies as an Altair icon array.

    The first `baseline_ef` icons are coloured as the baseline group; on
    increased risk the following `exposed_ef - baseline_ef` icons are coloured
    as exposed, while on decreased risk the affected baseline icons are struck
    through with `cross_shape` instead.  Expects integer frequencies; floats
    are rounded with a warning.
    """
    if isinstance(baseline_ef, float) or isinstance(exposed_ef, float):
        warnings.warn("Can't currently plot (color) fractional icons. Rounding to nearest integer.")
        baseline_ef = round(baseline_ef)
        exposed_ef = round(exposed_ef)
    data = __generate_chart_source_data(baseline_ef, exposed_ef, population_size)
    root = round(math.sqrt(population_size)) # Create a square grid of total `population_size`
    # Grid layout technique taken from:
    # https://altair-viz.github.io/gallery/isotype_grid.html
    # row/col are computed in Vega expression language from the 1-based id.
    base_chart = alt.Chart(data).transform_calculate(
        row=f"ceil(datum.id/{root})",
        col=f"datum.id - datum.row*{root}",
    ).encode(
        x=alt.X("col:O", axis=None),
        y=alt.Y("row:O", axis=None),
    ).properties(
        width=chart_width,
        height=chart_height,
        title=title if title else ""
    )
    # One icon per person, coloured by its `hue` category.
    icons = base_chart.mark_point(
        filled=True,
        stroke=stroke_color,
        strokeWidth=stroke_width, # 2,
        size=icon_size,
    ).encode(
        color=alt.Color(
            'hue:N',
            scale=alt.Scale(
                domain=[0, 1, 2], # Explicitly specify `hue` values or coloring will fail if <3 levels exist in data
                range=[
                    "#FFFFFF", # Population (0)
                    "#4A5568", # Baseline (1)
                    "#FA5765", # Exposed (2) "#4078EF"
                ]),
            # TODO: add uncertainty using shade: lighter color fill of icons in the 95% CI.
            legend=None),
        shape=alt.ShapeValue(icon_shape),
    )
    chart = icons
    if exposed_ef < baseline_ef:
        # Risk reduction: overlay a cross on icons flagged `reduced` so the
        # removed outcomes remain visible; opacity 0 hides it elsewhere.
        stroke_out = base_chart.mark_point(
            # shape="cross",
            filled=True,
            stroke="#4078EF", # "black"
            # strokeWidth=cross_width,
            strokeWidth=math.sqrt(icon_size) / 1.7 if cross_width is None else cross_width,
            strokeCap="round",
            size=icon_size,
        ).encode(
            shape=alt.ShapeValue(cross_shape),
            opacity=alt.Opacity(
                'reduced:N',
                legend=None,
                scale=alt.Scale(
                    domain=[False, True],
                    range=[0, 1]
                ),
            )
        )
        chart += stroke_out
    if configure_chart: # Configured charts cannot be later concatenated.
        chart = chart.configure_title(
            align="left",
            anchor="start",
            offset=-10,
        ).configure_view(
            strokeWidth=0,
        )
    return chart
def __generate_chart_source_data(baseline_ef, exposed_ef, population_size):
    """Build the per-icon rows fed to the Altair chart.

    Returns `alt.Data` wrapping `population_size` dicts with keys:
      - `id`: 1-based icon index,
      - `hue`: 0 = plain population, 1 = baseline risk, 2 = additional exposed risk,
      - `reduced`: whether to cross out a baseline-risk icon (risk reduction).
    Expects integer expected frequencies (callers round floats beforehand).
    """
    data = [{"id": i, "hue": 0, "reduced": False}
            for i in range(1, population_size + 1)]
    for row in data[:baseline_ef]:
        row['hue'] = 1 # data.iloc[:baseline_ef]['hue'] = "Baseline"
    if exposed_ef >= baseline_ef: # Additional units under expose
        for row in data[baseline_ef:exposed_ef]:
            row['hue'] = 2 # data.iloc[baseline_ef:exposed_ef]['hue'] = "Exposed"
    else: # Baseline units to "remove" from the outcome
        # NOTE(review): this slice flags the LAST `exposed_ef` baseline icons,
        # not the `baseline_ef - exposed_ef` icons removed by the reduction --
        # confirm which count the strikethrough is meant to depict.
        for row in data[baseline_ef - exposed_ef:baseline_ef]:
            row['reduced'] = True # data.iloc[baseline_ef:exposed_ef]['reduced'] = True
    data = alt.Data(values=data)
    return data
class ExpectedFrequencies:
    """Result bundle produced by `expected_frequencies()`.

    Attributes
    ----------
    baseline_expected_frequencies : float
        Expected frequency of the outcome without the risk factor.
    exposed_expected_frequencies : float
        Expected frequency of the outcome with the risk factor.
    chart : alt.Chart
        Icon array depicting the expected frequencies.
    text : str
        Textual phrasing conveying the expected frequencies.
    """

    def __init__(self, baseline_expected_frequencies,
                 exposed_expected_frequencies,
                 chart, text):
        self.baseline_expected_frequencies = baseline_expected_frequencies
        self.exposed_expected_frequencies = exposed_expected_frequencies
        self.chart = chart
        self.text = text

    def __repr__(self):
        baseline = self.baseline_expected_frequencies
        exposed = self.exposed_expected_frequencies
        return f"ExpectedFrequencies: baseline - {baseline:.0f}, exposed - {exposed:.0f}."
| true |
a258651c1dd842794526c74ca9196f180bcf4709 | Python | tslearn-team/tslearn | /docs/examples/misc/plot_distance_and_matrix_profile.py | UTF-8 | 4,593 | 2.875 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
Distance and Matrix Profiles
============================
This example illustrates how the matrix profile is calculated. For each
segment of a timeseries with a specified length, the distances between
each subsequence and that segment are calculated. The smallest distance is
returned, except for trivial match on the location where the segment is
extracted from which is equal to zero.
"""
# Author: Gilles Vandewiele
# License: BSD 3 clause
import numpy
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.pyplot as plt
from tslearn.datasets import CachedDatasets
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn.matrix_profile import MatrixProfile
import warnings
warnings.filterwarnings('ignore')
# Set a seed to ensure determinism
numpy.random.seed(42)

# Load the Trace dataset
X_train, y_train, _, _ = CachedDatasets().load_dataset("Trace")

# Normalize the time series
scaler = TimeSeriesScalerMeanVariance()
X_train = scaler.fit_transform(X_train)

# Take the first time series
ts = X_train[0, :, :]

# We will take the spike as a segment
subseq_len = 20
start = 45
segment = ts[start:start + subseq_len]

# Create our matrix profile
matrix_profiler = MatrixProfile(subsequence_length=subseq_len, scale=True)
mp = matrix_profiler.fit_transform([ts]).flatten()

# Create a grid for our plots
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)

# Plot our timeseries
ax1.plot(ts, c='b', label='time series')
ax1.add_patch(patches.Rectangle((start, numpy.min(ts) - 0.1), subseq_len,
                                numpy.max(ts) - numpy.min(ts) + 0.2,
                                facecolor='b', alpha=0.25,
                                label='segment'))
ax1.axvline(start, c='b', linestyle='--', lw=2, alpha=0.5,
            label='segment start')
ax1.legend(loc='lower right', ncol=4, fontsize=8,
           handletextpad=0.1, columnspacing=0.5)
ax1.set_title('The time series')

# Inset plot with our segment
fig_ax_in = ax1.inset_axes([0.5, 0.55, 0.2, 0.4])
fig_ax_in.plot(scaler.fit_transform(segment.reshape(1, -1, 1))[0], c='b')
ax1.indicate_inset(inset_ax=fig_ax_in, transform=ax1.transData,
                   bounds=[start, numpy.min(ts) - 0.1, subseq_len,
                           numpy.max(ts) - numpy.min(ts) + 0.2],
                   linestyle='--', alpha=0.75)
fig_ax_in.tick_params(labelleft=False, labelbottom=False)
fig_ax_in.xaxis.set_visible(False)
fig_ax_in.yaxis.set_visible(False)

# Calculate a distance profile, which represents the distance from each
# subsequence of the time series and the segment.
# The z-normalised segment is loop-invariant, so compute it once instead of
# re-normalising it on every iteration as the original did.
scaled_segment = scaler.fit_transform(segment.reshape(1, -1, 1))
distances = []
for i in range(len(ts) - subseq_len):
    scaled_ts = scaler.fit_transform(ts[i:i+subseq_len].reshape(1, -1, 1))
    distances.append(numpy.linalg.norm(scaled_ts - scaled_segment))

# Mask out the distances in the trivial match zone, get the nearest
# neighbor and put the old distances back in place so we can plot them.
distances = numpy.array(distances)
mask = list(range(start - subseq_len // 4, start + subseq_len // 4))
old_distances = distances[mask]
distances[mask] = numpy.inf
nearest_neighbor = numpy.argmin(distances)
dist_nn = distances[nearest_neighbor]
distances[mask] = old_distances

# Plot our distance profile
ax2.plot(distances, c='b')
ax2.set_title('Segment distance profile')
dist_diff = numpy.max(distances) - numpy.min(distances)
ax2.add_patch(patches.Rectangle((start - subseq_len // 4,
                                 numpy.min(distances) - 0.1),
                                subseq_len // 2,
                                dist_diff + 0.2,
                                facecolor='r', alpha=0.5,
                                label='exclusion zone'))
ax2.scatter(nearest_neighbor, dist_nn, c='r', marker='x', s=50,
            label='neighbor dist = {}'.format(numpy.around(dist_nn, 3)))
ax2.axvline(start, c='b', linestyle='--', lw=2, alpha=0.5,
            label='segment start')
ax2.legend(loc='lower right', fontsize=8, ncol=3,
           handletextpad=0.1, columnspacing=0.5)

# Plot our matrix profile
ax3.plot(mp, c='b')
ax3.set_title('Matrix profile')
ax3.scatter(start, mp[start],
            c='r', marker='x', s=75,
            label='MP segment = {}'.format(numpy.around(mp[start], 3)))
ax3.axvline(start, c='b', linestyle='--', lw=2, alpha=0.5,
            label='segment start')
ax3.legend(loc='lower right', fontsize=8,
           handletextpad=0.1, columnspacing=0.25)

plt.tight_layout()
plt.show()
| true |
f3888f1b4413ca3fc14509aa19a0fccac7e6aecf | Python | Elliot47/Crawler | /crawler.py | UTF-8 | 1,466 | 2.703125 | 3 | [] | no_license | import re
import requests
from bs4 import BeautifulSoup
from time import sleep
def links_in_html(content, visited, folder, depth=0):
    """Recursively crawl Stash directory-listing pages under `folder`,
    probing the matching public site URL for every newly discovered page.

    Parameters
    ----------
    content : bytes
        HTML of the current listing page.
    visited : set
        Shared set of Stash URLs already crawled (mutated in place).
    folder : str
        Folder name that must appear in a URL for it to be followed.
    depth : int, optional
        Minimum number of '/'-separated segments a URL must exceed.
    """
    soup = BeautifulSoup(content, 'lxml')
    target_base = ' https://example.site.ru/{}'
    for a in soup.find_all('a'):
        link = a.get('href')
        stash_url = 'https://stash.site.ru{}'.format(link)
        # Skip already-visited pages, pages outside the target folder, and
        # paths not deeper than the crawl root.
        if (stash_url in visited
                or folder not in stash_url
                or len(stash_url.split('/')) <= depth):
            continue
        # Mark before fetching; the original also re-added the URL after the
        # recursive call, which was redundant.
        visited.add(stash_url)
        # NOTE(review): credentials are hard-coded here and below; move them
        # to configuration / environment variables.
        r = requests.get(stash_url, auth=('login', 'pass'))
        target_url = link.split('{}/'.format(folder))[-1]
        if target_url:
            target_link = target_base.format(target_url)
            target = requests.get(target_link)
            print('%s %s' % (target.status_code, target_link))
            if 'https://www.site.ru' not in target.url and target.status_code != 404:
                print(target.url, target.status_code, '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        if r.status_code == 200:
            links_in_html(r.content, visited, folder, depth=depth)
def get_weak_urls(repo_url, folder_name):
    """Fetch the repository listing at `repo_url` and crawl it for weak URLs.

    Prints 'not' and returns early when the listing cannot be fetched.
    (The original also computed an unused `url_parts` local; removed.)
    """
    r = requests.get(repo_url, auth=('login', 'pass'))
    if r.status_code != 200:
        print('not')
        return
    links_in_html(r.content, set(), folder=folder_name, depth=repo_url.count('/'))
if __name__ == '__main__':
    repo_url = 'https://stash.com/browse/'
    # The folder name is the second-to-last path segment ('browse' here,
    # because the trailing slash makes the last split element empty).
    get_weak_urls(repo_url, repo_url.split('/')[-2])
| true |
39cb950f5db5360d0e0486fe02f304534d805dea | Python | AndersonSM/P1-Solutions | /aplicacao.py | UTF-8 | 410 | 3.40625 | 3 | [] | no_license | # coding utf-8
# aplicacao_polinomios
# Anderson Sales
def calcula(lista, valor):
    """Evaluate the polynomial whose coefficients (lowest degree first) are
    the strings in `lista` at the point `valor`, and print the result.

    Uses the parenthesised print form, which works on both Python 2 and 3;
    the original `print soma` statement is a SyntaxError on Python 3.
    """
    soma = 0
    for grau, coef in enumerate(lista):
        soma += int(coef) * valor**grau
    print(soma)
# Python 2 REPL loop: read commands until the literal "fim" (end).
# "p c0 c1 ..." defines a new polynomial by its coefficients (degree 0 first);
# a bare integer evaluates the current polynomial at that point.
while True:
    entrada = raw_input()
    if entrada == "fim":
        break
    if entrada[0] == "p":
        lista = entrada.split()[1:]
        print "novo polinomio"
    else:
        # NOTE(review): evaluating before any "p ..." command raises a
        # NameError because `lista` is only assigned in the branch above.
        calcula(lista,int(entrada))
979114aaf5b3cb158bef4f506157cff45106719f | Python | olga3n/adventofcode | /2020/day_21_allergen_assessment_1.py | UTF-8 | 2,754 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
def parse_records(data):
    """Split each line 'ing1 ing2 ... (contains a1, a2)' into a
    (ingredient-set, allergen-set) tuple."""
    parsed = []
    for line in data:
        ingredient_part, allergen_part = line.split(' (contains ')
        # allergen_part still carries the trailing ')'; drop it before splitting.
        parsed.append((
            set(ingredient_part.split(' ')),
            set(allergen_part[:-1].split(', ')),
        ))
    return parsed
def ingredients_without_allergens(data):
    """Return how many times allergen-free ingredients appear across `data`.

    Repeatedly intersects pairs of records to pin down unique
    ingredient <-> allergen pairings; once every distinct allergen is
    resolved, counts the remaining (allergen-free) ingredient occurrences.
    """
    start_records = parse_records(data)
    records = start_records.copy()
    # ingredient -> allergen and allergen -> ingredient, filled in as each
    # pairing is uniquely resolved.
    ingredients_dict = {}
    allergens_dict = {}
    allergens_count = len({y for x in records for y in x[1]})
    # Keep refining until every distinct allergen is matched to an ingredient.
    while len(allergens_dict) < allergens_count:
        for i in range(len(records)):
            for j in range(i + 1, len(records)):
                ingredients = records[i][0].intersection(records[j][0])
                allergens = records[i][1].intersection(records[j][1])
                # Drop anything that was already resolved.
                ingredients = {
                    x for x in ingredients if x not in ingredients_dict
                }
                allergens = {
                    x for x in allergens if x not in allergens_dict
                }
                if len(ingredients) == 1 and len(allergens) == 1:
                    # A 1-to-1 intersection uniquely identifies a pairing.
                    ingredient = ingredients.pop()
                    allergen = allergens.pop()
                    ingredients_dict[ingredient] = allergen
                    allergens_dict[allergen] = ingredient
                elif len(ingredients) and len(allergens):
                    # Keep the smaller derived record; it may intersect
                    # further with others on the next pass.
                    records.append((ingredients, allergens))
        new_records = []
        for record in records:
            # Re-filter each record against the pairings found this round.
            ingredients = {x for x in record[0] if x not in ingredients_dict}
            allergens = {x for x in record[1] if x not in allergens_dict}
            if len(ingredients) == 1 and len(allergens) == 1:
                ingredient = ingredients.pop()
                allergen = allergens.pop()
                ingredients_dict[ingredient] = allergen
                allergens_dict[allergen] = ingredient
            # Only records with unresolved allergens stay for the next round.
            if len(allergens):
                new_records.append(record)
        records = new_records
    # Count occurrences (per original record) of ingredients that were never
    # paired with any allergen.
    result = 0
    for record in start_records:
        result += len({x for x in record[0] if x not in ingredients_dict})
    return result
class TestClass():
    """pytest tests for ingredients_without_allergens."""
    def test_ingredients_without_allergens(self):
        # Sample food list from the AoC 2020 day 21 puzzle statement; the
        # ingredients that cannot contain an allergen occur 5 times in total.
        data = [
            'mxmxvkd kfcds sqjhc nhms (contains dairy, fish)',
            'trh fvjkl sbzzf mxmxvkd (contains dairy)',
            'sqjhc fvjkl (contains soy)',
            'sqjhc mxmxvkd sbzzf (contains fish)'
        ]
        assert ingredients_without_allergens(data) == 5
def main():
    """Read non-empty, stripped puzzle lines from stdin and print the answer."""
    lines = []
    for raw_line in sys.stdin:
        stripped = raw_line.strip()
        if stripped:
            lines.append(stripped)
    print(ingredients_without_allergens(lines))


if __name__ == '__main__':
    main()
| true |
ff12f3c17794b6c9913e04a26ede6d1ebfacc000 | Python | intellihr/python_json_logger | /json_logger/utils.py | UTF-8 | 1,582 | 2.84375 | 3 | [] | no_license | PATCHED_LOGGER_METHODS = ('debug', 'info', 'warning',
'error', 'critical', 'exception')
class LoggerAdapter:
    """Proxy around a logger that routes the standard logging methods
    (those in PATCHED_LOGGER_METHODS) through an argument adapter before
    delegating; every other attribute is passed through untouched."""

    def __init__(self, logger, args_adapter):
        self.logger = logger
        self.args_adapter = args_adapter

    def __getattr__(self, name):
        attr = getattr(self.logger, name)
        if name in PATCHED_LOGGER_METHODS:
            return _wrap_logger_method(attr, self.args_adapter)
        return attr
def adapt_logger(logger, args_adapter):
    """Wrap `logger` so its standard logging calls run through `args_adapter`."""
    return LoggerAdapter(logger, args_adapter)
def to_api_logger(logger):
    """Adapt `logger` to pull falcon `request`/`response` keyword arguments
    into the logging `extra` dict (as `path` and `status`).

    Request/response objects are assumed to come from falcon, e.g.
    http://falcon.readthedocs.io/en/stable/api/request_and_response.html
    """
    def api_logger_adapter(msg, *args, **kwargs):
        extra = {}
        request = kwargs.pop('request', None)
        response = kwargs.pop('response', None)
        if request:
            query = '?' + request.query_string if request.query_string else ''
            extra['path'] = request.path + query
        if response and response.status:
            # falcon status strings look like "200 OK"; keep the numeric part.
            extra['status'] = int(response.status.split()[0])
        # Caller-supplied `extra` entries win over the derived ones.
        extra.update(kwargs.get('extra', {}))
        kwargs['extra'] = extra
        return msg, args, kwargs
    return adapt_logger(logger, api_logger_adapter)
def _wrap_logger_method(base_method, args_adapter):
def wrapped(msg, *args, **kwargs):
msg, args, kwargs = args_adapter(msg, *args, **kwargs)
return base_method(msg, *args, **kwargs)
return wrapped
| true |
3300cb36e1ee9e6ff8dd133e0bc39e021b65e066 | Python | J3B60/Sort-Algorithms-Coursework | /Bubblesort2.py | UTF-8 | 825 | 3.390625 | 3 | [] | no_license | import random as rd
#/ Bubble Sort /#
#///////////////#
#/ Input Array /#
#///////////////#
# Deliberately mixed-type input: strings, an int, a complex number, NaN,
# infinity and a float.
# NOTE(review): on Python 3 the `R[i-1] > R[i]` comparison below raises
# TypeError for str/int and for complex operands -- presumably this was
# written for Python 2 or for homogeneous data; confirm the target runtime.
A = ['P/\R',3,complex(3,6),float('NaN'),'g',float('inf'),4.32]
#///////////////#
print ("Input Array: ", A)
n = len(A) #Number of elements remaining
swapped = False # NOTE(review): set but never read; no early-exit happens
i = 1 #Element in Array
NoNaN = 0 #Number of NaNs
# NOTE(review): `R = A` binds a second name to the SAME list (no copy), so
# the sort below also mutates A in place.
R = A #Result Array
# Bubble sort: each outer pass floats the largest remaining element to the
# end; NaNs are detected via the `x != x` property and deleted on sight.
while (n > 0):
    swapped = False
    while i < n:
        if R[i] != R[i]:
            # NaN is the only value unequal to itself.
            del R[i]
            n -= 1
            NoNaN += 1
        elif R[i-1] != R[i-1]:
            del R[i-1]
            n -= 1
            NoNaN += 1
        else:
            if R[i-1] > R[i]:
                R[i], R[i-1] = R[i-1], R[i]
                swapped = True
            else:
                i += 1
    i = 1
    n -= 1
print ("Output Array: ", R)
print ("Number of NaNs: ", NoNaN)
| true |
9b6bf6d2e849bfed7b88997bab22de9f423b357d | Python | pizzicatomania/pythonClass | /pythonTest/xmlTest.py | UTF-8 | 1,205 | 2.578125 | 3 | [] | no_license | from bs4 import BeautifulSoup
import urllib.request as REQ
# NOTE(review): `url` (news RSS) is defined but never used below.
url = 'http://rss.joins.com/joins_news_list.xml'
# KMA (Korea Meteorological Administration) mid-term forecast RSS, station 109.
weather = 'http://web.kma.go.kr/weather/forecast/mid-term-rss3.jsp?stnId=109'
response= REQ.urlopen(weather)
soup = BeautifulSoup(response, 'html.parser')
# print(soup)
# One <location> element per city; each holds several <data> forecast entries.
for itemElem in soup.findAll('location'):
    print(itemElem.city.string)
    print(10*'=')
    for d in itemElem.findAll('data'):
        # Field labels are Korean: date / weather / minimum / maximum temp.
        print('날짜:', d.tmef.string)
        print('날씨:', d.wf.string)
        print('최저:', d.tmn.string)
        print('최고:', d.tmx.string)
        print(10*'-')
# fp = open('song.xml','r',encoding='utf-8')
# soup = BeautifulSoup(fp, "html.parser")
# # print (soup)
# chanElem = soup.find('channel')
# songElem = soup.new_tag('song', sname='sname4') #{'sname':'sname4', 'aa':'aaa'}
# titleElem = soup.new_tag('title')
# titleElem.string='song4'
# singerElem = soup.new_tag('singer')
# singerElem.string='singer4'
#
# songElem.append(titleElem)
# songElem.append(singerElem)
# chanElem.append(songElem)
#
# print(soup.prettify())
# for songElem in soup.findAll('song'):
# print(songElem['sname'])
# print(songElem.title.string)
# print(songElem.singer.string) | true |
db74f634dfb0cb3ad7d97f7a3e43ba6309756db2 | Python | lizzzcai/PyCon-Note | /PyConAPAC2018_Practical_Python_Design_Patterns/what_is_type.py | UTF-8 | 166 | 3 | 3 | [] | no_license |
# type() with three arguments (name, bases, namespace) creates a class
# dynamically -- equivalent to `class TestWithType(object): pass`.
TestWithType = type('TestWithType', (object,), {})
print(f'type(TestWithType): {type(TestWithType)}')
# Instances of the dynamically created class behave like normal instances.
ins1 = TestWithType()
print(f'type(ins1): {type(ins1)}')
| true |
0c01e3de99d39bba0cdbad3c8e3acde251fbdc7f | Python | reddymadhira111/Python | /programs/pra1.py | UTF-8 | 141 | 2.875 | 3 | [] | no_license | s="ababbcdeab"
# Count how often each character of `s` occurs and print the mapping.
# (Renamed from `l`/`l1`, which were easy to confuse with `1`; the original
# ended with `print(l2)`, a NameError since `l2` was never defined -- the
# intended output is the character -> count dictionary.)
s = "ababbcdeab"
chars = []
counts = []
for ch in s:
    chars.append(ch)
print(chars)
for ch in chars:
    counts.append(chars.count(ch))
print(counts)
d = dict(zip(chars, counts))
print(d)
96e5d960672a720ae8ebbc5f0b5b4015fd0da34a | Python | vishalre/HashtagGenerator | /HashtagGenerator/DL Model/modeling.py | UTF-8 | 8,477 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# TensorFlow 2.0.0 is required for this code to work.
# Uncomment and run the following lines to install the CPU version.
# !pip uninstall tenserflow
# !pip install tensorflow==2.0.0-beta0
# NOTE(review): the line above was an uncommented IPython "!" shell magic,
# which is a SyntaxError in a plain .py module; run it in a notebook instead.
# !pip install pyspark
# !pip install 'h5py<3.0.0'
# !pip install selenium
# !pip install colabcode
# !pip install fastapi
# !pip install python-multipart
# Commented out IPython magic to ensure Python compatibility.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import matplotlib.image as mpimg
from PIL import Image
import io
import tensorflow as tf
from tensorflow.keras.applications import MobileNetV2
from pyspark.sql import SparkSession
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS, ALSModel
from sklearn.model_selection import train_test_split
from functools import reduce
from functions import prepare_image, extract_features
import os
from tqdm import tqdm
from scipy.spatial.distance import cosine
from fastapi import FastAPI, File, UploadFile
# Fix the NumPy RNG seed so the run is reproducible.
np.random.seed(0)
"""### Get a list of all hashtags"""
# Each metadata file is named <hashtag>.json (see the f-string below).
json_file_names = os.listdir('metadata')
# Remove the 5 char .json file ending to isolate hashtag name
hashtags = [hashtag[:-5] for hashtag in json_file_names]
# remove '.DS_', '.ipynb_checkp'
non_hashtags = ['.DS_', '.ipynb_checkp']
# Drop known filesystem/notebook artifacts from the hashtag list.
for non_hashtag in non_hashtags:
    try:
        hashtags.remove(non_hashtag)
    # Fix: catch only ValueError (what list.remove raises for a missing
    # element) instead of a bare `except:` that would hide real errors.
    except ValueError:
        pass  # If we can't remove it, it's already gone
hashtags
# Build a dataframe of hashtag metadata
hashtag_metadata = []
for hashtag in hashtags:
    hashtag_metadata.append(pd.read_json(f'metadata/{hashtag}.json'))
# Concatenate the per-hashtag frames into one and renumber the rows.
hashtag_metadata = reduce(lambda x, y: pd.concat([x, y]), hashtag_metadata)
pd.DataFrame.reset_index(hashtag_metadata, drop=True, inplace=True)
hashtag_metadata.tail()
# Remove non-hashtags from hashtag list.
hashtag_metadata['hashtags'] = hashtag_metadata['hashtags'].apply(
    lambda hashtag_list: [h for h in hashtag_list if h.startswith('#')])
# Create a flattened list of all hashtags
all_hashtags = [hashtag for hashtags in hashtag_metadata['hashtags'] for hashtag in hashtags]
all_hashtags = sorted(list(set(all_hashtags)))
# Stable hashtag -> integer id mapping used as the ALS item id.
hashtag_lookup = {hashtag: i for i, hashtag in enumerate(all_hashtags)}
# One (image, hashtag) interaction row per tag on each image; the implicit
# "rating" is always 1 (the image used that hashtag).
hashtag_rec_data = []
for i in hashtag_metadata.index:
    hashtag_list = hashtag_metadata.loc[i, 'hashtags']
    for hashtag in hashtag_list:
        hashtag_rec_data.append(
            {'image_id': i,
             'hashtag_id': hashtag_lookup[hashtag],
             'rating': 1}
        )
hashtag_rec_data = pd.DataFrame(hashtag_rec_data)
hashtag_rec_data.tail()
"""# Creating our Neural Network"""
img_shape = (160, 160, 3)
# Create the base model from the pre-trained model MobileNet V2
base_model = tf.keras.applications.mobilenet_v2.MobileNetV2(input_shape=img_shape, include_top=False, weights='imagenet')
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
neural_network = tf.keras.Sequential([
base_model,
global_average_layer,
])
# Load every image referenced by the metadata and compute its deep features.
pics = []
for i, row in tqdm(hashtag_metadata.iterrows()):
    name = row['image_local_name']
    hashtag = row['search_hashtag']
    img_path = f'data/{hashtag}/{name}'
    try:
        img = prepare_image(img_path, where='local')
        deep_features = extract_features(img, neural_network)
        pics.append({'pic': img,
                     'hashtag': hashtag,
                     'name': name,
                     'deep_features': deep_features})
    except Exception as e:
        error_type = type(e).__name__
        if error_type == "NotFoundError":
            # If a file in the list isn't in
            # storage, skip it and continue
            pass
        else:
            print(e)
# Index the frame by file name so it can be joined on 'image_local_name'.
pics = pd.DataFrame(pics)
pics.index = pics['name']
#pics.head()
# Quick sanity check on the first loaded picture (notebook-style echoes).
pic = pics.iloc[0]
type(pic['pic'])
plt.imshow(pic['pic']);
pic['hashtag'], pic['deep_features'].shape, pic['pic'].shape
# Fit an implicit-feedback ALS model on the (image, hashtag) interactions.
spark = SparkSession.builder.master('local').getOrCreate()
als = ALS(userCol='image_id',
          itemCol='hashtag_id',
          implicitPrefs=True,
          alpha=40)
# Seed for reproducible factorization.
als.setSeed(0)
hashtag_spark_df = spark.createDataFrame(hashtag_rec_data)
als_model = als.fit(hashtag_spark_df)
# als_model.write().overwrite().save('als')
# NOTE: this section originally rebuilt `hashtag_rec_data` with the exact
# same loop that already ran above (same source data, same mapping, same
# DataFrame), so the duplicated work has been removed.
# Top-10 hashtag recommendations per image from the ALS model.
recs = als_model.recommendForAllUsers(numItems=10).toPandas()
recs.tail()
hashtag_index = list(all_hashtags)
def lookup_hashtag(hashtag_id):
    """Map an ALS hashtag id back to its '#...' string."""
    return hashtag_index[hashtag_id]
def lookup_hashtag_recs(rec_scores):
    """Convert a list of (hashtag_id, score) pairs to hashtag strings."""
    return [lookup_hashtag(rec) for (rec, score) in rec_scores]
recs['recommended_hashtags'] = recs['recommendations'].apply(lookup_hashtag_recs)
recs.index = recs['image_id']
# Attach the original metadata columns and keep only what's needed.
recs = recs.join(hashtag_metadata, how='left')[['recommendations',
                                                'recommended_hashtags',
                                                'hashtags',
                                                'image_local_name',
                                                'search_hashtag']]
recs.drop('recommendations', axis=1, inplace=True)
image_factors = als_model.userFactors.toPandas()
image_factors.index = image_factors['id']
# Notebook-style echo; the joined result is not assigned.
recs.join(image_factors);
# Add deep features information to recs dataframe
recs_deep = recs.join(pics, on='image_local_name', how='inner')
recs_deep.info()
recs.loc[0, 'image_local_name']
len(hashtag_lookup), type(hashtag_lookup)
# Inverted lookup table: integer id -> hashtag string, indexed by id.
hashtags_df = pd.DataFrame.from_dict(hashtag_lookup, orient='index')
hashtags_df.head()
hashtags_df = hashtags_df.reset_index()
hashtags_df.columns = ['hashtag', 'id']
hashtags_df.index = hashtags_df['id']
hashtags_df.drop('id', axis=1, inplace=True)
hashtags_df.head()
# Latent factor tables learned by ALS for images (users) and hashtags (items).
img_features = als_model.userFactors.toPandas()
hashtag_features = als_model.itemFactors.toPandas()
recs_deep_clean = recs_deep[['image_local_name', 'hashtags', 'deep_features']]
img_features.index = img_features['id']
# NOTE(review): this drop() is not in-place and its result is discarded, so
# the 'id' column survives -- confirm whether dropping it was intended.
img_features.drop(['id'], axis=1)
# Add image feature into dataframe
recommender_df = recs_deep_clean.join(img_features, how='inner')
recommender_df.head()
def find_neighbor_vectors(image_path, k=5, recommender_df=recommender_df):
    """Return the k rows of `recommender_df` whose deep features are
    closest (smallest cosine distance) to the image at `image_path`."""
    query = extract_features(prepare_image(image_path, where='local'),
                             neural_network)
    candidates = recommender_df.copy()
    candidates['dist'] = candidates['deep_features'].apply(
        lambda feats: cosine(feats, query))
    return candidates.sort_values(by='dist').head(k)
def generate_hashtags(image_path):
    """Recommend ten hashtags for the image at `image_path`.

    Averages the latent ALS factors of the five visually most similar
    images, scores every hashtag factor by dot product against that
    average, and returns the ten highest-scoring hashtag strings.
    """
    neighbors = find_neighbor_vectors(image_path, k=5,
                                      recommender_df=recommender_df)
    avg_features = np.mean(np.asarray(list(neighbors.features.values)), axis=0)
    hashtag_features['dot_product'] = hashtag_features['features'].apply(
        lambda feats: np.asarray(feats).dot(avg_features))
    top10 = hashtag_features.sort_values(by='dot_product',
                                         ascending=False).head(10)
    return [hashtags_df.iloc[hashtag_id]['hashtag']
            for hashtag_id in top10.id.values]
def show_results(test_image):
    """Show `test_image` titled with its (uppercased) name, then print and
    return the recommended hashtags for it."""
    plt.figure(figsize=(9, 9))
    plt.title(f'Original Hashtag: {test_image.upper()}', fontsize=32)
    plt.imshow(mpimg.imread(test_image))
    recommended_hashtags = generate_hashtags(test_image)
    print(', '.join(recommended_hashtags))
    return recommended_hashtags
import shutil
from fastapi.middleware.cors import CORSMiddleware
# Expose the recommender as a small HTTP API; CORS is fully open ("*").
app = FastAPI()
origins=["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_headers=["*"],
    allow_methods=["*"],
)
@app.post("/predict/image")
async def predict_api(file: UploadFile = File(...)):
    """Save the uploaded image to a fixed local path and return the
    recommended hashtags for it."""
    # NOTE(review): the fixed filename means concurrent requests overwrite
    # each other's upload -- confirm single-client use is intended.
    with open("destination.jpg", "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
    return show_results("destination.jpg")
# Serve the app from Colab on port 10000.
from colabcode import ColabCode
server = ColabCode(port=10000, code=False)
server.run_app(app=app)
31957089e015913f03da408ad0b7aff30e2ff9f6 | Python | snkemp/__Kavascript | /KML/src/parser.py | UTF-8 | 1,274 | 2.546875 | 3 | [] | no_license |
import os, re, code
from src.utils import *
from src.elements import *
class Token:
    """One lexical token produced by a named-group regex match."""

    def __init__(self, match):
        """Record the name of the group that matched and the matched text."""
        self.name, self.literal = match.lastgroup, match.group(0)

    def __repr__(self):
        return '<{} : {}>'.format(self.name, self.literal)
def tokenize(filename, pattern):
    """Lex the file at `filename` into a list of Token objects, one per
    match of the compiled grammar `pattern`, in document order."""
    with open(filename, 'r') as source_file:
        source = source_file.read()
    return list(map(Token, pattern.finditer(source)))
def context_free_grammar():
    """Compile the KML token grammar into one alternation regex.

    Each token kind becomes a named group; match order follows the
    declaration order below, and matching is MULTILINE + IGNORECASE.
    """
    token_specs = [
        ('white_space', r'\s+'),
        ('comment', r'/\*.*?\*/'),
        ('atom', r'[a-z]\w*'),
        ('identifier', r'#[a-z]\w*'),
        ('classifier', r'-[a-z]\w*'),
        ('attribute', r'@[a-z]\w*'),
        ('lbrace', r'\{'),
        ('rbrace', r'\}'),
        ('lparen', r'\('),
        ('rparen', r'\)'),
        ('separator', r','),
        ('assignment', r':'),
        ('terminator', r';'),
        ('string', r"'([^']*?)'"),
        ('script', r'"""([^"]*?)"""'),
    ]
    combined = '|'.join('(?P<%s>%s)' % spec for spec in token_specs)
    return re.compile(combined, re.M | re.I)
def parse(filename, port=False, minify=False):
    """Parse `filename` into a Document: feed it every lexed token and
    write the result out.

    `port` and `minify` are accepted for API compatibility; they are not
    used here (as in the original implementation).
    """
    document = Document(filename)
    grammar = context_free_grammar()
    for token in tokenize(filename, grammar):
        document.feed(token)
    document.write()
| true |
26eb57500bba43a4f713d8718b715f62f0c3fa8a | Python | SerhiiKhyzhko/SoftServe | /functions/ET8_fibonacci_nums.py | UTF-8 | 2,539 | 3.921875 | 4 | [] | no_license | from decorators_and_additional_vars.decorators import input_integer_data_validation, length
from decorators_and_additional_vars.additional_vars import fib_nums
from custom_exceptions.exceptions import InvalidLength, InvalidInteger
@length
@input_integer_data_validation
def fibonacci_length(length: int) -> list:
    '''
    Generate all Fibonacci numbers that have exactly `length` digits.

    :param length: desired digit count (the decorators appear to validate
        and adapt the raw input -- TODO confirm their contract)
    :return: comma-separated string of matching Fibonacci numbers
        (NOTE(review): the `-> list` annotation disagrees with the actual
        return type, which is `str` from ','.join)
    '''
    # fib_nums appears to map a digit count to a precomputed pair of seed
    # Fibonacci numbers, letting the loop start near that magnitude; keys
    # seem capped at 10 -- verify against additional_vars.
    first_num, second_num, fib_num, res = fib_nums[10][0], fib_nums[10][1], 0, []
    if length < 10:
        first_num, second_num = fib_nums[length][0], fib_nums[length][1]
    # Advance the sequence, collecting every number with the exact length,
    # until the numbers grow longer than `length` digits.
    while len(str(fib_num)) <= length:
        fib_num = first_num + second_num
        first_num = second_num
        second_num = fib_num
        if len(str(fib_num)) == length:
            res.append(str(fib_num))
    return ','.join(res)
@length
@input_integer_data_validation
def fibonacci_range(start: int, finish: int) -> list:
    '''
    Generate all Fibonacci numbers in the inclusive range [start, finish].

    :param start: lower bound of the range
    :param finish: upper bound of the range
    :return: comma-separated string of matching Fibonacci numbers
        (NOTE(review): the `-> list` annotation disagrees with the actual
        return type, which is `str` from ','.join)
    '''
    # Seed the iteration from the precomputed pair whose magnitude matches
    # the digit count of `start` (capped at 10) -- see fib_nums.
    first_num, second_num, fib_num, res = fib_nums[10][0], fib_nums[10][1], 0, []
    if len(str(start)) < 10:
        first_num, second_num = fib_nums[len(str(start))][0], fib_nums[len(str(start))][1]
    while fib_num < finish:
        fib_num = first_num + second_num
        first_num = second_num
        second_num = fib_num
        if fib_num >= start and fib_num <= finish:
            res.append(str(fib_num))
    return ','.join(res)
def main():
    """Interactive console loop: repeatedly ask whether to generate a
    Fibonacci sequence, by range (action 1) or by digit length (action 2),
    until the user declines."""
    want_to_continue, param_numbers = True, [1, 2]
    while want_to_continue:
        print('Do you want to get Fibonacci sequence, y\\n?')
        if input('> ').lower() in ['y', 'yes']:
            print(
                '1. Input range of Fibonacci sequence \n2. Input length of fibonacci number \nChoose an action(write a number)')
            action = input('> ')
            if action not in ['1', '2']:
                print('Invalid input, try again')
                continue
            # Raw comma-separated user input; the validation decorators on
            # the fibonacci_* functions presumably convert these strings
            # (note they accept a list plus an expected-parameter count
            # here, not the annotated int signatures) -- TODO confirm.
            input_data = [elem for elem in input('input data \n> ').split(',')]
            print('result is ', end='')
            try:
                if action == '1':
                    print(fibonacci_range(input_data, param_numbers[1]))
                else:
                    print(fibonacci_length(input_data, param_numbers[0]))
            # Domain-specific validation failures are reported, not fatal.
            except(InvalidLength, InvalidInteger) as error:
                print(error)
        else:
            print('Bye')
            want_to_continue = False
| true |
e2b3ebb80f38e86cff0826c1f4d1f0532e326e09 | Python | Sahil94161/projects | /ass17.py | UTF-8 | 2,291 | 3.890625 | 4 | [] | no_license | # Q1. Write a python program using tkinter interface to
# write Hello World and a exit button that closes the interface.
# Ans-
# from tkinter import *
# import sys
# def exit():
# sys.exit()
# w=Tk()
# l=Label(w,text="Hello World",width=50,bg="green",fg="red")
# l.pack()
# b=Button(w,text="Exit",width=25,bg="yellow",command=exit)
# b.pack()
# w.mainloop()
# Q2. Write a python program to in the same interface as above
# # and create a action when the button is click it will display
# # some text.
#Ans-
# from tkinter import *
# def disp():
# l=Label(w,text="Hello World",width=50,bg="green",fg="red")
# l.pack()
# w=Tk()
# b=Button(w,text="Click!",width=25,bg="yellow",command=disp)
# b.pack()
# w.mainloop()
#
# Q3. Create a frame using tkinter with any label text and two buttons.
# One to exit and other to change the label to some other text.
# Ans-
from tkinter import *
import sys
# def exit():
# sys.exit()
# def show():
# if(t.get()==0):
# n.set("Hello Python")
# t.set(1)
# else:
# n.set("Hello Java")
# t.set(0)
#
# def display(r):
# print(r)
#
#
# root =Tk()
# t = IntVar()
# t.set(0)
#- n=StringVar()
# n.set("Hello World")
# root.title("Windows")
# root.geometry("205x250")
# root.resizable(True,False)
# root.minsize(200,200)
# root.maxsize(300,300)
# l = Label(root, textvariable=n, width=50, bg="dodgerblue", fg="white")
# l.pack()
# b=Button(root,text="Exit",width=25,bg="green",fg="yellow",command=exit)
# b.pack()
#
# b1=Button(root,text="Change",width=25,bg="green",fg="yellow",command=show)
# b1.pack()
#
# b2= Button(root, text="Hello", command=lambda : display(10))
# b2.pack()
# root.mainloop()
# Q4. Write a python program using tkinter interface to
# take an input in the GUI program and print it.
#Ans-
# from tkinter import *
# def show_entry_fields():
# print("First Name: %s\nLast Name: %s" % (e1.get(), e2.get()))
# w = Tk()
# Label(w, text="First Name").grid(row=0)
# Label(w, text="Last Name").grid(row=1)
#
# e1 = Entry(w)
# e2 = Entry(w)
#
# e1.grid(row=0, column=1)
# e2.grid(row=1, column=1)
# b=Button(w, text='Quit', command=w.quit).grid(row=3, column=0, sticky=W,pady=4,)
#
# c=Button(w, text='Show', command=show_entry_fields).grid(row=3, column=1, sticky=W, pady=4)
#
# mainloop( )
| true |
051b31499b8f629a25768f2dcb46a603c255be8c | Python | cskuntal10/investment | /mutualfund/interface.py | UTF-8 | 2,981 | 2.765625 | 3 | [] | no_license | import tkinter as tk
from core import get_investment_suggestions, invest
from consts import MF_DETAILS
class InvestMenu(tk.Frame):
    """Form frame with a mutual-fund scheme dropdown, an amount entry and
    a Submit button that performs an investment via core.invest()."""
    def __init__(self, root, *args, **kwargs):
        tk.Frame.__init__(self, root, *args, **kwargs)
        # Field labels in column 0.
        label_scheme = tk.Label(self, text="Scheme")
        label_amount = tk.Label(self, text="Amount")
        label_scheme.grid(row=0, column=0, ipadx=5, pady=5, sticky=tk.W + tk.N)
        label_amount.grid(row=1, column=0, ipadx=5, pady=5, sticky=tk.W + tk.N)
        # Dropdown backed by a StringVar; options come from MF_DETAILS keys,
        # defaulting to the first scheme.
        self.schem_options = tk.StringVar(self)
        OPTIONS = list(MF_DETAILS.keys())
        self.schem_options.set(OPTIONS[0])
        self.entry_scheme = tk.OptionMenu(self, self.schem_options, *OPTIONS)
        self.entry_amount = tk.Entry(self)
        self.entry_scheme.grid(row=0, column=1, ipadx=5, pady=5, sticky=tk.W + tk.N)
        self.entry_amount.grid(row=1, column=1, ipadx=5, pady=5, sticky=tk.W + tk.N)
        MyButton1 = tk.Button(self, text="Submit", width=10, command=self.invest)
        MyButton1.grid(row=2, column=1)
    def invest(self):
        """Read the selected scheme and entered amount and invest them."""
        fund=self.schem_options.get()
        amount=self.entry_amount.get()
        # Calls the module-level core.invest imported at the top of the
        # file; the method name shadows it only as an attribute, not here.
        invest(fund,amount)
        print("Invested today")
class SuggestionMenu(tk.Frame):
    """Read-only table frame showing the last investment details and
    today's change, built from get_investment_suggestions()."""
    def __init__(self, parent, *args, **kwargs):
        tk.Frame.__init__(self, parent, *args, **kwargs)
        suggestions = get_investment_suggestions()
        table_header = ['date', 'fund', 'nav', 'units', 'amount', 'yday_nav', 'nav_change(%)', 'index_today']
        # Nothing is rendered when there are no suggestions.
        if suggestions:
            # Two section titles spanning the table width (row 0).
            title_label = tk.Label(self, borderwidth=1, relief="solid", text = 'Last Investment Details', width=62)
            title_label.grid(row=0, column=0, columnspan=5, sticky=tk.W)
            today_label = tk.Label(self, borderwidth=1, relief="solid", text='Today Change', width=37)
            today_label.grid(row=0, column=5, columnspan=3, sticky=tk.W)
            # Column headers (row 1).
            for row in range(len(table_header)):
                header_label = tk.Label(self, borderwidth=1, relief="solid", text = table_header[row].upper(), width=12, anchor='w')
                header_label.grid(row=1, column=row, sticky=tk.W)
            # One table row per suggestion dict, from row 2 downwards;
            # cell values are looked up by header key (missing keys show
            # as None via dict.get).
            num_rows = len(suggestions)
            num_cols = len(list(suggestions[0].keys()))
            for row in range(num_rows):
                for column in range(num_cols):
                    label = tk.Label(self, borderwidth=1, relief="solid", text=suggestions[row].get(table_header[column]), width=12, anchor='w')
                    label.grid(row=row+2, column=column, sticky=tk.N)
def _main_window():
    """Create and configure the application's top-level Tk window."""
    root = tk.Tk()
    # Keep the window above all other windows.
    root.attributes('-topmost', True)
    root.geometry("1000x500")
    root.title('Money Guru')
    return root
def start_ui():
    """Build the main window, mount both menus and enter the Tk loop.

    Blocks until the window is closed.
    """
    root=_main_window()
    invest_menu = InvestMenu(root, borderwidth=2, relief="solid")
    suggest_menu = SuggestionMenu(root, borderwidth=1, relief="solid")
    invest_menu.grid(padx=10, pady=20)
    suggest_menu.grid(padx=10, pady=20)
    root.mainloop()
c41f2e329ee9e74511a814e44157caf3adec9d6f | Python | Apollo1840/United_Kagglers | /tools/data_loader.py | UTF-8 | 838 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import pandas as pd
import os
# Name of the project root directory that all data paths are relative to.
PROJECT_FOLDER = 'United_Kagglers'
def change_dir_to_UKa():
    """Walk upward from the current working directory until a directory
    named PROJECT_FOLDER is found, and chdir into it.

    Raises:
        FileNotFoundError: if no ancestor directory is named
            PROJECT_FOLDER. (The original looped forever in that case,
            because os.path.dirname() of the filesystem root returns the
            root itself.)
    """
    path = os.getcwd()
    while os.path.basename(path) != PROJECT_FOLDER:
        parent = os.path.dirname(path)
        if parent == path:  # reached the filesystem root without a match
            raise FileNotFoundError(
                "no ancestor directory named %r above %r"
                % (PROJECT_FOLDER, os.getcwd()))
        path = parent
    os.chdir(path)
def load_data(DATA_PATH):
    """Load the train/test CSVs found under DATA_PATH.

    DATA_PATH must end with a path separator, because the file names are
    appended by plain string concatenation.
    Returns (data_train, data_test) as pandas DataFrames.
    """
    # Make DATA_PATH resolvable relative to the project root first.
    change_dir_to_UKa()
    # data_train = pd.read_csv(os.path.dirname(__file__)+'\\datasets\\k000_titanic\\train.csv')
    data_train = pd.read_csv(DATA_PATH + 'train.csv')
    data_test = pd.read_csv(DATA_PATH + 'test.csv')
    return data_train, data_test
'''
how to load data?
1) download the data to datasets, put it into the project folder
2) in py file, define the PROJECT_FOLDER
3) from tools.data_loader import load_data
4) data_train, data_test = load_data(DATA_PATH)
''' | true |
e378dbd9cd343eae0beba8226a4d16ff0c5cff76 | Python | plygrnd/snoowatch | /src/snoowatch/log.py | UTF-8 | 636 | 2.78125 | 3 | [
"MIT"
] | permissive | import logging
def log_generator(__name__):
    """Create (or fetch) a DEBUG-level logger with a console handler.

    The logger is named after the calling module so records show their
    origin. Returns the configured logging.Logger.
    """
    # We want the logger to reflect the name of the module it's logging.
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # Bug fix: logging.getLogger returns the same logger for the same name,
    # so unconditionally adding a handler made every record print once per
    # call to this function. Only attach the handler the first time.
    if not logger.handlers:
        # Create a console logger for when this runs as a streaming processor
        # TODO: implement streaming processing
        console_logger = logging.StreamHandler()
        console_logger.setLevel(logging.DEBUG)
        # It has to be readable
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        console_logger.setFormatter(formatter)
        logger.addHandler(console_logger)
    return logger
| true |
be23a9316ec3a8af5378ea40b73919d55c02693a | Python | ding8848/my_pygames | /src/crazyforhoney.py | UTF-8 | 2,705 | 2.875 | 3 | [] | no_license | import sys, pygame, random
# Sprite surfaces, loaded from absolute paths on the author's machine.
# TODO: make these paths relative so the game runs elsewhere.
bear = pygame.image.load('/Users/bob/Desktop/my_pygames/res/bear.png')
honey = pygame.image.load('/Users/bob/Desktop/my_pygames/res/honey.png')
coin = pygame.image.load('/Users/bob/Desktop/my_pygames/res/coin.png')
eat = pygame.image.load('/Users/bob/Desktop/my_pygames/res/eat.png')
background = pygame.image.load('/Users/bob/Desktop/my_pygames/res/background.png')
# Bear start position and current velocity (pixels per frame).
bearX = 512
bearY = 200
stepX = 0
stepY = 0
# Honey fall speed and the live lists of honey / coin positions.
honey_speed=2
honey_objs=[]
coins_pos = []
def keydown_event(event, stepX, stepY, bear_pos):
    """Translate a key press into new bear velocity or a dropped coin.

    Arrow keys set a horizontal/vertical speed of +-5; SPACE drops a coin
    just below the bear. Returns (stepX, stepY, coin_pos) where coin_pos
    is [] when no coin was dropped.
    """
    coin_pos = []
    horizontal = {pygame.K_RIGHT: 5, pygame.K_LEFT: -5}
    vertical = {pygame.K_UP: -5, pygame.K_DOWN: 5}
    if event.key in horizontal:
        stepX = horizontal[event.key]
    elif event.key in vertical:
        stepY = vertical[event.key]
    elif event.key == pygame.K_SPACE:
        coin_pos = [bear_pos[0], bear_pos[1] + 10]
    return stepX, stepY, coin_pos
def honey_show(honey_objs, startY=-60):
    """Draw all honey sprites; spawn new ones until five exist, otherwise
    advance every honey downward by honey_speed pixels.

    New honeys appear at a random x just above the visible screen.
    Returns the updated position list.
    """
    if len(honey_objs) < 5:
        spawn = [random.randint(0, 800), startY]
        screen.blit(honey, spawn)
        honey_objs.append(spawn)
    else:
        for idx, current in enumerate(honey_objs):
            screen.blit(honey, current)
            honey_objs[idx] = [current[0], current[1] + honey_speed]
    return honey_objs
def distance(cx, cy, hx, hy):
    """Return the Euclidean distance between (cx, cy) and (hx, hy).

    Bug fix: the original called math.sqrt() although the module never
    imported `math`, so every call raised NameError; math.hypot computes
    the same value and is robust against intermediate overflow.
    """
    return math.hypot(cx - hx, cy - hy)
def screen_border(X, Y):
    """Clamp a position to the 1024x577 playable area and return it."""
    X = max(0, min(X, 1024))
    Y = max(0, min(Y, 577))
    return X, Y
pygame.init()
# Window size; `size` keeps the tuple for set_mode.
size = weight, height = 1024, 577
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Crazy for honey")
# Main game loop: move the bear, draw sprites, fire coins at honeys and
# handle input; exits only via the window-close event.
while 1:
    coin_pos_ = []
    # Advance the bear by its current velocity and clamp it on screen.
    bearX = bearX + stepX
    bearY = bearY + stepY
    bearX,bearY = screen_border(bearX,bearY)
    screen.blit(background,(0,0))
    screen.blit(bear,(bearX,bearY))
    honey_objs = honey_show(honey_objs)
    # Move every coin up by 10 px per frame and check it against every
    # honey; a hit shows the "eat" sprite and respawns that honey above.
    i = 0
    for v in coins_pos:
        coins_pos[i] = [v[0],v[1]-10]
        screen.blit(coin,(coins_pos[i][0]+45,coins_pos[i][1]))
        distance_c = [coins_pos[i][0]+45,coins_pos[i][1]]
        hi = 0
        for hp in honey_objs:
            if distance(distance_c[0],distance_c[1],hp[0],hp[1]) < 60:
                screen.blit(eat,(hp[0],hp[1]))
                honey_objs[hi] = [random.randint(0,900),-50]
            hi = hi + 1
        i = i + 1
    # Input handling: quit, or update velocity / drop a coin on key press.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.display.quit()
            sys.exit()
        if event.type == pygame.KEYDOWN:
            stepX, stepY, coin_pos_ = keydown_event(event,stepX,stepY,[bearX,bearY])
    if len(coin_pos_) > 0:
        coins_pos.append(coin_pos_)
    pygame.display.update()
| true |
class Classy:
    """Holder for the classic "meeting rooms II" interval problem."""

    def meetingRoom(self, intervals):
        '''
        Given an array of meeting time intervals consisting of start and
        end times [[s1,e1],[s2,e2],...] (si < ei), return the minimum
        number of conference rooms required.

        Example 1: [[0, 30],[5, 10],[15, 20]] -> 2
        Example 2: [[7,10],[2,4]] -> 1
        '''
        if not intervals:
            return 0
        import heapq
        # Min-heap of end times: one entry per room currently in use.
        room_ends = []
        for start, end in sorted(intervals, key=lambda iv: iv[0]):
            if room_ends and room_ends[0] <= start:
                # The earliest-finishing room is free again: reuse it.
                heapq.heapreplace(room_ends, end)
            else:
                # Every room is still busy: open a new one.
                heapq.heappush(room_ends, end)
        # Bug fix: the original returned the internal heap (a list of end
        # times) even though its docstring promises the minimum number of
        # rooms; return the count instead.
        return len(room_ends)
# Smoke-test calls: print the result for the docstring examples plus a few
# extra interval sets.
obj = Classy()
Input = [[0, 30],[5, 10],[15, 20]]
print (obj.meetingRoom (Input))
input1 = [[7,10],[2,4]]
print(obj.meetingRoom(input1))
input2 = [[2,11],[6,16],[11,16]]
print(obj.meetingRoom(input2))
input3 = [[2,15],[36,45],[9,29],[16,23],[4,9]]
print(obj.meetingRoom(input3))
input4 = [[928,5032],[3072,3741],[3960,4588],[482,2269],[2030,4360],[150,772]]
print(obj.meetingRoom(input4))
92c7158bdd3c61c0f9ac574c75bf7633846ae258 | Python | zxlzhangxiaolan/uploader | /P2M++/drawloss2.py | UTF-8 | 2,033 | 3 | 3 | [
"BSD-3-Clause"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
# Paths of the saved train/validation loss-record text files.
loss_save = '/home/fullo/公共的/fullo-Pixel2MeshPlusPlus-master/Pixel2MeshPlusPlus/results5/coarse_mvp2m/logs/train_loss_record.txt'
loss_save2 = '/home/fullo/公共的/fullo-Pixel2MeshPlusPlus-master/Pixel2MeshPlusPlus/results5/coarse_mvp2m/logs/vaild_loss_record.txt'
# NOTE(review): `epp`, `epp2`, `s` and `s2` are assigned but never used.
epp = []
epoch1 = []
Meanloss = []
with open(loss_save, 'r') as file: # open the training-loss record
    num = 0
    for line in file.readlines(): # read the file line by line
        num = num+1
        if num!=1:
            line = line.strip().split(" ") # split the line on spaces
            # print(line)
            s , ep , _ ,mloss = line[0], line[1] ,line[2] ,line[3] # unpack the whitespace-separated fields
            ep = int(ep.split(',')[0]) # keep the epoch number (strip the trailing comma)
            epoch1.append(ep) # collect for plotting
            Meanloss.append(float(mloss))
epp2 = []
epoch2 = []
Meanloss2 = []
with open(loss_save2, 'r') as file: # open the validation-loss record
    num2 = 0
    for line in file.readlines(): # read the file line by line
        num2 = num2+1
        if num2!=1:
            line = line.strip().split(" ") # split the line on spaces
            # print(line)
            s2 , ep2 , _, mloss2 = line[0], line[1] ,line[2] ,line[3] # unpack the whitespace-separated fields
            # NOTE(review): validation loss is scaled x10 before plotting,
            # presumably to bring it onto the training-loss axis -- confirm.
            mloss2 = float(mloss2)*10
            ep2 = int(ep2.split(',')[0]) # keep the epoch number (strip the trailing comma)
            epoch2.append(ep2) # collect for plotting
            Meanloss2.append(mloss2)
# Plot both curves on one chart.
plt.title('Loss') # chart title
# plt.plot(x,y)
# common line attributes: color, label, linewidth, linestyle, marker
plt.plot(epoch1, Meanloss, color='cyan', label='train-loss')
plt.plot(epoch2, Meanloss2, 'b', label='vaild-loss') # 'b' means color='blue'
# plt.plot(iteration3, Loss3, 'r', label='loss_train100w_0.01') # 'r' means color='red'
plt.legend() # show the legend for the labels above
plt.xlabel('epoch')
plt.ylabel('loss')
# plt.ylim(-1,1)  # would set only the y-axis range
plt.show()
import sqlite3
from datetime import date
from random import randint, choice

# Print the (nombre, apellido) of every female student in the local
# SQLite database.
conn = sqlite3.connect('sda.db')
c = conn.cursor()
res = c.execute("SELECT nombre, apellido FROM estudiantes WHERE sexo='Femenino'")
for row in res:
    # Bug fix: `print row` is Python 2 syntax and is a SyntaxError on
    # Python 3; use the print() function (trailing semicolons dropped too).
    print(row)
conn.close()
| true |
import pandas as pd
from io import StringIO

"""
datetime format:
https://docs.python.org/3.6/library/datetime.html#strftime-and-strptime-behavior
"""
str_data = r"""
date,weather
20180304,cloudy
20180305,sunny
20180306,rain
"""
# Bug fix: pd.compat.StringIO was removed from pandas' public API
# (pandas >= 0.25); the standard library's io.StringIO is the drop-in
# replacement.
df = pd.read_csv(StringIO(str_data))
print(df)
print(df.dtypes)
# int64 -> datetime64[ns]
df['date'] = pd.to_datetime(df['date'], format="%Y%m%d")
print(df)
print(df.dtypes)
| true |
3b609e0eb30b3494ec05ba3db18ab96c196b8c55 | Python | paozhuanyinyuba/A-Lightwe-Classification-Project-based-on-PyTorch | /experiments/recognition/dataset/minc.py | UTF-8 | 11,106 | 2.625 | 3 | [
"MIT"
] | permissive | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Weinong Wang
## Tencent, Youtu
## Email: weinong.wang@hotmail.com
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import torch
import torch.utils.data as data
# import torchvision
from torchvision import transforms
# from PIL import Image
import os
import os.path
import numpy as np
import math
import cv2
import random
# PCA eigenvalues/eigenvectors of ImageNet RGB pixel statistics, consumed
# by the Lighting transform for AlexNet-style PCA color jitter.
_imagenet_pca = {
    'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
    'eigvec': torch.Tensor([
        [-0.5675, 0.7192, 0.4009],
        [-0.5808, -0.0045, -0.8140],
        [-0.5836, -0.6948, 0.4203],
    ])
}
class RandomErasing(object):
    """ Randomly selects a rectangle region in an image and erases its pixels.
        'Random Erasing Data Augmentation' by Zhong et al.
        See https://arxiv.org/pdf/1708.04896.pdf
    Args:
         probability: The probability that the Random Erasing operation will be performed.
         sl: Minimum proportion of erased area against input image.
         sh: Maximum proportion of erased area against input image.
         r1: Minimum aspect ratio of erased area.
         mean: Erasing value.
    """
    # NOTE(review): `mean` is a mutable default argument; it is never
    # mutated here so this is benign, but a tuple would be safer.
    def __init__(self, probability=0.5, sl=0.02, sh=0.2, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1
    def __call__(self, img):
        # `img` is indexed as (channels, height, width); channel count 3 is
        # special-cased below.
        random_value = np.random.random() # random.random()
        # print(random_value)
        if random_value > self.probability:
            return img
        # count = 0
        # Up to 100 attempts to sample an erase rectangle that fits.
        for attempt in range(100):
            area = img.size()[1] * img.size()[2]
            target_area = np.random.uniform(self.sl, self.sh) * area
            aspect_ratio = np.random.uniform(self.r1, 1 / self.r1)
            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            if w < img.size()[2] and h < img.size()[1]:
                # count +=1
                x1 = np.random.randint(0, img.size()[1] - h)
                y1 = np.random.randint(0, img.size()[2] - w)
                # Overwrite the rectangle with the per-channel erase value
                # (in place).
                if img.size()[0] == 3:
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                    img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
                    img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
                else:
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                return img
        return img
class RandomHorizontalFlip(object):
    """With probability `p`, reverse axis 1 of the image (the width of an
    HWC numpy array), i.e. mirror it left-right."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img):
        if random.random() >= self.p:
            return img
        return img[:, ::-1, :].copy()
class RandomVerticalFlip(object):
    """With probability `p`, reverse axis 0 of the image (the height of an
    HWC numpy array), i.e. flip it upside down."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img):
        if random.random() >= self.p:
            return img
        return img[::-1, :, :].copy()
class RandomRot90(object):
    """With probability `p`, swap the first two axes of the image.

    Despite the name, np.transpose(img, [1, 0, 2]) is a transpose (rot90
    plus a flip), not a pure 90-degree rotation.
    """

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img):
        if random.random() >= self.p:
            return img
        return np.transpose(img, [1, 0, 2]).copy()
'''
hsv transformer:
hue_delta: additive shift applied to the hue channel (OpenCV hue is 0-179)
sat_mult: multiplicative factor applied to the saturation channel
val_mult: multiplicative factor applied to the value channel
'''
def hsv_transform(img, hue_delta, sat_mult, val_mult):
    """Shift hue and scale saturation/value of an RGB uint8 image, clamping
    channels to 255, and return the result converted back to RGB."""
    # Bug fix: np.float was a deprecated alias removed in NumPy 1.24;
    # np.float64 is the exact type the alias referred to.
    img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float64)
    img_hsv[:, :, 0] = (img_hsv[:, :, 0] + hue_delta) % 180
    img_hsv[:, :, 1] *= sat_mult
    img_hsv[:, :, 2] *= val_mult
    img_hsv[img_hsv > 255] = 255
    return cv2.cvtColor(np.round(img_hsv).astype(np.uint8), cv2.COLOR_HSV2RGB)
'''
random hsv transformer
hue_vari: the range of change ratio of hue
sat_vari: the range of change ratio of saturation
val_vari: the range of change ratio of value
'''
def random_hsv_transform(img, hue_vari, sat_vari, val_vari):
    # Random hue shift in [-hue_vari, hue_vari) and random multiplicative
    # saturation/value factors in [1 - vari, 1 + vari).
    hue_delta = np.random.randint(-hue_vari, hue_vari)
    sat_mult = 1 + np.random.uniform(-sat_vari, sat_vari)
    val_mult = 1 + np.random.uniform(-val_vari, val_vari)
    return hsv_transform(img, hue_delta, sat_mult, val_mult)
def find_classes(dir):
    """Return (sorted subdirectory names of `dir`, name -> index mapping).

    Only directories count as classes; plain files are ignored.
    """
    classes = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry))
    )
    class_to_idx = {name: index for index, name in enumerate(classes)}
    return classes, class_to_idx
def make_dataset(filename, datadir, class_to_idx):
    """Resolve an image-list file to parallel (path, label) lists.

    Args:
        filename: text file containing one image path per line, relative
            to `datadir`; the image's parent directory names its class.
        datadir: dataset root directory.
        class_to_idx: mapping from class-directory name to integer label.

    Returns:
        (images, labels): parallel lists of image paths and int labels.

    Raises:
        FileNotFoundError: if a listed image does not exist on disk.
    """
    images = []
    labels = []
    with open(os.path.join(filename), "r") as lines:
        for line in lines:
            _image = os.path.join(datadir, line.rstrip('\n'))
            _dirname = os.path.split(os.path.dirname(_image))[1]
            # Fixes vs. the original: drop the leftover debug print that
            # spammed stdout for every file, and fail loudly even under
            # `python -O` (where a bare assert would be stripped).
            if not os.path.isfile(_image):
                raise FileNotFoundError(_image)
            label = class_to_idx[_dirname]
            images.append(_image)
            labels.append(label)
    return images, labels
class MINCDataloder(data.Dataset):
    """MINC-2500 dataset: reads images listed in labels/train1.txt or
    labels/validate1.txt under `root`, resizes the short side to 256 and
    applies flip/HSV augmentation in training mode; with `test_aug` each
    test sample is expanded to 8 flip/transpose views."""
    def __init__(self, root, train=True, transform=None, test_aug = False):
        self.transform = transform
        classes, class_to_idx = find_classes(root + '/images')
        if train:
            filename = os.path.join(root, 'labels/train1.txt')
        else:
            filename = os.path.join(root, 'labels/validate1.txt')
        self.images, self.labels = make_dataset(filename, root,
            class_to_idx)
        assert (len(self.images) == len(self.labels))
        self.train = train
        self.test_aug = test_aug
    def __getitem__(self, index):
        # Load as RGB (OpenCV reads BGR).
        # _img = Image.open(self.images[index]).convert('RGB')
        _img = cv2.imread(self.images[index])
        _img = cv2.cvtColor(_img, cv2.COLOR_BGR2RGB)
        if self.train:
            # Scale so the shorter side becomes 256, then apply random
            # horizontal/vertical flips, a transpose, and HSV jitter.
            height,width,_ = _img.shape
            min_size = min(height,width)
            # max_size = max(height,width)
            scale = 256 / float(min_size)
            target_width = round(width * scale)
            target_height = round(height * scale)
            _img = cv2.resize(_img,(target_width, target_height), cv2.INTER_LINEAR)
            _img = RandomHorizontalFlip()(_img)
            # _img = RandomHorizontalFlip()(_img)
            _img = RandomVerticalFlip()(_img)
            _img = RandomRot90()(_img)
            _img = random_hsv_transform(_img, 0.4,0.4,0.4)
        else:
            height,width,_ = _img.shape
            # ratio_h = height / 720 # this setting is only for our real scenes, you can change it for your scenes
            # ratio_w = width / 1280 # this setting is only for our real scenes, you can change it for your scenes
            # target_width = int(ratio_w * 320)
            # target_height = int(ratio_h * 240)
            # ## crop from original images
            # _img = _img[-target_height:, int(math.floor(width / 2 - target_width/2)):int(math.floor(width / 2 + target_width/2))]
            # height,width,_ = _img.shape
            # print( _img.shape)
            # cv2.imwrite('test.jpg',_img)
            min_size = min(height,width)
            # max_size = max(height,width)
            scale = 256 / float(min_size) # the minimum side is set 192, you can change it for your scenes
            target_width = round(width * scale)
            target_height = round(height * scale)
            _img = cv2.resize(_img,(target_width, target_height), cv2.INTER_LINEAR)
            if self.test_aug:
                # 8-view test-time augmentation: identity, flips, and the
                # transposed image with its flips.
                _images = [_img]
                _images.append(np.fliplr(_img).copy())
                _images.append(np.flipud(_img).copy())
                _images.append(np.fliplr(_images[-1]).copy())
                _images.append(np.transpose(_img, (1,0,2)).copy())
                _images.append(np.flipud(_images[-1]).copy())
                _images.append(np.fliplr(_images[-2]).copy())
                _images.append(np.flipud(_images[-1]).copy())
        _label = self.labels[index]
        # NOTE(review): if `transform` is None, `_img_t` is never bound and
        # the return below raises NameError -- callers in this file always
        # pass a transform; confirm that's the intended contract.
        if self.transform is not None:
            if self.test_aug:
                _img_t = []
                for i in range(len(_images)):
                    _img_t.append(self.transform(_images[i]))
            else:
                _img_t = self.transform(_img)
            if self.train:
                _img_t = RandomErasing(mean=[0.0, 0.0, 0.0])(_img_t) #
        return _img_t, _label #,self.images[index]
    def __len__(self):
        return len(self.images)
class Dataloder():
    """Builds the train/test torch DataLoaders for MINC-2500, with
    ImageNet normalization and PCA lighting jitter on the train side."""
    def __init__(self, args):
        # args must provide: batch_size, test_batch_size, test_aug, cuda.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        transform_train = transforms.Compose([
            # transforms.Resize(399),
            # transforms.RandomResizedCrop(224),
            # transforms.RandomHorizontalFlip(),
            # transforms.ColorJitter(0.2,0.2,0.2),
            transforms.ToTensor(),
            Lighting(0.1, _imagenet_pca['eigval'], _imagenet_pca['eigvec']),
            normalize,
        ])
        transform_test = transforms.Compose([
            # transforms.Resize(256),
            # transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        trainset = MINCDataloder(root=os.path.expanduser('./data/minc-2500'),
                                 train=True, transform=transform_train)
        testset = MINCDataloder(root=os.path.expanduser('./data/minc-2500'),
                                train=False, transform=transform_test, test_aug = args.test_aug)
        # Worker/pinning options only make sense when CUDA is in use.
        kwargs = {'num_workers': 8, 'pin_memory': True} if args.cuda else {}
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=
            args.batch_size, shuffle=True, **kwargs)
        testloader = torch.utils.data.DataLoader(testset, batch_size=
            args.test_batch_size, shuffle=False, **kwargs)
        self.trainloader = trainloader
        self.testloader = testloader
    def getloader(self):
        """Return (trainloader, testloader)."""
        return self.trainloader, self.testloader
class Lighting(object):
    """AlexNet-style PCA-based lighting noise.

    Adds a random linear combination of the RGB principal components to a
    (3, H, W) tensor: one N(0, alphastd) sample per component, weighted by
    its eigenvalue. A zero `alphastd` disables the transform.
    """

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, img):
        if self.alphastd == 0:
            return img
        # One normal sample per principal component.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # rgb[c] = sum_j eigvec[c, j] * alpha[j] * eigval[j]
        weights = (alpha * self.eigval.type_as(img)).view(1, 3).expand(3, 3)
        rgb = self.eigvec.type_as(img).mul(weights).sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
if __name__ == "__main__":
    # Smoke test: load both dataset splits from a hard-coded local path and
    # print their sizes. Requires the MINC data to exist at that path.
    trainset = MINCDataloder(root=os.path.expanduser('/media/youtu/本地磁盘1/data2/minc'), train=True)
    testset = MINCDataloder(root=os.path.expanduser('/media/youtu/本地磁盘1/data2/minc'), train=False)
    print(len(trainset))
    print(len(testset))
| true |
671a3b0d3b2a4a1a88eb55119c2a03aa5f9995d2 | Python | nmessa/Python-2020 | /Lab Exercise 11.19.2020/mapleLeaf.py | UTF-8 | 1,600 | 3.640625 | 4 | [] | no_license | ## mapleLeafBounce.py
## Author: nmessa
## This program takes a maple leaf drawn with the lines function and animates it.
## When collision with wall is detected, the maple leaf "bounces"
import pygame, sys
pygame.init()
# Vertex coordinates of the maple-leaf outline (closed polygon, drawn below).
dots = [[221, 432], [225, 331], [133, 342], [141, 310],
        [51, 230], [74, 217], [58, 153], [114, 164],
        [123, 135], [176, 190], [159, 77], [193, 93],
        [230, 28], [267, 93], [301, 77], [284, 190],
        [327, 135], [336, 164], [402, 153], [386, 217],
        [409, 230], [319, 310], [327, 342], [233, 331],
        [237, 432]]
#set granularity of animation
# dx/dy are the per-frame pixel velocities; their signs flip on a bounce.
dx = 1
dy = 1
#setup screen to paint on
screen = pygame.display.set_mode([1300,850])
screen.fill([255, 255, 255])
#draw maple leaf
pygame.draw.lines(screen, [255,0,0],True, dots, 2)
pygame.display.flip()
# Main animation loop.
# NOTE(review): there is no frame-rate cap (pygame.time.Clock), so the loop
# runs as fast as the CPU allows -- confirm whether that is intended.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.display.quit()
            sys.exit()
    #check for collision with wall
    # The first vertex found outside the screen flips the matching velocity.
    for i in range(len(dots)):
        if dots[i][0] > screen.get_width() or dots[i][0] < 0:
            dx = - dx # bounce in the X direction
            break
        if dots[i][1] > screen.get_height() or dots[i][1] < 0:
            dy = - dy # bounce in the Y direction
            break
    #update points
    for i in range(len(dots)):
        dots[i][0] += dx
        dots[i][1] += dy
    #redraw the maple leaf in new position determined by updated dots list
    screen.fill([255, 255, 255])
    pygame.draw.lines(screen, [255,0,0],True, dots, 2)
    pygame.display.flip()
| true |
9292c8eb0f90d2c893ec44718de781dd2694c792 | Python | DarthYogurt/StockAnalyzer | /betaCalculator.py | UTF-8 | 785 | 3.46875 | 3 | [] | no_license |
#This will calculate the BETA given a list of EOD data
# Sample end-of-day closing prices: the stock under study and the index.
AAPL = [1.0,2.0,3.0] #,4,5,6,7,8,9,10]
SNP = [1.0,2.0,3.0]
#Returns list of returns of size n-1
def getReturns(eodData):
    """Return the list of day-over-day fractional returns (length n-1).

    :param eodData: sequence of end-of-day prices (ints or floats).
    """
    dailyReturns = []
    for i in range(1, len(eodData)):
        # float() keeps the division exact even on Python 2 with int prices.
        prev = float(eodData[i - 1])
        dailyReturns.append((eodData[i] - prev) / prev)
    return dailyReturns
#Typically Stock1 is the comparing stock. stock2 is the index, or stock to compare against
def calculateBeta(stock1, stock2):
    """Return the beta of stock1 relative to stock2: cov(a, b) / var(b).

    Uses the sample (n-1) covariance and variance of the daily returns.
    Fix: the original `1/(len(a)-1)` truncated to 0 under Python 2 integer
    division whenever len(a) > 2, making the result 0; explicit float
    constants keep the arithmetic exact on both Python 2 and 3.
    Raises ZeroDivisionError if the index returns have zero variance.
    """
    a = getReturns(stock1)
    b = getReturns(stock2)
    n = len(a)
    sum_ab = sum(a[i] * b[i] for i in range(n))
    covariance = (1.0 / (n - 1)) * (sum_ab - sum(a) * sum(b) / float(n))
    variance = (1.0 / (n - 1)) * (sum(b[i] * b[i] for i in range(n)) - sum(b) * sum(b) / float(n))
    return covariance / variance
# Use the print() function form so this line runs on both Python 2 and 3
# (the original `print expr` statement is a SyntaxError on Python 3).
print(calculateBeta(AAPL, SNP))
| true |
2dcbef211ffb4a4cd5d718fe692f16981ff7b783 | Python | ZanataMahatma/Python-Exercicios | /Funções em Python/ex102.py | UTF-8 | 1,477 | 4.8125 | 5 | [] | no_license | '''Exercício Python 102: Crie um programa que tenha uma função fatorial() que receba
dois parâmetros: o primeiro que indique o número a calcular e outro chamado show,
que será um valor lógico (opcional) indicando se será mostrado ou não na tela o processo
de cálculo do fatorial.'''
def fatorial(n, show=False):
    """
    -> Compute the factorial of a number.
    :param n: the number whose factorial is computed.
    :param show: (optional) when True, print the multiplication steps.
    :return: the factorial of n.
    """
    result = 1
    for factor in range(n, 0, -1):
        result *= factor
        if show:
            # Print "5 x 4 x ... x 1 = " while accumulating the product.
            separator = ' x ' if factor > 1 else ' = '
            print(f'{factor}{separator}', end='')
    return result
#main program: print the factorial of 5, showing the calculation steps
print(fatorial(5, show=True))
#help(fatorial)
# my original answer, kept below as an inert string literal (dead code)
'''def fatorial(num=1,show=False):
    """
    :param num: O numero a ser calculado.
    :param show: (Opcional) Mostra a ser calculado.
    :return: O valor do Fatorial de um número n.
    """
    fatorial = 1
    if show == True:
        for contador in range(num,0,-1):
            fatorial = fatorial * contador
            print(f'{contador}', end='')
            print(' x ' if contador > 1 else ' = ', end='')
        print(fatorial)
    if show == False:
        for contador in range(num,0,-1):
            fatorial = fatorial * contador
        print(fatorial)
#programa
num = int(input('Digite um numero: '))
fatorial(num)'''
a5bde5e88e09003927d22ed735fd24ef7da57310 | Python | CptIdea/clicks | /visualisator.py | UTF-8 | 6,297 | 3.03125 | 3 | [] | no_license | import math
import PySimpleGUI as sg
def visualise_clicks(clicks, colors):
    """Draw the colored graph: vertices on a circle plus clique edges.

    :param clicks: iterable of cliques, each an iterable of vertex ids.
    :param colors: dict mapping vertex id -> assigned color number
        (shown in the vertex label as "id(color)").
    Blocks until the window is interacted with, then closes it.
    """
    layout = [
        [sg.Graph(canvas_size=(600, 600), graph_bottom_left=(-105, -105), graph_top_right=(105, 105),
                  background_color='white', key='graph')]
    ]
    window = sg.Window('Раскрашенный граф', layout, grab_anywhere=True, finalize=True)
    graph = window['graph']
    # NOTE(review): y_count/x_count are computed here but never used afterwards.
    y_count, x_count = 0, 0
    while y_count * x_count < len(colors):
        y_count += 1
        x_count += 1
    # Unit-circle coordinates for every vertex, scaled by `multiplier` below.
    cords = {dot: get_dot_cords(len(colors), dot) for dot in colors}
    multiplier = 80
    for dot in cords:
        graph.draw_point(tuple([(n * multiplier) for n in cords[dot]]), size=5)
        graph.draw_text(f'{dot}({colors[dot]})', tuple([(n * (multiplier + 10)) for n in cords[dot]]))
    # Connect every pair of vertices inside each clique.
    for click in clicks:
        listed_click = list(click)
        for i in range(len(listed_click) - 1):
            for to_dot in listed_click[i + 1:]:
                graph.draw_line(point_from=tuple([(n * multiplier) for n in cords[listed_click[i]]]),
                                point_to=tuple([(n * multiplier) for n in cords[to_dot]]))
    window.read()
    window.close()
def get_dot_cords(count, current_number):
    """Unit-circle (x, y) for vertex `current_number` of `count` evenly spaced points."""
    angle = (2 * math.pi) / count * current_number
    return math.sin(angle), math.cos(angle)
def visualise_interval(input_nums):
    """Draw the interval graph for a sequence of numbers.

    Each distinct value in `input_nums` gets a slanted segment whose two
    endpoints are its first and second occurrence positions; vertical drop
    lines mark the endpoints on a baseline. Assumes each value appears
    exactly twice (cords[x][1] is indexed unconditionally) -- TODO confirm.
    Blocks until the window is interacted with, then closes it.
    """
    layout = [
        [sg.Graph(canvas_size=(1400, 700), graph_bottom_left=(-105, -105), graph_top_right=(105, 105),
                  background_color='white', key='graph')]
    ]
    window = sg.Window('Интервальный граф', layout, grab_anywhere=True, finalize=True)
    # cords[value] -> list of (x position in the sequence, height derived from the value)
    cords = {}
    cur_cord = 1
    for x in input_nums:
        if cords.get(x) is None:
            cords[x] = [(cur_cord, len(input_nums) - x)]
        else:
            cords[x].append((cur_cord, len(input_nums) - x))
        cur_cord += 0.6
    # Horizontal scale so the whole sequence fits the 1400px canvas.
    multiplier = 1400 / (len(input_nums) / 2) / 15
    for cord in cords:
        # Segment connecting the two occurrences of this value.
        window['graph'].draw_line(point_from=tuple(
            [(x * multiplier) - 100 for x in cords[cord][0]]
        ),
            point_to=tuple(
                [(x * multiplier) - 100 for x in cords[cord][1]]
            ),
            width=3
        )
        window['graph'].draw_text(f'{cord}', tuple([(x * multiplier) - 102 for x in cords[cord][0]]))
        # Vertical drop lines from both endpoints down to the baseline.
        window['graph'].draw_line(point_from=tuple(
            [(x * multiplier) - 100 for x in cords[cord][0]]
        ),
            point_to=((cords[cord][0][0] * multiplier) - 100, -80),
            width=1
        )
        window['graph'].draw_line(point_from=tuple(
            [(x * multiplier) - 100 for x in cords[cord][1]]
        ),
            point_to=((cords[cord][1][0] * multiplier) - 100, -80),
            width=1
        )
    window.read()
    window.close()
def visualise_task(clicks, colors, input_nums):
    """Show the finished task: interval graph, colored clique graph and answers.

    Combines the drawing logic of visualise_clicks and visualise_interval in
    one maximized window and prints the answer summary (intervals, cliques,
    color count, color assignment) in the 'ans' text element.
    NOTE(review): unlike the other visualisers, this never calls
    window.close() after window.read() -- confirm whether that is intended.
    """
    # sg.theme("Default 1")
    layout = [
        [sg.Frame(layout=[[sg.Graph(canvas_size=(1000, 700), graph_bottom_left=(-105, -105), graph_top_right=(105, 105),
                                    background_color='white', key='interval')]], title="Интервальный граф",
                  background_color="white", title_color="black"),
         sg.Frame(layout=[[sg.Graph(canvas_size=(400, 400), graph_bottom_left=(-105, -105), graph_top_right=(105, 105),
                                    background_color='white', key='graph')],
                          [sg.Text("Тут должны быть ответы", key='ans', background_color="white", text_color='black',
                                   size=(400,0))]], title="Ответы",
                  background_color="white",
                  title_color="black")],
    ]
    window = sg.Window('Готовая задача', layout, grab_anywhere=True, finalize=True, background_color="white")
    window.maximize()
    graph = window['graph']
    # NOTE(review): y_count/x_count are computed here but never used afterwards.
    y_count, x_count = 0, 0
    while y_count * x_count < len(colors):
        y_count += 1
        x_count += 1
    # --- Clique graph: vertices on a circle, edges inside each clique. ---
    cords = {dot: get_dot_cords(len(colors), dot) for dot in colors}
    multiplier = 80
    for dot in cords:
        graph.draw_point(tuple([(n * multiplier) for n in cords[dot]]), size=5)
        graph.draw_text(f'{dot}({colors[dot]})', tuple([(n * (multiplier + 10)) for n in cords[dot]]))
    for click in clicks:
        listed_click = list(click)
        for i in range(len(listed_click) - 1):
            for to_dot in listed_click[i + 1:]:
                graph.draw_line(point_from=tuple([(n * multiplier) for n in cords[listed_click[i]]]),
                                point_to=tuple([(n * multiplier) for n in cords[to_dot]]))
    # --- Interval graph, same construction as visualise_interval. ---
    cords = {}
    cur_cord = 1
    for x in input_nums:
        if cords.get(x) is None:
            cords[x] = [(cur_cord, len(input_nums) - x)]
        else:
            cords[x].append((cur_cord, len(input_nums) - x))
        cur_cord += 0.6
    multiplier = 1400 / (len(input_nums) / 2) / 15
    for cord in cords:
        window['interval'].draw_line(point_from=tuple(
            [(x * multiplier) - 100 for x in cords[cord][0]]
        ),
            point_to=tuple(
                [(x * multiplier) - 100 for x in cords[cord][1]]
            ),
            width=3
        )
        window['interval'].draw_text(f'{cord}', tuple([(x * multiplier) - 102 for x in cords[cord][0]]))
        window['interval'].draw_line(point_from=tuple(
            [(x * multiplier) - 100 for x in cords[cord][0]]
        ),
            point_to=((cords[cord][0][0] * multiplier) - 100, -40),
            width=1
        )
        window['interval'].draw_line(point_from=tuple(
            [(x * multiplier) - 100 for x in cords[cord][1]]
        ),
            point_to=((cords[cord][1][0] * multiplier) - 100, -40),
            width=1
        )
        window['interval'].draw_text(f'{cord}', location=((cords[cord][0][0] * multiplier) - 100, -43))
        window['interval'].draw_text(f'{cord}', location=((cords[cord][1][0] * multiplier) - 100, -43))
    # Highest color number used = number of colors.
    # NOTE(review): the local name `max` shadows the builtin max() here.
    max = 0
    for i in colors:
        if colors[i] > max:
            max = colors[i]
    window['ans'].update(
        f'Интервалы: {" ".join(list(map(str, input_nums)))}\nКлики: {clicks}\nЦветов:{max}\n\nЦвета: \n{colors}')
    window.read()
| true |
7fc07c36982ac03394388ab4e55156c52348f628 | Python | bolozna/cpython-sampling | /crandom.py | UTF-8 | 1,257 | 2.9375 | 3 | [] | no_license | """Module for sampling uniformly random elements from dict and set data structures in linear time.
"""
import ctypes,random
# Sentinel key CPython's old dict implementation stores in deleted slots.
dummy_key='<dummy key>'
# Field sizes used to compute the struct offsets below.
ulong_len = ctypes.sizeof(ctypes.c_ulong)
py_object_len=ctypes.sizeof(ctypes.py_object)
# One hash-table entry is (hash, key, value): a ulong plus two PyObject*.
entry_len=ulong_len+2*py_object_len
# NOTE(review): these offsets assume the pre-compact-dict (<= 3.5) CPython
# PyDictObject layout documented in the function below -- TODO confirm the
# interpreter version before relying on them.
table_size_offset=ulong_len*4
table_pointer_offset=ulong_len*5
def choice_dict_ctypes(d):
    """Sample a uniformly random element from a non-empty dictionary in linear time using ctypes.

    Reads the dict's internal hash table directly via ctypes and uses
    rejection sampling: draw a random slot index until a non-empty slot is
    found, then return the stored value.
    NOTE(review): this depends entirely on CPython internals (the layout
    described below); it is not portable across interpreter versions --
    TODO confirm it matches the running interpreter. An empty dict makes
    the rejection loop spin forever.
    """
    #Dict format:
    #refcount
    #type
    #number of slots that are not empty (including dummy elements)
    #number of slots containing actual values
    #total number of slots -1
    #total number of slots -1
    #*table
    #Table format:
    #hash
    #key
    #val
    # Total slot count is the stored mask + 1.
    table_size=ctypes.c_void_p.from_address(id(d)+table_size_offset).value+1
    table_pointer=ctypes.c_void_p.from_address(id(d)+table_pointer_offset).value
    # Address of the value field of entry 0 (skip hash and key).
    table_value_offset=table_pointer+ulong_len+py_object_len
    while True:
        i=random.randrange(0,table_size)
        value=ctypes.c_void_p.from_address(table_value_offset+i*entry_len).value
        if value!=None:
            # Non-NULL value pointer: slot is occupied; return its key object.
            return ctypes.cast(ctypes.c_void_p.from_address(table_pointer+ulong_len+i*entry_len).value, ctypes.py_object).value
| true |
b35b79d1d957f6b5b020e532f9a9381665eba76c | Python | andromedarabbit/blog | /utils/markov.py | UTF-8 | 3,494 | 3 | 3 | [] | no_license | from pathlib import Path
import re
import random
import frontmatter
from datetime import datetime, timedelta
START_OF_LINE = "AVeryLongMarkerForStartOfLine"
END_OF_LINE = "AVeryLongMarkerForEndOfLine"


class MarkovWordChain:
    """First-order Markov chain over words with weighted successor counts."""

    def __init__(self):
        # word -> {"totalCount": int, "nextWords": {successor: count}}
        self.map = {}

    def add(self, word, nextWord):
        """Record one observed transition word -> nextWord."""
        chain = self.map.setdefault(word, {"totalCount": 0, "nextWords": {}})
        chain["totalCount"] = chain["totalCount"] + 1
        nextWords = chain["nextWords"]
        nextWords[nextWord] = nextWords.get(nextWord, 0) + 1

    def to_string(self):
        """Human-readable dump of every word, its count and its successors."""
        retval = ""
        for word in self.map:
            retval = retval + str(word) + " : " + str(self.map[word]["totalCount"]) + "\n"
            retval = retval + "Next words:\n"
            for nextWord in self.map[word]["nextWords"]:
                retval = retval + nextWord + "\n"
        return retval

    def sentence(self):
        """Generate one sentence (trailing space) by weighted random walk.

        Fixes the original selection bug: the comparison `currCount >= roll`
        ran before the candidate's count was added, so the first successor
        was chosen with probability 1/total instead of count/total. Also
        stops cleanly (instead of KeyError) if a word has no recorded chain,
        e.g. on an untrained instance.
        """
        words = []
        word = START_OF_LINE
        while word != END_OF_LINE:
            if word not in (START_OF_LINE, END_OF_LINE):
                words.append(word)
            chain = self.map.get(word)
            if chain is None:  # untrained/empty chain: end the sentence here
                break
            roll = random.randint(0, chain["totalCount"] - 1)
            cumulative = 0
            word = END_OF_LINE
            for candidate, count in chain["nextWords"].items():
                cumulative += count
                if roll < cumulative:  # proper count-weighted selection
                    word = candidate
                    break
        return "".join(w + " " for w in words)
# Build the chain from every markdown post, then emit a demo page of
# generated sentences as a Hugo frontmatter file.
markov = MarkovWordChain()
cwd = Path.cwd()
# navigate to ./content/posts
p = cwd / "content" / "post"
for mdfile in p.glob("**/*.md"):
    # NOTE(review): modtime/daysdelta feed only the commented-out age filter
    # below and are otherwise unused.
    modtime = datetime.fromtimestamp(mdfile.stat().st_mtime)
    n = datetime.now()
    daysdelta = (n - modtime).days
    # if not all and daysdelta > 100:
    #     # dont index files older than 100 days
    #     continue
    with mdfile.open(encoding='utf-8') as f:
        post = frontmatter.load(f)
        post_text = str(post)
    # Feed each line as a word sequence bounded by the start/end markers.
    for line in post_text.splitlines():
        if line.startswith("![]"):
            continue # ignore images
        prevToken = START_OF_LINE
        for token in line.split():
            markov.add(prevToken, token)
            prevToken = token
        markov.add(prevToken, END_OF_LINE)
# NOTE(review): `random` is already imported at the top of the file; this
# re-import is redundant.
import random
content = ["This is the demo output for a Markov chain Python script, based on posts from this site. [More info](/2019/08/python-markov-chains/)"]
for i in range(0, random.randint(10, 20)):
    sentence = markov.sentence()
    # avoid hugo errors
    sentence = sentence.replace("{{", "{\{")
    content.append(sentence)
# NOTE(review): "\n\r" is the reverse of the usual CRLF sequence -- confirm.
fm = frontmatter.Post("\n\r".join(content))
fm['title'] = 'Markov Chain Demo'
fm['date'] = datetime.now() - timedelta(days=399) # subtract a random number of days so it doesnt appear in Onthisday and on the front page
fm['url'] = '/demos/markov'
fm['hidden'] = True
# create the year folder if it doesnt exist
newfile = frontmatter.dumps(fm)
outfile = cwd / "content" / "page" / "markov.md"
with outfile.open("w", encoding='utf-8') as w:
    w.write(newfile)
fbeeb6859ec10a05b4dfbb706e4e920ad01dbc62 | Python | kiranmahi2593/k | /dictionaries.py | UTF-8 | 2,480 | 3.921875 | 4 | [] | no_license | #--------------------------Dictionaries-------------------------------------#
def Create_dict(DictInput):
    """Interactively build a dict of `DictInput` key/value pairs from stdin."""
    result = {}
    for _ in range(DictInput):
        key = input("Enter the Key:")
        value = input("Enter the Value:")
        result[key] = value
    print("Created Dictionaty", result)
    return result
#clear() Removes all the elements from the dictionary
def clear(x):
    """Remove every entry from dict `x` in place and return it."""
    x.clear()
    print("Dictionary After clearing the values:", x)
    return x
#copy() Returns a copy of the dictionary
def copy(x):
    """Return a shallow copy of dict `x` (printed for the user)."""
    duplicate = x.copy()
    print("Dict of copied values:", duplicate)
    return duplicate
#fromkeys() Returns a dictionary with the specified keys and value
def fromkeys(x):
    """Return a new dict whose keys come from iterable `x`, all values None."""
    return dict.fromkeys(x)
#get() Returns the value of the specified key
'''
def get(x):
val = int(input("enter the key:"))
y = x.get(val)
print("Output:",y)
return y
'''
#items() Returns a list containing a tuple for each key value pair
def items(x):
    """Return the items view of dict `x` (printed for the user)."""
    pairs = x.items()
    print("Output:", pairs)
    return pairs
#keys() Returns a list containing the dictionary's keys
def keys(x):
    """Return the keys view of dict `x` (printed for the user)."""
    view = x.keys()
    print("Output:", view)
    return view
#pop() Removes the element with the specified key
def pop(x):
    """Prompt for a key on stdin, remove it from dict `x`, and return `x`.

    Raises KeyError if the entered key is not present in the dict.
    """
    val = input("enter the key name to remove dict:")
    x.pop(val)
    print("Dict After removing dict:",x)
    return x
#popitem() Removes the last inserted key-value pair
def popitem(x):
    """Drop the most recently inserted pair from dict `x`; return the dict."""
    x.popitem()
    print("Dict After removing popitem:", x)
    return x
#setdefault() Returns the value of the specified key. If the key does not exist: insert the key, with the specified value
def setdefault(x):
    """Prompt for a key and value; insert the pair only if the key is absent.

    Returns the value already stored under the key, or the newly entered
    value when the key was missing (standard dict.setdefault semantics).
    """
    UserDictKey = input("Enter the Key:")
    UserDictValue = input("Enter the Value:")
    SetFunction = x.setdefault(UserDictKey,UserDictValue)
    print("OutPut",SetFunction)
    return SetFunction
#update() Updates the dictionary with the specified key-value pairs
def update(x, UserDictKey=None, UserDictValue=None):
    """Add or overwrite one key/value pair in dict `x` and return the dict.

    The key and value are prompted from stdin when not supplied as
    arguments (backward compatible with the original 1-argument call).
    Fix: the original called `x.update(key, value)`, which always raised
    TypeError because dict.update takes a mapping or iterable of pairs,
    not two positional arguments.
    """
    if UserDictKey is None:
        UserDictKey = input("Enter the Key:")
    if UserDictValue is None:
        UserDictValue = input("Enter the Value:")
    x.update({UserDictKey: UserDictValue})
    print("OutPut", x)
    return x
#values() Returns a list of all the values in the dictionary
def values(x):
    """Return the values view of dict `x` (printed for the user)."""
    view = x.values()
    print("Output:", view)
    return view
| true |
d92937a9f36eb388ba2f86cfe3a261743893ce3b | Python | Programamcion-DAM/Leer-y-representar-funciones | /test.py | UTF-8 | 4,359 | 2.90625 | 3 | [] | no_license |
from urllib import parse
from http.server import HTTPServer, BaseHTTPRequestHandler
import cv2
import numpy as np
import PIL
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
from imutils.contours import sort_contours
import imutils
import pandas as pd
# Load the trained model and the constants used for symbol prediction.
model = keras.models.load_model('modelo.h5')
# The symbol classes, in the order the model was trained on.
class_names = ['(', ')', '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '=', 'A', 'cos', 'div', 'e', 'log', 'pi', 'sin', 'sqrt', 'tan', 'times']
# NOTE(review): batch_size appears unused in this file -- confirm.
batch_size = 32
# Model input resolution used when loading cropped symbol images.
img_height = 100
img_width = 100
# Returns the prediction for one symbol; the parameter selects which
# cropped image file (/content/imageN.jpg) to classify.
def prediction(number):
    """Classify the cropped symbol image with index `number`.

    Loads /content/image<number>.jpg, resizes it to the model input size,
    and returns the predicted class label from `class_names`.
    """
    print(number)  # debug trace of which crop is being classified
    test_image_path = "/content/image"+str(number)+".jpg"
    # NOTE(review): this PIL image is opened but never used -- load_img
    # below re-reads the same file.
    test_image = PIL.Image.open(test_image_path)
    img = keras.preprocessing.image.load_img(
        test_image_path, target_size=(img_height, img_width)
    )
    img_array = keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0) # Create a batch
    predictions = model.predict(img_array)
    # Softmax over the logits; the arg-max index selects the label.
    score = tf.nn.softmax(predictions[0])
    return class_names[np.argmax(score)]
# Splits the full canvas image into per-symbol crops, classifies each one,
# and collects the predicted labels in `chars`.
def predict_image():
    """Segment the canvas image into symbols and classify each left-to-right.

    Returns the list of predicted class labels ordered by horizontal
    position.
    NOTE(review): reads a Colab-style path ('/content/drive/...') while
    do_POST writes the canvas to a Windows path -- confirm which is current.
    """
    image = cv2.imread('/content/drive/MyDrive/TestImagenes/canvas.jpg')
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    # perform edge detection, find contours in the edge map, and sort the
    # resulting contours from left-to-right
    edged = cv2.Canny(blurred, 30, 150)
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sort_contours(cnts, method="left-to-right")[0]
    chars=[]
    i = 0
    for c in cnts:
        # compute the bounding box of the contour
        (x, y, w, h) = cv2.boundingRect(c)
        # Skip tiny contours (noise); 600 px^2 is the area threshold.
        if w*h>600:
            # Crop with a 15px margin, save it, and classify the crop.
            # NOTE(review): near the image border y-15/x-15 can go negative,
            # which Python slicing interprets as wrap-around -- confirm.
            roi = gray[y-15:y + h + 15, x -15:x + w + 15]
            path = 'image'+str(i)+'.jpg'
            cv2.imwrite(path,roi)
            chars.append(prediction(i))
            cv2.rectangle(image, (x-15, y-15), (x + w +15 , y + h +15), (0, 255, 0), 1)
            i+=1
    return chars
# Convert the predicted symbol labels into a math-expression string the
# client can evaluate.
def transform_into_operation(chars):
    """Map model class labels to math-expression characters and join them."""
    replacements = {'A': 'a', 'div': '/', 'pi': 'PI', 'times': '*'}
    return ''.join(replacements.get(symbol, symbol) for symbol in chars)
# HTTP server handler class. Only accepts POST requests.
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """Receives canvas pixel data via POST, runs symbol recognition, and
    returns the recognised math expression as the response body."""

    def do_POST(self):
        print("Peticion recibida")
        # Read the request body and strip the form-field framing.
        content_length = int(self.headers['Content-Length'])
        data = self.rfile.read(content_length)
        data = data.decode().replace('pixeles=', '')
        data = parse.unquote(data)
        # Parse the comma-separated pixel values into a 250x500 float array.
        arr = np.fromstring(data, np.float32, sep=",")
        arr = arr.reshape(250,500)
        arr = np.array(arr)
        # Write the image to the project folder.
        # NOTE(review): predict_image() reads from a different, Colab-style
        # path ('/content/drive/...') than the one written here -- confirm.
        status = cv2.imwrite('C:/Users/Dani/Desktop/Math Web/canvas.jpg',arr)
        print("Image written to file-system : ",status)
        # Run recognition on the saved image and build the expression.
        symbols = predict_image()
        operation = transform_into_operation(symbols)
        # Send the HTTP response back to the client.
        self.send_response(200)
        # Allow cross-origin requests so the browser page can call us.
        self.send_header("Access-Control-Allow-Origin", "*")
        self.end_headers()
        self.wfile.write(operation.encode())
# Start the server on port 8000 and listen forever.
# If it hangs, find the python task in Task Manager and end it.
print("Iniciando el servidor...")
server = HTTPServer(('localhost', 8000), SimpleHTTPRequestHandler)
server.serve_forever()
#import webbrowser
#webbrowser.open_new_tab('index.html')
168aebbddfe194c8cd9f98bd0aa531297f27c4c8 | Python | MaukWM/Spectrangle | /agents/random_agent.py | UTF-8 | 394 | 2.78125 | 3 | [] | no_license | import random
import move
import state
from agents import agent
class RandomAgent(agent.Agent):
    """Agent that plays a uniformly random legal move."""

    def get_move(self, s: state.State) -> move.Move:
        """Return one legal move for this agent, chosen uniformly at random."""
        candidates = list(s.get_all_possible_moves(self.index))
        return random.choice(candidates)

    def __repr__(self):
        return "RandomAgent " + str(self.index)
| true |
696856abf0ba829fa417238188bb9c2c517adea9 | Python | rduvalwa5/Jenkin_Examples | /src/Test_Volume_Calculations.py | UTF-8 | 2,457 | 2.71875 | 3 | [] | no_license | '''
Created on Sep 10, 2017
@author: rduvalwa2
'''
import unittest
from VolumeCalculation import math_volumeCaluculations
import math
class test_VolumeCalcualtions(unittest.TestCase):
    """Unit tests for math_volumeCaluculations (VolumeCalculation module).

    NOTE(review): several expected values below use formulas that differ
    from the standard geometric ones (e.g. cylinder/cone use pi*r*h*2
    instead of pi*r^2*h) -- presumably mirroring the implementation under
    test; confirm against VolumeCalculation before changing either side.
    """
    def setUp(self):
        print("Set up")
    def test_Pi(self):
        inst = math_volumeCaluculations()
        pi = inst.pi()
        expected = math.pi
        self.assertEqual(expected,pi, "pi value fails")
    def test_powers(self):
        # powers(3, 3) is expected to compute 3**3.
        inst = math_volumeCaluculations()
        expected = 27
        self.assertEqual(expected,inst.powers(3, 3))
    def test_cubeVolume(self):
        # Cube of side 4: 4**3.
        inst = math_volumeCaluculations()
        expected = 64
        self.assertEqual(expected, inst.cube_volume(4) , "cube volume failed")
    def test_rectangular_prism_volume(self):
        inst = math_volumeCaluculations()
        expected = 4 * 5 * 6
        self.assertEqual(expected,inst.rectangular_prism_volume(4,5,6), "rectangular_prism_volume failed")
    def test_rightCylander_volume(self):
        # NOTE(review): expected is pi*r*h*2, not the geometric pi*r^2*h.
        inst = math_volumeCaluculations()
        expected = math.pi * 5 * 6 * 2
        self.assertEqual(expected, inst.rightCylander_volume(5,6), "rightCylander_volumefailed")
    def test_pyramid_volume(self):
        inst = math_volumeCaluculations()
        expected = (4 * 5 * 6)/3
        self.assertEqual(expected, inst.pyramid_volume(4,5,6), "pyramid volume failed")
    def test_cone_volume(self):
        # NOTE(review): expected is (pi*r*h*2)/3, not the geometric (pi*r^2*h)/3.
        inst = math_volumeCaluculations()
        radius = 5
        height = 6
        expected = (math.pi * radius * height * 2)/3
        self.assertEqual(expected, inst.cone_volume(radius,height), "cone volume failed")
    def test_sphere_volume(self):
        inst = math_volumeCaluculations()
        radius = 5
        expected = (4/3) * math.pi * pow(radius,3)
        self.assertEqual(expected,inst.sphere_volume(radius), "sphere volume failed")
    def test_ellipsoid_volume(self):
        inst = math_volumeCaluculations()
        radius1 = 6
        radius2 = 4
        radius3 = 5
        expected = (4/3) * math.pi * radius1 * radius2 * radius3
        self.assertEqual(expected, inst.ellipsoid_volume(radius1,radius2,radius3), "ellipsoid volume failed")
    def test_for_git(self):
        pass
    def tearDown(self):
        print("Tear Down")
if __name__ == "__main__":
    unittest.main()
8f891daaff4f464cdadc9ca1d0187521b530160f | Python | EdwardPeng19/AI_Risk | /A榜code/loct_code/feature_recieve_addr_info.py | UTF-8 | 2,692 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import numpy as np
import pandas as pd
file_name = 'dealed_data/target_recieve_addr_info_dealed.csv'
# Extract features from the recieve_addr_info table.
print('......读取表并合并表')
train_target = pd.read_csv('data/train_target.csv')
train_recieve_addr_info = pd.read_csv('data/train_recieve_addr_info.csv')
test_target = pd.read_csv('data/test_list.csv')
test_recieve_addr_info = pd.read_csv('data/test_recieve_addr_info.csv')
# Stack train and test so features are computed over both at once.
target = pd.concat([train_target, test_target], axis=0)
recieve_addr_info = pd.concat([train_recieve_addr_info, test_recieve_addr_info], axis=0)
# Cache the merged table on disk; reuse it on subsequent runs.
if os.path.exists(file_name):
    target_recieve_addr_info = pd.read_csv(file_name, encoding='utf-8', low_memory=False)
else:
    target_recieve_addr_info = pd.merge(target, recieve_addr_info, on='id', how='left')
    target_recieve_addr_info.to_csv(file_name, encoding='utf-8', index=False)
print('......读取完毕')
print('......特征提取\n')
feature = pd.DataFrame()
# Number of recieve_addr_info records for each id.
def count_rec_info(row):
    """Count address records in one per-id group; a lone NaN means zero.

    Applied per column (addr_id, region, phone, fix_phone, receiver_md5)
    by the groupby-agg below; a single-NaN group comes from the left join
    and represents an id with no address records.
    """
    values = np.array(row)
    if np.isnan(values[0]) and len(values) == 1:
        return 0
    return len(values)
# Aggregate every column per id with count_rec_info; the aggregated
# 'addr_id' column is renamed into the record-count feature.
# NOTE(review): all aggregated columns (not only 'addr_id') end up
# concatenated into `feature` -- confirm that is intended.
feature_count_rec_info = target_recieve_addr_info.groupby('id', sort=False).agg(count_rec_info)
feature_count_rec_info = feature_count_rec_info.reset_index()
del feature_count_rec_info['id']
del feature_count_rec_info['target']
feature_count_order_info = feature_count_rec_info.rename({'addr_id': 'count_rec_info'}, axis=1)
feature = pd.concat([feature, feature_count_order_info], axis=1)
print('......购买个数')
# Region feature.
def order_name_fre(name):
    """Classify whether recent purchase regions repeat.

    Return codes: -1 = fewer than two records; with exactly two records,
    0 if the last two match else 1; with three or more, 2 if the last
    matches the third-from-last else 3.
    """
    regions = list(name)
    n = len(regions)
    if n <= 1:
        return -1
    if n == 2:
        return 0 if regions[-1] == regions[-2] else 1
    return 2 if regions[-1] == regions[-3] else 3
# Per-id code describing whether recent orders changed region.
temp = target_recieve_addr_info.groupby('id', sort=False)['region'].agg(order_name_fre)
feature['region_fre'] = list(temp)
print('......购买是否换地区')
# Ratio of orders where the landline field was left empty to the total count.
def pos_count_div_sum_count(name):
    """Fraction of entries that are missing (NaN, filled to 0) or zero.

    :param name: pandas Series of one id's fix_phone values.
    """
    filled = list(name.fillna(0))
    zeros = sum(1 for v in filled if v == 0)
    return float(zeros) / len(filled)
# Per-id fraction of orders with no landline phone filled in.
temp = target_recieve_addr_info.groupby('id', sort=False)['fix_phone'].agg(pos_count_div_sum_count)
feature['fix_phone_fre'] = list(temp)
print('......固话有填写吗')
print('......保存特征')
# Persist the assembled feature table for the downstream model.
feature.to_csv('feature/feature_recieve_addr_info.csv', index=False)
print('......结束')
| true |
315300baa30fcecb799e1b9de3df05e422571122 | Python | robertvari/python_alapok_211106_1 | /Photo_To_Excel/PhotoToExcel.py | UTF-8 | 2,149 | 2.84375 | 3 | [] | no_license | import os
from openpyxl import Workbook
from openpyxl.styles import Font
from PIL import Image, ExifTags
# open folder and get all files and folders
photo_folder = r"C:\Work\_PythonSuli\pycore-211106\photos"
file_list = os.listdir(photo_folder)
# filter file_list to get only .jpg and .jpeg formats
clean_file_list = []
extensions = [".jpeg", ".jpg"]
for i in file_list:
    name, ext = os.path.splitext(i)
    if not ext.lower() in extensions:
        continue
    clean_file_list.append(os.path.join(photo_folder, i))
# list comprehension
# clean_file_list = [i for i in file_list if ".jpg" in i.lower()]
# create excel workbook with one header row (bold, size 20) per column
workbook = Workbook()
sheet = workbook.active
title_font = Font(size="20", bold=True)
sheet["A1"] = "File Path"
sheet["A1"].font = title_font
sheet.column_dimensions['A'].width = 70
sheet["B1"] = "Date"
sheet["B1"].font = title_font
sheet.column_dimensions['B'].width = 40
sheet["C1"] = "Size"
sheet["C1"].font = title_font
sheet.column_dimensions['C'].width = 40
sheet["D1"] = "Camera"
sheet["D1"].font = title_font
sheet.column_dimensions['D'].width = 30
sheet["E1"] = "Focal Length"
sheet["E1"].font = title_font
sheet.column_dimensions['E'].width = 30
sheet["F1"] = "ISO"
sheet["F1"].font = title_font
# open photos and get exif data
# NOTE(review): row = index + 3 leaves row 2 permanently empty -- confirm.
# NOTE(review): images opened here are never explicitly closed.
for index, photo_file in enumerate(clean_file_list):
    img = Image.open(photo_file)
    image_size = img.size
    row = index + 3
    # photo path
    sheet[f"A{row}"] = photo_file
    # image size
    sheet[f"C{row}"] = f"{image_size[0]}x{image_size[1]}"
    # get exif data
    # NOTE(review): _getexif() is a private PIL API -- consider getexif().
    exif_data = img._getexif()
    if not exif_data:
        continue
    # get data from exif tags: map numeric EXIF keys to tag names and fill
    # the matching spreadsheet column.
    for key, value in exif_data.items():
        tag_name = ExifTags.TAGS.get(key)
        if tag_name == "ISOSpeedRatings":
            sheet[f"F{row}"] = value
        elif tag_name == "DateTime":
            sheet[f"B{row}"] = value
        elif tag_name == "Model":
            sheet[f"D{row}"] = value
        elif tag_name == "FocalLength":
            sheet[f"E{row}"] = str(value)
# Save the workbook next to the photos.
excel_file = os.path.join(photo_folder, "photo_data.xlsx")
workbook.save(excel_file)