blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
afc8470c3b1ae6199f7b2328ed048d5006e3ca45 | Python | Rmartin20/Regim-project | /RegimUI/Regim/DVisual.py | UTF-8 | 9,661 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from Regim import ZoomAdvanced
try:
from Tkinter import *
except ImportError:
from tkinter import *
class DVisual:
    def __init__(self, top=None, fixed_img=None, mov_img=None, reg_img=None, bw_img=None):
        """Visualization GUI.

        Builds a three-pane window: a thumbnail strip (left), the main
        zoomable viewer (centre) and brightness/contrast sliders (right).

        :param top: Tk root window the GUI is built into
        :param fixed_img: fixed/reference PIL image -- thumbnailed IN PLACE
        :param mov_img: moving PIL image -- thumbnailed IN PLACE
        :param reg_img: registered-result PIL image -- thumbnailed IN PLACE
        :param bw_img: difference/BW PIL image -- thumbnailed IN PLACE
        """
        from PIL import Image, ImageTk
        # ---------------------------------- ASSETS ---------------------------------------------------
        _side_bg_color = '#535353'
        _main_bg_color = '#282828'
        _fg_color = '#000000'
        _font9 = "-family Verdana -size 9 -weight normal -slant roman" \
            " -underline 0 -overstrike 0"
        _font11 = "-family Verdana -size 11 -weight normal -slant roman" \
            " -underline 0 -overstrike 0"
        _font13 = "-family Verdana -size 13 -weight normal -slant roman " \
            "-underline 0 -overstrike 0"
        # Set by select_image() when the user clicks a thumbnail.
        self.main_image_object = None
        self.big_image = None
        # -------------------------------------------------------------------------------------------
        # Creating all the GUI
        # Window: 85% of screen width, near-full height, horizontally centred.
        screen_width = int(top.winfo_screenwidth() * 0.85)
        screen_height = int(top.winfo_screenheight() - 76)
        padx = int((top.winfo_screenwidth() / 2) - (screen_width / 2))
        screen_size = "{0}x{1}+{2}+0".format(screen_width, screen_height, padx)
        top.geometry(screen_size)
        top.title("DVisual")
        # top.iconbitmap(self.icon_path)
        top.resizable(False, False)
        top.configure(background="#d9d9d9")
        # Frames configuration
        # Left pane (20% width): thumbnail strip.
        self.frame_images = Frame(top)
        self.frame_images.place(relx=0.0, rely=0.0, height=screen_height, width=screen_width*0.2)
        self.frame_images.configure(relief=SUNKEN)
        self.frame_images.configure(borderwidth="1")
        self.frame_images.configure(background=_side_bg_color)
        # Centre pane (65% width): main viewer.
        self.frame_visual = Frame(top)
        self.frame_visual.place(relx=0.2, rely=0.0, height=screen_height, width=screen_width * 0.65)
        self.frame_visual.configure(relief=SUNKEN)
        self.frame_visual.configure(borderwidth="1")
        self.frame_visual.configure(background=_main_bg_color)
        # Right pane (15% width): slider column.
        self.frame_sliders = Frame(top)
        self.frame_sliders.place(relx=0.85, rely=0.0, height=screen_height, width=screen_width * 0.15)
        self.frame_sliders.configure(relief=SUNKEN)
        self.frame_sliders.configure(borderwidth="1")
        self.frame_sliders.configure(background=_side_bg_color)
        # Thumbnail frames configuration
        Tk.update(top)  # realise geometry so winfo_width() below returns real sizes
        self.tn_width = int(self.frame_images.winfo_width()) * 0.6
        thumbnail_size = (self.tn_width, self.tn_width)
        # NOTE: PIL thumbnail() mutates the caller-supplied images in place.
        fixed_img.thumbnail(thumbnail_size, Image.ANTIALIAS)
        fixed_photo = ImageTk.PhotoImage(fixed_img, master=top)
        mov_img.thumbnail(thumbnail_size, Image.ANTIALIAS)
        mov_photo = ImageTk.PhotoImage(mov_img, master=top)
        reg_img.thumbnail(thumbnail_size, Image.ANTIALIAS)
        reg_photo = ImageTk.PhotoImage(reg_img, master=top)
        bw_img.thumbnail(thumbnail_size, Image.ANTIALIAS)
        bw_photo = ImageTk.PhotoImage(bw_img, master=top)
        self.image_list = [fixed_img, mov_img, reg_img, bw_img]
        self.photo_list = [fixed_photo, mov_photo, reg_photo, bw_photo]
        self.image_frame_list = [None, None, None, None]
        self.image_canvas_list = [None, None, None, None]
        # One clickable framed canvas per thumbnail, stacked vertically.
        for i in range(4):
            rel_x = 0.19
            rel_y = 0.04 + (0.24 * i)
            self.image_frame_list[i] = Frame(self.frame_images)
            self.image_frame_list[i].place(relx=rel_x, rely=rel_y, height=self.tn_width, width=self.tn_width)
            self.image_frame_list[i].configure(relief=SOLID)
            self.image_frame_list[i].configure(borderwidth="1")
            self.image_frame_list[i].configure(background=_main_bg_color)
            self.image_frame_list[i].configure(cursor="hand2")
            self.image_canvas_list[i] = Canvas(self.image_frame_list[i], highlightthickness=0)
            self.image_canvas_list[i].configure(borderwidth="0")
            self.image_canvas_list[i].configure(background="#fff")
            self.image_canvas_list[i].grid(row=0, column=0, sticky='nswe')
            self.image_canvas_list[i].bind("<Button-1>", self.select_image)
            self.image_canvas_list[i].create_image((0, 0), image=self.photo_list[i], anchor=NW)
            # Keep a reference on the canvas so Tk's PhotoImage is not garbage-collected.
            self.image_canvas_list[i].image = self.photo_list[i]
            self.image_canvas_list[i].update()  # wait till canvas is created
        # Main visualizer frame configuration
        Tk.update(top)
        visual_width = int(self.frame_visual.winfo_width())
        visual_height = int(self.frame_visual.winfo_height())
        # The viewer is a centred square sized by the smaller pane dimension.
        if visual_width < visual_height:
            self.visual_size = visual_width
        else:
            self.visual_size = visual_height
        padx_visual = int((visual_width-self.visual_size) / 2)
        self.frame_visual_inner = Frame(self.frame_visual)
        self.frame_visual_inner.place(x=padx_visual, rely=0.0, height=self.visual_size, width=self.visual_size)
        self.frame_visual_inner.configure(relief=SUNKEN)
        self.frame_visual_inner.configure(borderwidth="0")
        self.frame_visual_inner.configure(background="#000")
        self.frame_visual_inner.configure(cursor="fleur")
        # Sliders configuration
        # Brightness slider (0..4, neutral = 1)
        self.scale_br = Scale(self.frame_sliders, from_=0, to=4, orient=HORIZONTAL, resolution=0.2)
        self.scale_br.place(relx=0.0, rely=0.05, relwidth=1)
        self.scale_br.configure(background=_side_bg_color)
        self.scale_br.configure(activebackground="#202020")
        self.scale_br.configure(foreground="#fff")
        self.scale_br.configure(borderwidth="0")
        self.scale_br.set(1)
        # Contrast slider (0..4, neutral = 1)
        self.scale_ct = Scale(self.frame_sliders, from_=0, to=4, orient=HORIZONTAL, resolution=0.2)
        self.scale_ct.place(relx=0.0, rely=0.15, relwidth=1)
        self.scale_ct.configure(background=_side_bg_color)
        self.scale_ct.configure(activebackground="#202020")
        self.scale_ct.configure(foreground="#fff")
        self.scale_ct.configure(borderwidth="0")
        self.scale_ct.configure(command="")
        self.scale_ct.set(1)
        # Sliders Commands
        # Both sliders re-run the enhance pipeline on the currently selected image.
        self.scale_br.configure(command=lambda _: self.enhance_image(self.main_image_object,
                                                                     self.big_image,
                                                                     self.scale_br,
                                                                     self.scale_ct))
        self.scale_ct.configure(command=lambda _: self.enhance_image(self.main_image_object,
                                                                     self.big_image,
                                                                     self.scale_br,
                                                                     self.scale_ct))
def select_image(self, event):
"""Select main canvas image"""
self.scale_ct.set(1)
self.scale_br.set(1)
count = 0
for item in self.image_canvas_list:
if item == event.widget:
item.configure(borderwidth="1")
self.big_image = self.resize_image(self.image_list[count], self.visual_size)
self.main_image_object = ZoomAdvanced.ZoomAdvanced(self.frame_visual_inner, self.big_image)
else:
item.configure(borderwidth="0")
count += 1
@staticmethod
def resize_image(image, new_size=None):
"""Resize an image using PIL"""
from PIL import Image
# original_image = numpy.array(image)
original_height, original_width = image.size
factor = int(new_size/original_width)
new_width = int(original_width * factor)
new_height = int(original_height * factor)
# resized_image = cv2.resize(original_image, (new_width, new_height))
# new_image = PIL.Image.fromarray(resized_image)
resized_img = image.resize((new_width, new_height), Image.ANTIALIAS)
return resized_img
@staticmethod
def enhance_image(zoom_object, image, br_scale, cts_scale):
"""Edit image brightness and contrast"""
from PIL import ImageEnhance
if zoom_object is not None:
brightness = br_scale.get()
contrast = cts_scale.get()
# if cts_scale is not None:
# sharpness = cts_scale.get()
# else:
# sharpness = 1
enhancer = ImageEnhance.Brightness(image)
edited_img = enhancer.enhance(brightness)
enhancer = ImageEnhance.Contrast(edited_img)
edited_img = enhancer.enhance(contrast)
zoom_object.set_image(edited_img)
zoom_object.show_image()
if __name__ == '__main__':
    from PIL import Image
    # Manual smoke test: loads four images from hard-coded developer-machine
    # paths and opens the viewer; adjust the paths before running locally.
    # NOTE(review): the *_path names actually hold PIL Image objects.
    fixed_path = Image.open(
        "C:/Users/Fabian/Desktop/Fabi_py_Projects/projects/Data_analysis/Data/Input/K/input_1.png")
    mov_path = Image.open(
        "C:/Users/Fabian/Desktop/Fabi_py_Projects/projects/Data_analysis/Data/Input/K/input_2.png")
    reg_path = Image.open(
        "C:/Users/Fabian/Desktop/Fabi_py_Projects/projects/Data_analysis/Data/Output/Mutual_info/Displacement/K/output.png")
    bw_path = Image.open(
        "C:/Users/Fabian/Desktop/Fabi_py_Projects/projects/Data_analysis/Data/Output/Mutual_info/Displacement/K/output.png")
    root = Tk()
    v = DVisual(root, fixed_path, mov_path, reg_path, bw_path)
    root.mainloop()
| true |
8b4f34593489281cdcf22ec7fa6f9839fd3e80ac | Python | AI-DI/Brancher | /development_playgrounds/GP_playground.py | UTF-8 | 1,629 | 2.75 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from brancher.variables import ProbabilisticModel
from brancher.stochastic_processes import GaussianProcess as GP
from brancher.stochastic_processes import SquaredExponentialCovariance as SquaredExponential
from brancher.stochastic_processes import ConstantMean
from brancher.variables import RootVariable
from brancher.standard_variables import NormalVariable as Normal
from brancher import inference
# Evaluation grid: 20 equally spaced inputs on [-2, 2].
num_datapoints = 20
x_range = np.linspace(-2, 2, num_datapoints)
x = RootVariable(x_range, name="x")
# Model: zero-mean GP prior with squared-exponential kernel, observed
# through Gaussian noise with standard deviation 0.2.
mu = ConstantMean(0.)
cov = SquaredExponential(scale=0.2, jitter=10**-4)
f = GP(mu, cov, name="f")
y = Normal(f(x), 0.2, name="y")
model = ProbabilisticModel([y])
# Observe data: a noisy sine wave sampled on the grid.
noise_level = 0.2
data = np.sin(2*np.pi*0.4*x_range) + noise_level*np.random.normal(0., 1., (1, num_datapoints))
y.observe(data)
# Variational model: independent learnable Normal over f at the grid points.
Qf = Normal(loc=np.zeros((num_datapoints,)),
            scale=2.,
            name="f(x)",
            learnable=True)
variational_model = ProbabilisticModel([Qf])
model.set_posterior_model(variational_model)
# Inference (stochastic variational inference with SGD)
inference.perform_inference(model,
                            number_iterations=2000,
                            number_samples=20,
                            optimizer='SGD',
                            lr=0.00001)
loss_list = model.diagnostics["loss curve"]
plt.plot(loss_list)
plt.show()
# Posterior: plot the posterior mean against the observed data.
posterior_samples = model.get_posterior_sample(8000)["f(x)"]
posterior_mean = posterior_samples.mean()
plt.plot(x_range, posterior_mean)
plt.scatter(x_range, data, color="k")
plt.show() | true |
c2b2a433d39e9dadf25e78e8f54dfc586563535d | Python | Ankur3107/scalingQA | /scalingqa/extractivereader/training/scheduler_factory.py | UTF-8 | 2,008 | 3.15625 | 3 | [
"MIT"
] | permissive | # -*- coding: UTF-8 -*-
""""
Created on 16.07.20
This module contains factory for creating schedulers.
:author: Martin Dočekal
"""
from abc import ABC, abstractmethod
from typing import Callable, Dict
import torch
from torch.optim.lr_scheduler import _LRScheduler # TODO: protected member access, seems dirty :(
class SchedulerFactory(ABC):
    """
    Abstract base class for learning rate scheduler creation (a factory).
    """
    @abstractmethod
    def create(self, optimizer: torch.optim.Optimizer) -> _LRScheduler:
        """
        Creates scheduler for given optimizer.
        :param optimizer: The used optimizer whose learning rate you want to schedule.
        :type optimizer: torch.optim.Optimizer
        :return: Created scheduler for given optimizer and with settings that are held by the factory.
        :rtype: _LRScheduler
        """
        pass
class AnySchedulerFactory(SchedulerFactory):
    """
    Factory that can create any scheduler on demand.
    """
    def __init__(self, creator: Callable[..., _LRScheduler], attr: Dict, optimizerAttr: str = "optimizer"):
        """
        Initialization of factory.
        :param creator: This will be called with given attributes (attr) and the optimizer will be passed
            as optimizerAttr attribute. You can use the class of scheduler itself.
        :type creator: Callable[..., _LRScheduler]
        :param attr: Dictionary with attributes that should be used. Beware that the attribute with name optimizerAttr
            is reserved for optimizer.
        :type attr: Dict
        :param optimizerAttr: Name of attribute that will be used to pass optimizer to scheduler.
        :type optimizerAttr: str
        """
        self.creator = creator
        self.attr = attr
        self.optimizerAttr = optimizerAttr
    def create(self, optimizer: torch.optim.Optimizer) -> _LRScheduler:
        """
        Creates scheduler for given optimizer.
        :param optimizer: Optimizer whose learning rate should be scheduled.
        :type optimizer: torch.optim.Optimizer
        :return: Scheduler built by ``creator`` from the stored attributes.
        :rtype: _LRScheduler
        """
        # Merge into a fresh dict instead of writing into self.attr: the
        # original mutated the factory's own attribute dict, leaking the last
        # optimizer reference between create() calls (and keeping it alive).
        call_kwargs = dict(self.attr)
        call_kwargs[self.optimizerAttr] = optimizer
        return self.creator(**call_kwargs)
| true |
c2d7cfe4d854cc6fa98de62ea2891488fba90853 | Python | 12rambau/sepal_ui | /sepal_ui/mapping/marker_cluster.py | UTF-8 | 622 | 2.59375 | 3 | [
"MIT"
] | permissive | """Custom implementation of the marker cluster to hide it at once."""
from ipyleaflet import MarkerCluster
from traitlets import Bool, observe
class MarkerCluster(MarkerCluster):
    """Overwrite (and shadow) ipyleaflet's ``MarkerCluster`` to hide all the
    underlying markers at once via a single synced ``visible`` trait.
    .. todo::
        remove when https://github.com/jupyter-widgets/ipyleaflet/issues/1108 is solved
    """
    # Cluster-level visibility flag, synced with the frontend widget.
    visible = Bool(True).tag(sync=True)
    @observe("visible")
    def toggle_markers(self, change):
        """change the marker value according to the cluster viz."""
        # Fan the cluster-level visibility out to every contained marker.
        for marker in self.markers:
            marker.visible = self.visible
| true |
3b91b591c4b2f7faad0f8201f50060608866c373 | Python | nOctaveLay/TM_information | /python-docs.py | UTF-8 | 542 | 2.65625 | 3 | [] | no_license | # 반드시 python-docs를 설치할것.
from docx import Document
from docx.shared import Inches

# Read law.txt and render each non-blank line as one row of a single-column
# table in a Word document.
document = Document()
with open('law.txt', 'r', encoding='utf-8') as f:
    file_list = list()
    for line in f:
        if line != '\n':
            # rstrip('\n') instead of line[:-1]: the final line of the file
            # may lack a trailing newline, and slicing would silently drop
            # its last character.
            file_list.append(line.rstrip('\n'))
table = document.add_table(rows=len(file_list), cols=1)
for index, file_element in enumerate(file_list):
    row_cell = table.rows[index].cells
    row_cell[0].text = file_element
document.add_page_break()
document.save("law_table.docx")
| true |
99ced1b3a9b9c7a493dd331be0bb7e2627c4ce4e | Python | courageousillumination/django-flags | /flags/flag_overrider.py | UTF-8 | 699 | 2.921875 | 3 | [] | no_license | """The base FlagOverrider class."""
from typing import Any
from flags.flag import Flag
class FlagOverrider(object):  # pragma: no cover
    """
    A flag overrider is an object that can override flag values.
    These get various bits of context (request, user, etc.) and use these to determine
    if the flag value should be changed.
    Subclasses override ``should_override`` (gate) and ``get_override`` (value).
    """
    # pylint: disable=unused-argument,no-self-use
    def should_override(self, flag: Flag, **kwargs) -> bool:
        """Whether an override should be considered. Defaults to False (never)."""
        return False
    def get_override(self, flag: Flag, **kwargs) -> Any:
        """Get the override for a specific flag, given a context.

        Only called when ``should_override`` returned True; must be
        implemented by subclasses."""
        raise NotImplementedError
| true |
72c36d98ce3eff4a83c1885dbceb599c7e0ce92c | Python | mrliuzhao/OpenCVNotebook-Python | /CarDetection/detector.py | UTF-8 | 5,876 | 2.671875 | 3 | [] | no_license | import cv2
import numpy as np
import time
'''
该文件用于使用UIUC数据集训练出识别汽车的BOW+SVM模型
'''
datapath = r".\resources\CarData\TrainImages"
SAMPLES = 400
def path(cls, i):
    """Build the path of UIUC training image number *i* with class prefix *cls*."""
    file_name = "%s%d.pgm" % (cls, i)
    return datapath + "/" + file_name
def get_flann_matcher():
    """Create a FLANN-based matcher configured for KD-tree search (algorithm 1, 5 trees)."""
    return cv2.FlannBasedMatcher({"algorithm": 1, "trees": 5}, {})
def get_bow_extractor(extract, match):
    """Wrap a descriptor extractor and a matcher into a BOW image descriptor extractor."""
    bow_extractor = cv2.BOWImgDescriptorExtractor(extract, match)
    return bow_extractor
def get_extract_detect():
    """Return an (extractor, detector) pair, each a fresh SIFT instance."""
    extractor = cv2.xfeatures2d.SIFT_create()
    detector = cv2.xfeatures2d.SIFT_create()
    return extractor, detector
def extract_sift(fn, extractor):
    """Load image file *fn* as grayscale and return its SIFT descriptors."""
    image = cv2.imread(fn, cv2.IMREAD_GRAYSCALE)
    _keypoints, descriptors = extractor.detectAndCompute(image, mask=None)
    return descriptors
def bow_features(img, extractor_bow, detector):
    """Compute the BOW histogram descriptor of *img* from *detector*'s keypoints."""
    keypoints = detector.detect(img)
    return extractor_bow.compute(img, keypoints)
def car_detector(cluster_count=40, extractor=None, matcher=None):
    """Train and return the car-recognition SVM plus its BOW feature extractor.

    :param cluster_count: number of k-means clusters, i.e. visual-vocabulary size
    :param extractor: feature extractor (ORB/SIFT/SURF...); defaults to SIFT
    :param matcher: descriptor matcher; defaults to a FLANN-based matcher
    :return: (trained cv2.ml SVM, BOW descriptor extractor, vocabulary array)
    """
    # Build the defaults per call: evaluating cv2 factory calls in the
    # signature (as before) runs once at import time and shares a single
    # mutable object across every invocation.
    if extractor is None:
        extractor = cv2.xfeatures2d.SIFT_create()
    if matcher is None:
        matcher = cv2.FlannBasedMatcher()
    pos, neg = "pos-", "neg-"
    print("building BOWKMeansTrainer...")
    bow_kmeans_trainer = cv2.BOWKMeansTrainer(cluster_count)
    extract_bow = cv2.BOWImgDescriptorExtractor(extractor, matcher)
    print("adding features to trainer")
    start = time.time()
    # Collect descriptors from every positive and negative training image.
    for i in range(SAMPLES):
        kpts, sift_pos = extractor.detectAndCompute(cv2.imread(path(pos, i), cv2.IMREAD_GRAYSCALE), mask=None)
        if sift_pos is not None:
            bow_kmeans_trainer.add(sift_pos)
        kpts, sift_neg = extractor.detectAndCompute(cv2.imread(path(neg, i), cv2.IMREAD_GRAYSCALE), mask=None)
        if sift_neg is not None:
            bow_kmeans_trainer.add(sift_neg)
    # Cluster all collected descriptors into the visual-word vocabulary.
    vocabulary = bow_kmeans_trainer.cluster()
    print("Vocabulary Shape:", vocabulary.shape)  # (cluster_count, 128)
    extract_bow.setVocabulary(vocabulary)
    end = time.time()
    print("训练BOW时间:", (end - start))
    traindata, trainlabels = [], []
    print("adding to train data")
    start = time.time()
    # Build one BOW histogram per image; labels: +1 car, -1 background.
    for i in range(SAMPLES):
        bowDes_pos = bow_features(cv2.imread(path(pos, i), cv2.IMREAD_GRAYSCALE), extract_bow, extractor)
        if bowDes_pos is not None:
            traindata.extend(bowDes_pos)
            trainlabels.append(1)
        bowDes_neg = bow_features(cv2.imread(path(neg, i), cv2.IMREAD_GRAYSCALE), extract_bow, extractor)
        if bowDes_neg is not None:
            traindata.extend(bowDes_neg)
            trainlabels.append(-1)
    # RBF-kernel C-SVC trained on the BOW histograms.
    svm = cv2.ml.SVM_create()
    svm.setType(cv2.ml.SVM_C_SVC)
    svm.setGamma(1)
    svm.setC(35)
    svm.setKernel(cv2.ml.SVM_RBF)
    svm.train(np.array(traindata), cv2.ml.ROW_SAMPLE, np.array(trainlabels))
    end = time.time()
    print("训练SVM时间:", (end - start))
    return svm, extract_bow, vocabulary
def train_bowextractor(cluster_count=40, extractor=None, matcher=None):
    """Train only the BOW feature extractor (visual vocabulary).

    :param cluster_count: number of k-means clusters (vocabulary size)
    :param extractor: feature extractor (ORB/SIFT/SURF...); defaults to SIFT
    :param matcher: descriptor matcher; defaults to a FLANN-based matcher
    :return: (vocabulary array of k-means centres, BOW descriptor extractor)
    """
    # Per-call defaults instead of evaluating cv2 factories in the signature
    # (which would create shared objects once at import time).
    if extractor is None:
        extractor = cv2.xfeatures2d.SIFT_create()
    if matcher is None:
        matcher = cv2.FlannBasedMatcher()
    pos, neg = "pos-", "neg-"
    print("building BOWKMeansTrainer...")
    bow_kmeans_trainer = cv2.BOWKMeansTrainer(cluster_count)
    extract_bow = cv2.BOWImgDescriptorExtractor(extractor, matcher)
    print("adding features to bow k-means trainer")
    start = time.time()
    # Collect descriptors from every positive and negative training image.
    for i in range(SAMPLES):
        kpts, sift_pos = extractor.detectAndCompute(cv2.imread(path(pos, i), cv2.IMREAD_GRAYSCALE), mask=None)
        if sift_pos is not None:
            bow_kmeans_trainer.add(sift_pos)
        kpts, sift_neg = extractor.detectAndCompute(cv2.imread(path(neg, i), cv2.IMREAD_GRAYSCALE), mask=None)
        if sift_neg is not None:
            bow_kmeans_trainer.add(sift_neg)
    vocabulary = bow_kmeans_trainer.cluster()
    print("Vocabulary Shape:", vocabulary.shape)  # (cluster_count, 128)
    extract_bow.setVocabulary(vocabulary)
    end = time.time()
    print("训练BOW时间:", (end - start))
    return vocabulary, extract_bow
def train_bownn(bowextractor, extractor=None):
    """Assemble BOW training data intended for a two-class neural network.

    :param bowextractor: trained BOW descriptor extractor
    :param extractor: keypoint detector/extractor; defaults to SIFT
    :return: 1 (placeholder)

    TODO: despite the name, no network is trained yet -- traindata and
    trainlabels are built and their shapes printed, but never fed to an ANN.
    """
    # Per-call default instead of evaluating the cv2 factory in the signature
    # (which would create a single shared instance at import time).
    if extractor is None:
        extractor = cv2.xfeatures2d.SIFT_create()
    pos, neg = "pos-", "neg-"
    traindata, trainlabels = [], []
    print("adding to train data")
    start = time.time()
    # One BOW histogram per image; labels: +1 car, -1 background.
    for i in range(SAMPLES):
        bowDes_pos = bow_features(cv2.imread(path(pos, i), cv2.IMREAD_GRAYSCALE), bowextractor, extractor)
        if bowDes_pos is not None:
            traindata.extend(bowDes_pos)  # bowDes shape: (1, cluster_count)
            trainlabels.append(1)
        bowDes_neg = bow_features(cv2.imread(path(neg, i), cv2.IMREAD_GRAYSCALE), bowextractor, extractor)
        if bowDes_neg is not None:
            traindata.extend(bowDes_neg)
            trainlabels.append(-1)
    end = time.time()
    traindata = np.array(traindata)
    trainlabels = np.array(trainlabels)
    print('traindata shape:', traindata.shape)  # (num_kept_images, cluster_count)
    print('trainlabels shape:', trainlabels.shape)  # (num_kept_images, )
    print("训练ANN时间:", (end - start))
    return 1
| true |
6c36e52d7f9d1781e7f2fabfff3efd4fe9c2a8fb | Python | David-Carrasco-Vidaurre/trabajo05.Carrasco.Castillo | /verificador03.py | UTF-8 | 409 | 3.515625 | 4 | [] | no_license | # calculadora nro3
# this calculator computes power as potencia = trabajo / tiempo (work / time)
# variable declaration
trabajo, tiempo, potencia = 0.0 , 0.0 , 0.0
# calculation
trabajo = 18
tiempo = 9
# NOTE(review): floor division is used here; for these inputs (18 // 9) it
# equals true division, but fractional results would be truncated.
potencia = (trabajo // tiempo)
verificador=(potencia>=2)
# show the results
print ( " trabajo = " , trabajo)
print ( " tiempo = " , tiempo)
print ( " potencia = " , potencia)
print("Potencia >=2", verificador)
| true |
a0d4463dc28ad6338f59282d3b7c7c47a37e34f4 | Python | cuttlefish/stactools | /src/stactools/core/io/__init__.py | UTF-8 | 1,300 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | from typing import Callable, Optional, Any
from pystac.stac_io import DefaultStacIO, StacIO
import fsspec
ReadHrefModifier = Callable[[str], str]
"""Type alias for a function parameter
that allows users to manipulate HREFs for reading,
e.g. appending an Azure SAS Token or translating
to a signed URL
"""
def read_text(href: str,
              read_href_modifier: Optional[ReadHrefModifier] = None) -> str:
    """Read text from *href*, first rewriting the HREF through
    *read_href_modifier* when one is supplied (e.g. to append a SAS token)."""
    target = href if read_href_modifier is None else read_href_modifier(href)
    return StacIO.default().read_text(target)
class FsspecStacIO(DefaultStacIO):
    """StacIO implementation backed by fsspec, so HREFs may use any protocol
    fsspec supports (local paths, s3://, gs://, http(s)://, ...)."""
    def read_text_from_href(self, href: str, *args: Any, **kwargs: Any) -> str:
        """Read *href* via fsspec; bytes payloads are decoded as UTF-8."""
        with fsspec.open(href, "r") as f:
            s = f.read()
            if isinstance(s, str):
                return s
            elif isinstance(s, bytes):
                return str(s, encoding='utf-8')
            else:
                raise ValueError(
                    f"Unable to decode data loaded from HREF: {href}")
    def write_text_from_href(self, href: str, txt: str, *args: Any,
                             **kwargs: Any) -> None:
        """Write *txt* to *href* via fsspec (text mode)."""
        with fsspec.open(href, "w") as destination:
            destination.write(txt)
def use_fsspec() -> None:
    """Install FsspecStacIO as pystac's process-wide default StacIO."""
    StacIO.set_default(FsspecStacIO)
| true |
b93f34daabfbf383deda18682dfa807ffb074a6c | Python | raster-foundry/raster-foundry-python-client | /tests/test_notebook_check.py | UTF-8 | 845 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | def test_warn_without_notebook_support():
import rasterfoundry.decorators
rasterfoundry.decorators.NOTEBOOK_SUPPORT = False
from rasterfoundry.decorators import check_notebook
@check_notebook
def f():
return 'foo'
assert f() is None
def test_warn_without_notebook_support_with_args():
    # With NOTEBOOK_SUPPORT off, check_notebook must turn the wrapped call
    # into a no-op returning None, regardless of the arguments passed.
    import rasterfoundry.decorators
    rasterfoundry.decorators.NOTEBOOK_SUPPORT = False
    from rasterfoundry.decorators import check_notebook
    @check_notebook
    def f(*args, **kwargs):
        return 'foo'
    assert f(1, 2, 3, foo='bar') is None
def test_no_warn_with_notebook_support():
    # With NOTEBOOK_SUPPORT on, check_notebook must pass the call straight
    # through and return the wrapped function's result.
    import rasterfoundry.decorators
    rasterfoundry.decorators.NOTEBOOK_SUPPORT = True
    from rasterfoundry.decorators import check_notebook
    @check_notebook
    def f():
        return 'foo'
    assert f() == 'foo'
| true |
ca3a2460a7f07b378c3a2fe25d2ecd6b1d3428ad | Python | Aasthaengg/IBMdataset | /Python_codes/p03078/s543307944.py | UTF-8 | 1,148 | 3.046875 | 3 | [] | no_license | import heapq
# Read list sizes x, y, z and the number k of largest sums to report,
# then the three value lists, each sorted in descending order.
x, y, z, k = map(int, input().split())
a = sorted(map(int, input().split()))[::-1]
b = sorted(map(int, input().split()))[::-1]
c = sorted(map(int, input().split()))[::-1]

# Best-first search over index triples (p, q, r) into the descending lists:
# (0, 0, 0) is the largest sum; each popped triple pushes its three
# successors, guarded by the `popped` set so no triple is enqueued twice.
print(a[0] + b[0] + c[0])
candidates = []
if x > 1: candidates.append((-(a[1] + b[0] + c[0]), 1, 0, 0))
if y > 1: candidates.append((-(a[0] + b[1] + c[0]), 0, 1, 0))
if z > 1: candidates.append((-(a[0] + b[0] + c[1]), 0, 0, 1))
heapq.heapify(candidates)

popped = {(1, 0, 0), (0, 1, 0), (0, 0, 1)}
for _ in range(1, k):
    value, p, q, r = heapq.heappop(candidates)
    print(-value)
    # Push each unseen successor that stays inside its list's bounds.
    # (The original abused bare try/except around dict lookups for this
    # membership test, silently swallowing every exception type.)
    for axis, limit in ((0, x), (1, y), (2, z)):
        nxt = [p, q, r]
        nxt[axis] += 1
        nxt = tuple(nxt)
        if nxt not in popped and nxt[axis] < limit:
            heapq.heappush(candidates, (-(a[nxt[0]] + b[nxt[1]] + c[nxt[2]]),) + nxt)
            popped.add(nxt)
eca7d8f259370d8c4c4dbe4857b31d085519a85e | Python | duleignjatovic995/OpenParliamentAnalysis | /preprocess/preprocess_data.py | UTF-8 | 4,307 | 3.546875 | 4 | [] | no_license | """
This file contains methods for preprocessing text.
The intendet pipeline would be:
1. s = get_stemmed_list_of_documents(list_of_documents) # parsing one document at a time
2. d = create_dictionary(s)
3. m = create_document_term_matrix(d, s) # bag of words
"""
from preprocess.stemmers.Croatian_stemmer import stem_list as CroStemmer
from nltk.tokenize import word_tokenize
from preprocess.stop_words import stop_words, waste_words
from gensim import corpora, models
import os
import re
def get_stemmed_document_list(text):
    """
    Method for converting raw text to list of stemmed tokens.
    Pipeline: strip punctuation/digits -> lowercase tokenize -> drop stop
    words -> Croatian stemming -> drop waste words.
    :param text: raw text
    :return: list of preprocessed tokens
    """
    # Replace punctuation, quotes and digits with spaces
    string = re.sub('[\.,:;\(\)\'“`0-9]', ' ', text)
    # Get list of tokens
    tokens = word_tokenize(string.lower())
    # Remove stop words
    stop_tokens = [token for token in tokens if not token in stop_words]
    # Stemming
    stemmed_tokens = CroStemmer(stop_tokens)
    # Filter useless words left over after stemming
    filtered_tokens = [token for token in stemmed_tokens if not token in waste_words]
    return filtered_tokens
def get_stemmed_list_of_documents(list_of_documents):
    """
    Preprocess every document in the collection.
    :param list_of_documents: e.g. ['tomato potato', 'salad soup meat', ...]
    :return: list of stemmed token lists e.g. [['tomat', 'potat'], ['salad', 'sou', 'mea'], ...]
    """
    return [get_stemmed_document_list(document) for document in list_of_documents]
def get_ngrams(list_of_tokenized_documents, min_count=20):
    """
    Method for finding most occurring bigrams.
    NOTE: mutates the input lists in place -- detected bigram tokens
    (containing '_') are appended to the end of each document, which is
    fine for bag-of-words models where token order is irrelevant.
    :param list_of_tokenized_documents: list of token lists
    :param min_count: ignore all words and bigrams with total collected count lower than this.
    :return: the same documents, extended with their most common bi-grams
    """
    ngram = models.phrases.Phrases(list_of_tokenized_documents, min_count=min_count)
    for idx in range(len(list_of_tokenized_documents)):
        for token in ngram[list_of_tokenized_documents[idx]]:
            if '_' in token:
                # Token is a bigram - add to document (list of tokens)
                list_of_tokenized_documents[idx].append(token)
    return list_of_tokenized_documents
def create_dictionary(list_of_tokenized_documents, min_occur=1, max_occur=1, save='', print_dict=False):
    """
    Method for turning tokenized documents into an id <-> term dictionary.
    :param list_of_tokenized_documents: list of stemmed document lists e.g. [['tomat', 'potat'], ['salad', 'sou', 'mea'] ...]
    :param min_occur: minimum number of documents a word must occur in (gensim no_below)
    :param max_occur: maximum fraction of documents a word may occur in (gensim no_above)
    :param save: filename to save under in ../temp relative to this module; empty string disables saving
    :param print_dict: if True, prints the token -> id mapping
    :return: id <-> term dictionary
    """
    dictionary = corpora.Dictionary(list_of_tokenized_documents)
    dictionary.filter_extremes(no_below=min_occur, no_above=max_occur)
    if save != '':
        pathname = '../temp/' + save
        try:
            # Resolve relative to this module so the cwd does not matter.
            with open(os.path.join(os.path.dirname(__file__), pathname), 'wb') as f:
                dictionary.save(f)
        except IOError:
            # Best-effort persistence: saving failure is reported, not fatal.
            print("Couldn't save dictionary to temp folder :(")
    if print_dict is True:
        print(dictionary.token2id)
    return dictionary
def create_document_term_matrix(dictionary, list_of_tokenized_documents):
    """
    Build the bag-of-words corpus: one sparse (token_id, count) list per document.
    :param dictionary: id <-> term dictionary (anything exposing ``doc2bow``)
    :param list_of_tokenized_documents: list of token lists
    :return: list with one bag-of-words representation per input document
    """
    return [dictionary.doc2bow(tokens) for tokens in list_of_tokenized_documents]
def preprocess_pipeline(list_of_documents, ngram=True, min_occur=1, max_occur=1, save_dict=''):
    """Run the full preprocessing chain: stem each document, optionally add
    frequent bigrams, build the id<->term dictionary, and return the
    bag-of-words matrix together with that dictionary."""
    tokenized = get_stemmed_list_of_documents(list_of_documents)
    if ngram is True:
        tokenized = get_ngrams(tokenized)
    dictionary = create_dictionary(tokenized, min_occur=min_occur, max_occur=max_occur, save=save_dict)
    bag_of_words = create_document_term_matrix(dictionary, tokenized)
    return bag_of_words, dictionary
| true |
93f8c828a683ab2d539cc0b77150d822f0421659 | Python | Aasthaengg/IBMdataset | /Python_codes/p02397/s336800437.py | UTF-8 | 173 | 3.21875 | 3 | [] | no_license | while True :
a = raw_input().split()
x = int(a[0])
y = int(a[1])
if x == 0 and y == 0 :
break
elif x < y :
print u"%d %d" % (x, y)
else :
print u"%d %d" % (y, x) | true |
fdb76e2af2bacafcc6185d3bd1b72c8b08d8b490 | Python | MichiganCOG/video-frame-inpainting | /videolist/master_to_contiguous.py | UTF-8 | 1,759 | 2.9375 | 3 | [] | no_license | import argparse
def range_to_str(a, b):
    """Format an inclusive 1-indexed frame range as 'a-b'."""
    return '%d-%d' % (a, b)


def str_to_range(str):
    """Parse an 'a-b' range string into a tuple of ints."""
    return tuple(int(d) for d in str.split('-'))


def main(input_path, output_path, clip_length, default_stride, first_only):
    """Expand each video's master frame range into fixed-length clip ranges.

    Each input line is '<video_file> <start>-<end>' (1-indexed, inclusive).
    For every video, clips of ``clip_length`` frames are emitted starting at
    strided offsets; with ``first_only`` only the first clip per video.

    Fixes vs. the original: files are managed with ``with`` (the writer was
    never closed on error), blank lines are skipped, and ``xrange`` (Python 2
    only) is replaced by ``range``.
    """
    with open(input_path, 'r') as reader, open(output_path, 'w') as writer:
        for line in reader:
            line = line.strip()
            if not line:
                continue
            video_file_name, video_range = line.split()
            video_range_start, video_range_end = str_to_range(video_range)

            # Note: Stride is changed for KTH's running and jogging classes as
            # per Villegas et al. (2017) to keep examples per class similar.
            stride = 3 if 'running' in video_file_name or 'jogging' in video_file_name else default_stride
            # Start indexes whose clip_length-frame interval fits in the range.
            possible_start_indexes = range(video_range_start, video_range_end - clip_length + 2, stride)

            for start_index in possible_start_indexes:
                writer.write('%s %s\n' % (
                    video_file_name,
                    range_to_str(start_index, start_index + clip_length - 1)
                ))
                if first_only:
                    break
if __name__ == '__main__':
    # CLI: expand a master video list into fixed-length clip entries.
    parser = argparse.ArgumentParser()
    parser.add_argument('input_path', type=str)
    parser.add_argument('output_path', type=str)
    parser.add_argument('--clip_length', type=int, default=20)
    parser.add_argument('--default_stride', type=int, default=10)
    parser.add_argument('--first_only', action='store_true')
    args = parser.parse_args()
main(**vars(args)) | true |
1a1204e4face38bd241e7aa4fc45b2f43373d86a | Python | kaer-hero/python-learning | /001.py | UTF-8 | 1,002 | 3.953125 | 4 | [] | no_license | print('i love %s')
print('i love %s'%"lixiao")
print('i am %d years old'%18)
print('i am %d years old, i am %s'%(18,'minlei'))
s = 'i love {}'.format('lixiao')
print(s)
s = 'i am {1}, i love {0}, {1} hate the dog'.format('lixiao','wangjun')
print(s)
# format spec: str.format has a rich mini-language; the spec follows ':' inside {}
# fill & align: ^ < > mean centre / left / right alignment, followed by a width;
# the fill character (exactly one char) goes right after ':'; default fill is a space
print('{0}, {1}'.format('kzc', 18))
print('{:>8}'.format('189'))
print('{:0>8}'.format(189))
print('{:a>8}'.format('189'))
# precision & type 'f': precision is usually combined with the float type 'f'
print('{:.2f}'.format(321.33345))
# '.2' is a precision of 2 and 'f' means float; the other types are mainly the
# integer bases: b, d, o, x for binary, decimal, octal and hexadecimal
print('{:,}'.format(1234567890))  # ',' also works as a thousands separator for amounts
db0a6635bfe78c2dd716577409eade599458dad5 | Python | zuxinlin/leetcode | /leetcode/709.ToLowerCase.py | UTF-8 | 692 | 3.890625 | 4 | [] | no_license | #! /usr/bin/env python
# coding: utf-8
'''
题目: 转换成小写字母 https://leetcode-cn.com/problems/to-lower-case/
主题: string
解题思路:
1. 调用字符串库函数lower
'''
class Solution(object):
    """LeetCode 709: convert a string to lower case without str.lower()."""

    def toLowerCase(self, str):
        """
        :type str: str
        :rtype: str
        """
        # Shift characters in 'A'..'Z' down by 32 (the ASCII case offset);
        # every other character passes through unchanged.
        return ''.join(
            chr(ord(ch) + 32) if 'A' <= ch <= 'Z' else ch
            for ch in str
        )
if __name__ == '__main__':
    # Quick self-check when run as a script.
    solution = Solution()
    assert 'hello' == solution.toLowerCase('Hello')
| true |
f87dc1d166b750049e50a27de9a24a285628522f | Python | chuckbenger/Asteroids-Multiplayer-Backend | /services/common/adapters/sqs_game_queue.py | UTF-8 | 1,991 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | import boto3
from typing import List
from common.domain.player import Player
from common.domain.game_queue_interface import GameQueueInterface
class SQSGameQueueAdapter(GameQueueInterface):
    """GameQueueInterface implementation backed by an AWS SQS match-making queue."""
    def __init__(self, queue_name: str):
        # Keep both a high-level resource handle (send/receive) and a
        # low-level client (attribute queries) for the same queue.
        self.queue_name = queue_name
        self.sqs = boto3.resource('sqs')
        self.client = boto3.client('sqs')
        self.queue = self.sqs.get_queue_by_name(QueueName=queue_name)
    def push(self, player: Player) -> bool:
        """Enqueue *player*; truthy result iff SQS acknowledged the message."""
        response = self.queue.send_message(
            MessageAttributes={
                "user_id": {
                    "DataType": "String",
                    "StringValue": player.player_id
                },
                "user_name": {
                    "DataType": "String",
                    "StringValue": player.name
                }
            },
            MessageBody="Match Making"
        )
        # NOTE(review): this actually returns the MessageId string (truthy)
        # rather than a strict bool, despite the annotation.
        return response and response['MessageId']
    def pop(self, max_size: int = 1) -> List[Player]:
        """Receive up to *max_size* players and delete their messages.

        Messages missing the expected attributes are still deleted (dropped).
        """
        messages = self.queue.receive_messages(
            MessageAttributeNames=['user_id', 'user_name'],
            MaxNumberOfMessages=max_size,
        )
        players: List[Player] = []
        for message in messages:
            if message.message_attributes:
                id = message.message_attributes.get(
                    'user_id').get('StringValue')
                name = message.message_attributes.get(
                    'user_name').get('StringValue')
                player = Player(id, name, None)
                players.append(player)
            message.delete()
        return players
    def size(self) -> int:
        """Approximate number of queued messages (SQS gives no exact count)."""
        results = self.client.get_queue_attributes(
            QueueUrl=self.queue.url,
            AttributeNames=['ApproximateNumberOfMessages']
        )
        if results:
            return int(results['Attributes']['ApproximateNumberOfMessages'])
        else:
            return 0
    def purge(self) -> None:
        """Delete all messages currently in the queue."""
        self.queue.purge()
| true |
0553f121a3d7a1ec316d447765cfc947c935e1df | Python | javokhirbek1999/CodeSignal | /Arcade/Intro/Island-Of-Knowledge/avoidObstacles.py | UTF-8 | 311 | 2.828125 | 3 | [] | no_license | def avoidObstacles(inputArray):
i = 1
while True:
j = i
while True:
if j in inputArray:
break
elif j>max(inputArray):
return i
else:
j+=i
i+=1
if max(inputArray)<i:
return i
| true |
c79805236b267261e887e514b86020c7363332bc | Python | cbg-ethz/openproblems2021 | /task01_predictmodality/method/scmm/vaes/vis.py | UTF-8 | 3,251 | 2.65625 | 3 | [
"MIT"
] | permissive | # visualisation related functions
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from matplotlib.lines import Line2D
from umap import UMAP
def custom_cmap(n):
"""Create customised colormap for scattered latent plot of n categories.
Returns colormap object and colormap array that contains the RGB value of the colors.
See official matplotlib document for colormap reference:
https://matplotlib.org/examples/color/colormaps_reference.html
"""
# first color is grey from Set1, rest other sensible categorical colourmap
cmap_array = sns.color_palette("Set1", 9)[-1:] + sns.husl_palette(
n - 1, h=0.6, s=0.7
)
cmap = colors.LinearSegmentedColormap.from_list("mmdgm_cmap", cmap_array)
return cmap, cmap_array
def embed_umap(data):
"""data should be on cpu, numpy"""
embedding = UMAP(
metric="euclidean",
n_neighbors=40,
# angular_rp_forest=True,
# random_state=torch.initial_seed(),
# transform_seed=torch.initial_seed()
)
return embedding.fit_transform(data)
def plot_embeddings(emb, emb_l, labels, filepath):
cmap_obj, cmap_arr = custom_cmap(n=len(labels))
plt.figure()
plt.scatter(
emb[:, 0],
emb[:, 1],
c=emb_l,
cmap=cmap_obj,
s=0.5,
alpha=0.2,
edgecolors="none",
)
l_elems = [
Line2D([0], [0], marker="o", color=cm, label=l, alpha=0.5, linestyle="None")
for (cm, l) in zip(cmap_arr, labels)
]
plt.legend(frameon=False, loc=2, handles=l_elems)
plt.savefig(filepath, bbox_inches="tight", dpi=2500)
plt.close()
def tensor_to_df(tensor, ax_names=None):
assert tensor.ndim == 2, "Can only currently convert 2D tensors to dataframes"
df = pd.DataFrame(data=tensor, columns=np.arange(tensor.shape[1]))
return df.melt(
value_vars=df.columns,
var_name=("variable" if ax_names is None else ax_names[0]),
value_name=("value" if ax_names is None else ax_names[1]),
)
def tensors_to_df(tensors, head=None, keys=None, ax_names=None):
dfs = [tensor_to_df(tensor, ax_names=ax_names) for tensor in tensors]
df = pd.concat(dfs, keys=(np.arange(len(tensors)) if keys is None else keys))
df.reset_index(level=0, inplace=True)
if head is not None:
df.rename(columns={"level_0": head}, inplace=True)
return df
def plot_kls_df(df, filepath, yscale):
_, cmap_arr = custom_cmap(df[df.columns[0]].nunique() + 1)
with sns.plotting_context("notebook", font_scale=2.0):
g = sns.FacetGrid(df, height=12, aspect=2)
g = g.map(
sns.boxplot,
df.columns[1],
df.columns[2],
df.columns[0],
palette=cmap_arr[1:],
showfliers=False,
order=None,
hue_order=None,
)
# g = g.set(yscale='log').despine(offset=10)
# if yscale is not None:
# g = g.set(yscale=yscale).despine(offset=10)
g = g.set(yscale=yscale).despine(offset=10)
plt.legend(loc="best", fontsize="22")
plt.savefig(filepath, bbox_inches="tight")
plt.close()
| true |
e81f13575137b694c8fa91af5fb1b5be46cb02fd | Python | fujikosu/Keras-BatchAI | /keras.py | UTF-8 | 2,010 | 2.703125 | 3 | [] | no_license | from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- let's say we have 200 classes
predictions = Dense(5, activation='softmax')(x)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
batch_size = 16
train_datagen = image.ImageDataGenerator(
horizontal_flip=True,
vertical_flip=True,
rotation_range=0.2,
zoom_range=0.2,
shear_range=0.2)
valid_datagen = image.ImageDataGenerator()
test_datagen = image.ImageDataGenerator()
train_generator = train_datagen.flow_from_directory(
'image_split/training', # this is the target directory
target_size=(224, 224), # all images will be resized to 150x150
batch_size=batch_size)
validation_generator = valid_datagen.flow_from_directory(
'image_split/training', # this is the target directory
target_size=(224, 224), # all images will be resized to 150x150
batch_size=batch_size)
# train the model on the new data for a few epochs
model.fit_generator(train_generator, steps_per_epoch=len(train_generator.classes) / batch_size, epochs=10, verbose=1, callbacks=None, validation_data=validation_generator, validation_steps=len(validation_generator.classes) / batch_size, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0)
| true |
848ad20e958c658d325337ad3a248caa491eda00 | Python | imwujue/python-practice-wujue | /Q76.py | UTF-8 | 248 | 3.375 | 3 | [] | no_license | def solve(n):
sum = 0.0
while True:
sum += 1/n
# print(1/n)
# print(sum)
if n == 1 or n == 2:
break
else:
n -= 2
return sum
n = int(input("n:"))
print('sum:%lf' %solve(n)) | true |
d75a3b09b0937ebc6c6d0fd1fd0062cd191b32a2 | Python | taka1156/AtCoder | /ABC/ABC_B_Product_Max.py | UTF-8 | 367 | 3.03125 | 3 | [] | no_license | import test_case
_CASE = """\
-1000000000 0 -1000000000 0
"""
test_case.test_input(_CASE)
###########
# code
##########
a, b, c, d = map(int, input().split())
print(max(max(a * c, a * d), max(b * c, b * d)))
# 最大になるパターンは
# 範囲がプラス側のみの場合、`-x * -y, x * y`
# 範囲がマイナス側のみの場合は` -x * x, x * -y`
| true |
7e1fe19f693b96284196cf99ee2dcfc5f267704c | Python | Alex10ua/Detected | /venv/imagedetect.py | UTF-8 | 1,289 | 2.609375 | 3 | [] | no_license | from imageai.Detection import ObjectDetection
import os
exac_path=os.getcwd()#вказує шлях до цього проекту щоб програма знаходила додаткові файли
detector=ObjectDetection()
detector.setModelTypeAsRetinaNet()# встановлюємо те що використовуємо рітіна модель для визначення об єктів
detector.setModelPath(os.path.join(exac_path, "resnet50_coco_best_v2.0.1.h5")) #вказуємо шлях до моделі
detector.loadModel()# її завантаження
list=detector.detectObjectsFromImage(input_image=os.path.join(exac_path,"object2.jpg"), #надаємо методу зобразення
output_image_path=os.path.join(exac_path,"Detected_objects.jpg"), #видає проаналізоване зображення
minimum_percentage_probability=30,# min perent for detect
display_percentage_probability=True,# відображення процентів на вихідній картинці
display_object_name=True #відображення імені об єкту
) | true |
79dd3317aee307acf7f91d1668f78a63a7e357b6 | Python | harkevich/testgithub | /Lesson/Lesson30 Модули в Python.py | UTF-8 | 537 | 2.828125 | 3 | [] | no_license | # import os
# # import random as r
# # import random
# from random import randint, shuffle # доступны только два метода randint и shuffle
# from random import * # доступ все модули из random
#
#
# print(os.getcwd())
# # print(random.randint(1 , 100))
# print(randint(1, 100))
# l = [1, 2, 3, 4, 5]
# shuffle(l)
# print(l)
#
#
# import libs
#
# print(libs.get_count('hello', 'd'))
# print(libs.get_len('hello'))
import libs as l
print(l.get_count('hello', 'd'))
print(l.get_len('hello'))
| true |
22744458c6086948040d719df8f4930f0120a06f | Python | DataDeveloper7865/my-flask-app | /app.py | UTF-8 | 633 | 3.078125 | 3 | [] | no_license | from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
""" Show homepage"""
return """
<html>
<body>
<h1> I am the landing page </h1>
</body>
</html>
"""
@app.route('/hello')
def say_hello():
"""Return simple "Hello" Greeting."""
html = "<html><body><h1>Hello World! I am coming alive!</h1></body></html>"
return html
@app.route('/goodbye')
def say_goodbye():
"""Return simple "Goodbye" Greeting."""
html = "<html><body><h1>Goodbye World! I am leaving for another route for now!</h1></body></html>"
return html | true |
0d5704bdd1fd815a263d74f8e526fcc755c5a7cc | Python | rjm49/mltm | /static/classes.py | UTF-8 | 3,383 | 2.625 | 3 | [] | no_license | import numpy
from utils import generate_student_name
from keras import backend as K
from keras.constraints import Constraint
from keras.engine.topology import Layer
from keras import initializers, constraints
class WeightClip(Constraint):
'''Clips the weights incident to each hidden unit to be inside a range
'''
def __init__(self, min_w=0, max_w=4):
self.min_w = min_w
self.max_w = max_w
def __call__(self, p):
return K.clip(p, self.min_w, self.max_w)
def get_config(self):
return {'name': self.__class__.__name__,
'min_w': self.min_w,
'max_w': self.max_w }
class BigTable(Layer):
def __init__(self, _dim, min_w=0, max_w=10, **kwargs):
self.dim = _dim
self.limits = (min_w, max_w)
kc =WeightClip(min_w, max_w)
self.kernel_constraint= constraints.get(kc)
super(BigTable, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
min_w, max_w = self.limits
av_w = (min_w + max_w)/2.0
initialiser = initializers.RandomUniform(min_w, max_w)
self.kernel = self.add_weight(name='kernel',
shape=(self.dim),
initializer=initialiser,
trainable=True,
constraint=self.kernel_constraint)
print("kk", self.kernel.shape)
super(BigTable, self).build(input_shape) # Be sure to call this at the end
def call(self, selector):
print("selector shape", selector.shape)
selector = K.flatten(selector)
print("flat selector shape", selector.shape)
print("call kk", self.kernel.shape)
# selector = tf.Print(selector, [selector], message="selector is:", first_n=-1, summarize=1024)
rows = K.gather(self.kernel, selector)
# rows = tf.Print(rows, [rows], message="row is:", first_n=-1, summarize=1024)
print("'rows' shape,",rows.shape)
return rows
def compute_output_shape(self, input_shape):
return ((None, self.dim[1]))
class Question():
def __init__(self, qix, min_diff, max_diff, nt=None, nnw=None):
self.id = qix
# n_c = randint(1,nt)
# n_c = numpy.random.choice([1,2], p=[0.5,0.5])
n_c = nt
choices = numpy.random.choice(range(nt), size=n_c, replace=False)
# mass = numpy.random.uniform(0,(max_diff-min_diff)*len(choices))
not_present= 0#min_diff
self.betas = [ not_present for _ in range(nt) ]
for c in choices:
# self.betas[c] = min_diff
self.betas[c] = numpy.random.uniform(min_diff, max_diff)
class Student():
def __init__(self, ix, min_a, max_a, nt=None, nnw=None):
self.id = ix
self.name = generate_student_name()
n_c = nt
# n_c = numpy.random.choice([1,2], p=[0.5,0.5])
choices = numpy.random.choice(range(nt), size=n_c, replace=False)
# mass = numpy.random.uniform(0,(max_a-min_a)*len(choices))
not_present= 0 #min_a
self.thetas = [ not_present for _ in range(nt) ]
for c in choices:
# self.betas[c] = min_diff
self.thetas[c] = numpy.random.uniform(min_a, max_a)
| true |
4a3e5628f565ff236a04997a7e1763987857bff7 | Python | escape2020/school2022 | /extra/participants.py | UTF-8 | 1,580 | 2.75 | 3 | [
"MIT"
] | permissive | import pandas as pd
from pandas.io.excel._xlrd import XlrdReader
from pandas.io.excel import ExcelFile
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('filename')
args = parser.parse_args()
filename = args.filename
class CustomXlrdReader(XlrdReader):
def load_workbook(self, filepath_or_buffer):
"""Same as original, just uses ignore_workbook_corruption=True)"""
from xlrd import open_workbook
if hasattr(filepath_or_buffer, "read"):
data = filepath_or_buffer.read()
return open_workbook(file_contents=data, ignore_workbook_corruption=True)
else:
return open_workbook(filepath_or_buffer)
ExcelFile._engines['custom_xlrd'] = CustomXlrdReader
print('Monkey patching pandas XLS engines. See CustomXlrdReader')
df = pd.read_excel(filename, engine='custom_xlrd')
speakers = df[df['Catégorie']=='SPEAKER Escape Summer School June 19th to 24th 2022']
print(f"{len(speakers)} speakers")
participants = df[df['Catégorie']!='SPEAKER Escape Summer School June 19th to 24th 2022']
payes = participants[participants['Facture payée']=='Oui']
non_payes = participants[participants['Facture payée']=='Non']
virements_attente = participants[(participants['Paiement']=='VIREMENT') & (participants['Facture payée']=='Non')]
print(f"{len(participants)} participants et {len(payes)} payes\n\n")
print(f"{len(virements_attente)} virements en attente (?)")
inscrits = pd.concat([speakers, payes])
inscrits.to_excel('participants_final.xls')
non_payes.to_excel('participants_attente.xls')
| true |
99e3ae633abf8ecddd9f8bf6606d503f323bc24c | Python | bgmacris/100daysOfCode | /Day76/game.py | UTF-8 | 2,911 | 2.921875 | 3 | [] | no_license | import random
import pygame
import os
import time
NEGRO = (0, 0, 0)
BLANCO = (255, 255, 255)
VERDE = (0, 255, 0)
AZUL = (0, 0, 255)
VIOLETA = (98, 0, 255)
pygame.init()
dimensiones = [300, 300]
root = pygame.display.set_mode(dimensiones)
pygame.display.set_caption('Piedra, Papel, Tijeras')
quit = False
clock = pygame.time.Clock()
global MAQUINA, POSIBILIDADES
MAQUINA = ['PIEDRA', 'PAPEL', 'TIJERAS']
POSIBILIDADES = {
'PIEDRA': 'TIJERAS',
'PAPEL': 'PIEDRA',
'TIJERAS': 'PAPEL'
}
CONT = {
'player': 0,
'pc': 0
}
piedraImg = pygame.image.load(f'{os.path.dirname(__file__)}\\asset\\piedra.png')
papelImg = pygame.image.load(f'{os.path.dirname(__file__)}\\asset\\papel.png')
tijerasImg = pygame.image.load(f'{os.path.dirname(__file__)}\\asset\\tijeras.png')
def jugar(eleccion):
global CONT
pc_choice = random.choice(MAQUINA)
if POSIBILIDADES[pc_choice] == eleccion:
print(f'MAQUINA {pc_choice} GANA {eleccion}')
root.fill(pygame.Color("black"))
CONT['pc'] += 1
return f'MAQUINA {pc_choice} GANA {eleccion}'
elif POSIBILIDADES[eleccion] == pc_choice:
print(f'JUGADOR {eleccion} GANA {pc_choice}')
root.fill(pygame.Color("black"))
CONT['player'] += 1
return f'JUGADOR {eleccion} GANA {pc_choice}'
else:
print(f'EMPATE {pc_choice} {eleccion}')
return f'EMPATE {pc_choice} {eleccion}'
while not quit:
for evento in pygame.event.get():
if evento.type == pygame.QUIT:
quit = True
if evento.type == pygame.MOUSEBUTTONDOWN:
resultado = False
if 20 < mouse[0] < 80 and 175 < mouse[1] < 234:
resultado = jugar("PIEDRA")
if 119 < mouse[0] < 179 and 175 < mouse[1] < 234:
resultado = jugar("PAPEL")
if 219 < mouse[0] < 280 and 175 < mouse[1] < 234:
resultado = jugar("TIJERAS")
if resultado:
root.fill(pygame.Color("black"))
resultado_txt = fuente.render(resultado, True, VERDE)
pygame.display.flip()
root.blit(resultado_txt, [10, 130])
print(mouse)
mouse = pygame.mouse.get_pos()
# print(mouse)
pygame.draw.rect(root, BLANCO, [20, 20, 250, 100], 2)
fuente = pygame.font.Font(None, 25)
player = fuente.render("Jugador", True, VIOLETA)
pc = fuente.render("Ordenador", True, VIOLETA)
root.blit(player, [30, 30])
root.blit(pc, [175, 30])
cont_player = fuente.render(str(CONT['player']), True, AZUL)
cont_pc = fuente.render(str(CONT['pc']), True, AZUL)
root.blit(cont_player, [60, 75])
root.blit(cont_pc, [210, 75])
root.blit(piedraImg, (20, 175))
root.blit(papelImg, (120, 175))
root.blit(tijerasImg, (220, 175))
pygame.display.flip()
pygame.quit()
| true |
e6e18a73f6355f186bd7be3ac53d0376cf950f4f | Python | mrparkonline/python3-euler | /q12.py | UTF-8 | 1,647 | 4.5 | 4 | [
"MIT"
] | permissive | # The sequence of triangle numbers is generated by adding the natural numbers.
# So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28.
# The first ten terms would be:
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
# Let us list the factors of the first seven triangle numbers:
"""
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
"""
import math
def factors(n):
""" Returns the list of n's factors
--param
n : int
--return
list
"""
if n < 1:
return []
elif n in {1,2,3}:
temp = set()
temp.add(1)
temp.add(n)
return list(temp)
else:
temp = set()
temp.add(1)
temp.add(n)
for i in range(2,math.floor(math.sqrt(n))+1):
if n % i == 0:
temp.add(i)
temp.add(n//i)
# end of for
return list(temp)
# end of factors
def triangleNum(upperLimit):
""" Determines the triangle number up to upperLimit
--param
upperLimit : int
--return
integer
"""
return sum(range(1,upperLimit+1))
# end of triangleNum
factorsCount = 0
index = 0
answer = 0
while factorsCount <= 500:
index += 1
temp = len(factors(triangleNum(index)))
if temp > factorsCount:
factorsCount = temp
answer = triangleNum(index)
print(index) # 12375th Triangle Number
print(answer) # 76576500
# Optimization Note:
# Save the factors found in a dictionary 01/22/2018
| true |
81eab7d38b540a8fdf14870451c1a80571372a2c | Python | zhanglong362/zane | /weektest/test2/ATM_chengjunhua/core/src.py | UTF-8 | 5,946 | 2.71875 | 3 | [] | no_license | from interface import user
from lib import common
from interface import bank
import time
logger1=common.get_logger('ATM')
users={'name':None,
'status':False}
# print('注册')
def register():
if users['status']:
print('您已登陆!')
return
while True:
name=input('请输入用户名>>:').strip()
if user.file(name):
print('该用户已注册!')
choice = input('退出请输入q>>: ').strip()
if choice == 'q': return
continue
pwd1=input('请输入密码>>: ').strip()
pwd2=input('请再次输入密码>>:').strip()
if pwd1 != pwd2 :
print('两次密码不一致,请重新输入')
continue
user.update_user(name,pwd1)
print('注册成功!')
break
# print('登陆')
def login():
while True:
if users['status']:
print('您已登陆,无需重复登陆!')
return
name=input('请输入用户名>>: ').strip()
pwd=input('请输入用户密码>>: ').strip()
user_dic = user.file(name)
if not user_dic:
print('该用户不存在')
continue
if user_dic['lock']:
print('该用户已锁定')
choice = input('退出请输入q>>: ').strip()
if choice=='q':break
continue
if pwd == user_dic['password']:
print('登陆成功!')
users['name']=name
users['status']=True
return
count=1
while True:
if count>=3:
print('用户已锁定')
user.lock_user_interface(name)
return
count+=1
print('密码不正确,请重新输入,%s次后将锁定!'%(3-count))
pwd = input('请输入用户密码>>: ').strip()
if pwd == user_dic['password']:
print('登陆成功!')
users['name'] = name
users['status'] = True
return
# print('查看余额')
@common.login_auth
def look_money():
user_dic = user.file(users['name'])
print('''
尊敬的:%s
您的余额为:%s
您的信用额度还剩:%s'''%(user_dic['name'],user_dic['balance'],user_dic['account']))
choice = input('退出请输入q>>: ').strip()
if choice == 'q':return
# print('转账')
@common.login_auth
def transfer_accounts():
while True:
user_self = user.file(users['name'])
side_name=input('请输入收款账号>>: ').strip()
user_side=user.file(side_name)
if not user_side:
print('该用户不存在!')
continue
if side_name==users['name']:
print('不能转给自己!')
continue
money=input('请输入转账金额>>: ').strip()
if not money.isdigit():
print('钱必须是数字!')
continue
money=int(money)
if user_self['balance'] < money:
print('傻叉钱你没那么多钱!')
continue
user_self['balance']-=money
user_side['balance']+=money
bank.update_money(user_self)
bank.update_money(user_side)
debug=('%s向%s转账%s成功!'%(user_self['name'],user_side['name'],money))
logger1.debug(debug)
choice = input('退出请输入q>>: ').strip()
if choice == 'q': return
# print('还款')
@common.login_auth
def repayment():
while True:
user_self=user.file(users['name'])
account=15000-user_self['account']
print('您本期需要还款的金额为:%s'%account)
money=input('请输入还款金额: ').strip()
if not money.isdigit():
print('钱必须是数字!')
continue
money = int(money)
if user_self['balance'] < money:
print('傻叉钱你没那么多钱!')
continue
user_self['balance']-=money
user_self['account']+=money
bank.update_money(user_self)
debug=('%s还款%s,当前信用可用额度为:%s'%(user_self['name'],money,user_self['account']))
logger1.debug(debug)
choice = input('退出请输入q>>: ').strip()
if choice == 'q': return
# print('取款')
@common.login_auth
def draw_money():
while True:
money=input('请输入取款金额: ').strip()
user_self = user.file(users['name'])
if not money.isdigit():
print('钱必须是数字!')
continue
money = int(money)
if user_self['account'] < money:
print('傻叉钱你没那么多额度了!')
continue
money1=(money*0.05)
money2=money-money1
user_self['account'] -= money
user_self['balance'] += money2
bank.update_money(user_self)
debug = ('%s提现:%s,当前信用可用额度为:%s 手续费:%s' % (user_self['name'],money2,user_self['account'],money1))
logger1.debug(debug)
choice = input('退出请输入q>>: ').strip()
if choice == 'q': return
def illegality():
print('非法输入!')
dic={'1':register,
'2':login,
'3':look_money,
'4':transfer_accounts,
'5':repayment,
'6':draw_money,
}
def run():
while True:
print('''
1、注册
2、登陆
3、查看余额
4、转账
5、还款
6、取款
''')
choice=input('输入序号选择功能,q退出>>: ').strip()
if choice=='q':break
function=dic[choice] if choice in dic else illegality
function()
| true |
4ad3fbe3437bd9afc74064097e3eb7a2eb792a0c | Python | YorkShen/LeetCode | /python/week2/241.py | UTF-8 | 1,385 | 3.34375 | 3 | [] | no_license | import operator
class Solution(object):
func_map = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
}
def __init__(self):
self.cache = {}
def diffWaysToCompute(self, input):
"""
:type input: str
:rtype: List[int]
"""
return self.__compute(input, 0, len(input))
def __compute(self, input, start, end):
cur_input = input[start:end]
if cur_input.isdigit():
return [int(cur_input)]
else:
ret = []
for index in xrange(start, end):
if input[index] in "+-*":
if (start, index) in self.cache:
before = self.cache[(start, index)]
else:
before = self.__compute(input, start, index)
self.cache[(start, index)] = before
if (index + 1, end) in self.cache:
after = self.cache[(index + 1, end)]
else:
after = self.__compute(input, index + 1, end)
self.cache[index + 1, end] = after
temp = [Solution.func_map[input[index]](i, j) for i in before for j in after]
ret.extend(temp)
return ret
s = Solution()
print s.diffWaysToCompute("2-1-1")
| true |
dafb8260258ef4561ef2918d29dbdd2780efcca1 | Python | hardr0m/geek-python | /geek-python/Khrapov_Roman_lesson4/task6.py | UTF-8 | 2,135 | 4.25 | 4 | [] | no_license | # Реализовать два небольших скрипта:
# а) итератор, генерирующий целые числа, начиная с указанного,
# б) итератор, повторяющий элементы некоторого списка, определенного заранее.
#
# Подсказка: использовать функцию count() и cycle() модуля itertools.
# Обратите внимание, что создаваемый цикл не должен быть бесконечным.
# Необходимо предусмотреть условие его завершения.
# Например, в первом задании выводим целые числа, начиная с 3, а при достижении числа 10 завершаем цикл.
# Во втором также необходимо предусмотреть условие, при котором повторение элементов списка будет прекращено.
from typing import Iterable
from itertools import cycle
def get_repeated(iterable: Iterable, count: int):
if not isinstance(count, int):
raise TypeError(f"count '{count.__class__.__name__}' is illegat type")
if count < 0:
raise ValueError(f"count 'can't be less than 0")
# убираем брекется и получаем стандартный режим работы sycle
iterator = cycle([iterable])
while count:
yield next(iterator)
count -= 1
if __name__ == '__main__':
input_data = input('Пожалуйста введите целые числа разделяя их пробелами (максимум 4 числа): ')
repeate = input('Сколько раз повторить выше введенную последовательность?: ')
try:
source_list = [int(i) for i in input_data.split()][:4]
repeate = int(repeate)
except ValueError:
print('Неверно введенные данные')
exit(1)
print(list(get_repeated(source_list, repeate)))
| true |
a1d060901a7729b87e6c3747e41f9a5defa0b66a | Python | freddyfok/cs_with_python | /problems/leetcode/101_symmetric_tree.py | UTF-8 | 698 | 3.5625 | 4 | [] | no_license | """
Return true if left of the center is
"""
from queue import Queue
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def is_symmetric(root: TreeNode) -> bool:
q = Queue()
q.put(root)
q.put(root)
while not q.empty():
left = q.get()
right = q.get()
if left is None and right is None:
continue
if left is None or right is None:
return False
if left.value != right.value:
return False
q.put(left.left)
q.put(right.right)
q.put(left.right)
q.put(right.left)
return True
| true |
d56dd944fdab677bbaff522ab477ce65a0272d96 | Python | chenjiayu1502/NER | /model_on_crf.py | UTF-8 | 15,361 | 2.578125 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.nn.init as I
import torch.nn.utils.rnn as R
from torch.autograd import Variable
import numpy as np
def log_sum_exp(vec, dim=0):
    """Numerically stable log(sum(exp(vec))) reduced along `dim`.

    Uses the standard log-sum-exp trick: subtract the per-slice maximum
    before exponentiating so large scores cannot overflow, then add it
    back outside the log.
    """
    peak, _ = torch.max(vec, dim)
    shifted = vec - peak.unsqueeze(-1).expand_as(vec)
    return peak + torch.log(torch.exp(shifted).sum(dim))
class CRF(nn.Module):
    """Linear-chain Conditional Random Field scoring layer.

    Operates on batches of per-timestep label scores ("logits") produced
    by an upstream encoder.  Two label indices are reserved at the end of
    the label vocabulary: ``n_labels - 2`` is the synthetic <start> label
    and ``n_labels - 1`` the synthetic <stop> label.

    ``transitions[i][j]`` is the learned score of moving FROM label ``j``
    TO label ``i`` (established by how it is combined with ``alpha`` along
    dim 2 in ``forward``).
    """

    def __init__(self, vocab_size):
        # vocab_size: total label count INCLUDING the reserved
        # <start>/<stop> entries at the two highest indices.
        super(CRF, self).__init__()
        self.vocab_size = vocab_size
        self.n_labels = n_labels = vocab_size
        self.start_idx = n_labels - 2
        self.stop_idx = n_labels - 1
        self.transitions = nn.Parameter(torch.randn(n_labels, n_labels))

    def reset_parameters(self):
        # Re-draw the transition matrix in place from a standard normal.
        I.normal(self.transitions.data, 0, 1)

    def forward(self, logits, lens):
        """Log partition function (normalizer) of each sequence.

        Runs the forward algorithm in log space over variable-length,
        right-padded sequences.

        Arguments:
            logits: [batch_size, seq_len, n_labels] FloatTensor
            lens: [batch_size] LongTensor

        Returns:
            [batch_size] FloatTensor: log-sum-exp score over all label paths.
        """
        batch_size, seq_len, n_labels = logits.size()
        # alpha[b][l]: running log-sum score of all partial paths ending in
        # label l.  -10000 approximates -inf; only <start> begins with mass.
        alpha = logits.data.new(batch_size, self.n_labels).fill_(-10000)
        alpha[:, self.start_idx] = 0
        alpha = Variable(alpha)
        # Counts down each step; once a sequence's count reaches 0 its
        # alpha is frozen (handles padding past the true length).
        c_lens = lens.clone()

        logits_t = logits.transpose(1, 0)  # iterate over time steps
        for logit in logits_t:
            # Shape everything to [batch, n_labels(to=i), n_labels(from=j)].
            logit_exp = logit.unsqueeze(-1).expand(batch_size,
                                                   *self.transitions.size())
            alpha_exp = alpha.unsqueeze(1).expand(batch_size,
                                                  *self.transitions.size())
            trans_exp = self.transitions.unsqueeze(0).expand_as(alpha_exp)
            # mat[b][i][j] = transitions[i][j] + alpha[b][j] + logit[b][i]
            mat = trans_exp + alpha_exp + logit_exp
            # Marginalize over the previous label j.  (.squeeze(-1) is a
            # no-op on modern PyTorch; presumably kept for old versions
            # where reductions retained the reduced dim — TODO confirm.)
            alpha_nxt = log_sum_exp(mat, 2).squeeze(-1)

            # Advance alpha only for sequences that still have tokens left.
            mask = (c_lens > 0).float().unsqueeze(-1).expand_as(alpha)
            alpha = mask * alpha_nxt + (1 - mask) * alpha
            c_lens = c_lens - 1

        # Fold in the transition into <stop>, then marginalize the last label.
        alpha = alpha + self.transitions[self.stop_idx].unsqueeze(0).expand_as(alpha)
        norm = log_sum_exp(alpha, 1).squeeze(-1)

        return norm

    def viterbi_decode(self, logits, lens):
        """Best-scoring label path per sequence (Viterbi algorithm).

        Borrowed from pytorch tutorial.

        Arguments:
            logits: [batch_size, seq_len, n_labels] FloatTensor
            lens: [batch_size] LongTensor

        Returns:
            (scores, paths): [batch_size] best path scores and
            [batch_size, seq_len] LongTensor of decoded label indices.
        """
        batch_size, seq_len, n_labels = logits.size()
        # vit[b][l]: score of the best partial path ending in label l.
        vit = logits.data.new(batch_size, self.n_labels).fill_(-10000)
        vit[:, self.start_idx] = 0
        vit = Variable(vit)
        c_lens = lens.clone()

        logits_t = logits.transpose(1, 0)
        # pointers[t][b][i]: argmax previous label when step t ends in i.
        pointers = []
        for logit in logits_t:
            vit_exp = vit.unsqueeze(1).expand(batch_size, n_labels, n_labels)
            trn_exp = self.transitions.unsqueeze(0).expand_as(vit_exp)
            vit_trn_sum = vit_exp + trn_exp
            # Max (not sum) over the previous label j; remember the argmax.
            vt_max, vt_argmax = vit_trn_sum.max(2)
            vt_max = vt_max.squeeze(-1)
            vit_nxt = vt_max + logit
            pointers.append(vt_argmax.squeeze(-1).unsqueeze(0))

            # Advance vit only while the sequence still has tokens.
            mask = (c_lens > 0).float().unsqueeze(-1).expand_as(vit_nxt)
            vit = mask * vit_nxt + (1 - mask) * vit

            # At the final real token, add the transition into <stop>.
            mask = (c_lens == 1).float().unsqueeze(-1).expand_as(vit_nxt)
            vit += mask * self.transitions[ self.stop_idx ].unsqueeze(0).expand_as(vit_nxt)

            c_lens = c_lens - 1

        # Backtrack from the best final label through the pointer stack.
        pointers = torch.cat(pointers)
        scores, idx = vit.max(1)
        idx = idx.squeeze(-1)
        paths = [idx.unsqueeze(1)]

        for argmax in reversed(pointers):
            idx_exp = idx.unsqueeze(-1)
            idx = torch.gather(argmax, 1, idx_exp)
            idx = idx.squeeze(-1)

            paths.insert(0, idx.unsqueeze(1))

        # Drop the first entry (the synthetic <start> position).
        paths = torch.cat(paths[1:], 1)
        scores = scores.squeeze(-1)

        return scores, paths

    def transition_score(self, labels, lens):
        """Sum of transition scores along each gold label sequence.

        Pads every sequence with <start> in front and <stop> after its
        true length, then gathers transitions[next][prev] for each
        consecutive label pair.

        Arguments:
            labels: [batch_size, seq_len] LongTensor
            lens: [batch_size] LongTensor

        Returns:
            [batch_size] FloatTensor of summed transition scores.
        """
        batch_size, seq_len = labels.size()

        # pad labels with <start> and <stop> indices
        labels_ext = Variable(labels.data.new(batch_size, seq_len + 2))
        labels_ext[:, 0] = self.start_idx
        labels_ext[:, 1:-1] = labels
        # sequence_mask is defined elsewhere in this module (not visible
        # here); it marks positions < len as 1.  Everything past the true
        # length is overwritten with the <stop> index.
        mask = sequence_mask(lens + 1, max_len=seq_len + 2).long()
        pad_stop = Variable(labels.data.new(1).fill_(self.stop_idx))
        pad_stop = pad_stop.unsqueeze(-1).expand(batch_size, seq_len + 2)
        labels_ext = (1 - mask) * pad_stop + mask * labels_ext
        labels = labels_ext

        trn = self.transitions

        # obtain transition vector for each label in batch and timestep
        # (except the last ones): rows of `trn` indexed by the NEXT label.
        trn_exp = trn.unsqueeze(0).expand(batch_size, *trn.size())
        lbl_r = labels[:, 1:]
        lbl_rexp = lbl_r.unsqueeze(-1).expand(*lbl_r.size(), trn.size(0))
        trn_row = torch.gather(trn_exp, 1, lbl_rexp)

        # obtain transition score from the transition vector for each label
        # in batch and timestep (except the first ones): index each row by
        # the PREVIOUS label, yielding transitions[next][prev].
        lbl_lexp = labels[:, :-1].unsqueeze(-1)
        trn_scr = torch.gather(trn_row, 2, lbl_lexp)
        trn_scr = trn_scr.squeeze(-1)

        # Keep only the len+1 real transitions (incl. <start>-> and -><stop>).
        mask = sequence_mask(lens + 1).float()
        trn_scr = trn_scr * mask
        score = trn_scr.sum(1).squeeze(-1)

        return score
class LSTMCRF(nn.Module):
    """Multi-feature (bi)LSTM encoder with a CRF output layer.

    Each of the ``n_feats`` parallel input feature sequences has its own
    embedding table; the embeddings are concatenated, projected to the
    LSTM input size, encoded by a (bi)LSTM and projected to per-label
    scores consumed by ``self.crf``.

    Fix over the original: removed leftover debug ``print`` calls in
    ``score`` and ``predict`` that polluted stdout on every call.
    """

    def __init__(self, crf, vocab_sizes, word_dims, hidden_dim, layers,
                 dropout_prob, bidirectional=True):
        """
        Arguments:
            crf: CRF instance; its ``n_labels`` fixes the output size.
            vocab_sizes: per-feature vocabulary sizes.
            word_dims: per-feature embedding dimensions (same length as
                vocab_sizes).
            hidden_dim: LSTM hidden size (output doubled if bidirectional).
            layers: number of stacked LSTM layers.
            dropout_prob: inter-layer LSTM dropout probability.
            bidirectional: whether the encoder LSTM is bidirectional.
        """
        super(LSTMCRF, self).__init__()

        self.n_feats = len(word_dims)
        self.total_word_dim = sum(word_dims)
        self.word_dims = word_dims
        self.hidden_dim = hidden_dim
        self.lstm_layers = layers
        self.dropout_prob = dropout_prob
        self.is_cuda = False
        self.crf = crf
        self.bidirectional = bidirectional
        self.n_labels = n_labels = self.crf.n_labels

        # One embedding table per input feature sequence.
        self.embeddings = nn.ModuleList(
            [nn.Embedding(vocab_size, word_dim)
             for vocab_size, word_dim in zip(vocab_sizes, word_dims)]
        )
        self.output_hidden_dim = self.hidden_dim
        if bidirectional:
            self.output_hidden_dim *= 2

        self.tanh = nn.Tanh()
        self.input_layer = nn.Linear(self.total_word_dim, hidden_dim)
        self.output_layer = nn.Linear(self.output_hidden_dim, n_labels)
        self.lstm = nn.LSTM(input_size=hidden_dim,
                            hidden_size=hidden_dim,
                            num_layers=layers,
                            bidirectional=bidirectional,
                            dropout=dropout_prob,
                            batch_first=True)

    def reset_parameters(self):
        """Re-initialize embeddings, projections, CRF and LSTM weights."""
        for emb in self.embeddings:
            I.xavier_normal(emb.weight.data)

        I.xavier_normal(self.input_layer.weight.data)
        I.xavier_normal(self.output_layer.weight.data)
        self.crf.reset_parameters()
        self.lstm.reset_parameters()

    def _run_rnn_packed(self, cell, x, x_lens, h=None):
        """Run `cell` over `x` packed to the true lengths `x_lens`.

        Returns the re-padded [batch, max_len, *] output and the final
        hidden state.  `h`, when given, is the initial hidden state.
        """
        x_packed = R.pack_padded_sequence(x, x_lens.data.tolist(),
                                          batch_first=True)

        if h is not None:
            output, h = cell(x_packed, h)
        else:
            output, h = cell(x_packed)

        output, _ = R.pad_packed_sequence(output, batch_first=True)
        return output, h

    def _embeddings(self, xs):
        """Takes raw feature sequences and produces a single word embedding

        Arguments:
            xs: [n_feats, batch_size, seq_len] LongTensor

        Returns:
            [batch_size, seq_len, word_dim] FloatTensor
        """
        n_feats, batch_size, seq_len = xs.size()
        assert n_feats == self.n_feats

        # Embed each feature sequence separately, then concatenate along
        # the feature (last) dimension.
        res = [emb(x) for emb, x in zip(self.embeddings, xs)]
        x = torch.cat(res, 2)

        return x

    def _forward_bilstm(self, xs, lens):
        """Encode feature sequences into per-label emission scores.

        Arguments:
            xs: [n_feats, batch_size, seq_len] LongTensor
            lens: [batch_size] LongTensor of true sequence lengths

        Returns:
            [batch_size, seq_len, n_labels] FloatTensor (tanh-squashed).
        """
        n_feats, batch_size, seq_len = xs.size()

        x = self._embeddings(xs)
        x = x.view(-1, self.total_word_dim)
        x = self.tanh(self.input_layer(x))
        x = x.view(batch_size, seq_len, self.hidden_dim)

        o, h = self._run_rnn_packed(self.lstm, x, lens)
        o = o.contiguous()
        o = o.view(-1, self.output_hidden_dim)
        o = self.tanh(self.output_layer(o))
        o = o.view(batch_size, seq_len, self.n_labels)

        return o

    def _bilstm_score(self, logits, y, lens):
        """Per-sequence sum of emission scores at the gold labels `y`,
        masked past each sequence's true length."""
        y_exp = y.unsqueeze(-1)
        scores = torch.gather(logits, 2, y_exp).squeeze(-1)
        # sequence_mask is defined elsewhere in this module.
        mask = sequence_mask(lens).float()
        scores = scores * mask
        score = scores.sum(1).squeeze(-1)

        return score

    def score(self, xs, y, lens, logits=None):
        """CRF path score of gold labels: transition + emission score.

        `logits` may be passed in to avoid recomputing the encoder pass.
        """
        if logits is None:
            logits = self._forward_bilstm(xs, lens)

        transition_score = self.crf.transition_score(y, lens)
        bilstm_score = self._bilstm_score(logits, y, lens)
        score = transition_score + bilstm_score

        return score

    def predict(self, xs, lens, return_scores=False):
        """Viterbi-decode the best label path for each sequence.

        Returns the [batch_size, seq_len] predicted label indices, plus
        the path scores when `return_scores` is True.
        """
        logits = self._forward_bilstm(xs, lens)
        scores, preds = self.crf.viterbi_decode(logits, lens)

        if return_scores:
            return preds, scores
        else:
            return preds

    def loglik(self, xs, y, lens, return_logits=False):
        """Log-likelihood of `y`: gold path score minus log partition."""
        logits = self._forward_bilstm(xs, lens)
        norm_score = self.crf(logits, lens)
        sequence_score = self.score(xs, y, lens, logits=logits)
        loglik = sequence_score - norm_score

        if return_logits:
            return loglik, logits
        else:
            return loglik
def mask_bios(xs, y, flag):
    """Build a per-token weight matrix from the gold labels.

    Arguments:
        xs: [seq_len, label_size] tensor — only its shape is used.
        y: gold label tensor (anything exposing ``.data``), length seq_len.
        flag: label id considered "background"; its tokens get weight 1.0,
            every other gold label gets weight 50.0.

    Returns:
        [seq_len, label_size] FloatTensor that is zero everywhere except at
        each token's gold-label column.
    """
    seq_len, label_size = xs.size()
    weights = torch.FloatTensor(seq_len, label_size).zero_()
    for pos, label in enumerate(y.data.numpy().tolist()):
        weights[pos][label] = 1.0 if label == flag else 50.0
    return weights
def myloss(logits, y, lens, flag):
    """Weighted negative-log loss over (softmax-normalized) label scores.

    Arguments:
        logits: [batch_size, seq_len, label_size] FloatTensor of
            probabilities (callers apply softmax before calling this).
        y: flat LongTensor of gold labels, length batch_size * seq_len.
        lens: unused here; kept for signature compatibility with callers.
        flag: label id weighted 1.0; all other gold labels weigh 50.0
            (see ``mask_bios``).

    Returns:
        A scalar tensor: the sum over all tokens of -log(p) * weight.
    """
    batch_size, seq_len, label_size = logits.size()
    logits = logits.view(-1, label_size)
    # Per-token weights built from the gold labels; no gradient needed.
    bios = Variable(mask_bios(logits, y, flag).float(), requires_grad=False)
    scores = -torch.log(logits) * bios
    # The reshapes the original performed before summation were no-ops and
    # were dropped; an unused ``y.unsqueeze(-1)`` temporary was dead code.
    return torch.sum(scores)
class LSTMLSTM(nn.Module):
    """Wraps a pretrained BiLSTM tagger (``model``) and stacks a second,
    unidirectional LSTM on top of its recurrent layer.

    The wrapped ``model`` supplies the embeddings, the input/output
    projections and the first (bidirectional) LSTM; ``lstm2`` consumes that
    LSTM's output, hence the ``hidden_dim * 2`` input size.
    """

    def __init__(self, model, bidirectional=True):
        # ``bidirectional`` is accepted for interface compatibility but the
        # stacked LSTM is always unidirectional.
        super(LSTMLSTM, self).__init__()
        self.model = model
        self.lstm2 = nn.LSTM(input_size=self.model.hidden_dim * 2,
                             hidden_size=self.model.hidden_dim * 2,
                             num_layers=self.model.lstm_layers,
                             bidirectional=False,
                             dropout=self.model.dropout_prob,
                             batch_first=True)

    def reset_parameters(self):
        """Re-initialize both the wrapped model and the stacked LSTM."""
        # Delegates to the module-level ``reset_parameters`` helper.
        self.model = reset_parameters(self.model)
        self.lstm2.reset_parameters()

    def _run_rnn_packed(self, cell, cell2, x, x_lens, h=None):
        """Run ``cell`` then ``cell2`` over a packed, padded batch.

        ``h`` is accepted for signature compatibility but both cells start
        from their default (zero) hidden state.
        """
        packed = R.pack_padded_sequence(x, x_lens.data.tolist(),
                                        batch_first=True)
        out, hidden = cell(packed)
        out, hidden = cell2(out)
        out, _ = R.pad_packed_sequence(out, batch_first=True)
        return out, hidden

    def _forward_bilstm(self, xs, lens):
        """Per-token label logits, reusing the wrapped model's layers."""
        n_feats, batch_size, seq_len = xs.size()
        hidden = self.model._embeddings(xs).view(-1, self.model.total_word_dim)
        hidden = self.model.tanh(self.model.input_layer(hidden))
        hidden = hidden.view(batch_size, seq_len, self.model.hidden_dim)
        out, _ = self._run_rnn_packed(self.model.lstm, self.lstm2, hidden, lens)
        out = out.contiguous().view(-1, self.model.output_hidden_dim)
        out = self.model.tanh(self.model.output_layer(out))
        return out.view(batch_size, seq_len, self.model.n_labels)

    def predict(self, xs, lens):
        """Greedy (argmax) decoding: a [batch_size, seq_len] label tensor."""
        logits = self._forward_bilstm(xs, lens)
        batch_size, seq_len, n_labels = logits.size()
        flat = logits.contiguous().view(-1, n_labels)
        best = torch.max(flat, 1)[1]
        return best.view(batch_size, seq_len)

    def loglik(self, xs, y, lens):
        """Return ``(loss, probs)``: the weighted NLL from ``myloss`` and the
        softmax-normalized label scores."""
        logits = self._forward_bilstm(xs, lens)
        batch_size, seq_len, n_labels = logits.size()
        probs = torch.nn.Softmax()(logits.contiguous().view(-1, n_labels))
        probs = probs.view(batch_size, seq_len, n_labels)
        gold = y.contiguous().view(batch_size * seq_len)
        score = myloss(probs, gold, lens, flag=2)
        return score, probs
def load_embedding(model, path='pkl/embeddings0607.pkl'):
    """Replace ``model``'s embedding tables with pretrained ones.

    Arguments:
        model: the tagger whose ``embeddings`` attribute is overwritten.
        path: pickle file to load; defaults to the previously hard-coded
            location, so existing callers are unaffected.

    Returns:
        The same ``model`` instance, mutated in place.
    """
    model.embeddings = torch.load(path)
    return model
def reset_parameters(model):
    """Re-initialize the tagger's learnable weights in place.

    Xavier-initializes the input/output projections and resets the LSTM,
    then returns the same ``model`` for call-chaining.
    """
    print('reseting..........')
    for layer in (model.input_layer, model.output_layer):
        I.xavier_normal(layer.weight.data)
    model.lstm.reset_parameters()
    return model
def predict(model, xs, lens):
    """Greedy per-token decoding: argmax over label logits (no CRF).

    Returns a [batch_size, seq_len] LongTensor of label ids.
    """
    logits = model._forward_bilstm(xs, lens)
    batch_size, seq_len, n_labels = logits.size()
    flat = logits.contiguous().view(-1, n_labels)
    best = torch.max(flat, 1)[1]
    return best.view(batch_size, seq_len)
def loglik(model, xs, y, lens, flag=2):
    """Weighted NLL loss of ``model`` on ``(xs, y)``.

    Returns ``(loss, probs)`` where ``probs`` are the softmax-normalized
    label scores of shape [batch_size, seq_len, n_labels].
    """
    logits = model._forward_bilstm(xs, lens)
    batch_size, seq_len, n_labels = logits.size()
    probs = torch.nn.Softmax()(logits.contiguous().view(-1, n_labels))
    probs = probs.view(batch_size, seq_len, n_labels)
    gold = y.contiguous().view(batch_size * seq_len)
    score = myloss(probs, gold, lens, flag)
    return score, probs
class TransparentDataParallel(nn.DataParallel):
    """``nn.DataParallel`` that transparently proxies attribute access and
    ``state_dict`` to the wrapped module, so callers can treat it as a
    drop-in replacement for the original model."""

    def __init__(self, *args, **kwargs):
        super(TransparentDataParallel, self).__init__(*args, **kwargs)

    def __getattr__(self, name):
        # Give nn.Module's lookup (parameters/buffers/submodules) the first
        # chance; fall back to the wrapped module's plain attributes.
        try:
            return super(TransparentDataParallel, self).__getattr__(name)
        except AttributeError:
            wrapped = self.__dict__["_modules"]["module"]
            # Direct ``__getattribute__`` skips the wrapped module's own
            # ``__getattr__`` fallback (behavior preserved from the original).
            return wrapped.__getattribute__(name)

    def state_dict(self, *args, **kwargs):
        # Save under the wrapped module's key names, without the
        # DataParallel ``module.`` prefix.
        return self.module.state_dict(*args, **kwargs)
def sequence_mask(lens, max_len=None):
    """Build a [batch_size, max_len] mask that is true at positions before
    each sequence's length and false at padding positions.

    Arguments:
        lens: 1-D tensor of sequence lengths, one per batch element.
        max_len: mask width; defaults to ``lens.max()``.
    """
    batch_size = lens.size(0)
    if max_len is None:
        # ``lens.max().data[0]`` was legacy pre-0.4 indexing of a 0-dim
        # tensor and raises IndexError on modern PyTorch; ``int()`` works
        # on both old and new versions.
        max_len = int(lens.max())
    ranges = torch.arange(0, max_len).long()
    ranges = ranges.unsqueeze(0).expand(batch_size, max_len)
    ranges = Variable(ranges)
    if lens.data.is_cuda:
        ranges = ranges.cuda()
    lens_exp = lens.unsqueeze(1).expand_as(ranges)
    mask = ranges < lens_exp
    return mask
| true |
078c73cade0e49fdb523587d911e7b6ca12283c5 | Python | ugly113/RPS | /main.py | UTF-8 | 1,387 | 4.03125 | 4 | [] | no_license | import random
# List for computer to choose from
rps = ['rock', 'paper', 'scissors']
# Displaying the results
def lose(computer):
print(f'\nI picked {computer}, you lose!')
def win(computer):
print(f'\nI picked {computer}, you win!')
def tie(computer):
print(f'\nI pick {computer} as well, it\'s a tie!')
# Main game play
def game():
player = input('R_ock - P_aper - S_cissors? or Q_uit: ')
computer = random.choice(rps)
if player.lower() == 'r':
if computer == 'paper':
lose(computer)
elif computer == 'scissors':
win(computer)
else:
tie(computer)
elif player.lower() == 'p':
if computer == 'scissors':
lose(computer)
elif computer == 'rock':
win(computer)
else:
tie(computer)
elif player.lower() == 's':
if computer == 'rock':
lose(computer)
elif computer == 'paper':
win(computer)
else:
tie(computer)
elif player.lower() == 'q':
print(f'\n')
x_check = input('Are you sure? y/n ')
if x_check.lower() == 'y':
exit()
else:
print(f'\n')
game()
else:
print(f'\nThat\'s not a choice')
print(f'\n')
game()
print(f'\n')
game()
if __name__=='__main__':
game()
| true |
b4b38871ea21c7ec93c9b0beb36fcbbcb0f3cb69 | Python | sinandylmz/Alistirma_1 | /1_8.py | UTF-8 | 213 | 2.625 | 3 | [] | no_license | def aynirakam():
sayac=0
for i in range(100,1000):
a=str(i)
if i%2==0 and (a[0]==a[1] or a[0]==a[2] or a[1]==a[2] or a[0]==a[1]==a[2]):
sayac+=1
return sayac
| true |
8a60f772a6aae5d6a5016c6369df572a4ffdab19 | Python | michalisvaz/Ham-or-Spam-classifier | /ig_calculation.py | UTF-8 | 2,180 | 3.046875 | 3 | [] | no_license | from math import log2
# return (ig, p_x1_ham, p_x0_ham)
def calculate_ig(x1, x1_ham, x1_spam, total_ham, total_spam):
total_mails = total_ham + total_spam
if x1 == 0 or x1 == total_mails:
return (0, total_ham/total_mails, total_ham/total_mails)
x0 = total_mails - x1
x0_spam = total_spam - x1_spam
x0_ham = total_ham - x1_ham
P_ham = total_ham/total_mails
P_spam = 1 - P_ham
# we do not check P_ham > 0 and P_spam > 0
# because if we had only spam or only ham data, the exercise would be pointless
h = - P_ham * log2(P_ham) - P_spam * log2(P_spam)
P_x0_spam = x0_spam / x0
P_x0_ham = x0_ham / x0
P_x1_spam = x1_spam / x1
P_x1_ham = x1_ham / x1
# This is to avoid trying to calculate log(0)
# We use the fact that lim(xlogx)=0 as x-->0
# The base of the logarithm makes no difference in the above limit
# Also note that it is impossible P_x0_ham = P_x0_spam = 0 (both probabilities being 0)
# If we didn't do the following (and the above) we had many cases with ig<0
# Now if we use all the pu3 data just to test if we calculate the ig correctly
# (we won't use all the data as training data simultaneously, this was only done to test this function with more data)
# there is only one example with ig<0. The following:
# 1826,-1.8735013540549517e-16
# which is probably due to numerical errors (you can notice that it is very close to 0)
# To correct these numerical we added an if ig<0 at the end of the function
if P_x0_ham == 0:
h0 = - P_x0_spam * log2(P_x0_spam)
elif P_x0_spam == 0:
h0 = - P_x0_ham * log2(P_x0_ham)
else:
h0 = - P_x0_ham * log2(P_x0_ham) - P_x0_spam * log2(P_x0_spam)
# Same as above
if P_x1_ham == 0:
h1 = - P_x1_spam * log2(P_x1_spam)
elif P_x1_spam == 0:
h1 = - P_x1_ham * log2(P_x1_ham)
else:
h1 = - P_x1_ham * log2(P_x1_ham) - P_x1_spam * log2(P_x1_spam)
ig = h - h0 * (x0/total_mails) - h1 * (x1/total_mails)
if ig < 0:
return (0, P_x1_ham, P_x0_ham)
else:
return (ig, P_x1_ham, P_x0_ham)
| true |
078226e4e9533fec6ba6915c0dde7ccf50a8192f | Python | RamonBecker/S.O.L.I.D-Python | /Dependency Inversion Principle/BAD/repo/reports/file_write.py | UTF-8 | 159 | 2.59375 | 3 | [] | no_license |
class ReportFileWriter():
@staticmethod
def write_file(report):
file = open('report.txt', 'w')
file.write(report)
file.close() | true |
930e951968c6f5fcd44972f558762ed464de1147 | Python | cwz920716/GroDrawer | /groDrawer.py | UTF-8 | 14,052 | 3.609375 | 4 | [] | no_license | import sys
import math
import random
import numpy as np
def round3(x):
return float("{0:.3f}".format(x))
class Vec2(object):
def __init__(self, x, y):
self._x = float(x)
self._y = float(y)
@property
def x(self):
return self._x
@x.setter
def x(self, new_x):
self._x = float(new_x)
@property
def y(self):
return self._y
@y.setter
def y(self, new_y):
self._y = float(new_y)
def __add__(self, other):
types = (int, float)
if isinstance(self, types):
return Vec2(self + other.x, self + other.y)
elif isinstance(other, types):
return Vec2(self.x + other, self.y + other)
else:
return Vec2(self.x + other.x, self.y + other.y)
def __truediv__(self, other):
types = (int, float)
if isinstance(self, types):
self = Vec2(self, self)
elif isinstance(other, types):
other = Vec2(other, other)
x = self.x / other.x
y = self.y / other.y
return Vec2(x, y)
def __mul__(self, other):
types = (int, float)
if isinstance(self, types):
return Vec2(self * other.x, self * other.y)
elif isinstance(other, types):
return Vec2(self.x * other, self.y * other)
else:
return Vec2(self.x * other.x, self.y * other.y)
def __neg__(self):
return Vec2(-self.x, -self.y)
def __radd__(self, other):
return Vec2(self.x + other, self.y + other)
def __rdiv__(self, other):
return Vec2(other/self.x, other/self.y)
def __rmul__(self, other):
return Vec2(other * self.x, other * self.y)
def __rsub__(self, other):
return Vec2(other - self.x, other - self.y)
def __repr__(self):
return self.__str__()
def __str__(self):
return "[{0}, {1}]".format(self.x, self.y)
def __sub__(self, other):
types = (int, float)
if isinstance(self, types):
return Vec2(self - other.x, self - other.y)
elif isinstance(other, types):
return Vec2(self.x - other, self.y - other)
else:
return Vec2(self.x - other.x, self.y - other.y)
def ceil(self):
return Vec2(math.ceil(self.x), math.ceil(self.y))
def floor(self):
return Vec2(math.floor(self.x), math.floor(self.y))
def get_data(self):
return (self.x, self.y)
def inverse(self):
return Vec2(1.0/self.x, 1.0/self.y)
def length(self):
return math.sqrt(self.square_length())
def normalize(self):
length = self.length()
if length == 0.0:
return Vec2(0, 0)
return Vec2(self.x/length, self.y/length)
def round(self):
return Vec2(round(self.x), round(self.y))
def square_length(self):
return (self.x * self.x) + (self.y * self.y)
def rotate90(self):
return Vec2(-self.y, self.x)
@classmethod
def distance(cls, a, b):
c = b - a
return c.length()
@classmethod
def dot(self, a, b):
return (a.x * b.x) + (a.y * b.y)
@classmethod
def equals(cls, a, b, tolerance=0.0):
diff = a - b
dx = math.fabs(diff.x)
dy = math.fabs(diff.y)
if dx <= tolerance * max(1, math.fabs(a.x), math.fabs(b.x)) and \
dy <= tolerance * max(1, math.fabs(a.y), math.fabs(b.y)):
return True
return False
@classmethod
def max(cls, a, b):
x = max(a.x, b.x)
y = max(a.y, b.y)
return Vec2(x, y)
@classmethod
def min(cls, a, b):
x = min(a.x, b.x)
y = min(a.y, b.y)
return Vec2(x, y)
@classmethod
def mix(cls, a, b, t):
return a * t + b * (1-t)
@classmethod
def random(cls):
x = random.random()
y = random.random()
return Vec2(x, y)
@classmethod
def square_distance(cls, a, b):
c = b - a
return c.square_length()
@property
def groPosition(self):
return self * 150
class Point(Vec2):
pass
"""
Linear intERPolate between a and b for x ranging from 0 to 1
"""
def lerp(a, b, x):
return round3(a * (1.0 - x) + b * x)
class Line(object):
def __init__ (self, v0, v1, color = 'green', die_outer = False):
self._v0 = v0
self._v1 = v1
if self.length < 0.5:
print("WARNING: line length < 0.5 is hard to visualize using gro.")
if self.length > 5.0:
print("WARNING: line length > 5.0 is too large for gro screen.")
self._color = color
self._id = -1
self._die_outer = die_outer
@property
def id(self):
return self._id
@id.setter
def id(self, i):
self._id = i
@property
def v0(self):
return self._v0
@v0.setter
def v0(self, v):
self._v0 = v
@property
def v1(self):
return self._v1
@v1.setter
def v1(self, v):
self._v1 = v
@property
def color(self):
return self._color
@property
def die_outer(self):
return self._die_outer
@property
def vector(self):
return self.v1 - self.v0
@property
def dir(self):
return self.vector.normalize()
@property
def length(self):
return self.vector.length()
@property
def center(self):
return (self.v1 + self.v0) / 2.0
def signals(self):
d = self.dir.rotate90()
outer_s = self.center + d
inner_s = self.center - d
return [outer_s, inner_s]
@property
def outer_signal_id(self):
return 2 * self.id
@property
def inner_signal_id(self):
return self.outer_signal_id + 1
@property
def signalStrength(self):
l = self.length
if l <= 0.5:
return 0.7
elif l > 0.5 and l <= 1.0:
x = (l - 0.5) / (1.0 - 0.5)
return lerp(0.7, 0.5, x)
elif l > 1.0 and l <= 1.5:
x = (l - 1.0) / (1.5 - 1.0)
return lerp(0.5, 0.375, x)
elif l > 1.5 and l <= 2.0:
x = (l - 1.5) / (2.0 - 1.5)
return lerp(0.375, 0.25, x)
elif l > 2.0 and l <= 2.5:
x = (l - 2.0) / (2.5 - 2.0)
return lerp(0.25, 0.175, x)
elif l > 2.5 and l <= 3.0:
x = (l - 2.5) / (3.0 - 2.5)
return lerp(0.175, 0.1, x)
elif l > 3.0 and l <= 4.0:
x = (l - 3.0) / (4.0 - 3.0)
return lerp(0.1, 0.01, x)
elif l > 4.0 and l <= 5.0:
x = (l - 4.0) / (5.0 - 4.0)
return lerp(0.01, 0.005, x)
else:
return 0.005
def __repr__(self):
return self.__str__()
def __str__(self):
return "{0} -> {1} color={2}".format(self.v0, self.v1, self.color)
groHeader = """//
// This file is generated by groDrawer.py
//
include gro
set("population_max", 2000);
fun close x y .
if x = 0 | y = 0 then
1.1
else
if x > y then
(x / y - 1)
else
(y / x - 1)
end
end;
MAX_DIFF := 0.5;
DIE_DIFF := 0.75;
"""
groColors = """
gfp := 0;
rfp := 0;
bfp := 0;
cfp := 0;
yfp := 0;
"""
class GroPrinter(object):
def __init__(self):
self._sstream = ''
self._indent = 0
self._signals = 0
def genPrologue(self):
self.sstream += groHeader
return self
@property
def sstream(self):
return self._sstream
@property
def indent(self):
return self._indent
@property
def signals(self):
return self._signals
@sstream.setter
def sstream(self, new_sstream):
self._sstream = new_sstream
@indent.setter
def indent(self, new_indent):
self._indent = new_indent
def __repr__(self):
return self.__str__()
def __str__(self):
return self.sstream
@property
def line_begin(self):
r = ""
for i in range(self.indent):
r += " "
return r
@property
def line_end(self):
return "\n"
@property
def new_line(self):
return self.line_end
def blank_line(self):
self.sstream += self.new_line
return self
def start_program(self, prog):
self.sstream += self.line_begin + "program {0}() := {{".format(prog) + self.line_end
self.indent += 1
return self
def launch_program(self, prog):
self.sstream += self.line_begin + "ecoli ( [], program {0}() );".format(prog) + self.line_end
return self
def declare_colors(self):
self.sstream += groColors
return self
def color2fluorescent(self, c):
c = c.lower()
if c == "red":
return "rfp"
elif c == "blue":
return "bfp"
elif c == "yellow":
return "yfp"
elif c == "cyan":
return "cfp"
else:
return "gfp"
def set_color(self, c):
self.sstream += self.line_begin + "{0} := 800;".format(self.color2fluorescent(c)) + self.line_end
return self
def unset_color(self, c):
self.sstream += self.line_begin + "{0} := 0;".format(self.color2fluorescent(c)) + self.line_end
return self
def die(self):
self.sstream += self.line_begin + "die();" + self.line_end
return self
def declare_timer(self):
self.sstream += self.line_begin + "p := [ t := 0 ];" + self.line_end
self.sstream += self.new_line
self.sstream += self.line_begin + "true : { p.t := p.t + dt }" + self.line_end
self.sstream += self.new_line
return self
def end_program(self):
self.indent -= 1
self.sstream += self.line_begin + "};" + self.line_end
return self
@property
def predicate_always(self):
return "true"
def start_command(self, pred):
self.sstream += self.line_begin + pred + " : {" + self.line_end
self.indent += 1
return self
def end_command(self):
self.indent -= 1
self.sstream += self.line_begin + "}" + self.line_end
return self
def signal_name(self, s):
return "signal" + str(s)
def line_predicates(self, line):
p1 = "get_signal({0}) >= {2} & get_signal({1}) >= {2} & close (get_signal({0})) (get_signal({1})) <= MAX_DIFF".format(self.signal_name(line.outer_signal_id), self.signal_name(line.inner_signal_id), line.signalStrength)
p2 = "get_signal({0}) < {2} | get_signal({1}) < {2} | close (get_signal({0})) (get_signal({1})) > MAX_DIFF".format(self.signal_name(line.outer_signal_id), self.signal_name(line.inner_signal_id), line.signalStrength)
p3 = ""
if line.die_outer:
p3 = "close (get_signal({0})) (get_signal({1})) > DIE_DIFF & get_signal({0}) > get_signal({1}) & p.t > 200".format(self.signal_name(line.outer_signal_id), self.signal_name(line.inner_signal_id), line.signalStrength)
return [p1, p2, p3]
def intersect(self, preds):
if len(preds) == 0:
return "true"
elif len(preds) == 1:
return preds[0]
return "( " + " ) & ( ".join(preds) + " )"
def union(self, preds):
if len(preds) == 0:
return "true"
elif len(preds) == 1:
return preds[0]
return "( " + " ) | ( ".join(preds) + " )"
def declare_signal(self, sid):
self.sstream += self.line_begin + "{0} := signal(5, 0.1);".format(self.signal_name(sid)) + self.line_end
return self
def init_signal(self, sid, s):
self.sstream += self.line_begin + "set_signal({0}, {1}, {2}, 100);".format(self.signal_name(sid), round3(s.groPosition.x), round3(s.groPosition.y)) + self.line_end
return self
def declare_signals_for_lines(self, lines):
for l in lines:
self.declare_signal(l.outer_signal_id)
self.declare_signal(l.inner_signal_id)
self.blank_line()
return self
def init_signals_for_lines(self, lines):
self.start_program("main")
self.start_command(self.predicate_always)
for l in lines:
signals = l.signals()
self.init_signal(l.outer_signal_id, signals[0])
self.init_signal(l.inner_signal_id, signals[1])
self.end_command()
self.end_program()
self.blank_line()
return self
class Canvas(object):
def __init__(self, name="canvas"):
self._lines = []
self._program = GroPrinter()
self._name = name
@property
def name(self):
return self._name
@property
def lines(self):
return self._lines
@property
def program(self):
return self._program
@property
def num_lines(self):
return len(self.lines)
def drawLine(self, v0, v1, color='green', die_outer = False):
l = Line(v0, v1, color, die_outer)
l.id = self.num_lines
self.lines.append(l)
return self
def codegen(self):
p = self.program
p.genPrologue().declare_signals_for_lines(self.lines)
p.start_program(self.name).declare_colors().declare_timer()
unset_color_map = {}
for l in self.lines:
preds = p.line_predicates(l)
c = l.color
p.start_command(preds[0]).set_color(c).end_command().blank_line()
if c not in unset_color_map:
unset_color_map[c] = []
unset_color_map[c].append(preds[1])
if l.die_outer:
p.start_command(preds[2]).die().end_command().blank_line()
for c in unset_color_map:
pred = p.intersect(unset_color_map[c])
p.start_command(pred).unset_color(c).end_command().blank_line()
p.end_program().blank_line()
p.init_signals_for_lines(self.lines)
p.launch_program(self.name)
return p
| true |
cd6099d6870ffddf5841ad7ae67480e6e5693c13 | Python | gavinrozzi/aleph | /services/extract-entities/entityextractor/aggregate.py | UTF-8 | 2,396 | 2.890625 | 3 | [
"MIT"
] | permissive | from entityextractor.extract import extract_polyglot, extract_spacy
from entityextractor.normalize import clean_label, label_key
from entityextractor.normalize import select_label
from entityextractor.util import overlaps
class EntityGroup(object):
def __init__(self, label, key, category, span):
self.labels = [label]
self.categories = [category]
self.keys = set([key])
self.spans = set([span])
def match(self, key, span):
if key in self.keys:
return True
for crit in self.spans:
if overlaps(span, crit):
return True
# TODO: could also do some token-based magic here??
return False
def add(self, label, key, category, span):
self.labels.append(label)
self.categories.append(category)
self.keys.add(key)
self.spans.add(span)
@property
def label(self):
return select_label(self.labels)
@property
def category(self):
return max(set(self.categories), key=self.categories.count)
@property
def weight(self):
return len(self.labels)
class EntityAggregator(object):
def __init__(self):
self.groups = []
self.record = 0
def extract(self, text, languages):
self.record += 1
for language in languages:
for (l, c, s, e) in extract_polyglot(text, language):
self.feed(l, c, (self.record, s, e))
for (l, c, s, e) in extract_spacy(text, language):
self.feed(l, c, (self.record, s, e))
def feed(self, label, category, span):
label = clean_label(label)
if label is None:
return
key = label_key(label)
if key is None:
return
for group in self.groups:
if group.match(key, span):
group.add(label, key, category, span)
return
group = EntityGroup(label, key, category, span)
self.groups.append(group)
@property
def entities(self):
for group in self.groups:
# When we have many results, don't return entities which
# were only found a single time.
if len(self) > 100 and group.weight == 1:
continue
yield group.label, group.category, group.weight
def __len__(self):
return len(self.groups)
| true |
ec467636bb6136b33a7ec3704046a6fcdd53ef28 | Python | jestrella52/indybot | /rrScripts/rrBirthdays.py | UTF-8 | 2,577 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
#
# Adds driver birth dates to database.
#
import MySQLdb
import MySQLdb.cursors
import datetime
import requests
import string
import time
import sys
import re
def findDriverID(driverList, last, first):
for driver in driverList:
if driver['last'] == last and driver['first'] == first:
return driver['id']
con = MySQLdb.connect(read_default_group='indybot', port=3306, db='indybot2', cursorclass=MySQLdb.cursors.DictCursor)
cur = con.cursor()
query = "SELECT * from driver where died IS NULL"
cur.execute(query)
drivers = cur.fetchall()
bornPattern = re.compile('^<BR><BR><B>Born:</B>\s?([a-zA-Z]+)\s+(\d+),\s(\d+)') #\s?([a-zA-Z]+)\s(\d+), (\d+)$')
diedPattern = re.compile('.*<B>Died:</B>\s?([a-zA-Z]+)\s+(\d+),\s(\d+)')
for driver in drivers:
driverName = string.replace(driver['first'] + " " + string.replace(driver['last'], ' Sr.', ''), ',', '')
# driverName = "Justin Wilson"
driverAddr = "http://racing-reference.info/driver/" + string.replace(driverName, ' ', '_')
print driverAddr
page = requests.get(driverAddr)
html = page.content.split('\n')
for line in html:
match = bornPattern.match(line)
if match:
print "BORN: " + match.group(1) + " " + match.group(2) + ", " + match.group(3)
if int(match.group(3)) == 2016:
print "ERROR! YEAR IS THIS YEAR - DRIVER ID: " + str(driver['id'])
sys.exit()
stamp = datetime.datetime.strptime(match.group(1) + " " + match.group(2) + ", " + match.group(3), "%B %d, %Y")
datestamp = stamp.strftime("%Y-%m-%d")
query = 'UPDATE driver SET dob="'
query += datestamp
query += '" where driver.id='
query += str(driver['id'])
cur.execute(query)
con.commit()
match2 = diedPattern.match(line)
if match2:
print "DIED: " + match2.group(1) + " " + match2.group(2) + ", " + match2.group(3)
if int(match2.group(3)) == 2016:
print "ERROR! YEAR IS THIS YEAR - DRIVER ID: " + str(driver['id'])
sys.exit()
stamp2 = datetime.datetime.strptime(match2.group(1) + " " + match2.group(2) + ", " + match2.group(3), "%B %d, %Y")
datestamp2 = stamp2.strftime("%Y-%m-%d")
query = 'UPDATE driver SET died="'
query += datestamp2
query += '" where driver.id='
query += str(driver['id'])
cur.execute(query)
con.commit()
time.sleep(2)
| true |
5fd9353aa69cf0da94e67f18cc8ab979041e7d9f | Python | chess-equality/Arthur | /src/test/resources/same/operators/Operators.py | UTF-8 | 314 | 3 | 3 | [
"Apache-2.0"
] | permissive | def andOperator():
if True and True:
print ""
def orOperator():
if True or True:
print ""
def equalOperator():
if True == True:
print ""
def notEqualOperator():
if True != True:
print ""
def alternateNotEqualOperator():
if True <> True:
print ""
| true |
f2551a2cb174b8704ee3e9afa78ad6fdb55036b7 | Python | san33eryang/learnpy | /decorator.py | UTF-8 | 2,543 | 3.4375 | 3 | [] | no_license | # -*- coding: utf-8 -*
# 增加日志功能,并返回函数
def log(func):
def wrapper(*args,**kwargs):
print('call %s():'% func.__name__)
return func(*args,**kwargs)
return wrapper
@log
def nows():
print('2019-3-24 12:00')
# 增加日志功能,并返回函数,并解决了 nows的名字改变的情况
import functools
def log1(func):
@ functools.wraps(func)
def wrapper(*args,**kwargs):
print('call %s():'% func.__name__)
return func(*args,**kwargs)
return wrapper
@log1
def nows_update():
print('2019-3-24 12:00')
# 增加日志功能,并返回函数,解决了 nows的名字改变的情况,并可自定义text
def log2(text):
def decorator(func):
@ functools.wraps(func)
def wrapper(*args,**kwargs):
print('%s %s():'% (text,func.__name__))
return func(*args,**kwargs)
return wrapper
return decorator
@log2('execute')
def nows_update2():
print('2019-3-24 13:00')
# exercise
import time
def metric(fn):
@functools.wraps(fn)
def wrapper(*args,**kwargs):
start_time=time.time()
end_time=time.time()
use_time=end_time-start_time
print('%s executed in %s ms'%(fn.__name__,use_time))
return fn(*args,**kwargs)
return wrapper
# 测试
@metric
def fast(x, y):
time.sleep(0.1)
return x + y;
@metric
def slow(x, y, z):
time.sleep(0.5)
return x * y * z;
print (fast(11, 22))
print (slow(11, 22,33))
f=fast(11,22)
s = slow(11, 22, 33)
if f != 33:
print('测试失败!')
elif s != 7986:
print('测试失败!')
print('')
print('<----next part exercise2--->')
def log3(text='info'):
def decorator(func):
@ functools.wraps(func)
def wrapper(*args,**kwargs):
print('%s enter call %s():' % (text, func.__name__))
print('begain call')
func_result=func(*args,**kwargs)
print('end call')
return func_result
return wrapper
return decorator
@log3()
def nows_update3():
print('2019-3-24 14:00')
if __name__=='__main__':
nows()
print(nows.__name__) # 由于return 了wrapper,so 现在nows的名字是 wrapper
print()
nows_update()
print(nows_update.__name__) # 由于return 了wrapper,so 现在nows的名字是 wrapper
print()
nows_update2()
print(nows_update2.__name__) # 由于return 了wrapper,so 现在nows的名字是 wrapper
print()
print('<----next part exercise--->')
nows_update3()
| true |
b67b7316ee04f52d5585a42ecb3448b91843f863 | Python | vatula/capi | /capi/src/interfaces/datastructures/polygon.py | UTF-8 | 374 | 2.6875 | 3 | [
"MIT"
] | permissive | import abc
import typing
from capi.src.implementation.dtos.coordinate import Coordinate
class IPolygon(abc.ABC):
@property
@abc.abstractmethod
def vertices(self) -> typing.Sequence[Coordinate]:
pass
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
pass
@abc.abstractmethod
def __repr__(self) -> str:
pass
| true |
2a044f432e9d0539873edb17ea6ed946a0b09374 | Python | srp2210/PythonBasic | /pp_w3resource_solutions/basic_part_1/pp_w3_9.py | UTF-8 | 81 | 2.5625 | 3 | [] | no_license | exam_date = (11, 12, 2014)
print(exam_date[0], "/",exam_date[1],"/",exam_date[2]) | true |
53addf3e83f31fc1a265878056563dc299cefcf3 | Python | Y-Joo/Baekjoon-Algorithm | /pythonProject/Graph/Alphabet.py | UTF-8 | 581 | 2.671875 | 3 | [] | no_license | def bfs(start):
pas = set()
pas.add(board[0][0])
queue = set([start])
m = 1
while queue:
x, y, cnt, passed = queue.pop()
m = max(m, cnt)
for i in range(4):
lx, ly = x + dx[i], y + dy[i]
if 0 <= lx < r and 0 <= ly < c:
if board[lx][ly] not in passed:
queue.add((lx, ly, cnt + 1, passed + board[lx][ly]))
return m
r, c = map(int, input().split())
board = []
for i in range(r):
board.append(input())
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
print(bfs((0,0,1,board[0][0])))
| true |
a62f7ce02799b28d37604bceea04a2eb95589ea2 | Python | MakarVS/GeekBrains_Algorithms_Python | /Lesson_8/les_8_task_2.py | UTF-8 | 2,260 | 3.765625 | 4 | [] | no_license | """
Задача № 2.
Доработать алгоритм Дейкстры (рассматривался на уроке), чтобы он дополнительно возвращал список вершин,
которые необходимо обойти.
"""
from collections import deque
g = [
[0, 0, 1, 1, 9, 0, 0, 0],
[0, 0, 9, 4, 0, 0, 5, 0],
[0, 9, 0, 0, 3, 0, 6, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 5, 0],
[0, 0, 7, 0, 8, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 2, 0],
]
def dijkstra(graph, start):
length = len(graph)
is_visited = [False] * length
cost = [float('inf')] * length
parent = [-1] * length
cost[start] = 0
min_cost = 0
_start = start
while min_cost < float('inf'):
is_visited[start] = True
for i, vertex in enumerate(graph[start]):
if vertex != 0 and not is_visited[i]:
if cost[i] > vertex + cost[start]:
cost[i] = vertex + cost[start]
parent[i] = start
min_cost = float('inf')
for i in range(length):
if min_cost > cost[i] and not is_visited[i]:
min_cost = cost[i]
start = i
way = [deque() for _ in range(length)]
for i in range(length):
j = i
while parent[j] != _start:
if parent[j] != -1:
way[i].appendleft(str(parent[j]))
else:
break
j = parent[j]
else:
way[i].appendleft(str(_start))
way[i].append(str(i))
if i == _start:
way[i].appendleft(str(i))
return cost, way
s = int(input('От какой вершины идти? '))
cost, way = dijkstra(g, s)
for i in range(len(cost)):
length = len(way[i])
if length > 1:
print(f'Кратчайшее расстояние от вершины {s} до вершины {i} равно {cost[i]} при этом надо пройти через вершины:'
f' {", ".join(list(way[i]))}')
elif i == s:
print(f'{i} - начальная вершина')
else:
print(f'Из вершины {s} в вершину {i} нет пути')
| true |
f333d77abf45c939a3a1b06212e1e5448b6b6809 | Python | fosc/tick-tack-toe | /opponents.py | UTF-8 | 3,971 | 4.125 | 4 | [] | no_license | """
This module contains implementations of the Player interface. A Player provides the play method:
1. play(Game State) --> tuple
The Game State interface provides the following methods:
1. is_game_over() --> Boolean
2. get_moves() --> list of tuples
3. is_winnable() --> Boolean
4. + tuple --> new Game State
"""
class PlayerFactory:
"""Factory of the Player interface"""
def __init__(self):
self._creators = {}
def register_opponent(self, level, creator):
"""Register a class with method play() and assign it a difficulty 'level'"""
self._creators[level] = creator
def get_opponent(self, level):
"""Get an opponent instance that plays with difficulty 'level'"""
creator = self._creators.get(level)
if not creator:
raise ValueError(level)
return creator()
def max_depth(my_func):
"""Track recursion depth for a class method based on the current_depth member"""
def wrapper(self, *args, **kwargs):
self.current_depth += 1
res = my_func(self, *args, **kwargs)
self.current_depth -= 1
return res
return wrapper
class RecursiveSearchAlgorithm:
    """Recursively traverses a tree of possible game outcomes and return next move.

    Works against any game object exposing get_moves(), is_game_over(),
    is_winnable() and ``game + move`` (returning the successor state).
    ``current_depth`` is maintained by the @max_depth decorator so the
    search can stop after ``max_search_depth`` plies.
    """
    def __init__(self, max_search_depth):
        self.current_depth = 0  # live recursion depth, managed by @max_depth
        self.max_search_depth = max_search_depth  # how many plies to look ahead
    def search_depth_exceeded(self):
        # True once the search has descended max_search_depth levels deep.
        return self.current_depth >= self.max_search_depth
    @max_depth
    def is_good_move(self, game):
        """Draws and Wins are both considered equally good by this method"""
        if game.is_game_over():
            return True # you cannot loose on your turn - only win or draw
        if self.search_depth_exceeded():
            return True # eventually we stop looking and say its safe
        if game.is_winnable():
            return False # if we have left game in a state where opponent can win
        can_be_won = True # we can win (or draw) unless we find opponent has winning move
        for opponent_move in game.get_moves():
            new_game = game + opponent_move
            # can_win_after_this_move is False until we find a good move
            # (or if there are no moves --> draw)
            can_win_after_this_move = True if not new_game.get_moves() else False
            for move in new_game.get_moves():
                can_win_after_this_move = \
                    can_win_after_this_move or self.is_good_move(new_game + move)
            # we need to be able to win after all opponent moves
            can_be_won = can_be_won and can_win_after_this_move
        return can_be_won
    def play(self, game):
        """Return the first move whose resulting state is_good_move() approves.

        Falls back to the first available move when no good move is found.
        NOTE(review): the print() calls look like leftover debugging output;
        also raises IndexError if the game offers no moves at all — confirm
        callers never invoke play() on a finished game.
        """
        possible_moves = game.get_moves()
        move_dict = {}
        for move in possible_moves:
            move_dict[move] = self.is_good_move(game + move)
            print(move_dict)
            if move_dict[move]:
                return move
        print(move_dict)
        print("could not find a good move")
        return possible_moves[0]
def string_to_tuple(my_str):
    """Extract the first two digit characters of *my_str* as an (int, int) pair.

    E.g. "1,2" -> (1, 2).  All non-digit characters are ignored, so only
    single-digit coordinates are representable.  Raises IndexError when the
    string contains fewer than two digits.
    """
    digits = [int(ch) for ch in my_str if ch.isdigit()]
    return digits[0], digits[1]
class HumanPlayer:
    """Player that delegates move selection to the person at the keyboard."""

    @staticmethod
    def get_coordinate(message):
        # Prompt once on stdin and parse the reply into an (x, y) tuple.
        return string_to_tuple(input(message))

    def play(self, game):
        """Show the current board, then ask the user for their next move."""
        print(game)
        prompt = "enter the coordinates of your move (e.g. enter: 1,2 ):\n(Please note bottom left is 0,0)\n"
        return HumanPlayer.get_coordinate(prompt)
class MediumOpponent(RecursiveSearchAlgorithm):
    """Computer opponent that searches only two plies ahead (beatable)."""

    def __init__(self):
        super().__init__(max_search_depth=2)
class HardOpponent(RecursiveSearchAlgorithm):
    """Computer opponent with a deep five-ply search."""

    def __init__(self):
        super().__init__(max_search_depth=5)
# Module-level factory pre-loaded with every available difficulty level.
opponent_factory = PlayerFactory()
for _level, _creator in (('medium', MediumOpponent),
                         ('human', HumanPlayer),
                         ('hard', HardOpponent)):
    opponent_factory.register_opponent(_level, _creator)
| true |
51732a90a88ebcc4ecc710f86b6cc3d3eb6e78af | Python | bot-kevin/python | /juegos/milove.py | UTF-8 | 570 | 3.265625 | 3 | [] | no_license | import turtle
# Turtle-graphics demo: draws a filled red heart with a line through it
# on a black screen titled "I love you".
azadine = turtle.Turtle()
badis = turtle.Screen()
badis.bgcolor("black")
badis.title("I love you")
azadine.speed(1)
# Start below the window centre so the shape sits roughly in the middle.
# NOTE(review): no penup() before goto, so this first move draws a line.
azadine.goto(0,-100)
azadine.pensize(9)
azadine.color("red")
azadine.begin_fill()
azadine.fillcolor("red")
# Left side: straight edge up, then a 200-degree arc for the left lobe.
azadine.left(140)
azadine.forward(180)
azadine.circle(-90,200)
# Right side: mirrored arc, then the closing straight edge.
azadine.setheading(60)
azadine.circle(-90,200)
azadine.forward(176)
azadine.end_fill()
# Extra strokes through the finished heart.
azadine.setheading(140)
azadine.forward(170)
azadine.setheading(210)
azadine.forward(200)
# NOTE(review): this heading is immediately overwritten by the next line.
azadine.setheading(-210)
azadine.setheading(390)
azadine.forward(600)
# Keep the window open until the user closes it.
turtle.done()
| true |
deefbff9896ca13b1a15e5d32e312c734c4057d2 | Python | boboalex/LeetcodeExercise | /leetcode_98.py | UTF-8 | 717 | 3.4375 | 3 | [] | no_license | import math
class TreeNode:
    """Binary-tree node: a value plus optional left/right child nodes."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

    def __repr__(self):
        # Debug-friendly representation; children are rendered recursively.
        return f"TreeNode({self.val!r}, left={self.left!r}, right={self.right!r})"
class Solution:
    """LeetCode 98: validate a binary search tree via an in-order traversal.

    A tree is a valid BST iff its in-order sequence is strictly increasing.
    Fixes over the original: duplicate values are now rejected (``<=``
    instead of ``<``), the sentinel is ``float('-inf')`` so the minimal
    32-bit int is accepted as a node value, and the traversal state is
    reset per call so one Solution instance can be reused.
    """

    def __init__(self):
        # Value of the previously visited node during the in-order walk.
        self.pre = float('-inf')

    def isValidBST(self, root: "TreeNode") -> bool:
        """Return True iff *root* is a valid binary search tree.

        *root* may be None (an empty tree is valid).
        """
        # Reset so earlier calls on this instance cannot leak state.
        self.pre = float('-inf')

        def validation(node):
            if not node:
                return True
            if not validation(node.left):
                return False
            # Strictly increasing in-order: equal values are invalid.
            if node.val <= self.pre:
                return False
            self.pre = node.val
            return validation(node.right)

        return validation(root)
if __name__ == '__main__':
    # Quick smoke test: a single-node tree is always a valid BST.
    # (The final line previously carried dataset residue "| true |",
    # which made it syntactically invalid Python.)
    t0 = TreeNode(0)
    s = Solution()
    res = s.isValidBST(t0)
    print(res)
45d6f234c686cdcec9b6aa66854b542c14c6dc55 | Python | UWPCE-PythonCert-ClassRepos/SP_Online_PY210 | /students/Z_shen/lesson09/test_mailroom_oo.py | UTF-8 | 2,013 | 3 | 3 | [] | no_license | from donor_models import *
import os.path
import pathlib
import pytest
donor_list = {'William Gates': [1500.99, 3500, 800.25],
'Jeff Bezos': [145.72, 1350.25],
'Paul Allen': [250.00, 57.00],
'Mark Zuckerberg': [600.00]}
def test_donor():
a = Donor('William Gates', 123)
assert a.name == 'William Gates'
assert a. amount == 123
def test_repr():
a = Donor('William Gates', 123)
assert a.__repr__() == 'William Gates donated $123'
def test_str():
a = Donor('William Gates', 123)
assert a.__str__() == 'Donor(William Gates, 123)'
def test_send_letter():
a = Donor('William Gates', 123)
content = ('''Dear {},
Thank you for your generous donation of ${:,.2f} to us.
It will be put to very good use.
Sincerely,
-The Team
'''.format('William Gates', 123))
assert a.send_letter() == content
def test_donor_collection():
b = DonorCollection(donor_list)
assert b.donors == donor_list
def test_add_new_donor():
b = DonorCollection()
test = {'William Gates': [123]}
assert b.add_new_donor('William Gates', 123) == test
def test_add_amount_same_donor():
b = DonorCollection({'William Gates': [123]})
test = {'William Gates': [123, 111]}
assert b.add_amount_same_donor('William Gates', 111) == test
def test_times():
b = DonorCollection({'William Gates': [123, 123, 123]})
assert b.times('William Gates') == 3
def test_total():
b = DonorCollection({'William Gates': [123, 123, 123]})
assert b.total('William Gates') == 123*3
def test_sorted():
b = DonorCollection(donor_list)
test = ['William Gates', 'Jeff Bezos', 'Mark Zuckerberg', 'Paul Allen']
assert b.sorted() == test
def test_send_letters_to_all():
b = DonorCollection(donor_list)
dirpath = pathlib.Path('./').absolute()
file1 = os.path.join(dirpath, 'Mark_Zuckerberg.txt')
file2 = os.path.join(dirpath, 'Paul_Allen.txt')
b.send_letters_to_all()
assert os.path.exists(file1)
assert os.path.exists(file2) | true |
a35a1a87eb797744cdd7f9029e00a810e7c860aa | Python | koki0702/chainer0 | /chainer0/functions/basic_math.py | UTF-8 | 4,494 | 2.578125 | 3 | [
"MIT"
] | permissive | import numpy as np
import chainer0
from chainer0.function import Function
from chainer0 import variable
from chainer0 import functions
class Add(Function):
def forward(self, a, b):
self.is_broadcast = a.shape != b.shape
y = a + b
return y
def backward(self, gy):
ga, gb = gy, gy
if self.is_broadcast:
a, b = self.inputs
ga = chainer0.functions.sum_to(ga, a.shape)
gb = chainer0.functions.sum_to(gb, b.shape)
return ga, gb
class Sub(Function):
def forward(self, a, b):
self.is_broadcast = a.shape != b.shape
y = a - b
return y
def backward(self, gy):
ga, gb = gy, -gy
if self.is_broadcast:
a, b = self.inputs
ga = chainer0.functions.sum_to(ga, a.shape)
gb = chainer0.functions.sum_to(gb, b.shape)
return ga, gb
class Mul(Function):
def forward(self, a, b):
self.is_broadcast = a.shape != b.shape
y = a * b
return y
def backward(self, gy):
x0, x1 = self.inputs
ga, gb = gy * x1, gy * x0
if self.is_broadcast:
a, b = self.inputs
ga = chainer0.functions.sum_to(ga, a.shape)
gb = chainer0.functions.sum_to(gb, b.shape)
return ga, gb
class Div(Function):
def forward(self, a, b):
self.is_broadcast = a.shape != b.shape
y = a / b
return y
def backward(self, gy):
x0, x1 = self.inputs
ga = gy / x1
gb = -ga * x0 / x1
if self.is_broadcast:
a, b = self.inputs
ga = chainer0.functions.sum_to(ga, a.shape)
gb = chainer0.functions.sum_to(gb, b.shape)
return ga, gb
class Neg(Function):
def forward(self, x):
return -x
def backward(self, gy):
return -gy
class Pow(Function):
def forward(self, a, b):
self.is_broadcast = a.shape != b.shape
y = a ** b
return y
def backward(self, gy):
x0, x1 = self.inputs
ga = x1 * (x0 ** (x1 - 1)) * gy
gb = functions.log(x0) * (x0 ** x1) * gy
if self.is_broadcast:
a, b = self.inputs
ga = chainer0.functions.sum_to(ga, a.shape)
gb = chainer0.functions.sum_to(gb, b.shape)
return ga, gb
class Absolute(Function):
def forward(self, x):
y = abs(x)
return y
def backward(self, gy):
y = self.outputs[0]
sign = variable(np.sign(y.data))
return sign * gy
def add(self, rhs):
f = Add()
return f(self, rhs)
def sub(self, rhs): # lhs - rhs
f = Sub()
return f(self, rhs)
def rsub(self, rhs): # rhs - lhs
f = Sub()
return f(rhs, self)
def mul(self, rhs):
f = Mul()
return f(self, rhs)
def pow(self, rhs):
f = Pow()
return f(self, rhs)
def rpow(self, rhs):
f = Pow()
return f(rhs, self)
def neg(self):
f = Neg()
return f(self)
def absolute(self):
f = Absolute()
return f(self)
def div(self, rhs):
f = Div()
return f(self, rhs)
def rdiv(self, rhs):
f = Div()
return f(rhs, self)
def install_variable_arithmetics():
variable.Variable.__neg__ = neg
variable.Variable.__abs__ = absolute
variable.Variable.__add__ = add
variable.Variable.__radd__ = add
variable.Variable.__sub__ = sub
variable.Variable.__rsub__ = rsub
variable.Variable.__mul__ = mul
variable.Variable.__rmul__ = mul
variable.Variable.__pow__ = pow
variable.Variable.__rpow__ = rpow
variable.Variable.__div__ = div
variable.Variable.__truediv__ = div
variable.Variable.__rdiv__ = rdiv
variable.Variable.__rtruediv__ = rdiv
'''
- variable.Variable.__neg__ = neg
- variable.Variable.__abs__ = absolute
- variable.Variable.__add__ = add
- variable.Variable.__radd__ = add
- variable.Variable.__sub__ = sub
- variable.Variable.__rsub__ = rsub
- variable.Variable.__mul__ = mul
- variable.Variable.__rmul__ = mul
- variable.Variable.__div__ = div
- variable.Variable.__truediv__ = div
- variable.Variable.__rdiv__ = rdiv
- variable.Variable.__rtruediv__ = rdiv
variable.Variable.__floordiv__ = floordiv
variable.Variable.__rfloordiv__ = rfloordiv
- variable.Variable.__pow__ = pow
- variable.Variable.__rpow__ = rpow
variable.Variable.__matmul__ = matmul
variable.Variable.__rmatmul__ = rmatmul
''' | true |
69e91a4e520be5c6e12721d0a874116ef26cd7e7 | Python | elderfd/numpyson | /numpyson.py | UTF-8 | 6,469 | 2.8125 | 3 | [
"MIT"
] | permissive | """
transparent serialization of numpy/pandas data via jsonpickle.
compatible to python2.7 and python3.3 and allows to serialize
between the two interpreters.
majorly based on code and ideas of David Moss in his MIT licensed pdutils
repository: https://github.com/drkjam/pdutils
Note that the serialization/deserialization is not space-efficient
due to the nature of json/jsonpickle. You could certainly save space
by compressing/decompressing the resulting json output if you need to.
(C) David Moss, Holger Krekel 2014
"""
__version__ = '0.4'
import numpy as np
import pandas as pd
import jsonpickle.handlers
import jsonpickle.util
class BaseHandler(jsonpickle.handlers.BaseHandler):
def nrestore(self, arg, reset=False):
return self.context.restore(arg, reset=reset)
def nflatten(self, arg, reset=False):
return self.context.flatten(arg, reset=reset)
class NumpyNumber(BaseHandler):
def flatten(self, obj, data):
data["__reduce__"] = (self.nflatten(type(obj)), [float(obj)])
return data
def restore(self, obj):
cls, args = obj['__reduce__']
cls = self.nrestore(cls)
return cls(args[0])
class NumpyArrayHandler(BaseHandler):
"""A jsonpickle handler for numpy (de)serialising arrays."""
def flatten(self, obj, data):
order = 'F' if obj.flags.fortran else 'C'
buf = jsonpickle.util.b64encode(obj.tostring(order=order))
#TODO: including other parameters like byteorder, etc?
#TODO: see numpy.info(obj) and obj.__reduce__() for details.
shape = self.nflatten(obj.shape)
dtype = str(obj.dtype)
args = [shape, dtype, buf, order]
data['__reduce__'] = (self.nflatten(np.ndarray), args)
return data
def restore(self, obj):
cls, args = obj['__reduce__']
cls = self.nrestore(cls)
shape = self.nrestore(args[0])
dtype = np.dtype(self.nrestore(args[1]))
buf = jsonpickle.util.b64decode(args[2])
order = args[3]
return cls(shape=shape, dtype=dtype, buffer=buf, order=order)
class PandasTimeSeriesHandler(BaseHandler):
"""A jsonpickle handler for numpy (de)serialising pandas TimeSeries objects."""
def flatten(self, obj, data):
values = self.nflatten(obj.values)
index = self.nflatten(obj.index.values)
args = [values, index]
data['__reduce__'] = (self.nflatten(pd.TimeSeries), args)
return data
def restore(self, obj):
cls, args = obj['__reduce__']
cls = self.nrestore(cls)
cls = self.nrestore(cls)
values = self.nrestore(args[0])
index = self.nrestore(args[1])
return cls(data=values, index=index)
class PandasDateTimeIndexHandler(BaseHandler):
"""A jsonpickle handler for numpy (de)serialising pandas DateTimeIndex objects."""
def flatten(self, obj, data):
values = self.nflatten(obj.values)
freq = self.nflatten(obj.freq)
args = [values, freq]
data['__reduce__'] = (self.nflatten(pd.DatetimeIndex), args)
return data
def restore(self, obj):
cls, args = obj['__reduce__']
cls = self.nrestore(cls, reset=False)
values = self.nrestore(args[0])
freq = self.nrestore(args[1])
return cls(data=values, freq=freq)
def build_index_handler_for_type(index_class):
"""A class factor that builds jsonpickle handlers for various index types."""
if not issubclass(index_class, pd.Index) or index_class == pd.DatetimeIndex:
raise TypeError('expected a subclass of pandas.Index, got %s' % type(index_class))
class _IndexHandler(BaseHandler):
"""A jsonpickle handler for numpy (de)serialising pandas Index objects."""
def flatten(self, obj, data):
values = self.nflatten(obj.values)
args = [values]
data['__reduce__'] = (self.nflatten(index_class), args)
return data
def restore(self, obj):
cls, args = obj['__reduce__']
cls = self.nrestore(cls)
values = self.nrestore(args[0])
return cls(data=values)
return _IndexHandler
PandasInt64IndexHandler = build_index_handler_for_type(pd.Int64Index)
PandasFloat64IndexHandler = build_index_handler_for_type(pd.Float64Index)
PandasIndexHandler = build_index_handler_for_type(pd.Index)
class PandasDataFrameHandler(BaseHandler):
"""A jsonpickle handler for numpy (de)serialising pandas DataFrame objects."""
def flatten(self, obj, data):
pickler = self.context
flatten = pickler.flatten
values = [flatten(obj[col].values) for col in obj.columns]
index = flatten(obj.index.values)
columns = flatten(obj.columns.values)
args = [values, index, columns]
data['__reduce__'] = (flatten(pd.DataFrame), args)
return data
def restore(self, obj):
cls, args = obj['__reduce__']
cls = self.nrestore(cls)
values = self.nrestore(args[0])
index = self.nrestore(args[1])
columns = self.nrestore(args[2])
return cls(dict(zip(columns, values)), index=index)
def register_handlers():
"""Call this function to register handlers with jsonpickle module."""
jsonpickle.handlers.register(np.float64, NumpyNumber)
jsonpickle.handlers.register(np.int64, NumpyNumber)
jsonpickle.handlers.register(np.ndarray, NumpyArrayHandler)
jsonpickle.handlers.register(pd.Index, PandasIndexHandler)
jsonpickle.handlers.register(pd.DatetimeIndex, PandasDateTimeIndexHandler)
jsonpickle.handlers.register(pd.Int64Index, PandasInt64IndexHandler)
jsonpickle.handlers.register(pd.Float64Index, PandasFloat64IndexHandler)
jsonpickle.handlers.register(pd.TimeSeries, PandasTimeSeriesHandler)
jsonpickle.handlers.register(pd.DataFrame, PandasDataFrameHandler)
def dumps(obj):
register_handlers()
return jsonpickle.encode(obj, unpicklable=True).encode("utf-8")
#from jsonpickle.pickler import _make_backend, Pickler
#backend = _make_backend(None)
#context = Pickler(unpicklable=True,
# make_refs=True,
# keys=False,
# backend=backend,
# max_depth=None)
#context._mkref = lambda x: True
#return backend.encode(context.flatten(obj, reset=False)).encode("utf-8")
def loads(obj):
register_handlers()
return jsonpickle.decode(obj.decode("utf-8"))
| true |
213bbe7ce2832ef07c329e587287441c6cd27e58 | Python | noxtoby/dem | /python/dem_utilities.py | UTF-8 | 16,890 | 2.546875 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
import os
import pystan
from sklearn.model_selection import StratifiedKFold
from matplotlib import pyplot as plt
import seaborn as sn
import statsmodels.formula.api as smf
import statsmodels.api as sm
import itertools
from datetime import datetime
def preliminaries(fname_save,d1d2='~/Code/GitHub/TADPOLE_Billabong_pyDEM/data/TADPOLE_D1_D2.csv'):
"""
Differential Equation Model prep
Returns a cleaned pandas DataFrame
Author: Neil P Oxtoby, UCL, November 2018
"""
dem_markers = ['WholeBrain', 'Hippocampus', 'Ventricles', 'Entorhinal', 'MMSE', 'ADAS11', 'FAQ']
if os.path.isfile(fname_save):
print(' ...Save file detected ({0}). Prep work done. Good on ya.'.format(fname_save))
df = pd.read_csv(fname_save,low_memory=False)
return df, dem_markers
else:
print(' ...Executing preliminaries() function.')
#* Load data
df = pd.read_csv(d1d2,low_memory=False)
df = df.loc[~np.isnan(df.group)]
df = df[['RID','Time','group']+dem_markers]
df.rename(columns={'group':'DX'},inplace=True)
df.to_csv(fname_save,index=False)
return df, dem_markers
def check_for_save_file(file_name,function):
if os.path.isfile(file_name):
print('check_for_save_file(): File detected ({0}) - you can load data.'.format(file_name))
#ebm_save = sio.loadmat(file_name)
return 1
else:
if function is None:
print('You should call your function')
else:
print('You should call your function {0}'.format(function.__name__))
return 0
def dxdt(x,t):
# n = np.isnan(t) | np.isnan(x)
# lm = np.polyfit(t[~n],x[~n],1)
#* Fit a GLM using statsmodels
glm_formula = 'x ~ t'
mod = smf.ols(formula=glm_formula, data={'x':x,'t':t})
res = mod.fit()
return res.params[1]
def dem_gradients(df,
markers,
fname_save,
id_col='RID',
t_col='Time',
dx_col = 'DX',
n_timepoints_min=2):
"""
dem_gradients()
Calculates individual gradients from longitudinal data and
returns a cross-section of differential data
Neil Oxtoby, UCL, November 2018
"""
if os.path.isfile(fname_save):
print(' ...Save file detected ({0}). Differential data calculated. Good on ya.'.format(fname_save))
df_dem = pd.read_csv(fname_save,low_memory=False)
return df_dem
else:
print(' ...Executing dem_gradients() function.')
#* Remove individuals without enough data
counts = df.groupby([id_col]).agg(['count'])
counts.reset_index(inplace=True)
has_long_data = (np.all(counts>=n_timepoints_min,axis=1))
rid_include = counts[id_col][ has_long_data ].values
#* Add baseline DX
counts = counts.merge(df.loc[df['Time']==0,[id_col,dx_col]].rename(columns={dx_col:dx_col+'.bl'}),on='RID')
dxbl_include = counts[dx_col+'.bl'][ has_long_data ].values
#* Baseline DX
df = df.merge(df.loc[df['Time']==0,[id_col,dx_col]].rename(columns={dx_col:dx_col+'.bl'}))
id_dxbl = df[[id_col,dx_col+'.bl']]
#* Keep only RID included
df_ = df.loc[ df[id_col].isin(rid_include) ]
#* Add baseline DX
df_ = df_.merge(id_dxbl)
#* Calculate gradients
df_dem = pd.DataFrame(data={id_col:rid_include,dx_col+'.bl':dxbl_include})
for i in df_dem[id_col]:
rowz = i==df_[id_col]
rowz_dem = i==df_dem[id_col]
t = df_.loc[rowz,t_col]
for m in markers:
x = df_.loc[rowz,m]
df_dem.loc[rowz_dem,m+'-mean'] = np.mean(x)
df_dem.loc[rowz_dem,m+'-grad'] = dxdt(x,t)
df_dem.to_csv(fname_save,index=False)
return df_dem
def dem_postselect(df_dem,markers,dx_col='DX'):
"""
Postselects differential data as done in Villemagne 2013:
- Omits non-progressing (negative gradient), non-abnormal (less than biomarker median of CN) differential data
Neil Oxtoby, UCL, November 2018
"""
dx_dict = {1:'CN',2:'MCI',3:'AD',4:'CNtoMCI',5:'MCItoAD',6:'CNtoAD',7:'MCItoCN',8:'ADtoMCI',9:'ADtoCN'}
x_text = '-mean'
y_text = '-grad'
df_postelection = pd.DataFrame(data={'Marker':markers})
#* 1. Restrict to MCI and AD - purifies, but might also remove presymptomatics in CN
dx_included = [2,3]
df_ = df_dem.loc[df_dem[dx_col].isin(dx_included)].copy()
#* 2. Exclude normal and non-progressing
for m in markers:
#* 2.1 Normal threshold = median of CN (alt: use clustering)
normal_threshold = df_dem.loc[df_dem[dx_col].isin([1]),m+x_text].median()
#* 2.2 Non-progressing = negative gradient
nonprogress_threshold = 0
excluded_rows = (df_[m+x_text] < normal_threshold) & (df_[m+y_text] < nonprogress_threshold)
df_postelection.loc[df_postelection['Marker']==m,'Normal-Threshold'] = normal_threshold
return df_, df_postelection
def clinical_progressors(df,id_col='RID',dx_col='DX'):
"""
NOT CURRENTLY USED
"""
dx_dict = {1:'Stable NL', 2:'Stable MCI', 3:'Stable: Dementia',
4:'Conversion: NL to MCI', 5:'Conversion: MCI to Dementia', 6:'Conversion: NL to Dementia',
7:'Reversion: MCI to NL', 8:'Reversion: Dementia to MCI', 9:'Reversion: Dementia to NL'}
counts2 = df.groupby([id_col,dx_col]).agg(['count'])
counts3 = counts2.groupby([id_col]).agg('count')
nonstable_dx = counts3[dx_col]>2
nonreverting_dx = counts3[dx_col].isin([1,2,3,4,5,6])
rid_progressors = counts3.loc[nonstable_dx & nonreverting_dx,id_col]
return rid_progressors
def fit_dem(df_dem,markers,stan_model,betancourt=False):
"""
dem_fit = fit_dem(df,markers,stan_model)
"""
x_text = '-mean'
y_text = '-grad'
df_dem_fits = pd.DataFrame(data={'Marker':markers})
# #* 1. Linear regression
# slope, intercept, r_value, p_value, std_err = stats.linregress(x_,dxdt_)
# DEMfit = {'linreg_slope':slope}
# DEMfit['linreg_intercept'] = intercept
# DEMfit['linreg_r_value'] = r_value
# DEMfit['linreg_p_value'] = p_value
# DEMfit['linreg_std_err'] = std_err
for m in markers:
x = df_dem[m+x_text].values
y = df_dem[m+y_text].values
i = np.argsort(x)
x = x[i]
y = y[i]
#* GPR setup: hyperparameters, etc.
if betancourt:
x_scale = (max(x)-min(x))
y_scale = (max(y)-min(y))
sigma_scale = 0.1*y_scale
x_predict = np.linspace(min(x),max(x),20)
N_predict = len(x_predict)
#* MCMC CHAINS: initial values
rho_i = x_scale/2
alpha_i = y_scale/2
sigma_i = sigma_scale
init = {'rho':rho_i, 'alpha':alpha_i, 'sigma':sigma_i}
dem_gpr_dat = {'N': len(x),
'x': x,
'y': y,
'x_scale' : x_scale,
'y_scale' : y_scale,
'sigma_scale' : sigma_scale,
'x_predict' : x_predict,
'N_predict' : N_predict
}
df_dem_fits.loc[df_dem_fits['Marker']==m,'x_predict'] = x_predict
else:
x2 = x**2
y2 = y**2
scaleFactor = 1
inv_rho_sq_scale = (max(x)-min(x))**2/scaleFactor # (max(x**2)-min(x**2))/scaleFactor
eta_sq_scale = (max(y)-min(y))**2/scaleFactor # (max(y**2)-min(y**2))/scaleFactor
sigma_sq_scale = 0.1*eta_sq_scale
# GP priors: hyperparameter scales
cauchyHWHM_inv_rho_sq = inv_rho_sq_scale
cauchyHWHM_eta_sq = eta_sq_scale
cauchyHWHM_sigma_sq = sigma_sq_scale
prior_std_inv_rho_sq = cauchyHWHM_inv_rho_sq
prior_std_eta_sq = cauchyHWHM_eta_sq
prior_std_sigma_sq = cauchyHWHM_sigma_sq
#* MCMC CHAINS: initial values
inv_rho_sq = inv_rho_sq_scale
eta_sq = eta_sq_scale
sigma_sq = sigma_sq_scale
init = {'inv_rho_sq':inv_rho_sq, 'eta_sq':eta_sq, 'sigma_sq':sigma_sq}
dem_gpr_dat = {'N1': len(x),
'x1': x,
'y1': y,
'prior_std_eta_sq' : prior_std_eta_sq,
'prior_std_inv_rho_sq' : prior_std_inv_rho_sq,
'prior_std_sigma_sq' : prior_std_sigma_sq
}
print('Performing GPR for {0}'.format(m))
fit = stan_model.sampling(data=dem_gpr_dat,
init=[init,init,init,init],
iter=1000,
chains=4)
df_dem_fits.loc[df_dem_fits['Marker']==m,'pystan_fit_gpr'] = fit
return df_dem_fits
def fit_diagnostics(stan_model_fit):
pass
return None
def sample_from_gpr_posterior(x,y,xp,alpha,rho,sigma,
CredibleIntervalLevel=0.95,
nSamplesFromGPPosterior=500):
#* GP Posterior
stds = np.sqrt(2) * special.erfinv(CredibleIntervalLevel)
#* Covariance matrices from kernels: @kernel_pred, @kernel_err, @kernel_obs
def kernel_pred(alpha,rho,x_1,x_2):
kp = alpha**2*np.exp(-rho**2 * (np.tile(x_1,(len(x_2),1)).transpose() - np.tile(x_2,(len(x_1),1)))**2)
return kp
def kernel_err(sigma,x_1):
ke = sigma**2*np.eye(len(x_1))
return ke
def kernel_obs(alpha,rho,sigma,x_1):
ko = kernel_pred(alpha,rho,x_1,x_1) + kernel_err(sigma,x_1)
return ko
#* Observations - full kernel
K = kernel_obs(alpha=alpha,rho=rho,sigma=sigma,x_1=x)
#* Interpolation - signal only
K_ss = kernel_pred(alpha=alpha,rho=rho,x_1=xp,x_2=xp)
#* Covariance (observations & interpolation) - signal only
K_s = kernel_pred(alpha=alpha,rho=rho,x_1=xp,x_2=x)
#* GP mean and covariance
#* Covariance from fit
y_post_mean = np.matmul(np.matmul(K_s,np.linalg.inv(K)),y)
y_post_Sigma = (K_ss - np.matmul(np.matmul(K_s,np.linalg.inv(K)),K_s.transpose()))
y_post_std = np.sqrt(np.diag(y_post_Sigma))
#* Covariance from data - to calculate residuals
K_data = K
K_s_data = kernel_pred(alpha=alpha,rho=rho,x_1=x,x_2=x)
y_post_mean_data = np.matmul(np.matmul(K_s_data,np.linalg.inv(K_data)),y)
residuals = y1 - y_post_mean_data
RMSE = np.sqrt(np.mean(residuals**2))
# Numerical precision
eps = np.finfo(float).eps
## 3. Sample from the posterior (multivariate Gaussian)
#* Diagonalise the GP posterior covariance matrix
Vals,Vecs = np.linalg.eig(y_post_Sigma)
A = np.real(np.matmul(Vecs,np.diag(np.sqrt(Vals))))
y_posterior_middle = y_post_mean
y_posterior_upper = y_post_mean + stds*y_post_std
y_posterior_lower = y_post_mean - stds*y_post_std
#* Sample
y_posterior_samples = np.tile(y_post_mean,reps=(nSamplesFromGPPosterior,1)).transpose()
+ np.matmul(A,np.random.randn(len(y_post_mean),nSamplesFromGPPosterior))
if np.abs(np.std(y)-1) < eps:
y_posterior_samples = y_posterior_samples*np.std(y) + np.mean(y)
return (y_posterior_middle,y_posterior_upper,y_posterior_lower,y_posterior_samples)
#* Covariance matrices from kernels: @kernel_pred, @kernel_err, @kernel_obs
def kernel_pred(eta,rho,x_1,x_2):
kp = eta**2*np.exp(-rho**2 * (np.tile(x_1,(len(x_2),1)).transpose() - np.tile(x_2,(len(x_1),1)))**2)
return kp
def kernel_err(sigma,x_1):
ke = sigma**2*np.eye(len(x_1))
return ke
def kernel_obs(eta,rho,sigma,x_1):
ko = kernel_pred(eta,rho,x_1,x_1) + kernel_err(sigma,x_1)
return ko
from scipy import special
def evaluate_GP_posterior(x_p,x_data,y_data,rho_sq,eta_sq,sigma_sq,
nSamplesFromGPPosterior = 1000,
plotGPPosterior = True,
CredibleIntervalLevel = 0.95):
#* Observations - full kernel
K = kernel_obs(np.sqrt(eta_sq),np.sqrt(rho_sq),np.sqrt(sigma_sq),x_data)
#* Interpolation - signal only
K_ss = kernel_pred(np.sqrt(eta_sq),np.sqrt(rho_sq),x_p,x_p)
#* Covariance (observations & interpolation) - signal only
K_s = kernel_pred(np.sqrt(eta_sq),np.sqrt(rho_sq),x_p,x_data)
#* GP mean and covariance
#* Covariance from fit
y_post_mean = np.matmul(np.matmul(K_s,np.linalg.inv(K)),y_data)
y_post_Sigma = (K_ss - np.matmul(np.matmul(K_s,np.linalg.inv(K)),K_s.transpose()))
y_post_std = np.sqrt(np.diag(y_post_Sigma))
#* Covariance from data - to calculate residuals
K_data = K
K_s_data = kernel_pred(np.sqrt(eta_sq),np.sqrt(rho_sq),x_data,x_data)
y_post_mean_data = np.matmul(np.matmul(K_s_data,np.linalg.inv(K_data)),y_data)
residuals = y_data - y_post_mean_data
RMSE = np.sqrt(np.mean(residuals**2))
# Numerical precision
eps = np.finfo(float).eps
## 3. Sample from the posterior (multivariate Gaussian)
stds = np.sqrt(2) * special.erfinv(CredibleIntervalLevel)
#* Diagonalise the GP posterior covariance matrix
Vals,Vecs = np.linalg.eig(y_post_Sigma)
A = np.real(np.matmul(Vecs,np.diag(np.sqrt(Vals))))
y_posterior_middle = y_post_mean
y_posterior_upper = y_post_mean + stds*y_post_std
y_posterior_lower = y_post_mean - stds*y_post_std
#* Sample
y_posterior_samples = np.tile(y_post_mean,(nSamplesFromGPPosterior,1)).transpose() + np.matmul(A,np.random.randn(len(y_post_mean),nSamplesFromGPPosterior))
if np.abs(np.std(y_data)-1) < eps:
y_posterior_samples = y_posterior_samples*np.std(y_data) + np.mean(y_data)
return y_posterior_samples, y_posterior_middle, y_posterior_upper, y_posterior_lower, RMSE
def plot_gpr_posterior(x,xp,y,y_posterior_middle,y_posterior_upper,y_posterior_lower,y_posterior_samples,lable='x'):
fig, ax = plt.subplots(1,2)
ax[0].subplot(121)
ax[0].plot(xp,y_posterior_middle,color='k',linewidth=2.0,linestyle='-',zorder=1,label='GP posterior mean')
ax[0].plot(xp,y_posterior_upper,color='r',linewidth=2.0,linestyle='--',zorder=2,label='+/- std')
ax[0].plot(xp,y_posterior_samples[:,1],color=(0.8,0.8,0.8),zorder=3,label='Post samples')
ax[0].plot(xp,y_posterior_lower,color='r',linewidth=2.0,linestyle='--',zorder=4)
ax[0].plot(xp,y_posterior_samples,color=(0.8,0.8,0.8),zorder=0)
ax[0].plot(x,y,color='b',marker='.',linestyle='',label='Data')
ax[0].legend()
ax[1].subplot(122)
ax[1].plot(x,y,'b.',label="Data")
ax[1].legend(loc=2)
ax[1].ylabel('dx/dt')
ax[1].xlabel(lable)
fig.show()
return fig, ax
#############################
def dem_staging():
"""
Given a trained DEM, and correctly-formatted data, stage the data
NOTE: To use CV-DEMs, you'll need to call this for each CV fold, then combine.
Author: Neil P Oxtoby, UCL, November 2018
"""
pass
def dem_integrate():
pass
def dem_cv(x,
y,
cv_folds=StratifiedKFold(n_splits=10, shuffle=False, random_state=None)
):
"""
*** WIP ***
Run 10-fold cross-validation
FIXME: calculate errors using the test set
Author: Neil P Oxtoby, UCL, November 2018
"""
pystan_fit_gpr_cv = []
f = 0
for train_index, test_index in cv_folds.split(x, y):
x_train, x_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
#* Fit
pystan_fit_k = dem_fit(x_train,y_train,events)
#* Save
pystan_fit_gpr_cv.append(pystan_fit_k)
f+=1
print('CV fold {0} of {1}'.format(f,cv_folds.n_splits))
return pystan_fit_gpr_cv
def cv_similarity(mcmc_samples_cv,seq):
pvd_cv = []
for k in range(len(mcmc_samples_cv)):
pvd, seq = extract_pvd(ml_order=seq,samples=mcmc_samples_cv[k])
pvd_normalised = pvd/np.tile(np.sum(pvd,axis=1).reshape(-1,1),(1,pvd.shape[1]))
pvd_cv.append(pvd_normalised)
#* Hellinger distance between rows
# => average HD between PVDs
# => 45 HDs across 10-folds
hd = np.zeros(shape=(10,10))
for f in range(len(pvd_cv)):
for g in range(len(pvd_cv)):
for e in range(pvd_cv[f].shape[0]):
hd[f,g] += hellinger_distance(pvd_cv[f][e],pvd_cv[g][e])/pvd_cv[f].shape[0]
cvs = 1 - np.mean(hd[np.triu_indices(hd.shape[0],k=1)]**2)
return cvs
def dem_plot():
"""
WIP
Author: Neil P Oxtoby, UCL, November 2018
"""
pass
return fig, ax
def hellinger_distance(p,q):
#hd = np.linalg.norm(np.sqrt(p)-np.sqrt(q),ord=2)/np.sqrt(2)
#hd = (1/np.sqrt(2)) * np.sqrt( np.sum( [(np.sqrt(pi) - np.sqrt(qi))**2 for pi,qi in zip(p,q)] ) )
hd = np.sqrt( np.sum( (np.sqrt(p) - np.sqrt(q))**2 ) / 2 )
return hd
| true |
7557656e38c08e2295c753923085f1d14de47ab3 | Python | mayankmahavar111/Text-Classification | /stem.py | UTF-8 | 1,529 | 2.5625 | 3 | [] | no_license | import os
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer ,WordNetLemmatizer
output =[]
stop=set(stopwords.words('english'))
stemmer = PorterStemmer()
lemma =WordNetLemmatizer()
for j in range(22):
if j >9 :
with open('reut2-0'+str(j)+'.sgm','r') as f:
text=f.readlines()
else:
with open('reut2-00'+str(j)+'.sgm','r') as f:
text=f.readlines()
for x in text:
if 'LEWISSPLIT' in x:
test=x.split('LEWISSPLIT="')[1]
test=test.split('"')[0]
output.append(test)
f.close()
output=list(set(output))
print output
try:
os.makedirs('stem')
for x in output:
os.makedirs('stem/'+str(x))
except:
pass
count=0
for j in range(len(output)):
lis=os.listdir('lewisplit/'+str(output[j]))
for i in lis:
print count
count=count+1
f=open('lewisplit/'+str(output[j])+'/'+i)
text=f.read()
token=word_tokenize(text)
filtered=[]
try:
for x in token:
if x.lower() not in stop and x.isdigit() == False and x!=',':
filtered.append(str(stemmer.stem(x)))
test=""
for x in filtered:
if x == '.':
test+='.'+'\n'
else:
test+=x+' '
t=open('stem/'+str(output[j])+'/'+i,'wb')
t.write(test)
t.close()
except:
continue
| true |
ee9f27a57bce0ee2310cb7215773ea89b6ed1736 | Python | harimurugesan/Python-Workouts | /hacker rank & hacker earth codes/discount dbs problem.py | UTF-8 | 498 | 3.171875 | 3 | [] | no_license | def disc(prices):
newprice = []
discountprice = []
list1 = []
lenp = len(prices)
for i in range(lenp):
discountprice.append(int(input()))
print(discountprice)
for num1, num2 in enumerate(prices):
newprice.append(num2 - discountprice[num1])
print(newprice)
for p1,p2 in enumerate(prices):
if p2 == newprice[p1]:
list1.append(p1)
print(list1)
list1 = map(str,list1)
print(" ".join(list1))
disc([50,30,20,33,53,90])
| true |
e8b561871ca494b174032768da3342f78457f82c | Python | Nishi0607/DSAlgoPython | /Queue-Python.py | UTF-8 | 949 | 3.859375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 22:14:43 2020
@author: NK
"""
#Queue implementation in Python
class Queue:
def __init__(self):
self.queue = []
def isEmpty(self):
self.queue == []
def enqueue(self, data):
self.queue.append(data)
def dequeue(self):
if len(self.queue)==0 :
return -1
data = self.queue[0]
del self.queue[0]
return data
def peek(self):
return self.queue[0] if len(self.queue)>0 else -1
def size(self):
return len(self.queue)
#Testing
q = Queue()
q.enqueue(2)
q.enqueue(3)
q.enqueue(4)
print("Size %d" % q.size())
q.dequeue()
print("Peek queue: %d" % q.peek())
q.dequeue()
print("Size %d" % q.size())
print("Peek queue: %d" % q.peek())
q.dequeue()
print("Size %d" % q.size())
print("Peek queue: %d" % q.peek()) | true |
9ba6e92981a1d9d17602c2573a2d5b5b652d2023 | Python | ngroebner/Autoencoders | /Autoencoders/decoders.py | UTF-8 | 2,303 | 2.75 | 3 | [
"MIT"
] | permissive | import numpy as np
import torch
from torch import nn, optim
from torch.nn import functional as F
from Autoencoders.layers import Flatten, UnFlatten
class Decoder2DConv(nn.Module):
    """Decoder: latent vector -> linear layer -> stack of 2D convolutions.

    This is antisymmetric to the Encoder2DConv class. I.e., it
    takes as input a latent vector and outputs a 2D matrix with
    dimensions outputdims.

    TODO: Add blocks and residuals? - Maybe better for a different class.

    Args:
        outputdims (tuple): (height, width) of the decoded 2D output.
        latentdims (int): Number of dimensions in the latent space.
        nlayers (int): Number of convolutional layers. Default = 2.
        nchannels (int): Number of channels in the original input data.
            Default = 1.
        nfilters (int): Number of filters in each layer of the decoder.
            Default is 32.
        kernel_size (int): Convolution kernel size. Default = 3.
        stride (int): Convolution stride. Default = 1.
        padding (int): Convolution padding. Default = 1.
        use_batchnorm (bool): Insert BatchNorm2d after each non-final
            conv layer. Default = False.
    """
    def __init__(
        self,
        outputdims,
        latentdims,
        nlayers=2,
        nchannels=1,
        nfilters=32,
        kernel_size=3,
        stride=1,
        padding=1,
        use_batchnorm=False
    ):
        super(Decoder2DConv, self).__init__()
        self.nchannels = nchannels
        # NOTE(review): these two attributes store the hard-coded defaults
        # and ignore the kernel_size/stride arguments; the conv layers
        # below DO use the arguments. Confirm which was intended.
        self.kernel_size = 3
        self.stride = 1
        self.outputdims = outputdims
        self.nfilters = nfilters
        # Projects the latent vector to nfilters feature maps of outputdims.
        self.latentin = nn.Linear(latentdims, nfilters*outputdims[0]*outputdims[1])
        self.unflatten = UnFlatten()
        # string together arbitrary number of convolutional layers
        convlayers = []
        for layer in range(nlayers):
            if layer == nlayers - 1:
                #last layer, out_channels = nchannels, sigmoid activation layer
                convlayers.append(nn.Conv2d(nfilters, nchannels, kernel_size, stride, padding))
                convlayers.append(nn.Sigmoid())
            else:
                convlayers.append(nn.Conv2d(nfilters, nfilters, kernel_size, stride, padding))
                if use_batchnorm:
                    convlayers.append(nn.BatchNorm2d(nfilters))
                convlayers.append(nn.ReLU())
        self.convlayers = nn.Sequential(*convlayers)
    def forward(self, x):
        """Map latent vectors (N, latentdims) to outputs (N, nchannels, H, W)."""
        x = self.latentin(x)
        # UnFlatten presumably reshapes (N, nfilters*H*W) -> (N, nfilters, H, W);
        # defined in Autoencoders.layers -- TODO confirm its call signature.
        x = self.unflatten(x, self.nfilters, self.outputdims)
        return self.convlayers(x)
| true |
48f2953838a928d8258404aa498c43bde2ca9439 | Python | hxdaze/TCP-IP-Controlled-Robot | /server socket/robot-socket-gui.py | UTF-8 | 2,399 | 2.859375 | 3 | [] | no_license | # Robot Controller Client with socket-connection - made in May 2021 for TI502
# Matheus Seiji Luna Noda - 19190
# All imports
from PySimpleGUI import PySimpleGUI as gui
import struct, socket, sys, _thread
# Function that returns the port used for the socket
def get_port():
    """Return the fixed TCP port the robot server listens on."""
    port = 9001
    return port
# Function that returns the primary local IPv4 address of this machine
def get_ip():
    """Best-effort lookup of the primary local IPv4 address.

    connect() on a UDP socket performs only a route lookup (no packet is
    sent), after which getsockname() reveals the outbound address.
    Falls back to loopback when no route is available.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except OSError:
        # narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt / SystemExit
        IP = '127.0.0.1'
    finally:
        s.close()  # the original leaked this socket
    return IP
# Sets the GUI's theme
gui.theme('Reddit')
# Sets the GUI's layout (a TextField and two Buttons)
layout = [
    [gui.Text(size=(40,1), key='-OUTPUT-')],
    [gui.Button('Start'), gui.Button('Stop')]
]
# Creates the GUI's window
window = gui.Window('Webots Controller', layout)
# Event loop
# NOTE(review): if connect()/sendall() raises (e.g. timeout because the
# server is down), the finally-block closes the socket and the exception
# then propagates, killing the whole GUI loop -- consider catching OSError.
# The Start and Stop branches are near-identical and could share a helper.
while True:
    # Gets the events and the values that occur on the window
    event, values = window.read()
    # If the button 'Start' is pressed
    if event == 'Start':
        try:
            # Sets the message to 'start'
            msg = 'start'
            # Sets the socket and connects with the server
            socket.setdefaulttimeout(0.5)
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(0.5)
            sock.connect((get_ip(), get_port()))
            # Sends the encoded message
            sock.sendall(msg.encode())
        finally:
            # Closes the socket
            sock.close()
        # Updates the TextField
        window['-OUTPUT-'].update('Enviou mensagem \'start\'')
    # If the button 'Stop' is pressed
    elif event == 'Stop':
        try:
            # Sets the message to 'stop'
            msg = 'stop'
            # Sets the socket and connects with the server
            socket.setdefaulttimeout(0.5)
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(0.5)
            sock.connect((get_ip(), get_port()))
            # Sends the encoded message
            sock.sendall(msg.encode())
        finally:
            # Closes the socket
            sock.close()
        # Updates the TextField
        window['-OUTPUT-'].update('Saindo do socket')
    # If the window-close button is pressed, ends the Event Loop
    elif event == gui.WINDOW_CLOSED:
        break
# Closes the window
window.close()
80288ac7239c2e17a3fd081251ecc33eb92049d9 | Python | CCM-Balderas-Pensamiento-Comp/decisiones | /assignments/15ParkingFare/src/exercise.py | UTF-8 | 259 | 3.46875 | 3 | [] | no_license | def parking_cost(hours, minutes):
# Write your code here
def main():
    """Read the parking duration from stdin and print the computed fare."""
    hrs = int(input("Enter number of hours: "))
    mins = int(input("Enter number of minutes: "))
    fare = parking_cost(hrs, mins)
    print(fare)

if __name__ == '__main__':
    main()
| true |
8b4d919bf394018e3f3c27f7acb5fd50aaf0aaf7 | Python | martincastro1575/python | /courseraPython/begin/SumarDosDados.py | UTF-8 | 1,144 | 4.125 | 4 | [] | no_license | """Este programa tirara dos dados y sumara el resultado"""
import random
# Picks a uniformly random die face between 1 and 6
def TirarDado():
    """Roll one fair six-sided die.

    Bug fix: the original `int((random.random()*10 % 6) + 1)` is biased --
    folding [0, 10) modulo 6 makes faces 1-4 twice as likely as faces 5-6.
    random.randint(1, 6) is uniform.
    """
    Dado = random.randint(1, 6)
    return Dado
# Adds the two dice together
def SumarDosDados(d1, d2):
    """Return the sum of the two die faces d1 and d2."""
    return d1 + d2
# Rolls both dice and prints each face together with the total
def Resultado_a_mostrar():
    """Roll two dice and display the individual faces plus their sum."""
    primero = TirarDado()
    segundo = TirarDado()
    print('El primer dado es:', primero, 'y el segundo es: ', segundo, 'la suma es ', SumarDosDados(primero, segundo))
# Main game loop: the prompt changes depending on whether this is the
# player's first roll (a == True) or a repeat round.
def Sumar_Dos_Dados(a):
    """Ask the player whether to roll; 's' plays a round, 'n' exits.

    Control-flow note: both `while` loops act as one-shot `if`s (each body
    ends in `break`).  The `while/else` on the second loop fires only when
    its condition is false on entry -- i.e. when the answer was not 'n' --
    so after playing a round ('s') or on invalid input the function
    recurses with a=False and asks again.
    """
    if a == True:
        mensaje= input("¿quieres tirar los dados? presiona s para jugar o n para salir:")
    else:
        mensaje= input("¿Tirar otra vez? s/n:")
    # One-shot "if": play a round, then fall through to the while/else below.
    while (mensaje == 's') or (mensaje == 'S'):
        a=False
        Resultado_a_mostrar()
        break
    # 'n' breaks out (skipping the else) and ends the game; any other answer
    # leaves the loop without break, so the else re-invokes the prompt.
    while (mensaje == 'n') or (mensaje == 'N'):
        break
    else:
        Sumar_Dos_Dados(a)
def menu():
    """Entry point: start the dice game, flagging that this is round one."""
    primeratirada = True
    Sumar_Dos_Dados(primeratirada)

# Only start the interactive loop when run as a script; the original
# called menu() unconditionally, blocking on input() at import time.
if __name__ == '__main__':
    menu()
| true |
9b5f39e29d6532da01400e4cf4b3745b026e64f0 | Python | Jonathan-aguilar/DAS_Sistemas | /Ago-Dic-2018/Daniel Enriquez/ExamenExtraordinario/BaseExtra.py | UTF-8 | 1,535 | 2.796875 | 3 | [
"MIT"
] | permissive | import time, re, requests, os, errno, json, sqlite3
# Connect to (or create) the local SQLite database
db = sqlite3.connect('Cervecitas.db')
cursor = db.cursor()
# Fetch beers 1..50 from the Punk API, one HTTP request per beer.
# (The original iterated range(0, 50) and mutated the loop variable with
# `i += 1` to obtain ids 1..50; the range below states that directly.)
for beer_id in range(1, 51):
    url = 'https://api.punkapi.com/v2/beers/' + str(beer_id)
    request = requests.get(url)
    # Parse the JSON body once -- the original called request.json()
    # again for every single field, re-parsing the response each time.
    beer = request.json()[0]
    name = beer['name']
    # Insert into the three tables created by base.py (parameterized queries),
    # committing after each insert as before.
    cursor.execute("INSERT INTO INFOPRINCIPAL(id,name,description) VALUES(?,?,?)",
                   (beer['id'], name, beer['description']))
    db.commit()
    cursor.execute("INSERT INTO INFOSECUNDARIA(id,image,first_brewed,target_fg) VALUES(?,?,?,?)",
                   (beer['id'], beer['image_url'], beer['first_brewed'], beer['target_fg']))
    db.commit()
    cursor.execute("INSERT INTO INFOEXTRA(id,srm,abv,ph,tagline,attenuation_level) VALUES(?,?,?,?,?,?)",
                   (beer['id'], beer['srm'], beer['abv'], beer['ph'], beer['tagline'], beer['attenuation_level']))
    db.commit()
    # Progress message (Spanish): "Beer <name> was inserted correctly"
    print("Cerveza {}".format(name) + " " + "se insertó correctamente")
db.close()
| true |
948e6c589788028d915fc802b9e265bc49380c21 | Python | prachi411/Data_Structures_and_Algorithms.github.io | /Python/graph traversal.py | UTF-8 | 613 | 3.125 | 3 | [
"Unlicense"
] | permissive | class graph:
def __init__(self,edges):
self.edges=edges
self.graph_dic={}
for start,end in edges:
if start in self.graph_dic:
self.graph_dic[start].append(end)
else:
self.graph_dic[start]=[end]
print("graph_dic",self.graph_dic)
if __name__ == '__main__':
    # Directed flight routes; constructing the graph prints its adjacency dict.
    routes = [
        ("Mumbai", "Paris"),
        ("Mumbai", "Dubai"),
        ("Paris", "Dubai"),
        ("Paris", "New York"),
        ("Dubai", "New York"),
        ("New York", "Toronto"),
    ]
    route_graph = graph(routes)
| true |
bb5f849ab83576b7c11e473109bf7fe20d54565d | Python | gitandlucsil/python_classes | /complet_curs/oriented_objects/cont_bank.py | UTF-8 | 675 | 3.609375 | 4 | [] | no_license | class Cont:
def __init__(self, client, number):
self.client = client
self.number = number
self.money = 0
def pull_money(self, value):
self.money += value
def push_money(self, value):
self.money -= value
def report(self):
print("Cont number "+self.number+" has "+str(self.money))
class ContSpecial(Cont):
    """Account variant that additionally carries an overdraft limit."""

    def __init__(self, client, number, limit):
        # Delegate the shared owner/number/balance setup to the base class.
        super().__init__(client, number)
        self.limit = limit
# Exercise the plain account: deposit 200, withdraw 3.65, then report
# ("Cont number 1234-56 has 196.35").
cont = Cont("Me","1234-56")
cont.pull_money(200)
cont.push_money(3.65)
cont.report()
# ContSpecial defines no __str__/__repr__, so print(cont_spec) shows the
# default "<...ContSpecial object at 0x...>" representation.
cont_spec = ContSpecial("You", "65-4321", 2000)
print(cont_spec)
print(cont_spec.client)
ba1898f4b58303ecab1f93c1226894c02f0f5991 | Python | malithj/blog-examples | /mtpltlib-custom-hatch/main.py | UTF-8 | 1,863 | 3.296875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.hatch import Shapes, _hatch_types
from matplotlib.patches import Rectangle
class SquareHatch(Shapes):
    """
    Square hatch defined by a path drawn inside [-0.5, 0.5] square.
    Identifier 's'.

    NOTE(review): the Rectangle below is anchored at (-0.25, 0.25) with
    width/height 0.5, i.e. it spans x in [-0.25, 0.25] but y in
    [0.25, 0.75] -- not centered as the summary above suggests.  Confirm
    whether (-0.25, -0.25) was intended.
    """
    def __init__(self, hatch, density):
        self.filled = False
        self.size = 1
        self.path = Rectangle((-0.25, 0.25), 0.5, 0.5).get_path()
        # one row of squares per 's' in the hatch string, scaled by density
        self.num_rows = (hatch.count('s')) * density
        self.shape_vertices = self.path.vertices
        self.shape_codes = self.path.codes
        Shapes.__init__(self, hatch, density)
def main():
    """Render a grouped bar chart demonstrating the custom 's' hatch."""
    # attach our new hatch to matplotlib's hatch-type registry
    _hatch_types.append(SquareHatch)
    # plot random bars (fixed seed for reproducibility)
    np.random.seed(101)
    num = 10
    y_values = np.random.rand(num)
    x_values = np.arange(num)
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(111)
    color_blue = np.asarray([0, 107, 164]) / 255
    width = 0.5
    # group bars: even-indexed values hatched sparsely ('s'), odd-indexed
    # values hatched densely ('sss'), side by side per group
    ax.bar(x_values[::2] - width / 2, y_values[::2], color='w', edgecolor=color_blue, hatch='s', width=width)
    ax.bar(x_values[::2] + width / 2, y_values[1::2], color='w', edgecolor=color_blue, hatch='sss', width=width)
    # set labels and ticks
    ax.set_title("Bar Chart")
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    y_ticks = np.linspace(0, np.round(max(y_values), 0), 5)
    ax.set_yticks(y_ticks)
    ax.set_xticks(x_values[::2])
    ax.set_xticklabels(['a', 'b', 'c', 'd', 'e'])
    # clear spines and set color
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.spines['left'].set_bounds(y_ticks[0], y_ticks[-1])
    ax.spines['bottom'].set_bounds(x_values[0], x_values[-2])
    ax.spines['left'].set_color('darkorange')
    ax.spines['bottom'].set_color('darkorange')
    plt.show()
if __name__ == '__main__':
    main()
| true |
0981a3679c46bac83952cca95e6165c6bd9eb915 | Python | mushahiroyuki/beginning-python | /Chapter06/0611print-params2.py | UTF-8 | 432 | 3.421875 | 3 | [] | no_license | #@@range_begin(list1) # ←この行は無視してください。本文に引用するためのものです。
#ファイル名 Chapter06/0611print-params2.py
def print_params_2(title, *params):
print(title)
print(params)
#実行
print_params_2('引数:', 1, 2, 3)
print_params_2('引数はこれだけ:')
#@@range_end(list1) # ←この行は無視してください。本文に引用するためのものです。
| true |
a80e310d0af3d816d175ab5d110692da06c66ae5 | Python | cpe342/PythonCourse | /Lists/list_comp_inter.py | UTF-8 | 191 | 3.421875 | 3 | [] | no_license | num1=[1,2,3,4]
num2 = [3, 4, 5, 6]
# Elements of num1 that also appear in num2, preserving num1's order.
# (The original pre-assigned `answer = []` and then immediately rebound
# it, and wrapped the already-list results in list() before printing.)
answer = [n for n in num1 if n in num2]
print(answer)
names = ["Ellie", "Tim", "Matt"]
# Each name reversed and lower-cased.
answer2 = [n[::-1].lower() for n in names]
print(answer2)
15434546a032255ee7cfb29f30a6501d60d81d41 | Python | kdaivam/PythonPrep | /Leetcode/remove_duplicates_in_list.py | UTF-8 | 637 | 3.484375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 9 21:36:00 2019
@author: kanyad
"""
def removeDuplicates_count_by_2( nums) :
    """In-place: trim every run of equal consecutive values to at most two.

    Mutates `nums` and returns None.  Prints the list at the top of every
    iteration (debug output).  Uses del inside the scan, so the worst case
    costs O(n^2) element shifts.
    """
    i = 1
    cnt = 1  # length of the current run of equal consecutive values
    while i < len(nums):
        print(nums)
        if nums[i] == nums[i-1]:
            cnt += 1
        else:
            cnt = 1
        if cnt >2:
            # third (or later) copy in a row -- drop it and re-test index i
            del(nums[i])
        else:
            i += 1
# Demo: trim runs to at most two copies, report the distinct count, then
# collapse the list further so each value appears exactly once.
nums = [0,0,1,1,1,2,2,3,3,4]
removeDuplicates_count_by_2(nums)
s = set(nums)
print(len(s))  # number of distinct values (5)
n = 1
# Second pass: delete ALL consecutive duplicates, keeping one per run.
while n < len(nums):
    if nums[n] == nums[n-1]:
        del(nums[n])
        print(nums)  # show the list after each deletion
    else:
        n += 1
| true |
bf843bf241e023487d426b574f80a7db65cdf3ef | Python | molchiro/AtCoder | /old/ABC144/D.py | UTF-8 | 253 | 3.46875 | 3 | [] | no_license | import math
a, b, x = list(map(int, input().split()))
if a**2*b == x:
theta = 90
elif a**2*b/2 > x:
h = 2*x/a/b
theta = math.degrees(math.atan(h/b))
else:
h = 2*x/(a**2)-b
theta = math.degrees(math.atan(a/(b-h)))
print(90 - theta)
| true |
cf4323ca5710c59edb3e6e736b832dd19d8b1100 | Python | traffaillac/traf-kattis | /roundedbuttons.py | UTF-8 | 457 | 3.34375 | 3 | [] | no_license | from math import hypot
for _ in range(int(input())):
x, y, w, h, r, m, *clicks = map(float, input().split())
for i in range(int(m)):
X, Y = clicks[i * 2], clicks[i * 2 + 1]
inside = (
x <= X <= x+w and y+r <= Y <= y+h-r or
x+r <= X <= x+w-r and y <= Y <= y+h or
hypot(x+r-X, y+r-Y) <= r or
hypot(x+w-r-X, y+r-Y) <= r or
hypot(x+r-X, y+h-r-Y) <= r or
hypot(x+w-r-X, y+h-r-Y) <= r)
print('inside' if inside else 'outside')
print()
| true |
4e70e672a1965c8990383ccf0803b456a49a18cc | Python | ender8848/the_fluent_python | /chapter_18/multi_coroutine_spider.py | UTF-8 | 799 | 2.75 | 3 | [] | no_license | import time
import requests
from multiprocessing.dummy import Pool as ThreadPool
total = 100
thread = 4
async def request(loop):
url = 'http://127.0.0.1:5000'
future = loop.run_in_executor(None, requests.get, url)
response = await future
def divide(i):
import asyncio
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
tasks = [asyncio.ensure_future(request(loop)) for i in range(total//thread)]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
if __name__ == '__main__':
time0 = time.time()
pool = ThreadPool(thread)
i = [j for j in range(0, thread)]
pool.map(divide, i)
pool.close()
pool.join()
time1 = time.time()
print('爬取%d个网页,总花费时间: %.3f' % (total, time1 - time0), end='')
'''爬取100个网页,总花费时间: 3.302'''
| true |
a8130a7aaf3eb5028db76900692cf2c3dc8561a5 | Python | Mbabysbreath/Python_Test | /src01/hello.py | UTF-8 | 1,341 | 3 | 3 | [] | no_license | from selenium import webdriver
import time
# Launch Chrome through its WebDriver and load Baidu's home page.
# NOTE(review): the find_element_by_* helpers used below were removed in
# Selenium 4; this script targets Selenium 3.x -- confirm before upgrading.
driver = webdriver.Chrome()
# Open the page in the driver-controlled browser
driver.get("https://www.baidu.com/")
# Locate by id
# driver.find_element_by_id("kw").send_keys("大虞海棠")
# # time.sleep(6)
# driver.find_element_by_id("su").click()
# Locate by name
# driver.find_element_by_name("wd").send_keys("王一博")
# time.sleep(3)
# driver.find_element_by_id("su").click()
# time.sleep(6)
# Locate by class name, e.g. class="s_ipt nobg_s_fm_hover"
# driver.find_element_by_class_name("s_ipt nobg_s_fm_hover").send_keys("王一博")
# time.sleep(3)
# driver.find_element_by_class_name("btn self-btn bg s_btn btn_h btnhover").click()
# time.sleep(4)
# Locate by full link text
# driver.find_element_by_link_text("抗击肺炎").click()
# driver.find_element_by_partial_link_text("抗击肺炎").click()
# Locate by partial link text
# driver.find_element_by_partial_link_text("hao").click()
# Locate by XPath
# driver.find_element_by_xpath("//*[@id='kw']").send_keys("Lisa")
# driver.find_element_by_xpath("//*[@id='su']").click()
# Locate by CSS selector: classes use ".s_ipt", ids use "#su"
driver.find_element_by_css_selector(".s_ipt").send_keys("肖战")
driver.find_element_by_css_selector("#su").click()
time.sleep(3)
# Browser history: go back
driver.back()
time.sleep(5)
# Browser history: go forward
driver.forward()
time.sleep(3)
driver.quit()
| true |
57802cd00a33596ee0ee680deb3cd4255325e40a | Python | nonnikb/verkefni | /Lokapróf/1 Basics/Time calculation.py | UTF-8 | 572 | 4.0625 | 4 | [] | no_license | """Given seconds (int) calculate hours, minutes and seconds.
For example, given 80000 seconds that is 22 hours, 13 minutes and 20 seconds.
Hint 1: use integer division // and remainder %
Hint 2: we require that you create and output variables hours, minutes
and seconds but you will likely find an additional variable useful."""
# Split a number of seconds into hours, minutes and seconds.
sec = 80000  # originally read interactively: int(input("Input seconds: "))
# divmod performs each integer split in one step; the original mixed
# float division (sec/60 - hour*60) with int() truncation, which is
# fragile and needed redundant int() conversions on every line.
hour, rem = divmod(sec, 3600)    # whole hours and leftover seconds
minute, second = divmod(rem, 60) # whole minutes and leftover seconds
print(hour)
print(minute)
print(second)
| true |
b9b181065f40d5e9f6044622625c70ce4303be1e | Python | BarrettJB/CS104 | /lab1/lab1.py | UTF-8 | 505 | 3 | 3 | [] | no_license | #
# Lab 1, CS104
# Barrett Bryson 1252391
# Caleb Bieske 2219011
# 9-4-2014
#
from __future__ import division, print_function
# Python 2 compatibility shim: make input() behave like raw_input()
input = raw_input
from myro import *
init("COM40")
print("Done connecting")
# Make the robot drive in an arc by running one wheel at speed 0.4 and
# the other at full speed (1), for 20 seconds, then stop.
# (The original comment claimed speeds 0.4/0.75 and a 30-second run,
# which did not match the code below.  Presumably motors(left, right) --
# confirm against the myro API.)
print("Issuing motors command")
robot.motors(0.4, 1)
print("Doing nothing for 20 seconds")
wait(20)
print("Issuing stop command")
stop()
print("Done")
9196fafcdeb26e9802cb89d820002678d77e3e8d | Python | Gageowe/texquest | /screens.py | UTF-8 | 2,634 | 2.90625 | 3 | [] | no_license | class Screen:
    def __init__(self, content = None, icon = "*",width = 40, height = 10, top = 1, bottom = 1, left = 1, right = 1):
        """Render `content` centered inside an icon-bordered text box.

        Builds self.message (one string per screen row) and then joins the
        rows into self.content -- overwriting the original message text.
        NOTE(review): several apparent defects are documented inline below
        rather than fixed, since show() depends on the exact rendering.
        """
        self.content = content
        self.width = width
        self.height = height
        self.top = top
        self.bottom = bottom
        self.left = left
        self.right = right
        self.length = len(content)
        self.messageContent = []   # content split into screen-width lines
        self.message = []          # rendered rows of the screen
        self.rowSpace = self.width - self.left - self.right   # usable columns
        self.colSpace = self.height - self.top - self.bottom  # usable rows
        self.icon = icon
        self.lSpace = int((self.rowSpace - self.length)/2)     # left padding
        self.rSpace = int((self.rowSpace - self.length +1 )/2) # right padding
        if self.length <= (self.rowSpace):
            self.messageContent.append(self.content)
            self.rows = 1
        else:
            self.rows = int(self.length/self.rowSpace) + 1
            for row in range(0,self.rows):
                if (row + 1)*self.rowSpace > self.length:
                    # NOTE(review): missing ':' -- this appends a single
                    # CHARACTER, not the remaining slice; likely intended
                    # self.content[row*self.rowSpace:]
                    self.messageContent.append(self.content[row*self.rowSpace])
                else:
                    self.messageContent.append(self.content[row*self.rowSpace:(row+1)*self.rowSpace])
            print(self.messageContent)
        self.tSpace = int((self.colSpace - self.rows)/2)  # rows above the text
        self.bSpace = int((self.colSpace - self.rows + 1)/2)  # rows below
        for row in range(0,self.height):
            self.message.append("")
            # NOTE(review): reset on every screen row, so multi-line content
            # always renders messageContent[0].
            self.rowNum = 0
            if row < top or row > (height - 1 - bottom):
                # top/bottom border rows: solid icons
                for i in range(self.width):
                    self.message[row] += self.icon
            elif (row >= self.top and row < self.top + self.tSpace) or (row <= self.height - self.bottom and row >= self.top + self.tSpace + self.rows):
                # padding rows: border icons with blank interior
                for i in range(self.width):
                    if i < self.left or i >= self.width - self.right:
                        self.message[row] += self.icon
                    else:
                        self.message[row] += " "
            else:
                # text rows: border, padding, then the whole content line
                # inserted at a single column position
                for i in range(self.width):
                    if i < self.left or i >= self.width - self.right:
                        self.message[row] += self.icon
                    elif i < self.left + self.lSpace or i >= self.width - self.right - self.rSpace:
                        self.message[row] += " "
                    elif i == self.left + self.lSpace + 1:
                        self.message[row] += self.messageContent[self.rowNum]
                        self.rowNum += 1
        self.content = ""
        for row in self.message:
            self.content += row + "\n"
    def show(self):
        """Print the fully rendered screen to stdout."""
        print(self.content)
| true |
e5b2fce8fb9382bb0f7f01336f05861d1088a7a9 | Python | bkandel/BiteBar | /ConvertToTxt.py | UTF-8 | 534 | 2.75 | 3 | [] | no_license | #!/usr/bin/python
import glob
import os
import struct
# Convert every .dat file in the working directory to a .txt file of
# comma-separated records.
FilesToConvert = glob.glob('*.dat')
for File in FilesToConvert:
    BaseFileName = os.path.splitext(File)[0]
    # `with` guarantees the handle is closed even if unpack fails
    with open(File, 'rb') as fid:
        BinaryString = fid.read()
    AsciiData = []
    i = 115  # skip the 115-byte header -- TODO confirm the header size
    # Each record is 28 bytes: one big-endian uint32 followed by six
    # big-endian floats.  The original condition used '<', which silently
    # dropped the final record when the file ended exactly at i + 28.
    while (i + 28) <= len(BinaryString):
        AsciiData.append(struct.unpack('>Iffffff', BinaryString[i:i+28]))
        i = i + 28
    # The original never closed (or flushed) the output file.
    with open(BaseFileName + '.txt', 'w') as outfile:
        for line in AsciiData:
            outfile.write(str(line).strip('()') + '\n')
| true |
03093a3318187bbbbb8ce295821f782664412d20 | Python | ringhilterra/DSE201-Data-Management-Systems | /final/testing_data/soccer_data_generator.py | UTF-8 | 2,078 | 2.96875 | 3 | [] | no_license | import random
import pandas as pd
filename = "soccer_test_data_big.sql"
numTeams = 1000
numMatches = 100000
# Candidate random matches, kept in four parallel lists.
hlist = []   # home team id
vlist = []   # visiting team id
s1_list = [] # home team score
s2_list = [] # visiting team score
# The original used range(1, numMatches), generating only numMatches-1
# candidates; range(numMatches) honours the constant's name.
for _ in range(numMatches):
    h = random.randrange(1, numTeams + 1)
    v = random.randrange(1, numTeams + 1)
    # a team cannot play itself
    if h != v:
        hlist.append(h)
        vlist.append(v)
        s1_list.append(random.randrange(0, 6))
        s2_list.append(random.randrange(0, 6))
df = pd.DataFrame([hlist, vlist, s1_list, s2_list]).T
# do not want duplicate matches (home, away) teams
df = df.drop_duplicates(subset=[0, 1])
df = df.dropna()
df = df.astype(int)
# `with` guarantees the SQL file is flushed and closed even on error.
with open(filename, "w+") as f:
    # insert some corner cases to test
    f.write("INSERT INTO teams (name, coach) VALUES ('ateam', 'a');\n")
    # insert a team who plays in no game
    f.write("INSERT INTO teams (name, coach) VALUES ('ryan_team', 'ryan');\n")
    # insert a team who plays only one match, as away team, and loses
    f.write("INSERT INTO teams (name, coach) VALUES ('bob_team', 'bob');\n")
    f.write("INSERT INTO matches (hTeam, vTeam, hScore, vScore) VALUES ('ateam', 'bob_team', 2, 0);\n")
    # insert a team who plays only one match, as home team, and ties
    f.write("INSERT INTO teams (name, coach) VALUES ('joe_team', 'joe');\n")
    f.write("INSERT INTO matches (hTeam, vTeam, hScore, vScore) VALUES ('joe_team', 'ateam', 2, 2);\n")
    # insert a team who plays only one match, as home team, and wins
    f.write("INSERT INTO teams (name, coach) VALUES ('pal_team', 'pal');\n")
    f.write("INSERT INTO matches (hTeam, vTeam, hScore, vScore) VALUES ('pal_team', 'ateam', 4, 1);\n")
    for i in range(1, numTeams + 1):
        f.write("INSERT INTO teams (name, coach) VALUES ('team{0}', 'coach{1}');\n".format(i, i))
    # itertuples is far faster than four scalar .iloc lookups per row.
    for h, v, s1, s2 in df.itertuples(index=False):
        f.write("INSERT INTO matches (hTeam, vTeam, hScore, vScore) VALUES ('team{0}', 'team{1}', {2}, {3});\n".format(int(h), int(v), int(s1), int(s2)))
| true |
532612005343510281d53bae828726c877906f05 | Python | mramire8/structured | /utilities/amt_tokenizer.py | UTF-8 | 386 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | __author__ = 'maru'
class AMTSentenceTokenizer(object):
    """Splits AMT documents on the literal sentence-separator token."""

    # token the AMT pipeline inserts between sentences
    _SEP = "THIS_IS_A_SEPARATOR"

    def __init__(self):
        pass

    def tokenize_sents(self, doc):
        """Split every document in `doc`; returns a list of sentence lists."""
        return [self.tokenize(sent) for sent in doc]

    def tokenize(self, doc):
        """Split a single document into its sentences."""
        return doc.split(self._SEP)

    def __call__(self, doc):
        """Identity transform: the document is returned unchanged."""
        return doc

    def __str__(self):
        return self.__class__.__name__
434765246c329015c46316ccb907b6ba13ecb691 | Python | samuelyeewl/specmatch-emp | /specmatchemp/plots.py | UTF-8 | 5,999 | 3.1875 | 3 | [] | no_license | """
@filename plots.py
Helper functions to plot various data from SpecMatch-Emp
"""
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
def reverse_x():
    """Reverses the x-axis of the current figure"""
    left, right = plt.xlim()
    plt.xlim(right, left)
def reverse_y():
    """Reverses the y-axis of the current figure"""
    bottom, top = plt.ylim()
    plt.ylim(top, bottom)
def hide_x_ticks():
    """Hide x label ticks"""
    plt.gca().axes.get_xaxis().set_ticks([])
def hide_y_ticks():
    """Hide y label ticks"""
    plt.gca().axes.get_yaxis().set_ticks([])
def annotate_point(x, y, text, offset=5, offset_x=None, offset_y=None,
                   text_kw={}):
    """Annotates the point at a given x, y position (in data coordinates),
    at a given pixel offset.

    Args:
        x: x-coordinate of point
        y: y-coordinate of point
        text (str): String to annotate
        offset: (optional) pixel offset to use
        offset_x, offset_y: (optional) pixel offset to use in x, y directions
        text_kw (dict): (optional) any additional keywords to pass to plt.text
    """
    # Fill in each missing offset independently.  The original reset BOTH
    # offsets to `offset` whenever either one was None, silently discarding
    # a caller-supplied offset_x or offset_y.
    if offset_x is None:
        offset_x = offset
    if offset_y is None:
        offset_y = offset
    ax = plt.gca()
    trans_offset = transforms.offset_copy(ax.transData, units='dots',
                                          x=offset_x, y=offset_y)
    # text_kw is only read (never mutated), so the mutable default is benign
    plt.text(x, y, text, transform=trans_offset, **text_kw)
def annotate_spectrum(text, spec_offset=0, offset_x=10, offset_y=5,
                      align='left', text_kw={}):
    """Annotates a spectrum.
    Args:
        text (str): String to annotate
        spec_offset: (optional) Vertical offset of spectrum
        offset_x: (optional) Pixel offset from left/right boundary
        offset_y: (optional) Vertical pixel offset from spectrum
        align: (optional) 'left' or 'right' alignment for text
        text_kw (dict): (optional) any additional keywords to pass to plt.text
    """
    ax = plt.gca()
    xlim = ax.get_xlim()
    if align == 'left':
        xpos = xlim[0]
        offset_x = abs(offset_x)
    elif align == 'right':
        xpos = xlim[1]
        offset_x = -abs(offset_x)
    else:
        # silently ignore unknown alignments
        return
    # transform to pixel coords
    # (spec_offset + 1: the spectrum's continuum sits at y = 1 + spec_offset,
    # presumably normalized flux -- confirm against callers)
    disp_coords = ax.transData.transform((xpos, spec_offset + 1))
    disp_coords = (disp_coords[0] + offset_x, disp_coords[1] + offset_y)
    # invert transform to go back to data coords
    data_coords = ax.transData.inverted().transform(disp_coords)
    ax_coords = ax.transAxes.inverted().transform(disp_coords)
    # fix y position in data coordinates (fixed offset from spectrum)
    # but allow x position to float so we can pan horizontally
    trans = transforms.blended_transform_factory(ax.transAxes, ax.transData)
    bbox = dict(facecolor='white', edgecolor='none', alpha=0.8)
    plt.text(ax_coords[0], data_coords[1], text, bbox=bbox, transform=trans,
             horizontalalignment=align, **text_kw)
def label_axes(param_x=None, param_y=None, rescale=True):
    """Convenience function for tweaking axes to make plots

    Args:
        param_x (str): Parameter to plot on x-axis ('Teff', 'feh', 'radius')
        param_y (str): Parameter to plot on y-axis ('radius')
        rescale (bool): Whether to rescale
    """
    # Bug fix: the original compared strings with `is` (identity), which is
    # an implementation detail of string interning and not guaranteed to
    # work; `==` compares values.
    if param_x == 'Teff':
        reverse_x()
        plt.xlabel('Effective Temperature (K)')
        if rescale:
            plt.xticks([3000, 4000, 5000, 6000, 7000])
    if param_x == 'feh':
        plt.xlabel('[Fe/H] (dex)')
    if param_x == 'radius':
        plt.xlabel(r'$R\ (R_\odot)$')
        if rescale:
            ax = plt.gca()
            ax.set_xscale('log')
    if param_y == 'radius':
        plt.ylabel(r'Stellar Radius (Solar-radii)')
        if rescale:
            ax = plt.gca()
            ax.set_yscale('log')
            yt = [0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 1, 2, 3, 4, 5, 7, 10, 20]
            ax.set_yticks(yt, minor=False)
            ax.set_ylim(0.1, 20)
def set_tight_lims(data_x, data_y, center_x=None, center_y=None,
                   mode='symmetric', buf=0.3):
    """Sets plot limits around a target subset of data, centered at
    a given point.

    Without a center, limits span the data plus a `buf` fraction of its
    spread.  With a center, limits are computed from the data's signed
    distances to the center, either symmetrically or tightly per side.

    Args:
        data_x (np.ndarray): x-coordinates of data
        data_y (np.ndarray): y-coordinates of data
        center_x (optional [float]): x-coordinate of center point
        center_y (optional [float]): y-coordinate of center point
        mode: (optional) 'symmetric': Make limits symmetric about target
            'tight': Use asymmetric limits
        buf (float): Buffer radius
    """
    ax = plt.gca()
    if center_x is None:
        # No center: pad the raw data range by buf * spread on each side.
        maxx = max(data_x)
        minx = min(data_x)
        sepx = maxx - minx
        maxx = maxx + buf * sepx
        minx = minx - buf * sepx
        ax.set_xlim((minx, maxx))
    else:
        # Signed distances from the center; clamp so the center is included.
        distx = data_x - center_x
        maxx = max(max(distx), 0)
        minx = min(min(distx), 0)
        if mode == 'symmetric':
            limx = max(abs(maxx), abs(minx))
            limx = limx + buf * limx
            ax.set_xlim((center_x - limx, center_x + limx))
        elif mode == 'tight':
            # Pad each side by buf of its own extent; when one side has no
            # extent, borrow the opposite side's buffer so limits differ.
            maxx = maxx + buf * maxx if maxx != 0 else -buf * minx
            minx = minx + buf * minx if minx != 0 else -buf * maxx
            ax.set_xlim((center_x + minx, center_x + maxx))
    if center_y is None:
        maxy = max(data_y)
        miny = min(data_y)
        sepy = maxy - miny
        maxy = maxy + buf * sepy
        miny = miny - buf * sepy
        ax.set_ylim((miny, maxy))
    else:
        disty = data_y - center_y
        maxy = max(max(disty), 0)
        miny = min(min(disty), 0)
        if mode == 'symmetric':
            limy = max(abs(maxy), abs(miny))
            limy = limy + buf * limy
            ax.set_ylim((center_y - limy, center_y + limy))
        elif mode == 'tight':
            maxy = maxy + buf * maxy if maxy != 0 else -buf * miny
            miny = miny + buf * miny if miny != 0 else -buf * maxy
            ax.set_ylim((center_y + miny, center_y + maxy))
| true |
0341c61c76c02fe64b43c73874bb62a6e13f7ee3 | Python | GNeki4/urfuwmbot | /sheet_addition.py | UTF-8 | 2,273 | 3.25 | 3 | [] | no_license | from datetime import datetime, timedelta
import time
def get_dates_from_now(n):
    """Return "DD.MM" strings for today and the following n-1 days."""
    return [(datetime.today() + timedelta(days=offset)).strftime("%d.%m")
            for offset in range(n)]
def merge_cells(sheetId, ss, top, bottom, left, right):
    """Issue a MERGE_ALL batch-update for the given cell range on spreadsheet `ss`.

    NOTE(review): the row bounds are shifted from 1-based to 0-based (the
    -1) but the column bounds are passed through unchanged -- confirm that
    callers really supply them in different conventions.
    """
    cell_range = {
        "sheetId": sheetId,
        "startRowIndex": top - 1,
        "endRowIndex": bottom - 1,
        "startColumnIndex": left,
        "endColumnIndex": right,
    }
    request = {"mergeCells": {"mergeType": "MERGE_ALL", "range": cell_range}}
    ss.batch_update({"requests": [request]})
def get_time_hours(*args):
    """Validate that every argument is an int; return them sorted, deduplicated.

    Raises TimeIsNotIntException for any non-int argument.
    NOTE(review): bool passes isinstance(x, int) -- confirm that is acceptable.
    """
    hours = []
    for value in args:
        if isinstance(value, int):
            hours.append(value)
        else:
            raise TimeIsNotIntException("Ввел хуйню полную а не время")
    return sorted(set(hours))
class TimeIsNotIntException(Exception):
    """Raised by get_time_hours() when a supplied hour is not an int."""
    pass
# get_time_hours(10.6)
def get_days_of_the_week(n):
    """Return bilingual (Russian/English) weekday labels for today and the
    following n-1 days."""
    # indexed by datetime.weekday(): 0 = Monday ... 6 = Sunday
    labels = [
        "Понедельник\nMonday",
        "Вторник\nTuesday",
        "Среда\nWednesday",
        "Четверг\nThursday",
        "Пятница\nFriday",
        "Суббота\nSaturday",
        "Воскресенье\nSunday",
    ]
    days = []
    for offset in range(n):
        day = datetime.today() + timedelta(offset)
        days.append(labels[day.weekday()])
    return days
'''
lma1 = get_dates_from_now(2)
for day in lma1:
print(day)
'''
'''
lmao = get_days_of_the_week(10)
for day in lmao:
print(day)
'''
# print(time.strftime("%H:%M")) | true |
094f9fd68e9c2acf3a89a113bf5a7735768e424d | Python | zhaojunqin93/Reinforement_Learning | /RL/Policy Gradient/Policy_Gradient.py | UTF-8 | 2,983 | 2.921875 | 3 | [] | no_license | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
class PolicyGradient:
    """REINFORCE (Monte-Carlo policy gradient) agent built on TensorFlow 1.x.

    Collects one episode of (observation, action, reward) transitions,
    then trains a two-layer softmax policy network on the discounted,
    normalized returns.
    """
    def __init__(self, n_features, n_actions, learning_rate = 0.01, reward_decay = 0.95):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay  # reward discount factor
        # per-episode buffers: observations, actions, rewards
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []
        self.cost_his = []  # loss recorded after each learn() call
        self._build_net()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
    def _build_net(self):
        """Build the TF1 graph: placeholders, fc1 -> softmax fc2, loss, Adam."""
        with tf.variable_scope('inputs'):
            self.tf_obs = tf.placeholder(tf.float32, [None, self.n_features], name='observation')
            self.tf_acts = tf.placeholder(tf.int32, [None, ], name='actions_num')
            self.tf_vt = tf.placeholder(tf.float32, [None, ], name='action_value')
        layer = tf.layers.dense(self.tf_obs,
                                32,
                                tf.nn.relu,
                                kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                                bias_initializer=tf.constant_initializer(0.1),
                                name='fc1')
        self.all_act = tf.layers.dense(layer,
                                       self.n_actions,
                                       tf.nn.softmax,
                                       kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                                       bias_initializer=tf.constant_initializer(0.1),
                                       name='fc2')
        with tf.variable_scope('loss'):
            # negative log-likelihood of the taken action, weighted by the
            # (normalized) return tf_vt
            log_prob = tf.reduce_sum(-tf.log(self.all_act) * tf.one_hot(self.tf_acts, self.n_actions), axis=1)
            self.loss = tf.reduce_mean(log_prob * self.tf_vt)
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
    def choose_action(self, observation):
        """Sample an action from the policy's softmax output for one observation."""
        prob_weights = self.sess.run(self.all_act, feed_dict={self.tf_obs: observation[np.newaxis, :]})
        action = np.random.choice(range(prob_weights.shape[1]), p=prob_weights.ravel())
        return action
    def store_transition(self, s, a, r):
        """Append one (state, action, reward) transition to the episode buffers."""
        self.ep_obs.append(s)
        self.ep_as.append(a)
        self.ep_rs.append(r)
    def learn(self):
        """Run one gradient step on the stored episode; returns the normalized returns."""
        # discount and normalize episode reward
        discounted_ep_rs_norm = self._discount_and_norm_rewards()
        # train on episode
        _, cost = self.sess.run([self.train_op, self.loss], feed_dict={
            self.tf_obs: np.vstack(self.ep_obs),  # shape=[None, n_obs]
            self.tf_acts: np.array(self.ep_as),  # shape=[None, ]
            self.tf_vt: discounted_ep_rs_norm,  # shape=[None, ]
        })
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []  # empty episode data
        self.cost_his.append(cost)
        return discounted_ep_rs_norm
    def _discount_and_norm_rewards(self):
        """Compute discounted returns for the episode and z-score normalize them."""
        # discount episode rewards
        # NOTE(review): zeros_like inherits ep_rs's dtype -- with integer
        # rewards the discounted values get truncated; confirm rewards are
        # floats upstream.
        discounted_ep_rs = np.zeros_like(self.ep_rs)
        running_add = 0
        for t in reversed(range(0, len(self.ep_rs))):
            running_add = running_add * self.gamma + self.ep_rs[t]
            discounted_ep_rs[t] = running_add
        # normalize episode rewards
        discounted_ep_rs -= np.mean(discounted_ep_rs)
        discounted_ep_rs /= np.std(discounted_ep_rs)
        return discounted_ep_rs
    def plot_cost(self):
        """Plot the recorded loss history over training steps."""
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()
b444104cdb08e4aa3b75295bc041858ba9de96e0 | Python | SINHOLEE/Algorithm | /python/SSAFY_정규수업/9월/서울2반9월16일/순열.py | UTF-8 | 716 | 2.90625 | 3 | [] | no_license | # arr = [3, 1, 6, 4]
#
# def perm(r):
# global count
# count+= 1
# if len(arr) == r:
# print(temp, 'count = ',count)
# return
# for j in range(len(arr)):
# if visited[j] == False:
# visited[j] = True
# temp[r] = arr[j]
# perm(r + 1)
# visited[j] = False
#
#
# visited = [False] * len(arr)
# temp = [0] * len(arr)
# count = 0
# perm(0)
def perm(depth, temp):
    """Print every permutation of `temp` using in-place swaps.

    Bug fix: the original iterated `for i in range(3)`, which also swaps
    positions that are already fixed, printing 3**3 = 27 duplicated
    arrangements instead of the 6 permutations.  Swapping only from
    `depth` onward yields each permutation exactly once.  The hard-coded
    length 3 is also generalized to len(temp).
    """
    if depth == len(temp):
        print(temp)
        return
    for i in range(depth, len(temp)):
        temp[depth], temp[i] = temp[i], temp[depth]
        perm(depth + 1, temp)
        temp[depth], temp[i] = temp[i], temp[depth]  # restore before next swap
perm(0, [0, 1, 2])
86960b1b5c6f444c6f09ef2def68d11362a0c84f | Python | pabluc/test-gh-raspberry | /led.py | UTF-8 | 319 | 2.96875 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)

# BCM pin driving the LED under test, and a human-readable label.
pinout = 18
color = "Green"

GPIO.setup(pinout, GPIO.OUT)

# Blink once: on for a second, then off for a second.
# print() calls (instead of Python-2-only print statements) keep the
# script runnable under both Python 2 and Python 3.
print("LED on N" + str(pinout) + " " + color)
GPIO.output(pinout, GPIO.HIGH)
time.sleep(1)
print("LED off N" + str(pinout) + " " + color)
GPIO.output(pinout, GPIO.LOW)
time.sleep(1)
| true |
291f279a6ceb9939cefa4227e2be48ac44df6b9b | Python | gauravsinha12/Screen-Recorder-In-Python | /samaye.py | UTF-8 | 376 | 3.25 | 3 | [] | no_license | from datetime import datetime
# Ask for a meeting start time and print how long from now until then.
# tsub is pre-initialised so the final print cannot hit an unbound name
# (previously an unused `tdelta` was initialised instead, so any parse
# failure ended in a NameError at the print below).
tsub = None
try:
    s1 = input("enter the time to start meeting ")
    current = datetime.now().time()
    s2 = f"{current.hour}:{current.minute}:{current.second}"
    FMT = '%H:%M:%S'
    # Difference between the target time and the current time of day.
    tsub = datetime.strptime(s1, FMT) - datetime.strptime(s2, FMT)
except ValueError:
    # strptime raises ValueError for anything not matching HH:MM:SS.
    print("Enter in this format for example (HH:MM:SS) :- 10:45:45")
print(tsub)
97f35448773dd049515d14bb54e990ec1d609112 | Python | brovador/advent-of-code-python-2017 | /day24/main2.py | UTF-8 | 1,314 | 2.84375 | 3 | [] | no_license | #encoding: utf-8
import os
import re
import string
import sys
# Best bridge found so far by main()'s exhaustive search: part 2 prefers
# the longest bridge, breaking ties by total strength.
max_strength = 0
max_length = 0
def main():
    """Solve Advent of Code 2017 day 24 part 2.

    Reads port components ("a/b" per line) from ./input.txt, exhaustively
    builds every bridge starting from a 0-pin component, and prints the
    strength of the strongest among the longest bridges.
    """
    input_file = './input.txt'
    with open(input_file, 'r') as f:
        # Each line "a/b" becomes [a, b]; list() keeps this valid on
        # Python 3, where map() is lazy.
        lines = [list(map(int, l.strip().split('/'))) for l in f]

    # Append each component's strength: [a, b, a + b]. The previous
    # bool-returning comparator passed positionally to sorted() was not a
    # valid cmp function (and is Python-2-only); a key sort is used
    # instead -- the search result does not depend on the order.
    ports = sorted([line + [sum(line)] for line in lines], key=lambda p: p[2])

    starting_ports = [port for port in ports if port[0] == 0]

    def add_port(port_list, remaining_ports):
        """Depth-first extend port_list with every compatible component,
        updating the global best (longest, then strongest) at dead ends."""
        global max_strength
        global max_length
        last_pin = port_list[-1][1]
        candidates = [port for port in remaining_ports
                      if port[0] == last_pin or port[1] == last_pin]
        if not candidates:
            # Dead end: score this bridge.
            length = len(port_list)
            strength = sum(port[2] for port in port_list)
            if length > max_length or (length == max_length and strength > max_strength):
                max_length = length
                max_strength = strength
        else:
            for c in candidates:
                rest = [port for port in remaining_ports if port != c]
                # Orient the component so its first pin matches the bridge end.
                oriented = c if c[0] == last_pin else [c[1], c[0], c[2]]
                add_port(port_list + [oriented], rest)

    for starting_port in starting_ports:
        remaining = [port for port in ports if port != starting_port]
        add_port([starting_port], remaining)

    print(max_strength)


if __name__ == '__main__':
    main()
1d2c9a4252d76c9f7f4b49a650e666c00f3ce63a | Python | ARJOM/testes-sistema | /tribos/backend/app/utils/getAge.py | UTF-8 | 228 | 3.046875 | 3 | [] | no_license | from datetime import datetime
def get_age(date):
    """Return whole years elapsed between *date* ('YYYY-MM-DD') and today."""
    today = datetime.now()
    born = datetime.strptime(date, "%Y-%m-%d")
    completed_years = today.year - born.year
    # Subtract one if this year's birthday has not happened yet.
    if (today.month, today.day) < (born.month, born.day):
        completed_years -= 1
    return abs(completed_years)
| true |
142ab16a96affd6ce2f29b49bea45cd8206d1c53 | Python | sublee/josa | /josa.py | UTF-8 | 984 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import warnings
from korean import Loanword, Noun, Particle, hangul, morphology
# Deprecation notice fires once, at import time: this module is only a
# thin shim over the "korean" package.
warnings.warn('This library has been deprecated. Use "korean" instead.',
              DeprecationWarning)
def has_jongseong(word, lang='eng'):
    """Return True if the word's final syllable ends with a jongseong
    (final consonant); raise ValueError if the word reads as empty."""
    if lang == 'kor':
        word = Noun(word)
    else:
        # English is transliterated via the Dutch table.
        word = Loanword(unicode(word), 'nld' if lang == 'eng' else lang)
    try:
        return bool(hangul.get_final(word.read()[-1]))
    except IndexError:
        raise ValueError
def josa(word, particle, lang='eng'):
    """Pick the allomorph of *particle* that correctly follows *word*;
    raise ValueError if the word reads as empty."""
    if lang == 'kor':
        word = Noun(word)
    else:
        # English is transliterated via the Dutch table.
        word = Loanword(unicode(word), 'nld' if lang == 'eng' else lang)
    try:
        return morphology.pick_allomorph(Particle(particle), suffix_of=word)
    except IndexError:
        raise ValueError
def append(word, type, lang='eng', spacing=False):
    """Return *word* with its correct particle attached, optionally space-separated."""
    separator = ' ' if spacing else ''
    return word + separator + josa(word, type, lang)
| true |
3cc50a9911a77726966cd90bf0709293b458b593 | Python | adityanshastry/Car-alarm-trust | /common/Utils.py | UTF-8 | 3,943 | 2.640625 | 3 | [] | no_license | from __future__ import division
import numpy as np
from sklearn.utils.extmath import cartesian
import Constants
def scale_to_fourier_basis(value, bounds):
    """Map *value* linearly from the interval (bounds[0], bounds[1]) onto [0, 1]."""
    lower, upper = bounds[0], bounds[1]
    return (value - lower) / (upper - lower)
def update_states_to_bounds(state):
    """Clamp both state components into the [low, high] ranges declared in
    Constants.states and return the mutated state."""
    state[0] = min(max(state[0], Constants.states[0][0]), Constants.states[0][1])
    state[1] = min(max(state[1], Constants.states[1][0]), Constants.states[1][1])
    return state
def get_fourier_basis_constants(fourier_basis_order):
    """Return every (c1, c2) integer coefficient pair with both entries in
    [0, fourier_basis_order], as the cartesian product of the axis range."""
    axis = np.arange(0, fourier_basis_order + 1, 1)
    return cartesian([axis, axis])
def get_action_distribution(max_action, num_actions, epsilon):
    """Build an epsilon-greedy distribution that favours *max_action*:
    every action gets epsilon / num_actions, and the greedy action an
    additional (1 - epsilon)."""
    distribution = np.ones(shape=num_actions) * epsilon / num_actions
    distribution[Constants.actions[max_action]] += 1 - epsilon
    return distribution
def get_trial_splits(max_trials):
    """Split [0, max_trials) into consecutive [start, start + 100] windows.

    The last window may extend past max_trials, matching the stepping of
    range(0, max_trials, 100). Returns [] when max_trials <= 0.
    """
    # Direct comprehension: the former always-true index guard and the
    # unreachable trailing `pass` added nothing.
    return [[start, start + 100] for start in range(0, max_trials, 100)]
def get_probabilities_for_observations(observations_df):
    """Compute the empirical marginal distribution of each observation column.

    Returns a dict mapping column name -> {value: fraction of rows holding
    that value}. Boolean columns (alcohol / drugs / distracted) are keyed
    1 for True and 0 for False. A single helper replaces the previous
    copy-pasted per-column/per-value expressions.
    """
    total_instances = len(observations_df.index)

    def share(column, value):
        # Fraction of rows where the given column equals the given value.
        series = observations_df[column]
        return series[series == value].count() / total_instances

    observation_stats = {
        "age": {value: share("age", value) for value in (0, 1, 2, 3)},
        "accidents": {value: share("accidents", value) for value in (0, 1, 2)},
        "fatalities": {value: share("fatalities", value) for value in (0, 1, 2)},
        "sex": {value: share("sex", value) for value in (1, 2)},
        "alcohol": {1: share("alcohol", True), 0: share("alcohol", False)},
        "drugs": {1: share("drugs", True), 0: share("drugs", False)},
        "distracted": {1: share("distracted", True), 0: share("distracted", False)},
    }
    return observation_stats
def main():
    """Ad-hoc smoke check: print the trial splits for 100 trials.

    Uses the print() function form so the module stays importable under
    Python 3 (the bare print statement was Python-2-only).
    """
    print(get_trial_splits(100))


if __name__ == '__main__':
    main()
| true |
1bc4af1bb0daef25fcf4fb0db67eb22c9b5592d5 | Python | ShirleyMwombe/Python-Training | /Stringmethods.py | UTF-8 | 271 | 3.46875 | 3 | [] | no_license | name = "SHirley"
# Scratch demo of common str methods; all but the last call are kept
# disabled for reference.
#print(name.find("r"))
#print(len(name))
#print(type(name))
#print(name.capitalize())
#print(name.count("l"))
#print(name.upper())
#print(name.lower())
#print(name.isdigit())
#print(name.isalpha())
#print(name.replace("H","k"))
print(name * 3)
72a4508b128d0b0c80230468d3aa5a5abda35e9a | Python | zhouyuels/webTest | /WebTEST/main/commom/init/Browser.py | UTF-8 | 2,121 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @FileName :Browser.py
# @Time :2019/12/3 17:26
# @Author :ZhouYue
# @Description :Browser driver setup; exposes the shared webdriver instance
import os
from selenium import webdriver
from main.config.readconfig import Readconfig
from main.commom.init.globalvar import globalvar
from main.commom.tools.log import log
class Browser():
    # Shared logger for driver lifecycle messages.
    logs = log.Log()
    logger = logs.getlog()
    """
    Obtain the browser driver.
    """
    # NOTE(review): everything below runs at class-definition time, so a
    # headless Chrome driver is launched as soon as this module is
    # imported -- confirm that is intentional.
    path = os.path.split(os.path.realpath(__file__))[0]
    setupPath = os.path.join(path, "../../config/configFile/SetUp.ini")
    # Browser name (e.g. "Ie" or "Chrome") read from SetUp.ini.
    browser = Readconfig(setupPath).get_value("BROWSER", "browser")
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    driver = webdriver.Chrome(globalvar().DriverPath(browser),chrome_options=option)
    # try:
    #     if browser == "Ie":
    #         driver = webdriver.Ie(globalvar().DriverPath(browser))
    #     if browser == "Chrome":
    #         driver = webdriver.Chrome(globalvar().DriverPath(browser))
    # except Exception as e:
    #     logger.error("Failed to start the browser driver")
    #     raise
    # else:
    #     driver.quit()
    # def __init__(self):
    #     path = os.path.split(os.path.realpath(__file__))[0]
    #     setupPath = os.path.join(path, "../../config/SetUp.ini")
    #     self.browser = Readconfig(setupPath).get_value("BROWSER","browser")
    def getDriver(self):
        """Return the shared driver instance."""
        return Browser.driver
    def setDriver(self):
        """Recreate the shared driver according to the configured browser.

        NOTE(review): if Browser.browser is neither "Ie" nor "Chrome" the
        local ``driver`` is never bound and the final assignment raises
        NameError -- confirm the config can only hold those two values.
        """
        if Browser.browser == "Ie":
            driver = self.Ie()
        if Browser.browser == "Chrome":
            driver = self.Chrome()
        Browser.driver = driver
    def Ie(self):
        """Launch and return an Internet Explorer driver."""
        driver = webdriver.Ie(globalvar().DriverPath("Ie"))
        # driver.implicitly_wait(5)
        return driver
    def Chrome(self):
        """Launch and return a Chrome driver."""
        driver = webdriver.Chrome(globalvar().DriverPath("Chrome"))
        # driver.implicitly_wait(5)
        return driver
if __name__ == "__main__":
    aa = Browser()
| true |
09255b8eb0862e85833b5e28ea3bfc7a2fceffbc | Python | igizm0/SimplePyScripts | /rumble (vibration) a xbox 360 controller/web/rumble.py | UTF-8 | 1,048 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: http://stackoverflow.com/questions/19749404/
import ctypes
# Define necessary structures
class XINPUT_VIBRATION(ctypes.Structure):
    """ctypes mirror of the Win32 ``XINPUT_VIBRATION`` structure.

    Both motor speeds are unsigned 16-bit WORDs (0-65535).
    """
    _fields_ = [
        ("wLeftMotorSpeed", ctypes.c_ushort),
        ("wRightMotorSpeed", ctypes.c_ushort),
    ]
# Load Xinput.dll
# NOTE(review): binds to XInput version 1.1 specifically; the import only
# works on Windows systems that ship that DLL.
xinput = ctypes.windll.xinput1_1

# Set up function argument types and return type
# XInputSetState(dwUserIndex, pVibration) -> DWORD result code.
XInputSetState = xinput.XInputSetState
XInputSetState.argtypes = [ctypes.c_uint, ctypes.POINTER(XINPUT_VIBRATION)]
XInputSetState.restype = ctypes.c_uint
def set_vibration(left_motor, right_motor, controller=0):
    """Set the rumble motor speeds of an XInput controller.

    Float speeds are treated as a 0.0-1.0 fraction and scaled to the
    0-65535 WORD range; int speeds are passed through as raw values.
    """
    def to_word(speed):
        # isinstance (not type ==) is the idiomatic check; scale only floats.
        return int(speed * 65535) if isinstance(speed, float) else speed

    vibration = XINPUT_VIBRATION(to_word(left_motor), to_word(right_motor))
    XInputSetState(controller, ctypes.byref(vibration))
| true |
54a8f9e3cba8472adaec23f0f44e99694913a4ee | Python | arkocal/tetai | /torch_learn_by_trial.py | UTF-8 | 3,471 | 2.671875 | 3 | [] | no_license | #
# Step 1. pick a field
# Step 2. pick 2 random moves
# Step 3. rate moves
# Step 4. play for NR_MOVES, re-evaluate
# Step 5. train by swapping ORIGINAL EVALUATIONS if worse > better
import random
import time
from ai_players import TorchAIPlayer
import utils
from mechanics import Mechanics
# Shared game-rules engine used throughout this script.
nes_tetris = Mechanics()
# Trainer hyper-parameters.
NR_MOVES = 10  # nominal rollout length (appears unused: the loop randomises 5-15)
EPOCHS = 1_000_000  # number of comparison trials to run
MIN_SCORE = -10*10 # should suffice as a "worse than anything" score for lost games
GAMMA = 0.01  # pull-together rate applied when score and height rankings disagree
ALPHA = 0.01  # push-apart rate applied when the score ranking is kept
class GameOver(Exception):
    """Raised when the next piece cannot be placed, i.e. the game is lost."""
def max_height(field):
    """Return the largest row index holding a filled cell (0 if none).

    Columns are scanned up to the first column's length, matching a
    rectangular field layout.
    """
    highest = 0
    for column in field:
        for y in range(len(field[0])):
            if column[y]:
                highest = max(highest, y)
    return highest
def play(ai_player, field, nr_moves):
    """Greedily place up to *nr_moves* pieces and return (field, score).

    Raises GameOver as soon as a piece cannot be spawned at the start
    placement.
    """
    mechanics = ai_player.mechanics
    for pieces_played, piece in enumerate(mechanics.piece_types):
        if pieces_played == nr_moves:
            break
        if not mechanics.can_place_piece(field, piece, mechanics.start_placement):
            raise GameOver
        placement, _ = ai_player.choose_placement(field, piece)
        field = mechanics.place_piece(field, piece, placement)
    return field, ai_player.score_field(field)
def fg_from_file(path):
    """Return a zero-argument callable yielding random pre-recorded fields.

    The file at *path* holds one serialized field per line (first
    whitespace-separated token); fields are loaded once, up front.
    """
    with open(path) as handle:
        serialized_fields = [row.split()[0] for row in handle]

    def generate_field():
        return utils.deserialize_field(random.choice(serialized_fields))

    return generate_field
# Trial-based trainer (steps 1-5 in the file header): repeatedly compare
# two random placements of the same piece, roll each branch forward, and
# nudge the score network toward the ranking implied by the outcomes.
field_generator_0 = fg_from_file("fields/fields")
ai_player = TorchAIPlayer(nes_tetris)
change_mind = 0
epoch_start = time.time()
for i in range(EPOCHS):
    if i and i%100 == 0:
        # Periodic progress report and checkpoint every 100 trials.
        print(i, change_mind, time.time()-epoch_start)
        epoch_start = time.time()
        change_mind = 0
        ai_player.dump("models/experimental/trial")
    # Steps 1-2: pick a recorded field and two random end placements.
    field = field_generator_0()
    piece = random.choice(nes_tetris.piece_types)
    placements = nes_tetris.get_valid_end_placements(field, piece)
    if not placements:
        continue
    p1, _ = random.choice(placements)
    p2, _ = random.choice(placements)
    field_1 = nes_tetris.place_piece(field, piece, p1)
    field_2 = nes_tetris.place_piece(field, piece, p2)
    # Step 3: current model's ratings of the two immediate outcomes.
    score_1 = ai_player.score_field(field_1)
    score_2 = ai_player.score_field(field_2)
    # Step 4: play each branch forward; losing scores MIN_SCORE with a
    # worst-case stack height of 25.
    nr_moves = random.randint(5, 15)
    try:
        future_field_1, future_score_1 = play(ai_player, field_1, nr_moves)
        height_1 = max_height(future_field_1)
    except GameOver:
        future_score_1 = MIN_SCORE
        height_1 = 25
    try:
        future_field_2, future_score_2 = play(ai_player, field_2, nr_moves)
        height_2 = max_height(future_field_2)
    except GameOver:
        future_score_2 = MIN_SCORE
        height_2 = 25
    # Step 5: when the score ranking coincides with a *higher* resulting
    # stack (higher appears to be treated as worse), pull the two scores
    # toward each other; otherwise push them further apart.
    # NOTE(review): future_score_1/2 are computed but never used below,
    # and score_1 == score_2 leaves score_1_new/score_2_new carrying the
    # previous trial's values (NameError on the very first trial) --
    # confirm this is intended.
    if (score_1 > score_2 and height_1 > height_2):
        change_mind += 1
        score_1_new = (score_1 + GAMMA*score_2)/(1+GAMMA)
        score_2_new = (score_2 + GAMMA*score_1)/(1+GAMMA)
    elif (score_2 > score_1 and height_2 > height_1):
        change_mind += 1
        score_1_new = (score_1 + GAMMA*score_2)/(1+GAMMA)
        score_2_new = (score_2 + GAMMA*score_1)/(1+GAMMA)
    elif score_1 > score_2:
        diff = score_1 - score_2
        score_1_new = score_1 + ALPHA*diff
        score_2_new = score_2 - ALPHA*diff
    elif score_2 > score_1:
        diff = score_2 - score_1
        score_1_new = score_1 - ALPHA*diff
        score_2_new = score_2 + ALPHA*diff
    ai_player.train([(field_1, score_1_new), (field_2, score_2_new)])
ai_player.dump("models/experimental/trial")
| true |
7e7a40af9dd3370c78fe8744e5b2d38476cb8398 | Python | vinayaklal98/ITDBot | /app/gsearch.py | UTF-8 | 277 | 2.84375 | 3 | [] | no_license | from googlesearch import search
def searching(query):
    """Return the top-10 Google results for *query* as {rank: url}.

    Ranks are 1-based; an empty dict is returned when the search yields
    no results. enumerate replaces the former manual counter, and the
    misleading for/else (which had no break, so the else always ran) is
    dropped -- behaviour is unchanged.
    """
    results = {}
    for rank, url in enumerate(search(query, tld="co.in", num=10, stop=10, pause=2), start=1):
        results[rank] = url
    return results
#query = input("Enter Search: ")
#searching(query) | true |
396d4adb7f3c7aca4d9103f9c68bf8c63c136567 | Python | phicau/olaFlow | /tutorials/wavemakerFlume/constant/pistonWaveGen.py | UTF-8 | 1,706 | 2.5625 | 3 | [] | no_license | #!/usr/bin/python
import numpy as np
def dispersion(T, h):
    """Solve the linear dispersion relation L = L0 * tanh(2*pi*h / L).

    Fixed-point iteration starting from the deep-water wavelength L0,
    stopping once successive iterates differ by less than 0.001 (or after
    100 iterations).
    """
    L0 = 9.81 * T ** 2 / (2. * np.pi)
    L = L0
    for _ in range(100):
        L_next = L0 * np.tanh(2. * np.pi / L * h)
        if abs(L_next - L) < 0.001:
            return L_next
        L = L_next
    return L
## Piston wavemaker data ##
# Target wave and run parameters (units presumably metres / seconds /
# degrees -- TODO confirm against the solver's conventions):
H = 0.1            # wave height
T = 3.0            # wave period
h = 0.4            # still-water depth
phase0 = 0.        # initial phase offset (radians)
direction = 0.     # wave propagation direction (degrees)
nPaddles = 1       # number of paddle segments
bLims = [0., 0.]   # paddle band limits along the wall
t0 = 0.            # start time of the generated series
tEnd = 31.         # end time of the generated series
dt = 0.05          # output time step
########################

# Calculations
# Wavelength from the linear dispersion relation, then wave number and
# angular frequency.
L = dispersion(T, h)
k = 2.*np.pi/L
w = 2.*np.pi/T

# Sampling instants, and the centre coordinate of each paddle segment.
times = np.linspace(t0, tEnd, round((tEnd-t0)/dt)+1)
coords = np.linspace(bLims[0], bLims[1], nPaddles+1)
coords = coords[:-1] + np.diff(coords)/2.

# Height-to-stroke ratio (appears to be the piston wavemaker transfer
# function -- TODO confirm) and the resulting paddle stroke S.
HoS = 4. * np.sinh(k*h)**2. / (np.sinh(2.*k*h) + 2.*k*h)
S = H/HoS

# Export
# Write the wavemakerMovement.txt dictionary: header, shared time series,
# then per-paddle position and free-surface elevation series.
fid = open('wavemakerMovement.txt', 'w')
fid.write('wavemakerType Piston;\n')
fid.write('tSmooth 1.5;\n')
fid.write('genAbs 0;\n\n')
fid.write('timeSeries {0}(\n'.format( len(times) ))
for t in times:
    fid.write('{0}\n'.format(t))
# NOTE(review): the .format() argument below is unused (the literal has
# no placeholder); harmless but confusing.
fid.write(');\n\n'.format( len(times) ))
fid.write('paddlePosition {0}(\n'.format( nPaddles ))
for i in range(0, nPaddles):
    fid.write('{0}(\n'.format( len(times) ))
    for t in times:
        # Paddle displacement: half-stroke cosine, phase-shifted per
        # paddle position for oblique wave directions.
        x = S/2. * np.cos(-w*t + np.pi/2. + phase0 + 2.*np.pi*coords[i]/L*np.sin(direction*np.pi/180.) )
        fid.write('{0}\n'.format(x))
    fid.write(')\n')
fid.write(');\n\n')
fid.write('paddleEta {0}(\n'.format( nPaddles ))
for i in range(0, nPaddles):
    fid.write('{0}(\n'.format( len(times) ))
    for t in times:
        # Target free-surface elevation H/2 * cos(...) at each paddle.
        x = H/2. * np.cos(-w*t + phase0 + 2.*np.pi*coords[i]/L*np.sin(direction*np.pi/180.) )
        fid.write('{0}\n'.format(x))
    fid.write(')\n')
fid.write(');\n\n')
fid.close()
| true |
76ef9f48be5b09e2a1c2253e4e74279cbbc46b1e | Python | elanstop/protein-classification-and-generation | /make_data.py | UTF-8 | 3,882 | 3.21875 | 3 | [] | no_license | from Bio import SeqIO
import numpy as np
import pickle
from random import shuffle, seed
# data downloaded in .fasta file format from UniProt
# funky amino letters are X,U,Z,B. We exclude sequences containing these letters.
# 100_to_200.fasta was created with the following search terms: length 100 to 200, complete sequences, evidence at
# protein level, reviewed. it was used as the source for the training files 100_to_200_natural.txt and
# 100_to_200_random.txt
# 100_to_200_transcript_level.fasta was created with the same search terms, only using evidence at transcript level
# rather than protein level. Most of the sequences are not found in the other file, but a small number of duplicates
# are dropped to create the testing set
class Preprocess:
    """Load protein sequences from a FASTA file, one-hot encode them, and
    build matched natural / shuffled datasets for training or testing."""

    def __init__(self, data_type, natural_output_file, random_output_file, raw_train_data='100_to_200.fasta',
                 raw_test_data="100_to_200_transcript_level.fasta", reference_list=None):
        # data_type selects the FASTA source: 'training' or 'testing'.
        self.data_type = data_type
        self.natural_output_file = natural_output_file
        self.random_output_file = random_output_file
        self.raw_train_data = raw_train_data
        self.raw_test_data = raw_test_data
        # reference_list: training sequences to exclude from a testing set.
        self.reference_list = reference_list
        self.code_dict = self.make_amino_dict()
        self.input_sequences = self.extract_sequences()
        self.encoded_sequences = self.encode()
        self.encoded_shuffled_sequences = self.shuffle_sequences()

    @staticmethod
    def make_amino_dict():
        """Map each of the 20 standard amino-acid letters to a one-hot vector."""
        amino_list = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W',
                      'Y']
        one_hots = np.eye(20, 20)
        code_dict = dict(zip(amino_list, one_hots))
        print(code_dict)  # debug output retained from the original workflow
        return code_dict

    def extract_sequences(self):
        """Parse the FASTA source, dropping sequences containing the
        ambiguous residues X, U, Z or B; for testing data also drop any
        sequence already present in reference_list (the training set)."""
        bad_letters = set('XUZB')
        if self.data_type == 'training':
            data = self.raw_train_data
        elif self.data_type == 'testing':
            data = self.raw_test_data
        else:
            # Guard: an unrecognised data_type would otherwise leave
            # `data` unbound and crash later with a NameError.
            raise ValueError("data_type must be 'training' or 'testing', got %r" % (self.data_type,))
        sequence_list = []
        for record in SeqIO.parse(data, "fasta"):
            letters = str(record.seq)
            if any(c in bad_letters for c in letters):
                continue
            sequence_list.append(list(letters))
        if self.data_type == 'testing':
            sequence_list = [s for s in sequence_list if s not in self.reference_list]
        return sequence_list

    def encode(self):
        """One-hot encode every extracted sequence."""
        return [[self.code_dict[letter] for letter in sequence]
                for sequence in self.input_sequences]

    def shuffle_sequences(self):
        """Return a randomly shuffled copy of each encoded sequence."""
        shuffled_sequence_list = []
        for sequence in self.encoded_sequences:
            seed()  # re-seed from system entropy before each shuffle
            shuffled = sequence.copy()
            shuffle(shuffled)
            shuffled_sequence_list.append(shuffled)
        return shuffled_sequence_list

    def save(self):
        """Pickle the natural and shuffled datasets to their output files.

        Context managers guarantee the files are closed even on error.
        """
        with open(self.natural_output_file, 'wb') as natural_file:
            pickle.dump(self.encoded_sequences, natural_file)
        with open(self.random_output_file, 'wb') as random_file:
            pickle.dump(self.encoded_shuffled_sequences, random_file)
# Runs at import time: build and persist the training set, then a testing
# set that excludes every sequence already seen in training.
training_data = Preprocess('training', 'new_training_natural_proteins.txt', 'new_training_random_proteins.txt')
training_data.save()
testing_data = Preprocess('testing', 'new_testing_natural_proteins.txt', 'new_testing_random_proteins.txt',
                          reference_list=training_data.input_sequences)
testing_data.save()
| true |
24695cb13b7e5bd0a6679fc88d767a6afc6c44ec | Python | max-kalganov/NN_subject | /Lab_3/classifier.py | UTF-8 | 2,534 | 2.8125 | 3 | [] | no_license | from os.path import join
import pandas as pd
from tensorflow.keras import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Dense
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot as plt
import numpy as np
from utils import get_dataset
# import TensorBoard as tb
from tensorboard.program import TensorBoard
class BinClassifier:
    """Two-layer dense network mapping flattened 28x28 inputs to 10
    sigmoid outputs, trained with binary cross-entropy."""

    def __init__(self):
        """Build and compile the model."""
        model = Sequential()
        model.add(Dense(28 * 28,
                        activation='relu',
                        kernel_initializer='random_normal',
                        input_dim=28 * 28,
                        name='features1'))
        model.add(Dense(10,
                        activation='sigmoid',
                        kernel_initializer='random_normal',
                        input_dim=28 * 28,
                        name='features'))
        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
        self.classif = model

    def train(self, x_train, y_train):
        """Fit for 100 epochs (batch size 1000, shuffled) and return the
        [loss, accuracy] evaluation on the training data."""
        self.classif.fit(x_train,
                         y_train,
                         batch_size=1000,
                         epochs=100,
                         shuffle=True)
        return self.classif.evaluate(x_train, y_train)

    def test(self, x_test, full_return: bool = False):
        """Predict on x_test: raw probabilities if full_return, otherwise
        values thresholded at 0.5 (below-threshold entries become 0)."""
        predictions = self.classif.predict(x_test)
        if full_return:
            return predictions
        return np.maximum(predictions - 0.5, 0)

    def save(self):
        """Persist the model to data/classifier.h5."""
        self.classif.save('data/classifier.h5')
        print("classifier is saved")

    def load(self, classifier_name: str = 'classifier'):
        """Load a previously saved model from data/<classifier_name>.h5."""
        self.classif = load_model(f'data/{classifier_name}.h5')
        print("classifier is loaded")
def test(binclassif: BinClassifier, x_test, y_test):
    """Print the accuracy of thresholded predictions on the test set,
    derived from the 2x2 confusion matrix of argmax labels."""
    y_pred = binclassif.test(x_test)
    cm = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1))
    # Rows index the true class, columns the predicted class.
    t_pos, f_pos = cm[0, 0], cm[0, 1]
    f_neg, t_neg = cm[1, 0], cm[1, 1]
    test_acc = (t_pos + t_neg) / (t_pos + t_neg + f_pos + f_neg)
    print(f"test accuracy = {test_acc}")
def train_and_test():
    """Train a fresh classifier on the dataset, report the training
    metrics, persist the model, then evaluate it on the held-out split."""
    (x_train, y_train), (x_test, y_test) = get_dataset()
    model = BinClassifier()
    loss, acc = model.train(x_train, y_train)
    print(f"\ntraining results for dataset:\nloss = {loss}\naccuracy = {acc}\n")
    model.save()
    test(model, x_test, y_test)


if __name__ == '__main__':
    train_and_test()
| true |
3f38420c535f31f133aadb1f09adc1ef3ba8ce37 | Python | ibssasimon/CSC365G22Lab1-2 | /ericFuncs.py | UTF-8 | 2,463 | 3.703125 | 4 | [] | no_license | def searchStudent(students, teachers, lastName):
for student in students:
if lastName == student.lastName:
for teacher in teachers:
if teacher.classroom == student.classroom:
print("\nStudent: " + student.lastName + ", " + student.firstName +
" GPA: " + student.GPA +
" Classroom: " + student.classroom +
" Teacher: " + teacher.lastName + ", " + teacher.firstName +
"\n")
def searchStudentBus(students, lastName, bus):
    """Print the bus route of each matching student, but only when the
    *bus* flag is "B" or "Bus"; otherwise do nothing."""
    if bus not in ("B", "Bus"):
        return
    for student in students:
        if student.lastName == lastName:
            print("\nStudent: " + student.lastName + ", " + student.firstName +
                  " Bus Route: " + student.bus +
                  "\n")
def searchTeacher(students, teachers, lastName):
    """Print every student in the classroom of each teacher whose last
    name matches."""
    for teacher in teachers:
        if teacher.lastName != lastName:
            continue
        room = teacher.classroom
        for student in students:
            if student.classroom == room:
                print("\nStudent: " + student.lastName + ", " + student.firstName + "\n")
def searchTeachersOfGrade(students, teachers, grade):
    """Print every teacher whose classroom hosts at least one student of *grade*.

    Collects the set of ALL classrooms containing that grade before
    matching teachers; the previous implementation only kept the
    classroom of the last matching student scanned, so teachers of other
    classrooms with the same grade were missed.
    """
    classrooms = {student.classroom for student in students if student.grade == grade}
    teachersList = []
    for teacher in teachers:
        if teacher.classroom in classrooms and teacher not in teachersList:
            teachersList.append(teacher)
    for t in teachersList:
        print(" Teacher: " + t.lastName + ", " + t.firstName + " teaches grade " + str(grade) + "\n")
def searchTeacherFactor(students, teachers, lastName):
    """For each teacher with the given last name, print how many students
    are in their classroom and those students' average GPA.

    Counters are reset per teacher (the previous version accumulated
    across same-named teachers), and an empty classroom is skipped
    instead of raising ZeroDivisionError.
    """
    for teacher in teachers:
        if teacher.lastName != lastName:
            continue
        classroom = teacher.classroom
        numStudents = 0
        totalGPA = 0.0
        for student in students:
            if student.classroom == classroom:
                numStudents += 1
                totalGPA += float(student.GPA)
        if numStudents == 0:
            # No students in this classroom: nothing meaningful to average.
            continue
        averageGPA = round((totalGPA / numStudents), 2)
        print(teacher.lastName + ", " + teacher.firstName + " has " + str(numStudents) +
              " students in classroom " + classroom + " with an average GPA of " + str(averageGPA) + "\n")
10faa955ed7cedf291fa0562eb0485f98bcaa73f | Python | cinhori/LeetCode | /python_src/valid_parentheses.py | UTF-8 | 1,350 | 4 | 4 | [] | no_license | # 给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效。
# 有效字符串需满足:
# 左括号必须用相同类型的右括号闭合。
# 左括号必须以正确的顺序闭合。
# 注意空字符串可被认为是有效字符串。
#
# 示例 1:
# 输入: "()"
# 输出: true
# 示例 2:
# 输入: "()[]{}"
# 输出: true
# 示例 3:
# 输入: "(]"
# 输出: false
# 示例 4:
# 输入: "([)]"
# 输出: false
# 示例 5:
# 输入: "{[]}"
# 输出: true
class Solution:
# 36ms, 84.19%; 13.7MB, 5.22%
def isValid(self, str):
hashmap = {'{':1, '}':6, '(':2, ')':5, '[':3, ']':4}
result = []
for s in str:
if hashmap[s] < 4:
result.append(s)
else:
if result == []: return False # 排除[']']
tmp = result.pop()
if hashmap[tmp] + hashmap[s] != 7:
return False
return len(result) == 0
# 44ms, 51.93%; 13.6MB, 5.22%
def isValid2(self, s):
dic = {'{': '}', '[': ']', '(': ')', '?': '?'}
stack = ['?']
for c in s:
if c in dic: stack.append(c)
elif dic[stack.pop()] != c: return False
return len(stack) == 1
if __name__ == "__main__":
print(Solution().isValid("[]]")) | true |
4ac8872b63eda9c684840d3611a7b8e69d8fad67 | Python | HawpT/BrainFloss | /playgame/models.py | UTF-8 | 3,146 | 2.640625 | 3 | [] | no_license | # from __future__ import unicode_literals
from django.db import models
from django.conf import settings
# Create your models here. models are tables
class Level_One(models.Model):
    """One answered level-one problem, plus which student answered it."""
    op1 = models.IntegerField(blank=False, null=False, default=0)
    op2 = models.IntegerField(blank=True, null=True, default=0)
    student_answer = models.IntegerField(blank=False, null=False, default=0)
    # problem_type: 1 = addition, 2 = subtraction, 3 = place-value ("Num").
    problem_type = models.IntegerField(blank=False, null=False, default=1)
    problem_level = models.IntegerField(blank=False, null=False, default=1)
    # Plain integer matched against Student.student_id (not a ForeignKey).
    student_id = models.IntegerField(blank=False, null=False, default=0)
    def __str__(self):
        # Human-readable rendering of the problem and the answering student.
        stu_ref = Student.objects.get(student_id=self.student_id) # student who answered this problem
        if self.problem_type == 1:
            return "L" + str(self.problem_level) + " Add Problem: " + str(self.op1) \
                + " + " + str(self.op2) + " = " + str(self.student_answer) + \
                " @user: " + str(stu_ref.first_name) + " " + str(stu_ref.last_name)
        elif self.problem_type == 2:
            return "L" + str(self.problem_level) + " Sub Problem: " + str(self.op1) \
                + " - " + str(self.op2) + " = " + str(self.student_answer) + \
                " @user: " + str(stu_ref.first_name) + " " + str(stu_ref.last_name)
        elif self.problem_type == 3:
            return "L" + str(self.problem_level) + " Num Problem: " + str(self.op1) \
                + " is in the " + str(self.student_answer) + "'s place." + \
                " @user: " + str(stu_ref.first_name) + " " + str(stu_ref.last_name)
    def score(self):
        """Return 1 if the stored answer is correct, 0 otherwise (types 1-2).

        NOTE(review): for problem_type 3 this returns a descriptive string
        instead of a 0/1 score -- confirm how callers treat that case.
        """
        if int(self.problem_type) == 1:
            if (int(self.op1) + int(self.op2)) == int(self.student_answer):
                return 1
            else:
                return 0
        elif int(self.problem_type) == 2:
            if (int(self.op1) - int(self.op2)) == int(self.student_answer):
                return 1
            else:
                return 0
        elif int(self.problem_type) == 3:
            return "Level " + str(self.problem_level) + " Digits Problem: " + str(self.op1) \
                + " is in the " + str(self.student_answer) + "'s place."
class Student(models.Model):
    """Player profile linked one-to-one to an auth user."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL)
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    student_id = models.IntegerField(blank=False, null=True)
    def __str__(self):
        return " Name: " + str(self.first_name) + " " + str(self.last_name) + " ID: " + str(self.student_id)
    # NOTE(review): Java-style getters below simply mirror the public
    # fields; attribute access would normally suffice.
    def get_student_fname(self):
        """Return the student's first name."""
        return self.first_name
    def get_student_lname(self):
        """Return the student's last name."""
        return self.last_name
    def get_student_id(self):
        """Return the student's numeric id."""
        return self.student_id
class Teacher(models.Model):
    """Teacher record with a numeric id; no __str__ is defined."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    teach_id = models.IntegerField(blank=False, null=True)
    # NOTE(review): Java-style getters mirroring the public fields.
    def teach_fname(self):
        """Return the teacher's first name."""
        return self.first_name
    def teach_lname(self):
        """Return the teacher's last name."""
        return self.last_name
    def t_id(self):
        """Return the teacher's numeric id."""
        return self.teach_id
| true |
d0bbf56f725df4594d04ceea6a4f4ff373480305 | Python | santhosh-kumar/DataScienceToolbox | /tests/unit/common/utils/test_string_utils.py | UTF-8 | 1,849 | 3.125 | 3 | [] | no_license | """
Unit Test for string_utils
"""
from unittest import TestCase
from utils.string_utils import StringUtils
from exceptions.exceptions import AssertionException
class TestStringUtils(TestCase):
    """
    Unit tests for StringUtils string/boolean/bytes conversions.
    """

    def test_str_to_boolean(self):
        """str_to_boolean accepts common truthy/falsy spellings and raises
        AssertionException for non-string input."""
        self.assertTrue(StringUtils.str_to_boolean('t'))
        self.assertTrue(StringUtils.str_to_boolean('T'))
        self.assertTrue(StringUtils.str_to_boolean('yes'))
        self.assertTrue(StringUtils.str_to_boolean('YES'))
        self.assertTrue(StringUtils.str_to_boolean('1'))
        self.assertTrue(StringUtils.str_to_boolean('true'))
        self.assertTrue(StringUtils.str_to_boolean('TRUE'))

        self.assertFalse(StringUtils.str_to_boolean('0'))
        self.assertFalse(StringUtils.str_to_boolean('No'))
        self.assertFalse(StringUtils.str_to_boolean('some value'))

        with self.assertRaises(AssertionException) as context:
            # The bare call must raise; the former assertFalse wrapper was
            # dead code, since it could never execute when the call raised.
            StringUtils.str_to_boolean(1)
        self.assertIn('Invalid String Value', str(context.exception))

    def test_to_str(self):
        """to_str passes text through unchanged and decodes bytes."""
        self.assertEqual('test', StringUtils.to_str('test'))
        self.assertEqual('test', StringUtils.to_str(b'test'))

    def test_to_bytes(self):
        """to_bytes passes bytes through unchanged and encodes text."""
        self.assertEqual(b'test', StringUtils.to_bytes(b'test'))
        self.assertEqual(b'test', StringUtils.to_bytes('test'))
| true |