blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3f83279312fa5531920a7bf716aa8bbf1be5f909 | Python | architnagpal001/Tkinter-Full-Course- | /Tkinter/canvas-widget.py | UTF-8 | 531 | 2.875 | 3 | [
"MIT"
] | permissive | from tkinter import *
# Tkinter canvas demo: draws two lines, a rectangle, an oval and centered text.
root = Tk()
canvas_width = 800
canvas_height = 400
# Size the window to exactly fit the canvas.
root.geometry(f"{canvas_width}x{canvas_height}")
can_widget = Canvas(root, width=canvas_width, height=canvas_height)
can_widget.pack()
# Two diagonal red lines (later shapes are drawn on top of earlier ones).
can_widget.create_line(0,0,800,200, fill="red")
can_widget.create_line(0,400,800,0, fill="red")
# NOTE(review): corner (900, 500) lies outside the 800x400 canvas, so the
# rectangle is clipped and fully covers both lines -- confirm intentional.
can_widget.create_rectangle(5, 5, 900, 500, fill="blue")
can_widget.create_oval(5, 5, 800, 400, fill="red")
can_widget.create_text(400, 200, text="Archit Nagpal", font="comicsansms 45 bold")
root.mainloop() | true |
f548c8a4efb15084cf083e436ff04c8878197bbb | Python | cosmicBboy/pandera | /pandera/model_components.py | UTF-8 | 11,182 | 2.515625 | 3 | [
"MIT"
] | permissive | """SchemaModel components"""
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from .checks import Check
from .errors import SchemaInitError
from .schema_components import (
Column,
Index,
PandasDtypeInputTypes,
SeriesSchemaBase,
)
# Alias for any callable used as a check function or decorated method.
AnyCallable = Callable[..., Any]
SchemaComponent = TypeVar("SchemaComponent", bound=SeriesSchemaBase)
# Attribute names used to stash check metadata on decorated classmethods.
CHECK_KEY = "__check_config__"
DATAFRAME_CHECK_KEY = "__dataframe_check_config__"
# A single Check or a list of Checks, as accepted by Field/FieldInfo.
_CheckList = Union[Check, List[Check]]
def _to_checklist(checks: Optional[_CheckList]) -> List[Check]:
    """Normalize ``checks`` (None, a single Check, or a list) into a list."""
    normalized = checks or []
    if isinstance(normalized, Check):  # pragma: no cover
        normalized = [normalized]
    return normalized
class FieldInfo:
    """Captures extra information about a field.

    Acts as a descriptor on SchemaModel subclasses: attribute access on the
    class returns the field's public name (alias or attribute name).

    *new in 0.5.0*
    """
    __slots__ = (
        "checks",
        "nullable",
        "allow_duplicates",
        "coerce",
        "regex",
        "check_name",
        "alias",
        "original_name",
        "dtype_kwargs",
    )
    def __init__(
        self,
        checks: Optional[_CheckList] = None,
        nullable: bool = False,
        allow_duplicates: bool = True,
        coerce: bool = False,
        regex: bool = False,
        alias: str = None,
        check_name: bool = None,
        dtype_kwargs: Dict[str, Any] = None,
    ) -> None:
        """Store field metadata; checks are normalized to a list."""
        self.checks = _to_checklist(checks)
        self.nullable = nullable
        self.allow_duplicates = allow_duplicates
        self.coerce = coerce
        self.regex = regex
        self.alias = alias
        self.check_name = check_name
        self.original_name = cast(str, None)  # always set by SchemaModel
        self.dtype_kwargs = dtype_kwargs
    @property
    def name(self) -> str:
        """Return the name of the field used in the DataFrame"""
        if self.alias is not None:
            return self.alias
        return self.original_name
    def __set_name__(self, owner: Type, name: str) -> None:
        # Descriptor hook: remember the attribute name assigned on the model.
        self.original_name = name
    def __get__(self, instance: Any, owner: Type) -> str:
        # Class/instance attribute access yields the public field name.
        return self.name
    def __set__(self, instance: Any, value: Any) -> None:  # pragma: no cover
        # Fields are read-only once declared on a SchemaModel.
        raise AttributeError(f"Can't set the {self.original_name} field.")
    def _to_schema_component(
        self,
        pandas_dtype: PandasDtypeInputTypes,
        component: Type[SchemaComponent],
        checks: _CheckList = None,
        **kwargs: Any,
    ) -> SchemaComponent:
        """Build a Column/Index, merging field checks with extra checks."""
        if self.dtype_kwargs:
            # Parametrize the dtype (e.g. Category(categories=...)).
            pandas_dtype = pandas_dtype(**self.dtype_kwargs)  # type: ignore
        checks = self.checks + _to_checklist(checks)
        return component(pandas_dtype, checks=checks, **kwargs)  # type: ignore
    def to_column(
        self,
        pandas_dtype: PandasDtypeInputTypes,
        checks: _CheckList = None,
        required: bool = True,
        name: str = None,
    ) -> Column:
        """Create a schema_components.Column from a field."""
        return self._to_schema_component(
            pandas_dtype,
            Column,
            nullable=self.nullable,
            allow_duplicates=self.allow_duplicates,
            coerce=self.coerce,
            regex=self.regex,
            required=required,
            name=name,
            checks=checks,
        )
    def to_index(
        self,
        pandas_dtype: PandasDtypeInputTypes,
        checks: _CheckList = None,
        name: str = None,
    ) -> Index:
        """Create a schema_components.Index from a field."""
        return self._to_schema_component(
            pandas_dtype,
            Index,
            nullable=self.nullable,
            allow_duplicates=self.allow_duplicates,
            coerce=self.coerce,
            name=name,
            checks=checks,
        )
def Field(
    *,
    eq: Any = None,
    ne: Any = None,
    gt: Any = None,
    ge: Any = None,
    lt: Any = None,
    le: Any = None,
    in_range: Dict[str, Any] = None,
    isin: Iterable = None,
    notin: Iterable = None,
    str_contains: str = None,
    str_endswith: str = None,
    str_length: Dict[str, Any] = None,
    str_matches: str = None,
    str_startswith: str = None,
    nullable: bool = False,
    allow_duplicates: bool = True,
    coerce: bool = False,
    regex: bool = False,
    ignore_na: bool = True,
    raise_warning: bool = False,
    n_failure_cases: int = 10,
    alias: str = None,
    check_name: bool = None,
    dtype_kwargs: Dict[str, Any] = None,
    **kwargs,
) -> Any:
    """Used to provide extra information about a field of a SchemaModel.
    *new in 0.5.0*
    Some arguments apply only to numeric dtypes and some apply only to ``str``.
    See the :ref:`User Guide <schema_models>` for more information.
    The keyword-only arguments from ``eq`` to ``str_startswith`` are dispatched
    to the built-in `~pandera.checks.Check` methods.
    :param nullable: whether or not the column/index is nullable.
    :param allow_duplicates: whether or not to accept duplicate values.
    :param coerce: coerces the data type if ``True``.
    :param regex: whether or not the field name or alias is a regex pattern.
    :param ignore_na: whether or not to ignore null values in the checks.
    :param raise_warning: raise a warning instead of an Exception.
    :param n_failure_cases: report the first n unique failure cases. If None,
        report all failure cases.
    :param alias: The public name of the column/index.
    :param check_name: Whether to check the name of the column/index during
        validation. `None` is the default behavior, which translates to `True`
        for columns and multi-index, and to `False` for a single index.
    :param dtype_kwargs: The parameters to be forwarded to the type of the field.
    :param kwargs: Specify custom checks that have been registered with the
        :class:`~pandera.extensions.register_check_method` decorator.
    """
    # pylint:disable=C0103,W0613,R0914
    # Common kwargs forwarded to every Check built below.
    check_kwargs = {
        "ignore_na": ignore_na,
        "raise_warning": raise_warning,
        "n_failure_cases": n_failure_cases,
    }
    # Snapshot all local names (i.e. every parameter plus check_kwargs) so
    # the dispatch loop below can look up built-in check args by name.
    args = locals()
    checks = []
    check_dispatch = _check_dispatch()
    # Reject unknown **kwargs early: only registered custom checks allowed.
    for key in kwargs:
        if key not in check_dispatch:
            raise SchemaInitError(
                f"custom check '{key}' is not available. Make sure you use "
                "pandera.extensions.register_check_method decorator to "
                "register your custom check method."
            )
    # Build a Check for every dispatch entry whose argument was supplied,
    # either as a named parameter (args) or a custom check kwarg (kwargs).
    for arg_name, check_constructor in check_dispatch.items():
        arg_value = args.get(arg_name, kwargs.get(arg_name))
        if arg_value is None:
            continue
        if isinstance(arg_value, dict):
            # dict-valued args (in_range, str_length, ...) expand to kwargs.
            check_ = check_constructor(**arg_value, **check_kwargs)
        else:
            check_ = check_constructor(arg_value, **check_kwargs)
        checks.append(check_)
    return FieldInfo(
        checks=checks or None,
        nullable=nullable,
        allow_duplicates=allow_duplicates,
        coerce=coerce,
        regex=regex,
        check_name=check_name,
        alias=alias,
        dtype_kwargs=dtype_kwargs,
    )
def _check_dispatch():
    """Map ``Field`` keyword names to Check factory methods.

    Note: the dict's iteration order determines the order in which
    ``Field`` constructs its checks, so entries should not be reordered.
    Registered custom checks are merged in last and can shadow built-ins.
    """
    return {
        "eq": Check.equal_to,
        "ne": Check.not_equal_to,
        "gt": Check.greater_than,
        "ge": Check.greater_than_or_equal_to,
        "lt": Check.less_than,
        "le": Check.less_than_or_equal_to,
        "in_range": Check.in_range,
        "isin": Check.isin,
        "notin": Check.notin,
        "str_contains": Check.str_contains,
        "str_endswith": Check.str_endswith,
        "str_matches": Check.str_matches,
        "str_length": Check.str_length,
        "str_startswith": Check.str_startswith,
        **Check.REGISTERED_CUSTOM_CHECKS,
    }
class CheckInfo:  # pylint:disable=too-few-public-methods
    """Captures extra information about a Check."""
    def __init__(
        self,
        check_fn: AnyCallable,
        **check_kwargs: Any,
    ) -> None:
        # check_fn: the user-defined check function (cls, arg) -> bool(s).
        self.check_fn = check_fn
        self.check_kwargs = check_kwargs
    def to_check(self, model_cls: Type) -> Check:
        """Create a Check from metadata."""
        # NOTE: pop() mutates self.check_kwargs -- "name" is consumed on the
        # first call; later calls fall back to the function's __name__.
        name = self.check_kwargs.pop("name", None)
        if not name:
            name = getattr(
                self.check_fn, "__name__", self.check_fn.__class__.__name__
            )
        # Bind the model class as the first argument of the check function.
        def _adapter(arg: Any) -> Union[bool, Iterable[bool]]:
            return self.check_fn(model_cls, arg)
        return Check(_adapter, name=name, **self.check_kwargs)
class FieldCheckInfo(CheckInfo):  # pylint:disable=too-few-public-methods
    """Check metadata bound to one or more fields (columns/indexes)."""

    def __init__(
        self,
        fields: Set[Union[str, FieldInfo]],
        check_fn: AnyCallable,
        regex: bool = False,
        **check_kwargs: Any,
    ) -> None:
        # Record which fields the check targets before delegating the
        # function/kwargs bookkeeping to the base class.
        self.fields = fields
        self.regex = regex
        super().__init__(check_fn, **check_kwargs)
def _to_function_and_classmethod(
    fn: Union[AnyCallable, classmethod]
) -> Tuple[AnyCallable, classmethod]:
    """Return ``(plain_function, classmethod_wrapper)`` for *fn*.

    Accepts either a bare function or an already-wrapped classmethod and
    produces both representations.
    """
    if not isinstance(fn, classmethod):
        return fn, classmethod(fn)
    return fn.__func__, fn
ClassCheck = Callable[[Union[classmethod, AnyCallable]], classmethod]
def check(*fields, regex: bool = False, **check_kwargs) -> ClassCheck:
    """Decorator to make SchemaModel method a column/index check function.
    *new in 0.5.0*
    This indicates that the decorated method should be used to validate a field
    (column or index). The method will be converted to a classmethod. Therefore
    its signature must start with `cls` followed by regular check arguments.
    See the :ref:`User Guide <schema_model_custom_check>` for more.
    :param fields: names (or aliases) of the fields the check applies to.
    :param regex: whether the field names are regex patterns.
    :param check_kwargs: Keywords arguments forwarded to Check.
    """
    def _wrapper(fn: Union[classmethod, AnyCallable]) -> classmethod:
        # Stash the check metadata on the classmethod under CHECK_KEY;
        # SchemaModel collects it when building the schema.
        check_fn, check_method = _to_function_and_classmethod(fn)
        setattr(
            check_method,
            CHECK_KEY,
            FieldCheckInfo(set(fields), check_fn, regex, **check_kwargs),
        )
        return check_method
    return _wrapper
def dataframe_check(_fn=None, **check_kwargs) -> ClassCheck:
    """Decorator to make SchemaModel method a dataframe-wide check function.
    *new in 0.5.0*
    Decorate a method on the SchemaModel indicating that it should be used to
    validate the DataFrame. The method will be converted to a classmethod.
    Therefore its signature must start with `cls` followed by regular check
    arguments. See the :ref:`User Guide <schema_model_dataframe_check>` for
    more.
    :param _fn: method to decorate (present when used without parentheses).
    :param check_kwargs: Keywords arguments forwarded to Check.
    """
    def _wrapper(fn: Union[classmethod, AnyCallable]) -> classmethod:
        # Stash the check metadata under DATAFRAME_CHECK_KEY for SchemaModel.
        check_fn, check_method = _to_function_and_classmethod(fn)
        setattr(
            check_method,
            DATAFRAME_CHECK_KEY,
            CheckInfo(check_fn, **check_kwargs),
        )
        return check_method
    # Support both @dataframe_check and @dataframe_check(...) usage.
    if _fn:
        return _wrapper(_fn)  # type: ignore
    return _wrapper
| true |
fe477afd361f60292c0ae958b906b3e40e8855de | Python | ankitaku2019/Term-Project-Open-CV | /signUp.py | UTF-8 | 2,568 | 3.359375 | 3 | [] | no_license | #user can sign up using this
import string
def mousePressed(event, data):
    """Handle clicks on the sign-up screen.

    Three hit regions roughly match the widgets drawn in redrawAll:
    username box (0.25-0.375 of height), password box (0.5-0.625), and the
    GO button (0.80-0.90). Assumes `data` carries width/height, the typed
    user/password strings, and a `database` with createNewUser() -- TODO
    confirm against the surrounding framework.
    """
    # Username box clicked: focus the username field.
    if data.width//4<event.x<0.75*data.width and 0.25*data.height<event.y<0.375*data.height:
        data.clickedPass=False
        data.clickedUser=True
    # Password box clicked: focus the password field.
    elif data.width//4<event.x<0.75*data.width and 0.5*data.height<event.y<0.625*data.height:
        data.clickedUser=False
        data.clickedPass=True
    # GO button clicked: attempt to create the account.
    elif data.width//4<event.x<0.75*data.width and 0.80*data.height<event.y<0.90*data.height:
        #the creation of a new user is not working either
        if data.database.createNewUser(data.user,data.password)!=False:
            #resets for next user
            data.password, data.user="", ""
            data.mode="LoadScreen"
def keyPressed(event, data):
    """Route keystrokes into whichever entry field is focused.

    Letters, digits and punctuation are appended to data.user or
    data.password depending on which box was clicked; BackSpace deletes
    the last character of the focused field.

    Bug fixes vs. original:
    - ``event.char in string.punctuation == True`` was a chained comparison
      that always evaluated to False, so punctuation was never accepted.
    - removed the debug ``print(data.password)`` that leaked the password
      to the console.
    - guard on ``event.char`` being non-empty so modifier keys (empty char)
      no longer match via the empty-substring quirk of ``in``.
    """
    if event.char and (event.char in string.ascii_letters
                       or event.char in string.digits
                       or event.char in string.punctuation):
        if data.clickedUser == True:
            data.user += event.char
        if data.clickedPass == True:
            data.password += event.char
    elif event.keysym == "BackSpace":
        if data.clickedUser == True:
            data.user = data.user[:-1]
        elif data.clickedPass == True:
            data.password = data.password[:-1]
def redrawAll(canvas, data):
    """Draw the sign-up screen: background, labeled entry boxes, GO button.

    NOTE(review): the entry boxes are drawn from x=0.37*width but
    mousePressed accepts clicks from x=width//4 -- confirm the mismatch.
    """
    #creates the background
    canvas.create_rectangle(0, 0, data.width, data.height,fill="blue")
    #username label and its corresponding rectangle
    canvas.create_text(10, data.height//4, anchor="nw", text="Create a Username:", font="Arial 16 bold", fill="orange")
    canvas.create_rectangle(0.37*data.width, data.height//4, 0.75*data.width, 0.375*data.height, outline="orange", width=7)
    #echoes the username characters typed so far
    canvas.create_text(data.width//2, 0.30*data.height, font="Arial 16 bold", text=data.user)
    #password label and its corresponding rectangle
    canvas.create_text(10, 0.5*data.height, anchor="nw", text="Create a Password:", font="Arial 16 bold", fill="orange")
    canvas.create_rectangle(0.37*data.width, 0.5*data.height, 0.75*data.width, 0.625*data.height, outline="orange", width=7)
    #echoes the password characters typed so far (shown in plain text)
    canvas.create_text(data.width//2, 0.55*data.height, text=data.password, font="Arial 16 bold")
    #GO button and its corresponding rectangle
    canvas.create_rectangle(data.width//4, 0.80*data.height, 0.75*data.width, 0.90*data.height, fill="AntiqueWhite1")
    canvas.create_text((data.width//2)-20, 0.83*data.height, anchor="nw", text="GO!", font="Arial 16 bold", fill="orange")
3a9f00fc6e578308ac5819bbd6ee8add4175f1ff | Python | wills0909/OATR | /figure3.py | UTF-8 | 1,448 | 3.15625 | 3 | [] | no_license | # This file is to figure out the relationship between total energy consumption with Ratio of acoustic and optical
# You can change Node number, Data amount:L and the ratio.
from get_optimum_omega import *
import math
# define a function of total consumption
def totalConsumption(DataAmout, OpticaRatio):
    """Total energy: optical transmission + electronics + acoustic transmission.

    ``OpticaRatio`` of the data goes over the optical link, the remainder
    over the acoustic link; physical constants come from get_optimum_omega.
    """
    optical_bits = OpticaRatio * DataAmout
    acoustic_bits = (1 - OpticaRatio) * DataAmout
    # link distances implied by the data split
    optical_dist = optical_bits * R_o / delta_o
    acoustic_dist = acoustic_bits * R_c / delta_c
    # optical path loss and transmit-power term
    attenuation = math.exp(Klambda * optical_dist / math.cos(theta))
    numer = 2 * math.pow(optical_bits, 3) * math.pow(R_o, 2) * pi * (1 - math.cos(theta_0)) * P_l
    denom = math.pow(delta_o, 3) * eta_t * eta_r * A_r * math.cos(theta)
    optical_energy = attenuation * (numer / denom)
    # acoustic energy with spreading (k) and absorption (alpha_f) losses
    acoustic_energy = acoustic_bits * P_i * math.pow(acoustic_dist, k) * math.pow(alpha_f, acoustic_dist)
    return optical_energy + optical_bits * E_elec + acoustic_energy
if __name__ == "__main__":
    # Reproduce fig.3: energy consumption vs. data amount for several
    # optical/acoustic split ratios.
    V = [1000, 2000, 3000, 4000, 5000] # V can be 1000 or 2000 or 3000 or 4000 or 5000
    print("Here is the energy consumption vs. data amount of fig.3, V equals 1000 or 2000 or 3000 or 4000 or 5000.")
    print("=" * 20)
    for v in V:
        print("Data amount=", v, 'bit')
        # one line per optical ratio: 0.85, 0.9, 0.95, 1.0
        print(totalConsumption(DataAmout=v, OpticaRatio=0.85))
        print(totalConsumption(DataAmout=v, OpticaRatio=0.9))
        print(totalConsumption(DataAmout=v, OpticaRatio=0.95))
        print(totalConsumption(DataAmout=v, OpticaRatio=1))
03baf7ddcd3a7d6f203fe7ff153b39e92c5c5708 | Python | shehryarbajwa/Mad-Libs | /udacity.py | UTF-8 | 2,493 | 4.0625 | 4 | [] | no_license | easy_string = ('_1 is the capital of Canada, _2 is the capital of UK, _3 is the capital of Pakistan, _4 is the capital of China')
# Correct answers for each template, in blank order (compared lowercased).
easy_answer = ('ottawa', "london", "islamabad", "beijing")
medium_string = ('__1__ is the birthplace of Napolean, __2__ is the birthplace of Kennedy, __3__ is the birthplace of Ada Lovelace, __4__ is the birthplace of Gandhi')
medium_answer = ('france', 'usa', 'uk', 'india')
hard_string = ('__1__ is the birth year of Napolean, __2__ is the birth year of Kennedy, __3__ is the birth year of Ada Lovelace, __4__ is the birth year of Gandhi')
hard_answer = ('1769', '1919', '1815', '1869')
# Human-readable labels used when prompting for each blank.
blanks = ["first blank" , "second blank" , "third blank" , "fourth blank"]
# Placeholder tokens substituted inside the template strings.
named_blanks = ("1", "2" , "3", "4")
def play_game(difficulty, answers):
    """Run the guessing loop for one template.

    NOTE(review): the parameter names are swapped relative to their
    contents -- `difficulty` receives the tuple of correct answers and
    `answers` receives the template string (see the call site below, which
    passes the return values of difficulty() in that order). Each correct
    guess replaces the corresponding placeholder in the template.
    """
    element = 0
    replaced = answers
    for answer in difficulty:
        user_asked = input("Can you guess the %s." % blanks[element]).lower()
        if answer == user_asked.lower():
            # substitute the placeholder token with the user's guess
            replaced = replaced.replace(named_blanks[element], user_asked) #change this to replaced instead of replaced_string, use replaced as the string, named_blanks[element] as the old substring instead (see https://www.tutorialspoint.com/python/string_replace.htm)
            print(replaced)
        elif answer != user_asked.lower():
            print ('Incorrect. try again')
            # keep prompting until the correct answer is entered
            while True:
                user_asked = input("Can you guess the %s." % blanks[element])
                if answer == user_asked.lower():
                    print ('Correct')
                    replaced = replaced.replace(named_blanks[element], user_asked)
                    print(replaced)
                    break
        element = element + 1
def difficulty():
    """Prompt for a difficulty level and return (answers, template).

    Returns the (answer_tuple, template_string) pair for the chosen level.
    Re-prompts on invalid input; the original returned None in that case,
    which crashed the caller's tuple unpacking.
    """
    while True:
        user_input = input("Choose your level of difficulty. easy, medium, hard")
        choice = user_input.lower()
        if choice == "easy":
            print(easy_string)
            return easy_answer, easy_string
        elif choice == "medium":
            print(medium_string)
            return medium_answer, medium_string
        elif choice == "hard":
            print(hard_string)
            return hard_answer, hard_string
        print ("You failed to put in the right level of difficulty")
# difficulty() returns (answers, template); the variable names here are
# swapped, but they match play_game's (equally swapped) parameter order.
some_string, difficulty_level = difficulty()
play_game(some_string, difficulty_level)
| true |
c18b80164915bcdaf800e8f9a93a3d8679d4b121 | Python | pcleon/python_challenge | /l6-2.py | UTF-8 | 420 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
#coding=utf-8
#学习python 压缩zip
import zipfile
dir = "files" + "/"
file = dir + "channel.zip"
zip_file = zipfile.ZipFile(file)
comment =''
file_name = '90052.txt'
zc =[]
while 1:
f = open('files/%s' % file_name)
next_num = f.read().split()[-1]
f.close
zc.append(zip_file.getinfo(file_name).comment)
if next_num.isdigit():
file_name = next_num + '.txt'
else:
break
print ''.join(zc)
| true |
c72670ebc1b0d081bc44244c4695d54a3aef14c4 | Python | RQuispeC/Machine-Learning | /t3/t3_1.py | UTF-8 | 6,300 | 3.0625 | 3 | [
"MIT"
] | permissive | import numpy as np
import csv
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn import model_selection
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from sklearn import svm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
def preprocess_data():
    """Load abalone.csv, one-hot encode sex, binarize rings (>13), standardize.

    Returns (data, labels): standardized feature matrix and 0/1 labels
    (0 for rings <= 13, 1 otherwise).
    """
    # read csv file (close the handle when done; the original leaked it)
    with open("abalone.csv", "rt") as file_obj:
        data = [row for row in csv.reader(file_obj)]
    data = np.array(data)
    # extract first column (sex) and convert remaining columns to floats
    first_column = [row[0] for row in data]
    # np.float was removed in NumPy >= 1.20; use the builtin float instead
    data = np.array([row[1::] for row in data]).astype(float)
    # one-hot-encode the sex column and prepend it to the numeric data
    first_column = np.array(pd.get_dummies(first_column))
    data = np.hstack((first_column, data))
    # separate the last column (rings) and binarize it at 13
    labels = data[:, -1]
    labels[labels <= 13] = 0
    labels[labels > 13] = 1
    data = data[:, 0:-1]
    # standardize features to zero mean / unit variance
    scaler = preprocessing.StandardScaler().fit(data)
    data = scaler.transform(data)
    return data, labels
def logistic_regressor_evaluation(x, y):
    """Nested cross-validation of LogisticRegression over C in {0.1..1000}.

    Outer loop: stratified 5-fold; inner loop: stratified 3-fold used to
    pick the best C for each outer fold. Prints per-fold and mean accuracy.
    """
    C_space = [10**i for i in range(-1, 4) ]
    #create stratified k-folds
    skFolds = model_selection.StratifiedKFold(n_splits = 5, random_state = 1)
    skFolds.get_n_splits(x, y)
    accuracy_mean = 0
    for train_index, test_index in skFolds.split(x, y):
        #set train and test data
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
        #create stratified k-folds for nested loop
        nested_skFolds = model_selection.StratifiedKFold(n_splits = 3, random_state = 1)
        nested_skFolds.get_n_splits(x_train, y_train)
        best_c = 0
        best_accuracy = 0
        for c in C_space:
            c_accuracy = 0.0
            for nested_train_index, nested_test_index in nested_skFolds.split(x_train, y_train):
                #set nested train and test data
                nested_x_train, nested_x_test = x_train[nested_train_index], x_train[nested_test_index]
                nested_y_train, nested_y_test = y_train[nested_train_index], y_train[nested_test_index]
                #create and train the nested logistic Regressor
                nested_clf = LogisticRegression(C=c, random_state=1)
                nested_clf.fit(nested_x_train, nested_y_train)
                #test the trained model
                nested_pred = nested_clf.predict(nested_x_test)
                nested_accuracy = metrics.accuracy_score(nested_y_test, nested_pred)
                c_accuracy += nested_accuracy
            # average accuracy over the 3 inner folds
            c_accuracy /= 3.0
            print("\tnested accuracy", c_accuracy, c)
            #update best accuracy (ties resolved in favor of the larger C)
            if best_accuracy <= c_accuracy:
                best_accuracy = c_accuracy
                best_c = c
        #train logistic Regressor with the best C
        clf = LogisticRegression(C=best_c, random_state=1)
        clf.fit(x_train, y_train)
        #test the trained model
        pred = clf.predict(x_test)
        accuracy = metrics.accuracy_score(y_test, pred)
        print("LR KF Accuracy", accuracy, best_c)
        #accumulate accuracy across outer folds
        accuracy_mean += accuracy
    print("Logistic regressor mean accuracy", accuracy_mean/5.0)
def SVM_evaluation(x, y):
    """Nested cross-validation of LinearSVC using GridSearchCV over C.

    Outer loop: stratified 5-fold; inner search: 3-fold GridSearchCV.
    Prints per-fold and mean accuracy.
    """
    parameters = {'C' : [10**i for i in range(-1, 4) ]}
    #create stratified k-folds
    skFolds = model_selection.StratifiedKFold(n_splits = 5, random_state = 1)
    skFolds.get_n_splits(x, y)
    accuracy_mean = 0
    for train_index, test_index in skFolds.split(x, y):
        #set train and test data
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
        #create nested SVM
        nested_svm = svm.LinearSVC(random_state = 1)
        #create nested loop and get best parameters
        nested_clf = GridSearchCV(nested_svm, parameters, cv = 3)
        nested_clf.fit(x_train, y_train)
        print("\tNested Accuracy", nested_clf.best_score_, nested_clf.best_params_)
        #train SVM with the best parameters
        clf = svm.LinearSVC(C = nested_clf.best_params_['C'], random_state = 1)
        clf.fit(x_train, y_train)
        #test the trained model
        pred = clf.predict(x_test)
        accuracy = metrics.accuracy_score(y_test, pred)
        print("SVM KF Accuracy", accuracy, nested_clf.best_params_['C'])
        #accumulate accuracy across outer folds
        accuracy_mean += accuracy
    print("SVM mean accuracy", accuracy_mean/5.0)
def LDA_evaluation(x, y):
    """Stratified 5-fold evaluation of LinearDiscriminantAnalysis.

    LDA has no hyperparameters to tune here, so no nested loop is needed.
    Prints per-fold and mean accuracy.
    """
    skFolds = model_selection.StratifiedKFold(n_splits = 5, random_state = 1)
    skFolds.get_n_splits(x, y)
    accuracy_mean = 0
    for train_index, test_index in skFolds.split(x, y):
        #set train and test data
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
        #create and train LDA
        clf = LinearDiscriminantAnalysis()
        clf.fit(x_train, y_train)
        #test the trained model
        pred = clf.predict(x_test)
        accuracy = metrics.accuracy_score(y_test, pred)
        print("LDA KF Accuracy", accuracy)
        #accumulate accuracy across folds
        accuracy_mean += accuracy
    print("LDA mean accuracy", accuracy_mean/5.0)
def train_best_classifier(x, y):
    """Select the best C via 3-fold CV, then fit a LinearSVC on all data.

    Returns the final fitted classifier.
    """
    C_space = [10**i for i in range(-1, 4) ]
    #create stratified k-folds
    skFolds = model_selection.StratifiedKFold(n_splits = 3, random_state = 1)
    skFolds.get_n_splits(x, y)
    best_c = 0
    best_accuracy = 0
    for c in C_space:
        accuracy_mean = 0
        #test each parameter C with the 3-folds
        for train_index, test_index in skFolds.split(x, y):
            x_train, x_test = x[train_index], x[test_index]
            y_train, y_test = y[train_index], y[test_index]
            svc = svm.LinearSVC(random_state = 1, C=c).fit(x_train, y_train)
            pred = svc.predict(x_test)
            accuracy = metrics.accuracy_score(y_test, pred)
            accuracy_mean += accuracy
        accuracy_mean /= 3.0
        print("\t nested", accuracy_mean, c)
        #update best accuracy and best C (ties favor the larger C)
        if best_accuracy <= accuracy_mean:
            best_accuracy = accuracy_mean
            best_c = c
    print("best", best_accuracy, best_c)
    #create final classifier trained on the full dataset
    return svm.LinearSVC(C=best_c, random_state = 1).fit(x, y)
if __name__ == "__main__":
x, y = preprocess_data()
logistic_regressor_evaluation(x, y)
SVM_evaluation(x, y)
LDA_evaluation(x,y)
train_best_classifier(x , y)
| true |
d9be63cd74bef9864b30d1f39905c6dddeead375 | Python | Escartin85/Krypthon | /gui/Terminal.py | UTF-8 | 2,656 | 3.28125 | 3 | [] | no_license | # this class will hold functions for interaction with a terminal or command prompt
# cleaning it working in all operative systems. And adding the customization of
# the out with many diferents colors
# @Author: Javier Escartin Diaz
# @ID Student: 15017740
import os
import sys
# define class Terminal or Command Prompt
class Terminal():
    """Cross-platform helpers for clearing/resizing a terminal and printing
    text in ANSI colors.

    All methods are static; instantiating the class records the current
    terminal size and clears the screen.
    """
    # ANSI escape sequences for colors/attributes.
    RED = "\033[1;31m"
    BLUE = "\033[1;34m"
    CYAN = "\033[1;36m"
    YELLOW = "\033[1;33m"
    GREEN = '\033[1;32m'
    RESET = "\033[0;0m"
    BOLD = "\033[;1m"
    REVERSE = "\033[1;m"
    GRAY = "\033[1;30m"
    # Terminal size detected at startup (filled in by loadDefaultSizes).
    dDefault_columns = "80"
    dDefault_rows = "24"
    # Fallback size constants kept for backward compatibility.
    default_columns = "80"
    default_rows = "24"

    def __init__(self):
        """Record the current terminal size and clear the screen."""
        Terminal.loadDefaultSizes()
        Terminal.clean()

    @staticmethod
    def loadDefaultSizes():
        """Store the current terminal size in dDefault_columns/dDefault_rows."""
        columns, rows = os.get_terminal_size(0)
        Terminal.dDefault_columns = columns
        Terminal.dDefault_rows = rows

    @staticmethod
    def defaultSizeTerminal():
        """Restore the terminal to the size recorded by loadDefaultSizes.

        Bug fixes vs. the original: it referenced ``self`` from a
        staticmethod (NameError), swapped rows/columns in the escape
        sequence, and read the unset ``default_*`` attributes instead of
        the detected ``dDefault_*`` ones.
        """
        sys.stdout.write("\x1b[8;{rows};{cols}t".format(
            rows=Terminal.dDefault_rows, cols=Terminal.dDefault_columns))

    @staticmethod
    def setSizeTerminal(customColumns="80", customRows="24"):
        """Resize the terminal to the given columns x rows via xterm escape."""
        sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=customRows, cols=customColumns))

    @staticmethod
    def clean():
        """Clear the console, dispatching on the operating system."""
        if os.name in ('nt', 'dos'):
            os.system('cls')
        elif os.name in ('linux', 'osx', 'posix'):
            os.system('clear')

    @staticmethod
    def print(Text='', Color='reset'):
        """Print Text in the given color (case-insensitive).

        Unknown color names fall back to RESET. The key "blod" is kept
        for backward compatibility with the original typo; "bold" is
        accepted as well.
        """
        palette = {
            "red": Terminal.RED,
            "blue": Terminal.BLUE,
            "cyan": Terminal.CYAN,
            "green": Terminal.GREEN,
            "yellow": Terminal.YELLOW,
            "gray": Terminal.GRAY,
            "blod": Terminal.BOLD,   # original (typo'd) key, kept for compat
            "bold": Terminal.BOLD,
            "reverse": Terminal.REVERSE,
            "reset": Terminal.RESET,
        }
        prefix = palette.get(Color.lower(), Terminal.RESET)
        print(prefix + Text + Terminal.RESET)
bec6135103d278b1399e2c21377e093b417a0f36 | Python | triposorbust/stable-persistence | /spec/parsed_spec.py | UTF-8 | 5,645 | 3.03125 | 3 | [] | no_license | #!/usr/bin/python
import os.path
import sys
sys.path.append(os.path.relpath("../src", os.path.dirname(__file__)))
import parsed as P
import unittest
TEST_FILENAME = "parsed.spec"
class CatAcceptanceTest(unittest.TestCase):
    """Acceptance tests for SeriesIterator in concatenation mode (avg=False):
    duplicate timepoints in the pattern keep all values, reordered by time."""
    def setUp(self):
        # False -> concatenate duplicate-timepoint values rather than average
        self.cat_parser = P.SeriesIterator(TEST_FILENAME, False)
    def test_data_parsing_a(self):
        # None values are dropped; remaining values sorted by ZT time
        self.cat_parser.parse_pattern("# ZT2 ZT3 ZT1")
        ud = [1.0, None, 2.0]
        pd = self.cat_parser.pattern_series(ud)
        self.assertEqual([2.0, 1.0], pd)
    def test_data_parsing_b(self):
        # duplicated timepoints: first pass of each time, then second pass
        self.cat_parser.parse_pattern("# Z0 Z0 Z2 Z2 Z4 Z4 Z6 Z6")
        ud = [x*2 for x in range(8)]
        pd = self.cat_parser.pattern_series(ud)
        self.assertEqual([0, 4, 8, 12, 2, 6, 10, 14], pd)
    def test_data_parsing_c(self):
        # arbitrary prefixes before the Z<t> token are ignored
        self.cat_parser.parse_pattern("# FOOZ1 FOOZ3 FOOZ2 FOOZ1")
        ud = [3.14, 0.0, 1.1, 4.1]
        pd = self.cat_parser.pattern_series(ud)
        self.assertEqual([3.14, 1.1, 0.0, 4.1], pd)
    def tearDown(self):
        pass
class AvgAcceptanceTest(unittest.TestCase):
    """Acceptance tests for SeriesIterator in averaging mode (avg=True):
    values that share a timepoint are averaged into one value."""
    def setUp(self):
        # True -> average values that share the same ZT timepoint
        self.avg_parser = P.SeriesIterator(TEST_FILENAME, True)
    def test_data_parsing_a(self):
        # None values are dropped; remaining values sorted by ZT time
        self.avg_parser.parse_pattern("# Z2 Z4 Z3 Z1")
        ud = [1.0, None, 2.0, 4.0]
        pd = self.avg_parser.pattern_series(ud)
        self.assertEqual([4.0, 1.0, 2.0], pd)
    def test_data_parsing_b(self):
        # duplicate timepoints collapse into their mean
        self.avg_parser.parse_pattern("# Z0 Z2 Z4 Z6 Z0 Z2 Z4 Z6")
        ud = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7]
        pd = self.avg_parser.pattern_series(ud)
        self.assertEqual([2.2, 3.3, 4.4, 5.5], pd)
    def test_data_parsing_c(self):
        # prefixes before Z<t> ignored; the two Z1 values average to 2.12
        self.avg_parser.parse_pattern("# FOOZ1 FOOZ4 FOOZ1 FOOZ6")
        ud = [3.14, 0.0, 1.1, 4.1]
        pd = self.avg_parser.pattern_series(ud)
        self.assertEqual([2.12, 0.0, 4.1], pd)
    def tearDown(self):
        pass
class SeriesIteratorTest(unittest.TestCase):
    """Unit tests for SeriesIterator internals (iteration, token parsing,
    pattern parsing, value conversion, and series reordering).

    NOTE(review): test_cat_pattern_series uses ``map(...).reverse()``,
    which only works on Python 2 where map returns a list.
    """
    def setUp(self):
        self.avg_parser = P.SeriesIterator(TEST_FILENAME, True)
        self.cat_parser = P.SeriesIterator(TEST_FILENAME, False)
    def _test_iteration_helper(self, parser):
        # every yielded series is a (name, [floats]) pair; 12 series expected
        counter = 0
        for series in parser:
            self.assertTrue(series[0]) # should be a string
            counter += 1
            for number in series[1]:
                self.assertIsInstance(number, float)
            self.assertEqual(counter, len(series[1]))
        else:
            self.assertEqual(counter, 12)
    def test_iteration(self):
        self._test_iteration_helper(self.avg_parser)
        self._test_iteration_helper(self.cat_parser)
    def _test_zstr_to_t_helper(self, parser):
        # zstr_to_t extracts the trailing integer after Z/ZT, else None
        self.assertEqual(parser.zstr_to_t("CerianiZT12"), 12)
        self.assertEqual(parser.zstr_to_t("Z0"), 0)
        self.assertEqual(parser.zstr_to_t("ChangZT20"), 20)
        self.assertEqual(parser.zstr_to_t("UedaZT1"), 1)
        self.assertEqual(parser.zstr_to_t("#"), None)
        self.assertEqual(parser.zstr_to_t("FOO"), None)
    def test_zstr_to_t(self):
        self._test_zstr_to_t_helper(self.avg_parser)
        self._test_zstr_to_t_helper(self.cat_parser)
    def _test_parse_pattern_helper(self, parser):
        # parse_pattern stores a tuple of timepoints (None for non-time tokens)
        self.assertIsInstance(parser.pattern, tuple)
        parser.parse_pattern("# FOO 12 Z12 0 4 Z8 BAR4")
        self.assertEqual(parser.pattern, (None, 12, 12, 0, 4, 8, 4))
        parser.parse_pattern("#")
        self.assertEqual(parser.pattern, ())
    def test_parse_pattern(self):
        self._test_parse_pattern_helper(self.avg_parser)
        self._test_parse_pattern_helper(self.cat_parser)
    def _test_vstr_to_v_helper(self, parser):
        # vstr_to_v converts numeric strings to float, "NA" to None
        self.assertEqual(parser.vstr_to_v("3.3"), 3.3)
        self.assertEqual(parser.vstr_to_v("NA"), None)
        self.assertEqual(parser.vstr_to_v("-1"), -1.0)
    def test_vstr_to_v(self):
        self._test_vstr_to_v_helper(self.avg_parser)
        self._test_vstr_to_v_helper(self.cat_parser)
    def _test_pattern_series_helper(self, parser):
        # with no pattern, the input passes through unchanged
        parser.pattern = None
        self.assertEqual(parser.pattern_series("foobar"), "foobar")
    def test_cat_pattern_series(self):
        self._test_pattern_series_helper(self.cat_parser)
        self.cat_parser.pattern = (None, 1, 1, 1, 2, 3)
        ud = map(float, range(6))
        pd = self.cat_parser.pattern_series(ud)
        self.assertEqual(pd, [1.0, 4.0, 5.0, 2.0, 3.0])
        self.cat_parser.pattern = (4, 3, 3, 2, 2, 1, 1, 1)
        ud = map(float, range(8))
        ud.reverse()
        pd = self.cat_parser.pattern_series(ud)
        self.assertEqual(pd, [2.0, 4.0, 6.0, 7.0, 1.0, 3.0, 5.0, 0.0])
        self.cat_parser.pattern = None
        ud = [1, "FOO"]
        pd = self.cat_parser.pattern_series(ud)
        self.assertEqual(pd, [1, "FOO"])
    def test_avg_pattern_series(self):
        self._test_pattern_series_helper(self.avg_parser)
        self.avg_parser.pattern = (1, None, 3, 2, 1)
        ud = map(float, range(5))
        pd = self.avg_parser.pattern_series(ud)
        self.assertEqual(pd, [2.0, 3.0, 2.0])
        self.avg_parser.pattern = (4, 3, None, 3)
        ud = [4.0, 1.0, 3.0, 2.0]
        pd = self.avg_parser.pattern_series(ud)
        self.assertEqual(pd, [1.5, 4.0])
        self.avg_parser.pattern = None
        ud = [1, "FOO"]
        pd = self.avg_parser.pattern_series(ud)
        self.assertEqual(pd, [1, "FOO"])
    def tearDown(self):
        pass
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| true |
4d36f6f13aa9ed1d940c59a0629c67a9e00eb14b | Python | hkim150/Leetcode-Problems | /Top_Interview_Questions/131._Palindrome_Partitioning/solution.py | UTF-8 | 996 | 3.109375 | 3 | [] | no_license | class Solution:
def partition(self, s: str) -> List[List[str]]:
leng = len(s)
# get the memoization of dp palindrome
memPal = [[False] * leng for _ in range(leng)]
memPal[leng-1][leng-1] = True
for i in range(leng-1):
memPal[i][i] = memPal[i+1][i] = True
for l in range(leng-2, -1, -1):
for r in range(l+1, leng):
memPal[l][r] = s[l] == s[r] and memPal[l+1][r-1]
ans = []
# dp possible break point
def breakPoint(start, end, lst=[]):
if end == leng-1:
if memPal[start][end]:
lst.append(s[start:end+1])
ans.append(lst)
return
if memPal[start][end]:
breakPoint(end+1, end+1, lst+[s[start:end+1]])
breakPoint(start, end+1, lst)
breakPoint(0, 0)
return ans | true |
e8824cf3fa551fa18994ee6fdf5b084c487bd7cd | Python | tzerchyuan/Coup | /coup/classes/characters/Ambassador.py | UTF-8 | 464 | 3.15625 | 3 | [
"MIT"
] | permissive | from .Character import Character
class Ambassador(Character):
    """The Ambassador character card for Coup (exchange with the Court
    deck; blocks Steal)."""
    def __init__(self):
        # No state of its own. NOTE(review): does not call
        # super().__init__(); confirm Character requires no initialization.
        pass
    def description(self) -> str:
        """Return the rules text describing the Ambassador's abilities."""
        return "Ambassadors can do the following:\n Exchange: Exchange cards with the Court. First take random cards (default: 2) from the Court deck. Choose which, if any, to exchange with your face-down cards. Then return two cards to the Court deck. \n Block: Steal"
    def __str__(self):
        return "Ambassador"
74b0d1704da71f37da465616541f8a40c1f55e55 | Python | gavinmaleney/gavobd | /gavobd.py | UTF-8 | 1,431 | 2.8125 | 3 | [] | no_license | # encoding: utf-8
import obd
import time
starttime = time.time()
# connect to the car
connection = obd.OBD()
# Assign OBD Commands to Variables
rpm = obd.commands.RPM # select an OBD command (sensor) - RPM
dtc = obd.commands.GET_DTC # Trouble Codes
intake_pressure = obd.commands.INTAKE_PRESSURE # Pressure
vehicle_speed = obd.commands.SPEED # Vehicle Speed
intake_temp = obd.commands.INTAKE_TEMP
engine_load = obd.commands.ENGINE_LOAD
coolant_temp = obd.commands.COOLANT_TEMP
maf = obd.commands.MAF
# Query with variable name and store in new variable
while True:
rpm_response = connection.query(rpm) # send the command, and parse the response
dtc_response = connection.query(dtc)
intake_pressure_response = connection.query(intake_pressure)
vehicle_speed_response = connection.query(vehicle_speed)
intake_temp_response = connection.query(intake_temp)
engine_load_response = connection.query(engine_load)
coolant_temp_response = connection.query(coolant_temp)
maf_response = connection.query(maf)
# Possible endpoint for labelling the output
# rpm_endpoint = "RPM:" . rpm_response.value
# print(rpm_endpoint)
# Print query responses
print(rpm_response.value)
print(dtc_response.value)
print(intake_pressure_response.value)
print(vehicle_speed_response.value)
print(intake_temp_response)
print(engine_load_response)
print(coolant_temp_response)
print(maf_response)
time.sleep(1)
#print(response.unit)
| true |
5c438b5dd90acb5f6a095a5804655fcf1b400dab | Python | choyj0920/algorithm | /algorithm_202012/baek2108.py | UTF-8 | 556 | 3.40625 | 3 | [] | no_license | # 백준 2108 통계학
import sys
from collections import Counter
# NOTE(review): `num` is never used afterwards.
num=[0 for i in range(8001)]
#input
n= int(sys.stdin.readline())
arr=[0 for i in range(n)]
cnt=0
for _ in range(n):
    temp= int(sys.stdin.readline())
    arr[_]=temp
    cnt+=temp
# arithmetic mean (was: 산술 평균)
# NOTE(review): Python's round() uses banker's rounding (round(0.5) == 0);
# this judge problem expects rounding half away from zero -- confirm.
ave =round(cnt/n)
print(ave)
# arr sort
arr=sorted(arr)
# median (was: 중간 값)
print(arr[n//2])
# Frequency table sorted by (count desc, value asc).
count = Counter(arr)
count = sorted(count.items(),key = lambda x : (-x[1],x[0]))
# Mode: on a tie for the highest count, print the second-smallest value.
if n>1 and count[0][1]== count[1][1]:
    print(count[1][0])
else:
    print(count[0][0])
# Range: max - min.
print(arr[n-1]-arr[0])
| true |
c975083aca1aa57b6bccc82b3b7958ee12dcaa1e | Python | evenchen0321/testmac | /20170119/filenew.py | UTF-8 | 676 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Even
# Demo of three ways to read a text file ("yesterday" lyrics).
f = open("yesterday",'r',encoding="utf-8")
# Disabled draft 1 (note below says "clumsy way"): call readline() by hand.
'''
# print(f.readline())
# print(f.readline())
# print(f.readline())
# print(f.readline())
# print(f.readline())
笨办法
'''
# Disabled draft 2 ("second way"): readline() in a small loop.
'''
for i in range(5):
    print(f.readline())
第二种办法
'''
# Disabled draft 3: readlines() loads the whole file; the note below says
# it is unsuitable for big files because it eats memory.
'''
for index,i in enumerate(f.readlines()):
    print(index,i.strip())
    if index == 9:
        print("----------------------")
        break
处理大数据不合适,太占内存
'''
# Final approach: iterate the file object directly -- only one line is in
# memory at a time, so it also suits large files.
count =0
for i in f:
    if count ==9:
        print("--------------")
        break
    count +=1
    print(count,i.strip())
# iterator (was: 迭代器)
f.close()
0d39314d7bc63edb504bc2c7e6f00dc8b04a5f0a | Python | OctavianBarbu/MyFirstProject | /TicTacToe_functions.py | UTF-8 | 2,508 | 3.796875 | 4 | [] | no_license | from IPython.display import clear_output
import random
def display_board(board):
    """Clear the screen and draw the 3x3 grid from `board` (cells 1-9)."""
    clear_output() #Clearing the output every time the function is called
    print("     ")
    print(f"  {board[7]}  |  {board[8]}  |  {board[9]}  ")
    print("-----|-----|-----")
    print(f"  {board[4]}  |  {board[5]}  |  {board[6]}  ")
    print("-----|-----|-----")
    print(f"  {board[1]}  |  {board[2]}  |  {board[3]}  ")
    print("     ")
def player_input():
    """Prompt until the player types X or O; return the (p1, p2) markers."""
    marker = "wrong"
    while marker not in ("X", "O"):
        marker = input("Would you like to X or O ?").upper()
    # The opponent automatically receives the other marker.
    return ("X", "O") if marker == "X" else ("O", "X")
def place_marker(board, marker, position):
    """Write `marker` into the board cell named by `position` (int or str)."""
    cell = int(position)
    board[cell] = marker
def win_check(board, mark):
    """Return True when `mark` occupies any of the eight winning lines."""
    lines = [(7, 8, 9), (4, 5, 6), (1, 2, 3),   # rows
             (7, 4, 1), (8, 5, 2), (9, 6, 3),   # columns
             (7, 5, 3), (9, 5, 1)]              # diagonals
    return any(board[a] == mark and board[b] == mark and board[c] == mark
               for a, b, c in lines)
def choose_first():
    """Fair coin flip deciding which player takes the first turn."""
    return 'Player 2' if random.randint(0, 1) == 0 else 'Player 1'
def space_check(board, position):
    """True when the cell named by `position` (int or numeric str) is blank."""
    return " " == board[int(position)]
def player_choice(board):
    """Prompt until the player names a digit 1-9 whose cell is still blank."""
    valid = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
    position = "0"
    while position not in valid or not space_check(board, position):
        position = input("Where would you like to place your mark next?")
    return position
def full_board_check(board):
    """Return True when no blank cell remains anywhere in `board`."""
    return " " not in board
def replay():
    """Ask whether to play again; True for YES, False for NO."""
    choice = "wrong"
    while choice not in ("YES", "NO"):
        choice = input("Would you like to play again?").upper()
    return choice == "YES"
| true |
683ee747874e33f94ff9c7b9fcf5f1bfccd635f3 | Python | Roel/Gyrid-server | /olof/logger.py | UTF-8 | 4,683 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | #-*- coding: utf-8 -*-
#
# This file belongs to Gyrid Server.
#
# Copyright (C) 2012 Roel Huybrechts
# All rights reserved.
"""
Module providing logging infrastructure for info and error messages from the server.
"""
import logging, logging.handlers
import os
import sys
import time
import traceback
class Logger(object):
    """
    Main Logger class that handles logging of information.
    """
    def __init__(self, server, filename):
        """
        Initialisation.
        @param   server (Olof)      Reference to main Olof server instance.
        @param   filename (str)     Filename of the logfile. Log resides in logs/filename.
        """
        self.server = server
        self.filename = filename
        self.base_path = self.server.paths['logs']
        self.location = os.path.join(self.base_path, self.filename)
        if not os.path.exists(self.base_path):
            os.makedirs(self.base_path)
        self.server.checkDiskAccess(self.location)
        # Created lazily by __procMsg on the first log call.
        self.logger = None
    def __getLogger(self):
        """
        Get the logging.Logger object for this logger. Uses a RotatingFileHandler to automatically rotate the log when
        filesize exceeds 512 kB. Four backups are stored.
        """
        # Keyed by the file path, so each log file gets one shared Logger.
        logger = logging.getLogger(self.location)
        logger.setLevel(logging.INFO)
        # Guard against attaching a second handler when the same path is
        # requested again (getLogger returns the cached instance).
        if len(logger.handlers) < 1:
            handler = logging.handlers.RotatingFileHandler(self.location, maxBytes=524288, backupCount=4)
            handler.setFormatter(logging.Formatter("%(message)s"))
            logger.addHandler(handler)
        return logger
    def __procMsg(self, message):
        """
        Process the given message, creating a logger when none available.
        @param   message (str)   The message to process
        @return  (str, str)      The timestamp this message was processed. The message itself.
        """
        if self.logger == None:
            self.logger = self.__getLogger()
        # A None message becomes the empty string.
        message = message.strip() if message != None else ""
        return time.strftime('%Y%m%d-%H%M%S-%Z'), message
    def debug(self, message):
        """
        Write the given message to standard output as debug message.
        @param   message (str)   The message to log.
        """
        # Debug messages go to stdout only; nothing is written to the file.
        t, m = self.__procMsg(message)
        if self.server.debug_mode:
            f = ' (%s)' % self.filename if self.filename != 'server' else ''
            sys.stdout.write("%s Gyrid Server%s: %s.\n" % (t, f, m))
    def logInfo(self, message):
        """
        Log the given message as information. When running in debug mode, print to stdout too.
        @param   message (str)   The message to log.
        """
        t, m = self.__procMsg(message)
        if self.server.debug_mode:
            f = ' (%s)' % self.filename if self.filename != 'server' else ''
            sys.stdout.write("%s Gyrid Server%s: %s.\n" % (t, f, m))
        # Continuation lines in the file are prefixed with the level letter.
        m = m.replace('\n', '\nI ')
        self.logger.info("I %s: %s." % (t, m))
    def logError(self, message):
        """
        Log the given message as error. When running in debug mode, print to stderr too.
        @param   message (str)   The message to log.
        """
        t, m = self.__procMsg(message)
        if self.server.debug_mode:
            f = ' (%s)' % self.filename if self.filename != 'server' else ''
            sys.stderr.write("%s Gyrid Server%s: Error: %s.\n" % (t, f, m))
        m = m.replace('\n', '\nE ')
        self.logger.info("E %s: %s." % (t, m))
    def logException(self, exception, message=None):
        """
        Log the given exception as error. When running in debug mode, print to stderr too.
        @param   exception (Exception)   The exception to log.
        @param   message (str)           A message to clarify the exception. Optional.
        """
        # NOTE(review): relies on sys.exc_info()/format_exc(), so this must be
        # called while handling the exception (inside an except block).
        eType = sys.exc_info()[0].__name__
        eTraceback = traceback.format_exc()
        t, m = self.__procMsg(message)
        if self.server.debug_mode:
            f = ' (%s)' % self.filename if self.filename != 'server' else ''
            dbgStr = "\n%s Gyrid Server%s: Error: " % (t, f)
            if message != None:
                dbgStr += '%s. \n%s Gyrid Server%s: ' % (message.strip(), t, f)
            dbgStr += '%s exception: %s.\n' % (eType, str(exception))
            sys.stderr.write(dbgStr)
            sys.stderr.write('  ' + '\n  '.join(eTraceback.rstrip().split('\n')) + '\n\n')
        m = m.replace('\n', '\nE ')
        mLog = '%s. ' % m if m != '' else ''
        self.logger.info("E %s: %s%s exception: %s." % (t, mLog, eType, str(exception)))
        self.logger.info("E %s" % eTraceback.rstrip().replace('\n', '\nE '))
| true |
8e75aeafe726c7be06d866436bb0df3e9551559d | Python | chrisl0411/Python-Fundamentals-Part-1 | /fibonacci.py | UTF-8 | 292 | 3.9375 | 4 | [] | no_license | def fibonacci(n):
if n == 1:
return 1
elif n == 2:
return 1
else:
fn1 = fibonacci(n-1)
fn2 = fibonacci(n-2)
print("fibonacci("+str(n)+") =",str(fn1)+" + "+str(fn2))
return fn1 + fn2
def main():
    """Print the 7th Fibonacci number, tracing each recursive addition."""
    print(fibonacci(7))
main()  # run the demo whenever the module is executed
| true |
305757e73eee1f0e9c78658d6b73839749f33494 | Python | forestlight1999/algorithm | /heap.py | UTF-8 | 5,421 | 3.6875 | 4 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
'''
@Author: forestlight
@Date: 2019-08-25 17:06:29
@LastEditTime: 2019-09-04 20:32:35
@Description: used to implement the heap data structure
'''
from utils import get_randint_data
from collections.abc import Iterable
'''
@description: now we use array to implement min heap
@param {type}
@return:
'''
class Heap(object):
    '''
    Array-backed binary heap: builds min/max heaps in place and heapsorts.
    '''
    def __init__(self, array, size):
        assert(isinstance(array, Iterable))
        assert(size > 0 and len(array) == size)
        # `array` is mutated in place by every operation below.
        self.heap_size = size
        self.array = array
    def generate_heap(self, heap_size, style='min'):
        '''
        Heapify array[:heap_size] in place ('min' or 'max') by sinking every
        non-leaf node, from the last one (heap_size // 2 - 1) back to root.
        '''
        for node in range(heap_size // 2 - 1, -1, -1):
            self.sink(node, heap_size, style)
        print('generate {} heap: {}'.format(style, self.array))
    def rise(self, index, heap_size, style='min'):
        '''
        Bubble the element at `index` toward the root until its parent no
        longer violates the heap property.
        '''
        assert(0 <= index < heap_size)
        assert(style in ('min', 'max'))
        data = self.array
        moved = data[index]
        child = index
        parent = (child - 1) // 2
        while parent >= 0 and \
                ((style == 'min' and data[parent] > moved) or
                 (style == 'max' and data[parent] < moved)):
            # Shift the parent down instead of swapping pair by pair.
            data[child] = data[parent]
            child = parent
            parent = (parent - 1) // 2
        data[child] = moved
    def sink(self, index, heap_size, style='min'):
        '''
        Push the element at `index` down until both children respect the
        heap property (heap restricted to array[:heap_size]).
        '''
        assert(0 <= index < heap_size)
        data = self.array
        moved = data[index]
        parent = index
        child = 2 * parent + 1
        while child < heap_size:
            # Prefer the sibling when it wins the comparison for `style`.
            if child + 1 < heap_size and \
                    ((style == 'min' and data[child + 1] < data[child]) or
                     (style == 'max' and data[child + 1] > data[child])):
                child += 1
            if (style == 'min' and moved <= data[child]) or \
                    (style == 'max' and moved >= data[child]):
                break
            data[parent] = data[child]
            parent = child
            child = 2 * child + 1
        data[parent] = moved
    def sort(self, style='s'):
        '''
        In-place heapsort: 's' sorts ascending (via a max heap), 'b' sorts
        descending (via a min heap).
        '''
        assert(style in ('s', 'b'))
        data = self.array
        heap_style = 'max' if style == 's' else 'min'
        self.generate_heap(self.heap_size, heap_style)
        for end in range(self.heap_size - 1, 0, -1):
            # Move the current extreme to its final slot, then re-heapify.
            data[0], data[end] = data[end], data[0]
            self.sink(0, end, heap_style)
        print('heap sorted: {}'.format(data))
if 1:
    # Ad-hoc smoke test (runs on import): sort 10 random ints ascending.
    # NOTE(review): an `if __name__ == '__main__':` guard is the usual gate
    # here -- confirm before changing.
    size = 10
    array = get_randint_data(size)
    heap = Heap(array, size)
    heap.sort('s')
| true |
ff9658dc68dd22c6fd2dbf880a09f1a2fb08d19f | Python | ankrypted/castle-defence | /gameComponents.py | UTF-8 | 3,660 | 3.515625 | 4 | [] | no_license | import pygame
import math
import computation
'''
This class is to denote a bullet
A bullet has the following attributes:
1> timeTravelled : The amount of time a bullet has lived
2> orientation : The current orientation of the bullet in radians
3> firedBy : Id of the entity to which this bullet belongs
'''
class Bullet:
    """A projectile that travels in a straight line along its orientation."""
    # Class-level defaults; instances overwrite these in __init__.
    timeTravelled = 0
    orientation = 0
    coordinate = (0, 0)
    firedBy = -1
    def __init__(self, orientation, coordinate, firedBy):
        self.orientation = orientation
        self.coordinate = coordinate
        self.firedBy = firedBy
        # Spawn the bullet 30 units ahead of the muzzle.
        self.updateCoordinate(30)
    def updateCoordinate(self, unitDistance):
        """Advance the bullet `unitDistance` units along its heading."""
        deg, x, y = computation.getDirection(self.orientation)
        angle = math.radians(deg)
        px, py = self.coordinate
        self.coordinate = (px + (x * unitDistance * math.cos(angle)),
                           py + (y * unitDistance * math.sin(angle)))
'''
This class is to denote a Tank
A Tank has the following attributes:
1> orientation : The current orientation of the tank in radians
2> imageFile : The path to the file to load to display as tank
3> life : how much life is left of the tank
'''
class Tank:
# position of the tank
coordinate = (0, 0)
# the image to display for this tank
imageFile = ""
fixedCursor = ""
mobileCursor = ""
center = 0
# current orientation of the tank
orientation = 0
# the base rectangle for a tank
rectangle = ""
# amount of life for a tank
life = 100
# boost of this tank
boost = 50
# life bar for this tank
lifeBar = ""
# boost bar for this tank
boostBar = ""
# multiplier to get direction of movement for a tank
reverse = 1
def __init__(self, (X, Y), imageFile, orientation, life, boost, lifeBarX, lifeBarY, lifeBarW, lifeBarH, boostBarX,
boostBarY, boostBarW, boostBarH):
self.coordinate = (X, Y)
self.imageFile = imageFile
self.orientation = orientation
self.life = life
self.boost = boost
self.lifeBar = pygame.Rect(lifeBarX, lifeBarY, lifeBarW, lifeBarH)
self.boostBar = pygame.Rect(boostBarX, boostBarY, boostBarW, boostBarH)
self.fixedCursor = pygame.image.load(self.imageFile)
self.mobileCursor = self.fixedCursor
self.mobileCursor = pygame.transform.rotate(self.fixedCursor, self.orientation)
self.rectangle = pygame.rect
def rotateAntiClockwise(self, offset):
self.orientation += offset + 360
self.orientation %= 360
self.updateMobileCursor()
def rotateClockwise(self, offset):
self.orientation -= offset + 360
self.orientation %= 360
self.updateMobileCursor()
def updateMobileCursor(self):
self.mobileCursor = pygame.transform.rotate(self.fixedCursor, self.orientation)
def updateCenter(self):
self.center = self.mobileCursor.get_rect().center
self.center = (self.center[0] + self.coordinate[0], self.center[1] + self.coordinate[1])
def updateCoordinate(self, isBoost, unitDistance):
deg, x, y = computation.getDirection(self.orientation)
boostVal = 1
if isBoost == 1:
boostVal = 10
self.coordinate = (
self.coordinate[0] + (x * self.reverse * boostVal * unitDistance * math.cos(math.radians(deg))),
self.coordinate[1] + (y * self.reverse * boostVal * unitDistance * math.sin(math.radians(deg))))
def toggleReverse(self):
self.reverse = 1 if self.reverse == -1 else 1
| true |
9507e5b1bb777974a4439ae637b438a7f425ac43 | Python | Xilma/Lynn | /Python Practice/main.py | UTF-8 | 1,801 | 4.59375 | 5 | [] | no_license | #imports the module Calculator from the calculator file
# Module-level driver: build the Calculator, then show the menu below.
from calculator import Calculator
#creates an object of the Calculator class
calculate = Calculator()
#calls the __str__ method that returns my name
print(calculate)
#calculates the sum
def addNumbers():
    """Read whitespace-separated numbers and delegate summing to `calculate`."""
    raw = input("Enter the numbers you want to add, separated by space: ")
    print("The sum is: ")
    calculate.printSum(raw.split())
#calculates the subtraction
def subtractNumbers():
    """Read two integers and delegate the difference to `calculate`."""
    num1 = int(input("Enter the first number: "))
    num2 = int(input("Enter the second number: "))
    print(f"{num1} - {num2} is: ")
    calculate.printSubtraction(num1, num2)
#calculates the quotient
def divideNumbers():
    """Read two integers and delegate the quotient to `calculate`."""
    num1 = int(input("Enter the first number: "))
    num2 = int(input("Enter the second number: "))
    print(f"{num1} divided by {num2} is: ")
    calculate.printQuotient(num1, num2)
#calculate the product
def multiplyNumbers():
    """Read whitespace-separated numbers and delegate the product to `calculate`."""
    raw = input("Enter the numbers you want to multiply, separated by space: ")
    print("The product is: ")
    calculate.printProduct(raw.split())
#display menu
input_operation = input("What would you like to do today?\n" +
                        " 1. Add numbers \n 2. Subtract numbers" +
                        "\n 3. Multiply numbers \n 4. Divide numbers\n\n")
# Dispatch on the chosen option; anything else falls through to the error.
if input_operation == '1':
    #call the add numbers method
    addNumbers()
elif input_operation == '2':
    #call the subtract numbers method
    subtractNumbers()
elif input_operation == '3':
    #call the multiply numbers method
    multiplyNumbers()
elif input_operation == '4':
    #call the divide numbers method
    divideNumbers()
else:
    print("Sorry, no such option exists!")
| true |
f876e9dac51dff32c551510b9940743eb9229905 | Python | ticod/goodee-python | /210205/exam2.py | UTF-8 | 609 | 4.09375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 5 14:38:36 2021
@author: Dohun
exam2.py
"""
def calc(var1, var2, oper):
    """Apply the arithmetic operator `oper` ("+", "-", "*", "/") to the two
    operands; any unrecognised operator yields 0."""
    operations = {
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "*": lambda a, b: a * b,
        "/": lambda a, b: a / b,
    }
    selected = operations.get(oper)
    if selected is None:
        return 0
    return selected(var1, var2)
# Interactive driver: read an operator and two integers, then show the result.
oper = input("연산자를 선택하세요:(+ - * /): ")
var1 = int(input("첫 번째 수: "))
var2 = int(input("두 번째 수: "))
res = calc(var1, var2, oper)
# Two equivalent output styles: %-formatting and print's default spacing.
# NOTE(review): %d truncates a fractional result from "/" -- confirm intent.
print("계산: %d %s %d = %d" % (var1, oper, var2, res))
print("계산:", var1, oper, var2, "=", res)
1b6b180b53293f3bfc7c19ddb4348d8af4188a97 | Python | zimarinvitalii/task-2 | /тема 4/task4.py | UTF-8 | 1,171 | 4.3125 | 4 | [] | no_license | '''
Write a program that asks the answer for a mathematical expression,
checks whether the user is right or wrong, and then responds with a message accordingly.
'''
import random
import operator
def askQuestion():
    """Pose one random question; True when the typed answer matches exactly."""
    expected = randomCalc()
    guess = float(input())
    return guess == expected
def randomCalc():
    """Generate a random arithmetic question.

    Prints the question and returns the correct answer (a float for '/').
    """
    operations = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv
    }
    left = random.randint(0,11)
    # The right operand starts at 1, so '/' can never divide by zero.
    right = random.randint(1,11)
    symbol = random.choice(list(operations.keys()))
    result = operations[symbol](left, right)
    print(f'What is {left} {symbol} {right}?\n')
    return result
# (removed) A stray module-level `print(score)` lived here; `score` is not
# defined at import time, so it raised NameError before quiz() could run.
def quiz():
    """Run a ten-question quiz, echoing the running score after each answer.

    Returns None (the final score line is printed, not returned).
    """
    print('Welcome. This is a 10 question math quiz\n')
    score = 0
    times = 0
    while times < 10:
        correct = askQuestion()
        # (removed) a dead `if times == 10: break` guard lived here -- the
        # loop condition already caps `times` at 9 before each question.
        if correct:
            score += 1
            print('Correct!\n')
        else:
            print('Incorrect!\n')
        print(score)
        times += 1
    return print(f'Your score was {score}/10')
# Entry point: play the quiz once. The stray trailing `askQuestion()` and
# `randomCalc()` calls were removed -- they fired one extra unscored prompt
# after the quiz had already printed its final score.
quiz()
| true |
4041f16e43fc0ee2f290f7d6fd5033e9d2a94b0a | Python | gaborpapp/AIam | /movement_ai/text_renderer.py | UTF-8 | 3,421 | 2.890625 | 3 | [] | no_license | from OpenGL.GL import *
from OpenGL.GLUT import *
try:
import FTGL
except ImportError:
pass
class TextRenderer:
    """Base class for drawing a text string with OpenGL.

    Subclasses must provide stroke() (emit the GL drawing calls) and
    get_size() (return the (width, height) of the rendered text).
    """
    def __init__(self, window, text, size, font=None, spacing=None):
        # `window` supplies the camera orientation used for 3D billboarding.
        self.window = window
        self.text = text
        self.size = size
        self.font = font
        self.spacing = spacing
        # Scale factor applied before stroking; subclasses may override it.
        self.scale = 1
    def render(self, x, y, z, v_align="bottom", h_align="left", three_d=False):
        """Draw the text at (x, y, z) with the given alignment.

        When three_d is True, the text is rotated to face the camera using
        the window's camera orientation (billboarding).
        """
        width, height = self.get_size()
        glPushMatrix()
        glTranslatef(x, y, z)
        if three_d:
            glRotatef(-self.window._camera_x_orientation, 1.0, 0.0, 0.0)
            glRotatef(-self.window._camera_y_orientation, 0.0, 1.0, 0.0)
        glScalef(self.scale, self.scale, self.scale)
        # Shift the origin so the requested alignment lands on (x, y, z).
        if h_align == "right":
            glTranslatef(-width, 0, 0)
        elif h_align == "center":
            glTranslatef(-width/2, 0, 0)
        if v_align == "top":
            glTranslatef(0, -self.size, 0)
        self.stroke()
        #self._draw_bbox() # TEMP
        glPopMatrix()
    def _draw_bbox(self):
        # Debug helper: outline the text's bounding box.
        width, height = self.get_size()
        glBegin(GL_LINE_LOOP)
        glVertex2f(0, 0)
        glVertex2f(width, 0)
        glVertex2f(width, height)
        glVertex2f(0, height)
        glEnd()
class GlutTextRenderer(TextRenderer):
    """Text renderer backed by GLUT's built-in stroke fonts."""
    # Font metrics (units above/below the baseline) per the GLUT spec.
    TOP = 119.05 # http://www.opengl.org/resources/libraries/glut/spec3/node78.html
    BOTTOM = 33.33
    def __init__(self, *args):
        TextRenderer.__init__(self, *args)
        if not self.font:
            self.font = GLUT_STROKE_ROMAN
        # Scale the ~152-unit tall stroke font down to `size` units.
        self.scale = self.size / (self.TOP + self.BOTTOM)
    def stroke(self):
        """Emit the GL calls that draw self.text at the current origin."""
        glLineWidth(1.0)
        glPointSize(1.0)
        for c in self.text:
            if c == ' ' and self.spacing is not None:
                # Custom word spacing replaces the font's own space glyph.
                glTranslatef(self.spacing, 0, 0)
            else:
                glutStrokeCharacter(self.font, ord(c))
    def get_size(self):
        """Return (width, height); width is summed from per-glyph widths.

        NOTE(review): the push/scale/pop around the loop does not affect
        glutStrokeWidth's return value -- confirm whether it is needed.
        """
        glPushMatrix()
        glScalef(self.scale, self.scale, self.scale)
        width = 0
        for c in self.text:
            if c == ' ' and self.spacing is not None:
                width += self.spacing
            else:
                width += glutStrokeWidth(self.font, ord(c))
        glPopMatrix()
        return width, self.size
class FontAttributes:
    """Hashable (name, size) key used to cache loaded FTGL fonts."""
    def __init__(self, name, size):
        self.name = name
        self.size = size
    def __eq__(self, other):
        # Added: __hash__ without __eq__ fell back to identity comparison,
        # so two equal keys never matched and the font cache never hit.
        return (isinstance(other, FontAttributes)
                and (self.name, self.size) == (other.name, other.size))
    def __hash__(self):
        return hash((self.name, self.size))
ftgl_fonts = {}  # module-level cache of FTGL font objects keyed by FontAttributes
class FtglTextRenderer(TextRenderer):
    """Text renderer backed by FTGL outline fonts loaded from font files."""
    # FaceSize resolution passed to FTGL.
    RESOLUTION = 72
    def __init__(self, *args):
        TextRenderer.__init__(self, *args)
        if not self.font:
            raise Exception("font required")
        self._font_object = self._prepare_font()
        # FTGL's Render/BBox are fed byte strings, not unicode.
        self.text = self.text.encode("utf8")
    def _prepare_font(self):
        """Return the cached FTGL font for (self.font, self.size), loading
        and caching it on first use."""
        attributes = FontAttributes(self.font, self.size)
        try:
            return ftgl_fonts[attributes]
        except KeyError:
            font_object = FTGL.OutlineFont(self.font)
            font_object.FaceSize(int(self.size), self.RESOLUTION)
            ftgl_fonts[attributes] = font_object
            return font_object
    def stroke(self):
        """Emit the GL calls that draw self.text at the current origin."""
        glLineWidth(1.0)
        glPointSize(1.0)
        self._font_object.Render(self.text)
    def get_size(self):
        """Return (width, height) taken from the font's bounding box."""
        llx, lly, llz, urx, ury, urz = self._font_object.BBox(self.text)
        width = urx - llx
        height = ury - lly
        return width, height
| true |
4c7ba045cec16be49f023dfb4ac8e74e71e324d0 | Python | bojinyao/snake | /oop_snake.py | UTF-8 | 11,199 | 2.890625 | 3 | [] | no_license | from random import randint
import os
import time
import signal
def New_Board_List(row, column):
    """Build a (row+2) x (column+2) character grid: a blank playfield
    surrounded by '-'/'|' walls with '+' in the corners."""
    assert type(row) is int and row > 0 and type(column) is int and column > 0
    grid = [['+'] + ['-'] * column + ['+']]
    for _ in range(row):
        grid.append(['|'] + [' '] * column + ['|'])
    grid.append(['+'] + ['-'] * column + ['+'])
    return grid
def Display_Board(board):
    """Print the score banner, then the board one space-joined row per line."""
    assert type(board) is list
    global COLUMN
    # COLUMN and SCORE are module-level game state maintained by the loop.
    print(f"{' ' * (COLUMN - 4)}SCORE: {SCORE}")
    for row in board:
        print(" ".join(row))
def Empty_Locations(board):
    """List the [row, col] pairs of blank interior cells (border excluded)."""
    assert type(board) is list
    spots = []
    for r in range(1, len(board) - 1):
        for c in range(1, len(board[0]) - 1):
            if board[r][c] == ' ':
                spots.append([r, c])
    return spots
def Modify_Board(snake, food):
    """Stamp the snake ('#') and then the food ('*') onto the global BOARD."""
    global BOARD
    for segment in snake.body:
        BOARD[segment[0]][segment[1]] = '#'
    spot = food.location
    BOARD[spot[0]][spot[1]] = '*'
def New_Food():
    # Teleport the food to a uniformly random currently-empty cell.
    global FOOD, EMPTY_SPOTS
    FOOD.location = EMPTY_SPOTS[randint(0, len(EMPTY_SPOTS) - 1)]
def Deep_Copy(lst):
    """Return a new list whose element lists are copied one level deep."""
    assert type(lst) is list
    return [inner[:] for inner in lst]
class Snake(object):
    """The player-controlled snake.

    body is a list of [row, col] segments, head first.  Movement, eating
    and collision checks read and write the module-level game state
    (BOARD, FOOD, SCORE, WALL, GAME_OVER, ...).
    """
    def __init__(self, start_row, start_col, length = 3, orientation = 'right'):
        assert type(start_row) is int and type(start_col) is int and type(length) is int
        self.start_row = start_row
        self.start_col = start_col
        self.length = length
        self.orientation = orientation
        # Lay the body out opposite to the facing direction, head first.
        if self.orientation == 'right':
            self.body = [[self.start_row, self.start_col - i] for i in range(self.length)]
        elif self.orientation == 'left':
            self.body = [[self.start_row, self.start_col + i] for i in range(self.length)]
        elif self.orientation == 'up':
            self.body = [[self.start_row - i, self.start_col] for i in range(self.length)]
        elif self.orientation == 'down':
            self.body = [[self.start_row + i, self.start_col] for i in range(self.length)]
        self.head = self.body[0][:]
        self.tail = self.body[-1][:]
    def move(self, direction):
        # Advance one cell in `direction`, handling walls/portals.
        # NOTE(review): wall handling reads the global SNAKE rather than
        # `self`; this only works while a single snake exists -- confirm.
        global FOOD, EAT, SCORE, WALL, GAME_OVER
        global SNAKE, BOARD, ROW, COLUMN
        # Everything but the old tail slides back one position.
        snake_body = Deep_Copy(self.body[0:len(self.body) - 1])
        self.tail = self.body[-1][:]
        if direction == 'right':
            self.head[1] += 1
        if direction == 'left':
            self.head[1] -= 1
        if direction == 'up':
            self.head[0] -= 1
        if direction == 'down':
            self.head[0] += 1
        self.body = [self.head] + snake_body
        # Border hit: die when WALL is on, otherwise wrap to the far side.
        if SNAKE.head[0] in (0, ROW + 1) or SNAKE.head[1] in (0, COLUMN + 1):
            if WALL:
                GAME_OVER = True
            else:
                if SNAKE.head[0] == 0:
                    SNAKE.head[0] = ROW
                if SNAKE.head[0] == ROW + 1:
                    SNAKE.head[0] = 1
                if SNAKE.head[1] == 0:
                    SNAKE.head[1] = COLUMN
                if SNAKE.head[1] == COLUMN + 1:
                    SNAKE.head[1] = 1
    def eat(self):
        # Grow by one segment (re-appending the saved tail) when the head
        # lands on the food; updates the EAT flag and SCORE.
        global FOOD, EAT, SCORE
        # NOTE(review): `snake_body` below is computed but never used.
        snake_body = Deep_Copy(self.body[0:len(self.body) - 1])
        if self.head == FOOD.location:
            EAT = True
            SCORE += 1
            self.body += [self.tail]
        else:
            EAT = False
    def not_running_into(self):
        # Set GAME_OVER when the head overlaps any other body segment.
        global GAME_OVER
        for i in range(1, len(self.body)):
            if self.head == self.body[i]:
                GAME_OVER = True
    def win(self):
        # Reached when the board has no empty cell left.
        print(' ')
        print("Congradulations! You've beat this game!")
class Food(object):
    """A food pellet pinned to one [row, column] cell of the board."""
    def __init__(self, row, column):
        assert type(row) is int and type(column) is int
        self.row, self.column = row, column
        # `location` is the [row, col] pair the rest of the game reads.
        self.location = [row, column]
def interrupted(signum, frame):
    # SIGALRM handler: abort the input() pending in timed_input().
    raise ValueError
def timed_input():
    # Read one line from stdin; the SIGALRM handler (interrupted) turns an
    # expired read into an empty string.
    try:
        return input()
    except ValueError:
        return ""
"""Where Each Round of Game is Managed"""
def Play_Snake_Game():
    """Run one round: repaint every second, read a key (SIGALRM-timed),
    move the snake, and stop on death, win, or 'q'."""
    global BOARD, SNAKE, FOOD, SCORE, GAME_OVER, ROW, COLUMN, WALL, EAT, KEY, PreKEY, Starting_ROW, Starting_COL, EMPTY_SPOTS
    global QUITING
    GAME_OVER = False
    SCORE = 0
    KEY = 'd'
    PreKEY = KEY
    Modify_Board(SNAKE, FOOD)
    EMPTY_SPOTS = Empty_Locations(BOARD)
    Display_Board(BOARD)
    # SIGALRM aborts a pending input() so each frame lasts about one second.
    signal.signal(signal.SIGALRM, interrupted)
    while not GAME_OVER:
        signal.alarm(1)
        KEY = timed_input()
        signal.alarm(0)
        if KEY == 'p':
            # Pause until the player types exactly 'r'.
            while True:
                try:
                    ans = str(input('Type "r" to resume the game: '))
                    if ans != 'r':
                        raise ValueError
                    break
                except ValueError:
                    print(' ')
        # Unknown keys (including the timeout's '') keep the previous course.
        if KEY not in ['a', 's', 'd', 'w', 'q']:
            KEY = PreKEY
        # A 180-degree turn is ignored: the snake cannot reverse into itself.
        if (PreKEY == 'a' and KEY == 'd') or (PreKEY == 'd' and KEY == 'a') or (
                PreKEY == 'w' and KEY == 's') or (PreKEY == 's' and KEY == 'w'):
            KEY = PreKEY
        if 'a' in KEY:
            SNAKE.move('left')
        elif 's' in KEY:
            SNAKE.move('down')
        elif 'd' in KEY:
            SNAKE.move('right')
        elif 'w' in KEY:
            SNAKE.move('up')
        elif KEY == 'q':
            break
        SNAKE.eat()
        SNAKE.not_running_into()
        PreKEY = KEY
        Modify_Board(SNAKE, FOOD)
        EMPTY_SPOTS = Empty_Locations(BOARD)
        # print(EMPTY_SPOTS)
        if not EMPTY_SPOTS:
            # The snake fills the whole playfield: the player has won.
            SNAKE.win()
            # Display_Board(BOARD)
            return
        if EAT:
            New_Food()
        # Rebuild and repaint the board from scratch every frame.
        BOARD = New_Board_List(ROW, COLUMN)
        Modify_Board(SNAKE, FOOD)
        Display_Board(BOARD)
    print(' ')
    print('Game Over')
"""Variable Set-up"""
def Variable_Set_Up():
    """Interactively collect the round's configuration (board size, spawn
    point, portal rule, snake length) and build SNAKE and FOOD."""
    global ROW, COLUMN, WALL, Starting_ROW, Starting_COL, LENGTH, BOARD, SNAKE, FOOD
    while True:
        try:
            print(' ')
            ROW = int(input("How many Vertical Rows do you want? (> 0): "))
            if ROW < 1:
                raise ValueError
            break
        except ValueError:
            print(' ')
            # Fixed: this message claimed "greater than 2" although the
            # validation above accepts any ROW > 0.
            print('"Row" is an integer and greater than 0')
    while True:
        try:
            print(' ')
            COLUMN = int(input("How many horizontal Columns do you want? (> 2): "))
            if COLUMN < 3:
                raise ValueError
            break
        except ValueError:
            print(' ')
            print('"COLUMN" is an integer and greater than 2')
    BOARD = New_Board_List(ROW, COLUMN)
    while True:
        try:
            print(' ')
            Starting_ROW = int(input("Which Row do you want to place your snake? (0 < x < {}): ".format(ROW + 1)))
            if Starting_ROW < 1 or Starting_ROW > ROW:
                raise ValueError
            break
        except ValueError:
            print(' ')
            print('Please make sure that your input satisfies the conditions')
    while True:
        try:
            print(' ')
            Starting_COL = int(input("Which Column do you want to place your snake? (0 < y < {}): ".format(COLUMN)))
            if Starting_COL < 1 or Starting_COL > COLUMN:
                raise ValueError
            break
        except ValueError:
            print(' ')
            print('Please make sure that your input satisfies the conditions')
    print("There is a Portal-Rule,")
    print("you may either die if you hit the walls, or you can turn Portal ON,")
    print("so you emerge from the other side if you run into any walls")
    temp = None
    while True:
        try:
            print(' ')
            temp = str(input("Do you want to turn ON the Portal-Rule? (y/n): "))
            # WALL is the inverse of the portal rule: portals ON means the
            # walls do not kill.
            if temp == 'y':
                WALL = False
            elif temp == 'n':
                WALL = True
            else:
                raise ValueError
            break
        except ValueError:
            print(' ')
            print('Please only type in "y" or "n"')
    print(' ')
    print("Your snake will start-off facing Right and going Right!")
    while True:
        try:
            print(' ')
            LENGTH = int(input("How long do you want your snake to be? (0< L < {}): ".format(Starting_COL + 1)))
            if LENGTH < 1 or LENGTH > Starting_COL:
                raise ValueError
            break
        except ValueError:
            print(' ')
            print("Your input must be an integer and within the given range")
    # The snake spawns facing right; the first food appears to its right.
    SNAKE = Snake(Starting_ROW, Starting_COL, LENGTH, 'right')
    FOOD = Food(Starting_ROW, randint(Starting_COL + 1, COLUMN))
def Standard_Game_Select():
    """Ask whether to customize; sets STANDARD_GAME (True = canned rules)."""
    global STANDARD_GAME
    print(' ')
    print('Do you want to customize? If not, then you will play a Standard Version')
    while True:
        try:
            temp = str(input('(y/n): '))
            # 'y' means customize, so the standard game is NOT used.
            if temp == 'y':
                STANDARD_GAME = False
            elif temp == 'n':
                STANDARD_GAME = True
            else:
                raise ValueError
            break
        except ValueError:
            print(' ')
            print('Please type "y" or "n"')
"""Global Values"""
GAME_OVER = False        # set when the snake dies; ends the round loop
SCORE = 0                # food eaten this round
EAT = False              # True for the frame in which food was just eaten
ROW = 20                 # playfield height (interior rows)
COLUMN = 30              # playfield width (interior columns)
Starting_ROW = 11        # snake spawn row
Starting_COL = 10        # snake spawn column
WALL = False             # True: walls kill; False: portal wrap-around
LENGTH = None            # snake length chosen during set-up
BOARD = None             # current 2-D character grid
SNAKE = None             # the Snake instance for the round
FOOD = None              # the Food instance for the round
EMPTY_SPOTS = None       # cached list of blank [row, col] cells
STANDARD_GAME = None     # True = canned settings, False = custom set-up
QUITING = False          # True once the player declines another round
KEY = 'd'                # key read this frame
PreKEY = KEY             # key from the previous frame
"""User Interface"""
"""Not part of the repetitive loop"""
# Greeting and key-binding help, shown once at start-up.
print(' ')
print('Welcome to Snake!')
print(' ')
print('object-oriented, written by Max Yao')
print(' ')
print('Use "w", "a", "s", "d" for controls')
print(' ')
print('Press "q" if you want to quit immediately')
print(' ')
print('Press "p" if you want to Pause the game')
print(' ')
print('Press "r" if you want to Resume the game')
"""Actual Game"""
# Outer loop: one iteration per round, until the player quits.
while not QUITING:
    Standard_Game_Select()
    if not STANDARD_GAME:
        Variable_Set_Up()
    else:
        # Canned "standard" configuration.
        ROW = 18
        COLUMN = 28
        Starting_ROW = 11
        Starting_COL = 10
        WALL = True
        LENGTH = 3
        BOARD = New_Board_List(ROW, COLUMN)
        SNAKE = Snake(Starting_ROW, Starting_COL, LENGTH, 'right')
        FOOD = Food(11, 15)
    Play_Snake_Game()
    if KEY == 'q':
        break
    # Ask for another round until a valid y/n answer arrives.
    while True:
        try:
            print(' ')
            temp = str(input('This round is finished, do you want another round?(y/n): '))
            if temp == 'y':
                QUITING = False
            elif temp == 'n':
                QUITING = True
            else:
                raise ValueError
            break
        except ValueError:
            print(' ')
            print('Please type "y" or "n"')
print(' ')
print('Thanks for playing!')
| true |
2084d944da154773a3bcf364dc7b9b7fac63c7cb | Python | Isihiro4423/due_deligence | /due_deligence/filtering/result_underpriced_filter.py | UTF-8 | 923 | 2.84375 | 3 | [] | no_license | from typing import Dict, List
class ResultUnderpricedFilter(object):
    """Keep only results whose due-diligence entries show a positive
    underpricing no larger than a configured threshold."""

    def __init__(self, underpriced: int):
        # Inclusive upper bound on the accepted underpriced value.
        self._underpriced_cond = underpriced

    def filter(self, results: Dict) -> Dict:
        """Return the subset of *results* with at least one passing entry.

        Note: each result dict is updated in place — its 'due_deligences'
        list is replaced by the filtered list.
        """
        kept = {}
        for sec_code, result in results.items():
            passing = self._filter_due_deligences(result['due_deligences'])
            result['due_deligences'] = passing
            if passing:
                kept[sec_code] = result
        return kept

    def _filter_due_deligences(self, due_deligences: List) -> List:
        """Drop errored entries and those outside (0, threshold]."""
        return [
            dd for dd in due_deligences
            if not dd['isError']
            and 0 < dd['underpriced'] <= self._underpriced_cond
        ]
| true |
376fead969dde22405d037c29ae775ee1b5dfafc | Python | rec/EnergyEngineersCodes | /SummerStudents2016/IndividualCharacters/Ampersand.py | UTF-8 | 223 | 2.671875 | 3 | [] | no_license | from LEDsetup import*
# Clear the whole strip before drawing (led/Off come from LEDsetup).
led.fill(Off)
led.update()
def And (Color):
    """Light the LED indices that trace an ampersand glyph in *Color*."""
    # Hard-coded LED positions forming the '&' shape on the matrix.
    nd =[37,47,26,56,25,35,55,34,44,23,53,73,22,32,62,21,31,41,51,71]
    for i in nd:
        led.set(i,Color)
    led.update()
    return
# Draw the glyph once at run time (Violet comes from LEDsetup).
And(Violet)
| true |
3ec8f6cd9b1e869e790c05dbae3bb17fdaefc3b6 | Python | Aasthaengg/IBMdataset | /Python_codes/p02261/s622713082.py | UTF-8 | 790 | 3.359375 | 3 | [] | no_license | n = int(input())
# One line of whitespace-separated two-character cards, e.g. "H4 C9 S4 D2 C3".
card1 = list(input().split())
# Independent copy so the two sorts don't interfere with each other.
card2 = [i for i in card1]
def bubbleSort(c1: list, n: int):
    """Stable in-place bubble sort of two-character cards by value.

    Cards are strings like 'H4'; ordering compares the second character
    only.  Returns the (mutated) list c1.
    """
    for done in range(n):
        # Bubble the smallest remaining value down toward index `done`.
        j = n - 1
        while j > done:
            if c1[j][1] < c1[j - 1][1]:
                c1[j - 1], c1[j] = c1[j], c1[j - 1]
            j -= 1
    return c1
def selectionSort(c2: list, n: int):
    """In-place selection sort of cards by their value character.

    Not stable: equal-valued cards may be reordered.  Returns c2.
    """
    for i in range(n):
        # Index of the first card with the smallest value in c2[i:].
        smallest = min(range(i, n), key=lambda k: c2[k][1])
        c2[i], c2[smallest] = c2[smallest], c2[i]
    return c2
# Sort one copy with each algorithm; bubble sort is stable by construction.
bubblesort = bubbleSort(card1, n)
selectionsort = selectionSort(card2, n)
# flag is set when selection sort ordered equal-valued cards differently
# from the stable bubble sort, i.e. selection sort was not stable here.
flag = 0
for i in range(n):
    if bubblesort[i] != selectionsort[i]:
        flag = 1
print(" ".join(bubblesort))
print("Stable")
print(" ".join(selectionsort))
if flag:
    print("Not stable")
else:
    print("Stable")
| true |
5c68fcb04f3f0962bd140398a81b36d72dc6dea7 | Python | atsunaakiya/teletako | /twitter_crawler.py | UTF-8 | 4,187 | 2.578125 | 3 | [] | no_license | import sys
from collections import deque
import re
from typing import Iterable, NamedTuple, List, Optional, Set, Tuple, Deque
import tweepy
from lib.config import TwitterConfig, parse
from lib.db import UDB
from lib.utils import UMessage, MessageType
class RelatedStatusRef(NamedTuple):
    """Reference to a tweet: the author's screen name and the status id."""
    username: str
    id: str
class WrappedMessage(NamedTuple):
    """A parsed status plus references to other tweets linked from it."""
    msg: UMessage
    # Entries may be None when a linked URL was not a twitter status URL.
    related_id: List[Optional[RelatedStatusRef]]
def _get_retweet_name(s: str) -> Optional[str]:
res = re.search(r"^RT @(\w+):", s)
if res is not None:
return res.groups()[0]
def start_authorization(config: TwitterConfig) -> tweepy.API:
    """Build an authenticated tweepy API client from OAuth credentials."""
    auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret)
    auth.set_access_token(config.access_key, config.access_secret)
    api = tweepy.API(auth)
    return api
def _split_twitter_status_url(s: str) -> Optional[RelatedStatusRef]:
    """Parse a twitter.com status URL into its author and status id.

    Returns None when *s* is not of the form
    http(s)://twitter.com/<user>/status/<digits>.
    """
    match = re.match(r'https?://twitter\.com/(\w+)/status/(\d+)$', s)
    if match is None:
        return None
    username, status_id = match.groups()
    return RelatedStatusRef(username, status_id)
def _get_message_from_status(status: tweepy.models.Status) -> WrappedMessage:
    """Convert a tweepy Status into a UMessage plus linked-tweet refs.

    Collects photo URLs from the status entities and parses every linked
    URL, producing RelatedStatusRef entries (None for non-status links).
    """
    id_ = str(status.id)
    author = status.author.screen_name
    content: str = status.text
    # extended_entities carries the full media list when present; fall
    # back to plain entities, then to no media at all.
    if hasattr(status, 'extended_entities'):
        media = status.extended_entities.get('media') or []
    elif hasattr(status, 'entities'):
        media = status.entities.get('media') or []
    else:
        media = []
    # Only photos are kept; other media types are ignored.
    images = [
        m['media_url_https']
        for m in media
        if m['type'] == 'photo'
    ]
    umsg = UMessage(
        type=MessageType.Twitter,
        id=id_,
        author=author,
        content=content,
        monitor=author,
        media_list=images,
        source=f'https://twitter.com/{author}/status/{id_}'
    )
    related_urls = status.entities.get('urls') or []
    # Non-status URLs map to None entries (callers filter those out).
    related_id = [
        _split_twitter_status_url(u['expanded_url'])
        for u in related_urls
        if 'expanded_url' in u
    ]
    return WrappedMessage(umsg, related_id)
def _walk_status(api: tweepy.API, status: tweepy.models.Status, max_depth: int) -> Iterable[UMessage]:
    """Yield *status* and, breadth-first, the statuses it links to.

    Linked statuses are fetched through the API and followed up to
    *max_depth* link-hops away from the starting status.
    """
    q: Deque[Tuple[tweepy.models.Status, int]] = deque([(status, 0)])
    while q:
        status, depth = q.popleft()
        msg = _get_message_from_status(status)
        yield msg.msg
        if depth < max_depth:
            for r in msg.related_id:
                if r is None:
                    continue
                rs = api.get_status(r.id)
                q.append((rs, depth + 1))
def get_twitter_medias(api: tweepy.API, db: UDB, username: str) -> Iterable[UMessage]:
    """Yield messages from *username*'s timeline that are not yet stored.

    Already-seen statuses (by uid) are skipped; new ones are expanded via
    _walk_status up to two link-hops deep.
    """
    tl = api.user_timeline(username)
    for status in tl:
        uid = f"{MessageType.Twitter.value}_{status.id}"
        if db.data_exists(uid):
            continue
        yield from _walk_status(api, status, 2)
def _safe_get_twitter_medias(api: tweepy.API, db: UDB, username: str) -> Iterable[UMessage]:
    """Same as get_twitter_medias, but log-and-stop on tweepy errors
    (e.g. protected or suspended accounts) instead of propagating."""
    try:
        yield from get_twitter_medias(api, db, username)
    except tweepy.error.TweepError as err:
        print(f"Error on Twitter @ {username}: ", err)
def main():
    """Crawl all monitored Twitter users and queue new media for download.

    For every new message: record a relation edge when the author is not
    itself monitored, or when the message is a classic "RT @user:"
    retweet; otherwise queue the message for media download if it has any.
    """
    with open('config.toml') as cf:
        config = parse(cf)
    api = start_authorization(config.twitter)
    with UDB(config.redis) as db:
        monitors = set(db.monitor_list(MessageType.Twitter))
        for mu in monitors:
            for msg in _safe_get_twitter_medias(api, db, mu):
                if db.data_exists(msg.uid):
                    continue
                if msg.author not in monitors:
                    # Surfaced via a monitored user but authored by someone
                    # else: store the relation instead of the message data.
                    r = db.relation_add(MessageType.Twitter, mu, msg.author, msg.id)
                    print(f"Rel Add: {mu} => {msg.author} [{r}]")
                    continue
                retweet_user = _get_retweet_name(msg.content)
                if retweet_user is not None:
                    # Retweet of someone else: record the edge only.
                    r = db.relation_add(MessageType.Twitter, msg.author, retweet_user, msg.id)
                    print(f"Rel Add: {msg.author} => {retweet_user} [{r}]")
                    continue
                if not msg.media_list:
                    continue
                db.download_add(msg)
                print(msg)
if __name__ == '__main__':
main()
| true |
781e14bcfdcea2e43ce8f912ab5f65cf183399b2 | Python | zhumu17/Movie_Recommender_System | /Models/RecentPopularModel.py | UTF-8 | 1,480 | 3.046875 | 3 | [] | no_license | # most popular model
# here it is a simple design: find the one with highest score with most of the users
import DatabaseQueries
import pandas as pd
import numpy as np
import logging
class RecentPopularModel(object):
    """Recommender that always returns the top-rated recent items.

    "Recent" means items with Year > 2008 and more than 15 ratings; both
    thresholds are hard-coded in train() below.  Data comes from the
    project's DatabaseQueries module.
    """
    # the year and number of rating thresholds can be modified inside DatabaseQueries SQL query
    def __init__(self):
        pass
    def train(self):
        """Build the ranked list of recent popular item ids."""
        df_ratings = DatabaseQueries.getRatings()
        df_itemYear = DatabaseQueries.getItemFeature().loc[:,['itemId','Year']]
        df_ratingsYear = pd.merge(df_ratings,df_itemYear, on='itemId')
        df_ratingsYear = df_ratingsYear[df_ratingsYear.Year>2008]
        # print(df_ratingsYear)
        # Columns addressed by position: assumes column [1] is the item id
        # and [2] the rating — TODO confirm against DatabaseQueries output.
        itemID = list(df_ratingsYear)[1]
        ratings = list(df_ratingsYear)[2]
        ratingsGrouped = df_ratingsYear.groupby(itemID)
        ratingsGroupedCount = ratingsGrouped[ratings].count()
        # print(ratingsGroupedCount)
        # Rank by mean rating, keeping only items with more than 15 ratings.
        itemRatingGroupedSorted = ratingsGrouped[ratings].mean()[ratingsGroupedCount > 15].sort_values(ascending=False)
        # print(itemRatingGroupedSorted)
        self.recentPopularList = itemRatingGroupedSorted.index.tolist()
        # print(self.recentPopularList)
    # unlike other models, most popular model doesn't need to predict, just directly recommend
    def predict(self):
        """No-op: popularity is user-independent, nothing to predict."""
        pass
    # directly recommend with the item_id index, based on corresponding rating from high to low
    def recommend(self):
        """Return item ids ranked by mean rating (call train() first)."""
        return self.recentPopularList
| true |
87a8c44996e8bea7affbd93c8298bde46436dd17 | Python | JiGro/NLP_Wikipedia_Summarizer | /Summarizer.py | UTF-8 | 2,377 | 3.28125 | 3 | [] | no_license | import wikipedia
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
def frequency_table(inputString):
    """Build a stem -> occurrence-count table, skipping English stopwords."""
    stemmer = PorterStemmer()
    words = word_tokenize(inputString)
    stopWords = set(stopwords.words("english"))
    filteredWords = [w for w in words if not w in stopWords]
    freqTbl = dict()
    for word in filteredWords:
        # Count by Porter stem so inflected forms share one entry.
        word = stemmer.stem(word)
        if word not in freqTbl:
            freqTbl[word] = 1
        else:
            freqTbl[word] += 1
    return freqTbl
def sentences_score(sentences, freqTbl):
    """Score each sentence by the summed frequency of the stems it
    contains, normalized by the sentence's token count.

    Sentences are keyed by their first 10 characters (key collisions
    merge scores, as in the original design).
    """
    sentenceVal = dict()
    for sentence in sentences:
        wordCount = len(word_tokenize(sentence))
        key = sentence[:10]
        for entry in freqTbl:
            # Substring containment check against the lowercased sentence.
            if entry in sentence.lower():
                sentenceVal[key] = sentenceVal.get(key, 0) + freqTbl[entry]
        # BUG FIX: guard the normalization — a sentence whose key was never
        # added (no stem matched) used to raise KeyError here.
        if key in sentenceVal:
            sentenceVal[key] = sentenceVal[key] // wordCount
    return sentenceVal
def avg_score(sentenceVal):
    """Return the mean of the sentence scores, truncated to an int.

    Raises ZeroDivisionError for an empty dict, matching the original.
    """
    # sum() replaces the original manual accumulation loop; int()
    # truncates toward zero exactly as before.
    return int(sum(sentenceVal.values()) / len(sentenceVal))
def summary_generator(sentences, sentenceVal, threshold):
    """Concatenate (each with a leading space) the sentences whose score,
    looked up by the sentence's first 10 characters, exceeds *threshold*.

    Sentences whose key is missing from sentenceVal are skipped.
    """
    # Comprehension + join replaces the original += string building and
    # drops an unused sentence counter.
    picked = [s for s in sentences
              if s[:10] in sentenceVal and sentenceVal[s[:10]] > threshold]
    return ''.join(' ' + s for s in picked)
# Interactive driver: search Wikipedia, let the user pick a page, summarize.
input_str = input('What summary from Wiki do you wish to receive?')
input_list = wikipedia.search(input_str)
print(input_list)
print('Which page do you want?')
response = ''
# Loop until the answer exactly matches one of the search results.
while response not in input_list:
    response = input('Please copy from list above without quotes and paste here: ')
print('*** Summary: ' + str(response) + '***')
wiki_page = wikipedia.page(response)
print(wiki_page.title)
print(wiki_page.url)
wiki_content = wiki_page.content
freqTbl = frequency_table(wiki_content)
sentences = sent_tokenize(wiki_content)
sentenceScore = sentences_score(sentences, freqTbl)
avgScore = avg_score(sentenceScore)
# Keep sentences scoring at least 30% above the average score.
threshold = 1.3
summary = summary_generator(sentences, sentenceScore, threshold * avgScore)
print(summary)
| true |
498ee90184b710e34183b7c97fa3b4203b66588f | Python | F286/FreeNet | /experimental/mnist.py | UTF-8 | 1,614 | 2.984375 | 3 | [] | no_license | import tensorflow as tf
# TF1-style setup: share one TensorFlow session with Keras.
sess = tf.Session()
from keras import backend as K
K.set_session(sess)
# Placeholder for flattened 28x28 MNIST images.
img = tf.placeholder(tf.float32, shape=(None, 784))
from keras.layers import Dense, Activation
def selu(x):
    """Scaled Exponential Linear Unit: scale * elu(x, alpha) with the
    fixed SELU constants."""
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * tf.nn.elu(x, alpha)
# Keras layers can be called on TensorFlow tensors:
x = Dense(128)(img)
# x = Activation(K.relu)(x)
# x = Activation(K.tanh)(x)
print(type(x))
x = selu(x)
print(type(x))
x = Dense(128)(x)
x = Activation(K.relu)(x)
# 10-way softmax over the digit classes.
preds = Dense(10, activation=K.softmax)(x)
# One-hot digit labels.
labels = tf.placeholder(tf.float32, shape=(None, 10))
from keras.objectives import categorical_crossentropy
loss = tf.reduce_mean(categorical_crossentropy(labels, preds))
from tensorflow.examples.tutorials.mnist import input_data
mnist_data = input_data.read_data_sets('MNIST_data', one_hot=True)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Initialize all variables
init_op = tf.global_variables_initializer()
sess.run(init_op)
# Run training loop
print("Training..")
with sess.as_default():
    # 100 SGD steps on mini-batches of 50 images.
    for i in range(100):
        batch = mnist_data.train.next_batch(50)
        train_step.run(feed_dict={img: batch[0],
                                  labels: batch[1]})
from keras.metrics import categorical_accuracy as accuracy
import numpy as np
print("Testing..")
acc_value = accuracy(labels, preds)
with sess.as_default():
    t = acc_value.eval(feed_dict={img: mnist_data.test.images,
                                  labels: mnist_data.test.labels})
# Mean accuracy over the test set, as a percentage.
print(np.mean(t) * 100)
| true |
5c7eaeadde7599e416f406208890373747a85dc7 | Python | barmi/CodingClub_python | /LV1/SourceCode/Chapter5/bounceBall.py | UTF-8 | 2,277 | 3.421875 | 3 | [] | no_license | # base source : https://stackoverflow.com/questions/29158220/tkinter-understanding-mainloop
# tkinter ref : http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/index.html
from tkinter import *
import math
canvas_width = 600
canvas_height = 400
root = Tk()
root.title = "Game"
#root.resizable(0,0)
#root.wm_attributes("-topmost", 1)
canvas = Canvas(root, width=canvas_width, height=canvas_height, bd=0, highlightthickness=0)
canvas.pack()
class Ball:
    """A ball that moves around the canvas and changes heading at edges.

    self.direction is the heading in degrees; move() converts it into a
    per-frame x/y step and draw() re-schedules itself via canvas.after.
    """
    def __init__(self, canvas, color):
        # Start in the middle of the playfield, heading diagonally.
        self.x = canvas_width / 2
        self.y = canvas_height / 2
        self.direction = 45.0
        self.canvas = canvas
        self.id = canvas.create_oval(-10, -10, 10, 10, fill=color)
        self.canvas.move(self.id, self.x, self.y)
        self.canvas.bind("<Button-1>", self.canvas_onclick)
        self.text_id = self.canvas.create_text(300, 200, anchor='se')
        self.canvas.itemconfig(self.text_id, text='hello')
    def canvas_onclick(self, event):
        """Show the clicked canvas coordinates in the text item."""
        self.canvas.itemconfig(
            self.text_id,
            text="You clicked at ({}, {})".format(event.x, event.y)
        )
    def move(self):
        # NOTE: x/y are reused here as the per-frame step (5 px along the
        # current heading), not as an absolute position.
        self.x = 5.0 * math.cos(math.radians(self.direction))
        self.y = 5.0 * math.sin(math.radians(self.direction))
    def draw(self):
        """Advance one frame, adjust heading at walls, and re-schedule."""
        self.move()
        #(self.canvas.coords(self.id)[0] + self.canvas.coords(self.id)[2]) / 2
        self.canvas.move(self.id, self.canvas.canvasx(self.x), self.canvas.canvasy(self.y))
        pos = self.canvas.coords(self.id)
        # Per-wall heading corrections.  NOTE(review): these +/-90 tweaks
        # are not true specular reflection — confirm intended behaviour.
        if pos[0] < 0:
            if self.direction < 180:
                self.direction -= 90
            else:
                self.direction += 90
        if pos[1] < 0:
            # NOTE(review): both branches subtract 90 — one was likely
            # meant to add; verify against the other walls.
            if self.direction > 270:
                self.direction -= 90
            else:
                self.direction -= 90
        if pos[2] > canvas_width:
            if self.direction > 180:
                self.direction -= 90
            else:
                self.direction += 90
        if pos[3] > canvas_height:
            if self.direction > 90:
                self.direction += 90
            else:
                self.direction = 360 - self.direction
        # ~50 FPS animation loop.
        self.canvas.after(20, self.draw)
ball = Ball(canvas, "red")
# Kick off the self-rescheduling animation before entering the event loop.
ball.draw() #Changed per Bryan Oakley's comment.
root.mainloop()
| true |
52b00b8d3c9a6011c007d4846977afe60cc31261 | Python | Crystal-Lilith/PDbot | /cogs/dpy/ls.py | UTF-8 | 2,128 | 2.53125 | 3 | [] | no_license | @client.command(description='Shows you all files in the specified directory||<dir>')
@commands.has_any_role('PDBot Mod', 'PDBot Dev')
async def ls(ctx, *, directory='.'):
    """List the files in *directory*, hiding sensitive entries.

    Refuses paths containing a '..' segment; any other failure (e.g. a
    missing directory) is reported as "Directory doesn't exist!".
    NOTE(review): only '..' is blocked — absolute paths are still
    listable; confirm whether that exposure is acceptable.
    """
    try:
        x = ''
        split_directory = directory.split('/')
        # Reject paths that try to walk upward out of the bot directory.
        for i in ['..']:
            if i in split_directory:
                embed = discord.Embed(title='Warning ❗', color=discord.Color.from_rgb(178, 34, 34),
                                      description='You may not ls this directory!')
                embed.set_footer(text=f'Attempted by: {ctx.message.author}', icon_url=ctx.author.avatar_url)
                await ctx.channel.send(embed=embed)
                return
        for i in os.listdir(directory):
            # Never reveal credentials or the launcher script.
            if i.lower() not in ['.env', 'start.sh']:
                x = f"{x}{i}\n"
        embed = discord.Embed(title=f'List of files and folders in `{directory}`', color=discord.Color.from_rgb(0, 191, 255),
                              description=f'```css\n{x}```')
        embed.set_footer(text=f'Requested by: {ctx.message.author}', icon_url=ctx.author.avatar_url)
        await ctx.channel.send(embed=embed)
    except:
        # Broad catch: any OS error is surfaced as a user-facing message.
        embed = discord.Embed(title='Error! ⚠️', color=discord.Color.from_rgb(255, 255, 51),
                              description='Directory doesn\'t exist!')
        embed.set_footer(text=f'Attempted by: {ctx.message.author}', icon_url=ctx.author.avatar_url)
        await ctx.channel.send(embed=embed)
@ls.error
async def ls_error(ctx, error):
    """Reply with the command's usage string when an argument is missing."""
    if isinstance(error, commands.MissingRequiredArgument):
        # Re-fetch the invoking message to recover the command name used.
        message = await ctx.channel.fetch_message(ctx.message.id)
        user_cmd = message.content.split()[0].split('$')
        cmd = get(client.commands, name=user_cmd[1])
        desc = user_cmd[1]
        # The part after '||' in the command description is its syntax help.
        syntax = cmd.description.split('||')[1]
        embed = discord.Embed(title='Invalid Syntax! ⚠️', color=discord.Color.from_rgb(255, 255, 51),
                              description=f'${desc} ({syntax})')
        embed.set_footer(text=f'Attempted by: {ctx.message.author}', icon_url=ctx.author.avatar_url)
        await ctx.channel.send(embed=embed)
| true |
00568e1ed2ec4f5e623da2280573d9e1bc782c70 | Python | micdm/receipt-tracker | /receipt_tracker/tests/test_models.py | UTF-8 | 2,789 | 2.5625 | 3 | [] | no_license | import pytest
from pytest import fixture
from receipt_tracker.models import Product, ReceiptItem
pytestmark = pytest.mark.django_db
class TestSeller:
    """Smoke tests for Seller's string and name representations."""
    def test_str(self, seller):
        result = str(seller)
        assert result
    def test_name_if_user_friendly_name_set(self, mocker, seller):
        mocker.patch.object(seller, 'user_friendly_name')
        result = seller.name
        assert result
    def test_name(self, mocker, seller):
        # With no user-friendly name the model must still produce a name.
        mocker.patch.object(seller, 'user_friendly_name', None)
        result = seller.name
        assert result
class TestProduct:
    """Tests for Product naming, details resolution and copy_from."""
    @fixture
    def another_product(self, mixer):
        # NOTE(review): 'user_fiendly_name' looks like a typo for
        # 'user_friendly_name' — confirm what mixer does with it.
        return mixer.blend(Product, user_fiendly_name='foo', barcode='123')
    def test_str(self, product):
        result = str(product)
        assert result
    def test_name_if_user_friendly_name_set(self, mocker, product):
        mocker.patch.object(product, 'user_friendly_name')
        result = product.name
        assert result
    def test_name_if_no_aliases(self, mocker, product):
        mocker.patch.object(Product, 'aliases', mocker.PropertyMock(return_value=[]))
        result = product.name
        assert result
    def test_details_if_food(self, product, food_product):
        result = product.details
        assert result == food_product
    def test_details_if_non_food(self, product, non_food_product):
        result = product.details
        assert result == non_food_product
    def test_details_if_unknown(self, product):
        # No food/non-food record: details must be None.
        result = product.details
        assert result is None
    def test_copy_from_if_copied(self, product, another_product):
        result = product.copy_from(another_product)
        assert result
        assert product.user_friendly_name == another_product.user_friendly_name
        assert product.barcode == another_product.barcode
    def test_copy_from_if_not_copied(self, product):
        # Copying from itself must be reported as a no-op.
        result = product.copy_from(product)
        assert not result
class TestFoodProduct:
    """Smoke test: FoodProduct has a non-empty string form."""
    def test_str(self, food_product):
        result = str(food_product)
        assert result
class TestNonFoodProduct:
    """Smoke test: NonFoodProduct has a non-empty string form."""
    def test_str(self, non_food_product):
        result = str(non_food_product)
        assert result
class TestProductAlias:
    """Smoke test: ProductAlias has a non-empty string form."""
    def test_str(self, product_alias):
        result = str(product_alias)
        assert result
class TestReceipt:
    """Tests for Receipt's string form and calorie aggregation."""
    def test_str(self, receipt):
        result = str(receipt)
        assert result
    def test_calories(self, mocker, receipt, receipt_item):
        # One item worth 1 calorie -> the receipt totals 1.
        mocker.patch.object(ReceiptItem, 'calories', mocker.PropertyMock(return_value=1))
        result = receipt.calories
        assert result == 1
class TestReceiptItem:
    """Smoke test: ReceiptItem has a non-empty string form."""
    def test_str(self, receipt_item):
        result = str(receipt_item)
        assert result
| true |
454e9dc2363053f84d9534ed34ec71baf45c0041 | Python | retfings/PaddleGAN | /applications/DAIN/util.py | UTF-8 | 2,478 | 2.78125 | 3 | [] | no_license | import os, sys
import glob
import shutil
import cv2
class AverageMeter(object):
    """Track the most recent value and a running (weighted) average."""

    def __init__(self):
        # Begin with all statistics zeroed.
        self.reset()

    def reset(self):
        """Zero the current value, average, running sum and sample count."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def combine_frames(input, interpolated, combined, num_frames):
    """Interleave original frames with interpolated ones into *combined*.

    Original frame i is copied to slot i*(num_frames+1); the num_frames
    interpolated frames that follow it fill the slots in between.
    Filenames are assumed to be zero-padded frame indices (asserted).
    """
    frames1 = sorted(glob.glob(os.path.join(input, '*.png')))
    frames2 = sorted(glob.glob(os.path.join(interpolated, '*.png')))
    num1 = len(frames1)
    num2 = len(frames2)
    # assert (num1 - 1) * num_frames == num2
    for i in range(num1):
        src = frames1[i]
        # Sanity check: the filename encodes the source frame index.
        imgname = int(src.split('/')[-1].split('.')[-2])
        assert i == imgname
        dst = os.path.join(combined, '{:08d}.png'.format(i * (num_frames + 1)))
        shutil.copy2(src, dst)
        if i < num1 - 1:
            try:
                for k in range(num_frames):
                    src = frames2[i * num_frames + k]
                    dst = os.path.join(
                        combined,
                        '{:08d}.png'.format(i * (num_frames + 1) + k + 1))
                    shutil.copy2(src, dst)
            except Exception as e:
                # Best-effort: log and continue if interpolated frames are
                # missing at the tail.
                print(e)
                print(len(frames2), num_frames, i, k, i * num_frames + k)
def remove_duplicates(paths):
    """Delete perceptually-duplicate frames under *paths*, then renumber.

    Uses a difference hash (dHash) of the grayscale image; frames with an
    identical hash are duplicates and only the first is kept.  Survivors
    are renamed to a dense 00000000.png sequence.  Returns the sorted
    list of remaining frame paths.
    """
    def dhash(image, hash_size=8):
        # dHash: compare adjacent pixels of a downscaled grayscale image
        # and pack the boolean differences into one integer.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        resized = cv2.resize(gray, (hash_size + 1, hash_size))
        diff = resized[:, 1:] > resized[:, :-1]
        return sum([2**i for (i, v) in enumerate(diff.flatten()) if v])
    hashes = {}
    image_paths = sorted(glob.glob(os.path.join(paths, '*.png')))
    for image_path in image_paths:
        image = cv2.imread(image_path)
        h = dhash(image)
        p = hashes.get(h, [])
        p.append(image_path)
        hashes[h] = p
    # Keep the first path for each hash value, delete the rest.
    for (h, hashed_paths) in hashes.items():
        if len(hashed_paths) > 1:
            for p in hashed_paths[1:]:
                os.remove(p)
    frames = sorted(glob.glob(os.path.join(paths, '*.png')))
    for fid, frame in enumerate(frames):
        new_name = '{:08d}'.format(fid) + '.png'
        new_name = os.path.join(paths, new_name)
        os.rename(frame, new_name)
    frames = sorted(glob.glob(os.path.join(paths, '*.png')))
    return frames
| true |
1f0234b238b9953acf531e09eefdebcf9b23b0ca | Python | IanGHill/Python_CRUD_cinema | /models/customer.py | UTF-8 | 2,143 | 2.875 | 3 | [] | no_license | import sys
sys.path.append('../db')
sys.path.append('../models')
from sqlrunner import *
from ticket import *
class Customer():
    """A cinema customer persisted in the `customers` table.

    all(), find_by_name() and delete_all() are table-level helpers that
    take no `self` and are called on the class itself (kept undecorated
    for backward compatibility).
    """
    def __init__(self, name, funds):
        # id stays 0 until save() assigns the database-generated key.
        self.id = 0
        self.name = name
        self.funds = funds
    def save(self):
        """Insert this customer and store the generated row id on self."""
        sql = """
        INSERT INTO customers (name, funds)
        VALUES (%s, %s)
        RETURNING id;
        """
        values = (self.name, self.funds)
        result = Sqlrunner.run(sql, "fetchone", values)
        self.id = result[0]
    def all():
        """Return every customer in the table as Customer objects."""
        sql = "SELECT * FROM customers;"
        rows = Sqlrunner.run(sql, "fetchall")
        customer_array = []
        for customer_row in rows:
            fetched_customer = Customer(*customer_row[1:])
            fetched_customer.id = customer_row[0]
            customer_array.append(fetched_customer)
        return customer_array
    def find_by_name(name):
        """Return the first customer matching *name*, or None."""
        sql = "SELECT * from customers WHERE name=%s"
        values = (name,)
        customer_row = Sqlrunner.run(sql, "fetchone", values)
        if customer_row is None:
            return None
        fetched_customer = Customer(*customer_row[1:])
        fetched_customer.id = customer_row[0]
        return fetched_customer
    def update(self):
        """Persist the current name/funds for this customer's id."""
        sql = "UPDATE customers SET name=%s, funds=%s WHERE id=%s"
        values = (self.name, self.funds, self.id)
        Sqlrunner.run(sql, "", values)
    def buy_ticket(self, film, screening):
        """Buy one ticket if the customer can afford it and seats remain.

        On success: debits funds, adjusts the screening's counters,
        persists both records and saves a Ticket row.
        """
        # Guard clauses replace the original nested conditionals.
        if self.funds < film.price:
            print("Not enough money")
            return
        if screening.tickets_available <= 0:
            print("No tickets available")
            return
        self.funds -= film.price
        screening.tickets_available -= 1
        screening.tickets_sold += 1
        self.update()
        screening.update()
        Ticket(film.id, screening.id, self.id).save()
    def delete_all():
        """Remove every row from the customers table.

        BUG FIX: this method was defined twice; the first copy referenced
        an undefined `SqlRunner` name (would raise NameError if called)
        and was silently shadowed by the second.  Only the working
        definition remains.
        """
        sql = "DELETE FROM CUSTOMERS"
        Sqlrunner.run(sql, "")
| true |
195cf34486ab1a8554730296d60e90d663a67ecc | Python | neon520/Python-for-dummies | /Programas varios/regularExpression with functions.py | UTF-8 | 877 | 3.75 | 4 | [] | no_license | #! /usr/bin/python3
import re
def caso1(str):
    """True if *str* starts with a word, a space and an uppercase letter.

    FIX: the pattern is now a raw string ('\\w'/'\\s' in a plain string
    are invalid escapes and warn on modern Python).
    NOTE(review): the pattern is unanchored at the end, so text after the
    capital letter still matches — confirm whether "una única letra"
    should be enforced with a terminal anchor.
    """
    if re.match(r'\w+\s[A-Z]', str):
        print (str+" cumple el caso 1 (palabra seguida de un espacio y una única letra mayúscula.")
        return True
    else:
        return False
def caso2(str):
    """True if *str* looks like an email address ending in .com or .es.

    BUG FIX: the old pattern '\\w+@(\\w+)\\.com|es' made the alternation
    span the whole expression, so any string starting with "es" was
    accepted as an email.  The alternation now scopes only the TLD, and
    the pattern is a raw string.
    """
    if re.match(r'\w+@\w+\.(com|es)', str):
        print (str+" cumple el caso 2 (es un correo electrónico).")
        return True
    else:
        return False
def caso3(str):
    """True if *str* starts with a 16-digit card number: four groups of
    four digits, consistently separated by '-' or by spaces.

    FIX: the pattern is now a raw string (avoids invalid-escape warnings).
    """
    if re.match(r'(((\d{4}-){3})|((\d{4} ){3}))\d{4}', str):
        print (str+" cumple el caso 3 (es una tarjeta de crédito).")
        return True
    else:
        return False
def QueEsEsto(str):
    """Run the three classifiers; print a fallback if none matched.

    Each casoN() prints its own message on success, so this only has to
    report the no-match case.
    """
    if not caso1(str) and not caso2(str) and not caso3(str):
        print (str+" no cumple ninguno de los casos.")
def main():
    """Read one line from stdin and classify it."""
    print ("Introduzca una cadena:")
    str=input()
    QueEsEsto(str)
#Ejecución del main
if __name__ == "__main__":
main() | true |
2096cd4848bb7abc79b9aedde943d1a021d737b9 | Python | DaniellFoncho/python-facturas | /ventas.py | UTF-8 | 3,160 | 3.03125 | 3 | [] | no_license | import libsistema as lbs
import libcrud as lbc
#menú para ventas
def submenu():
    """Interactive sales menu: create, list, look up, void and save invoices.

    Relies on the project helpers in libsistema (lbs) and libcrud (lbc);
    data is persisted in *.dat files.  (Also removes dataset-extraction
    junk that had been fused onto the final line, which was a syntax
    error; comments translated to English.)
    """
    # Lists backing the data files.
    vendedores = []
    clientes = []
    productos = []
    detalles = []
    ventas = []
    # Load the reference data from disk.
    vendedores = lbc.cargar(vendedores,'vendedores.dat')
    clientes = lbc.cargar(clientes,'clientes.dat')
    productos = lbc.cargar(productos,'productos.dat')
    # Menu loop until the user picks option 6 (exit).
    a = True
    while a:
        # Show the menu options.
        lbs.subMenu('VENTAS')
        select = input()
        if select == '6':
            a = False
        elif select == '1':
            # Option 1: register a new sale with its line items.
            venta = lbs.inserta_venta(vendedores,clientes,ventas)
            lbs.mostrar_productos(productos)
            suma_subtotales = 0
            suma_iva = 0
            suma_total = 0
            while True:
                # Add one line item for this sale.
                detalle = lbs.inserta_detalle(productos,venta[0])
                # Accumulate the invoice totals.
                suma_subtotales = suma_subtotales + detalle[4]
                suma_iva = suma_iva + detalle[5]
                suma_total = suma_total + detalle[6]
                detalles.append(detalle)
                respuesta = input('agregar otro producto SI/NO: ')[:1]
                if respuesta.upper() != 'S':
                    break
            # Append the totals so they show up when listing the sale.
            venta.append(suma_subtotales)
            venta.append(suma_iva)
            venta.append(suma_total)
            ventas.append(venta)
            lbs.pausaEnter("verde","finalizado\n<ENTER>")
        elif select == '2':
            # Option 2: list every recorded sale.
            lbs.listar_ventas(ventas)
            lbs.pausaEnter("verde","listado\n<ENTER>")
        elif select == '3':
            # Option 3: show one invoice and its line items.
            codigo = input('digita codigo de la factura: ')
            i = lbs.buscar(ventas,codigo)
            if i < 0:
                print('N° DE FACTURA NO ENCONTRADO')
            else:
                venta = ventas[i]
                lbs.mostrar_venta(venta)
                print(130*'-')
                # Print the line items matching this invoice code.
                lbs.mostrar_detalles(codigo,detalles)
                input()
        elif select == '4':
            # Option 4: void (anular) an existing sale.
            if len(ventas) > 0:
                codigo = input('CODIGO: ')
                i = lbs.buscar(ventas,codigo)
                if i < 0:
                    print('CODIGO NO ENCONTRADO')
                else:
                    venta = ventas[i]
                    lbs.mostrar_venta(venta)
                    lbs.anular(venta)
            else:
                lbs.pausaEnter('rojo','no hay ventas en la lista')
        elif select == '5':
            # Option 5: persist sales and line items to disk.
            lbc.guardar(ventas,'ventas.dat')
            lbc.guardar(detalles,'detalles.dat')
            lbs.pausaSegundos('verde','factura alamacenada',3)
5095696417930a8f8d1235518d284e039dd60bf1 | Python | HamPUG/examples | /ncea_level2/snippets/snip_l2_01_a.py | UTF-8 | 1,030 | 3.375 | 3 | [] | no_license | # Import sys for retrieving the name of the program in list sys.argv[0]
import sys
# Import time an use the sleep() function to provide a delay in the program.
import time
_description_ = """
Import time and use sleep() for 5 seconds.
"""
_author_ = """Ian Stewart - December 2016
Hamilton Python User Group - https://hampug.pythonanywhere.com
CC0 https://creativecommons.org/publicdomain/zero/1.0/ """
# Program: snip_l2_01_a.py
print("Program {} has started...".format(sys.argv[0]))
print("Program will now sleep for 5 seconds...")
time.sleep(5) # Time in seconds
print("Program has finished sleeping.")
print("End of program")
input("Press Enter key to end program")
sys.exit()
"""
To check code style:
Linux...
$ python3 -m pep8 --statistic --ignore=E701 snip_l2_01_a.py
Install pep8 on Linux: $ sudo apt-get install python3-pep8
Windows...
> python -m pep8 --statistic --ignore=E701 snip_l2_01_a.py
Install pep8 on Windows: >pip3 install pep8
More information: https://www.python.org/dev/peps/pep-0008/
"""
| true |
fbce0a5487de5486fc37643128c9f11003b7f846 | Python | Stanford-PERTS/neptune | /app/model/notification.py | UTF-8 | 3,629 | 2.703125 | 3 | [
"JSON",
"Unlicense",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | """Notification: An alert to a user about some event, with sufficient context
to quickly navigate and take action."""
from collections import defaultdict
from google.appengine.ext import ndb
import json
import logging
from gae_models import Email, DatastoreModel
class Notification(DatastoreModel):
    """Alert entity stored as a child of the recipient User (the parent
    key identifies who it is for).

    NOTE(review): `reduce` as a builtin and list-returning `filter` below
    imply this is Python 2 (App Engine standard) code — confirm before
    porting.
    """
    task_id = ndb.StringProperty()
    # The related organization, project, or survey.
    context_id = ndb.StringProperty()
    subject = ndb.StringProperty()
    body = ndb.TextProperty()
    link = ndb.StringProperty()
    dismissed = ndb.BooleanProperty(default=False)
    autodismiss = ndb.BooleanProperty()
    # Some notifications have nothing to view so they should have no view
    # button, e.g. being rejected from an organization.
    viewable = ndb.BooleanProperty(default=True)
    # There may be an email created to go along with the notification if this
    # entity was just instantiated via Notification.create(). If so, calling
    # notification.put() will also put() the email (and thus put it in the
    # queue for sending).
    email = None
    @classmethod
    def create(klass, **kwargs):
        """Create a notification and, unless the recipient opted out, the
        matching Email (saved later by put(), see after_put)."""
        if 'parent' not in kwargs:
            raise Exception("Notifications must have a parent user.")
        note = super(klass, klass).create(**kwargs)
        note.email = note.create_email()
        return note
    @classmethod
    def filter_redundant(klass, notifications):
        """Remove redundant notifications from a list.
        Example redundant notifications:
        * Joey updated task X to read "bla"
        * Joey updated task X to read "blah"
        ...when both tasks haven't been dismissed.
        """
        # Index notifications by their recipient so we can minimize entity
        # group reads.
        notes_by_parent = defaultdict(list)
        for n in notifications:
            notes_by_parent[DatastoreModel.get_parent_uid(n.uid)].append(n)
        # Filter out redundant notifications from each user.
        for parent_id, new_notes in notes_by_parent.items():
            existing_notes = Notification.get(
                ancestor=DatastoreModel.id_to_key(parent_id))
            def not_redundant(new_note):
                """Does the new note match any existing, undismissed one?"""
                return all([ex.task_id != new_note.task_id or ex.dismissed
                            for ex in existing_notes])
            notes_by_parent[parent_id] = filter(not_redundant, new_notes)
        # Collapse the lists for return.
        return reduce(lambda x, y: x + y, notes_by_parent.values(), [])
    @classmethod
    def get_long_uid(klass, short_uid):
        """Expand a short uid, resolving against both kinds in the path."""
        return super(klass, klass).get_long_uid(
            short_uid, kinds=(klass.__name__, 'User'))
    def create_email(self):
        """Create an email to match the content of the notification if user
        has not set their notification preferences to disable emails.
        """
        recipient = self.key.parent().get()
        # The default is to send an email, unless the user has disabled
        if recipient.notification_option.get('email', None) == 'no':
            return None
        return Email.create(
            to_address=recipient.email,
            subject=self.subject,
            template='notification.html',
            template_data={'subject': self.subject, 'body': self.body,
                           'link': self.link},
        )
    def after_put(self, *args, **kwargs):
        """If fresh from create(), saving a notification to the datastore
        triggers sending the associated email."""
        if self.email:
            self.email.put()
| true |
0fb135b005d535c14f5e0099a3f95958116a3158 | Python | nvchung599/JerbBot | /trunk/basic_bot.py | UTF-8 | 12,305 | 2.65625 | 3 | [] | no_license | # PURPOSE:
# Bot parent class.
# Children bots scan/scrape their assigned websites, filtering jobs as they go.
# These jobs are reported back to the Agency.
from trunk.job import *
from trunk.general import *
import requests
from bs4 import BeautifulSoup
import abc
class BasicBot(metaclass=abc.ABCMeta):
# ------------------------------------------------------------------------------------------------------------------
# INITIALIZER
# ------------------------------------------------------------------------------------------------------------------
    def __init__(self, name, history):
        """Set up the bot's base URL, filter thresholds and bookkeeping.

        Args:
            name: identifies this bot; also names its URL file under
                trunk/branch/<name>.txt (created on first run).
            history: previously saved jobs, used to skip duplicates.
        """
        self.name = name
        # First run for this bot: create its URL file before reading it.
        if not os.path.exists("trunk/branch/" + str(self.name) + ".txt"):
            self.initialize_base_url()
        self.base_url = read_file("trunk/branch/" + self.name + ".txt")
        self.current_url = self.base_url
        self.current_soup = None
        self.current_page_number = 1
        self.history = history
        self.jobs = []
        # Counters used by tally() and the filter_* methods below.
        self.job_index = 0
        self.job_index_rejected = 0
        self.need_to_terminate = False
        self.initialize_search_settings()
        # TODO TODO TODO TODO TODO TODO TODO TODO
        # TODO TODO TODO TODO TODO TODO TODO TODO
        # Numeric thresholds loaded from one-line filter files.
        self.bad_word_tolerance = int(file_to_set('trunk/filters/bad_word_tolerance.txt').pop()) # Body Text
        self.good_word_tolerance = int(file_to_set('trunk/filters/good_word_tolerance.txt').pop()) # Body Text
        self.min_years_exp = int(file_to_set('trunk/filters/min_years_exp.txt').pop())
        self.min_str_len = int(file_to_set('trunk/filters/min_str_len.txt').pop())
        self.page_limit = int(file_to_set('trunk/filters/page_limit.txt').pop())
        # TODO TODO TODO TODO TODO TODO TODO TODO
        # TODO TODO TODO TODO TODO TODO TODO TODO
        self.initialize_filters()
        # Word sets used by the title/body filters.
        self.essential_body = file_to_set('trunk/filters/essential_body.txt')
        self.excluded_body = file_to_set('trunk/filters/excluded_body.txt')
        self.excluded_title = file_to_set('trunk/filters/excluded_title.txt')
# ------------------------------------------------------------------------------------------------------------------
# NAV & SCAN
# ------------------------------------------------------------------------------------------------------------------
    def scrape_all_pages(self):
        """navigate and scrape site until specified/unspecified page
        limit or some other form of termination (hangups, errors)

        Returns the jobs collected so far, minus duplicates (rejection
        ids 5 = duplicate this run, 6 = historical — see filter_duplicate
        and filter_history).
        """
        # requests per page on indeed???
        # make 'current_soup'
        # reduce requests from min 4 per page to min 1 per page
        # make initial soup before loop, seed loop with soup
        r = requests.get(self.current_url)
        self.current_soup = BeautifulSoup(r.content, "html.parser")
        for i in range(self.page_limit):
            try:
                self.scrape_this_page(self.current_soup)
                # Stop when there is no next page.
                if not self.end_check(self.current_soup):
                    break
                self.navigate_to_next_page(self.current_soup)
                if self.need_to_terminate:
                    print(self.name + ' has encountered a problem (probably requests hangup) and is terminating early')
                    break
                self.current_page_number += 1
            except:
                # Broad catch: any scrape failure stops paging but keeps
                # whatever was collected so far.
                print('\n\nSCRAPE_ALL_PAGES HAS ENCOUNTERED AN EXCEPTION AND HAS BROKEN LOOP')
                print('RETURNING JOB LIST AS IS\n\n')
                break
        else:
            # for-else: runs only when the page limit was reached cleanly.
            print('All lights green')
        only_new_jobs = []
        for each_job in self.jobs:
            if each_job.rejection_identifier not in [5, 6]:
                only_new_jobs.append(each_job)
        return only_new_jobs
    @abc.abstractmethod
    def scrape_this_page(self, soup):
        """Parse the current page's soup and pass every job posting found on it
        through bullshit_filter(). Site-specific subclasses implement this."""
    @abc.abstractmethod
    def navigate_to_next_page(self, soup):
        """Advance to the next results page, overcoming any bot/scraping
        protection the site may have. Expected to refresh self.current_soup."""
    @abc.abstractmethod
    def end_check(self, soup):
        """Return a truthy value while a next page exists; falsy stops the
        scrape loop in scrape_all_pages()."""
def tally(self):
"""This function should be called every time a job posting is processed"""
self.job_index += 1
print(self.name + ' processing job # ' + str(self.job_index))
def extract_job_details(self, job):
"""Given a job w/ URL defined... load the job with all applicable text belonging to that page"""
my_words = []
try:
r = requests.get(job.url)
except:
print('page loading error on job # %d -' % self.job_index)
print(job.url)
job.reject(4)
self.job_index_rejected += 1
return 'bad url'
soup = BeautifulSoup(r.content, 'html.parser')
target_tags = ['p', 'li', 'ul', 'span', 'br', 'font']
for tag in target_tags:
target_blocks = soup.find_all(tag)
for block in target_blocks:
append_this = get_words(block.get_text())
if len(append_this) >= self.min_str_len:
my_words += append_this
job.body = my_words
def filter_history(self, job):
"""Exclude duplicates of jobs that have already been saved to disk"""
if job in self.history:
print("historical duplicate on job # %d X" % self.job_index)
# print(job.url)
job.reject(6)
self.job_index_rejected += 1
return
def filter_title(self, job):
"""Reject jobs with at least 1 bad word"""
title_words = get_words(job.title)
for word in title_words:
if word in self.excluded_title:
print("bad title on job # %d X" % self.job_index)
# print(job.url)
job.reject(1)
self.job_index_rejected += 1
return
def filter_duplicate(self, job):
"""Exclude duplicates of jobs that have already been found during this search"""
if job in self.jobs:
print("duplicate posting on job # %d X" % self.job_index)
# print(job.url)
job.reject(5)
self.job_index_rejected += 1
return
    def filter_body(self, job):
        """
        Reject jobs based on:
        -good word count/tolerance
        -bad word count/tolerance
        -particular strings
        -whether any text was extracted at all

        Rejection codes used here: 4 = no text extracted, 2 = bad/insufficient
        body text, 3 = excessive years-of-experience requirement.
        """
        # empty string indicates website has counter-scraping measures, implement delay in request to counteract
        if len(job.body) == 0:
            job.reject(4)
            self.job_index_rejected += 1
            print('text extraction error on job # %d -' % self.job_index)
            print(job.url)
            return
        bad_word_count = 0
        bad_words = []
        good_word_count = 0
        good_words = []
        # Single pass over the body; the method can return mid-scan as soon as
        # the bad-word tolerance is exceeded or a bad experience line is found.
        for i in range(len(job.body)):
            # check for singular, DESIRED keywords. a specified number of desired keywords is required for approval
            if job.body[i] in self.essential_body:
                if job.body[i] not in good_words:  # no dupes
                    good_words.append(job.body[i])
                    job.good_hits.append(job.body[i])
                    good_word_count += 1
            # check for singular, FORBIDDEN keywords. a specified number of forbidden keywords is tolerated
            if job.body[i] in self.excluded_body:
                if job.body[i] not in bad_words:
                    bad_words.append(job.body[i])
                    bad_word_count += 1
                    # print('BAD WORD FOUND ' + body_words[i])
            # Checked every iteration so the scan stops as soon as the
            # tolerance is exceeded.
            if bad_word_count > self.bad_word_tolerance:
                job.reject(2)
                self.job_index_rejected += 1
                print("bad body text on job # %d X" % self.job_index)
                print(bad_words)
                print(job.url)
                return
            # check for characteristic "_ or more years of exp" line
            try:  # check if it's a number
                if is_integer(job.body[i]):
                    my_int = int(job.body[i])
                    if my_int > self.min_years_exp:
                        # scan the few words following the number; the inner
                        # try also absorbs IndexError near the end of the body
                        for j in range(5):
                            try:
                                if job.body[i+j] in ('experience', 'years'):
                                    job.reject(3)
                                    self.job_index_rejected += 1
                                    print('bad exp req on job # %d X' % self.job_index)
                                    # print(job.url)
                                    return
                            except:
                                pass
            except:
                pass  # if it's not a number
        # Approval requires at least good_word_tolerance distinct desired words.
        if good_word_count < self.good_word_tolerance:
            job.reject(2)
            self.job_index_rejected += 1
            print("lack of good text on job # %d X" % self.job_index)
            print(good_words)
            print(job.url)
def bullshit_filter(self, title, company, url, city, date):
"""Does a full-spectrum-relevance check and records the job, relevant or not"""
this_job = Job(title, company, url, city, date)
self.filter_history(this_job)
if this_job.is_relevant:
self.filter_duplicate(this_job)
if this_job.is_relevant:
self.filter_title(this_job)
if this_job.is_relevant:
self.extract_job_details(this_job)
if this_job.is_relevant:
self.filter_body(this_job)
if this_job.is_relevant:
print('job # %d approved O' % self.job_index)
self.jobs.append(this_job)
# ------------------------------------------------------------------------------------------------------------------
# FILE & MEMORY MANAGEMENT
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def initialize_filters():
"""The filters are shared by all bots, and are stored in .txt files in trunk/filters"""
directory_name = 'trunk/filters'
essential_body = directory_name + "/essential_body.txt"
excluded_body = directory_name + "/excluded_body.txt"
excluded_title = directory_name + "/excluded_title.txt"
if not os.path.exists(directory_name):
create_folder(directory_name)
for file in (essential_body, excluded_body, excluded_title):
if not os.path.isfile(file):
print("Creating new file " + file)
write_file(file, "")
def initialize_search_settings(self):
directory_name = 'trunk/filters'
bad_word_tolerance_filename = directory_name + "/bad_word_tolerance.txt"
good_word_tolerance_filename = directory_name + "/good_word_tolerance.txt"
min_years_exp_filename = directory_name + "/min_years_exp.txt"
min_str_len_filename = directory_name + "/min_str_len.txt"
page_limit_filename = directory_name + "/page_limit.txt"
if not os.path.exists(directory_name):
create_folder(directory_name)
for file in (bad_word_tolerance_filename, good_word_tolerance_filename, min_years_exp_filename, min_str_len_filename, page_limit_filename):
if not os.path.isfile(file):
print("Creating new file " + file)
write_file(file, "0")
return 0
def initialize_base_url(self):
"""Base URLs are stored in .txt files alongside their respective bots in trunk/branch"""
path = "trunk/branch/" + str(self.name) + ".txt"
if not os.path.exists(path):
print("\n!! NEW BOT DETECTED !!\n")
print("Defining new basis for " + self.name)
else:
print("Updating basis for " + self.name)
print("Input base URL:")
input_url = input()
write_file(path, input_url)
| true |
a93cdcce54c0962c262506fda48da79fafe9b647 | Python | utamhank1/data_mining_algorithms | /data_splitting_rcv1.py | UTF-8 | 1,033 | 3.125 | 3 | [] | no_license | # This script takes the rcv1 dataset (www.jmlr.org/papers/volume5/lewis04a/lewis04a.pdf) of 800,000 categorized news
# stories and splits it into training and test data.
# Import libraries
import numpy as np
from sklearn.datasets import fetch_rcv1
def main():
    """Download the rcv1 corpus, binarise label column 33, split it into
    100k training / remainder test rows, and save the four arrays to disk."""
    # Fetch the rcv1 dataset from sklearn.
    rcv1 = fetch_rcv1()
    # Clean and reformat the dataset.
    target = rcv1['target'].todense()
    # Column 33 is the class of interest; flatten to a 1-D label vector.
    label = np.array(target[:, 33]).reshape(1, -1)[0]
    # NOTE(review): assigning .dtype reinterprets the underlying buffer
    # rather than casting values — presumably the targets are stored as
    # int8-compatible 0/1; confirm this holds for the fetched matrix.
    label.dtype = 'int8'
    # Map the negative class from 0 to -1 (e.g. for hinge-loss style models).
    label[label == 0] = -1
    # Create numpy array of training data.
    training_data = rcv1['data'][0:100000, :]
    # Assign labels to training data.
    training_label = label[0:100000]
    test_data = rcv1['data'][100000:, :]
    test_label = label[100000:]
    # Save the training and test datasets to disk (np.save appends .npy
    # automatically where the extension is omitted).
    np.save('test_data_rcv1.npy', test_data)
    np.save('test_label_rcv1', test_label)
    np.save('training_data_rcv1', training_data)
    np.save('training_label_rcv1', training_label)


if __name__ == '__main__':
    main()
| true |
f6718a4bef102b0efc2d55fa623e75b697495bec | Python | Fromang/bigdata-1a | /programs/a.py | UTF-8 | 3,425 | 3.171875 | 3 | [] | no_license | from common import Traffic, AirportCounter, AirportTable
from _base import BaseTest
class BaseTestA(BaseTest):
    """Common scaffolding for exercise A: count landings/take-offs per airport."""

    def __init__(self, *args, **kwargs):
        BaseTest.__init__(self, *args, **kwargs)

    def test_process(self, traffic):
        """Tally one movement at each end of a traffic record."""
        # NOTE(review): arrival -> take_off and departure -> landing looks
        # inverted — confirm against the Traffic field semantics.
        self.get_airport(traffic.arrival).increase_take_off()
        self.get_airport(traffic.departure).increase_landing()

    def get_airport(self, airport_name):
        # Storage-specific subclasses supply the lookup strategy.
        pass

    def get_airport_table(self):
        pass

    def execute(self):
        """Stream the traffic file, feeding every record to test_process."""
        Traffic.read_file(self.filename, self.test_process)

    def print_results(self):
        """Print the top-ten table and the total number of airports seen."""
        summary = self.get_airport_table()
        print("Top ten airports:")
        summary.draw_top_ten()
        print("Total airports: " + str(summary.total_airports()))
class ListTest(BaseTestA):
    """Unindexed list variant: every lookup is a linear scan."""

    def __init__(self, *args, **kwargs):
        BaseTest.__init__(self, *args, **kwargs)
        self.airports = list()

    def get_airport(self, airport_name):
        """Return the counter for *airport_name*, creating it on first sight."""
        found = next((counter for counter in self.airports
                      if counter.name == airport_name), None)
        if found is None:
            found = AirportCounter(airport_name)
            self.airports.append(found)
        return found

    def get_airport_table(self):
        return AirportTable(self.airports)


ListTest.description = """List (without index)
This is the worst because each time has to find in all the stored airport counters."""
class SortedListTest(BaseTestA):
    """Sorted-list variant: airports kept in descending name order so a
    dichotomic (binary) search can locate or place each counter."""

    def __init__(self, *args, **kwargs):
        BaseTest.__init__(self, *args, **kwargs)
        self.airports = list()

    def get_airport(self, airport_name):
        """Return the counter for *airport_name*, inserting a new one at its
        sorted position when it is not yet known."""
        (index, airport) = self.dichotomic_search(airport_name)
        if airport is None:
            airport = AirportCounter(airport_name)
            self.airports.insert(index, airport)
        return airport

    def dichotomic_search(self, airport_name, first=0, last=None):
        """Binary-search self.airports (descending name order).

        Returns (index, airport) when found, or (insertion_index, None) when
        the name is absent.
        """
        if last is None:
            last = len(self.airports)
        if last == 0:
            return 0, None
        # Bug fix: `/ 2` yields a float on Python 3, making this index raise
        # TypeError; floor division keeps the integer behaviour Python 2 had.
        index = (last - first) // 2 + first
        airport = self.airports[index]
        if airport.name == airport_name:
            return index, airport
        elif first == index:
            # Window reduced to one slot: decide which side to insert on.
            if airport.name > airport_name:
                return first + 1, None
            else:
                return first, None
        elif airport.name > airport_name:
            # Target sorts after the midpoint in descending order.
            return self.dichotomic_search(airport_name, index, last)
        elif airport.name < airport_name:
            return self.dichotomic_search(airport_name, first, index)

    def get_airport_table(self):
        return AirportTable(self.airports)


SortedListTest.description = """Sorted list (equivalent to tree index)
Optimization for the list method.
The airports are sorted by name, then a dichotomic search can be used."""
class DictionaryTest(BaseTestA):
    """Dictionary variant: hash-indexed O(1) lookups by airport name."""

    def __init__(self, *args, **kwargs):
        BaseTest.__init__(self, *args, **kwargs)
        self.airports = dict()

    def get_airport(self, airport_name):
        """Return the counter for *airport_name*, creating it on first sight."""
        if airport_name not in self.airports:
            self.airports[airport_name] = AirportCounter(airport_name)
        return self.airports[airport_name]

    def get_airport_table(self):
        return AirportTable(self.airports.values())


DictionaryTest.description = """Dictionary (equivalent to hash index)
This is the fastest method because its very easy to find elements using its index."""
| true |
cf28a4a6b2e61de42a2345cae81eb88ba0352853 | Python | V-Marco/dimreducers-crusher | /dimreducers_crusher/datasets/SwissRollDataset.py | UTF-8 | 505 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import sklearn
import numpy as np
from AbstractDataset import AbstractDataset
from sklearn.datasets import make_swiss_roll
# shape is defined by args in get
# default is (1000,3)
class SwissRollDataset(AbstractDataset):
    """Swiss-roll toy dataset wrapper around sklearn's make_swiss_roll."""

    def __init__(self):
        super().__init__()

    def get(self, n_samples=1000, noise=0.1, **kwargs) -> np.ndarray:
        """Generate and return (X, y) for a swiss roll of *n_samples* points.

        NOTE(review): the annotation says np.ndarray but the method returns
        the (X, y) tuple from make_swiss_roll — kept for compatibility.
        """
        try:
            X, y = make_swiss_roll(n_samples=n_samples, noise=noise)
            return X, y
        except Exception as err:
            # Bug fix: the original `except:` body was the bare expression
            # `RuntimeError`, which did nothing and silently returned None.
            raise RuntimeError('failed to generate swiss roll dataset') from err

    @property
    def is_sparse(self) -> bool:
        return False
| true |
8d85f3a827210aff2864fbda3eee6960f3615099 | Python | malibustacy/tstoolbox | /tests/test_normalization.py | UTF-8 | 2,102 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_normalization
----------------------------------
Tests for `tstoolbox` module.
"""
import shlex
import subprocess
from unittest import TestCase
from pandas.util.testing import assert_frame_equal
import pandas as pd
from tstoolbox import tstoolbox
from tstoolbox import tsutils
class TestDescribe(TestCase):
    """Compare tstoolbox.normalization output against pre-computed fixtures."""

    def setUp(self):
        """Load the expected frames for every normalization mode."""
        self.data_0_to_1 = tstoolbox.read(
            'tests/data_sunspot_normalized_0_to_1.csv')
        self.data_10_to_20 = tstoolbox.read(
            'tests/data_sunspot_normalized_10_to_20.csv')
        self.data_zscore = tsutils.memory_optimize(
            tstoolbox.read('tests/data_sunspot_normalized_zscore.csv'))
        self.data_pct_rank = tsutils.memory_optimize(
            tstoolbox.read('tests/data_sunspot_normalized_pct_rank.csv'))

    def test_normalize_0_to_1(self):
        """The default mode rescales into the [0, 1] range."""
        result = tstoolbox.normalization(input_ts='tests/data_sunspot.csv')
        assert_frame_equal(result, self.data_0_to_1)

    def test_normalize_10_to_20(self):
        """Custom min/max limits rescale into the [10, 20] range."""
        result = tstoolbox.normalization(min_limit=10,
                                         max_limit=20,
                                         input_ts='tests/data_sunspot.csv')
        assert_frame_equal(result, self.data_10_to_20)

    def test_normalize(self):
        """The zscore mode standardises the series."""
        result = tstoolbox.normalization(mode='zscore',
                                         input_ts='tests/data_sunspot.csv')
        assert_frame_equal(result, self.data_zscore)

    def test_pct_rank(self):
        """The pct_rank mode converts values to percentile ranks."""
        result = tstoolbox.normalization(mode='pct_rank',
                                         input_ts='tests/data_sunspot.csv')
        assert_frame_equal(result, self.data_pct_rank)
| true |
b451997a985350a0cb525fb294eea8bd0a95ea9d | Python | Zemllia/bot-killer | /face_detect.py | UTF-8 | 641 | 2.546875 | 3 | [] | no_license | import cv2
import urllib.request
import numpy as np
def url_to_image(url):
    """Download *url* and decode the response body into a BGR OpenCV image."""
    # Bug fix: the urlopen response was never closed; the context manager
    # releases the connection even if decoding fails.
    with urllib.request.urlopen(url) as resp:
        image = np.asarray(bytearray(resp.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    # return the image
    return image
def check_img(image):
    """Return the number of frontal faces detected in the image at *image* (a URL)."""
    classifier = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    frame = url_to_image(image)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = classifier.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
    )
    return len(faces)
| true |
08cd9d18b1382a7497b964b6201d0886e39e8280 | Python | niketagrawal/F3DASM | /f3dasm/abaqus/geometry/utils.py | UTF-8 | 2,030 | 3.453125 | 3 | [
"BSD-3-Clause"
] | permissive | '''
Created on 2020-03-24 15:01:13
Last modified on 2020-03-24 16:28:53
Python 3.7.3
v0.1
@author: L. F. Pereira (lfpereira@fe.up.pt)
Main goal
---------
Define functions used among the modules of the current subpackage.
'''
#%% imports
# third-party
import numpy as np
#%% geometry-related functions
def transform_point(point, orientation=0, rotation_axis=3,
                    origin_translation=(0., 0.), origin=(0., 0.),):
    '''
    Rotate a 2d or 3d point about the given axis, then translate the origin.

    Parameters
    ----------
    point : array-like, shape = [2] or [3]
    orientation : float
        Rotation angle between the old and new first in-plane axis (radians).
    rotation_axis : int, possible values: 1, 2 , 3
        Axis about which the rotation is performed (3d points only).
    origin_translation : array-like, shape = [2]
        Translation applied to the origin after rotating.
    origin : array-like, shape = [2]
        Origin of the initial axes.

    Returns
    -------
    transformed_point : array-like, shape = [2] or [3]
        Transformed point.
    '''
    cos_a, sin_a = np.cos(orientation), np.sin(orientation)
    cx, cy = origin
    tx, ty = origin_translation
    n_dims = len(point)
    if n_dims == 2:
        i, j = 0, 1
    else:
        # The two coordinates lying in the rotation plane.
        i, j = [axis - 1 for axis in (1, 2, 3) if axis != rotation_axis]
    # Standard 2d rotation about (cx, cy) followed by the origin translation.
    new_i = tx + cx + (point[i] - cx) * cos_a - (point[j] - cy) * sin_a
    new_j = ty + cy + (point[i] - cx) * sin_a + (point[j] - cy) * cos_a
    transformed_point = [None] * n_dims
    transformed_point[i] = new_i
    transformed_point[j] = new_j
    if n_dims == 3:
        # The coordinate along the rotation axis is unchanged.
        transformed_point[rotation_axis - 1] = point[rotation_axis - 1]
    return transformed_point
def get_orientations_360(orientation, addition_angle=np.pi / 2.):
    '''Return *orientation* plus successive increments of *addition_angle*
    until a full revolution is covered.'''
    angles = [orientation]
    accumulated = 0.
    limit = 2 * np.pi - addition_angle
    while accumulated < limit:
        angles.append(angles[-1] + addition_angle)
        accumulated += addition_angle
    return angles
| true |
558eeeeedc166e6fb8ab9f8b3a0ca2f66d3783b0 | Python | warren511/210CT-coursework---Warren-Elliott | /Week 2 Question 3.py | UTF-8 | 2,801 | 3.703125 | 4 | [] | no_license | """
ADDITION(B,C)
for x range length of B //n
for i in range length of B[0] //n*n
addition_result1[x][i]<-ADD b[x][i] and c[x][i] //n*n
return addition_result //1
SCALE_BY_2(addition_result)
for x range length of addition_result //n
scaled_result[x][i]<-multiply addition_result[x][i] by 2 //n
return scaled_result //1
MULTIPLICATION (B,C)
for x range length of B //n
for i in range length C[0] //n*n
for each loop range length of C //n*n*n
multiplication_result[x][i]<-multiply B[x][loop] and C[loop][i] //n*n*n
return multiplication_result //1
SUBTRACT(multiplication_result,scaled_result)
	for x range length of multiplication_result //n
	for i in range (length multiplication_result[0]) //n*n
final_result[x][i]<-SUBTRACT multiplication_result[x][i] and scaled_result[x][i] //n*n
return final_result //1
//A=B*C –2*(B+C) in order to compute this function, a bidmas approach is used;
//Lines 2,3,4,5,6: starting by adding the B and C matrix inside the equation. Add
by iterating each list representing a column and a row in the matrix;
//Lines 8,9,10,11: then scale the result obtained from the previous function by two;
//Lines 13,14,15,16,17:the same way the adding functon was implemented, a multiplication
function is added which will compute B*C;
//Lines 19,20,21,22: finally the last subtraction function follows the same structure
as addition and multiplication, where multiplication_result and scaled
result are added as arguments.
These are subtracted and the result is outputed in line 23.
RUNTIME BOUND:
n+(n*n)+(n*n)+1+n+n+1+n+(n*n)+(n*n*n)+(n*n*n)+1+n+(n*n)+(n*n)+1
2n^3+5n^2+5n+4
O(N^3) as nested loops are present.
//references:https:www.programiz.com/python-programming/examples/add-matrix
//www.programiz.com/python-programming/examples/multiply-matrix
"""
| true |
979dcd515e96c16f276bef0d6a9d25d0a96d7854 | Python | caiquanshi/fator_code | /classes.py | UTF-8 | 14,262 | 2.71875 | 3 | [] | no_license | import json
import sys
import sqlite3
import logging
class DAO:
    """Base data-access object: one sqlite3 connection, cursor and logger."""

    def __init__(self, database):
        # Error-level logging is shared process-wide; each DAO gets the
        # module logger.
        logging.basicConfig(level=logging.ERROR)
        self.logger = logging.getLogger(__name__)
        self.connection = sqlite3.connect(database)
        self.queryCursor = self.connection.cursor()

    def disconnect(self):
        """Close the underlying database connection."""
        self.connection.close()

    def commit(self):
        """Flush pending changes to the database."""
        self.connection.commit()
class CreateChannel(DAO):
    """DAO that creates the `channels` table and registers a single channel."""

    def __init__(self, database):
        DAO.__init__(self, database)
        self.logger.debug('creating channel')

    def create_channel(self, channel_id, channel_name):
        """Create the channels table if needed and insert one channel row."""
        create_table_query = 'CREATE TABLE if not exists channels ' \
                             '(channel_id integer primary key, ' \
                             'channel_name text)'
        # Bug fix / hardening: the insert used string interpolation, which
        # breaks on names containing quotes and is an SQL-injection hazard;
        # bind parameters instead.
        insert_query = "INSERT INTO CHANNELS VALUES (?, ?)"
        try:
            self.queryCursor.execute(create_table_query)
            self.queryCursor.execute(insert_query, (channel_id, channel_name))
        except Exception as e:
            self.logger.error('Failed to create channels table: ' + str(e))
            self.disconnect()
            return
        self.commit()

    def log_all_infomation_for_table(self, channel_id):
        """Log the channel row and return (channel_id, channel_name) of the
        first matching row, or None when no row exists."""
        channel_query = "select * from channels where channel_id = ?"
        try:
            rows = self.queryCursor.execute(channel_query, (channel_id,))
        except Exception as e:
            # Bug fix: the original message was copy-pasted from
            # create_channel ('Failed to create channels table').
            self.logger.error('Failed to query channels table: ' + str(e))
            self.disconnect()
            return
        for row in rows:
            self.logger.info(row)
            name = str(row[1])
            # Returns on the first row, matching the original behaviour.
            return (row[0], name)
class ParseTopSpam(DAO):
    """DAO that rebuilds the `top_spam` summary rows for one stream."""

    def __init__(self, database, file_name):
        DAO.__init__(self, database)
        self.logger.debug("generating summary for twitch chat log in {}"
                          .format(file_name))

    def delete_original_spam_info(self, channel_id, stream_id):
        """Create top_spam if needed and drop any previous summary rows for
        this channel/stream pair."""
        set_foreign_keys = 'PRAGMA foreign_keys = ON;'
        create_table_query = 'create table if not exists top_spam' \
                             ' (channel_id integer NOT NULL, ' \
                             'stream_id integer NOT NULL, spam_text string,' \
                             ' spam_occurrences integer,' \
                             ' spam_user_count integer,' \
                             ' FOREIGN KEY(channel_id)' \
                             ' REFERENCES channels(channel_id))'
        # Hardening: bind the ids instead of .format() interpolation.
        delete_original_row = 'delete from top_spam where channel_id ' \
                              '= ? and stream_id = ?'
        try:
            self.queryCursor.execute(set_foreign_keys)
            self.queryCursor.execute(create_table_query)
            self.queryCursor.execute(delete_original_row,
                                     (channel_id, stream_id))
        except Exception as e:
            # Bug fix: typo in the original message ('Failed to set creat tabel').
            self.logger.error('Failed to create table: ' + str(e))
            self.disconnect()
            return
        self.commit()

    def insert_new_spam_row(self, info):
        """Insert one top_spam row per comment seen more than 10 times and
        return a human-readable summary string."""
        comment_count = info['comment_count']
        channel_id = info['channel_id']
        stream_id = info['stream_id']
        comment_to_users = info['comment_to_users']
        sorted_counts = sorted(comment_count.items(),
                               key=lambda kv: kv[1], reverse=True)
        num_insertions = 0
        for i, (comment, numComments) in enumerate(sorted_counts):
            user_count = len(comment_to_users[comment])
            # Bug fix: logger.debug(i, comment, ...) treated `i` as the format
            # string and raised TypeError whenever DEBUG logging was enabled.
            self.logger.debug('%s %s %s %s', i, comment, numComments, user_count)
            # should probably parameterize "10"
            if numComments > 10:
                try:
                    self.queryCursor.execute('insert into top_spam values'
                                             '(?,?,?,?,?)',
                                             (channel_id, stream_id,
                                              comment, numComments, user_count))
                except Exception as e:
                    self.logger.error('Failed to insert spam row: ' + str(e))
                    self.disconnect()
                    return
                num_insertions += 1
        self.commit()
        self.logger.info("inserted {} top spam records for stream {} "
                         "on channel {}".format(num_insertions, stream_id,
                                                channel_id))
        parse_info = "inserted {} top spam records for stream {} " \
                     "on channel {}".format(num_insertions, stream_id,
                                            channel_id)
        return parse_info
class GetTopSpam(DAO):
    """DAO that reads pre-computed rows from the `top_spam` summary table."""

    def __init__(self, database):
        DAO.__init__(self, database)
        self.logger.debug("getting top spam")

    def get_spam(self, channel_id, stream_id):
        """Return the stream's top-spam rows as a JSON array string, ordered
        by occurrences desc, user count desc, then text; None on error."""
        result = []
        # Hardening: bind the ids instead of .format() interpolation so
        # non-integer input cannot alter the SQL.
        get_spam_query = "select * from top_spam where channel_id = ?" \
                         " and stream_id = ? order by spam_occurrences desc," \
                         " spam_user_count desc, spam_text"
        try:
            rows = self.queryCursor.execute(get_spam_query,
                                            (channel_id, stream_id))
        except Exception as e:
            self.logger.error('Failed to get top spam: ' + str(e))
            self.disconnect()
            return
        for row in rows:
            result.append({"spam_text": row[2],
                           "occurrences": row[3], "user_count": row[4]})
        self.logger.info(json.dumps(result, sort_keys=True))
        return json.dumps(result, sort_keys=True)
class GetTopSpam2(DAO):
    """DAO that computes top spam directly from the raw `chat_log` table."""

    def __init__(self, database):
        DAO.__init__(self, database)
        self.logger.debug("getting top spam")

    def get_spam(self, channel_id, stream_id):
        """Aggregate the stream's chat log and return, as a JSON array string,
        every message repeated more than 10 times with its unique-user count,
        ordered by occurrences desc, user count desc, then text."""
        occurrences = {}
        users_by_comment = {}
        get_spam_query = "select * from chat_log where channel_id = {}" \
                         " and stream_id = {} ". \
            format(channel_id, stream_id)
        try:
            rows = self.queryCursor.execute(get_spam_query)
        except Exception as e:
            self.logger.error('Failed to get top spam: ' + str(e))
            self.disconnect()
            return
        for row in rows:
            body = row[2]
            logging.debug(body)
            occurrences[body] = occurrences.get(body, 0) + 1
            users_by_comment.setdefault(body, set()).add(row[3])
        result = [{"occurrences": occurrences[text], "spam_text": text,
                   "user_count": len(users_by_comment[text])}
                  for text in occurrences if occurrences[text] > 10]
        # Two stable sorts: text ascending as the tie-breaker, then
        # (occurrences, user_count) descending as the primary key.
        result.sort(key=lambda entry: entry["spam_text"])
        result.sort(key=lambda entry: (entry["occurrences"],
                                       entry["user_count"]), reverse=True)
        return json.dumps(result, sort_keys=True)
class StoreChatLog(DAO):
    """DAO that (re)creates and fills the raw `chat_log` table for one stream."""

    def __init__(self, database):
        DAO.__init__(self, database)
        # Kept for interface compatibility; populated by callers if at all.
        self.channel_id = None
        self.stream_id = None
        self.comments = None
        self.logger.debug("storing raw logs in table")

    def create_table(self, info):
        """Create chat_log if missing and clear any rows for this stream."""
        channel_id = info['channel_id']
        stream_id = info['stream_id']
        ddl = 'create table if not exists chat_log ' \
              '(channel_id integer NOT NULL, ' \
              'stream_id integer NOT NULL, text string,' \
              ' user string, chat_time datetime, offset int, ' \
              'FOREIGN KEY(channel_id)' \
              ' REFERENCES channels(channel_id))'
        purge = \
            'delete from chat_log where channel_id ={} and stream_id ={}' \
            .format(channel_id, stream_id)
        try:
            self.queryCursor.execute(ddl)
            self.queryCursor.execute(purge)
        except Exception as e:
            self.logger.error('Failed to create chat log table: ' + str(e))
            self.disconnect()
            return
        self.commit()

    def insert_log(self, info):
        """Insert every comment dict from *info* and return a summary string;
        returns None if any insert fails."""
        channel_id = info['channel_id']
        stream_id = info['stream_id']
        comments = info['comments']
        for comment in comments:
            record = (channel_id, stream_id,
                      comment["message"]["body"],
                      comment["commenter"]["display_name"],
                      comment["created_at"],
                      comment["content_offset_seconds"])
            try:
                self.queryCursor.execute("insert into chat_log VALUES"
                                         " (?,?,?,?,?,?)", record)
            except Exception as e:
                self.logger.error('Failed to insert chat log: ' + str(e))
                self.disconnect()
                return
        self.commit()
        return "inserted {} records to chat log for " \
               "stream {} on channel {}" \
               .format(len(comments), stream_id, channel_id)
class QueryChatLog(DAO):
    """DAO that builds an ad-hoc filtered SELECT over chat_log and runs it."""

    def __init__(self, database):
        DAO.__init__(self, database)
        self.query_string = "select * from chat_log "
        self.logger.debug("query chat log")

    def make_query_string(self, filter_string):
        """Append WHERE clauses built from 'column op value' filter triples
        and a final ORDER BY chat_time."""
        if len(filter_string) > 0:
            self.query_string += "where "
        for clause in filter_string:
            # first element is the column name, second the operator
            # shorthand, third the comparison value
            parts = clause.split(' ')
            self.query_string += parts[0]
            self.convert_oprater(parts[1])
            self.str_variable(parts[2], parts[0])
        if len(filter_string) > 0:
            # Drop the trailing "AND " left by the last clause.
            self.query_string = self.query_string[:-4]
        self.query_string += " order by chat_time"

    def convert_oprater(self, operator):
        """Append the SQL operator for the given filter shorthand."""
        sql_for = {"eq": " = ", "gt": " > ", "lt": " < ",
                   "gteq": " >= ", "lteq": " <= ", "like": " like "}
        self.query_string += sql_for[operator]

    def str_variable(self, condition_variable, column):
        """Append the comparison value (quoted for text columns) plus an
        AND separator."""
        if column in ('text', 'user'):
            self.query_string += "'" + condition_variable + "' AND "
        else:
            self.query_string += condition_variable + ' AND '

    def do_query(self):
        """Execute the prepared query and return rows as a JSON array of
        objects keyed by column name; None on error."""
        try:
            rows = self.queryCursor.execute(self.query_string)
        except Exception as e:
            self.logger.error('Failed to query chat log table: ' + str(e))
            self.disconnect()
            return
        columns = [description[0] for description in rows.description]
        records = []
        for row in rows:
            self.logger.debug(row)
            records.append(dict(zip(columns, row)))
        self.logger.info(json.dumps(records, sort_keys=True))
        return json.dumps(records, sort_keys=True)
class ViewershipMetrics(DAO):
    """DAO computing per-minute viewer/message counts for one stream."""

    def __init__(self, database):
        DAO.__init__(self, database)

    def query_viewers(self, channel_id, stream_id):
        """Return every chat_log row for the stream ordered by timestamp;
        None on error."""
        chat_logs = []
        # Hardening: bind the ids instead of .format() interpolation.
        get_chat_log_query = "select * from chat_log where channel_id = ?" \
                             " and stream_id = ? order by chat_time"
        try:
            rows = self.queryCursor.execute(get_chat_log_query,
                                            (channel_id, stream_id))
        except Exception as e:
            # Bug fix: the original message said 'Failed to get top spam',
            # copy-pasted from another DAO.
            self.logger.error('Failed to get chat log: ' + str(e))
            self.disconnect()
            return
        for row in rows:
            chat_logs.append(row)
        return chat_logs

    def make_viewer_data(self, chat_logs, channel_id, stream_id):
        """Aggregate rows into per-minute unique-viewer and message counts,
        returned as a single-element JSON array string."""
        if len(chat_logs) == 0:
            self.logger.info('no data')
            return json.dumps([], sort_keys=True)
        offset_dict = {}
        # chat_logs is ordered by chat_time, so row 0 holds the start time.
        start_time = chat_logs[0][4]
        for row in chat_logs:
            user = row[3]
            # row[5] is the offset in seconds; bucket into 1-based minutes.
            offset = (row[5] // 60) + 1
            if offset in offset_dict:
                offset_dict[offset]['messages'] += 1
                offset_dict[offset]['viewers'].add(user)
            else:
                offset_dict[offset] = {'messages': 1, 'viewers': {user}}
        result = {"channel_id": channel_id, 'stream_id': stream_id,
                  "starttime": start_time, "per_minute": []}
        for offset in offset_dict:
            per_minute_tamp = {"offset": offset,
                               "viewers": len(offset_dict[offset]["viewers"]),
                               "messages": offset_dict[offset]["messages"]}
            result["per_minute"].append(per_minute_tamp)
        return json.dumps([result], sort_keys=True)
| true |
f8ba58c0b6365e47ea0104794b6e7770cee68791 | Python | thariqkhalid/perception4all | /classification/networks/inception.py | UTF-8 | 5,451 | 2.8125 | 3 | [] | no_license | import torch.nn.functional as F
from torch import nn
import torch
# we'll put the input to the inception block as the first value of the list
# Per-block channel configuration, in order:
# [in_channels, 1x1 width, 3x3 reduce, 3x3 width, 5x5 reduce, 5x5 width, pool proj]
inception_3a = [192, 64, 96, 128, 16, 32, 32]
inception_3b = [256, 128, 128, 192, 32, 96, 64]
inception_4a = [480, 192, 96, 208, 16, 48, 64]
inception_4b = [512, 160, 112, 224, 24, 64, 64]
inception_4c = [512, 128, 128, 256, 24, 64, 64]
inception_4d = [512, 112, 144, 288, 32, 64, 64]
inception_4e = [528, 256, 160, 320, 32, 128, 128]
inception_5a = [832, 256, 160, 320, 32, 128, 128]
inception_5b = [832, 384, 192, 384, 48, 128, 128]
#Aux blocks
# Auxiliary-head configuration: [in_channels, 1x1 conv width].
inception_4b_aux = [512,512]
inception_4e_aux = [528,832]
class inception_module(nn.Module):
    """One GoogLeNet-style inception block.

    ``inception_block`` lists, in order: input channels, 1x1 branch width,
    3x3 reduce width, 3x3 branch width, 5x5 reduce width, 5x5 branch width
    and the pool-projection width. The four branch outputs are concatenated
    along the channel dimension.
    """

    def __init__(self, inception_block):
        super(inception_module, self).__init__()
        in_ch, w1, w3r, w3, w5r, w5, wpool = inception_block
        self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=w1,
                               kernel_size=(1, 1), stride=(1, 1))
        # NOTE(review): padding on the 1x1 reduce/projection convs is unusual
        # (it grows the spatial size before the following conv) — confirm it
        # is intentional; it does make all four branches line up spatially.
        self.conv3_r = nn.Conv2d(in_channels=in_ch, out_channels=w3r,
                                 kernel_size=(1, 1), stride=(1, 1), padding=(1, 1))
        self.conv3 = nn.Conv2d(in_channels=w3r, out_channels=w3,
                               kernel_size=(3, 3), stride=(1, 1))
        self.conv5_r = nn.Conv2d(in_channels=in_ch, out_channels=w5r,
                                 kernel_size=(1, 1), stride=(1, 1), padding=(1, 1))
        self.conv5 = nn.Conv2d(in_channels=w5r, out_channels=w5,
                               kernel_size=(5, 5), stride=(1, 1), padding=(1, 1))
        self.pool = nn.MaxPool2d(kernel_size=(3, 3), stride=(1, 1), ceil_mode=True)
        self.conv1_m = nn.Conv2d(in_channels=in_ch, out_channels=wpool,
                                 kernel_size=(1, 1), stride=(1, 1), padding=(1, 1))

    def forward(self, x):
        branch_1x1 = F.relu(self.conv1(x))
        branch_3x3 = F.relu(self.conv3(F.relu(self.conv3_r(x))))
        branch_5x5 = F.relu(self.conv5(F.relu(self.conv5_r(x))))
        branch_pool = F.relu(self.conv1_m(self.pool(x)))
        return torch.cat((branch_1x1, branch_3x3, branch_5x5, branch_pool), dim=1)
"""
• An average pooling layer with 5×5 filter size and stride 3, resulting in an 4×4×512 output
for the (4a), and 4×4×528 for the (4d) stage.
• A 1×1 convolution with 128 filter for dimension reduction and rectified linear activation.
• A fully connected layer with 1024 units and rectified linear activation.
• A dropout layer with 70% ratio of dropped outputs.
• A linear layer with softmax loss as the classifier (predicting the same 1000 classes as the main classifier, but removed at inference time).
"""
class inceptionAux_module(nn.Module):
    """GoogLeNet auxiliary classifier head.

    avg-pool (5x5, stride 3) -> 1x1 conv -> fc 1024 -> 70% dropout ->
    fc 10 -> softmax over classes. ``inceptionAux_block`` lists the input
    channels and the 1x1 conv width; the fc sizing assumes the pooled map
    is 4x4 (true for 14x14 inputs).
    """

    def __init__(self, inceptionAux_block):
        super(inceptionAux_module, self).__init__()
        self.conv = nn.Conv2d(in_channels=inceptionAux_block[0],
                              out_channels=inceptionAux_block[1],
                              kernel_size=(1, 1), stride=(1, 1))
        # Flattened feature count fed to fc1 (assumes a 4x4 pooled map).
        self.out_channels = inceptionAux_block[1] * 4 * 4
        self.avgPool = nn.AvgPool2d(kernel_size=(5, 5), stride=(3, 3), ceil_mode=True)
        self.fc1 = nn.Linear(in_features=inceptionAux_block[1] * 4 * 4, out_features=1024)
        self.fc2 = nn.Linear(in_features=1024, out_features=10)
        self.dropout = nn.Dropout(0.70)

    def forward(self, x):
        x = self.avgPool(x)
        x = F.relu(self.conv(x))
        x = x.view(-1, self.out_channels)
        x = self.dropout(F.relu(self.fc1(x)))
        # Bug fix: softmax was taken over dim=0 (the batch dimension), which
        # normalises each class score ACROSS the batch — with batch size 1
        # every output was exactly 1.0. dim=1 normalises over the 10 classes.
        x = F.softmax(self.fc2(x), dim=1)
        return x
class InceptionNet(nn.Module):
    """GoogLeNet-style network built from the inception_* configs above.

    forward() returns (main_output, aux_output_1, aux_output_2); the two
    auxiliary heads tap the trunk after modules 4a and 4d respectively.
    """
    def __init__ ( self ) :
        super(InceptionNet, self).__init__( )
        # Stem: 7x7/2 conv -> pool -> 1x1 -> 3x3 -> pool (pool reused throughout).
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(7,7), stride=(2,2), padding=(3,3))
        self.pool = nn.MaxPool2d(kernel_size=(3,3),stride=(2,2), ceil_mode=True)
        self.conv2 = nn.Conv2d(in_channels=64 , out_channels=64, kernel_size=(1,1),stride=(1,1)) # padding = 0
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=192, kernel_size=(3,3), stride=(1,1), padding=(1,1))
        self.module_3a = inception_module(inception_3a)
        self.module_3b = inception_module(inception_3b)
        self.module_4a = inception_module(inception_4a)
        self.module_4b = inception_module(inception_4b)
        # Aux head fed by 4a's 512-channel output (see forward()).
        self.module_4b_aux = inceptionAux_module(inception_4b_aux)
        self.module_4c = inception_module(inception_4c)
        self.module_4d=inception_module(inception_4d)
        self.module_4e=inception_module(inception_4e)
        # Aux head fed by 4d's 528-channel output (see forward()).
        self.module_4e_aux=inceptionAux_module(inception_4e_aux)
        self.module_5a = inception_module(inception_5a)
        self.module_5b = inception_module(inception_5b)
        self.avgPool = nn.AvgPool2d(kernel_size=(7,7), stride=(2,2), ceil_mode=True)
        self.dropout = nn.Dropout(0.40)
        # NOTE(review): fc assumes the final feature map pools down to 1x1
        # with 1024 channels — only true for a specific input size; confirm.
        self.fc = nn.Linear(in_features = 1024 * 1 * 1, out_features= 10)
    def forward (self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv3(F.relu(self.conv2(x)))))
        x = self.module_3a(x)
        x = self.module_3b(x)
        x = self.pool(x)
        x = self.module_4a(x)
        # Tap the trunk here for the first auxiliary head.
        x1 = x
        x = self.module_4b(x)
        x1 = self.module_4b_aux(x1)
        x = self.module_4c(x)
        x = self.module_4d(x)
        # Tap the trunk here for the second auxiliary head.
        x2 = x
        x = self.module_4e(x)
        x2 = self.module_4e_aux(x2)
        x = self.pool(x)
        x = self.module_5a(x)
        x = self.module_5b(x)
        x = self.avgPool(x)
        x = self.dropout(x)
        x = x.view(x.size(0), -1)
        # NOTE(review): softmax over dim=0 normalises across the batch, not
        # the classes — looks like it should be dim=1 (the aux heads had the
        # same issue); confirm against the training loss in use.
        x = F.softmax(self.fc(x), dim=0)
        return x,x1,x2
| true |
058b229e325a7795f80e5fde71e1b101b7ff6296 | Python | tabriz83/WriteUps | /CTF-2018/YISF/exploit.py | UTF-8 | 2,265 | 2.65625 | 3 | [
"MIT"
] | permissive | # -*- coding: cp949 -*-
from pwn import *
# YISF CTF "Yisf bank" exploit (Python 2 / pwntools).
# Step 1: abuse an unchecked negative "repayment" amount so the balance is
#         credited instead of debited (integer underflow in the bank logic).
# Step 2: leak stack memory through a format-string bug in the account
#         deletion "sign" prompt.
# The statements below are a fixed I/O conversation with the remote service;
# their order mirrors the server's menus, quoted in the comments.
r = remote('112.166.114.144', 35122)
print r.recvuntil('[>]')
# 1. Yisf lotto
# 2. Yisf bank
# 3. explanation
# 4. EXIT
# [>]
r.sendline('2')
print r.recvuntil('[>]')
# Yisf Bank
# 1. Loan
# 2. Repayment
# 3. Bank account
# 4. Previous Screen
# [>]
r.sendline('2')
print r.recvuntil('[>]')
# [Before]
# your money: 10000won
# your loan money: 0won
# Hello ~ I came to pay the money back.
# [>]
# "Repay" a large negative amount: money -= (-2100000000) credits us.
r.sendline('-2100000000')
print r.recv()
print r.recv()
# [After]
# your money: 2100010000won
# your loan money: 2100000000won
r.sendline()
print r.recvuntil('[>]')
# Yisf Bank
# 1. Loan
# 2. Repayment
# 3. Bank account
# 4. Previous Screen
# [>]
r.sendline('3')
print r.recvuntil('[>]')
# 1. Opening an Account
# 2. Deposit
# 3. Withdraw
# 4. Delete
# 5. EXIT
# [>]
r.sendline('1')
print r.recvuntil(']') # [Name(7)]
r.sendline('asdf'*2)
print r.recvuntil(']') # [Password(4)]
r.sendline('1234')
print r.recvuntil(']') # [Member Number(1)]
r.sendline('1')
print r.recvuntil('Thank you~!')
# __ __
# ____ ____ ____ ________________ _/ |_ ______ _/ |_ ____ ___.__. ____ __ __
# _/ ___\/ _ \ / \ / ___\_ __ \__ \\ __\/ ___/ \ __\/ _ \ < | |/ _ \| | \
# \ \__( <_> ) | \/ /_/ > | \// __ \| | \___ \ | | ( <_> ) \___ ( <_> ) | /
# \___ >____/|___| /\___ /|__| (____ /__| /____ > |__| \____/ / ____|\____/|____/
# \/ \//_____/ \/ \/ \/
#
# Nice meet you~! asdfasd
# You are Password is 1234
# You are Membership Number is 1
# Thank you~!
r.sendline()
print r.recvuntil('[>]')
# 1. Opening an Account
# 2. Deposit
# 3. Withdraw
# 4. Delete
# 5. EXIT
# [>]
r.sendline('4')
print r.recvuntil('[>]')
# Please enter your password to delete your membership.
# [>]
r.sendline('1234')
print r.recvuntil('[>]')
# Oh~ I am sad
# See you again asdfasd
# Sign for deletion~
# [>]
# The "sign" is echoed back through printf() unsanitised, so '%x ' * 16
# dumps sixteen words from the stack.
r.sendline('%x '*16)
print r.recvuntil('Thank you~ bye')
# your sign is ffca9210 33323104 78250034 20782520 25207825 78252078 20782520 25207825 78252078 20782520 25207825 78252078 20782520 25207825 a2078 26d5ed00
# Delete Complete!
# Thank you~ bye
| true |
25df0398a769191503f0d314ee67c93fdb743a9c | Python | mariomech/mosaic-aca_aflux_data_processing | /radar/scripts/utils/in_out/write_helpers/write_ancillary_data.py | UTF-8 | 1,418 | 2.625 | 3 | [] | no_license | #!/usr/bin/python
def main(fid, data, setup):
    """Write position, attitude, velocity of the sensor."""
    # Delegates to the per-group writers; each writer is a no-op when the
    # corresponding keys are absent from *data*.
    write_dropsonde_data(fid, data, setup)
    write_beam_width(fid, data, setup)
def write_dropsonde_data(fid, data, setup):
    """Write dropsonde-derived wind variables that are present in *data*.

    fid   -- open netCDF-like file handle exposing createVariable().
    data  -- mapping of variable name -> values; missing names are skipped.
    setup -- configuration mapping; 'space_dim' names the spatial dimension.
    """
    varnames = (
        'v_dropsonde_x', 'v_dropsonde_y',
        'wind_speed_dropsonde', 'wind_to_direction_dropsonde',
        'v_dropsonde_x_unc', 'v_dropsonde_y_unc',
        'wind_speed_dropsonde_unc', 'wind_to_direction_dropsonde_unc',
        'time_since_last_dropsonde', 'time_till_next_dropsonde',
    )
    for varname in varnames:
        if varname not in data:
            continue
        long_name = varname
        dimensions = ('time', setup['space_dim'])
        units = 'm s-1'
        # uncertainty variables: derive the long name from the base name.
        # BUGFIX: the original compared varname[-4:] (which is '_unc')
        # against 'unc', so this branch never fired; and it used
        # varname[:4] (the first four characters) instead of stripping
        # the '_unc' suffix.
        if varname.endswith('_unc'):
            long_name = 'uncertainty_of_' + varname[:-4]
        # time variables are one-dimensional and measured in seconds.
        if varname.startswith('time'):
            units = 's'
            dimensions = ('time',)
        vid = fid.createVariable(varname, 'f', dimensions)
        vid.long_name = long_name
        vid.units = units
        vid[:] = data[varname]
def write_beam_width(fid, data, setup):
    """Add the scalar beam-width variable, if it is available in *data*."""
    if 'beam_fwhm' not in data:
        return
    vid = fid.createVariable('beam_fwhm', 'f', ())
    vid.long_name = 'beam_full_width_at_half_maximum'
    vid.units = 'degree'
    vid[:] = data['beam_fwhm']
| true |
7d76be377fa9c044953d8c06ee12effc66f49501 | Python | sumitpatra6/leetcode | /backspace_string.py | UTF-8 | 389 | 3.078125 | 3 | [] | no_license | class Solution:
def backSpaceCompare(self, S, T):
    """Return True if S and T are equal after applying '#' as backspace.

    A '#' deletes the preceding character (if any); other characters are
    kept.  The original method body was lost/garbled, although the script
    below calls it, so the standard stack-based algorithm is restored.
    """
    def _apply(text):
        # Simulate typing: push characters, pop on '#'.
        out = []
        for ch in text:
            if ch == '#':
                if out:
                    out.pop()
            else:
                out.append(ch)
        return out
    return _apply(S) == _apply(T)
# Manual smoke check: the expected answer is printed first, then the
# computed one.  The triple-quoted string below is a disabled block of
# additional cases (a bare string literal is a no-op at runtime).
sol= Solution()
s = 'xywrrmp'
t = 'xywrrmu#p'
print(True)
print(sol.backSpaceCompare(s,t))
"""s = 'ab##'
t = 'c#d#'
print(True)
print(sol.backSpaceCompare(s,t))
s = 'a##c'
t = '#a#c'
print(True)
print(sol.backSpaceCompare(s,t))
s = 'a#c'
t = 'b'
print(False)
print(sol.backSpaceCompare(s,t))
"""
891b02e0ee614939f87817dc11971bc26aa1f9db | Python | MartinR2295/University-Exercises-Artificial-Intelligence | /traveling_salesman/src/node.py | UTF-8 | 627 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""
Node Class
This is one single node in the graph
"""
class Node(object):
    """A single graph node with a position, a label and display styling."""

    def __init__(self, x, y, name, color, size=50):
        self.x = x
        self.y = y
        self.name = name
        self.color = color
        self.size = size

    def manhattan_distance_to(self, other):
        """L1 (taxicab) distance from this node to *other*."""
        dx = self.x - other.x
        dy = self.y - other.y
        return abs(dx) + abs(dy)

    def pythagorean_distance_to(self, other):
        """Euclidean (straight-line) distance from this node to *other*."""
        dx = self.x - other.x
        dy = self.y - other.y
        return (dx * dx + dy * dy) ** 0.5
| true |
fd09333d11e27a068f4f0eddc9aae64842ac97d4 | Python | ColinMcCullough/ColinMcCullough.github.io | /cs260/data_structures/ch_six_hw/test_binheap.py | UTF-8 | 5,884 | 3.234375 | 3 | [] | no_license | from binheap import *
import unittest
class BinaryHeapTests(unittest.TestCase):
    """Unit tests for the size-bounded BinHeap (min and max variants).

    BUGFIX: the original defined two methods both named
    ``test_min_heap_delRoott``; the second definition silently shadowed the
    first, so the min-heap delRoot test never ran.  They are now named
    ``test_min_heap_delRoot`` and ``test_max_heap_delRoot``.
    """

    def setUp(self):
        self.minbst = BinHeap(True,10)
        self.maxbst = BinHeap(False,10)

    def test_properties(self):
        self.assertEqual(self.minbst.heapList,[0])
        self.assertEqual(self.maxbst.heapList,[0])
        self.assertEqual(self.minbst.current_size,0)
        self.assertEqual(self.maxbst.current_size,0)
        self.assertEqual(self.minbst.isminheap,True)
        self.assertEqual(self.maxbst.isminheap,False)

    def test_insert_min_heap(self):
        self.minbst.insert(1)
        self.assertEqual(self.minbst.heapList,[0,1])
        self.assertEqual(self.minbst.current_size,1)
        self.minbst.insert(20)
        self.minbst.insert(10)
        self.minbst.insert(61)
        self.minbst.insert(2)
        self.minbst.insert(43)
        self.minbst.insert(68)
        self.minbst.insert(23)
        self.minbst.insert(14)
        self.minbst.insert(66)
        self.assertEqual(self.minbst.heapList,[0, 1, 2, 10, 14, 20, 43, 68, 61, 23, 66])
        self.assertEqual(self.minbst.current_size,10)
        self.minbst.insert(15)
        #maxed out size and drops least important value
        self.assertEqual(self.minbst.current_size,10)
        self.assertEqual(self.minbst.heapList,[0, 1, 2, 10, 14, 20, 43, 15, 61, 23, 66])

    def test_insert_max_heap(self):
        self.maxbst.insert(1)
        self.assertEqual(self.maxbst.heapList,[0,1])
        self.assertEqual(self.maxbst.current_size,1)
        self.maxbst.insert(20)
        self.maxbst.insert(10)
        self.maxbst.insert(61)
        self.maxbst.insert(2)
        self.maxbst.insert(43)
        self.maxbst.insert(68)
        self.maxbst.insert(23)
        self.maxbst.insert(14)
        self.maxbst.insert(66)
        self.assertEqual(self.maxbst.heapList,[0, 68, 66, 61, 20, 23, 10, 43, 1, 14, 2])
        self.assertEqual(self.maxbst.current_size,10)
        self.maxbst.insert(15)
        #maxed out size and drops least important value
        self.assertEqual(self.maxbst.current_size,10)
        self.assertEqual(self.maxbst.heapList,[0, 68, 66, 61, 20, 23, 10, 43, 15, 14, 2])

    def test_findlowpriorityindx(self):
        self.minbst.build_heap([ 1, 2, 10, 14, 20, 43, 68, 61, 23, 66])
        self.assertEqual(self.minbst.findlowpriorityindx(self.minbst.current_size // 2 + 1),7)
        self.maxbst.build_heap([ 68, 66, 61, 20, 23, 10, 43, 1, 14, 2])
        # NOTE(review): likely intended self.maxbst on the next line; left
        # unchanged pending verification of the expected index.
        self.assertEqual(self.minbst.findlowpriorityindx(self.minbst.current_size // 2 + 1),7)

    def test_build_min_heap(self):
        self.minbst.build_heap([11, 46, 78, 64, 17, 74, 95, 16, 28, 6, 75])
        self.assertEqual(self.minbst.heapList,[0, 6, 11, 74, 16, 17, 78, 75, 64, 28, 46])
        self.minbst.heapList = [0]
        self.minbst.build_heap([11, 46, 78, 64, 17, 74, 95, 16, 28, 6, 75, 44, 31, 13, 2, 5])
        self.assertEqual(self.minbst.heapList,[0, 2, 5, 13, 11, 6, 31, 44, 16, 28, 17])

    def test_build_max_heap(self):
        self.maxbst.build_heap([11, 46, 78, 64, 17, 74, 95, 16, 28, 6, 75])
        self.assertEqual(self.maxbst.heapList,[0, 95, 75, 78, 46, 64, 74, 11, 16, 28, 17])
        self.maxbst.heapList = [0]
        self.maxbst.build_heap([11, 46, 78, 64, 17, 74, 95, 16, 28, 6, 75, 104, 222, 104, 111])
        self.assertEqual(self.maxbst.heapList,[0, 222, 111, 95, 104, 104, 74, 78, 46, 75, 64])

    def test_perc_down_min_heap(self):
        self.minbst.build_heap([20,10,23,43,53,12,54,13,64])
        self.minbst.heapList[1] = 30
        self.minbst.perc_down(1,9)
        self.assertEqual(self.minbst.heapList,[0, 12, 13, 23, 20, 53, 30, 54, 43, 64])

    def test_perc_down_max_heap(self):
        self.maxbst.build_heap([20,10,23,43,53,12,54,13,64])
        self.maxbst.heapList[1] = 30
        self.maxbst.perc_down(1,9)
        self.assertEqual(self.maxbst.heapList,[0, 54, 53, 30, 43, 20, 12, 23, 13, 10])

    def test_perc_up_min_heap(self):
        self.minbst.build_heap([20,10,23,43,53,12,54,13,64])
        self.minbst.heapList.append(5)
        self.minbst.current_size += 1
        self.minbst.perc_up(self.minbst.current_size)
        self.assertEqual(self.minbst.heapList,[0, 5, 10, 12, 20, 13, 23, 54, 43, 64, 53])

    def test_perc_up_max_heap(self):
        self.maxbst.build_heap([20,10,23,43,53,12,54,13,64])
        self.maxbst.heapList.append(25)
        self.maxbst.current_size += 1
        self.maxbst.perc_up(self.maxbst.current_size)
        self.assertEqual(self.maxbst.heapList,[0, 64, 53, 54, 43, 25, 12, 23, 13, 10, 20])

    def test_min_or_max_child(self):
        self.minbst.build_heap([20,10,23,43,53,12,54,13,64])
        self.maxbst.build_heap([20,10,23,43,53,12,54,13,64])
        self.assertEqual(self.minbst.min_or_max_child(1,self.minbst.current_size),3)
        self.assertEqual(self.maxbst.min_or_max_child(1,self.maxbst.current_size),3)

    def test_min_heap_delRoot(self):
        self.minbst.build_heap([20,10,23,43,53,12,54,13,64])
        x = self.minbst.delRoot()
        self.assertEqual(x,10)
        self.assertEqual(self.minbst.heapList,[0, 12, 13, 23, 20, 53, 64, 54, 43])

    def test_max_heap_delRoot(self):
        self.maxbst.build_heap([20,10,23,43,53,12,54,13,64])
        x = self.maxbst.delRoot()
        self.assertEqual(x,64)
        self.assertEqual(self.maxbst.heapList,[0, 54, 53, 23, 43, 20, 12, 10, 13])

    def test_sort_min_heap(self):
        self.minbst.build_heap([20,10,23,43,53,12,54,13,64])
        self.minbst.sort()
        self.assertEqual(self.minbst.heapList,[0, 64, 54, 53, 43, 23, 20, 13, 12, 10])

    def test_sort_max_heap(self):
        self.maxbst.build_heap([20,10,23,43,53,12,54,13,64])
        self.maxbst.sort()
        self.assertEqual(self.maxbst.heapList,[0, 10, 12, 13, 20, 23, 43, 53, 54, 64])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
bf99e0549f4ef0954a23c165483b93bbcaa450af | Python | SerMeliodas/space_Shooterw | /space_shooter/game_objects.py | UTF-8 | 7,184 | 2.875 | 3 | [] | no_license | import pygame
import random
from settings import *
#============Bullet===============
class Bullet(pygame.sprite.Sprite):
    """A projectile sprite that travels straight up the screen."""

    def __init__(self, position):
        super(Bullet, self).__init__()
        self.image = pygame.image.load('assets/bullet.png')
        self.rect = self.image.get_rect()
        # Anchor the bullet's bottom-centre at the given spawn point.
        self.rect.midbottom = position

    def update(self):
        dy = -(SPEED + 3)  # slightly faster than the scroll speed
        self.rect.move_ip(0, dy)
#=============ENEMY==============
class Enemy(pygame.sprite.Sprite):
    """A descending enemy ship; handles bullet hits, death and scoring."""
    # Class-wide spawn timer shared by all enemies (milliseconds,
    # presumably -- driven by clock.get_time() in spawn()).
    spawn_couldown = 1000
    current_couldown = 0
    def __init__(self,bullets,enemys,player,score):
        super(Enemy,self).__init__()
        self.image = pygame.image.load('assets/enemy.png')
        # Double the sprite's size.
        self.image = pygame.transform.scale(self.image,(self.image.get_width()*2,
            self.image.get_height()*2))
        self.rect = self.image.get_rect()
        # Random horizontal spawn position fully inside the screen width,
        # starting slightly above the visible area.
        self.rect.centerx = random.randint(round(self.image.get_width() / 2),
            WIDTH - round(self.image.get_width() / 2))
        self.rect.y = random.randint(-50,-10)
        self.bullets = bullets
        self.enemys = enemys
        self.score = score
        self.player = player
        # Two bullet hits are needed to destroy an enemy.
        self.health_point = 2
    def update(self):
        # Drift downwards a little slower than the background scroll.
        self.rect.move_ip(0,SPEED-2)
        self.bullet_enemy_colysion()
        self.health_update()
        self.enemy_player_colision()
    def bullet_enemy_colysion(self):
        # Bullet/enemy hit test: the central third of a ship counts over its
        # whole height (x/y), the full width only near its top edge (x1/y1).
        for enemy in self.enemys:
            x = enemy.rect.x + enemy.rect.width/3, (enemy.rect.x + enemy.rect.width) - enemy.rect.width / 3
            y = enemy.rect.y + enemy.rect.height
            x1 = enemy.rect.x, enemy.rect.x + enemy.rect.width
            y1 = enemy.rect.y + 5
            for bullet in self.bullets:
                bullet_x = bullet.rect.x + bullet.rect.width / 2
                if bullet_x > x[0] and bullet_x < x[1] and bullet.rect.y < y:
                    self.bullets.remove(bullet)
                    # NOTE(review): this decrements *self*, not the `enemy`
                    # whose rectangle was actually tested -- looks like a
                    # bug (each instance re-runs this over all enemies);
                    # confirm the intended target before changing.
                    self.health_point -= 1
                if bullet_x > x1[0] and bullet_x < x1[1] and bullet.rect.y < y1:
                    self.bullets.remove(bullet)
                    self.health_point -= 1
    def health_update(self):
        # Remove dead enemies from the shared group and award score points.
        for enemy in list(self.enemys):
            if enemy.health_point == 0:
                self.enemys.remove(enemy)
                self.score.score += 5
    def enemy_player_colision(self):
        # Any enemy touching the player is destroyed (dokill=True) and costs
        # the player one health point.
        colision = pygame.sprite.spritecollide(self.player,self.enemys,True)
        if colision:
            self.player.health_point -= 1
    @staticmethod
    def spawn(clock,bullets,enemys,player,score):
        """Spawn one enemy per spawn_couldown tick and cull off-screen ones."""
        if Enemy.current_couldown <= 0:
            enemys.add(Enemy(bullets,enemys,player,score))
            Enemy.current_couldown = Enemy.spawn_couldown
        else:
            Enemy.current_couldown -= clock.get_time()
        # Drop enemies that have scrolled past the bottom of the screen.
        for enemy in list(enemys):
            if enemy.rect.top > HEIGHT + enemy.rect.height:
                enemys.remove(enemy)
#============Player================
class Player(pygame.sprite.Sprite):
    """The player's ship: WASD movement, auto-fire, bullet housekeeping."""
    def __init__(self,window,name,enemys,bullets,clock):
        super(Player,self).__init__()
        self.health_point = 6
        self.image = pygame.image.load('assets/main.png')
        # Triple the sprite's size.
        self.image = pygame.transform.scale(self.image,(self.image.get_width()*3,
            self.image.get_height()*3))
        self.rect = self.image.get_rect()
        # NOTE(review): due to operator precedence this is WIDTH - 50, not
        # (WIDTH - 100) / 2 -- possibly meant to centre the ship; confirm.
        self.rect.centerx = WIDTH - 100 / 2
        self.rect.y = HEIGHT - self.rect.height
        self.name = name
        self.clock = clock
        self.bullets = bullets
        self.enemys = enemys
        self.window = window
        # Time between shots (in clock.get_time() units, presumably ms).
        self.shoot_couldown = 480
        self.current_couldown = 0
    def update(self):
        # WASD movement, clamped to the screen edges; vertical movement is
        # restricted to the bottom third of the screen.
        keys = pygame.key.get_pressed()
        if keys[pygame.K_a] and self.rect.left > 0:
            self.rect.left -= SPEED
        if keys[pygame.K_d] and self.rect.right < WIDTH:
            self.rect.right += SPEED
        if keys[pygame.K_w] and self.rect.top > HEIGHT - HEIGHT /3:
            self.rect.top -= SPEED
        if keys[pygame.K_s] and self.rect.bottom < HEIGHT:
            self.rect.bottom += SPEED
        self.shooting()
    def shooting(self):
        # Auto-fire: spawn a bullet whenever the cooldown has elapsed.
        if self.current_couldown <= 0:
            self.bullets.add(Bullet(self.rect.midtop))
            self.current_couldown = self.shoot_couldown
        else:
            self.current_couldown -= self.clock.get_time()
        # Discard bullets that have left the top of the screen.
        for bullet in list(self.bullets):
            if bullet.rect.bottom < 0:
                self.bullets.remove(bullet)
#============HealthBar================
class HealthBar(pygame.sprite.Sprite):
    """Static foreground image of the on-screen health bar."""

    def __init__(self):
        super(HealthBar, self).__init__()
        sprite = pygame.image.load('assets/healthbar/healthbar.png')
        scaled_size = (sprite.get_width() * 3, sprite.get_height() * 3)
        self.image = pygame.transform.scale(sprite, scaled_size)
        self.rect = self.image.get_rect()
class HealthBarBackground(pygame.sprite.Sprite):
    """Underlay of the health bar; its left edge encodes remaining HP."""

    def __init__(self, player):
        super(HealthBarBackground, self).__init__()
        sprite = pygame.image.load('assets/healthbar/healthbar_background.png')
        scaled_size = (sprite.get_width() * 3, sprite.get_height() * 3)
        self.image = pygame.transform.scale(sprite, scaled_size)
        self.rect = self.image.get_rect()
        self.player = player

    def update(self):
        # HEALTH_POSITION[0] corresponds to full health (6 HP) and
        # HEALTH_POSITION[6] to zero; values outside 0..6 leave the bar
        # unchanged, exactly as the original if/elif ladder did.
        hp = self.player.health_point
        if hp in (6, 5, 4, 3, 2, 1, 0):
            self.rect.left = HEALTH_POSITION[6 - int(hp)]
#============Background================
class Background(pygame.sprite.Sprite):
    """Vertically scrolling background image."""

    def __init__(self):
        super(Background, self).__init__()
        self.image = pygame.image.load('assets/background.png')
        self.rect = self.image.get_rect()
        self.rect.bottom = HEIGHT

    def update(self):
        # Scroll downward; snap back once the bottom edge passes the
        # image's own height, giving a continuous loop.
        self.rect.move_ip(0, SPEED)
        if self.rect.bottom >= self.rect.height:
            self.rect.bottom = HEIGHT
class Score():
    """Running score: ticks up on a fixed cooldown and renders on screen."""

    def __init__(self, player, window):
        super(Score, self).__init__()
        self.score = 0
        self.current_couldown = 0
        self.couldown = 300
        self.window = window
        self.player = player
        self.font = pygame.font.Font('assets/fonts/main_font.ttf', 20)

    def update(self, clock):
        # Award one point every `couldown` worth of elapsed clock time.
        if self.current_couldown > 0:
            self.current_couldown -= clock.get_time()
        else:
            self.score += 1
            self.current_couldown = self.couldown

    def draw(self):
        rendered = self.font.render(f"{self.score}", True, (255, 255, 255))
        self.window.blit(rendered, (WIDTH - 100, 20))
| true |
699db5ad5eb7149dc6baead23eddc8ca49985306 | Python | ericick/literal_mini_game | /game/iuuihw.py | UTF-8 | 2,104 | 2.6875 | 3 | [] | no_license | #-*-coding:utf-8-*-
from jiiu import uuju, xrze
#========================================================================
#========================================================================
class Game(object):
    """Base class for game scenes; enter() returns the key of the next
    scene to run (Python 2 module)."""
    def enter(self):
        return 'mine'
#========================================================================
def welcome():
    """Print the greeting banner and wait for the player to press Enter."""
    print '=' * 79
    print '欢迎来到这个新世界我的朋友!'
    print '希望你能够有一段愉快的冒险经历!'
    print '=' * 79
    raw_input("> ")
#========================================================================
def choice():
    """Interactively build the player's character sheet, persist it via
    uuju.data_dump() and return it."""
    # The available options: race, class (profession) and gender.
    # NOTE(review): the local name 'dict' shadows the builtin, and 'i'
    # from enumerate() is unused.
    dict = {'种族': {1: '人类',
                   2: '暗夜精灵',
                   3: '矮人',
                   4: '侏儒',
                   5: '德莱尼',
                   6: '兽人',
                   7: '巨魔',
                   8: '亡灵',
                   9: '牛头人',
                   10: '血精灵',
                   11: '狼人',
                   12: '熊猫人',},
            '职业': {1: '猎人',
                   2: '法师',
                   3: '术士',
                   4: '德鲁伊',
                   5: '牧师',
                   6: '盗贼',
                   7: '萨满',
                   8: '圣骑士',
                   9: '战士',
                   10: '武僧'},
            '性别': {1: '男',
                   2: '女'}}
    # The final selections end up in this dict; this is its initial state.
    choices = {'种族': 'null',
               '职业': 'null',
               '性别': 'null',
               '姓名': 'null',
               '等级': 1,
               '红': 100,
               '蓝': 100,
               '任务开始': 'null',
               '任务时长': 'null'}
    # Walk the options above and ask the player to pick from each group.
    for i, item in enumerate(dict):
        choice = xrze.qrtk(dict[item], '请选择你的%s:' % item)
        choices[item] = choice
    # Ask for a character name.
    print '给你的角色一个名字吧~'
    choices['姓名'] = raw_input("> ")
    print '你现在是一个%s性%s%s了,%s这个名字必将响彻大陆' % (choices['性别'], choices['种族'], choices['职业'], choices['姓名'])
    uuju.data_dump(choices)
    return choices
#========================================================================
class Iuuihw(Game):
    """Opening scene: greet the player, build a character, then reload the
    persisted data as a sanity check before handing off to the next scene."""
    def enter(self):
        print '初始化第一步'
        welcome()
        choices = choice()
        data = uuju.data_load()
        print data['姓名']
        return 'ybxijxmm'
| true |
f267e181bba8de580b89cea2442887c4950f9363 | Python | chiwah-keen/flask-demo-pro | /src/conf/loader.py | UTF-8 | 1,218 | 2.671875 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 2020年02月06日
@author: jianzhihua
配置加载器
"""
import os
conf_dict = {}
def load_from_file(file_path=None):
'''
@param file_path
从配置文件中加载配置
'''
file_path = file_path or env_loader('CONF_FILE_PATH') or "/run/secrets/global.conf"
if os.path.exists(file_path):
with open(file_path) as infile:
for line in infile:
if not line.strip() or line.strip().startswith("#") or '=' not in line:
continue
tlist = line.strip().split('=')
if len(tlist) != 2:
continue
key, val = tlist
conf_dict[key] = val
return conf_dict
def file_loader(key):
if not conf_dict: load_from_file()
return conf_dict[key] if key in conf_dict else None
def env_loader(key):
'''
从环境变量中加载
'''
return os.environ[key] if key in os.environ else None
def conf_loader(key, default):
'''
配置加载器, 优先从环境变量中加载, 如果没有从配置文件中加载
'''
return env_loader(key) or file_loader(key) or default
| true |
986f016bf60312912c34627fb2830cedf9a9c497 | Python | henriqueguarneri/anisotroPy | /transformation.py | UTF-8 | 10,731 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | import math
import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist, pdist
import matplotlib.pyplot as plt
import gc
class Transformation(object):
def __init__(self, basepath, basegrid, msp=1000, st = 0.0001):
self.basepath = basepath.basepath
self.basegrid = basegrid.basegrid
self.maximum_search_parameter = msp
self.search_tolerance = st
self.bp = None
self.vbp = None
self.pbc = None
self.pto = None
def calculate_bp(self):
"""
:param self: bp (distance bp)
:return: calculate distance matrix from basepath to basegrid
"""
self.bp = cdist(self.basepath[['E','N']].as_matrix(), self.basegrid[['E','N']].as_matrix())
def calculate_vbp_old(self):
vbpX = cdist(self.basepath[['E']].as_matrix(), self.basegrid[['E']].as_matrix(),lambda u, v: u-v)/self.bp
vbpY = cdist(self.basepath[['N']].as_matrix(), self.basegrid[['N']].as_matrix(),lambda u, v: u-v)/self.bp
a=vbpX
b=vbpY
self.vbp = np.dstack([a.ravel(),b.ravel()])[0].reshape(len(self.basepath),len(self.basegrid),2)
def calculate_vbp(self):
"""
:param self: vbp (vector bp)
:return: None
"""
try:
a = np.stack([self.basepath[['E', 'N']].as_matrix()]*len(self.basegrid))
b = np.stack([self.basegrid[['E', 'N']].as_matrix()]*len(self.basepath)).reshape(len(self.basegrid),len(self.basepath),2)
self.a = a
self.b = b
# a little magic
c = pd.DataFrame(np.vstack(a-b))
c[0] = c[0]/ np.hstack(self.bp)
c[1] = c[1]/ np.hstack(self.bp)
self.vbp = c.as_matrix().reshape(len(self.basepath),len(self.basegrid),2)
except RuntimeError:
print('Runtime error is raised')
raise
def calculate_pbc(self):
def foo(x):
return cdist(np.matrix(self.vbc[x]),self.vbp[x,:],lambda u,v: np.arccos(np.dot(u,v)))[0]
try:
self.vbc = self.basepath.vbc.as_matrix()
self.vbc[0] = [np.nan,np.nan]
#vbc = self.vbc[1:]
self.pbc = [foo(x) for x in range(len(self.vbc))]
self.pbc = np.stack(self.pbc)
except RuntimeError:
print('Runtime error is raised')
raise
def calculate_pto(self):
try:
vbc = self.basepath.vbc.as_matrix()
self.mtp1 = vbc[1:] * np.cos(self.pbc[1:]).T
self.mtp2 = np.multiply(self.bp[:-1].T,self.mtp1)
self.coord = self.basepath[['E','N']].as_matrix()
self.pto = map(lambda x: self.coord[:-1][x]+ list(self.mtp2.T[x]),range(len(self.coord[:-1])))
except RuntimeError:
print('Runtime error is raised')
raise
def vmod(self,x):
return np.sqrt((x*x).sum(axis=1))
def set_index_correlation(self):
try:
coord = self.basepath[['E', 'N']].as_matrix()
# Distancia Pto - B
cd1 = map(lambda i: np.array(self.vmod(coord[:-1][i]- self.pto[i])), range(len(coord[:-1])))
# Distancia Pto - C
cd2 = map(lambda i: np.array(self.vmod(coord[1:][i] - self.pto[i])), range(len(coord[1:])))
# Distancia C - Ba
cd3 = self.vmod(coord[1:] - coord[:-1])
# Refatorando o vetor cd3 para o tamanho de cd4
cd3_2 = np.stack([cd3] * len(self.basegrid))
# Soma de cd1 e cd2. (Deve ser igual a distancia cd3)
cd4 = np.array(cd1) + np.array(cd2)
# cd4 - cd3 (soma cd1 cd2)
cd5 = pd.DataFrame(cd4 - cd3_2.T)
# Distanca
bp = pd.DataFrame(self.bp)
self.cd1 = cd1
self.cd2 = cd2
self.cd3 = cd3
self.cd4 = cd4
self.cd5 = cd5
self.basegrid.loc[:,'id_basepath'] = bp[bp < self.maximum_search_parameter][self.cd5<self.search_tolerance].idxmin()#cd5[pd.DataFrame(self.bp) < 400][cd5<0.00000001].idxmin()
self.basegrid.loc[:,'id_basepath'][self.basegrid['id_basepath'].isnull()] = bp[bp < self.maximum_search_parameter].idxmin()
self.basegrid.loc[:,'id_basepath'][self.basegrid['id_basepath'].isnull()] = 0
self.basegrid.loc[:,'id_basepath'] = self.basegrid['id_basepath'].astype(int)
except RuntimeError:
print('Runtime error is raised')
raise
def set_pto(self):
try:
self.basegrid.loc[:,('ptoE')] = np.nan
self.basegrid.loc[:,('ptoN')] = np.nan
#pto = np.array(self.pto).reshape(len(self.basepath),len(self.basegrid))
self.basegrid.loc[:,['ptoE', 'ptoN']] = np.array(self.pto)[self.basegrid['id_basepath']-1, range(len(self.basegrid['id_basepath']))]
except RuntimeError:
print('Runtime error is raised')
raise
def set_position(self):
try:
bE = np.stack([self.basepath.E[:-1]] * len(self.basegrid)).T
bN = np.stack([self.basepath.N[:-1]] * len(self.basegrid)).T
cE = np.stack([self.basepath.E[1:]] * len(self.basegrid)).T
cN = np.stack([self.basepath.N[1:]] * len(self.basegrid)).T
pE = np.stack([self.basegrid.E] * len(cE))
pN = np.stack([self.basegrid.N] * len(cE))
pst = np.sign(np.multiply(cE - bE, pN - bN) - np.multiply(cN - bN, pE - bE))
position = np.array(pst)[self.basegrid.id_basepath-1,range(len(self.basegrid))]
self.basegrid.position = position
except RuntimeError:
print('Runtime error is raised')
raise
def set_d(self):
try:
idb = self.basegrid['id_basepath']
d = np.array(self.bp)[idb,range(len(idb))] * np.sin(np.array(self.pbc)[idb, range(len(idb))])
self.basegrid['d'] = d * self.basegrid.position
except RuntimeError:
print('Runtime error is raised')
raise
def set_s(self):
try:
s_increment = np.array(self.bp)[self.basegrid['id_basepath'], range(len(self.basegrid))] * np.cos(
np.array(self.pbc)[self.basegrid['id_basepath'], range(len(self.basegrid))])
s_matrix = np.stack([self.basepath.Dist] * len(self.basegrid))
s_b = np.array(s_matrix.T)[self.basegrid['id_basepath'], range(len(self.basegrid))]
self.basegrid['sincrement'] = s_increment
self.basegrid['sB'] = s_b
self.basegrid['s'] = s_b - s_increment # BUG: There is something to be done about the increment sign. Can it be positive or negative (Change it and see the basic example)
except RuntimeError:
print('Runtime error is raised')
raise
def plot_s_d(self):
plt.figure(figsize=(10, 10))
plt.scatter(self.basegrid.s[self.basegrid.s.notnull()], self.basegrid.d[self.basegrid.s.notnull()],
c = self.basegrid.d[self.basegrid.s.notnull()],edgecolor='face')
plt.show()
def plot_result(self):
plt.figure(figsize=(10, 10))
plt.scatter(self.basegrid.E, self.basegrid.N, c = self.basegrid.s, edgecolor='face')
plt.scatter(self.basepath.E, self.basepath.N, c = self.basepath.Dist, edgecolor='face')
plt.show()
def plot_final(self):
import matplotlib.gridspec as gridspec
def plot_width(df,buff=100):
return (df.d.astype('float')>-buff)&(df.d.astype('float')<buff)
fig = plt.figure(figsize=(9.5,6))
gs = gridspec.GridSpec(2,1,height_ratios=[4,1])
ax0 = plt.subplot(gs[0])
p0 = plt.scatter(
self.basegrid.E[plot_width(self.basegrid)].tolist(),
self.basegrid.N[plot_width(self.basegrid)].tolist(),
s=3,
edgecolors='none',
)
p01 = plt.plot(
self.basepath.E,
self.basepath.N,
'black',
ls='-.',
lw=1,
label='s - line')
plt.legend()
ax0.set_xlabel('E (m)')
ax0.set_ylabel('N (m)')
ax1 = plt.subplot(gs[1])
p0 = plt.scatter(
self.basegrid.s[plot_width(self.basegrid)].tolist(),
self.basegrid.d[plot_width(self.basegrid)].tolist(),
s=3,
edgecolors='none',
)
plt.plot([0,self.basepath.Dist.max()],[0,0],'black',ls='-.',lw=1)
ax1.set_xlabel('s (m)')
ax1.set_ylabel('d (m)')
plt.show()
def run(self, progressbar = None):
from datetime import datetime
startTime = datetime.now()
global progress_counter
progress_counter = 0
def counter(progress):
if progress != None:
global progress_counter
progress_counter+=1
progress.setValue(progress_counter)
else:
pass
print datetime.now() - startTime
counter(progressbar)
self.calculate_bp()
print(' calculate_bp')
gc.collect()
print datetime.now() - startTime
counter(progressbar)
self.calculate_vbp_old()
print(' calculate_vbp')
print datetime.now() - startTime
gc.collect()
self.calculate_pbc()
counter(progressbar)
print(' calculate_pbc')
print datetime.now() - startTime
gc.collect()
self.calculate_pto()
counter(progressbar)
print(' calculate_pto')
print datetime.now() - startTime
gc.collect()
self.set_index_correlation()
counter(progressbar)
print(' set_index_correlation')
print datetime.now() - startTime
gc.collect()
self.set_pto()
print(' set_pto')
counter(progressbar)
print datetime.now() - startTime
gc.collect()
self.set_position()
counter(progressbar)
print(' set_position')
print datetime.now() - startTime
gc.collect()
self.set_d()
counter(progressbar)
print(' set_d')
print datetime.now() - startTime
gc.collect()
self.set_s()
counter(progressbar)
print(' set_s')
print datetime.now() - startTime
gc.collect()
self.plot_final()
counter(progressbar)
print datetime.now() - startTime
gc.collect()
| true |
2ed55d41a528387e5273f2bf1461382a65e1b512 | Python | fredrikt/nerds | /producers/juniper_conf/util/logger.py | UTF-8 | 136 | 2.9375 | 3 | [] | no_license |
def error(msg):
    """Log an error-level message."""
    # Consistency fix: route through _log() like warn() does, so all
    # output goes through a single chokepoint.
    _log("ERROR: {0}".format(msg))


def warn(msg):
    """Log a warning-level message."""
    _log("WARN: {0}".format(msg))


def _log(msg):
    # Single output chokepoint; swap this to a real logger backend later.
    print(msg)
| true |
ae39bdf65e12d3a85b84101334dd235f93121d62 | Python | cutesparrow/Intuduce-to-algorithm | /Part3/Chapter11/11-1.py | UTF-8 | 1,248 | 3.359375 | 3 | [] | no_license | #hash table can find or delete in O(1)
class HashTable:
    """Open-addressing hash table with linear probing and automatic growth.

    Capacities are taken from ``potential_length`` (primes); the table is
    rehashed to the next capacity once the load factor exceeds 0.6.
    Entries are (key, value) tuples; an empty slot is (None, None).
    """
    potential_length = [11, 41, 71, 131, 251, 541, 761, 1091, 1181]

    def __init__(self):
        self._length = 0      # index into potential_length (current capacity)
        self._rate = 0        # NOTE(review): unused; kept for compatibility
        self._count = 0       # number of stored entries in the main table
        self._list = [(None, None)
                      for i in range(self.potential_length[self._length])]

    def add(self, key, value, list=None):
        """Insert (key, value); return True on success, False if full.

        When *list* is given (used internally by rehase()), the entry is
        placed into that list without touching the counters.
        """
        target = self._list if list is None else list
        capacity = len(target)
        index = key % capacity
        # BUGFIX: the original probe ran only from `index` to the end of
        # the list and never wrapped around, so inserts could fail even
        # though free slots existed before `index`.
        for _ in range(capacity):
            if target[index][0] is None:
                target[index] = (key, value)
                # BUGFIX: compare by identity (`is`), not by value, to
                # decide whether we are filling the main table.
                if target is self._list:
                    self._count += 1
                    self.extend()
                return True
            index = (index + 1) % capacity
        return False

    def extend(self):
        """Rehash to the next capacity once the load factor exceeds 0.6."""
        # float() keeps the load-factor comparison correct even under
        # Python 2 integer division.
        if self._count / float(len(self._list)) > 0.6:
            self.rehase()

    def rehase(self):
        """Move every entry into a freshly allocated, larger table."""
        # NOTE(review): raises IndexError once potential_length is
        # exhausted; acceptable for the fixed capacities above.
        self._length += 1
        new_list = [(None, None)
                    for i in range(self.potential_length[self._length])]
        for entry in self._list:
            if entry[0] is not None:
                self.add(entry[0], entry[1], list=new_list)
        self._list = new_list
a = HashTable()
| true |
53d69d27ff3fe1b82066b3ea01a5b3df0d93ff50 | Python | laocaicaicai/algorithm | /Week_04/id_3/dp/LeetCode_63_3_v2.py | UTF-8 | 788 | 3.40625 | 3 | [] | no_license | """
尝试DP
"""
class Solution:
    def uniquePathsWithObstacles(self, grid) -> int:
        """Count monotone (right/down) paths from the top-left to the
        bottom-right cell of *grid*, where 1 marks an obstacle.

        Improvements over the original: the caller's grid is no longer
        mutated (the original overwrote cells in place), and the result is
        read explicitly from the last cell instead of relying on the loop
        variables after the loop.
        """
        if not grid or not grid[0]:
            return 0
        rows, cols = len(grid), len(grid[0])
        dp = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 1:
                    # Obstacles contribute zero paths.
                    continue
                if r == 0 and c == 0:
                    dp[r][c] = 1
                    continue
                if c > 0:
                    dp[r][c] += dp[r][c - 1]
                if r > 0:
                    dp[r][c] += dp[r - 1][c]
        return dp[-1][-1]
# Manual smoke checks: a 3x3 grid with one central obstacle has two
# monotone paths; grids whose start or end cell is blocked have none.
s = Solution()
print(s.uniquePathsWithObstacles([
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0]
]))
print(s.uniquePathsWithObstacles([[1, 0]]))
print(s.uniquePathsWithObstacles([[0, 1]]))
| true |
d0b7d86c06970220aad76c148b8d7ab90cf829fe | Python | starknguyen/data-science-python-uni-helsinki | /DataScienceUniHelsinki/Week01/unittest/reverse_dictionary_tst.py | UTF-8 | 1,547 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env python3
import unittest
from unittest.mock import patch
from Week01.reverse_dictionary import reverse_dictionary
class ReverseDictionary(unittest.TestCase):
    """Tests for reverse_dictionary(): inverting an English->Finnish
    word map so each Finnish word lists its English sources."""

    def test_first(self):
        # Every Finnish word has exactly one English source here.
        d = {"move": ["liikuttaa"], "hide": ["piilottaa", "salata"]}
        sd = str(d)
        r = reverse_dictionary(d)
        self.assertEqual(r["liikuttaa"], ["move"], msg="Incorrect translation of 'liikuttaa' for dict %s!" % sd)
        self.assertEqual(r["piilottaa"], ["hide"], msg="Incorrect translation of 'piilottaa' for dict %s!" % sd)
        self.assertEqual(r["salata"], ["hide"], msg="Incorrect translation of 'salata' for dict %s!" % sd)
        self.assertEqual(len(r), 3, msg="Incorrect number of elements in result for dict %s!" % d)

    def test_second(self):
        # 'kuusi' maps back to two English words ('six' and 'fir'); the
        # order is unspecified, hence the set comparison below.
        d = {"move": ["liikuttaa"], "hide": ["piilottaa", "salata"], "six": ["kuusi"], "fir": ["kuusi"]}
        sd = str(d)
        r = reverse_dictionary(d)
        self.assertEqual(r["liikuttaa"], ["move"], msg="Incorrect translation of 'liikuttaa' for dict %s!" % sd)
        self.assertEqual(r["piilottaa"], ["hide"], msg="Incorrect translation of 'piilottaa' for dict %s!" % sd)
        self.assertEqual(r["salata"], ["hide"], msg="Incorrect translation of 'salata' for dict %s!" % sd)
        self.assertEqual(set(r["kuusi"]), set(["fir", "six"]), msg="Incorrect translation of 'kuusi' for dict %s!" % sd)
        self.assertEqual(len(r), 4, msg="Incorrect number of elements in result for dict %s!" % d)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true |
8367b143b590a28e233739d452b3e5b273f1dcd2 | Python | kevinrankine/1401-projects | /mini3/cool.py | UTF-8 | 2,571 | 3.234375 | 3 | [] | no_license | import numpy as np
from copy import copy
import matplotlib.pyplot as plt
'''
input_neurons : [(neuron, weight), ..., (neuron, weight)]
tau : between [0, 1], regulates inertia of neuron's input
default_value : value an input neuron outputs (either 0 or 1)
'''
class Neuron(object):
    """Leaky sigmoid unit with weighted inputs and an adaptive gain G.

    Python 2 code: uses tuple-unpacking lambdas and print statements.
    """
    def __init__(self, input_neurons = [], tau = 0.1, default_value = 1.0, N = 256):
        # NOTE(review): mutable default argument -- all Neurons created with
        # the default share ONE list, so add_input on one leaks into others.
        self.prevX = 0.0  # NOTE(review): never read anywhere in this class.
        self.input_neurons = input_neurons  # list of (neuron, weight) pairs
        self.tau = tau  # inertia: fraction of new input mixed in per step
        self.default_value = default_value  # output when there are no inputs
        self.Xs = {0 : 0}  # memoized net input per time step t
        self.G = 1  # sigmoid gain, updated by update_vg
        self.V = [0. for _ in range(N)]   # current gain-unit activations
        self.Vp = [0. for _ in range(N)]  # previous gain-unit activations
    def activate(self, t):
        # Sigmoid of the gain-scaled net input; input-less neurons emit a
        # fixed default value instead.
        if len(self.input_neurons) > 0:
            return 1.0 / (1 + np.exp(-1.0 * self.G * self.X(t)))
        else:
            return self.default_value
    def X(self, t):
        # Net input at time t: leaky integration of the weighted inputs.
        # Memoized in self.Xs; recursively uses the value at t-1.
        if not t in self.Xs.keys():
            self.Xs[t] = self.Xs[t - 1] * (1 - self.tau) + self.tau * sum(map(lambda (neuron, weight) : weight * neuron.activate(t), self.input_neurons))
        return self.Xs[t]
    def add_input(self, neuron, weight):
        # Attach another presynaptic neuron with the given weight.
        self.input_neurons.append((neuron, weight))
    def set_default(self, value):
        # Change the constant output of an input-less neuron.
        self.default_value = value
    def update_vg(self, t, rate = 0.5, NE = 0, alpha = 1):
        # Update the gain population V and set G to its sum.
        N = len(self.V)
        for i in range(len(self.V)):
            noise = np.random.normal(0, 1)
            # NOTE(review): the sampled noise is immediately overwritten with
            # 0, deliberately disabling it (the draw above is wasted work).
            noise = 0
            self.V[i] = rate * self.Vp[i] + (1 - rate) * (self.activate(t) + alpha * (sum(self.Vp) - N * self.Vp[i])+ noise) - NE
        self.G = sum(self.V)
        self.Vp = self.V
        self.V = [0. for _ in range(N)]
def main():
    """Build a tiny target/distractor network, run it, and plot the output.

    For D steps the distractor input is on and the target input off; the
    inputs are then swapped and the network runs T more steps.  Python 2.
    """
    input_neuron_t = Neuron(default_value = 0.0)  # target input (off first)
    input_neuron_d = Neuron(default_value = 1.0)  # distractor input (on first)
    decision_target = Neuron(input_neurons = [(input_neuron_t, 1)], tau = 0.5)
    decision_distract = Neuron(input_neurons = [(input_neuron_d, 1)], tau = 0.5)
    # Distractor inhibits the target decision unit.
    decision_target.add_input(decision_distract, -1)
    output_neuron = Neuron(input_neurons = [(decision_target, 1)], tau = 0.5)
    data = []
    D = 15  # steps before the input swap
    T = 5   # steps after the input swap
    for t in range(D):
        data.append(output_neuron.activate(t))
        decision_target.update_vg(t)
        print decision_target.G
    # Swap which input is active.
    input_neuron_t.set_default(1.0)
    input_neuron_d.set_default(0.0)
    print "SWAP"
    for t in range(D, D + T):
        data.append(output_neuron.activate(t))
        decision_target.update_vg(t)
    data = np.array(data)
    # Blocks until the plot window is closed.
    plt.scatter(np.linspace(0, D + T, D + T), data)
    plt.show()
main()
25a7bb6ebb36b8e1f4238c51abc4a91016c5c21a | Python | osho1278/calculator-minor | /main.py | UTF-8 | 89 | 2.671875 | 3 | [] | no_license | from add import add
from subtract import subtract
print(add(5,5))
print(subtract(50,5))
| true |
8267f137f37b349e6e7085c37ca27cf14bf1aa16 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_1/guilhermedelima/A.py | UTF-8 | 547 | 3.375 | 3 | [] | no_license | #!/usr/bin/python2
import sys
def append_num( N, digit ):
    """Mark every decimal digit of N as seen in the boolean table ``digit``."""
    for ch in str(N):
        digit[int(ch)] = True
def solve(N):
    """Return (as a string) the last multiple of N counted before every
    decimal digit 0-9 has been seen, or 'INSOMNIA' when N == 0."""
    if N == 0:
        return 'INSOMNIA'
    seen = [False] * 10
    multiplier = 0
    while not all(seen):
        multiplier += 1
        # Inline digit collection (same effect as append_num).
        for ch in str(N * multiplier):
            seen[int(ch)] = True
    return str(N * multiplier)
def main():
    """Read T, then T values of N, and print one 'Case #k: answer' per line.

    Python 2: input() evaluates the typed line, so the values arrive as ints.
    """
    N = input()
    for k in range(N):
        X = input()
        sys.stdout.write('Case #' + str(k+1) + ': ')
        sys.stdout.write( solve(X) + '\n' )
        # Flush so output appears immediately when piped.
        sys.stdout.flush()
if __name__ == '__main__':
    main()
| true |
25abcc3d93372f82997680c61de6de68c8e74afd | Python | yjshi2015/PythonDemo | /com/syj/demo/firstDistributedSpider/NodeManager.py | UTF-8 | 5,152 | 2.890625 | 3 | [] | no_license | # coding=utf-8
from DataOutput import DataOutput
from UrlManager import UrlManager
import time
from multiprocessing.managers import BaseManager
from multiprocessing import Process, Queue
class NodeManager(object):
    """Control node of the distributed crawler (Python 2).

    Exposes the task/result queues over the network via BaseManager and
    provides the three local worker loops: URL management, result
    extraction and storage.
    """
    def start_manager(self, url_q, result_q):
        '''
        Create the distributed manager serving the two queues.

        :param url_q: queue the URL-manager process uses to hand URLs to
                      the crawler nodes
        :param result_q: queue the crawler nodes use to return scraped data
        :return: the BaseManager instance (caller starts/serves it)
        '''
        # Register both queues on the network; the callable argument binds
        # each registered name to the corresponding Queue object.
        BaseManager.register('get_task_queue', callable=lambda:url_q)
        BaseManager.register('get_result_queue', callable=lambda:result_q)
        # Bind to localhost:8001 with the auth key 'baike'.
        manager = BaseManager(address=('127.0.0.1', 8001), authkey='baike')
        # Return the manager; serving is done by the caller.
        return manager
    def result_solve_proc(self, result_q, conn_q, store_q):
        '''
        Result-extraction loop: read crawler output from result_q, forward
        new URLs to conn_q (for the URL manager) and the parsed
        title/summary data to store_q (for the storage process).  Exits
        when the 'end' marker arrives, propagating it to the store queue.
        '''
        while (True):
            try:
                if not result_q.empty():
                    content = result_q.get(True)
                    if content['new_urls'] == 'end':
                        # End-of-run notification received; propagate it.
                        print '结果分析进程接收结束通知'
                        store_q.put('end')
                        return
                    # content['new_urls'] is a set of URLs.
                    conn_q.put(content['new_urls'])
                    # content['data'] is a dict of parsed fields.
                    store_q.put(content['data'])
                else:
                    time.sleep(1)
            except BaseException, e:
                # NOTE(review): Python 2 except syntax; the exception is
                # swallowed and the loop retries after a short pause.
                time.sleep(1)
    def url_manager_proc(self, url_q, conn_q, root_url):
        '''
        URL-manager loop: feed deduplicated URLs from the UrlManager into
        url_q for the crawler nodes, and absorb URLs arriving on conn_q
        from the result-extraction process.  Stops after more than 2000
        crawled links, saving progress to disk first.
        '''
        url_manager = UrlManager()
        url_manager.add_new_url(root_url)
        while True:
            while(url_manager.has_new_url()):
                # Take the next unseen URL and hand it to a crawler node.
                new_url = url_manager.get_new_url()
                url_q.put(new_url)
                print 'old_url=', url_manager.old_url_size()
                # Shut down once more than 2000 links have been crawled.
                if(url_manager.old_url_size() > 2000):
                    # Tell the crawler nodes to finish.
                    url_q.put('end')
                    print '控制节点发起结束通知'
                    # Persist the new/old URL sets before exiting.
                    url_manager.save_progress('new_urls.txt', url_manager.new_urls)
                    url_manager.save_progress('old_urls.txt', url_manager.old_urls)
                    return
            # Absorb URLs forwarded by result_solve_proc.
            try:
                if not conn_q.empty():
                    urls = conn_q.get()
                    url_manager.add_new_urls(urls)
            except BaseException, e:
                time.sleep(1)
    def store_proc(self, store_q):
        '''
        Storage loop: read parsed records from store_q and write them out
        through DataOutput until the 'end' marker arrives.
        '''
        output = DataOutput()
        while True:
            if not store_q.empty():
                data = store_q.get()
                if data == 'end':
                    print '存储进程接收到结束通知'
                    output.output_end(output.filepath)
                    return
                output.store_data(data)
            else:
                time.sleep(1)
if __name__ =="__main__":
    '''
    Start the distributed manager plus the URL-manager, result-extraction
    and storage processes, wiring them together with four queues.
    '''
    url_q = Queue()     # URLs for the crawler nodes
    result_q = Queue()  # raw results back from the crawler nodes
    store_q = Queue()   # parsed records for the storage process
    conn_q = Queue()    # new URLs for the URL manager
    # Create the distributed manager (serves url_q/result_q over the network).
    node = NodeManager()
    manager = node.start_manager(url_q, result_q)
    # Create the URL-manager / result-extraction / storage processes.
    url_manager_proc = Process(target=node.url_manager_proc, args=(url_q, conn_q,
                                                                   'https://baike.baidu.com/item/%E9%95%BF%E5%AE%89%E5%8D%81%E4%BA%8C%E6%97%B6%E8%BE%B0/20110435',))
    result_solve_proc = Process(target=node.result_solve_proc, args=(result_q, conn_q, store_q,))
    store_proc = Process(target=node.store_proc, args=(store_q,))
    # Start the three workers, then serve the manager forever (blocks).
    url_manager_proc.start()
    result_solve_proc.start()
    store_proc.start()
    manager.get_server().serve_forever()
d93517a9e16392fb2b293e3849b05bbef25e1fa6 | Python | frankplus/electricityconsumptionapi | /app.py | UTF-8 | 1,627 | 2.59375 | 3 | [] | no_license | from flask import Flask
from flask import request
import mysql.connector
import sys
import json
import hashlib
# Load configuration (MySQL credentials and app settings) from config.json.
with open('config.json') as config_data_file:
    config = json.load(config_data_file)
mysqlconfig = config['mysql']
appconfig = config['app']
# Open a single module-level MySQL connection and cursor, shared by all
# request handlers below.
mydb = mysql.connector.connect(
    host=mysqlconfig['host'],
    port=mysqlconfig['port'],
    auth_plugin=mysqlconfig['auth_plugin'],
    user=mysqlconfig['user'],
    passwd=mysqlconfig['passwd'],
    database=mysqlconfig['db']
)
mycursor = mydb.cursor()
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Health-check endpoint: returns a static greeting."""
    return 'Hello, World!'
@app.route('/electricityusage', methods=['POST'])
def postusage():
    """Record one electricity-consumption sample.

    Expects a JSON body with ``start``, ``end``, ``watthour`` and ``key``.
    The key's SHA-256 hex digest is checked against the configured API key.
    Returns a plain-text status message.
    """
    import hmac

    reqdata = request.get_json()
    print('request: ', reqdata, file=sys.stderr)
    # Check that all required fields are set (start/end must be non-empty).
    if (not 'start' in reqdata) or reqdata['start']=='':
        return 'start field is required'
    if (not 'end' in reqdata) or reqdata['end']=='':
        return 'end field is required'
    if not 'watthour' in reqdata:
        return 'watthour field is required'
    if not 'key' in reqdata:
        return 'API key is required'
    # Authentication check.  compare_digest is constant-time, so the
    # comparison does not leak how many leading characters matched.
    keyhash = (hashlib.sha256(reqdata['key'].encode('utf-8'))).hexdigest()
    if not hmac.compare_digest(keyhash, appconfig['API_KEY']):
        return 'Authentication failed'
    # Parameterized INSERT: the driver binds the values, nothing is
    # interpolated into the SQL string.
    sql = "INSERT INTO consumption (start, end, watthour) VALUES (%s, %s, %s)"
    entry = (reqdata['start'], reqdata['end'], reqdata['watthour'])
    try:
        mycursor.execute(sql, entry)
        mydb.commit()
    except Exception:
        # Keep the shared connection usable and let Flask turn the error
        # into an HTTP 500, as an unhandled exception did before.
        mydb.rollback()
        raise
    return 'ok'
if __name__ == '__main__':
    # Development server; host/port/debug come from config.json.
    app.run(host=appconfig['ip'], port=appconfig['port'], debug=appconfig['debug'])
78f6a43e1e5141b122ebbb4b12de8090661ac584 | Python | ADSL-Plucky/learngit | /homework-10/2.py | UTF-8 | 4,490 | 3.265625 | 3 | [] | no_license | # -*- encoding: utf-8 -*-
'''
@File : 2.py
@Time : 2020/06/3 21:30:35
@Author : 黎淞
@Version : 3.7.0
@Contact : 836046238@qq.com
'''
'''
2 设计一个留言本的表(ID,留言内容,留言人,留言时间,是否删除)(表名,和字段名自己设计成英文:注意,不要用中文,用中文的直接0分);
使用PyMySQL 驱动模块,实现对这个表的增加,删除,修改,查询;数据库操作需要加入异常处理逻辑;
'''
import pymysql
import os
import datetime
class DataBase:
    """Console guest book backed by the MySQL table ``guestbook``.

    Columns: id, message, name, time, is_delete (soft-delete flag).
    """

    # Column names the "modify" operation may touch.  SQL identifiers cannot
    # be bound as query parameters, so they are validated against this
    # whitelist instead of being interpolated unchecked (SQL-injection fix).
    _EDITABLE_FIELDS = ('message', 'name', 'time', 'is_delete')

    def __init__(self,host = 'localhost',user = 'root',password = 'password',db = 'learn'):
        """Connect to the MySQL server and open a cursor."""
        self.host = host
        self.user = user
        self.password = password
        self.db = db
        self.database = pymysql.connect(self.host, self.user, self.password, self.db)
        self.cursor = self.database.cursor()

    def Send_Sql(self, sql, params=None):
        """Execute *sql* (optionally with bound *params*) and commit.

        Returns the cursor on success; on failure prints the error, rolls
        the transaction back and returns None.  ``params`` defaults to
        None so the original one-argument call style still works.
        """
        try:
            self.cursor.execute(sql, params)
            self.database.commit()
            return self.cursor
        except Exception as e:
            print(e)
            print("SQL语句执行失败!")
            self.database.rollback()

    def interface(self):
        '''
        Interactive menu loop: show the operations, read a number 0-5 and
        dispatch to execute() until the user chooses 0, then close the
        database connection.
        '''
        os.system('cls')
        print("                          留言板                       ")
        print("----------------------------------------------------------")
        print("0.退出")
        print("1.增加留言信息")
        print("2.删除留言信息")
        print("3.修改留言信息")
        print("4.查询留言信息")
        print("5.输出全部留言")
        print("----------------------------------------------------------")
        num = 1
        while num != 0:
            try:
                num = int(input("请输入您想要进行的操作(0 - 5):"))
                if num in range(0, 6):
                    self.execute(num)
                else:
                    raise Exception
            except Exception:
                # Bad menu number or bad id typed inside execute().
                print("您输入的操作数有误,请重新输入!")
        print("退出数据库")
        self.database.close()

    def execute(self, n):
        '''
        Run guest-book operation *n* (0-5); see the menu in interface().
        All user-supplied values are passed as bound parameters so they
        cannot break out of the SQL statements.
        '''
        print("您要进行的是操作{}!".format(n))
        if n == 0:
            return
        elif n == 1:
            # Add a message, timestamped now.
            message = input("请输入留言:")
            name = input("请输入留言人:")
            time = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
            sql = 'insert into guestbook(message,name,time) values (%s,%s,%s)'
            self.Send_Sql(sql, (message, name, time))
        elif n == 2:
            # Soft-delete by id.
            delete_id = input("请输入您要删除的留言id:")
            sql = 'update guestbook set is_delete = 1 where id = %s'
            self.Send_Sql(sql, (int(delete_id),))
        elif n == 3:
            # Modify one whitelisted column of one row.
            id = input("请输入您要修改的留言id:")
            fields = input("请输入您要修改的字段:")
            value = input("请输入您要修改的值:")
            # The column name cannot be a bound parameter; whitelist it.
            if fields not in self._EDITABLE_FIELDS:
                print("不支持修改该字段!")
                return
            sql = 'update guestbook set {} = %s WHERE id=%s'.format(fields)
            self.Send_Sql(sql, (value, int(id)))
        elif n == 4:
            # Show one non-deleted message by id.
            id = input("请输入您要查询的id:")
            sql = 'select * from guestbook where id = %s and is_delete = 0'
            res = self.Send_Sql(sql, (int(id),))
            if res is not None:
                if res.rowcount == 0:
                    print("查询结果为空!")
                else:
                    print("{:<5}{:60}{:^10}{:>30}".format("id", "message", "name", "time"))
                    for r in res:
                        print("{:<5}{:60}{:^10}{:>30}".format(r[0], r[1], r[2], r[3]))
        elif n == 5:
            # Dump the whole table, including the is_delete flag.
            sql = 'select * from guestbook'
            res = self.Send_Sql(sql)
            if res is not None:
                if res.rowcount == 0:
                    print("留言板没有留言")
                else:
                    print("{:<5}{:60}{:^10}{:>30}{:>10}".format("id", "message", "name", "time", "is_delete"))
                    for r in res:
                        print("{:<5}{:60}{:^10}{:>30}{:>10}".format(r[0], r[1], r[2], r[3], r[4]))
if __name__ == '__main__':
    # Connect with default credentials and start the interactive menu.
    database = DataBase()
    database.interface()
b38bd73559f7ee02c9f2dedd5ccc39d3b6c43443 | Python | JulianWack/MPhysProject | /SU2xSU2_preproduction/calibrate_paras.py | UTF-8 | 3,263 | 3.1875 | 3 | [] | no_license | import numpy as np
from SU2xSU2 import SU2xSU2
def calibrate(model_paras, sim_paras=None, production_run=False, accel=False):
    '''Tune ell and eps so the HMC acceptance rate falls in a desirable band.

    When the observed acceptance rate is outside the band, the number of
    integration steps ell is adjusted proportionally to the distance from
    the ideal rate of 0.65, with eps fixed by requiring unit-length
    trajectories (eps = 1/ell).  Calibration is capped at 10 iterations.

    model_paras: dict
        {N, a, ell, eps, beta}; ell and eps are starting guesses whose
        product must be 1.  NOTE: this dict is mutated in place.
    sim_paras: dict or None
        {M, thin_freq, burnin_frac, renorm_freq, accel, store_data}.
        When None, a short 500-trajectory run with 50% burn-in is used and
        a narrower acceptance band (0.6-0.75) is targeted.
    production_run: bool
        When True, also return the calibrated SU2xSU2 instance so its
        chain can be reused for measurements.
    accel: bool
        Passed into the default sim_paras when sim_paras is None.

    Returns model_paras, or (model, model_paras) when production_run.
    '''
    # Bounds for the desirable acceptance rate.
    lower_acc, upper_acc = 0.55, 0.8
    if sim_paras is None:
        # Defaults for a fast, rough calibration.
        sim_paras = {'M':500, 'thin_freq':1, 'burnin_frac':0.5, 'renorm_freq':10000, 'accel':accel, 'store_data':False}
        # Use a narrower target band so a production run started from this
        # calibration is unlikely to fall just outside the wider band.
        lower_acc, upper_acc = 0.6, 0.75
    good_acc_rate = False
    count = 0
    while good_acc_rate == False:
        model = SU2xSU2(**model_paras)
        model.run_HMC(**sim_paras)
        acc_rate = model.acc_rate
        d_acc_rate = 0.65 - acc_rate
        # Cap the number of calibration iterations at 10.
        # NOTE(review): when the cap triggers, the block below may still
        # adjust ell/eps once more, so the returned model_paras can differ
        # from the parameters of the last model actually run.
        if count >= 10:
            good_acc_rate = True
        if acc_rate < lower_acc or acc_rate > upper_acc:
            new_ell = int(np.rint(model_paras['ell']*(1 + d_acc_rate)))
            # Rounding can leave ell unchanged; enforce a minimal +/-1 step
            # to avoid looping forever on the same value.
            if new_ell == model_paras['ell']:
                if d_acc_rate > 0:
                    new_ell += 1
                else:
                    new_ell -= 1
            if new_ell == 0:
                break # stop calibration when step size has to be reduce below 1.
            model_paras['ell'] = new_ell
            model_paras['eps'] = 1/model_paras['ell']
            count +=1
        else:
            good_acc_rate = True
    if production_run:
        return model, model_paras
    return model_paras
12fb02dd1010692755bc9aefcb49b7619dd31443 | Python | jarrodmillman/nb2plots | /nb2plots/tests/test_doctree2nb.py | UTF-8 | 4,554 | 2.671875 | 3 | [
"BSD-2-Clause"
] | permissive | """ Test conversion of doctree to Jupyter notebook
"""
from os.path import join as pjoin
from glob import glob
from nb2plots.converters import to_notebook
from nb2plots.ipython_shim import nbf
# Shortcuts for the nbformat cell/notebook constructors used in the tests.
n_nb = nbf.new_notebook
n_md_c = nbf.new_markdown_cell
n_c_c = nbf.new_code_cell
from nose.tools import assert_equal
from .convutils import fcontents, DATA_PATH
def cells2json(cells):
    """Wrap *cells* in a fresh notebook and return its JSON serialization."""
    nb = nbf.new_notebook()
    nb['cells'] += cells
    return nbf.writes(nb)
def assert_rst_cells_equal(rst_text, cells):
    """Assert converting *rst_text* yields a notebook with exactly *cells*."""
    actual = to_notebook.from_rst(rst_text)
    expected = cells2json(cells)
    assert_equal(actual, expected)
def test_basic():
    """A bare paragraph converts to a single markdown cell."""
    assert_rst_cells_equal('Some text', [n_md_c('Some text')])
def test_runrole_reference():
    """clearnotebook/fullnotebook roles are dropped from the output."""
    # Ignore notebook reference in source ReST
    assert_rst_cells_equal('Some text :clearnotebook:`.`',
                           [n_md_c('Some text')])
    assert_rst_cells_equal('Some text :fullnotebook:`.`',
                           [n_md_c('Some text')])
def test_only():
    """``.. only::`` bodies survive for the markdown builder, vanish otherwise."""
    for builder_name in ('html', 'latex', 'unbelievable'):
        assert_rst_cells_equal(
            """
Before
.. only:: {0}
    Specific to builder {0}
After""".format(builder_name),
            [n_md_c('Before\n\nAfter')])
        # NOTE(review): this second template has no {0} placeholder, so the
        # .format(builder_name) call is a no-op and the same assertion is
        # repeated identically on each loop iteration.
        assert_rst_cells_equal(
            """
Before
.. only:: markdown
    More text
After""".format(builder_name),
            [n_md_c('Before\n\nMore text\n\nAfter')])
def test_doctests():
    """Doctest blocks in plain text become a code cell between markdown cells."""
    assert_rst_cells_equal("""\
Text 1
>>> # A comment
>>> a = 1
Text 2
""", [n_md_c('Text 1'), n_c_c('# A comment\na = 1'), n_md_c('Text 2')])
def test_nbplots():
    """nbplot directives convert to code cells, prose interleaved as markdown."""
    # nbplot directive with doctest markers
    assert_rst_cells_equal("""\
Text 1
.. nbplot::
    >>> # A comment
    >>> a = 1
Text 2
""", [n_md_c('Text 1'), n_c_c('# A comment\na = 1'), n_md_c('Text 2')])
    # nbplot directive with no doctest markers
    assert_rst_cells_equal("""\
Text 1
.. nbplot::
    # A comment
    a = 1
Text 2
""", [n_md_c('Text 1'), n_c_c('# A comment\na = 1'), n_md_c('Text 2')])
    # Doctest interspersed with text
    assert_rst_cells_equal("""\
Text 1
.. nbplot::
    >>> # A comment
    Some thoughts I had
    >>> a = 1
Text 2
""", [n_md_c('Text 1'),
      n_c_c('# A comment'),
      n_md_c('Some thoughts I had'),
      n_c_c('a = 1'),
      n_md_c('Text 2')])
def assert_nb_equiv(ipynb, expected):
    """Assert two notebook JSON strings are equal, ignoring nbformat_minor."""
    actual_nb = nbf.reads(ipynb)
    expected_nb = nbf.reads(expected)
    # Ignore different minor versions of Notebook format
    # It does not appear to be possible to request specific minor versions of
    # the Notebook format.
    expected_nb['nbformat_minor'] = actual_nb['nbformat_minor']
    assert_equal(actual_nb, expected_nb)
def assert_conv_equal(rst_str, expected):
    """Convert *rst_str* and compare the result to *expected* notebook JSON."""
    assert_nb_equiv(to_notebook.from_rst(rst_str), expected)
def test_example_files():
    """Every data-dir .rst file converts to its paired .ipynb file."""
    # test conversion over all .rst files, checking against .ipynb files
    for rst_fname in glob(pjoin(DATA_PATH, '*.rst')):
        rst_contents = fcontents(rst_fname, 't')
        nb_fname = rst_fname[:-3] + 'ipynb'
        nb_contents = fcontents(nb_fname, 't')
        assert_conv_equal(rst_contents, nb_contents)
def test_notebook_basic():
    """End-to-end: title, inline/display math and nbplot cells to ipynb JSON."""
    # Test conversion of basic ReST to ipynb JSON
    ipynb = to_notebook.from_rst(r"""
Title
=====
Some text with :math:`a = 1` math.
.. math::
    \textrm{math block}
.. nbplot::
    >>> c = 1
    >>> c
    1
More text.
.. nbplot::
    >>> d = 2
    >>> d
    2""")
    expected = r"""{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Title\n",
    "\n",
    "Some text with $a = 1$ math.\n",
    "\n",
    "$$\n",
    "\\textrm{math block}\n",
    "$$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "c = 1\n",
    "c"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "More text."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "d = 2\n",
    "d"
   ]
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 1
}"""
    assert_nb_equiv(ipynb, expected)
def test_default_mathdollar():
    """$...$ inline math is recognized without extra configuration."""
    # Test mathdollar extension present by default.
    ipynb = to_notebook.from_rst(r'Some text with $a = 1$ math.')
    expected = r"""{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Some text with $a = 1$ math."
   ]
  }
 ],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 1
}"""
    assert_nb_equiv(ipynb, expected)
| true |
4221efe820e303b6dbcec5771dd6f37cce1239ae | Python | illikainen/luoda | /tests/plugins/test_markdown.py | UTF-8 | 1,500 | 2.703125 | 3 | [
"BSD-2-Clause"
] | permissive | # pylint: disable=W0621
#
# Copyright (c) 2019, Hans Jerry Illikainen <hji@dyntopia.com>
#
# SPDX-License-Identifier: BSD-2-Clause
from pathlib import Path
from textwrap import dedent
from luoda.item import Item
from luoda.plugins.markdown import available, run
from ..fixtures import tmpdir # pylint: disable=W0611
def test_available() -> None:
    """The markdown plugin reports itself as available."""
    assert available()
def test_not_markdown() -> None:
    """Items whose path is not a markdown file pass through unchanged."""
    path = Path("foo")
    item = Item(path=path)
    assert run(item) == item
def test_no_title(tmpdir: Path) -> None:
    """Without a level-1 heading, content renders but no title is extracted."""
    path = tmpdir / "foo.md"
    path.write_text("## second")
    item = Item(path=path)
    res = run(item)
    # NOTE(review): str.index returns a position, so this assert would fail
    # (0 is falsy) if 'second' ever appeared at the very start of content.
    assert res.content.index("second")
    assert res != item
def test_title(tmpdir: Path) -> None:
    """The level-1 heading becomes the title and is removed from content."""
    path = tmpdir / "foo.md"
    path.write_text("# first \n ## second")
    item = Item(path=path)
    res = run(item)
    assert res.content.count("first") == 0
    assert res.content.count("second") == 1
    assert res.title == "first"
    assert res != item
def test_code(tmpdir: Path) -> None:
    """Fenced code blocks with known languages get syntax highlighting;
    blocks with no or unknown languages are kept without highlighting."""
    item = Item(path=tmpdir / "code.md")
    md = dedent(
        """
        # abcd
        ```python
        print("m00")
        ```
        ```c
        exit(EXIT_SUCCESS);
        ```
        ```
        asdf
        ```
        ```foobarbazqux
        hmm
        ```
        """
    )
    item.path.write_text(md)
    content = run(item).content
    # Only the python and c fences should be highlighted.
    assert content.count("highlight") == 2
    assert content.count("exit") == 1
    assert content.count("hmm") == 1
| true |
8eb528814a219e584735a84a487d84ba740fd23d | Python | Yorwxue/CPy | /Python_C_API/pymodule.py | UTF-8 | 324 | 3.28125 | 3 | [] | no_license |
# an example
# """
class pycompute(object):
    """Tiny arithmetic helper used to demonstrate calling Python from C."""

    def add(self, a, b):
        """Return the sum of the two operands."""
        return a + b

    def subtraction(self, a, b):
        """Return the difference of the two operands."""
        return a - b

    def compute(self, a, b):
        """Print both operands (two decimals), then return their difference."""
        print("a = %.2f" % a)
        print("b = %.2f" % b)
        result = self.subtraction(a, b)
        return result
# """
| true |
31aed61f8d87e86361c32490cf6962f0181e1a39 | Python | al3xhh/CloudAndBigDataProgrammingAssigments | /P22_spark.py | UTF-8 | 317 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
import sys
from pyspark import SparkContext
# Count log lines per value of the 7th whitespace-separated field and print
# the first ten (key, count) pairs.  Python 2 (print statement).
sc = SparkContext()
logRDD = sc.textFile('../Data/P12_data')
logRDD = logRDD.map(lambda log: log.split())
# Key on field 7, value 1 per occurrence.
logRDD = logRDD.map(lambda log: (log[6], 1))
# NOTE(review): reduceByKey merges two *values* per key; the parameter
# names 'key'/'value' are misleading -- both arguments are counts.
logRDD = logRDD.reduceByKey(lambda key, value: key + value)
firstTen = logRDD.take(10)
print firstTen
| true |
49f39e47436a189ea3221f71efc86f01f005c617 | Python | RobertTLange/picknmix | /picknmix/picknmix.py | UTF-8 | 8,486 | 3.25 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""Pick n Mix is a simple stacking tool for stacking Sci-Kit learn models of your picks.
It provided 2 classes: Layer and Stack. Layer is a parallel combination of models,
while Stack combine Layers to create a stacking model"""
from copy import deepcopy
import numpy as np
import warnings
class Layer:
    """A parallel combination of scikit-learn style models, each with an
    optional preprocessor, whose column-stacked predictions feed the next
    Layer of a Stack."""

    def __init__(self, models, preprocessors=None, proba=False):
        """Initialize Layer, create a parallel combination of models
        with or without preprocessors.

        Parameters
        ==========
        models : a list of scikit-learn style models
        preprocessors : a list of sklearn.preprocessing transformers or None.
            If not None, its length must match the number of models; use a
            None entry to skip preprocessing for an individual model.
        proba : bool or list of bool.  When True for a model, its
            predict_proba output is used instead of predict (useful for
            classifiers in non-final layers).  A list must match the
            number of models.
        """
        if preprocessors is not None:
            assert len(preprocessors) == len(models), \
                "Number of preprocessors and models does not match, got {} processors but {} models.".format(len(preprocessors),len(models))
        if type(proba) != bool:
            assert len(proba) == len(models), \
                "Length of proba and number of models does not match, got {} processors but {} models.".format(len(proba),len(models))
        self.width = len(models)  # number of parallel models
        if preprocessors is None:
            self.preprocessors = [None] * self.width
        else:
            self.preprocessors = deepcopy(preprocessors)
        # Deep copies so fitting this Layer never mutates caller objects.
        self.models = deepcopy(models)
        if type(proba) == bool:
            self.proba = [proba] * self.width
        else:
            self.proba = deepcopy(proba)

    def _model_output(self, idx, X_new):
        """Output of model *idx* on preprocessed input, always 2-D.

        Uses predict_proba when requested and available (falling back to
        predict with a warning otherwise); plain predict results are
        expanded to shape (n_samples, 1).
        """
        if self.proba[idx]:
            if _method_checker(self.models[idx], 'predict_proba'):
                return self.models[idx].predict_proba(X_new)
            warnings.warn("Warning: predict_proba not exist for {}, using predict instead".format(self.models[idx].__class__))
        temp_result = self.models[idx].predict(X_new)
        return np.expand_dims(temp_result, axis=1)

    def fit(self, X, y):
        """Fit each preprocessor and model with (X, y) and return the
        column-stacked training predictions for the next Layer.

        Parameters
        ==========
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data
        y : array_like, shape (n_samples, n_targets)
            Target values.

        Returns
        =======
        C : array, shape (n_samples, n_columns)
            Predicted values for the next layer.
        """
        result = None
        for idx in range(self.width):
            if self.preprocessors[idx] is not None:
                X_new = self.preprocessors[idx].fit_transform(X)
            else:
                X_new = X
            self.models[idx].fit(X_new, y)
            temp_result = self._model_output(idx, X_new)
            if result is None:
                result = temp_result
            else:
                result = np.concatenate((result, temp_result), axis=1)
        return result

    def predict(self, X):
        """Return the column-stacked predictions of the already-fitted
        models for the next Layer (preprocessors are applied, not refit).

        Parameters
        ==========
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples

        Returns
        =======
        C : array, shape (n_samples, n_columns)
            Predicted values for the next layer.
        """
        result = None
        for idx in range(self.width):
            if self.preprocessors[idx] is not None:
                X_new = self.preprocessors[idx].transform(X)
            else:
                X_new = X
            temp_result = self._model_output(idx, X_new)
            if result is None:
                result = temp_result
            else:
                result = np.concatenate((result, temp_result), axis=1)
        return result
class Stack:
    def __init__(self, layers, folds=None):
        """Initialize Stack, a vertical stacking of Layers.

        Parameters
        ==========
        layers : a list of Layers (deep-copied, so callers keep their own).
        folds : one of
            - None (default): all data trains every layer;
            - a custom list of index sets, one per layer;
            - an object with get_n_splits()/split() (sklearn-style
              cross-validator); its n_splits is forced to the number of
              layers when they differ.
        """
        self.depth = len(layers) # number of layers
        self.layers = deepcopy(layers)
        self.use_folds = False
        self.folds = None
        self.splitter = None
        if folds is not None:
            if _check_custom_folds(folds):
                # Explicit per-layer index lists supplied by the caller.
                self.use_folds = True
                self.folds = folds
                if len(folds) != self.depth:
                    raise AssertionError("There are {} folds but {} layers".format(len(folds), self.depth))
            elif _method_checker(folds, 'get_n_splits') and _method_checker(folds, 'split'):
                # Duck-typed sklearn cross-validator.
                self.use_folds = True
                self.splitter = folds
                if self.splitter.get_n_splits() != self.depth:
                    warnings.warn("Warning: Number of fold is not the same as number of layers, using the number of layers as number of flods")
                    self.splitter.n_splits = self.depth
            else:
                raise AssertionError("{} is not a valid input".format(folds))
    def fit(self, X, y):
        """Fit the Layers with (X, y) and return the fitted Stack.

        With folds, layer i is trained on fold i, with the data first
        pushed through the predictions of the already-fitted layers
        0..i-1.  Without folds, each layer's fit() output feeds the next.

        Parameters
        ==========
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Training data.  NOTE: with folds, X and y must support fancy
            indexing by an index collection (e.g. numpy arrays).
        y : array_like, shape (n_samples, n_targets)
            Target values.

        Returns
        =======
        self : object, the fitted Stack itself
        """
        if self.use_folds:
            if self.folds is None:
                # NOTE(review): sklearn splitters yield one (train, test)
                # index pair per fold; unpacking the split() result into
                # exactly two names assumes a very specific return shape --
                # verify against the splitter actually used.
                _, self.folds = self.splitter.split(X)
            X_new = X[self.folds[0]]
            y_new = y[self.folds[0]]
        else:
            X_new = X
        for idx in range(self.depth):
            if self.use_folds:
                # Push this fold's data through all previously fitted layers.
                for pre_idx in range(idx):
                    X_new = self.layers[pre_idx].predict(X_new)
                self.layers[idx].fit(X_new, y_new)
                if idx < self.depth - 1:
                    # Load the next layer's fold for the next iteration.
                    X_new = X[self.folds[idx+1]]
                    y_new = y[self.folds[idx+1]]
            else:
                # Layer.fit returns its training predictions, which become
                # the next layer's input.
                X_new = self.layers[idx].fit(X_new, y)
        return self # follow convention of Sci-Kit learn and return self
    def predict(self, X):
        """Predict with the fitted Stack by chaining the Layers.

        Parameters
        ==========
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Samples.

        Returns
        =======
        C : array, shape (n_samples,)
            Returns predicted values from the Stack.
        """
        X_new = X
        for idx in range(self.depth):
            X_new = self.layers[idx].predict(X_new)
        # flatten result if only a number for each X
        if X_new.shape[1] == 1:
            X_new = X_new.flatten()
        return X_new # this is the final result
def _method_checker(obj,method_name):
return method_name in dir(obj)
def _check_custom_folds(obj):
try:
return isinstance(obj[0][0], int)
except TypeError:
return False
| true |
36d33f7dc893f893e61f2f3fa3bcaf6853e1c282 | Python | ChihchengHsieh/EventLogDiCE | /cf_search/CfSearcher.py | UTF-8 | 3,610 | 2.671875 | 3 | [] | no_license | import tensorflow as tf
import pandas as pd
import textdistance
class CfSearcher(object):
    """Counterfactual search over an event-log training set.

    Finds training cases whose milestone activities match a query trace,
    truncates them just before a desired activity, and keeps those for
    which ``pred_model`` actually predicts that activity next.
    """
    def __init__(self, training_df, pred_model, milestones=[
        "A_SUBMITTED_COMPLETE",
        "A_PARTLYSUBMITTED_COMPLETE",
        "A_PREACCEPTED_COMPLETE",
        "A_ACCEPTED_COMPLETE",
        "A_FINALIZED_COMPLETE",
        "O_SELECTED_COMPLETE",
        "O_CREATED_COMPLETE",
        "O_SENT_COMPLETE",
        "O_SENT_BACK_COMPLETE",
        "A_APPROVED_COMPLETE",
        "A_ACTIVATED_COMPLETE",
        "A_REGISTERED_COMPLETE"
    ]) -> None:
        # NOTE(review): mutable default argument -- the milestones list is
        # shared across instances created with the default.  It is only
        # read here, but callers must not mutate it in place.
        super().__init__()
        self.training_df = training_df  # pandas DataFrame of training cases
        self.pred_model = pred_model    # TF model: (activity, resource, amount) -> logits
        self.milestones = milestones
    def search(self, activities, desired, amount=None, replace_amount=None):
        """Return (all candidate cases, cases predicted to reach *desired*).

        Candidates are sorted by Levenshtein distance of their activity
        sequence to *activities* (column 'activity_sparcity').
        """
        # Keep only the milestone activities from the query trace.
        milestone_trace = [
            a for a in activities if a in self.milestones]
        # Keep training cases whose activity vocabulary contains every
        # milestone of the query trace.
        query_df = self.training_df[[all(
            [v in t['activity_vocab'] for v in milestone_trace]) for t in self.training_df.iloc]]
        # Optionally restrict to cases with the same amount.
        if not amount is None:
            query_df = query_df[query_df['amount'] == amount]
        # Keep cases whose trace contains the desired activity.
        desired_df = query_df[[
            desired in v for v in query_df['activity_vocab']]]
        if (len(desired_df) <= 0):
            raise Exception("Not matches found in trainig set")
        # Truncate each case just before the desired activity, so the model
        # can be asked to predict the next step and we can check whether it
        # matches the ground truth.
        # NOTE(review): .at assignments on a frame derived by boolean
        # filtering may hit pandas' chained-assignment behaviour; the
        # pd.DataFrame(...) copy below happens only AFTER the writes --
        # verify the mutations land where intended.
        for idx in list(desired_df.index):
            desired_idx = desired_df.loc[idx]['activity_vocab'].index(desired)
            for col in ['activity', 'activity_vocab', 'resource', 'resource_vocab']:
                desired_df.at[idx,
                              col] = desired_df.loc[idx][col][:desired_idx]
        desired_df = pd.DataFrame(desired_df)
        # Optionally overwrite the amount in every candidate case.
        if not replace_amount is None:
            desired_df['amount'] = [replace_amount] * len(desired_df)
        all_predicted_vocabs = []
        all_predicted_value = []
        # Run the prediction model on each truncated case and record the
        # predicted next activity and its softmax probability.
        for idx in range(len(desired_df)):
            ex_activity = tf.constant(
                [desired_df.iloc[idx]['activity']], dtype=tf.float32)
            ex_resource = tf.constant(
                [desired_df.iloc[idx]['resource']], dtype=tf.float32)
            ex_amount = tf.constant(
                [desired_df.iloc[idx]['amount']], dtype=tf.float32)
            out, _ = self.pred_model(
                ex_activity, ex_resource, ex_amount, training=False)
            out = tf.nn.softmax(out, axis=-1)
            # Most likely next activity from the last time step.
            pred_idx = tf.argmax(out[:, -1, :], axis=-1).numpy()[0]
            predicted_vocab = self.pred_model.activity_vocab.index_to_vocab(
                pred_idx)
            all_predicted_vocabs.append(predicted_vocab)
            all_predicted_value.append(out[:, -1, pred_idx].numpy()[0])
        desired_df['predicted_vocab'] = all_predicted_vocabs
        desired_df['predicted_value'] = all_predicted_value
        desired_df['lengths'] = [len(a) for a in desired_df['activity_vocab']]
        # Edit distance between the query trace and each candidate trace.
        desired_df['activity_sparcity'] = [textdistance.levenshtein.distance(
            activities, a) for a in desired_df['activity_vocab']]
        desired_df = desired_df.sort_values('activity_sparcity')
        # Counterfactuals: cases where the model indeed predicts *desired*.
        cf = desired_df[desired_df['predicted_vocab'] == desired]
        return desired_df, cf
| true |
50a0f4736c7083914702df15769cd1b668ea1b18 | Python | yunaichun/python3-study | /03基础库/文件和目录.py | UTF-8 | 498 | 2.9375 | 3 | [] | no_license | import os
# Demo of os.path vs pathlib equivalents.
print(os.path.abspath('.'))
print(os.path.exists('/Users'))
print(os.path.isfile('/Users'))
print(os.path.isdir('/Users'))
print(os.path.join('/Users/', 'a'))
from pathlib import Path
p = Path('.')
print(p.resolve()) # equivalent to print(os.path.abspath('.'))
print(p.exists())
print(p.is_file())
print(p.is_dir())
# Extra pathlib feature: recursive directory creation.
q = Path('/Users/yunaichun/Downloads/a/b')
# parents=True creates missing parent directories automatically.
# NOTE(review): without exist_ok=True this raises FileExistsError on rerun.
Path.mkdir(q, parents=True)
| true |
61ecc308d5ebbf91bdae81149f234377f0aacd30 | Python | unjambonakap/ctf | /hacklu/2010/ch5.bak/bot.py | UTF-8 | 2,473 | 2.53125 | 3 | [] | no_license | #!/usr/bin/python
from operator import indexOf;
import re;
import socket;
import sys;
from battleships import *;
from redbeard import *;
# Server taunts: each message's list index leaks one sample of the server's
# RNG output (used later to rebuild its state in bf.c).
msgSuccess = ["Hahaha, is this your first sea-fight?!", "HrhrhrhrHrrrrrr!", "*Sing* 13 men on the dead man's chest, and a buddle of rum...","W0000ty!","rofl","You callin you'self mighty pirate? Hohohoho...","Still want your 500P?!","You better sail home while you can!"];
msgFail = ["GrrrrrRrrrRrr.","Must be the wind...","Uhm.","o_O","O_o","What-the...?!","Hahaha - this means nothing!","A lucky man you are.",":x","This can't be...","GrrrRrrrrRrrrrrrrrrrrrrrrrrrrrrrr.","There must be something wrong with me cannons."];
pos=0;   # number of opponent shots processed so far
rem=14;  # ship cells we believe remain afloat
tb=[];   # collected (observed index, modulus) pairs for the RNG brute force
def nothing(s):
    """Read server output from socket *s* until a '>>>' prompt appears,
    recording RNG observations into the global ``tb``.  Python 2."""
    global tb, pos;
    while True:
        for l in s.recv(10240).split("\n"):
            l=l.rstrip();
            print "received "+l;
            if (l.startswith(">>>")):
                # Prompt reached: the server is waiting for our move.
                return;
            elif (l.startswith("Cap'n Redbeard> ")):
                l=l[l.index(">")+2:];
                if (l in msgSuccess):
                    # Record which taunt was chosen and the list size (modulus).
                    tb.append((indexOf(msgSuccess, l), len(msgSuccess)));
                elif (l in msgFail):
                    tb.append((indexOf(msgFail, l), len(msgFail)));
                elif (l.startswith("[i] Your opponent missed while shooting at") or l.startswith("[i] Your opponent hit a ship while shooting at ")):
                    # NOTE(review): str.find returns -1 (truthy) when 'hit'
                    # is absent, so rem is decremented on misses too --
                    # likely meant l.find("hit") != -1.
                    if (l.find("hit")):
                        global rem;
                        rem-=1;
                    # Parse the shot coordinates "[a,b]".
                    l=l[10:];
                    tmp=l[l.index("[")+1:l.index("]")];
                    a,b=tmp.split(",");
                    if (pos%5>=3):
                        # Coordinates are RNG draws modulo the 10x10 board.
                        tb.extend([(int(a), 10), (int(b), 10)]);
                    pos+=1;
                elif (l.startswith("You lost")):
                    sys.exit(-1);
server="pirates.fluxfingers.net";
port=2204;
name="TA_MAMAN";
# The network phase below (collecting tb from the live server) is disabled;
# the captured observations are hard-coded in tb further down.
#s=socket.socket(socket.AF_INET, socket.SOCK_STREAM);
#print "connecting\n";
#s.connect((server, port));
#
#print "connected\n";
#
#nothing(s);
#s.send(name);
#
#while(rem>=5):
#	print "REMAINING "+str(rem);
#	nothing(s);
#	s.send("0 0");
#
#print tb;
#
#s.close();
#
# Observations captured from a previous session: (index, modulus) pairs.
tb=[(2, 8), (1, 8), (6, 8), (2, 8), (6, 10), (1, 10), (3, 12), (1, 8), (0, 10), (7, 10), (6, 8), (3, 8), (2, 12), (2, 8), (6, 12), (7, 8), (0, 10), (3, 10), (6, 8), (0, 10), (4, 10), (3, 8), (9, 12)];
# Generate C code that brute-forces the Wichmann-Hill-style RNG state
# (ni, nj, nk) consistent with every observation, to be spliced into bf.c.
code="";
codea="nr=ni/30269.0 + nj/30307.0 + nk/30323.0; nr-=(int)nr;\n";
codeb="ni=(171*ni)%30269; nj=(172*nj)%30307; nk=(170*nk)%30323;\n";
for p, (a, b) in enumerate(tb):
    # NOTE(review): this template has unbalanced parentheses ("((...)" with
    # no closing ")") and no comparison that matches C semantics for
    # "index == floor(nr*modulus)" -- the generated res.c will not compile
    # as-is; verify against bf.c's expectations.
    cond="if ((%s!=nr*%s) continue;\n"% (b, a);
    code+=codea+cond+codeb;
code+='printf("%d %d %d\\n", ni, nj, nk);\n';
# Splice the generated checks into the bf.c template at //INSERT.
fd=open("bf.c", "r");
content=fd.read();
fd.close();
content=content.replace("//INSERT", code);
fd=open("res.c", "w");
fd.write(content);
fd.close();
| true |
0d0f59c515e0ffb55bfba1cdbb884fa089a7f62f | Python | krenevych/programming | /P_04/ex_5.py | UTF-8 | 774 | 4.15625 | 4 | [] | no_license | """
Приклад 4.4. Визначити, чи є квадратна дійсна матриця порядку N симетричною.
"""
def is_symmetric(matrix):
    """Return True when the square matrix equals its transpose.

    Short-circuits on the first mismatching pair (the original scanned
    every element even after finding a mismatch).
    """
    n = len(matrix)
    return all(matrix[i][j] == matrix[j][i]
               for i in range(n) for j in range(n))


if __name__ == "__main__":
    # Read the N x N matrix row by row from the keyboard.
    N = int(input("Кількість рядків матриці "))
    M = []  # start with an empty matrix
    for i in range(N):
        # One whitespace-separated row of floats.
        row = list(map(float, input("%d: " % i).split()))
        M.append(row)
    # Report whether the matrix is symmetric (same output as the original).
    print(is_symmetric(M))
| true |
da2ac8765dd0f194188cb9e2e7554161a01c6caa | Python | destroyerdust/Citation-Needed-AI | /server/main.py | UTF-8 | 6,085 | 3.0625 | 3 | [] | no_license | #!/usr/bin/python
"""
Main server for the AI competition
"""
# Creates a child process for the competitor AI, communicates via STDIO
#
# Main processing:
# 1- write world state to player process
# 2- wait for input from player process
# 3- process player output
# 4- sanity check world state; verify player moves are legal before applying them
# 5- update world state
# 6- move zombies (updates world state)
# 7- print new world state to client
# 8- flush player process stdin
import subprocess
import argparse
import fcntl
import os
from gameBoard import *
# Command-line interface: board geometry, entity counts and game limits.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("playerExe", metavar="/path/to/player/ai", type=str, help="Path to the player AI executable", default=None)
parser.add_argument("--height", type=int, help="Height of the board", default=12)
parser.add_argument("--width", type=int, help="Width of the board", default=12)
parser.add_argument("--zombies", type=int, help="Number of zombies", default=5)
#parser.add_argument("--player-row", type=int, help="Row for the human player to start on", default=-1)
#parser.add_argument("--player-col", type=int, help="Column for the human player to start on", default=-1)
parser.add_argument("--humans", type=int, help="Number of human players", default=1)
parser.add_argument("--blocked", type=int, help="%% of squares that are impassible", default=5)
parser.add_argument("--obscured", type=int, help="%% of squares that are passible but block LOS", default=10)
parser.add_argument("--max-turns", type=int, help="Maximum number of turns allowed before we assume the human wins", default=200)
args = parser.parse_args()
# Build a random board and place every entity on its own empty square.
print 'Generating random board of size',args.width,'by',args.height,'containing',args.zombies,'zombies'
board = GameBoard(args.width,args.height,args.humans,args.zombies, args.blocked, args.obscured)
print 'Placing entities on the board'
for e in GameBoard._entities:
    square = board.randomEmptySquare()
    square.setOccupant(e)
    print 'placing entity',e._id,'on square',square._id
print 'Initial board state:'
print str(board)
turn = 1
print '\n\n=== START OF TURN',turn,'==='
board.draw()
# Launch the competitor AI; all communication goes over its stdio pipes.
print 'Starting Child Process',args.playerExe
client = subprocess.Popen(args.playerExe.split(),stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=False,bufsize=1)
client.stdout.flush()
# set up client.stderr as non-blocking
#fd = client.stdout.fileno()
#fl = fcntl.fcntl(fd, fcntl.F_GETFL)
#fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
fd = client.stderr.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
print 'PID:',client.pid
# Initial handshake: board dimensions first, then the serialized board.
print 'Writing initial state to child process'
client.stdin.write(str(args.width)+" "+str(args.height)+"\n")
client.stdin.write(str(board)+"\n")
client.stdin.flush()
gameOver = False
turns = 0
print 'Starting Game'
# Main game loop: read the client's actions for the turn, apply them,
# run the zombies, check win/lose conditions, and send the new board.
while not gameOver:
    turns = turns+1
    client.stderr.flush()
    # Drain any pending client debug output (stderr is non-blocking, so
    # readline may raise when nothing is available -- hence the bare
    # except; NOTE(review): a bare except also hides real errors).
    try:
        dbg = client.stderr.readline().strip()
        if len(dbg) > 0:
            print '[debug]',dbg
    except:
        dbg = ''
    client.stdout.flush()
    line = client.stdout.readline().strip()
    actions = []
    # Collect action lines until the client signals the end of its turn.
    while line != 'END TURN':
        try:
            dbg = client.stderr.readline().strip()
            if len(dbg) > 0:
                print '[debug]',dbg
        except:
            dbg = ''
        # Lines starting with '#' are client comments and are ignored.
        if len(line) > 0 and line[0]!='#':
            #print '>',line,'<'
            actions.insert(len(actions), line)
        #elif len(line) > 0 and line[0]=='#':
        #    print '[debug]',line.lstrip('#')
        #client.stdout.flush()
        #client.stderr.flush()
        #client.stdin.flush()
        line = client.stdout.readline().strip()
        #print '>>>',line
    actions.insert(len(actions), line)
    print '[info] Recieved end of turn signal from client'
    # get ready for the new turn
    for e in Entity.allEntities:
        e.actionsTaken = 0
    # handle all of the actions the player wants to perform
    # Each action line is "VERB entity_id target_id"; malformed lines
    # (wrong token count) are silently skipped.
    for a in actions:
        tokens = a.split()
        if len(tokens) == 3:
            action = tokens[0].strip().upper()
            entity = Entity.allEntities[int(tokens[1])]
            target = int(tokens[2])
            #print '[info] Evaluating player action',action
            if action == 'MOVE':
                entity.move(target)
            elif action == 'SHOOT':
                entity.shoot(board.getSquareById(target))
            elif action == 'SEARCH':
                entity.search(board.getSquareById(target))
            elif action == 'ATTACK':
                entity.attack(board.getSquareById(target))
    # zombies take their turns
    print '[info] Taking Zombie Turns'
    zombies = board.getEntities(Entity.ZOMBIE_TEAM)
    random.shuffle(zombies)
    for z in zombies:
        if not z.isDead():
            z.takeTurn(board)
    print '[info] Zombie Turns Over'
    # check for game over
    humans = board.getEntities(Entity.HUMAN_TEAM)
    numAlive = 0
    for h in humans:
        if not h.isDead():
            numAlive = numAlive + 1
    numZ = 0
    for z in zombies:
        if not z.isDead():
            numZ = numZ + 1
    if numZ == 0 and numAlive == 0:
        print '* Everything died! Draw'
        gameOver = True
    elif numZ == 0:
        print '* All zombies are dead! Player win'
        gameOver = True
    elif numAlive == 0:
        print '* All humans are dead! Player loss'
        gameOver = True
    elif turns==args.max_turns:
        print '* Ran out of turns. Draw'
        gameOver = True
    else:
        # Game continues: advance the turn and send the updated board.
        turn = turn+1
        print '\n\n=== START OF TURN',turn,'==='
        board.draw()
        client.stdin.write(str(board)+'\n')
        client.stdin.flush()
# Tell the client the game is over and report the result.
client.stdin.write('GAME OVER\n')
client.stdin.flush()
gameOver = True
print '* Player AI lasted',turn,'turns'
print 'GAME OVER'
| true |
adf1ff716765ed73fac0b4237c2a66379f2b5d1f | Python | wfslithtaivs/InterviewPrepTasks | /patternmatch.py | UTF-8 | 2,854 | 3.953125 | 4 | [] | no_license | """Check if pattern matches.
Given a "pattern string" starting with "a" and including only "a" and "b"
characters, check to see if a provided string matches that pattern.
For example, the pattern "aaba" matches the string "foofoogofoo" but not
"foofoofoodog".
Patterns can only contain a and b and must start with a:
>>> pattern_match("b", "foo")
Traceback (most recent call last):
...
AssertionError: invalid pattern
>>> pattern_match("A", "foo")
Traceback (most recent call last):
...
AssertionError: invalid pattern
>>> pattern_match("abc", "foo")
Traceback (most recent call last):
...
AssertionError: invalid pattern
The pattern can contain only a's:
>>> pattern_match("a", "foo")
True
>>> pattern_match("aa", "foofoo")
True
>>> pattern_match("aa", "foobar")
False
It's possible for a to be zero-length (a='', b='hi'):
>>> pattern_match("abbab", "hihihi")
True
Or b to be zero-length (a='foo', b=''):
>>> pattern_match("aaba", "foofoofoo")
True
Or even for a and b both to be zero-length (a='', b=''):
>>> pattern_match("abab", "")
True
But, more typically, both are non-zero length:
>>> pattern_match("aa", "foodog")
False
>>> pattern_match("aaba" ,"foofoobarfoo")
True
>>> pattern_match("ababab", "foobarfoobarfoobar")
True
Tricky: (a='foo', b='foobar'):
>>> pattern_match("aba" ,"foofoobarfoo")
True
Now non-toy tests:
>>> pattern_match("abba" ,"foobarbarfod")
False
"""
def is_solution(solution, pattern, astring):
    """Check whether the candidate segment lengths make the pattern match.

    ``solution[0]`` is the length assigned to every 'a' piece and
    ``solution[1]`` the length assigned to every 'b' piece.  Walk the
    pattern carving ``astring`` into pieces of those lengths and verify
    that all 'a' pieces are identical, all 'b' pieces are identical, and
    the pieces exactly cover ``astring``.
    """
    matches = {}
    i = 0
    for c in pattern:
        n = solution[ord(c) - ord('a')]
        part = astring[i:i+n]
        i += n
        if c not in matches:
            matches[c] = part
        elif matches[c] != part:
            return False
    return i == len(astring)


def pattern_match(pattern, astring):
    """Can we make this pattern match this string?"""
    # Q&D sanity check on pattern
    assert (pattern.replace("a", "").replace("b", "") == ""
            and pattern.startswith("a")), "invalid pattern"
    str_len = len(astring)
    n = tuple(pattern.count(x) for x in ('a', 'b'))
    # If either letter occurs exactly once, that single piece can absorb
    # whatever remains after giving the other letter zero-length pieces,
    # so a match always exists.
    if any(el == 1 for el in n):
        return True
    for i in range(str_len // n[0] + 1):  # i = candidate length of 'a'
        remainder = str_len - i * n[0]
        if n[1] != 0:
            # The piece lengths must add up to len(astring) exactly.
            # Skip candidates where the leftover is not divisible among
            # the 'b' pieces.  (The original asserted here and crashed on
            # such inputs; it also used "/", which yields a float -- and
            # a TypeError in range() -- on Python 3.)
            if remainder % n[1] != 0:
                continue
            x = (i, remainder // n[1])
        else:
            x = (i, 0)
        if is_solution(x, pattern, astring):
            return True
    return False
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring.
    import doctest
    if doctest.testmod().failed == 0:
        # Python 2 print statement: this script predates Python 3.
        print "\n*** ALL TESTS PASSED. WE'RE WELL-MATCHED!\n"
| true |
b1bb4f33c87c9f53da18b2477c537f563c5caa43 | Python | nyu-cds/yfc259_assignment3 | /mpi_assignment_2.py | UTF-8 | 1,845 | 4.0625 | 4 | [] | no_license | """
Author: Yu-Fen Chiu
Date: 04/16/2017
Question 2:
Write an MPI program in which the first process get an integer less than 100 and send to the second process.
Then each process multiples the number with their ranks and continues to the last process.
Then the final result is send to the first process and printed.
Command used: mpiexec -n <size> python mpi_assignment_2.py
"""
from mpi4py import MPI
import numpy
# Ring pipeline: rank 0 reads a validated integer and sends it to rank 1;
# each subsequent rank multiplies the value by its own rank and forwards
# it; the last rank sends the result back to rank 0, which prints it.
comm = MPI.COMM_WORLD ## the communicator
rank = comm.Get_rank() ## the rank of the calling process within the communicator
size = comm.Get_size() ## the total number of processes contained in the communicator
randNum = numpy.zeros(1) ## one-element float64 buffer used for Send/Recv
if rank == 0:
    while True:
        ## ask user to enter a number
        user_input = input("Tell me an integer less than 100: ")
        ## verify that user-entered number is an integer
        try:
            number = int(user_input)
            ## verify that user-entered is less than 100 (and positive)
            if number < 100 and number > 0:
                break
            else:
                print('The number you enter is invalid. Please enter an integer between 0 and 100!')
                continue
        except ValueError:
            print('The number you enter is not an integer. Please try again!')
            continue
    randNum[0] = number
    ## send a message to process 1
    comm.Send(randNum, dest = 1)
    ## receive the final result
    comm.Recv(randNum, source = size - 1)
    print(randNum[0])
else:
    ## receive a message from previous process
    comm.Recv(randNum, source = rank - 1)
    ## multiply by rank
    randNum *= rank
    ## check if it is the last one, then send a message to proper destination
    if rank == size - 1:
        comm.Send(randNum, dest = 0)
    else:
        comm.Send(randNum, dest = rank + 1)
a60768771d974a354b8ea6b4ba2f74e43f4c355d | Python | JoseEvanan/frikr | /users/serializers.py | UTF-8 | 2,307 | 2.984375 | 3 | [] | no_license | from django.contrib.auth.models import User
from rest_framework import serializers
class UserSerializer(serializers.Serializer):
    """Serializes and validates Django ``User`` objects for the REST API."""

    id = serializers.ReadOnlyField()  # primary key, read-only
    first_name = serializers.CharField()
    last_name = serializers.CharField()
    username = serializers.CharField()
    email = serializers.EmailField()
    password = serializers.CharField()

    def create(self, validated_data):
        """
        Create a User instance from the deserialized values.

        :param validated_data: dictionary of user data
        :return: the saved User object
        """
        instance = User()
        return self.update(instance, validated_data)

    def update(self, instance, validated_data):
        """
        Update a User instance from the deserialized values.

        :param instance: User object to update
        :param validated_data: dictionary of user data
        :return: the saved User object
        """
        instance.first_name = validated_data.get('first_name')
        instance.last_name = validated_data.get('last_name')
        instance.username = validated_data.get('username')
        instance.email = validated_data.get('email')
        # set_password hashes the value instead of storing plain text.
        instance.set_password(validated_data.get('password'))
        instance.save()
        return instance

    def validate_username(self, data):
        """
        Reject the username when another user already owns it.

        Creating (no instance): any existing user with this username is
        a conflict.  Updating: only a conflict when the username actually
        changed and the new value is already taken.
        """
        # .exists() avoids fetching rows just to count them; the stray
        # debug print(data) from the original was removed.
        username_taken = User.objects.filter(username=data).exists()
        if not self.instance and username_taken:
            raise serializers.ValidationError(
                " Ya existe un usuario con ese username ")
        if (self.instance and self.instance.username != data
                and username_taken):
            raise serializers.ValidationError(
                " Ya existe un usuario con ese username ")
        return data
#965079339
| true |
668c2af8c90533f05fdd070449e257183f600bec | Python | shepdl/Russian-Text-Mining | /screen-scraping/extract_author_links.py | UTF-8 | 5,010 | 2.828125 | 3 | [] | no_license | # coding=utf8
__author__ = 'Dave Shepard'
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
class AuthorLinkExtractor(object):
    """Scrapes an author page with Selenium and downloads each work's text.

    The page is expected to contain a works list as a <dl>, where each
    entry holds a link, a bold title and a <small> publication year.
    """
    def __init__(self):
        # One shared Firefox instance for every page this extractor visits.
        self.browser = webdriver.Firefox()
    def get_links_from_individual_sections(self, author_link, sections):
        """Collect works from the named sections only, then download them."""
        parser = ParseSectionList(sections)
        work_links = []
        self.browser.get(author_link)
        # The works list is usually under body/dd; fall back to body/dl.
        try:
            works_container = self.browser.find_element_by_xpath('.//body/dd/dl')
        except NoSuchElementException:
            works_container = self.browser.find_element_by_xpath('.//body/dl')
        # The Parse* objects form a small state machine: each call returns
        # the parser to use for the next element, or None to stop.
        for link in works_container.find_elements_by_xpath('*'):
            parser = parser.handle_element(link, work_links)
            if parser is None:
                break
        return self.download_files(work_links)
    # JavaScript run on each work page to strip the banner before reading
    # the body text.
    delete_header_script = """
    document.querySelectorAll('body > center')[0].remove();
    """
    def download_files(self, work_links):
        """Visit each (title, year, url) entry and capture the page text."""
        work_and_text = []
        for title, pub_year, url in work_links:
            self.browser.get(url)
            self.browser.execute_script(self.delete_header_script)
            work_container = self.browser.find_element_by_css_selector('body')
            work_and_text.append(
                [title, pub_year, url, work_container.text,]
            )
        # Python 2 print statement: this module predates Python 3.
        print "Downloaded all texts"
        return work_and_text
    def get_links_from_section_range(self, author_link, sections):
        """Collect works between sections['from_section'] and ['to_section']."""
        from_section = sections['from_section']
        to_section = sections['to_section']
        parser = ParseRange(from_section, to_section)
        work_links = []
        self.browser.get(author_link)
        try:
            works_container = self.browser.find_element_by_xpath('.//body/dd/dl')
        except NoSuchElementException:
            try:
                works_container = self.browser.find_element_by_xpath('.//body/dl')
            except NoSuchElementException:
                works_container = self.browser.find_element_by_xpath('.//body/li/dl')
        for link in works_container.find_elements_by_xpath('*'):
            if parser is None:
                break
            parser = parser.handle_element(link, work_links)
        return self.download_files(work_links)
    def close(self):
        """Shut the shared browser down."""
        self.browser.quit()
class ParseRange(object):
    """Parser state scanning for works between two section headings."""
    def __init__(self, from_section_name, until_section_name):
        self.from_section_name = from_section_name
        self.until_section_name = until_section_name
        self.parsing = False  # set once the from-section heading is seen
    def handle_element(self, link, work_links):
        if link.tag_name == 'p':
            # Drop the trailing character of the heading text
            # (presumably punctuation -- TODO confirm).
            link_text = link.text.strip()[0:-1]
            if link_text == self.until_section_name:
                return None
            if link_text == self.from_section_name or u'beginning' in self.from_section_name or self.parsing:
                self.parsing = True
                return ParseValidSection(self)
            # NOTE(review): equality with until_section_name already
            # returned above, so this condition is always true here and
            # the `return self` below is unreachable for <p> elements --
            # every heading ends up entering ParseValidSection.
            if link_text != self.until_section_name:
                return ParseValidSection(self)
            return self
        else:
            return self
class ParseSectionList(object):
    """Parser state collecting works only from whitelisted sections."""
    def __init__(self, valid_section_list):
        self.valid_section_list = valid_section_list
        self.found_sections = []  # wanted sections already encountered
    def handle_element(self, link, work_links):
        if link.tag_name == u'p':
            # Stop once every requested section has been seen.
            if len(self.found_sections) == len(self.valid_section_list):
                # Python 2 print statement: this module predates Python 3.
                print u"Completed {}".format(', '.join(self.valid_section_list))
                return None
            # The heading text before the colon names the section.
            link_text = link.text.split(':')[0].strip()
            if link_text in self.valid_section_list:
                self.found_sections.append(link_text)
                return ParseValidSection(self)
            else:
                return self
        elif link.tag_name == 'dl':
            # skip sections we aren't interested in
            return self
        else:
            return self
class ParseValidSection(object):
    """Parser state inside a wanted section: records each <dl> work entry."""
    def __init__(self, parent):
        self.parent = parent  # state to delegate to when a new heading appears
    def handle_element(self, link, work_links):
        if link.tag_name == u'dl':
            link_element = link.find_elements_by_xpath('.//a[@href]')[0]
            title_element = link.find_elements_by_xpath('.//a[@href]/b')[0]
            # If the first bold link is just a "New" badge, the real title
            # is presumably in the next link -- TODO confirm against pages.
            if title_element.text.strip() == u'New':
                title_element = link.find_elements_by_xpath('.//a[@href]')[1]
            # Strip the outer pair of characters around the year text.
            pub_year = link.find_elements_by_xpath('.//small')[0].text[1:-1]
            # Python 2 print statement: this module predates Python 3.
            print u"Found {}, published in {}".format(title_element.text, pub_year)
            url = link_element.get_attribute('href')
            work_links.append(
                (title_element.text, pub_year, url, )
            )
            return self
        elif link.tag_name == u'p' or link.tag_name[0] == u'h': # tag is <p> or <h[1-x]>
            # A new heading means this section ended; let the parent decide.
            return self.parent.handle_element(link, work_links)
        else:
            # Ignore other elements
            return self
| true |
0d8fe3230af77e51eb67fb56b0e244feeeba0db1 | Python | MidD3Vil/Consulta-DNS | /main.py | UTF-8 | 3,483 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | import time
import socket
from requests import get
import smtplib
import socket
import os
# Interactive console tool: resolve a hostname the user types (DNS query),
# show this machine's public IP, or print an explanation screen.
def host():
    """Resolve a user-supplied hostname and print the resulting addresses."""
    print('')
    print('Olá!')
    time.sleep(0.3)
    print('Aqui, você pode pegar endereçamentos de específicos hostnames.\n')
    time.sleep(0.5)
    hostname = input('Consulte uma DNS: ')
    # Bug fix: the original immediately overwrote the user's answer with
    # socket.gethostname(), so it always reported the local machine
    # instead of the host the user asked about.
    intern = socket.gethostbyname(hostname)
    extern = get('https://api.ipify.org').text
    print()
    print(f'Host: {hostname}')
    print(f'IP Interno: {intern}')
    print(f'IP Externo: {extern}')


def show_help():
    """Print the explanation screen (renamed: ``help`` shadowed the builtin)."""
    print('###########################################################################################################\n')
    print('O QUE É?\n')
    print('O IP é o seu endereço na internet. É por ele que o seu computador se comunica com outros computadores.\n'
          ' Ele pode ser estático (não muda) ou dinâmico (muda com o tempo) e é atribuído pela sua operadora de internet.\n')
    print('Todo Site possui um local o qual recebe Host, ou seja, é Hospedado de uma máquina.')
    print('Quando consultamos uma DNS através da Hostname, estamos checando os dados do Servidor que está fornecendo Host\n')
    print('###########################################################################################################\n')
    print('PARA QUE É ÚTIL?\n')
    print('Com o IP de Host de um determinado Site em mãos, podemos não só apenas saber a geo localização, a qual\n'
          ' o servidor está sendo mantido, como também podemos usar disto para outras coisas\n')
    print('Se uma pessoa cometer algum crime virtual a polícia pode descobrir o endereço verdadeiro do criminoso\n'
          'através do IP procurando a operadora de internet que vai consultar o banco de dados deles onde estão\n'
          'listados todos os clientes e horários mostrando quem usava qual IP e quando. Então se por acaso,'
          ' acabar\nsendo vítima de golpe através de alguma "http", esta ferramenta o ajudará na busca do responsável.\n')
    print('Além disto existem diversos atributos para a área de Pen Tester, tais como:')
    print('--> Invasão de computadores via exploit, que é alguma falha específica presente em algum software de uma máquina'
          ' ligada a rede.')
    print('--> Ataques de DDOS, que é derrubar uma conexão (ou torna-la instável) enviando várias requisições por segundo,'
          ' gerando\n uma sobrecarga na rede onde o dispositivo com aquele IP está conectado.\n')
    print('###########################################################################################################')


def clear_screen():
    """Clear the terminal using the command appropriate for this OS."""
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')


print('')
restart = 'S'
while restart == 'S':
    clear_screen()
    print('#####################')
    time.sleep(0.5)
    print('Consulta de IP por DNS')
    time.sleep(0.5)
    print('Tool by Dr Midnight')
    time.sleep(0.5)
    print('#####################\n')
    time.sleep(0.5)
    print('Iniciando o script...')
    time.sleep(2)
    print()
    print('[1] Puxar IP de Hosts')
    print('[2] Do que se trata?')
    mid = input("\n>>> Escolha a opção: ")
    if mid == '1':
        host()
    elif mid == '2':
        show_help()
    # [:1] instead of [0]: an empty answer now ends the loop instead of
    # crashing with IndexError.
    restart = str(input('\nDeseja realizar outra consulta S/N? ')).strip().upper()[:1]
    print('')
    # Bug fix: this final clear was hard-coded to 'cls', which fails on
    # non-Windows systems.
    clear_screen()
| true |
7ed5525feae66263b45558b0906f937835f7c669 | Python | SatyaChipp/PythonImpls | /LinkedLists/doublyLinkedLists.py | UTF-8 | 2,538 | 3.609375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 7 16:13:51 2018
@author: Jen
"""
class Node(object):
    """A doubly-linked-list node holding ``data`` plus prev/next links."""
    def __init__(self, data, prev=None, next=None):
        self.data = data
        self.prev = prev
        self.next = next


class doubleLinkedLists(object):
    """A doubly linked list tracked by ``head`` and ``tail`` pointers.

    NOTE(review): head/tail are class attributes acting as per-instance
    defaults; each instance gets its own values on first insert.
    """
    head = None
    tail = None

    def size(self):
        """Return the number of nodes by walking the list."""
        count = 0
        current = self.head
        while current:
            count += 1
            current = current.next
        return count

    def insert(self, data, pos=None):
        """Insert ``data``; ``pos`` of None appends at the tail.

        Fixes over the original: pos=None on a non-empty list no longer
        raises TypeError (it now appends); splicing in after the current
        tail no longer dereferences None and keeps ``tail`` updated; the
        leftover debug prints were removed.  The original's placement
        semantics for the walking branch are preserved.
        """
        new_node = Node(data)
        if self.head is None:
            # Empty list: the new node is both head and tail.
            self.head = self.tail = new_node
        elif pos is None or pos == self.size():
            # Append at the tail.
            new_node.prev = self.tail
            new_node.next = None
            self.tail.next = new_node
            self.tail = new_node
        elif pos == 0:
            # Prepend at the head.
            new_node.next = self.head
            new_node.prev = None
            self.head.prev = new_node
            self.head = new_node
        elif pos < self.size():
            # Walk to index ``pos`` and splice the node in after it.
            current = self.head
            hops = 0
            while current.next and hops < pos:
                hops += 1
                current = current.next
            new_node.prev = current
            new_node.next = current.next
            if current.next is not None:
                current.next.prev = new_node
            else:
                self.tail = new_node  # spliced in after the old tail
            current.next = new_node

    def pop(self, data):
        """Remove every node whose data equals ``data``.

        Fixes over the original: middle removals relink ``next.prev`` to
        the correct node (was ``current.prev.prev``), removing the tail
        or the only node no longer dereferences None, and the ``tail``
        pointer is kept up to date.
        """
        current = self.head
        while current:
            nxt = current.next
            if current.data == data:
                if current.prev is not None:
                    current.prev.next = current.next
                else:
                    self.head = current.next  # removed the head
                if current.next is not None:
                    current.next.prev = current.prev
                else:
                    self.tail = current.prev  # removed the tail
            current = nxt

    def printLL(self):
        """Print prev/own/next data for every node, front to back."""
        print("show data")
        current = self.head
        while current:
            print(current.prev.data if hasattr(current.prev, "data") else None)
            print(current.data)
            print(current.next.data if hasattr(current.next, "data") else None)
            current = current.next
if __name__ == "__main__":
    # Quick manual demo: build the list 8 -> 7 -> 6 -> 5 and dump it.
    dll = doubleLinkedLists()
    dll.insert(5)
    dll.insert(6, 0)
    dll.insert(7, 0)
    dll.insert(8, 0)
    dll.printLL()
| true |
38ea8c6030bf0d671a7234eef823eea98b16702c | Python | SeseelLybon/Minefield | /source/statemanager.py | UTF-8 | 2,458 | 2.625 | 3 | [] | no_license |
from enum import Enum
from enum import auto
import pyglet
from button import Button
from chunkmanager import ChunkManager
from scoremanager import ScoreManager
from configmanager import ConfigManager
class MineField:
    """In-game state: draws the chunk field plus the score HUD labels."""
    # Window size (indexed as [0]=width, [1]=height) from the config file.
    window_size = ConfigManager.config_dict.get("window_size")
    # HUD labels anchored near the top-left corner of the window.
    score_label = pyglet.text.Label('score: ' + str(0),
                                    font_name='Times New Roman',
                                    font_size=12,
                                    x=50, y=window_size[1] - 50,
                                    anchor_x='left', anchor_y='center')
    clearedtiles_label = pyglet.text.Label('Tiles cleared: ' + str(0),
                                           font_name='Times New Roman',
                                           font_size=12,
                                           x=50, y=window_size[1] - 75,
                                           anchor_x='left', anchor_y='center')
    @classmethod
    def draw(cls, offset, window):
        """Refresh the HUD text, draw/update visible chunks, draw the HUD."""
        cls.score_label.text = 'score: ' + str(ScoreManager.getscore())
        cls.clearedtiles_label.text = 'Tiles cleared: ' + str(ScoreManager.getclearedtiles())
        cls.score_label.draw()
        cls.clearedtiles_label.draw()
class MainMenu:
    """Menu state: draws the background chunks plus the menu buttons."""
    # Window size (indexed as [0]=width, [1]=height) from the config file.
    window_size = ConfigManager.config_dict.get("window_size")
    # Menu buttons centered horizontally, stacked from the top down.
    buttons_dict = {"button_New": Button("New game", pos=(window_size[0]//2, window_size[1]-150)),
                    "button_Load": Button("Load game", pos=(window_size[0]//2, window_size[1]-200)),
                    "button_Save": Button("Save game", pos=(window_size[0]//2, window_size[1]-250)),
                    "button_Exit": Button("Exit game", pos=(window_size[0]//2, window_size[1]-300))}
    @classmethod
    def draw(cls, offset, window):
        """Draw the background chunks, then every menu button."""
        ChunkManager.screenspaceocclude_drawchunks(offset, (window.width, window.height))
        for button in cls.buttons_dict.values():
            button.draw(offset, window)
    @classmethod
    def getbuttonclicked(cls, mouse_pos, window):
        """Return the first button containing ``mouse_pos``, else None.

        ``window`` is currently unused but kept for interface symmetry.
        """
        #TODO: rewrite to ask button for collision
        for button in cls.buttons_dict.values():
            if button.getcollision(mouse_pos):
                return button
        return None
# Registry mapping state names to their handler classes.
state_dict = {"MineField":MineField,
              "MainMenu":MainMenu}
14a10b80d44d499bb4935a6909157798429c3c12 | Python | Vijendrapratap/Machine-Learning | /Week4/Matplotlib/Barchart/1.WAP to display a bar chart of the popularity of programming Languages.py | UTF-8 | 572 | 4.09375 | 4 | [] | no_license | """
Write a Python programming to display a bar chart of the popularity of programming Languages.
Sample data:
Programming languages: Java, Python, PHP, JavaScript, C#, C++
Popularity: 22.2, 17.6, 8.8, 8, 7.7, 6.7
"""
import matplotlib.pyplot as plt

# Chart data: language names and their popularity percentages.
languages = ['Java', 'Python', 'PHP', 'JavaScript', 'C#', 'C++']
popularity = [22.2, 17.6, 8.8, 8, 7.7, 6.7]
# One bar position per language.  range(len(...)) replaces the needlessly
# indirect [i for i, _ in enumerate(...)] comprehension.
x_pos = list(range(len(languages)))
plt.bar(x_pos, popularity, color='green')
plt.xlabel("Languages")
plt.ylabel("Popularity")
plt.title("PopularitY of Programming Language\n")
plt.grid()
plt.show()
8858ec01ae81627b0dea8d4e0fcc9dca437a1481 | Python | fmpn2/teste-lista-1-gracafpaula | /questao2.py | UTF-8 | 229 | 3.734375 | 4 | [] | no_license | #peca ao usuario um numero e verifique se este numero eh par. imprima na tela o resultado da sua verificacao.
def eh_par(numero):
    """Return True when ``numero`` is even."""
    return numero % 2 == 0


if __name__ == "__main__":
    # Ask the user for a number and report whether it is even.
    numero = int(input("digite um numero"))
    if eh_par(numero):
        print("numero eh par")
    else:
        print("numero nao eh par")
| true |
4a842258855a96898e4af2cb5f3e3f7379449e83 | Python | noahsmitty/cs121cities | /main.py | UTF-8 | 4,916 | 3.109375 | 3 | [] | no_license | """
AUTHORS: Josh Cheung, Noah Smith, Arun Ramakrishna, Chuksi Emuwa, Giovanni Castro
DESCRIPTION: Our main app file using Flask and Fastai, responsible for taking an
input image from our frontend, sending it to the appropriate models,
returning a prediction, and routing between our two webpages.
"""
import os
from os import path
import sys
import pickle
import urllib.request
from fastai.learner import load_learner
from flask import Flask, flash, request, redirect, url_for, render_template
from flask import current_app, send_from_directory
from werkzeug.utils import secure_filename
# Directory where uploaded images are stored between upload and display.
UPLOAD_FOLDER = '/tmp/'

app = Flask(__name__)
# NOTE(review): hard-coded secret key -- fine for a demo, but it should
# come from configuration/environment in production.
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16 MB upload cap
# File extensions accepted for upload (set literal instead of set([...])).
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
def allowed_file(filename):
    """Return True when *filename* carries an extension in ALLOWED_EXTENSIONS.

    Only the text after the last dot counts as the extension, and the
    comparison is case-insensitive.
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def display_text(filename):
    """
    Run the two-stage model pipeline on an uploaded image and return the
    predicted city as a display string.

    Stage 1 classifies the image as a landmark or a skyline photo; stage 2
    loads the matching city model and predicts the city label.
    :param filename: filename of the input image (inside UPLOAD_FOLDER).
    :return: an f-string of the prediction, shown on our HTML page.
    """
    model_dir = "models/"
    # run initial landmark vs skyline model
    learn_inf_categorize = load_learner(model_dir + "categorizeCPU.pkl", cpu=True)
    category = learn_inf_categorize.predict(UPLOAD_FOLDER + filename)[0]
    # Pick the city model matching the first-stage category.
    if category == "landmarks":
        learn_inf = load_learner(model_dir + "landmarkModelCPU.pkl", cpu=True)
    else:
        learn_inf = load_learner(model_dir + "skylineModelCPU.pkl", cpu=True)
    pred = learn_inf.predict(UPLOAD_FOLDER + filename)[0]
    # Map the model's lowercase labels to display names; anything not in
    # the table falls back to "New Delhi", matching the original chain of
    # if/elif branches.
    display_names = {
        "san francisco": "San Francisco",
        "new york": "New York",
        "london": "London",
        "tokyo": "Tokyo",
    }
    pred = display_names.get(pred, "New Delhi")
    return f"Prediction: {pred}"
@app.route('/', methods=['POST', 'GET'])
def welcome():
    """
    Landing page: GET renders it; a POST (the start button) forwards the
    visitor to the predictor page.
    """
    if request.method != 'POST':
        return render_template('welcome.html')
    return redirect(url_for('upload_image'))
@app.route('/predict')
def upload_form():
    """
    Render the main prediction page (route /predict).
    :return: the upload.html template.
    """
    return render_template('upload.html')
@app.route('/predict', methods=['POST'])
def upload_image():
    """
    Handle an image upload: clear old images from the upload folder, save
    the new file, run the models on it, and re-render the page with the
    prediction.
    :return: the upload.html page with results, or a redirect back on error.
    """
    path = UPLOAD_FOLDER
    file_list = os.listdir(path)
    # this code clears everything in our image folder before we upload a new one
    for f_n in file_list:
        # NOTE(review): the suffix check is loose (a name like "xjpg" also
        # matches) and the bare except hides unexpected deletion errors.
        if f_n[-3:] == "jpg" or f_n[-4:] == "jpeg" or f_n[-3:] == "png":
            try:
                os.remove(path+f_n)
            except:
                print(f"Error while deleting file {path+f_n}")
    # check if our file is in request
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    # saves our uploaded file into appropriate folder
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        full_fn = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        # NOTE(review): the prediction uses the raw file.filename while the
        # file was saved under the sanitized name -- confirm they always
        # match for the filenames this app accepts.
        predict = display_text(file.filename)
        return render_template('upload.html', filename=filename, prediction=predict,\
            image_fn=full_fn)
    flash('Allowed image types are -> png, jpg, jpeg')
    return redirect(request.url)
@app.route('/tmp/<path:filename>', methods=['GET', 'POST'])
def download(filename):
    """
    Send an uploaded image back to the frontend from the upload directory.
    :param filename: name of the uploaded file to serve.
    :return: the file, served from the image directory /tmp/.
    """
    uploads = os.path.join(current_app.root_path, app.config['UPLOAD_FOLDER'])
    # NOTE(review): newer Flask versions renamed send_from_directory's
    # `filename` parameter -- verify against the Flask version in use.
    return send_from_directory(directory=uploads, filename=filename)
if __name__ == "__main__":
    # Development server only; use a proper WSGI server in production.
    app.run(debug=True)
| true |
2fbe30bac55108665810c796c55cce508a0fcf67 | Python | bam-lab/MagneticMicromanipulatorAPI | /Demagnetization/demag.py | UTF-8 | 3,408 | 3 | 3 | [
"MIT"
] | permissive | import time
import RPi.GPIO as GPIO
from getField import getField, getFieldAvg
# The main functions are:
# print(getField())
# print(getFieldAvg(int))
from power_supply import PowerSupply
# Bench power supply on the first USB serial port; shared by every routine below.
ps = PowerSupply('/dev/ttyUSB0')
def calibrate():
    """Measure the baseline ("no field") sensor reading.

    Averages 20 field samples via getFieldAvg and returns the result.
    """
    print("Calibrating no field condition...")
    no_field = getFieldAvg(20)
    # This message was unreachable before: it followed the return statement.
    print("Calibration complete.")
    return no_field
def signum(value):
    """Return -1, 0 or +1 matching the sign of ``value``.

    The original ``int(value/abs(value))`` raised ZeroDivisionError for
    zero; zero now maps to 0.
    """
    if value == 0:
        return 0
    return 1 if value > 0 else -1
def demagCurrent(noField):
    """Demagnetize the probe toward the calibrated baseline ``noField``.

    Saturates the magnet first, then pulses the supply (via the two
    GPIO-controlled relays on pins 5 and 6) while monitoring the sensor,
    stopping once the reading is within 0.4% of ``noField`` or its sign
    relative to the baseline flips.  Blocks on sleeps and one operator
    prompt.
    """
    print('noField: %f' % noField)
    # GPIO setup
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(5, GPIO.OUT)
    GPIO.setup(6, GPIO.OUT)
    GPIO.output(6, GPIO.HIGH)
    GPIO.output(5, GPIO.LOW)
    # Saturate with a large steady current before demagnetizing.
    print('Ensuring saturation, please wait.')
    current = 1.5
    ps.set_current(current)
    ps.enable_output()
    time.sleep(10) # apply current for 10 seconds to reach saturation
    ps.disable_output()
    current = 0.05
    GPIO.output(5, GPIO.HIGH)
    GPIO.output(6, GPIO.HIGH)
    # position sensor
    input('Position sensor and press Enter to begin demagnetization')
    presentField = getField()
    original_sign = signum(presentField - noField)
    print('presentField %f' % presentField)
    print('Original sign: %f' % original_sign)
    ps.set_current(current)
    # Pulse up to 15 times; the `... is False` test below breaks out once
    # the field is within tolerance or the sign relative to noField flips.
    for i in range(15):
        print("The 0-field value is:\n" + str(noField))
        GPIO.output(6, GPIO.LOW)
        time.sleep(0.1)
        ps.enable_output()
        time.sleep(0.03) # allows ps power-on delay before opening the cct
        GPIO.output(6, GPIO.HIGH)
        ps.disable_output()
        time.sleep(0.5) # delay for inductance before field reading
        presentField = getField()
        if(abs(presentField - noField) > 0.004*noField and
           signum(presentField - noField) == original_sign) is False:
            break
    # Overshoot: the field crossed the baseline.  Pulse up to 5 more
    # times with the relays driven the other way (presumably reversing
    # polarity -- TODO confirm against the wiring).
    if(abs(presentField - noField) > 0.004*noField and
       signum(presentField - noField) != original_sign):
        print("Overshoot of " + str(abs(presentField-noField)))
        current = 0.05
        ps.set_current(current)
        for i in range(5):
            GPIO.output(6, GPIO.HIGH)
            GPIO.output(5, GPIO.LOW)
            ps.enable_output()
            ps.disable_output()
            # time.sleep(0.001)#current duration
            GPIO.output(5, GPIO.HIGH)
            time.sleep(0.5)
            presentField = getField()
            if(abs(presentField - noField) > 0.004*noField and
               (signum(presentField - noField) != original_sign)) is False:
                break
    time.sleep(1)
    presentField = getField()
    print("Final Field is: " + str(presentField))
    print("Off by: " + str(presentField-noField))
    # cleanup
    GPIO.cleanup()
    ps.disable_output()
def relay_switch(n):
    """Toggle the relays on BCM pins 5 and 6 *n* times at ~14 ms per cycle.

    The supply current starts at 1.1 A and is stepped down by 0.1 A every
    10 cycles. Relies on module-level GPIO, ps and time.
    NOTE(review): for n > ~110 the requested current goes negative —
    confirm ps.set_current handles that (or that n is always small).
    """
    # setup
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(5, GPIO.OUT)
    GPIO.setup(6, GPIO.OUT)
    GPIO.output(5, GPIO.HIGH)
    GPIO.output(6, GPIO.HIGH)
    current = 1.1
    ps.set_current(current)
    ps.enable_output()
    for i in range(n):
        # Relays energized
        GPIO.output(5, GPIO.HIGH)
        GPIO.output(6, GPIO.HIGH)
        time.sleep(0.007)
        # Relays de-energized
        GPIO.output(5, GPIO.LOW)
        GPIO.output(6, GPIO.LOW)
        time.sleep(0.007)
        # every 10th cycle (including the first, i == 0): re-apply the
        # current setting, then step it down for the next batch
        if i % 10 == 0:
            ps.set_current(current)
            current += -0.1
    # cleanup
    GPIO.cleanup()
    ps.disable_output()
| true |
ddde5531f9263ef392eb24dd66a8b894bf77c6f0 | Python | adamluna/web-335 | /week-8/luna_calculator.py | UTF-8 | 657 | 4.53125 | 5 | [] | no_license | # ========================================
# Title: luna_calculator.py
# Author: Adam Luna
# Date: 9 May 2021
# Description: Create a calculator using Python.
# ========================================
# Sum two values and return the total.
def add(add1, add2):
    """Return the sum of add1 and add2."""
    total = add1 + add2
    return total
# Subtract the second value from the first and return the difference.
def subtract(sub1, sub2):
    """Return sub1 minus sub2."""
    difference = sub1 - sub2
    return difference
# Divide the first value by the second and return the quotient.
def divide(div1, div2):
    """Return div1 divided by div2 (propagates ZeroDivisionError when div2 is 0)."""
    quotient = div1 / div2
    return quotient
# Exercise each calculator function and print its result.
for outcome in (add(1, 1), subtract(4, 4), divide(6, 3)):
    print(outcome)
ff57e2abb92625ac3fe060026d9be84d0058ddfc | Python | jhonatang1988/holbertonschool-web_back_end | /0x01-python_async_function/3-tasks.py | UTF-8 | 306 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python3
"""
tasks
"""
import asyncio
wait_random = __import__('0-basic_async_syntax').wait_random
def task_wait_random(max_delay: int) -> asyncio.Task:
    """Wrap the wait_random coroutine in an asyncio.Task.

    :param max_delay: maximum delay forwarded to wait_random
    :return: the asyncio.Task wrapping the coroutine
    """
    task = asyncio.Task(wait_random(max_delay))
    return task
| true |
6c4e1f8183528cdb1f62afad08c472383e6cb0b3 | Python | SamelaBruna/Curso-Python | /ex058.py | UTF-8 | 492 | 3.875 | 4 | [] | no_license | from random import randint
# Guessing game: pick a random number in [0, 10] and count the player's
# attempts until the guess matches.
numRand = randint(0, 10)
tentativa = 1  # the first guess counts as attempt number one
jogada = int(input('Vamos Jogar!?\nAcabei de pensar em um número entre 0 e 10, será que você consegue adivinhar? '))
while numRand != jogada:
    # hint whether the secret number is higher or lower, then re-prompt
    if jogada < numRand:
        jogada = int(input('Mais...Tente novamente: '))
    elif jogada > numRand:
        jogada = int(input('Menos...Tente novamente: '))
    # fix: the counter was incremented/printed as "palpite", which was never
    # initialized (NameError); count every retry on "tentativa" instead
    tentativa += 1
print('Você acertou!! Após {} tentativas'.format(tentativa))
022e455a34b15c1f400d53042c75e384ccc1af41 | Python | Arc676/Advent-of-Code-2017 | /Day6/realloc2.py | UTF-8 | 696 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
def findMax(arr):
    """Return (index, value) of the largest element of arr.

    Ties resolve to the lowest index, exactly as the original linear scan
    did (max() keeps the first maximal item, since later equal values never
    compare greater).
    """
    best_index, best_value = max(enumerate(arr), key=lambda pair: pair[1])
    return best_index, best_value
# Advent of Code 2017, day 6 part 2: measure the length of the
# memory-reallocation loop (Python 2 script).
filename = raw_input("Enter filename: ")
file = open(filename, 'r')
# first line of the input holds the tab-separated block counts of the banks
banks = [int(i) for i in file.readline().split('\t')]
states = []  # every configuration seen so far, for repeat detection
reallocations = 0  # cycles performed (reset once the repeated state is found)
toRepeat = []  # the first configuration observed twice; marks the loop start
while True:
	# pick the fullest bank (lowest index wins ties) and redistribute
	# its blocks one at a time over the following banks, wrapping around
	index, bankCount = findMax(banks)
	banks[index] = 0
	while bankCount > 0:
		index += 1
		bankCount -= 1
		banks[index % len(banks)] += 1
	reallocations += 1
	if banks in states:
		if len(toRepeat) == 0:
			# first repeat ever: remember this state and restart the
			# counter to measure the loop length from here
			toRepeat = banks[:]
			reallocations = 0
		else:
			# back at the marker state: the loop length is measured
			if banks == toRepeat:
				break
	states.append(banks[:])
print reallocations, "reallocations needed"
| true |
d3b371bc0f0ec1cda70ac3d83485a28e2ce69c90 | Python | Biju-vinoth/LPHW | /ex11.py | UTF-8 | 597 | 3.484375 | 3 | [] | no_license | '''
#=============================================================================
# FileName: ex11.py
# Desc: receive the input from keyboard
# Author: Honglong Wu
# Email: wuhonglong@genomics.cn
# Version: 0.0.1
# Created: 04/03/2015
# History:
#=============================================================================
'''
#-*- coding: utf-8 -*-
print "How old are you?",
age = raw_input()
print "How tall are you?",
height = raw_input()
print "How much do you weight?",
weight = raw_input()
print "So you're %r old, %r tall and %r heavy." % (age, height, weight)
| true |
4f9d56da858336948b70b2f45970398b5d904f27 | Python | nicolaspoulain/travaux | /python/curses_example.py | UTF-8 | 1,465 | 3.6875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import curses
"""
pad = curses.newpad(100, 100)
# These loops fill the pad with letters; this is
# explained in the next section
for y in range(0, 100):
for x in range(0, 100):
try:
pad.addch(y,x, ord('a') + (x*x+y*y) % 26)
except curses.error:
pass
# Displays a section of the pad in the middle of the screen
pad.refresh(0,0, 5,5, 20,75)
"""
# Minimal interactive curses demo: redraw the screen on every keypress.
screen = curses.initscr()  # take over the terminal; returns the main window
"""
Usually curses applications turn off automatic echoing of keys to the
screen, in order to be able to read keys and only display them under
certain circumstances. This requires calling the noecho() function.
"""
curses.noecho()
"""
If your application doesn’t need a blinking cursor at all, you can call
curs_set(0) to make it invisible.
"""
curses.curs_set(0)
"""
Terminals usually return special keys, such as the cursor keys or navigation
keys such as Page Up and Home, as a multibyte escape sequence. Enable keypad
mode to return such sequences as special values such as curses.KEY_LEFT.
"""
screen.keypad(1)
# Event loop: 'n' shows the alternate text, any other key shows the default
# text, and 'q' redraws the default text then exits the loop.
while True:
    event = screen.getch()
    if event == ord("n"):
        screen.clear()
        screen.addstr("This is a NN\n\n")
    else:
        screen.clear()
        screen.addstr("This is a Sample Curses Script\n\n")
    if event == ord("q"):
        break
"""
Then call the endwin() function to restore the terminal to its original
operating mode.
"""
curses.endwin()
| true |
425827c2fef1323e90fd9ebdaafa863501059546 | Python | CourseraK2/ud617Intro2HadoopMR | /code/lesson_4_weekday_sum_red.py | UTF-8 | 550 | 2.890625 | 3 | [] | no_license | #!/usr/bin/python
import sys
def reducer():
    """Hadoop-streaming reducer: sum values per key.

    Reads tab-separated "key<TAB>value" lines from stdin (sorted by key, as
    Hadoop streaming guarantees) and prints one "key<TAB>total" line per
    distinct key. Malformed lines are skipped.
    """
    old_key = None
    total = 0
    for line in sys.stdin:
        data = line.strip().split("\t")
        if len(data) != 2:
            # skip malformed lines
            continue
        this_key, sale = data
        if old_key and old_key != this_key:
            # key changed: flush the finished group and restart the sum
            print("{0}\t{1}".format(old_key, total))
            total = 0
        old_key = this_key
        total += float(sale)
    # flush the final group; skip entirely on empty input (the old code
    # referenced this_key here, raising NameError when stdin was empty)
    if old_key is not None:
        print("{0}\t{1}".format(old_key, total))
def main():
    """Entry point: run the reducer against the real standard input."""
    # restore the true stdin in case it was swapped out (e.g. for testing);
    # the unused "import StringIO" that lived here has been removed
    sys.stdin = sys.__stdin__
    reducer()


main()
| true |
6775d8abad105fbd3be3e255dbc638b314c1fc68 | Python | annsshadow/fakepractice | /py/beg/createfile.py | UTF-8 | 580 | 3.4375 | 3 | [] | no_license | #! /usr/bin/env python
'createfile.py : to create text file'
import os
ls = os.linesep
# get the filename
while True:
if os.path.exists(fname):
print "[ERROR]: '%s' already exists" % fname
else:
break
# get file content:text line
all = []
print "\nenter lines ('.' by itself to quit).\n"
# loop until user finish input
while True:
entry = raw_input('>')
if entry == '.':
break
else:
all.append(entry)
# write lines to file with proper line-ending
fileobj = open(fname,'w')
fileobj.writelines(['%s%s' % (x, ls) for x in all])
fileobj.close
print '[FINISHED]file' | true |
ab85501bbc67b44a99828323d3cf15933d47fc6d | Python | YorkFish/git_study | /CodingInterviews/python/21_is_pop_order.py | UTF-8 | 600 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# coding:utf-8
class Solution:
    def IsPopOrder(self, pushV, popV):
        """Return True if popV is a valid pop order for stack pushes pushV.

        Simulates the stack: push each value in order, popping greedily
        whenever the stack top matches the next expected pop. Returns
        None for invalid input (either sequence missing, or mismatched
        lengths), preserving the original contract.
        """
        # fix: also guard popV is None — len(None) raised TypeError before
        if pushV is None or popV is None or len(pushV) != len(popV):
            return None
        stk = []
        idx = 0
        for num in pushV:
            stk.append(num)
            # pop while the stack top is the next value expected by popV
            while stk and stk[-1] == popV[idx]:
                stk.pop()
                idx += 1
        # valid iff every expected value was eventually popped
        return idx == len(popV)
if __name__ == "__main__":
    # Demo: one valid and one invalid pop sequence for pushes 1..5.
    checker = Solution()
    pushed = [1, 2, 3, 4, 5]
    for popped in ([4, 5, 3, 2, 1], [4, 3, 5, 1, 2]):
        print(checker.IsPopOrder(pushed, popped))
| true |