| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 40..40 | stringclasses 1 value | stringlengths 5..133 | stringlengths 2..333 | stringclasses 30 values | int64 18..5.47M | float64 2.52..5.81 | int64 3..5 | listlengths 0..67 | stringclasses 2 values | stringlengths 12..5.47M | bool 1 class |
c20327fa5b2a09d00d9948047d082ee30c565b23 | Python | SRserves85/avro-to-python | /avro_to_python/utils/avro/types/primitive.py | UTF-8 | 1,461 | 2.703125 | 3 | ["MIT"] | permissive |
""" file containing primitive type helper function """
from typing import Union
from avro_to_python.utils.avro.primitive_types import PRIMITIVE_TYPES
from avro_to_python.classes.field import Field
def _primitive_type(field: Union[dict, str]) -> Field:
    """ Function to add information to primitive type fields

    Parameters
    ----------
        field: dict or str
            field object with information on the field

    Returns
    -------
        Field
    """
    # build kwargs locally so repeated calls don't share mutable state
    kwargs = {
        'name': None,
        'fieldtype': 'primitive',
        'avrotype': None,
        'default': None
    }
    if isinstance(field, dict):
        kwargs.update({
            'name': field['name'],
            'default': field.get('default', None)
        })
        if field.get('type', None) == 'array':
            kwargs.update({'avrotype': field['items']['type']})
        elif isinstance(field.get('type'), dict):
            if field.get('type', {}).get('logicalType'):
                kwargs.update({'avrotype': field['type']['type']})
        elif isinstance(field.get('type'), str):
            if field['type'] in PRIMITIVE_TYPES:
                kwargs.update({'avrotype': field['type']})
            else:
                kwargs.update({'avrotype': field['item']['type']})
    elif isinstance(field, str):
        # ASSUMPTION: a bare string field is the avro type name itself;
        # the original indexed field['item'], which cannot work on a str
        kwargs.update({'avrotype': field})
    else:
        raise ValueError(
            f'{type(field)} is not in (dict, str)'
        )
    return Field(**kwargs)
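# A hypothetical usage sketch (assumes avro-to-python is installed, that 'int'
# is listed in PRIMITIVE_TYPES, and that Field exposes the kwargs above as
# attributes):
if __name__ == '__main__':
    parsed = _primitive_type({'name': 'age', 'type': 'int'})
    print(parsed.name, parsed.avrotype)  # -> age int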
| true |
1a41c56a0f19367ff2c3cc216975921af33748e2 | Python | igormba/python-exercises | /ex095.py | UTF-8 | 1,536 | 3.921875 | 4 | ["MIT"] | permissive |
'''Improve challenge 93 so that it works with several players, including a system for viewing details of each player's performance.'''
time = list()
dados = dict()
gols = list()
while True:
    dados.clear()
    dados['Jogador'] = str(input('Player name: '))
    dados['Gols'] = list()
    dados['Jogos'] = int(input(f'How many matches did {dados["Jogador"]} play? '))
    for c in range(0, dados["Jogos"]):
        dados['Gols'].append(int(input(f'How many goals in match {c+1}? ')))
    dados['Total de Gols'] = sum(dados['Gols'])
    while True:
        continuar = str(input('Add another player? [Y/N] ')).strip().upper()[0]
        if continuar in 'YN':
            break
        print('Error! Enter Y or N.')
    time.append(dados.copy())
    dados.clear()
    if continuar == 'N':
        break
print('=' * 30)
print(f'{"COD":<}{" NAME":<}{"GOALS":^17}{"TOTAL":>}')
print('=' * 30)
for k, v in enumerate(time):
    print(f' {k:<3}{v["Jogador"]:<9}{str(v["Gols"]):<14}{v["Total de Gols"]}')
print('=' * 30)
while True:
    escolha = int(input('Enter a player code for analysis: (999 to finish) '))
    if escolha == 999:
        break
    elif escolha not in range(0, len(time)):
        print('Error! Choose the number corresponding to a player.')
    else:
        print(f'Details for player {time[escolha]["Jogador"]}.')
        for k, v in enumerate(time[escolha]['Gols']):
            print(f'Scored {v} goals in match {k+1}.')
print('=' * 30)
print('PROGRAM FINISHED!')
| true |
a086073a86c76b3fea77588490c4b496922beb6e | Python | IanGSalmon/Functional-Python | /Challenges/discounts.py | UTF-8 | 765 | 4 | 4 | [] | no_license |
from functools import partial
prices = [
10.50,
9.99,
0.25,
1.50,
8.79,
101.25,
8.00
]
def discount(price, amount):
return price - price * (amount/100)
# First, import partial from functools
# Now use partial to make a version of discount that applies 10% discount
# Name this partial function discount_10
discount_10 = partial(discount, amount=10)
# Follow that same pattern to make discount_25 and discount_50
discount_25 = partial(discount, amount=25)
discount_50 = partial(discount, amount=50)
# Finally, I need to see all prices with each discount applied
# Use map to create prices_10, prices_25, and prices_50
prices_10 = map(discount_10, prices)
prices_25 = map(discount_25, prices)
prices_50 = map(discount_50, prices)
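# Note: map objects are lazy, single-use iterators; a minimal sketch of
# materializing the discounted prices defined above:
for original, discounted in zip(prices, prices_10):
    print(f"{original:>7.2f} -> {discounted:.2f}")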
| true |
353869e242d29af34e82676af9acfe114f8a844c | Python | mpucci92/MiscRisk | /Stationary_Complete.py | UTF-8 | 2,274 | 2.875 | 3 | [] | no_license |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
### Import all libraries ###
import pandas as pd
import numpy as np
from sklearn import preprocessing
# Visualizations #
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from statsmodels.tsa.stattools import adfuller
# In[2]:
eurusd = pd.read_csv('C:\\Users\\mpucci\\Desktop\\EURUSD.csv')
# In[3]:
eurusd['Candle'] = 100*((eurusd['Close'].values - eurusd['Open'].values)/(eurusd['Open'].values))
# In[4]:
normalcandle = eurusd['Candle']
candle_scaled = preprocessing.scale(eurusd['Candle'])
eurusd['CandleSTD'] = candle_scaled
value = eurusd['CandleSTD']
#plt.hist(candle_scaled,bins=500)
#plt.xlim(left=-20)
#plt.xlim(right=20)
#plt.show()
# In[5]:
tester = eurusd['CandleSTD']
a = tester.isnull().values.any() # No NaN Values
# In[6]:
from statsmodels.tsa.stattools import adfuller
def test_stationarity(timeseries,start,end,jump):
array=[]
for x in range(int(len(timeseries)/(jump))):
data = timeseries[start:end]
#Perform Dickey-Fuller test:
#print ('Results of Dickey-Fuller Test:')
dftest = adfuller(data, autolag = 'AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
for key,value in dftest[4].items():
dfoutput['Critical Value (%s)'%key] = value
if (dfoutput['Test Statistic'] < dfoutput['Critical Value (5%)'] or np.isnan(dfoutput['Test Statistic'])):
dfoutput['Stationary'] = 0
#print ('S')
else:
dfoutput['Stationary'] = 1
#print ('NS')
array.append(dfoutput['Stationary'])
start = start + jump
end = end+ jump
return (np.nan_to_num(array))
# In[8]:
val = test_stationarity(value,0,20,20)
plt.hist(val,bins=2)
plt.show()
# In[ ]:
# In[9]:
# Output for the autocorrelation
# NOTE: 'autocorrelation_array' is never defined in this notebook export
# (the cell that computed it appears to be missing), so this block is
# commented out to keep the script runnable.
# autocorrelation = np.nan_to_num(autocorrelation_array)
# print(autocorrelation)
# plt.hist(autocorrelation)
# plt.show()
# In[ ]:
# Output for the Stationarity
# 'stationarity_array' is likewise undefined; the stationarity flags 'val'
# computed above are assumed to be the intended input.
stationarity = np.nan_to_num(val)
print(stationarity)
plt.hist(stationarity)
plt.show()
# In[ ]:
| true |
140ed852902f4a5f0f8eb9bbed649aa3c23a6bbf | Python | htn4179/mmo | /clientlogic.py | UTF-8 | 1,135 | 2.546875 | 3 | [] | no_license |
g_players = {}
g_player = 0
g_uid = g_player
g_speed = 20
g_wbuffer = []
g_rbuffer = []
g_name = None
g_server_frame = 0
import common
import cPickle
class Player(object):
def __init__(self, _x = 0, _y = 0, _name = '', _speed = 0):
self.x = _x
self.y = _y
self.name = _name
self.target_x = self.x
self.target_y = self.y
self.speed = _speed
self.uid = -1
self.cmd_cache = []
def init_from_dict(self, dic):
for k, v in dic.items():
if hasattr(self, k):
setattr(self, k,v)
print k, v , getattr(self, k)
def update(self):
self.x, self.y = common.move_to_next_pos((self.target_x, self.target_y), (self.x, self.y), self.speed)
#g_wbuffer.append(cPickle.dumps(('move_to', {'x':self.x, 'y':self.y})))
def reg_user(self, uid):
self.uid = uid
print 'reg_user', uid
def update_target_pos(self, _x, _y):
self.target_x = _x
self.target_y = _y
cmd = ('move_to', {'x': self.target_x, 'y': self.target_y, 'uid': self.uid})
g_wbuffer.append(cPickle.dumps(cmd))
def set_target_pos(self, _x, _y):
self.target_x = _x
self.target_y = _y
| true |
c1e7d95db3a3487cc8fc8c50a3a8246165350f0b | Python | HenrikBradland-Nor/FishFaceRecognition | /dataInspection.py | UTF-8 | 674 | 2.71875 | 3 | [] | no_license |
import os
dir = os.getcwd()
for d in os.listdir():
if "Data" in d:
os.chdir(d)
for d in os.listdir():
if "head" in d:
os.chdir(d)
data_label = os.listdir()
tot_dir = [0, 0]
list_of_IDs = []
for label in data_label:
fish_ID, direction, image_nr = label.split('_')
image_nr, _ = image_nr.split('.')
if direction == 'l':
tot_dir[0] += 1
else:
tot_dir[1] += 1
if not fish_ID in list_of_IDs:
list_of_IDs.append(fish_ID)
print("Total number of fish IDs:", len(list_of_IDs))
print("Number of left images:", tot_dir[0])
print("Number of right images:", tot_dir[1])
| true |
45f6f397ccec7e225017bec4fb04077116edb845 | Python | iskwak/DetetctingActionStarts | /helpers/process_hantman_mat.py | UTF-8 | 5,118 | 2.953125 | 3 | [] | no_license |
"""Get some stats on the hantman matfile."""
import argparse
import numpy
import pickle
def create_opts():
"""Create an opts dictionary."""
opts = dict()
opts['filename'] = ''
opts['outname'] = ''
return opts
def setup_opts(opts):
"""Setup default arguments for the arg parser.
returns an opt dictionary with default values setup.
"""
parser = argparse.ArgumentParser(description='Parse and convert Hantman'
' lab mouse mat files into a more python '
'friendly structure')
parser.add_argument('-f', '--filename', type=str, required=True,
help='matlab file to parse')
args = parser.parse_args()
opts['filename'] = args.filename
return opts
def convert_data(data):
"""Convert the spike style data to timing style data."""
# create a new label matrix with timing information rather than spikes
num_labels = len(data['label_names'])
num_experiments = len(data['exp'])
all_timings = []
for i in range(num_experiments):
num_frames = data['num_frames'][i]
timings = numpy.zeros((num_frames, num_labels))
curr_labels = data['labels'][i]
# find the first behavior
prev_idx = -1
prev_where_label = -1
for j in range(num_frames):
where_label = numpy.where(curr_labels[j])
if where_label[0].size > 0:
prev_idx = j
prev_where_label = where_label
timings[j][where_label] = 1
break
        # only continue processing if a behavior was found
        # (prev_idx stays -1 when no frame had a label)
        if 0 <= prev_idx < num_frames:
for j in range(prev_idx, num_frames):
where_label = numpy.where(curr_labels[j])
if where_label[0].size > 0:
timings[j][where_label] = 1
prev_where_label = where_label
else:
timings[j][prev_where_label] = 1 + \
timings[j - 1][prev_where_label]
all_timings.append(timings)
return all_timings
def process_matfile(opts):
"""Process hantman mouse pickled data.
Given an opts dict (created from setup_opts), process a matfile and
figure out a few things, such as number of experiments with no lift
labeled.
"""
    # pickle data must be read in binary mode
    with open(opts['filename'], 'rb') as infile:
        data = pickle.load(infile)
# for the hantman data, it would be nice to know a few things, such
# as the frame gap between behaviors.
# list of things:
# how often a label appears (and doesn't appear) per experiment
# the gap between labels (from the last labeled behavior
# gap between pairs of labels
# mean feature locations.
num_exp = len(data['exp'])
num_labels = len(data['label_names'])
# feature_dim = data['features'][0].shape[1]
label_counts = numpy.zeros((num_exp, num_labels))
label_gaps = []
# label pairs:
# lift -> hand
# hand -> grab
# grab -> sup
# sup -> at mouth
# at mouth -> chew
# between_label_gaps = numpy.zeros((num_exp, num_labels - 1))
# might be useful to get feature locations. (x1,y1,x2,y2)
# list pre-initialization in python is strange...
# feature_locations = [None] * num_labels
feature_locations = dict()
for i in range(num_labels):
feature_locations[i] = []
# finally the last thing to look for is the number of experiments
# where the order is incorrect (ie lift is not first, handopen is
# not second)
# out_order_labels = numpy.zeros((num_exp, num_labels))
# look for the gaps between behaviors.
for i in range(num_exp):
label_idx = []
# for each experiment, figure out how often each behavior occurs
for j in range(num_labels):
# per exp label count
label_counts[i, j] = data['labels'][i][:, j].sum()
label_idx.append(numpy.nonzero(data['labels'][i][:, j])[0])
if label_idx[j].any():
curr_features = data['features'][i][label_idx[j], :]
feature_locations[j].append(curr_features[0])
# figure out the gaps.
all_idx = numpy.concatenate(label_idx)
label_gaps.append(numpy.diff(all_idx))
label_gaps = numpy.concatenate(label_gaps)
print("label_gaps: ", label_gaps.mean(), label_gaps.std())
for i in range(num_labels):
print(data['label_names'][i])
print("\tlabel_counts: ", label_counts[:, i].mean(), \
label_counts[:, i].std())
curr_features = numpy.vstack(feature_locations[i])
print("\t", curr_features.shape)
print("\tlocation: ", curr_features.mean(axis=0), \
curr_features.std(axis=0))
if __name__ == "__main__":
opts = create_opts()
opts = setup_opts(opts)
process_matfile(opts)
| true |
5e5542b6a7faacdc016ac9e1a981f59fb7c21efa | Python | qvo117/qvo117_280_GH_Lab5 | /qvo117_280_Lab2_in_Lab5/test_maths.py | UTF-8 | 1,272 | 3.84375 | 4 | [] | no_license |
import unittest # Import the Python unit testing framework
import maths # Our code to test
class MathsTest(unittest.TestCase):
''' Unit tests for our maths functions. '''
def test_add_with_new_parameter(self):
actual = maths.add(5, 5, 2)
self.assertEqual(actual, '1010')
def test_add(self):
''' Tests the add function. '''
#Arrange and Act
actual = maths.add(0, 1)
#Assert
self.assertEqual(actual, 1)
def test_fibonacci(self):
''' Tests the fibonacci function. '''
actual = maths.fibonacci(5)
self.assertEqual(actual, 5)
def test_convert_base_under_10(self):
actual = maths.convert_base(10, 2)
self.assertEqual(actual, '1010')
def test_convert_base_over_10(self):
actual = maths.convert_base(10, 16)
self.assertEqual(actual, 'A')
def test_factorial(self):
''' Tests the factorial function. '''
actual = maths.factorial(3)
self.assertEqual(actual, 6)
# This allows running the unit tests from the command line (python test_maths.py)
if __name__ == '__main__':
unittest.main()
| true |
bfedc218060d367cd89518fe412c9eb02858cb9d | Python | openvinotoolkit/nncf | /nncf/experimental/torch/nas/bootstrapNAS/search/evaluator.py | UTF-8 | 7,980 | 2.734375 | 3 | ["Apache-2.0"] | permissive |
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, NoReturn, Optional, Tuple, TypeVar
from nncf.common.logging import nncf_logger
from nncf.common.utils.os import safe_open
DataLoaderType = TypeVar("DataLoaderType")
TModel = TypeVar("TModel")
EvalFnType = Callable[[TModel], float]
AccValFnType = Callable[[TModel, DataLoaderType], float]
class BNASEvaluatorStateNames:
BNAS_EVALUATOR_STAGE = "evaluator_state"
class BaseEvaluator:
"""
An interface for handling measurements collected on a target device. Evaluators make use
of functions provided by the users to measure a particular property, e.g., accuracy, latency, etc.
"""
def __init__(self, name: str, ideal_val: float):
"""
Initializes evaluator
:param name: Name of the evaluator
:param ideal_val: Ideal value for the metric computed by the evaluator
"""
self.name = name
self._current_value = -1
self._ideal_value = ideal_val
self.cache = {}
# TODO(pablo): Here we should store some super-network signature that is associated with this evaluator
@property
def current_value(self):
"""
:return: current value
"""
return self._current_value
@current_value.setter
def current_value(self, val: float) -> NoReturn:
"""
:param val: value to update the current value of the evaluator
:return:
"""
self._current_value = val
def evaluate_and_add_to_cache_from_pymoo(self, pymoo_repr: Tuple[float, ...]) -> float:
"""
        Evaluates the active sub-network and stores the result in the cache,
        keyed by the sub-network's Pymoo representation.
        :param pymoo_repr: tuple representing the associated values for the design variables
            in Pymoo.
        :return: the value obtained from the model evaluation.
"""
self._current_value = self.evaluate_subnet()
self.add_to_cache(pymoo_repr, self._current_value)
return self._current_value
@abstractmethod
def evaluate_subnet(self) -> float:
"""This method should implement how to a subnet is evaluated for a particular metric."""
def add_to_cache(self, subnet_config_repr: Tuple[float, ...], measurement: float) -> NoReturn:
"""
Adds evaluation result to cache
:param subnet_config_repr: tuple containing the values for the associated design variables.
:param measurement: value for the evaluator's metric.
:return:
"""
nncf_logger.debug(f"Add to evaluator {self.name}: {subnet_config_repr}, {measurement}")
self.cache[subnet_config_repr] = measurement
def retrieve_from_cache(self, subnet_config_repr: Tuple[float, ...]) -> Tuple[bool, float]:
"""
Checks if sub-network info is in cache and returns the corresponding value.
:param subnet_config_repr: tuple representing the values for the associated design variables.
        :return: (True, cached value) if the configuration is in the cache; (False, 0) otherwise.
"""
if subnet_config_repr in self.cache.keys():
return True, self.cache[subnet_config_repr]
return False, 0
def get_state(self) -> Dict[str, Any]:
"""
        Returns the state of the evaluator
:return: Dict with the state of the evaluator
"""
state_dict = {
"name": self.name,
"current_value": self._current_value,
"ideal_value": self._ideal_value,
"cache": self.cache,
}
return state_dict
def update_from_state(self, state: Dict[str, Any]) -> NoReturn:
"""
Updates the cache and other values in the evaluator from a saved state.
:param state: dict with state that should be used for updating this evaluator
:return:
"""
new_dict = state.copy()
self.name = new_dict["name"]
self._ideal_value = new_dict["ideal_value"]
self._current_value = new_dict["current_value"]
self.cache = new_dict["cache"]
def load_cache_from_csv(self, cache_file_path: str) -> NoReturn:
"""
Loads cache from CSV file.
:param cache_file_path: Path to CSV file containing the cache information.
:return:
"""
with safe_open(Path(cache_file_path), "r", encoding="utf8") as cache_file:
reader = csv.reader(cache_file)
for row in reader:
rep_tuple = tuple(map(int, row[0][1 : len(row[0]) - 1].split(",")))
self.add_to_cache(rep_tuple, float(row[1]))
def export_cache_to_csv(self, cache_file_path: str) -> NoReturn:
"""
Exports cache information to CSV.
:param cache_file_path: Path to export a CSV file with the cache information.
:return:
"""
with safe_open(Path(cache_file_path) / f"cache_{self.name}.csv", "w", encoding="utf8") as cache_dump:
writer = csv.writer(cache_dump)
for key in self.cache:
row = [key, self.cache[key]]
writer.writerow(row)
class MACsEvaluator(BaseEvaluator):
def __init__(self, eval_func):
super().__init__("MACs", 0)
self.eval_func = eval_func
def evaluate_subnet(self) -> float:
"""
        Evaluates the MACs metric using the stored eval function.
        :return: value obtained from evaluation.
"""
self._current_value = self.eval_func()
return self._current_value
class AccuracyEvaluator(BaseEvaluator):
"""
A particular kind of evaluator for collecting model's accuracy measurements
"""
def __init__(
self, model: TModel, eval_func: AccValFnType, val_loader: DataLoaderType, is_top1: Optional[bool] = True
):
"""
        Initializes Accuracy evaluator
        :param model: Model whose active sub-network is evaluated
        :param eval_func: function used to validate a sub-network
        :param val_loader: Data loader used by the validation function
        :param is_top1: Whether it is top-1 accuracy or top-5.
"""
name = "top1_acc" if is_top1 else "top5_acc"
super().__init__(name, -100)
self._model = model
self._eval_func = eval_func
self._val_loader = val_loader
self._is_top1 = is_top1
def evaluate_subnet(self) -> float:
"""
        Obtain accuracy by evaluating the stored model on the validation loader.
        :return: accuracy of the active sub-network (negated by the * -1.0 below, so that lower is better).
"""
self._current_value = self._eval_func(self._model, self._val_loader) * -1.0
return self._current_value
def get_state(self) -> Dict[str, Any]:
"""
Get state of Accuracy evaluator.
:return: Dict with state of evaluator
"""
state = super().get_state()
state["is_top1"] = self._is_top1
return state
def update_from_state(self, state: Dict[str, Any]) -> NoReturn:
"""
:param state: dict with state that should be used for updating this evaluator
:return:
"""
super().update_from_state(state)
new_dict = state.copy()
self._is_top1 = new_dict["is_top1"]
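# A minimal sketch of the cache round-trip above (hypothetical
# ConstantEvaluator; assumes nncf is installed, since add_to_cache logs
# through nncf_logger):
if __name__ == "__main__":
    class ConstantEvaluator(BaseEvaluator):
        def evaluate_subnet(self) -> float:
            return 42.0

    evaluator = ConstantEvaluator("demo", ideal_val=0.0)
    value = evaluator.evaluate_and_add_to_cache_from_pymoo((1.0, 2.0))
    hit, cached = evaluator.retrieve_from_cache((1.0, 2.0))
    print(hit, cached == value)   # -> True True
    missing, _ = evaluator.retrieve_from_cache((3.0, 4.0))
    print(missing)                # -> False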
| true |
8a5450016c8c0fef9828934b97281da68d303554 | Python | ibllex/learn-tkinter | /layout/02.2_pack_manager_review.py | UTF-8 | 1,181 | 2.9375 | 3 | [] | no_license |
from tkinter import Tk, LEFT, BOTH, X, N, Text
from tkinter.ttk import Frame, Label, Entry
from base.ExampleFrame import ExampleFrame
class Example(ExampleFrame):
def __init__(self):
super().__init__("Pack Manager - Review", 450, 500)
def init_ui(self):
frame_01 = Frame(self)
frame_01.pack(fill=X)
label_01 = Label(frame_01, text="Title", width=6)
label_01.pack(side=LEFT, padx=5, pady=5)
entry_01 = Entry(frame_01)
entry_01.pack(fill=X, padx=5, expand=True)
frame_02 = Frame(self)
frame_02.pack(fill=X)
label_02 = Label(frame_02, text="Author", width=6)
label_02.pack(side=LEFT, padx=5, pady=5)
entry_02 = Entry(frame_02)
entry_02.pack(fill=X, padx=5, expand=True)
frame_03 = Frame(self)
frame_03.pack(fill=BOTH, expand=True)
label_03 = Label(frame_03, text="Review", width=6)
label_03.pack(side=LEFT, anchor=N, padx=5, pady=5)
txt = Text(frame_03)
txt.pack(fill=BOTH, pady=5, padx=5, expand=True)
def main():
root = Tk()
app = Example()
root.mainloop()
if __name__ == '__main__':
main()
| true |
56158906c7e7a79c10c146310d9af9604660e452 | Python | cbcoutinho/learn_tkinter | /intro/main10.py | UTF-8 | 336 | 3.21875 | 3 | [] | no_license |
import tkinter as tk
root = tk.Tk()
canvas = tk.Canvas(root, width=200, height=100)
canvas.pack()
blackline = canvas.create_line(0, 0, 200, 50)
redline = canvas.create_line(0, 100, 200, 50, fill='red')
greenbox = canvas.create_rectangle(25, 25, 130, 60, fill='green')
# canvas.delete(redline)
canvas.delete(tk.ALL)
root.mainloop()
| true |
c69a85edbc1a5efeb9540f8156f937927c715b42 | Python | Controlman-beep/ML-Foundation-and-ML-Techniques | /hw1/代码/helper.py | UTF-8 | 3,715 | 3.28125 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 01:31:19 2019
@author: qinzhen
"""
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei'] # kept from the original: renders CJK characters in plots
plt.rcParams['axes.unicode_minus']=False # renders the minus sign correctly
def Judge(X, y, w):
    """
    Decision function: check whether all samples are classified correctly.
    """
    n = X.shape[0]
    # count correctly classified samples
    num = np.sum(X.dot(w) * y > 0)
    return num == n

def preprocess(data):
    """
    Preprocess the data.
    """
    # get the dimensions
    n, d = data.shape
    # separate X
    X = data[:, :-1]
    # add a bias term of 1
    X = np.c_[np.ones(n), X]
    # separate y
    y = data[:, -1]
    return X, y

def count(X, y, w):
    """
    Count the number of misclassified samples.
    """
    num = np.sum(X.dot(w) * y <= 0)
    return np.sum(num)
def PLA(X, y, eta=1, max_step=np.inf):
    """
    PLA algorithm. X, y are the input data, eta is the step size (default 1),
    and max_step is the maximum number of iterations (default infinity).
    """
    # get the dimensions
    n, d = X.shape
    # initialize
    w = np.zeros(d)
    # iteration counter
    t = 0
    # index of the current sample
    i = 0
    # index of the last misclassified sample
    last = 0
    while not(Judge(X, y, w)) and t < max_step:
        if np.sign(X[i, :].dot(w) * y[i]) <= 0:
            # increase the iteration count
            t += 1
            w += eta * y[i] * X[i, :]
            # record the last mistake
            last = i
        # move to the next sample
        i += 1
        # wrap back to 0 after reaching n
        if i == n:
            i = 0
    return t, last, w

def Pocket_PLA(X, y, eta=1, max_step=np.inf):
    """
    Pocket PLA algorithm. X, y are the input data, eta is the step size (default 1),
    and max_step is the maximum number of iterations (default infinity).
    """
    # get the dimensions
    n, d = X.shape
    # initialize
    w = np.zeros(d)
    # best weight vector found so far
    w0 = np.zeros(d)
    # iteration counter
    t = 0
    # smallest error count so far
    error = count(X, y, w0)
    # index of the current sample
    i = 0
    while (error != 0 and t < max_step):
        if np.sign(X[i, :].dot(w) * y[i]) <= 0:
            w += eta * y[i] * X[i, :]
            # increase the iteration count
            t += 1
            # current error count
            error_now = count(X, y, w)
            if error_now < error:
                error = error_now
                w0 = np.copy(w)
        # move to the next sample
        i += 1
        # wrap back to 0 after reaching n
        if i == n:
            i = 0
    return error, w0
def f1(g, X, y, n, eta=1, max_step=np.inf):
    """
    Run algorithm g n times and report the average number of iterations.
    eta is the step size (default 1); max_step is the maximum number of
    iterations (default infinity).
    """
    result = []
    data = np.c_[X, y]
    for i in range(n):
        np.random.shuffle(data)
        X = data[:, :-1]
        y = data[:, -1]
        result.append(g(X, y, eta=eta, max_step=max_step)[0])
    # 'normed' was removed from matplotlib; 'density' is the replacement
    plt.hist(result, density=True)
    plt.xlabel("Number of iterations")
    plt.title("Average number of iterations: " + str(np.mean(result)))
    plt.show()

def f2(g, X1, y1, X2, y2, n, eta=1, max_step=np.inf):
    """
    Train n times: each round fit algorithm g on (X1, y1) and evaluate the
    result on (X2, y2). eta is the step size (default 1); max_step is the
    maximum number of iterations (default infinity).
    """
    result = []
    data = np.c_[X1, y1]
    m = X2.shape[0]
    for i in range(n):
        np.random.shuffle(data)
        X = data[:, :-1]
        y = data[:, -1]
        w = g(X, y, eta=eta, max_step=max_step)[-1]
        result.append(count(X2, y2, w) / m)
    # 'normed' was removed from matplotlib; 'density' is the replacement
    plt.hist(result, density=True)
    plt.xlabel("Error rate")
    plt.title("Average error rate: " + str(np.mean(result)))
    plt.show()
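# A minimal sketch of running PLA on synthetic, linearly separable data
# (hypothetical example data; uses the functions defined above):
if __name__ == "__main__":
    np.random.seed(42)
    n = 100
    X_raw = np.random.randn(n, 2)
    w_true = np.array([0.5, 1.0, -1.0])  # bias + two weights
    labels = np.where(np.c_[np.ones(n), X_raw].dot(w_true) > 0, 1.0, -1.0)
    X, y = preprocess(np.c_[X_raw, labels])
    t, last, w = PLA(X, y)
    print("PLA converged after", t, "updates;",
          "misclassified:", count(X, y, w))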
| true |
68f52b11a06e02273565704a53b6b60a5621c9a8 | Python | raoyuanqi/Vehicle-Model-Recognition | /mmr.py | UTF-8 | 11,660 | 2.8125 | 3 | [] | no_license |
import numpy as np
from sklearn.mixture import gaussian_mixture as gauss
from sklearn.metrics.pairwise import cosine_similarity
import cv2
import os
import functions as fun
import time
'''This script is the main script of the make-and-model recognition pipeline using unsupervised learning.
All of the functions used are in the functions.py file.
'''
#Number of SIFT components that we will be keeping after PCA reduction, original number of components is 128
pc_comp=100
#Booleans that keep track of the fisher vector pipeline
compute_all_steps=True
#paths that will be used for our proof of concept
#in further stage of the implementation we will have only one folder that we will be working with
paths=["aston","bmw","clio","dodge","peugot"]
#First we define the PCA REDUCER MATRIX
#This is the matrix that will be used to keep a certain number of SIFT components
#Name of the file that stores the reducer matrix that will be used for the PCA reduction process
id="reducer"
covariance_type="diag"
#Check to see if there is a reducer file, if not create one
if(not(os.path.isfile(id+".npy")) or compute_all_steps):
print("No reducer file was found")
print("A new reducer file is being generated...")
fun.compute_save_reduce_vector(paths,id,pc_comp=pc_comp)
print("The Reducer file has been generated")
print("\n")
#Once the reducer file has been created it is time to load it and use it for PCA Reduction
print("Loading reducer file...")
reducer=np.load(id+".npy")
print("Reducer file loaded")
print("\n")
#Creation and storage of Reduced ROOT SIFT VECTORS
if(compute_all_steps):
print("No root sift files were found")
print("Generating root sift files...")
fun.compute_save_reduced_root_sift(reducer,paths)
print("Reduced root sift files generated and saved")
print("\n")
#Load all of the saved ROOT SIFT DESCRIPTORS and then use them to fit a GMM model
"""Implementation that has to be kept """
descriptors=np.atleast_2d(np.asarray(fun.file_counter(paths,".npy","reduced_data",remove=False,loader=True)))
print("the shape of the descriptors using the second function is ", descriptors.shape)
#Check to see if there are any trained GMM models
#If so load them and use them to create a fisher vector
#We will be using a range
for gmm_comp in range(50,1000,50):
gmm_means_file="./GMM/means"+str(gmm_comp)+".gmm.npy"
gmm_covariance_file="./GMM/covs"+str(gmm_comp)+".gmm.npy"
gmm_weight_file="./GMM/weights"+str(gmm_comp)+".gmm.npy"
if(os.path.isfile(gmm_means_file) and os.path.isfile(gmm_covariance_file) and os.path.isfile(gmm_weight_file)):
print("all the GMM files are in place and now we are going to load them")
print("loading files...")
gmm_means_file="./GMM/means"+str(gmm_comp)+".gmm.npy"
gmm_covariance_file="./GMM/covs"+str(gmm_comp)+".gmm.npy"
gmm_weight_file="./GMM/weights"+str(gmm_comp)+".gmm.npy"
means=np.load(gmm_means_file)
covs=np.load(gmm_covariance_file)
weights=np.load(gmm_weight_file)
print("GMM "+str(gmm_comp)+" loaded")
print("\n")
else:
# print("we did not find all of our files")
# print("we train a GMM Model")
# print("gathering ROOT SIFT descriptors...")
# descriptors=fun.compute_save_reduce_vector(paths,id,pc_comp=pc_comp,reduced=True).T
descriptors=np.atleast_2d(np.asarray(fun.file_counter(paths,".npy","reduced_data",remove=False,loader=True)))
# print("descriptors gathered")
print("training GMM %d..."%(gmm_comp))
#GMM MODEL
GMM=gauss.GaussianMixture(n_components=gmm_comp,covariance_type=covariance_type,max_iter=100000,n_init=1,init_params="kmeans")
GMM.fit(descriptors)
# print(np.sum(GMM.predict_proba(descriptors[0:20]),axis=1))
print("trained GMM %d..."%(gmm_comp))
print("saving the GMM model")
means=GMM.means_
covs=GMM.covariances_
weights=GMM.weights_
gmm_means_file="./GMM/means"+str(gmm_comp)+".gmm.npy"
gmm_covariance_file="./GMM/covs"+str(gmm_comp)+".gmm.npy"
gmm_weight_file="./GMM/weights"+str(gmm_comp)+".gmm.npy"
np.save(gmm_means_file,means)
np.save(gmm_covariance_file,covs)
np.save(gmm_weight_file,weights)
# print("GMM model has been saved")
print("\n")
# now we check to see if there is any fisher vector
num_fis=fun.file_counter(paths,".npy","fisher_vectors",remove=False)
if(compute_all_steps):
# print("No fisher vector files were found")
# print("generating them...")
print("Generate and Save fisher files for GMM %d..."%(gmm_comp))
fun.generate_fisher_vectors(paths,means,covs,weights,"_"+str(gmm_comp))
print("Fisher files saved")
print("\n")
# print("loading our fisher files...")
# fisher_vectors=np.atleast_2d(fun.file_counter(paths,".npy","fisher_vectors",remove=False,loader=True))
# print("fisher files have been generated")
# print(fisher_vectors.shape)
else:
print("we found our fisher files")
print("loading our fisher files...")
# fisher_vectors=np.atleast_2d(fun.file_counter(paths,".npy","fisher_vectors",remove=False,loader=True))
    # print(fisher_vectors.shape)
######################################
# FINAL STAGE OF PROOF OF CONCEPT #
######################################
evaluation="data.txt"
for case in paths :
max_value=0
max_comp=0
min_value=1000
best_gmms=5
data=[]
for gmm_comp in range(50,1000,50):
target=open(evaluation,"a")
evaluation_limit=10
print("-------------------------------------------------")
print("evaluation of GMM %d for %s"%(gmm_comp,case))
# paths=["aston","bmw","clio","dodge","peugot"]
fisher_vectors=np.atleast_2d(fun.file_counter(paths,"_"+str(gmm_comp)+".npy","fisher_vectors",remove=False,loader=True,Fisher=True))
cosine_metric=cosine_similarity(fisher_vectors)
all_bmw=0
all_clio=0
all_dodge=0
all_peugot=0
all_aston=0
if(case=="aston"):
under=5
upper=15
if(case=="bmw"):
under=25
upper=35
if(case=="clio"):
under=45
upper=55
if(case=="dodge"):
under=65
upper=75
if(case=="peugot"):
under=85
upper=95
for ind in range(under,upper):
if(ind<20):
current=all_aston
case="aston"
if(ind>19 and ind<40):
current=all_bmw
case="bmw"
if(ind>39 and ind<60):
current=all_clio
case="clio"
if(ind>59 and ind<80):
current=all_dodge
case="dodge"
if(ind>79 and ind<100):
current=all_peugot
case="peugot"
indices=np.flip(np.argsort(cosine_metric[ind]),axis=0)
aston=0
bmw=0
dodge=0
clio=0
peugot=0
clio_translate=0
for sim in range(1,(evaluation_limit+1)):
if(indices[sim]<20):
aston=aston+1
if(indices[sim]>19 and indices[sim]<40):
bmw=bmw+1
if(indices[sim]>39 and indices[sim]<60):
clio=clio+1
if(indices[sim]>59 and indices[sim]<80):
dodge=dodge+1
if(indices[sim]>79 and indices[sim]<100):
peugot=peugot+1
# print("there are %d ASTON vehicles in the first %d images"%(aston,evaluation_limit))
# print("there are %d BMW vehicles in the first %d images"%(bmw,evaluation_limit))
# print("there are %d CLIO vehicles in the first %d images"%(clio,evaluation_limit))
# print("there are %d DODGE vehicles in the first %d images"%(dodge,evaluation_limit))
# print("there are %d PEUGOT vehicles in the first %d images"%(peugot,evaluation_limit))
# print("\n")
"""for sim in range(5):
if(indices[sim]<20):
# print("./buildings/%03d.png"%(indices[sim]+1))
if (indices[sim]==0):
image=cv2.imread("./aston/%03d.png"%(indices[sim]+1))
else:
image=cv2.imread("./aston/%03d.png"%(indices[sim]))
height, width = image.shape[:2]
image = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
if(sim==0):
cv2.imshow("original",image)
else:
cv2.imshow("similar %d"%(sim),image)
if(indices[sim]>19 and indices[sim]<40):
# print("./buildings/%03d.png"%(indices[sim]+1))
image=cv2.imread("./bmw/%03d.png"%(indices[sim]+1-20))
height, width = image.shape[:2]
image = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
if(sim==0):
cv2.imshow("original",image)
else:
cv2.imshow("similar %d"%(sim),image)
if(indices[sim]>39 and indices[sim]<60):
# print("./buildings/%03d.png"%(indices[sim]+1))
image=cv2.imread("./clio/%03d.png"%(indices[sim]+1-40))
height, width = image.shape[:2]
image = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
if(sim==0):
cv2.imshow("original",image)
else:
cv2.imshow("similar %d"%(sim),image)
if(indices[sim]>59 and indices[sim]<80):
# print("./buildings/%03d.png"%(indices[sim]+1))
image=cv2.imread("./dodge/%03d.png"%(indices[sim]+1-60))
height, width = image.shape[:2]
image = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
if(sim==0):
cv2.imshow("original",image)
else:
cv2.imshow("similar %d"%(sim),image)
if(indices[sim]>79 and indices[sim]<90):
image=cv2.imread("./peugot/%03d.png"%(indices[sim]+1-80))
height, width = image.shape[:2]
image = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
if(sim==0):
cv2.imshow("original",image)
else:
cv2.imshow("similar %d"%(sim),image)
if(indices[sim]>99):
image=cv2.imread("./cliotranslate/%03d.png"%(indices[sim]+1-100))
height, width = image.shape[:2]
image = cv2.resize(image,(2*width, 2*height), interpolation = cv2.INTER_CUBIC)
if(sim==0):
cv2.imshow("original",image)
else:
cv2.imshow("similar %d"%(sim),image)"""
all_bmw+=bmw
all_clio+=clio
all_dodge+=dodge
all_peugot+=peugot
all_aston+=aston
# all_clio_translate+=clio_translate
# cv2.imshow("image",np.zeros((100,1000,3)))
# cv2.waitKey(0)
# print("Overall GMM with %d components gives us : "%gmm_comp)
# print("BMW: %d"%all_bmw)
# print("CLIO: %d"%all_clio)
# print("DODGE: %d"%all_dodge)
# print("PEUGOT: %d"%all_peugot)
# print("ASTON: %d"%all_aston)
# print("Number of vehicles : %d"%(all_bmw+all_clio+all_dodge+all_peugot+all_aston))
max_comp,max_value=(gmm_comp,current) if max_value<current else (max_comp,max_value)
min_comp,min_value=(gmm_comp,current) if min_value>current else (min_comp,min_value)
data.append((current,gmm_comp))
# print("\n")
data.sort(key=lambda tup: tup[0])
data.reverse()
# print("best comp is %d yielding " %(max_comp))
# cv2.imshow("image",np.zeros((100,1000,3)))
# cv2.waitKey(0)
print("-------------------------------------------------")
mean_recall=0
mean_gmm=0
for i in range(best_gmms):
mean_recall+=data[i][0]
mean_gmm+=data[i][1]
mean_recall=mean_recall/best_gmms
mean_gmm=mean_gmm/best_gmms
target.write("Evaluation of model %s yields "%(case))
target.write("\n")
target.write("[")
for d in data:
target.write("("+str(d[0])+","+str(d[1])+")")
target.write("]")
target.write("\n")
target.write("best mean GMM is %s and best mean recall is %s"%(str(mean_gmm),str(mean_recall)))
target.write("\n"*2)
print("best comp is %d yielding %d " %(max_comp,max_value))
print("worst comp is %d yielding %d " %(min_comp,min_value))
print(data)
    # break
target.close()
| true |
83297563c68c5370c25c1eb554bfa4aaaf25259b | Python | NikolayVaklinov10/Python_Interview_Challenges | /part12mockSocialNetworkCompany/onSiteQuestion1.py | UTF-8 | 190 | 3.0625 | 3 | [] | no_license |
def solution(lst, target):
seen = set()
for num in lst:
num2 = target - num
if num2 in seen:
return True
seen.add(num)
return False
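# A quick sanity check with hypothetical values:
if __name__ == "__main__":
    print(solution([2, 7, 11, 15], 9))  # -> True  (2 + 7 == 9)
    print(solution([1, 3, 5], 100))     # -> False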
| true |
45c1b1f456308bd65b4f3070c7640e6fedf9adb9 | Python | Qondor/Python-Daily-Challenge | /Daily Challenges/Daily Challenge #110 - Love VS. Friendship/words_strength_calculator.py | UTF-8 | 321 | 3.703125 | 4 | [] | no_license |
def wordsToMarks(word):
"""If a = 1, b = 2, c = 3 ... z = 26, then l + o + v + e = 54
Function to find the "strength" of each of these values.
"""
value = []
value.extend((ord(letter) - 96) for letter in word)
return sum(value)
if __name__ == "__main__":
print(wordsToMarks("attitude"))
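    # the docstring example as a quick check:
    print(wordsToMarks("love"))  # -> 54  (12 + 15 + 22 + 5)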
| true |
fb7bd5d96ceed3a704510715d0ac2e80266b7c25 | Python | tusharcoder/elasticdjango | /core/management/commands/seed_db.py | UTF-8 | 3,469 | 2.5625 | 3 | ["MIT"] | permissive |
# @Author: Tushar Agarwal(tusharcoder) <tushar>
# @Date: 2017-02-19T18:54:14+05:30
# @Email: tamyworld@gmail.com
# @Filename: sample.py
# @Last modified by: tushar
# @Last modified time: 2017-03-13T14:36:26+05:30
from django.core.management.base import BaseCommand
from model_mommy import mommy
import random
from core.models import *
from faker import Factory
from django.contrib.auth.models import User
faker = Factory.create()
def random_with_N_digits(n):
"""utility function to generate the random number of the length n"""
range_start = 10**(n-1)
range_end = (10**n)-1
return random.randint(range_start, range_end)
class Command(BaseCommand):
help = "My shiny new management command."
def add_arguments(self, parser):
parser.add_argument('count', nargs=1, type=int)
def handle(self, *args, **options):
print("cleaning the database")
Product.objects.all().delete()
Category.objects.all().delete()
Subcategory.objects.all().delete()
UserProfile.objects.all().delete()
users=User.objects.exclude(is_staff=True).delete()
self.create_category()
        self.create_subcategories()
self.create_users()
self.createProducts(options)
print("production finished cheers:)")
def createProducts(self,options):
"""function to create the products"""
print( "finally, creating the products...")
self.products=[]
for i in range(options['count'][0]):
price = random_with_N_digits(3)
category=random.choice(self.categories)
name='{}{} product'.format(faker.name(),category.name)
subcategory=random.choice(list(filter(lambda x : x.category.name==category.name, self.subcategories)))
seller=random.choice(self.userprofiles)
product_type=random.choice(['downloadable_product','shipped_product'])
product=mommy.prepare(Product,name=name,price=price,category=category,subcategory=subcategory,seller=seller,product_type=product_type)
self.products.append(product)
Product.objects.bulk_create(self.products)
def create_category(self):
"""function to create the categories"""
print( "categories...")
_list=['books','toys','kitchenware','computer and accessories','mobile and tablets','clothing and Accessories','watches and eyeware']
self.categories=[]
for param in _list:
category=mommy.make(Category,name=param)
self.categories.append(category)
    def create_subcategories(self):
"""function to create the sub categories"""
print( "subcategories...")
self.subcategories=[]
for category in self.categories:
sub_cat_name='{} {}'.format(faker.name(),category.name)
subcategory=mommy.make(Subcategory, name=sub_cat_name, category=category)
self.subcategories.append(subcategory)
def create_users(self):
"""function to create the users"""
print( "users...")
self.userprofiles=[]
domains=['gmail','outlook','yahoo','yahoo.co.in']
for r in range(0,100):
email = '{}{}@{}'.format(faker.name(),str(r),random.choice(domains))
password=random_with_N_digits(10)
user=User.objects.create_user(email,email,password)
userprofile=mommy.make(UserProfile,user=user,user_type='vendor')
self.userprofiles.append(userprofile)
| true |
781181bd6934a88e34020b1ee967c1d136521936 | Python | amorochenets/python | /less07/less07_1.py | UTF-8 | 482 | 3.453125 | 3 | [] | no_license |
# !/usr/bin/env python3
# coding UTF-8
# for i in 'hello world':
# print(end='_')
# if i == 'o':
# continue
# print(i*2, end='')
# i = 5
# while i < 15:
# print(i)
# i += 2
newList = []
while True:
word = input('Please enter one word: ')
if word.find(' ') != -1:
        print('Too many words..')
continue
if word in newList:
print('Already has this word')
continue
newList.append(word)
print(newList)
| true |
1a1ffc08f5c531c63dd5e898316e4bf31d46f5e3 | Python | PedchenkoSergey/GB-Python-Basics | /task2/task_2_5.py | UTF-8 | 517 | 4.0625 | 4 | [] | no_license |
my_list = [7, 5, 3, 3, 2]
while True:
    my_num = input("Enter a new element for the rating list, or type exit: ")
flag = True
if my_num == "exit":
break
for i in range(0, len(my_list)):
if int(my_num) > my_list[i]:
my_list.insert(i, int(my_num))
flag = False
break
if flag:
my_list.append(int(my_num))
print(f"Списко рейтинга чисел выглядит таким образом: {my_list}")
| true |
3920212992d152e09fd41ee17f72c7f249a067f6 | Python | julian-ramos/ksi | /client.py | UTF-8 | 2,842 | 2.671875 | 3 | [] | no_license |
#!/usr/bin/env python
"""
This client connects to a wiiRemote
The macAddress of the wiiMote has to be specified on
the file wiiMAC.dat. This file should have the next format
XXXXXXX
XXXXXXX
Where each line is the MAC address
"""
from time import sleep
import cwiid
import socket
import sys
host = 'localhost'
port = 50010
size = 2048
socketCon=False
# s.send('identity wiiMote1')
wiiMAC=[]
#wiiMACAddr: Write your wiimote address in the file 'wiiMAC.data'.
#Such as:
#xx:xx:xx:xx:xx:xx
#xx:xx:xx:xx:xx:xx
#Home motes
# wiiMACFile = 'wiiMAC.data'
#Office motes
wiiMACFile = 'wiiMAC2.data'
wiiNum = 0
try:
wiiNum = int(sys.argv[1])
except:
print 'Add a wiiNum to select a wiimote device'
done=False
while not done:
wiiMACAddr = []
try:
wiiMACReader = open(wiiMACFile, 'r')
wiiMACAddr = wiiMACReader.readlines()
wiiMACReader.close()
# print wiiMACAddr
except:
print 'Error: Wiimote MAC addr file not found.'
continue
# wiiMACReader = open(wiiMACFile, 'r')
# wiiMACAddr = wiiMACReader.readlines()
# wiiMACReader.close()
if len(wiiMACAddr) < 2:
print 'Error: Invalid wiimote MAC addr.'
continue
wiiMAC = wiiMACAddr[wiiNum].strip()
wiiID = 'wiiMote' + str(wiiNum+1)
print "Trying to connect to " + wiiID + " " + wiiMAC
print "Press 1 & 2 on the Wiimote simultaneously, to find it"
try:
wii = cwiid.Wiimote(wiiMAC)
wii.enable(cwiid.FLAG_MESG_IFC)
wii.rpt_mode = cwiid.RPT_IR | cwiid.RPT_BTN
print wiiMAC + " connected successfully"
data=[0,0]
done=True
connected=True
except:
print('Couldnt initialize the wiiMote')
rpt=[ [0,0] for i in range(4)]
while 1:
messages = wii.get_mesg()
for mesg in messages: # Loop through Wiimote Messages
if mesg[0] == cwiid.MESG_IR: # If message is IR data
cont=-1
for m in mesg[1]: # Loop through IR LED sources
cont+=1
if m: # If a source exists
rpt[cont][0]=m['pos'][0]
rpt[cont][1]=m['pos'][1]
mess2send='%s,0|x%.3fy%.3f,1|x%.3fy%.3f,2|x%.3fy%.3f,3|x%.3fy%.3f,'%(wiiID,\
rpt[0][0],rpt[0][1],\
rpt[1][0],rpt[1][1],\
rpt[2][0],rpt[2][1],\
rpt[3][0],rpt[3][1])
if socketCon==False:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,port))
except:
s=None
if not s==None:
socketCon=True
else:
s.send(mess2send)
s.close()
| true |
399411a3f0b700a5d3f2bec33a9ddea923003d3f | Python | nikpet/hb | /week1/1/fibonacci.py | UTF-8 | 259 | 3.640625 | 4 | [] | no_license |
def fibonacci(n):
result = [1, 1]
for i in range(1, n-1):
result.append(result[i-1] + result[i])
return result[:n]
if __name__ == "__main__":
print(fibonacci(1))
print(fibonacci(2))
print(fibonacci(3))
print(fibonacci(10))
| true |
f4ad44c5aefdda9649739e882d6de3695dac39f6 | Python | sinopeus/thrax | /bin/lexicon.py | UTF-8 | 1,370 | 3.25 | 3 | [] | no_license |
import re
# Iterator moved to collections.abc; the old alias was removed in Python 3.10
from collections.abc import Iterator
chars = re.compile('[,\?\!#&_`\.%·; <>]')
class Corpus(Iterator):
def __init__(self, corpus_file, count=None):
self.text = open(corpus_file)
self.count = 0
        if count is not None:
            self.__setstate__(count)
def __iter__(self):
return self
def __next__(self):
self.count += 1
return self.tokenise(self.text.readline())
def tokenise(self, line):
return chars.split(line.strip().lower().replace("-", ""))
def rewind(self):
self.text.seek(0)
def __getstate__(self):
return (self.text.name, self.count)
def __setstate__(self,count):
while self.count < count:
next(self)
class Dictionary(Iterator):
def __init__(self, dict_file, size):
self.indices = {}
self.size = size
self.dict_file = open(dict_file)
self.build()
def __iter__(self):
return self
def __next__(self):
return self.dict_file.readline().strip()
def build(self):
idx = 0
while idx < self.size:
self.indices[next(self)] = idx
idx +=1
def enlarge(self, size):
while self.size < size:
self.indices[next(self)] = self.size
self.size +=1
def lookup(self, word):
return self.indices[word]
def exists(self, word):
return (word in self.indices)
def __getstate__(self):
return (self.dict_file.name, self.size)
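# A minimal usage sketch with hypothetical files written on the fly:
if __name__ == "__main__":
    with open("corpus.txt", "w") as f:
        f.write("foo bar\nbaz qux\n")
    with open("dict.txt", "w") as f:
        f.write("foo\nbar\nbaz\nqux\n")
    corpus = Corpus("corpus.txt")
    print(next(corpus))     # -> ['foo', 'bar']
    d = Dictionary("dict.txt", size=2)
    print(d.lookup("foo"))  # -> 0
    print(d.exists("qux"))  # -> False (only the first 2 words were loaded)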
| true |
e83b2307cc001d16677fa3c370021ae4a4b1405d | Python | edward035358370/Leetcode | /leetcode/Permutations/version1.py | UTF-8 | 459 | 3.171875 | 3 | [] | no_license |
class Solution(object):
    def permute(self, li):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # accumulate results in a closure instead of a shared mutable
        # default argument (the original res=[] default persisted
        # across separate permute() calls)
        res = []
        def cut(li, temp):
            if len(li) == 1:
                res.append(temp + li)
            for i in range(len(li)):
                cut(li[:i] + li[i+1:], temp + [li[i]])
            return res
        return cut(li, [])
li = [1,2,3]
sol = Solution()
ans = sol.permute(li)
print(ans)
| true |
1aad45bb2ce8efdc5cd8a874949c3da88308acc1 | Python | scmbuildrelease/gitfusionsrc | /libexec/p4gf_lfs_tracker.py | UTF-8 | 1,506 | 2.515625 | 3 | [] | no_license |
#! /usr/bin/env python3.3
"""Check if a Git work tree path is under Git LFS control."""
import p4gf_lfs_checker
class LFSTracker(object):
"""Check if a Git work tree path is under Git LFS control."""
def __init__(self, *, ctx):
"""Create a LFSChecker to do the checking."""
self.ctx = ctx
# delay init of checker until it's needed
# by then context will have its repo object ready
self._checker = None
@property
def checker(self):
"""Lazy init the checker."""
if not self._checker:
self._checker = p4gf_lfs_checker.LFSChecker(self.ctx)
return self._checker
def add_cl(self, *, branch, p4change):
"""Find .gitattributes files on branch@p4change and cache any lfs lines."""
self.checker.add_cl(branch=branch, p4change=p4change)
def is_tracked_git(self, *, commit_sha1, gwt_path):
"""Check if the given GWT path is under Git LFS control as of the commit.
Commit is a Git commit from git-fast-export, not yet written to Helix.
"""
return self.checker.is_tracked_git(commit_sha1=commit_sha1, gwt_path=gwt_path)
def is_tracked_p4(self, *, branch, p4change, gwt_path):
"""Check if the given GWT path is under Git LFS control as of the P4Change.
p4change is a Helix P4Change instance not yet written to
git-fast-import.
"""
return self.checker.is_tracked_p4(branch=branch, p4change=p4change, gwt_path=gwt_path)
| true |
c13bf8c204b23b22a188d0a0d35a3746c26f07ef | Python | NoellePatterson/FFC_QA | /calculations/snowEarly.py | UTF-8 | 2,343 | 2.609375 | 3 | [] | no_license |
import numpy as np
from Utils.convertDateType import convertOffsetToJulian
def snowEarly(classes):
snowEarlySpring = {}
snowEarlyWet = {}
for currentClass, value in classes.items():
springTim = []
wetTim = []
for i, results in enumerate(value):
springTim.append(value[i].loc['SP_Tim'])
for i, results in enumerate(value):
wetTim.append(value[i].loc['Wet_Tim'])
for index, gage in enumerate(springTim): # loop through each gage (223)
counterSp = 0
allWaterYearsSp = 0
year = int(gage.index[0])
for i, year in enumerate(gage): # loop through each year in the gage
if np.isnan(springTim[index][i]) == False:
allWaterYearsSp = allWaterYearsSp + 1
offsetSpringTim = [int(springTim[index][i])]
offsetSpringTim = convertOffsetToJulian(offsetSpringTim, year)
if offsetSpringTim[0] < 106:
counterSp = counterSp + 1
if currentClass in snowEarlySpring:
snowEarlySpring[currentClass].append(counterSp/allWaterYearsSp)
else:
snowEarlySpring[currentClass] = [counterSp/allWaterYearsSp]
for index, gage in enumerate(wetTim): # loop through each gage (223)
counterWet = 0
allWaterYearsWet = 0
year = int(gage.index[0])
for i, year in enumerate(gage): # loop through each year in the gage
if np.isnan(wetTim[index][i]) == False:
allWaterYearsWet = allWaterYearsWet + 1
offsetWetTim = [int(wetTim[index][i])]
offsetWetTim = convertOffsetToJulian(offsetWetTim, year)
if offsetWetTim[0] < 152:
counterWet = counterWet + 1
if currentClass in snowEarlyWet:
snowEarlyWet[currentClass].append(counterWet/allWaterYearsWet)
else:
snowEarlyWet[currentClass] = [counterWet/allWaterYearsWet]
for currentClass in snowEarlySpring:
snowEarlySpring[currentClass] = np.nanmean(snowEarlySpring[currentClass])
snowEarlyWet[currentClass] = np.nanmean(snowEarlyWet[currentClass])
return snowEarlySpring, snowEarlyWet
| true |
33a0947aab4ee7ac6f9c2ea9c0d721d7fed7ce7e | Python | oyedeloy/Pirple.com_courses | /Python is easy/Classes/classs_inheritance.py | UTF-8 | 1,680 | 3.6875 | 4 | [] | no_license |
class Team:
def __init__(self,Name = "Name",Origin = "Origin"):
        self.team_name = Name # We have created a variable "team_name" which is now part of the class.
self.team_origin = Origin
def define_team_name(self, Name):
self.team_name = Name
def define_team_origin(self, Origin):
self.team_origin = Origin
#class inheritance
#class Inheritance_name(parent_class)
class Players(Team): # Every team has players, and the Players class is part of the bigger Team class
def __init__(self,player_name,player_point,team_name,team_origin):
Team.__init__(self,team_name,team_origin) #You also have to initialise the parent class
self.player_name = player_name
        self.player_point = player_point # Assuming that every player starts with 0 points
#These attributes belong to the player class
#lets make a method for the players class
def scored_point(self):
self.player_point +=1
def set_name(self,Name):
self.player_name = Name
def __str__(self): #__str__ allows us to call players directly without specifying a method
return self.player_name + " has scored " + str(self.player_point) + " points"
Player_1 = Players("drogba",0,"chealsea","London")
print(Player_1.player_name)
print(Player_1.player_point)
Player_1.define_team_name("BCC_Lion")
print(Player_1.team_name)
print(Player_1.team_origin) #The Player class inherited all the attribute of the team
Player_1.scored_point()
print(Player_1.player_point)
Player_1.set_name("makalele")
print(Player_1.player_name)
print(Player_1)
| true |
63013b6fc22c8147319e2c9655cd0d352e884d2b | Python | kamkasz/downlpj.py | /downlpj.py | UTF-8 | 3,845 | 2.671875 | 3 | ["Unlicense"] | permissive |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*
#importing modules
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import requests
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/53 "
"(KHTML, like Gecko) Chrome/15.0.87")
#if website is coded to check user's browser, this should make sure that it won't stop PhantomJs from working
#adress of the site on which you want to log on
baseurl = "example.com/login.aspx?"
#login and username
username = "username"
password = "password"
#text which will be written into forms
data="xxx"
godzina="yyy"
data2="zzz"
godzina2="aaa"
#defining of elements, like forms or buttons, which are needed to navigate the site, it will make it possible to localize them
xpaths = { 'txtUser' : "//input[@name='txtUser']",
'txtPassword' : "//input[@name='txtPassword']",
'submitButton' : "//input[@name='login']",
'submitButton2' : "//input[@name='werwe']",
'submitButton3' : '//a[@id="ListOfStations"]',
'Datef' : "//input[@name='Datef]",
'Hourf' : "//input[@name='Hourf']",
'submitButton4' : "//input[@name='Odswiez']",
'Datet' : "//input[@name='Datet']",
'Hourt' : "//input[@name='Hourt']",
'submitButton5' : '//a[@id="hl_generate_excel"]'
}
#opening PhantomJs webdriver, defining userAgent
mydriver = webdriver.PhantomJS(desired_capabilities=dcap)
#opening a site
mydriver.get(baseurl)
#login into website
mydriver.find_element_by_xpath(xpaths['txtUser']).clear()
mydriver.find_element_by_xpath(xpaths['txtUser']).send_keys(username)
mydriver.find_element_by_xpath(xpaths['txtPassword']).clear()
mydriver.find_element_by_xpath(xpaths['txtPassword']).send_keys(password)
mydriver.find_element_by_xpath(xpaths['submitButton']).click()
#waiting till page loads, it's probably better to use WebDriverWait
#navigating site
time.sleep(10)
mydriver.get('http://example.com/')
time.sleep(40)
mydriver.find_element_by_xpath(xpaths['submitButton3']).click()
time.sleep(30)
mydriver.find_element_by_xpath(xpaths['submitButton2']).click()
time.sleep(10)
#filling out forms
mydriver.find_element_by_xpath(xpaths['Datef']).clear()
mydriver.find_element_by_xpath(xpaths['Datef']).send_keys(data)
mydriver.find_element_by_xpath(xpaths['Hourf']).clear()
mydriver.find_element_by_xpath(xpaths['Hourf']).send_keys(godzina)
mydriver.find_element_by_xpath(xpaths['Datet']).clear()
mydriver.find_element_by_xpath(xpaths['Datet']).send_keys(data2)
mydriver.find_element_by_xpath(xpaths['Hourt']).clear()
mydriver.find_element_by_xpath(xpaths['Hourt']).send_keys(godzina2)
mydriver.find_element_by_xpath(xpaths['submitButton4']).click()
time.sleep(5)
mydriver.find_element_by_xpath(xpaths['submitButton5']).click()
#changing browser's windows
config_window = mydriver.window_handles[0]
asz=mydriver.switch_to_window(config_window)
#creating headers - they will be used is sessions
headers = {
"User-Agent":
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/53 "
"(KHTML, like Gecko) Chrome/15.0.87"
}
s = requests.session()
s.headers.update(headers)
#getting cookies from selenium to use them in session
for cookie in mydriver.get_cookies():
c = {cookie['name']: cookie['value']}
s.cookies.update(c)
#downloading a file
response = s.get('http://dssp.imgw.ad/tpp/exel.aspx',stream=True)
with open("/home/kamil/Desktop/maska/euro3.xls", 'wb') as fd:
fd.write(response.content)
mydriver.quit()
| true |
d9ed12cddf28f423ef6a31d42f51cea5fff1fb95 | Python | kj-lai/GroceryMarketCrawler | /redtick.py | UTF-8 | 3,984 | 2.546875 | 3 | [] | no_license |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
import pandas as pd
import numpy as np
start_time = time.time()
driver = webdriver.Chrome()
driver.switch_to_window(driver.current_window_handle)
driver.maximize_window()
driver.get('https://shop.redtick.com/')
category = driver.find_element_by_class_name('row-sm-height').find_elements_by_css_selector('.group.col-md-1.col-sm-2.col-sm-height.col-middle')
outputs = []
for stage1 in category[1:]:
stage1.click()
time.sleep(2)
main_cat = stage1.find_element_by_tag_name('h3').get_attribute('innerText')
next_stage1 = stage1.find_elements_by_class_name('col-sm-4')
for stage2 in next_stage1:
mid_cat = stage2.find_element_by_tag_name('h4').get_attribute('innerText')
next_stage2 = stage2.find_elements_by_class_name('sub')
for stage3 in next_stage2:
sub_url = stage3.get_attribute('href')
sub_cat = stage3.get_attribute('innerText')
outputs.append([main_cat, mid_cat, sub_cat, sub_url])
time.sleep(1)
driver.quit()
print('*** Process Completed ***')
data = pd.DataFrame(outputs, columns=['main_cat', 'mid_cat', 'sub_cat', 'sub_url'])
print('Dataframe shape:\t',data.shape)
data.to_csv('redtick_category.csv',index=False)
print(data.main_cat.unique())
wanted = ['Mid-Autumn Festival','Fresh Market','Chilled & Frozen','Bakery','Beverages','Alcohols']
data2 = data[data.main_cat.isin(wanted)].reset_index(drop=True)
print(data2.shape)
print(data2.main_cat.unique())
start_time = time.time()
driver = webdriver.Chrome()
driver.switch_to_window(driver.current_window_handle)
driver.maximize_window()
# driver.get('https://shop.redtick.com/')
item_datas = []
item_counts = []
for i, row in data2.iterrows():
driver.get(row.sub_url)
# driver.get('https://shop.redtick.com/store/en/instant-noodle-cupbowl')
while True:
item_counts.append([row.main_cat, row.mid_cat, row.sub_cat ,driver.find_element_by_css_selector('.count').text.split(' ')[0]])
item_list = driver.find_elements_by_css_selector('.col-content')
for item in item_list:
item_name = item.find_element_by_tag_name('h3').text
item_price = item.find_element_by_class_name('price').text.split(' ')[0][2:]
item_vol = item.find_element_by_class_name('unit').text
try:
old_price = item.find_element_by_class_name('price-strike').text.split(' ')[0][2:]
except:
old_price = '0.00'
try:
promo = item.find_element_by_class_name('visible-lg-inline-block').text
except:
promo = ''
item_datas.append([row.main_cat, row.mid_cat, row.sub_cat, item_name, item_price, item_vol, old_price, promo])
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(1)
        try:
            buttons = driver.find_element_by_class_name('pagination').find_elements_by_tag_name('a')
            if buttons[-1].get_attribute('rel') == 'next':
                buttons[-1].click()
            else:
                break
        except:
            break
time.sleep(1)
driver.quit()
print('*** Process Completed ***')
print('Time used:\t',time.time()-start_time)
item_df = pd.DataFrame(item_datas, columns=['main_cat','mid_cat','sub_cat','item_name','item_price','item_vol','item_old_price','promotion'])
print('Dataframe shape:\t',item_df.shape)
item_df.to_csv('20180922_redtick_database.csv',index=False)
print(item_df.head())
count_df = pd.DataFrame(item_counts, columns=['main_cat','mid_cat','sub_cat','counts'])
count_df.counts = count_df.counts.astype('int')
print(count_df.shape)
count_df.to_csv('redtick_subcat_count.csv',index=False)
| true
|
aa40c3edf679db7a75d59d986d08f1d86d98bd79
|
Python
|
e125743/ItemChecker
|
/MarginChecker.py
|
UTF-8
| 2,935
| 2.953125
| 3
|
[] |
no_license
|
# coding: UTF-8
import cv2
import numpy as np
import sys
def patternCut(filepath):
    #load the image in grayscale (cv2.imread flag 0)
    img = cv2.imread(filepath,0)
    #top-left pixel of the crop region
    #(the top-left pixel of the '無' field)
    x, y = 720, 4070
    #width and height of the crop in pixels
    #(the crop size for the '無' field)
    w = 775
    h = 150
    #perform the crop
    roi = img[y:y+h, x:x+w]
#for ro in roi:
# print(ro)
    #estimate the pixel column of the left vertical border of the field box
    leftPixel = -1
    #walk pixel rows from the top of the image downward
    for sh in range(h):
        #walk pixels from the left edge of the row to the right
        for sw in range(w):
            #trigger when the pixel is dark and, of the 50 pixels
            #below it, at least 40 are also dark
            if roi[sh, sw] < 100 and 255*10 > np.sum(roi[sh:sh+50, sw]):
                leftPixel = sw
                break
        #stop once leftPixel has been set
        if leftPixel != -1:
            break
    print(leftPixel)
    #estimate the pixel row of the bottom horizontal border of the field box
    underPixel = -1
    #walk pixel columns from the right edge of the image leftward
    for sw in range(w-1, 0, -1):
        #walk pixels from the bottom of the column upward
        for sh in range(h-1, 0, -1):
            #trigger when the pixel is dark and, of the 50 pixels
            #to its left, at least 40 are also dark
            if roi[sh, sw] < 100 and 255*10 > np.sum(roi[sh, sw-50:sw]):
                underPixel = sh
                break
        #stop once underPixel has been set
        if underPixel != -1:
            break
    print(underPixel)
    #step 10 px inward from the corner where the vertical and horizontal
    #borders meet, so the frame lines themselves are excluded from the crop
    patternRoi = roi[underPixel-85:underPixel-10, leftPixel+10:leftPixel+400]
    #extract the file name from filepath
filename = filepath.split('/')[-1]
#print(filename)
return filename, patternRoi
def marginChecker(filename, patternRoi):
    #baseline: the summed pixel values of an image without a circle mark,
    #as measured beforehand by testMargin.py
    nonCircle = 7078241
    saveDir = 'ResultData/'
    #trigger when the image's pixel sum falls below the baseline and the
    #gap exceeds the minimum circumference of a drawn circle
if abs(nonCircle - np.sum(patternRoi)) > 255*60*3*2 and nonCircle > np.sum(patternRoi):
saveDir += 'NoMove/'
else:
saveDir += 'Move/'
cv2.imwrite(saveDir + filename, patternRoi)
if __name__ == "__main__":
argvs = sys.argv
if (len(argvs) <= 1):
print('Usage: # python %s TestDataFilePath' % argvs[0])
quit()
filename, patternRoi = patternCut(argvs[1])
marginChecker(filename, patternRoi)
| true
|
63fa890be66010425650d457681f0ce515f7c025
|
Python
|
jptboy/NeuralNetwork
|
/aknet/mnist.py
|
UTF-8
| 1,887
| 2.6875
| 3
|
[] |
no_license
|
# from tensorflow.keras.datasets import mnist
# dataMnist = mnist.load_data()
# train, test = dataMnist
# inputs, targets = train
# import numpy as np
# testx, testy = test
# import numpy as np
# inputs1 = []
# trainx1 = []
# targets1 = []
# for i in range(len(inputs)):
# inputs1.append(inputs[i].flatten())
# for i in range(len(testx)):
# trainx1.append(testx[i].flatten())
# for i in range(len(targets)):
# foo = np.zeros(10)
# foo[targets[i]] = 1
# targets1.append(foo)
# inputs = np.array(inputs1)
# testx = np.array(trainx1)
# targets = np.array(targets1)
from train import train
from network import NeuralNetwork
from layer import LinearLayer, Sigmoid, Relu, Softmax, Tanh, LeakyRelu
from loss import MSE, CrossEntropy
from optimizer import MBGD
import numpy as np
from sklearn.datasets import load_digits
digits = load_digits()
inputs = digits.data
for x in inputs:
x /= 255
targets = []
for num in digits.target:
baz = np.zeros(10)
baz[num] = 1
targets.append(baz)
targets = np.array(targets)
from sklearn.model_selection import train_test_split
inputs,xtest,targets, ytest = train_test_split(inputs, targets, test_size = 0.2)
np.seterr(all='raise')
net = NeuralNetwork([
LinearLayer(inputSize=64, outputSize=16),
LeakyRelu(),
LinearLayer(inputSize=16, outputSize=10),
LeakyRelu(),
Softmax()
])
train(net, inputs, targets, loss= CrossEntropy(), num_epochs=600, optimizer=MBGD(learningRate=0.0001), showGraph=True)
net.serialize("serializedMNIST.json")
# net.loadParamsFromFile("/home/ayush/scratch/Net/aknet/serializedMNIST.json")
total = len(xtest)
correct = 0
for x, y in zip(xtest, ytest):
predicted = net.forward(x)
if np.argmax(predicted) == np.argmax(y):
        correct += 1
# plt.imshow(x.reshape((28,28)))
# plt.show()
print(np.argmax(predicted), np.argmax(y))
print(correct/total)
| true
|
7e6ff35a1e723705d1fd3f4b2d7f4e9a9590da2a
|
Python
|
tianxiongWang/codewars
|
/stringIncre.py
|
UTF-8
| 640
| 3.359375
| 3
|
[] |
no_license
|
def increment_string(string):
key = -1
num = ''
for i in range(len(string) - 1, -1, -1):
if ord(string[i]) < 48 or ord(string[i]) > 57:
key = i
break
num += string[i]
if key == len(string) - 1:
return string + '1'
if key != -1:
finallyNum = num[-1::-1]
length = len(finallyNum)
resultNum = str(int(finallyNum) + 1).zfill(length)
result = string[0:key+1] + resultNum
return result
else:
length = len(string)
result = str(int(string) + 1).zfill(length)
return result
print(increment_string('foo'))
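# A few extra checks (expected values worked out from the logic above):
print(increment_string('foo42'))   # -> foo43
print(increment_string('foo099'))  # -> foo100
print(increment_string('99'))      # -> 100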
| true
|
95d73f03165651e2ac097eae0e360ef9b06358e9
|
Python
|
Madsss11/IS-206
|
/opg5.py
|
UTF-8
| 454
| 3.5625
| 4
|
[] |
no_license
|
name = 'Mads Soeftestad'
age = 21
height = 186
weight = 92
eyes = 'blue'
teeth = 'white'
hair = 'Blonde'
print "Let's talk about %s." % name
print "He's %d centimeters tall." % height
print "He's %d kilos." %weight
print "Beautiful guy."
print "He's got %s eyes and %s hair." % (eyes, hair)
print "His teeth are usually white %s depending on the coffe and snus." % teeth
print "If i add %d, %d, and %d I get %d." % (
age, height, weight, age + height + weight)
| true
|
617c29c008b7a78bf4af6f60ddf9dc0f6659c929
|
Python
|
esdalmaijer/2017_Lund_Collaborative_short-term_memory_game
|
/experiments/short-term_memory_assessment/custom.py
|
UTF-8
| 15,701
| 3.078125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import math
import random
from constants import *
import pygaze
from pygaze.screen import Screen
from pygaze._misc.misc import pos2psychopos, rgb2psychorgb
import numpy
from psychopy.visual import Circle, GratingStim, ImageStim, Rect
# # # # #
# FUNCTIONS
def pol2car(cor, mode='degrees'):
"""Returns the cartesian coordinates corresponding to given polar coordinates
(see circle below)
0
315 45
270 90
225 135
180
arguments
cor -- a single or a list of (distance, angle) polar TUPLES
keyword arguments
mode -- inicates whether angles are in degrees or radians (default
= 'degrees')
returns
cor -- a single of a list of (x, y) Cartesian tuples
"""
# polar coordinate
if type(cor) == tuple:
cor = [cor]
# Cartesian coordinates
newcor = []
for dist, ang in cor:
if mode == 'degrees':
ang = math.radians(ang-90)
else:
ang = ang - math.radians(90)
x = (math.cos(ang) * float(dist)) + DISPSIZE[0]/2.0
y = (math.sin(ang) * float(dist)) + DISPSIZE[1]/2.0
newcor.append((int(x),int(y)))
# single tuple or list
if len(newcor) == 1:
newcor = newcor[0]
return newcor
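# Worked example (assuming DISPSIZE = (1024, 768), so the centre is (512, 384)):
# pol2car((100, 90)) takes cos/sin of radians(0), giving (512 + 100, 384) = (612, 384),
# i.e. a point 100 px right of the display centre, matching the circle diagram above.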
def calc_angular_dist(ori1, ori2, zero=360):
"""Calculates the angular distance between two angles (in DEGREES!),
each angle representing a point on an imaginary circle (default). Note
that the origin of the round distribution can be determined by setting the
'zero' keyword.
arguments
ori1 - first orientation(s) (the target orientation), in degrees
ori2 - second orientation (the response), in degrees
keyword arguments
zero - origin of the distribution (360 for a full circle, 180 for lines
along the diameter of a circle)
"""
if type(ori1) in [int,float, numpy.int, numpy.int16, numpy.int32, numpy.int64, numpy.float, numpy.float16, numpy.float32, numpy.float64]:
ori1 = [ori1]
elif type(ori1) == numpy.ndarray and ori1.shape == ():
ori1 = [ori1]
ori1 = numpy.array(ori1)
if type(ori2) in [int,float, numpy.int, numpy.int16, numpy.int32, numpy.int64, numpy.float, numpy.float16, numpy.float32, numpy.float64]:
ori2 = [ori2]
elif type(ori2) == numpy.ndarray and ori2.shape == ():
ori2 = [ori2]
ori2 = numpy.array(ori2)
# moduli after division
ori1 = ori1 % zero
ori2 = ori2 % zero
# calculate the difference
error = ori2 - ori1
# where the difference is larger then a clockwise 90 degree rotation, it
# should be counted as a counter-clockwise (negative) rotation
error[error>zero/2] = -1 * (zero - error[error>zero/2])
error[error<=-zero/2] = (zero + error[error<=-zero/2])
return error
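# Worked example: calc_angular_dist(350, 10) -> array([20]) (a 20-degree
# clockwise error), while calc_angular_dist(10, 350) -> array([-20]): raw
# differences beyond +/- zero/2 are wrapped to the shorter way round the circle.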
def generate_locs(nlocs, mindist, edgeprop=0.1):
"""Generates a list of (x,y) Cartesian coordinates in pixels that are
randomly distributed over the display, with a minimum distance between
them.
Arguments
nlocs - Integer that determines the number of orientations
to be generated.
mindist - Integer that determines the minimal Cartesian
distance between locations.
Keyword Arguments
edgeprop - Float that determines the proportion of the screen
around the edges that cannot be used to place stimulus
locations in.
"""
# Calculate the minimum and maximum coordinates.
xmin = int(0 + edgeprop * DISPSIZE[0])
xmax = int(DISPSIZE[0] - edgeprop * DISPSIZE[0])
ymin = int(0 + edgeprop * DISPSIZE[1])
ymax = int(DISPSIZE[1] - edgeprop * DISPSIZE[1])
# Generate an empty list of locations.
stimx = numpy.zeros(nlocs) * numpy.NaN
stimy = numpy.zeros(nlocs) * numpy.NaN
locs = []
# Keep trying to add a random location until the list is of the requested
# size, or until the number of attempts is OVER 9000!
attempt = 0
max_attempts = 9000
while len(locs) < nlocs:
# Update the attempt counter.
attempt += 1
if attempt > max_attempts:
raise Exception("ERROR in custom.generate_locs: Used to many attempts to generate %d locations with a minimal distance of %d" \
% (nlocs, mindist))
# Generate a random coordinate that is not near the centre.
tooclose = True
while tooclose:
x = random.randint(xmin, xmax)
y = random.randint(ymin, ymax)
if numpy.sqrt((DISPCENTRE[0]-x)**2 + (DISPCENTRE[1]-y)**2) > 0.1 * DISPCENTRE[0]:
tooclose = False
# If there is no location yet, add the current one.
if len(locs) == 0:
d = numpy.inf
# Calculate the distance between the new coordinate and all other
# coordinates.
else:
d = (stimx-x)**2 + (stimy-y)**2
d = numpy.sqrt(d[numpy.isnan(d)==False])
# Check whether the distance between the new coordinate and all other
# coordinates is large enough.
if numpy.min(d) > mindist:
i = len(locs)
stimx[i] = x
stimy[i] = y
locs.append((x,y))
return locs
def generate_oris(noris, mindist, zero=360):
"""Generates orientations with a certain minimal distance between them. The
generated orientations will be a list of integers.
Arguments
noris - Integer that determines the number of orientations
to be generated.
mindist - Integer that determines the minimal circular
distance between orientations.
Keyword Arguments
zero - Integer that determines the maximum value in the
circular space. If you're using a round space,
0 = 360, for example. Default value is 360.
Returns
oris - A list of integers between 0 and the maximum value
of your space (set by the zero keyword).
"""
# Generate an empty list to hold the orientations.
oris = []
# Keep trying to add a new random number, until the list is of the
# requested size.
attempt = 0
max_attempts = 9000
while len(oris) < noris:
# Generate a random number.
o = random.randint(0, zero-1)
# If there are no orientations yet, add the new one.
if oris == []:
oris.append(o)
# Check if the number is distant enough from all other numbers.
else:
# Calculate the distance between the new orientation and all
# existing orientations.
d = numpy.abs(calc_angular_dist(numpy.array(oris), o, zero=zero))
# Check whether the lowest distance is higher than the minimal
# distance.
if d.min() >= mindist:
oris.append(o)
else:
attempt += 1
            # Break if we used too many attempts.
            if attempt > max_attempts:
                raise Exception("ERROR in custom.generate_oris: Used too many attempts to generate %d orientations with a minimal distance of %d" \
% (noris, mindist))
return oris
# # # # #
# STIMULUS CLASS
class StimScreen:
"""Custom class for stimulus screens in this experiment."""
def __init__(self, nstim, locs, oris, linewidth=3, \
stimtypes='gabor', showcolourwheel=False):
"""Initialises a new StimScreen instance.
Arguments
nstim - Integer that determines the number of stimuli on
this screen.
locs - List of (x,y) tuples that determine the positions
of all stimuli. The list's length should be equal
to the number of stimuli as defined by nstim.
oris - List of integers that determine the orientations
of all stimuli. The list's length should be equal
to the number of stimuli as defined by nstim.
Keyword Arguments
linewidth - Integer or a list of integers that determines the
width of the lines around stimuli (in pixels).
Default value is 3.
stimtypes - String or a list of strings that determines the
type of stimulus. Options are 'gabor' and 'noise'.
Default is 'gabor'.
showcolourwheel- Boolean that determines whether a central colour
wheel should be drawn of not. Default is False.
"""
# Settings from the constants.
self._sf = float(STIMSF) / float(STIMSIZE)
self._alpha = STIMALPHA
self._contrast = STIMCONTRAST
# Noise texture.
self._noisetex = numpy.random.rand(STIMNOISERES,STIMNOISERES)
# Convert the linewidth to a list (if necessary).
if type(linewidth) in [int, float]:
linewidth = nstim * [int(linewidth)]
# Convert the stimulus types to a list (if necessary).
if type(stimtypes) in [str, unicode]:
stimtypes = nstim * [stimtypes]
# Create a Screen to use its wonderful drawing functions.
self.scr = Screen()
# Draw the fixation cross.
self.scr.draw_fixation(fixtype=FIXTYPE, diameter=FIXSIZE)
        # Draw the colour wheel
if showcolourwheel:
# Load the image.
self.scr.draw_image(CWIMG, scale=CWIMGSCALE)
# Create an internal list of stimuli (=PsychoPy stimulus instances) by
# copying it from the internal screen.
self.screen = self.scr.screen
# Keep a list of the index numbers of all stimuli. The indices refer
# to positions within the self.scr.screen list of PsychoPy stimuli.
self._stimindexnrs = []
self._outlineindexnrs = []
# Draw the stimuli.
for i in range(nstim):
# Add the stimulus' index number to the list of indices.
self._stimindexnrs.append(len(self.screen))
# # Create a new Rect stimulus instance.
# stim = Rect(pygaze.expdisplay, \
# pos=pos2psychopos(locs[i]), \
# fillColor=rgb2psychorgb(list(oris[i])), \
# lineColor=rgb2psychorgb(list(linecols[i])), \
# lineWidth=linewidth[i], \
# width=STIMSIZE, \
# height=STIMSIZE)
# Create a Gabor-ish GratingStim.
if stimtypes[i] == 'gabor':
tex = 'sin'
else:
tex = self._noisetex
stim = GratingStim(pygaze.expdisplay, \
pos=pos2psychopos(locs[i]), \
ori=oris[i], \
size=STIMSIZE, \
sf=self._sf, \
opacity=self._alpha, \
contrast=self._contrast, \
tex=tex, \
mask='circle', \
color=(1,1,1)
)
# Add the new stimulus to our list of stimuli.
self.screen.append(stim)
# Add an outline for the stimulus.
self._outlineindexnrs.append(len(self.screen))
stim = Circle(pygaze.expdisplay, \
pos=pos2psychopos(locs[i]), \
lineWidth=linewidth[i], \
radius=STIMSIZE//2, \
edges=32, \
closeShape=False, \
fillColor=None, \
lineColor=(0,0,0)
)
# Add the new stimulus to our list of stimuli.
self.screen.append(stim)
def update(self, locs, oris, linewidth=None, stimtypes=None):
"""Updates the locations, colours, line colours, and line widths of
this stimulus array.
Arguments
locs - List of (x,y) tuples that determine the positions
of all stimuli. The list's length should be equal
to the number of stimuli as defined by nstim.
oris - List of integers that determine the orientations
of all stimuli. The list's length should be equal
to the number of stimuli as defined by nstim.
Keyword Arguments
linewidth - Integer or a list of integers that determines the
width of the lines around stimuli (in pixels), or
None to leave the width as it is. Default value is
None.
stimtypes - String or a list of strings that determines the
type of stimulus. Options are 'gabor' and 'noise',
or None to not update.
Default is None.
"""
# Convert the linewidth to a list (if necessary).
if type(linewidth) in [int, float]:
linewidth = len(self._stimindexnrs) * [int(linewidth)]
# Convert the stimulus types to a list (if necessary).
if type(stimtypes) in [str, unicode]:
stimtypes = len(self._stimindexnrs) * [stimtypes]
# Loop through all stimuli.
# stimnr is a number between 0 and nstim
# stimindexnr refers to the index of a stimulus in self.screen
for stimnr, stimindexnr in enumerate(self._stimindexnrs):
# Update the stimulus location.
self.screen[stimindexnr].pos = pos2psychopos(locs[stimnr])
self.screen[self._outlineindexnrs[stimnr]].pos = pos2psychopos(locs[stimnr])
# # Update the stimulus colour.
# self.screen[stimindexnr].fillColor = rgb2psychorgb(list(oris[stimnr]))
# Update the stimulus orientation.
self.screen[stimindexnr].setOri(oris[stimnr])
# Update the stimulus line width and colour.
if linewidth != None:
self.screen[self._outlineindexnrs[stimnr]].lineWidth = linewidth[stimnr]
if linewidth[stimnr] == PROBELINEWIDTH:
self.screen[self._outlineindexnrs[stimnr]].lineColor = \
(1,-1,-1)
else:
self.screen[self._outlineindexnrs[stimnr]].lineColor = \
(0,0,0)
# Update the stimulus texture.
if stimtypes != None:
if stimtypes[stimnr] == 'gabor':
self.screen[stimindexnr].setTex('sin')
else:
self.screen[stimindexnr].setTex(self._noisetex)
| true
|
e8f2eadda03bf28dec092056c0ef2390d3c4c9f5
|
Python
|
hakusama1024/PCI_Report_Generator
|
/people.py
|
UTF-8
| 454
| 2.859375
| 3
|
[] |
no_license
|
class People(object):
def __init__(self, combi_name=None, status=None, sso=None, First_Name=None, Last_Name=None, bu=None, company=None):
self.combi_name = combi_name
self.sso = sso
self.First_Name = First_Name
self.Last_Name = Last_Name
self.bu = bu
self.status = status
self.company = company
def print_people(self):
print self.combi_name, "\t", self.status, "\t", self.company
| true
|
c2f1391ee6e1ce4aa1cca4a81314f323495dc655
|
Python
|
ProspePrim/PythonGB
|
/Lesson 5/task_5_2.py
|
UTF-8
| 985
| 4
| 4
|
[] |
no_license
|
#Create a text file (by hand, not programmatically), save several lines in it,
#then count the number of lines and the number of words in each line.
with open('test.txt', 'w') as my_f:
    my_l = []
    line = input('Enter some text \n')
    while line:
        my_l.append(line + '\n')
        line = input('Enter some text \n')
        if not line:
            break
    my_f.writelines(my_l)
with open('test.txt', 'r') as f:
    i = 0
    for el in f:
        i += 1
        print(f"Line {i}: {el}")
        count = len(el) - el.count("\n")
        print(f"Number of characters: {count}")
        print(f"Number of words in the line: {len(el.split())} ")
        print('_____________________________________')
    print('*'*20)
    print(f"Number of lines: {i}")
| true
|
0b76c5263cef809a07234d5e3841d9113b27b0a3
|
Python
|
angelxie2442/weibo2019
|
/webscraping_weibo/combinednlp.py
|
UTF-8
| 2,659
| 3.140625
| 3
|
[] |
no_license
|
import copy
import xmnlp
from snownlp import SnowNLP
# Replace some Chinese terms with English equivalents before running SnowNLP
def replacing(chartold):
chart = copy.deepcopy(chartold)
for index in range((len(chart))):
unece = (chart[index][3]).find('L')
if unece > 0:
chart[index][3] = (chart[index][3])[0:unece]
chart[index][0] = (chart[index][0]).replace("\n", "")
chart[index][3] = (chart[index][3]).replace("\n", "")
chart[index][3] = (chart[index][3]).replace("特朗普", "Trump")
chart[index][3] = (chart[index][3]).replace("推特", "Twitter")
chart[index][3] = (chart[index][3]).replace("收起全文d", "")
chart[index][3] = (chart[index][3]).replace("O网页链接", "")
chart[index][3] = (chart[index][3]).replace("秒拍视频", "")
chart[index][3] = (chart[index][3]).replace("发推", "tweets")
chart[index][3] = (chart[index][3]).replace("中兴", "ZTE")
chart[index][3] = (chart[index][3]).replace("华为", "Huawei")
chart[index][3] = (chart[index][3]).replace("【", "")
chart[index][3] = (chart[index][3]).replace("】", "")
chart[index][3] = (chart[index][3]).replace("...", ",")
chart[index][3] = (chart[index][3]).replace("#", "")
chart[index][3] = (chart[index][3]).replace("~", "")
return chart
def addscore(chart):
    # append three sentiment-score columns (xmnlp, SnowNLP, and their average) to the original 4-column chart
chart1=copy.deepcopy(chart)
for vvv in range(len(chart1)):
if len((chart1[vvv])[3])==0:
chart1[vvv].append("blank")
chart1[vvv].append("blank")
chart1[vvv].append("blank")
else:
x= (chart1[vvv])[3]
s= SnowNLP(chart1[vvv][3])
t1=xmnlp.sentiment(x)
t2=s.sentiments
t3=(t1+t2)/2
chart1[vvv].append(t1)
chart1[vvv].append(t2)
chart1[vvv].append(t3)
return chart1
def addfivecolnames(chart):
chart1=[]
chart1.append([])
chart1[0].append('time')
chart1[0].append('name')
chart1[0].append('type')
chart1[0].append('content')
chart1[0].append('scoreXM')
chart1[0].append('scoreSNOW')
chart1[0].append('scoreAVG')
for www in range(len(chart)):
chart1.append([])
chart1[www+1].append(chart[www][0])
chart1[www+1].append(chart[www][1])
chart1[www+1].append(chart[www][2])
chart1[www+1].append(chart[www][3])
chart1[www+1].append(chart[www][4])
chart1[www+1].append(chart[www][5])
chart1[www+1].append(chart[www][6])
return chart1
| true
|
a849cbdc1dc24878d511e52f264796f85aa21d4f
|
Python
|
Yu-python/python3-algorithms
|
/5. LeetCode/912.sort-an-array/3.py
|
UTF-8
| 2,504
| 4.5
| 4
|
[] |
no_license
|
"""
方法三:归并排序
解题思路:
「自底向上」使用迭代(iterative)的方式,实现归并排序算法
1. size = 1, 对仅包含单个元素的左右子序列进行合并操作
2. size = 2, 步骤1归并排序后,每2个元素已经是排序好的,所以可以对分别包含2个元素的左右子序列进行合并操作
3. size = 4, 步骤2归并排序后,每4个元素已经是排序好的,所以可以对分别包含4个元素的左右子序列进行合并操作
4. 如此反复执行,就能得到一个有序序列了
"""
from typing import List
from collections import deque
class Solution:
def sortArray(self, nums: List[int]) -> List[int]:
self.merge_sort(nums)
return nums
    def merge_sort(self, nums: List[int]):
        """Just keep producing the left/right subsequences to hand to merge(left, right)."""
        n = len(nums)
        size = 1  # size is the length of the left/right subsequences being merged, taking values 1, 2, 4, ..., n-1; at size = 1 both subsequences are single elements, hence sorted, and can be merged directly
        while size < n:  # size tops out at n - 1; if the loop stopped at size < n-1, the last element of an odd-length sequence would never get sorted (wrong!)
            low = 0  # low, mid, high are all indices
            while low + size < n:  # guarantees mid = low + size stays within bounds
                mid = low + size
                high = mid + size  # i.e. high = low + 2 * size
                left = nums[low:mid]
                right = nums[mid:high]  # slicing never reads index high itself, so high = low + 2 * size is safe even past the end
                merged = self.merge(left, right)  # merge the left and right subsequences
                nums[low:high] = merged  # write the merged, sorted run back into nums[low:high]
                low = high  # advance low to the right
            size = 2 * size  # bottom-up: pass 1 merges single elements into runs of 2, pass 2 merges runs of 2 into runs of 4, and so on
    def merge(self, left: List[int], right: List[int]) -> List[int]:
        """Use double-ended queues (deque) to simplify merging the left and right subsequences."""
        merged, left, right = [], deque(left), deque(right)
        while left and right:
            merged.append(left.popleft() if left[0] <= right[0] else right.popleft())  # deque popleft is also O(1)
        merged.extend(right if right else left)  # append whichever side still holds unmerged elements
return merged
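# Minimal usage sketch (outside LeetCode, which normally drives Solution itself):
#   nums = [5, 2, 3, 1]
#   print(Solution().sortArray(nums))  # -> [1, 2, 3, 5]
# Bottom-up trace for this input: size=1 merges (5)(2) and (3)(1) giving
# [2, 5, 1, 3]; size=2 merges [2, 5] with [1, 3] giving [1, 2, 3, 5].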
| true
|
bb13db54669f9e82b44f6188b914e105e8fcb2f5
|
Python
|
adampann/data_compression
|
/PA1/coding.py
|
UTF-8
| 1,886
| 3.390625
| 3
|
[] |
no_license
|
# def solution(D, A):
# n = list(A) #output list
# failure_track = [-1] * len(A)
# for index in range(len(A)):
# #print('index: ', index, '| ', end='')
# current = A[index]
# for depth in range(D):
# if current != -1:
# #print(current, ' ', end='')
#
# n[index] = current
# current = A[current]
#
# else:
# n[index] = -1
# failure_track[index] = depth
# break
#
# return n
# x = [-1, 0,4,2,1]
# print(solution(3, x))
#print([0] * len(x))
class Node:
def __init__(self, value=None, childL=None, childR=None):
self.value = value
self.childL = childL
self.childR = childR
def addChild(self, key):
if key < self.value:
self.childL = Node(key)
else:
self.childR = Node(key)
def __getleft__(self):
return self.childL
    def __getright__(self):
return self.childR
class binaryTree:
def __init__(self, list):
self.head = Node(list[0])
for item in list[1:]:
current = self.head
while current != None:
if item < current.value:
if current.childL != None:
current = current.childL
else:
current.childL = Node(item)
else:
if current.childR != None:
current = current.childR
else:
current.childR = Node(item)
def findClosest(self, house):
current_node = self.head
closest = current_node
best_distance = float("inf")
while current_node != None:
if current_node.value == house:
return current_node.value
            elif abs(current_node.value - house) < best_distance:
closest = current_node
best_distance = abs(current_node.value - house)
if house < current_node.value:
current_node = current_node.childL
else:
current_node = current_node.childR
return closest.value
def solution(stores, houses):
n =[]
tree = binaryTree(stores)
for house in houses:
n.append(tree.findClosest(house))
return n
print(solution([1,5,20,11,16],[5,10,17]))
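# Expected output of the call above: [5, 11, 16] (5 is an exact match,
# 11 is the tree node nearest to 10, and 16 the node nearest to 17).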
| true
|
6b6b1ab7b5c8b477698f7354122fe6235676e4de
|
Python
|
guome/BrailleOCR
|
/data/generate_dataset.py
|
UTF-8
| 6,196
| 2.71875
| 3
|
[] |
no_license
|
import cv2
import argparse
import numpy as np
import os
import glob
import sys
from PIL import ImageFont, ImageDraw, Image
from pathlib import Path
# a function to draw training images
def draw_image(textlist, TLGAN_save_path, CRNN_save_path):
# make sure that the save path exists
Path(TLGAN_save_path).mkdir(parents=True, exist_ok=True)
Path(CRNN_save_path).mkdir(parents=True, exist_ok=True)
full_img = np.zeros((0, 256, 3), dtype=np.uint8)
bounding_boxes = []
crnn_dataframe = {}
for text, i in zip(textlist, range(len(textlist))):
if len(text) <= 10:
img = np.zeros((60, 256, 3), dtype=np.uint8)
img_pil = Image.fromarray(img)
draw = ImageDraw.Draw(img_pil)
draw.text((30, 20), text, font=font, fill=(b, g, r))
#draw CRNN image
cv2.imwrite(os.path.join(CRNN_save_path, str(len(os.listdir(CRNN_save_path))) + ".jpg"), np.array(img_pil))
crnn_dataframe.update({str(len(os.listdir(CRNN_save_path))-1) + ".jpg": text})
full_img = np.concatenate([full_img, img_pil], axis=0)
xmin = 30
xmax = xmin + 20*len(text)
ymin = 20 + 60*i
ymax = ymin+20
bounding_boxes.append([xmin, xmax, ymin, ymax])
else:
full_img = np.concatenate([full_img, np.zeros((60, 256, 3))], axis=0)
    # make sure that all the images end up the same size
if len(textlist) < args.n_text:
full_img = np.concatenate([full_img, np.zeros((60*(args.n_text-len(textlist)), 256, 3),)], axis=0)
# check that images are 256X256
if full_img.shape[0] < 256:
full_img = np.concatenate([full_img, np.zeros(((256-full_img.shape[0]), 256, 3))], axis=0)
cv2.imwrite(os.path.join(TLGAN_save_path, str(len(os.listdir(TLGAN_save_path))) + ".jpg"), full_img)
return {str(len(os.listdir(TLGAN_save_path))-1) + ".jpg": bounding_boxes}, crnn_dataframe
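# The bounding-box arithmetic above assumes roughly 20 px per character at
# font size 20: a 5-character line spans x in [30, 130] and y in [20 + 60*i, 40 + 60*i].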
def writeCSV(TLGAN_dataframe, CRNN_dataframe, TLGAN_csv_filename="TLGAN.csv", CRNN_csv_filename="CRNN.csv"):
with open(TLGAN_csv_filename, "w") as f:
cnt = 0
for filename in TLGAN_dataframe.keys():
print(f"writing CSV ... {cnt}/{len(TLGAN_dataframe.keys())} done")
f.write(filename)
for bb in TLGAN_dataframe[filename]:
for axis in bb:
f.write(","+str(axis))
f.write("\n")
cnt += 1
with open(CRNN_csv_filename, 'w', encoding="utf-8") as f:
cnt = 0
for filename in CRNN_dataframe.keys():
print(f"writing CSV ... {cnt}/{len(CRNN_dataframe.keys())} done")
f.write(filename)
for label in CRNN_dataframe[filename]:
f.write(","+str(label))
f.write("\n")
cnt += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Convert text to image file")
parser.add_argument("--text_file_path", type=str, default="./")
parser.add_argument("--text_file_name", type=str, default="crawling.txt")
parser.add_argument("--TLGAN_save_path", type=str, default="./images/TLGAN")
parser.add_argument("--CRNN_save_path", type=str, default="./images/CRNN")
parser.add_argument("--n_text", type=int, default=4)
parser.add_argument("--simple", type=bool, default=True)
parser.add_argument("--n_simple", type=int, default=50)
args = parser.parse_args()
n_text = args.n_text
tlgan_csv = {}
crnn_csv = {}
is_simple = args.simple
n_simple = args.n_simple
if os.path.exists(args.TLGAN_save_path):
for file in glob.glob(args.TLGAN_save_path+"/*"):
os.remove(file)
if os.path.exists(args.CRNN_save_path):
for file in glob.glob(args.CRNN_save_path + "/*"):
os.remove(file)
if is_simple:
with open(os.path.join(args.text_file_path, args.text_file_name), 'r', encoding="utf-8") as textFile:
lines = textFile.read().split("\n")[:n_simple]
b, g, r = 255, 255, 255
fontpath = "fonts/H2GTRM.TTF"
font = ImageFont.truetype(fontpath, 20)
if len(lines) <= n_text:
tlgan, crnn = draw_image(lines, args.TLGAN_save_path, args.CRNN_save_path)
tlgan_csv.update(tlgan)
crnn_csv.update(crnn)
else:
for i in range(0, len(lines), n_text):
print(f"writing images ... {i}/{len(lines)} done")
if i + n_text >= len(lines):
tlgan, crnn = draw_image(lines[i:], args.TLGAN_save_path, args.CRNN_save_path)
tlgan_csv.update(tlgan)
crnn_csv.update(crnn)
else:
tlgan, crnn = draw_image(lines[i:i + n_text], args.TLGAN_save_path, args.CRNN_save_path)
tlgan_csv.update(tlgan)
crnn_csv.update(crnn)
writeCSV(tlgan_csv, crnn_csv)
else:
with open(os.path.join(args.text_file_path, args.text_file_name), 'r', encoding="utf-8") as textFile:
lines = textFile.read().split("\n")
b, g, r = 255, 255, 255
fontpath = "fonts/H2GTRM.TTF"
font = ImageFont.truetype(fontpath, 20)
if len(lines) <= n_text:
tlgan, crnn = draw_image(lines, args.TLGAN_save_path, args.CRNN_save_path)
tlgan_csv.update(tlgan)
crnn_csv.update(crnn)
else:
for i in range(0, len(lines), n_text):
print(f"writing images ... {i}/{len(lines)} done")
if i+n_text >= len(lines):
tlgan, crnn = draw_image(lines[i:], args.TLGAN_save_path, args.CRNN_save_path)
tlgan_csv.update(tlgan)
crnn_csv.update(crnn)
else:
tlgan, crnn = draw_image(lines[i:i+n_text], args.TLGAN_save_path, args.CRNN_save_path)
tlgan_csv.update(tlgan)
crnn_csv.update(crnn)
writeCSV(tlgan_csv, crnn_csv)
| true
|
3e9d1c9e0a92ac3be3123ee0a677dcc8ff64befc
|
Python
|
jackocoo/PoetrySlam
|
/stanza.py
|
UTF-8
| 9,183
| 3.171875
| 3
|
[] |
no_license
|
from Phyme import Phyme
from profanityfilter import ProfanityFilter
#import nltk
import random
import os
import sys
import nltk
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
from nltk.corpus import wordnet as wordnet
"""
This function takes a single word and returns a dictionary of dictionaries of "associated" words.
The dictionary returned has keys which are perfect rhymes of the starting word. Each key has a value which
is a list of words which are hypernyms or hyponyms of the key. Additionally, the key "antonyms"
refers to a list of antonyms to the starting word.
"""
def get_word_web(start_word):
ph = Phyme()
perfect_rhymes = ph.get_perfect_rhymes(start_word)
pf = ProfanityFilter()
#gets one syllable perfect rhymes
if 1 in perfect_rhymes:
list_single_rhymes = perfect_rhymes[1]
else:
list_single_rhymes = perfect_rhymes[2]
word_web = {}
antonyms = []
for word in list_single_rhymes:
if pf.is_clean(word):
syns = wordnet.synsets(word)
associated_words = {}
associated_words["v"] = [] #verb
associated_words["n"] = [] #noun
associated_words["s"] = []
associated_words["a"] = [] #adjective
associated_words["r"] = []
associated_words["i"] = []
for l in syns:
arr = l.name().split(".")
if len(arr[1]) == 1:
results = associated_words[arr[1]]
results.append(l.lemmas()[0].name())
associated_words[arr[1]] = results
if len(l.hypernyms()) > 0:
for hyp in l.hypernyms():
arr = hyp.name().split(".")
if len(arr[1]) == 1:
results = associated_words[arr[1]]
results.append(hyp.lemmas()[0].name())
associated_words[arr[1]] = results
if len(l.hyponyms()) > 0:
for hyp in l.hyponyms():
arr = hyp.name().split(".")
if len(arr[1]) == 1:
results = associated_words[arr[1]]
results.append(hyp.lemmas()[0].name())
associated_words[arr[1]] = results
for syn in l.lemmas():
if syn.antonyms():
antonyms.append(syn.antonyms()[0].name())
word_web[word] = associated_words
word_web["antonyms"] = antonyms
return word_web
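# Shape of the result (illustrative, for a hypothetical start_word "light"):
# each clean one-syllable rhyme such as "night" maps to a dict of part-of-speech
# keys ('v', 'n', 'a', ...) holding related words, while word_web["antonyms"]
# pools antonyms gathered from the rhyme words' synsets.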
"""
This function is what generates new lines for the poem. It is given a word_web
(the dictionary) and the specific key with which it is generating the line according to.
The lines follow a formt which is an article, a noun, a verb, a preposition, and the specific key.
In instances where there are not words in the dictionary to choose from, the system chooses a
generic noun/verb randomly from a list.
"""
def make_new_line(dictionary, word):
v_set = set()
n_set = set()
vowels = {"a", "A", "e", "E", "i", "I", "o", "O", "u", "U"}
random_verbs = ["is", "are", "go", "goes", "has", "have", "can", "could", "may"]
random_nouns = ["it", "they", "he", "she", "one"]
preps = ["to", "for", "from", "of", "before", "after", "beside", "above", "through", "by", "with"]
verb = ""
noun = ""
prep = ""
adj = ""
associated_verbs = dictionary[word]["v"]
associated_nouns = dictionary[word]["n"]
associated_adj = dictionary[word]["a"]
#shuffle to ensure new orders each time this is called for a specific key
random.shuffle(associated_nouns)
random.shuffle(associated_verbs)
random.shuffle(associated_adj)
#reflects whether or not a generic verb or noun was selected
generic_verb = False
generic_noun = False
if len(associated_adj) > 0:
index = random.randrange(0, len(associated_adj), 1)
adj = associated_adj[index]
if len(associated_verbs) == 0:
index = random.randrange(0, len(random_verbs), 1)
generic_verb = True
verb = random_verbs[index]
else:
index = random.randrange(0, len(associated_verbs), 1)
verb = associated_verbs[index]
if len(associated_nouns) == 0:
index = random.randrange(0, len(random_nouns), 1)
generic_noun = True
noun = random_nouns[index]
else:
index = random.randrange(0, len(associated_nouns), 1)
noun = associated_nouns[index]
index = random.randrange(0, len(preps), 1)
prep = preps[index]
#some of the words in the library have underscores instead of spaces
verb = verb.replace("_", " ")
noun = noun.replace("_", " ")
if len(associated_adj) > 0:
noun = adj + " " + noun
article = ""
if generic_noun == False:
char = noun[0]
if char in vowels:
article = "an"
else:
article = "a"
if len(article) == 0:
result = noun + " " + verb + " " + prep + " " + word
else:
result = article + " " + noun + " " + verb + " " + prep + " " + word
if generic_verb == True and generic_noun == True:
return ""
return result
"""
This function calls make_new_line() for each of the keys in the word_web dictionary.
This gives a line to each synonym for the original starting word.
"""
def print_lines(web):
lines = []
keys = []
for key in web.keys():
if key != "antonyms":
keys.append(key)
#done to ensure randomness of order
random.shuffle(keys)
count = 0
for key in keys:
string = make_new_line(web, key)
if len(string) > 0:
lines.append(string)
count += 1
if count > 11:
return lines
return lines
"""
This function finds the antonym in the list that is least semantically similar to the
given word. It does this by using wordnet's wup_similarity() function which finds a path
from one word to another through synonyms (if possible).
"""
def find_best_antonym(ant_list, word):
best = 100;
best_word = ""
original = wordnet.synsets(word)
original_syn = wordnet.synset(original[0].name()) #the word we have been given
for ant in ant_list:
if ant != word:
w1 = wordnet.synsets(ant)
antonym_to_compare = wordnet.synset(w1[0].name()) #the antonym we are comparing it to
score = original_syn.wup_similarity(antonym_to_compare)
if score != None and score < best:
best = score
best_word = w1[0].name()
return best_word.split(".")[0]
"""
This function takes two lists of lines and combines them into a poem. This function creates
12 lines which are comprised of two smaller lines, one from each list. The lines are combined using
the word "but" because the lines are created using (loose) antonyms and should have opposing themes.
A list of the 12 final lines is returned.
"""
def synthesize_antonym_components(list1, list2):
result = []
long_one = []
short_one = []
if len(list1) >= len(list2):
long_one = list1
short_one = list2
else:
long_one = list2
short_one = list1
    count = 0
for i in range(0, len(short_one)):
string = short_one[i] + " but " + long_one[i]
result.append(string)
count += 1
if count > 11:
break
return result
"""
This function evaluates and scores a poem on certain criteria. Poems that have greater alliteration,
greater semantic similarities between the first and second parts of each line, respectively, and
greater semantic differences between the first and second halves of each line will receive higher scores.
The final score is simply a sum of each of the subcategories.
"""
def evaluate_poem(poem_lines):
alliteration_score = 0
contradiction_score = 0
similarity_score = 0
for line in poem_lines:
first_half = line.split("but")[0]
second_half = line.split("but")[1]
split_words = line.split(" ")
letter_set = set()
for word in split_words:
letter_set.add(word[0].lower())
alliteration_score += 12 - len(letter_set) #higher score when first letters are repeated
return alliteration_score
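# Scoring arithmetic: a line whose words start with only 3 distinct letters adds
# 12 - 3 = 9 points, so heavier alliteration scores higher. Note the returned
# score is the alliteration component alone; the contradiction and similarity
# terms are initialised above but not yet folded into the total.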
"""
This function puts together the above functions, taking an input word as the starting word,
making the word_web associated with it and its best antonym, then synthesizing the lines, and
evaluating the poem. The poem will regenerate from the same web until it reaches a score above
a certain threshold. Then the function will return that poem
"""
def make_poem(starting_word):
start_web = get_word_web(starting_word)
start_lines = print_lines(start_web)
antonym = find_best_antonym(start_web["antonyms"], starting_word)
if len(antonym) == 0:
antonym = "dark"
title = starting_word + " vs. " + antonym
ant_web = get_word_web(antonym)
ant_lines = print_lines(ant_web)
total_lines = synthesize_antonym_components(start_lines, ant_lines)
score = evaluate_poem(total_lines)
count = 0
while score < 50:
start_lines = print_lines(start_web)
ant_lines = print_lines(ant_web)
total_lines = synthesize_antonym_components(start_lines, ant_lines)
score = evaluate_poem(total_lines)
count += 1
        if count > 10:
score = 50
print("score is " + str(score))
total_lines.append(title)
return total_lines
"""
This function calls the function to make the poem with the given word, then prints the poem
and reads the poem aloud.
"""
def main():
word = sys.argv[1]
poem = make_poem(word)
title = poem[len(poem) - 1]
count = 0
print(" " + title + "\n")
full_string = ""
for line in poem:
if line == title:
break
print(line)
full_string += line + "." + "\n"
count += 1
if count % 4 == 0:
print()
print()
full_string = "'" + full_string + "'"
os.system("say " + full_string)
main()
| true
|
cfd901290958322f5c3ea8446497b2ab9e8c2ac7
|
Python
|
PWDAVENPORTJENKINS/GEL_BRUTE
|
/brute_force_method.py
|
UTF-8
| 5,118
| 2.984375
| 3
|
[] |
no_license
|
"""
Created on Mon May 27 12:45:18 2019
P. W. Davenport-Jenkins
University of Manchester
MSc Econometrics
"""
from numba import jit
import scipy.optimize as optimize
from functools import partial
import scipy.stats as stats
import numpy as np
NO_PMC = 6
def make_grid(anchor, mesh_size, size):
mu = anchor[0]
sigma = anchor[1]
mu_upper = [mu + mesh_size * i for i in range(size)]
mu_lower = [mu - mesh_size * (1+i) for i in range(size)]
mu_array = list(reversed(mu_lower)) + mu_upper
sigma_upper = [sigma + mesh_size * i for i in range(size)]
sigma_lower = [sigma - mesh_size * (1+i) for i in range(size)]
sigma_array = list(reversed(sigma_lower)) + sigma_upper
return [mu_array, sigma_array]
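# Worked example: make_grid([3, 5], 0.005, 2) ->
# [[2.99, 2.995, 3.0, 3.005], [4.99, 4.995, 5.0, 5.005]],
# a symmetric mesh of 2*size points per parameter around the anchor.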
# An m vector of moment restrictions
# defined by the user.
def moment_conditions_matrix(beta, z):
"""
There are m moment restrictions.
There are N data points, called z.
For each data point the m moment restrictions are computed.
The result is an N x m matrix with each column representing
a different moment restriction.
"""
# In this example we do the normal distribution with location 3 and
# scale 5.
# central moments are defined as E[(z-mu)^k]
mu = beta[0]
sigma = beta[1]
moments_matrix = np.concatenate(
(
z - mu,
sigma**2 - (z - mu)**2,
(z - mu)**3,
3 * sigma**4 - (z - mu)**4,
(z - mu)**5,
15 * sigma**6 - (z - mu)**6
),
axis=1
)
return moments_matrix
# This could perhaps be done more efficiently.
# Einstein notation?
def GMM_weighting_matrix(moment_conditions_matrix):
number_of_restrictions = len(moment_conditions_matrix[0])
sample_size = len(moment_conditions_matrix)
omega = np.zeros((number_of_restrictions, number_of_restrictions))
for moment_vector in moment_conditions_matrix:
omega = omega + np.outer(moment_vector, moment_vector)
return np.linalg.inv(omega/sample_size)
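# Standard first-step GMM weighting: W = [ (1/N) * sum_i g_i g_i' ]^{-1},
# the inverse of the sample outer-product matrix of the moment conditions.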
def GMM_objective_function(x, sample, weighting_matrix):
sample_moments_vector = moment_conditions_matrix(x, sample).mean(axis=0)
return sample_moments_vector.T @ weighting_matrix @ sample_moments_vector
def GMM(beta_initial, sample, number_of_moment_conditions):
# First Stage: calculate initial beta
beta_1 = optimize.minimize(
GMM_objective_function,
beta_initial,
args=(sample, np.identity(number_of_moment_conditions)),
method="BFGS"
)
beta_1 = beta_1.x
# Use this value to compute the optimal weighting matrix
weighting_matrix = GMM_weighting_matrix(
moment_conditions_matrix(beta_1, sample)
)
    # Second stage: use the optimal weighting matrix to compute the 2S-GMM
# estimator of beta
beta_2 = optimize.minimize(
GMM_objective_function,
beta_1,
args=(sample, weighting_matrix),
method="BFGS"
)
return beta_2.x
def ET(value):
return -np.exp(value)
def ET_cost_function(moments, x):
return -np.mean(ET(moments @ x))
def GEL_ET(sample, beta, number_of_moment_conditions):
"""
Here data_matrix is a 1000 x 6 matrix
"""
moments_matrix = moment_conditions_matrix(beta, sample)
# Small \lambda gives small \nu. Near 0 should be feasible.
initial_params = np.zeros(number_of_moment_conditions)
cost = partial(ET_cost_function, moments_matrix)
result = optimize.minimize(
cost,
initial_params,
method="Nelder-Mead",
options={'maxiter': 1000}
)
return result.x
@jit
def brute_force(sample, beta_grid, number_of_moment_conditions):
mu_list = beta_grid[0]
sigma_list = beta_grid[1]
mu_size = len(mu_list)
sigma_size = len(sigma_list)
lambda_dictionary = dict()
for i in range(mu_size):
for j in range(sigma_size):
beta = [mu_list[i], sigma_list[j]]
lambda_beta = GEL_ET(sample, beta, number_of_moment_conditions)
lambda_dictionary[tuple(beta)] = lambda_beta
objective_dict = dict()
for i in range(mu_size):
for j in range(sigma_size):
beta = [mu_list[i], sigma_list[j]]
moments = moment_conditions_matrix(beta, sample).mean(axis=0)
lambda_beta = lambda_dictionary[tuple(beta)]
value = -ET_cost_function(moments, lambda_beta)
objective_dict[tuple(beta)] = value
return objective_dict, lambda_dictionary
normal_random_variable = stats.norm(3, 5)
# generate n random values from above distribution
sample = normal_random_variable.rvs(size=(100, 1))
initial_beta = [2, 4]
beta_GMM = GMM(initial_beta, sample, NO_PMC)
# increasing the last input parameter significantly increases cost
beta_grid = make_grid(beta_GMM, 0.005, 10)
dict_to_max, lambda_dictionary = brute_force(sample, beta_grid, NO_PMC)
min_objective = min(dict_to_max.values())
for key, value in dict_to_max.items():
if value == min_objective:
beta_GEL = key
lambda_GEL = lambda_dictionary[beta_GEL]
| true
|
03a443e724195a9a66cf309d574aa0b95ad09dd9
|
Python
|
ABLE-KU-Leuven/LISSA-Dashboard
|
/scripts/VisualisatiePaper/visualiseConversation.py
|
UTF-8
| 3,861
| 2.546875
| 3
|
[] |
no_license
|
import plotly
import plotly.plotly as py
import plotly.figure_factory as ff
import os
import datetime
plotly.tools.set_credentials_file(username='martijn.millecamp', api_key='TYJyoUHlfhRpDa8CZUMU')
pathJasper = "/Users/martijn/Documents/Able/Data/evaluationJasper"
pathBart = "/Users/martijn/Documents/Able/Data/evaluationBart"
colors = {'Student': 'rgb(239,138,98)',
'SA': 'rgb(103,169,207)',
'Stop': 'rgb(153,153,153)'}
def parseDate(ms):
ms = int(ms)
date = datetime.datetime.fromtimestamp(ms/1000)
return date
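# Note: 82800000 ms = 23 hours; the offsets below shift every session so it
# starts at 23:00 of the epoch day, putting all runs on one comparable timeline.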
def parseLine(line):
    # return the timestamp (ms) as int and the logged key value
values = line.split()
return [int(values[3]), values[4]]
def writeToDict(startTime, endTime, name, resource):
df.append(dict(Task=name, Start= startTime, Finish= endTime, Resource=resource))
def visualiseStudent(path, name):
# Open file
f = open(path, 'r')
firstLine = f.readline()
# get the first time and value
[startSessionTime,value] = parseLine(firstLine)
student = False
startTime = parseDate(startSessionTime)
totalTime = 0
for line in f:
[time, value] = parseLine(line)
if value == 's':
if not student:
student = True
startTime = time - startSessionTime + 82800000
startTime = parseDate(startTime)
else:
if student:
endTime = time - startSessionTime + 82800000
endTime = parseDate(endTime)
writeToDict(startTime, endTime, name, "Student")
elapsedTime = endTime - startTime
diff = elapsedTime.total_seconds()
totalTime += diff
student = False
endSessionTime = parseDate(time - startSessionTime +82800000)
endStopbar = parseDate(time - startSessionTime + 10000 +82800000)
df.append(dict(Task=name, Start= endSessionTime, Finish= endStopbar, Resource="Stop"))
totalSessionTime = endSessionTime - parseDate(82800000)
totalSessionTimeS = totalSessionTime.total_seconds()
timeUse.append(totalTime)
percentageUse.append(round(totalTime/totalSessionTimeS,2))
def visualiseSA(path, name):
# Open file
f = open(path, 'r')
firstLine = f.readline()
# get the first time and value
[startSessionTime,value] = parseLine(firstLine)
sa = False
startTime = parseDate(startSessionTime)
totalTime = 0
for line in f:
[time, value] = parseLine(line)
if value == 'b' or value == 'j':
if not sa:
sa = True
startTime = time - startSessionTime + 82800000
startTime = parseDate(startTime)
else:
if sa:
endTime = time - startSessionTime + 82800000
endTime = parseDate(endTime)
writeToDict(startTime, endTime, name, "SA")
elapsedTime = endTime - startTime
diff = elapsedTime.total_seconds()
totalTime += diff
sa = False
    # derive the session end from the last parsed timestamp (endSessionTime
    # is not otherwise defined in this function; this mirrors visualiseStudent)
    endSessionTime = parseDate(time - startSessionTime + 82800000)
    totalSessionTime = endSessionTime - parseDate(82800000)
totalSessionTimeS = totalSessionTime.total_seconds()
timeUseSA.append(totalTime)
percentageUseSA.append(round(totalTime/totalSessionTimeS,2))
df = []
timeUse = []
percentageUse = []
timeUseSA = []
percentageUseSA = []
for file in os.listdir(pathJasper):
if not file.startswith('.'):
visualiseStudent( pathJasper + '/'+ file, file + "SA1")
# visualiseSA( pathJasper + '/'+ file, file + "SA1")
for file in os.listdir(pathBart):
if not file.startswith('.'):
visualiseStudent(pathBart + '/'+ file, file + "SA2")
# visualiseSA(pathBart + '/'+ file, file + "SA2")
print percentageUse
fig = ff.create_gantt(df, colors=colors, index_col='Resource', show_colorbar=True, group_tasks=True)
plotly.offline.plot(fig)
| true
|
9bdc8a310d9e355c6b752c8c77046046d4816778
|
Python
|
alistair-clark/project-euler
|
/problem15.py
|
UTF-8
| 2,384
| 4.21875
| 4
|
[] |
no_license
|
'''
Project Euler
Problem #15: Lattice paths
Starting in the top left corner of a 2×2 grid,
and only being able to move to the right and down,
there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
Date: May 17, 2019
'''
class Nodes(object):
def __init__(self,name):
"""Assumes name is a string"""
self.name = name
def getName(self):
return self.name
def __str__(self):
return self.name
class Edge(object):
def __init__(self, src, dest):
"""Assumes src and dest are nodes"""
self.src = src
self.dest = dest
def getSource(self):
return self.src
def getDestination(self):
return self.dest
def __str__(self):
return self.src.getName() + '->' + self.dest.getName()
class Digraph(object):
# nodes is a list of the nodes in the graph
# edges is a dict mapping each node to a list of its children
def __init__(self):
self.nodes = []
self.edges = {}
def addNode(self, node):
if node in self.nodes:
raise ValueError('Duplicate node')
else:
self.nodes.append(node)
self.edges[node] = []
def addEdge(self, edge):
src = edge.getSource()
dest = edge.getDestination()
if not (src in self.nodes and dest in self.nodes):
raise ValueError('Node not in graph')
else:
self.edges[src].append(dest)
def childrenOf(self, node):
return self.edges[node]
def hasNode(self, node):
return node in self.nodes
def __str__(self):
result = ''
for src in self.nodes:
for dest in self.edges[src]:
result = result + src.getName() + '->' + dest.getName() + '\n'
return result[:-1] # omit final newline
def printPath(path):
    """Assumes path is a list of nodes"""
    result = ''
    for i in range(len(path)):
        result = result + str(path[i])
        if i != len(path) - 1:
            result = result + '->'
    return result
def BFS(graph, start, end, path):
    # TODO: breadth-first search from start to end over the grid
    pass
def makeGrid(graph, length, width):
    graph.nodes = []
    graph.edges = {}
    for i in range(length+1):
        for j in range(width+1):
            # register nodes via addNode so the edges dict stays consistent
            graph.addNode(Nodes(str(i) + "," + str(j)))
grid = Digraph()
makeGrid(grid, 2, 2)
print(grid)
| true
|
4e30c77ca44905d2382888803ce6b310f822f316
|
Python
|
d0coat01/logs-analysis-project
|
/Reports.py
|
UTF-8
| 4,476
| 3
| 3
|
[
"MIT"
] |
permissive
|
import psycopg2 as psql
import sys
class Reports:
"""Movie class initializes with a title, image url, and trailer url"""
def connect(self, database_name):
"""Connect to the PostgreSQL database. Returns a database connection."""
try:
db = psql.connect("dbname={}".format(database_name))
return db
except psql.Error as e:
print("Unable to connect to database")
# THEN perhaps exit the program
sys.exit(1) # The easier method
# OR perhaps throw an error
# raise e
# If you choose to raise an exception,
# It will need to be caught by the whoever called this function
    def exec_query(self, query):
        db = self.connect("news")
        cursor = db.cursor()
        cursor.execute(query)
        results = cursor.fetchall()
        cursor.close()
        db.close()
        return results
def print_report(self):
"""Create a text-based report."""
# Used (\n) for line separations instead of triple quotes for easy-to-read code.
print(("\n"
"**********************************\n"
"* Analysis of Newspaper Articles *\n"
"**********************************\n"))
        # Printed each function separately so you wouldn't have to wait for all queries to finish before seeing results.
print(self.get_top_articles())
print(self.get_top_authors())
print(self.get_days_with_errors())
print("**********************************\n"
"* End of Analysis *\n"
"**********************************\n")
def get_top_articles(self):
"""Get the top 3 articles based on their occurrences in article_log"""
# View alert: Used a view called article_log.
# article_log is every log whose path contains "/articles/".
# The path in article_log is changed to reflect an article slug when possible.
# We can join on article_log and article now to get the top 3 articles.
query = ("select a.title, count(l.article_slug) as views\n"
"from articles a\n"
"left join article_log l on a.slug=l.article_slug\n"
"group by a.title\n"
"order by views DESC\n"
"LIMIT 3;\n")
        results = self.exec_query(query)
report = ("\n"
"Top 3 Articles of All Time\n"
"\n")
for result in results:
report += '"' + result[0] + '" - ' + str(result[1]) + " views \n"
return report
def get_top_authors(self):
"""Get the top authors based on their occurrences in article_log"""
# This is similar to top articles except we use the authors table for grouping and selection.
query = ("select authors.name, count(article_log.article_slug) as views from authors\n"
"left join articles on authors.id = articles.author\n"
"left join article_log on articles.slug = article_log.article_slug\n"
"group by authors.name\n"
"order by views DESC;\n")
results = self.exec_query(query)
report = ("\n"
"Top Authors of All Time\n"
"\n")
for result in results:
report += result[0] + ' - ' + str(result[1]) + " views \n"
return report
def get_days_with_errors(self):
"""Get all days that have more than 1% of their requests end in errors."""
# Created two views here. One with the total # of requests for each day.
# The other with the number of errors in each day.
query = ("select to_char( e.date, 'Mon DD, YYYY') as date\n"
",round((e.error_count::decimal/r.request_count::decimal) * 100, 1) as percent_errors\n"
"from daily_errors e, daily_requests r\n"
"where e.date = r.date\n"
"and (e.error_count::decimal/r.request_count::decimal) > 0.01\n"
"order by date DESC;\n")
results = self.exec_query(query)
report = ("\n"
"Days Where 1% of Requests Lead to Errors\n"
"\n")
for result in results:
report += result[0] + ' - ' + str(result[1]) + "% errors\n"
return report
if __name__ == '__main__':
reports = Reports()
reports.print_report()
| true
|
6b63e8f8b2388fc6d1c4f25f499ae2de7dac4b39
|
Python
|
konstantinosKokos/directionality-bias
|
/data/generics.py
|
UTF-8
| 3,997
| 3.171875
| 3
|
[] |
no_license
|
from typing import *
from random import choice
from abc import abstractmethod
Atom = TypeVar('Atom')
Op = TypeVar('Op')
Domain = TypeVar('Domain')
Something = TypeVar('Something')
Atoms = List[Atom]
Ops = List[Op]
class Expr:
@abstractmethod
def __init__(self):
pass
def __str__(self) -> str:
return f'( {str(self.left)} {str(self.op)} {str(self.right)} )'
def __call__(self) -> str:
return str(self)
def __repr__(self) -> str:
return str(self)
def polish(self) -> str:
return f'{str(self.op)} {polish(self.left)} {polish(self.right)}'
def __len__(self) -> int:
return get_expr_len(self)
def polish(expr) -> str:
return expr.polish() if isinstance(expr, Expr) else str(expr)
class LeftExpr(Expr, Generic[Atom, Op]):
def __init__(self, left: Union[Atom, 'LeftExpr'], right: Atom, op: Op) -> None:
super(LeftExpr, self).__init__()
self.left = left
self.right = right
self.op = op
class RightExpr(Expr, Generic[Atom, Op]):
def __init__(self, left: Atom, right: Union[Atom, 'RightExpr'], op: Op) -> None:
super(RightExpr, self).__init__()
self.left = left
self.right = right
self.op = op
@overload
def flip_expr(expr: LeftExpr[Atom, Op]) -> RightExpr[Atom, Op]:
pass
@overload
def flip_expr(expr: RightExpr[Atom, Op]) -> LeftExpr[Atom, Op]:
pass
@overload
def flip_expr(expr: Atom) -> Atom:
pass
def flip_expr(expr):
if isinstance(expr, LeftExpr):
return RightExpr(left=expr.right, right=flip_expr(expr.left), op=expr.op)
elif isinstance(expr, RightExpr):
return LeftExpr(left=flip_expr(expr.right), right=expr.left, op=expr.op)
else:
return expr
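# Worked example: for e = LeftExpr(LeftExpr('a', 'b', '+'), 'c', '+'),
# str(e) == '( ( a + b ) + c )' while str(flip_expr(e)) == '( c + ( b + a ) )':
# the spine changes direction and operands swap sides, with each op preserved.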
def sample_left_expr(depth: int, atoms: Atoms, ops: Ops) -> Union[LeftExpr[Atom, Op], Atom]:
if depth == 0:
return sample(atoms)
else:
return LeftExpr(left=sample_left_expr(depth=depth - 1, atoms=atoms, ops=ops),
right=sample(atoms),
op=sample(ops))
def sample_right_expr(depth: int, atoms: Atoms, ops: Ops) -> Union[RightExpr[Atom, Op], Atom]:
if depth == 0:
return sample(atoms)
else:
return RightExpr(left=sample(atoms),
right=sample_right_expr(depth=depth-1, atoms=atoms, ops=ops),
op=sample(ops))
def sample(choices: List[Something]) -> Something:
return choice(choices)
def eval_lexp(expr: Union[Atom, LeftExpr],
atom_semantics: Callable[[Atom], Domain],
op_semantics: Mapping[Op, Callable[[Domain, Domain], Domain]]) -> Domain:
if isinstance(expr, LeftExpr):
return op_semantics[expr.op](eval_lexp(expr.left, atom_semantics, op_semantics), atom_semantics(expr.right))
else:
return atom_semantics(expr)
def eval_rexp(expr: Union[Atom, RightExpr],
atom_semantics: Callable[[Atom], Domain],
op_semantics: Mapping[Op, Callable[[Domain, Domain], Domain]]) -> Domain:
if isinstance(expr, RightExpr):
return op_semantics[expr.op](atom_semantics(expr.left),
eval_rexp(expr.right, atom_semantics, op_semantics))
else:
return atom_semantics(expr)
def eval_exp(expr: Union[Atom, Expr],
atom_semantics: Callable[[Atom], Domain],
op_semantics: Mapping[Op, Callable[[Domain, Domain], Domain]],
invert: bool = False) -> Domain:
if invert:
expr = flip_expr(expr)
if isinstance(expr, LeftExpr):
return eval_lexp(expr, atom_semantics, op_semantics)
elif isinstance(expr, RightExpr):
return eval_rexp(expr, atom_semantics, op_semantics)
else:
return atom_semantics(expr)
def get_expr_len(expr: Union[Atom, Expr]) -> int:
if isinstance(expr, Expr):
return max(get_expr_len(expr.left), get_expr_len(expr.right)) + 1
else:
return 1
| true
|
a490e4216b0e3129ad18f087bf21fe0676fd753f
|
Python
|
lein-hub/python_basic
|
/programmers/dart_game.py
|
UTF-8
| 649
| 2.796875
| 3
|
[] |
no_license
|
def solution(dartResult):
scores = []
num = ''
for i in dartResult:
if i.isnumeric():
num += i
continue
elif i.isalpha():
if i == 'S':
scores.append(int(num))
num = ''
elif i == 'D':
scores.append(int(num) ** 2)
num = ''
elif i == 'T':
scores.append(int(num) ** 3)
num = ''
elif i == '*':
if len(scores) > 1:
scores[-2] *= 2
scores[-1] *= 2
elif i == '#':
scores[-1] *= -1
return sum(scores)
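# Worked examples under the scoring rules above:
#   solution('1S2D*3T')  -> 37  ('*' doubles [1, 4] to [2, 8], then 3**3 = 27)
#   solution('1D2S#10S') -> 9   ('#' negates the 2; '10' parses as one number)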
| true
|
348d4c0779f00121f949374166b8e3d99829154b
|
Python
|
nthon/Julia-sublime
|
/unicode.py
|
UTF-8
| 1,930
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
import sublime
import sublime_plugin
import re
from .julia_unicode import latex_symbols, emoji_symbols
RE_COMMAND_PREFIX = re.compile(
r".*(\\[\^_]+[a-zA-Z0-9=+\-()]*|\\[a-zA-Z]+|\\:[_a-zA-Z0-9+\-]+:*)$")
symbols = latex_symbols + emoji_symbols
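# Examples of what RE_COMMAND_PREFIX captures at the end of a line:
#   "x = \alpha"    -> group(1) == "\alpha"    (the \\[a-zA-Z]+ branch)
#   "x = \:smile:"  -> group(1) == "\:smile:"  (the emoji \\: branch)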
class JuliaUnicodeMixin(object):
def find_command_backward(self, view, pt):
line_content = view.substr(view.line(pt))
row, col = view.rowcol(pt)
m = RE_COMMAND_PREFIX.match(line_content[:col])
if m:
return sublime.Region(pt - len(m.group(1)), pt)
def look_command_backward(self, view, pt):
ret = self.find_command_backward(view, pt)
if ret:
return view.substr(ret)
def normalize_completion(symbols):
return sublime.CompletionList(
(sublime.CompletionItem(
trigger=s[0],
completion=s[1],
annotation=s[1],
kind=sublime.KIND_AMBIGUOUS)
for s in symbols),
flags=sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
class JuliaUnicodeListener(JuliaUnicodeMixin, sublime_plugin.EventListener):
def should_complete(self, view, pt):
if view.score_selector(pt, "source.julia") > 0:
return True
elif view.settings().get('is_widget') and \
view.window().active_view().match_selector(pt, "source.julia"):
return True
else:
return False
def on_query_completions(self, view, prefix, locations):
if not self.should_complete(view, locations[0]):
return None
prefix = self.look_command_backward(view, locations[0])
if not prefix:
return None
ret = [s for s in latex_symbols if s[0].startswith(prefix)]
if not ret:
ret = [s for s in emoji_symbols if s[0].startswith(prefix)]
return normalize_completion(ret)
| true
|
7a443ddc84e32538e99d693233e174fb2cf00030
|
Python
|
elouie/10701NNGRN
|
/meanSqErr.py
|
UTF-8
| 285
| 2.734375
| 3
|
[] |
no_license
|
import numpy as np
# Mean squared error per time step, averaged over runs.
# Both input arrays are indexed as (run, molecule, time step).
def meanSqErr(input, res, numRuns, numMols, ts):
    error = np.zeros(ts)
    for i in range(ts):
        for j in range(numRuns):
            error[i] += np.sum(np.square(input[j, :, i] - res[j, :, i]))/numMols  # accumulate over runs (was '=')
    return error/numRuns
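# Minimal usage sketch (hypothetical shapes matching the indexing above:
# both arrays are (numRuns, numMols, ts)):
#
#   truth = np.ones((5, 10, 3)); pred = np.zeros((5, 10, 3))
#   print(meanSqErr(truth, pred, numRuns=5, numMols=10, ts=3))  # -> [1. 1. 1.]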
| true
|
97b438fc438310bf2e5f62452b1e7f1846a18881
|
Python
|
Vivekg95/Python-Tutorial
|
/practise18.py
|
UTF-8
| 204
| 2.53125
| 3
|
[] |
no_license
|
def main():
f=open("C:\\Users\\Vivek Kumar\\Desktop\\python10\\guru99.txt","w+")
for i in range(10):
f.write("this is line %d\r\n" % (i+1))
f.close()
if __name__ == "__main__":
    main()
| true
|
57c903eeb786fca644dcfaf7cbba26b56d46a3fd
|
Python
|
jtxiao/Harmonizer
|
/psola.py
|
UTF-8
| 2,848
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
# coding: utf-8
# # 21M.359 Fundamentals of Music Processing
# ## Lab3
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipd
from ipywidgets import interact
import sys
sys.path.append("../common")
from util import *
import fmp
# %matplotlib inline
get_ipython().magic(u'matplotlib notebook')
plt.rcParams['figure.figsize'] = (12, 4)
# In[52]:
snd = load_wav("audio/voiceD4.wav")
sr = 22050
ipd.Audio(snd,rate=sr)
# In[104]:
# answer:
def bin_to_freq(k,Fs,N):
return k * float(Fs)/N
def freq_to_pitch(f):
return np.log2(f/ 440.)*12 + 69
def pitch_to_freq(p):
return 440.*2**((p-69)/12.)
def freq_to_samp(f,sr):
return int(sr/f/2)
# In[127]:
# answer:
def zpad(x,Nzp):
if len(x) >= Nzp:
return x
else:
zero_len = Nzp-len(x)
return np.concatenate([np.zeros(zero_len),x])
N_new = 2048*2**4
xw_new = zpad(xw, N_new)
# answer:
fft_new = np.fft.rfft(xw_new)
l_new = len(fft_new)
ab_new = np.abs(fft_new)
plt.figure()
plt.plot(ab_new)
peaks = find_peaks(ab_new, thresh= .125)
freqs_new = bin_to_freq(peaks,sr,N_new)
freq_to_pitch(freqs_new)
# In[142]:
def epoch_indices(snd, samples):
ind = np.argmax(snd[0:samples])
epochs = []
epochs.append(ind)
while ind < len(snd):
ind += samples
epochs.append(ind)
return epochs
def new_epochs(snd, samples): #new samples length
ind = samples
epochs = []
epochs.append(ind)
while ind<len(snd):
ind += samples
epochs.append(ind)
return epochs
def nearest(new, epochs):
idx = (np.abs(np.array(epochs)-new)).argmin()
return epochs[idx]
def PSOLA(snd, epoch_indices,new_epochs, samples):
snd = np.array(snd)
hann = np.hanning(2*samples + 1)
new_snd = np.zeros(len(snd))
for new in new_epochs:
near = nearest(new, epoch_indices)
if near <= samples:
window = hann[samples-near:]*snd[0:near+samples+1]
elif near >= len(snd) - samples:
window = hann[:samples + (len(snd)-near)]*snd[near-samples:]
else:
window = hann*snd[near-samples:near+samples+1]
        if new <= samples:
            new_snd[0:len(window)] += window
        elif new > len(snd) - samples:
            # the original line here was a no-op slice; add as much of the
            # (possibly truncated) window as fits at the tail of the output
            n = min(len(window), len(new_snd) - (new - samples))
            new_snd[new-samples:new-samples+n] += window[:n]
        else:
            new_snd[new-samples:new+samples+1] += window
return new_snd
D4_samp =freq_to_samp( pitch_to_freq(50),sr )
G4_samp = freq_to_samp( pitch_to_freq(50+15),sr )
ep = epoch_indices(snd,D4_samp)
nep = new_epochs(snd,G4_samp)
new = PSOLA(snd,ep,nep,D4_samp)
fft = np.fft.rfft(snd)
fft2 = np.fft.rfft(new)  # moved after PSOLA: `new` did not exist yet where this line originally sat
plt.figure()
plt.plot(snd[0:1000])
plt.figure()
plt.plot(new[0:1000])
ipd.Audio(new,rate=sr)
# In[50]:
ipd.Audio(snd, rate=sr)
# In[ ]:
| true
|
967752f9d79be03454194846f37675354edf596a
|
Python
|
seongbeenkim/Algorithm-python
|
/BOJ(Baekjoon Online Judge)/Greedy/1783_병든 나이트(ill knight).py
|
UTF-8
| 241
| 3.125
| 3
|
[] |
no_license
|
#https://www.acmicpc.net/problem/1783
import sys
n, m = map(int, sys.stdin.readline().split())
if n == 1:
print(1)
elif n == 2:
print(min(4,(m+1)//2))
else:
if m >= 7:
print(m-7 + 5)
else:
print(min(4,m))
| true
|
3f503a5922d07049f5e4431e562571cb9d3818bd
|
Python
|
jtgorman/DCC_jp2_converter
|
/dcc_jp2_converter/modules/converter.py
|
UTF-8
| 6,038
| 2.625
| 3
|
[] |
no_license
|
"""High-level logic for orchestrating a conversion job."""
import logging
import os
import stat
import shutil
import tempfile
from .file_manager import get_tiffs
from .command_runner import CommandRunner
from dcc_jp2_converter import ImagemagickCommandBuilder, Exiv2CommandBuilder
from dcc_jp2_converter import imagemagickCommandBuilders as im_cb
from dcc_jp2_converter import exiv2CommandBuilders as exi2_cb
logger = logging.getLogger("dcc_jp2_converter")
def _cleanup_multiple(real_name, path):
"""
This is a hack that I had to write because imagemagick makes multiple
files when it finds an embedded thumbnail in the tiff file. This function
looks for any files that contain the "real name" in it and renames the
largest one to that name it was supposed to have from the start.
Args:
real_name: the name of the file it supposed to be saved as
path: the path were the multiple version of the file are stored.
"""
name, ext = os.path.splitext(os.path.basename(real_name))
duplicates = []
# find the duplicates with a given name
for file in os.scandir(path):
if os.path.splitext(file.name)[1].lower() != ".jp2":
continue
if name in file.name:
duplicates.append(file.path)
sorted_size = sorted(duplicates, key=lambda f: os.path.getsize(f))
largest = sorted_size[-1]
assert os.path.getsize(largest) >= os.path.getsize(sorted_size[0])
os.rename(largest, os.path.join(path, real_name))
def convert_tiff_access_folder(path: str, overwrite_existing=True, remove_on_success=False):
"""
    Converts all tiff files located in that folder into JPEG 2000 .jp2 files and
    migrates all the metadata from the tiff file into the newly produced jp2
    file. All new files are saved in the same place as their source files.
    Args:
        path: The path to the folder containing tiff files to be converted.
overwrite_existing: If an existing jp2 file already exists in the same
folder, overwrite it with a new file.
remove_on_success: Delete access tiff file afterwards if successfully converted.
"""
image_convert_command = ImagemagickCommandBuilder(builder=im_cb.Standard())
metadata_extractor = Exiv2CommandBuilder(exi2_cb.ExtractIPTCCommand())
metadata_injector = Exiv2CommandBuilder(exi2_cb.InsertIPTCCommand())
with tempfile.TemporaryDirectory(prefix="convert_") as tmp_folder:
command_runner = CommandRunner()
tiffs = list(get_tiffs(path))
total_files_converted = 0
for i, tiff in enumerate(tiffs):
tmp_access_tif = os.path.join(tmp_folder, os.path.basename(tiff))
tmp_access_jp2 = os.path.splitext(tmp_access_tif)[0] + ".jp2"
final_access_jp2 = os.path.join(
path, os.path.basename(tmp_access_jp2))
if not overwrite_existing:
if os.path.exists(final_access_jp2):
logger.warning(
"{} already exists. skipping".format(final_access_jp2))
continue
logger.info("Converting part {} of {}: From {} to {} ".format(
i + 1, len(tiffs), os.path.basename(tiff),
os.path.basename(final_access_jp2)))
logger.debug(
"Copying \"{}\" to temp folder \"{}\"".format(tiff, path))
shutil.copyfile(tiff, tmp_access_tif)
logger.debug(
"Using \"{}\" to create \"{}\"".format(
tmp_access_tif, tmp_access_jp2))
im_command = image_convert_command.build_command(
tmp_access_tif, tmp_access_jp2)
try:
command_runner.run(im_command)
if remove_on_success:
# remove the read only attribute first
os.chmod(tiff, stat.S_IWRITE)
logger.info("Deleting file, \"{}\".".format(tiff))
os.remove(tiff)
except RuntimeError as e:
logger.error(e)
raise
# exit(1)
finally:
stdout, stderr = command_runner.get_output()
if stdout:
logger.info(stdout)
if stderr:
logger.warning(stderr)
# HACK: Clean up from Imagemagick because it seems to have a
# problem with embedded thumbnails
_cleanup_multiple(os.path.basename(tmp_access_jp2), tmp_folder)
# Create sidecar metadata files
logger.debug(
"Extracting metadata from \"{}\"".format(tmp_access_tif))
mde_command = metadata_extractor.build_command(tmp_access_tif)
try:
command_runner.run(mde_command)
except RuntimeError as e:
logger.error(e)
exit(1)
finally:
stdout, stderr = command_runner.get_output()
if stdout:
logger.info(stdout)
if stderr:
logger.warning(stderr)
# Insert sidecar metadata files into jp2
logger.debug(
"Injecting metadata into \"{}\"".format(tmp_access_jp2))
mdi_command = metadata_injector.build_command(tmp_access_jp2)
try:
command_runner.run(mdi_command)
except RuntimeError as e:
logger.error(e)
exit(1)
finally:
stdout, stderr = command_runner.get_output()
if stdout:
logger.info(stdout)
if stderr:
logger.warning(stderr)
logger.debug(
"Moving \"{}\" into \"{}\"".format(tmp_access_jp2, path))
shutil.move(tmp_access_jp2, final_access_jp2)
total_files_converted += 1
logger.info("Converted {} file(s) in {}.".format(total_files_converted, path))
| true
|
686d8bd0e5a30e54c26f427e1331082f70439363
|
Python
|
marcomarasca/SDCND-Vehicle-Detection
|
/udacity_parser.py
|
UTF-8
| 6,457
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
import os
import csv
import numpy as np
import cv2
import argparse
from tqdm import tqdm
"""
The script used to parse the udacity dataset (https://github.com/udacity/self-driving-car/tree/master/annotations)
into usable images to train the vehicle vs non-vehicle classifier. Extracts from the tagged images the bounding boxes of
the cars, resizes to the correct dimensions and extract patches where vehicles are not present.
"""
def process_udacity_dataset(map_file = os.path.join('data', 'udacity', 'labels_crowdai.csv'),
img_folder = os.path.join('data', 'udacity', 'object-detection-crowdai'),
dest_folder = os.path.join('data', 'udacity'),
dest_size = 64,
format = 'png',
limit = None,
skip = 5):
print('Loading Udacity CSV file from: {}'.format(map_file))
vehicles_folder = os.path.join(dest_folder, 'vehicles')
if not os.path.isdir(vehicles_folder):
os.makedirs(vehicles_folder)
notvehicles_folder = os.path.join(dest_folder, 'non-vehicles')
if not os.path.isdir(notvehicles_folder):
os.makedirs(notvehicles_folder)
file_bboxes = {}
with open(map_file, 'r') as csv_file:
lines = [line for line in csv_file]
if limit is not None:
lines = lines[:limit]
reader = csv.reader(lines)
# Skip header
_ = next(reader, None)
for x_min, y_min, x_max, y_max, img_file, label, _ in filter(lambda row:row[5] in ['Car', 'Truck'], tqdm(reader, total = len(lines), unit = ' images', desc = 'Parsing Vehicles')):
            # the image itself is only read later, in the save loop; the
            # unused imread that sat here made parsing needlessly slow
x_min = int(x_min)
x_max = int(x_max)
y_min = int(y_min)
y_max = int(y_max)
if y_max <= y_min or x_max <= x_min:
print('Wrong bounding box for {}: ({}, {}), ({}, {})'.format(img_file, x_min, y_min, x_max, y_max))
continue
bboxes = file_bboxes.get(img_file)
if bboxes is None:
bboxes = []
file_bboxes[img_file] = bboxes
bboxes.append(((x_min, y_min), (x_max, y_max)))
print('Processed images: {}'.format(len(file_bboxes)))
for i, (img_file, bboxes) in enumerate(tqdm(file_bboxes.items(), unit=' images', desc='Saving images')):
if i % skip == 0:
img = cv2.imread(os.path.join(img_folder, img_file))
_save_windows(img, bboxes, vehicles_folder, img_file, dest_size, format)
notvehicle_boxes = _get_notvehicles_bboxes(bboxes, img.shape)
_save_windows(img, notvehicle_boxes, notvehicles_folder, img_file, dest_size, format)
def _save_windows(img, bboxes, folder, img_file, dest_size, format):
for i, bbox in enumerate(bboxes):
img_window = img[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]]
interpolation = cv2.INTER_CUBIC
if img_window.shape[0] < dest_size or img_window.shape[1] < dest_size:
interpolation = cv2.INTER_AREA
img_window = cv2.resize(img_window, (dest_size, dest_size), interpolation = interpolation)
dest_file = os.path.join(folder, '{}_{}.{}'.format(img_file.split('.')[0], i, format))
cv2.imwrite(dest_file, img_window)
def _get_notvehicles_bboxes(taken_bboxes, img_shape):
window_size = np.random.choice([128, 256])
stride = int(window_size * 0.8)
y_gap = 120 # Skip the hood
bboxes = []
max_notvehicles = len(taken_bboxes)
for x in range(0, img_shape[1], stride):
# Skip x with 1/3 probability
if np.random.choice([True, False], replace = False, p = [1/3, 2/3]):
continue
for y in range(img_shape[0] - y_gap, window_size, -stride):
# Skip y with 1/3 probability
if np.random.choice([True, False], replace = False, p = [1/3, 2/3]):
continue
bbox = ((x, y - window_size), (x + window_size, y))
if _is_free_box(bbox, taken_bboxes):
bboxes.append(bbox)
if len(bboxes) >= max_notvehicles:
return bboxes
return bboxes
def _is_free_box(bbox, bboxes):
for taken_bbox in bboxes:
if _overlap(bbox, taken_bbox):
return False
return True
def _overlap(bbox1, bbox2):
if bbox1[1][0] <= bbox2[0][0]: return False # bbox1 is left of bbox2
if bbox1[0][0] >= bbox2[1][0]: return False # bbox1 is right of bbox2
if bbox1[1][1] <= bbox2[0][1]: return False # bbox1 is above bbox2
if bbox1[0][1] >= bbox2[1][1]: return False # bbox1 is below bbox2
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='SVC Training')
parser.add_argument(
'--map_file',
type=str,
default=os.path.join('data', 'udacity', 'labels_crowdai.csv'),
help='Mapping CSV file'
)
parser.add_argument(
'--img_folder',
type=str,
default=os.path.join('data', 'udacity', 'object-detection-crowdai'),
help='Folder where the original images are stored'
)
parser.add_argument(
'--dest_folder',
type=str,
default=os.path.join('data', 'udacity'),
help='Folder where to save the cropped images'
)
parser.add_argument(
'--dest_size',
type=int,
default=64,
        help='Pixel size of the (square) destination images'
)
parser.add_argument(
'--format',
type=str,
default='png',
help='Which format to save the cropped images'
)
parser.add_argument(
'--limit',
type=int,
default=None,
help='Limit to the given amount of images'
)
parser.add_argument(
'--skip',
type=int,
default=7,
help='Only process every x images (e.g. to avoid time series data)'
)
args = parser.parse_args()
process_udacity_dataset(map_file=args.map_file,
img_folder=args.img_folder,
dest_folder=args.dest_folder,
dest_size=args.dest_size,
format=args.format,
limit=args.limit,
skip=args.skip)
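# Example invocation (paths are placeholders for wherever the crowdai archive
# was extracted):
#
#   python udacity_parser.py --map_file data/udacity/labels_crowdai.csv \
#       --img_folder data/udacity/object-detection-crowdai --skip 7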
| true
|
346087395b1de1955facadf0a28b43440680e6eb
|
Python
|
RITIKAJINDAL11/interview-bot
|
/aiml_category_generator.py
|
UTF-8
| 1,291
| 2.71875
| 3
|
[] |
no_license
|
import os
category=input("\nEnter Category")
current_directory=os.getcwd()
path="{}/aiml/{}".format(current_directory,category)
print(current_directory)
if not os.path.exists(path):
os.makedirs(path)
pattern=input("\nEnter Pattern").upper()
srai=input("\nEnter SRAI").upper()
addmore=1
ans=True
with open("{}/{}.aiml".format(path,pattern), "a+") as f:
while ans:
if addmore==1:
f.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<aiml>\n\n<category>\n<pattern>\n{}\n</pattern>\n<template>\n<random>\n<li></li>\n</random>\n</template>\n</category>\n\n".format(srai))
f.write("<category>\n<pattern>\n{}\n</pattern>\n<template>\n<srai>\n{}\n</srai>\n</template>\n</category>\n\n".format(pattern,srai))
f.write("<category>\n<pattern>\n_ {}\n</pattern>\n<template>\n<srai>\n{}\n</srai>\n</template>\n</category>\n\n".format(pattern,srai))
f.write("<category>\n<pattern>\n{} *\n</pattern>\n<template>\n<srai>\n{}\n</srai>\n</template>\n</category>\n\n".format(pattern,srai))
f.write("<category>\n<pattern>\n_ {} *\n</pattern>\n<template>\n<srai>\n{}\n</srai>\n</template>\n</category>\n\n".format(pattern,srai))
a=input("ADD Alternative pattern")
if a!="y":
ans=False
f.write("</aiml>")
else:
pattern=input("\nEnter Pattern").upper()
addmore=addmore+1
| true
|
c951f981cd833d8094cf8426f0251b78e8fefba5
|
Python
|
PhyuCin/CP1404PRAC
|
/Prac_01/test.py
|
UTF-8
| 30
| 2.78125
| 3
|
[] |
no_license
|
def is_even(n):
    # minimal helper so the file runs; the original called is_even without defining it
    return n % 2 == 0
x = 4
y = 5
print(is_even(x))
| true
|
9580d8f530a1987279b6321b578f9b6b76e5b897
|
Python
|
cliffpham/algos_data_structures
|
/data_structures/disjoint_set/structure.py
|
UTF-8
| 2,214
| 4.1875
| 4
|
[] |
no_license
|
# Disjoint Set: A "take me to your leader" data structure
# Whenever a union occurs, we point to the nodes' leaders' rank and compare the two
# By doing so we can also assign all of the lower ranking leader's "lackeys" to the higher ranking leader
# aka "Path Compression"
# We can check if a node is directed to the correct leader by calling on the find_set function
class Node:
def __init__(self, data):
self.rank = 0
self.data = data
self.leader = self
self.lackeys = [self]
class DisjointSet():
def __init__(self):
self.sets = {}
def make_set(self, data):
if data in self.sets:
print("this set already exists")
else:
new_set = Node(data)
self.sets[data] = new_set
def path_compression(self, main, subordinate):
print(main.leader.data)
while subordinate.lackeys:
cur = subordinate.lackeys.pop()
cur.leader = main
main.leader.lackeys.append(cur)
def union(self, main, subordinate):
sets = self.sets
main = sets[main]
subordinate = sets[subordinate]
if main.leader.rank > subordinate.leader.rank:
self.path_compression(main.leader, subordinate.leader)
elif subordinate.leader.rank > main.leader.rank:
self.path_compression(subordinate.leader, main.leader)
elif main.leader.rank == subordinate.leader.rank:
main.leader.rank += 1
self.path_compression(main.leader, subordinate.leader)
def find_set(self, main):
sets = self.sets
if main not in sets:
print("Set does not exist")
else:
return sets[main].leader
#disjoint_set = DisjointSet()
#disjoint_set.make_set('A')
#disjoint_set.make_set('B')
#disjoint_set.make_set('C')
#disjoint_set.make_set('D')
#disjoint_set.make_set('E')
#disjoint_set.union('A', 'B')
#print(disjoint_set.sets['A'].rank)
#disjoint_set.union('C', 'D')
#print(disjoint_set.sets['D'].leader.data)
#disjoint_set.union('D', 'E')
#print(disjoint_set.sets['C'].lackeys)
#print(disjoint_set.sets['C'].rank)
#disjoint_set.union('A', 'D')
#print(disjoint_set.find_set('C'))
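# A small runnable walk-through of the API above (output also includes the
# debug print inside path_compression, which runs on every union):
if __name__ == '__main__':
    ds = DisjointSet()
    for label in 'ABCD':
        ds.make_set(label)
    ds.union('A', 'B')            # equal ranks: A is promoted and absorbs B
    ds.union('C', 'D')            # C is promoted and absorbs D
    ds.union('A', 'C')            # equal ranks again: A absorbs C's lackeys
    print(ds.find_set('D').data)  # -> A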
| true
|
075a90a1ccc9bd1c280ef087487ba655fffbade6
|
Python
|
jmt-transloc/ondemand-python
|
/integration/api/test_rides_api.py
|
UTF-8
| 6,433
| 2.609375
| 3
|
[] |
no_license
|
from typing import Generator
import pytest
from pytest import fixture
from requests import HTTPError
from utilities.api_helpers.rides import RidesAPI
from utilities.api_helpers.services import ServicesAPI
from utilities.factories.rides import RideFactory
from utilities.factories.services import ServiceFactory
@pytest.mark.api
class TestRidesAPI:
"""Batter of tests for RidesAPI functionality."""
@pytest.fixture
def ride(self) -> Generator[dict, None, None]:
"""Instantiate a ride for RidesAPI method testing."""
service: dict = ServiceFactory()
ride: dict = RideFactory(service=service)
yield ride
self.services_api.delete_service(service)
@pytest.fixture(autouse=True)
def set_api(self) -> None:
"""Instantiate all APIs used in RidesAPI testing."""
self.rides_api: RidesAPI = RidesAPI()
self.services_api: ServicesAPI = ServicesAPI()
@pytest.mark.low
@pytest.mark.unit
def test_cancel_ride__failure__invalid_input(self) -> None:
"""Check that the cancel_ride method fails with invalid input."""
with pytest.raises(TypeError):
self.rides_api.cancel_ride(111111) # type: ignore
@pytest.mark.low
@pytest.mark.unit
def test_cancel_ride__failure__ride_required(self) -> None:
"""Check that the cancel_ride method fails without a ride param."""
with pytest.raises(TypeError) as e:
self.rides_api.cancel_ride() # type: ignore
assert "required positional argument: 'ride'" in str(e.value)
@pytest.mark.high
@pytest.mark.integration
def test_cancel_ride__success(self, ride: fixture) -> None:
"""Check that a ride status may be changed to 'Canceled'."""
try:
self.rides_api.cancel_ride(ride)
except HTTPError:
pytest.fail('Test failed due to HTTPError.')
@pytest.mark.low
@pytest.mark.unit
def test_change_ride_status__failure__invalid_input(self) -> None:
"""Check that the change_ride_status method fails with invalid input."""
with pytest.raises(TypeError):
self.rides_api.change_ride_status(ride=1111111, status='testing') # type: ignore
@pytest.mark.low
@pytest.mark.unit
def test_change_ride_status__failure__ride_required(self) -> None:
"""Check that the change_ride_status method fails without a ride param."""
with pytest.raises(TypeError) as e:
self.rides_api.change_ride_status(status='Canceled') # type: ignore
assert "required positional argument: 'ride'" in str(e.value)
@pytest.mark.low
@pytest.mark.unit
def test_change_ride_status__failure__status_required(self) -> None:
"""Check that the change_ride_status method fails without a status param."""
with pytest.raises(TypeError) as e:
self.rides_api.change_ride_status(ride={}) # type: ignore
assert "required positional argument: 'status'" in str(e.value)
@pytest.mark.high
@pytest.mark.integration
def test_change_ride_status__success(self, ride: fixture) -> None:
"""Check that a ride status may be changed successfully."""
try:
self.rides_api.change_ride_status(ride=ride, status='Canceled')
except HTTPError:
pytest.fail('Test failed due to HTTPError.')
@pytest.mark.low
@pytest.mark.unit
def test_complete_ride__failure__invalid_input(self) -> None:
"""Check that the complete_ride method fails with invalid input."""
with pytest.raises(TypeError):
self.rides_api.complete_ride(111111) # type: ignore
@pytest.mark.low
@pytest.mark.unit
def test_complete_ride__failure__ride_required(self) -> None:
"""Check that the complete_ride method fails without a ride param."""
with pytest.raises(TypeError) as e:
self.rides_api.complete_ride() # type: ignore
assert "required positional argument: 'ride'" in str(e.value)
@pytest.mark.high
@pytest.mark.integration
def test_complete_ride__success(self, ride: fixture) -> None:
"""Check that a ride status may be changed to 'Completed'."""
try:
self.rides_api.complete_ride(ride)
except HTTPError:
pytest.fail('Test failed due to HTTPError.')
@pytest.mark.low
@pytest.mark.unit
def test_create_ride__failure__invalid_input(self) -> None:
"""Check that the create_ride method fails with invalid input."""
with pytest.raises(HTTPError):
self.rides_api.create_ride(111111) # type: ignore
@pytest.mark.low
@pytest.mark.unit
def test_create_ride__failure__ride_data_required(self) -> None:
"""Check that the create_ride method fails without a ride_data param."""
with pytest.raises(TypeError) as e:
self.rides_api.create_ride() # type: ignore
assert "required positional argument: 'ride_data'" in str(e.value)
@pytest.mark.high
@pytest.mark.integration
def test_create_ride__success(self) -> None:
"""Check that a ride may be created."""
service: dict = ServiceFactory()
ride_data: dict = RideFactory.build(service_id=service['service_id'])
try:
self.rides_api.create_ride(ride_data)
self.services_api.delete_service(service)
except HTTPError:
pytest.fail('Test failed due to HTTPError.')
@pytest.mark.low
@pytest.mark.unit
def test_start_ride__failure__invalid_input(self) -> None:
"""Check that the start_ride method fails with invalid input."""
with pytest.raises(TypeError):
self.rides_api.start_ride(111111) # type: ignore
@pytest.mark.low
@pytest.mark.unit
def test_start_ride__failure__ride_required(self) -> None:
"""Check that the start_ride method fails without a ride param."""
with pytest.raises(TypeError) as e:
self.rides_api.start_ride() # type: ignore
assert "required positional argument: 'ride'" in str(e.value)
@pytest.mark.high
@pytest.mark.integration
def test_start_ride__success(self, ride: fixture) -> None:
"""Check that a ride status may be changed to 'In Progress'."""
try:
self.rides_api.start_ride(ride)
except HTTPError:
pytest.fail('Test failed due to HTTPError.')
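# To run just this battery locally (a sketch; assumes pytest is installed and
# the custom markers used above are registered in the project's config):
#
#   pytest -m api integration/api/test_rides_api.py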
| true
|
9679d330a5b1b5c4e04e9227af5f6d3e5f6a9b47
|
Python
|
roperch/codewars-python-algorithms
|
/who-ate-cookie.py
|
UTF-8
| 586
| 3.890625
| 4
|
[] |
no_license
|
# test cases:
# cookie("Ryan") --> "Who ate the last cookie? It was Zach!"
# cookie(26) --> "Who ate the last cookie? It was Monica!"
# cookie(2.3) --> "Who ate the last cookie? It was Monica!"
# cookie(true) --> "Who ate the last cookie? It was the dog!"
def cookie(x):
if type(x) is str:
return "Who ate the last cookie? It was Zach!"
elif type(x) is float:
return "Who ate the last cookie? It was Monica!"
elif type(x) is int:
return "Who ate the last cookie? It was Monica!"
else:
return "Who ate the last cookie? It was the dog!"
| true
|
b69eb4fb66f35d9b45c6a381495924212fc2a861
|
Python
|
Rywells88/Gaussian_NaiveBayes_ML_Classifiers
|
/GaussianClassifier.py
|
UTF-8
| 2,333
| 3.3125
| 3
|
[] |
no_license
|
import numpy as np
from numpy import pi, exp
from scipy.io import loadmat
import matplotlib.pyplot as plt
data = loadmat('a1digits.mat') #dictionary types
training_data = data['digits_train']
testing_data = data['digits_test']
class_dict = {}
fig, ax = plt.subplots(1, 10, figsize = (18,10))
# Uniform prior over the 10 digit classes (700 training examples each).
# Since the prior is identical for every class it does not change the argmax.
def calc_classPrior():
    return 0.1
def calc_mean(k, pixel): # k is the class for the mean we are trying to generate, pixel is a value from 1-64
global training_data
sumK = 0
for i in range(len(training_data[k])): # 700
sumK += training_data[k][i][pixel] # go through example 0, 1,2,3,... 700
#goes through each example for a given class and pixel, and finds the mean value of that pixel.
return (sumK/len(training_data[k]))
def getVariance(x):
return (np.var(x))
def feature_in_class_probability(x, variance, means):
    # Unnormalised Gaussian score per class. Note: `variance` is accepted but
    # unused; with a single shared isotropic variance the 1/(2*sigma^2) factor
    # is the same for every class, so dropping it leaves the argmax unchanged.
    answer_list = []
    for j in range(10):
        sub = (np.subtract(x, means[j]))**2
        sumTotal = np.sum(sub)
        product = exp(-sumTotal)
        PCkx = product * calc_classPrior()
        answer_list.append(PCkx)
    return answer_list
def main():
global training_data
global testing_data
training_data= np.transpose(training_data)
testing_data =np.transpose(testing_data)
means = np.zeros((10, 64))
for i in range(10):
for j in range(64):
means[i][j] = calc_mean(i, j)
V = getVariance(training_data)
correctCount = 0
error_count = np.zeros(10)
for k in range(10):
for example in range(400):
PxCk = feature_in_class_probability(testing_data[k][example], V, means)
print(max(PxCk), " in class ",PxCk.index(max(PxCk)))
print("\n")
if(k == PxCk.index(max(PxCk))):
correctCount += 1
else:
error_count[k] += 1
print(correctCount/4000)
print(error_count)
names = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
for i in range(10):
ax[i].imshow(means[i].reshape(8, 8), cmap='gray', interpolation='nearest')
ax[i].set_title(names[i])
ax[0].text(0,-5, "Standard deviation : 0.3049" )
plt.show()
main()
| true
|
32077a02323fa17ec502e4bc2ba5dead2136b663
|
Python
|
JKutt/PyDev
|
/testDCIP.py
|
UTF-8
| 6,151
| 2.546875
| 3
|
[] |
no_license
|
import unittest
import numpy as np
import DCIPsimpeg.DCIPtools as DCIP
class TestingReCalculationMethods(unittest.TestCase):
# testing rho calculation
def testRhoCalculation(self):
# homogenous halfspace
rho = 2200
In = 1.00
# create synthetic variables testing for a dipole generation and its calculations
hdr_line = ['DIPOLE', 'RDG', 'Status', 'Rx1x', 'Rx1y', 'Rx2x', 'Rx2y', 'Tx1x', 'Tx1y', 'Tx2x', 'Tx2y',
'Rx1East', 'Rx1North', 'Rx1Elev', 'Rx2East', 'Rx2North', 'Rx2Elev',
'Tx1East', 'Tx1North', 'Tx1Elev', 'Tx2East', 'Tx2North', 'Tx2Elev',
'Rx1File', 'Rx2File', 'TxFile', 'k',
'Coupling', 'Sp', 'Vp_err', 'Vp', 'In', 'In_err', 'Rho', 'Rho_QC', 'Mx_QC',
'Mx_err', 'Stack', 'TimeBase', 'Vs']
data_line = ['1', '1', 'Good', '500', '500', '700', '500', '200', '500', '100', '500',
'345500', '4345500', '520', '345700', '4345500', '522',
'345200', '4345500', '519', '345100', '4345500', '520', 'AA000.raw', 'AA000.raw',
'AA000.raw', '0',
'15.0', '0.0', '0.0', '0.0', str(In), '0.0', str(rho), 'Accept', 'Accept', '0.0',
'0', '2000', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0',
'0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
varInfo = DCIP.Jreadtxtline(hdr_line, data_line)
voltage_dipole = DCIP.JvoltDipole(varInfo)
current_dipole = DCIP.JinDipole(varInfo)
# calculate expected vp of the homogenous halfspace
gk = voltage_dipole.calcGeoFactor(current_dipole)
vp = In * rho * (1 / gk)
# assign this volatage for testing
voltage_dipole.Vp = vp
# calc the resistivity with synthetic Vp to see if it matches known
rho_test = voltage_dipole.calcRho(current_dipole)
print('Vp: ', vp)
# send results
self.assertTrue(np.isclose(rho_test, rho))
def testNegativeRhoCalculation(self):
# homogenous halfspace
rho = 2200
In = 1.00
# create synthetic variables testing for a dipole generation and its calculations
hdr_line = ['DIPOLE', 'RDG', 'Status', 'Rx1x', 'Rx1y', 'Rx2x', 'Rx2y', 'Tx1x', 'Tx1y', 'Tx2x', 'Tx2y',
'Rx1East', 'Rx1North', 'Rx1Elev', 'Rx2East', 'Rx2North', 'Rx2Elev',
'Tx1East', 'Tx1North', 'Tx1Elev', 'Tx2East', 'Tx2North', 'Tx2Elev',
'Rx1File', 'Rx2File', 'TxFile', 'k',
'Coupling', 'Sp', 'Vp_err', 'Vp', 'In', 'In_err', 'Rho', 'Rho_QC', 'Mx_QC',
'Mx_err', 'Stack', 'TimeBase', 'Vs']
data_line = ['1', '1', 'Good', '500', '500', '700', '500', '200', '500', '100', '500',
'345700', '4345500', '520', '345500', '4345500', '522',
'345200', '4345500', '519', '345100', '4345500', '520', 'AA000.raw', 'AA000.raw',
'AA000.raw', '0',
'15.0', '0.0', '0.0', '0.0', str(In), '0.0', str(rho), 'Accept', 'Accept', '0.0',
'0', '2000', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0',
'0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
varInfo = DCIP.Jreadtxtline(hdr_line, data_line)
voltage_dipole = DCIP.JvoltDipole(varInfo)
current_dipole = DCIP.JinDipole(varInfo)
# calculate expected vp of the homogenous halfspace
gk = voltage_dipole.calcGeoFactor(current_dipole)
vp = In * rho * (1 / gk)
# assign this volatage for testing
voltage_dipole.Vp = vp
# calc the resistivity with synthetic Vp to see if it matches known
rho_test = voltage_dipole.calcRho(current_dipole)
# send results
print('Vp: ', vp)
self.assertTrue(np.isclose(rho_test, rho))
# test mx calculation
def testMxCalculation(self):
# homogenous halfspace
rho = 2200
mx = 1
In = 1.00
# window widths
windows = [20,20,40,40,40,40,80,80,80,80,80,80,80,160,160,160,160,160,160,160]
hdr_line = ['DIPOLE', 'RDG', 'Status', 'Rx1x', 'Rx1y', 'Rx2x', 'Rx2y', 'Tx1x', 'Tx1y', 'Tx2x', 'Tx2y',
'Rx1East', 'Rx1North', 'Rx1Elev', 'Rx2East', 'Rx2North', 'Rx2Elev',
'Tx1East', 'Tx1North', 'Tx1Elev', 'Tx2East', 'Tx2North', 'Tx2Elev',
'Rx1File', 'Rx2File', 'TxFile', 'k',
'Coupling', 'Sp', 'Vp_err', 'Vp', 'In', 'In_err', 'Rho', 'Rho_QC', 'Mx_QC',
'Mx_err', 'Stack', 'TimeBase', 'Vs']
data_line = ['1', '1', 'Good', '500', '500', '700', '500', '200', '500', '100', '500',
'345500', '4345500', '520', '345700', '4345500', '522',
'345200', '4345500', '519', '345100', '4345500', '520', 'AA000.raw', 'AA000.raw',
'AA000.raw', '0',
'15.0', '0.0', '0.0', '0.0', str(In), '0.0', str(rho), 'Accept', 'Accept', '0.0',
'0', '2000', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0',
'0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
varInfo = DCIP.Jreadtxtline(hdr_line, data_line)
# create voltage dipole
voltage_dipole = DCIP.JvoltDipole(varInfo)
# create current dipole
current_dipole = DCIP.JinDipole(varInfo)
# calculate expected vp of the homogenous halfspace
gk = voltage_dipole.calcGeoFactor(current_dipole)
vp = In * rho * (1 / gk)
# assign this volatage for testing
voltage_dipole.Vp = vp
voltage_dipole.Vs = np.ones(20) * 1e-3
# calc the resistivity with synthetic Vp to see if it matches known
mx_test = voltage_dipole.calcMx(windows, 0, 20)
# send results
self.assertTrue(np.isclose(mx_test, mx / vp))
if __name__ == '__main__':
unittest.main()
| true
|
72afdc049a36330e316f1cbd11e9aea5cfe8c452
|
Python
|
jadeaxon/demongeon
|
/src/main.py
|
UTF-8
| 464
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import sys
from demongeon import *
# PRE: Due to use of f strings, this only works on Python 3.6 or later.
try:
assert sys.version_info >= (3, 6)
except AssertionError:
print("ERROR: You are not running Python 3.6 or later.")
sys.exit(1)
# Play the game.
major, minor, patch = version
print(f"Welcome to Demongeon v{major}.{minor}.{patch}!")
print("Try to find the treasure and escape without being killed.")
world = World()
world.start()
| true
|
4dd89ebf5ffd330e4aefd8985b387bacc271c491
|
Python
|
VVivid/python-programs
|
/dict/14.py
|
UTF-8
| 260
| 3.734375
| 4
|
[] |
no_license
|
"""Write a Python program to sort a dictionary by key."""
color_dict = {'red': '#FF0000',
'green': '#008000',
'black': '#000000',
'white': '#FFFFFF'}
for item in sorted(color_dict):
print(item, color_dict[item])
| true
|
6011a7ff2087f1bc93d9e71f2972af7584cb90cf
|
Python
|
tellmemax/ftp-server-bruteforce-attack
|
/ftpcracker.py
|
UTF-8
| 557
| 2.703125
| 3
|
[] |
no_license
|
import socket
import re
import sys
def connection(op,user,passw):
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print('Trying ' + ip + ':' + user + ':' + passw)
sock.connect(('192.168.1.1',21))
data = sock.revc(1024)
sock.send('User' + user + * '\r\n')
data = sock.revc(1024)
sock.send('Password' + passw * '\r\n')
data = sock.recv(1024)
sock.send('Quit' * '\r\n')
sock.close()
return data
user = 'User1'
password = ['pass1','pass2','pass3']
for password in password:
print(connection('192.168.1.1',user,password))
| true
|
38585735d9937fe3c8484c33e758e5e6c6760c7e
|
Python
|
andrewchch/kanban_sim
|
/kanbansim/models/work.py
|
UTF-8
| 3,293
| 2.984375
| 3
|
[] |
no_license
|
import logging
class Work:
"""
A body of work to be done for a workflow step. Assumption is that all work is done by one actor.
"""
def __init__(self, name='Work', size=0, env=None, workflow=None, case=None):
assert size is not None, "Size must be greater than zero"
self.work_to_do = size
self.work_done = 0
self.name = name
self.env = env
self.work_done_by = None
self.assignee = None
self.workflow = workflow
self.case = case
self.started = False
self.start_time = None
self.end_time = None
def set_workflow(self, workflow):
"""
Set the parent workflow
:param workflow:
:return:
"""
self.workflow = workflow
def set_case(self, case):
"""
Set the parent case
:param case:
:return:
"""
self.case = case
def set_env(self, env):
self.env = env
def is_done(self):
return self.work_to_do == self.work_done
def done_by(self):
return self.work_done_by
def assign_to(self, _assignee):
self.assignee = _assignee
def assigned_to(self):
return self.assignee
def do_work(self, by=None):
"""
Do some work (must have an assignee first)
:return:
"""
        assert (by or self.assignee) is not None, "No one specified to work on the work item"
assert self.env is not None, "Must have an env to do work"
# Set the start time if it hasn't already been set
if not self.started:
self.start_time = self.env.now
if self.workflow is not None:
self.workflow.set_start_time(self.env.now)
self.started = True
self.work_done_by = by or self.assignee
if self.work_done < self.work_to_do:
self.work_done += 1
if self.is_done():
self.end_time = self.env.now
# Signal to the workflow that we're finished
logging.info('%s finished by %s at %s' % (self, self.work_done_by, self.env.now))
self.workflow.work_is_finished()
def finish(self, by=None):
"""
Flag the work as having been done by the assignee and then unassign the work.
:param by:
:return:
"""
        assert (by or self.assignee) is not None, "No one specified to finish the work step"
assert self.env is not None, "Must have an env to finish work"
# Set the start time if it hasn't already been set
if not self.started:
self.start_time = self.env.now
if self.workflow is not None:
self.workflow.set_start_time(self.env.now)
self.started = True
self.work_done_by = by or self.assignee
self.assignee = None
self.work_done = self.work_to_do
self.end_time = self.env.now
# Signal to the workflow that we're finished
logging.info('%s finished by %s at %s' % (self, self.work_done_by, self.env.now))
self.workflow.work_is_finished()
def size(self):
return self.work_to_do
def __str__(self):
return '%s work of size %d with %d done' % (self.name, self.work_to_do, self.work_done)
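# Minimal usage sketch with hypothetical stand-ins for the env and workflow
# (the real project presumably supplies a simpy-style env with a `now`
# attribute and a Workflow implementing these two callbacks):
class _StubEnv:
    now = 0
class _StubWorkflow:
    def set_start_time(self, t):
        pass
    def work_is_finished(self):
        pass
if __name__ == '__main__':
    w = Work(name='Review', size=2, env=_StubEnv(), workflow=_StubWorkflow())
    w.assign_to('alice')
    w.do_work()
    w.do_work()  # second unit completes the work and notifies the workflow
    print(w.is_done(), w.done_by())  # -> True alice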
| true
|
8b7fbbe1fe05cb819a9300e1bf6de40365e3f0fd
|
Python
|
eduardo579/Python3_Curso
|
/calculadora.py
|
UTF-8
| 1,037
| 4.46875
| 4
|
[] |
no_license
|
print('1. add | 2. subtract | 3. multiply | 4. divide | 5. exit')
def suma(numero1, numero2):
    resultado = numero1 + numero2
    print('The result is: '+str(resultado))
def resta(numero1, numero2):
    resultado = numero1 - numero2
    print('The result is: '+str(resultado))
def multi(numero1, numero2):
    resultado = numero1 * numero2
    print('The result is: '+str(resultado))
def divi(numero1, numero2):
    resultado = numero1 / numero2
    print('The result is: '+str(resultado))
while True:
    elegir = int(input('Select an option: '))
    if elegir >= 1 and elegir <= 4:
        print('Enter the numbers: ')
        num1 = int(input('No. 1: '))
        num2 = int(input('No. 2: '))
        if elegir == 1:
            suma(num1, num2)
        elif elegir == 2:
            resta(num1, num2)
        elif elegir == 3:
            multi(num1, num2)
        else:
            divi(num1, num2)
    elif elegir == 5:
        break
    else:
        print('Not a valid option')
| true
|
dcbbaa561ba799814510bcf56c312253870fcd7f
|
Python
|
pranithkumar361999/Hackerrank-Solutions
|
/staircase.txt
|
UTF-8
| 370
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the staircase function below.
def staircase(n):
sp=n-1
st=1
for i in range(n):
print((" ")*sp,end="")
print("#"*st,end="")
sp-=1
st+=1
print()
if __name__ == '__main__':
n = int(input())
staircase(n)
| true
|
23a91d379a58f1b701e5b98767c031c90e1a27f5
|
Python
|
P1234Sz/PythonKurs
|
/Spotkanie2/zad2.py
|
UTF-8
| 336
| 3.578125
| 4
|
[] |
no_license
|
def Odsetki (oproc, czas, kwota):
odsetki = kwota * oproc * czas / 12
return odsetki
licz = Odsetki (0.03, 3, 1000)
print(licz)
i = 0
kwota = 1000
while i < 4:
kwota = kwota + Odsetki(0.03, 3, kwota)
i = i + 1
print("Odnawialna = " + str(round(kwota, 2)))
print("Caloroczna = " + str(1000 + Odsetki(0.03, 12, 1000)))
| true
|
424b842c17014e96f6a3bd2c0821389630e9ca1e
|
Python
|
Greensahil/CS697
|
/matplot.py
|
UTF-8
| 342
| 3.265625
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import math
m = 0.5
c = 4
x = [ i for i in range(100)]
y = [m*i + c for i in x]
#plt.xticks(x,[i for i in range(2010,2020)])
plt.xlabel('Years')
plt.ylabel('price')
plt.plot(x,y)
y = [i**3 for i in x]
plt.plot(x,y)
y = [math.sin(i) for i in x]
plt.plot(x,y)
plt.legend(['exam','work'])
#
plt.show()
| true
|
3389428130191aa7bb149993646916109121e5d1
|
Python
|
itroot/projecteuler
|
/solved/level-2/26/solve.py
|
UTF-8
| 584
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def divide1by(number, accuracy):
my1=10**accuracy
return str(my1/number)
def findDividePeriod(numbers, dividerLength):
array=numbers[2*dividerLength+1:]
pattern=array[0:dividerLength]
array=array[dividerLength:]
return array.index(pattern)+1
def periodLength(number):
numbers=divide1by(number, 10000)
return findDividePeriod(numbers, len(str(number)))
value=2
maxPeriod=1
for i in range(2, 1000):
period=periodLength(i)
if period>maxPeriod:
maxPeriod=period
value=i
print value
| true
|
e3f0ed38e08f66229dc7dbe47673509833875394
|
Python
|
sanskrit-lexicon/BUR
|
/issue3/prep_burab_examples.py
|
UTF-8
| 2,438
| 2.5625
| 3
|
[] |
no_license
|
#-*- coding:utf-8 -*-
"""prep_burab_examples.py
"""
from __future__ import print_function
import sys,re,codecs
import digentry
class ABtip:
def __init__(self,line):
m = re.search(r'^(.*)<(.*?)>(.*)</.*?> *\(([0-9]+)\)$',line)
self.ab = m.group(1).rstrip()
self.tipcode = m.group(2)
assert self.tipcode in ['tip','TIP']
self.tip = m.group(3)
self.count = int(m.group(4))
def init_burab(filein):
with codecs.open(filein,"r","utf-8") as f:
        recs = [ABtip(line.rstrip('\r\n')) for line in f]
print(len(recs),"tooltips read from",filein)
return recs
def simplify(x):
removals = ['{%','%}','{#','#}','<ab>','</ab>','<lbinfo.*?/>',
'<lang n="greek">','</lang>','{@','@}']
for regex in removals:
x = re.sub(regex,'',x)
return x
def entrylines(entry,abmatch):
outarr = []
text = ' '.join(entry.datalines)
# first match
index = text.find(abmatch)
if index == -1:
return outarr
a = text[0:index]
b = text[index+len(abmatch):]
a1 = simplify(a)
b1 = simplify(b)
a2 = a1[-30:]
b2 = b1[0:30]
meta = re.sub('<k2>.*$','',entry.metaline)
outarr.append(meta)
outarr.append(a2 + abmatch + b2)
outarr.append(';')
return outarr
def init_examples(abrecs,entries):
outrecs = []
abrecs1 = [rec for rec in abrecs if rec.tipcode == 'TIP']
for irec,rec in enumerate(abrecs1):
abmatch = '<ab>%s</ab>' % rec.ab
outarr = []
outarr.append('; ' + ('"'*70))
outarr.append('')
outarr.append('; ' + ('"'*70))
count = rec.count
n = 0
for entry in entries:
lines = entrylines(entry,abmatch)
if lines != []:
for line in lines:
outarr.append(line)
n = n + 1
if n == count:
break
if n == 10: # stop at 10 examples per abbreviation
break
case = irec + 1
if count == 1:
outarr[1] = '; Case %s: Abbreviation %s occurs %s time' % (case,rec.ab,count)
else:
outarr[1] = '; Case %s: Abbreviation %s occurs %s times' % (case,rec.ab,count)
outrecs.append(outarr)
return outrecs
def write_examples(fileout,outrecs):
with codecs.open(fileout,"w","utf-8") as f:
for outarr in outrecs:
for line in outarr:
f.write(line+'\n')
print(len(outrecs),"examples written to",fileout)
if __name__=="__main__":
filein = sys.argv[1] # prep_burab
filein1 = sys.argv[2] # temp_bur_6
fileout = sys.argv[3] #
abrecs = init_burab(filein)
entries = digentry.init(filein1)
outrecs = init_examples(abrecs,entries)
write_examples(fileout,outrecs)
| true
|
5f5e86ba34a88def9d71eb2345a8aa2f625e1bed
|
Python
|
olesmith/SmtC
|
/Curve/ArcLength.old.py
|
UTF-8
| 4,074
| 3.21875
| 3
|
[
"CC0-1.0"
] |
permissive
|
import time
from Base import *
from Timer import Timer
class Curve_ArcLength():
Integration_NI=10
Integration_Type=1 #(1) simpson, (2)
##!
##! If curve length defined with a function name, call it.
##! Otherwise, use numerical integration.
##!
def S_Calc(self,t,t0=None,n=100):
if (not t0):
t0=self.t1
if (self.S_Func):
method=getattr(self,self.S_Func)
return method(t)
else:
return self.Curve_Length(t0,t,n)
##!
##! Find arc lengths for t values in ts.
##!
##! Type 1: Trapezoids
##! Type 2: Simpsons
##!
def Curve_Lengths_Calc(self,ts=[]):
if (len(ts)==0):
ts=self.ts
t1=ts[0]
ss=[0.0]
dss=[0.0]
s=0.0
timer=Timer("Calculating "+str(len(ts))+" Arclengths:")
for i in range(1, len(ts) ):
t1=ts[i-1]
t2=ts[i]
ds=self.Curve_Length(t1,t2,self.Integration_NI)
s+=ds
ss.append(s)
dss.append(ds)
self.dS=dss
self.S=ss
return s
##!
##! Return n nearest t value
##!
def T2n(self,t):
n=0
for i in range( len(self.ts) ):
if (t>=self.ts[i]):
n=i
if (n>=len(self.ts)-1) : n=len(self.ts)-1
return n
##!
##! Find arc length. We suppose reparametrization has taken place.
##!
def Curve_Length(self,t1,t2,n=100):
return self.Trapezoids(t1,t2,n)
def Trapezoid(self,t1,t2):
f1=self.dr(t1).Length()
f2=self.dr(t2).Length()
return 0.5*(t2-t1)*(f2+f1)
    def Trapezoids_Naive(self,t1,t2,n=100):
        dt=(t2-t1)/(1.0*(n-1))
        t=t1+dt
        trapez=0.5*self.dr(t1).Length()
        for i in range(n-2):              # interior points only; t1 was already counted
            trapez+=self.dr(t).Length()
            t+=dt
        trapez+=0.5*self.dr(t2).Length()  # was '=', which threw the accumulated sum away
        return trapez*dt
    def Trapezoids(self,t1,t2,n=100):
        dt=(t2-t1)/(1.0*(n-1))
        trapez=0.0
        t=t1
        for i in range(n-1):                # n-1 panels of width dt cover [t1,t2]
            trapez+=self.Trapezoid(t,t+dt)  # was Trapezoid(t1,t1+dt): the same first panel n times
            t+=dt
        return trapez
def Simpson(self,t1,t2):
h3=(t2-t1)/3.0
f1=self.dr( t1 ).Length()
f2=self.dr( 0.5*(t1+t2) ).Length()
f3=self.dr( t2 ).Length()
return h3*( f1+4.0*f2+f3 )
    def Simpsons(self,t1,t2,n=100):
        dt=(t2-t1)/(1.0*(n-1))
        a=t1                      # was reset to 0.0, which ignored the requested interval
        b=t1+dt
        simpson=0.0
        for i in range(n-1):      # n-1 panels of width dt cover [t1,t2] exactly
            simpson+=self.Simpson(a,b)
            a+=dt
            b+=dt
        return simpson
##!
    ##! Calculate curve (arc) lengths for a large number of curve points and
    ##! reparametrize by the arclength.
##!
def Reparameterize(self,nn=100):
nn*=self.NPoints
ts=Intervals(self.t1,self.t2,nn)
dt=(self.t2-self.t1)/(1.0*len(ts))
t=self.t1
#self.Calc_dSs(ts)
#s=self.Calc_Ss(ts)
self.ts=ts
s=self.Curve_Lengths_Calc()
ds=s/(1.0*(self.NPoints-1))
print "Curve Length: ",s,self.NPoints," intervals",s,ds
if (self.Verbose>2):
print "i\tj\ts_{j-1}\t\ts\t\ts_j"
s=0.0
rts=[self.t1]
ss=[]
for i in range( self.NPoints ):
j=t2Interval(self.S,s)
if (j>=0):
eta=(s-self.S[j-1])/(self.S[j]-self.S[j-1])
t=Convex( ts[j-1], ts[j], eta)
if (self.Verbose>2):
print i,"\t",j,"\t",self.S[j-1],"\t",s,"\t",self.S[j]
rts.append( t )
s+=ds
ss.append( s )
rts.append(self.t2)
ss.append(s)
self.ts=rts
#self.S=ss
self.Integration_NI=1000
self.Curve_Lengths_Calc()
return rts
| true
|
066911240f89abd9e374f64bb8cad8816d7a2ba3
|
Python
|
NonsoAmadi10/Okra
|
/okra/identity.py
|
UTF-8
| 3,492
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
import requests
from .okra_base import OkraBase
from .utils import validate_id, validate_dates, validate_date_id
class Identity(OkraBase):
"""This module returns various account holder information on file with the bank, including names, emails phone
numbers and addresses.
Key functions:
get_identities -- fetches all identities
    get_by_id -- fetches various account holder information on file using the id
    by_options -- fetches identity info using the options metadata you provided when setting up the widget
    by_customer -- fetches various account holder information using the customer id
    by_date -- fetches various account information using a date range
    customer_date -- fetches account information during a specific date period
"""
def __init__(self, PRIVATE_TOKEN):
super(Identity, self).__init__(PRIVATE_TOKEN)
def get_identities(self):
"""fetches all identities"""
url = self._base_url + \
self.endpoints_dict["identity"]["get_identities"]
response = requests.post(url, headers=self.headers)
return response.json()
@validate_id
def get_by_id(self, id):
""" fetches various account holder information using the id
Keyword arguments:
id -- the identity id
Return: JSON object
"""
url = self._base_url + self.endpoints_dict["identity"]["by_id"]
response = requests.post(url, headers=self.headers, data={"id": id})
return response.json()
def by_options(self, options):
""" fetches identity info using the options metadata
Keyword arguments:
options -- an object containing the metadata associated with the record
Return: JSON object
"""
url = self._base_url + self.endpoints_dict["identity"]["by_options"]
response = requests.post(
url, headers=self.headers, data={"options": options})
return response.json()
@validate_id
def by_customer(self, customer):
""" fetches various account information using customer info
Keyword arguments:
customer -- customer id info
Return: JSON object
"""
url = self._base_url + self.endpoints_dict["identity"]["by_customer"]
response = requests.post(url, headers=self.headers, data={
"customer": customer})
return response.json()
@validate_dates
def by_date(self, _from, _to):
""" fetches account holder information using date range
Keyword arguments:
_from -- the start date
_to -- the end date
Return: JSON object
"""
url = self._base_url + self.endpoints_dict["identity"]["by_date"]
response = requests.post(url, headers=self.headers, data={
"from": _from, "to": _to})
return response.json()
@validate_date_id
def customer_date(self, _from, _to, customer):
""" fetches an account holder information using date range and customer id
Keyword arguments:
_from -- The start date
_to: The end date
customer: customer id info
Return: JSON object
"""
url = self._base_url + self.endpoints_dict["identity"]["customer_date"]
response = requests.post(url, headers=self.headers, data={
"from": _from, "to": _to, "customer": customer})
return response.json()
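# Minimal usage sketch (hypothetical token and dates; each helper returns the
# parsed JSON response from the Okra API):
#
#   identity = Identity("sk_test_xxx")
#   print(identity.get_identities())
#   print(identity.by_date("2020-01-01", "2020-12-31"))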
| true
|
d3bf9e974b28dcfcd44e3b3a3e4e5cb9a06660af
|
Python
|
encryptogroup/SoK_ppClustering
|
/utils/cluster/dataset.py
|
UTF-8
| 2,876
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
# MIT License
#
# Copyright (c) 2021 Aditya Shridhar Hegde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sklearn.datasets as skdatasets
from pathlib import Path
import numpy as np
from .score import Score
class Dataset:
@staticmethod
def load_gz(data_dir, dname):
ds = Dataset()
data_dir = Path(data_dir)
ds.data = np.loadtxt((data_dir / f"{dname}.data.gz").resolve(), ndmin=2)
ds.labels = []
for f in data_dir.glob(f"{dname}.labels*.gz"):
# Subtract 1 to ensure label for noise is -1
label = np.loadtxt(f.resolve(), dtype=np.intc) - 1
ds.labels.append(label)
# Number of clusters is the derived from the first label set
ds.n_clusters = set([len(np.unique(label[label != -1])) for label in ds.labels])
return ds
@staticmethod
def random(n_samples, n_features, centers, random_state):
ds = Dataset()
X, y = skdatasets.make_blobs(
n_samples=n_samples,
n_features=n_features,
centers=centers,
random_state=random_state,
)
ds.data = X
ds.labels = [y]
        # wrap in a set to match load_gz (which builds a set), so print_stats
        # can iterate over it; assumes `centers` is an int cluster count
        ds.n_clusters = {centers}
return ds
def print_stats(self):
npoints, dim = self.data.shape
print("Number of data points:", npoints)
print("Dimension of data point:", dim)
print("Number of labels (groundtruth):", len(self.labels))
print("Clusters per label:", end=" ")
for c in self.n_clusters:
print(c, end=" ")
print()
print("Noise points per label:", end=" ")
for lbl in self.labels:
print((lbl == -1).sum(), end=" ")
print()
def groundtruth_external_metrics(self):
return [
Score.evaluate(self.data, label, label).external() for label in self.labels
]
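# Minimal usage sketch (synthetic blobs, no data files needed):
#
#   ds = Dataset.random(n_samples=150, n_features=2, centers=3, random_state=42)
#   ds.print_stats()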
| true
|
0fed9ea24b66c71616a74e73d93dde4ff5b7d5ad
|
Python
|
Adills1494/phrase-hunter-game.py
|
/constants.py
|
UTF-8
| 487
| 2.640625
| 3
|
[] |
no_license
|
PHRASES = [
    ("Doth mother know you weareth her drapes", "Shakespeare in the park? More like Tony Stark in the park"),
    ("You gonna eat your tots", "In the cafeteria with Napoleon Dynamite"),
    ("With great power comes great responsibility", "Bitten by a radioactive spider and this is what you tell me uncle?"),
    ("Honey Where is my super suit", "When your wife hides your suit and you have to save the city"),
    ("Why so serious", "So let me tell you about my father Batman"),
]
| true
|
956c8a0290e7c55283c7e36753015b7a21c62cd5
|
Python
|
valohai/valohai-yaml
|
/valohai_yaml/utils/duration.py
|
UTF-8
| 1,320
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
import datetime
import re
from typing import Optional, Union
suffixes = {
"s": 1,
"m": 60,
"h": 60 * 60,
"d": 60 * 60 * 24,
"w": 60 * 60 * 24 * 7,
}
simple_duration_re = re.compile(r"(?P<value>[.\d]+)\s*(?P<suffix>[a-z]*)")
def parse_duration_string(duration_str: str) -> datetime.timedelta:
total_duration_sec = 0.0
for match in simple_duration_re.finditer(duration_str):
value = float(match.group("value"))
suffix = match.group("suffix").lower()
        if not suffix or suffix[0] not in suffixes:  # a bare number (empty suffix) previously raised IndexError
raise ValueError(f"Unknown duration suffix in {match.group(0)!r}")
total_duration_sec += value * suffixes[suffix[0]]
if total_duration_sec <= 0:
raise ValueError(f"Could not parse positive duration from {duration_str!r}")
return datetime.timedelta(seconds=total_duration_sec)
def parse_duration(
duration_value: Union[int, str, None]
) -> Optional[datetime.timedelta]:
if duration_value is None:
return None
if isinstance(duration_value, int):
return datetime.timedelta(seconds=duration_value)
if isinstance(duration_value, str):
if duration_value.strip():
return parse_duration_string(duration_value)
return None
raise ValueError(f"Could not parse duration from {duration_value!r}")
| true
|
2b70b401626203b0922c4ec7666e0978bd9bc013
|
Python
|
Swastik-Saha/Python-Programs
|
/Upper&Lower_Triangular_Matrix.py
|
UTF-8
| 674
| 3.921875
| 4
|
[] |
no_license
|
#Upper and Lower Triangular Matrix
def ultMat():
dim = int(raw_input("Enter the dimension of the matrix: "))
mat =[[0 for i in range(0,dim)]for j in range(0,dim)]
for i in range(0,dim):
for j in range(0,dim):
mat[i][j] = int(raw_input("Enter an element of the matrix"))
upper_mat = []
lower_mat = []
for i in range(0,dim):
for j in range(0,dim):
if(j>i):
upper_mat.append(mat[i][j])
if(j<i):
lower_mat.append(mat[i][j])
print upper_mat,lower_mat
| true
|
7f16a78002b815fb80d10017f93242c7d60dc777
|
Python
|
Confucius-hui/LeetCode
|
/排序算法/快速排序.py
|
UTF-8
| 26
| 2.53125
| 3
|
[] |
no_license
|
a = [1,23,9]
print(min(a))
| true
|
eb9580e7393f073428d23dda1ff37d53a106ce91
|
Python
|
StBogdan/PythonWork
|
/HackerRank/Intro/Python_if-else.py
|
UTF-8
| 323
| 3.53125
| 4
|
[] |
no_license
|
if __name__ == '__main__':
n = int(input())
    if( n % 2 != 0):
print("Weird")
else:
if( 2<=n and n<=5):
print("Not Weird")
elif (6<=n and n<=20):
print("Weird")
elif (n>20):
print("Not Weird")
| true
|
f7b3e1b45226dd00147f69a150379f0b95b94eab
|
Python
|
ryoryo666/Gazebo_sim
|
/src/archive/SpeedClass.py
|
UTF-8
| 578
| 3.0625
| 3
|
[] |
no_license
|
class VW():
def __init__(self,TurningData,d,translation_v):
self.r=TurningData.Radius
self.direction=TurningData.Direction
self.d=d
self.v=translation_v
self.v_in =self.v*((self.r+self.d)/self.r)
self.v_out =self.v*((self.r-self.d)/self.r)
def V_IN(self):
return self.v_in
def V_OUT(self):
return self.v_out
def W(self):
if self.direction=="l":
return (self.v_in-self.v_out)/(2*self.d)
elif self.direction=="r":
return (self.v_out-self.v_in)/(2*self.d)
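# Minimal usage sketch with a hypothetical TurningData stand-in (the real
# class only needs Radius and Direction attributes, as used above):
if __name__ == '__main__':
    class _Turn:
        Radius = 2.0
        Direction = "l"
    vw = VW(_Turn(), d=0.25, translation_v=1.0)
    print(vw.V_IN(), vw.V_OUT(), vw.W())  # -> 1.125 0.875 0.5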
| true
|
cb21666fd7fcf24724ef23c9fe54456e5d316339
|
Python
|
AbcSxyZ/GetHome-Leboncoin
|
/Ad_class.py
|
UTF-8
| 3,724
| 3.09375
| 3
|
[] |
no_license
|
class Ad:
"""Object to represent an Ad of Leboncoin website,
mainly for renting ads"""
def __init__(self, title, link, price=None, pictures=None, phone=None,
description=None, proprietary=None, size=None, rooms=None,
publication_date=None, address=None):
self.title = title
self.link = link
self.set_price(price)
self.set_phone(phone)
self.set_description(description)
self.set_proprietary(proprietary)
self.set_size(size)
self.set_rooms(rooms)
self.set_publication_date(publication_date)
self.set_address(address)
#Need a list or None for pictures. Loop over each image to save it
self.pictures = []
if pictures and type(pictures) == list:
for pic in pictures:
self.set_picture(pic)
elif pictures != None:
raise TypeError("Invalid type for pictures : {}.".format(pictures))
def __repr__(self):
Ad_repr = "Ad name : {}\nAd link : {}\n".format(self.title, self.link)
Ad_repr += "Proprietary : {} | ".format(self.proprietary)
if self.phone:
Ad_repr += "phone number : {} | ".format(self.phone)
Ad_repr += "Publication : {}".format(self.publication_date)
Ad_repr += "\n"
if self.price:
Ad_repr += "Price : {}\n".format(self.price)
if self.size:
Ad_repr += "Size : {}\n".format(self.size)
if self.rooms:
Ad_repr += "Rooms : {}\n".format(self.rooms)
Ad_repr += "\nDESCRIPTION : \n{}\n".format(self.description)
if len(self.pictures) > 0:
Ad_repr += "\nImages:\n"
for image in self.pictures:
Ad_repr += "{}\n".format(image)
return Ad_repr
def mail_format(self):
"""
        Format an ad to be sent by email, creating the body of the mail with
        the available information
"""
#Add mail header with proprietary informations
mail_str = ""
mail_str += self.proprietary
if self.phone:
mail_str += " | " + self.phone
mail_str += " | " + self.publication_date + "\n"
mail_str += self.link + "\n\n"
if self.address:
mail_str += self.address + "\n"
#Add optional element to our mail string
price_str = "Price : {}".format(self.price) if self.price else None
rooms_str = "Rooms : {}".format(self.rooms) if self.rooms else None
size_str = "Size : {}".format(self.size) if self.size else None
extra = [price_str, rooms_str, size_str]
extra = list(filter(None, extra))
#Loop over each formated string to add it
for index, option_str in enumerate(extra):
mail_str += option_str
if index + 1 != len(extra):
mail_str += ' | '
if len(extra) >= 1:
mail_str += '\n\n'
#Add the description
mail_str += "\nDESCRIPTION : \n" + self.description + "\n"
return mail_str
def set_publication_date(self, publication_date):
self.publication_date = publication_date
def set_rooms(self, rooms):
self.rooms = rooms
def set_size(self, size):
self.size = size
def set_price(self, price):
self.price = price
def set_picture(self, picture):
self.pictures.append(picture)
def set_phone(self, phone):
self.phone = phone
def set_proprietary(self, proprietary):
self.proprietary = proprietary
def set_description(self, description):
self.description = description
def set_address(self, address):
self.address = address
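# --- Usage sketch (editor's addition; all ad data below is hypothetical) ---
if __name__ == "__main__":
    ad = Ad("T2 apartment", "https://www.leboncoin.fr/ad/example",
            price="650", proprietary="Jane", publication_date="today",
            description="Bright flat near the station.")
    print(ad.mail_format())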
| true
|
16b245766e87b3b67e190078900dbad9814f622f
|
Python
|
icicl/minecraft-map-art-nbt-creator
|
/mc map nbt.py
|
UTF-8
| 2,943
| 2.671875
| 3
|
[] |
no_license
|
pathtomcsaves = "/Users/admin/Library/Application Support/minecraft/saves/"#path to the minecraft saves folder (this is for mac, if you replace 'admin' with your username
mcworldfolder = "testserv/"#name of the world
image = 'eagle.jpeg'#filepath of image, or image name if it is in the same directory as this file
crop = True# stretches image to fit if false
mapswide = 2#how many maps wide your display will be
mapstall = 2#how many maps tall your display will be
map_id = 14#the id of the first map to be modified. for example, if I am using maps #23 to #28 to make a 3X2 display, this value should be 23
#tags = {'zCenter':20,'xCenter':139,'locked':97,'colors':152,'trackingPosition':64}
'''some of the nbt tags'''
z = 0#zCenter
x = 0#xCenter
locked = True
track = False#trackingPosition
zb = bytes([(z//(256**i))%256 for i in range(3,-1,-1)])
xb = bytes([(x//(256**i))%256 for i in range(3,-1,-1)])
lb = bytes([locked])
tb = bytes([track])
import gzip#https://github.com/python/cpython/blob/3.8/Lib/gzip.py (pip didn't work for me)
with gzip.open('map_template.dat') as f:
dat = f.read()
#colorbytes = b'\x00'*16384
def mkmap(id_,colorbytes=b'\x00'*16384):
d2 = dat[:20]+zb+dat[24:64]+tb+dat[65:97]+lb+dat[98:139]+xb+dat[143:152]+colorbytes+dat[-20:]
with gzip.open(pathtomcsaves+mcworldfolder+'data/map_'+str(id_)+'.dat','wb+') as f:
f.write(d2)
from colors import colors
#colors = {5:(109,153,48),34:(255,255,255),43:(79,57,40),74:(299,299,51)}
def closest(pix):
    rmod,gmod,bmod = 0,0,0
    power = 2#exponent applied to each channel difference - 2 (squared distance) seems fine; odd values would need abs()
    min_ = (256**power)*3#upper bound on the distance between the given pixel and any palette color
    minid = 0#id of the closest palette color found so far
#    min_pix = (255,255,255)#white, can be whatever
    for i in colors:
        dist = (pix[0]+rmod-colors[i][0])**power+(pix[1]+gmod-colors[i][1])**power+(pix[2]+bmod-colors[i][2])**power
        if dist < min_:
            min_=dist#update the min distance
            minid=i#and its id
#            min_pix = colors[i]#and the corresponding color
    return minid
#for i in range(200):
# if dat[i:i+16]==b'trackingPosition':print(i+16)
from PIL import Image
im = Image.open(image)
x,y=im.width,im.height
x_,y_=x,y
if crop:
if x*mapstall > y*mapswide:
x = y*mapswide//mapstall
else:
y = x*mapstall//mapswide
im = im.resize((mapswide*128,mapstall*128),box=((x_-x)//2,(y_-y)//2,(x_+x)//2,(y_+y)//2))
il = im.load()
#for x in range(mapswide*128):
# for y in range(mapstall*128):
# il[x,y] = closest(il[x,y])
#im.show()
for xm in range(mapswide):
for ym in range(mapstall):
cb = bytes([0,0,64,0])
for y in range(128*xm,128*xm+128):
for x in range(128*ym,128*ym+128):
cb+=bytes([closest(il[x,y])])
mkmap(map_id,cb)
print(map_id)
map_id += 1
| true
|
d59007ad7805cd6534f4ef06d563a77b47746519
|
Python
|
y0ge5h/olympic_project_new
|
/q02_country_operations/build.py
|
UTF-8
| 600
| 2.609375
| 3
|
[] |
no_license
|
# %load q02_country_operations/build.py
# default imports
from greyatomlib.olympics_project_new.q01_rename_columns.build import q01_rename_columns
#Previous Functions
path = './data/olympics.csv'
OlympicsDF=q01_rename_columns(path)
def q02_country_operations(OlympicsDF):
OlympicsDF['Country_Name'] = OlympicsDF['Country'].apply(str).apply(change_name)
OlympicsDF.iloc[100,16] = 'Portugal'
return OlympicsDF
def change_name(name):
    # Keep only the part of the name before the first non-breaking space (\xa0)
    idx = name.find('\xa0')
    return name[:idx] if idx != -1 else name
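# Example (editor's note, hypothetical value): change_name('France\xa0(FRA)')
# returns 'France'; names without '\xa0' pass through unchanged.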
| true
|
01580d1945c5883ea37f139bec279fe4bef93a59
|
Python
|
strengthen/LeetCode
|
/Python3/152.py
|
UTF-8
| 3,202
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
__________________________________________________________________________________________________
sample 48 ms submission
class Solution:
def maxProduct(self, nums: List[int]) -> int:
def split(nums: List[int], n: int):
"""
Split the list at elements equal to n
"""
ret = []
while (True):
try:
find_index = nums.index(n)
except ValueError:
ret.append(nums)
return ret
ret.append(nums[:find_index])
nums = nums[find_index + 1:]
def prod(lst: List[int]):
"""
Return the product of all numbers in the list.
If the list is empty, return one.
"""
ret = 1
for n in lst:
ret *= n
return ret
def maximize(lst: List[int]):
"""
            Return the maximum product possible in the list.
Assume it doesn't include zeros.
"""
ret = prod(lst)
if ret > 0:
return ret
            # If the product is negative, there must be an odd
            # number of negative numbers in the list
            # If the list is 1-element long, it is already maximized
if len(lst) == 1:
return ret
# Search the first and last negative elements
for i, n in enumerate(lst):
if n < 0:
first_negative = i
break
for i, n in enumerate(reversed(lst), start=1):
if n < 0:
last_negative = len(lst) - i
break
# Check which slice's product is larger (in magnitude)
first_prod = prod(lst[:first_negative + 1])
last_prod = prod(lst[last_negative:])
return ret // max(first_prod, last_prod)
splitted = split(nums, 0)
has_zero = len(splitted) > 1
maximized = [maximize(l) for l in splitted if len(l) > 0]
if maximized:
if has_zero:
return max(max(maximized), 0)
else:
return max(maximized)
else:
if has_zero:
return 0
else:
raise ValueError("Given empty list")
__________________________________________________________________________________________________
sample 13120 kb submission
class Solution:
def maxProduct(self, nums: List[int]) -> int:
if len(nums) == 1:
return nums[0]
gmax = nums[0]
imax = nums[0]
imin = nums[0]
for j in range(1, len(nums)):
if nums[j] < 0:
imax,imin = imin, imax
imax = max(nums[j], nums[j]*imax)
imin = min(nums[j], nums[j]*imin)
if imax > gmax:
gmax = imax
return gmax
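# Example (editor's note): Solution().maxProduct([2, 3, -2, 4]) returns 6,
# the product of the contiguous subarray [2, 3].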
__________________________________________________________________________________________________
| true
|
e3291742ef2c995bc898ac82b5dc3fd1ff13c247
|
Python
|
ashtmMSFT/FoodTruckFinder
|
/ftf.py
|
UTF-8
| 4,097
| 3.3125
| 3
|
[] |
no_license
|
from geopy import distance
import logging
import os
import pandas
import requests
import sys
## 'Constants'
FOOD_TRUCK_DATA_CSV = "Mobile_Food_Facility_Permit.csv"
FOOD_TRUCK_DATA_URL = "https://data.sfgov.org/api/views/rqzj-sfat/rows.csv?accessType=DOWNLOAD"
## Function definitions
def parse_user_input(user_input):
""" Parse and validate the script arguments to learn the user's current location. """
if len(user_input) != 2:
logging.error("Incorrect number of arguments provided")
display_expected_usage_and_quit()
try:
user_lat = float(user_input[0])
user_long = float(user_input[1])
except ValueError:
logging.error("Incorrect type of arguments provided")
display_expected_usage_and_quit()
return (user_lat, user_long)
def display_expected_usage_and_quit():
""" Print a message displaying the expected script usage, then exit. """
print("Expected usage: py ftf.py [your_latitude] [your_longitude]")
print(" e.g., py ftf.py 37.7775 -122.416389")
raise SystemExit
def download_latest_data():
""" Download the latest food truck permit dataset from SFGov
and save it to disk. """
try:
response = requests.get(FOOD_TRUCK_DATA_URL)
response.raise_for_status()
with open(FOOD_TRUCK_DATA_CSV, 'w') as file:
file.write(response.text)
except requests.RequestException:
logging.error("There was a problem retrieving the most recent food truck data.")
logging.warning("Falling back to local data (if available).")
def read_local_data():
""" Read the food truck dataset from disk. """
try:
data = pandas.read_csv(FOOD_TRUCK_DATA_CSV)
return data
    except Exception:
logging.error("Food truck data file could not be loaded! Cannot proceed -- aborting program")
raise
def clean_data(data):
""" Remove data rows (i.e., food trucks) with incomplete location data
or without an approved permit. """
# Filter out food trucks missing lat/long data
trucks_without_lat_long = data[(data["Latitude"] == 0) | (data["Longitude"] == 0)].index
data.drop(trucks_without_lat_long, inplace = True)
# Filter out food trucks with pending/expired/suspended permits
trucks_without_valid_permit = data[data["Status"] != "APPROVED"].index
data.drop(trucks_without_valid_permit, inplace = True)
def calculate_distances(data, user_location):
""" Add a column to our dataset indicating how far away the food truck is from the user. """
data["DistanceFromUser"] = data.apply(calculate_distance_to_truck, axis = 1, user_location = user_location)
def calculate_distance_to_truck(row, user_location):
""" Use GeoPy's distance function to calculate how far away the user is
from a particular food truck (in miles). """
food_truck_location = (row["Latitude"], row["Longitude"])
return distance.distance(food_truck_location, user_location).miles
def sort_by_distance(data):
""" Sort our data in place by DistanceFromUser, ascending. """
data.sort_values(by = ['DistanceFromUser'], inplace = True)
def display_results(results):
""" Show the user a list of the closest food trucks. """
print()
print("=" * 60)
print("= We found some food trucks near you. Let's eat!")
print("=" * 60, end = "\n\n")
for row in results.itertuples():
print(f"{row.Applicant}")
print(f"{row.DistanceFromUser:.2f} miles away -- {row.Address} ({row.Latitude:.4f}, {row.Longitude:.4f})")
print(f"{row.FoodItems}", end = "\n\n")
def main(user_input = sys.argv[1:]):
user_location = parse_user_input(user_input)
# Fetch the food truck data and save it to disk (if not already present)
if not os.path.isfile(FOOD_TRUCK_DATA_CSV):
download_latest_data()
data = read_local_data()
clean_data(data)
calculate_distances(data, user_location)
sort_by_distance(data)
results = data.head(10)
display_results(results)
return results
## Run the script
if __name__ == "__main__":
main()
| true
|
b837c5a5b4f235b360855aa1104a39f08f99c123
|
Python
|
felipee1/GraphQlPython
|
/database/config/crud.py
|
UTF-8
| 1,638
| 2.625
| 3
|
[] |
no_license
|
import sys
from sqlalchemy.orm import Session
from database.entitys import User
from database.entitys.User import UserAuthenticate
from database.models import UserModel
import bcrypt
def get_user_by_username(db: Session, username: str):
    user = db.query(UserModel.UserInfo).filter(
        UserModel.UserInfo.username == username).first()
    email = db.query(UserModel.UserInfo).filter(
        UserModel.UserInfo.email == username).first()
    byid = None
    if username.isdigit():  # only attempt the id lookup when the value is numeric
        byid = db.query(UserModel.UserInfo).filter(
            UserModel.UserInfo.id == int(username)).first()
    return user or email or byid
def get_by_param(db: Session, param: any, field: any, data: any):
return db.query(param).filter(field == data).first()
def create_any(db: Session, model: any):
db_data = model
db.add(db_data)
db.commit()
db.refresh(db_data)
return db_data
def update_any(db: Session, model: any):
db_data = model
db.add(db_data)
db.commit()
db.refresh(db_data)
return db_data
def create_user(db: Session, user: User.UserCreate):
hashed_password = bcrypt.hashpw(
user.password.encode('utf-8'), bcrypt.gensalt()).decode()
db_user = UserModel.UserInfo(
username=user.username, email=user.email, password=hashed_password, fullName=user.fullName, admin=user.admin)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def check_username_password(db: Session, user: UserAuthenticate):
db_user_info: UserModel.UserInfo = get_user_by_username(
db, username=user.username)
return bcrypt.checkpw(user.password.encode('utf-8'), db_user_info.password.encode('utf-8'))
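# --- Editor's sketch of the bcrypt round trip used above (no database needed):
# hashed = bcrypt.hashpw(b"secret", bcrypt.gensalt()).decode()
# assert bcrypt.checkpw(b"secret", hashed.encode('utf-8'))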
| true
|
bfaf30eb38170f60ae025a23289038ec5e30835a
|
Python
|
tejvi-m/pandas_opencl
|
/tests/benchmark.py
|
UTF-8
| 578
| 2.578125
| 3
|
[] |
no_license
|
import numpy as np
import os
import scipy.stats
results = []
# Compile the C++ benchmark once before timing
command = os.popen('g++ -std=c++17 tests/cppTest.cpp -lOpenCL -O3 -w')
command.close()
for j in [9, 99, 999, 9999, 99999, 999999, 9999999, 99999999]:  # input sizes, one order of magnitude apart
xres = []
for i in range(0, 5):
command = os.popen('./a.out ' + str(j))
# print(command.read())
z = float(command.read())
xres.append(z)
command.close()
print(i, j, z)
results.append(scipy.stats.gmean(xres))
print("appended ", scipy.stats.gmean(xres))
print(results)
np.save("add01.npy", results)
| true
|
acc9416df8fd601869ba048584a37c1e32865764
|
Python
|
ankit-cn-47/DSA
|
/python/minValue.py
|
UTF-8
| 282
| 3.546875
| 4
|
[] |
no_license
|
def main():
arr = [25, 10, 9, 38, 75]
minValue = findMin(arr)
print(minValue)
def findMin(arr):
length = len(arr)
minIndex = 0
for i in range(length):
if arr[i] < arr[minIndex] :
minIndex = i
return arr[minIndex]
main()
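# Note (editor): the built-in min(arr) returns the same result in one call.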
| true
|
38703dbcfe5764f4bb8efc6ce2d85ccf04551aa8
|
Python
|
christensonb/Seaborn
|
/seaborn/file/file.py
|
UTF-8
| 7,863
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
""" This module contains helper functions for manipulating files """
__author__ = 'Ben Christenson'
__date__ = "10/7/15"
import os
import shutil
import hashlib
import time
import inspect
import json
if os.name == 'posix':  # mac and other posix systems
TRASH_PATH = '/'.join(os.getcwd().split('/')[:3] + ['.Trash'])
else:
TRASH_PATH = '.' # todo implement for other os
def mkdir(path):
if os.path.exists(os.path.abspath(path)):
return
path_directories = os.path.abspath(path).replace('\\', '/').split('/')
full_path = ''
for directory in path_directories[1:]:
full_path += '/' + directory
if not os.path.exists(full_path):
os.mkdir(full_path)
def mkdir_for_file(filename):
mkdir(os.path.split(filename)[0])
def clear_path(path):
"""
This will move a path to the Trash folder
:param path: str of the path to remove
:return: None
"""
from time import time
if TRASH_PATH == '.':
shutil.rmtree(path, ignore_errors=True)
else:
shutil.move(path,
'%s/%s_%s' % (TRASH_PATH, os.path.basename(path), time()))
def get_filename(filename, trash=False):
if trash:
filename = os.path.join(TRASH_PATH, filename)
full_path = os.path.abspath(filename)
if not os.path.exists(os.path.split(full_path)[0]):
mkdir(os.path.split(full_path)[0])
if os.path.exists(full_path):
os.remove(full_path)
return full_path
def find_folder(folder_name, path=None):
frm = inspect.currentframe().f_back
path = path or os.path.split(frm.f_code.co_filename)[0] or os.getcwd()
for i in range(100):
try:
if os.path.exists(os.path.join(path, folder_name)):
return os.path.join(path, folder_name)
path = os.path.split(path)[0]
if len(path) <= 1:
break
except Exception as e:
return None
if os.path.exists(os.path.join(path, folder_name)):
return os.path.join(path, folder_name)
def find_file(file, path=None):
frm = inspect.currentframe().f_back
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
path = path or os.path.split(frm.f_code.co_filename)[0] or os.getcwd()
original_path = path
for i in range(100):
try:
file_path = os.path.abspath(os.path.join(path, file))
if os.path.exists(file_path):
return file_path
path = os.path.split(path)[0]
if len(path) <= 1:
break
except Exception as e:
            break
if os.path.exists(os.path.join(path, file)):
return os.path.join(path, file)
raise Exception("Failed to find file: %s in the folder hierachy: %s"%(file,original_path))
def sync_folder(source_folder, destination_folder, soft_link='folder',
only_files=False):
clear_path(destination_folder)
if os.name == 'posix' and soft_link == 'folder':
mkdir_for_file(destination_folder)
os.system('ln -s "%s" "%s"' % (source_folder, destination_folder))
return
for root, subs, files in os.walk(source_folder):
for file in files:
file = os.path.join(root, file)
copy_file(file, file.replace(source_folder, destination_folder),
soft_link=bool(soft_link))
if only_files:
break
for sub in subs:
mkdir(sub.replace(source_folder, destination_folder))
def _md5_of_file(context, sub_string):
"""
This will return the md5 of the file in sub_string
:param context:
:param sub_string: str of the path or relative path to a file
:return: str
"""
md5 = hashlib.md5()
file_path = sub_string
if not os.path.exists(file_path):
file_path = os.path.join(os.environ['CAFE_DATA_DIR_PATH'], file_path)
if not os.path.exists(file_path):
file_path = file_path.replace(' ', '_')
assert (os.path.exists(file_path)), "File %s doesn't exist" % file_path
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
return md5.hexdigest()
def read_local_file(filename):
"""
This will read a file in the same directory as the calling function
:param filename: str of the basename of the file
:return: str of the content of the file
"""
frm = inspect.currentframe().f_back
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
path = os.path.split(frm.f_code.co_filename)[0]
return read_file(os.path.join(path, filename))
def relative_path(*args):
"""
This will return the file relative to this python script
:param args: str of the relative path
:return: str of the full path
"""
frm = inspect.currentframe().f_back
if frm.f_code.co_name == 'run_code':
frm = frm.f_back
path = os.path.split(frm.f_code.co_filename)[0]
return os.path.abspath(os.path.join(path, *args))
def mdate(filename):
"""
    :param filename: str of the file
:return: float of the modified date of the file
"""
return os.stat(filename).st_mtime
def read_file(full_path):
    assert os.path.exists(full_path), "File '%s' doesn't exist" % full_path
    with open(full_path, 'r') as f:  # close the file handle promptly
        ret = f.read()
    if full_path.endswith('.json'):
        try:
            json.loads(ret)
        except Exception:
            raise Exception("%s is not valid JSON" % full_path)
    return ret
def copy_file(source_file, destination_file, soft_link=False):
"""
:param source_file: str of the full path to the source file
:param destination_file: str of the full path to the destination file
:param soft_link: bool if True will soft link if possible
:return: None
"""
if not os.path.exists(source_file):
raise IOError("No such file: %s" % source_file)
mkdir_for_file(destination_file)
if os.path.exists(destination_file):
os.remove(destination_file)
    if os.name == 'posix' and soft_link:
        try:
            os.symlink(source_file, destination_file)
        except Exception:
            shutil.copy(source_file, destination_file)  # fall back to a plain copy if symlinking fails
else:
try:
shutil.copy(source_file, destination_file)
except Exception as e:
raise
def read_folder(folder, ext='*', uppercase=False, replace_dot='.', parent=''):
"""
    This will read all of the files in the folder with the extension equal
    to ext
    :param folder: str of the folder name
    :param ext: str of the extension
:param uppercase: bool if True will uppercase all the file names
:param replace_dot: str will replace "." in the filename
:param parent: str of the parent folder
:return: dict of basename with the value of the text in the file
"""
ret = {}
if os.path.exists(folder):
for file in os.listdir(folder):
if os.path.isdir(os.path.join(folder, file)):
child = read_folder(os.path.join(folder, file),
ext, uppercase, replace_dot,
parent=parent + file + '/')
ret.update(child)
else:
if ext == '*' or file.endswith(ext):
key = file.replace('.', replace_dot)
key = uppercase and key.upper() or key
ret[parent + key] = read_file(os.path.join(folder, file))
return ret
class print_file():
def write(self, text):
print(text)
def flush(self):
pass
def smoke_test():
mkdir('%s/file_smoke_test/test1' % TRASH_PATH)
assert os.path.exists('%s/file_smoke_test/test1' % TRASH_PATH)
os.rmdir('%s/file_smoke_test/test1' % TRASH_PATH)
os.rmdir('%s/file_smoke_test' % TRASH_PATH)
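# --- Usage sketch (editor's addition; the folder path is hypothetical) ---
# read_folder('./config', ext='.json') returns a dict mapping each file name
# (prefixed with "subfolder/" for nested files) to its text contents.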
if __name__ == '__main__':
smoke_test()
| true
|
0fd2a3bf64167ffaaaed9a7e53e3c709d392bfb0
|
Python
|
Maya2468/Python-Code
|
/AtlantaPizza.py
|
UTF-8
| 331
| 3.71875
| 4
|
[] |
no_license
|
number_of_pizzas = int(input("How many pizzas do you want? "))  # int()/float() instead of eval() on raw user input
cost_per_pizza = float(input("How much does each pizza cost? "))
subtotal = number_of_pizzas * cost_per_pizza
tax_rate = 0.08
sales_tax = subtotal * tax_rate
total = subtotal + sales_tax
print("Pizza $",subtotal)
print("Sales Tax $", sales_tax)
print("Total $",total)
| true
|
f80b72ad5ba3c3cacf7087aff859762016b2c3d3
|
Python
|
a3910/aid1807-1
|
/aid1807正式班老师课件/Django/Day06/DjangoDemo06/index/forms.py
|
UTF-8
| 1,077
| 2.546875
| 3
|
[] |
no_license
|
from django import forms
# Data used by the ChoiceField below
# ('1','好评') = positive review, ('2','中评') = neutral review, ('3','差评') = negative review
TOPIC_CHOICE = (
    ('1','好评'),
    ('2','中评'),
    ('3','差评'),
)
# Form class for the comment widgets
# Widget 1 - comment title  - text input
# Widget 2 - email address  - email input
# Widget 3 - comment body   - Textarea
# Widget 4 - comment rating - select dropdown
# Widget 5 - save or not    - checkbox
class RemarkForm(forms.Form):
    # Comment title
    # forms.CharField() - text input
    # label : the text label shown in front of the widget
    subject = forms.CharField(label='标题')
    # Email address
    # forms.EmailField() - email input
    # label : the text label shown in front of the widget
    email = forms.EmailField(label='邮箱')
    # Comment body
    # widget=forms.Textarea : turns the single-line text input into a multi-line text area
    message = forms.CharField(label='内容',widget=forms.Textarea)
    # Comment rating
    # forms.ChoiceField() - select dropdown
    # choices : the data shown in the dropdown, given as a tuple or list
    topic = forms.ChoiceField(label='级别',choices=TOPIC_CHOICE)
    # Save-or-not checkbox
    isSaved = forms.BooleanField(label='是否保存')
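# --- Editor's sketch: validating a submission with hypothetical POST data
# (requires Django settings to be configured before the form can be used):
# data = {'subject': 'Great', 'email': 'a@b.com', 'message': 'Nice place',
#         'topic': '1', 'isSaved': 'on'}
# form = RemarkForm(data)
# form.is_valid()  # True; cleaned values are in form.cleaned_data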
| true
|
ea3739dbd8b516026bc94f1639e80ac0e5833fde
|
Python
|
oooto/nlp100
|
/第4章_形態素解析/ans36.py
|
UTF-8
| 1,162
| 2.828125
| 3
|
[] |
no_license
|
import pathlib
import japanize_matplotlib
import matplotlib.pyplot as plt
import pandas as pd
file_path = pathlib.Path(__file__).resolve().parent / 'neko.txt.mecab'
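# neko.txt.mecab is MeCab output: one "surface\tfeatures" line per token,
# with sentences separated by "EOS" lines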
with open(file_path, encoding="utf-8") as f:
strs = f.read().split("EOS\n")
strs = [s for s in strs if s != ""]
blocks = []
for sentences in strs:
sentence_list = [s for s in sentences.split("\n") if s != ""]
expand_sentence_list = []
for sentence in sentence_list:
name, attr = sentence.split("\t")
attr = attr.split(",")
sentence_dict = {
"surface": name,
"base": attr[6],
"pos": attr[0],
"pos1": attr[1]
}
expand_sentence_list.append(sentence_dict)
blocks.append(expand_sentence_list)
word_list = []
for sentence in blocks:
for word in sentence:
word_list.append(word["base"])
series = pd.Series(word_list)
series_top10 = series.value_counts().head(10)
fig, ax = plt.subplots()
series_top10.plot(kind="bar", ax=ax)
ax.set_title("出現頻度が高い10語")
ax.set_xlabel("単語(原型)")
ax.set_ylabel("出現頻度")
plt.show()
| true
|
8cdc4d2892d17e31293fe655ed207def4f1e67f8
|
Python
|
KaduHod/Estacao-Metereologica
|
/Medição.py
|
UTF-8
| 585
| 3.296875
| 3
|
[] |
no_license
|
import dht
import machine
import time
d = dht.DHT11(machine.Pin(4))
while True:
d.measure()
temperatura = d.temperature()
humidade = d.humidity()
    if (temperatura > 31) and (humidade > 70):
        print("Temperature and humidity meet the requirements needed to turn on the relay")
    else:
        print("Temperature and humidity do not meet the requirements needed to turn on the relay")
    print(humidade)
    print(temperatura)
    print("Temperature: {} \n Humidity: {} \n\n ============= \n".format(d.temperature(), d.humidity()))
time.sleep(5)
| true
|
9faff3096c23b25682736f693f1b71dce73adcde
|
Python
|
PPpiper7/project1
|
/ine-phon if-elif3/ex4.py
|
UTF-8
| 457
| 3.8125
| 4
|
[] |
no_license
|
print('Please select operation')
print('1.Add')
print('2.Subtract')
print('3.Multiply')
print('4.Divide')
operation = int(input('Select operation from 1, 2, 3, 4 : '))
n = int(input('Enter first number :'))
m = int(input('Enter second number :'))
if operation == 1 :
    print( n,'+',m,'=',n + m )
elif operation == 2 :
    print( n,'-',m,'=',n - m )
elif operation == 3 :
    print( n,'*',m,'=',n * m )
elif operation == 4 :
    if m != 0 :
        print( n,'/',m,'=',n / m )
    else :
        print('Cannot divide by zero')
else :
    print('Invalid operation, please select 1, 2, 3 or 4')
| true
|
e0d6d1e9986b0e0f17ec5e63b5e400d4774afbc4
|
Python
|
IsaacLaw12/KlotskiSolver
|
/test_block.py
|
UTF-8
| 1,938
| 3.0625
| 3
|
[] |
no_license
|
import unittest
import numpy as np
from block import Block
class test_block(unittest.TestCase):
def test_adjacent(self):
orig = Block((0,0), (3,3))
down = Block((3,0), (1,1))
right = Block((0,3), (1,1))
inside = Block((0,0), (1,1))
not_adj = Block((5,5), (1,1))
self.assertTrue(orig.is_adjacent(down))
self.assertTrue(orig.is_adjacent(right))
self.assertTrue(down.is_adjacent(orig))
self.assertTrue(right.is_adjacent(orig))
self.assertTrue(not orig.is_adjacent(inside))
self.assertTrue(not orig.is_adjacent(not_adj))
self.assertTrue(not inside.is_adjacent(orig))
self.assertTrue(not not_adj.is_adjacent(orig))
def test_contains(self):
orig = Block((0,0), (3,3))
inside = Block((0,0), (1,1))
partially_inside = Block((2,2), (2,2))
outside = Block((5,5), (1,1))
self.assertTrue(orig.contains(inside))
self.assertTrue(not orig.contains(partially_inside))
self.assertTrue(not orig.contains(outside))
def test_intersection(self):
first = Block((2,2),(4,4))
second = Block((1,1),(2,2))
self.assertEqual(first.intersection(second), {(2,2)})
def test_append_coordinates(self):
block = Block((0,0))
self.assertEqual(tuple(block.min_corner), (0,0))
self.assertEqual(tuple(block.max_corner), (1,1))
block.append_coordinate([6,6])
self.assertEqual(tuple(block.min_corner), (0,0))
self.assertEqual(tuple(block.max_corner), (7,7))
def test_hashing(self):
block_one = Block((2,2), (2,2))
block_two = Block((3,3), (2,2))
self.assertTrue(not block_one.hash_corners() == block_two.hash_corners())
block_two.move_block(np.array([-1, -1]))
self.assertTrue(block_one.hash_corners() == block_two.hash_corners())
if __name__ == '__main__':
unittest.main()
| true
|
b51d429070c1aee0949c2470a0181eea50542c41
|
Python
|
ezy0812/asdf
|
/python/python-in-html/xmlparsing.py
|
UTF-8
| 571
| 2.546875
| 3
|
[] |
no_license
|
import sys
from io import StringIO
import contextlib
import bs4
doc = open('test.html').read()
soup = bs4.BeautifulSoup(doc, 'lxml')
py = soup.find_all(id="#py")
@contextlib.contextmanager
def stdoutIO(stdout=None):
    # Temporarily swap stdout so output produced by exec() can be captured
    old = sys.stdout
    if stdout is None:
        stdout = StringIO()
    sys.stdout = stdout
    try:
        yield stdout
    finally:
        sys.stdout = old  # restore stdout even if the executed code raises
out = []
for tag in py:
with stdoutIO() as s:
exec(tag.text)
out.append(s.getvalue())
for o,p in zip(out, py):
p.string.replace_with(o)
with open('test.html', 'w') as f:
f.write(str(soup))
| true
|
2fed69f970c086fdba8791bbcd91d306203f5c10
|
Python
|
indhu6499/guvi
|
/code/prog13.py
|
UTF-8
| 214
| 3.015625
| 3
|
[] |
no_license
|
p, o = [int(i) for i in input(" ").split(" ")]  # p = element count (unused below), o = number of queries
m = []
List = [int(i) for i in input().split()]
for _ in range(o):
    l, k = [int(i) for i in input().split()]  # 1-indexed, inclusive query bounds
    m.append(min(List[l-1:k]))  # minimum over the requested range
for i in m:
print(i)
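# Example (editor's note): with input "5 2", "1 5 2 4 3" and queries
# "1 3" and "2 5", this prints the range minimums 1 and 2.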
| true
|
889c0ec8b5e68058b2d17b254a248ff3fb52cadc
|
Python
|
William1104/how-it-works
|
/src/main/java/ru/skuptsov/differentiation/autoGradPytorch.py
|
UTF-8
| 341
| 3.328125
| 3
|
[] |
no_license
|
import torch
# x=2, y=3, α = 4, β=5
x = torch.tensor(2.0, requires_grad=False)
y = torch.tensor(3.0, requires_grad=False)
α = torch.tensor(4.0, requires_grad=True)
β = torch.tensor(5.0, requires_grad=True)
# loss = (αx+β-y)^2
loss = pow((α * x + β - y), 2)
loss.backward()
print(loss)
print(α.grad)
print(β.grad)
print(x.grad)
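# Expected output (editor's note): loss = (4*2+5-3)^2 = 100,
# α.grad = 2*(αx+β-y)*x = 40, β.grad = 2*(αx+β-y) = 20, and
# x.grad is None because x was created with requires_grad=False.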
| true
|
46e727bbec38809daca79b159c8b08d700005e73
|
Python
|
ianzhang1988/PythonScripts
|
/network/hdfs/debug_hdfs.py
|
UTF-8
| 3,659
| 2.546875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
# @Time : 2020/3/18 10:35
# @Author : ZhangYang
# @Email : ian.zhang.88@outlook.com
import subprocess, os
import logging
import logging.handlers
import time
class HDFSError(Exception):
pass
class HadoopFsAdapterClient(object):
def __init__(self, hdfs_user_name, hdfs_root):
self.hdfs_user_name = hdfs_user_name
self.hdfs_root_path = hdfs_root
os.environ['HADOOP_ROOT_LOGGER']="DEBUG,console"
self.count = 0
self.max_count = 100
self.time_gap = [(10,30),(30,45), (45,60), (60, 99999999)]
self.time_gap_prefix = ['20s','45s','60s','60up']
def _run_shell(self, operation, operand):
hadoop_bin = "/opt/hadoop-2.4.1/bin/hadoop"
# cmd_tmp = 'runuser -l {user} -c "{hadoop_bin} fs -{operation} {operand}"'
# su user, run python, otherwise no debug output
cmd_tmp = "{hadoop_bin} fs -{operation} {operand}"
cmd = cmd_tmp.format(user=self.hdfs_user_name, hadoop_bin=hadoop_bin, operation=operation, operand=operand)
ret = 0
begin = time.time()
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
ret = e.returncode
output = e.output
logging.error('check_output error: %s', str(e))
time_elapsed = time.time() - begin
if ret == 0 :
self.record_abnormal_case(time_elapsed, output)
logging.info('hdfs cmd: %s, ret: %s, time: %s', cmd, ret, time_elapsed)
return True if ret == 0 else False
def record_abnormal_case(self, time_elapsed, output):
if time_elapsed < 10:
return
for idx, (lo,hi) in enumerate(self.time_gap):
if time_elapsed > lo and time_elapsed <= hi:
break
prefix = self.time_gap_prefix[idx]
        with open('%s_%04d_%04d.txt' % (prefix, int(time_elapsed), self.count), 'wb') as f:
            f.write(output)  # check_output returns bytes, so write in binary mode
self.count+=1
def if_continue(self):
return self.count < self.max_count
def upload(self, local_path, hdfs_path):
# make hdfs parent path
hdfs_path = self.hdfs_root_path + '/' + hdfs_path
hdfs_path = hdfs_path.rstrip('/')
operation='mkdir -p'
operand = os.path.dirname(hdfs_path)
self._run_shell(operation, operand)
operation='put -f'
operand = '{local_path} {hdfs_path}'.format(local_path=local_path, hdfs_path=hdfs_path)
ret = self._run_shell(operation, operand)
# if not ret:
# raise HDFSError("upload hdfs error")
# return ret
def main():
# config logging
log_file = "debug_hdfs.log"
logger = logging.getLogger()
logger.setLevel('DEBUG')
# create log path
log_path = '.'
fh = logging.handlers.TimedRotatingFileHandler(os.path.join(log_path, log_file), when='D', backupCount=7)
sh = logging.StreamHandler()
###########This set the logging level that show on the screen#############
# sh.setLevel(logging.DEBUG)
# sh.setLevel(logging.ERROR)
formatter = logging.Formatter(
'%(asctime)s :%(filename)s L%(lineno)d %(levelname)s: %(message)s'
)
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
logging.info("Current log level is : %s", logging.getLevelName(logger.getEffectiveLevel()))
client = HadoopFsAdapterClient('sohuvideo','/user/sohuvideo')
while client.if_continue():
client.upload('debug_hdfs', 'online/encodedFile')
if __name__ == "__main__":
main()
| true
|
258deb1b4990ab4fe910a0e03e820edbcd2d1c8d
|
Python
|
LungFun/3d-LDA
|
/src/data/all/trans_info.py
|
UTF-8
| 223
| 2.765625
| 3
|
[] |
no_license
|
import sys
with open(sys.argv[1]) as fp:
    for line in fp:
        line = line.strip()
        if ':' not in line:
            print(line)
        else:
            # Keep only the part before ':' in each whitespace-separated token
            print(' '.join([i.split(':')[0] for i in line.split()]))
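# Example (editor's note): the line "a:1 b:2" prints as "a b";
# lines without ':' pass through unchanged.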
| true
|
b98da8a7e33392cfdede3db29b6cd1db06fc67ad
|
Python
|
wan-catherine/Leetcode
|
/test/test_130_surrounded_regions.py
|
UTF-8
| 879
| 2.953125
| 3
|
[] |
no_license
|
from unittest import TestCase
from problems.N130_Surrounded_Regions import Solution
class TestSolution(TestCase):
def test_solve(self):
self.assertListEqual([["O","O","O"],["O","O","O"],["O","O","O"]], Solution().solve([["O","O","O"],["O","O","O"],["O","O","O"]]))
def test_solve_1(self):
input = [["X","X","X","X"],["X","O","O","X"],["X","X","O","X"],["X","O","X","X"]]
self.assertListEqual([["X","X","X","X"],["X","X","X","X"],["X","X","X","X"],["X","O","X","X"]], Solution().solve(input))
def test_solve_2(self):
input = [["O","X","X","O","X"],["X","O","O","X","O"],["X","O","X","O","X"],["O","X","O","O","O"],["X","X","O","X","O"]]
expected = [["O","X","X","O","X"],["X","X","X","X","O"],["X","X","X","O","X"],["O","X","O","O","O"],["X","X","O","X","O"]]
self.assertListEqual(expected, Solution().solve(input))
| true
|
73d2afb98798da44d2f1b97a979993b74ccc5149
|
Python
|
knzh/Python
|
/py/while-if-break.py
|
UTF-8
| 80
| 3.359375
| 3
|
[] |
no_license
|
n=1
while n<=100:
if n>50:
break
print(n)
n+=1
print('END')
| true
|