blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
44dae8bb6e4004e76a2b7c96a8b12ff2dd961b7f | Python | atrox3d/python-corey-schafer-tutorials | /03-python practical examples/07-advanced logging.py | UTF-8 | 5,791 | 3.234375 | 3 | [] | no_license | #
# https://youtu.be/jxmzY9soFXg
#
#########################################################################################################
# DEBUG: Detailed information, typically of interest only when diagnosing problems.
# INFO: Confirmation that things are working as expected.
# WARNING: An indication that something unexpected happened, or indicative of some problem in the near future
# (e.g. ‘disk space low’). The software is still working as expected.
# ERROR: Due to a more serious problem, the software has not been able to perform some function.
# CRITICAL: A serious error, indicating that the program itself may be unable to continue running.
import logging
from modules import utils
#########################################################################################################
#
# this module creates another root logger that has precedence over the local root logger
# as a consequence the output of this script goes to the employee_logger's log file
#
#########################################################################################################
from modules import employee_logger
"""
########################################################################################################################
- SET LOGFILE PATH
- DISPLAY IT
########################################################################################################################
"""
logfile = utils.getdatafilepath(__file__ + '.log') # set logfile path
print(logfile)
errorfile = utils.getdatafilepath(__file__ + '.error.log') # set logfile path
print(errorfile)
"""
########################################################################################################################
- GET LOCAL (NON-ROOT) LOGGER INSTANCE
- SET LEVEL TO INFO (DEFAULT IS WARNING)
########################################################################################################################
"""
logger = logging.getLogger(__name__) # get local logger
logger.setLevel(logging.DEBUG) # set logger level >= INFO
"""
########################################################################################################################
- GET SAME FORMATTER INSTANCE FOR ALL HANDLERS
########################################################################################################################
"""
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s') # get formatter
"""
########################################################################################################################
- GET FILE HANDLER INSTANCE
- SET FORMATTER FOR FILE HANDLER INSTANCE
- ADD HANDLER TO LOCAL LOGGER
########################################################################################################################
"""
file_handler = logging.FileHandler(logfile) # get file handler
file_handler.setFormatter(formatter) # set formatter for file handler
logger.addHandler(file_handler) # add file handler to logger
"""
########################################################################################################################
- GET FILE HANDLER INSTANCE
- SET FORMATTER FOR FILE HANDLER INSTANCE
- ADD HANDLER TO LOCAL LOGGER
########################################################################################################################
"""
errorfile_handler = logging.FileHandler(errorfile) # get file handler
errorfile_handler.setFormatter(formatter) # set formatter for file handler
errorfile_handler.setLevel(logging.ERROR)
logger.addHandler(errorfile_handler) # add file handler to logger
"""
########################################################################################################################
- GET CLI HANDLER INSTANCE
- SET FORMATTER FOR CLI HANDLER INSTANCE
- ADD HANDLER TO LOCAL LOGGER
########################################################################################################################
"""
cli_handler = logging.StreamHandler() # get CLI handler (default=stderr)
cli_handler.setFormatter(formatter) # set formatter for CLI handler
logger.addHandler(cli_handler) # add CLI handler to logger
# logging.basicConfig(
# level=logging.DEBUG, # INFO ad above
# filename=logfile, # log on file
# format='%(asctime)s:%(levelname)s:%(name)s:%(message)s' # date time, level name, message
# )
def add(x, y):
    """Add Function: return the sum of the two operands."""
    total = x + y
    return total
def subtract(x, y):
    """Subtract Function: return x minus y."""
    difference = x - y
    return difference
def multiply(x, y):
    """Multiply Function: return the product of x and y."""
    product = x * y
    return product
def divide(x, y):
    """Divide Function: return x / y, or None when y is zero.

    A ZeroDivisionError is caught and logged (with traceback) instead of
    propagating, so callers must cope with a None result.
    """
    try:
        return x / y
    except ZeroDivisionError as zde:
        # logger.exception emits an ERROR-level record *and* appends the
        # traceback, so the previous extra logger.error(zde) call only
        # duplicated the same message in every handler.
        logger.exception(zde)
        return None
num_1 = 20
num_2 = 0  # zero on purpose: divide() below exercises the logged ZeroDivisionError path
add_result = add(num_1, num_2)
logger.debug('Add: {} + {} = {}'.format(num_1, num_2, add_result))
sub_result = subtract(num_1, num_2)
logger.debug('Sub: {} - {} = {}'.format(num_1, num_2, sub_result))
mul_result = multiply(num_1, num_2)
logger.debug('Mul: {} * {} = {}'.format(num_1, num_2, mul_result))
div_result = divide(num_1, num_2)  # None here: the division by zero is caught and logged
logger.debug('Div: {} / {} = {}'.format(num_1, num_2, div_result))
| true |
25e6a6637fb4c1c54d3365ff5a462e8dc1700a18 | Python | vilus/learning_telegram | /weather/tests/test_parser.py | UTF-8 | 779 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import pytest
from ..demultiplexer import get_weather_location, check_params
def test_get_weather_location():
    """get_weather_location extracts the city name from the message text."""
    message = {'text': '/say Bangkok weather: {0}'}
    assert get_weather_location({'message': message}) == 'Bangkok'
def test_check_params():
    """check_params accepts a well-formed update and rejects malformed text."""
    message = {
        'message_id': 100500,
        'date': 1516121407,
        'text': '/say Bangkok weather: {0}',
        'from': {},
        'chat': {},
    }
    params = {'update_id': 1, 'message': message}
    check_params(params)
    for bad_text in ('/say Bangkok weather: ololo', 'Hi'):
        message['text'] = bad_text
        with pytest.raises(Exception):
            check_params(params)
| true |
8d4a2081d86027990d03a552bb37148d1cdb94bf | Python | kiramipt/dash_news | /app.py | UTF-8 | 10,442 | 2.625 | 3 | [] | no_license | import random
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from plotly.colors import DEFAULT_PLOTLY_COLORS
import pandas as pd
from datetime import date
from dash.dependencies import Input, Output
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']  # NOTE(review): defined but never passed to dash.Dash below
app = dash.Dash(__name__)
server = app.server  # exposed for WSGI deployment
# read the pre-aggregated topic counts (one row per year/month -- TODO confirm schema)
DF = pd.read_csv('./data/lenta_ru/gb_filtered.csv')
# create new column with a "YYYY-MM" date string from the year/month columns
DF['year_month'] = DF[['year', 'month']].apply(lambda x: date(year=x[0], month=x[1], day=1).strftime('%Y-%m'), axis=1)
# delete bad years data (keep 2000 onwards)
DF = DF[DF['year'] >= 2000]
# min and max year present in the data frame (used as slider bounds)
MIN_DF_YEAR = min(DF['year'])
MAX_DF_YEAR = max(DF['year'])
# topic dictionary of matches: data frame column topic_n --> russian topic description
with open('./data/lenta_ru/topic_names.txt') as f:
    topic_names = f.read().splitlines()
TOPIC_DICT = {f'topic_{i}': theme for i, theme in enumerate(topic_names)}
# Page layout: filter controls (theme-count slider, theme dropdown, year range)
# followed by five graphs that the callbacks below keep in sync.
app.layout = html.Div(children=[
    html.Div([
        html.Div([
            html.H3(children='Themes filters'),
            html.Div([
                html.Label('Select count of themes'),
                dcc.Slider(
                    id='top-n-theme-count-slider',
                    min=0,
                    max=40,
                    step=10,
                    value=20,
                    marks={i: str(i) for i in [i for i in range(0, 40+1, 5)]},
                ),
            ], style={'margin-bottom': 20}),
            html.Label('Select several theme'),
            dcc.Dropdown(
                id='themes-dropdown',
                multi=True
            ),
        ], style={'width': '34%', 'display': 'inline-block', 'margin-right': 30}),
        html.Div([
            html.H3(children='Dates filters'),
            html.Label('Select date range'),
            dcc.RangeSlider(
                id='year-range-slider',
                marks={str(year): str(year) for year in range(2000, 2019)},
                min=MIN_DF_YEAR,
                max=MAX_DF_YEAR,
                value=[MIN_DF_YEAR, MAX_DF_YEAR]
            ),
        ], style={'width': '34%', 'display': 'inline-block', 'vertical-align': 'top'}),
    ]),
    # graph placeholders -- figures are produced by the callbacks below
    html.Div([
        dcc.Graph(
            id='stacked-bar-graph'
        )
    ]),
    html.Div([
        dcc.Graph(id='line-graph'),
    ]),
    html.Div([
        dcc.Graph(id='first-difference-line-graph'),
    ]),
    html.Div([
        dcc.Graph(id='bar-graph')
    ]),
    html.Div([
        dcc.Graph(id='word-cloud-graph')
    ]),
])
@app.callback(
    Output(component_id='themes-dropdown', component_property='options'),
    [Input(component_id='top-n-theme-count-slider', component_property='value'),
     Input(component_id='year-range-slider', component_property='value')])
def set_themes_options(top_n_theme_count, selected_year_range):
    """Build the dropdown options: the top-N topic columns, ranked by their
    total count inside the selected year range."""
    year_lo, year_hi = selected_year_range
    topic_cols = [col for col in DF.columns if 'topic' in col]
    in_range = DF[(DF['year'] >= year_lo) & (DF['year'] <= year_hi)]
    totals = in_range[topic_cols].sum(axis=0).sort_values(ascending=False)
    top_topics = totals.head(top_n_theme_count).index
    return [{'label': TOPIC_DICT[col], 'value': col} for col in top_topics]
@app.callback(
    Output(component_id='stacked-bar-graph', component_property='figure'),
    [Input(component_id='top-n-theme-count-slider', component_property='value'),
     Input(component_id='themes-dropdown', component_property='value'),
     Input(component_id='themes-dropdown', component_property='options'),
     Input(component_id='year-range-slider', component_property='value')])
def update_stacked_bar_graph(selected_top_n_theme_count, selected_themes, available_themes,
                             selected_year_range):
    """Redraw the stacked bar chart for the chosen themes and year range."""
    # an empty dropdown selection means "plot every available theme"
    themes = selected_themes or [opt['value'] for opt in available_themes]
    year_lo, year_hi = selected_year_range
    df = DF[(DF['year'] >= year_lo) & (DF['year'] <= year_hi)]
    bars = [
        {
            'type': 'bar',
            'x': df['year_month'].values,
            # falsy counts (0) become None so empty months draw no segment
            'y': [int(v) if v else None for v in df[topic].values],
            'name': TOPIC_DICT[topic],
        }
        for topic in themes
    ]
    return {
        'data': bars,
        'layout': {
            'title': 'Stacked Bar Chart',
            'barmode': 'stack',
            'yaxis': {
                'hoverformat': '.0f'
            }
        }
    }
@app.callback(
    Output(component_id='line-graph', component_property='figure'),
    [Input(component_id='top-n-theme-count-slider', component_property='value'),
     Input(component_id='themes-dropdown', component_property='value'),
     Input(component_id='themes-dropdown', component_property='options'),
     Input(component_id='year-range-slider', component_property='value')])
def update_line_graph(selected_top_n_theme_count, selected_themes, available_themes,
                      selected_year_range):
    """Redraw the per-topic line chart for the chosen themes and year range."""
    # an empty dropdown selection means "plot every available theme"
    themes = selected_themes or [opt['value'] for opt in available_themes]
    year_lo, year_hi = selected_year_range
    df = DF[(DF['year'] >= year_lo) & (DF['year'] <= year_hi)]
    traces = [
        go.Scatter(
            x=df['year_month'].values,
            y=[int(v) if v else None for v in df[topic].values],
            mode='lines+markers',
            name=TOPIC_DICT[topic],
        )
        for topic in themes
    ]
    return {
        'data': traces,
        'layout': {
            'title': 'Line Chart',
            'yaxis': {'type': 'linear'},
            'xaxis': {'type': 'date', 'showline': True, 'range': ['2000-01', '2018-07'], 'showgrid': False}
        }
    }
@app.callback(
    Output(component_id='first-difference-line-graph', component_property='figure'),
    [Input(component_id='top-n-theme-count-slider', component_property='value'),
     Input(component_id='themes-dropdown', component_property='value'),
     Input(component_id='themes-dropdown', component_property='options'),
     Input(component_id='year-range-slider', component_property='value')])
def update_first_difference_line_graph(selected_top_n_theme_count, selected_themes, available_themes,
                                       selected_year_range):
    """Redraw the month-over-month first-difference chart for the chosen
    themes and year range."""
    # an empty dropdown selection means "plot every available theme"
    themes = selected_themes or [opt['value'] for opt in available_themes]
    year_lo, year_hi = selected_year_range
    df = DF[(DF['year'] >= year_lo) & (DF['year'] <= year_hi)]
    traces = [
        go.Scatter(
            x=df['year_month'].values,
            # diff() leaves NaN in the first row; fillna(0) anchors it at zero
            y=[int(v) if v else None for v in df[topic].diff().fillna(0).values],
            mode='lines+markers',
            name=TOPIC_DICT[topic],
        )
        for topic in themes
    ]
    return {
        'data': traces,
        'layout': {
            'title': 'First Difference Line Chart',
            'yaxis': {'type': 'linear'},
            'xaxis': {'type': 'date', 'showline': True, 'range': ['2000-01', '2018-07'], 'showgrid': False}
        }
    }
@app.callback(
    Output(component_id='word-cloud-graph', component_property='figure'),
    [Input(component_id='top-n-theme-count-slider', component_property='value'),
     Input(component_id='themes-dropdown', component_property='value'),
     Input(component_id='themes-dropdown', component_property='options'),
     Input(component_id='year-range-slider', component_property='value')])
def update_word_cloud_graph(selected_top_n_theme_count, selected_themes, available_themes,
                            selected_year_range):
    """Render the placeholder word cloud (random words, colors and positions;
    the filter inputs only trigger a redraw and are not otherwise used)."""
    words = ['just', 'some', 'random', 'words', 'and', 'more', 'other', 'things']
    # NOTE(review): randrange(1, 10) never picks palette index 0 -- confirm intent
    colors = [DEFAULT_PLOTLY_COLORS[random.randrange(1, 10)] for _ in range(len(words))]
    weights = [random.randint(15, 35) for _ in range(len(words))]
    cloud = go.Scatter(
        x=[random.random() for _ in range(30)],
        y=[random.random() for _ in range(30)],
        mode='text',
        text=words,
        marker={'opacity': 0.3},
        textfont={'size': weights,
                  'color': colors}
    )
    frame = go.Layout({
        'xaxis': {'showgrid': False, 'showticklabels': False, 'zeroline': False},
        'yaxis': {'showgrid': False, 'showticklabels': False, 'zeroline': False},
        'title': 'Word Cloud'
    })
    return go.Figure(
        data=[cloud],
        layout=frame
    )
@app.callback(
    Output(component_id='bar-graph', component_property='figure'),
    [Input(component_id='top-n-theme-count-slider', component_property='value'),
     Input(component_id='themes-dropdown', component_property='value'),
     Input(component_id='themes-dropdown', component_property='options'),
     Input(component_id='year-range-slider', component_property='value')])
def update_bar_graph(selected_top_n_theme_count, selected_themes, available_themes,
                     selected_year_range):
    """Redraw the per-topic box plot (despite the 'bar-graph' component id,
    this figure shows vertical box plots)."""
    # an empty dropdown selection means "plot every available theme"
    themes = selected_themes or [opt['value'] for opt in available_themes]
    year_lo, year_hi = selected_year_range
    df = DF[(DF['year'] >= year_lo) & (DF['year'] <= year_hi)]
    boxes = [
        go.Box(
            y=df[topic].values,
            name=TOPIC_DICT[topic],
        )
        for topic in themes
    ]
    frame = go.Layout({
        'title': 'Vertical Box Plot',
        'showlegend': False,
        'yaxis': {
            'hoverformat': '.0f'
        }
    })
    return go.Figure(
        data=boxes,
        layout=frame
    )
if __name__ == '__main__':
    # development server (debugger + hot reload); serve `server` via WSGI in production
    app.run_server(debug=True)
| true |
0fb05d0fef20b2e794aec5c13ef29cfa0f7ffc04 | Python | beOk91/baekjoon2 | /baekjoon15726.py | UTF-8 | 84 | 2.546875 | 3 | [] | no_license | a,b,c=map(int,input().strip().split())
# Compare the two evaluation orders and print the larger result.
option_one = a / 1000 * b // c * 1000
option_two = a / 1000 // b * c * 1000
print(max(option_one, option_two))
cde950d1b0d8ab503b7060c3d1d27135bdbee5f7 | Python | jpramos123/Artigo-CC5661 | /accuracy.py | UTF-8 | 1,681 | 2.90625 | 3 | [] | no_license | import numpy as np
import math as math
class accuracy:
    """Assign each lesson to its nearest cluster seed and score the match."""
    def __init__(self, clusters, lessons):
        """Store the seed and lesson matrices and cache their dimensions."""
        self.seeds = clusters
        self.lessons = lessons
        self.num_klus = np.shape(clusters)[0]
        self.num_dim = np.shape(clusters)[1]
        self.num_lessons = np.shape(lessons)[0]
    # Euclidean distance:
    # fills self.arr_dist[i][j] with the distance from seed i to lesson j.
    def eucl_dist(self):
        """Compute the seed-to-lesson Euclidean distance matrix."""
        self.arr_dist = np.zeros((self.num_klus, self.num_lessons))
        for i in range(self.num_klus):
            seed = self.seeds[i]
            for j in range(self.num_lessons):
                lesson = self.lessons[j]
                squared = sum((seed[n] - lesson[n]) ** 2 for n in range(self.num_dim))
                self.arr_dist[i][j] = math.sqrt(squared)
    def min_dist(self):
        """Return the closest seed index for every lesson (ties resolve to the
        highest seed index) and tally per-seed counts in self.count_minor."""
        self.count_minor = [0] * self.num_klus
        winners = []
        for j in range(self.num_lessons):
            best = 0
            for i in range(self.num_klus):
                if self.arr_dist[i][j] <= self.arr_dist[best][j]:
                    best = i
            winners.append(best)
            self.count_minor[best] += 1
        return np.array(winners)
    def percentage(self, dist, km):
        """Return the percentage of positions where dist and km agree."""
        size = np.shape(dist)[0]
        matches = sum(1 for i in range(size) if dist[i] == km[i])
        return (matches / size) * 100
| true |
8386cfa51473bcb8cb7ecf3c0132e655afa5f7db | Python | dodosman/PredictX_plane_flights | /calculations.py | UTF-8 | 1,299 | 3.21875 | 3 | [] | no_license | import geopy.distance
import pandas as pd
class FlightDistance:
    """Compute geodesic flight distances from "Flight Distance Test.csv" and
    write an enriched copy to flights.csv.

    Assumes the CSV provides Departure_lat/Departure_lon, Arrival_lat/
    Arrival_lon and a 'Normalised City Pair' column -- TODO confirm against
    the actual file.
    """
    def read_cvs(self):
        """Load the source CSV as a DataFrame. (The 'cvs' typo in the name is
        kept so existing callers keep working.)"""
        df = pd.read_csv("Flight Distance Test.csv")
        return df
    def dep_coordinates_together(self):
        """Return [(lat, lon), ...] departure tuples, one per CSV row."""
        df = self.read_cvs()
        dep_coordin_together = list(zip(df['Departure_lat'], df['Departure_lon']))
        return dep_coordin_together
    def arriv_coordinates_together(self):
        """Return [(lat, lon), ...] arrival tuples, one per CSV row."""
        df = self.read_cvs()
        arriv_coordin_together = list(zip(df['Arrival_lat'], df['Arrival_lon']))
        return arriv_coordin_together
    def calculate_distance(self):
        """Return (and print) the rounded geodesic distance in miles for every
        row. Re-reads the CSV twice via the two helpers above."""
        distances = []
        for i, j in zip(self.dep_coordinates_together(), self.arriv_coordinates_together()):
            distances.append(round(geopy.distance.distance(i, j).miles))
        print(distances)
        return distances
    def add_distance_to_csv(self):
        """Write flights.csv with a Distance column plus a merged
        'City pair unique distance' column.

        NOTE(review): re-reads the CSV directly instead of using read_cvs(),
        and the drop_duplicates().value_counts() chain presumably counts
        unique pair/distance combinations -- verify the intended semantics.
        """
        df = pd.read_csv("Flight Distance Test.csv")
        df["Distance"] = self.calculate_distance()
        unique_distance = df[['Normalised City Pair', 'Distance']].drop_duplicates().value_counts(sort=False).reset_index(
            name='City pair unique distance')
        print(type(unique_distance))
        df = df.merge(unique_distance[['Normalised City Pair', 'City pair unique distance']], how='left',
                      on='Normalised City Pair')
        df.to_csv("flights.csv")
fd = FlightDistance()
fd.calculate_distance()  # computes and prints the distances; add_distance_to_csv() is not invoked here
| true |
b85da377d3f362d7049f3c2d6571f35542042d03 | Python | CristianCuartas/Python-Course | /Bases Python/06-arreglos.py | UTF-8 | 633 | 4.03125 | 4 | [] | no_license | lenguajes = ['Python', 'Kotlin', 'Java', 'JavaScript']
print(lenguajes[3])
# Sort the elements in place
lenguajes.sort()
print(lenguajes)
# Use an element inside an f-string
aprendiendo = f'Estoy aprendiendo {lenguajes[3]}.'
print(aprendiendo)
# Overwrite a value by index
lenguajes[2] = 'PHP'
# Append an element
lenguajes.append('Ruby')
# Delete by index
del lenguajes[0]
print(lenguajes)
# Nested lists as a 3x3 table
tabla = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print(tabla[0][2], tabla[1][1], tabla[2][2])
tabla_2 = [[0, 10]]
tabla.extend(tabla_2)
print(tabla)
tabla_2.insert(1, [11, 20])
print(tabla_2)
print(tabla.pop())
print(tabla.pop(0))
# list.reverse() reverses in place and returns None, so this prints None
print(tabla.reverse())
| true |
1b21ac1102e1c193579d5f37e6cd079c624ec3ef | Python | GilbertQ/Python | /Tarea_01.py | UTF-8 | 666 | 4.03125 | 4 | [] | no_license | #Calcular el sueldo líquido de una persona
# Net-salary calculator. Example: base 100, bonus 100, commissions 100 => 275.34
nombre = input ("Ingrese el nombre del empleado : ")
puesto = input ("Ingrese el puesto de {}: ".format(nombre))
sueldob = input("Ingrese el sueldo base de {}: ".format(nombre))
bonificacion = input("Ingrese el valor de la bonificacion de {}: ".format(nombre))
comisiones = input("Ingrese el valor de las comisiones de {}: ".format(nombre))
# Gross salary = base + bonus + commissions (input() returns strings, hence float())
sueldoTotal = float(sueldob)+float(bonificacion)+float(comisiones)
# 5% savings deduction on the gross salary
ahorro = (sueldoTotal*0.05)
# 4.83% IGSS deduction, applied to base + bonus only (not commissions)
igss = (float(sueldob)+float(bonificacion))*0.0483
sueldoL = sueldoTotal-ahorro-igss
print("El sueldo líquido de {} es: {}.".format(nombre,sueldoL))
| true |
2290017b189a194f46dd15e31784015da5088e03 | Python | Aasthaengg/IBMdataset | /Python_codes/p02819/s996325275.py | UTF-8 | 285 | 2.9375 | 3 | [] | no_license | x = int(input())
# Sieve of Eratosthenes over [0, 10**6): is_prime[i] says whether i is prime.
is_prime = [True] * 10**6
for base in range(2, len(is_prime)):
    if is_prime[base]:
        for multiple in range(base * 2, len(is_prime), base):
            is_prime[multiple] = False
# Print the first prime >= x (read from stdin above) and stop.
for candidate in range(x, len(is_prime)):
    if is_prime[candidate]:
        print(candidate)
        exit()
af192c72373736e32f4c064c34c327932f29532e | Python | shashankrnr32/WaveCLI | /plot/wave.py | UTF-8 | 1,372 | 2.78125 | 3 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | #!/usr/bin/env python3
# =============================================================================
# Developer : Shashank Sharma(shashankrnr32@gmail.com)
# License : MIT License
# Year : 2019
# =============================================================================
# =============================================================================
# Description:
# Plot .wav file
# =============================================================================
import argparse
import matplotlib.pyplot as plot
import Utilities as util
import numpy as np
# Parse CLI options: input .wav, optional output image, optional normalization.
parser = argparse.ArgumentParser(description = 'Plot Signal of a .wav File')
parser.add_argument('-i', nargs = '?', required = True, type = str, help = 'Input File (.wav)')
parser.add_argument('-o', nargs = '?', type = str, help = 'Output Image File (Optional)')
parser.add_argument('-norm',default = False, action = 'store_true', help = 'Normalize Amplitude Values')
args = parser.parse_args()
# util.WavRead returns the sample rate and the sample array -- TODO confirm
# its behaviour for stereo files.
[fs, y_axis] = util.WavRead(args.i)
# Normalize the amplitude (assumes max(y_axis) > 0; an all-negative or silent
# file would misbehave -- NOTE(review)).
if args.norm:
    y_axis = y_axis/max(y_axis)
# One timestamp per sample. Generating an integer arange over the sample count
# and dividing by fs guarantees len(x_axis) == len(y_axis); the previous
# np.arange(0, len(y_axis)/fs, 1/fs) uses a float step, which can yield one
# element too many or too few due to floating-point rounding and then makes
# plot.plot raise a shape-mismatch error.
x_axis = np.arange(len(y_axis)) / fs
plot.plot(x_axis,y_axis)
plot.grid()
plot.title(args.i.split('/')[-1])  # show just the file name, not the full path
plot.xlabel('Time (s)')
if args.norm:
    plot.ylabel('Amplitude (Normalized)')
else:
    plot.ylabel('Amplitude')
plot.tight_layout()
# Save to the requested image file, otherwise open an interactive window.
if args.o:
    plot.savefig(args.o, dpi = 360)
else:
    plot.show()
| true |
80ba09381f6826f282683a285374f66d092ea13a | Python | codemedici/crypto | /vigenere_brute.py | UTF-8 | 762 | 3.0625 | 3 | [] | no_license | import itertools
import vigenereCipher, freqAnalysis
# The 26 uppercase key letters.
letters = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
# Brute-force key space: every 3-letter key with *distinct* letters
# (itertools.permutations never repeats an element), i.e. 26*25*24 keys.
# NOTE(review): keys with repeated letters (e.g. 'ABA') are not searched.
dictionary = [ ''.join(c) for c in itertools.permutations(letters, 3) ]
ciphertext = "AZAXVHEFWVWOUWQCFEMHKARAZEJVNLCBFOVMMMBMSNHBAUGKZMBJGIEOWZWWWBYVLOZJLSUBIPDEYJXDGMBEYXSIKLNABQZNWTNTMAKZBNSRYJZYAPSFJNGRDXNAFOAWHO"
def main():
    """Score every candidate key against English letter frequencies and print
    the four best-scoring decryptions (Python 2 module: print statements)."""
    topKey = []
    for k in dictionary:
        decrypted = vigenereCipher.decryptMessage(k,ciphertext)
        tuplekey = (k, freqAnalysis.englishFreqMatchScore(decrypted))
        topKey.append(tuplekey)
    # best frequency-match score first
    topKey.sort(key=lambda x:x[1], reverse=True)
    for i in topKey[:4]:
        print i
        mess = vigenereCipher.decryptMessage(i[0],ciphertext)
        # print the candidate plaintext in 5-character groups on one line
        for j in range(0,len(mess),5):
            print mess[j:j+5],
        print
if __name__ == '__main__':
    main()
| true |
a87f5ff8bdac2294c20578adb954e73929e942e5 | Python | fiso1011/HydroBA | /Sandtrap.py | UTF-8 | 11,460 | 2.578125 | 3 | [] | no_license | import numpy as np
import RawMaterial as c_rm
class Sandtrap:
def __init__(self,input_data):
self.sandtrap_data=input_data.input_dict["sandtrap_data"] ["dict"]
self.sandtrap_material=input_data.input_dict["sandtrap_material"]["dict"]
self.total_sandtrap_cost
#import relevant Dicts
self.site_data = input_data.input_dict["site_data"]["dict"]
self.intake_material= input_data.input_dict["intake_material"]["dict"]
self.channel_data = input_data.input_dict["channel_data"]["dict"]
self.labour_cost = input_data.input_dict["labour_cost"]["dict"]
self.labour_time = input_data.input_dict["labour_time"]["dict"]
self.raw_material = input_data.input_dict["raw_material"]["dict"]
self.constants = input_data.input_dict["constants"]["dict"]
# Dict to store Dimensions
self.sandtrap_dimensions= {}
self.sandtrap_storage = {}
#Dict to store the Material cost, the labour cost miscellaneous
self.sandtrap_cost={}
#calls all other methods and returns the total intake cost and material volume in 2 dicts
def total_sandtrap_cost(self):
self.calculate_sandtrap_dimensions()
self.calculate_sandtrap_material()
self.calculate_sandtrap_labour()
self.total_sandtrap_cost=sum(self.sandtrap_cost.values())
    def calculate_sandtrap_dimensions(self):
        """Derive the sandtrap geometry from the hydraulic site inputs.

        Fills self.sandtrap_dimensions (excavation/structure volumes, gravel
        and contact areas, basin width) and self.sandtrap_storage (pressure
        basin structure volume). Formulas appear to assume SI units --
        TODO confirm against the input data sheets.
        """
        slope=self.site_data["terrain_slope"]
        wall_width = self.sandtrap_data["wall width"]
        # channel geometry from a Manning/Strickler-style relation -- NOTE(review): verify the 0.48 factor
        channel_width = (self.site_data["used_flow"] / (self.channel_data["channel roughness"] * 0.48 *\
                        np.power(self.channel_data["channel slope"],0.5))) ** (3 / 8)
        channel_perimeter = channel_width * (1 + 2 * self.channel_data["security height"])
        channel_area = channel_width ** 2
        basin_width = np.sqrt(self.site_data["used_flow"] / (0.75 * self.sandtrap_data["basin velocity"]))# 0.15m/2 maximum suspension start Zanke
        basin_height = basin_width * 1.25
        # flow velocities in the channel and the (0.75-filled) basin
        v_channel = self.site_data["used_flow"] / channel_area
        v_basin=self.site_data["used_flow"]/(0.75 * (basin_width**2))
        # water viscosity from temperature (Formel wiki)
        dyn_viscosity = (1 / (0.1 * ((self.site_data["water_temperature"] + self.constants["kelvin"]) ** 2) - 34.335 *\
                         (self.site_data["water_temperature"] + self.constants["kelvin"]) + 2472)) #Formel wiki
        kin_viscosity = dyn_viscosity / self.constants["p_water"]
        max_diameter = self.sandtrap_data["filtered diameter"]
        # dimensionless particle diameter and still-water settling velocity
        d_factor = (((((self.constants["p_stone"] / self.constants["p_water"]) - 1) * self.constants["gravitation"]) /\
                     (kin_viscosity ** 2)) ** (1 / 3)) * max_diameter
        v_wo = ((11 * kin_viscosity) / max_diameter) * (np.sqrt(1 + 0.01 * ((d_factor) ** 3)) - 1)
        k_factor = (1 / ((v_basin ** 0.4) * (v_channel ** 0.3))) * (1 / np.tan(np.deg2rad(5))) *\
                   (1 / ((self.constants["gravitation"] * (0.75 * (basin_width**2) / (2.98 * basin_width))) ** 0.15))
        v_w = v_wo - (0.21 / k_factor) #Formula Ortmann
        settling_lenght = (v_basin / v_w) * basin_height
        Uv_length = (basin_height - channel_width) / np.tan(np.deg2rad(5)) #5 degree opening slope
        spillway_length = (self.site_data["used_flow"] * self.channel_data["security height"] * 0.3564) /\
                          (0.5 * ((0.15) ** (3 / 2))) #15cm height
        # Dimensions of Part1 (transition from channel to basin)
        a1 = (0.84 / 2.98) * channel_perimeter # channel-sided side length of the truncated pyramid
        a2 = 0.84 * basin_width # basin-sided side length of the truncated pyramid (idealized shape 0,84^2*1.5=1.05)
        #1:upper 2:lower part volume of channel/santrap opening
        v_1_1 = (((a1 ** 2) + a1 * a2 + (a2 ** 2)) * Uv_length)/3
        v_1_2 = (3 / 12) * ((a1 ** 2) + a1 * a2 + (a2 ** 2)) * Uv_length * (1 / np.tan(np.pi / 3)) # cotengens with radian
        #1:upper 2:lower part area of channel/santrap opening
        a_1_1 = (channel_width + (0.75 * basin_width - channel_width) * 0.5) * (np.sqrt(Uv_length**2+(basin_width-channel_width)**2)) * 2
        a_1_2 = ((0.5*basin_width) / np.cos(np.deg2rad(38.66))) * (np.sqrt(Uv_length**2+(basin_width-channel_width)**2)) * 0.5 * 2
        #Dimensions of Part2 (settling section)
        #lower part volume and 1: upper 2: lower part area of sandtrap settling part
        v_2_2 = 0.5 * basin_width * (0.4 + 0.2) * basin_width * settling_lenght
        a_2_1=(0.75*basin_width*settling_lenght+(settling_lenght**2)*0.04*0.5)*2
        a_2_2=((0.5/np.cos(np.deg2rad(38.66)))*2+0.2)*basin_width*settling_lenght
        # pressure basin dimensions (square plan, height includes the 4% floor slope)
        pbasin_width = basin_width # =pbasin_length
        pbasin_height = basin_height + settling_lenght * 0.04 #4% slope inside basin
        # lower part of pressure basin volume
        v_3_1 = (pbasin_height - 0.75 * pbasin_width) * (pbasin_width ** 2)
        #1: base 2:lower part 3:upper part (transition) area of pressure basin
        a_3_1=(pbasin_width+wall_width*2)**2
        a_3_2=(pbasin_height-0.75*pbasin_width)*(2*pbasin_width+2*wall_width)*2 #lower part
        a_3_3=0.75*pbasin_width*(pbasin_width*1.5+(pbasin_width+2+wall_width)*2) #upper part transition from sandtrap to pressure basin
        # Excavation volume: structure volume + wall/gravel allowance + terrain slope share
        exc_vol1=v_1_2+a_1_2*((wall_width)+self.raw_material["gravel_thickness"])+\
                 ((((basin_width+2*wall_width)**2)*np.tan(np.deg2rad(slope))*0.5)/(((basin_width)**2)*0.75+(2*0.75*basin_width)*wall_width))*\
                 (v_1_1+a_1_1*wall_width)
        exc_vol2=v_2_2+(a_2_2+settling_lenght*settling_lenght*0.04)*(wall_width+self.raw_material["gravel_thickness"])+\
                 (settling_lenght*settling_lenght*0.04*0.5*basin_width)+\
                 ((((basin_width+2*wall_width)**2)*np.tan(np.deg2rad(slope))*0.5)/(((basin_width)**2)*0.75+(2*0.75*basin_width)*wall_width))*\
                 (0.75*(basin_width**2)*settling_lenght+0.75*basin_width*2*settling_lenght*wall_width)
        exc_vol3=v_3_1+a_3_1*(wall_width+self.raw_material["gravel_thickness"])+a_3_2*wall_width+\
                 ((pbasin_width+2*wall_width)**3)*(np.tan(np.deg2rad(slope))*0.5)
        # Gravel bedding area
        gravel_sqm1=(a_1_2+a_2_2) #sandtrap, slope
        gravel_sqm2=a_3_1 #pressure basin
        # Structure volume: extra free-standing walls are needed when the
        # terrain rise across the footprint exceeds 0.75 * basin_width
        if ((basin_width+2*wall_width)*np.tan(np.deg2rad(slope))) > 0.75*basin_width:
            # NOTE(review): this subtracts a length (0.75*basin_width) from the
            # dimensionless tan(slope) inside the parentheses -- possibly meant
            # (basin_width+2*wall_width)*np.tan(...) - 0.75*basin_width; verify.
            hdiff1=((basin_width+2*wall_width)*(np.tan(np.deg2rad(slope))-0.75*basin_width))
            hdiff0=hdiff1*(channel_perimeter/(2.98*basin_width))
            wall1_vol=((hdiff0+0.5*(hdiff1-hdiff0))*Uv_length)*wall_width #first part of wall
            wall2_vol=hdiff1*settling_lenght*wall_width#second part of wall, constant basin width
            basin_volume=(a_1_1+a_1_2+a_2_1+a_2_2)*wall_width+spillway_length*1*0.2+(spillway_length+2)*(0.3*0.2) #v basin+catchment/spillway
            pbasin_volume=(a_3_1+a_3_2+a_3_3)*wall_width
            structure_vol1=wall1_vol+wall2_vol+basin_volume
            structure_vol2=pbasin_volume
        else:
            basin_vol=(a_1_1+a_1_2+a_2_1+a_2_2)*wall_width+spillway_length*1*0.2+(spillway_length+2)*(0.3*0.2)
            pbasin_volume = (a_3_1 + a_3_2 + a_3_3) * wall_width
            structure_vol1=basin_vol
            structure_vol2 = pbasin_volume
        # publish the results for the material and labour costing steps
        self.sandtrap_dimensions["excavation_vol"] = exc_vol1+exc_vol2+exc_vol3
        self.sandtrap_dimensions["structure_vol"] = structure_vol1
        self.sandtrap_storage["structure_vol"] = structure_vol2
        self.sandtrap_dimensions["gravel_sqm"]=gravel_sqm1+gravel_sqm2
        self.sandtrap_dimensions["contact_sqm"]=(a_1_1+a_1_2+a_2_1+a_2_2+a_3_1+a_3_2+a_3_3)*1.5 #formwork or surface finish area
        self.sandtrap_dimensions["basin width"]=basin_width
    def calculate_sandtrap_material(self):
        """Price the structural material and fittings.

        Fills self.sandtrap_cost["raw material"] and ["material"]; requires
        calculate_sandtrap_dimensions() to have run first. For masonry the
        structure volume is enlarged by the cement surface finish.
        NOTE(review): if "structural material" is neither "RCC" nor "MAS",
        raw_mat_price1 is never bound and the method raises NameError.
        """
        # calculate structure material price
        if self.sandtrap_material["structural material"]=="RCC":
            sandtrap_rcc=c_rm.Raw_Material(self.sandtrap_dimensions,self.raw_material,self.constants)
            raw_mat_price1=sandtrap_rcc.calculate_rcc()
        elif self.sandtrap_material["structural material"]=="MAS":
            sandtrap_mas=c_rm.Raw_Material(self.sandtrap_dimensions,self.raw_material,self.constants)
            raw_mat_price1=sandtrap_mas.calculate_masonry()
            self.sandtrap_dimensions["structure_vol"] = self.sandtrap_dimensions["structure_vol"] +\
                 self.sandtrap_dimensions["contact_sqm"] * self.raw_material["surface_finish"] # cement finish
        # pressure basin is always priced as reinforced concrete
        sandtrap_rcc2 = c_rm.Raw_Material(self.sandtrap_storage, self.raw_material, self.constants)
        raw_mat_price2 = sandtrap_rcc2.calculate_rcc()
        # sum of structure + pressure basin raw material
        self.sandtrap_cost["raw material"] = raw_mat_price1+raw_mat_price2
        # gravel bedding plus fixed fittings (fine rake, sluice gate)
        gravel=self.sandtrap_dimensions["gravel_sqm"]*self.raw_material["gravel_thickness"]*self.raw_material["gravel"]
        self.sandtrap_cost["material"]= gravel+self.sandtrap_material["fine rake"]+self.intake_material["sluice gate"]
    def calculate_sandtrap_labour(self):
        """Cost the excavation, gravel laying and structure labour.

        Fills self.sandtrap_cost["excavation labour"], ["laying"] and
        ["structure labour"], then stores the material subtotal in
        self.sandtrap_storage; requires calculate_sandtrap_material() to
        have run first (reads self.sandtrap_cost["raw material"]).
        """
        # excavation effort grows exponentially with the soil difficulty factor
        self.sandtrap_cost["excavation labour"] = (self.sandtrap_dimensions["excavation_vol"] *\
                                                   (1.1123*np.exp(0.4774*self.site_data["excavating_factor"]))) * self.labour_cost["noskill_worker"]
        self.sandtrap_cost["laying"] = (self.sandtrap_dimensions["gravel_sqm"])*3*self.labour_time["laying"]*self.labour_cost["noskill_worker"]#gravel
        # structure labour depends on the structural material
        if self.sandtrap_material["structural material"] =="RCC":
            formwork_labour = self.sandtrap_dimensions["contact_sqm"] * self.labour_time["formwork"] * self.labour_cost["skill_worker"]
            concreting_labour = (self.sandtrap_dimensions["structure_vol"] * self.labour_time["concreting"]) * self.labour_cost["skill_worker"]
            # mass / 50 * 2 looks like two hauling trips per 50 kg load -- TODO confirm
            hauling_cost = (((self.sandtrap_dimensions["structure_vol"]+self.sandtrap_dimensions["gravel_sqm"]*\
                              self.raw_material["gravel_thickness"]) * self.constants["p_structure"]) / 50) * 2 * self.labour_cost["hauling_cost"]
            self.sandtrap_cost["structure labour"] = formwork_labour + concreting_labour+hauling_cost
        elif self.sandtrap_material["structural material"] =="MAS":
            surface_labour = self.sandtrap_dimensions["contact_sqm"] * self.labour_time["plastering"] * self.labour_cost["skill_worker"] # cement finish
            mas_labour = (self.sandtrap_dimensions["structure_vol"] * self.labour_time["bricklaying"]) * self.labour_cost["skill_worker"]
            # NOTE(review): unlike the RCC branch, the surface-finish term here is
            # *multiplied* into the gravel term rather than added -- verify intent.
            hauling_cost = (((self.sandtrap_dimensions["structure_vol"] + self.sandtrap_dimensions["gravel_sqm"] *\
                              self.raw_material["gravel_thickness"]*self.sandtrap_dimensions["contact_sqm"]*\
                              self.raw_material["surface_finish"]) * self.constants["p_structure"]) / 50) * 2 * self.labour_cost["hauling_cost"]
            self.sandtrap_cost["structure labour"] = surface_labour + mas_labour+hauling_cost
        self.sandtrap_storage["material"] = round(self.sandtrap_cost["raw material"] + self.sandtrap_cost["material"],0)
self.sandtrap_storage["labour"] = round(self.sandtrap_cost["excavation labour"] + self.sandtrap_cost["structure labour"] + self.sandtrap_cost["laying"],0) | true |
5acdc62487cfd3cf7d70e85af139dceaaa2e8440 | Python | patinbsb/Main | /irc/twitch.py | UTF-8 | 3,946 | 2.96875 | 3 | [] | no_license | """This script attempts to use the activity of the chat room of a video stream to gauge and log
the interesting events which occur"""
import socket
import string
import time
from time import localtime, strftime
import urllib
import json
import datetime
# NOTE(review): this script is written for Python 2 -- time.clock(),
# string.split()/string.rstrip(), urllib.urlopen() and str-based socket
# sends were all removed or changed in Python 3.
time.clock()
'''Setting up info for irc connection'''
# IRC connection data
HOST_EVENT = ["199.9.251.213", "199.9.252.26"] # second entry seems to be the one
HOST = "irc.twitch.tv" # This is the Twitch IRC ip, don't change it.
PORT = 6667 # Same with this port, leave it be.
NICK = "patinbsb" # This has to be your bots username.
PASS = "<Enter Pass HERE>" # Instead of a password, use this http://twitchapps.com/tmi/, since Twitch is soon updating to it.
IDENT = "patinbsb" # Bot username again
REALNAME = "patinbsb" # This doesn't really matter.
CHANNEL = "#riotgames" # This is the channel your bot will be working on.
'''obtaining the number of viewers'''
# Ask the (old) Twitch "kraken" API for the channel's current viewer count.
viewer_raw = urllib.urlopen("https://api.twitch.tv/kraken/streams/{0}".format(CHANNEL[1:])).read()
viewer_json = json.loads(viewer_raw)
viewer = (viewer_json["stream"]["viewers"])
'''creating the socket to connect to the irc'''
s = socket.socket() # Creating the socket variable
# The #riotgames channel lives on a dedicated event cluster, so connect by IP.
if CHANNEL == "#riotgames":
    s.connect((HOST_EVENT[1], PORT))
else:
    s.connect((HOST, PORT)) # Connecting to Twitch
s.send("PASS %s\r\n" % PASS) # Notice how I'm sending the password BEFORE the username!
# Just sending the rest of the data now.
s.send("NICK %s\r\n" % NICK)
s.send("USER %s %s bla :%s\r\n" % (IDENT, HOST, REALNAME))
# Connecting to the channel.
s.send("JOIN %s\r\n" % CHANNEL)
'''processing the info received by the socket'''
readbuffer = ""
# Eternal loop letting the bot run.
# ticker  : number of message batches processed so far
# combo   : consecutive "faster than baseline" messages in the current burst
# rate    : baseline seconds-per-batch, calibrated over batches 10..109
readbuffer = ""
ticker = 0
combo = 0
oldtime = time.clock()
starttime = 0
endtime = 0
rate = 0
rate_starttime = 0
rate_endtime = 0
chat_capture = []
uptime = time.clock()
while (1):
    # Receiving data from IRC and spitting it into manageable lines.
    readbuffer = readbuffer + s.recv(1024)
    ticker += 1
    temp = string.split(readbuffer, "\n")
    # Keep the (possibly incomplete) trailing fragment for the next recv().
    readbuffer = temp.pop()
    for line in temp:
        # Everything after "#<channel> :" is the chat message body.
        chat_line = (line[line.find("#") + len(CHANNEL) + 2:])
        chat_name = line[1:line.find("!")]
        chat_total = (chat_name + ": " + chat_line)
        print (chat_total)
        line = string.rstrip(line)
        line = string.split(line)
        # Answer server keep-alives or the connection gets dropped.
        if (line[0] == "PING"):
            s.send("PONG %s\r\n" % line[1])
    # Seconds elapsed since the previous batch of messages.
    instant_rate = time.clock() - oldtime
    # Calibrate the baseline message rate once, over batches 10..109.
    if ticker > 9 and rate_starttime == 0:
        rate_starttime = time.time()
    if ticker > 109 and rate_endtime == 0:
        rate_endtime = time.time()
        rate = ((rate_endtime - rate_starttime) / 100)
        for i in range(20):
            print ("**** TICKER CAPTURED ({0})****".format(str(rate)))
    #TODO: fix this so the instant_rate is relevant based on number of chat users
    # Chat arriving faster than baseline => possibly an interesting moment.
    if instant_rate < rate:
        if combo == 0:
            starttime = strftime("%a, %d %b %Y %H:%M:%S", localtime())
            starttime_file = strftime("%a, %d %b %Y %H,%M,%S", localtime())
        combo += 1
        chat_capture.append(chat_total)
    if instant_rate > rate:
        # A burst of more than 20 fast messages counts as an event: log it.
        if combo > 20:
            endtime = strftime("%a, %d %b %Y %H:%M:%S", localtime())
            uptime_now = (time.clock() - uptime)
            # Format the stream uptime as "XhYmZs" for use in the file name.
            tim = str(datetime.timedelta(seconds=uptime_now))
            form = tim.replace(":", "-")
            form2 = form.replace("-", "h ", 1)
            form3 = form2.replace("-", "m ")
            form4 = form3.replace(".", "s")
            with open(starttime_file + ", " + form4[:form4.find("s") + 1] + ".txt", "a") as f:
                f.write(starttime + " " + endtime + "\n")
                f.write("Seconds: " + str(float("{0:.2f}".format((time.clock() - uptime)))) + "\n")
            # NOTE(review): this loop sits outside the `with` block, so f is
            # already closed here and f.write() raises "I/O operation on
            # closed file" -- it likely belongs one indent level deeper.
            # Confirm against the original repository before changing.
            for line in chat_capture:
                f.write(line + "\n")
            chat_capture = []
        combo = 0
    oldtime = time.clock()
| true |
eb2c144e3b573df18d8d46b72476d4d3ef648621 | Python | elifoster/Miscellaneous | /other/dice.py | UTF-8 | 806 | 3.6875 | 4 | [] | no_license | import random
import time
import sys
import gc
def loop():
    """Ask whether to roll again; '0' rolls once more, '1' exits the program.

    Any other answer simply re-prompts.
    """
    while True:
        # Re-prompt on every iteration. The original read the answer once
        # *before* the loop, so an unrecognized answer spun forever without
        # asking again, and a '0' answer kept calling run() repeatedly.
        lo = input("Would you like to roll again? (0 for yes, 1 for no)\n")
        if lo == "0":
            run()
        elif lo == "1":
            print("Done")
            sys.exit()
def run():
    """Prompt for a die size, roll it once, then offer another roll.

    Re-prompts (recursively) on non-integer input or fewer than 3 sides.
    """
    number = input("How many sides would you like your dice to have? ")
    try:
        sides = int(number)
    except ValueError:
        print("That is not an integer!")
        run()
        return
    if sides < 3:
        print("A die must have 3 or more sides.")
        run()
    else:
        print("Rolling...")
        # randint is inclusive and uniform over 1..sides. The original
        # int(random.uniform(1, sides)) truncated a float draw, so the top
        # face could (essentially) never come up and faces were not uniform.
        print(random.randint(1, sides))
        loop()
run()
| true |
4fc05debb26b0849812235774023e165a5e7f8ba | Python | towithyou/master-agent-rpc | /master/agent.py | UTF-8 | 750 | 2.75 | 3 | [] | no_license |
import datetime
from common.state import *
class Agent:
    """Server-side record of a registered agent (client).

    Wraps the registration data reported by a client plus per-task
    execution results; instances live in master memory.
    """

    def __init__(self, id, hostname, ip):
        self.id = id
        self.hostname = hostname
        self.ip = ip
        # Registration timestamp is generated by the server, not the client.
        self.regtime = datetime.datetime.now()
        # Initial lifecycle state; could also be supplied at registration.
        self.state = WAITING
        # Per-task info and results for this agent, keyed by task id,
        # e.g. {task_id: {code: 0, ret: 'result'}}; iterate this mapping to
        # enumerate tasks and outcomes. Eventually this should move to redis.
        self.outputs = {}
        self.lastupdatetime = None

    def __repr__(self):
        return f"<Agent {self.id} {self.outputs}>"
| true |
109cbfd693fa0cb6a468b3ab3a805a94c42f449c | Python | JaeWorld/PS_everyday | /BOJ/11779.py | UTF-8 | 1,121 | 3.25 | 3 | [] | no_license | # BOJ 11779 최소비용 구하기 2
# 다익스트라 알고리즘
import sys
import heapq
input = sys.stdin.readline
INF = 987654321

def djikstra(start, graph, n, parent):
    """Dijkstra's shortest paths from `start` on a 1-indexed adjacency list.

    Args:
        start: source vertex (1..n).
        graph: adjacency list; graph[u] is a list of [v, w] directed edges.
        n: number of vertices.
        parent: list of length n+1, filled in place with the predecessor of
            each vertex on its shortest path (source/unreachable stay 0).

    Returns:
        List of shortest distances indexed by vertex (INF if unreachable).
    """
    dist = [INF] * (n + 1)
    dist[start] = 0
    queue = [[0, start]]
    while queue:
        d, v = heapq.heappop(queue)
        # Skip stale heap entries: without this check an already-settled
        # vertex is re-expanded once per outdated queue entry, which can blow
        # up the running time on dense graphs. Distances and parents are
        # unchanged because relaxation is guarded by a strict '<' below.
        if d > dist[v]:
            continue
        for a, w in graph[v]:
            nxt = d + w
            if nxt < dist[a]:
                dist[a] = nxt
                heapq.heappush(queue, [nxt, a])
                parent[a] = v
    return dist
def main():
    """Read the graph from stdin, run Dijkstra, and print the cheapest cost
    from start to end, the number of stops on that route, and the route
    itself (BOJ 11779 output format)."""
    n = int(input())
    m = int(input())
    links = [list(map(int, input().split())) for _ in range(m)]
    start, end = map(int, input().split())
    graph = [[] for _ in range(n+1)]
    parent = [0]*(n+1)
    path = []
    # Directed edges: u -> v with cost w.
    for link in links:
        u, v, w = link
        graph[u].append([v, w])
    # parent[] is filled in place by djikstra with predecessor links.
    dist = djikstra(start, graph, n, parent)
    # Reconstruct the route by walking predecessor links back from `end`,
    # prepending each stop so the path reads start -> ... -> end.
    e = end
    while True:
        if start == e:
            path.insert(0, start)
            break
        path.insert(0, e)
        e = parent[e]
    print(dist[end])
    print(len(path))
    print(*path)
| true |
1c48f1c8934a1cfe8ff018172b601bf50933a4e3 | Python | IINemo/isanlp | /src/isanlp/processor_spacy.py | UTF-8 | 7,013 | 2.9375 | 3 | [
"MIT",
"Python-2.0"
] | permissive | import spacy
from . import annotation as ann
class ProcessorSpaCy:
    """ Wrapper around spaCy - The NLP library for multiple languages.
    Performs:
    1. Tokenization, sentence splitting.
    2. POS-Tagging, morphological analysis, lemmatizing.
    3. Dependency parsing.
    4. Named entity recognition.
    USAGE EXAMPLE
    (!) For ru_core_news_lg model, the spacy's tokenization does not always match with its components (parser, ner),
    so enforce an external tokenizer if possible.
    (!) Download the model beforehand with $ python -m spacy download model_name
    from isanlp import PipelineCommon
    from isanlp.processor_razdel import ProcessorRazdel
    from isanlp.processor_spacy import ProcessorSpaCy
    ppl = PipelineCommon([
        (ProcessorRazdel(), ['text'],
         {'tokens': 'tokens',
          'sentences': 'sentences'}),
        (ProcessorSpaCy('ru_core_news_lg'), ['tokens', 'sentences'],
         {'lemma': 'lemma',
          'postag': 'postag',
          'morph': 'morph',
          'syntax_dep_tree': 'syntax_dep_tree',
          'entities': 'entities'}),
    ])
    Tested with spacy==3.3.1
    """

    def __init__(self, model_name='en_core_web_trf', morphology=True, parser=True, ner=True, delay_init=False):
        """
        Args:
            model_name (str): spaCy model name, RoBERTa-based model for English by default.
            morphology (boolean): load the model with postagger, lemmatizer, and morphology predictor. Will not affect other modules.
            parser (boolean): load the model with dependency parser. Disabling the parser leads to a switch to rule-based tokenizer.
            ner (boolean): load the model with named entity recognition. Will not affect other modules.
            delay_init (boolean): if True, the (expensive) model load is
                deferred until init() is called explicitly.
        """
        self._modelname = model_name
        self._enable_morphology = morphology
        self._enable_parser = parser
        self._enable_ner = ner
        self.model = None
        if not delay_init:
            self.init()

    def init(self):
        """Load the spaCy model; idempotent (no-op if already loaded)."""
        if self.model is None:
            # Build the pipeline-exclusion list from the disabled features.
            exclude = []
            for key, include in zip(['tagger', 'parser', 'ner'],
                                    [self._enable_morphology, self._enable_parser, self._enable_ner]):
                if not include: exclude.append(key)
            self.model = spacy.load(self._modelname, exclude=exclude)
            if self._enable_parser == False: self.model.add_pipe(
                "sentencizer")  # By default sentence boundaries are predicted in the dependency parser.

    def __call__(self, *argv):
        """Performs tokenization, tagging, lemmatizing and parsing.
        Args:
            text(str): text. OR
            tokens(list): List of Token objects.
            sentences(list): List of Sentence objects.
        Returns:
            Dictionary that contains:
            1. tokens - list of objects Token.
            2. sentences - list of objects Sentence.
            3. lemma - list of lists of strings that represent lemmas of words.
            4. postag - list of lists of strings that represent postags of words.
            5. morph - list of lists of strings that represent morphological features.
            6. syntax_dep_tree - list of lists of objects WordSynt that represent a dependency tree.
            7. entities - list of lists of objects Span that represent named entities.
        """
        assert self.model
        if type(argv[0]) == str:
            # Run with tokenization
            text = argv[0]
            spacy_doc = self.model(text)
        else:
            # Run on pre-tokenized text
            tokens, sentences = argv[0], argv[1]
            words = [tok.text for tok in tokens]
            # sent_starts marks the first token of each sentence so spaCy
            # keeps the external sentence segmentation.
            sent_starts = []
            for sentence in sentences:
                sent_starts += [True] + [False] * (sentence.end - sentence.begin - 1)
            assert len(words) == len(sent_starts)
            spacy_doc = spacy.tokens.Doc(self.model.vocab, words=words, sent_starts=sent_starts)
            spacy_doc = self.model(spacy_doc)
        return self._dictionarize(spacy_doc)

    def _dictionarize(self, doc, tokenization=True):
        """Convert a processed spaCy Doc into the isanlp annotation dict."""
        def features_as_dict(features):
            # Turn spaCy's "Key=Value" morphology strings into a dict;
            # entries without '=' are silently dropped.
            result = dict()
            for feature in features:
                if '=' in feature:
                    key, value = feature.split('=')
                    result[key] = value
            return result

        def recount_offsets_by_sentence(token_idxs):
            # Rebase document-absolute token indexes so each sentence's
            # tokens are numbered from 0 within that sentence.
            if len(token_idxs) == 1:
                return token_idxs
            last_sentence_end = token_idxs[0][-1] + 1
            result = [token_idxs[0]]
            for sentence in token_idxs[1:]:
                result.append([tok - last_sentence_end for tok in sentence])
                last_sentence_end = sentence[-1] + 1
            return result

        result = dict()
        if tokenization:
            tokens = [ann.Token(tok.text, begin=tok.idx, end=tok.idx + len(tok.text)) for tok in doc]
            sentences = [ann.Sentence(sent.start, sent.end) for sent in doc.sents]
            result.update({'tokens': tokens, 'sentences': sentences})
        if self._enable_morphology:
            lemma = [[t.lemma_ for t in s] for s in doc.sents]
            postag = [[t.pos_ for t in s] for s in doc.sents]
            morph = [[features_as_dict(t.morph) for t in s] for s in doc.sents]
            result.update({'lemma': lemma, 'postag': postag, 'morph': morph})
        if self._enable_parser:
            # Map absolute indexes to intrasentential indexes
            idxs = [[t.i for t in s] for s in doc.sents]
            idxs_by_sent = recount_offsets_by_sentence(idxs)
            idx_dict = [dict(zip(idxs[i], idxs_by_sent[i])) for i in range(len(idxs))]
            # Parsing results
            link_names = [[t.dep_ for t in s] for s in doc.sents]
            link_heads = [[t.head.i for t in s] for s in doc.sents]  # absolute
            link_heads = [[idx_dict[i][head] for head in link_heads[i]] for i in
                          range(len(link_heads))]  # intrasentential
            # Collect to WordSynt objects
            syntax_dep_tree = []
            for sentence in zip(link_heads, link_names):
                current_syntax = []
                for parent, link_name in zip(*sentence):
                    current_syntax.append(ann.WordSynt(parent, link_name))
                syntax_dep_tree.append(current_syntax)
            # SpaCy links the ROOT token to itself, in isanlp we use -1
            for sent in range(len(syntax_dep_tree)):
                for tok in range(len(syntax_dep_tree[sent])):
                    if syntax_dep_tree[sent][tok].parent == tok:
                        syntax_dep_tree[sent][tok].parent = -1
            result.update({'syntax_dep_tree': syntax_dep_tree})
        if self._enable_ner:
            entities = [ann.TaggedSpan(ent.label_, ent.start, ent.end) for ent in doc.ents]
            result.update({'entities': entities})
        return result
| true |
429642deb2400f4b2ab53aeab6173f4e8ea9fca7 | Python | surim-wang/kidney | /source/2020-07-11_imageTOtext.py | UTF-8 | 13,046 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 11 21:11:18 2020
@author: SURIMWANG
"""
#%% 이미지 txt로 가져와서 한줄씩 들어온 데이터 전처리하기
import numpy as np
import pandas as pd
import os
os.chdir('D:/MNIST/source')
from PIL import Image
import pytesseract
#test_kor= pytesseract.image_to_string(Image.open('../image/test_img3.png'), lang = 'kor')
#test_eng= pytesseract.image_to_string(Image.open('../image/test_img3.png'), lang = 'eng')
test_kor_eng= pytesseract.image_to_string(Image.open('../image/test_img3.png'), lang = 'kor+eng')
text = test_kor_eng.split('\n') #122
#text_raw = text
# 텝으로 띄워진 행을 제거하자(의미없는 행)
for num in reversed(range(len(text))):
if text[num] =='' or text[num] == ' ':
del text[num]
# 우선 눈에 보이는 결측치를 수정하자(노가다작업의 일부) ocr을 인력이 극복할수 밖에 없는 문제다.
for num, line in enumerate(text):
text[num]= text[num].replace("|","")
text[num]= text[num].replace("\80","WBC")
text[num]= text[num].replace("Het","Hct")
text[num]= text[num].replace("LOL-C(Hl","LDL-C(계산")
text[num]= text[num].replace(";",":")
text[num] = text[num].replace("_","")
text[num] = text[num].replace("컴사명","검사명")
for num, line in enumerate(text):
text[num] = text[num].split(' ')
for line in range(len(text)):
for atom in range(len(text[line])):
#print(line, atom)
print(text[line][atom])
text[line][atom] = text[line][atom].replace("[","")
#text[line][atom] = text[line][atom].split(':')
#이제 해야할하는건 행마다 빈칸 제거
for num_h in reversed(range(len(text))):
for num_l in reversed(range(len(text[num_h]))):
if text[num_h][num_l] == '':
del text[num_h][num_l]
# 접수일 잡을수있는지 확인해보려했던 코드
for row in text:
for column in range(len(row)):
if '접수일:' in row[column]:
print(row[column][4:])
#이제 행을 불러오면서 검사명이 포한된 행 다음줄부터 검사명이 나올때까지 첫번째가 그릇테이블에 검사명과 일치하면
# 아 아니다 테이블로 만들자.
#test_boxes = pytesseract.image_to_boxes(Image.open('../image/test_img3.png'), lang = 'eng')
plate_table = pd.read_csv("../2020-08-22_중복제거테이블.csv", encoding = 'euc-kr')
#%%
#test_table = plate_table
for result_id in text:
for num, std_result_id in enumerate(plate_table['검사명']):
print(result_id[0], std_result_id)
if result_id[0] == std_result_id:
#plate_table.loc[plate_table['검사명'] == 'Cast', '결과'] = 10
plate_table.iloc[num,1] = result_id[1]
# 결과가 잘못 인식된것 나중에 한번에 사용자 & 본사 담당자가 수정해줘야함.
def is_nan(x):
    """Return True iff x is NaN (NaN is the only value not equal to itself)."""
    # The former `x is np.nan` identity test was redundant and fragile: any
    # NaN already satisfies x != x, while identity against the np.nan
    # singleton misses NaNs produced elsewhere.
    return x != x
drop_num_lst= []
for num in range(len(plate_table)):
x = plate_table.iloc[num,1]
if is_nan(x) == True:
print(num)
drop_num_lst.append(num)
# ocr 테이블에 없는 데이터 제거하기
plate_table = plate_table.drop(drop_num_lst)
plate_table = plate_table.reset_index()
plate_table = plate_table.drop('index', axis = 1)
# 잘못인식된 데이터 맞춤 수정하기
plate_table.iloc[1,1] = 5.1
plate_table.iloc[3,1] = 45.6
plate_table.iloc[6,1] = 130
plate_table.iloc[8,1] = 5.1
plate_table.iloc[22,1] = 5.1
# 결과에 담겨있는 문자형 숫자를 실수로 변경
def func_float(table, row, col):
    """Coerce the cell at (row, col) of `table` to float, modifying it in place."""
    table.iloc[row, col] = float(table.iloc[row, col])
for row in range(len(plate_table)):
func_float(plate_table, row ,1)
print(plate_table)
plate_table.to_csv("../result/ocr2csv.csv", index= False, encoding='utf-8-sig' )
#%% cv2 그레이 스케일 성공
fname = "../image/이력서사진.jpg"
fname = "../image/내가디자인한신발5.PNG"
fname = "../image/IMG_8960.jpg"
fname = "test_img3.jpg"
fname = "이력서사진.jpg"
fname = "내가디자인한신발5.PNG"
import cv2
fname = "../image/test_img3.jpg"
original = cv2.imread(fname, cv2.IMREAD_COLOR)
gray = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
unchange = cv2.imread(fname, cv2.IMREAD_UNCHANGED)
cv2.imshow('Gray', unchange)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.waitKey(1)
test_gray_kor= pytesseract.image_to_string(gray, lang = 'kor')
test_gray_eng= pytesseract.image_to_string(gray, lang = 'eng')
cv2.imwrite('../image/gray.jpg', gray)
#%% 테이블 ocr
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import csv
import os
os.chdir('D:/MNIST/source')
try:
from PIL import Image
except ImportError:
import Image
import pytesseract
### first
#read your file
#file=r"../image/syImg.jpg"
file=r"../image/test_img1.PNG"
img = cv2.imread(file,0)
img.shape
#thresholding the image to a binary image
thresh,img_bin = cv2.threshold(img,100,255,cv2.THRESH_BINARY |cv2.THRESH_OTSU)
#thresh,img_bin = cv2.threshold(img,110,255,cv2.THRESH_BINARY |cv2.THRESH_TOZERO)
#cv2.THRESH_TRUNC
#thresh,img_bin = cv2.threshold(img_bin,190,255,cv2.THRESH_BINARY |cv2.THRESH_TRUNC)
#img_opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
#img_closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
#plt.imshow(img_opening,cmap='gray')
#plt.imshow(img_closing,cmap='gray')
#plt.imshow(img,cmap='gray')
#
#
#for r in range(1, 2219):
# for c in range(1,1569):
# print(r, c)
# if int(img[r+1, c-1]) > 230:
# img[r,c] = 255
# elif int(img[r+1, c-1]) < 50:
# img[r,c] = 0
#
#
#img_bin1 = img_bin
#for r in range(1, 2219):
# for c in range(1,1569):
# print(r, c)
# if int(img_bin[r+1, c-1]) + int(img_bin[r+1, c]) + int(img_bin[r+1, c+1]) + int(img_bin[r, c-1]) + int(img_bin[r, c+1]) + int(img_bin[r-1, c-1]) + int(img_bin[r-1, c]) + int(img_bin[r-1, c+1]) > 1530:
# img_bin[r,c] = 255
#
#
#for r in range(215, 259):
# for c in range(45,48):
# print(r, c)
# img_bin[r,c] = 255
#for r in range(490, 895):
# for c in range(45,48):
# print(r, c)
# img_bin[r,c] = 255
#inverting the image
img_bin = 255-img_bin
cv2.imwrite('../image/cv_inverted.png',img_bin)
#Plotting the image to see the output
plotting = plt.imshow(img_bin,cmap='gray')
plt.show()
### second find to detect rectangular boxes.
# Length(width) of kernel as 100th of total width
kernel_len = np.array(img).shape[1]//100
# Defining a vertical kernel to detect all vertical lines of image
ver_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_len))
# Defining a horizontal kernel to detect all horizontal lines of image
hor_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_len, 1))
# A kernel of 2x2
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
### vertical line catch
#Use vertical kernel to detect and save the vertical lines in a jpg
image_1 = cv2.erode(img_bin, ver_kernel, iterations=3)
#image_1 = cv2.erode(img_bin, ver_kernel[0:2], iterations=3)
vertical_lines = cv2.dilate(image_1, ver_kernel, iterations=3)
#vertical_lines = cv2.dilate(image_1, ver_kernel[0:3], iterations=3)
cv2.imwrite("../image/vertical.jpg",vertical_lines)
#Plot the generated image
#plotting = plt.imshow(image_1,cmap='gray')
plotting = plt.imshow(image_1,cmap='gray')
plt.show()
### horizon line catch
#Use horizontal kernel to detect and save the horizontal lines in a jpg
image_2 = cv2.erode(img_bin, hor_kernel, iterations=3)
horizontal_lines = cv2.dilate(image_2, hor_kernel, iterations=3)
cv2.imwrite("../image/horizontal.jpg",horizontal_lines)
#Plot the generated image
plotting = plt.imshow(image_2,cmap='gray')
plt.show()
### combine the horizontal and vertical lines
# Combine horizontal and vertical lines in a new third image, with both having same weight.
img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5, 0.0)
#Eroding and thesholding the image
img_vh = cv2.erode(~img_vh, kernel, iterations=2)
thresh, img_vh = cv2.threshold(img_vh,128,255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
cv2.imwrite("../image/img_vh.jpg", img_vh)
bitxor = cv2.bitwise_xor(img,img_vh)
bitnot = cv2.bitwise_not(bitxor)
#Plotting the generated image
plotting = plt.imshow(bitnot,cmap='gray')
plt.show()
# Detect contours for following box detection
contours, hierarchy = cv2.findContours(img_vh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
def sort_contours(cnts, method="left-to-right"):
    """Sort contours along one image axis.

    Args:
        cnts: iterable of OpenCV contours.
        method: one of "left-to-right", "right-to-left",
            "top-to-bottom", "bottom-to-top".

    Returns:
        (contours, boundingBoxes): the contours sorted in the requested
        order and their matching cv2.boundingRect boxes.
    """
    # Scanning from the far edge means sorting in descending order.
    descending = method in ("right-to-left", "bottom-to-top")
    # Compare the y coordinate (index 1) for vertical orderings,
    # the x coordinate (index 0) otherwise.
    axis = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    ordered = sorted(zip(cnts, boundingBoxes),
                     key=lambda pair: pair[1][axis], reverse=descending)
    cnts, boundingBoxes = zip(*ordered)
    # return the list of sorted contours and bounding boxes
    return (cnts, boundingBoxes)
# Sort all the contours by top to bottom.
contours, boundingBoxes = sort_contours(contours, method="top-to-bottom")
#Creating a list of heights for all detected boxes
heights = [boundingBoxes[i][3] for i in range(len(boundingBoxes))]
#Get mean of heights
mean = np.mean(heights)
#Create list box to store all boxes in
box = []
# Get position (x,y), width and height for every contour and show the contour on image
for c in contours:
x, y, w, h = cv2.boundingRect(c)
if (w<1000 and h<500):
image = cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)
box.append([x,y,w,h])
plotting = plt.imshow(image,cmap="gray")
plt.show()
#Creating two lists to define row and column in which cell is located
row=[]
column=[]
j=0
#Sorting the boxes to their respective row and column
for i in range(len(box)):
if(i==0):
column.append(box[i])
previous=box[i]
else:
if(box[i][1]<=previous[1]+mean/2):
column.append(box[i])
previous=box[i]
if(i==len(box)-1):
row.append(column)
else:
row.append(column)
column=[]
previous = box[i]
column.append(box[i])
print(column)
print(row)
#calculating maximum number of cells
countcol = 0
for i in range(len(row)):
countcol = len(row[i])
if countcol > countcol:
countcol = countcol
#Retrieving the center of each column
center = [int(row[i][j][0]+row[i][j][2]/2) for j in range(len(row[i])) if row[0]]
center=np.array(center)
center.sort()
#Regarding the distance to the columns center, the boxes are arranged in respective order
finalboxes = []
for i in range(len(row)):
lis=[]
for k in range(countcol):
lis.append([])
for j in range(len(row[i])):
diff = abs(center-(row[i][j][0]+row[i][j][2]/4))
minimum = min(diff)
indexing = list(diff).index(minimum)
lis[indexing].append(row[i][j])
finalboxes.append(lis)
pytesseract.pytesseract.tesseract_cmd = r'C:/Program Files/Tesseract-OCR/tesseract.exe'
#
#from every single image-based cell/box the strings are extracted via pytesseract and stored in a list
outer=[]
for i in range(len(finalboxes)):
for j in range(len(finalboxes[i])):
inner=''
if(len(finalboxes[i][j])==0):
outer.append(' ')
else:
for k in range(len(finalboxes[i][j])):
y,x,w,h = finalboxes[i][j][k][0],finalboxes[i][j][k][1], finalboxes[i][j][k][2],finalboxes[i][j][k][3]
finalimg = bitnot[x:x+h, y:y+w]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
border = cv2.copyMakeBorder(finalimg,2,2,2,2, cv2.BORDER_CONSTANT,value=[255,255])
resizing = cv2.resize(border, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
dilation = cv2.dilate(resizing, kernel,iterations=1)
erosion = cv2.erode(dilation, kernel,iterations=1)
out = pytesseract.image_to_string(erosion)
if(len(out)==0):
out = pytesseract.image_to_string(erosion, config='--psm 3')
inner = inner +" "+ out
outer.append(inner)
#Creating a dataframe of the generated OCR list
arr = np.array(outer)
dataframe = pd.DataFrame(arr.reshape(len(row),countcol))
print(dataframe)
data = dataframe.style.set_properties(align="left")
#Converting it in a excel-file
#data.to_excel("../result/output1.xlsx")
#data.to_excel("../result/output1_point_del.xlsx")
data.to_excel("../image/output_gray.xlsx")
#%%
import pkg_resources
pkg_resources.working_set.by_key['pytesseract'].version
| true |
05651b9197f064ed95edae3eb9a4d5f00803cf17 | Python | apple/coremltools | /coremltools/converters/mil/mil/types/get_type_info.py | UTF-8 | 2,123 | 2.734375 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | # Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from .type_spec import FunctionType, Type
from .type_void import void
def get_python_method_type(py_function):
# given a python class method, parse the annotations to figure out the type
function_inputs = []
function_output = get_type_info(void)
annotations = {}
if hasattr(py_function, "type_annotations"):
annotations = {
k: get_type_info(v) for k, v in py_function.type_annotations.items()
}
if hasattr(py_function, "return_type"):
function_output = get_type_info(py_function.return_type)
try:
if hasattr(py_function, "__func__"):
argcount = py_function.__func__.__code__.co_argcount
argnames = py_function.__func__.__code__.co_varnames[:argcount]
else:
argcount = py_function.__code__.co_argcount
argnames = py_function.__code__.co_varnames[:argcount]
except:
raise TypeError(
"Unable to derive type information from method %s. "
"You might have a misspecified type. Ex: use compyler.int and not int"
% py_function
)
for arg in argnames:
if arg in annotations:
function_inputs.append(annotations[arg])
elif arg != "self":
raise TypeError(
"Function "
+ str(py_function)
+ " insufficient annotations. "
+ arg
+ " needs a type"
)
typeinfo = FunctionType(function_inputs, function_output, py_function)
return typeinfo
def get_type_info(t):
if hasattr(t, "__type_info__"):
ret = t.__type_info__()
assert ret.python_class is not None
return ret
elif isinstance(t, type):
return Type(t.__name__, python_class=t)
elif hasattr(t, "__call__"):
return get_python_method_type(t)
raise TypeError("Unsupported type %s" % t)
| true |
a37ca072ab79b25b0522b18d403bc6faaf4c3ad3 | Python | 44Schwarz/resume-storage | /api/tests.py | UTF-8 | 557 | 2.84375 | 3 | [] | no_license | import unittest
from django.test import TestCase
# Create your tests here.
class TestParseText(unittest.TestCase):
def test_regex(self):
import re
from .tasks import RE_PATTERN
test_string = 'A-B Company (2015-01-11 - 2018-07-26; laboris nisi ut aliquipc,fdmf2f ea commodo conse).'
match = re.findall(RE_PATTERN, test_string)
if match:
self.assertEqual(match, [('A-B Company', '2015-01-11', '2018-07-26',
'laboris nisi ut aliquipc,fdmf2f ea commodo conse')])
| true |
2adb753aa9c81c01d80142e68746034dda8a2907 | Python | mrulle/python_course_solutions | /06-conditional_statements/random_sentence.py | UTF-8 | 2,481 | 3.5 | 4 | [] | no_license | # adjectives from: http://www.enchantedlearning.com/wordlist/adjectives.shtml
# nouns from: http://www.talkenglish.com/vocabulary/top-1500-nouns.aspx
# verbs from: http://www.linguasorb.com/english/verbs/most-common-verbs/
import random
'''
adjectives are 1 word per line
nouns has a line format of 'word frequency type/types' where type is a comma seperated list in parenthesis
verbs file contains the 100 most common english verbs line format 'position verb simplepast pastparticiple'
verbs line example: '1 to be were been' NOTE: if using line.split() to keep [1] and [2] together
'''
file_names = {'nouns':'dirty_noun_list.txt', 'adjectives': 'clean_adjective_list.txt', 'verbs': 'dirty_verbs_list.txt'}
words = {'nouns': [], 'adjectives':[], 'verbs': []}
vowels = ['a', 'e', 'i', 'o', 'u', 'y']
# opens a filename and returns a list containing the first word on each line
def read_data(file_name):
res = []
with open(file_name) as f:
for line in f:
if line.strip():
tmp_words = line.split()
if file_name == 'dirty_verbs_list.txt':
res.append(tmp_words[2])
else:
res.append(tmp_words[0])
return res
# takes each filename from the dict file_names and puts the words into the corresponding list in the dict 'words'
for k, v in file_names.items():
words[k] = read_data(v)
'''
for noun in words['nouns']:
print(noun)
for adjective in words['adjectives']:
print(adjective)
'''
# returns a string of a random adjective + a random noun
def generate_group_names(amount):
res = []
for i in range(20):
a = random.choice(words['adjectives'])
n = random.choice(words['nouns'])
temp = '{0} {1}'.format(a,n)
res.append(temp)
return res
# generates a random sentence
def generate_sentence():
a = random.choice(words['adjectives'])
n = random.choice(words['nouns'])
v = random.choice(words['verbs'])
p = None
if a[0] in vowels:
p = 'an'
else:
p = 'a'
temp = '{0} {1} {2} {3}'.format(p, a, n, v)
return temp
# generates a random sentence
def generate_sentence2():
a = random.choice(words['adjectives'])
n = random.choice(words['nouns'])
v = random.choice(words['verbs'])+'s'
p = None
if a[0] in vowels:
p = 'an'
else:
p = 'a'
temp = '{0} {1} {2} {3}'.format(p, a, n, v)
return temp
print(generate_sentence2())
| true |
57697da5b3d4a87938f0234655647b6643763560 | Python | abigail-Moore/GBS-analysis | /new_subj_rtd.py | UTF-8 | 1,185 | 2.828125 | 3 | [] | no_license | #! /usr/bin/env python
InFileName = "rtd.mIDs"
OutFileName = "rtd_0001of0001_subj.fa"
SubjectSeqs = [ ]
SplitLine = [ ]
AllUniqued = 0
UniqSubjects = 0
WrittenSeqs = 0
InFile = open(InFileName, 'rU')
for Line in InFile:
Line = Line.strip('\n').strip('\r').split()
AllUniqued += 1
if len(Line) > 2:
NewSeq = Line[0]
SubjectSeqs.append(NewSeq)
UniqSubjects += 1
InFile.close()
print("A total of %d unique sequences were found. %d of these were found in more than one individual.\n" % (AllUniqued, UniqSubjects))
if UniqSubjects == 0: #If there are no sequences that appear more than once, then just randomly choose the first 10 for the file.
InFile = open(InFileName, 'rU')
LineNum = 0
for Line in InFile:
Line = Line.strip('\n').strip('\r').split()
if LineNum < 10:
NewSeq = Line[0]
SubjectSeqs.append(NewSeq)
LineNum += 1
InFile.close()
OutFile = open(OutFileName, 'w')
for Line in SubjectSeqs:
OutFile.write(">")
OutFile.write(Line)
OutFile.write("\n")
SplitLine = Line.split(".")
OutFile.write(SplitLine[2])
OutFile.write("\n")
WrittenSeqs += 1
OutFile.close()
print("%d sequences were written to the file %s.\n" % (WrittenSeqs, OutFileName))
| true |
0991f5edbb67b88d2913c2b99dc1bc490a77233c | Python | bwang8482/LeetCode | /Google/305_Number_of_Islands_II.py | UTF-8 | 2,840 | 4.15625 | 4 | [] | no_license | """
A 2d grid map of m rows and n columns is initially filled with water. We may perform an addLand operation which turns the water at position (row, col) into a land. Given a list of positions to operate, count the number of islands after each addLand operation. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example:
Given m = 3, n = 3, positions = [[0,0], [0,1], [1,2], [2,1]].
Initially, the 2d grid grid is filled with water. (Assume 0 represents water and 1 represents land).
0 0 0
0 0 0
0 0 0
Operation #1: addLand(0, 0) turns the water at grid[0][0] into a land.
1 0 0
0 0 0 Number of islands = 1
0 0 0
Operation #2: addLand(0, 1) turns the water at grid[0][1] into a land.
1 1 0
0 0 0 Number of islands = 1
0 0 0
Operation #3: addLand(1, 2) turns the water at grid[1][2] into a land.
1 1 0
0 0 1 Number of islands = 2
0 0 0
Operation #4: addLand(2, 1) turns the water at grid[2][1] into a land.
1 1 0
0 0 1 Number of islands = 3
0 1 0
We return the result as an array: [1, 1, 2, 3]
"""
"""Solution:
1. using find-union: disjoint-set algorithm
2. add new set when adding new position
3. unite the new set with all its neighbor
"""
class Solution(object):
def numIslands2(self, m, n, positions):
"""
:type m: int
:type n: int
:type positions: List[List[int]]
:rtype: List[int]
"""
ans = []
land = Union()
for position in positions:
# Error 1: remember to convert list to tuple
land.add(tuple(position))
for x,y in (-1, 0), (1, 0), (0, -1), (0, 1):
neighbor = (position[0] + x, position[1] + y)
if neighbor in land.id:
land.unite(tuple(position), neighbor)
ans.append(land.island)
return ans
class Union(object):
def __init__(self):
self.rank = {}
self.id = {}
self.island = 0
def root(self, index):
while self.id[index] != index:
self.id[index] = self.id[self.id[index]]
index = self.id[index]
return index
def add(self, index):
# Error 2: remember to check duplicate
if index not in self.id:
self.rank[index] = 1
self.id[index] = index
self.island += 1
def unite(self, left, right):
root_left, root_right = self.root(left), self.root(right)
if root_left == root_right:
return
if self.rank[root_right] > self.rank[root_left]:
root_left, root_right = root_right, root_left
self.id[root_right] = root_left
self.rank[root_left] += self.rank[root_right]
self.island -= 1
| true |
2acb1d381ba428603bebdf81074e1b505da81b0d | Python | xingyunsishen/Python_CZ | /tom.py | UTF-8 | 846 | 3.875 | 4 | [] | no_license | #-*- coding:utf-8 -*-
class Cat(object):
def __init__(self, new_name, new_age):
print('\033[0;31;42m ======哈哈哈=======\033[0m')
self.name = new_name
self.age = new_age
def __str__(self):
return '\033[0;33;43m %s 的年龄:%d\033[0m'%(self.name, self.age)
def eat(self):
print('\033[0;38;47m name: %s eating...\033[0m' % (self.name))
def drink(self):
print('\033[0;32;43m name:%s drinking...\033[0m'%(self.name))
def introduce(self):
print('\033[0;33;43m 年龄:%d 名字:%s\033[0m'% (self.age, self.name))
# Demo: create two cats and print their colored __str__ representations.
tom = Cat('汤姆', 20)
#tom.age = 19
#tom.name = 'tangmu'
##tom.eat()
##tom.drink()
##tom.introduce()
print(tom)
# Second instance shows each object keeps its own name/age state.
lanmao = Cat('蓝猫', 22)
#lanmao.name = "蓝猫"
#lanmao.age = 20
##lanmao.eat()
##lanmao.drink()
##lanmao.introduce()
print(lanmao)
| true |
326e91c1f7f0289b9878730a4d9d973a181dc49e | Python | jiadaizhao/LeetCode | /1201-1300/1252-Cells with Odd Values in a Matrix/1252-Cells with Odd Values in a Matrix.py | UTF-8 | 432 | 2.90625 | 3 | [
"MIT"
] | permissive | class Solution:
def oddCells(self, n: int, m: int, indices: List[List[int]]) -> int:
rows = [False] * n
cols = [False] * m
countRow = countCol = 0
for r, c in indices:
rows[r] ^= True
cols[c] ^= True
countRow += 1 if rows[r] else -1
countCol += 1 if cols[c] else -1
return countRow * (m - countCol) + countCol * (n - countRow)
| true |
a8513dba9129ce3622e1fb262936da74dbf40937 | Python | barryntklc/pysqlite | /pysqlite_manager/Objects/NodeList.py | UTF-8 | 2,589 | 3.15625 | 3 | [] | no_license | import pysqlite_manager
from _winapi import NULL
from .Node import Node
# NodeList
# Stores the connection information for a bunch of Nodes.
#
class NodeList(object):
    """Stores the connection information (name / IP / port) for a set of Nodes."""

    # NOTE(review): class-level default kept for backward compatibility with
    # any code touching NodeList.Nodes; every instance gets its own list.
    Nodes = []

    def __init__(self):
        self.Nodes = []
        # print("Nodelist created.")

    # Add
    # Adds a Node to the NodeList if it does not yet exist. Otherwise,
    # modifies the specified attribute ('ip' or 'port') of the matching Node.
    def Add(self, name, key="", val=""):
        if self.Contains(name):
            for SelNode in self.Nodes:
                if SelNode.GetName() == name:
                    if key == 'ip':
                        SelNode.SetIPAddr(val)
                    if key == 'port':
                        SelNode.SetPortNum(val)
        else:
            NewNode = Node(name)
            if key == 'ip':
                NewNode.SetIPAddr(val)
            if key == 'port':
                NewNode.SetPortNum(val)
            self.Nodes.append(NewNode)

    # Contains
    # Determines if the list of nodes contains a node with a specific name.
    def Contains(self, name):
        return any(SelNode.GetName() == name for SelNode in self.Nodes)

    # Size
    # Returns the size of the NodeList.
    def Size(self):
        return len(self.Nodes)

    # NumNodes
    # Counts the number of catalog ('cat') and normal nodes in the cluster
    # config; returns (cat_count, node_count).
    def NumNodes(self):
        cat_counter = 0
        node_counter = 0
        for SelNode in self.Nodes:
            if SelNode.GetName() == 'cat':
                cat_counter += 1
            else:
                node_counter += 1
        return cat_counter, node_counter

    # Get
    # Gets a node by its name; returns -1 when absent (sentinel kept for
    # backward compatibility with existing callers).
    def Get(self, name):
        for SelNode in self.Nodes:
            if SelNode.GetName() == name:
                return SelNode
        return -1

    # GetCat
    # Gets the node named 'cat'.
    # NOTE(review): returns _winapi.NULL (0) when no catalog node exists —
    # confirm callers expect that rather than None.
    def GetCat(self) -> Node:
        for SelNode in self.Nodes:
            if SelNode.GetName() == 'cat':
                return SelNode
        return NULL

    # GetNodes
    # Returns every node except the catalog node.
    def GetNodes(self):
        return [SelNode for SelNode in self.Nodes if SelNode.GetName() != 'cat']

    # ToString
    # Returns (string representation of the NodeList, its size).
    def ToString(self):
        # BUG FIX: the original iterated with "for self.Node in self.Nodes",
        # which bound the loop variable as an instance attribute; use a local.
        buffer = ""
        for SelNode in self.Nodes:
            buffer += SelNode.ToString() + '\n'
        return buffer, self.Size()
| true |
9260f4791bb789c7ef8035858c277bb0a1b8bd3e | Python | furuolan/Chest-X-ray-Disease-Diagnosis-using-Faster-R-CNN | /Code/src/image_to_array.py | UTF-8 | 732 | 2.875 | 3 | [] | no_license | import time
import cv2
import numpy as np
import pandas as pd
def convert_images_to_arrays(file_path, df):
    """Load every image named in df['Image_Index'] (relative to file_path)
    as a grayscale array and stack them into one numpy array.

    NOTE(review): assumes all images share the same dimensions — otherwise
    np.array produces an object array; confirm upstream resizing.
    """
    lst_images = [l for l in df['Image_Index']]
    return np.array([np.array(cv2.imread(file_path + img, cv2.IMREAD_GRAYSCALE)) for img in lst_images])
def save_to_array(arr_name, arr_object):
    """Persist ``arr_object`` to disk in .npy format at path ``arr_name``.

    ``np.save`` returns None, so this function does too.
    """
    result = np.save(arr_name, arr_object)
    return result
if __name__ == '__main__':
    start_time = time.time()
    # The labels CSV holds the image file names under 'Image_Index'.
    labels = pd.read_csv("../data/sample_labels.csv")
    print("Writing Train Array")
    X_train = convert_images_to_arrays('../data/resized-512/', labels)
    print(X_train.shape)
    print("Saving Train Array")
    save_to_array('../data/X_sample.npy', X_train)
    # BUG FIX: the original had `round(time.time() - start_time), 2` — the 2
    # sat outside round(), so the time was never rounded and a stray "2" was
    # printed as an extra argument.
    print("Seconds: ", round(time.time() - start_time, 2))
3eb8c92b8597176ec6615434f13d786ddc4cbd60 | Python | AnnMertens/Project_Boontje | /boontje/htmltags_to_corpus.py | UTF-8 | 8,144 | 3.375 | 3 | [] | no_license | """ html corpora opdelen in zinnen en woorden en deze zinnen in een lijst teruggeven als corpus"""
import nltk.data
import make_corpus
import filefunctions
from nltk.tokenize import word_tokenize
import tagging
import glob
# variabele tagger maken
#tagger_conll = tagging.tagger_conll2002('b')
tagger_alpino = tagging.tagger_alpino()
# functie die een corpus doorloopt en alles wegschrijft naar list of dicts
def corpus_to_sentences(corpus):
    """Flatten a corpus (a list of chapters) into one list of sentence dicts."""
    result = []
    for chapter in corpus:
        # One sentence-dict list per chapter, concatenated in order
        # (extend, not append, so the result stays flat).
        result.extend(chapter_to_sentences(chapter))
    return result
# functie om chapters in zinnen te splitsen
def chapter_to_sentences(chapter):
    """Turn one chapter (title + paragraphs) into a flat list of sentence dicts."""
    # The chapter title counts as a sentence of its own, followed by every
    # sentence from the chapter's paragraphs.
    result = [sentence_to_dict(chapter["title"].get_text())]
    result.extend(part_to_sentences(chapter["paragraphs"]))
    return result
def part_to_sentences(part):
    """Flatten a book part (an iterable of paragraphs) into sentence dicts."""
    result = []
    for par in part:
        result.extend(paragraph_to_sentences(par))
    return result
# functie die een paragraaf splitst in zinnen
def paragraph_to_sentences(paragraph):
    """Split one paragraph into sentence dicts using the Dutch Punkt model."""
    # NOTE: the tokenizer is loaded on every call, matching the original
    # behavior; hoisting it would change module import-time side effects.
    splitter = nltk.data.load('tokenizers/punkt/dutch.pickle')
    raw_sentences = splitter.tokenize(paragraph.get_text())
    return [sentence_to_dict(raw) for raw in raw_sentences]
# functie die zin omzet in dictionary met als key "sentence"" en als value de zin en tweede key "words" en als value een list van alle woorden
def sentence_to_dict(text_only_sentence):
    """Build the per-sentence record.

    Keys:
      "sentence"            : the raw sentence text
      "words"               : punctuation-free token list
      "tagged_alpino_words" : (word, tag) pairs from the module Alpino tagger
    """
    words = sentence_to_list_of_words(text_only_sentence)
    return {
        "sentence": text_only_sentence,
        "words": words,
        "tagged_alpino_words": tagger_alpino.tag(words),
    }
# functie die zin omzet in list van woorden
def sentence_to_list_of_words(sentence):
    """Tokenize a sentence and drop tokens that are pure punctuation."""
    cleaned = []
    for token in word_tokenize(sentence):
        stripped = make_corpus.remove_punctation(token)
        if stripped != "":
            cleaned.append(stripped)
    return cleaned
# functie die uit een corpus alle zinnen haalt en wegschrijft in een dict met keys "sentence" en "words" en values de zin en list of words
def get_sentences_from_corpora_kapellekensbaan():
    """Find all the sentences in the 4 Kapellekensbaan corpora.

    Returns a 4-tuple (ondineke, reinaert, vandaag, KB); each entry is a
    list of sentence dicts as produced by sentence_to_dict.
    """
    # filetext = filefunctions.read_file("primaire bronnen/corpusKB/x97890295680438.xhtml")
    # htmltagswithcontent = filefunctions.get_tags_with_specific_classnames_from_html(filetext)
    # chapters = make_corpus.divide_in_chapters(htmltagswithcontent)
    # Read the source files in the correct (book) order.
    chapters = get_chapters("primaire bronnen/corpusKB/x97890295680436.xhtml")
    chapters.extend(get_chapters("primaire bronnen/corpusKB/x97890295680438.xhtml"))
    chapters.extend(get_chapters("primaire bronnen/corpusKB/x978902956804310.xhtml"))
    chapters.extend(get_chapters("primaire bronnen/corpusKB/x978902956804312.xhtml"))
    # The last file drops the chapter added by another author.
    chapters.extend(get_chapters("primaire bronnen/corpusKB/x978902956804313.xhtml", ["een onfatsoenlijk boek"]))
    ondineke_list_of_chapters, reinaert_list_of_chapters, vandaag_list_of_chapters, KB_list_of_chapters = make_corpus.divide_in_corpora(chapters)
    return corpus_to_sentences(ondineke_list_of_chapters), corpus_to_sentences(reinaert_list_of_chapters), corpus_to_sentences(vandaag_list_of_chapters), corpus_to_sentences(KB_list_of_chapters)
def get_sentences_from_corpus_het_verdriet_van_belgie():
    """Collect all sentence dicts from the 'Het verdriet van België' corpus."""
    paragraphs = []
    for found_file in glob.glob("primaire bronnen/corpusHVVB/*.xhtml"):
        paragraphs.extend(get_bulk_content(found_file, r"(wp\-.*)|(calibre1)|(wpv.*)", "wpo-newpage"))
    return part_to_sentences(paragraphs)
def get_sentences_from_corpus_walschap():
    """Collect all sentence dicts from the Walschap corpus."""
    paragraphs = []
    for found_file in glob.glob("primaire bronnen/corpusWalschap/*.html"):
        paragraphs.extend(get_bulk_content(found_file, r"(noindent)|(indent)", "booksection"))
    return part_to_sentences(paragraphs)
# functie om alle stukken van het corpus samen te klutsen
def get_chapters(filename, chapter_titles_to_skip=None):
    """Parse one xhtml file into chapters, excluding unwanted chapter titles.

    Parameters
    ----------
    filename : str
        path of the xhtml file to read
    chapter_titles_to_skip : list of str, optional
        titles of chapters to exclude (e.g. afterwords by other authors)

    Returns
    -------
    list of chapters as produced by make_corpus.divide_in_chapters
    """
    # Mutable-default fix: None instead of a shared list() default.
    if chapter_titles_to_skip is None:
        chapter_titles_to_skip = []
    filetext = filefunctions.read_file(filename)
    htmltagswithcontent = filefunctions.get_tags_with_specific_classnames_from_html(filetext)
    chapters = make_corpus.divide_in_chapters(htmltagswithcontent)
    # BUG FIX: the original removed items from `chapters` while iterating
    # over it, which skips the element after each removal (so consecutive
    # unwanted chapters were not all dropped). Build a filtered list instead.
    return [chapter for chapter in chapters
            if chapter["title"].get_text() not in chapter_titles_to_skip]
# functie om andere corpora te lezen
def get_bulk_content(filename, classNamesPattern, start_tag):
    """Get the content tags with matching class names from an html file,
    starting at the element whose class is ``start_tag``.
    """
    filetext = filefunctions.read_file(filename)
    # cleanup: removed dead local `tags_with_content` that was created and
    # never used in the original.
    return filefunctions.get_tags_with_specific_classnames_from_html_start_element(
        filetext, classNamesPattern, start_tag)
# # KB
# ondineke_sentences, reinaert_sentences, vandaag_sentences, KB_sentences = get_sentences_from_corpora_kapellekensbaan()
# # Print first Sentence
# print(KB_sentences[0]["sentence"])
# # Print last Sentence
# print(KB_sentences[-1]["sentence"])
# # Lengte van de corpora
# print(str(len(ondineke_sentences)) + ' - ' + str(len(reinaert_sentences)) + ' - ' + str(len(vandaag_sentences)) + ' - ' + str(len(KB_sentences)))
# # HVVB
# het_verdriet_van_belgie_sentences = get_sentences_from_corpus_het_verdriet_van_belgie()
# print("Het verdriet van België")
# print(het_verdriet_van_belgie_sentences[5])
# print(het_verdriet_van_belgie_sentences[-1])
# print(len(het_verdriet_van_belgie_sentences))
# # Walschap
# walschap_sentences = get_sentences_from_corpus_walschap()
# print("Walschap")
# print(walschap_sentences[0])
# print(walschap_sentences[-1])
# print(len(walschap_sentences))
| true |
feac20b4d18b6d91546c7028c878c372cdd4a1ac | Python | ratalex/pyNastran | /pyNastran/bdf/utils.py | UTF-8 | 17,745 | 2.8125 | 3 | [] | no_license | """
Defines various utilities including:
- parse_patran_syntax
- parse_patran_syntax_dict
- Position
- PositionWRT
- TransformLoadWRT
"""
from __future__ import print_function, unicode_literals
from copy import deepcopy
from typing import List, Union, Dict, Tuple, Optional
import numpy as np # type: ignore
from numpy import unique, cross, dot, array # type: ignore
from pyNastran.bdf.cards.collpase_card import collapse_colon_packs
from pyNastran.bdf.bdf_interface.utils import deprecated
from pyNastran.utils.numpy_utils import integer_types
def parse_patran_syntax(node_sets, pound=None):
    # type: (str, Optional[int]) -> np.ndarray
    """
    Parses Patran's syntax for compressing nodes/elements.

    Supports space/comma separated integers plus ranges:
    "1 2 3" -> [1, 2, 3]; "5:10" -> [5, ..., 10]; "12:20:2" -> [12, 14, 16, 18, 20].
    A '#' is replaced by ``pound`` before parsing (e.g. "1:#" means "1 to all").

    Parameters
    ----------
    node_sets : str
        the string to parse; do NOT include an identifier such as 'n'/'e'
        (use ``parse_patran_syntax_dict`` for that form)
    pound : int / str, optional
        the value substituted for '#'

    Returns
    -------
    nodes : np.ndarray
        unique, sorted integer ids
    """
    assert isinstance(node_sets, str), type(node_sets)
    if pound is not None:
        assert isinstance(pound, (str, integer_types)), type(pound)
        node_sets = node_sets.replace('#', str(pound).strip())
    if not node_sets:
        return array([], dtype='int32')

    parsed = []  # type: List[int]
    for token in node_sets.split():
        _apply_comma_colon_int_node(parsed, token)
    return unique(parsed)
def _apply_comma_colon_int_node(nodes, snode):
"""helper method for parse_patran_syntax"""
if ',' in snode:
comma_split_node = snode.split(',')
for comma_node in comma_split_node:
_apply_comma_colon_int_node(nodes, comma_node)
elif ':' in snode:
new_set = _apply_colon_set(snode)
nodes += new_set
else:
nodes.append(int(snode))
def _apply_colon_set(snode):
"""helper method for parse_patran_syntax"""
ssnode = snode.split(':')
if len(ssnode) == 2:
nmin = int(ssnode[0])
nmax = int(ssnode[1])
new_set = list(range(nmin, nmax + 1))
elif len(ssnode) == 3:
nmin = int(ssnode[0])
nmax = int(ssnode[1])
delta = int(ssnode[2])
nmin, nmax = min([nmin, nmax]), max([nmin, nmax])
if delta > 0:
new_set = list(range(nmin, nmax + 1, delta))
else:
new_set = list(range(nmin, nmax + 1, -delta))
else:
raise NotImplementedError(snode)
return new_set
def write_patran_syntax_dict(dict_sets):
    # type: (Dict[str, np.ndarray]) -> str
    """
    Writes Patran syntax from a dict of id arrays.

    Parameters
    ----------
    dict_sets : Dict[str] = List[int]
        str : the key (e.g. 'n', 'e')
        values : the integer values for that key

    Returns
    -------
    node_sets : str
        the Patran-style string (e.g. 'n 1:10 e 2 4')

    See ``parse_patran_syntax_dict`` for explanation of usage
    """
    msg = ''
    for key, dict_set in sorted(dict_sets.items()):
        # collapse consecutive ids into 'a:b' / 'a:b:step' packs
        singles, doubles = collapse_colon_packs(dict_set, thru_split=4)
        double_list = ('%s:%s' % (double[0], double[2])
                       if len(double) == 3 else '%s:%s:%s' % (double[0], double[2], double[4])
                       for double in doubles)
        double_str = ' '.join(double_list)
        msg += '%s %s %s ' % (
            key,
            ' '.join(str(single) for single in singles),
            double_str,
        )
    # no '%' placeholder should ever survive into the output
    assert '%' not in msg, msg
    # collapse double spaces left by empty singles/doubles sections
    return msg.strip().replace('  ', ' ')
def parse_patran_syntax_dict(node_sets, pound_dict=None, msg=''):
    # type: (str, Dict[str, Optional[int]], str) -> Dict[str, np.ndarray]
    """
    Parses Patran's syntax for compressing nodes/elements

    Parameters
    ----------
    node_sets : str
        the node_set to parse
    pound_dict : List[str] : int
        key : the string
        value : the pound value (e.g. 1:#)
    msg : str
        error message; currently unused

    Returns
    -------
    nodes : Dict[str] = List[int]
        str : the key
        values : the integer values for that key

    Examples
    --------
    **Example 1**

    >>> node_sets = "e 1:3 n 2:6:2 Node 10:13"
    >>> data = parse_patran_syntax_dict(node_sets)
    >>> data = {
        'e'    : [1, 2, 3],
        'n'    : [2, 4, 6],
        'Node' : [10, 11, 12, 13],
    }

    **Example 2**

    >>> node_sets = "e 1:3 n 2:6:2 Node 10:#"

    # a pound character will be set to 20, but only for 'Node', but not
    # 'n' so define it twice if needed
    >>> pounds = {'Node' : 20}
    >>> data = parse_patran_syntax_dict(node_sets, pound_dict=pounds)
    >>> data = {
        'e'    : [1, 2, 3],
        'n'    : [2, 4, 6],
        'Node' : [10, 11, 12, 13],
    }

    Notes
    -----
    An identifier (e.g. "e") must be used.
    Use parse_patran_syntax to skip the identifier.

    .. warning:: case sensitive
    """
    data = {}  # type: Dict[str, List[int]]
    try:
        snodes = node_sets.split()
    except AttributeError:
        print('node_sets =', node_sets, type(node_sets))
        raise
    except TypeError:
        print('node_sets =', node_sets, type(node_sets))
        raise

    if pound_dict is None:
        pound_dict = {}

    # `key` is the most recently seen identifier ('n', 'e', ...); range and
    # integer tokens are accumulated under it.
    key = None
    for snode in snodes:
        if ':' in snode:
            ssnode = snode.split(':')
            if len(ssnode) == 2:
                # 'a:b' -> inclusive range; 'a:#' uses the pound value for b
                if ssnode[0].isdigit():
                    nmin = int(ssnode[0])
                else:
                    raise NotImplementedError('ssnode=%s must be int,int' % ssnode)
                if ssnode[1].isdigit():
                    nmax = int(ssnode[1])
                elif ssnode[1] == '#' and key in pound_dict:
                    nmax = int(pound_dict[key])
                else:
                    raise NotImplementedError('ssnode=%s must be int,int' % ssnode)
                new_set = list(range(nmin, nmax + 1))
            elif len(ssnode) == 3:
                # 'a:b:step' -> bounds are normalized; step sign is ignored
                if ssnode[0].isdigit():
                    nmin = int(ssnode[0])
                else:
                    raise NotImplementedError('ssnode=%s must be int,int,int' % ssnode)
                if ssnode[1].isdigit():
                    nmax = int(ssnode[1])
                elif ssnode[1] == '#' and key in pound_dict:
                    nmax = int(pound_dict[key])
                else:
                    raise NotImplementedError('ssnode=%s must be int,int,int' % ssnode)

                delta = int(ssnode[2])
                nmin, nmax = min([nmin, nmax]), max([nmin, nmax])
                if delta > 0:
                    new_set = list(range(nmin, nmax + 1, delta))
                else:
                    new_set = list(range(nmin, nmax + 1, -delta))
            else:
                raise NotImplementedError(snode)
            if key is None:
                msg = 'data must be of the form "Node 10:13", not "10:13"\n'
                msg += 'new_set=%s' % array(new_set, dtype='int32')
                raise SyntaxError(msg)
            data[key] += new_set
        else:
            if snode.isdigit():
                data[key].append(int(snode))
            else:
                # non-numeric token starts a new identifier section
                key = snode
                if key is None:
                    msg = 'data must be of the form "Node 10:13", not "10:13"'
                    raise SyntaxError(msg)
                if key not in data:
                    data[key] = []
    # deduplicate and sort the ids collected under each identifier
    for key, ints in data.items():
        data[key] = unique(ints)
    return data
def parse_patran_syntax_dict_map(node_sets, type_map, msg=''):
    # type: (str, Dict[str, str], str) -> Dict[str, np.ndarray]
    """
    Parses Patran's syntax for compressing nodes/elements

    Parameters
    ----------
    node_sets : str
        the node_set to parse
    type_map : dict[key_in] : key_out
        key_in : str
            the name of the input string
        key_out : str
            the name of the out string
    msg : str
        error message; used only in the "skipping key" warning

    Returns
    -------
    nodes : Dict[str] = List[int]
        str : the key
        values : the integer values for that key

    Examples
    --------
    **Example 1**

    .. code-block:: python

       # we drop the coordinate systems because we didn't request them
       # (coord is not referenced)
       #
       >>> node_sets = "e 1:3 n 2:6:2 Node 10:13 N 15 coord 1:10"
       >>> type_map = {
           'n' : 'Node',
           'Node' : 'Node',
           'e' : 'Element',
           'Elm' : 'Element',
           'Element' : 'Element',
       }

    **Example 2**

    >>> data = parse_patran_syntax_dict(node_sets, type_map)
    >>> data = {
        'Element' : [1, 2, 3],
        'Node' : [2, 4, 6, 10, 11, 12, 13, 15],
    }

    .. todo:: doesn't support pound_dict
    """
    # makes it so we can pass in 'N' and 'n' and still get 'Node' out
    update_type_map = {}  # type: Dict[str, str]
    for key, value in type_map.items():
        if key in update_type_map:
            assert update_type_map[key] == value
        update_type_map[key.upper()] = value

    # parse the uppercased string, then rename/merge the keys via the map
    dict_in = parse_patran_syntax_dict(node_sets.upper(), pound_dict=None)
    dict_temp = {}  # type: Dict[str, np.ndarray]
    for key_in, value in sorted(dict_in.items()):
        key_in2 = key_in.upper()
        if key_in2 in update_type_map:
            key_out = update_type_map[key_in2]
            #print('key_in=%r key_out=%r' % (key_in, key_out))
            if key_out in dict_temp:
                dict_temp[key_out].append(value)
            else:
                dict_temp[key_out] = [value]
        else:
            # unmapped identifiers (e.g. 'coord') are dropped with a warning
            print('skipping key=%r while parsing %s' % (key_in, msg))

    # merge multiple input keys that map to the same output key
    dict_out = {}  # type: Dict[str, np.ndarray]
    for key, value_list in dict_temp.items():
        if len(value_list) == 1:
            value = value_list[0]
        else:
            value = np.hstack(value_list)
            value.sort()
        dict_out[key] = value
    return dict_out
def Position(xyz, cid, model, is_cid_int=None):
    """
    Gets the point in the global XYZ coordinate system.

    Parameters
    ----------
    xyz : (3,) ndarray
        the position of the GRID in an arbitrary coordinate system
    cid : int / Coord
        the coordinate ID for xyz
    model : BDF()
        the BDF model object
    is_cid_int : bool
        deprecated; do not use

    Returns
    -------
    xyz2 : (3,) ndarray
        the position of the GRID in the global coordinate system
    """
    if is_cid_int is not None:  # pragma: no cover
        deprecated('Position(xyz, cid, model, is_cid_int=%s)' % is_cid_int,
                   'Position(xyz, cid, model)', '1.2',
                   levels=[-1])
    coord_ref = _coord(model, cid)
    return coord_ref.transform_node_to_global(xyz)
def TransformLoadWRT(F, M, cid, cid_new, model, is_cid_int=None):
    """
    Transforms a force/moment from an arbitrary coordinate system to another
    coordinate system.

    Parameters
    ----------
    F : (3, ) float ndarray
        the force in the cid coordinate system
    M : (3, ) float ndarray
        the moment in the cid coordinate system
    cid : int
        the coordinate ID for xyz
    cid_new : int
        the desired coordinate ID
    model : BDF()
        the BDF model object
    is_cid_int : bool
        is cid/cid_new an integer or a Coord object (deprecated)

    Returns
    -------
    Fxyz_local : (3, ) float ndarray
        the force in the cid_new coordinate system
    Mxyz_local : (3, ) float ndarray
        the moment in the cid_new coordinate system
    """
    if is_cid_int is not None:  # pragma: no cover
        deprecated('TransformLoadWRT(F, M, cid, cid_new, model, is_cid_int=%s)' % is_cid_int,
                   'TransformLoadWRT(F, M, cid, cid_new, model)', '1.2',
                   levels=[-1])
    if cid == cid_new: # same coordinate system
        return F, M

    # find the vector r for doing:
    #     M = r x F
    cp_ref = _coord(model, cid)
    coord_to_ref = _coord(model, cid_new)
    # lever arm between the two coordinate-system origins
    r = cp_ref.origin - coord_to_ref.origin

    # change R-theta-z to xyz
    Fxyz_local_1 = cp_ref.coord_to_xyz(F)
    Mxyz_local_1 = cp_ref.coord_to_xyz(M)

    # pGlobal = pLocal1 * beta1 + porigin1
    # pGlobal = pLocal2 * beta2 + porigin2
    # pLocal1 * beta1 + porigin1 = pLocal2 * beta2 + porigin2
    # plocal1 * beta1 + porigin1 - porigin2 = plocal2 * beta2
    # (plocal1 * beta1 + porigin1 - porigin2) * beta2.T = plocal2
    #
    # origin transforms only apply to nodes, so...
    # Fglobal = Flocal1 * beta1
    # Flocal2 = (Flocal1 * beta1) * beta2.T

    Fxyz_global = dot(Fxyz_local_1, cp_ref.beta())
    Fxyz_local_2 = dot(dot(Fxyz_local_1, cp_ref.beta()), coord_to_ref.beta().T)

    # find the moment about the new origin due to the force
    Mxyz_global = cross(r, Fxyz_global)
    dMxyz_local_2 = cross(r, Fxyz_local_2)
    Mxyz_local_2 = Mxyz_local_1 + dMxyz_local_2

    # rotate the delta moment into the local frame
    # NOTE(review): M_local is computed but never returned — the function
    # returns the unrotated Mxyz_local_2 instead; confirm which is intended.
    M_local = coord_to_ref.xyz_to_coord(Mxyz_local_2)

    return Fxyz_local_2, Mxyz_local_2
def PositionWRT(xyz, cid, cid_new, model, is_cid_int=None):
    """
    Gets the location of the GRID which started in some arbitrary system and
    returns it in the desired coordinate system

    Parameters
    ----------
    xyz : (3, ) float ndarray
        the position of the GRID in an arbitrary coordinate system
    cid : int
        the coordinate ID for xyz
    cid_new : int
        the desired coordinate ID
    model : BDF()
        the BDF model object
    is_cid_int : bool
        deprecated; do not use

    Returns
    -------
    xyz_local : (3, ) float ndarray
        the position of the GRID in the cid_new coordinate system
    """
    if is_cid_int is not None:  # pragma: no cover
        deprecated('PositionWRT(xyz, cid, cid_new, model, is_cid_int=%s)' % is_cid_int,
                   'PositionWRT(xyz, cid, cid_new, model)', '1.2',
                   levels=[-1])
    if cid == cid_new:  # same coordinate system
        return xyz

    cp_ref = _coord(model, cid)
    coord_to_ref = _coord(model, cid_new)
    # cleanup: removed the dead `if 0:` reference implementation that
    # performed the same transform manually; only the live path remains.
    # converting the xyz point arbitrary->global
    xyz_global = cp_ref.transform_node_to_global(xyz)

    # now converting it to the output coordinate system
    xyz_local = coord_to_ref.transform_node_to_local(xyz_global)
    return xyz_local
def split_eids_along_nids(model, eids, nids):
    """
    Dissassociate a list of elements along a list of nodes.

    The expected use of this function is that you have two bodies that
    are incorrectly equivalenced and you would like to create duplicate
    nodes at the same location and associate the new nodes with one half
    of the elements.

    Pick the nodes along the line and the elements along one side of the line.

    Parameters
    ----------
    model : BDF()
        the BDF model
    eids : list/tuple
        element ids to disassociate
    nids : list/tuple
        node ids to disassociate

    Implicitly returns model with additional nodes.

    Notes
    -----
    xref should be set to False for this function.
    """
    #assert model.xref == False, model.xref
    # new node ids start just past the current maximum
    nid = max(model.nodes.keys()) + 1
    nid_map = {}
    # duplicate each node in `nids` under a fresh id at the same location
    for nidi in nids:
        node = model.nodes[nidi]
        node2 = deepcopy(node)
        node2.nid = nid
        model.nodes[nid] = node2
        nid_map[nidi] = nid
        nid += 1

    # repoint the chosen elements at the duplicated nodes
    for eid in eids:
        nodes = []
        elem = model.elements[eid]
        for nidi in elem.nodes:
            if nidi in nid_map:
                nodes.append(nid_map[nidi])
            else:
                nodes.append(nidi)
        # an element must not reference the same node twice
        assert len(np.unique(nodes)) == len(nodes), 'nodes=%s' % nodes
        elem.nodes = nodes
def _coord(model, cid):
"""helper method"""
if isinstance(cid, integer_types):
cp_ref = model.Coord(cid)
else:
cp_ref = cid
return cp_ref
| true |
3752f06271de1f722d94bfaee78b2bd9a4623ca0 | Python | ksons/ln.py | /ln/triangle.py | UTF-8 | 2,685 | 2.75 | 3 | [
"MIT"
] | permissive | from pyrr import Vector3
from .hit import Hit, NoHit
from .box import Box
from .ray import Ray
from .path import Paths
from .util import vector_min, vector_max
EPS = 1e-9
class Triangle:
    """A triangle primitive defined by three vertices v1, v2, v3."""

    def __init__(self, v1=None, v2=None, v3=None):
        # Copy the inputs so the triangle owns its vertices.
        self.v1 = Vector3() if v1 is None else Vector3(v1)
        self.v2 = Vector3() if v2 is None else Vector3(v2)
        self.v3 = Vector3() if v3 is None else Vector3(v3)
        self.box = None
        self.update_bounding_box()

    def compile(self):
        # No precomputation needed for triangles.
        pass

    def bounding_box(self) -> Box:
        return self.box

    def update_bounding_box(self):
        # Axis-aligned box spanning the three vertices.
        # (renamed locals: the original shadowed the builtins min/max)
        lo = vector_min(vector_min(self.v1, self.v2), self.v3)
        hi = vector_max(vector_max(self.v1, self.v2), self.v3)
        self.box = Box(lo, hi)

    def contains(self, v: Vector3, f: float) -> bool:
        # BUG FIX: `self` was missing from the original signature, so any
        # `triangle.contains(v, f)` call raised a TypeError.
        return False

    def intersect(self, r: Ray) -> Hit:
        """Möller–Trumbore ray/triangle intersection test."""
        e1x = self.v2.x - self.v1.x
        e1y = self.v2.y - self.v1.y
        e1z = self.v2.z - self.v1.z
        e2x = self.v3.x - self.v1.x
        e2y = self.v3.y - self.v1.y
        e2z = self.v3.z - self.v1.z
        px = r.direction.y * e2z - r.direction.z * e2y
        py = r.direction.z * e2x - r.direction.x * e2z
        pz = r.direction.x * e2y - r.direction.y * e2x
        det = e1x * px + e1y * py + e1z * pz
        if det > -EPS and det < EPS:
            # Ray is (nearly) parallel to the triangle plane.
            return NoHit
        inv = 1 / det
        tx = r.origin.x - self.v1.x
        ty = r.origin.y - self.v1.y
        tz = r.origin.z - self.v1.z
        u = (tx * px + ty * py + tz * pz) * inv
        if u < 0.0 or u > 1.0:
            return NoHit
        qx = ty * e1z - tz * e1y
        qy = tz * e1x - tx * e1z
        qz = tx * e1y - ty * e1x
        v = (r.direction.x * qx + r.direction.y * qy + r.direction.z * qz) * inv
        if v < 0.0 or (u + v) > 1.0:
            return NoHit
        d = (e2x * qx + e2y * qy + e2z * qz) * inv
        if d < EPS:
            # Intersection is behind (or too close to) the ray origin.
            return NoHit
        return Hit(self, d)

    def paths(self) -> Paths:
        # The three edges of the triangle.
        return Paths([
            [self.v1, self.v2],
            [self.v2, self.v3],
            [self.v3, self.v1]])

    def show_tree(self, level):
        return "%s%5.2f,%5.2f,%5.2f %5.2f,%5.2f,%5.2f %5.2f,%5.2f,%5.2f\n" % (level * '  ',
                                                                              self.v1.x, self.v1.y, self.v1.z,
                                                                              self.v2.x, self.v2.y, self.v2.z,
                                                                              self.v3.x, self.v3.y, self.v3.z)

    def __str__(self):
        return "V1: {}, V2: {}, V3: {}".format(self.v1, self.v2, self.v3)
| true |
ea5bb1314fe7d97aba14114cfcd1d3964cd3608e | Python | purusoth-lw/my-work | /36.multiple Inheritance.py | UTF-8 | 1,253 | 3.859375 | 4 | [] | no_license | """
Multiple Inheritance:
=====================
a single child class inherits from multiple parent classes
(many parents, one child)
syntax:
=======
class Class1:
statements
class Class2:
statements
class Class3(Class1,class2):
statements
class Father:
cash1=50000
def show1(self):
print("Father cash :",self.cash1)
class Mother:
cash2=30000
def show2(self):
print("Mother Cash :",self.cash2)
class Son(Father,Mother):
cash3=1000
def show3(self):
cash=self.cash1+self.cash2+self.cash3
print("Son cash :",self.cash3)
print("Son access cash : ",cash)
f=Father()
f.show1()
print()
m=Mother()
m.show2()
print()
s=Son()
s.show3()
s.show1()
s.show2()
"""
#multiple inheritance using constructor
class Father:
    # Class attribute inherited by Son.
    cash1=50000
    def __init__(self):
        # In the Son demo below this runs via super() (Son -> Father in the MRO).
        print("Father cash :",self.cash1)
class Mother:
    # Class attribute inherited by Son.
    cash2=30000
    def __init__(self):
        # NOTE: never reached from Son() below — Father.__init__ does not
        # chain with super(), so the MRO stops there.
        print("Mother Cash :",self.cash2)
class Son(Father,Mother):
    # Inherits cash1 from Father and cash2 from Mother.
    cash3=1000
    def __init__(self):
        # Sum of the inherited class attributes plus Son's own.
        cash=self.cash1+self.cash2+self.cash3
        print("Son cash :",self.cash3)
        print("Son access cash : ",cash)
        # super() follows the MRO (Son -> Father -> Mother); only
        # Father.__init__ runs because Father does not chain further.
        super().__init__()
        #super().__init__()
# Instantiate the child: prints Son's totals, then Father's banner via super().
s=Son()
| true |
3b90a8f957de622c93c40d604ddf3a0029eff406 | Python | francoisleroux16/MRN_Final | /testdata.py | UTF-8 | 27,016 | 2.90625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 18:36:26 2020
@author: Francois le Roux
This script is regarding everything relating to the test data
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
import importing
def cubic(x, val1, val2):
    """Cubic B-spline interpolation of the points (val1, val2), evaluated at x."""
    knots = interpolate.splrep(val1, val2)
    return interpolate.splev(x, knots)
def cubic_spline(x, val1, val2):
    """Evaluate a scipy CubicSpline fit through (val1, val2) at x."""
    spline = interpolate.CubicSpline(val1, val2)
    return spline(x)
def make_P(data):
    """Reorder the raw measurement columns into probe order P1..P5.

    Column mapping (raw column -> probe): 1->P1, 3->P2, 0->P3, 2->P4, 4->P5.
    NOTE(review): mapping carried over from the original ("assume correct
    for now") — confirm against the probe wiring.

    Parameters
    ----------
    data : (n, >=5) array-like
        raw measurements, one row per sample

    Returns
    -------
    (n, 5) float ndarray with columns [P1, P2, P3, P4, P5]
    """
    data = np.asarray(data)
    column_order = (1, 3, 0, 2, 4)  # raw column index feeding P1..P5
    output = np.zeros((len(data), 5))
    # Vectorized column reordering replaces the original per-element loop.
    for probe, raw_col in enumerate(column_order):
        output[:, probe] = data[:, raw_col]
    return output
def insane_make_P(data, a, b, c, d, e):
    """Reorder raw pressure channels into P1..P5 with an arbitrary mapping.

    Same as make_P, but the source column of every hole is a parameter so
    lets_not_go_mentally_insane can brute-force all channel permutations.

    Parameters
    ----------
    data : (n, >=5) array
        Raw pressure samples.
    a, b, c, d, e : int
        Column indices to use for P1, P2, P3, P4, P5 respectively.

    Returns
    -------
    (n, 5) float array with columns [P1, P2, P3, P4, P5].
    """
    # Vectorised column selection instead of the per-element copy loop.
    return np.asarray(data, dtype=float)[:, [a, b, c, d, e]]
def lets_not_go_mentally_insane(d1,d2,calibration):
    """Brute-force the probe channel-to-hole assignment.

    Tries all 120 permutations of the five DAQ channels, runs the full
    reduction pipeline for each, and keeps the permutations whose first
    sample yields a non-NaN Cpts (column 14 of make_big_array's output);
    griddata returns NaN when a query point falls outside the calibration
    hull, so NaN signals an implausible channel ordering.
    """
    import itertools
    import math
    output = []
    looplist = list(itertools.permutations([0,1,2,3,4]))
    for k in looplist:
        val1 = k[0]
        val2 = k[1]
        val3 = k[2]
        val4 = k[3]
        val5 = k[4]
        merged_data = merge_data(insane_make_P(d1,val1,val2,val3,val4,val5),get_position_data(d2))
        results= make_big_array(merged_data, calibration)
        if not math.isnan(results[0,14]):
            output.append(k)
    return output
def get_position_data(data):
    '''
    Takes the raw output from labview and converts it into a [n,2] matrix

    Parameters
    ----------
    data : Raw Labview data
        Each traverse pass contributes a (Y, Z) column pair, i.e. the
        columns are [Y0, Z0, Y1, Z1, ...]; an odd trailing column is
        ignored (as in the original loop).

    Returns
    -------
    (rows * passes, 2) float array of [Z, Y] rows, stacked pass by pass
    (all rows of pass 0, then all rows of pass 1, ...), matching the order
    in which the points were measured.
    '''
    data = np.asarray(data, dtype=float)
    n_passes = data.shape[1] // 2
    if n_passes == 0:
        # Degenerate input (fewer than two columns) -> empty position list.
        return np.zeros((len(data), 2))
    # For pass k, swap the (Y, Z) column pair into (Z, Y) order; vstack
    # reproduces the original pass-major row ordering.
    return np.vstack([data[:, [2 * k + 1, 2 * k]] for k in range(n_passes)])
def plot_position_data(data,c):
    """Scatter-plot the measurement grid in the Z-Y plane.

    data is an (n, 2) array of [Z, Y] rows (see get_position_data).
    NOTE(review): the parameter c is unused here -- confirm whether it was
    meant to set the marker colour/size.
    """
    plt.figure("Position Data")
    plt.scatter(data[:,0],data[:,1],c='black',s=10)
    plt.title("Coordinates at which values were measured",fontsize=28)
    plt.xlabel(r"$Z$ [mm]",fontsize=22)
    plt.ylabel(r"$Y$ [mm]",fontsize=22,rotation=0,labelpad=30)
    return 'Done'
def merge_data(pvals, position):
    """Concatenate probe pressures and traverse position row-wise.

    Parameters
    ----------
    pvals : (n, 5) array
        Columns [P1, P2, P3, P4, P5] (e.g. from make_P).
    position : (n, 2) array
        Columns [Z, Y] (e.g. from get_position_data).

    Returns
    -------
    (n, 7) float array with columns [P1, P2, P3, P4, P5, Z, Y].
    """
    pvals = np.asarray(pvals, dtype=float)
    position = np.asarray(position, dtype=float)
    # column_stack replaces the element-wise copy loop of the original.
    return np.column_stack((pvals[:, :5], position[:, :2]))
def make_pdash(coeffs, poff):
    """Build the offset pressure array p' from the Cpp column of *coeffs*.

    Each entry is 9.8 * (Cpp + 0.15) - poff, where Cpp is column 3 of the
    corresponding coefficient row.
    """
    return np.array([9.8 * (row[3] + 0.15) - poff for row in coeffs])
def make_ydash(pdash,coeffs,calibration_data):
    """Interpolate a yaw estimate for every measurement point.

    For each p' value, 2-D scattered interpolation uses calibration columns
    0 (pitch) and 2 (Cpy) as coordinates and column 1 (yaw) as the value,
    evaluated at (p', measured Cpy).  griddata with method='cubic' returns
    NaN for query points outside the calibration convex hull.
    """
    def make_inter(data):
        # Build the (pitch, Cpy) coordinate array for griddata.
        output = np.zeros((len(data),2))
        for j in range(len(data)):
            output[j,0] = data[j,0] #Pitch
            output[j,1] = data[j,2] #Cpy
        return output
    counter = 0
    ydash = np.zeros(len(pdash))
    for j in pdash:
        Cpy_point = coeffs[counter,2] #Cpy point
        interpolation_point = [[j,Cpy_point]]
        # NOTE(review): make_inter(calibration_data) is loop-invariant and
        # could be hoisted out of the loop; left unchanged here.
        interpolation_input = make_inter(calibration_data)
        '''Interpolate using j and Cpy to calculate ydash'''
        ydash[counter] = interpolate.griddata(interpolation_input, calibration_data[:,1], interpolation_point, method='cubic')
        counter += 1
    return ydash
def make_p(ydash,coeffs,calibration_data):
    """Interpolate a pitch estimate for every measurement point.

    Mirror of make_ydash: coordinates are calibration columns 1 (yaw) and
    3 (Cpp), the interpolated value is column 0 (pitch), evaluated at
    (ydash, measured Cpp).  NaN is returned outside the calibration hull.
    """
    def make_inter(data):
        # Build the (yaw, Cpp) coordinate array for griddata.
        output = np.zeros((len(data),2))
        for j in range(len(data)):
            output[j,0] = data[j,1] #Yaw
            output[j,1] = data[j,3] #Cpp
        return output
    counter = 0
    pvals = np.zeros(len(ydash))
    for k in ydash:
        Cpp_point = coeffs[counter,3] #Cpp Point
        interpolation_point = [[k,Cpp_point]]
        # NOTE(review): loop-invariant, could be hoisted; left unchanged.
        interpolation_input = make_inter(calibration_data)
        pvals[counter] = interpolate.griddata(interpolation_input, calibration_data[:,0], interpolation_point,method='cubic')
        counter += 1
    return pvals
def correct_p(pvals, poff):
    """Subtract the pitch offset *poff* from every entry of pvals, in place.

    Returns the same (mutated) sequence so calls can be chained.
    """
    for i, value in enumerate(pvals):
        pvals[i] = value - poff
    return pvals
def get_Cpts(pfinal,yfinal,testdata,calibration_data):
    """Interpolate the total/static pressure coefficient Cpts.

    Queries calibration column 6 (Cpts) at the solved (pitch, yaw) pairs
    using cubic scattered-data interpolation over calibration columns 0-1.
    NOTE(review): the *testdata* parameter is unused in this function.
    """
    def make_inter(p,y):
        # Pair up (pitch, yaw) query points for griddata.
        output = np.zeros((len(p),2))
        for j in range(len(p)):
            output[j,0] = p[j]
            output[j,1] = y[j]
        return output
    interpoints = make_inter(pfinal, yfinal)
    interpolation_input = calibration_data[:,0:2] #Pitch and Yaw
    Cpts_data = interpolate.griddata(interpolation_input, calibration_data[:,6], interpoints, method='cubic')
    return Cpts_data
def do_v_steps(testdataraw,positionraw,calibration_data,poff=2.5): #NEED Calibration DATA
    '''
    Run the full five-hole-probe reduction pipeline.

    testdataraw = Labview output from five-hole probe (P1-P5)
    positionraw = Position matrix from Labview
    Calibration Data = Output excel from calibration.py
    poff = pitch offset used by make_pdash/correct_p (default 2.5; can be
           varied to test its effect)

    Returns (Cpts, pitch_angle, yaw_angle), one entry per measurement point.
    '''
    '''Step 1: Make coeffs'''
    posdata = get_position_data(positionraw)
    pvalues = make_P(testdataraw)
    combined = merge_data(pvalues, posdata)
    coeffs = new_coeffs(combined)
    '''Step 2: Generate Pdash'''
    pdash = make_pdash(coeffs, poff)
    '''Setp 3: Generate Ydash'''
    ydash = make_ydash(pdash, coeffs, calibration_data)
    '''Step 4: Get Pitch Angle'''
    pvals = make_p(ydash,coeffs,calibration_data)
    p_angle = correct_p(pvals,poff)
    '''Step 5: Get Yaw Angle'''
    # The yaw lookup is re-run with the corrected pitch angles.
    y_angle = make_ydash(p_angle, coeffs, calibration_data)
    '''Step 6: Get Cpts '''
    Cpts = get_Cpts(p_angle, y_angle, coeffs, calibration_data)
    return Cpts, p_angle, y_angle
def new_coeffs(data):
    '''
    Takes the five Pressure values and position Z,Y.
    [P1--P5,Z,Y]

    Parameters
    ----------
    data : (n, 7) array
        Columns [P1, P2, P3, P4, P5, Z, Y] (probe pressures plus traverse
        position, see merge_data).

    Returns
    -------
    (n, 4) array [Z, Y, Cpy, Cpp], where
        Pavg = (P2 + P3 + P4 + P5) / 4
        Cpy  = (P3 - P2) / (P1 - Pavg)   (yaw coefficient)
        Cpp  = (P5 - P4) / (P1 - Pavg)   (pitch coefficient)
    '''
    # Vectorised over all rows; replaces the per-row helper functions.
    data = np.asarray(data, dtype=float)
    P1, P2, P3, P4, P5 = (data[:, i] for i in range(5))
    denom = P1 - (P2 + P3 + P4 + P5) / 4.0
    output = np.zeros((len(data), 4))
    output[:, 0] = data[:, 5]          # Z
    output[:, 1] = data[:, 6]          # Y
    output[:, 2] = (P3 - P2) / denom   # Cpy
    output[:, 3] = (P5 - P4) / denom   # Cpp
    return output
def sample_new_coeffs(data):
    """Compute yaw/pitch coefficients for the sample-data column layout.

    Same maths as new_coeffs, but the input columns are
    [Z, Y, P1, P2, P3, P4, P5] instead of [P1..P5, Z, Y].

    Returns an (n, 4) array [Z, Y, Cpy, Cpp].
    """
    data = np.asarray(data, dtype=float)
    P1, P2, P3, P4, P5 = (data[:, i] for i in range(2, 7))
    denom = P1 - (P2 + P3 + P4 + P5) / 4.0
    output = np.zeros((len(data), 4))
    output[:, 0] = data[:, 0]          # Z
    output[:, 1] = data[:, 1]          # Y
    output[:, 2] = (P3 - P2) / denom   # Cpy
    output[:, 3] = (P5 - P4) / denom   # Cpp
    return output
def spatial_resolutionP2P3(data,d=0,c=30):
    '''Spatial-resolution correction for the P2/P3 hole pair.

    First let us do P2 and P3: the rows are sorted by Y then Z, split into
    c passes of len(data)/c samples each, and within every pass a cubic
    spline through (Z, P2) and (Z, P3) is re-sampled at Z-d and Z+d so both
    holes are referred to the probe centre (d = hole offset; d=0 is a
    no-op resample).  Mutates columns 3 and 4 of *data* in place and
    returns it.
    '''
    index = np.lexsort((data[:,0],data[:,1])) # First Y and then Z
    for k in range(c):
        vals = np.zeros((int(len(data)/c),3))
        counter = 0
        for j in range(len(vals)):
            vals[counter,0] = data[index[k*int(len(data)/c)+counter],0] #Z
            vals[counter,1] = data[index[k*int(len(data)/c)+counter],3] #P2
            vals[counter,2] = data[index[k*int(len(data)/c)+counter],4] #P3
            # vals[counter,2] = data[index[k*c+counter],4] #P3
            counter += 1
        # print(len(vals))
        # print(vals)
        P2new = cubic(vals[:,0]-d,vals[:,0],vals[:,1])
        P3new = cubic(vals[:,0]+d,vals[:,0],vals[:,2])
        for j in range(len(P2new)):
            data[index[k*int(len(data)/c)+j],3] = P2new[j]
            data[index[k*int(len(data)/c)+j],4] = P3new[j]
    return data
def spatial_resolutionP4P5(data,d=0,c=30):
    '''Spatial-resolution correction for the P4/P5 hole pair.

    Mirror of spatial_resolutionP2P3, but sorted by Z then Y and resampled
    along Y (columns 5 and 6 are corrected in place).
    '''
    index = np.lexsort((data[:,1],data[:,0]))
    for k in range(c):
        vals = np.zeros((int(len(data)/c),3))
        counter = 0
        for j in range(len(vals)):
            vals[counter,0] = data[index[k*int(len(data)/c)+counter],1] #Y
            vals[counter,1] = data[index[k*int(len(data)/c)+counter],5] #P4
            vals[counter,2] = data[index[k*int(len(data)/c)+counter],6] #P5
            counter += 1
        P4new = cubic(vals[:,0]-d,vals[:,0],vals[:,1])
        P5new = cubic(vals[:,0]+d,vals[:,0],vals[:,2])
        for j in range(len(P4new)):
            data[index[k*int(len(data)/c)+j],5] = P4new[j]
            data[index[k*int(len(data)/c)+j],6] = P5new[j]
    return data
# test_raw_data_sorted,v_const,Pd_const = importing.windtunnel_data()
# Pvals = make_P(test_raw_data_sorted)
# position_raw_data = importing.position_data()
# calibration_data = importing.coefficients()
def make_big_array(merged_data,calibration_data,a1=1,a2=3,a3=0,a4=2,a5=4,d=0,c=30):
    '''
    Makes a big array containing all our wanted outputs

    Parameters
    ----------
    merged_data : numpy array
        The combined position and test data ([P1..P5, Z, Y], see
        merge_data); a1..a5 select which of its first five columns map to
        holes P1..P5.
    calibration_data : numpy array
        Calibration Data from the Calibration.py output excel file.
    d : float, optional
        The diameter of the five-hole probe. The default is 0.
    c : Int, optional
        Makes the process of spatial correction easier - should be equal to the amount of data collected per pass. The default is 30.

    Returns
    -------
    output : numpy array, shape (n, 18)
        Columns: 0 Z, 1 Y, 2-6 P1..P5 (spatially corrected), 7 Pavg, 8 Pt,
        9 Ps, 10 Cpy, 11 Cpp, 12 Cpt, 13 Cps, 14 Cpts, 15 pitch angle,
        16 yaw angle, 17 velocity magnitude V.
    '''
    output = np.zeros((len(merged_data),18))
    for j in range(len(merged_data)):
        output[j,0] = merged_data[j,5] #Z
        output[j,1] = merged_data[j,6] #Y
        '''Check order'''
        output[j,2] = merged_data[j,a1] #P1
        output[j,3] = merged_data[j,a2] #P2
        output[j,4] = merged_data[j,a3] #P3
        output[j,5] = merged_data[j,a4] #P4
        output[j,6] = merged_data[j,a5] #P5
    '''Apply Spatial Correction before continuing'''
    '''----------------------------------------'''
    output = spatial_resolutionP2P3(output,d,c)
    output = spatial_resolutionP4P5(output,d,c)
    '''----------------------------------------'''
    for k in range(len(merged_data)):
        P1 = output[k,2] #P1
        P2 = output[k,3] #P2
        P3 = output[k,4] #P3
        P4 = output[k,5] #P4
        P5 = output[k,6] #P5
        Pavg = (output[k,3]+output[k,4]+output[k,5]+output[k,6])/4
        output[k,7] = (output[k,3]+output[k,4]+output[k,5]+output[k,6])/4 #Pavg
        '''SORT THIS OUT'''
        # Ambient static pressure and tunnel head are hard-coded here (the
        # commented alternatives show values from another test campaign).
        Ps = 86819
        # Ps = 65500
        Pt = Ps + (10.2)*100
        # Pt = Ps + (71.3)
        Cpy = -(P2-P3)/(P1-Pavg)
        Cpp = -(P4-P5)/(P1-Pavg)
        Cpt = (Pt-Pavg)/(P1-Pavg)
        Cps = (P1-Ps)/(P1-Pavg)
        output[k,8] = Pt
        output[k,9] = Ps
        output[k,10] = Cpy
        output[k,11] = Cpp
        output[k,12] = Cpt
        output[k,13] = Cps
    # 'coeffs' here is [Pt, Ps, Cpy, Cpp]; the helpers below only read
    # columns 2 (Cpy) and 3 (Cpp), which also line up with new_coeffs'
    # [Z, Y, Cpy, Cpp] layout used elsewhere.
    coeffs = output[:,8:12]
    poff = 2.5
    '''Step 2: Generate Pdash'''
    pdash = make_pdash(coeffs, poff)
    # print(pdash.shape)
    '''Setp 3: Generate Ydash'''
    ydash = make_ydash(pdash, coeffs, calibration_data)
    '''Step 4: Get Pitch Angle'''
    pvals = make_p(ydash,coeffs,calibration_data)
    p_angle = correct_p(pvals,poff)
    '''Step 5: Get Yaw Angle'''
    y_angle = make_ydash(p_angle, coeffs, calibration_data)
    '''Step 6: Get Cpts '''
    Cpts = get_Cpts(p_angle, y_angle, coeffs, calibration_data)
    rho = 1.225
    for k in range(len(merged_data)):
        output[k,14] = Cpts[k]
        output[k,15] =p_angle[k]
        output[k,16] = y_angle[k]
        V = np.sqrt(2*output[k,14]*np.abs(output[k,2]-output[k,7])/rho)
        output[k,17] = V
    return output
def sample_big_array(merged_data,calibration_data):
    '''Variant of make_big_array for the bundled sample data set.

    Repeated the name 'Merge Data' because replacing it would take really long -- it should actually be collected data.
    Differences from make_big_array: Pt and Ps are taken per-row from input
    columns 7 and 8 instead of being hard-coded, Z is shifted by +140 mm,
    poff is 4.5, and the spatial correction uses c=37 (Z passes) and c=35
    (Y passes).
    '''
    output = np.zeros((len(merged_data),18))
    for j in range(len(merged_data)):
        output[j,0] = merged_data[j,0] +140#Z
        output[j,1] = merged_data[j,1] #Y
        output[j,2] = merged_data[j,2] #P1
        output[j,3] = merged_data[j,3] #P2
        output[j,4] = merged_data[j,4] #P3
        output[j,5] = merged_data[j,5] #P4
        output[j,6] = merged_data[j,6] #P5
    '''-------------------'''
    output = spatial_resolutionP2P3(output,d=0,c=37)
    output = spatial_resolutionP4P5(output,d=0,c=35)
    '''-------------------'''
    for j in range(len(merged_data)):
        P1 = output[j,2]
        P2 = output[j,3]
        P3 = output[j,4]
        P4 = output[j,5]
        P5 = output[j,6]
        Pavg = (P2+P3+P4+P5)/4
        output[j,7] = Pavg #Pavg
        Pt = merged_data[j,7]
        output[j,8] = merged_data[j,7] #Pt
        Ps = merged_data[j,8]
        output[j,9] = merged_data[j,8] #Ps
        Cpy = -(P2-P3)/(P1-Pavg)
        Cpp = -(P4-P5)/(P1-Pavg)
        Cpt = (Pt-Pavg)/(P1-Pavg)
        Cps = (P1-Ps)/(P1-Pavg)
        # Cpts = (Pt-Ps)/(P1-Pavg)
        output[j,10] = Cpy
        output[j,11] = Cpp
        '''Review if necessary first'''
        output[j,12] = Cpt
        output[j,13] = Cps
        # output[j,14] = Cpts
    # [Pt, Ps, Cpy, Cpp]; helpers only read columns 2 and 3 (Cpy/Cpp).
    coeffs = output[:,8:12]
    poff = 4.5
    '''Step 2: Generate Pdash'''
    pdash = make_pdash(coeffs, poff)
    # print(pdash.shape)
    '''Setp 3: Generate Ydash'''
    ydash = make_ydash(pdash, coeffs, calibration_data)
    '''Step 4: Get Pitch Angle'''
    pvals = make_p(ydash,coeffs,calibration_data)
    p_angle = correct_p(pvals,poff)
    '''Step 5: Get Yaw Angle'''
    y_angle = make_ydash(p_angle, coeffs, calibration_data)
    '''Step 6: Get Cpts '''
    Cpts = get_Cpts(p_angle, y_angle, coeffs, calibration_data)
    rho = 1.225
    for k in range(len(merged_data)):
        output[k,14] = Cpts[k]
        output[k,15] =p_angle[k]
        output[k,16] = y_angle[k]
        V = np.sqrt(2*output[k,14]*np.abs(output[k,2]-output[k,7])/rho)
        output[k,17] = V
    return output
def make_SAMPLE_DATA_array(merged_data,calibration_data,a1=1,a2=3,a3=0,a4=2,a5=4,d=0,c=30):
    '''Build the (n, 18) results array for SAMPLE data with per-row Pt/Ps.

    Input columns: Z at 5, Y at 6, Pavg at 7, Pt at 8, Ps at 9; a1..a5
    select the pressure columns for P1..P5.  The spatial correction calls
    use c=29 (Z passes) and c=59 (Y passes) regardless of the c parameter,
    and rho is 1.17 here.  Output layout matches make_big_array.
    '''
    output = np.zeros((len(merged_data),18))
    for j in range(len(merged_data)):
        output[j,0] = merged_data[j,5] #Z
        output[j,1] = merged_data[j,6] #Y
        '''Check order'''
        output[j,2] = merged_data[j,a1] #P1
        output[j,3] = merged_data[j,a2] #P2
        output[j,4] = merged_data[j,a3] #P3
        output[j,5] = merged_data[j,a4] #P4
        output[j,6] = merged_data[j,a5] #P5
        output[j,7] = merged_data[j,7] #Pavg
        output[j,8] = merged_data[j,8] #Pt
        output[j,9] = merged_data[j,9] #Ps
    '''Apply Spatial Correction before continuing'''
    '''----------------------------------------'''
    output = spatial_resolutionP2P3(output,d,c=29)
    output = spatial_resolutionP4P5(output,d,c=59)
    '''----------------------------------------'''
    for k in range(len(merged_data)):
        P1 = output[k,2] #P1
        P2 = output[k,3] #P2
        P3 = output[k,4] #P3
        P4 = output[k,5] #P4
        P5 = output[k,6] #P5
        Pavg = output[k,7] #Pavg
        Ps = output[k,9] #Ps
        Pt = output[k,8] #Pt
        Cpy = -(P2-P3)/(P1-Pavg)
        Cpp = -(P4-P5)/(P1-Pavg)
        Cpt = (Pt-Pavg)/(P1-Pavg)
        Cps = (P1-Ps)/(P1-Pavg)
        output[k,10] = Cpy
        output[k,11] = Cpp
        output[k,12] = Cpt
        output[k,13] = Cps
    # [Pt, Ps, Cpy, Cpp]; helpers only read columns 2 and 3 (Cpy/Cpp).
    coeffs = output[:,8:12]
    poff = 2.5
    '''Step 2: Generate Pdash'''
    pdash = make_pdash(coeffs, poff)
    # print(pdash.shape)
    '''Setp 3: Generate Ydash'''
    ydash = make_ydash(pdash, coeffs, calibration_data)
    '''Step 4: Get Pitch Angle'''
    pvals = make_p(ydash,coeffs,calibration_data)
    p_angle = correct_p(pvals,poff)
    '''Step 5: Get Yaw Angle'''
    y_angle = make_ydash(p_angle, coeffs, calibration_data)
    '''Step 6: Get Cpts '''
    Cpts = get_Cpts(p_angle, y_angle, coeffs, calibration_data)
    rho = 1.17
    for k in range(len(merged_data)):
        output[k,14] = Cpts[k]
        output[k,15] =p_angle[k]
        output[k,16] = y_angle[k]
        V = np.sqrt(2*output[k,14]*np.abs(output[k,2]-output[k,7])/rho)
        output[k,17] = V
    return output
def do_test():
    """Load wind-tunnel data, probe positions and calibration via the importing module."""
    wind, pos, coeff = importing.testing()
    test_raw_data_sorted = wind
    position_raw_data = pos
    calibration_data = coeff
    return test_raw_data_sorted, position_raw_data, calibration_data
# test_raw_data_sorted, position_raw_data, calibration_data = do_test()
def use_sample_data():
    """Load the bundled sample collected/calibration datasets via importing."""
    collected = importing.sample_data('SAMPLE: Collected')
    calibration = importing.sample_data('SAMPLE: Calibration')
    return collected, calibration
def pressure_plot_for_report(pvals):
    """Plot the P1 trace, mark the minimum of every 30-sample pass, and
    return the average index spacing between consecutive minima.

    pvals is the (n, 5) pressure array from make_P; only column 0 (P1) is
    plotted.  The spacing is annotated on the figure as x-bar_min.
    """
    fig, ax = plt.subplots(figsize=(5,5))
    c = 30
    line, = ax.plot(pvals[:,0],label="P1")
    xout = []
    for k in range(int(len(pvals)/c)):
        # argmin within the pass, offset back to a global sample index.
        minval_x = pvals[c*k:c*k+c,0].argmin()
        ax.scatter(k*c+minval_x,pvals[k*c+minval_x,0])
        xout.append(k*c+minval_x)
        ax.annotate(str(k*c+minval_x),xy=(k*c+minval_x,pvals[k*c+minval_x,0]-4),xycoords='data')
    xvals = np.array(xout)
    avg = []
    for j in range(len(xvals)-1):
        diff = xvals[j+1]- xvals[j]
        avg.append(diff)
    avg = np.array(avg).mean()
    ax.annotate(r'Average distance between minimums, $\bar{x}_{min}$ ='+'{}'.format(avg),xy=(600,260),xycoords='data')
    # plt.figure('Pressure')
    # plt.plot(pvals[:,0],label="P1")
    # plt.plot(pvals[:,1],label="P2")
    # plt.plot(pvals[:,2],label="P3")
    # plt.plot(pvals[:,3],label="P4")
    # plt.plot(pvals[:,4],label="P5")
    # plt.legend()
    # '''Add pointers'''
    # line, = ax.plot(pvals[:,1],label="P2")
    # line, = ax.plot(pvals[:,2],label="P3")
    # line, = ax.plot(pvals[:,3],label="P4")
    # line, = ax.plot(pvals[:,4],label="P5")
    return avg
def get_Velocity(Cpts,testdataraw,rho=1.225):
    '''
    Convert total/static pressure coefficients into velocity magnitudes.

    Testdata raw will be used once again for this - use make_P to get it into the right shape

    Parameters
    ----------
    Cpts : 1-D array
        Pressure coefficient per measurement point (see get_Cpts).
    testdataraw : 2-D array
        Raw LabVIEW pressure data; reordered here via make_P into [P1..P5].
    rho : float, optional
        Air density [kg/m^3]. The default is 1.225.

    Returns
    -------
    output : 1-D numpy array
        V = sqrt(2 * Cpts * |P1 - Pavg| / rho) per point.
    '''
    pvals = make_P(testdataraw)
    output = np.zeros(len(Cpts))
    for j in range(len(Cpts)):
        # Pavg over the four peripheral holes; column 0 is the centre hole P1.
        pavg = (pvals[j,1]+pvals[j,2]+pvals[j,3]+pvals[j,4])/4
        output[j] = np.sqrt((2*Cpts[j]*np.abs(pvals[j,0]-pavg))/rho)
    return output
def sample_get_Velocity(Cpts, collected, rho=1.225):
    """Convert pressure coefficients to velocity magnitudes (sample data).

    V = sqrt(2 * Cpts * |P1 - Pavg| / rho), with P1 in column 2 and Pavg in
    column 7 of *collected*.
    """
    dynamic = np.abs(collected[:, 2] - collected[:, 7])
    return np.sqrt(2.0 * np.asarray(Cpts) * dynamic / rho)
def make_velocity_components(V, p, y):
    """Split velocity magnitudes into (tangential, radial, axial) parts.

    Angles p (pitch) and y (yaw) are in radians.  Returns the tuple
    (Vt, Vr, Vz) with Vt = V*cos(y)*cos(p), Vr = V*sin(y),
    Vz = V*cos(y)*sin(p).
    """
    V = np.asarray(V, dtype=float)
    p = np.asarray(p, dtype=float)
    y = np.asarray(y, dtype=float)
    cos_y = np.cos(y)
    return V * cos_y * np.cos(p), V * np.sin(y), V * cos_y * np.sin(p)
def get_Velocity_big(bigboy):
    """Extract [Z, Y, Vx, Vy, Vz] from the (n, 18) results array.

    Reads V from column 17 and the pitch/yaw angles (degrees) from columns
    15/16.  NOTE(review): Vt below is a velocity component, yet it is fed
    into cos()/sin() as if it were an angle when forming Vx/Vy -- this is
    presumably the reason for the 'might be wrong' remark; compare
    get_Velocity_big_alternate, which uses Vr/Vt directly.
    """
    output = np.zeros((len(bigboy),5)) ###Seems this one might be wrong###
    for j in range(len(bigboy)):
        output[j,0] = bigboy[j,0] #Z
        output[j,1] = bigboy[j,1] #Y
        V = bigboy[j,17]
        p = bigboy[j,15] * np.pi/180
        y = bigboy[j,16] * np.pi/180
        Vr = V*np.cos(y)*np.cos(p)
        Vt = V*np.sin(y)
        Vz = V*np.cos(y)*np.sin(p)
        Vx = Vr*np.cos(Vt)
        Vy = Vr*np.sin(Vt)
        # Vz = Vz
        output[j,2] = Vx
        output[j,3] = Vy
        output[j,4] = Vz
    return output
def get_Velocity_big_alternate(bigboy):
    """Extract [Z, Y, Vx, Vy, Vz] from the (n, 18) results array.

    Unlike get_Velocity_big, the radial/tangential components are used
    directly as Vx/Vy with no extra cos/sin projection.
    """
    bigboy = np.asarray(bigboy, dtype=float)
    out = np.zeros((len(bigboy), 5))
    out[:, 0] = bigboy[:, 0]   # Z
    out[:, 1] = bigboy[:, 1]   # Y
    V = bigboy[:, 17]
    pitch = bigboy[:, 15] * np.pi / 180
    yaw = bigboy[:, 16] * np.pi / 180
    out[:, 2] = V * np.cos(yaw) * np.cos(pitch)   # Vx (= Vr)
    out[:, 3] = V * np.sin(yaw)                   # Vy (= Vt)
    out[:, 4] = V * np.cos(yaw) * np.sin(pitch)   # Vz
    return out
def get_Velocity_Jono_version(bigboy, v=37):
    """Extract [Z, Y, Vx, Vy, Vz] assuming a fixed speed *v* everywhere.

    Same geometry as get_Velocity_big_alternate, but the magnitude column
    (17) is ignored and replaced by the constant v (default 37).
    """
    bigboy = np.asarray(bigboy, dtype=float)
    out = np.zeros((len(bigboy), 5))
    out[:, 0] = bigboy[:, 0]   # Z
    out[:, 1] = bigboy[:, 1]   # Y
    pitch = bigboy[:, 15] * np.pi / 180
    yaw = bigboy[:, 16] * np.pi / 180
    out[:, 2] = v * np.cos(yaw) * np.cos(pitch)   # Vx (= Vr)
    out[:, 3] = v * np.sin(yaw)                   # Vy (= Vt)
    out[:, 4] = v * np.cos(yaw) * np.sin(pitch)   # Vz
    return out
def downwash_correction(Vdata,d=3.2/1000,c=30):
    """Apply a first-order downwash correction to the Vy and Vz columns.

    Each component is shifted by delD * dVx/ds, where delD = 0.2*d (d is
    the probe diameter in metres) and the derivative is a forward finite
    difference of Vx along Z (for Vz) or along Y (for Vy).  The last point
    of every pass is copied uncorrected.  Returns a new (n, 5) array;
    Vdata itself is not modified.

    NOTE(review): the inner indexing uses a hard-coded stride of 30 while
    the loop counts use parameter c -- calling this with c != 30 will
    misindex the passes.
    """
    output = np.zeros((len(Vdata),5))
    # temp = np.zeros((len(Vdata),5))
    def first_derivative(Va,Vb,Z1,Z2):
        # Forward difference (Va - Vb) / (Z1 - Z2).
        returnval = (Va-Vb)/(Z1-Z2)
        return returnval
    # Vz first
    index = np.lexsort((Vdata[:,0],Vdata[:,1])) # First Y and then Z
    counter = 0
    delD = 0.2*d
    for j in range(c):
        for k in range(c-1):
            Vcurrent = Vdata[index[j*30+k],2] #Vx current
            Vnext = Vdata[index[j*30+k+1],2] #Vx next
            Zcurrent = Vdata[index[j*30+k],0] #Zval
            Znext = Vdata[index[j*30+k+1],0] #Znext
            val = first_derivative(Vcurrent, Vnext, Zcurrent, Znext) #f'
            output[index[j*30+k],4] = Vdata[index[j*30+k],4]+delD*val #Vz
            output[index[j*30+k],0] = Zcurrent #Z
            output[index[j*30+k],2] = Vcurrent #Vx value does not need to change
        output[index[j*30+29],4] = Vdata[index[j*30+29],4] #New Vz
        output[index[j*30+29],0] = Zcurrent #Z
        output[index[j*30+29],2] = Vcurrent #Vx value does not need to change
    index2 = np.lexsort((Vdata[:,1],Vdata[:,0])) #First Z and then Y
    for l in range(c):
        for s in range(c-1):
            Vcurrent = Vdata[index2[l*30+s],2] #Vx current
            Vnext = Vdata[index2[l*30+s+1],2] #Vx next
            Ycurrent = Vdata[index2[l*30+s],1] #Current Y val
            Ynext = Vdata[index2[l*30+s+1],1] #Next Y val
            val = first_derivative(Vcurrent, Vnext, Ycurrent, Ynext)
            output[index2[l*30+s],3] = Vdata[index2[l*30+s],3] + delD*val #New Vy
            output[index2[l*30+s],1] = Ycurrent #Y
        output[index2[l*30+29],3] = Vdata[index2[l*30+29],3]
        output[index2[l*30+29],1] = Ycurrent #Y
    return output
def use_seaborn(vold,vnew):
    """Plot distributions of the (corrected - original) Vy and Vz changes.

    vold/vnew are (n, 5) velocity arrays before/after downwash_correction;
    columns 3 (Vy) and 4 (Vz) are compared.  Signed differences are kept
    (no abs), so the plots show the direction of the correction too.
    """
    import seaborn as sns
    Vz_diff = np.zeros(len(vold))
    Vy_diff = np.zeros(len(vold))
    for j in range(len(vold)):
        vyu = vold[j,3]
        vy = vnew[j,3]
        Vy_diff[j] = vy-vyu #not using abs
        vzu = vold[j,4]
        vz = vnew[j,4]
        Vz_diff[j] = vz-vzu
    fig = plt.figure("Vy")
    sns.distplot(Vy_diff, label=r"$V_y$")
    plt.title(r"Distribution of the changes between original and downwash corrected $V_y$",fontsize=28)
    plt.xlabel("Value Difference",fontsize=26)
    plt.ylabel("Frequency",fontsize=26)
    plt.legend(fontsize=22)
    fig = plt.figure("Vz")
    sns.distplot(Vz_diff, label=r"$V_z$")
    plt.title(r"Distribution of the changes between original and downwash corrected $V_z$",fontsize=28)
    plt.xlabel("Value Difference",fontsize=26)
    plt.ylabel("Frequency",fontsize=26)
    plt.legend(fontsize=22)
    return None
def do_all(merged_data,calibration_data):
    """Convenience wrapper: build the full results array, then its velocity components."""
    bigboy = make_big_array(merged_data, calibration_data)
    V_data = get_Velocity_big(bigboy)
    return bigboy, V_data
def show_V(Vin):
    '''
    Quiver plot of the measured velocity field in the Z-Y plane.

    Parameters
    ----------
    Vin : Numpy Array
        Z,Y,V1,V2,V3.

    Returns
    -------
    None - only makes a plot of the data
    '''
    Z = Vin[:,0]
    Y = Vin[:,1]
    # V1 = Vin[:,2]
    # u is the axial component with ~39 m/s subtracted, but it is not drawn
    # below -- only w and v feed the quiver call.
    u = Vin[:,2]-39.0
    v = Vin[:,3]
    w = Vin[:,4]
    # V2 = Vin[:,3]
    # V3 = Vin[:,4]
    fig = plt.figure()
    plt.quiver(Z, Y,w,v,scale_units='xy')
    plt.show()
    return None
def try_3D_plot(Vin):
    """3-D quiver plot of the velocity field at a constant X = 1 plane.

    WARNING: u = Vin[:,2] is a numpy view, so subtracting 37 in the loop
    below mutates the caller's array in place.
    """
    from mpl_toolkits.mplot3d import Axes3D
    X = np.zeros(len(Vin))
    for k in range(len(Vin)):
        X[k] = 1
    Y = Vin[:,1]
    Z = Vin[:,0]
    u = Vin[:,2]
    for j in range(len(u)):
        # Subtract the freestream so only the disturbance is plotted.
        u[j] = u[j] - 37
    v = Vin[:,3]
    w = Vin[:,4]
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.quiver(X,Y,Z,u,v,w)
    plt.show()
    return None
def basic_quiver(data):
    '''
    Plots a simple quiver plot plus a line plot of Vx.

    Parameters
    ----------
    data : Array
        In the form: Z, Y, Vx, Vy, Vz.

    Returns
    -------
    None.
    '''
    plt.figure("Quiver PLot")
    Z = data[:,0]
    Y = data[:,1]
    u = data[:,2] #Vx
    v = data[:,3] #Vy
    w = data[:,4] #Vz
    # Arrows show the in-plane (Vz, Vy) components at each (Z, Y) point.
    plt.quiver(Z,Y,w,v,scale_units='xy',linewidth=0.00002, width=0.0008)
    plt.figure('Vx')
    plt.plot(u,label="Vx")
    plt.legend()
    plt.show()
67a941b7ff7fe9c9786de722aeb5d7305739c608 | Python | lawiet019/foodie | /foodieProject/users/utils.py | UTF-8 | 7,115 | 2.859375 | 3 | [] | no_license | import random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from string import ascii_letters,digits
from .models import UserProfile
from django.core.mail import send_mail
from django.conf import settings
import configparser
from datetime import datetime
from .models import EmailVerifyRecord
import jwt
from datetime import datetime,timedelta
_letter_cases = "abcdefghjkmnpqrstuvwxy" # 小写字母,去除可能干扰的i,l,o,z
_upper_cases = _letter_cases.upper() # uppercase alpha
_numbers = ''.join(map(str, range(3, 10))) # number
init_chars = ''.join((_letter_cases, _upper_cases, _numbers))
def get_chars(chars,length):
    """Pick *length* distinct characters from *chars* (random.sample: no repeats)."""
    return random.sample(chars, length)
def create_validate_code(size=(120, 50),
                         chars=init_chars,
                         img_type="GIF",
                         mode="RGB",
                         bg_color=(255, 255, 255),
                         fg_color=(0, 0, 255),
                         font_size=18,
                         font_type="static/Fonts/Arvo-Regular.ttf",
                         length=4,
                         draw_lines=True,
                         n_line=(1, 2),
                         draw_points=True,
                         point_chance=2):
    """
    Generate a CAPTCHA image.
    @param size: image size as (width, height), default (120, 50)
    @param chars: allowed character set, given as a string
    @param img_type: image save format, default GIF; options GIF, JPEG, TIFF, PNG
    @param mode: image mode, default RGB
    @param bg_color: background colour, default white
    @param fg_color: foreground (character) colour, default blue #0000FF
    @param font_size: CAPTCHA font size
    @param font_type: CAPTCHA font file path
    @param length: number of CAPTCHA characters
    @param draw_lines: whether to draw interference lines
    @param n_line: range for the number of interference lines, as a tuple,
                   default (1, 2); only used when draw_lines is True
    @param draw_points: whether to draw interference points
    @param point_chance: probability of an interference point, range [0, 100]
    @return: [0]: PIL Image instance
    @return: [1]: the string of characters drawn in the CAPTCHA image
    """
    width, height = size # width and height in pixels
    # Create the image canvas
    img = Image.new(mode, size, bg_color)
    draw = ImageDraw.Draw(img) # create the drawing handle
    def create_lines():
        """Draw the interference (noise) lines."""
        line_num = random.randint(*n_line) # number of interference lines
        for i in range(line_num):
            # start point
            begin = (random.randint(0, size[0]), random.randint(0, size[1]))
            # end point
            end = (random.randint(0, size[0]), random.randint(0, size[1]))
            draw.line([begin, end], fill=(0, 0, 0))
    def create_points():
        """Draw the interference (noise) points."""
        chance = min(100, max(0, int(point_chance))) # clamp to [0, 100]
        for w in range(width):
            for h in range(height):
                tmp = random.randint(0, 100)
                if tmp > 100 - chance:
                    draw.point((w, h), fill=(0, 0, 0))
    def create_strs():
        """Draw the CAPTCHA characters; returns the plain character string."""
        c_chars = get_chars(chars,length)
        strs = ' %s ' % ' '.join(c_chars) # separate each character with spaces
        font = ImageFont.truetype(font_type, font_size)
        font_width, font_height = font.getsize(strs)
        draw.text(((width - font_width) / 3, (height - font_height) / 3),
                  strs, font=font, fill=fg_color)
        return ''.join(c_chars)
    if draw_lines:
        create_lines()
    if draw_points:
        create_points()
    strs = create_strs()
    # Perspective-distortion parameters (slightly randomised each call)
    params = [1 - float(random.randint(1, 2)) / 100,
              0,
              0,
              0,
              1 - float(random.randint(1, 10)) / 100,
              float(random.randint(1, 2)) / 500,
              0.001,
              float(random.randint(1, 2)) / 500
              ]
    img = img.transform(size, Image.PERSPECTIVE, params) # apply the distortion
    img = img.filter(ImageFilter.EDGE_ENHANCE_MORE) # filter: stronger edge enhancement
    return img, strs
def code_to_lower(code):
    """Return *code* with every character lower-cased.

    Bug fix: the previous implementation appended the whole lower-cased
    string once per character (``res += code.lower()`` inside the loop),
    returning a string len(code) times too long; ``str.lower`` already
    does the intended conversion in one call.
    """
    return code.lower()
# 发送注册邮件
def send_register_email(email, send_type):
    """Send an account-activation or password-reset email.

    send_type is "register" (activation link) or "forget" (reset link).
    A unique random 16-character code is stored in EmailVerifyRecord and
    embedded in the link.  Returns True when send_mail reports success;
    otherwise returns None implicitly.
    """
    # Save a record to the database before sending so the link's code can
    # be looked up later.
    # Create an EmailVerifyRecord instance (only if one does not exist yet)
    if not EmailVerifyRecord.objects.filter(email = email,sendType = send_type).exists():
        email_record = EmailVerifyRecord.objects.create(email = email,sendType = send_type)
    # Generate a random code for the link; retry until it is unique.
    while True:
        code = "".join(get_chars(init_chars,16))
        if not EmailVerifyRecord.objects.filter(code=code).exists():
            break
    EmailVerifyRecord.objects.filter(email = email,sendType = send_type).update(code = code,sendTime = datetime.now())
    EMAIL_FROM = settings.EMAIL_HOST_USER
    # Compose the email content:
    email_title = ""
    email_body = ""
    if send_type == "register":
        email_title = "foodie - activate your account"
        print(code)
        email_body = "Please click the link below to activate your account: http://127.0.0.1:3000/users/activation/{0}".format(code)
        # Send with Django's built-in send_mail; arguments are: subject,
        # body, from-address, recipients (a list of strings).
        send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])
        # If sending succeeded
        if send_status:
            return True
    if send_type == "forget":
        email_title = "foodie- retrieve password"
        email_body = "Please click the link below to retrieve your password: http://127.0.0.1:4000/reset/{0}".format(code)
        send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])
        # If sending succeeded
        if send_status:
            return True
# 0 verified
# 1 expired
# 2 invalid
# 3, version id changed, become invalid
def authentication(token):
    """Validate a JWT and look up the matching user.

    Returns (status, user, payload):
      0 - verified: *user* is the UserProfile instance, *payload* the
          decoded token
      1 - token expired
      2 - token invalid (bad signature / malformed)
      3 - stale: no user with that email, or the user's versionID no
          longer matches the one baked into the token

    Bug fixes vs the previous version: QuerySet objects have no
    ``versionID`` attribute (use ``.first()`` to get an instance), the
    decoded payload is a dict (``v.data.versionID`` raised
    AttributeError), and generate_token stores the version under the key
    'version', not 'versionID'.
    """
    try:
        payload = jwt.decode(token, settings.SECRET_KEY, algorithms=["HS256"])
    except jwt.ExpiredSignatureError:
        return 1, None, None
    except Exception:
        # Any other decode failure: tampered signature, malformed token...
        return 2, None, None
    data = payload.get("data") or {}
    # .first() returns a model instance or None instead of a QuerySet.
    user = UserProfile.objects.filter(email=data.get("email")).first()
    # generate_token stores the user's versionID under data['version'];
    # a mismatch means the token was invalidated (e.g. version bump).
    if user is not None and user.versionID == data.get("version"):
        return 0, user, payload
    return 3, None, None
# generate the token
#type -1 access token
#type -2 refresh token
def generate_token(user, type):
    """Create a signed (HS256) JWT for *user*.

    type 1 -> access token, expires in 2 hours; any other value ->
    refresh token, expires in 30 days.  The payload records the user's
    email and versionID (under 'version') so all tokens can be
    invalidated at once by bumping the user's versionID.
    """
    if type ==1:
        token =jwt.encode({'exp': datetime.now() + timedelta(hours=2),'data':{'email':user.email,'version':user.versionID,"type":1}},settings.SECRET_KEY,algorithm='HS256')
    else:
        token =jwt.encode({'exp': datetime.now() + timedelta(days=30),'data':{'email':user.email,'version':user.versionID,"type":2}},settings.SECRET_KEY,algorithm='HS256')
    return token
| true |
0e0888d583a11f2bb48108675c7f1dcf40ca6835 | Python | Lodewic/DuPont-hackathon | /src/enzyme_hackathon/utils.py | UTF-8 | 5,358 | 2.9375 | 3 | [] | no_license | import numpy as np
import tensorflow as tf
import pandas as pd
from keras import backend as K
from keras import Model
from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization
from keras.optimizers import Adam
from keras.regularizers import l2
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
from collections import OrderedDict
import random
AA_LABELS = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S',
'T', 'V', 'W', 'Y']
def one_hot_dict(labels: list = None) -> dict:
    """Map each label to its one-hot numpy row vector.

    Defaults to the 20 standard amino-acid letters (AA_LABELS).  Labels
    are integer-encoded with LabelEncoder first, then expanded to one-hot
    rows, so each label's vector position follows LabelEncoder's ordering.
    """
    labels = AA_LABELS if labels is None else labels
    encoding = (LabelEncoder()
                .fit_transform(labels)
                .reshape(len(labels), 1))
    one_hot = OneHotEncoder(sparse=False, categories=[range(len(labels))]).fit_transform(encoding)
    return dict(zip(labels, one_hot))
def one_hot_encode(sequence: list or str, encoding: dict, max_len: int = None) -> np.array:
    """Encode one sequence as a (len(sequence), n_symbols) one-hot matrix.

    When *max_len* is given, all-zero rows are appended so the result has
    max_len rows (the sequence must be non-empty and not exceed max_len).
    """
    encoded = np.array([encoding[symbol] for symbol in sequence])
    if max_len is None:
        return encoded
    width = len(encoding[sequence[0]])
    pad_rows = np.zeros(shape=(max_len - len(sequence), width))
    return np.concatenate([encoded, pad_rows])
def one_hot_encode_sequences(sequences: list, encoding: dict) -> np.array:
    """One-hot encode a batch of equal-length sequences into a 3-D array.

    Raises ValueError if the sequences differ in length, since a ragged
    batch cannot form a rectangular array.
    """
    if len(set(len(seq) for seq in sequences)) > 1:
        raise ValueError('All sequences must have the same length!')
    return np.array([one_hot_encode(seq, encoding) for seq in sequences])
def one_hot_encode_screening_data(data: pd.DataFrame, seed: int = 123):
    """Prepares the train and test data used.

    One-hot encodes the 'sequence' column and z-scores each response
    column ('productivity', 'performance', 'stability') with NaN-aware
    statistics; the (mean, std) pair is kept under 'norm' so predictions
    can be de-normalised later.  The same random_state is reused for every
    train_test_split so the x and y splits stay aligned row-for-row.

    :return: train and test data and values
    """
    one_hot_encoding = one_hot_dict()
    sequences = one_hot_encode_sequences(data['sequence'].values, one_hot_encoding)
    components = dict(x=train_test_split(sequences, random_state=seed), y=OrderedDict())
    for out in ['productivity', 'performance', 'stability']:
        values = data[out].values
        values = values.reshape(len(values), 1)
        mean, std = np.nanmean(values), np.nanstd(values)
        components['y'][out] = dict()
        components['y'][out]['data'] = train_test_split((values - mean) / std, random_state=seed)
        components['y'][out]['norm'] = mean, std
    return components
def nan_mse(y_true, y_pred):
    """Custom loss function which computes the mean squared error ignoring nan values.

    :param y_true: tf tensor containing the true response values
    :param y_pred: tf tensor containing the predicted response values
    :return: mean squared error for those values in which y_true is not nan
    """
    nan_mask = tf.logical_not(tf.is_nan(y_true))
    n_valid = K.sum(K.cast(nan_mask, tf.float32))
    zeros = K.zeros_like(y_true)
    # Zero out NaN positions in both tensors so they contribute nothing to
    # the squared-error sum; dividing by n_valid (not the tensor size)
    # yields the mean over valid entries only.
    y_true = tf.where(nan_mask, y_true, zeros)
    y_pred = tf.where(nan_mask, y_pred, zeros)
    return tf.divide(K.sum(K.square(y_true - y_pred)), n_valid)
def r_squared(y_true, y_pred):
    """Custom metric for keras. Computes R^2 (1 - (RSS/TSS)).

    NaNs in y_true are replaced by zeros in both tensors before the sums;
    negative R^2 values (RSS > TSS) are clipped to 0.
    """
    nan_mask = tf.logical_not(tf.is_nan(y_true))
    zeros = K.zeros_like(y_true)
    y_true = tf.where(nan_mask, y_true, zeros)
    y_pred = tf.where(nan_mask, y_pred, zeros)
    ss_res = K.sum(K.square(y_true - y_pred), axis=0)  # Compute residuals
    ss_tot = K.sum(K.square(y_true - K.mean(y_true, axis=0)), axis=0)  # Compute total sum squares
    # K.epsilon() guards against division by zero when TSS is 0.
    r2 = 1 - ss_res / (ss_tot + K.epsilon())
    # If residuals > TSS the result will be negative, make those values 0
    neg_indices = K.less_equal(r2, 0)
    zeros = tf.zeros(shape=tf.shape(r2), dtype=tf.float32)
    r2 = tf.where(neg_indices, zeros, r2)
    return r2  # In multitask cases, it will return the average r2 for each of the tasks
def create_fc_model(data: dict):
    """Create a multi-task fully connected neural network model

    A shared Flatten + Dense(50) trunk feeds one Dense(20) -> Dense(1)
    head per task in data['y'].  Compiled with the NaN-aware MSE loss and
    the R^2 metric defined above.

    :param data: dict produced by one_hot_encode_screening_data
    :return: compiled keras Model
    """
    sequence_input = Input(shape=data['x'][0][0].shape, name='sequence')
    sequence = Flatten()(sequence_input)
    sequence = Dense(50, activation='relu', kernel_regularizer=l2(1.1))(sequence)
    sequence = Dropout(.1)(sequence)
    outputs = []
    for task, _ in data['y'].items():
        # One small regression head per output task, named after the task.
        t = Dense(20, activation='relu', kernel_regularizer=l2(1.1))(sequence)
        t = Dropout(.1)(t)
        t = BatchNormalization()(t)
        outputs.append(Dense(1, name=task)(t))
    model = Model(inputs=[sequence_input], outputs=outputs)
    model.compile(loss=nan_mse, optimizer=Adam(lr=.0001), metrics=[r_squared])
    return model
def predict(variant, model, data, one_hot_encoding):
    """Predict the screening outputs for a single sequence variant.

    The keras model emits standardised values; each task's prediction is
    de-normalised with the (mean, std) pair stored in
    data['y'][task]['norm'].  Returns a 1-D array, one value per task.
    """
    oh_variant = one_hot_encode_sequences([variant], one_hot_encoding)
    predicted = model.predict(oh_variant)
    # p is one task's raw prediction; d['norm'] holds its (mean, std).
    return np.squeeze(np.array([(p * d['norm'][1]) + d['norm'][0]
                                for p, (_, d) in zip(predicted, data['y'].items())]))
def generate_variant(reference: str, model: Model, data: dict, alt_residues: list) -> pd.Series:
    """Randomly mutate *reference* and score the result with *model*.

    Between 2 and len(reference)//15 positions are mutated; positions are
    drawn with replacement (random.choices), so fewer distinct mutations
    may occur.  NOTE(review): despite the ``-> pd.Series`` annotation,
    this returns a (variant_list, Series) tuple -- see the return
    statement.
    """
    one_hot_encoding = one_hot_dict()
    variant = list(reference)
    n_mutations = random.randint(2, len(reference) // 15)
    mutate_at = random.choices(list(range(len(reference))), k=n_mutations)
    for pos in mutate_at:
        variant[pos] = random.choice(alt_residues)
    return (variant, pd.Series(predict(variant, model, data, one_hot_encoding), index=list(data['y'])))
| true |
370f3d6f2dec1fb1d4ff280930b831930b079b1e | Python | nileshmahale03/Python | /Python/5 Tuples.py | UTF-8 | 1,084 | 4.3125 | 4 | [
"MIT"
] | permissive |
#Tuple
#ordered
#indexed
#Immutable
#Faster than list
# A tuple literal: ordered, indexed, immutable, and faster than a list.
t = ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
print(t)
print(type(t))
#t.append(20) AttributeError: 'tuple' object has no attribute 'append'
print(t[2])      # indexing
print(t[-1])     # negative indexing (last element)
print(t[1:3])    # slicing returns a new tuple
print(len(t))
print(t.count("Monday"))
print(t.index("Friday"))
#Problem:
"""
Given a tuple A , find if all elements of tuple are different or not.
Example 1:
Input:
A = (1, 2, 3, 4, 5, 4)
Output:
Not Distinct
Example 2:
Input:
A = (1, 2, 3, 4, 5)
Output:
Distinct
"""
# Distinctness check: a set drops duplicates, so the lengths differ
# exactly when A contains a repeated element.
A = (1, 2, 3, 4)
s = set(A)
if len(A) != len(s):
    output = "Not Distinct"
elif len(A) == len(s):
    output = "Distinct"
print(output)
print("#######################################################")
#Problem:
"""
Given a tuple A with distinct elements and an integer X, find the index position of X. Assume to have X in the tuple always.
Example 1:
Input:
A = (1, 2, 3, 4, 5)
X = 3
Output:
2
Example 2:
Input:
A = (3, 2, 1, 5, 4)
X = 5
Output:
3
"""
A = (1, 2, 3, 4, 5)
X = 3
for i in range(len(A)):
if A[i] == X:
print(i)
| true |
88f0c96ae54c8810920b11236cc288b05059c353 | Python | dantin/daylight | /dcp/002/solution.py | UTF-8 | 1,594 | 3.96875 | 4 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
class Solution():
    """Product of the array except self.

    Idea: for index i the answer is (product of everything to the left of
    nums[i]) times (product of everything to the right of nums[i]).
    """
    def crack(self, nums):
        """Single result list, running accumulators.

        Time Complexity: O(n)
        Space Complexity: O(n)
        Auxiliary Space: O(1)
        """
        result = [1] * len(nums)
        running = 1
        # Forward sweep: result[i] = product of nums[0..i-1].
        for idx, value in enumerate(nums):
            result[idx] = running
            running *= value
        running = 1
        # Backward sweep: fold in the product of nums[i+1..].
        for idx in range(len(nums) - 1, -1, -1):
            result[idx] *= running
            running *= nums[idx]
        return result

    def crack2(self, nums):
        """Explicit prefix and suffix product tables.

        Time Complexity: O(n)
        Space Complexity: O(n)
        Auxiliary Space: O(n)
        """
        size = len(nums)
        prefix = [1] * size
        suffix = [1] * size
        for idx in range(1, size):
            prefix[idx] = nums[idx - 1] * prefix[idx - 1]
        for idx in range(size - 2, -1, -1):
            suffix[idx] = nums[idx + 1] * suffix[idx + 1]
        return [a * b for a, b in zip(prefix, suffix)]
if __name__ == '__main__':
    # Smoke test: run crack() over a couple of sample inputs and print the results.
    inputs = [
        dict(nums=[1, 2, 3, 4, 5] ),
        dict(nums=[3, 2, 1]),
    ]
    for data in inputs:
        print()
        nums = data['nums']
        solver = Solution()
        print('  Input:', nums)
        answer = solver.crack(nums)
        print(' Output:', answer)
| true |
43e9e072be0561a97cd91948a3b4430e95d25ed9 | Python | starbt/pic_link | /pic_link.py | UTF-8 | 2,593 | 2.71875 | 3 | [] | no_license | from __future__ import division
from PIL import Image
import numpy
import numexpr
import os
import os.path
import random
path = r'/home/xcv/learning_python/scrapy/picture'  # directory scanned for tile photos
bigPhoto = r'/home/xcv/learning_python/scrapy/big.jpg'  # base image the mosaic is built over
aval = []  # tile photo paths, filled in by getAllPhotos()
W_num = 25  # tiles per row
H_num = 25  # tiles per column
W_size = 360  # tile width in pixels
H_size = 640  # tile height in pixels
alpha = 0.3  # blend weight of each tile over the dimmed base image
# Collect every photo under `path` (recursively) into the global `aval` list.
def getAllPhotos():
    """Walk the configured `path` and append every .jpg/.png file to `aval`.

    The extension check is case-insensitive so files named e.g. PHOTO.JPG
    are no longer silently skipped (the original compared exact lowercase).
    """
    for parent, dirnames, filenames in os.walk(path):
        for filename in filenames:
            endName = os.path.splitext(filename)[-1].lower()
            if endName in ('.jpg', '.png'):
                aval.append(os.path.join(parent, filename))
# Normalise a photo to one uniform portrait tile size.
def transfer(img_path, dst_width, dst_height):
    # Force RGBA so every tile shares the same channel layout.
    im = Image.open(img_path)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    s_w, s_h = im.size
    # Landscape images are rotated to portrait before resizing.
    if s_w > s_h:
        im = im.rotate(90)
    resized_img = im.resize((dst_width, dst_height), Image.ANTIALIAS)
    # NOTE(review): the crop box equals the resize box, so this looks like a no-op — confirm intent.
    resized_img = resized_img.crop((0, 0, dst_width, dst_height))
    return resized_img
# Build the photo mosaic: dim the base image, then blend one random tile into each cell.
def link_pics():
    # Full mosaic dimensions in pixels.
    iW_size = W_num * W_size
    iH_size = H_num * H_size
    I = numpy.array(transfer(bigPhoto, iW_size, iH_size))
    #I = numexpr.evaluate("""I*(1-alpha)""")
    # First pass: scale every cell of the base image down by (1 - alpha).
    for i in range(W_num):
        for j in range(H_num):
            temp = I[(j*H_size):((j+1)*H_size), (i*W_size):((i+1)*W_size)]
            res = numexpr.evaluate("""temp*(1-alpha)""")
            I[(j*H_size):((j+1)*H_size), (i*W_size):((i+1)*W_size)] = res
    # Second pass: add a randomly chosen tile, weighted by alpha, into each cell.
    for i in range(W_num):
        for j in range(H_num):
            SH = I[(j*H_size):((j+1)*H_size), (i*W_size):((i+1)*W_size)]
            DA = transfer(random.choice(aval), W_size, H_size)
            res = numexpr.evaluate("""SH+DA*alpha""")
            I[(j*H_size):((j+1)*H_size), (i*W_size):((i+1)*W_size)] = res
    # Brighten by 1.5x per channel and save the result.
    img = Image.fromarray(I.astype(numpy.uint8))
    img = img.point(lambda i : i * 1.5)
    img.save('new_image.jpg')
# Second algorithm: multiplies base cells by the tile instead of alpha-blending.
# It can run out of memory here, probably because the computation is too large.
def link_pics2():
    iW_size = W_num * W_size
    iH_size = H_num * H_size
    # * 1.0 promotes the array to float for the per-cell multiplication below.
    I = numpy.array(transfer(bigPhoto, iW_size, iH_size)) * 1.0
    for i in range(W_num):
        for j in range(H_num):
            s = random.choice(aval)
            res = I[ j*H_size:(j+1)*H_size, i*W_size:(i+1)*W_size] * numpy.array(transfer(s, W_size, H_size))/255
            I[ j*H_size:(j+1)*H_size, i*W_size:(i+1)*W_size] = res
    img = Image.fromarray(I.astype(numpy.uint8))
    img = img.point(lambda i : i * 1.5)
    img.save("createNevImg_past.jpg")
if __name__ == '__main__':
    # Gather the tile photos, then render and save the mosaic.
    getAllPhotos()
    link_pics()
| true |
520cc3699a1fc2f9f7252fa0728c936382f90cf5 | Python | zsmountain/lintcode | /python/stack_queue_hash_heap/545_top_k_largest_numbers_ii.py | UTF-8 | 2,502 | 4.25 | 4 | [] | no_license | '''
Implement a data structure, provide two interfaces:
add(number). Add a new number in the data structure.
topk(). Return the top k largest numbers in this data structure. k is given when we create the data structure.
Have you met this question in a real interview?
Example
s = new Solution(3);
>> create a new data structure.
s.add(3)
s.add(10)
s.topk()
>> return [10, 3]
s.add(1000)
s.add(-99)
s.topk()
>> return [1000, 10, 3]
s.add(4)
s.topk()
>> return [1000, 10, 4]
s.add(100)
s.topk()
>> return [1000, 100, 10]
'''
class Solution:
    """Streaming container tracking the k largest numbers seen so far.

    The original implementation hand-rolled a fixed-size binary min-heap in a
    list (with manual sift-up/sift-down); this version keeps the exact same
    external behaviour but delegates to the standard-library ``heapq``, which
    is shorter, faster, and already well tested. The heap's root is always
    the smallest retained value, so anything smaller can be evicted in one
    O(log k) step once k values are held.
    """
    def __init__(self, k):
        """
        @param: k: An integer — how many of the largest values to retain.
        """
        self.k = k
        # Min-heap (via heapq) holding at most k numbers.
        self.data = []

    def add(self, num):
        """
        @param: num: Number to be added
        @return: nothing
        """
        import heapq
        if len(self.data) < self.k:
            heapq.heappush(self.data, num)
        else:
            # Heap full: push the new value and drop the smallest in one step.
            # (When num is smaller than the root this is a no-op, as before.)
            heapq.heappushpop(self.data, num)

    def pop(self):
        """Remove and return the smallest retained number.

        Raises Exception('pop from empty') when nothing is stored, matching
        the original hand-rolled heap's behaviour.
        """
        import heapq
        if not self.data:
            raise Exception('pop from empty')
        return heapq.heappop(self.data)

    def topk(self):
        """
        @return: Top k element, largest first.
        """
        return sorted(self.data, reverse=True)
# Demo run matching the problem statement's example; expected output in comments.
s = Solution(3)
s.add(3)
s.add(10)
print(s.topk()) # [10, 3]
s.add(1000)
s.add(-99)
print(s.topk()) #[1000, 10, 3]
s.add(4)
print(s.topk()) # [1000, 10, 4]
s.add(100)
print(s.topk()) #[1000, 100, 10]
| true |
493ed3820055157e7531dfba8d6fbcf537f812dc | Python | cRAN-cg/Competitive | /code/python/basic/a_very_big_sum.py | UTF-8 | 141 | 3.09375 | 3 | [] | no_license | #!/usr/bin/python3 env
import sys
# Read the element count (only consumed, not otherwise used) and the
# space-separated integers from stdin; the sum is printed afterwards.
n = int(input().strip())
arr = [int(arr_vals) for arr_vals in input().strip().split(" ")]
print(sum(arr)) | true |
817a54ec999d608bc04b607a1f00ff58db3dc290 | Python | ylashin/deep-learning-workshop | /Sample10/TrainLocalModel.py | UTF-8 | 916 | 3.09375 | 3 | [] | no_license | import pickle
import os
import pandas
import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
# create the outputs folder
os.makedirs('./outputs', exist_ok=True)
# load input dataset from a DataPrep package as a pandas DataFrame
inputDf = pandas.read_csv('data.csv')
# load features and labels
X = inputDf[['Attr1', 'Attr2']].values
Y = inputDf['Identity'].values
# split data 70%-30% into training set and test set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)
# train a support vector machine model on the training set
# (default SVC hyperparameters; random_state above makes the split reproducible)
model = SVC()
model.fit(X_train, Y_train)
print (model)
# evaluate the test set
accuracy = model.score(X_test, Y_test)
print ("Accuracy is {}".format(accuracy))
# serialize the model on disk
# NOTE(review): pickle files are only safe to load from trusted sources.
print ("Export the model to model.pkl")
f = open('./outputs/model.pkl', 'wb')
pickle.dump(model, f)
f.close() | true |
c0694a38e64600b75757cebec9d79df815fdaf7b | Python | deggs7/py-practice | /sweet/attr_pass.py | UTF-8 | 807 | 3.1875 | 3 | [
"Unlicense"
] | permissive |
class Test(object):
    # Class attribute used as the default name; change() below overwrites it per-instance.
    name = 'origin'
t = Test()
# One value of each common type, to show which ones change() can mutate in place.
# NOTE(review): the name `all` shadows the builtin all().
all = [ 'a',
        1,
        t,
        ['a', 'b', 'c'],
        ('x', 'y', 'z'),
        {
            'name': 'abc',
            'desc': 'xyc'
        }
      ]
# Python 2 demo of pass-by-object-reference: rebinding `n` (str/int/tuple) is
# invisible to the caller, while mutating it in place (list/dict/instance) is not.
def change(n):
    print id(n)
    if type(n) == str:
        n = 'b'            # rebinding only: caller unaffected
    elif type(n) == int:
        n = 2              # rebinding only: caller unaffected
    elif type(n) == list:
        n.append('d')      # in-place mutation: caller sees it
    elif type(n) == tuple:
        n = ('b', 'c', 'd')  # rebinding only: caller unaffected
    elif type(n) == dict:
        n['update'] = 'ok'   # in-place mutation: caller sees it
    elif type(n) == Test:
        n.name = 'okokok'    # in-place mutation: caller sees it
    print id(n)
# Print each value before and after change() to show which types were mutated.
for x in all:
    if type(x) == Test:
        print x.name
    else:
        print x
    change(x)
    print '-----------function------------'
    if type(x) == Test:
        print x.name
    else:
        print x
    print '==============================='
| true |
86c4b48c5cc516f538a3b442bfdba6df5f3e64ac | Python | kennethnym/covid19-alarm | /server/routes/alarms/daily_brief.py | UTF-8 | 2,582 | 3.25 | 3 | [
"MIT"
] | permissive | """
This module handles daily brief processing.
"""
import logging
import datetime
from typing import Dict, Any
import pyttsx3
from server.api.weather import fetch_weather
from server.api.news import fetch_news_headlines
from server.api.covid import fetch_covid_data
__speech_engine = pyttsx3.init()
def daily_brief(alarm_info: Dict[str, Any]):
    """
    Gives the user a brief of the current weather, the top news, and the local covid infection rate.

    :param alarm_info: dict with a "title" plus "include_weather" and
        "include_news" flags that toggle the optional sections; the covid
        section is always included.
    """
    logging.info("Daily brief initiated on %s.", datetime.datetime.now())
    alarm_title = alarm_info["title"]
    # Assemble the spoken text; optional sections collapse to empty strings.
    brief_message = f"""
    Hello! This is a scheduled daily brief titled: {alarm_title}.
    {__covid_brief()}
    {__weather_brief() if alarm_info["include_weather"] else ""}
    {__news_brief() if alarm_info["include_news"] else ""}
    This is the end of your briefing. Have a nice day.
    """
    logging.info("brief message: %s", brief_message)
    # Speak the brief through the module-level pyttsx3 engine (blocks until done).
    __speech_engine.say(brief_message)
    __speech_engine.runAndWait()
def __covid_brief() -> str:
    """
    Generates a brief message about the latest covid19 data.

    Falls back to an apology sentence if fetching/unpacking the data fails
    for any reason, so the daily brief as a whole never crashes here.
    """
    try:
        (
            is_latest_covid_data_available,
            _,
            new_cases,
            cumulative_cases,
            new_deaths,
            cumulative_deaths,
        ) = fetch_covid_data()
        return f"""
        First, some Covid-19 update.
        {'Today, ' if is_latest_covid_data_available else 'Latest data is not available, so previous data will be recapped.'}
        In England, there are {new_cases} number of new cases,
        and unfortunately {new_deaths} people lost their battle against Covid19.
        In total, there are {cumulative_cases} number of cases,
        and {cumulative_deaths} lives are lost in this pandemic.
        """
    except Exception:
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
        # catching Exception keeps the fallback while letting those through,
        # and the traceback is now logged instead of silently discarded.
        logging.exception("Failed to fetch covid data for the daily brief.")
        return "Unfortunately an error occurred when getting latest covid data."
def __weather_brief(lat: float = 50.718410, long: float = -3.533899) -> str:
    """
    Generates a brief message about the current weather.

    :param lat: latitude to query; defaults to the previously hard-coded value
        so existing no-argument callers behave identically.
    :param long: longitude to query; same default behaviour as *lat*.
    """
    weather = fetch_weather(lat=lat, long=long)
    return f"""
    Currently in your location, expect {weather['weather'][0]['description']}.
    The temperature is {weather['main']['temp']}.
    """
def __news_brief() -> str:
    """
    Generates a brief message of latest news headlines.

    Returns a placeholder sentence when no headlines are available.
    """
    news = fetch_news_headlines(country="gb")
    if not news:  # idiomatic truthiness check instead of len(news) == 0
        return "There are no top news for you right now."
    return "Also, here are some top news headlines for you.\n" + "\n".join(
        [f"{article['title']}; " for article in news]
    )
| true |
c62cfb446a03c57dc1235e36000b298661b6ed1c | Python | DanishKhan14/DumbCoder | /Python/strings/shortestPalind.py | UTF-8 | 697 | 4.15625 | 4 | [] | no_license | """
Given a string S, you are allowed to convert it to a palindrome by adding characters in front of it.
Find and return the shortest palindrome you can find by performing this transformation
"""
# [TLE] Method 1: Brute Force. Keep adding one char at a time in rev order
class Solution(object):
    # Brute force: keep prepending longer reversed suffixes of s until the
    # result reads the same forwards and backwards.
    def shortestPalindrome(self, s):
        """Return the shortest palindrome made by prepending characters to s.

        :type s: str
        :rtype: str
        """
        candidate = s
        for suffix_len in range(len(s)):
            candidate = s[len(s) - suffix_len:][::-1] + s
            if self.isPalindrome(candidate):
                break
        return candidate

    def isPalindrome(self, a):
        """Check whether *a* equals its own reversal."""
        return a == a[::-1]
# Quick manual check (Python 2 print statement): expected output "cbbaabbc".
A = Solution()
print A.shortestPalindrome("aabbc")
| true |
8ba4095cd60eeb07d9b9db3a925250dede5a5f0b | Python | ad54/aws_rest_api | /rest_lambda_function.py | UTF-8 | 2,925 | 3.234375 | 3 | [] | no_license | """This is the lambda function for rest api , which will provide data from dynamo db.
You need to pass the name of the sport and team. It will provide recent reords of the team.
If the searched sport is not availble, it will list all the available, the same for the searched team.
"""
import boto3
import json
from boto3.dynamodb.conditions import Key, Attr
# connection to dynamo table
dynamodb = boto3.resource('dynamodb')
# Table() requires the table name; 'sportsData' matches the table that
# get_data() below queries. The original call passed no argument, which
# raises at import time.
table_1 = dynamodb.Table('sportsData')
def get_sports():
    """Return a prompt listing every distinct sport present in the table."""
    scan_result = table_1.scan()
    available_sports = {record.get('sport') for record in scan_result['Items']}
    return f"please enter one of sports : {','.join(available_sports)}"
def get_teams(sport):
    """Return a prompt listing the teams of *sport*, or the sports list if none match."""
    scan_result = table_1.scan(FilterExpression=Attr('sport').eq(str(sport)))
    matching = scan_result['Items']
    if not matching:
        # No rows for this sport: treat it as an invalid sport name and
        # fall back to listing the valid sports instead.
        return get_sports()
    team_names = {record.get('team') for record in matching}
    return f"please enter one of teams for sport : {sport} : {','.join(team_names)}"
def get_data(sport='', team=''):
    """Look up recent results for (sport, team); fall back to listing options.

    Returns {"msg": <string or list of result strings>}.
    """
    table_1 = dynamodb.Table('sportsData')
    # No sport given at all: ask for one.
    if not sport:
        msg = ("please eneter sports")
        return {"msg": msg}
    # if team is not given it will list all the teams for that sport
    if sport and (not (team)):
        msg = get_teams(sport)
        return {"msg": msg}
    response1 = table_1.scan(FilterExpression=Attr('sport').eq(str(sport)) & Attr('team').eq(str(team)))
    if not response1['Items']:
        # Unknown team for this sport: list the valid teams (or sports).
        msg = get_teams(sport)
        return {"msg": msg}
    else:
        # on successfull request list results and generate output
        msg_list = list()
        for item in response1['Items']:
            # print(item)
            # NOTE(review): int(item.get(..., '')) raises ValueError if a score
            # attribute is missing, and equal scores are reported as 'lose'.
            if (int(item.get('team_score', ''))) > (int(item.get('op_team_score', ''))):
                status = 'beat'
            else:
                status = 'lose'
            msg = f"{item.get('team')} {status} {item.get('op_team', '')} {item.get('team_score', '')}-{item.get('op_team_score', '')} on {item.get('date', '')}"
            msg_list.append(msg)
        return {"msg": (msg_list)}
def lambda_handler(event, context):
    """AWS Lambda entry point: read the query parameters and return an HTTP response."""
    params = event['queryStringParameters']
    sport = params.get('sport', '')
    team = params.get('team', '')
    # Delegate the lookup, then wrap the result as an API Gateway response.
    message = get_data(sport=sport, team=team)
    return {
        'statusCode': 200,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(message),
    }
| true |
19044ffe2c5b2cc6f609dd9f3ea48962e9ff7562 | Python | ace12358/100knock | /test65_2.py | UTF-8 | 526 | 2.609375 | 3 | [] | no_license | #coding:utf-8
import sys,os
noun100 = []
# Collect the first token of every line of the input file (the target nouns).
for line in open(sys.argv[1]):
    noun100.append(line.strip().split()[0])
files = os.listdir('/Users/kitagawayoshiaki/Dropbox/100knock/my100knock/work_dir_n') # returns the files in the given directory path as a list
# For each tab-separated record (noun, dst, srcs...), print the dependency
# edges of any noun that appears in noun100. (Python 2 print statements.)
for file in files:
    for line in open("work_dir_n/"+file):
        list = line.strip().split("\t")  # NOTE(review): shadows the builtin `list`
        noun = list[0]
        if noun in noun100:
            dst = list[1]
            srcs = list[2:]
            print noun +"->"+ dst
            for src in srcs:
                print noun +"<-"+ src
| true |
f08faf6b04c2fcc85178bf2161cb32b523d83b73 | Python | fzero17/college_wish | /src/test.py | UTF-8 | 523 | 2.75 | 3 | [] | no_license | import sqlite3
# Open the 2016 database and print the row count of every user table.
db_file = './utils/database/de/2016.db'
conn = sqlite3.connect(db_file)
conn.text_factory = str
c = conn.cursor()
# Enumerate all user tables from the sqlite_master catalog.
res = c.execute("select name from sqlite_master where type='table' order by name;")
table_list = res.fetchall()
COUNT = 0
for table_name in table_list:
    # Table names cannot be bound as SQL parameters, hence the concatenation;
    # the names come from sqlite_master here, not from untrusted input.
    cursor = c.execute("SELECT count(*) FROM "+table_name[0])
    res = cursor.fetchall()
    print(table_name[0]," : ", res[0][0], '条')
    COUNT += res[0][0]
print("共计: ", COUNT, '条')
# C += cursor.fetchall()
| true |
68aa779993c14e0f654c0fbf458bb7e0bc39a8d2 | Python | wangyf/AH-RJMCMC | /make_AH_IL_comparison.py | UTF-8 | 4,216 | 2.6875 | 3 | [
"MIT"
] | permissive | # Script to make a comparison plot from the RJ-MCMC output
# of the AH and IL methods.
# The script assumes that the current directory contains the AH-output, and the directory with the IL-output is specified.
# We also assume that the comparison is direct - i.e. that all parameters and data are shared between the two models.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import os
import sys
# Abort unless run inside an AH output directory (i.e. next to 'input_file').
if not os.path.exists('input_file'):
    print('*'*50)
    print('Cannot find file: input_file')
    print('Check that you are running this Python code in the AH outputs directory \n E.g. cd Outputs \n python ../make_plots.py')
    print('*'*50)
    sys.exit(0)
#
# Exactly one argument is required: the path to the IL output directory.
if len(sys.argv) != 2:
    print('*'*50)
    print("Syntax: python make_AH_IL_comparison.py <IL_directory>")
    print("where IL_directory is the relative path to the IL-output")
    print('*'*50)
    sys.exit(0)
# Read some basic data that was saved from the input file to "model_data"
# This can be overwritten by either altering this file, or simply hardwiring the various parameters: e.g.
# age_min, age_max = 0, 100
# Each keyword line in 'input_file' sets one group of plotting parameters
# (case-insensitive keyword match on the first token).
for line in open('input_file','r'):
    if line.split()[0].upper() == 'Intensity_prior'.upper():
        I_min, I_max = float(line.split()[1]),float(line.split()[2])
    if line.split()[0].upper() == 'Age_bounds'.upper():
        age_min, age_max = float(line.split()[1]),float(line.split()[2])
    if line.split()[0].upper() == 'Num_change_points'.upper():
        K_min, K_max = int(line.split()[1]), int(line.split()[2])
    if line.split()[0].upper() == 'Credible'.upper():
        credible = float(line.split()[1])
    if line.split()[0].upper() == 'output_model'.upper():
        output_model_filename = line.split()[1]
    if line.split()[0].upper() == 'True_data'.upper():
        true_behaviour_file = line.split()[2]
        x_cts_true,y_cts_true=np.loadtxt(os.path.join(os.pardir,true_behaviour_file),unpack=True)
    if line.split()[0].upper() == 'Plotting_intensity_range'.upper():
        I_min,I_max = float(line.split()[1]),float(line.split()[2])
# read in the various data files that were output by the RJ-MCMC script
# Load the AH outputs from the current directory.
x, x_err, y, y_err, strat = np.loadtxt('data.dat', unpack=True)
strat = [int(a) for a in strat]
lx, ly = np.loadtxt('credible_lower.dat', unpack=True)
ux, uy = np.loadtxt('credible_upper.dat', unpack=True)
mode_x, mode_y = np.loadtxt('mode.dat', unpack=True)
median_x, median_y = np.loadtxt('median.dat', unpack=True)
av_x, av_y = np.loadtxt('average.dat', unpack=True)
best_x, best_y = np.loadtxt('best_fit.dat', unpack=True)
# Load the same summaries from the IL output directory given on the command line.
if not os.path.exists(sys.argv[1]):
    print('Path ' + sys.argv[1] + ' does not exist')
    sys.exit(0)
lx_IL, ly_IL = np.loadtxt(os.path.join(sys.argv[1],'credible_lower.dat'), unpack=True)
ux_IL, uy_IL = np.loadtxt(os.path.join(sys.argv[1],'credible_upper.dat'), unpack=True)
mode_x_IL, mode_y_IL = np.loadtxt(os.path.join(sys.argv[1],'mode.dat'), unpack=True)
median_x_IL, median_y_IL = np.loadtxt(os.path.join(sys.argv[1],'median.dat'), unpack=True)
av_x_IL, av_y_IL = np.loadtxt(os.path.join(sys.argv[1],'average.dat'), unpack=True)
best_x_IL, best_y_IL = np.loadtxt(os.path.join(sys.argv[1],'best_fit.dat'), unpack=True)
print('Building comparative figure...')
# Overlay: AH credible band (filled), both averages, IL credible bounds (lines),
# and the known truth when this is a synthetic case.
fig1, ax1 = plt.subplots(figsize=(14,5))
ax1.fill_between(lx, ly, uy, facecolor='orange', alpha=0.5, label = 'AH %i%% Credible interval' % credible)
ax1.plot(av_x_IL, av_y_IL, 'r', label = 'Average: IL', linewidth=2)
ax1.plot(av_x, av_y, 'b', label = 'Average: AH', linewidth=2)
if 'x_cts_true' in locals(): #see if "true" data are available to plot --- only for synthetic cases.
    plt.plot(x_cts_true,y_cts_true,'k', linewidth=2, label='Real')
ax1.plot(lx_IL, ly_IL, 'g-', label = 'IL %i%% Credible interval' % credible,linewidth=2)
ax1.plot(ux_IL, uy_IL, 'g-',linewidth=2)
ax1.set_xlabel('Age/yr',fontsize=16)
ax1.set_ylabel('Intensity/$\mu$T',fontsize=16)
ax1.xaxis.set_tick_params(labelsize=16)
ax1.yaxis.set_tick_params(labelsize=16)
ax1.set_xlim(age_min, age_max)
ax1.set_ylim(I_min, I_max)
ax1.legend(loc = 'upper right',fontsize=12,labelspacing=0.2)
plt.savefig('AH_IL_comparison.pdf', bbox_inches='tight',pad_inches=0.0)
plt.close(fig1)
| true |
6ef05fc6e8bd533d8b3689911a62f31f4db98d49 | Python | destroyer7/puf_iot | /Protocol1Full/DriverOnlineServer.py | UTF-8 | 3,993 | 2.703125 | 3 | [] | no_license | #---------------------- DriverOnlineServer.py----------------------
import sys
from OnlineServerSocket import ServerSocket # For Server-Client communication using TCP sockets
from OnlineServerCrypto import ServerCrypto # For Cryptographic Functions
from IOTDatabase import database_server # For the Database
def device_driver_protoOne():
    """Server-side driver for mutual-authentication Protocol 1.

    Flow, as marked by the section comments below: seed the CRP database,
    accept the device's hello (Message 1), answer with an encrypted challenge
    plus MAC (Message 2), verify the device's proof (Message 3), then derive
    a shared session key and rotate the stored challenge/response pair.

    NOTE(review): R.decode('hex') is Python 2 only; Python 3 would need
    bytes.fromhex instead.
    """
    #Creating objects for the imported classes
    sersoc_obj = ServerSocket()
    ons_obj = ServerCrypto()
    db_con = database_server()
    db_con.startDBConnection()
    db_con.create_table()
    print('Database created!')
    print('Inserting ID, CRP in Database')
    # Assuming we have the IOT Device ID and CRP at startup
    ID = 'raspberrypi@1234'
    C = 'qwertyASDFGH@012'
    R = '48f8878e987de1b86d9f95614227b625'
    R = R.decode('hex')
    db_con.insert_entry(ID,C,R)
    print('\nID, CRP is inserted successfully')
    print('\nContents of database:')
    db_con.display_entries()
    print('------------------------------------------------------------------------------------------------------------------------')
    sersoc_obj.start_server()
    print('------------------------------------------------------------------------------------------------------------------------')
    #Postprocessing after receiving Message 1 and preprocessing before sending Message 2
    print('\nReceiving message from IOT Device')
    MSG = sersoc_obj.receive_from_Client()
    print('\n\nMessage received from client: '+MSG)
    # Message 1 layout: 16-byte device ID followed by an 8-byte nonce.
    IDa = MSG[0:16]
    N1 = MSG[16:24]
    print('\nReceived ID: '+IDa)
    print('\nReceived Nonce N1: '+N1)
    print('\nQuerying database to get CRP')
    rec = db_con.return_entry(IDa)
    print('\nID found in Database! Proceed!\n')
    Ci1 = rec[0]
    Ri1 = rec[1]
    print('\nCi1: '+Ci1)
    print('\nRi1: '+Ri1)
    print('\nGenerate Random Nonce Rs1')
    Rs1 = ons_obj.generate_Random(8)
    print('\nRs1 generated: '+Rs1)
    ma = ID+N1+Rs1 #16+8+8=32
    print('\nMA before encrypting with R1: '+ma+'\n\n')
    MA = ons_obj.encrypt_Data(ma,Ri1)
    print('\nMA after encrypting with R1: '+MA+'\n\n')
    MACmsg = MA+Ri1+Rs1 #32+16+8=56x2=112
    print('\nGenerating MAC of MACmsg')
    MACval= ons_obj.sign_MAC(IDa,MACmsg) #16
    print('\nMACval of MACmsg: '+MACval+'\n\n')
    # Message 2 layout: challenge Ci1 + encrypted payload MA + MAC.
    MSG = Ci1+MA+MACval #16+32+16
    print('\nMessage to be sent to IOT Device: '+MSG+'\n')
    print('Sending message to IOT Device')
    sersoc_obj.send_to_Client(MSG)
    print('------------------------------------------------------------------------------------------------------------------------')
    #Postprocessing after receiving Message 3
    print('\nReceiving message from IOT Device')
    MSG = sersoc_obj.receive_from_Client()
    # An empty reply means the device rejected the authentication.
    if (MSG==""):
        print("Mutual Authentication rejected!")
        sys.exit()
    print('\n\nMessage received from IOT Device: '+MSG)
    MS = MSG[0:64]
    MACval = MSG[64:]
    print('\nReceived MS: '+MS)
    print('\nReceived MACval: '+MACval)
    print('\nMS before decrypting with Ri1: '+MS+'\n\n')
    dMS = ons_obj.decrypt_Data(MS, Ri1)
    print('\nMS after decrypting with Ri1: '+dMS+'\n\n')
    Na = dMS[24:32]
    print('\nRandom Nonce Na: '+Na)
    Ri2 = dMS[32:]
    print('\nResponse Ri2: '+Ri2+'\n')
    MACmsg = MS+Ri1+Na
    print('\nVerifying MAC')
    if (not ons_obj.verify_MAC(IDa,MACmsg,MACval)):
        print("\nTerminating Program...")
        sys.exit()
    print('\nCreating Ci2')
    Ci2 = ons_obj.generate_Hash(Na+Rs1)
    print('\nCi2: '+Ci2+'\n')
    print('\nMutual Authentication achieved!\n')
    print('------------------------------------------------------------------------------------------------------------------------')
    #Post Processing after achieving Mutual Authentication
    # Session key = H(Rs1) XOR H(Na), byte by byte.
    hashRs1 = ons_obj.generate_Hash(Rs1)
    hashNa = ons_obj.generate_Hash(Na)
    session_key = ''.join(chr(ord(a) ^ ord(b)) for a,b in zip(hashRs1,hashNa))
    print('\n\nSession Key: '+session_key+'\n\n')
    print('\nUpdate ID, CRP in Database!')
    db_con.update_entry(IDa,str(Ci2),str(Ri2))
    print("\nUpdated CRP for IDa")
    print('\nContents of database:')
    db_con.display_entries()
    db_con.drop_table()
    #print('Table dropped successfully')
    sersoc_obj.end_connection()
device_driver_protoOne() | true |
cd6b5e62eee818e61ea77c07b725244655e5727b | Python | miaofa/PythonScriptsForWork | /bin2bin/uifor2019.py | UTF-8 | 5,000 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# 全部UI设计
# 默认的输出目录是桌面,针对Linux的还没有添加(最好是home目录)
import tkinter as tk
from tkinter.filedialog import askdirectory, askopenfilename
from tkinter import scrolledtext
import os
import conversion
class UI(object):
    """Small tkinter front end for the bin2bin converter.

    Top row: output-directory label/entry/browse button.
    Bottom row: file-picker button, scrolling log area, convert button.
    """
    def __init__(self, master):
        self.master = master
        self.master.title("bin2bin")
        self.frame_upper = tk.Frame(self.master)
        self.frame_upper.pack()
        # self.CheckVar = tk.IntVar()
        # self.Cb = tk.Checkbutton(self.frame_upper,
        #                          text="直接使用源文件进行转换",
        #                          variable=self.CheckVar,
        #                          onvalue=1, offvalue=0,
        #                          width=50, command=self.Changestatus)
        self.outDir = tk.Label(self.frame_upper, text='选择输出目录', width=20)
        self.dirtext = tk.StringVar()
        self.entrydir = tk.Entry(self.frame_upper,
                                 textvariable=self.dirtext,
                                 width=20)
        # img = Image.open(imgpath)
        # picture = ImageTk.PhotoImage(img)
        # ,command=self.Okfun)#image=picture)
        self.getdirbutton = tk.Button(
            self.frame_upper, text='打开', command=self.ChooseDir)
        # self.Cb.pack()
        self.outDir.pack(side='left')
        self.entrydir.pack(side='left')
        self.getdirbutton.pack(side='left')
        self.frame_down = tk.Frame(self.master)
        self.frame_down.pack(padx=10, pady=30)
        self.ButtonTransform = tk.Button(
            self.frame_down, text='转换', width=3,
            height=2, command=self.Transform)
        # log area
        # scrolled text box
        # wrap=tk.WORD means a word that would straddle the end of a line is
        # moved to the start of the next line (e.g. 'hello' split as 'he'/'llo'
        # would be moved whole); the default wrap value is tk.CHAR
        # self.logarea = tk.LabelFrame(self.frame_right, text='Log Area')
        self.scr = scrolledtext.ScrolledText(
            self.frame_down, width=30, height=5, wrap=tk.WORD)
        # button for picking the file to convert
        self.ButtonSelectFile = tk.Button(
            self.frame_down, text='选择文件', width=6,
            height=2, command=self.ChooseFile)
        self.ButtonSelectFile.pack(side='left', padx=10)
        self.scr.pack(side='left', padx=10)
        self.ButtonTransform.pack(side='left', padx=10)
        # show the welcome banner
        self.InitScrSayHello()
        # set the default state
        # checking the box greys out the row of widgets below
        # self.InitWidgetStatus()
        # default the output directory to the desktop, when available
        if conversion.deskdir:
            self.dirtext.set(conversion.deskdir)
    def Recordlog(self, strstr):
        # private function to Record the log: append a line and scroll to it
        self.scr.insert(tk.END, strstr)
        self.scr.insert(tk.END, '\n')
        self.scr.see(tk.END)
    def InitScrSayHello(self):
        # Welcome banner written into the log area on startup.
        self.Recordlog('欢迎,欢迎!')
        # if any buy please send email to fa.miao@goodwe.com.cn.')
        self.Recordlog('Pytransform 版本 0.0.0.1')
        # self.Recordlog(
        #     '若要自己选择输出目录,请取消勾选复选框')
        # active, , or normal
    # def InitWidgetStatus(self):
    #     self.outDir['state'] = 'normal'
    #     self.entrydir['state'] = 'normal'
    #     self.getdirbutton['state'] = 'normal'
    #
    #     self.Cb.deselect()
    # def Changestatus(self):
    #     if self.CheckVar.get() == 0:
    #         self.outDir['state'] = 'normal'
    #         self.entrydir['state'] = 'normal'
    #         self.getdirbutton['state'] = 'normal'
    #     elif self.CheckVar.get() == 1:
    #         self.outDir['state'] = 'disabled'
    #         self.entrydir['state'] = 'disabled'
    #         self.getdirbutton['state'] = 'disabled'
    #     else:
    #         pass
    def ChooseDir(self):
        # Directory picker: result goes straight into the output-dir entry.
        self.dirtext.set(askdirectory())
    def ChooseFile(self):
        # File picker: remember the chosen input file and log it.
        self.infilename = askopenfilename()
        if self.infilename:
            self.Recordlog('已选中文件{}, 接着点击转换按钮'.format(self.infilename))
    # main conversion logic: builds the output path and calls conversion.enginebuf
    def Transform(self):
        self.outfilename = os.path.join(
            self.dirtext.get(), os.path.basename(self.infilename))
        try:
            conversion.enginebuf(self.infilename, self.outfilename)
        except RuntimeError as e:
            self.Recordlog('文件不正确,请重新选择文件')
        else:
            self.Recordlog('转化成功')
if __name__ == '__main__':
    # Build the Tk root window at a fixed size and hand it to the UI.
    root = tk.Tk()
    root.geometry("400x190")
    app = UI(root)
    root.mainloop()
| true |
bfe2f246e4e30aac57c0f9542e9a10723a8579c1 | Python | asolberg/CJ-2012-Prelim | /B - Dancing with the Googlers/dancing.py | UTF-8 | 4,630 | 3.671875 | 4 | [] | no_license | # Problem
#
# You're watching a show where Googlers (employees of Google) dance, and then each dancer is
# given a triplet of scores by three judges. Each triplet of scores consists of three
# integer scores from 0 to 10 inclusive. The judges have very similar standards, so it's
# surprising if a triplet of scores contains two scores that are 2 apart. No triplet of
# scores contains scores that are more than 2 apart.
#
# For example: (8, 8, 8) and (7, 8, 7) are not surprising. (6, 7, 8) and (6, 8, 8) are
# surprising. (7, 6, 9) will never happen.
#
# The total points for a Googler is the sum of the three scores in that Googler's triplet of
# scores. The best result for a Googler is the maximum of the three scores in that Googler's
# triplet of scores. Given the total points for each Googler, as well as the number of
# surprising triplets of scores, what is the maximum number of Googlers that could have had
# a best result of at least p?
#
# For example, suppose there were 6 Googlers, and they had the following total points: 29,
# 20, 8, 18, 18, 21. You remember that there were 2 surprising triplets of scores, and you
# want to know how many Googlers could have gotten a best result of 8 or better.
#
# With those total points, and knowing that two of the triplets were surprising, the
# triplets of scores could have been:
def search(totals, p):
    """Enumerate every plausible judge triplet for each googler.

    For each total in *totals*, list every ordered triplet (i, j, k) of
    scores in 0..10 such that:
      * the three scores sum to that total,
      * the best score is at least *p*,
      * no two scores differ by more than 2 (the problem's feasibility rule).

    Returns one list of tuples per entry of *totals*, triplets in
    lexicographic order.

    The original body incremented the loop variables by hand (no-ops under
    ``range``) and had an unreachable ``try/except`` with a bare ``except:``;
    both are removed here without changing the output.
    """
    from itertools import product

    combos = []
    for total in totals:
        matches = []
        for i, j, k in product(range(11), repeat=3):
            if (i + j + k == total and max(i, j, k) >= p
                    and abs(i - j) <= 2 and abs(i - k) <= 2 and abs(j - k) <= 2):
                matches.append((i, j, k))
        combos.append(matches)
    return combos
def NumberOfAllSurprises(combos):
    """Count googlers whose total can only be explained by a surprising triplet.

    A triplet is "not surprising" when all three scores are within 1 of each
    other (equivalently, max - min <= 1). A googler with at least one feasible
    triplet, none of which is unsurprising, must have received a surprise.

    This replaces the original's unused ``num_blank_scores`` bookkeeping,
    parallel ``winning_scores`` list, and pointless ``continue`` with a
    direct ``any()`` check; the returned count is unchanged.
    """
    forced_surprises = 0
    for triplets in combos:
        has_unsurprising = any(max(t) - min(t) <= 1 for t in triplets)
        if triplets and not has_unsurprising:
            forced_surprises += 1
    return forced_surprises
# Driver: read each case (N googlers, S surprising triplets, threshold p,
# then N totals), and report how many googlers could have best score >= p.
filePrefix = 'B-small-attempt0'
fin = open(filePrefix + '.in', 'r')
fout = open(filePrefix + '.out', 'w')
T = int(fin.readline())
combs = []
for i in range(T):
    N, S, p, *t = [int(x) for x in fin.readline().split()]
    # print("N=%d" %N)
    combos = search(t, p)
    # print("Number of Googlers: %d" %len(combos))
    # for googler in combos:
    #     print("Len of scores: %d" % len(googler))
    all_surprises=NumberOfAllSurprises(combos)
    # print("All Surprises = %d" % all_surprises)
    # Googlers that *require* a surprise beyond the S allowed must be deducted.
    if all_surprises > S:
        deduction = all_surprises-S
    else:
        deduction = 0
    # Count googlers with at least one feasible triplet reaching p.
    non_empty_scores = 0
    for googler in combos:
        if len(googler)>0:
            non_empty_scores +=1
    # print("Non-empty scores: %d" % non_empty_scores)
    if non_empty_scores - deduction <= 0:
        answer = 0
    else:
        answer = non_empty_scores-deduction
    print("Case #%d: %d" % ((i+1), answer))
fout.write("Case #%d: %d\n" % ((i+1), answer)) | true |
691404d715a2931e22d99e1caf0a19d98d70f0e8 | Python | LadyM2019/Python-Fundamentals-Softuni | /05. Dictionaries - Exercises/05. Social Media Posts.py | UTF-8 | 1,070 | 3.5 | 4 | [
"MIT"
] | permissive | def Post(postName):
socialMedia.__setitem__(postName,list([0,0]))
def Like(postName):
    # Likes live in slot 0 of the post's [likes, dislikes, *comments] record.
    socialMedia[postName][0] += 1
def Dislike(postName):
    # Dislikes live in slot 1 of the post's [likes, dislikes, *comments] record.
    socialMedia[postName][1] += 1
def Comment(postName,commentator,content):
    # Comments are appended after the two counters as formatted strings.
    socialMedia[postName].append(f"* {commentator}: {content}")
socialMedia = dict()  # post name -> [likes, dislikes, comment strings...]
# Command loop: read "post/like/dislike/comment ..." lines until the sentinel.
while True:
    command = input()
    if command == "drop the media":
        break
    parts = command.split(" ")
    if parts[0] == "post":
        Post(parts[1])
    elif parts[0] == "like":
        Like(parts[1])
    elif parts[0] == "dislike":
        Dislike(parts[1])
    elif parts[0] == "comment":
        # Re-join multi-word comment content that split(" ") broke apart.
        if len(parts) > 4:
            for i in range(4,len(parts)):
                parts[3] += " " + parts[i]
        Comment(parts[1], parts[2] ,parts[3])
# Report every post: counters first, then its comments (or "None").
for post in socialMedia:
    print(f"Post: {post} | Likes: {socialMedia[post][0]} | Dislikes: {socialMedia[post][1]}")
    print("Comments:")
    if len(socialMedia[post]) == 2:
        print("None")
    for i in range(2,len(socialMedia[post])):
print(socialMedia[post][i]) | true |
9afcbfcc8d83b5715ef9353de3c385cfac902871 | Python | Sladge17/linear_regression | /teacher.py | UTF-8 | 1,867 | 2.796875 | 3 | [] | no_license | import sys
import numpy as np
def check_argv(argv):
    """Return the dataset path from *argv*, defaulting to 'data.csv'.

    Prints an error and exits the process when more than one argument is
    supplied.
    """
    if not argv:
        return 'data.csv'
    if len(argv) != 1:
        print("\033[31mNeed only one dataset\033[37m")
        exit()
    return argv[0]
def read_data(source):
    """Load a two-column integer CSV, skipping its first row.

    Returns a tuple ``(x, y)`` of 1-D arrays taken from columns 0 and 1.
    Prints an error and exits when the file cannot be parsed.
    """
    try:
        raw = np.genfromtxt(source, dtype=np.uint32, delimiter=',')[1:]
    except:
        print("\033[31mDataset not exist\033[37m")
        exit()
    return (np.array(raw[:, 0]), np.array(raw[:, 1]))
def normalization(target, val_min, val_max):
    """Min-max scale *target* into [0, 1] relative to [val_min, val_max]."""
    span = val_max - val_min
    return (target - val_min) / span
def norm_x(x):
    """Return ``(minmax, scaled)``: the [min, max] of *x* and *x* min-max
    scaled into [0, 1]."""
    x_minmax = np.array([np.min(x), np.max(x)])
    scaled = (x - x_minmax[0]) / (x_minmax[1] - x_minmax[0])
    return (x_minmax, scaled)
def set_hyperparameters(y):
    """Return the ``(epochs, error_border, alpha)`` training constants.

    *y* is accepted for interface compatibility but is not consulted.
    """
    epochs, error_border = 100, 100
    alpha = np.array([0.1, 0.01], np.float32)
    return (epochs, error_border, alpha)
def learning_nn(epochs, error_border, x, y, alpha):
    """Fit a two-parameter linear model (slope, intercept) to (x, y).

    Returns the weight vector as soon as the per-epoch error drops below
    ``error_border``, or after ``epochs`` iterations otherwise.
    """
    # weight[0] = slope (random init), weight[1] = intercept (half of max y).
    weight = np.array([np.random.rand(), np.max(y) / 2], np.float32)
    error = np.zeros(epochs, np.float32)
    # ``selection`` is one augmented sample [x_i, 1]; ``delta`` accumulates
    # the gradient contribution.
    selection = np.array([0, 1], np.float32)
    delta = np.zeros(2, np.float32)
    for epoch in range(epochs):
        # Squared gap between the mean prediction and the mean target.
        error[epoch] = np.power(np.mean(np.stack((x,
                                                  np.ones(x.size)),
                                                 1) @ weight) - np.mean(y), 2)
        if error[epoch] < error_border:
            return weight
        for i in range(x.size):
            selection[0] = x[i]
            predict = selection @ weight
            delta += (predict - y[i]) * selection
            # NOTE(review): the update + reset sit inside the per-sample
            # loop, making this a stochastic (per-sample) update with a
            # freshly zeroed gradient each step -- confirm this indentation
            # is intended rather than a per-epoch batch update.
            weight -= delta * alpha
            delta[:] = 0
    return weight
def create_weightsfile(x_minmax, weight):
    """Persist the x min/max pair followed by the weights, one value per
    line, to a file named ``weights`` in the working directory."""
    lines = [str(value) + '\n' for value in list(x_minmax) + list(weight)]
    with open("weights", 'w') as file:
        file.writelines(lines)
def main(argv):
    # Full training pipeline: parse CLI -> load CSV -> normalise x ->
    # gradient-descent fit -> persist min/max + weights for the predictor.
    source = check_argv(argv)
    x, y = read_data(source)
    x_minmax, x = norm_x(x)
    epochs, error_border, alpha = set_hyperparameters(y)
    weight = learning_nn(epochs, error_border, x, y, alpha)
    create_weightsfile(x_minmax, weight)
if __name__ == "__main__":
    main(sys.argv[1:])
48795a26e884322189b14aa425dc3a7670f650a2 | Python | ww35133634/chenxusheng | /ITcoach/xlrd_xlwt处理数据/第9章 Python函数技术/9.7 递归函数写法及应用/9.7.1.py | UTF-8 | 242 | 3.390625 | 3 | [
"AFL-3.0"
] | permissive | # def fact(x):
# if x==1:
# return 1
# else:
# return x+fact(x-1)
#
# print(fact(5))
def con(l):
    """Recursively concatenate the strings in *l*, prefixing each with '-'.

    Note: every element -- including the first -- gets a leading '-', so
    ``con(['a', 'b']) == '-a-b'`` and ``con([]) == ''``.
    """
    if not l:
        return ''
    return con(l[:-1]) + '-' + l[-1]
print(con(['a','b','c','d']))
ca7d6bbd4e41c591d6e280b481e7e3227cddca8e | Python | tedchou12/connect-pm-server | /src/pm_server/modules/security.py | UTF-8 | 1,560 | 2.546875 | 3 | [] | no_license | from flask import make_response, request, session
from datetime import date, datetime, timedelta
from .db import db
import time
import random
import string
class security :
    """Issue and validate single-use, time-limited security tokens.

    Tokens are 32-character lowercase/digit strings persisted in the
    ``security`` table with their creation time and a "used" flag.
    """

    def __init__(self):
        self.table = 'security'
        # A hash may only be redeemed within 10 minutes of creation.
        self.valid_duration = 60 * 10

    def get_hash(self) :
        """Generate a fresh hash, store it as unused and return it.

        Returns False when the database insert fails.
        """
        obj_database = db()
        security_hash = self.hash_generator()
        query = ('INSERT INTO ' + self.table + ' (security_hash, security_time, security_used) VALUES (%s, %s, %s)')
        data = (security_hash, time.strftime('%Y-%m-%d %H:%M:%S'), 0)
        if obj_database.insert(query, data) != False :
            return security_hash
        else :
            return False

    def check_hash(self, hash='') :
        """Redeem *hash*: True exactly once if it exists, is unused and fresh.

        A successful check marks the hash as used so it cannot be replayed.
        """
        obj_database = db()
        query = ('SELECT * FROM ' + self.table + ' WHERE security_hash=%s AND security_used!=%s')
        data = (hash, 1, )
        security_records = obj_database.select(query, data)
        if len(security_records) > 0 :
            # Only accept the hash while it is younger than valid_duration.
            if int(datetime.now().timestamp()) - int(datetime.timestamp(security_records[0]['security_time'])) < self.valid_duration :
                query = ('UPDATE ' + self.table + ' SET security_used=%s WHERE security_hash=%s')
                data = (1, hash)
                obj_database.update(query, data)
                return True
            else :
                return False
        else :
            return False

    def hash_generator(self) :
        """Return a random 32-character token of lowercase letters and digits."""
        # SECURITY FIX: tokens guard authentication-relevant state, so draw
        # them from the cryptographically secure ``secrets`` RNG instead of
        # the predictable ``random`` module.
        import secrets
        letters = string.ascii_lowercase + '1234567890'
        return ''.join(secrets.choice(letters) for i in range(32))
| true |
a2d6a9d2a033aa5ad5d1cc057f40b400855b8c6f | Python | ElTrackiras/KeyboardTrainer | /main.py | UTF-8 | 7,406 | 3.40625 | 3 | [] | no_license | import pygame
import random
# Initialise every pygame module once, before any font or display objects
# are created below.
pygame.init()
class LetterBoxes:
    """A letter-carrying monster sprite that drifts across the screen.

    Every instance registers itself in the class-level ``all_boxes`` list,
    which the game loop iterates to draw, advance and remove targets.
    """

    monster_img = pygame.image.load('Assets/Monster.png')
    all_boxes = []
    box_font = pygame.font.SysFont("monospace", 30)

    def __init__(self, x, y, letter):
        self.x = x
        self.y = y
        self.letter = letter
        self.width = 50
        self.height = 50
        # Scale the shared sprite to this box's dimensions.
        self.image = pygame.transform.scale(LetterBoxes.monster_img, (self.height, self.width))
        LetterBoxes.all_boxes.append(self)

    def exist(self, screen, level):
        """Advance the box one step and draw sprite plus letter on *screen*."""
        self.move(level)
        rendered_letter = LetterBoxes.box_font.render(self.letter, True, (0, 0, 0))
        screen.blit(self.image, (self.x, self.y))
        letter_pos = (self.x + (self.width / 3), self.y + (self.height / 5))
        screen.blit(rendered_letter, letter_pos)

    def move(self, level):
        """Shift the box rightwards; speed scales with the current level."""
        self.x += 2 * level
class GameLoop:
    """Class-level game state and the main loop of the typing trainer.

    Letter boxes spawn on the left edge and drift right; typing a box's
    letter destroys it for a point, while a box that escapes the window
    costs a life.  State lives on the class because only one game runs.
    """

    window_width = 1000
    window_height = 700
    game_window = pygame.display.set_mode((window_width, window_height))
    game_run = True
    fps = pygame.time.Clock()
    pressed_letter_box = ''   # letter typed since the last frame
    spawn_timer = 20          # frames until the next box spawns
    alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    letters_taken = []        # letters currently on screen
    letters_available = []    # letters free to spawn
    in_game_font = pygame.font.SysFont("monospace", 30)
    game_score = 0
    lives = 10
    current_screen = 'gameplay'
    level = 1                 # scales box speed; creeps upward every frame
    # pygame key constant -> letter lookup; replaces the former 26-branch
    # if/elif chain in the event handler.
    letter_keys = {getattr(pygame, 'K_' + letter): letter
                   for letter in 'abcdefghijklmnopqrstuvwxyz'}

    @classmethod
    def screen_controller(cls):
        """Switch to the game-over screen once all lives are gone."""
        if cls.lives <= 0:
            cls.current_screen = 'game_over'

    @classmethod
    def game_over_screen(cls):
        """Render the GAMEOVER banner."""
        game_over_font = pygame.font.SysFont("monospace", 30)
        game_over_label = game_over_font.render('GAMEOVER', True, (255, 0, 0))
        cls.game_window.blit(game_over_label, (300, 300))

    @classmethod
    def game_texts(cls):
        """Draw the score / lives / level HUD along the top edge."""
        score_label = cls.in_game_font.render('Score: ' + str(cls.game_score), True, (255, 0, 255))
        cls.game_window.blit(score_label, (0, 0))
        life_label = cls.in_game_font.render('Lives: ' + str(cls.lives), True, (255, 0, 255))
        cls.game_window.blit(life_label, (300, 0))
        level_label = cls.in_game_font.render('Level: ' + str(cls.level), True, (255, 0, 255))
        cls.game_window.blit(level_label, (500, 0))

    @classmethod
    def spawn_boxes(cls):
        """Spawn boxes on a timer, then draw/advance/cull every active box."""
        cls.spawn_timer -= 1
        if cls.spawn_timer <= 0:
            # Refresh the pool of letters that are not already on screen.
            cls.letters_available.clear()
            for letter in cls.alphabet:
                if letter not in cls.letters_taken:
                    cls.letters_available.append(letter)
            if len(cls.letters_available) > 0:
                chosen_letter = random.choice(cls.letters_available)
                LetterBoxes(-50, random.randrange(50, cls.window_height - 50), chosen_letter)
                cls.letters_taken.append(chosen_letter)
                cls.spawn_timer = 30
            else:
                # Whole alphabet on screen: pause spawning, then start over.
                cls.letters_taken.clear()
                cls.spawn_timer = 300
        # BUGFIX: iterate over a copy -- the body removes boxes from
        # ``all_boxes``, and removing from the list being iterated skips
        # the element that follows each removal.
        for box in list(LetterBoxes.all_boxes):
            box.exist(cls.game_window, cls.level)
            if cls.pressed_letter_box == box.letter:
                cls.game_score += 1
                LetterBoxes.all_boxes.remove(box)
            if box.x > cls.window_width:
                try:
                    cls.letters_taken.remove(box.letter)
                    LetterBoxes.all_boxes.remove(box)
                    cls.lives -= 1
                except:
                    # Box/letter already removed this frame; ignore.
                    pass
        cls.pressed_letter_box = ''

    @classmethod
    def reset_variables(cls):
        """Restore a fresh game state after a restart from game over."""
        cls.lives = 10
        cls.spawn_timer = 30
        cls.game_score = 0
        cls.level = 1
        LetterBoxes.all_boxes.clear()
        cls.letters_taken.clear()
        cls.letters_available.clear()

    @classmethod
    def game_runner(cls):
        """Main loop: poll input, update the world and render at 60 FPS."""
        while cls.game_run:
            cls.game_window.fill((0, 0, 0))
            cls.fps.tick(60)
            cls.screen_controller()
            cls.level += 0.0001  # slow, continuous difficulty ramp
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    quit()
                if event.type == pygame.KEYDOWN:
                    pressed = cls.letter_keys.get(event.key)
                    if pressed is not None:
                        cls.pressed_letter_box = pressed
                    elif event.key == pygame.K_SPACE:
                        if cls.current_screen == 'game_over':
                            cls.current_screen = 'gameplay'
                            cls.reset_variables()
                if cls.pressed_letter_box in cls.letters_taken:
                    cls.letters_taken.remove(cls.pressed_letter_box)
            if cls.current_screen == 'game_over':
                cls.game_over_screen()
            else:
                cls.game_texts()
                cls.spawn_boxes()
            pygame.display.update()


GameLoop.game_runner()
| true |
f00c2fedad62050629a90f32b1d9472f0b4154a7 | Python | siberowl/AI_CP365 | /src/Adaline/Adaline.py | UTF-8 | 1,234 | 2.734375 | 3 | [] | no_license | import numpy as np
import pandas as pd
class Adaline:
def __init__(self):
return None
def fit(self, data, labels):
self.data = data
self.labels = labels
self.nexamples = data.shape[0]
self.nfeatures = data.shape[1]
ws = np.ones((self.nfeatures,1))
lr = 0.000001
count=0
tws= np.zeros((self.nfeatures,1))
while np.sum(ws-tws)!=0: #if weights haven't reached equilibrium
#while count<100:
count+=1
activation = np.dot(np.transpose(ws),np.transpose(data)) #row vector of activations by example
diff = np.reshape(self.labels,(1,self.nexamples)) - activation #row vector of diff by examples
dw = np.transpose(lr*np.dot(diff,data))
tws=ws
ws = ws + dw
self.ws=ws
def predict(self,sample):
if np.dot(np.transpose(self.ws),sample) > 0:
return 1
else:
return -1
def copy(self):
nmodel = Adaline()
nmodel.ws=self.ws.copy()
nmodel.data=self.data.copy()
nmodel.labels=self.labels.copy()
nmodel.nexamples=self.nexamples
nmodel.nfeatures=self.nfeatures
return nmodel
| true |
bf557469d6acda997412d01f02bc55663b4a2c30 | Python | zkroliko/Abyssal-Destructor | /abyssaldestrucion/client/Client.py | UTF-8 | 5,912 | 2.53125 | 3 | [] | no_license | import paho.mqtt.client as mqtt
import sys
import random
from ControllerUtil import ControllerUtil
from Topics import Topics, main_topic
from SerialStub import *
from threading import Thread
import Message
import thread
import time
import serial
class Client:
    # MQTT game client for a two-player submarine duel (Python 2 code).
    # Bridges broker messages (warnings, lives, sonar, game state) to a
    # serial-attached hardware controller, and controller input back to the
    # broker.  Byte values written to ``self.ser`` drive controller LEDs;
    # the exact bit meanings depend on the controller firmware -- TODO
    # confirm against the hardware documentation.

    def on_message(self, client, obj, msg):
        # Fallback handler: if no other handler serviced that message,
        # silently drop it.
        pass

    def on_message_sonar_in(self, client, userdata, message):
        # Payload "id:distance"; only react to pings addressed to us.
        print("Received sonar_in " + message.payload)
        l = str.split(message.payload, ":")
        id = int(l[0])
        distance = float(l[1])
        if self.id == id:
            self.ping_received(distance)

    def on_message_game_state(self, client, userdata, message):
        # Payload is the winner's id; compare against our own id.
        who_won = int(message.payload)
        if who_won == self.id: self.game_over(True)
        else: self.game_over(False)

    def on_message_warning(self, client, userdata, message):
        # Payload "id:value" with value in 0..31; forward the raw value to
        # the controller and run the local warning hook.
        l = str.split(message.payload, ":")
        id = int(l[0])
        value = int(l[1])
        print("Got warning with value " + str(value))
        if (value >= 0 and value <= 31 and id == self.id):
            self.ser.write(chr(value))
            self.warning(value)

    def on_message_life(self, client, userdata, message):
        # Payload "id:lives"; only 1 or 2 remaining lives trigger feedback
        # (0 lives is handled via the game_state topic).
        l = str.split(message.payload, ":")
        id = int(l[0])
        lives = int(l[1])
        print l
        if (lives == 1 or lives == 2) and id == self.id:
            self.vessel_hit(lives)

    def on_connect(self, client, userdata, flags, rc):
        print("Client connected")
        if len(userdata) > 0: print("Client has user data: " + userdata)
        sys.stdout.flush()

    def on_publish(self, client, obj, mid):
        pass

    def on_subscribe(self, client, obj, mid, granted_ops):
        pass

    # methods from server
    def game_over(self, game_won):
        # Show the win/lose LED pattern, stop the controller loop and shut
        # down the MQTT connection.
        if game_won:
            print("Game over! You won!")
            # output for winning
            self.ser.write(chr(64+8+4))
            self.ser.write(chr(32+1))
        else:
            print("Game over! You lost!")
            # output for lost
            self.ser.write(chr(64+32+16))
            self.ser.write(chr(32+1))
        self.game_on = False
        self.client.loop_stop()
        self.client.disconnect()

    def vessel_hit(self, lives):
        # Flash a pattern that encodes the number of lives left.
        if lives == 2:
            print("Two lifes left")
            self.ser.write(chr(64+8+4))
        elif lives == 1:
            self.ser.write(chr(64+2+1))
            print("One life left!")
        else:
            self.ser.write(chr(64+32+16))

    def ping_received(self, rel_dist):
        # rel_dist from 0 - 1: 0 - 0 distance, 1 - max distnace on map
        # changing distance diode behaviour
        # Blink a diode ten times; the blink period shrinks as the enemy
        # gets closer.  NOTE(review): this blocks the MQTT callback thread
        # for up to ~10 * rel_dist seconds.
        print("ping received " + str(rel_dist))
        t = rel_dist/2.0
        for i in xrange(10):
            self.ser.write(chr(32+1))
            time.sleep(t)
            self.ser.write(chr(32))
            time.sleep(t)

    def warning(self, value):
        # warning has value from 0-31 depending on time spent in restricted area
        pass

    # methods to server
    def change_direction(self, orientation_change):
        # orientation change from 0-63
        print("Changed direction to " + str(orientation_change))
        self.client.publish(main_topic + "/" + Topics.direction, self.message.get_direction_msg(orientation_change, self.id))

    def fire(self):
        # send to server information you fired
        print("Fired!")
        self.client.publish(main_topic + "/" + Topics.weapon, self.message.get_fire_msg(self.id))

    def send_ping(self):
        # sending ping to enemy vessel
        print("Ping sent!")
        self.client.publish(main_topic + "/" + Topics.sonar_out, self.message.get_sonarout_msg(self.id))

    def subscribe_on_topics(self):
        # Subscribe to every subtopic of the main game topic.
        self.client.subscribe(main_topic + "/+", 0)

    def handle_methods(self):
        # Wire the MQTT callbacks; specific topics get dedicated handlers,
        # everything else falls through to on_message.
        self.client.on_message = self.on_message
        self.client.message_callback_add(main_topic + "/" + Topics.life, self.on_message_life)
        self.client.message_callback_add(main_topic + "/" + Topics.warning, self.on_message_warning)
        self.client.message_callback_add(main_topic + "/" + Topics.game_state, self.on_message_game_state)
        self.client.message_callback_add(main_topic + "/" + Topics.sonar_in, self.on_message_sonar_in)
        self.client.on_connect = self.on_connect
        self.client.on_publish = self.on_publish
        self.client.on_subscribe = self.on_subscribe

    def controller_loop(self):
        # Background thread: poll the serial controller one byte at a time
        # and translate button/knob input into server messages.
        self.ser.write(chr(128+32+16+8+4))
        print "start"
        while self.game_on:
            cc = self.ser.read(1)
            if len(cc) > 0:
                ch = ord(cc)
                print ch
                # logic reading input from controller
                if ControllerUtil.is_button_1_pressed(ch):
                    self.fire()
                if ControllerUtil.is_button_2_pressed(ch):
                    self.send_ping()
                if ControllerUtil.get_knob_position(ch) is not None:
                    self.change_direction(ControllerUtil.get_knob_position(ch))

    #def sonar_loop(self):
     #   while self.game_on

    def __init__(self):
        # Pick a random id, open the serial controller link, connect to the
        # broker, register, start the controller thread, then block in the
        # MQTT network loop until game_over() disconnects us.
        self.id = random.randrange(0, 1000, 1)
        self.message = Message.Message()
        self.ser = serial.Serial('/dev/ttyS0', 38400, timeout=1)
        self.game_on = True
        self.client = mqtt.Client(str(self.id), userdata=str(self.id))
        self.distance = 1
        print("Client created")
        self.handle_methods()
        self.client.connect("192.168.17.52")
        self.subscribe_on_topics()
        self.client.publish(main_topic+"/"+Topics.registering, str(self.id))
        thread = Thread(target=self.controller_loop, args=())
        thread.start()
        print("debug")
        sys.stdout.flush()
        # loop_forever() blocks here until disconnect; join() runs after.
        self.client.loop_forever()
        thread.join()


client = Client()
53d67df26ea5e4b491f0c8951eab0025e94c03b7 | Python | dbconfession78/interview_prep | /leetcode/14_longest_common_prefix.py | UTF-8 | 1,568 | 3.78125 | 4 | [] | no_license | """
Write a function to find the longest common prefix string amongst an array of strings.
If there is no common prefix, return an empty string "".
Example 1:
Input: ["flower","flow","flight"]
Output: "fl"
Example 2:
Input: ["dog","racecar","car"]
Output: ""
Explanation: There is no common prefix among the input strings.
Note:
All given inputs are in lowercase letters a-z.
"""
from sgk_test import test
class Solution():
    def longest_common_prefix(self, strs):
        """Return the longest common prefix of *strs*, or '' when none.

        BUGFIX: this method was an unimplemented practice stub returning
        None, which made every check in main() fail; it now walks the
        strings column by column and stops at the first disagreement (or
        when the shortest string ends).
        """
        prefix = []
        for column in zip(*strs):
            if len(set(column)) != 1:
                break
            prefix.append(column[0])
        return ''.join(prefix)

    def longest_common_prefix_PASSED(self, strs):
        """Earlier accepted solution, kept for reference."""
        common = ''
        i = 0
        if len(strs) == 0:
            return common
        short_len = min([len(s) for s in strs])
        while i < short_len:
            win = strs[0][:i+1]
            if not all([s.startswith(win) for s in strs]):
                break
            common = win
            i += 1
        return common
def main():
    # Regression checks via the project's ``test(expected, actual)`` helper;
    # they exercise mixed-length lists, empty input, single strings and
    # pairs with/without a shared prefix.
    test("Stu", Solution().longest_common_prefix(["StuartA", "StufartB", "StusmartC",
                                                  "StudyartD", "StudenttartE", "StupiddartF"]))
    test("", Solution().longest_common_prefix([""]))
    test("a", Solution().longest_common_prefix(["a"]))
    test("", Solution().longest_common_prefix(["",""]))
    test("c", Solution().longest_common_prefix(["c","c"]))
    test("fl", Solution().longest_common_prefix(["flower","flow", "flight"]))
if __name__ == '__main__':
    main()
| true |
fec4aaaa79a96677673c14ec804a11439ca637a6 | Python | yehudit96/coreferrability | /classifiers/significance_test/create_data_for_AP_significance_test.py | UTF-8 | 2,262 | 2.8125 | 3 | [] | no_license | import os
import sys
import random
import argparse
import _pickle as cPickle
from random import choices
from tqdm import tqdm
random.seed(1)
from svm_classifier import *
# Command-line interface.  Note: arguments are parsed at module import time,
# so importing this module requires the flags below to be supplied.
parser = argparse.ArgumentParser(description='Creating data for statistical significance tests')
parser.add_argument('--rules_path', type=str,
                    help=' The path to the rules test set')
parser.add_argument('--classifier', type=str,
                    help=' The path to the classifier')
parser.add_argument('--out_dir', type=str,
                    help='Output folder')

args = parser.parse_args()
def test_significance():
    """
    Bootstrap comparison of two rule-ranking systems.

    Loads the pickled test rules and the trained classifier, scores every
    rule, then draws 1000 bootstrap resamples; for each resample it records
    the Average Precision of (a) the chirps-score ranking and (b) the
    classifier ranking, writing the two score lists to
    ``<out_dir>/a_scores.txt`` and ``<out_dir>/b_scores.txt``.

    NOTE(review): ``pickle``, ``np`` and ``AP`` are not imported in this
    module directly -- presumably they arrive via
    ``from svm_classifier import *``; confirm before touching the imports.
    """
    with open(args.rules_path, 'rb') as f_rules:
        data = pickle.load(f_rules)
    with open(args.classifier, 'rb') as f_clf:
        clf = pickle.load(f_clf)
    rules_scores = {}  # NOTE(review): appears unused -- confirm before removing
    rules, x, y = zip(*data)
    # Probability of the positive class (label == 1) for every rule.
    pred = clf.predict_proba(x)[:, np.where(clf.classes_ == 1)[0][0]]
    # Each entry: (gold label, chirps score = feature index 4, clf score).
    rules = list(zip(y, map(lambda a: a[4], x), pred))
    chirps_scores = []
    clf_scores = []
    for i in tqdm(range(1000)):
        # Bootstrap: resample the rules with replacement, same size.
        selected = choices(rules, k=len(rules))
        chirps_sort = sorted(selected, key=lambda r:r[1], reverse=True)
        clf_sort = sorted(selected, key=lambda r:r[2], reverse=True)
        chirps_AP = AP(map(lambda r: r[0], chirps_sort))
        clf_AP = AP(map(lambda r: r[0], clf_sort))
        chirps_scores.append(str(chirps_AP))
        clf_scores.append(str(clf_AP))
    if not os.path.exists(args.out_dir):
        os.mkdir(args.out_dir)
    with open(os.path.join(args.out_dir, 'a_scores.txt'), 'w') as f_a:
        f_a.write('\n'.join(chirps_scores))
    with open(os.path.join(args.out_dir, 'b_scores.txt'), 'w') as f_b:
        f_b.write('\n'.join(clf_scores))
def main():
    """
    Entry point: run the bootstrap resampling procedure that produces the
    paired Average-Precision score files used for the statistical
    significance test between the two ranking systems.

    NOTE(review): the original docstring described a CoNLL-scorer pipeline,
    apparently copied from a related script; this module only resamples
    rule sets and writes AP scores for systems A (chirps) and B (the
    classifier).
    """
    test_significance()

if __name__ == '__main__':
    main()
c88f845952f56e96430ad1829f1f19a7f522e973 | Python | YifanXu1999/AI-Learning | /Trust Region Policy Optimization/TRPO Project/CartPole A2C/agent.py | UTF-8 | 2,296 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 20:12:19 2020
@author: yifanxu
"""
from model import Actor
from model import Critic
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import hp
import torch.distributions as distributions
import gym
from collections import deque
import numpy as np
from trainer import train_actor
from trainer import train_critic
from GAE import calculate_returns
from GAE import calculate_advantages
class Agent:
def __init__(self, actor, critic):
self.actor = actor
self.critic = critic
self.critic_optimizer = optim.Adam(self.critic.parameters(), 0.01)
def select_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0)
action_probs = self.actor(state)
dist = distributions.Categorical(action_probs)
action = dist.sample()
#print(action_probs)
return action
def update_policy(self, states, actions, rewards, masks):
states = torch.FloatTensor(states)
values = self.critic(states).squeeze(-1)
train_actor(self.actor, states, actions, rewards, masks, values)
train_critic(self.critic_optimizer, states, actions, rewards, masks, values)
env = gym.make('CartPole-v1')
SEED = 1234
env.seed(SEED)
torch.manual_seed(SEED)
input_dim = env.observation_space.shape[0]
output_dim = 2
hidden_dim = hp.hidden_layer_size
actor = Actor(input_dim, hidden_dim, output_dim)
critic = Critic(input_dim, hidden_dim)
agent = Agent(actor, critic)
for i in range(1000):
memory = deque()
state = env.reset()
eps_reward = 0
for t in range(500):
action = agent.select_action(state)
next_state, reward, done, _ = env.step(action.item())
mask = 1 - done
memory.append([state, action, reward, mask])
state = next_state
eps_reward += reward
#print(next_state, action)
if(done):
break
print('iter', i, eps_reward)
memory = np.array(memory)
states = np.vstack(memory[:, 0])
actions = memory[:, 1]
actions = torch.cat([ action for action in actions])
rewards = memory[:, 2]
masks = memory[:, 3]
agent.update_policy(states, actions, rewards, masks)
| true |
46b24c38c760ebad267d81f82c14929bd28ab235 | Python | pzmrzy/LeetCode | /python/reverse_integer.py | UTF-8 | 403 | 3.0625 | 3 | [] | no_license | class Solution(object):
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
flag = 1
if (x < 0):
x = -1 * x
flag = -1
result = 0
while (x > 0):
result *= 10
result += x % 10
x /= 10
if (result > math.pow(2,31)):
result = 0
return flag * result
| true |
bd71839e7eb8b1c6840fe8f23ce4bd1edcb50918 | Python | LuminousCL/Iris_kmeans | /Iris_kmeans_decisiontree.py | UTF-8 | 804 | 3.234375 | 3 | [] | no_license | #############鸢尾花数据集的决策树分析#############
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
iris = load_iris()
x_train,x_test,y_train,y_test = train_test_split(iris.data,iris.target, test_size=0.3)
clf = DecisionTreeClassifier()
clf.fit(x_train,y_train)
predict_target = clf.predict(x_test)
print(sum(predict_target == y_test)) #预测结果与真实结果比对
print(metrics.classification_report(y_test,predict_target))
print(metrics.confusion_matrix(y_test,predict_target))
L1 = [n[0] for n in x_test]
L2 = [n[1] for n in x_test]
plt.scatter(L1,L2, c=predict_target,marker='x')
plt.title('DecisionTreeClassifier')
plt.show()
| true |
52f37705e21ffa6dd7e6a2513743e5bfd7df4101 | Python | Marist-CMPT120-FA19/-Jim-Moehringer--Project-3 | /tree.py | UTF-8 | 233 | 3.6875 | 4 | [] | no_license | def main():
height=int(input("Enter the height of the tree: "))
h= height
hashtag= 1
while h >0:
print(' ' * (h-1) + "#" * (hashtag))
h -=1
hashtag +=2
print(" " * (height-1) + "#")
main()
| true |
1f4663b8540a7d1ebe098eb1cfa7c1b7b9c0ca3e | Python | LeytonYu/python_algorithm | /sword art online/图/广度优先遍历.py | UTF-8 | 399 | 3.515625 | 4 | [] | no_license | def bfs(graph,start):
explored,queue=[],[start]
explored.append(start)
while queue:
v=queue.pop(0)
for i in graph[v]:
if i not in explored:
explored.append(i)
queue.append(i)
return explored
G = {'0': ['1', '2'],
'1': ['2', '3'],
'2': ['3', '5'],
'3': ['4'],
'4': [],
'5': []}
print(bfs(G, '0'))
| true |
920de34e0d208ffe4c83b3dfb75a93004efb5119 | Python | ehdusjenny/music | /music/data/musicnet.py | UTF-8 | 9,398 | 2.59375 | 3 | [] | no_license | #import config
import numpy as np
from tqdm import tqdm
import torch
import os
import csv
import pickle
from intervaltree import IntervalTree
import scipy
from scipy.io import wavfile
import pretty_midi
class Memoize(object):
def __init__(self, file_name, func):
self.file_name = file_name
self.func = func
def get(self):
if os.path.isfile(self.file_name):
with open(self.file_name,'rb') as f:
return pickle.load(f)
else:
val = self.func()
with open(self.file_name,'wb') as f:
pickle.dump(val,f)
return val
##################################################
# Dataset
##################################################
class MusicNetDataset(torch.utils.data.Dataset):
def __init__(self, musicnet_dir, train=True, transforms=None,
window_size=400):
self.transforms = transforms
url = 'https://homes.cs.washington.edu/~thickstn/media/musicnet.tar.gz'
if train:
self.labels_dir = os.path.join(musicnet_dir,'train_labels')
self.data_dir = os.path.join(musicnet_dir,'train_data')
labels_file_name = 'train_labels.pkl'
else:
self.labels_dir = os.path.join(musicnet_dir,'test_labels')
self.data_dir = os.path.join(musicnet_dir,'test_data')
labels_file_name = 'test_labels.pkl'
assert os.path.isdir(self.labels_dir)
assert os.path.isdir(self.data_dir)
labels = Memoize(
labels_file_name,
lambda: self.process_labels(self.labels_dir))
self.labels = labels.get()
self.keys = list(self.labels.keys())
self.data = list(self.labels.keys())
def __getitem__(self,index):
index = self.data[index]
wav_file_name = os.path.join(self.data_dir,'%d.wav'%index)
rate,data = wavfile.read(wav_file_name)
output = {'interval_tree': self.labels[index], 'audio': data}
if self.transforms is not None:
return self.transforms(output)
return output
def __len__(self):
return len(self.data)
def process_labels(self, path):
trees = dict()
for item in tqdm(os.listdir(path)):
if not item.endswith('.csv'): continue
uid = int(item[:-4])
tree = IntervalTree()
with open(os.path.join(path,item), 'r') as f:
reader = csv.DictReader(f, delimiter=',')
for label in reader:
start_time = int(label['start_time'])
end_time = int(label['end_time'])
instrument = int(label['instrument'])
note = int(label['note'])
start_beat = float(label['start_beat'])
end_beat = float(label['end_beat'])
note_value = label['note_value']
tree[start_time:end_time] = (instrument,note,start_beat,end_beat,note_value)
trees[uid] = tree
return trees
class DiscretizedMusicNetDataset(MusicNetDataset):
def __init__(self, musicnet_dir, train=True, transforms=None,
min_window_size=2048, overlap=2048, points_per_song=10):
super().__init__(musicnet_dir=musicnet_dir, train=train, transforms=transforms)
"""
window 1 window 2 window 3
|--------------| |--------------|
|--------------|
|..| |..|
overlap 1 overlap 2
- There are `points_per_song-1` overlapping regions
- i.e. Overlaps account for `overlap*(points_per_song-1)` points
- The remaining regions are split evenly between `points_per_song` windows
- i.e. `(L-overlap*(points_per_song-1))/points_per_song`
- window size = (L-overlap*(points_per_song-1))/points_per_song
- stride = window size - overlap
"""
data = []
for k,l in self.labels.items():
length = l.end()
window_size = (length-overlap*(points_per_song-1))//points_per_song
stride = window_size-overlap
if window_size < min_window_size:
window_size = min_window_size
stride = (window_size*points_per_song-length)//points_per_song
for i in range(points_per_song):
data.append((k,i*stride,i*stride+window_size))
self.data = data
def __getitem__(self,index):
song_id,start,end = self.data[index]
wav_file_name = os.path.join(self.data_dir,'%d.wav'%song_id)
rate,data = wavfile.read(wav_file_name)
output = {
'interval_tree': self.labels[song_id],
'audio': data,
'start': start,
'end': end
}
if self.transforms is not None:
return self.transforms(output)
return output
##################################################
# Transforms
##################################################
class RandomCrop(object):
def __init__(self, window_size=10000):
self.window_size = window_size
def __call__(self,sample):
interval_tree = sample['interval_tree']
audio = sample['audio']
start = sample.get('start',0)
end = sample.get('end',len(audio))
length = end-start
start = start+np.random.randint(0,length-self.window_size+1)
end = start+self.window_size
intervals = interval_tree[(start+end)//2]
audio = audio[start:end]
return {
**sample,
'intervals': intervals, 'audio': audio, 'start': start, 'end': end
}
class CentreCrop(object):
def __init__(self, window_size=10000):
self.window_size = window_size
def __call__(self,sample):
interval_tree = sample['interval_tree']
audio = sample['audio']
start = sample.get('start',0)
end = sample.get('end',len(audio))
length = end-start
start = start+(length+self.window_size)//2
end = start+self.window_size
intervals = interval_tree[(start+end)//2]
audio = audio[start:end]
return {
**sample,
'intervals': intervals, 'audio': audio, 'start': start, 'end': end
}
class CropAudio(object):
def __call__(self,sample):
audio = sample.get('audio')
start = sample.get('start')
end = sample.get('end')
return {
**sample,
'audio': audio[start:end]
}
class CropIntervals(object):
def __init__(self, window_size, stride):
self.window_size = window_size
self.stride = stride
def __call__(self,sample):
interval_tree = sample['interval_tree']
start = sample.get('start')
end = sample.get('end')
intervals = []
window_start = start
while window_start+self.window_size <= end:
intervals.append(interval_tree[window_start+self.window_size//2])
window_start += self.stride
return {
**sample,
'intervals': intervals
}
class IntervalsToNoteNumbers(object):
def __call__(self,sample):
intervals = sample['intervals']
if type(intervals) is list:
note_numbers = []
for i in intervals:
note_numbers.append([])
for (start,end,(instrument,note,measure,beat,note_value)) in i:
note_numbers[-1].append(note)
else: # if type(intervals) is set
note_numbers = []
for (start,end,(instrument,note,measure,beat,note_value)) in intervals:
note_numbers.append(note)
output = sample.copy()
output['note_numbers'] = note_numbers
return output
class NoteNumbersToVector(object):
def __call__(self, sample):
note_numbers = sample['note_numbers']
assert type(note_numbers) is list
def convert(note_numbers):
vector = torch.zeros([128])
for n in note_numbers:
vector[n] = 1
return vector
output = sample.copy()
if len(note_numbers) > 0 and type(note_numbers[0]) is list:
output['note_numbers'] = torch.stack([convert(n) for n in note_numbers])
else:
output['note_numbers'] = convert(note_numbers)
return output
def interval_tree_to_midi(interval_tree,rate=44100):
midi = pretty_midi.PrettyMIDI()
cello_program = pretty_midi.instrument_name_to_program('Cello')
instrument = pretty_midi.Instrument(program=cello_program)
for interval in interval_tree.all_intervals:
start,end,(_,note,_,_,_) = interval
note = pretty_midi.Note(velocity=100, pitch=note,
start=start/rate, end=end/rate)
instrument.notes.append(note)
midi.instruments.append(instrument)
return midi
if __name__=="__main__":
from generator import Compose, NoteNumbersToVector, Spectrogram, ToTensor
transforms = Compose([
RandomCrop(),
IntervalsToNoteNumbers(),
NoteNumbersToVector(),
ToTensor(),
Spectrogram()
])
dataset = MusicNetDataset('/home/howard/Datasets/musicnet',
train=False,transforms=transforms)
| true |
bcbbf956b590b9d4b50444cd6e1103fcfd46d9e5 | Python | ygperez/CIS-024C-HW | /helperfunctions.py | UTF-8 | 270 | 3.71875 | 4 | [] | no_license | import math
import sys
def add(x,y):
print "Add = ", x+y
def diff(x,y):
print "Subtract = ", x-y
def product(x,y):
print "Product = ", x*y
def greatest(x,y):
if x>y:
print "x is greater than y"
if y>x:
print "y is greater than x" | true |
8285fb7c6412df9a64a5c43f5aefa4dc8dfb5f7f | Python | Arcprm4/HelloGit | /ABC121/C.py | UTF-8 | 294 | 2.8125 | 3 | [] | no_license | def MAP(): return list(map(int,input().split()))
n,m = MAP()
lst = [MAP() for _ in range(n)]
lst = sorted(lst,key = lambda x:x[0])
q = 0
ans = 0
for i in lst:
if q+i[1]<m:
q+=i[1]
ans+=i[0]*i[1]
else:
ans += (m-q)*i[0]
break
print(ans) | true |
af9059fa3577c707612c39263d59fdfddfda65e6 | Python | enesgrahovac/kaggle | /nlp_tutorial/first.py | UTF-8 | 1,132 | 3.5 | 4 | [] | no_license | import spacy
# Load the small English pipeline and run it over a sample sentence.
nlp = spacy.load('en')
doc = nlp("Yo! My name is Enes, hello computer! How're you doing today?")
for token in doc:
    print(token)
# NOTE(review): the .format(...) call below is a no-op -- the string is an
# f-string with no placeholders, so the three arguments are ignored.
print(f"Token \t\tLemma \t\tStopword".format('Token', 'Lemma', 'Stopword'))
print("-"*40)
for token in doc:
    print(f"{str(token)}\t\t{token.lemma_}\t\t{token.is_stop}")
### Matching words:
## To match individual words you use a Matcher
## To match a list of terms, use a PhraseMatcher
from spacy.matcher import PhraseMatcher
# attr='LOWER' makes the phrase matching case-insensitive.
matcher = PhraseMatcher(nlp.vocab, attr='LOWER')
terms = ['Galaxy Note', 'iPhone 11', 'iPhone XS', 'Google Pixel']
patterns = [nlp(text) for text in terms]
matcher.add("TerminologyList", patterns)
# Borrowed from https://daringfireball.net/linked/2019/09/21/patel-11-pro
text_doc = nlp("Glowing review overall, and some really interesting side-by-side "
               "photography tests pitting the iPhone 11 Pro against the "
               "Galaxy Note 10 Plus and last year’s iPhone XS and Google Pixel 3.")
# Each match is a (match_id, start_token, end_token) triple.
matches = matcher(text_doc)
print(matches)
match_id, start, end = matches[0]
print(nlp.vocab.strings[match_id], text_doc[start:end])
01f31dbdd5584992d6359773a976bf7f5726fe62 | Python | yabirgb/hashcode | /2018/main.py | UTF-8 | 1,543 | 3.28125 | 3 | [
"MIT"
] | permissive | import sys
from car import Car
from plan import *
from ride import Ride
def get_info(filename):
    """Parse a HashCode-style input file.

    Returns:
        params: list of ints from the first (header) line
        rides: list of (coordinates, earliest, latest) tuples, where
            coordinates is the 4-tuple (origin row, origin col,
            destination row, destination col), earliest is the earliest
            pick-up step and latest the latest arrival step
    """
    with open(filename) as fh:
        # header line with the problem parameters
        header = [int(tok) for tok in fh.readline().split()]
        rides = []
        # one ride per remaining line: 4 coordinates + earliest + latest
        for raw in fh:
            fields = [int(tok) for tok in raw.split()]
            rides.append((tuple(fields[:4]), fields[-2], fields[-1]))
    return header, rides
if __name__ == "__main__":
    """
    uso:
    python3 main.py nombre
    """
    # Usage: python3 main.py <name> -- reads input/<name>.in, writes <name>.out
    filename = sys.argv[1]
    data, ridesRaw = get_info('input/{}.in'.format(filename))
    # Header parameters: grid size, fleet size, ride count, bonus, time horizon.
    rows, columns, nCars, nRides, bonus, TIME = data
    cars = [Car() for _ in range(nCars)]
    rides = [Ride(elem) for elem in ridesRaw]
    # Largest possible trip length on the grid (Manhattan distance bound).
    maxDistance = rows + columns
    results = plan(rides, cars, bonus, TIME, maxDistance)
    # Output: one line per car, "<number of rides> <ride ids...>".
    with open("{}.out".format(filename), "w") as f:
        for l in results:
            l = list(map(str, l))
            line = str(len(l)) + ' ' + ' '.join(l) + '\n'
            f.write(line)
| true |
b692bd7714ffeb2d415bab7c184af949ffb5ea9b | Python | leosamuel64/MPSI | /IPT/1-Python/TP2/ex11.py | UTF-8 | 297 | 3.578125 | 4 | [] | no_license | def sentenceToWords(sentence):
liste = []
word = ""
for i in range (0,len(sentence)):
if sentence[i] != " ":
word += sentence[i]
else :
liste.append(word)
word = ""
liste.append(word)
return liste
print(sentenceToWords("Arthur le glomorphe à rayure marche à vive allure"))
| true |
a3508b93ffe2d46d5af3ba5a259f3c411ac48319 | Python | daniloBlera/ProjetoPSD-RSI | /StreamProcessing/post_structure_processing.py | UTF-8 | 2,488 | 2.53125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from datetime import datetime
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.mqtt import MQTTUtils
import pika
# Local Spark context with 5 worker threads; 1-second streaming micro-batches.
sc = SparkContext("local[5]", "Jesus Christ that's Jason Bourne")
ssc = StreamingContext(sc, 1)
ssc.checkpoint("/tmp/spark-streaming-checkpoints")
# NOTE(review): this module-level RabbitMQ connection/channel is never used
# below (push_scores_to_queue opens its own) and is never closed -- confirm
# whether it can be removed.
parameters = pika.ConnectionParameters(host='localhost', port=5672)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
# Input stream: MQTT topic consumed by Spark Streaming.
broker_url = 'tcp://localhost:1883'
exchange_name = "amq.topic"
queue_name = "SPARK_POST_STRUCTURES"
post_structures = MQTTUtils.createStream(ssc, broker_url, queue_name)
# Field separator used by the incoming event format.
sep = ">>"
def push_scores_to_queue(time, rdd):
    """Serialize one micro-batch of (key, score) pairs and publish it
    to the SPARK_PROCESSING_RESPONSE queue on RabbitMQ.

    Pairs are rendered as "key,value" joined with ">>".
    """
    print("======{}======".format(time))
    elements = None
    if rdd.isEmpty():
        print("-EMPTY-")
    else:
        elements = rdd.map(lambda pair: (str(pair[0]), str(pair[1]))).collect()
        content = []
        for e in elements:
            print(e)
            content.append(','.join(e))
        elements = '>>'.join(content)
    # A fresh connection per batch; foreachRDD callbacks run on the driver.
    parameters = pika.ConnectionParameters(host='localhost', port=5672)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    exchange_name = "amq.topic"
    queue_name = "SPARK_PROCESSING_RESPONSE"
    channel.exchange_declare(exchange=exchange_name, type='topic', durable=True)
    # NOTE(review): when the RDD is empty this publishes the literal string
    # "None"; confirm the consumer expects that. Also the connection itself
    # is never closed -- only the channel.
    channel.basic_publish(
        exchange=exchange_name,
        routing_key=queue_name,
        body=str(elements)
    )
    channel.close()
def get_days_difference_between(timestamp1, timestamp2):
    """Return (timestamp1 - timestamp2).days for two '+0000' ISO-like stamps."""
    fmt = "%Y-%m-%dT%H:%M:%S.%f+0000"
    first = datetime.strptime(timestamp1, fmt)
    second = datetime.strptime(timestamp2, fmt)
    delta = first - second
    return delta.days
def update_event(new_event, last_event):
    """State-update helper: first element of new_event if any, else last_event."""
    return new_event[0] if new_event else last_event
if __name__ == "__main__":
    print("Processamento de pontuações iniciado")
    # Pipeline (as written): split each event on ">>", build a composite key
    # from fields of the event, fan each sub-event out, then score it as
    # 10 minus its age in days and sum the scores per key.
    # NOTE(review): the exact event wire format (positions of timestamp and
    # post id around '|' and '>>') is not documented anywhere in this file --
    # confirm against the producer before changing these lambdas.
    active_posts = post_structures.map(
        lambda evt: (evt.split(sep)[1].split('|')[1] + sep + evt.split(sep)[0],
                     evt.split(sep)[1:])).flatMapValues(
        lambda evt: evt).map(
        lambda pair: (pair[0].split(sep)[0],
                      (pair[0].split(sep)[1], pair[1].split('|')[0]) )
        ).mapValues(
        lambda pair: 10 - get_days_difference_between(pair[0], pair[1])
        ).reduceByKey(lambda x, y: x+y)
    # Publish every micro-batch of scores to RabbitMQ.
    active_posts.foreachRDD(push_scores_to_queue)
    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
3944e7245529b6dc0f22ff555bc6894795f19f42 | Python | jenjouhung/DHD_Classifier | /train.py | UTF-8 | 3,739 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import pickle
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
import dataload
import param
from model import birnn
#from model import att_birnn
WORD2IDX_FILE = param.WORD_BASED_WORD2IDX_FILE if param.USE_WORD_DATA else param.CHAR_BASED_WORD2IDX_FILE
EMBEDDING_FILE = param.WORD_BASED_EMBEDDING_FILE if param.USE_WORD_DATA else param.CHAR_BASED_EMBEDDING_FILE
# Load the training and test data sets.
train_sentences, train_tags, test_sentences, test_tags = dataload.load()
# Load the word-to-index mapping word2idx, taken from the CBETA word
# embedding's vocabulary.
# NOTE(review): wordf is never closed -- consider a with-statement.
wordf = open(WORD2IDX_FILE, 'rb')
word2idx = pickle.load(wordf)
n_words = len(word2idx) # total vocabulary size
tags = param.TAGS
n_tags = len(tags) # number of tag classes: the two labels 1 and 0
# Build the training/test matrices for deep learning.
# Pad every text to a fixed length using Keras' pad_sequences.
from keras.preprocessing.sequence import pad_sequences
X_train = [[word2idx[w] if w in word2idx else word2idx['UNK'] for w in s] for s in train_sentences] # unseen words are mapped to the index of UNK (unknown word)
X_train = pad_sequences(maxlen=param.SENTENCE_MAX_LEN, sequences=X_train, padding="post",value=n_words - 1)
y_train = np.array(train_tags)
# Shuffle the training data: the raw data is all-positive in the first half
# and all-negative in the second, so without shuffling the validation split
# would contain only one class.
train_index = np.arange(len(y_train))
np.random.shuffle(train_index)
X_train = X_train[train_index,:]
y_train = y_train[train_index]
# Build the test matrices.
X_test = [[word2idx[w] if w in word2idx else word2idx['UNK'] for w in s] for s in test_sentences]
X_test = pad_sequences(maxlen=param.SENTENCE_MAX_LEN, sequences=X_test, padding="post",value=n_words - 1)
y_test = np.array(test_tags)
embedding_matrix = None
# If using pre-trained word vectors, load the word2vec/fasttext model.
if param.USE_PRETRAINED_EMBEDDING:
    from gensim import models
    w2v_model = None
    if param.USE_WORD_DATA:
        w2v_model = models.FastText.load(EMBEDDING_FILE)
    else:
        w2v_model = models.Word2Vec.load(EMBEDDING_FILE)
    embedding_matrix = np.zeros((n_words, param.EMBEDDING_DIMENSION))
    for w, i in word2idx.items():
        if w in w2v_model:
            embedding_vector = w2v_model[w]
            embedding_matrix[i] = embedding_vector
    # Give unknown words a random vector.
    unk_index = word2idx['UNK']
    embedding_matrix[unk_index] = np.random.rand(param.EMBEDDING_DIMENSION)
# Build the neural-network model.
model = birnn.create_model(n_words, n_tags, param.SENTENCE_MAX_LEN, param.LSTM_UNITS, param.EMBEDDING_DIMENSION, embedding_matrix)
#model = att_birnn.create_model(n_words, n_tags, param.SENTENCE_MAX_LEN, param.LSTM_UNITS, param.EMBEDDING_DIMENSION, embedding_matrix)
# Print a per-layer model summary.
model.summary()
# Configure early stopping.
callback_funcs = []
early_stopping = EarlyStopping(monitor='val_acc', patience=param.EARLY_STOPPING_PATIENCE, mode='max')
if param.USE_EARLY_STOPPING:
    callback_funcs.append(early_stopping)
# Start training.
history = model.fit(X_train, y_train, batch_size=param.BATCH_SIZE, epochs=param.EPOCHS, validation_split=0.2, verbose=1, callbacks=callback_funcs)
# Save the trained model to disk.
model.save(param.OUTPUT_MODEL_FILE)
# Evaluate on the test set to estimate accuracy.
p = model.evaluate(X_test, y_test, verbose=1)
for i in range(len(model.metrics_names)):
    print("{0}:{1}".format(model.metrics_names[i], p[i]))
| true |
bf41f22545e55b76fbed7dbfe7d5f91ff392bab7 | Python | JackRogersMacro/ABM_Macro | /SimpleMacro3.py | UTF-8 | 17,602 | 2.671875 | 3 | [] | no_license | """
Simple Macroeconomic Model with Satisficing Behaviour
Authors: Hyun Chang Yi and Sarunas Girdenas
LastModified: 04/06/2014
"""
# from datetime import datetime # import this to calculate script execution time
# startTime=datetime.now()
from random import randrange, choice, randint
from random import uniform as uniform_range
from numpy.random import uniform
import matplotlib.pyplot as plt
from scipy import mean
from math import log
#In this model we have three types of agents: Households, Firms and Central Bank
# Bank sets nominal interest rate, operate capital market, clear payments and records economy
class Bank:
    """Central bank / financial intermediary.

    Sets the nominal interest rate by a trembling-hand satisficing search,
    channels household saving into firm capital, clears payments and
    records the economy's time series (p, f, i, r, c, a).
    """
    def __init__(self,TrblActn=0.05, TrbSatLv=0.05, LAMBDA=0.05, gamma=0.5,
                    inertia=0.9, interest=0.05, periods=2, delta=0.005, min_irate=0.001):
        # TrblActn/TrbSatLv: tremble probabilities for action and for the
        # satisficing level; LAMBDA/gamma control satisficing adjustment.
        self.TrblActn, self.TrbSatLv, self.LAMBDA, self.gamma, self.inertia = TrblActn, TrbSatLv, LAMBDA, gamma, inertia
        self.periods, self.delta, self.min_irate = periods, delta, min_irate
        self.lqdty, self.irate = 0, interest
        # Recorded series: price, firm profit, interest, credit, consumption, asset.
        self.p, self.f, self.i, self.r, self.c, self.a = [], [], [], [], [], []
        self.inflation = 0
        self.price = 1
        self.sl_output, self.sl_infltn, self.val_output, self.val_infltn = 0, 0, 0, 0
        self.ActionChanged = True
    def set_interest(self, household, firm):
        """Take a random local step on the nominal rate and announce it."""
        current_irate = self.irate
        Tremble = uniform() < self.TrblActn
        Inertia = uniform() < self.inertia
        satisficing_output = self.val_output >= self.sl_output
        satisficing_infltn = self.val_infltn >= self.sl_infltn
        if not(Inertia):
            # BUGFIX/cleanup: the original branched on
            # (Tremble or not satisficing_output or not satisficing_infltn)
            # but executed the *same* statement in both branches, so the test
            # was dead; collapse to one unconditional local step. The random
            # draws above are kept so the draw sequence is unchanged.
            self.irate = uniform_range(max(self.min_irate, self.irate-(self.delta)), self.irate+(self.delta))
        if current_irate == self.irate:
            self.ActionChanged = False
        # Adjust the satisficing levels (with their own tremble).
        Tremble = uniform() < self.TrbSatLv
        lamda = uniform()**self.gamma
        if not(Tremble):
            self.sl_output += lamda*self.LAMBDA*min(self.val_output - self.sl_output, 0)
            self.sl_infltn += lamda*self.LAMBDA*min(self.val_infltn - self.sl_infltn, 0)
        else:
            self.sl_output += lamda*(self.val_output-self.sl_output)
            self.sl_infltn += lamda*(self.val_infltn-self.sl_infltn)
        household.irate = firm.irate = self.irate
    def channel(self, household, firm):
        """Borrow from the household and lend to the firm; any saving the
        firm does not demand stays in the household's asset."""
        self.lqdty = min(household.saving, firm.capital_demand)
        household.asset += max(0, household.saving - self.lqdty)
        firm.capital = self.lqdty
    def transfer_evaluate(self, household, firm):
        """Pay deposit return and firm profit to the household, record the
        period, then evaluate the economy on output and price stability."""
        household.asset = 0.3 + household.asset*(1.01) + (1 + self.irate)*self.lqdty + firm.profit
        output = household.consumption/(self.price*1.0)
        # Inflation penalty: negative squared deviation of the current price
        # from its recent mean (zero until enough history exists).
        if len(self.p) >= self.periods:
            infltn = -(self.price - mean(self.p[-self.periods:]))**2
        else:
            infltn = 0
        self.p.append(self.price)
        if len(self.p) <= 2:
            self.inflation = 0
        else:
            self.inflation = (self.p[-1]-self.p[-2])/self.p[-2]
        self.f.append(firm.profit)
        self.i.append(self.irate*100)
        self.r.append(self.lqdty)
        self.c.append(output)
        self.a.append(household.asset)
        # Evaluate the current policy: reset valuations when the action
        # changed, otherwise smooth them toward the realized outcome.
        rho = uniform()**self.gamma
        if not(self.ActionChanged):
            self.val_output += rho*(output - self.val_output)
            self.val_infltn += rho*(infltn - self.val_infltn)
        else:
            self.val_output, self.val_infltn = output, infltn
class Household:
    """Household agent.

    For each discrete interest-rate node it keeps a consumption share
    c[node] of current assets, adapted by trembling-hand satisficing
    search against per-node satisficing levels (sl_*) and valuations (val_*).
    """
    def __init__(self, bank, firm, TrblActn=0.05, TrbSatLv=0.05, LAMBDA=0.05, gamma=0.5,
                    inertia=0.5, asset=5, delta=0.1, irate_unit=0.005, irate_max=0.1):
        self.TrblActn, self.TrbSatLv, self.LAMBDA, self.gamma, self.inertia = TrblActn, TrbSatLv, LAMBDA, gamma, inertia
        self.delta = delta
        self.irate_unit, self.irate_max = irate_unit, irate_max
        self.irate_nodes = range(int(irate_max/irate_unit))
        self.asset = asset
        self.price = self.saving = self.irate = 1
        self.consumption = 1
        self.c = [0.3 for n in self.irate_nodes]    # consumption share per rate node
        # BUGFIX: the original chained assignment
        #     self.sl_c = self.val_c = self.sl_asset = self.val_asset = [0 ...]
        # bound all four names to the SAME list object, so satisficing levels
        # and valuations could never diverge. Give each its own list.
        self.sl_c = [0 for n in self.irate_nodes]
        self.val_c = [0 for n in self.irate_nodes]
        self.sl_asset = [0 for n in self.irate_nodes]
        self.val_asset = [0 for n in self.irate_nodes]
        self.ActionChanged = True
    def irate_node(self, irate):
        """Map a continuous interest rate onto a discrete decision node."""
        irate = min(self.irate_max - self.irate_unit, irate)
        return int(irate/self.irate_unit)
    def consume(self):
        """Choose how much to consume and save at the current rate node."""
        irate_node = self.irate_node(self.irate)
        self.consumption = self.c[irate_node]
        Tremble = uniform() < self.TrblActn
        Inertia = uniform() < self.inertia
        satisficing_consuption = self.val_c[irate_node] >= self.sl_c[irate_node]
        satisficing_asset = self.val_asset[irate_node] >= self.sl_asset[irate_node]
        if not(Inertia):
            if not(Tremble):
                if (not(satisficing_consuption) and (satisficing_asset)):
                    # consumption unsatisfying, asset fine: search upward
                    self.c[irate_node] = uniform_range(self.c[irate_node], min(1, self.c[irate_node]*(1+self.delta)))
                elif (satisficing_consuption and not(satisficing_asset)):
                    # asset unsatisfying: search downward
                    self.c[irate_node] = uniform_range(max(0, self.c[irate_node]*(1-self.delta)), self.c[irate_node])
                elif (not(satisficing_consuption) and not(satisficing_asset)):
                    # both unsatisfying: search in both directions
                    self.c[irate_node] = uniform_range(max(0, self.c[irate_node]*(1-self.delta)), min(1, self.c[irate_node]*(1+self.delta)))
            else:
                # tremble: random local move regardless of satisfaction
                self.c[irate_node] = uniform_range(max(0, self.c[irate_node]*(1-self.delta)), min(1, self.c[irate_node]*(1+self.delta)))
        # self.consumption still holds the previous share at this point.
        if self.consumption == self.c[irate_node]:
            self.ActionChanged = False
        self.consumption = self.asset*self.c[irate_node]
        self.saving = self.asset - self.consumption
        self.asset = 0
        # Adjust the satisficing levels (with their own tremble).
        Tremble = uniform() < self.TrbSatLv
        lamda = uniform()**self.gamma
        if not(Tremble):
            self.sl_c[irate_node] += lamda*self.LAMBDA*min(self.val_c[irate_node] - self.sl_c[irate_node],0)
            self.sl_asset[irate_node] += lamda*self.LAMBDA*min(self.val_asset[irate_node] - self.sl_asset[irate_node],0)
        else:
            self.sl_c[irate_node] += lamda*(self.val_c[irate_node] - self.sl_c[irate_node])
            self.sl_asset[irate_node] += lamda*(self.val_asset[irate_node] - self.sl_asset[irate_node])
    def evaluate(self):
        """Update the valuations of the chosen action from realized
        consumption and next-period asset."""
        irate_node = self.irate_node(self.irate)
        if self.asset <= 0:
            # ported from a Python-2 print statement; same text on stdout
            print('negative asset ' + str(self.asset))
            self.asset = 10
        rho = uniform()**self.gamma
        if not(self.ActionChanged):
            self.val_c[irate_node] += rho*(self.consumption/self.price - self.val_c[irate_node])
            self.val_asset[irate_node] += rho*(self.asset - self.val_asset[irate_node])
        else:
            self.val_c[irate_node] = self.consumption/self.price
            self.val_asset[irate_node] = self.asset
class Firm:
    """Firm agent.

    Borrows capital, sets a per-interest-rate-node price by trembling-hand
    satisficing search (SatLv = satisficing level, Val = valuation), and
    produces to meet household demand up to capacity.
    """
    def __init__(self, bank, TrblActn=0.05, TrbSatLv=0.05, LAMBDA=0.05, gamma=0.5, capital_power=0.4,
                    inertia=0.9, delta=0.05, techs=None, irate_unit=0.005, irate_max=0.1, depreciate=0.05):
        # BUGFIX: techs previously defaulted to the mutable list [1, 1],
        # shared across instances; use a None sentinel instead.
        techs = [1, 1] if techs is None else techs
        self.TrblActn,self.TrbSatLv,self.LAMBDA, self.gamma, self.inertia = \
            TrblActn, TrbSatLv, LAMBDA, gamma, inertia
        self.capital, self.irate, self.profit, self.delta, self.techs = \
            100, 0, 0, delta, techs
        self.tech = choice(techs)    # productivity drawn anew each period
        self.depreciate = depreciate
        self.capital_power = capital_power
        self.irate_unit, self.irate_max = irate_unit, irate_max
        self.irate_nodes = range(int(irate_max/irate_unit))
        self.capital_demand = 0
        self.markup = [uniform() for n in self.irate_nodes]
        self.price = [uniform_range(0.9, 1.0) for n in self.irate_nodes]
        self.k = 10
        # BUGFIX: the original `self.SatLv = self.Val = [0 ...]` bound both
        # names to the SAME list, so the valuation could never diverge from
        # its satisficing level. Give each its own list.
        self.SatLv = [0 for n in self.irate_nodes]
        self.Val = [0 for n in self.irate_nodes]
        self.ActionChanged = True
    def irate_node(self, irate):
        """Map a continuous interest rate onto a discrete decision node."""
        irate = min(self.irate_max - self.irate_unit, irate)
        return int(irate/self.irate_unit)
    def borrow(self, bank):
        """Demand the profit-maximizing capital given rate, price and tech."""
        self.capital_demand = (self.irate/(self.capital_power*bank.price*self.tech))**(1/(self.capital_power-1))
    def set_price(self, bank, household):
        """Pick the price for the current rate node and announce it."""
        irate_node = self.irate_node(self.irate)
        self.current_price = self.price[irate_node]
        Tremble = uniform() < self.TrblActn
        Inertia = uniform() < self.inertia
        Satisficing = self.Val[irate_node] >= self.SatLv[irate_node]
        if not(Inertia):
            if Tremble or not(Satisficing):
                # random local step around the current price
                self.price[irate_node] = uniform_range(self.price[irate_node]*(1-self.delta), self.price[irate_node]*(1+self.delta))
        if self.current_price != self.price[irate_node]:
            self.ActionChanged = True
        # Announce the (floored) price to the rest of the economy.
        household.price = bank.price = max(0.01, self.price[irate_node])
        # Adjust the satisficing level (with its own tremble).
        Tremble = uniform() < self.TrbSatLv
        lamda = uniform()**self.gamma
        if not(Tremble):
            self.SatLv[irate_node] += lamda*self.LAMBDA*min(self.Val[irate_node] - self.SatLv[irate_node], 0)
        else:
            self.SatLv[irate_node] += lamda*(self.Val[irate_node] - self.SatLv[irate_node])
    def produce_evaluate(self, bank, household):
        """Produce up to capacity, compute profit, and evaluate the
        current pricing decision."""
        irate_node = self.irate_node(self.irate)
        self.tech = choice(self.techs)
        # Revenue capacity implied by the capital-demand rule above.
        capacity = bank.price*self.tech*(self.irate/(self.capital_power*bank.price*self.tech))**(self.capital_power/(self.capital_power-1))
        if household.consumption > capacity:
            # excess demand is refunded to the household as asset
            household.asset += household.consumption - capacity
            household.consumption = capacity
        self.profit = household.consumption - (self.irate + self.depreciate)*self.capital
        rho = uniform()**self.gamma
        if self.ActionChanged:
            self.Val[irate_node] = self.profit
        else:
            self.Val[irate_node] += rho*(self.profit - self.Val[irate_node])
# --- Simulation driver -------------------------------------------------
b = Bank(TrblActn=0.01, TrbSatLv=0.01, LAMBDA=0.01)
f = Firm(b, TrblActn=0.01, TrbSatLv=0.01, LAMBDA=0.01)
h = Household(b,f, TrblActn=0.01, TrbSatLv=0.01, LAMBDA=0.01)
Time = 10000
for t in range(Time):
    b.set_interest(h, f)
    # NOTE(review): the bank's endogenous rate choice is immediately
    # overwritten by this fixed two-regime policy (0.05 then 0.08 at
    # t=5000) -- apparently a regime-switch experiment; confirm.
    if t < 5000:
        b.irate = h.irate = f.irate = 0.05
    else:
        b.irate = h.irate = f.irate = 0.08
    f.borrow(b)
    # if t % 20 == 0:
    f.set_price(b, h)
    h.consume()
    b.channel(h, f)
    f.produce_evaluate(b, h)
    b.transfer_evaluate(h, f)
    # h.evaluate()
# NOTE(review): h.evaluate() is commented out above, so the household's
# valuations (val_c / val_asset) are never updated in this run.
data = [b.p, b.f, b.r, b.c, b.i, b.a]
data_name = ['Price', 'Profit', 'Capital' ,'Consumption', 'Nominal Interest', 'Asset']
# fig, ax0 = plt.subplots()
# plot_args = {'markersize' : 8, 'alpha' : 0.6}
# ax0.plot(data[3], label=data_name[3], **plot_args)
# ax0.plot([i*20 for i in data[4]], label=data_name[4], **plot_args)
# ax0.plot(data[5], label=data_name[5], **plot_args)
# ax0.legend(loc='upper left')
# plt.show()
# Plot the six recorded series in a 2x3 grid.
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3, figsize=(12, 8))
#fig.tight_layout()
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.2)
axes =[ax1, ax2, ax3, ax4, ax5, ax6]
plot_args = {'markersize' : 8, 'alpha' : 0.6}
ylims = [[0, 1.2], [0, 10], [0, 10], [0, 10], [0, 10], [5, 10]]
for i,d in enumerate(data):
    axes[i].set_axis_bgcolor('white')
    axes[i].plot(d)
    #axes[i].plot(d, 'o', markerfacecolor='orange', label=data_name[i], **plot_args)
    #axes[i].legend(loc='upper left')
    axes[i].set_title('{}'.format(data_name[i]))
    # only the price panel gets a fixed y-range
    if i in [0, ]:
        axes[i].set_ylim(ylims[i])
plt.show()
# blocks = 100
# b_name = ['b_'+i for i in data_name]
# b_data = [[] for i in data_name]
# for j, d in enumerate(data):
# for i in range(len(d)/blocks):
# b_data[j].append(sum(d[blocks*i:blocks*(i+1)])/(blocks*1.0))
# fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3, figsize=(12, 8))
# #fig.tight_layout()
# fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.2, hspace=0.2)
# axes =[ax1, ax2, ax3, ax4, ax5, ax6]
# plot_args = {'markersize' : 8, 'alpha' : 0.6}
# ylims = [[0, 100], [-2, 2], [0, 0.5], [0, 100], [0, 0.5], [0, 200000]]
# for i, d in enumerate(b_data):
# axes[i].set_axis_bgcolor('white')
# axes[i].plot(d, 'o', markerfacecolor='orange')
# #axes[i].plot(d, 'o', markerfacecolor='orange', label=data_name[i], **plot_args)
# #axes[i].legend(loc='upper left')
# axes[i].set_title('{}'.format(b_name[i]))
# #axes[i].set_ylim(ylims[i])
# plt.show()
# print 'Computation time:', datetime.now()-startTime, 'seconds.' | true |
f57092e40334c6d4ea4fb7ca2a785a85e8ff667a | Python | jackyjsy/SGGAN | /data_loader.py | UTF-8 | 4,733 | 2.75 | 3 | [
"MIT"
] | permissive | import torch
import os
import random
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
from PIL import Image
import numpy as np
def to_categorical(y, num_classes):
    """One-hot encode *y*.

    Returns a uint8 array of shape ``np.shape(y) + (num_classes,)`` with a
    1 at each element's class index. (Removed the commented-out debug
    prints from the original.)
    """
    indices = np.asarray(y)
    return np.eye(num_classes, dtype='uint8')[indices]
class CelebDataset(Dataset):
    """CelebA images + segmentation maps + selected attribute labels.

    The metadata file's first line is a sample count, the second line the
    attribute names, and each remaining line "<filename> <attr values...>".
    The first 1999 shuffled lines form the test split, the rest the train
    split.
    """
    def __init__(self, image_path, seg_path, metadata_path, transform, transform_seg1, transform_seg2, mode):
        self.image_path = image_path
        self.seg_path = seg_path
        self.transform = transform
        self.transform_seg1 = transform_seg1
        self.transform_seg2 = transform_seg2
        self.mode = mode
        self.lines = open(metadata_path, 'r').readlines()
        self.num_data = int(self.lines[0])
        self.attr2idx = {}
        self.idx2attr = {}
        print ('Start preprocessing dataset..!')
        self.preprocess()
        print ('Finished preprocessing dataset..!')
        # The effective length is the size of the selected split.
        if self.mode == 'train':
            self.num_data = len(self.train_filenames)
        elif self.mode == 'test':
            self.num_data = len(self.test_filenames)
    def preprocess(self):
        """Parse attribute names and split the shuffled lines into
        train/test filename and label lists."""
        attrs = self.lines[1].split()
        for i, attr in enumerate(attrs):
            self.attr2idx[attr] = i
            self.idx2attr[i] = attr
        self.selected_attrs = ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young']
        self.train_filenames = []
        self.train_labels = []
        self.test_filenames = []
        self.test_labels = []
        lines = self.lines[2:]
        # NOTE(review): a fresh shuffle per run means the train/test split is
        # not reproducible across processes -- confirm this is intended.
        random.shuffle(lines) # random shuffling
        for i, line in enumerate(lines):
            splits = line.split()
            filename = splits[0]
            values = splits[1:]
            label = []
            for idx, value in enumerate(values):
                attr = self.idx2attr[idx]
                if attr in self.selected_attrs:
                    if value == '1':
                        label.append(1)
                    else:
                        label.append(0)
            if (i+1) < 2000:
                self.test_filenames.append(filename)
                self.test_labels.append(label)
            else:
                self.train_filenames.append(filename)
                self.train_labels.append(label)
    def __getitem__(self, index):
        """Return (image, seg indices, one-hot seg, label) for one sample."""
        if self.mode == 'train':
            image = Image.open(os.path.join(self.image_path, self.train_filenames[index]))
            seg = Image.open(os.path.join(self.seg_path, self.train_filenames[index][:-3]+'png'))
            label = self.train_labels[index]
        elif self.mode in ['test']:
            image = Image.open(os.path.join(self.image_path, self.test_filenames[index]))
            # BUGFIX: the original indexed self.train_filenames here, so in
            # test mode the segmentation map was taken from an unrelated
            # training image instead of the test image being served.
            seg = Image.open(os.path.join(self.seg_path, self.test_filenames[index][:-3]+'png'))
            label = self.test_labels[index]
        seg = self.transform_seg1(seg)
        num_s = 7    # number of segmentation classes
        seg_onehot = to_categorical(seg, num_s)
        seg=np.asarray(seg,dtype=np.long)
        return self.transform(image), torch.LongTensor(seg), self.transform_seg2(seg_onehot)*255.0, torch.FloatTensor(label)
    def __len__(self):
        """Size of the selected split."""
        return self.num_data
def get_loader(image_path, seg_path, metadata_path, crop_size, image_size, batch_size, dataset='CelebA', mode='train'):
    """Build and return a DataLoader for CelebA (with seg maps) or RaFD.

    Images are center-cropped to crop_size, resized to image_size and
    normalized to [-1, 1]; shuffling is enabled only in 'train' mode.
    """
    if mode == 'train':
        transform = transforms.Compose([
            transforms.CenterCrop(crop_size),
            transforms.Scale(image_size),
            # transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    else:
        transform = transforms.Compose([
            transforms.CenterCrop(crop_size),
            transforms.Scale(image_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    # BUGFIX: the segmentation transforms were defined only inside the
    # 'train' branch, so any other mode raised NameError when constructing
    # CelebDataset below. They are mode-independent -- build them always.
    transform_seg1 = transforms.Compose([
        transforms.CenterCrop(crop_size),
        transforms.Scale(image_size)])
    transform_seg2 = transforms.Compose([
        transforms.ToTensor()])
    if dataset == 'CelebA':
        dataset = CelebDataset(image_path, seg_path, metadata_path, transform, transform_seg1, transform_seg2, mode)
    elif dataset == 'RaFD':
        dataset = ImageFolder(image_path, transform)
    shuffle = False
    if mode == 'train':
        shuffle = True
    data_loader = DataLoader(dataset=dataset,
                             batch_size=batch_size,
                             shuffle=shuffle)
    return data_loader
e9d4d9ec84a2625c650510033e980fa4cb6de437 | Python | vancun/nifi_factory | /sandbox/explore_jinja2/basic_jinja.py | UTF-8 | 1,356 | 3.328125 | 3 | [] | no_license |
"""
>>> from jinja2 import Template
>>> tpl = Template(u'Greetings, {{ name }}! I am from {{ location }}.')
>>> tpl.render(name='Mr. Arda', location='Amsterdam')
'Greetings, Mr. Arda! I am from Amsterdam.'
Template variables could also be passed as a dictionary.
>>> tpl.render({'name': 'Arthur', 'location':'Stockholm'})
'Greetings, Arthur! I am from Stockholm.'
One could use FileSystemLoader, supplying search path.
>>> from os import path
>>> from jinja2 import Environment, FileSystemLoader
>>> env = Environment(loader = FileSystemLoader('{}/tpl/templates'.format(path.dirname(__file__))))
>>> tpl = env.get_template('greetings.html')
>>> tpl.render(name = 'Mr. Basar')
'Greetings, Mr. Basar!'
One could use PackageLoader. You need to supply package name and directory inside the package.
>>> from jinja2 import Environment, PackageLoader
>>> env = Environment(loader = PackageLoader('tpl', 'templates'))
>>> tpl = env.get_template('greetings.html')
>>> tpl.render(name = 'Mr. Basar')
'Greetings, Mr. Basar!'
>>> v = { 'name': 'Arda' }
Template could be included.
>>> tpl = env.get_template('with_include.html')
>>> tpl.render(v)
'Greetings, Arda!'
Using {% raw %}:
>>> tpl = Template('{% raw %}<li>{{ item }}</li>{% endraw %}')
>>> tpl.render(v)
'<li>{{ item }}</li>'
"""
if __name__ == "__main__":
    # Execute the doctest examples embedded in the module docstring above.
    import doctest
    doctest.testmod()
| true |
258cbf2c0b477c80736d41cf3c03066b0809f198 | Python | finiteautomata/tp-aa | /helpers.py | UTF-8 | 1,014 | 2.984375 | 3 | [] | no_license | #! coding: utf-8
"""Auxiliares varios."""
from sklearn.metrics import precision_score, accuracy_score, f1_score, recall_score, roc_auc_score
import pandas as pd
from data_builder import load_test_data
# Metric callables used by get_scores; each is invoked as scorer(y_test, y_pred).
# NOTE(review): roc_auc_score normally expects probability scores rather than
# hard class predictions -- confirm passing y_pred here is intended.
scores = [
    precision_score,
    accuracy_score,
    f1_score,
    recall_score,
    roc_auc_score
]
def add_prefix(d, prefix):
    u"""Return a new dict whose keys are *prefix* prepended to d's keys.

    Example: add_prefix({'k1': 1, 'k2': 2}, 'p__')
    gives {'p__k1': 1, 'p__k2': 2}.
    """
    # .items() works on both Python 2 and 3; the original .iteritems()
    # was Python-2-only.
    return dict((prefix + k, v) for (k, v) in d.items())
def get_scores(classifier, extractor):
    """Compute evaluation scores for *classifier* on the test data.

    Returns a one-row pandas DataFrame indexed by the classifier's class
    name, with one column per metric in the module-level ``scores`` list.
    """
    df, target = load_test_data()
    x_test = extractor.transform(df)
    y_test = target
    results = pd.DataFrame(index=[classifier.__class__.__name__])
    for other_scorer in scores:
        # NOTE(review): y_pred does not depend on the loop variable and is
        # recomputed every iteration -- it could be hoisted above the loop.
        y_pred = classifier.predict(x_test)
        # NOTE(review): .func_name is Python-2-only (Python 3: .__name__).
        results[other_scorer.func_name] = [other_scorer(y_test, y_pred)]
    return results
| true |
5eab3b4632de0ee5968842ebba1c87f3345b4173 | Python | AnasTaherGit/Electronic | /Arduino/PySerial/ReadValue.py | UTF-8 | 1,101 | 2.9375 | 3 | [] | no_license | import serial
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
def Voltage(x, in_min=0, in_max=1023, out_min=0, out_max=5):
    """Linearly map *x* from [in_min, in_max] to [out_min, out_max].

    Defaults convert a 10-bit ADC reading (0-1023) to a 0-5 V voltage.
    """
    span_out = out_max - out_min
    span_in = in_max - in_min
    return (x - in_min) * span_out / span_in + out_min
# Open the Arduino's serial port and prepare a live plot of the last
# 100 samples (10 s at 0.1 s per sample).
Arduino = serial.Serial('COM7', 9600)
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
DATA = []
time = [0.1 * k for k in range(0, 100)]
def animate(interv):
    """FuncAnimation callback: read one sample, slide the window, replot."""
    # NOTE(review): busy-wait until a byte is available -- blocks the GUI
    # thread; confirm this is acceptable for the animation interval used.
    while (not Arduino.inWaiting()):
        print("waiting data ...")
    print('reading ..')
    data = int(Arduino.readline())
    DATA.append(Voltage(data))
    print('readed')
    print("{0:.2f}".format(Voltage(data)))
    # drop the oldest sample so DATA keeps exactly 100 entries
    del DATA[0]
    ax1.clear()
    ax1.set_ylim([-6, 6])
    ax1.plot(time, DATA)
    print("plotted")
# Pre-fill the window with the first 100 raw samples, then convert them
# all to volts in one pass.
print("Collecting data ...")
while 1:
    if Arduino.inWaiting():
        data = int(Arduino.readline())
        # print("{0:.2f}".format(Voltage(data)))
        DATA.append(data)
        if len(DATA) == 100:
            DATA = list(map(Voltage, DATA))
            break
        # print(len(DATA))
ani = animation.FuncAnimation(fig, animate, interval=50)
plt.show()
| true |
6af269448984ee21b98f31ef3a9f32ac11939bc8 | Python | sklx2016/Repeat-Buyers-Prediction | /split_date.py | UTF-8 | 2,340 | 2.78125 | 3 | [
"MIT"
] | permissive | #-*-coding:utf-8-*-
'''
将usr_log按用户、商户和类别拆分为多个文件
'''
import csv
import os
'''
#记录已存在的date.csv
date_dictionary = {}
#将words写入date.csv文件最后一行,文件打开采用'a'模式,即在原文件后添加(add)
def writeByDate(date,words):
file_name = date+".csv"
os.chdir('../data/date/')
if not date_dictionary.has_key(date):
date_dictionary[date] = True
f = open(file_name,'a')
write = csv.writer(f)
write.writerow(['user_id','item_id','behavior_type','user_geohash','item_category','hour'])
write.writerow(words)
f.close()
else:
f = open(file_name,'a')
write = csv.writer(f)
write.writerow(words)
f.close()
os.chdir('../../preprocess/')
#主函数
def splitByDate():
# os.mkdir('../data/date')
f = open("../data/train_format1.csv")
rows = csv.reader(f)
rows.next()
for row in rows:
date = row[-1].split(" ")[0]
hour = row[-1].split(" ")[1]
words = row[0:-1]
words.append(hour)
writeByDate(date,words)
'''
def writeFile(files, outpath, types):
x_type = ''
for rowdata in files:
if types == "usr":
x_type = rowdata[0]
if types == "cate":
x_type = rowdata[2]
if types == "seller":
x_type = rowdata[3]
if x_type != '':
outfile_write = csv.writer(open(outpath.format(x_type), 'ab'))
outfile_write.writerow(rowdata)
#openfile.close()
if __name__ == "__main__":
files_input = "../data/user_log_format1.csv"
files_output_usr = "../data/usr_id/{}.csv"
files_output_seller_id = "../data/seller_id/{}.csv"
files_output_cate = "../data/category/{}.csv"
log_files = csv.reader(open(files_input, 'r'))
log_files.next()
#按用户id进行分类
log_files = csv.reader(open(files_input, 'r'))
log_files.next()
writeFile(log_files, files_output_usr, "usr")
log_files = csv.reader(open(files_input, 'r'))
log_files.next()
writeFile(log_files, files_output_seller_id, "seller")
log_files = csv.reader(open(files_input, 'r'))
log_files.next()
writeFile(log_files, files_output_cate, "cate")
| true |
bf4fc48a86c1dbffb1fab7ba4c6724eeb0f57429 | Python | mal2/Project-Simulation | /fusim16/PCA/pca.py | UTF-8 | 10,183 | 3.34375 | 3 | [] | no_license | import numpy as np
import scipy.spatial.distance as dst
import tempfile # For memory mapped class attributes
class PCA:
""" Apply principal component on a given data set
for the plain purpose of dimensionality reduction.
Attributes
----------
data: 2D numpy.ndarray
Data matrix of dimensions MxN with M features and N samples.
It must hold that N >= M.
m: int
Number of features of data
n: int
Number of samples of data
k: int
k-first features data should be projected onto. It must hold that 0 < k <= M.
p: float, optional, default=0.
Information preservation (0,1]. Ignored, if zero.
"""
def __init__(self, X, k, p=0.):
""" Constructor. """
# Assertions
assert type(X) is np.ndarray, "X is no numpy.ndarray"
assert len(X.shape) == 2, "X mishaped"
assert X.shape[0] <= X.shape[1], "Less observations than features"
assert type(k) is int, "k is no integer"
assert k > 0 and k <= X.shape[0], "Must hold that 0 < k <= M"
assert type(p) is float, "p is no float"
assert p >= 0. and p <= 1., "Must hold that 0 <= p <= 1"
self.data = X
self.k = k
self.p = p
self.m = self.data.shape[0]
self.n = self.data.shape[1]
def kPCA(self, sigma=0.5):
""" Apply PCA on linearly inseparable data using a gaussian radial basis
function kernel.
Parameters
----------
sigma:
Standard deviation of Gaussian radial basis function
Returns
-------
2D numpy.ndarray
Data of dimensions kxN, which has been projected on first k
dimensions with largest variances.
"""
# Compute distances between data points and store them within a (quadratic) matrix
m = dst.squareform(dst.pdist(self.data.T, "sqeuclidean"))
# Build kernel
kern = np.exp(-(1. / (2. * sigma * sigma)) * m)
# Center kernel
s = np.ones(kern.shape) / kern.shape[0]
kern = kern - s.dot(kern) - kern.dot(s) + s.dot(kern).dot(s)
# Ascending eigenvectors
val, vec = np.linalg.eigh(kern)
self.k = presQual(val[::-1], self.k, self.p)
# Return projected data
return np.fliplr(vec)[:,:self.k].T
def project(self, transMat):
""" Project data along k features with greatest variances.
Parameters
----------
transMat: numpy.ndarray
Transformation matrix of dimensions MxM
Returns
-------
2D numpy.ndarray
Data of dimensions kxN, which has been projected on first k
dimensions with largest variances.
"""
# Project and return data
return transMat[:self.k,:].dot(self.data)
def cov(self):
""" Apply PCA using covariance matrix and eigenvectors.
Returns
-------
2D numpy.ndarray
See project().
"""
# Center data
self.data -= np.mean(self.data, axis=1)[:, None]
# Compute covariance matrix
covMat = np.cov(self.data)
# Determine eigenvalues and eigenvectors (of form MxM)
eigVal, eigVec = np.linalg.eigh(covMat)
# Look at most important features
self.k = presQual(eigVal[::-1], self.k, self.p)
# Sort eigenvectors by eigenvalues
eigVec = eigVec[:, eigVal.argsort()[::-1]]
self.transMat = eigVec[:self.k,:] # Debug purpose regarding eigenvectors
# Return data, which has been projected on k-first eigenvectors
return self.project(eigVec.T)
def svd(self):
""" Apply PCA using singular value decomposition.
Returns
-------
2D numpy.ndarray
See project().
"""
# In case of SVD subtract mean off of data
self.data -= np.mean(self.data, axis=1)[:, None]
# Construct magic helper matrix according to paper
Y = self.data.T / np.sqrt(self.n - 1)
# Apply u,s,v = SVD, where v can be interpreted as transformation matrix;
# Important: Numpy's v is the transposed matrix of Octave/Matlab's v
eigVal, transMat = np.linalg.svd(Y)[1:]
# Adjust self.k to preserve a certain quality, if requested
self.k = presQual(eigVal**2, self.k, self.p)
# Project data along first k features with largest variances
return self.project(transMat)
class PCAMmap:
""" Apply PCA for memory mapped data for the purpose of dimensionality
reduction. It is assumed, that multiple square matrices of size MxM
will fit into memory. For member descriptions, see PCA.
"""
def __init__(self, X, k, p=0.):
# Assertions
assert type(X) is np.core.memmap, "X is no memmap-object"
assert len(X.shape) == 2, "X mishaped"
assert X.shape[0] <= X.shape[1], "Less observations than features"
assert X.shape[1] % X.shape[0] == 0, "In case of memmap, M needs to divide N evenly"
assert type(k) is int, "k is no integer"
assert k > 0 and k <= X.shape[0], "Must hold that 0 < k <= M"
assert type(p) is float, "p is no float"
assert p >= 0. and p <= 1., "Must hold that 0 <= p <= 1"
self.data = X
self.m = X.shape[0]
self.n = X.shape[1]
self.k = k
self.p = p
def kPCA(self, sigma=0.5):
""" Process data into kernel space, i.e. try to make linearly inseparable
data linearly separable.
Parameters
----------
sigma:
Standard deviation of Gaussian radial basis function
"""
raise Exception("kPCA() is yet to be implemented")
def project(self, transMat):
""" Project data along k features with greatest variances.
Parameters
----------
transMat: numpy.ndarray
Transformation matrix of dimensions MxM
Returns
-------
2D numpy.core.memmap
Data of dimensions kxM, which has been projected on first k
dimensions with largest variances.
"""
# Generate temp file for output
tmp = np.memmap(tempfile.TemporaryFile(), dtype="float64", mode="w+", shape=(self.k, self.n))
# Project and return data
self.transMat = transMat[0:self.k,:]
for i in range(0, self.n, self.m):
tmp[:, i:i+self.m] = self.transMat.dot(self.data[:, i:i+self.m])
return tmp
def cov(self):
""" Apply PCA using covariance matrix and eigenvectors.
Returns
-------
2D numpy.core.memmap
See project().
"""
# Compute mean along each feature
mean = np.zeros(self.m)
for i in range(0, self.n, self.m):
mean += np.sum(self.data[:, i:i+self.m], axis=1)
mean /= self.n
# Center data along features
for i in range(0, self.n, self.m):
self.data[:, i:i+self.m] -= mean[:, None]
# Build covariance matrix, which is assumed to fit into memory
covMat = np.zeros((self.m, self.m))
for i in range(0, self.n, self.m):
covMat += self.data[:, i:i+self.m].dot(self.data[:, i:i+self.m].T)
# Normalize
covMat *= (1. / (self.n - 1))
# Determine (already sorted) eigenvalues and eigenvectors
eigVal, eigVec = np.linalg.eigh(covMat)
# Look at most important features
self.k = presQual(eigVal[::-1], self.k, self.p)
# Sort eigenvectors by eigenvalues
eigVec = eigVec[:, eigVal.argsort()[::-1]]
# Return data, which has been projected on k-first eigenvectors
return self.project(eigVec.T)
def presQual(values, k, p):
""" Given a certain quality assure the selected first-k features
will preserve this quality. This adjusts self.k to a sufficient size.
Parameters
----------
values: numpy.ndarray
Eigenvalues in descending order
p: float
Quality preservation in terms of (0,1]
k: int
Previous first-k features
Returns
-------
int
First-k features which satisfy the quality preservation requirement
"""
x = (1. / np.sum(values))
if (p > 0.):
s = 0.
for idx, v in enumerate(values):
s += v
if s * x > p:
print "Quality preservation (", s * x, ") accomplished with k =", idx + 1
return idx + 1
else:
print "First", k, "features reflect", np.sum(values[:k]) * x, "of information."
return k
def pca(X, k, mode, p=0., sigma=0.5, useGpu=False, returnEigVec=False):
""" Method for use with module.
Parameters
----------
X: 2D numpy.ndarray / numpy.core.memmap
Data matrix of dimensions MxN with M features and N samples.
It must hold that N >= M.
k: int
First k features with greatest variances data should be projected onto
mode: str
Implementation method for PCA. Valid modes: 'cov', 'svd', 'kernel'.
Note: Only 'cov' is supported for numpy.core.memmap.
p: float, optional, default: 0.
Quality of information preservation in percent for use with mode="cov".
If zero, it is ignored.
sigma: float, optional, default: 0.5
Standard deviation for RBF kernel function
Returns
-------
2D numpy.ndarray / numpy.core.memmap
Projected data of dimensions kxN
Note: File underlying a numpy.core.memmap will be automatically removed
on program termination.
"""
# Exceptions in case of wrong parameters
assert any([m == mode for m in ["cov", "svd", "kernel"]]), "Invalid mode. Valid ones are cov, svd, kernel"
# Determine, which mode to use
pc = PCA(X, k, p) if type(X) is np.ndarray else PCAMmap(X, k, p)
if "cov" == mode:
if returnEigVec:
return pc.cov(), pc.transMat
else:
return pc.cov()
elif "kernel" == mode:
return pc.kPCA(sigma)
else:
return pc.svd()
| true |
daa5a3fe740eaf2a40689d9d94ea23c1f21059ed | Python | mendrugory/monkey-note-bot | /app/telegram/api.py | UTF-8 | 768 | 2.84375 | 3 | [
"MIT"
] | permissive | import json
import requests
from app.settings import TOKEN
TELEGRAM_URL_API = 'https://api.telegram.org/bot'
def __build_url(method):
url = '{}{}/{}'.format(TELEGRAM_URL_API, TOKEN, method)
return url
def __post(url, body, params=dict()):
"""
Internal post
:param url:
:param body:
:param params:
:return:
"""
try:
headers = {'Content-type': 'application/json'}
requests.post(url, data=body, params=params, headers=headers)
except Exception as e:
print(e)
def send_message(message):
"""
It sends a message to telegram
:param message: dict() whith "text" mandatory
:return:
"""
method = 'sendMessage'
url = __build_url(method)
__post(url, json.dumps(message))
| true |
630fa1acec386f39345d4d0213a3b18de4da65aa | Python | edwardsemisotov/homework | /snowfall.py | UTF-8 | 1,162 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import random
import simple_draw as sd
snowflake_list = []
def sd_spawn_snowflake():
x_point = random.randint(10, sd.resolution[0] - 10)
snowflake_len = random.randint(10, 40)
speed = snowflake_len / 5
return [x_point, sd.resolution[1], snowflake_len, speed]
def spawn_snowflakes(N):
for i in range(N):
snowflake_list.append(sd_spawn_snowflake())
def draw_snowflakes_color(color):
for snowflake in snowflake_list:
point = sd.get_point(*snowflake[0:2])
sd.snowflake(center=point, length=snowflake[2], color=color)
def shift_snowflakes():
for snowflake in snowflake_list:
snowflake[0] -= random.randint(11, 18) / 10
snowflake[1] -= snowflake[3]
def numb_reach_down_screen():
snowflake_down_list = []
for i in range(len(snowflake_list)):
snowflake = snowflake_list[i]
if snowflake[1] < 50:
snowflake_down_list.append(i)
return snowflake_down_list
def del_snowflakes(snowflake_del_list):
for i in sorted(set(snowflake_del_list), reverse=True):
del snowflake_list[i]
| true |
f20d19c45124aa335b2d78c22e19efd1cbb6e4ca | Python | countessellis/pythonpractice | /practicepython_ex7.py | UTF-8 | 100 | 2.984375 | 3 | [] | no_license | #!/usr/bin/python
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
print [ b for b in a if b % 2 == 0 ]
| true |
3dcc5654fd78d2980a40052edb5a69ad2738b39f | Python | williamjameshandley/williamjameshandley.github.io | /assets/students/timeline.py | UTF-8 | 2,495 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
from collections import Counter
from students import students
import matplotlib.pyplot as plt
import os
import datetime
import numpy as np
grey = '#bbbbbb'
plt.rcParams['axes.edgecolor'] = grey
plt.rcParams['xtick.color'] = grey
plt.rcParams['ytick.color'] = grey
plt.rcParams['ytick.color'] = grey
plt.rcParams['ytick.color'] = grey
students = [s for s in students if s.start <= datetime.date.today()]
students = sorted(students, key=lambda s: (s.start, min([(l.start, l.seniority) for l in s.levels])[1]))
colors = {'partiii': 'C1',
'phd': 'C0',
'postdoc': 'C2',
'summer': 'C3',
'mphil': 'C4'}
fig, ax = plt.subplots()
rects = {}
for i, student in enumerate(students):
weight = None
for l in student.levels:
start = l.start.toordinal()
if l.end is not None:
end = l.end.toordinal()
else:
end = datetime.date.today().toordinal()
if end >= datetime.date.today().toordinal():
weight = 'bold'
rect = plt.Rectangle((start, i), end-start, 1, fc=colors[l.key], ec='k')
rects[str(l)] = rect
ax.add_artist(rect)
ax.annotate(student.name, (student.start.toordinal()-10, i+0.5), va='center', ha='right', color=grey, weight=weight)
min_year = min([student.start.year for student in students])
date_labels = [datetime.date(i,1,1) for i in range(min_year+1,datetime.date.today().year+2)]
xticks = [d.toordinal() for d in date_labels]
ax.set_xticks(xticks)
ax.set_xticklabels([d.year for d in date_labels])
ax.set_xlim(min([student.start.toordinal() for student in students])-400, datetime.date.today().toordinal())
ax.set_ylim(0,i+1)
ax.set_yticks([])
starts = np.array([(min([l.start.toordinal() for l in s.levels]), i) for i, s in enumerate(students)])
academic_date_labels = [datetime.date(i,10,1).toordinal() for i in range(min_year+1,datetime.date.today().year+2)]
for date in academic_date_labels:
start = starts[starts[:,0] <= date][-1]
line, = ax.plot([date, date], [0, start[1]], color=grey, linestyle=':', zorder=-1)
labels, handles = np.transpose(list(rects.items()))
labels, handles = list(labels), list(handles)
labels += ['Academic Year']
handles += [line]
ax.legend(handles, labels, labelcolor=grey, framealpha=0.0)
png_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'students.png')
fig.set_size_inches(9,9)
fig.tight_layout()
fig.savefig(png_file, transparent=True)
| true |
835fb1084c7db85515eae6ce36c7bb31c2e5f5a0 | Python | rickgithacker/pythonclass | /quicktest.py | UTF-8 | 117 | 3 | 3 | [] | no_license | fruit='banana'
fl = len(fruit)
index = 0
while index < fl:
print index, fruit[index]
index = index +1
| true |
d9b765539ae1871986158ca9aedbcb180ab8c37f | Python | Tanvir-Aunjum-Sunny/gltf-blender-importer | /addons/io_scene_gltf/mesh.py | UTF-8 | 9,298 | 2.984375 | 3 | [
"MIT"
] | permissive | import bmesh
import bpy
def convert_coordinates(v):
"""Convert glTF coordinate system to Blender."""
return [v[0], -v[2], v[1]]
def primitive_to_mesh(op, primitive, name, layers, material_index):
"""Create a Blender mesh for a glTF primitive."""
attributes = primitive['attributes']
me = bpy.data.meshes.new(name)
# Early out if there's no POSITION data
if 'POSITION' not in attributes:
return me
positions = op.get('accessor', attributes['POSITION'])
edges = []
faces = []
# Generate the topology
mode = primitive.get('mode', 4)
if 'indices' in primitive:
indices = op.get('accessor', primitive['indices'])
else:
indices = range(0, len(positions))
# TODO: only mode TRIANGLES is tested!!
if mode == 0:
# POINTS
pass
elif mode == 1:
# LINES
# 1 3
# / /
# 0 2
edges = [tuple(indices[i:i+2]) for i in range(0, len(indices), 2)]
elif mode == 2:
# LINE LOOP
# 1---2
# / \
# 0-------3
edges = [tuple(indices[i:i+2]) for i in range(0, len(indices) - 1)]
edges.append((indices[-1], indices[0]))
elif mode == 3:
# LINE STRIP
# 1---2
# / \
# 0 3
edges = [tuple(indices[i:i+2]) for i in range(0, len(indices) - 1)]
elif mode == 4:
# TRIANGLES
# 2 3
# / \ / \
# 0---1 4---5
faces = [tuple(indices[i:i+3]) for i in range(0, len(indices), 3)]
elif mode == 5:
# TRIANGLE STRIP
# 1---3---5
# / \ / \ /
# 0---2---4
def alternate(i, xs):
ccw = i % 2 != 0
return xs if ccw else (xs[0], xs[2], xs[1])
faces = [
alternate(i, tuple(indices[i:i+3]))
for i in range(0, len(indices) - 2)
]
elif mode == 6:
# TRIANGLE FAN
# 3---2
# / \ / \
# 4---0---1
faces = [
(indices[0], indices[i], indices[i+1])
for i in range(1, len(indices) - 1)
]
else:
raise Exception('primitive mode unimplemented: %d' % mode)
# Not all the vertices in the accessor are necessarily used. Only those that
# the indices reference actually become part of the mesh. So we'll need to
# drop the unused ones and consequently relabel the vertices and indices.
used_vert_idxs = set(indices)
# If i is the blender vertex index, bl2gltf[i] is the glTF vertex index
# Don't forget to use this when you pick an attribute to assign to certain
# Blender vertex!
bl2gltf = [i for i, p in enumerate(positions) if i in used_vert_idxs]
# If i the glTF vertex index, bl2gltf[i] is the Blender index (or -1 if that
# vertex is not used)
gltf2bl = [-1] * len(positions)
for bl_idx, gltf_idx in enumerate(bl2gltf):
gltf2bl[gltf_idx] = bl_idx
# Put the positions in their Blender order (and convert coordinates while
# we're at it)
positions = [
convert_coordinates(p)
for i, p in enumerate(positions) if i in used_vert_idxs
]
# Put the topology in terms of Blender idxs
edges = [tuple(gltf2bl[x] for x in y) for y in edges]
faces = [tuple(gltf2bl[x] for x in y) for y in faces]
me.from_pydata(positions, edges, faces)
me.validate()
# Assign material to each poly
for polygon in me.polygons:
polygon.material_index = material_index
# Create the caller's requested layers; any layers needed by the attributes
# for this mesh will also be created, if they weren't created here, below.
for layer, names in layers.items():
for name in names:
if layer == 'vertex_colors': me.vertex_colors.new(name)
if layer == 'uv_layers': me.uv_textures.new(name)
if 'NORMAL' in attributes:
normals = op.get('accessor', attributes['NORMAL'])
for i, vertex in enumerate(me.vertices):
vertex.normal = convert_coordinates(normals[bl2gltf[i]])
k = 0
while 'COLOR_%d' % k in attributes:
layer_name = 'COLOR_%d' % k
if layer_name not in me.vertex_colors.keys():
me.vertex_colors.new(layer_name)
rgba_layer = me.vertex_colors[layer_name].data
colors = op.get('accessor', attributes[layer_name])
# Old Blender versions only take RGB and new ones only take RGBA
if bpy.app.version >= (2, 79, 4): # this bound is not necessarily tight
if colors and len(colors[0]) == 3:
colors = [color+[1] for color in colors]
else:
if colors and len(colors[0]) == 4:
print("your Blender version doesn't support RGBA vertex colors. Upgrade!")
colors = [color[:3] for color in colors]
for polygon in me.polygons:
for vert_idx, loop_idx in zip(polygon.vertices, polygon.loop_indices):
rgba_layer[loop_idx].color = colors[bl2gltf[vert_idx]]
k += 1
k = 0
while 'TEXCOORD_%d' % k in attributes:
layer_name = 'TEXCOORD_%d' % k
if layer_name not in me.uv_layers.keys():
me.uv_textures.new(layer_name)
uvs = op.get('accessor', attributes[layer_name])
uv_layer = me.uv_layers[layer_name].data
for polygon in me.polygons:
for vert_idx, loop_idx in zip(polygon.vertices, polygon.loop_indices):
uv = uvs[bl2gltf[vert_idx]]
uv_layer[loop_idx].uv = (uv[0], 1 - uv[1])
k += 1
# Assign joints/weights. We begin by collecting all the sets (multiple sets
# allow for >4 joint influences).
# TODO: multiple sets are untested!!
joint_sets = []
weight_sets = []
k = 0
while 'JOINTS_%d' % k in attributes and 'WEIGHTS_%d' % k in attributes:
joint_sets.append(op.get('accessor', attributes['JOINTS_%d' % k]))
weight_sets.append(op.get('accessor', attributes['WEIGHTS_%d' % k]))
k += 1
if joint_sets:
# Now create vertex groups. The only way I could find to set vertex
# groups was by round-tripping through a bmesh.
# TODO: find a better way?
bme = bmesh.new()
bme.from_mesh(me)
layer = bme.verts.layers.deform.new('Vertex Weights')
for i, vert in enumerate(bme.verts):
for joint_set, weight_set in zip(joint_sets, weight_sets):
for j in range(0, 4):
if weight_set[i][j] != 0:
vert[layer][joint_set[bl2gltf[i]][j]] = weight_set[bl2gltf[i]][j]
bme.to_mesh(me)
bme.free()
me.update()
return me
def create_mesh(op, idx):
mesh = op.gltf['meshes'][idx]
name = mesh.get('name', 'meshes[%d]' % idx)
primitives = mesh['primitives']
# We'll create temporary meshes for each primitive and merge them using
# bmesh.
# When we merge a mesh with eg. a vertex color layer with one without into
# the same bmesh, Blender will drop the vertex color layer. Therefore we
# make a pass over the primitives here collecting a list of all the layers
# we'll need so we can request they be created for each temporary mesh.
layers = {
'vertex_colors': set(),
'uv_layers': set(),
}
for primitive in primitives:
for kind, accessor_id in primitive['attributes'].items():
if kind.startswith('COLOR_'):
layers['vertex_colors'].add(kind)
if kind.startswith('TEXCOORD_'):
layers['uv_layers'].add(kind)
# Also, if any of the materials used in this mesh use COLOR_0 attributes, we
# need to request that that layer be created; else the Attribute node
# referencing COLOR_0 in those materials will produce a solid red color. See
# material.compute_materials_using_color0, which, note, must be called
# before this function.
use_color0 = any(
prim.get('material', 'default_material') in op.materials_using_color0
for prim in primitives
)
if use_color0:
layers['vertex_colors'].add('COLOR_0')
# Make a list of all the materials this mesh will need; the material on a
# poly is set by giving an index into this list.
materials = list(set(
op.get('material', primitive.get('material', 'default_material'))
for primitive in primitives
))
bme = bmesh.new()
for i, primitive in enumerate(mesh['primitives']):
blender_material = op.get('material', primitive.get('material', 'default_material'))
tmp_mesh = primitive_to_mesh(
op,
primitive,
name=name + '.primitives[i]',
layers=layers,
material_index=materials.index(blender_material)
)
bme.from_mesh(tmp_mesh)
bpy.data.meshes.remove(tmp_mesh)
me = bpy.data.meshes.new(name)
bme.to_mesh(me)
bme.free()
# Fill in the material list (we can't do me.materials = materials since this
# property is read-only).
for material in materials:
me.materials.append(material)
if op.smooth_polys:
for polygon in me.polygons:
polygon.use_smooth = True
me.update()
return me
| true |
ffa95541a193ce440badc3adcc084d78bee6e954 | Python | svmldon/IE_507_Modelling_lab | /LAB 05/lab05ex2b.py | UTF-8 | 974 | 3.234375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 17:45:21 2017
@author: svmldon
"""
import random
from math import floor, ceil
a=[]*12
rand = float(0)
q = 0
while q < 5000:
q = q + 1
rand = ceil(12*(random.random()))
if rand == 1:
a[0]+= 1
elif rand == 2:
a[1]+= 1
elif rand == 3:
a[2]+= 1
elif rand == 4:
a[3]+= 1
elif rand == 5:
a[4]+= 1
elif rand == 6:
a[5]+= 1
elif rand == 7:
a[6]+= 1
elif rand == 8:
a[7]+= 1
elif rand == 9:
a[8]+= 1
elif rand == 10:
a[9]+= 1
elif rand == 11:
a[10]+= 1
else:
a[11]+= 1
for i in range (0,12):
print(i+1," apears no. of times=",a[i])
b=(1,2.3,4,5,6,7,8,9,10,11,12)
import matplotlib.pyplot as plt
plt.hist(a,b) # arguments are passed to np.histogram
plt.title("Histogram with 'auto' bins")
plt.show()
| true |
bc1daf18bf1227cc083715724ad8099d47417d31 | Python | Roman43407/Sem-1 | /Lab 11/zad3.py | UTF-8 | 160 | 3.6875 | 4 | [] | no_license | import math
a = input("Podaj kąt")
sin = math.sin(int(a))
cos = math.cos(int(a))
tg = math.tan(int(a))
ctg = 1/tg
print(sin)
print(cos)
print(tg)
print(ctg) | true |
47d646ec13e426d6fca2afe6b920c7a0f33ba8c4 | Python | ride80/scrappy-functions | /Scrappy2.1.py | UTF-8 | 1,290 | 2.625 | 3 | [] | no_license | import bs4
from urllib2 import urlopen as uReq
from bs4 import BeautifulSoup as soup
my_url = 'https://www.newegg.com/global/se/Product/ProductList.aspx?Submit=ENE&N=100829626&IsNodeId=1&bop=And&PageSize=96&order=BESTMATCH'
#my_url_2 = 'https://www.newegg.com/Desktop-Graphics-Cards/SubCategory/ID-48/Page-2?Tid=7709&PageSize=96&order=BESTMATCH' ?????
print """fetching page this might take a while..."""
client = uReq(my_url)
#client = uReq(my_url_2) ???
page_html = client.read()
client.close()
print "...done"
page_soup = soup(page_html, "html.parser")
#defines where to look for info needed
containers = page_soup.findAll('div',{'class':'item-container'})
for container in containers:
brand = container.div.div.a.img["title"]
title_container = container.findAll("a", {"class":"item-title"})
product_name = title_container[0].text
shipping_container = container.findAll("li", {"class":"price-ship"})
shipping = shipping_container[0].text.strip()
price_container = container.findAll("li", {"class":"price-current"})
price = price_container[0].text.strip()
print("brand: " + brand)
print("product_name: " + product_name)
#print("shipping:{!s}".format(shipping if shipping: else "free") ????
print("shipping: " + shipping)
print("price: " + price)
| true |
01532951d2fba32b33a520778920d74e04b4ff6e | Python | JMGONB/Mirepositorio | /mi_proyecto_agosto/src/api/server.py | UTF-8 | 1,840 | 2.828125 | 3 | [] | no_license |
import os,sys
import json
import pandas as pd
from flask import Flask,render_template,redirect,request,jsonify
# ----------------------
# $$$$$$$ SERVER $$$$$$$$
app = Flask(__name__) #Inicializa el servidor
@app.route("/")
def default():
return "<h1>soy la ruta por defecto</h1>.<p>Añadir get_json?id= para obtener json</p>"
@app.route("/get_json", methods = ['GET'])
def get_json():
token = None
# Obtener ruta del archivo y almacenar en la variable
settings_file = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + "\\resources\\json\\output\\residentes_edad.json"
# Carga el json desde el archivo
with open(settings_file, "r") as json_file_readed:
field = json.load(json_file_readed)
if 'id' in request.args:
token = str(request.args['id'])
if token == 'L51391909':
return str(field)
else:
return "Error al introducir la clave. Prueba nuevamente"
def main():
print("STARTING PROCESS")
print(os.path.dirname(__file__))
# RUTA HASTA EL FICHERO JSON
settings_file = os.path.dirname(__file__) + "\\..\\..\\src\\api\\settings.json"
# ABRIR FICHERO EN MODO LECTURA Y CARGAR EN VARIABLE JSON READED
with open(settings_file, "r") as json_file_readed:
json_readed = json.load(json_file_readed)
# CARGA DE VARIABLES SERVER RUNNING Y SI ES TRUE CARGUE RESTO DE VARIABLES.
SERVER_RUNNING = json_readed["server_running"]
if SERVER_RUNNING:
DEBUG = json_readed["debug"]
HOST = json_readed["host"]
PORT_NUM = json_readed["port"]
app.run(debug=DEBUG, host=HOST, port=PORT_NUM)
else:
print("Server settings.json doesn't allow to start server." +
"Please, allow it to run it.")
if __name__ == "__main__":
main()
| true |
97f15bdeafc4d5900af5ec59f112b7a5525fc847 | Python | Dimen61/leetcode | /python_solution/DepthFirstSearch/98_ValidateBinarySearchTree.py | UTF-8 | 1,234 | 3.328125 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
# traversal_vals = []
# def in_order(root):
# if not root: return
# if root.left:
# in_order(root.left)
# traversal_vals.append(root.val)
# if root.right:
# in_order(root.right)
# in_order(root)
# for i in range(len(traversal_vals)-1):
# if traversal_vals[i] >= traversal_vals[i+1]:
# return False
# return True
traversal_vals = []
def in_order(root):
if not root: return
if root.left:
in_order(root.left)
traversal_vals.append(root.val)
if root.right:
in_order(root.right)
in_order(root)
for i in range(len(traversal_vals)-1):
if traversal_vals[i] >= traversal_vals[i+1]:
return False
return True
| true |
841735d89a2018ccfe55d7414963f3c08cca0c49 | Python | colinclement/varibayes | /varibayes/opt/adadelta.py | UTF-8 | 2,471 | 3.28125 | 3 | [
"MIT"
] | permissive | """
adadelta.py
author: Colin Clement
date: 2017-10-18
This is an implementation of the ADADELTA adaptic learning rate stochastic
gradient descent optimizer as described in https://arxiv.org/abs/1212.5701
"""
import numpy as np
class Adadelta(object):
def __init__(self, obj_grad_obj, rho=0.9, eps=1e-6):
"""
Adadelta stochastic gradient descent optimizer.
input:
obj_grad_obj: Function which takes D (int) parameters
and returns (objective, grad_objective), can take other args
rho: exponential decay rate
eps: regularization to prevent divide-by-zero
"""
self.obj_grad_obj = obj_grad_obj
self.rho = rho
self.eps = eps
def _reset(self, p0):
self.t = 0
self.obj_list = []
self.m = np.zeros(len(p0))
self.v = np.zeros(len(p0))
def step(self, params, args = ()):
"""
Take one step of Adadelta SGD.
input:
params: array of D floats
args: tuple of extra arguments to obj_grad_obj
"""
obj, g = self.obj_grad_obj(params, *args)
self.t += 1
self.v[:] = self.rho * self.v + (1 - self.rho) * g * g
delta = - np.sqrt((self.m + self.eps)/(self.v + self.eps)) * g
self.m[:] = self.rho * self.m + (1 - self.rho) * delta * delta
return delta, obj
def optimize(self, p0, itn = 1000, tol = 5E-8,
iprint = 0, args = ()):
"""
Run Adadelta SGD.
input:
p0: array of D floats to start optimization
itn : int number of iterations
tol : relative change in objective below which algorithm terminates
iprint : int for how often to print status of algorithm
args : tuple of extra arguments to obj_grad_obj
"""
self._reset(p0)
obj0, _ = self.obj_grad_obj(p0, *args)
for i in range(itn):
delta, obj = self.step(p0, args)
p0 += delta
try:
if i % iprint == 0:
print("Itn {:6d}: obj = {:8e}".format(i, obj))
except ZeroDivisionError as perror:
pass
if np.abs((obj-obj0)/obj0) < tol:
if iprint:
print("Relative change in objective less than tol")
break
self.obj_list += [obj]
obj0 = obj
return p0
| true |
ed6f2f054f13e094e2d57c45e24d2a0624a8ffd7 | Python | adamb70/RomUtilityScripts | /RomUtilityScripts/WorldGen/Environment/GenerateEnvironementFiles.py | UTF-8 | 1,381 | 2.5625 | 3 | [] | no_license | from .DataImporter import ProceduralItemGroupSheetHandler, GrowableItemGroupSheetHandler
def generate_item_groups(outfile='Output/ItemGroups.sbc', handler=None):
ss = ProceduralItemGroupSheetHandler() if not handler else handler
ss.write_item_groups(ss.get_item_group_dict(), outfile)
return outfile
def generate_growable_items(outfile='Output/GrowableEnvironmentItems.sbc', handler=None):
ss = GrowableItemGroupSheetHandler() if not handler else handler
ss.write_growable_items(ss.get_growable_items(), outfile)
return outfile
def generate_tree_items(outfile='Output/TreeEnvironmentItems.sbc', handler=None):
ss = GrowableItemGroupSheetHandler() if not handler else handler
ss.write_growable_items(ss.get_growable_items('TreeEnvironmentItems'), outfile)
return outfile
def generate_farmable_items(outfile='Output/FarmableEnvironmentItems.sbc', handler=None):
ss = GrowableItemGroupSheetHandler() if not handler else handler
ss.write_growable_items(ss.get_growable_items('FarmableEnvironmentItems', is_farmable=True), outfile)
return outfile
def generate_environment_files():
ss = GrowableItemGroupSheetHandler()
out1 = generate_growable_items(handler=ss)
out2 = generate_tree_items(handler=ss)
out3 = generate_farmable_items(handler=ss)
out4 = generate_item_groups()
return out1, out2, out3, out4
| true |
6b6759c8ad5f925f54d9bd605978f455ec070fc1 | Python | kariulele/epita-image | /cours/2020/ing2/bigdata/lesson7 Graphics/dash_fertility.py | UTF-8 | 8,510 | 2.609375 | 3 | [] | no_license | import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import numpy as np
import plotly.graph_objs as go
# Labels for the animation play/pause button — presumably toggled by a
# callback outside this chunk (TODO confirm against the rest of the file).
START = 'Start'
STOP = 'Stop'
def get_data():
    """Load the GDP, fertility, population and continent tables.

    Keeps only the countries present in all four sources and returns
    ``(df, continent)`` where *df* carries a two-level column index of
    (year, indicator) for years 1960..2010.
    """
    incomes = pd.read_excel('data/GDPpercapitaconstant2000US.xlsx', index_col=0).round()
    children = pd.read_excel('data/fertility.xlsx', index_col=0)
    population = pd.read_excel('data/population.xlsx', index_col=0).round()
    continent = pd.read_csv('data/countries_continent.csv', index_col=0)

    # Restrict every table to the countries common to all sources.
    common = sorted(set(incomes.index) & set(children.index)
                    & set(population.index) & set(continent.index))

    merged = pd.concat({'incomes': incomes.loc[common, '1960':'2010'],
                        'population': population.loc[common, '1960':'2010'],
                        'children': children.loc[common, '1960':'2010']},
                       axis='columns')
    # Reorder the column MultiIndex from (indicator, year) to (year, indicator).
    merged = merged.swaplevel(0, 1, axis='columns').sort_index(axis=1)
    return merged, continent.loc[common, :]
# Bubble colour used for each continent in the main scatter plot.
continent_colors = {'Asia':'yellow', 'Europe':'green', 'Africa':'brown',
                    'Oceania':'blue', 'Americas':'red'}
df, continent = get_data()
# First level of the column MultiIndex holds the year labels (1960..2010).
years = df.columns.levels[0]
app = dash.Dash()
# Page layout: title, main bubble chart with its side controls, a year
# slider with auto-play, then three per-country time-series panels.
app.layout = html.Div(children=[
    html.H3(children='World Stats'),
    html.Div('Move the mouse over a bubble to get information about the country'),
    # Row 1: main scatter plot plus the continent/scale controls.
    html.Div([
        html.Div([ dcc.Graph(id='main-graph'), ], style={'width':'90%', }),
        html.Div([
            # Which continents are drawn on the main chart.
            dcc.Checklist(
                id='crossfilter-which-continent',
                options=[{'label': i, 'value': i} for i in sorted(continent_colors.keys())],
                values=sorted(continent_colors.keys()),
                labelStyle={'display':'block'},
            ),
            html.P(id='placeholder'), # used when a callback should not act on another component
            html.Div('X scale'),
            # Linear or logarithmic x axis for the GDP scale.
            dcc.RadioItems(
                id='crossfilter-xaxis-type',
                options=[{'label': i, 'value': i} for i in ['Linear', 'Log']],
                value='Log',
                labelStyle={'display':'block'},
            )
        ], style={'width': '10%', 'float':'right'}),
    ], style={
        'padding': '0px 50px',
        'display':'flex',
        'justifyContent':'center'
    }),
    # Row 2: year slider plus the auto-play timer and its Start/Stop button.
    html.Div([
        dcc.Slider(
            id='crossfilter-year-slider',
            min=years[0],
            max=years[-1],
            value=years[0],
            step = 1,
            marks={str(year): str(year) for year in years[::5]},
        ),
        # Invisible metronome: fires the stepper callback every `interval` ms.
        dcc.Interval(
            id='auto-stepper',
            interval=60*60*1000, # in milliseconds
            n_intervals=0 # change by itself every interval
        ),
        html.Button(
            STOP, # for some reason loading the page makes a click!
            id='button-start-stop',
            style={'margin-left':'30'},
        ),
    ], style={
        'padding': '0px 50px',
        'display':'flex',
        'justifyContent':'center'
    }),
    html.P(),
    # Name of the country currently hovered (filled in by a callback).
    html.Div(id='div-country'),
    # Row 3: GDP, fertility and population history of the hovered country.
    html.Div([
        dcc.Graph(id='x-time-series',
                  style={'width':'33%', 'display':'inline-block'}),
        dcc.Graph(id='y-time-series',
                  style={'width':'33%', 'display':'inline-block', 'padding-left': '0.5%'}),
        dcc.Graph(id='pop-time-series',
                  style={'width':'33%', 'display':'inline-block', 'padding-left': '0.5%'}),
    ], style={ 'display':'flex', 'justifyContent':'center', }),
], style={
    'borderBottom': 'thin lightgrey solid',
    'backgroundColor': 'rgb(240, 240, 240)',
    'padding': '10px 50px 10px 50px',
    }
)
def traces(df, which_continent, year):
    """Build one Scatter trace per selected continent for the given year.

    Bubble size is proportional to the square root of the population
    (scaled by 1e5); bubble colour comes from `continent_colors`.
    """
    scatters = []
    for region in which_continent:
        subset = df[year][continent.region == region]
        scatters.append(
            go.Scatter(
                x=subset['incomes'],
                y=subset['children'],
                mode='markers',
                marker=dict(size=np.sqrt(subset['population'] / 1E5),
                            color=continent_colors[region]),
                # Country name shown in the hover tooltip.
                text=subset.index,
            )
        )
    return scatters
@app.callback(
    dash.dependencies.Output('main-graph', 'figure'),
    [ dash.dependencies.Input('crossfilter-which-continent', 'values'),
      dash.dependencies.Input('crossfilter-xaxis-type', 'value'),
      dash.dependencies.Input('crossfilter-year-slider', 'value')])
def update_graph(which_continent, xaxis_type, year):
    """Redraw the main bubble chart for the chosen continents, scale and year."""
    log_scale = xaxis_type != 'Linear'
    xaxis = dict(
        title='GDP per Capita (US $)',
        type='log' if log_scale else 'linear',
        # Log range is expressed as exponents of 10.
        range=(np.log10(50), np.log10(55000)) if log_scale else (0, 55000),
    )
    layout = go.Layout(
        xaxis=xaxis,
        yaxis=dict(title='Child per woman', range=(0, 9)),
        margin={'l': 40, 'b': 30, 't': 10, 'r': 0},
        height=450,
        hovermode='closest',
        showlegend=False,
    )
    return {'data': traces(df, which_continent, year), 'layout': layout}
def create_time_series(country, what, axis_type, title):
    """Build a small line chart of one indicator (*what*) for *country*."""
    trace = go.Scatter(
        x=years,
        y=df.loc[country, (years, what)],
        mode='lines+markers',
    )
    layout = {
        'height': 225,
        'margin': {'l': 50, 'b': 20, 'r': 10, 't': 20},
        'yaxis': {'title': title,
                  'type': 'linear' if axis_type == 'Linear' else 'log'},
        'xaxis': {'showgrid': False}
    }
    return {'data': [trace], 'layout': layout}
def get_country(hoverData):
    """Return the country name under the cursor.

    Dash passes ``None`` for *hoverData* until the user hovers a bubble; in
    that case a random country is returned so the time-series panels are
    never empty on first load.
    """
    if hoverData is None:  # `is None`, not `== None` (PEP 8 identity check)
        return df.index.values[np.random.randint(len(df))]
    return hoverData['points'][0]['text']
# Mirror the hovered (or randomly chosen) country name into the page.
@app.callback(
    dash.dependencies.Output('div-country', 'children'),
    [dash.dependencies.Input('main-graph', 'hoverData')])
def country_chosen(hoverData):
    """Display the name of the country currently under the cursor."""
    return get_country(hoverData)
# graph incomes vs years
@app.callback(
    dash.dependencies.Output('x-time-series', 'figure'),
    [dash.dependencies.Input('main-graph', 'hoverData'),
     dash.dependencies.Input('crossfilter-xaxis-type', 'value')])
def update_y_timeseries(hoverData, xaxis_type):
    """Refresh the GDP-per-capita history panel for the hovered country."""
    return create_time_series(
        get_country(hoverData), 'incomes', xaxis_type, 'GDP per Capita (US $)')
# graph children vs years
@app.callback(
    dash.dependencies.Output('y-time-series', 'figure'),
    [dash.dependencies.Input('main-graph', 'hoverData'),])
def update_x_timeseries(hoverData):
    """Refresh the fertility history panel (always linear scale)."""
    return create_time_series(
        get_country(hoverData), 'children', 'linear', 'Child per woman')
# graph population vs years
@app.callback(
    dash.dependencies.Output('pop-time-series', 'figure'),
    [dash.dependencies.Input('main-graph', 'hoverData'),
     dash.dependencies.Input('crossfilter-xaxis-type', 'value')])
def update_pop_timeseries(hoverData, xaxis_type):
    """Refresh the population history panel for the hovered country."""
    return create_time_series(
        get_country(hoverData), 'population', xaxis_type, 'Population')
# start and stop the movie
@app.callback(
    dash.dependencies.Output('button-start-stop', 'children'),
    [dash.dependencies.Input('button-start-stop', 'n_clicks')],
    [dash.dependencies.State('button-start-stop', 'children')])
def button_on_click(n_clicks, text):
    """Toggle the button label between Start and Stop on every click."""
    return STOP if text == START else START
# this one is triggered by the previous one because we cannot have 2 outputs
# in the same callback
# NOTE(review): this def reuses the name `button_on_click`, shadowing the
# callback above at module level. Dash registers both via the decorator, so
# it still works, but a distinct name would be clearer.
@app.callback(
    dash.dependencies.Output('auto-stepper', 'interval'),
    [dash.dependencies.Input('button-start-stop', 'children')])
def button_on_click(text):
    """Set the timer period: slow when paused, 0.5 s when playing."""
    if text == START: # then it means we are stopped
        return 60*60*1000 # just one event an hour
    else:
        return 0.5*1000
# see if it should move the slider for simulating a movie
@app.callback(
    dash.dependencies.Output('crossfilter-year-slider', 'value'),
    [dash.dependencies.Input('auto-stepper', 'n_intervals')],
    [dash.dependencies.State('crossfilter-year-slider', 'value'),
     dash.dependencies.State('button-start-stop', 'children')])
def on_interval(n_intervals, year, text):
    """Advance the year slider one step per timer tick while playing."""
    if text != STOP:
        # Button shows Start, i.e. the movie is paused: leave the slider alone.
        return year
    # Playing: step forward, wrapping around after the last year.
    return years[0] if year == years[-1] else year + 1
if __name__ == '__main__':
    # Start the Dash development server (defaults to http://127.0.0.1:8050).
    app.run_server()
| true |
e47403b0d88387c91b54a94ab35c3df1081891af | Python | daniele-salerno/Gender-Classifier-CV | /3_image_gender_classifier.py | UTF-8 | 1,660 | 3.5 | 4 | [] | no_license | ###############################
"""
Part 3 of 4
Script used for testing the model previus saved with some random images from internet
"""
###############################
# Load the trained gender CNN, detect faces in a test image with a Haar
# cascade, classify each face crop and draw the prediction on the image.
import cv2
from tensorflow.keras.models import load_model
# Input size the CNN was trained on (width, height).
SCALE = (200, 200)
model = load_model('model.h5')
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
image = cv2.imread('images/couple.jpg')
# detectMultiScale returns one (x, y, w, h) rectangle per face found.
rects = face_cascade.detectMultiScale(image, 1.1, 15)
# Iterate over each detected face.
for rect in rects:
    face = image[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]] # face cut out from the image
    small_img = cv2.resize(face, SCALE) # resize to the network's input size
    x = small_img.astype(float) # cast to float so the division below works on real values
    x/=255. # normalize pixel values to [0, 1]
    # predict() expects a batch of images, so wrap the single face as a batch of one
    x = x.reshape(1, x.shape[0], x.shape[1], 3)
    y = model.predict(x)
    y = y[0][0] # scalar prediction: >0.5 means "Man" (see below)
    print(y)
    # Draw the prediction (label and confidence percentage) onto the image.
    label = "Man" if y>0.5 else "Woman"
    percentage = y if y>0.5 else 1.0-y
    percentage = round(percentage*100,1)
    cv2.rectangle(image, (rect[0], rect[1]), (rect[0]+rect[2], rect[1]+rect[3]), (0,255,0), 2) # green rectangle around the detected face
    cv2.rectangle(image, (rect[0], rect[1]-20), (rect[0]+170, rect[1]), (0,255,0), cv2.FILLED) # filled background for the label
    cv2.putText(image, label+" ("+str(percentage)+"%)", (rect[0]+5, rect[1]), cv2.FONT_HERSHEY_PLAIN, 1.4, (255,255,255), 2) # label text
# Show the annotated image and wait for a key press before exiting.
cv2.imshow("Gender Classifier", image)
cv2.waitKey(0)
| true |
be289912a071b4931c29b00287db4c1a31a9a053 | Python | cladren123/study | /AlgorithmStudy/백준/4 N과 M 시리즈/N 과 M (1).py | UTF-8 | 1,132 | 3.78125 | 4 | [] | no_license |
"""
2 브루트포스 문제 대비, N과 M은 확실하게 마스터 하자.
"""
n, m = map(int, input().split())
used = [0] * m
visited = [0] * n
card = []
"""
n 은 가짓수
m 은 하나씩 뽑는다.
입력 3 1
3개 중에서 하나 고르기
n 개 중에서 m 개 고르기
즉 n과 m은 n개 중에서 m개 고르기. 모든 경우의 수를 고르기.
used 는 m개 고를걸 담는다
visited 는 n개, 즉 전체에 방문하는 것을 통해 모든 경우의 수를 확인
card는 이제 뼈대가 들어오면 생기는 몸? 같은거 덱
1
2
3
"""
for i in range(1, n+1) :
card.append(i)
"""
card는 이제 뼈대가 들어오면 출력할 몸이다.
n개중에 m개 고르기
"""
def solve(stage):
    """Depth-first backtracking over index permutations.

    Fills ``used`` one slot per recursion level with indices 0..n-1 that are
    not yet taken, and prints the corresponding ``card`` values once all m
    slots are filled.  Relies on the module-level globals n, m, used,
    visited and card.
    """
    # Base case: one complete selection -> print its card values.
    if stage == m:
        for i in used:
            print(card[i], end=' ')
        print()
        return
    # Try every still-unused index at this slot, recurse, then backtrack.
    for i in range(n):
        if visited[i] == 0:
            visited[i] = 1
            used[stage] = i
            solve(stage + 1)
            visited[i] = 0
solve(0);
| true |
8812283ed58bc7521743af51e10698018191b911 | Python | rhutuja3010/function | /given 2 no. find max number.py | UTF-8 | 237 | 3.53125 | 4 | [] | no_license | # def max(a,b):
# if a > b:
# return a
# else:
# return b
# print("max number =",max(30,40))
def hello(name, mgs="how are you"):
    """Print a greeting made of *name* and an optional trailing message."""
    print("hello", name, mgs)


hello("friend", ",have a nice day")
hello("friend")
| true |
67de6e18a5add7320d21c26f2fd8812c24139290 | Python | passionzhan/design_pattern | /adapter.py | UTF-8 | 799 | 2.71875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# PROJECT_NAME: design_pattern
# Name: adapter.py
# Author: 9824373
# Date: 2020-08-19 15:24
# Contact: 9824373@qq.com
# Version: V1.0
# Description:
#-------------------------------------------------------------------------------
class Target(object):
    """The interface that client code programs against."""

    def request(self):
        # The ordinary, expected operation.
        print("普通请求")
class Adaptee(object):
    """An existing class whose interface does not match Target."""

    def specific_request(self):
        # The incompatible operation that needs adapting.
        print("特殊请求")
class Adapter(Target):
    """Adapter: exposes Adaptee.specific_request through Target.request."""

    def __init__(self):
        # Hold the incompatible object being adapted.
        self.adaptee = Adaptee()

    def request(self):
        # Forward the standard call to the adaptee's special method.
        self.adaptee.specific_request()
if __name__ == "__main__":
target = Adapter()
target.request()
| true |
a3c2a5c4a88204757168015eaf9188da7a2eb02c | Python | ethanluckett/csci531-conniption | /play_test.py | UTF-8 | 2,720 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python
from board_class import BoardState
from expand import breadth
from search import alpha_beta_search
from cevaluate.evaluate import evaluate_full
#from evaluate import Evaluator
import functools
import random
import sys
#eval = Evaluator()
def human_move(state):
    """Prompt a human for a move, apply it to *state*, and return
    (new_state, move_str).

    Move syntax: an optional leading 'f' (flip the board before placing),
    a column digit, and an optional trailing 'f' (flip after placing),
    e.g. '3', 'f3', '3f', 'f3f'.
    """
    print('P1 flips:', state.p1_flips)
    print('P2 flips:', state.p2_flips)
    move = input('Enter move: ')
    move_str = ''
    # Snapshot the mover's remaining flips before applying the move.
    # NOTE(review): this snapshot is not refreshed after the pre-flip below,
    # so 'f3f' may be accepted with only one flip left — confirm intended.
    if state.player_turn == 1:
        flips = state.p1_flips
    else:
        flips = state.p2_flips
    # Leading 'f': flip the board before placing the piece.
    if move.startswith('f') and flips > 0:
        move = move[1:]
        if state.flipped:
            print('You cannot flip the board right now.')
        else:
            state.flip_board()
            move_str += 'f'
            if state.player_turn == 1:
                state.p1_flips -= 1
            else:
                state.p2_flips -= 1
    # Drop the piece into the requested column.
    state.place_piece(int(move[0]), state.player_turn)
    move_str += move[0]
    # Trailing 'f': flip the board after placing the piece.
    if move.endswith('f') and flips > 0:
        state.flip_board()
        move_str += 'f'
        if state.player_turn == 1:
            state.p1_flips -= 1
        else:
            state.p2_flips -= 1
    # Pass the turn: 1 -> 2, 2 -> 1.
    state.player_turn = state.player_turn % 2 + 1
    return state, move_str
def random_move(state):
    """Return a uniformly random successor of *state* (no move string change)."""
    return random.choice(list(breadth(state)))
def play(player1, player2):
    """Run a full game between two agent callables; return the winner (1 or 2).

    Each agent takes a BoardState and returns (new_state, move_str).  The
    board is printed after every move whenever a human is involved.
    """
    state = BoardState()
    if player1 == human_move or player2 == human_move:
        state.print_board()
    while True:
        state, move = player1(state)
        print('Player 1 played {}'.format(move))
        if player1 == human_move or player2 == human_move:
            state.print_board()
        # if eval.evaluate_full(state.board, 1) >= 500000:
        # NOTE(review): 500000 appears to be the evaluator's "win" score —
        # confirm against cevaluate.evaluate.
        if evaluate_full(state.board, 1) >= 500000:
            return 1
        state, move = player2(state)
        print('Player 2 played {}'.format(move))
        if player1 == human_move or player2 == human_move:
            state.print_board()
        # if eval.evaluate_full(state.board, 2) >= 500000:
        if evaluate_full(state.board, 2) >= 500000:
            return 2
def usage():
    """Print command-line usage and the list of valid agent names."""
    prog = sys.argv[0]
    print('Usage: {} <player1> <player2>'.format(prog))
    print('Valid players: human, random, minimax')
if __name__ == '__main__':
    # Map CLI agent names to move functions; minimax is pre-bound to depth 6.
    agents = {
        'random': random_move,
        'human': human_move,
        'minimax': functools.partial(alpha_beta_search, depth=6),
    }
    if len(sys.argv) < 3 or sys.argv[1] not in agents or sys.argv[2] not in agents:
        usage()
        sys.exit(0)
    player1 = agents[sys.argv[1]]
    player2 = agents[sys.argv[2]]
    result = play(player1, player2)
    # result is 1 or 2, which is also the argv index of the winning agent name.
    print('Player {} ({}) wins.'.format(result, sys.argv[result]))
| true |
f472ab4b199045a469d184065a3e838750a3903b | Python | skuxy/Advent-Of-Code-codes | /2017/day7.py | UTF-8 | 1,936 | 3.46875 | 3 | [] | no_license | #! /usr/bin/env python3
class node:
    """A program in the tower: a tree node with its own weight (`value`),
    one parent link and any number of children."""
    def __init__(self, name, value):
        self.name = name
        self.value = value  # this node's own weight only, excluding children
        self.parent = None
        self.children = []
    def assign_parent(self, parent):
        """Link this node to its parent."""
        self.parent = parent
    def assign_child(self, child):
        """Register *child* as sitting on top of this node."""
        self.children.append(child)
    def sum_subvalues(self):
        """Return the weight of this node plus its entire subtree."""
        if not self.children:
            return self.value
        sum_result = self.value
        for child in self.children:
            sum_result += child.sum_subvalues()
        return sum_result
    def locate_broken(self):
        """Descend toward the node with the wrong weight.

        Returns (name, own_weight, sibling_subtree_weights) once the
        imbalance cannot be pushed further down.
        NOTE(review): assumes the broken node makes its subtree too HEAVY
        (the recursion follows the largest subtree weight), and the third
        tuple element is a list in one branch but a set in the other —
        confirm that callers accept both.
        """
        if not self.children:
            return self.name, self.value, [x.sum_subvalues() for x in self.parent.children]
        # All children weigh the same: this node itself is the culprit.
        if len(set([x.sum_subvalues() for x in self.children])) == 1:
            return self.name, self.value, set([x.sum_subvalues() for x in self.parent.children])
        # Otherwise descend into the heaviest child's subtree.
        child_values = {}
        for child in self.children:
            child_values[child.sum_subvalues()] = child
        return child_values[sorted(child_values.keys())[-1]].locate_broken()
def extract_root(node):
    """Climb parent links until a node with no parent (the root) is reached."""
    current = node
    while current.parent:
        current = current.parent
    return current
if __name__ == "__main__":
with open('/dev/stdin') as stdin:
input_data = list(map(lambda z: z.split(), stdin.readlines()))
nodes = list(map(lambda elems: node(elems[0], int(elems[1][1:-1])), input_data))
for line in input_data:
if len(line) < 3:
continue
parent = list(filter(lambda node: node.name == line[0], nodes))[0]
for potential_child in nodes:
if potential_child.name in [x.strip(',') for x in line[3:]]:
potential_child.assign_parent(parent)
parent.assign_child(potential_child)
root = extract_root(nodes[0]) # any ol node will do
print(root.name)
# part 2
print(root.locate_broken())
| true |
bb9d35ce9ac87ee6f00b982d0375794c0c7e4a35 | Python | hyfgreg/leetcode | /118.yanghuiTriangle.py | UTF-8 | 925 | 3.640625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
给定一个非负整数 numRows,生成杨辉三角的前 numRows 行。
在杨辉三角中,每个数是它左上方和右上方的数的和。
示例:
输入: 5
输出:
[
[1],
[1,1],
[1,2,1],
[1,3,3,1],
[1,4,6,4,1]
]
"""
# Dynamic programming: every row is derived from the numbers in the previous row
from typing import List
class Solution:
    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first *numRows* rows of Pascal's (Yanghui's) triangle.

        Dynamic programming: each row starts as all ones, and every interior
        cell is the sum of the two cells above it in the previous row.
        """
        triangle: List[List[int]] = []
        for size in range(1, numRows + 1):
            row = [1] * size
            for j in range(1, size - 1):
                row[j] = triangle[-1][j - 1] + triangle[-1][j]
            triangle.append(row)
        return triangle
| true |