| seq_id (string, length 7-11) | text (string, length 156-1.7M) | repo_name (string, length 7-125) | sub_path (string, length 4-132) | file_name (string, length 4-77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
5033653307
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.conf import settings
from unittest.mock import Mock, patch
from unittest import mock
from twilio.rest import Client
from help.models import Group, Event, UserProfile
from help.forms import RegisterForm, UserForm, GroupForm, ContactForm
from help.models import GroupManager, EventManager, UserProfileManager
class FormModelTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.data = {
"email": "test@test.te",
"password": "test123",
"first_name": "Test",
"last_name": "test",
"username": "Tester",
}
cls.datacontact = {
"Nom": "Test",
"Email": "test@test.com",
"Mobile": "0906070805",
"Message": "J'ai besoin d'infos"
}
cls.user2 = User.objects.create_user(
username="test",
email="test@test.te",
password="test123",
last_name="test",
first_name="Test",
)
cls.group = Group(
group_name='default',
adress='rue de la paix',
zipcode='75001',
city='Paris')
cls.group.save()
cls.user2.save()
cls.user = User.objects.get(pk=1)
cls.userprofile = UserProfile.objects.update(
user=cls.user,
phone="0607080910"
)
# cls.userprofile.save()
cls.group_name = 'default'
cls.idgroup = 1
cls.iduser = 1
cls.phonenumber = "0607080910"
def test_valid_RegisterForm(self):
self.form = RegisterForm(data=self.data)
self.assertTrue(self.form.is_valid())
def test_valid_UserForm(self):
self.UserForm = UserForm(data=self.data)
self.assertTrue(self.UserForm.is_valid())
"""
def test_valid_GroupForm(self):
self.GroupForm = GroupForm(data=self.group)
self.assertTrue(self.GroupForm.is_valid())
"""
def test_valid_ContactForm(self):
self.ContactForm = ContactForm(data=self.datacontact)
self.assertTrue(self.ContactForm.is_valid())
def test_get_id(self):
group_id = Group.objects.get_id(self.group_name)
for item in group_id:
groupid = item['id']
        self.assertEqual(groupid, self.idgroup)
def test_get_group_name(self):
group_name = Group.objects.get_group_name(self.idgroup)
for item in group_name:
groupname = item['group_name']
        self.assertEqual(groupname, self.group_name)
def test_get_group_info(self):
profiles = UserProfile.objects.get_group_info(self.iduser)
for elem in profiles:
phone = elem
        self.assertEqual(phone, self.phonenumber)
def test_get_number(self):
number_list = []
number_list = UserProfile.objects.get_number(self.idgroup)
for item in number_list:
phonenumber = str(item.phone)
        self.assertEqual(phonenumber, self.phonenumber)
"""
@mock.patch('.Event.send_message.messages.create')
def test_send_message(self, mocked_instance):
mocked_instance = mocked_instance.return_value
expected_sid = 'SM87105da94bff44b999e4e6eb90d8eb6a'
mocked_instance.send_message.return_value.sid = expected_sid
evt = Event()
# create_message_mock.return_value.sid = expected_sid
to = "+33660645522"
sid = evt.send_message(to)
sent_message = client.messages.create(
to=to,
from_=from_,
body=message_to_broadcast)
# assert create_message_mock.called is True
# assert sid == expected_sid
"""
|
davidbarat/P13
|
needhelp/help/tests/test_forms_models.py
|
test_forms_models.py
|
py
| 3,779
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23474940192
|
from random import randint
x=randint(0,9)
tentative=3
# give the player `tentative` attempts to guess the number
# (the original loop compared the attempt count to the random number itself)
while tentative > 0:
    y = input("saisir la réponse")
    if int(y) == int(x):
        print("bravo, vous avez gagné")
        break
    tentative -= 1
|
Doumachelsea/Monprojetpython
|
troisessais.py
|
troisessais.py
|
py
| 183
|
python
|
fr
|
code
| 0
|
github-code
|
6
|
1883937681
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 10 21:31:20 2021
@author: Scott
"""
# %% Set up
import pandas as pd
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
# import plotly.graph_objects as go
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
plt.style.use('dark_background')
# %% Functions
def combine_results(folder):
folder += '/'
df = pd.DataFrame()
for file in os.listdir(folder):
roundResults = pd.read_csv(folder + file, sep=' ')
roundResults['Round'] = file[5:-4]
        df = pd.concat([df, roundResults])  # DataFrame.append was removed in pandas 2.0
df['Round'] = df['Round'].astype(int)
df.sort_values('Round', ignore_index=True, inplace=True)
df.replace(0, np.nan, inplace=True)
elected = list(df['CANDIDATE '].loc[df['STATUS'].str.match(
r' ELECTED*')].drop_duplicates())
return df, elected
def plot_single_election(df, elected, incumbent, title):
dfGroup = df.groupby('CANDIDATE ')
fig, ax = plt.subplots(2, 1, figsize=(18, 10), sharex=True)
electLine = 'solid'
defeatLine = 'dashed'
exhaustLine = 'dotted'
incumbentColor = 'C0'
challengeColor = 'C1'
exhaustColor = 'gray'
for key, grp in dfGroup:
if key in elected:
linestyle = electLine
elif key == 'EXHAUSTED PILE: ':
linestyle = exhaustLine
else:
linestyle = defeatLine
if key in incumbent:
color = incumbentColor
elif key == 'EXHAUSTED PILE: ':
color = exhaustColor
else:
color = challengeColor
ax[0].plot(grp['Round'], grp['TOTAL '],
label=key, linestyle=linestyle, color=color)
grp2 = grp.loc[grp['Round'] > 1]
ax[1].plot(grp2['Round'], grp2['THIS ROUND '],
label=key, linestyle=linestyle, color=color)
ax[0].set_title('Total Votes')
ax[1].set_title('Added Votes')
ax[1].set_xlabel('Round')
# changeMin = min(df['THIS ROUND '].loc[(
# df['Round'] > 1) & (df['Round'] < max(df['Round']))])
changeMin = 0
# changeMax = max(df['THIS ROUND '].loc[(
# df['Round'] > 1) & (df['Round'] < max(df['Round']))])
changeMax = max(df['THIS ROUND '].loc[(
df['Round'] > 1) & (df['CANDIDATE '] != 'EXHAUSTED PILE: ')]) * 1.1
ax[1].set_ylim([changeMin, changeMax])
# ax[1].set_ylim(bottom=0)
xMax = max(df['Round'])
ax[1].set_xlim([1, xMax])
ax[1].xaxis.set_ticks(np.arange(1, xMax + 1, 1))
custom_lines = [Line2D([0], [0], color=incumbentColor, linestyle=electLine, lw=3),
Line2D([0], [0], color=challengeColor,
linestyle=electLine, lw=3),
Line2D([0], [0], color=incumbentColor,
linestyle=defeatLine, lw=3),
Line2D([0], [0], color=challengeColor,
linestyle=defeatLine, lw=3),
Line2D([0], [0], color=exhaustColor, linestyle=exhaustLine, lw=3)]
    custom_legend = ['Reelected Incumbent',
                     'Elected Challenger',
                     'Defeated Incumbent',
                     'Defeated Challenger',
                     'Exhausted Ballots']
    ax[1].legend(custom_lines, custom_legend, loc='upper left')
fig.suptitle(title, size='x-large', weight='bold')
fig.tight_layout()
fig.savefig(outputFolder + title + '.png')
def round_gains(df):
continuing = df.loc[df['STATUS'] == ' CONTINUING']
contineGroup = continuing.groupby(['Round'])
gains = pd.DataFrame(columns=['Round', 'GainAboveExpect'])
for key, grp in contineGroup:
voteTotal = grp['THIS ROUND '].sum()
count = grp['THIS ROUND '].count()
percents = list((1/count) - (grp['THIS ROUND '] / voteTotal))
percentsArray = np.array([[key]*len(percents), percents]).transpose()
        gains = pd.concat([gains, pd.DataFrame(
            percentsArray, columns=gains.columns)], ignore_index=True)
return gains
# %% Run script
folders = glob.glob('camb*results')
outputFolder = 'cambOutputs/'
incum2011 = ['Cheung, Leland ',
'Toomey, Jr., Timothy J. ',
'Maher, David P. ',
'Davis Henrietta ',
'Simmons, E. Denise ',
'Kelley, Craig A. ',
'Decker, Majorie C. ',
'vanBeuzekom, Minka Y. ',
'Reeves, Kenneth E. ']
# incumbent = list()
incumbent = incum2011
compareResults = pd.DataFrame()
gains = pd.DataFrame()
for folder in folders:
print(folder)
df, elected = combine_results(folder)
plot_single_election(df, elected, incumbent, folder)
    gains = pd.concat([gains, round_gains(df)])
candidates = list(df['CANDIDATE '].loc[~(df['CANDIDATE '].str.match(
r'Write*')) & ~(df['CANDIDATE '].str.match(
'EXHAUSTED PILE: '))].drop_duplicates())
incumAttempt = list(set(candidates) & set(incumbent))
reelected = list(set(elected) & set(incumbent))
reelectCount = len(reelected)
lowFirstWin = df['THIS ROUND '].loc[(
df['CANDIDATE '].isin(elected)) & (df['Round'] == 1)].min()
highFirstLost = df['THIS ROUND '].loc[(
~df['CANDIDATE '].isin(elected)) & (df['Round'] == 1)].max()
lowFirstIncWin = df['THIS ROUND '].loc[(df['CANDIDATE '].isin(elected)) & (
df['CANDIDATE '].isin(incumbent)) & (df['Round'] == 1)].min()
highFirstIncLoss = df['THIS ROUND '].loc[(~df['CANDIDATE '].isin(elected)) & (
df['CANDIDATE '].isin(incumbent)) & (df['Round'] == 1)].max()
effQuota = df['TOTAL '].loc[(df['CANDIDATE '].isin(elected)) & (
df['Round'] == df['Round'].max())].min()
lastOut = df['TOTAL '].loc[(~df['CANDIDATE '].isin(elected)) & (
df['Round'] == df['Round'].max()-1)].max()
compareResults['Incumbent'] = sorted(incumbent)
compareResults['Elected'] = sorted(elected)
# print(compareResults)
print('[', len(incumAttempt), '] incumbents attempted re-election.')
print('[', reelectCount, '] re-elected ')
# print(sorted(reelected))
print('Effective Quota: ', effQuota)
print('Lowest first round elected vote: ', lowFirstWin)
print('Highest first round defeated vote: ', highFirstLost)
print('Lowest first round elected incumbent: ', lowFirstIncWin)
print('Highest first round defeated incumbent: ', highFirstIncLoss)
print('10th place votes: ', lastOut)
print('9th-10th delta: ', effQuota - lastOut)
print()
incumbent = elected
# gains.plot.scatter(x='Round', y='GainAboveExpect')
# gains.plot.scatter(x='Round', y='GainAboveExpect')
# %% Election-Election Placing
print('Election to election finishing order')
placeDf = pd.DataFrame()
for folder in folders:
files = os.listdir(folder + '/')
finalRound = max([int(sub.split('.')[0][5:]) for sub in files])
file = 'Round' + str(finalRound) + '.txt'
print(file)
roundResults = pd.read_csv(folder + '/' + file, sep=' ')
year = folder[4:8]
roundResults['Place_' + folder[4:8]] = roundResults.index + 1
roundResults = roundResults[roundResults['CANDIDATE '].str.contains(
'EXHAUSTED') == False]
roundResults = roundResults[roundResults['CANDIDATE '].str.contains(
'Write') == False]
roundResults.drop(
columns=['THIS ROUND ', 'TOTAL ', 'STATUS'], inplace=True)
roundResults.set_index('CANDIDATE ', inplace=True)
placeDf = placeDf.merge(roundResults, how='outer',
left_index=True, right_index=True)
# %% Plot
x = 'Year 0'
y = 'Year +2'
yearCompare = pd.DataFrame(columns=[x, y])
for year in range(placeDf.shape[1]):
if year < placeDf.shape[1] - 1:
adjacentYear = placeDf.iloc[:, year:year+2]
adjacentYear.columns = [x, y]
        yearCompare = pd.concat([yearCompare, adjacentYear], ignore_index=True)
yearCompare.dropna(inplace=True)
maxPlace = yearCompare.max().max()
fig, ax = plt.subplots(1, 1, figsize=(10, 10), sharex=True)
# Highlight Election Winners
winBox = [Rectangle((0, 0), 9, 9)]
ax.add_collection(PatchCollection(
winBox, facecolor='g', alpha=0.2, edgecolor='None'))
ax.grid(which='both', alpha=0.2)
ax.minorticks_on()
# Plot finishes
ax.scatter(yearCompare[x], yearCompare[y])
# Add linear fit
linFit = np.poly1d(np.polyfit(yearCompare[x], yearCompare[y], 1))
xs = np.arange(1, maxPlace + 1)
ax.plot(xs, linFit(xs))
title = 'Finishing Place in Subsequent Cycle'
ax.set_title(title)
ax.set_xlabel('Elected Order')
ax.set_ylabel('Elected Order in Subsequent Election')
ax.set_xlim(0, maxPlace + 1)
ax.set_ylim(0, maxPlace + 1)
ax.set_aspect('equal', adjustable='box')
fig.savefig(outputFolder + title + '.png')
|
sckilcoyne/Election_Results
|
cambridge.py
|
cambridge.py
|
py
| 8,768
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22423126020
|
import streamlit as st
import pandas as pd
import numpy as np
import pickle
import librosa
import csv
import os
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
#from audio_recorder_streamlit import audio_recorder
# def predict_age(audio_bytes):
# input = librosa.core.load(audio_bytes, sr=22050, mono=True, offset=0.0, duration=None, dtype="np.float32", res_type='kaiser_best')
#
# prediction = model.predict(input)
#
# return prediction
# st.title("Music Genre Classification System ")
# st.text("This ML model classifies the given input song from one of the ten genres<br> - rock, classical, metal,"
# " disco, blues, reggae, country, hiphop, jazz, pop")
#
# custom_css = "<p>Play an audio of 30 seconds to predict the genre</p>"
# st.markdown(custom_css, unsafe_allow_html=True)
# def save_file(sound_file):
# # save your sound file in the right folder by following the path
# with open(os.path.join('audio_files/', sound_file.name), 'wb') as f:
# f.write(sound_file.getbuffer())
# return sound_file.name
def transform_wav_to_csv(sound_saved):
# define the column names
    header_test = 'filename length chroma_stft_mean chroma_stft_var rms_mean rms_var spectral_centroid_mean ' \
                  'spectral_centroid_var spectral_bandwidth_mean spectral_bandwidth_var rolloff_mean rolloff_var ' \
                  'zero_crossing_rate_mean zero_crossing_rate_var harmony_mean harmony_var perceptr_mean perceptr_var ' \
                  'tempo mfcc1_mean mfcc1_var mfcc2_mean mfcc2_var mfcc3_mean mfcc3_var mfcc4_mean mfcc4_var ' \
                  'mfcc5_mean mfcc5_var mfcc6_mean mfcc6_var mfcc7_mean mfcc7_var mfcc8_mean mfcc8_var mfcc9_mean ' \
                  'mfcc9_var mfcc10_mean mfcc10_var mfcc11_mean mfcc11_var mfcc12_mean mfcc12_var mfcc13_mean ' \
                  'mfcc13_var mfcc14_mean mfcc14_var mfcc15_mean mfcc15_var mfcc16_mean mfcc16_var mfcc17_mean ' \
                  'mfcc17_var mfcc18_mean mfcc18_var mfcc19_mean mfcc19_var mfcc20_mean mfcc20_var'.split()
# create the csv file
file = open(f'csv_files/{os.path.splitext(sound_saved)[0]}.csv', 'w', newline='')
with file:
writer = csv.writer(file)
writer.writerow(header_test)
# calculate the value of the librosa parameters
sound_name = f'audio_files/{sound_saved}'
y, sr = librosa.load(sound_name, mono=True, duration=30)
chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)
rmse = librosa.feature.rms(y=y)
spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)
spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)
rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)
zcr = librosa.feature.zero_crossing_rate(y)
mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=27)
to_append = f'{os.path.basename(sound_name)} {np.mean(chroma_stft)} {np.mean(rmse)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'
for e in mfcc:
to_append += f' {np.mean(e)}'
to_append += f' {np.var(e)}'
# fill in the csv file
file = open(f'csv_files/{os.path.splitext(sound_saved)[0]}.csv', 'a', newline='')
with file:
writer = csv.writer(file)
writer.writerow(to_append.split())
# create test dataframe
df_test = pd.read_csv(f'csv_files/{os.path.splitext(sound_saved)[0]}.csv')
# each time you add a sound, a line is added to the test.csv file
# if you want to display the whole dataframe, you can deselect the following line
# st.write(df_test)
st.write(df_test)
return df_test
def classification(dataframe):
# create a dataframe with the csv file of the data used for training and validation
df = pd.read_csv('csv_files/data.csv')
# OUTPUT: labels => last column
labels_list = df.iloc[:, -1]
st.write("label list shpe:", labels_list.shape)
st.write("label list = ", labels_list)
# encode the labels (0 => 44)
converter = LabelEncoder()
y = converter.fit_transform(labels_list)
# st.write("y shape:", y.shape)
# st.write("y = ", y)
# y = y.reshape(-1, 1)
# st.write("y shape:", y.shape)
# INPUTS: all other columns are inputs except the filename
scaler = StandardScaler()
X = scaler.fit_transform(np.array(df.iloc[:, 1:59]))
X_test = scaler.transform(np.array(dataframe.iloc[:, 1:59]))
# load the pretrained model
model = pickle.load(open("ModelforPrediction.pkl", "rb"))
# generate predictions for test samples
predictions = model.predict(X_test)
# st.write("X-test =", X_test)
# st.write("X-test.shape =", X_test.shape)
# st.write("X-test.ndim =", X_test.ndim)
st.write("predictions =", predictions)
st.write("predictions.shape =", predictions.shape)
st.write("predictions.ndim =", predictions.ndim)
# # generate argmax for predictions
# classes = np.argmax(predictions, axis = 1)
# st.write("classes = ", classes)
# st.write("classes.shape = ", classes.shape)
# # transform class number into class name
# result = converter.inverse_transform(classes)
result = predictions
# -------------------------------------------------------------------------
# -------------------------------------------------
return result
def choice_prediction():
st.write('# Prediction')
    st.write('### Choose an audio file in .wav format')
# upload sound
uploaded_file = st.file_uploader(' ', type='wav')
if uploaded_file is not None:
# view details
file_details = {'filename': uploaded_file.name, 'filetype': uploaded_file.type, 'filesize': uploaded_file.size}
st.write(file_details)
# read and play the audio file
st.write('### Play audio')
audio_bytes = uploaded_file.read()
st.audio(audio_bytes, format='audio/wav')
# save_file function
# save_file(uploaded_file)
# define the filename
sound = uploaded_file.name
# transform_wav_to_csv function
transform_wav_to_csv(sound)
st.write('### Classification results')
# if you select the predict button
if st.button('Predict'):
# write the prediction: the prediction of the last sound sent corresponds to the first column
st.write("Genre is: ",
str(classification(transform_wav_to_csv(sound))).replace('[', '').replace(']', '').replace("'",'').replace('"', ''))
else:
st.write('The file has not been uploaded yet')
return
def main():
# st.image(Image.open('logo_ovh.png'), width=200)
model = pickle.load(open("ModelforPrediction.pkl", "rb"))
import base64
page_bg_img = '''
<style>
body{
opacity: 0.6;
background: url("https://images.unsplash.com/photo-1605731414532-6b26976cc153?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=1770&q=80");
background-position: bottom;
background-size : 100%;
}
</style>
'''
st.markdown(page_bg_img, unsafe_allow_html=True)
st.title("Music Genre Classification System ")
st.markdown("""<hr style="height:2px;border:solid #FFFFFF;color:#FFFFFF;background-color:#333;" /> """,
unsafe_allow_html=True)
choice_prediction()
if __name__ == "__main__":
main()
|
DirectorOfUnskillful/Music_Genre_Classification
|
app1.py
|
app1.py
|
py
| 7,405
|
python
|
en
|
code
| 0
|
github-code
|
6
|
32741269203
|
import numpy as np
def main():
edges = np.array([
(0, 1), (0, 5), (1, 2), (2, 6), (3, 7), (3, 8), (3, 11), (4, 0), (5, 2),
(5, 4), (5, 6), (5, 10), (8, 7), (8, 12), (9, 10), (10, 11), (12, 11),
])
recursive_split(edges)
def recursive_split(edges, level=0):
nodes = np.array(list(set(np.ravel(edges))))
print(". " * level + format_nodes(nodes))
if len(edges) <= 3:
return
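    # Renumber the surviving node ids to a dense 0..len(nodes)-1 range so they
    # can index the adjacency matrix; mapping_back restores the original ids.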
mapping_back = nodes
mapping_forw = np.full(nodes.max() + 1, -1, dtype=int)
mapping_forw[nodes] = np.arange(len(nodes))
a, b = split(mapping_forw[edges])
a = mapping_back[a]
b = mapping_back[b]
a_edges = np.array([(i, j) for i, j in edges if i in a and j in a])
b_edges = np.array([(i, j) for i, j in edges if i in b and j in b])
recursive_split(a_edges, level + 1)
recursive_split(b_edges, level + 1)
def split(edges):
"""
Split connected graph into two densely connected components.
"""
laplacian = make_laplacian(make_adjacency_matrix(edges))
eigvals, eigvecs = np.linalg.eigh(laplacian)
signals = eigvecs[:, 1]
indices = np.arange(len(signals))
a = indices[signals > 0]
b = indices[signals < 0]
return (a, b) if a[0] < b[0] else (b, a)
def make_laplacian(adjacency_matrix):
"""
Compute graph laplacian from adjacency matrix.
"""
return np.diag(adjacency_matrix.sum(1)) - adjacency_matrix
def make_adjacency_matrix(edges):
"""
Create adjacency matrix from edge list.
"""
n_nodes = count_nodes(edges)
adjacency_matrix = np.zeros((n_nodes, n_nodes))
for i, j in edges:
adjacency_matrix[i, j] = 1
adjacency_matrix[j, i] = 1
return adjacency_matrix
def count_nodes(edges):
"""
Deduce the number of nodes from edge list.
"""
return max(max(i, j) for i, j in edges) + 1
def format_nodes(nodes):
"""
Format list of node indices as concise string.
"""
return " ".join(str(i) for i in nodes)
main()
|
snsinfu/bit5
|
test400-graph_split/main.py
|
main.py
|
py
| 2,038
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3935444552
|
import argparse
import sys
import time
from datetime import datetime, timedelta
from glados.es.ws2es.es_util import ESUtil, num_shards_by_num_rows, DefaultMappings, CURRENT_ES_VERSION
import glados.es.ws2es.signal_handler as signal_handler
import glados.es.ws2es.resources_description as resources_description
import glados.es.ws2es.progress_bar_handler as pbh
from threading import Thread
from glados.es.ws2es.util import query_yes_no
import traceback
import yaml
import glados.es.ws2es.util as util
import os
import os.path
import datetime
__author__ = 'jfmosquera@ebi.ac.uk'
# ----------------------------------------------------------------------------------------------------------------------
# Replication
# ----------------------------------------------------------------------------------------------------------------------
class IndexReplicator(Thread):
def __init__(self, idx_name: str, es_util_origin: ESUtil, es_util_dest: ESUtil, delete_dest_idx: bool = False,
skip_update_mappings: bool = False, es_query=None):
super().__init__()
self.idx_name = idx_name
self.es_util_origin = es_util_origin
self.es_util_dest = es_util_dest
self.delete_dest_idx = delete_dest_idx
self.update_mappings = not skip_update_mappings
self.es_query = es_query
def replicate_idx(self):
origin_count = self.es_util_origin.get_idx_count(self.idx_name)
if origin_count <= 0:
print('ERROR: Skipping empty index {0} in origin cluster. COUNT: {1}'.format(self.idx_name, origin_count))
return
idx_exists = self.es_util_dest.get_idx_count(self.idx_name) >= 0
# noinspection PyBroadException
try:
if idx_exists and self.delete_dest_idx:
# self.es_util_dest.delete_idx(self.idx_name)
print('INFO: INDEX DELETED : {0}.'.format(self.idx_name), file=sys.stderr)
if not idx_exists or (idx_exists and self.delete_dest_idx):
self.es_util_dest.create_idx(
self.idx_name, num_shards_by_num_rows(origin_count), 0,
analysis=DefaultMappings.COMMON_ANALYSIS,
mappings=self.es_util_origin.get_index_mapping(self.idx_name)
)
print('INFO: INDEX CREATED : {0}.'.format(self.idx_name), file=sys.stderr)
elif self.update_mappings:
self.es_util_dest.update_mappings_idx(
self.idx_name, self.es_util_origin.get_index_mapping(self.idx_name)
)
print('INFO: INDEX MAPPINGS UPDATED : {0}.'.format(self.idx_name), file=sys.stderr)
except Exception as e:
traceback.print_exc(file=sys.stderr)
print('ERROR: INDEX CREATION/UPDATE FAILED : {0}.'.format(self.idx_name), file=sys.stderr)
return
sys.stderr.flush()
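        # Callback applied to every document streamed from the origin index;
        # each document is queued for a bulk update/insert into the destination.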
        def index_doc_on_doc(scan_doc, scan_doc_id, total_docs, current_count, first_doc, last_doc):
if 'request_date' in scan_doc:
try:
scan_doc['request_date'] = int(scan_doc['request_date'])
except:
pass
if idx_exists:
self.es_util_dest.update_doc_bulk(self.idx_name, scan_doc_id, doc=scan_doc, upsert=True)
else:
self.es_util_dest.index_doc_bulk(self.idx_name, scan_doc_id, scan_doc)
self.es_util_origin.scan_index(self.idx_name, index_doc_on_doc, query=self.es_query)
def run(self):
try:
self.replicate_idx()
except:
traceback.print_exc()
def replicate_clusters(
es_util_origin: ESUtil, es_util_dest: ESUtil,
resources_to_run=resources_description.ALL_RELEASE_RESOURCES,
delete_dest_idx: bool = False, skip_update_mappings: bool = False, unichem: bool = False,
unichem_cron_update: bool = False
):
replicators = []
if unichem:
scan_query = None
if unichem_cron_update:
max_dates_result = es_util_origin.run_yaml_query(
os.path.join(os.path.abspath(os.path.dirname(__file__)), './unichem_max_dates_query.yaml'),
'unichem', return_all=True
)
max_created_date = util.get_js_path_from_dict(
max_dates_result, 'aggregations.MAX_CREATED_DATE.value'
)
max_updated_date = util.get_js_path_from_dict(
max_dates_result, 'aggregations.MAX_UPDATED_DATE.value'
)
max_created_date = datetime.datetime.fromtimestamp(max_created_date/1000.0)
max_updated_date = datetime.datetime.fromtimestamp(max_updated_date/1000.0)
max_date = max(max_created_date, max_updated_date)
update_date = max_date - timedelta(days=14)
print(
'MAX DATE: {0} --- {1}\nUPDATE FROM: {2} --- {3}'
.format(max_date, max_date.timestamp()*1000, update_date, update_date.timestamp()*1000)
)
scan_query = {
'query': {
'query_string': {
'query': 'sources.last_updated:>={0} OR sources.created_at:>={0}'
.format(int(update_date.timestamp()*1000))
}
}
}
unichem_replicator = IndexReplicator(
'unichem', es_util_origin, es_util_dest, delete_dest_idx=delete_dest_idx,
skip_update_mappings=skip_update_mappings, es_query=scan_query
)
unichem_replicator.start()
replicators.append(unichem_replicator)
else:
for resource_i in resources_to_run:
res_it_i = IndexReplicator(resource_i.idx_name, es_util_origin, es_util_dest, delete_dest_idx=delete_dest_idx,
skip_update_mappings=skip_update_mappings)
res_it_i.start()
replicators.append(res_it_i)
for res_it_i in replicators:
res_it_i.join()
def check_origin_vs_destination_counts(
es_util_origin: ESUtil, es_util_dest: ESUtil,
resources_to_run=resources_description.ALL_RELEASE_RESOURCES
):
for resource_i in resources_to_run:
origin_count = es_util_origin.get_idx_count(resource_i.idx_name)
destination_count = es_util_dest.get_idx_count(resource_i.idx_name)
mismatch = origin_count == -1 or destination_count == -1 or origin_count != destination_count
mismatch_txt = 'MISMATCH' if mismatch else ''
formatted_ws_count = '{0:,}'.format(origin_count)
formatted_ws_count = ' ' * (12 - len(formatted_ws_count)) + formatted_ws_count
formatted_es_count = '{0:,}'.format(destination_count)
formatted_es_count = ' ' * (12 - len(formatted_es_count)) + formatted_es_count
print_txt = '{0}: origin_count: {1} - destination_count: {2} {3}' \
.format(resource_i.get_res_name_for_print(), formatted_ws_count, formatted_es_count, mismatch_txt)
print(print_txt, file=sys.stderr)
# ----------------------------------------------------------------------------------------------------------------------
# MAIN
# ----------------------------------------------------------------------------------------------------------------------
def parse_config():
parser = argparse.ArgumentParser(
description="Replicate ChEMBL and UniChem data existing in Elastic Search from origin to destination."
)
parser.add_argument("--config",
dest="config_file",
help="Configuration file for the replication process.")
args = parser.parse_args()
if not args.config_file:
print(
'ERROR: a configuration file needs to be specified using --config',
file=sys.stderr
)
sys.exit(1)
try:
config_data = None
with open(args.config_file, 'r') as conf_file:
config_data = yaml.safe_load(conf_file)
return config_data
except:
traceback.print_exc()
print(
'ERROR: could not parse the config file at {0}'.format(args.config_file),
file=sys.stderr
)
sys.exit(1)
def main():
t_ini = time.time()
config = parse_config()
progress_bar_out = config.get('progress_bar_out', None)
pbh.set_progressbar_out_path(progress_bar_out)
delete_indexes = config.get('delete_indexes', False)
skip_update_mappings = config.get('skip_update_mappings', False)
unichem = config.get('unichem', False)
unichem_cron_update = config.get('unichem-cron-update', False)
monitoring = config.get('monitoring', False)
es_host_origin = util.get_js_path_from_dict(config, 'es_clusters.origin.host')
es_port_origin = util.get_js_path_from_dict(config, 'es_clusters.origin.port')
es_user_origin = util.get_js_path_from_dict(config, 'es_clusters.origin.user')
es_password_origin = util.get_js_path_from_dict(config, 'es_clusters.origin.password')
es_host_destination = util.get_js_path_from_dict(config, 'es_clusters.destination.host')
es_port_destination = util.get_js_path_from_dict(config, 'es_clusters.destination.port')
es_user_destination = util.get_js_path_from_dict(config, 'es_clusters.destination.user')
es_password_destination = util.get_js_path_from_dict(config, 'es_clusters.destination.password')
if es_host_origin == es_host_destination and es_port_origin == es_port_destination:
print('ERROR: Origin and destination clusters are the same.')
sys.exit(1)
es_major_version_origin = util.get_js_path_from_dict(config, 'es_clusters.origin.es_client_major_version')
if es_major_version_origin is not None:
try:
es_major_version_origin = int(es_major_version_origin)
assert es_major_version_origin <= CURRENT_ES_VERSION
except:
traceback.print_exc()
print(
'ERROR: Major version for elastic "{0}" is not valid, it must be an integer lower than {1}.'
.format(es_major_version_origin, CURRENT_ES_VERSION),
file=sys.stderr
)
sys.exit(1)
selected_resources = None
resources = config.get('resource', None)
if resources is not None:
        if not isinstance(resources, list):
selected_resources = resources.split(',')
else:
selected_resources = resources
resources_to_run = resources_description.ALL_MONITORING_RESOURCES if monitoring else \
resources_description.ALL_RELEASE_RESOURCES
if not unichem:
if selected_resources:
resources_to_run = []
for resource_i_str in selected_resources:
resource_i = resources_description.RESOURCES_BY_RES_NAME.get(resource_i_str, None)
if resource_i is None:
print('Unknown resource {0}'.format(resource_i_str), file=sys.stderr)
sys.exit(1)
resources_to_run.append(resource_i)
# if args.delete_indexes:
# if not query_yes_no("This procedure will delete and create all indexes again in the destination server.\n"
# "Do you want to proceed?", default="no"):
# return
es_util_origin = ESUtil(es_major_version=es_major_version_origin)
es_util_origin.setup_connection(
es_host_origin, es_port_origin, es_user_origin, es_password_origin
)
es_util_destination = ESUtil()
es_util_destination.setup_connection(
es_host_destination, es_port_destination, es_user_destination, es_password_destination
)
ping_failed = False
if not es_util_origin.ping():
print('ERROR: Ping failed to origin cluster.', file=sys.stderr)
ping_failed = True
if not es_util_destination.ping():
print('ERROR: Ping failed to destination cluster.', file=sys.stderr)
ping_failed = True
if ping_failed:
sys.exit(1)
es_util_destination.bulk_submitter.start()
signal_handler.add_termination_handler(es_util_origin.stop_scan)
signal_handler.add_termination_handler(es_util_destination.stop_scan)
signal_handler.add_termination_handler(es_util_destination.bulk_submitter.stop_submitter)
replicate_clusters(
es_util_origin, es_util_destination, resources_to_run=resources_to_run, delete_dest_idx=delete_indexes,
skip_update_mappings=skip_update_mappings, unichem=unichem, unichem_cron_update=unichem_cron_update
)
es_util_destination.bulk_submitter.finish_current_queues()
es_util_destination.bulk_submitter.join()
pbh.write_after_progress_bars()
end_msg = 'REPLICATION FINISHED'
total_time = time.time() - t_ini
sec = timedelta(seconds=total_time)
    d = datetime.datetime(1, 1, 1) + sec
print(end_msg, file=sys.stderr)
print(
"Finished in: {0} Day(s), {1} Hour(s), {2} Minute(s) and {3} Second(s)"
.format(d.day-1, d.hour, d.minute, d.second),
file=sys.stderr
)
check_origin_vs_destination_counts(es_util_origin, es_util_destination, resources_to_run=resources_to_run)
if __name__ == "__main__":
main()
|
chembl/chembl_ws_2_es
|
src/glados/es/ws2es/cluster_replication/cluster_replicator.py
|
cluster_replicator.py
|
py
| 13,233
|
python
|
en
|
code
| 1
|
github-code
|
6
|
5480416527
|
import os
import requests
from bs4 import BeautifulSoup
import re
import sys
import getopt
user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36'
def get_music_data(url):
"""
    Fetch the song ids and names from the playlist page.
"""
headers = {'User-Agent':user_agent}
webData = requests.get(url,headers=headers).text
soup = BeautifulSoup(webData,'lxml')
find_list = soup.find('ul',class_="f-hide").find_all('a')
tempArr = []
for a in find_list:
music_id = a['href'].replace('/song?id=','')
music_name = a.text
tempArr.append({'id':music_id,'name':music_name})
return tempArr
def get(values,output_path):
"""
    Download the songs.
"""
downNum = 0
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
for x in values:
        x['name'] = re.sub(rstr, "_", x['name'])  # replace characters that are illegal in file names
if not os.path.exists(output_path + os.sep + x['name'] + '.mp3'):
print('[*] '+ x['name'] + '.mp3 Downloading...')
url = 'http://music.163.com/song/media/outer/url?id=' + x['id'] + '.mp3'
try:
save_file(url , output_path + os.sep + x['name'] + '.mp3')
downNum = downNum + 1
print('[+] '+ x['name'] + '.mp3 Download complete !')
except:
print('[+] '+ x['name'] + '.mp3 Download error !')
f = open('log_error.txt','a')
f.write(x['name']+'\n')
f.close()
print('[+] Download complete ' + str(downNum) + ' files !')
def save_file(url,path):
"""
用于保存歌曲文件
"""
headers = {'User-Agent':user_agent,'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8','Upgrade-Insecure-Requests':'1'}
response = requests.get(url,headers=headers)
f = open(path, 'wb')
f.write(response.content)
f.flush()
def poc_head():
print("""
__ _______.___._____.___. __________ __ .__.__
/ \ / \__ | |\__ | | \______ \ ____ _______/ |_|__| | ____
\ \/\/ // | | / | | | _// __ \\____ \ __\ | | _/ __ \
\ / \____ | \____ | | | \ ___/| |_> > | | | |_\ ___/
\__/\ / / ______| / ______|____|____|_ /\___ > __/|__| |__|____/\___ >
\/ \/ \/ /_____/ \/ \/|__| \/
""")
def my_help():
print("""
-h --help 查看帮助文档
-u --url 歌单列表的网址
-o --output 歌曲文件的存储路径(可使用绝对路径或相对路径)
默认为当前路径下的./music文件
eg(网易云热歌榜):
python3 pachong.py -u https://music.163.com/#/discover/toplist?id=3778678
python3 pachong.py -u https://music.163.com/#/discover/toplist?id=3778678 -o ./music
""")
def main():
url = ''
output_path = './music'
poc_head()
try:
opts, args = getopt.getopt(sys.argv[1:],
"hu:o:",
["help","url=","output="])
except getopt.GetoptError as err:
print(str(err))
my_help()
    # read the parsed options: o is the flag, a is the value that follows it
for o,a in opts:
if o in ['-h','--help']:
my_help()
return
elif o in ['-u','--url']:
url = a.replace("#/","")
elif o in ['-o','--output']:
output_path = a
if not os.path.exists(output_path):
os.makedirs(output_path)
music_list = get_music_data(url)
    print('[+] Playlist fetched successfully! Total', len(music_list), 'songs!')
get(music_list,output_path)
main()
|
haochen1204/Reptile_WYYmusic
|
pachong.py
|
pachong.py
|
py
| 3,850
|
python
|
en
|
code
| 1
|
github-code
|
6
|
24463956390
|
import numpy as np
import pandas as pd
import seaborn as sns
# question 1
df = pd.read_csv("persona.csv")
df.head()
df.describe()
# question 2
df["SOURCE"].nunique()
# question 3
df["PRICE"].nunique()
# question 4
df["PRICE"].value_counts()
# question 5
df["COUNTRY"].value_counts()
# question 6
df.groupby("COUNTRY")["PRICE"].sum()
# question 7
df.groupby("SOURCE")["PRICE"].sum()
# question 8
df.groupby("COUNTRY")["PRICE"].mean()
# question 9
df.groupby("SOURCE")["PRICE"].mean()
# question 10
df.groupby(["SOURCE","COUNTRY"])["PRICE"].mean()
# TASK 2
df.groupby(["SOURCE","COUNTRY","SEX","AGE"])["PRICE"].mean()
# TASK 3  df.sort_values(by=['col1']) df.sort_values(['job','count'],ascending=False).groupby('job').head(3)
agg_df = df.groupby(by=["COUNTRY", 'SOURCE', "SEX", "AGE"]).agg({"PRICE": "mean"}).sort_values("PRICE", ascending=False)
#agg_df = df.sort_values(["PRICE"],ascending=False).groupby(["SOURCE", "COUNTRY", "SEX", "AGE"])
agg_df.head(10)
# task 4
agg_df = agg_df.reset_index()
agg_df.head()
# task 5
bins = [0, 18, 23, 30, 40, agg_df["AGE"].max()]
mylabels = ['0_18', '19_23', '24_30', '31_40', '41_' + str(agg_df["AGE"].max())]
agg_df["age_cat"] = pd.cut(agg_df["AGE"], bins, labels=mylabels)
agg_df.head()
# task 6
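# row indices follow the column order COUNTRY, SOURCE, SEX, AGE, PRICE, age_cat,
# so the segment id below is built as COUNTRY_SOURCE_SEX_AGECAT.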
agg_df["customers_level_based"] = [row[0].upper() + "_" + row[1].upper() + "_" + row[2].upper() + "_" + row[5].upper() for row in agg_df.values]
agg_df = agg_df[["customers_level_based", "PRICE"]]
agg_df.head()
|
FatihKemalTerzi/Woking-on-Datasets
|
Kural_tabanli_siniflandirma_project.py
|
Kural_tabanli_siniflandirma_project.py
|
py
| 1,411
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71483194428
|
import pyautogui as pgui
import time
# pgui.PAUSE = 2.5
pgui.FAILSAFE = True
# positional variables // TO BE CHANGED IF REUSED, using mouse_pos.py
export_position = 1250, 540
tab_delimited_file_position = 1304, 785
records_from_button_position = 1092, 724
records_from_button_position_first_box = 1207, 724
records_from_button_position_second_box = 1290, 724
record_content = 1373, 850
full_record = 1251, 918
export = 1077, 914
above_save_button = 935, 915
ok = 935, 950
text_box = 1470, 252
save_button = 1859, 264
# To be changed if we don't start downloading from 1
record_from = 1
record_to = 1000
# text_file_name
root_file_name = 1
end_file_name = ".txt"
if __name__ == "__main__":
while record_from <= 52915: # To be updated every chunk
# 1st step : click export + tab delimited file
time.sleep(1)
pgui.click(export_position, interval=0.5)
# time.sleep(2)
pgui.click(tab_delimited_file_position, interval=0.5, button='left')
# time.sleep(2)
pgui.click(records_from_button_position)
time.sleep(0.1)
# First box
pgui.click(records_from_button_position_first_box)
# First, we need to empty the box from text
for i in range(10):
pgui.press("backspace", interval=0.1)
# time.sleep(0.1)
# Then, we write the number
pgui.keyDown('shift')
pgui.write(str(record_from))
pgui.keyUp('shift')
# Second box
pgui.click(records_from_button_position_second_box)
for i in range(10):
pgui.press("backspace", interval=0.1)
# time.sleep(0.1)
pgui.keyDown('shift')
pgui.write(str(record_to))
pgui.keyUp('shift')
time.sleep(1)
pgui.click(record_content)
time.sleep(1)
pgui.click(full_record)
time.sleep(1)
pgui.click(export)
time.sleep(15)
# need to click above the save button and wait a bit
pgui.click(above_save_button)
time.sleep(1)
pgui.click(ok)
# click on the text box, erase, and write new file name
pgui.click(text_box)
pgui.hotkey('ctrl', 'a')
pgui.press('backspace')
pgui.keyDown('shift')
pgui.write(str(root_file_name))
pgui.write(end_file_name)
pgui.keyUp('shift')
pgui.click(save_button)
# Update variable
record_from += 1000
record_to += 1000
# Update this to handle the last thousand elements
if record_to > 52915:
record_to = 52915
root_file_name += 1
|
KnuxV/projet_transdisciplinaire
|
auto_clicker.py
|
auto_clicker.py
|
py
| 2,612
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37107943507
|
"""1 question 1 sprint"""
def kthTerm(n, k) -> int:
"""
n1, n1+n0,
n2, n2+n0, n2+n1, n2+n1+n0,
n3, n3+n0, n3+n1, n3+n1+n0, n3+n2, n3+n2+n1, n3+n2+n1+n0]
"""
res = []
for i in range(k):
if len(res) > k:
break
res_copy = list(res)
res.append(n**i)
for j in res_copy:
res.append(j + n**i)
return res[k-1]
if __name__ == '__main__':
result = kthTerm(3, 4)
print(result)
|
Misha86/python-online-marathon
|
1_sprint/1_Question.py
|
1_Question.py
|
py
| 466
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37307665313
|
import tkinter as tk
from tkinter import ttk
import serial
from time import sleep
# Configure the serial port settings
port = "/dev/ttyS0"
baudrate = 9600
# Open the serial port
ser = serial.Serial(port, baudrate)
def on_FOTA_selected():
data=4
ser.write(bytes([data]))
ser.flush()
received=ser.read()
print(received)
    if received == b'k':  # ser.read() returns bytes, not str
print(received)
# Read the hex file
hex_file_path ="ACC_FOTA_BREAKING_.hex"
with open(hex_file_path, "r") as file:
i=0
while True:
char = file.read(1)
print(char)
byte_data = char.encode("utf-8")
ser.write(byte_data)
sleep(0.03)
if char=='\n':
sleep(0.03)
                    receive = ser.read(2)  # the ack is assumed to arrive as the two bytes b'ok'
                    print(receive)
                    if receive == b'ok':
continue
else:
sleep(0.03)
continue
ser.flush()
if not char:
break
print("Done")
def on_speed_selected():
selected_speed = speed_combobox.get()
if selected_speed=="0":
data=0
ser.write(bytes([data]))
ser.flush()
print("Selected speed:", selected_speed)
elif selected_speed=="30":
data=3
ser.write(bytes([data]))
ser.flush()
print("Selected speed:", selected_speed)
elif selected_speed=="60":
data=6
ser.write(bytes([data]))
ser.flush()
print("Selected speed:", selected_speed)
elif selected_speed=="90":
data=9
ser.write(bytes([data]))
ser.flush()
print("Selected speed:", selected_speed)
def on_acc_selected():
selected_acc = acc_combobox.get()
if selected_acc=="Adaptive":
data=1
ser.write(bytes([data]))
ser.flush()
print("Selected ACC:", selected_acc)
elif selected_acc=="Normal":
data=2
ser.write(bytes([data]))
ser.flush()
print("Selected ACC:", selected_acc)
elif selected_acc=="Cruise Control OFF":
data=5
ser.write(bytes([data]))
ser.flush()
print("Selected ACC:", selected_acc)
# Create the main window
window = tk.Tk()
window.title("GUI with Speed and ACC Selection")
window.geometry("300x200")
window.configure(background='#213363')
# Apply a custom font to the title
title_font = ("Arial", 20, "bold")
# Title label
title_label = ttk.Label(window, text="AutoSync", font=title_font, foreground="white", background="#213363")
title_label.pack(pady=50)
# ACC selection
acc_label = ttk.Label(window, text="Cruise Control Mode:")
acc_label.pack(pady=10)
acc_combobox = ttk.Combobox(window, values=["Adaptive", "Normal","Cruise Control OFF"])
acc_combobox.pack(pady=2)
acc_button = ttk.Button(window, text="Select", command=on_acc_selected)
acc_button.pack(pady=5)
# Speed selection
speed_label = ttk.Label(window, text="Select Speed:" )
speed_label.pack(pady=10)
speed_combobox = ttk.Combobox(window, values=["0", "30", "60","90"])
speed_combobox.pack(pady=2)
speed_button = ttk.Button(window, text="Select", command=on_speed_selected)
speed_button.pack(pady=5)
#FOTA selection
FOTA_button = ttk.Button(window, text="Update New Version", command=on_FOTA_selected)
FOTA_button.place(x=440,y=400)
acc_button.pack(pady=10)
# Start the main event loop
window.mainloop()
#speed_0-->0x00
#speed_30-->0x30
#speed_60-->0x60
#speed_90-->0x90
#adaptive_mode-->0x01
#normal_mode-->0x02
#ACC_OFF-->0x03
|
nadinfromc137/AutoSync-ACCwithFOTA
|
GUI/gui.py
|
gui.py
|
py
| 3,626
|
python
|
en
|
code
| 1
|
github-code
|
6
|
18777180614
|
import base64
from io import BytesIO
from flask import request
# Get a Float parameter with name `name` from the request or
# return the specified default value if it's absent
def num(name, default=0):
val = request.args.get(name)
    return float(val) if val is not None else float(default)
# Get an Int parameter with name `name` from the request
def inum(name, default=0):
return int(num(name, default))
# Get a string parameter with name `name` from the request
def arg(name):
return request.args.get(name)
# Get a bool parameter with name `name` from the request
def bln(name, default=False):
val = arg(name)
    return default if val is None else val == "true"
class Output:
def __init__(self):
self.contents = """
<!DOCTYPE html><html>
<head>
<style>
body {
font-size: 14px;
overflow-wrap: break-word;
word-wrap: break-word;
}
pre {
font-family: "Lucida Console", "Courier New", monospace;
display: inline;
width: 100%;
margin: 0;
white-space: pre-wrap;
white-space: -moz-pre-wrap;
white-space: -pre-wrap;
white-space: -o-pre-wrap;
word-wrap: break-word;
}
img {
max-width: 100%;
}
</style>
</head>
<body>
"""
def out(self, text="", end="<br>"):
if not type(text) is str:
text = str(text)
text = text.replace('\n', '<br>')
self.contents += f"<pre>{text}{end}</pre>"
return self
def plot(self, fig, end="<br>"):
buf = BytesIO()
fig.savefig(buf, format="png")
data = base64.b64encode(buf.getbuffer()).decode("ascii")
self.contents += f"<img src='data:image/png;base64,{data}'/>{end}"
return self
def get(self):
return f"{self.contents}</body></html>"
|
Hitonoriol/MOND-PI
|
lab-10-endpoints/io_utils.py
|
io_utils.py
|
py
| 2,196
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74577178748
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('rest', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ImageLike',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('count', models.IntegerField(default=0)),
('image', models.ForeignKey(to='rest.Image')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=500)),
('isChecked', models.BooleanField(default=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserImageAction',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(auto_now_add=True)),
('actionType', models.SmallIntegerField()),
('image', models.ForeignKey(to='rest.Image')),
('user', models.ForeignKey(to='rest.HiClothUser')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserTagLike',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(auto_now_add=True)),
('isUserLike', models.BooleanField(default=False)),
('tag', models.ForeignKey(to='rest.Tag')),
('user', models.ForeignKey(to='rest.HiClothUser')),
],
options={
},
bases=(models.Model,),
),
]
|
ayyoobimani/Cloth-server
|
hiCloth/hicloth/rest/migrations/0002_imagelike_tag_userimageaction_usertaglike.py
|
0002_imagelike_tag_userimageaction_usertaglike.py
|
py
| 2,196
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22676201180
|
import mxnet as mx
import numpy as np
import importlib
import os
import pickle
from sklearn import preprocessing
from rmacRegions import rmac_regions
if __name__ == '__main__':
featureDim = {
'vgg16': 512,
'resnet18': 512,
'resnet101': 512,
'resnet152': 512,
'custom': 512
}
predictorName = 'vgg16'
predictor = importlib.import_module('predictors.{}'.format(predictorName))
featuresFile = open('../Data/Oxford-5k/R-MAC/{}_{}_RMAC'.format(predictorName, featureDim[predictorName]), 'wb')
    PCAfile = open('../Data/PCA/{}_{}_PCA'.format(predictorName, featureDim[predictorName]), 'rb')  # pickle.load needs binary mode
pca = pickle.load(PCAfile)
imgsDir = '../Data/Oxford-5k/oxbuild_images/'
imgNames = os.listdir(imgsDir)
features = {}
count = 0
for i, imgName in enumerate(imgNames):
featureMap = predictor.getFeatureMap(imgsDir + imgName)[0]
featureMap = featureMap[np.newaxis, :]
Wmap, Hmap = featureMap.shape[3], featureMap.shape[2]
all_regions = []
regions = rmac_regions(Wmap, Hmap, 3)
for region in regions:
x = region[0]
y = region[1]
w = region[2]
h = region[3]
x1 = x
x2 = x + w - 1
y1 = y
y2 = y + h - 1
all_regions.append([0, x1, y1, x2, y2])
featureMap = mx.nd.array(featureMap)
all_regions = mx.nd.array(all_regions)
x = mx.nd.ROIPooling(data=featureMap, rois=all_regions, pooled_size=(1,1), spatial_scale=1.0)
x = np.squeeze(x.asnumpy())
x = preprocessing.normalize(x, norm='l2', axis=1)
features[imgName] = x
count += 1
if count % 500 == 0:
print('{} images have been processed'.format(count))
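    # R-MAC aggregation: PCA-transform the per-region vectors, l2-normalise,
    # sum over regions, then l2-normalise again into one global descriptor.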
for imgName in features.keys():
x = pca.transform(features[imgName])
x = preprocessing.normalize(x, norm='l2', axis=1)
x = np.sum(x, axis=0)
x = preprocessing.normalize(x.reshape(1,-1), norm='l2', axis=1)[0]
features[imgName] = x
pickle.dump(features, featuresFile)
featuresFile.close()
|
juvu/ImageSearch
|
utils/genDatabase_RMAC.py
|
genDatabase_RMAC.py
|
py
| 2,141
|
python
|
en
|
code
| null |
github-code
|
6
|
70490334907
|
"""
Converting adjacency matrix to adjacency list
"""
# python3 converter.py
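# Example session (hypothetical input; each row is typed as a contiguous
# string of digit weights, with "0" meaning no edge):
#   Row 1: 0120
#   Row 2: 1002
#   Row 3: 2003
#   Row 4: 0230
#   Vertices: abcd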
def sortThird(val):
return val[2]
def main():
row1 = list(input("Row 1: "))
row2 = list(input("Row 2: "))
row3 = list(input("Row 3: "))
row4 = list(input("Row 4: "))
vlist = list(input("Vertices: "))
allRows = []
allRows.append(row1)
allRows.append(row2)
allRows.append(row3)
allRows.append(row4)
elist = ""
elist_list = []
weights = ["1", "2", "3", "4", "5", "6"]
for i in range(len(allRows)):
for j in range(len(allRows[i])):
if allRows[i][j] != "0":
fromV = vlist[i]
toV = vlist[j]
w = str(allRows[i][j])
elist_list.append([fromV, toV, int(w)])
# elist += "({0}, {1}, {2}),".format(fromV, toV, w)
elist_list.sort(key = sortThird)
for edge in elist_list:
elist += "("
for val in edge:
elist += str(val)
elist += ","
elist = elist[:-1]
elist += "),"
print()
print(elist[:-1])
if __name__ == "__main__":
main()
|
locua/algorithms-learning
|
completed-labs/14/quiz/mattolist.py
|
mattolist.py
|
py
| 1,134
|
python
|
en
|
code
| 2
|
github-code
|
6
|
73554526269
|
from hw_asr.base import BaseModel
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MaskConv(nn.Module):
def __init__(self, seq_module):
super(MaskConv, self).__init__()
self.seq_module = seq_module
def forward(self, x, lengths):
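        # After every submodule, zero out positions past each sequence's true
        # length so convolution padding does not leak into later layers.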
for module in self.seq_module:
x = module(x)
mask = torch.BoolTensor(x.size()).fill_(0)
if x.is_cuda:
mask = mask.cuda()
for i, length in enumerate(lengths):
length = length.item()
if (mask[i].size(2) - length) > 0:
mask[i].narrow(2, length, mask[i].size(2) - length).fill_(1)
x = x.masked_fill(mask, 0)
return x, lengths
class BatchRNN(nn.Module):
def __init__(self, input_size, hidden_size, rnn_type=nn.LSTM, bidirectional=True, batch_norm=True):
super(BatchRNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.batch_norm = nn.BatchNorm1d(input_size) if batch_norm else None
self.rnn = rnn_type(input_size=input_size, hidden_size=hidden_size,
bidirectional=bidirectional, bias=True)
def forward(self, x, output_lengths):
lens = x.size(0)
if self.batch_norm is not None:
x = self.batch_norm(x.permute(0, 2, 1)).permute(0, 2, 1)
x = nn.utils.rnn.pack_padded_sequence(x, output_lengths, enforce_sorted=False)
x, h = self.rnn(x)
x, _ = nn.utils.rnn.pad_packed_sequence(x, total_length=lens)
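        # Sum the two directions of the bidirectional RNN instead of
        # concatenating them, so the output stays hidden_size wide.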
if self.bidirectional:
x = x.view(x.size(0), x.size(1), 2, -1).sum(2).view(x.size(0), x.size(1), -1)
return x
class DeepSpeech(BaseModel):
def __init__(self, n_feats, n_class, hidden_size=512, num_layers=5, *args, **kwargs):
super().__init__(n_feats, n_class, *args, **kwargs)
self.conv = MaskConv(nn.Sequential(
nn.Conv2d(1, 32, kernel_size=(41, 11), stride=(2, 2), padding=(20, 5)),
nn.BatchNorm2d(32),
nn.Hardtanh(0, 20, inplace=True),
nn.Conv2d(32, 32, kernel_size=(21, 11), stride=(2, 1), padding=(10, 5)),
nn.BatchNorm2d(32),
nn.Hardtanh(0, 20, inplace=True)
))
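        # Frequency-axis size after the two convolutions above
        # (kernel 41 then 21, stride 2, padding 20/10), times 32 channels.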
rnn_input_size = int(math.floor(n_feats + 2 * 20 - 41) / 2 + 1)
rnn_input_size = int(math.floor(rnn_input_size + 2 * 10 - 21) / 2 + 1)
rnn_input_size *= 32
self.rnns = nn.Sequential(
BatchRNN(
input_size=rnn_input_size,
hidden_size=hidden_size,
batch_norm=False
),
*(
BatchRNN(
input_size=hidden_size,
hidden_size=hidden_size,
) for _ in range(num_layers - 1)
)
)
self.fc = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, n_class, bias=False)
)
def forward(self, spectrogram, spectrogram_length, *args, **kwargs):
x = spectrogram.unsqueeze(1).permute(0, 1, 3, 2)
lengths = spectrogram_length.cpu().int()
output_lengths = self.get_seq_lens(lengths)
x, output_lengths = self.conv(x, output_lengths)
sizes = x.size()
x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3])
x = x.permute(2, 0, 1).contiguous()
for rnn in self.rnns:
x = rnn(x, output_lengths)
x = self.fc(x)
x = x.transpose(0, 1)
        x = F.log_softmax(x, dim=-1)  # normalise over the class dimension
return x
def get_seq_lens(self, input_length):
seq_len = input_length
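        # Apply the standard convolution length formula for each Conv2d:
        # L_out = floor((L_in + 2*pad - dilation*(kernel - 1) - 1) / stride + 1)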
for m in self.conv.modules():
if type(m) == nn.modules.conv.Conv2d:
seq_len = ((seq_len + 2 * m.padding[1] - m.dilation[1] * (m.kernel_size[1] - 1) - 1) // m.stride[1] + 1)
return seq_len.int()
def transform_input_lengths(self, input_lengths):
return input_lengths // 2
|
ArseniyBolotin/asr_project
|
hw_asr/model/deepspeech.py
|
deepspeech.py
|
py
| 4,068
|
python
|
en
|
code
| 0
|
github-code
|
6
|
811875796
|
# Substring with Concatenation of All Words - https://leetcode.com/problems/substring-with-concatenation-of-all-words/
'''You are given a string, s, and a list of words, words, that are all of the same length. Find all starting indices
of substring(s) in s that is a concatenation of each word in words exactly once and without any intervening characters.
'''
# Input:
# s = "barfoothefoobarman",
# words = ["foo","bar"]
# Output: [0,9]
from collections import Counter
from typing import List
class Solution:
def findSubstring(self, s: str, words: List[str]) -> List[int]:
if not s or not words:
return []
wordLength = len(words[0])
output = []
for i in range(wordLength):
word_count = Counter(words) # Reset counter every time
start, end, count = i, i, len(words)
while end < len(s):
current = s[end:end+wordLength]
if current in word_count:
word_count[current] -= 1
if word_count[current] >= 0:
count -= 1
end += wordLength
if count == 0:
output.append(start)
if end - start == wordLength * len(words): # Ensure consecutive words
current = s[start:start+wordLength]
if current in word_count:
word_count[current] += 1
if word_count[current] > 0:
count += 1
start += wordLength
return output
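# A small self-check using the example from the problem statement above
# (hypothetical driver, not part of the original solution):
if __name__ == "__main__":
    assert Solution().findSubstring("barfoothefoobarman", ["foo", "bar"]) == [0, 9]
    print("ok")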
|
Saima-Chaity/Leetcode
|
Sliding_Window/SubstringWithConcatenationOfAllWords.py
|
SubstringWithConcatenationOfAllWords.py
|
py
| 1,493
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20544187876
|
from taskManager.plotting import *
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--log_path",
type=str,
required=True,
help="Input TSV file path containing the output of ResourceMonitor"
)
parser.add_argument(
"--output_dir", "-o",
type=str,
default="./output/",
required=False,
help="Desired output directory path (will be created during run time if doesn't exist)"
)
args = parser.parse_args()
plot_resources_main(file_path=args.log_path, output_dir=args.output_dir)
|
rlorigro/TaskManager
|
bin/plot_resource_usage.py
|
plot_resource_usage.py
|
py
| 630
|
python
|
en
|
code
| 4
|
github-code
|
6
|
30815309971
|
from typing import List, Tuple
import cmath
import random
_inv_root2 = 1 / cmath.sqrt(2)
_root2 = cmath.sqrt(2)
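# 1/sqrt(2) is the amplitude that gives an equal-probability superposition of
# two basis states (|amplitude|^2 = 1/2 for each component).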
bra = List[complex]
ket = List[complex]
def vdot(v1, v2):
return sum(v1[i] * v2[i] for i in range(len(v1)))
def vinv(v):
return [-x for x in v]
def qdot(q1: ket, q2: ket) -> complex:
return sum([q1[i].conjugate() * q2[i] for i in range(len(q1))])
def qmag(q1: ket, q2: ket) -> complex:
return qdot(q1, q2) * qdot(q2, q1)
def qbra(q1: ket) -> bra:
return [q.conjugate() for q in q1]
def qbasis(a: complex, b: complex, basis_a: ket, basis_b: ket) -> ket:
return [a * q1 + b * q2 for q1, q2 in zip(basis_a, basis_b)]
class QSpin:
# static variables
_spin_states = {
'up': [1, 0],
'down': [0, 1],
'right': qbasis(_inv_root2, _inv_root2, [1, 0], [0, 1]),
'left': qbasis(_inv_root2, -_inv_root2, [1, 0], [0, 1]),
'in': qbasis(_inv_root2, 1j * _inv_root2, [1, 0], [0, 1]),
'out': qbasis(_inv_root2, -1j * _inv_root2, [1, 0], [0, 1]),
}
spin_state_type = Tuple[complex, complex]
spin_coords_type = Tuple[float, float, float]
def __init__(self):
self.state = QSpin._spin_states['up']
def set_state(self, new_state: spin_state_type):
self.state = new_state
@staticmethod
def coords_to_state(coords: spin_coords_type):
x, y, z = coords
cu = z + _inv_root2 * y + _inv_root2 * x
cd = _inv_root2 * x + 1j * _inv_root2 * y
return cu, cd
@staticmethod
def state_to_coords(state: spin_state_type):
cu = qdot(QSpin._spin_states['up'], state)
cd = qdot(QSpin._spin_states['down'], state)
y = cd.imag * _root2
x = cd.real * _root2
z = cu.real - _inv_root2 * y - _inv_root2 * x
return x.real, y.real, z.real
def set_state_with_coords(self, coords: spin_coords_type):
cu, cd = QSpin.coords_to_state(coords)
self.state = [cu, cd]
def state_as_coords(self):
return QSpin.state_to_coords(self.state)
def measure_spin(self, coords: spin_coords_type):
        if abs(coords[0] ** 2 + coords[1] ** 2 + coords[2] ** 2 - 1) > 1e-9:  # tolerate float rounding
raise ValueError('Spin coordinates must be of unit length')
# get the angle between the current spin state and the desired spin state
angle = cmath.acos(vdot(self.state_as_coords(), coords))
# the probability of measuring +1
p1 = cmath.cos(0.5 * angle).real ** 2
# pn1 = cmath.sin(0.5 * angle).real ** 2
if random.random() < p1:
# if +1, set state to +1
self.set_state(QSpin.coords_to_state(coords))
return 1
else:
# else, set state to -1
self.set_state(QSpin.coords_to_state(vinv(coords)))
return -1
def matrix_mul(A, v):
return [vdot(A[i], v) for i in range(len(A))]
def test_xyz():
x, y, z = [0, _inv_root2, -_inv_root2]
print(x ** 2 + y ** 2 + z ** 2)
I = 1j
M = [[z, x-y*I], [x+y*I, -z]]
A = [
_inv_root2 * x + _inv_root2 * y + z,
_inv_root2 * x - _inv_root2 * I * y
]
print(matrix_mul(M, A))
print(A)
if __name__ == '__main__':
test_xyz()
exit()
spin = QSpin()
# print(QSpin._spin_states['up'])
# print(QSpin._spin_states['right'])
# print(qmag(QSpin._spin_states['up'], QSpin._spin_states['right']))
# print("initial state")
print(spin.state_as_coords())
print(spin.measure_spin([0, 1, 0]))
print(spin.measure_spin([0, 1, 0]))
print(spin.measure_spin([0, 1, 0]))
print(spin.measure_spin([0, 1, 0]))
print(spin.measure_spin([0, 1, 0]))
# print(spin.measure_spin([1, 0, 0]))
# print(spin.measure_spin([1, 0, 0]))
# print(spin.measure_spin([1, 0, 0]))
# print(spin.measure_spin([1, 0, 0]))
# print(spin.measure_spin([1, 0, 0]))
# print(spin.measure_spin([1, 0, 0]))
# print("final state")
# print(spin.state_as_coords())
|
Wroppy/werry_math
|
physics/quantum/systems.py
|
systems.py
|
py
| 3,990
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7796630315
|
import ZSI;
from ZSIPatch import Struct
class GetStatus_Dec(Struct):
schema = 'http://opcfoundation.org/webservices/XMLDA/1.0/'
literal = 'GetStatus'
def __init__(self, name=None, ns=None, **kw):
name = name or self.__class__.literal
ns = ns or self.__class__.schema
self._LocaleID = None;
self._ClientRequestHandle = None;
TClist = [];
AttrList = [ ZSI.TC.String(pname="LocaleID", aname="_LocaleID"),
ZSI.TC.String(pname="ClientRequestHandle", aname="_ClientRequestHandle"), ];
oname = name
if name:
aname = '_%s' % name
if ns:
oname += ' xmlns="%s"' % ns
else:
oname += ' xmlns="%s"' % self.__class__.schema
else:
aname = None
Struct.__init__( self, self.__class__, TClist, AttrList,
pname=name, inorder=0,
aname=aname, oname=oname,
**kw)
def Get_LocaleID(self):
return(self._LocaleID);
    def Set_LocaleID(self, ID): self._LocaleID = ID;
def Get_ClientRequestHandle(self): return(self._ClientRequestHandle);
def Set_ClientRequestHandle(self, Handle): self._ClientRequestHandle = Handle;
|
BackupTheBerlios/pplt-svn
|
PPLT/Modules/Core/Server/OPCXML/OPCTypes/GetStatus.py
|
GetStatus.py
|
py
| 1,073
|
python
|
en
|
code
| 0
|
github-code
|
6
|
32603947460
|
from django.db import models
from simple_history.models import HistoricalRecords
from base.models.base import AuthBaseEntity
from base.models.inventory import Inventory
class Promotion(AuthBaseEntity):
class Meta:
ordering = ['-modified', '-created']
inventory = models.ForeignKey(Inventory, on_delete=models.CASCADE)
promotion_price = models.IntegerField(default=0)
promotion_start_date = models.DateField(null=False, blank=False)
promotion_end_date = models.DateField(null=False, blank=False)
description = models.CharField(max_length=250)
history = HistoricalRecords()
def __str__(self):
return f"{self.inventory.name}"
|
SainezKimutai/test-capital
|
base/models/promotion.py
|
promotion.py
|
py
| 678
|
python
|
en
|
code
| 0
|
github-code
|
6
|
28990015892
|
from sys import platform, version_info
if True:  # hardcoded to PyQt5; the PyQt4 fallback below is kept but unused
from PyQt5.QtCore import pyqtSlot, Qt, QSettings, QTimer
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QMessageBox
else:
from PyQt4.QtCore import pyqtSlot, Qt, QSettings, QTimer
from PyQt4.QtGui import QFontMetrics
from PyQt4.QtGui import QDialog, QDialogButtonBox, QMessageBox
# ------------------------------------------------------------------------------------------------------------
# Imports (Custom Stuff)
import ui_settings_jack
# ------------------------------------------------------------------------------------------------------------
# Try Import DBus
try:
import dbus
except:
dbus = None
# ------------------------------------------------------------------------------------------------------------
# Global object
global gJackctl, gResetNeeded
gJackctl = None
gResetNeeded = False
# ------------------------------------------------------------------------------------------------------------
# enum jack_timer_type_t
JACK_TIMER_SYSTEM_CLOCK = 0
JACK_TIMER_CYCLE_COUNTER = 1
JACK_TIMER_HPET = 2
# ------------------------------------------------------------------------------------------------------------
# Set Platform
if "linux" in platform:
LINUX = True
if version_info >= (3, 0):
from subprocess import getoutput
else:
from commands import getoutput
else:
LINUX = False
# ------------------------------------------------------------------------------------------------------------
# Init DBus
def initBus(bus):
global gJackctl
if not bus:
gJackctl = None
return 1
try:
gJackctl = dbus.Interface(bus.get_object("org.jackaudio.service", "/org/jackaudio/Controller"), "org.jackaudio.Configure")
return 0
except:
gJackctl = None
return 1
def needsInit():
global gJackctl
return bool(gJackctl is None)
def setResetNeeded(yesNo):
global gResetNeeded
gResetNeeded = yesNo
# ------------------------------------------------------------------------------------------------------------
# Helper functions
def getBufferSize():
return getDriverParameter("period", -1)
def getSampleRate():
return getDriverParameter("rate", -1)
def isRealtime():
return getEngineParameter("realtime", False)
def setBufferSize(bsize):
return setDriverParameter("period", dbus.UInt32(bsize))
def setSampleRate(srate):
return setDriverParameter("rate", dbus.UInt32(srate))
# ------------------------------------------------------------------------------------------------------------
# Helper functions (engine)
def engineHasFeature(feature):
if gJackctl is None:
return False
try:
featureList = gJackctl.ReadContainer(["engine"])[1]
except:
featureList = ()
return bool(dbus.String(feature) in featureList)
def getEngineParameter(parameter, fallback):
if gJackctl is None or not engineHasFeature(parameter):
return fallback
else:
try:
return gJackctl.GetParameterValue(["engine", parameter])[2]
except:
return fallback
def setEngineParameter(parameter, value, optional=True):
if not engineHasFeature(parameter):
return False
elif optional:
paramValueTry = gJackctl.GetParameterValue(["engine", parameter])
if paramValueTry is None:
return False
paramValue = paramValueTry[2]
if value != paramValue:
return bool(gJackctl.SetParameterValue(["engine", parameter], value))
else:
return False
else:
return bool(gJackctl.SetParameterValue(["engine", parameter], value))
# ------------------------------------------------------------------------------------------------------------
# Helper functions (driver)
def driverHasFeature(feature):
if gJackctl is None:
return False
try:
featureList = gJackctl.ReadContainer(["driver"])[1]
except:
featureList = ()
return bool(dbus.String(feature) in featureList)
def getDriverParameter(parameter, fallback):
if gJackctl is None or not driverHasFeature(parameter):
return fallback
else:
try:
return gJackctl.GetParameterValue(["driver", parameter])[2]
except:
return fallback
def setDriverParameter(parameter, value, optional=True):
if not driverHasFeature(parameter):
return False
elif optional:
if value != gJackctl.GetParameterValue(["driver", parameter])[2]:
return bool(gJackctl.SetParameterValue(["driver", parameter], value))
else:
return False
else:
return bool(gJackctl.SetParameterValue(["driver", parameter], value))
# ------------------------------------------------------------------------------------------------------------
# JACK Settings Dialog
class JackSettingsW(QDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.ui = ui_settings_jack.Ui_JackSettingsW()
self.ui.setupUi(self)
# -------------------------------------------------------------
# Check if we've got valid control interface
global gJackctl
if gJackctl is None:
            QTimer.singleShot(0, self.slot_closeWithError)
return
# -------------------------------------------------------------
# Align driver text and hide non available ones
driverList = gJackctl.ReadContainer(["drivers"])[1]
        fontMetrics = QFontMetrics(self.ui.obj_server_driver.font())
        maxWidth = 75
        for i in range(self.ui.obj_server_driver.rowCount()):
            item = self.ui.obj_server_driver.item(i, 0)
            item.setTextAlignment(Qt.AlignCenter)
            itemText = item.text()
            itemWidth = fontMetrics.width(itemText)+25
            if itemWidth > maxWidth:
                maxWidth = itemWidth
            if dbus.String(itemText.lower()) not in driverList:
                self.ui.obj_server_driver.hideRow(i)
self.ui.obj_server_driver.setMinimumWidth(maxWidth)
self.ui.obj_server_driver.setMaximumWidth(maxWidth)
# -------------------------------------------------------------
# Set-up connections
self.accepted.connect(self.slot_saveJackSettings)
self.ui.buttonBox.button(QDialogButtonBox.Reset).clicked.connect(self.slot_resetJackSettings)
self.ui.obj_driver_duplex.clicked.connect(self.slot_checkDuplexSelection)
self.ui.obj_server_driver.currentCellChanged.connect(self.slot_checkDriverSelection)
self.ui.obj_driver_capture.currentIndexChanged[int].connect(self.slot_checkALSASelection)
self.ui.obj_driver_playback.currentIndexChanged[int].connect(self.slot_checkALSASelection)
# -------------------------------------------------------------
# Load initial settings
self.fDriverName = ""
self.fBrokenServerClockSource = False
self.checkEngine()
self.loadServerSettings()
self.loadDriverSettings(True) # reset because we'll change it below
# -------------------------------------------------------------
# Load selected JACK driver
self.fDriverName = str(gJackctl.GetParameterValue(["engine", "driver"])[2])
for i in range(self.ui.obj_server_driver.rowCount()):
if self.ui.obj_server_driver.item(i, 0).text().lower() == self.fDriverName:
self.ui.obj_server_driver.setCurrentCell(i, 0)
break
# Special ALSA check
self.slot_checkALSASelection()
# -------------------------------------------------------------
# Load last GUI settings
self.loadSettings()
# -----------------------------------------------------------------
# Engine calls
def checkEngine(self):
self.ui.obj_server_realtime.setEnabled(engineHasFeature("realtime"))
self.ui.obj_server_realtime_priority.setEnabled(engineHasFeature("realtime-priority"))
self.ui.obj_server_temporary.setEnabled(engineHasFeature("temporary"))
self.ui.obj_server_verbose.setEnabled(engineHasFeature("verbose"))
self.ui.obj_server_alias.setEnabled(engineHasFeature("alias"))
self.ui.obj_server_client_timeout.setEnabled(engineHasFeature("client-timeout"))
self.ui.obj_server_clock_source.setEnabled(engineHasFeature("clock-source"))
self.ui.obj_server_port_max.setEnabled(engineHasFeature("port-max"))
self.ui.obj_server_replace_registry.setEnabled(engineHasFeature("replace-registry"))
self.ui.obj_server_sync.setEnabled(engineHasFeature("sync"))
self.ui.obj_server_self_connect_mode.setEnabled(engineHasFeature("self-connect-mode"))
# Disable clock-source if not on Linux
if not LINUX:
self.ui.obj_server_clock_source.setEnabled(False)
# -----------------------------------------------------------------
# Server calls
def saveServerSettings(self):
# always reset server name
if engineHasFeature("name"):
setEngineParameter("name", "default", True)
if self.ui.obj_server_realtime.isEnabled():
value = dbus.Boolean(self.ui.obj_server_realtime.isChecked())
setEngineParameter("realtime", value, True)
if self.ui.obj_server_realtime_priority.isEnabled():
value = dbus.Int32(self.ui.obj_server_realtime_priority.value())
setEngineParameter("realtime-priority", value, True)
if self.ui.obj_server_temporary.isEnabled():
value = dbus.Boolean(self.ui.obj_server_temporary.isChecked())
setEngineParameter("temporary", value, True)
if self.ui.obj_server_verbose.isEnabled():
value = dbus.Boolean(self.ui.obj_server_verbose.isChecked())
setEngineParameter("verbose", value, True)
if self.ui.obj_server_alias.isEnabled():
value = dbus.Boolean(self.ui.obj_server_alias.isChecked())
setEngineParameter("alias", value, True)
if self.ui.obj_server_client_timeout.isEnabled():
value = dbus.Int32(int(self.ui.obj_server_client_timeout.currentText()))
setEngineParameter("client-timeout", value, True)
if self.ui.obj_server_clock_source.isEnabled():
if self.ui.obj_server_clock_source_system.isChecked():
if self.fBrokenServerClockSource:
value = dbus.UInt32(JACK_TIMER_SYSTEM_CLOCK)
else:
value = dbus.Byte("s".encode("utf-8"))
elif self.ui.obj_server_clock_source_cycle.isChecked():
if self.fBrokenServerClockSource:
value = dbus.UInt32(JACK_TIMER_CYCLE_COUNTER)
else:
value = dbus.Byte("c".encode("utf-8"))
elif self.ui.obj_server_clock_source_hpet.isChecked():
if self.fBrokenServerClockSource:
value = dbus.UInt32(JACK_TIMER_HPET)
else:
value = dbus.Byte("h".encode("utf-8"))
else:
value = None
print("JackSettingsW::saveServerSettings() - Cannot save clock-source value")
            if value is not None:
setEngineParameter("clock-source", value, True)
if self.ui.obj_server_port_max.isEnabled():
value = dbus.UInt32(int(self.ui.obj_server_port_max.currentText()))
setEngineParameter("port-max", value, True)
if self.ui.obj_server_replace_registry.isEnabled():
value = dbus.Boolean(self.ui.obj_server_replace_registry.isChecked())
setEngineParameter("replace-registry", value, True)
if self.ui.obj_server_sync.isEnabled():
value = dbus.Boolean(self.ui.obj_server_sync.isChecked())
setEngineParameter("sync", value, True)
if self.ui.obj_server_self_connect_mode.isEnabled():
if self.ui.obj_server_self_connect_mode_0.isChecked():
value = dbus.Byte(" ".encode("utf-8"))
elif self.ui.obj_server_self_connect_mode_1.isChecked():
value = dbus.Byte("E".encode("utf-8"))
elif self.ui.obj_server_self_connect_mode_2.isChecked():
value = dbus.Byte("e".encode("utf-8"))
elif self.ui.obj_server_self_connect_mode_3.isChecked():
value = dbus.Byte("A".encode("utf-8"))
elif self.ui.obj_server_self_connect_mode_4.isChecked():
value = dbus.Byte("a".encode("utf-8"))
else:
value = None
print("JackSettingsW::saveServerSettings() - Cannot save self-connect-mode value")
            if value is not None:
setEngineParameter("self-connect-mode", value, True)
def loadServerSettings(self, reset=False, forceReset=False):
global gJackctl
settings = gJackctl.ReadContainer(["engine"])
for i in range(len(settings[1])):
attribute = str(settings[1][i])
if reset:
valueTry = gJackctl.GetParameterValue(["engine", attribute])
if valueTry is None:
continue
else:
value = valueTry[1]
if forceReset and attribute != "driver":
gJackctl.ResetParameterValue(["engine", attribute])
else:
valueTry = gJackctl.GetParameterValue(["engine", attribute])
if valueTry is None:
continue
else:
value = valueTry[2]
if attribute == "name":
pass # Don't allow to change this
elif attribute == "realtime":
self.ui.obj_server_realtime.setChecked(bool(value))
elif attribute == "realtime-priority":
self.ui.obj_server_realtime_priority.setValue(int(value))
elif attribute == "temporary":
self.ui.obj_server_temporary.setChecked(bool(value))
elif attribute == "verbose":
self.ui.obj_server_verbose.setChecked(bool(value))
elif attribute == "alias":
self.ui.obj_server_alias.setChecked(bool(value))
elif attribute == "client-timeout":
self.setComboBoxValue(self.ui.obj_server_client_timeout, str(int(value)))
elif attribute == "clock-source":
if len(str(value)) == 1 and not isinstance(value, dbus.UInt32):
value = str(value)
if value == "c":
self.ui.obj_server_clock_source_cycle.setChecked(True)
elif value == "h":
self.ui.obj_server_clock_source_hpet.setChecked(True)
elif value == "s":
self.ui.obj_server_clock_source_system.setChecked(True)
else:
value = int(value)
self.fBrokenServerClockSource = True
if value == JACK_TIMER_SYSTEM_CLOCK:
self.ui.obj_server_clock_source_system.setChecked(True)
elif value == JACK_TIMER_CYCLE_COUNTER:
self.ui.obj_server_clock_source_cycle.setChecked(True)
elif value == JACK_TIMER_HPET:
self.ui.obj_server_clock_source_hpet.setChecked(True)
else:
self.ui.obj_server_clock_source.setEnabled(False)
print("JackSettingsW::saveServerSettings() - Invalid clock-source value '%s'" % value)
elif attribute == "port-max":
self.setComboBoxValue(self.ui.obj_server_port_max, str(int(value)))
elif attribute == "replace-registry":
self.ui.obj_server_replace_registry.setChecked(bool(value))
elif attribute == "sync":
self.ui.obj_server_sync.setChecked(bool(value))
elif attribute == "self-connect-mode":
value = str(value)
if value == " ":
self.ui.obj_server_self_connect_mode_0.setChecked(True)
elif value == "E":
self.ui.obj_server_self_connect_mode_1.setChecked(True)
elif value == "e":
self.ui.obj_server_self_connect_mode_2.setChecked(True)
elif value == "A":
self.ui.obj_server_self_connect_mode_3.setChecked(True)
elif value == "a":
self.ui.obj_server_self_connect_mode_4.setChecked(True)
else:
self.ui.obj_server_self_connect_mode.setEnabled(False)
print("JackSettingsW::loadServerSettings() - Invalid self-connect-mode value '%s'" % value)
elif attribute in ("driver", "slave-drivers"):
pass
else:
print("JackSettingsW::loadServerSettings() - Unimplemented server attribute '%s', value: '%s'" % (attribute, str(value)))
# -----------------------------------------------------------------
# Driver calls
# resetIfNeeded: fix alsa parameter re-order bug in JACK 1.9.8 (reset/remove non-used values)
def saveDriverSettings(self, resetIfNeeded):
global gJackctl, gResetNeeded
if resetIfNeeded and not gResetNeeded:
resetIfNeeded = False
if self.ui.obj_driver_device.isEnabled():
value = dbus.String(self.ui.obj_driver_device.currentText().split(" [")[0])
if value != gJackctl.GetParameterValue(["driver", "device"])[2]:
gJackctl.SetParameterValue(["driver", "device"], value)
elif resetIfNeeded:
gJackctl.ResetParameterValue(["driver", "device"])
if self.ui.obj_driver_capture.isEnabled():
if self.fDriverName == "alsa":
value = dbus.String(self.ui.obj_driver_capture.currentText().split(" ")[0])
elif self.fDriverName == "dummy":
value = dbus.UInt32(int(self.ui.obj_driver_capture.currentText()))
elif self.fDriverName == "firewire":
value = dbus.Boolean(self.ui.obj_driver_capture.currentIndex() == 1)
else:
value = None
print("JackSettingsW::saveDriverSettings() - Cannot save capture value")
            if value is not None:
setDriverParameter("capture", value, True)
elif resetIfNeeded:
gJackctl.ResetParameterValue(["driver", "capture"])
if self.ui.obj_driver_playback.isEnabled():
if self.fDriverName == "alsa":
value = dbus.String(self.ui.obj_driver_playback.currentText().split(" ")[0])
elif self.fDriverName == "dummy":
value = dbus.UInt32(int(self.ui.obj_driver_playback.currentText()))
elif self.fDriverName == "firewire":
value = dbus.Boolean(self.ui.obj_driver_playback.currentIndex() == 1)
else:
value = None
print("JackSettingsW::saveDriverSettings() - Cannot save playback value")
            if value is not None:
setDriverParameter("playback", value, True)
elif resetIfNeeded:
gJackctl.ResetParameterValue(["driver", "playback"])
if self.ui.obj_driver_rate.isEnabled():
value = dbus.UInt32(int(self.ui.obj_driver_rate.currentText()))
setDriverParameter("rate", value, True)
if self.ui.obj_driver_period.isEnabled():
value = dbus.UInt32(int(self.ui.obj_driver_period.currentText()))
setDriverParameter("period", value, True)
if self.ui.obj_driver_nperiods.isEnabled():
value = dbus.UInt32(self.ui.obj_driver_nperiods.value())
setDriverParameter("nperiods", value, True)
if self.ui.obj_driver_hwmon.isEnabled():
value = dbus.Boolean(self.ui.obj_driver_hwmon.isChecked())
setDriverParameter("hwmon", value, True)
if self.ui.obj_driver_hwmeter.isEnabled():
value = dbus.Boolean(self.ui.obj_driver_hwmeter.isChecked())
setDriverParameter("hwmeter", value, True)
if self.ui.obj_driver_duplex.isEnabled():
value = dbus.Boolean(self.ui.obj_driver_duplex.isChecked())
setDriverParameter("duplex", value, True)
if self.ui.obj_driver_hw_alias.isEnabled():
value = dbus.Boolean(self.ui.obj_driver_hw_alias.isChecked())
setDriverParameter("hw-alias", value, True)
if self.ui.obj_driver_softmode.isEnabled():
value = dbus.Boolean(self.ui.obj_driver_softmode.isChecked())
setDriverParameter("softmode", value, True)
if self.ui.obj_driver_monitor.isEnabled():
value = dbus.Boolean(self.ui.obj_driver_monitor.isChecked())
setDriverParameter("monitor", value, True)
if self.ui.obj_driver_dither.isEnabled():
if self.ui.obj_driver_dither.currentIndex() == 0:
value = dbus.Byte("n".encode("utf-8"))
elif self.ui.obj_driver_dither.currentIndex() == 1:
value = dbus.Byte("r".encode("utf-8"))
elif self.ui.obj_driver_dither.currentIndex() == 2:
value = dbus.Byte("s".encode("utf-8"))
elif self.ui.obj_driver_dither.currentIndex() == 3:
value = dbus.Byte("t".encode("utf-8"))
else:
value = None
print("JackSettingsW::saveDriverSettings() - Cannot save dither value")
            if value is not None:
setDriverParameter("dither", value, True)
if self.ui.obj_driver_inchannels.isEnabled():
value = dbus.UInt32(self.ui.obj_driver_inchannels.value())
setDriverParameter("inchannels", value, True)
if self.ui.obj_driver_outchannels.isEnabled():
value = dbus.UInt32(self.ui.obj_driver_outchannels.value())
setDriverParameter("outchannels", value, True)
if self.ui.obj_driver_shorts.isEnabled():
value = dbus.Boolean(self.ui.obj_driver_shorts.isChecked())
setDriverParameter("shorts", value, True)
if self.ui.obj_driver_input_latency.isEnabled():
value = dbus.UInt32(self.ui.obj_driver_input_latency.value())
setDriverParameter("input-latency", value, True)
if self.ui.obj_driver_output_latency.isEnabled():
value = dbus.UInt32(self.ui.obj_driver_output_latency.value())
setDriverParameter("output-latency", value, True)
if self.ui.obj_driver_midi_driver.isEnabled():
if self.ui.obj_driver_midi_driver.currentIndex() == 0:
value = dbus.String("none")
elif self.ui.obj_driver_midi_driver.currentIndex() == 1:
value = dbus.String("seq")
elif self.ui.obj_driver_midi_driver.currentIndex() == 2:
value = dbus.String("raw")
else:
value = None
print("JackSettingsW::saveDriverSettings() - Cannot save midi-driver value")
            if value is not None:
if driverHasFeature("midi"):
setDriverParameter("midi", value, True)
else:
setDriverParameter("midi-driver", value, True)
if self.ui.obj_driver_wait.isEnabled():
value = dbus.UInt32(self.ui.obj_driver_wait.value())
setDriverParameter("wait", value, True)
if self.ui.obj_driver_verbose.isEnabled():
value = dbus.UInt32(self.ui.obj_driver_verbose.value())
setDriverParameter("verbose", value, True)
if self.ui.obj_driver_snoop.isEnabled():
value = dbus.Boolean(self.ui.obj_driver_snoop.isChecked())
setDriverParameter("snoop", value, True)
if self.ui.obj_driver_channels.isEnabled():
value = dbus.Int32(self.ui.obj_driver_channels.value())
setDriverParameter("channels", value, True)
def loadDriverSettings(self, reset=False, forceReset=False):
global gJackctl
settings = gJackctl.ReadContainer(["driver"])
for i in range(len(settings[1])):
attribute = str(settings[1][i])
if reset:
value = gJackctl.GetParameterValue(["driver", attribute])[1]
if forceReset:
gJackctl.ResetParameterValue(["driver", attribute])
else:
value = gJackctl.GetParameterValue(["driver", attribute])[2]
if attribute == "device":
self.setComboBoxValue(self.ui.obj_driver_device, str(value), True)
elif attribute == "capture":
if self.fDriverName == "firewire":
self.ui.obj_driver_capture.setCurrentIndex(1 if bool(value) else 0)
elif self.fDriverName == "dummy":
self.setComboBoxValue(self.ui.obj_driver_capture, str(int(value)), True)
else:
self.setComboBoxValue(self.ui.obj_driver_capture, str(value), True)
elif attribute == "playback":
if self.fDriverName == "firewire":
self.ui.obj_driver_playback.setCurrentIndex(1 if bool(value) else 0)
elif self.fDriverName == "dummy":
self.setComboBoxValue(self.ui.obj_driver_playback, str(int(value)), True)
else:
self.setComboBoxValue(self.ui.obj_driver_playback, str(value), True)
elif attribute == "rate":
self.setComboBoxValue(self.ui.obj_driver_rate, str(int(value)))
elif attribute == "period":
self.setComboBoxValue(self.ui.obj_driver_period, str(int(value)))
elif attribute == "nperiods":
self.ui.obj_driver_nperiods.setValue(int(value))
elif attribute == "hwmon":
self.ui.obj_driver_hwmon.setChecked(bool(value))
elif attribute == "hwmeter":
self.ui.obj_driver_hwmeter.setChecked(bool(value))
elif attribute == "duplex":
self.ui.obj_driver_duplex.setChecked(bool(value))
elif attribute == "hw-alias":
self.ui.obj_driver_hw_alias.setChecked(bool(value))
elif attribute == "softmode":
self.ui.obj_driver_softmode.setChecked(bool(value))
elif attribute == "monitor":
self.ui.obj_driver_monitor.setChecked(bool(value))
elif attribute == "dither":
value = str(value)
if value == "n":
self.ui.obj_driver_dither.setCurrentIndex(0)
elif value == "r":
self.ui.obj_driver_dither.setCurrentIndex(1)
elif value == "s":
self.ui.obj_driver_dither.setCurrentIndex(2)
elif value == "t":
self.ui.obj_driver_dither.setCurrentIndex(3)
else:
self.ui.obj_driver_dither.setEnabled(False)
print("JackSettingsW::loadDriverSettings() - Invalid dither value '%s'" % value)
elif attribute == "inchannels":
self.ui.obj_driver_inchannels.setValue(int(value))
elif attribute == "outchannels":
self.ui.obj_driver_outchannels.setValue(int(value))
elif attribute == "shorts":
self.ui.obj_driver_shorts.setChecked(bool(value))
elif attribute == "input-latency":
self.ui.obj_driver_input_latency.setValue(int(value))
elif attribute == "output-latency":
self.ui.obj_driver_output_latency.setValue(int(value))
elif attribute in ("midi", "midi-driver"):
value = str(value)
if value == "none":
self.ui.obj_driver_midi_driver.setCurrentIndex(0)
elif value == "seq":
self.ui.obj_driver_midi_driver.setCurrentIndex(1)
elif value == "raw":
self.ui.obj_driver_midi_driver.setCurrentIndex(2)
else:
self.ui.obj_driver_midi_driver.setEnabled(False)
print("JackSettingsW::loadDriverSettings() - Invalid midi-driver value '%s'" % value)
elif attribute == "wait":
self.ui.obj_driver_wait.setValue(int(value))
elif attribute == "verbose":
self.ui.obj_driver_verbose.setValue(int(value))
elif attribute == "snoop":
self.ui.obj_driver_snoop.setChecked(bool(value))
elif attribute == "channels":
self.ui.obj_driver_channels.setValue(int(value))
else:
print("JackSettingsW::loadDriverSettings() - Unimplemented driver attribute '%s', value: '%s'" % (attribute, str(value)))
# -----------------------------------------------------------------
# Helper functions
def getAlsaDeviceList(self, playback=True):
alsaDeviceList = []
executable = 'aplay' if playback else 'arecord'
aplay_out = getoutput("env LANG=C LC_ALL=C {} -l".format(executable)).split("\n")
for line in aplay_out:
line = line.strip()
if line.startswith("card "):
cardInfo = line.split(", ", 1)[0].split(": ")
cardIndex = cardInfo[0].replace("card ", "")
cardName = cardInfo[1].split(" [")[0]
deviceInfo = line.split(", ", 1)[1].split(": ")
deviceIndex = deviceInfo[0].replace("device ", "")
deviceName = deviceInfo[1].split(" [")[0]
if cardName != "Loopback":
fullName = "hw:%s,%s [%s]" % (cardName, deviceIndex, deviceName)
alsaDeviceList.append(fullName)
return alsaDeviceList
def setComboBoxValue(self, box, text, split=False):
for i in range(box.count()):
if box.itemText(i) == text or (box.itemText(i).split(" ")[0] == text and split):
box.setCurrentIndex(i)
break
else:
if text:
box.addItem(text)
box.setCurrentIndex(box.count() - 1)
# -----------------------------------------------------------------
# Qt SLOT calls
@pyqtSlot(int)
def slot_checkALSASelection(self, ignored=0):
if self.fDriverName == "alsa":
check = bool(self.ui.obj_driver_duplex.isChecked() and (self.ui.obj_driver_capture.currentIndex() > 0 or self.ui.obj_driver_playback.currentIndex() > 0))
self.ui.obj_driver_device.setEnabled(not check)
@pyqtSlot(bool)
def slot_checkDuplexSelection(self, active):
if driverHasFeature("duplex"):
self.ui.obj_driver_capture.setEnabled(active)
self.ui.obj_driver_capture_label.setEnabled(active)
self.ui.obj_driver_playback.setEnabled(active)
self.ui.obj_driver_playback_label.setEnabled(active)
#self.ui.obj_driver_inchannels.setEnabled(active)
#self.ui.obj_driver_inchannels_label.setEnabled(active)
#self.ui.obj_driver_input_latency.setEnabled(active)
#self.ui.obj_driver_input_latency_label.setEnabled(active)
self.slot_checkALSASelection()
@pyqtSlot(int)
def slot_checkDriverSelection(self, row):
global gJackctl
# Save previous settings
self.saveDriverSettings(False)
# Set new Jack driver
self.fDriverName = dbus.String(self.ui.obj_server_driver.item(row, 0).text().lower())
gJackctl.SetParameterValue(["engine", "driver"], self.fDriverName)
# Add device list
self.ui.obj_driver_device.clear()
if driverHasFeature("device"):
if LINUX and self.fDriverName == "alsa":
dev_list = self.getAlsaDeviceList()
for dev in dev_list:
self.ui.obj_driver_device.addItem(dev)
else:
dev_list = gJackctl.GetParameterConstraint(["driver", "device"])[3]
for i in range(len(dev_list)):
self.ui.obj_driver_device.addItem(dev_list[i][0] + " [%s]" % str(dev_list[i][1]))
# Custom 'playback' and 'capture' values
self.ui.obj_driver_capture.clear()
self.ui.obj_driver_playback.clear()
if self.fDriverName == "alsa":
self.ui.obj_driver_capture.addItem("none")
self.ui.obj_driver_playback.addItem("none")
if LINUX:
dev_list_playback = self.getAlsaDeviceList(playback=True)
dev_list_record = self.getAlsaDeviceList(playback=False)
for dev in dev_list_playback:
self.ui.obj_driver_playback.addItem(dev)
for dev in dev_list_record:
self.ui.obj_driver_capture.addItem(dev)
else:
dev_list = gJackctl.GetParameterConstraint(["driver", "device"])[3]
for i in range(len(dev_list)):
self.ui.obj_driver_capture.addItem(dev_list[i][0] + " [" + dev_list[i][1] + "]")
self.ui.obj_driver_playback.addItem(dev_list[i][0] + " [" + dev_list[i][1] + "]")
elif self.fDriverName == "dummy":
for i in range(16):
self.ui.obj_driver_capture.addItem("%i" % int((i * 2) + 2))
self.ui.obj_driver_playback.addItem("%i" % int((i * 2) + 2))
elif self.fDriverName == "firewire":
self.ui.obj_driver_capture.addItem("no")
self.ui.obj_driver_capture.addItem("yes")
self.ui.obj_driver_playback.addItem("no")
self.ui.obj_driver_playback.addItem("yes")
elif driverHasFeature("playback") or driverHasFeature("capture"):
print("JackSettingsW::slot_checkDriverSelection() - Custom playback/capture for driver '%s' not implemented yet" % self.fDriverName)
# Load Driver Settings
self.loadDriverSettings()
# Enable widgets according to driver
self.ui.obj_driver_capture.setEnabled(driverHasFeature("capture"))
self.ui.obj_driver_capture_label.setEnabled(driverHasFeature("capture"))
self.ui.obj_driver_playback.setEnabled(driverHasFeature("playback"))
self.ui.obj_driver_playback_label.setEnabled(driverHasFeature("playback"))
self.ui.obj_driver_device.setEnabled(driverHasFeature("device"))
self.ui.obj_driver_device_label.setEnabled(driverHasFeature("device"))
self.ui.obj_driver_rate.setEnabled(driverHasFeature("rate"))
self.ui.obj_driver_rate_label.setEnabled(driverHasFeature("rate"))
self.ui.obj_driver_period.setEnabled(driverHasFeature("period"))
self.ui.obj_driver_period_label.setEnabled(driverHasFeature("period"))
self.ui.obj_driver_nperiods.setEnabled(driverHasFeature("nperiods"))
self.ui.obj_driver_nperiods_label.setEnabled(driverHasFeature("nperiods"))
self.ui.obj_driver_hwmon.setEnabled(driverHasFeature("hwmon"))
self.ui.obj_driver_hwmeter.setEnabled(driverHasFeature("hwmeter"))
self.ui.obj_driver_duplex.setEnabled(driverHasFeature("duplex"))
self.ui.obj_driver_hw_alias.setEnabled(driverHasFeature("hw-alias"))
self.ui.obj_driver_softmode.setEnabled(driverHasFeature("softmode"))
self.ui.obj_driver_monitor.setEnabled(driverHasFeature("monitor"))
self.ui.obj_driver_dither.setEnabled(driverHasFeature("dither"))
self.ui.obj_driver_dither_label.setEnabled(driverHasFeature("dither"))
self.ui.obj_driver_inchannels.setEnabled(driverHasFeature("inchannels"))
self.ui.obj_driver_inchannels_label.setEnabled(driverHasFeature("inchannels"))
self.ui.obj_driver_outchannels.setEnabled(driverHasFeature("outchannels"))
self.ui.obj_driver_outchannels_label.setEnabled(driverHasFeature("outchannels"))
self.ui.obj_driver_shorts.setEnabled(driverHasFeature("shorts"))
self.ui.obj_driver_input_latency.setEnabled(driverHasFeature("input-latency"))
self.ui.obj_driver_input_latency_label.setEnabled(driverHasFeature("input-latency"))
self.ui.obj_driver_output_latency.setEnabled(driverHasFeature("output-latency"))
self.ui.obj_driver_output_latency_label.setEnabled(driverHasFeature("output-latency"))
self.ui.obj_driver_midi_driver.setEnabled(driverHasFeature("midi") or driverHasFeature("midi-driver"))
self.ui.obj_driver_midi_driver_label.setEnabled(driverHasFeature("midi") or driverHasFeature("midi-driver"))
self.ui.obj_driver_wait.setEnabled(driverHasFeature("wait"))
self.ui.obj_driver_wait_label.setEnabled(driverHasFeature("wait"))
self.ui.obj_driver_verbose.setEnabled(driverHasFeature("verbose"))
self.ui.obj_driver_verbose_label.setEnabled(driverHasFeature("verbose"))
self.ui.obj_driver_snoop.setEnabled(driverHasFeature("snoop"))
self.ui.obj_driver_channels.setEnabled(driverHasFeature("channels"))
self.ui.obj_driver_channels_label.setEnabled(driverHasFeature("channels"))
# Misc stuff
if self.ui.obj_server_driver.item(row, 0).text() == "ALSA":
self.ui.toolbox_driver_misc.setCurrentIndex(1)
self.ui.obj_driver_capture_label.setText(self.tr("Input Device:"))
self.ui.obj_driver_playback_label.setText(self.tr("Output Device:"))
elif self.ui.obj_server_driver.item(row, 0).text() == "Dummy":
self.ui.toolbox_driver_misc.setCurrentIndex(2)
self.ui.obj_driver_capture_label.setText(self.tr("Input Ports:"))
self.ui.obj_driver_playback_label.setText(self.tr("Output Ports:"))
elif self.ui.obj_server_driver.item(row, 0).text() == "FireWire":
self.ui.toolbox_driver_misc.setCurrentIndex(3)
self.ui.obj_driver_capture_label.setText(self.tr("Capture Ports:"))
self.ui.obj_driver_playback_label.setText(self.tr("Playback Ports:"))
elif self.ui.obj_server_driver.item(row, 0).text() == "Loopback":
self.ui.toolbox_driver_misc.setCurrentIndex(4)
else:
self.ui.toolbox_driver_misc.setCurrentIndex(0)
self.slot_checkDuplexSelection(self.ui.obj_driver_duplex.isChecked())
@pyqtSlot()
def slot_saveJackSettings(self):
self.saveServerSettings()
self.saveDriverSettings(True)
@pyqtSlot()
def slot_resetJackSettings(self):
if self.ui.tabWidget.currentIndex() == 0:
self.loadServerSettings(True, True)
elif self.ui.tabWidget.currentIndex() == 1:
self.loadDriverSettings(True, True)
@pyqtSlot()
def slot_closeWithError(self):
QMessageBox.critical(self, self.tr("Error"), self.tr("jackdbus is not available!\nIt's not possible to configure JACK at this point."))
self.close()
def saveSettings(self):
settings = QSettings("Cadence", "JackSettings")
settings.setValue("Geometry", self.saveGeometry())
settings.setValue("CurrentTab", self.ui.tabWidget.currentIndex())
def loadSettings(self):
settings = QSettings("Cadence", "JackSettings")
self.restoreGeometry(settings.value("Geometry", b""))
self.ui.tabWidget.setCurrentIndex(settings.value("CurrentTab", 0, type=int))
def closeEvent(self, event):
self.saveSettings()
QDialog.closeEvent(self, event)
def done(self, r):
QDialog.done(self, r)
self.close()
# ------------------------------------------------------------------------------------------------------------
# Allow to use this as a standalone app
if __name__ == '__main__':
# Additional imports
import resources_rc
from sys import argv as sys_argv, exit as sys_exit
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication
# App initialization
app = QApplication(sys_argv)
# Connect to DBus
if dbus:
if initBus(dbus.SessionBus()):
QMessageBox.critical(None, app.translate("JackSettingsW", "Error"), app.translate("JackSettingsW",
"jackdbus is not available!\n"
"Is not possible to configure JACK at this point."))
sys_exit(1)
else:
QMessageBox.critical(None, app.translate("JackSettingsW", "Error"),
app.translate("JackSettingsW", "DBus is not available, cannot continue."))
sys_exit(1)
# Show GUI
gui = JackSettingsW(None)
gui.setWindowIcon(QIcon(":/scalable/jack.svg"))
gui.show()
# App-Loop
sys_exit(app.exec_())
|
falkTX/Cadence
|
src/jacksettings.py
|
jacksettings.py
|
py
| 41,004
|
python
|
en
|
code
| 361
|
github-code
|
6
|
44724952954
|
#!/usr/bin/python
"""Main File to embedd and SRT or ASS subtitle file into an MKV file."""
import os
import sys
from os.path import basename
from mkv import mkv
import argparse
import time
def initParser():
parser = argparse.ArgumentParser()
parser.add_argument("inputMkv", type=argparse.FileType('r'),
help="Path of the input mkv file.")
parser.add_argument("inputSRT", type=argparse.FileType('r'),
help="Path of the output mkv file.")
parser.add_argument('output', type=str,
help="Path and Name of the combined File MKV: e.g. \"~/output.mkv\"")
parser.add_argument("-v", "--verbose", action="count", default=0,
help="Increase output verbosity")
return parser
# _______________Can be called as main__________________
if __name__ == '__main__':
parser = initParser()
args = parser.parse_args()
meta = mkv.getMeta(args.inputMkv.name)
key = set(["Title", "title", "TITLE", "NAME", "Name", "name"]).intersection(set(meta.keys()))
if len(key) > 0:
title = meta[next(iter(key))]
else:
title = os.path.basename(args.inputMkv.name).split(".")[0]
mkv.bakeSubtitles(args.inputMkv.name, args.inputSRT.name, args.output, titles=title)
print("Bye Bye from " + str(os.path.basename(__file__)))
|
voelkerb/matroskaPlotter
|
bakeSub.py
|
bakeSub.py
|
py
| 1,360
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36644827726
|
def log(message: str):
with open('progress.log', 'a') as f:
f.write(message+'\n')
def log_broken_file(e, broken_filename: str):
with open('broken_files.log', 'a') as f:
f.write(broken_filename+'\n')
f.write(str(e)+'\n')
import os
import json
# import sys
import random
from datetime import date
import gensim
from helpers import lemmatize_stemming, get_preprocessed, compute_coherence_values, load_tweets
from nltk.stem import WordNetLemmatizer, SnowballStemmer
import nltk
nltk.download('wordnet')
log('loading filenames')
filenames = os.listdir('twitter_data')
filenames = [ 'twitter_data/'+filename for filename in filenames ]
save_dirname = 'trained-' + str(date.today())
log('done')
class TweetLoader:
"""Iterable that loades, filters, stems and vectorizes tweets (english non-retweets only)"""
def __init__(self, filenames):
self.filenames = filenames
stemmer = SnowballStemmer('english')
lemmatizer = WordNetLemmatizer()
self.preprocess = lambda text: get_preprocessed(text, stemmer, lemmatizer)
def __iter__(self):
for i in range(len(self.filenames)):
filename = self.filenames[i]
            step = max(1, len(self.filenames) // 100)  # avoid ZeroDivisionError when there are fewer than 100 files
            if i % step == 0:
                log('loading files: ' + str(int(100*i/len(self.filenames))) + '%')
with open(filename, 'r', errors='replace') as f:
# add commas between tweets to correct json syntax (doesn't always work, as expected)
try:
tweet_list = json.loads('['+f.read().replace('}{','},{')+']')
except Exception as e:
log_broken_file(e, filename)
continue
for tweet in tweet_list:
if 'retweeted_status' not in tweet and tweet['lang'] == 'en':
if 'full_text' in tweet:
text = tweet['full_text']
elif 'extended_tweet' in tweet and 'full_text' in tweet['extended_tweet']:
text = tweet['extended_tweet']['full_text']
else:
text = tweet['text']
yield self.preprocess(text)
tweet_loader = TweetLoader(filenames)
# log('building dictionary...')
# dictionary = gensim.corpora.Dictionary(tweet_loader)
# dictionary.filter_extremes(no_below=100, no_above=0.5)
if not os.path.isdir(save_dirname):
os.makedirs(save_dirname)
# dictionary.save(save_dirname + '/dictionary')
# log('saved')
log('loading saved dictionary...')
dictionary = gensim.corpora.Dictionary.load('trained-2021-06-02/filtered_dictionary')
log('loaded')
class BowCorpus:
def __init__(self, dictionary, token_corpus):
self.token_corpus = token_corpus
self.dictionary = dictionary
def __iter__(self):
for tokenized in self.token_corpus:
yield self.dictionary.doc2bow(tokenized)
# log('building tfidf')
# tfidf = gensim.models.TfidfModel(BowCorpus(dictionary, tweet_loader))
# tfidf.save(save_dirname + '/tfidf')
# log('saved')
log('loading saved tfidf...')
tfidf = gensim.models.TfidfModel.load('trained-2021-06-09/tfidf')
log('loaded')
class TfidfCorpus:
def __init__(self, tfidf_model, BowCorpus):
self.tfidf = tfidf_model
self.bow_corpus = BowCorpus
def __iter__(self):
for doc in self.bow_corpus:
yield self.tfidf[doc]
log('building lda model')
lda_model = gensim.models.LdaMulticore(TfidfCorpus(tfidf, BowCorpus(dictionary, tweet_loader)), num_topics=14, id2word=dictionary, passes=5, workers=8, alpha=0.01, eta=.91)
lda_model.save(save_dirname + '/trained_lda')
log('saved')
# for idx, topic in lda_model.print_topics(-1):
# print('Topic: {} \nWords: {}'.format(idx, topic))
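# Illustrative sketch (not part of the original file): scoring a new tweet
# with the trained pipeline, reusing the preprocessing closure a TweetLoader
# builds in __init__ ("some tweet text" is a stand-in).
#
#   loader = TweetLoader([])
#   bow = dictionary.doc2bow(loader.preprocess("some tweet text"))
#   print(lda_model.get_document_topics(tfidf[bow]))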
|
AdrienSF/twitter-analysis
|
older/save_trained_lda.py
|
save_trained_lda.py
|
py
| 3,790
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7005065421
|
import logging
import logging.config
from huggingface_hub import HfApi
from typing import Text, Optional
from .config_parser import ConfigParser
from .exceptions import TaskModelMismatchException
logging_config_parser = ConfigParser('config/logging.yaml')
logging.config.dictConfig(logging_config_parser.get_config_dict())
logger = logging.getLogger()
class BaseAPI:
def __init__(self, api_token: Text, api_url: Optional[Text] = None):
self.api_token = api_token
config_parser = ConfigParser()
self.config = config_parser.get_config_dict()
if api_url:
self.api_url = api_url
else:
self.api_url = self.config['BASE_URL']
self.logger = logger
self.hf_api = HfApi()
def _check_model_task_match(self, model: Text, task: Text) -> None:
metadata = self.hf_api.model_info(model)
if task != metadata.pipeline_tag:
raise TaskModelMismatchException(f"The task {task} is not supported by the model {model}.")
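# Illustrative sketch (not part of the original file): a subclass would call
# _check_model_task_match before dispatching a request; the task name and
# method below are hypothetical.
#
#   class NLP(BaseAPI):
#       def classify(self, text, model):
#           self._check_model_task_match(model, "text-classification")
#           ...  # POST text to self.api_url using self.api_token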
|
MinuraPunchihewa/hugging-py-face
|
hugging_py_face/base_api.py
|
base_api.py
|
py
| 1,024
|
python
|
en
|
code
| 1
|
github-code
|
6
|
37276060555
|
import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.modules.utils import _pair, _quadruple
import math
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
super(MedianPool2d, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding) # convert to l, r, t, b
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - (ih % self.stride[0]), 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - (iw % self.stride[1]), 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = (pl, pr, pt, pb)
else:
padding = self.padding
return padding
def forward(self, x):
# using existing pytorch functions and tensor ops so that we get autograd,
# would likely be more efficient to implement from scratch at C/Cuda level
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
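# Illustrative usage sketch (not part of the original file): with stride=1 and
# same=True the module acts as a median filter that preserves spatial size.
#
#   x = torch.rand(1, 3, 32, 32)
#   y = MedianPool2d(kernel_size=3, stride=1, same=True)(x)
#   assert y.shape == x.shape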
def sobel_xy(image, norm='L1'):
"""
Args : Tensor N x H x W or N x C x H x W
"""
if len(image.shape) == 3:
image = image.unsqueeze(1)
kernel_x = torch.FloatTensor([[[[-1,0,1],[-2,0,2],[-1,0,1]]]])
#kernel_x = torch.FloatTensor([[[[3,0,-3],[10,0,-10],[3,0,-3]]]])
kernel_y = torch.FloatTensor([[[[1,2,1],[0,0,0],[-1,-2,-1]]]])
#kernel_y = torch.FloatTensor([[[[3,10,3],[0,0,0],[-3,-10,-3]]]])
gradient_x = F.conv2d(image, kernel_x.cuda(), padding=1)
gradient_y = F.conv2d(image, kernel_y.cuda(), padding=1)
if norm=='L1':
gradient = torch.abs(gradient_x) + torch.abs(gradient_y)
elif norm=='L2':
gradient = torch.sqrt(torch.pow(gradient_x, 2) + torch.pow(gradient_y, 2))
return gradient
def blur(image, filter_size=5):
"""
Args : Tensor N x H x W or N x C x H x W
"""
if len(image.shape) == 3:
image = image.unsqueeze(1)
channels = image.shape[1]
kernel = torch.ones(1, 1, filter_size, filter_size) / (filter_size*filter_size)
out = None
padding = (filter_size-1)//2
for channel in range(channels):
_out = F.conv2d(image[:,channel,...].unsqueeze(1), kernel.cuda(), padding=padding)
if out is None:
out = _out
else:
out = torch.cat([out, _out], dim=1)
return out
def erosion(image, filter_size=5):
    """
    Grayscale erosion with a flat (all-zero) structuring element; for a
    single-channel input this is a min-pool over a filter_size x filter_size window.
    Args : Tensor N x H x W or N x C x H x W
    """
if len(image.shape) == 3:
image = image.unsqueeze(1)
pad_total = filter_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
image = F.pad(image, (pad_beg, pad_end, pad_beg, pad_end))
kernel = torch.zeros(1, 1, filter_size, filter_size).to(image.device)
image = F.unfold(image, filter_size, dilation=1, padding=0, stride=1)
image = image.unsqueeze(1)
L = image.size(-1)
L_sqrt = int(math.sqrt(L))
kernel = kernel.view(1, -1)
kernel = kernel.unsqueeze(0).unsqueeze(-1)
image = kernel - image
image, _ = torch.max(image, dim=2, keepdim=False)
image = -1 * image
image = image.view(-1, 1, L_sqrt, L_sqrt)
return image
def dilation(image, filter_size=7):
    """
    Grayscale dilation with a flat (all-zero) structuring element; for a
    single-channel input this is a max-pool over a filter_size x filter_size window.
    Args : Tensor N x H x W or N x C x H x W
    """
if len(image.shape) == 3:
image = image.unsqueeze(1)
pad_total = filter_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
image = F.pad(image, (pad_beg, pad_end, pad_beg, pad_end))
kernel = torch.zeros(1, 1, filter_size, filter_size).to(image.device)
image = F.unfold(image, filter_size, dilation=1, padding=0, stride=1)
image = image.unsqueeze(1)
L = image.size(-1)
L_sqrt = int(math.sqrt(L))
kernel = kernel.view(1, -1)
kernel = kernel.unsqueeze(0).unsqueeze(-1)
image = kernel + image
image, _ = torch.max(image, dim=2, keepdim=False)
image = image.view(-1, 1, L_sqrt, L_sqrt)
return image
def rgb_to_lab(srgb):
srgb_pixels = torch.reshape(srgb, [-1, 3])
linear_mask = (srgb_pixels <= 0.04045).type(torch.FloatTensor).cuda()
exponential_mask = (srgb_pixels > 0.04045).type(torch.FloatTensor).cuda()
rgb_pixels = (srgb_pixels / 12.92 * linear_mask) + (((srgb_pixels + 0.055) / 1.055) ** 2.4) * exponential_mask
rgb_to_xyz = torch.tensor([
# X Y Z
[0.412453, 0.212671, 0.019334], # R
[0.357580, 0.715160, 0.119193], # G
[0.180423, 0.072169, 0.950227], # B
]).type(torch.FloatTensor).cuda()
xyz_pixels = torch.mm(rgb_pixels, rgb_to_xyz)
# XYZ to Lab
xyz_normalized_pixels = torch.mul(xyz_pixels, torch.tensor([1/0.950456, 1.0, 1/1.088754]).type(torch.FloatTensor).cuda())
epsilon = 6.0/29.0
linear_mask = (xyz_normalized_pixels <= (epsilon**3)).type(torch.FloatTensor).cuda()
exponential_mask = (xyz_normalized_pixels > (epsilon**3)).type(torch.FloatTensor).cuda()
fxfyfz_pixels = (xyz_normalized_pixels / (3 * epsilon**2) + 4.0/29.0) * linear_mask + ((xyz_normalized_pixels+0.000001) ** (1.0/3.0)) * exponential_mask
# convert to lab
fxfyfz_to_lab = torch.tensor([
# l a b
[ 0.0, 500.0, 0.0], # fx
[116.0, -500.0, 200.0], # fy
[ 0.0, 0.0, -200.0], # fz
]).type(torch.FloatTensor).cuda()
lab_pixels = torch.mm(fxfyfz_pixels, fxfyfz_to_lab) + torch.tensor([-16.0, 0.0, 0.0]).type(torch.FloatTensor).cuda()
return torch.reshape(lab_pixels, srgb.shape)
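# Illustrative usage sketch (not part of the original file): rgb_to_lab maps
# sRGB values in [0, 1] to CIELAB; the helpers above require CUDA tensors.
#
#   lab = rgb_to_lab(torch.rand(1, 3, 8, 8).cuda())  # same shape as the input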
def gaussian(window_size, sigma):
gauss = torch.Tensor([math.exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = torch.autograd.Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
# luminance = (2*mu1_mu2 + C1)/(mu1_sq + mu2_sq + C1)
# contrast = (2*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
# structure = (2*sigma12 + C2)/(2*sigma1_2 + C2)
con_str = (2*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2)
return con_str
class SCDiffer(nn.Module):
"""
Structure and Color Difference
"""
def __init__(self, filter_size=5, size_average = True):
super(SCDiffer, self).__init__()
self.filter_size = filter_size
self.size_average = size_average
self.channel = 3
self.medianblur = MedianPool2d(kernel_size=5, padding=2)
self.window = create_window(filter_size, self.channel).cuda()
def forward(self, img1, img2, alpha=0.33):
# self.window = window.cuda(img1.get_device()).type_as(img1)
self.window = self.window.type_as(img1)
con_str = _ssim(img1, img2, self.window, self.filter_size, self.channel)
con_str = torch.mean(1.-con_str, dim=1, keepdim=True)
img1_labs = blur(rgb_to_lab(img1), filter_size=3)
img2_labs = blur(rgb_to_lab(img2), filter_size=3)
lab_diff = torch.square(img1_labs - img2_labs)
lab_diff = torch.pow(torch.sum(lab_diff, dim=1, keepdim=True), 0.5)
con_str = (con_str-torch.mean(con_str, dim=[1,2,3], keepdim=True)) / torch.std(con_str, dim=[1,2,3], keepdim=True)
lab_diff = (lab_diff-torch.mean(lab_diff, dim=[1,2,3], keepdim=True)) / torch.std(lab_diff, dim=[1,2,3], keepdim=True)
# occ_map = (con_str + 1.0) * lab_diff
occ_map = lab_diff*con_str + lab_diff + con_str*alpha
occ_map = (occ_map - torch.mean(occ_map, dim=[1,2,3], keepdim=True)) / torch.std(occ_map, dim=[1,2,3], keepdim=True)
occ_map = torch.clamp(occ_map, 0.0, 1.0)
occ_map = self.medianblur(occ_map)
# occ_map = erosion(occ_map, filter_size=2)
return occ_map
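# Illustrative usage sketch (not part of the original file): SCDiffer expects
# CUDA tensors (the helpers above call .cuda()) shaped N x 3 x H x W in [0, 1].
#
#   differ = SCDiffer().cuda()
#   occ_map = differ(img1.cuda(), img2.cuda())  # N x 1 x H x W map in [0, 1]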
|
yeongjoonJu/CFR-GAN
|
tools/ops.py
|
ops.py
|
py
| 9,407
|
python
|
en
|
code
| 74
|
github-code
|
6
|
31482179843
|
from tkinter import *
from tkinter import messagebox
window = Tk()
window.geometry("500x400")
window.title("Medical Records")
frame = Frame(window)
#The layout of The Sick Class
illness_ID = Label(window, text = "Illness Code")
illness_ID.pack(side = LEFT)
illness_ID.place(x = 20, y = 20)
illness_entry = Entry(window, bd =1)
illness_entry.pack(side = RIGHT)
illness_entry.place(x = 300, y = 20)
treatment_duration = Label(window, text = "Duration Of Treatment")
treatment_duration.pack(side =LEFT )
treatment_duration.place(x = 20, y= 80)
week_month = Label(window, text = "Weekly/Months")
week_month.pack(side = RIGHT)
week_month.place(x = 380, y = 80)
due_entry = Entry(window, bd =1, width = 8)
due_entry.pack(side =RIGHT)
due_entry.place(x = 300, y = 80)
dr_number = Label(window, text="Dr Practice Number")
dr_number.pack(side = LEFT)
dr_number.place(x = 20, y = 150)
doc_entry = Entry(window, bd =1)
doc_entry.pack(side = RIGHT)
doc_entry.place(x = 300, y =150)
scan_fee = Label(window, text = "Scan/Consultation Fee")
scan_fee.pack(side = LEFT)
scan_fee.place(x = 20, y = 190)
scan_entry = Entry(window, bd =1)
scan_entry.pack(side = RIGHT)
scan_entry.place(x = 301, y = 190)
amount_paid = Label(window)
amount_paid.pack(side = LEFT)
amount_paid.place(x = 20, y = 260)
var = StringVar()
# The Calculations for the Sick Class
class Sick():
def sickness(self):
self.illness_ID = illness_ID
self.treatment_duration = treatment_duration
self.dr_number = dr_number
self.medcancer = 400
self.medinflu = 350.50
# Calculating Cancer
def illness():
if var.get() == "Cancer":
if int(scan_entry.get()) > 600:
messagebox.showinfo("Message", "Sorry we cannot treat you") # Error message will display
elif int(scan_entry.get()) < 600:
cancer_answer = int(scan_entry.get()) + 400
amount_paid.config(text="Amount Paid For Treatment: " + str(cancer_answer))
if var.get() == "Influenza": # Calculating Influenza
if int(scan_entry.get()) >= 600:
influ_answer = 350.50 + int(scan_entry.get())
amount_paid.config(text="Amount Paid For Treatment: " + str(influ_answer))
elif int(scan_entry.get()) < 600:
influ_answer = 350.50 + int(scan_entry.get())
            discount = influ_answer - (influ_answer * (2/100)) # Apply the 2% discount received
messagebox.showinfo("Message", "2% discount")
amount_paid.config(text="Amount Paid For Treatment: " + str(discount)) #discount will be included in the calculation
radio_btn1 = Radiobutton(window, text = "Cancer" , variable = var, value ="Cancer") # Radiobutton for Cancer
radio_btn1.pack(side = LEFT)
radio_btn1.place(x = 20, y= 220)
radio_btn2 = Radiobutton(window, text = "Influenza", variable = var, value = "Influenza")# Radiobutton for Influenza
radio_btn2.pack(side = LEFT)
radio_btn2.place(x = 20, y= 240)
calculate_btn = Button(window, text = "Calculate", command = illness) # Calculates the amount paid for treatment once pushed
calculate_btn.pack(side = LEFT)
calculate_btn.place(x = 20, y = 300)
# Function on the clear all button
def clear_all():
illness_entry.delete(0,END)
due_entry.delete(0,END)
doc_entry.delete(0,END)
scan_entry.delete(0,END)
clear_btn = Button(window, text = "Clear", command = clear_all) #Clears everything when the button is pushed
clear_btn.pack(side = RIGHT)
clear_btn.place(x = 300, y = 300)
window.mainloop()
|
m-kona/medical-records
|
medical-records.py
|
medical-records.py
|
py
| 3,509
|
python
|
en
|
code
| 0
|
github-code
|
6
|
189554987
|
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
with open('/home/ubuntu/environment/huntsman_scholar_final_proj/results.txt','r') as f:
content = f.readlines()
contents = ''.join(content)
s = smtplib.SMTP('smtp.gmail.com', 587)
# start TLS for security
s.starttls()
# Authentication
s.login("usu.resource@gmail.com", "slphpctqfnnzlfph")
# message to be sent
# NOTE: first_name is never defined in the original script; a placeholder is
# set here so the message builds without a NameError.
first_name = "Scholar"
message = '''Thank you for reaching out to ask about the USU resources available to you, '''+ first_name + '''. Your results are below. \n\n
''' + contents + '''
'''
# sending the mail
s.sendmail("usu.resource@gmail.com", "ironman7699@gmail.com", message)
print('Email Sent.')
# terminating the session
s.quit()
|
lawilding/huntsman_scholar_final_proj
|
sendEmail.py
|
sendEmail.py
|
py
| 736
|
python
|
en
|
code
| 0
|
github-code
|
6
|
12477052664
|
from typing import Dict, List, Optional, Tuple
import torch
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import transformers
import numpy as np
import random
import argparse
from collections import defaultdict
import json
import os
from rouge_score import rouge_scorer
import tqdm
import utils
DEVICE = os.environ["DEVICE"] if "DEVICE" in os.environ else "cpu"
if DEVICE == "gpu" and torch.backends.mps.is_available() and torch.backends.mps.is_built():
# on MPS the metrics don't improve ... Waiting for PyTorch 2.0
# device = torch.device("mps")
# Due to the above, default for now to cpu
DEVICE = torch.device("cpu")
elif DEVICE == "gpu" and torch.cuda.is_available():
DEVICE = torch.device("cuda")
else:
DEVICE = torch.device("cpu")
print("In-context learning using device: ", DEVICE)
def get_icl_prompts(
support_inputs: List[str],
support_labels: List[str],
test_input: str,
prompt_mode: str = 'qa') -> str:
"""
Take a list of contexts and combine them into k-shot prompts.
**Note**: Be sure to shuffle the support examples and labels
*together* (i.e. so the pairings of support input/label is preserved)
before constructing the prompt. np.random.permutation may be helpful.
Args:
support_inputs: The k inputs used for in-context learning (k may be zero!)
support_labels: The k labels used for in-context learning (k may be zero!)
test_input: The input we are evaluating on
prompt_mode: The task description mode we're using; 'none' means we're only using
k-shot examples, 'tl;dr' means we're using the tl;dr prompt from the GPT-2 paper,
'qa' means we are adding "In the" after the question and before the answer and
'custom' means your own prompt format for article summarization
**different from the all the prompt formats previously mentioned**
Returns:
A string containing the complete input to the model.
"""
prompt = ''
### START CODE HERE ###
combination = []
for i in range(len(support_inputs)):
combination.append((support_inputs[i], support_labels[i]))
random.shuffle(combination)
if prompt_mode == "none":
for input, label in combination:
prompt += input + label
prompt += test_input
elif prompt_mode == "tldr":
for input, label in combination:
prompt += input + " TL;DR: " + label
prompt += test_input + " TL;DR:"
elif prompt_mode == "qa":
for input, label in combination:
prompt += input + " In the " + label + ". "
prompt += test_input + " In the"
elif prompt_mode == "custom":
for input, label in combination:
prompt += "TEXT: " + input + " SUMMARY: " + label
prompt += "TEXT: " + test_input + " SUMMARY:"
### END CODE HERE ###
return prompt
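# Illustrative sketch (not part of the original file): with one support pair
# and prompt_mode='qa', the function above produces, for example:
#
#   get_icl_prompts(["Where is Paris?"], ["country of France"],
#                   "Where is Rome?", prompt_mode="qa")
#   # -> "Where is Paris? In the country of France. Where is Rome? In the"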
def get_performance_metric(predictions: List[str], targets: List[str], metric: str) -> float:
if metric == 'rouge':
scorer = rouge_scorer.RougeScorer(['rouge1'], use_stemmer=True)
scores = []
for p, t in zip(predictions, targets):
score = scorer.score(p, t)['rouge1'].fmeasure
scores.append(score)
return sum(scores) / len(scores)
elif metric == 'exact match':
if isinstance(targets[0], str):
return sum([p.strip() == t.strip() for p, t in zip(predictions, targets)]) / len(predictions)
else:
def _normalize(prediction):
if prediction.endswith('Q'):
prediction = prediction[:-1]
elif 'Q:' in prediction:
prediction = prediction[:prediction.index('Q:')]
return prediction.strip('. ').lower()
normalized = [_normalize(p) for p in predictions]
def contains(key, candidates):
for c in candidates:
if key in c:
return True
return False
return sum([contains(n, t) for n, t in zip(normalized, targets)]) / len(normalized)
else:
raise NotImplementedError()
def do_sample(model, input_ids, stop_tokens, max_tokens):
"""
Sample from the model using the given input_ids as a prefix until we either
hit the stop token or we have sampled max_tokens tokens.
(Don't use model.generate; implement this yourself in a loop)
Note: when calling the model here, be sure to wrap the call with
torch.inference_mode() to save memory!
Args:
model: A transformers.PreTrainedModel that we will sample from.
input_ids: An integer tensor of shape [1, prefix_len]
stop_tokens: A list of token ids that indicates that we should stop sampling (e.g., a period)
max_tokens: Stop sampling if we've sampled this many tokens
Returns:
The sampled tokens (a python list of ints/zero-dim tensors), not including the input_ids prefix
OR the stop token (if we hit the stop token before max_tokens)
"""
sampled_tokens = []
### START CODE HERE ###
for i in range(max_tokens):
with torch.inference_mode():
outputs = model(input_ids)
logits = outputs.logits
# print("logits shape: ", logits.shape)
next_token_logits = logits[0, -1, :]
# print("next token logits shape: ", next_token_logits.shape)
next_token = torch.argmax(next_token_logits)
if next_token in stop_tokens:
break
sampled_tokens.append(next_token)
next_token = next_token.unsqueeze(0).unsqueeze(0)
input_ids = torch.cat((input_ids, next_token), dim=-1)
### END CODE HERE ###
return sampled_tokens
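# Minimal usage sketch (assumes a loaded HF model/tokenizer pair; using only
# eos_token_id as the stop token is an illustrative choice):
#   input_ids = tokenizer("2 + 2 =", return_tensors='pt').input_ids.to(DEVICE)
#   tokens = do_sample(model, input_ids, [tokenizer.eos_token_id], max_tokens=16)
#   text = tokenizer.decode(tokens)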
def run_icl(models: List[str], datasets_: List[str], ks: List[int], prompt_modes: List[str], debug: bool, repeats: int, n_val: int = 125):
results = {}
for model_name in models:
print(f'Loading model {model_name}...')
utils.fix_random_seeds()
model, tokenizer = utils.get_model_and_tokenizer(model_name, transformers.AutoModelForCausalLM)
stop_tokens = utils.stop_tokens(tokenizer)
model.to(DEVICE)
for dataset in datasets_:
print(f'Loading dataset {dataset}...')
if debug:
n_val = 1
utils.fix_random_seeds()
max_tokens = utils.max_sampled_tokens_for_dataset(dataset)
train, val = utils.get_dataset(dataset, n_train=max(ks), n_val=n_val)
for prompt_mode in prompt_modes:
for k in ks:
print(f'Running in-context learning with {model_name} on {dataset} with k={k} and prompt_mode={prompt_mode}')
utils.fix_random_seeds()
for repeat in range(repeats):
if repeat > 0:
print(f'Beginning repeat #{repeat}')
support_idxs = random.choices(range(len(train['x'])), k=k)
support_x = [train['x'][idx].replace('\n', ' ') for idx in support_idxs]
support_y = [train['simple_y'][idx].replace('\n', ' ') for idx in support_idxs]
targets = []
predictions = []
pbar = tqdm.tqdm(list(range(min(n_val, len(val['x'])))))
for row in pbar:
test_input = val['x'][row]
targets.append(val['y'][row])
# Ingredients you'll need:
# get_icl_prompts() [which you implemented]
# do_sample() [which you implemented]
# tokenizer() (for encoding text into tokens) and tokenizer.decode() (for decoding tokens back into text)
# See the documentation for the tokenizer encoder function here:
# https://huggingface.co/docs/transformers/v4.23.1/en/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__
# Note that the tokenizer by default will give you results on the CPU, so you will need to move them to the
# proper device.
decoded_prediction = ''
### START CODE HERE ###
icl_prompts = get_icl_prompts(support_x, support_y, test_input, prompt_mode)
#print(support_x, support_y)
#print("icl prompts: ", icl_prompts)
#print("test input: ", test_input)
input_ids = tokenizer(icl_prompts, return_tensors='pt', padding=True).input_ids.to(DEVICE)
#print("input ids shape: ", input_ids.shape)
#print("input ids: ", input_ids)
sampled_tokens = do_sample(model, input_ids, stop_tokens, max_tokens)
decoded_prediction = tokenizer.decode(sampled_tokens)
#print("decoded prediction", decoded_prediction)
### END CODE HERE ###
predictions.append(decoded_prediction)
metric = get_performance_metric(predictions, targets, utils.metric_for_dataset(dataset))
pbar.set_description(f'Eval: {metric:.04f}')
results['_'.join([model_name, dataset, str(k), prompt_mode])] = metric
print('Evaluation results:', results)
if not os.path.exists('submission/results/icl'):
os.makedirs('submission/results/icl')
for k_, v in results.items():
with open(f'submission/results/icl/{k_}.json', 'w') as f:
json.dump({'metric': v}, f)
results = {}
def plot_icl(models, dataset, ks, prompt_modes, output):
data = defaultdict(lambda: defaultdict(list))
symbols = ['solid', 'dashed', 'dotted', 'dashdot']
x_vals = set()
for model in models:
symbol = symbols.pop(0)
for prompt_mode in prompt_modes:
for k in ks:
fn = '_'.join([model, dataset, str(k), prompt_mode])
id_ = '_'.join([model, dataset, prompt_mode])
with open(f'submission/results/icl/{fn}.json', 'r') as f:
score = json.load(f)['metric']
data[id_]['x'].append(k)
x_vals.add(k)
data[id_]['y'].append(score)
data[id_]['linestyle'] = symbol
for k, v in data.items():
plt.plot(v['x'], v['y'], label=k, linestyle=v['linestyle'])
if max(x_vals) > 4:
plt.xscale('symlog')
ax = plt.gca()
ax.xaxis.set_major_formatter(mticker.ScalarFormatter())
ax.xaxis.set_ticks(v['x'])
plt.legend()
plt.title(dataset)
plt.ylabel(utils.metric_for_dataset(dataset))
plt.xlabel('Number of support examples')
plt.savefig(output, bbox_inches='tight')
|
mariopenglee/llm-metalearning
|
src/submission/icl.py
|
icl.py
|
py
| 11,435
|
python
|
en
|
code
| 0
|
github-code
|
6
|
5290534852
|
# This file is part of Channel Capacity Estimator,
# licenced under GNU GPL 3 (see file License.txt).
# Homepage: http://pmbm.ippt.pan.pl/software/cce
from collections import Counter
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def weight_optimizer(neighb_count, labels) -> (float, list):
"""Returns loss and optimized weights for given neighbors description.
Parameters
----------
neighb_count : numpy array of shape (# of data points, # of labels)
describes for each point the number of neighbors with each label
labels : numpy array of shape (# of data points, )
label for each point
Returns
-------
float
loss
list
weight for each label
"""
# reset graph before each run
tf.reset_default_graph()
num_data, num_labels = neighb_count.shape
label_counts = np.zeros([num_labels])
for label, count in Counter(labels).most_common():
label_counts[label] = count
# neighbors matrix
neigh_matx = tf.constant(neighb_count, dtype=tf.float32)
# label count vector
label_cnts = tf.constant(label_counts, dtype=tf.float32)
# logits -- to be optimized
logits = tf.Variable(np.ones(num_labels), dtype=tf.float32)
# weights
w = tf.nn.softmax(logits)
# weight lookup list
w_list = tf.reduce_sum(tf.one_hot(labels, num_labels) * w, axis=1)
# label cnts lookup list
label_cnts_list = tf.reduce_sum(tf.one_hot(labels, num_labels)
* label_cnts, axis=1)
nx = w * num_data
ny = label_cnts_list / w_list \
* tf.reduce_sum(neigh_matx * (w/label_cnts), axis=1)
loss = (tf.reduce_sum(tf.digamma(nx) * w) +
tf.reduce_sum(tf.digamma(ny) * w_list / label_cnts_list))
optimizer = tf.train.AdamOptimizer()
train = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#print("Starting training...")
for _ in range(5000):
curr_loss, curr_w, __ = sess.run([loss, w, train])
#if _ % 250 == 0:
# print("steps: %s, loss: %s, w: %s"
# % (_, curr_loss, curr_w))
#print("Done.")
return sess.run([loss, w])
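# Minimal usage sketch (synthetic data, not from the package's own tests):
#   neighb_count = np.array([[3, 1], [1, 3], [2, 2]])  # 3 points, 2 labels
#   labels = np.array([0, 1, 0])
#   loss, weights = weight_optimizer(neighb_count, labels)
#   # weights is a softmax over the optimized logits, so it sums to 1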
|
pawel-czyz/channel-capacity-estimator
|
cce/optimization.py
|
optimization.py
|
py
| 2,289
|
python
|
en
|
code
| 6
|
github-code
|
6
|
74175180349
|
# -*- coding:utf-8 -*-
"""
题目描述:请实现两个函数,分别用来序列化和反序列化二叉树
解题思路:
序列化二叉树:把一棵二叉树按照某种遍历方式的结果以某种格式保存为字符串。需要注意
的是,序列化二叉树的过程中,如果遇到空节点,需要以某种符号(这里用#)表示。
序列化可以基于先序/中序/后序/按层等遍历方式进行,这里采用先序遍历的方式实现,
字符串之间用","隔开。
主要用递归思想,每次递归返回的是结点,递归中处理好结点终止的条件,并且递归
处理该节点的左右子结点
类中变量self.count记录每次调用递归时的结点在序列化中的位置
"""
class TreeNode:
def __init__(self,x):
self.val = x
self.left = None
self.right =None
class Solution:
def __init__(self):
self.count = -1
def Serialize(self, root):
        # Serialize: preorder traversal, '#' marks a null child
if not root:
return '#'
return str(root.val)+','+self.Serialize(root.left)+','+self.Serialize(root.right)
def Deserialize(self, s):
        # Deserialize: consume tokens in preorder, advancing self.count
self.count += 1
tree_list = s.split(',')
if tree_list[self.count] == '#':
return None
node = TreeNode(int(tree_list[self.count]))
node.left = self.Deserialize(s)
node.right = self.Deserialize(s)
return node
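# Minimal round-trip sketch (hypothetical tree; Deserialize advances self.count,
# so use a fresh Solution instance per deserialization):
#   root = TreeNode(1)
#   root.left, root.right = TreeNode(2), TreeNode(3)
#   s = Solution().Serialize(root)        # '1,2,#,#,3,#,#'
#   new_root = Solution().Deserialize(s)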
|
xxxsssyyy/offer-Goal
|
61序列化二叉树.py
|
61序列化二叉树.py
|
py
| 1,469
|
python
|
zh
|
code
| 7
|
github-code
|
6
|
17079551201
|
from gensim.models import KeyedVectors
from anki_corpus_for_gensim import bg_stopwords,en_stopwords
import json, argparse, time
from flask import Flask, request
from flask_cors import CORS
##################################################
# API part
##################################################
app = Flask(__name__)
cors = CORS(app)
def get_wmdist_bg(bg_sent, en_sent):
return multilingual_model.wmdistance(
['bg:'+x for x in bg_sent if x not in bg_stopwords],
['en:'+x for x in en_sent if x not in en_stopwords]
)
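# Usage sketch (hypothetical pre-tokenized sentences): stopwords are filtered
# and the remaining tokens are prefixed with a language tag before the
# word-mover's distance lookup, e.g.
#   get_wmdist_bg(['куче'], ['dog'])  # distance between 'bg:куче' and 'en:dog'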
@app.route('/', methods=['POST'])
def predict():
start = time.time()
data = request.data.decode("utf-8")
results = list()
if data == "":
params = request.form
sentences = json.loads(params)
print(sentences)
# bg_sent = json.loads(params['bg']).split()
# en_sent = json.loads(params['en']).split()
else:
params = json.loads(data)
sentences = params
print(sentences)
# bg_sent = params['bg'].split()
# en_sent = params['en'].split()
for sent in sentences['sent']:
print(sent)
bg_sent = sent['bg'].split()
en_sent = sent['en'].split()
print(bg_sent)
print(en_sent)
match = get_wmdist_bg(bg_sent, en_sent)
results.append(match)
print('Returning', results)
json_data = json.dumps({'results': results})
print("Time spent handling the request: %f" % (time.time() - start))
return json_data
##################################################
# END API part
##################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = parser.parse_args()
multilingual_model = KeyedVectors.load('glove_word_embeddings/multilingual.gensim')
print('Starting the API')
app.run(host="0.0.0.0", debug=True)
|
teodorToshkov/sentencesimilarity
|
app.py
|
app.py
|
py
| 1,897
|
python
|
en
|
code
| 0
|
github-code
|
6
|
5264982546
|
#!<path_from_which_python_command>
import sys
import os
from datetime import date
import datetime
import pkgutil
import shutil
import subprocess
import time
import urllib.request # To download files
from tkinter import Tk, ttk # Download bar
import zipfile
from pathlib import Path
import tkinter
import CustomLogger
logger = CustomLogger.logger
import tempfile
# import Vendor libraries
# Unity Unpacker/Extractor
from UnityPy import AssetsManager
from collections import Counter
# https://github.com/K0lb3/UnityPy
# https://github.com/K0lb3/UnityPy/blob/master/AssetBatchConverter.py
TYPES = ['TextAsset','Sprite', 'Texture2D', 'MonoScript','MonoBehaviour']
IGNOR_DIR_COUNT = 2
# Convert lua table to json
# https://github.com/SirAnthony/slpp
from slpp import slpp as lua
def validateURL(url:str):
try:
rsp = requests.get(url)
logger.info("%s - %s"% (rsp.status_code, url))
if rsp.status_code == 200:
return True
except:
logger.info("%s does not exist on Internet" % url)
return False
import requests
def downloadFile(url, save_path, chunk_size=128):
r = requests.get(url, stream=True)
with open(save_path, 'wb') as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
def ValidateJava():
try:
proc = subprocess.Popen('java -version', stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = proc.stdout.read().decode('utf-8')
if "command not found" not in result:
logger.info("Located JRE \n"+result)
else:
logger.error("Java Runtime Environment (JRE) is required to decrypt the byte files.")
except:
logger.error("Java Runtime Environment (JRE) is required to decrypt the byte files.")
def GetPatchLink(p_serv : str, p_name:str) -> str:
serverLink : str = ""
if p_serv == "CN":
serverLink = "http://ro.xdcdn.net/res/Alpha/Android/" + p_name + ".zip"
elif p_serv == "SEA":
serverLink = "http://sea-cdn.ro.com/assets/Release/Android/" + p_name + ".zip"
elif p_serv == "GLOBAL":
serverLink = "http://na-cdn.ro.com/assets/Release/Android/" +p_name + ".zip"
elif p_serv == "Others":
serverLink = p_name
return serverLink
#End of GetPatchLink
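# Usage sketch (hypothetical patch name):
#   GetPatchLink("SEA", "Patch_1_0_0")
#   -> 'http://sea-cdn.ro.com/assets/Release/Android/Patch_1_0_0.zip'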
def DownloadPatchFile(p_server:str, p_patchString:str, p_outputPath:str):
file_url = GetPatchLink(p_server,p_patchString)
if "Others" in p_server:
p_server, p_patchString = os.path.split(p_patchString)
p_patchString, ext = os.path.splitext(p_patchString)
file_name ="ROM_Patch_"+p_patchString
file_path = p_outputPath + "/" + file_name +".zip"
logger.info("Downloading to %s" % file_path)
downloadFile(file_url, file_path)
logger.info ("Download complete")
return file_path
def UnloadZip(p_zipFilePath:str, p_outputPath:str):
# Extract to folder with same name as zip file
zfFolder, zfFileName = os.path.split(p_zipFilePath)
zfFileName, ext = os.path.splitext(zfFileName)
extracted_zfPath = os.path.join(p_outputPath, zfFileName)
logger.info("Beginning extraction of %s" % zfFileName)
with zipfile.ZipFile(p_zipFilePath) as zf:
zf.extractall(extracted_zfPath)
logger.info("Complete extraction of %s" % zfFileName)
os.remove(p_zipFilePath) # delete zip file
return zfFileName
def UnloadAPK(p_apkFilePath : str, p_outputPath:str) -> bool:
hasAPK:bool = False
obbPath = ""
if "apk" in p_apkFilePath:
hasAPK = True
elif not "obb" in p_apkFilePath:
logger.error("No valid apk or obb file selected")
return False
# create a copy of the apk to tmp folder
fileDir, fileName = os.path.split(p_apkFilePath) # split folder and file name
CopyFile(fileDir, p_outputPath,fileName) # Copy file to output
newApkFilePath = os.path.join(p_outputPath, fileName)
# Rename apk to zip
apkfilename,ext = os.path.splitext(newApkFilePath)
os.rename(newApkFilePath, apkfilename +".zip" )
newApkFilePath = apkfilename +".zip"
apkExtractedFolder = os.path.join(p_outputPath, apkfilename)
if hasAPK:
# extract apk zip
logger.info("Beginning extraction of raw apk")
with zipfile.ZipFile(newApkFilePath) as zf:
zf.extractall(apkExtractedFolder)
logger.info("Completed APK extraction")
else:
obbPath = newApkFilePath
if not obbPath:
for path in Path(p_outputPath).rglob('*.obb'):
obbPath = path
break
# Rename obb to zip
filepath,ext = os.path.splitext(obbPath)
os.rename(obbPath, filepath +".zip" )
    obbPath = filepath + ".zip"
logger.info("Extracting obb")
with zipfile.ZipFile(obbPath) as zf:
zf.extractall(p_outputPath)
logger.info("Completed obb extraction")
# Cleanup copied apk file and folder after obb extraction
if hasAPK:
os.remove(newApkFilePath)
shutil.rmtree(apkExtractedFolder)
return True
def GetUnityFiles(p_folderPath :str) ->list:
fileList : list = []
for subdir, dirs, files in os.walk(p_folderPath):
        # os.walk already recurses into subdirectories, so no explicit recursion is needed
for file in files:
if R".unity3d" in file:
fileList.append(os.path.join(subdir, file))
return fileList
#end of GetUnityFiles
def CreateFolder(p_path:str, p_foldername:str):
if p_foldername in p_path:
return p_path
newpath = os.path.join(p_path, p_foldername)
if not os.path.exists(newpath):
os.makedirs(newpath)
return newpath
#end of CreateFolder
def CopyFile(p_src:str, p_dst:str, p_fileName:str):
if os.path.exists(p_dst+"/"+p_fileName):
logger.info("Destination: %s/%s already exists. Aborting copy!" % (p_dst,p_fileName))
return "dst"
elif not os.path.exists(p_src+"/"+p_fileName):
logger.info("Source file: %s/%s does not exist. Aborting copy!" % (p_src, p_fileName))
return "src"
else:
shutil.copy(p_src+"/"+p_fileName,p_dst)
logger.info("Succesfully copied %s to %s" %(p_fileName, p_dst))
return p_dst+"/"+p_fileName
#end of CopyFile
# Create output directory based on today date
def GetOutputFolder(p_outputPath :str):
return CreateFolder(p_outputPath, "OutputFile" + datetime.datetime.today().strftime("_%d%m%Y_%H%M%S"))
#end of SetupOutputFolder
def DecryptLuaFiles(p_workingDir:str, tkWin : tkinter.Tk):
# Make sure execution files exists
exeNP = CopyFile(os.getcwd(), p_workingDir, "RomEncryption.exe" )
unluacNP = CopyFile(os.getcwd(), p_workingDir, "unLuac.jar" )
if exeNP == "src":
logger.error("Cannot find RomEncryption.exe")
return False
if unluacNP == "src":
logger.error("Cannot find unLuac.exe")
return False
logger.info ("Decrypting to lua files...")
cmd = "RomEncryption.exe";
outputString = ""
with tempfile.TemporaryFile(newline='\n', mode='w+', encoding='utf-8') as tempf:
proc = subprocess.Popen(cmd, stdout=tempf, cwd=p_workingDir)
proc.wait()
tempf.seek(0)
outputString = tempf.read()
outList = outputString.split('\n')
for line in outList:
if "Exception" in line:
logger.error(line)
else:
logger.info("Extracted file: " + line)
tkWin.update()
logger.info ("Decryption task completed")
os.remove(exeNP)
os.remove(unluacNP)
return True
def unpack_all_assets(p_folderPath : str, p_outputPath : str):
logger.info ("Start unpacking unity files via UnityPy")
for root, dirs, files in os.walk(p_folderPath, topdown=False):
for f in files:
logger.info(f)
extension = os.path.splitext(f)[1]
src = os.path.realpath(os.path.join(root, f))
if extension == ".zip":
archive = zipfile.ZipFile(src, 'r')
for zf in archive.namelist():
extract_assets(archive.open(zf),p_outputPath)
else:
extract_assets(src,p_outputPath)
logger.info ("Completed unpacking of unity files")
shutil.rmtree(p_folderPath)
def extract_assets(src, output_path):
# load source
am = AssetsManager(src)
# iterate over assets
for asset in am.assets.values():
# assets without container / internal path will be ignored for now
if not asset.container:
continue
# check which mode we will have to use
num_cont = sum(1 for obj in asset.container.values() if obj.type in TYPES)
num_objs = sum(1 for obj in asset.objects.values() if obj.type in TYPES)
# check if container contains all important assets, if yes, just ignore the container
if num_objs <= num_cont * 2:
for asset_path, obj in asset.container.items():
fp = os.path.join(output_path, *asset_path.split('/')[IGNOR_DIR_COUNT:])
export_obj(obj, fp)
# otherwise use the container to generate a path for the normal objects
else:
extracted = []
# find the most common path
occurence_count = Counter(os.path.splitext(asset_path)[0] for asset_path in asset.container.keys())
local_path = os.path.join(output_path, *occurence_count.most_common(1)[0][0].split('/')[IGNOR_DIR_COUNT:])
for obj in asset.objects.values():
if obj.path_id not in extracted:
extracted.extend(export_obj(obj, local_path, append_name=True))
def export_obj(obj, fp: str, append_name: bool = False) -> list:
if obj.type not in TYPES:
return []
data = obj.read()
if append_name:
fp = os.path.join(fp, data.name)
fp, extension = os.path.splitext(fp)
os.makedirs(os.path.dirname(fp), exist_ok=True)
if obj.type == 'TextAsset':
if not extension:
extension = '.txt'
with open(f"{fp}{extension}", 'wb') as f:
f.write(data.script)
elif obj.type == "Sprite":
extension = ".png"
data.image.save(f"{fp}{extension}")
return [obj.path_id, data.m_RD.texture.path_id, getattr(data.m_RD.alphaTexture, 'path_id', None)]
elif obj.type == "Texture2D":
extension = ".png"
fp = f"{fp}{extension}"
if not os.path.exists(fp):
try:
data.image.save(fp)
except EOFError:
pass
return [obj.path_id]
|
gomuG/ROMiner
|
gUtil.py
|
gUtil.py
|
py
| 10,668
|
python
|
en
|
code
| 1
|
github-code
|
6
|
12018146730
|
import mysql.connector
config = {
'user': 'root',
'password': '',
'host': 'localhost',
'database': 'quotes_test'
}
tab = 'tereshkova_table'
def init_connection():
return mysql.connector.connect(**config)
def close_connection(con):
con.close()
def copy_all_in_table(con, row_list):
cursor = con.cursor()
query = ("INSERT INTO "+ tab +
" (goods, count, cost, action) "
"VALUES (%s, %s, %s, %s)")
for row in row_list:
data = row
cursor.execute(query, data)
con.commit()
cursor.close()
def select_all_from_db(con):
cursor = con.cursor()
query = ('SELECT * FROM ' + tab)
cursor.execute(query)
row_list = []
for row in cursor:
row_list.append(row[1:])
cursor.close()
return row_list
def delete_all_from_db(con):
cursor = con.cursor()
query = ('DELETE FROM ' + tab)
cursor.execute(query)
con.commit()
cursor.close()
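# Usage sketch (assumes a reachable MySQL server matching `config` and an
# existing `tereshkova_table` with columns goods, count, cost, action):
#   con = init_connection()
#   copy_all_in_table(con, [("apple", 3, 9.99, "none")])
#   rows = select_all_from_db(con)
#   close_connection(con)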
|
arkuz/quotes_test
|
helpers/DB.py
|
DB.py
|
py
| 965
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42174822199
|
PAYMENT = "💰 افزایش موجودی"  # English: "Top up balance"
BUY_NEW_SERVICE = "🛍 خرید سرویس"  # English: "Buy a service"
GET_TEST_SERVICE = "🎁 دریافت سرویس رایگان"  # English: "Get a free service"
MY_SERVICES = "💎 سرویس های من"  # English: "My services"
PRICE_LIST = "💲تعرفه سرویس"  # English: "Service pricing"
MY_PROFILE = "👤 پروفایل من"  # English: "My profile"
HELP = "❔ راهنما"  # English: "Help"
SUPPORT = "💭 پشتیبانی انلاین"  # English: "Online support"
EXIT = "🔙 خروج"  # English: "Exit"
CHANNEL = "عضویت در کانال"  # English: "Join the channel"
I_HAVE_SUBSCRIBED = " ✅ عضو شدم"  # English: "I have subscribed"
ENABLE = "فعال"  # English: "Active"
DISABLE = "غیرفعال"  # English: "Inactive"
ACCOUNT_LIST_ITEM = "سرویس {} معتبر تا {} ({})"  # English: "Service {} valid until {} ({})"
SERVICE_LIST_ITEM = "سرویس {} ماهه {} {}GB {}"  # English: "{}-month service {} {}GB {}"
EXPIRE_TIME = "تاریخ انقضا"  # English: "Expiration date"
EXCEEDED_DATA_LIMIT = "اتمام ترافیک"  # English: "Out of data"
|
eloravpn/EloraVPNManager
|
src/telegram/user/captions.py
|
captions.py
|
py
| 701
|
python
|
fa
|
code
| 15
|
github-code
|
6
|
21628349339
|
#!/usr/bin/python3
# this file contains the code to publish parsed messages to your system
# currently there is only MQTT implemented, which can be configured in the central config
import logging
import paho.mqtt.publish
#internal imports
import config
def is_interval_matched(dateTime):
    seconds = int(dateTime['hour']) * 3600 + int(dateTime['minute']) * 60 + int(dateTime['second'])
    return seconds % config.mqtt_publish_interval_seconds == 0
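# Example: at 00:05:00 the second-of-day is 300, so with
# config.mqtt_publish_interval_seconds = 60 the reading is published.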
def process(msg):
if config.mqtt_enabled:
dateTime = msg["dateTime"]
if is_interval_matched(dateTime):
# multiple topics: topic, payload, qos, retain
mqttMsg = [(config.mqtt_topic_prefix + "zaehlerstand_bezug", msg["A+"], config.mqtt_qos, config.mqtt_retain),
(config.mqtt_topic_prefix + "zaehlerstand_lieferung", msg["A-"], config.mqtt_qos, config.mqtt_retain),
(config.mqtt_topic_prefix + "leistung_bezug", msg["P+"], config.mqtt_qos, config.mqtt_retain),
(config.mqtt_topic_prefix + "leistung_lieferung", msg["P-"], config.mqtt_qos, config.mqtt_retain),
(config.mqtt_topic_prefix + "timestamp", f"{dateTime['year']}-{dateTime['day']}-{dateTime['month']} {dateTime['hour']}:{dateTime['minute']}:{dateTime['second']}", config.mqtt_qos, config.mqtt_retain),]
logging.debug("publishing: " + "{0}".format(mqttMsg))
try:
paho.mqtt.publish.multiple(mqttMsg, hostname=config.mqtt_hostname, auth=config.mqtt_auth)
except Exception as e:
logging.error("error while publishing {0}".format(e))
else:
logging.debug("publishing skipped: " + "{0}".format(msg))
|
aburgr/smartmeter-reader
|
publish.py
|
publish.py
|
py
| 1,793
|
python
|
en
|
code
| 14
|
github-code
|
6
|
19235406042
|
from dash import dcc, html, dash_table
import config
import sites
import numpy as np
import pandas as pd
def call_layout(site_config):
layout = html.Div([
html.Div(id='dash-header',
children=[
html.H1(children=config.title),
html.H3(children=dcc.Markdown(
site_config['loc_msg_markdown'])),
# html.H3(children=site_config['dates_msg']),
]),
html.Div(id='dash-controls',
children=[
html.Div(id='select-date-range',
children=[
html.Span(children='Select dates', style={'font-weight': 'bold'}),
dcc.DatePickerRange(id='my-date-picker-range',
min_date_allowed=site_config['date_range'][0],
max_date_allowed=site_config['date_range'][1],
start_date=site_config['date_range'][0],
end_date=site_config['date_range'][1],
first_day_of_week=1, # start on Mondays
minimum_nights=0,
updatemode='singledate',
),
]),
html.Div(id='select-direction',
children=[
html.Span(children='Traffic direction', style={'font-weight': 'bold'}),
dcc.RadioItems(options={'bi_direction': 'Both',
'in': site_config['config_direction']['in'] + ' only',
'out': site_config['config_direction']['out'] + ' only'},
value='bi_direction',
inputStyle={"margin-left": "10px"},
inline=True,
id='data-dir-radio'),
]),
]),
html.Div(id='bar-graph-div',
children=[
html.Div(id='select-resolution',
children=[
html.Span(children='Select time resolution', style={'font-weight': 'bold'}),
dcc.RadioItems(options={'1_month': 'monthly',
'1_week': 'weekly',
'1_day': 'daily',
'1_hour': 'hourly',
'30_min': '30 min',
'15_min': '15 min'},
value=site_config['default_res'],
inputStyle={"margin-left": "10px"},
inline=True,
id='data-agg-radio'),
]),
dcc.Graph(id='bar-graph',
config={'toImageButtonOptions': {
'format': 'png', 'filename': 'bar_chart', 'height': None, 'width': None, 'scale': 10}, 'displaylogo': False}
)]),
html.Div(id='table-div',
children=[
html.H3(children='Traffic summary on the selected dates'),
dash_table.DataTable(data=pd.DataFrame(np.zeros((3, 4))).to_dict('records'),
columns=[
dict(id='dir', name=''),
dict(id='total_vol', name='Total traffic', type='numeric',
format=dash_table.Format.Format().group(True)),
dict(id='daily_avg', name='Average daily traffic', type='numeric', format=dash_table.Format.Format(
precision=1, scheme=dash_table.Format.Scheme.fixed)),
dict(id='perc', name='Percent', type='numeric',
format=dash_table.FormatTemplate.percentage(1))
],
style_cell_conditional=[
{'if': {'column_id': 'dir'},
'width': '25%'},
{'if': {'column_id': 'total_vol'},
'width': '25%'},
{'if': {'column_id': 'daily_avg'},
'width': '25%'},
{'if': {'column_id': 'perc'},
'width': '20%'},
],
style_cell={'font-family': 'Roboto',
'padding-right': '10px',
'padding-left': '10px'},
id='avg-table'),
]),
html.Div(id='time-of-day-div',
children=[
html.Div(id='select-dayofweek-1',
children=[
# html.Span(children='Select day of week', style={'font-weight': 'bold'}),
dcc.Checklist(id='time-day-checklist',
options=config.weekday_list,
value=config.weekday_list,
inputStyle={"margin-left": "10px"},
inline=True,
)]),
dcc.Graph(id='time-of-day',
config={'toImageButtonOptions': {'format': 'png', 'filename': 'time_of_day_chart', 'height': None, 'width': None, 'scale': 10}, 'displaylogo': False}
)]),
html.Div(children=[dcc.Graph(id='avg-hour-traffic',
config={'toImageButtonOptions': {'format': 'png', 'filename': 'avg_hourly_traffic_chart', 'height': 350, 'width': 750, 'scale': 10}, 'displaylogo': False})]),
html.Div(children=[dcc.Graph(id='day-of-week',
config={'toImageButtonOptions': {'format': 'png', 'filename': 'day_of_week_chart', 'height': 350, 'width': 750, 'scale': 10}, 'displaylogo': False})]),
html.Div(children=[dcc.Checklist(id='day-checklist',
options=config.weekday_list,
value=config.weekday_list,
inputStyle={"margin-left": "10px"},
inline=True,
)]),
html.Div(children=[dcc.RadioItems(id='rain-radio',
options=['All days', 'Only days without rain'],
value='All days',
inputStyle={"margin-left": "10px"},
style={"margin-top": "15px",
"margin-bottom": "5px",},
inline=True,
)]),
html.Div(children=[dcc.Graph(id='weather-plot',
config={'toImageButtonOptions': {'format': 'png', 'filename': 'weather_chart', 'height': 350, 'width': 750, 'scale': 10}, 'displaylogo': False})]),
html.Div(id='footer',
children=[
html.H4(children=dcc.Markdown('This dashboard is open source and hosted on a [GitHub repository](https://github.com/fenggroup/bike-traffic-plotly-dash).')),
html.H4(children=dcc.Markdown('Download the [bike counter data](https://github.com/fenggroup/bike-traffic-plotly-dash/tree/main/data/counter)')),
# html.H4(children=dcc.Markdown('[Click here](https://fenggroup.org/bike-counter/) to learn more about our bike counting project.')),
# html.H4(children=dcc.Markdown('[Feng Group](https://fenggroup.org/) 2022'))
]),
# dcc.Store stores the values
dcc.Store(id='intermediate-value'),
dcc.Store(id='weather-value'),
dcc.Store(id='daily-notes'),
dcc.Store(id='site-config'),
])
return layout
home_layout = html.Div(children=[
html.H1(children='Bike counter dashboard'),
html.H3(children='Select a bike counter below to see its dashboard.'),
html.Div([html.Br(),
dcc.Link(sites.site_01['site_title'], href=sites.site_01['site_url']),
html.Br(),
html.Br(),
dcc.Link(sites.site_02['site_title'], href=sites.site_02['site_url']),
html.Br(),
html.Br(),
dcc.Link(sites.site_03['site_title'], href=sites.site_03['site_url']),
html.Br(),
html.Br(),
]),
html.Div(children=[
html.H4(children=dcc.Markdown('The dashboards are open source and hosted on [our GitHub repository](https://github.com/fenggroup/bike-traffic-plotly-dash).')),
html.H4(children=dcc.Markdown('[Feng Group](https://fenggroup.org/) 2022'))
]),
])
|
fenggroup/bike-traffic-plotly-dash
|
layouts.py
|
layouts.py
|
py
| 9,242
|
python
|
en
|
code
| 5
|
github-code
|
6
|
367509253
|
import streamlit as st
import pandas as pd
import random
from stqdm import stqdm
df_64 = pd.read_csv("juyok_DB.csv", encoding='UTF8')
df_est = pd.read_csv("juyok_DB_est.csv", encoding='UTF8')
# st.write(df_64)
st.markdown("## **당신의 이름은 무엇인가요?**")
name = st.text_input("이름: ")
st.markdown("## **당신의 성별은 무엇인가요?**")
sex = st.selectbox("성별: ",("여성","남성") )
st.write('성별:', sex)
st.markdown('## **당신이 궁금한 사항을 입력해 주세요.**')
question = st.text_input("질문: ")
df_64_count = pd.DataFrame(df_64["Name"])
df_64_count["Count"] = 0
# for _ in stqdm(range(50)):
#print(df_est.at[index2, "step2"])
#print(df_est.at[index2, "step3"])
#print(df_est.at[index2, "step4"])
if st.button("실행"):
output_area = st.empty()
for i in range(0,1):
l1 = random.randrange(0,2)
l2 = random.randrange(0,2)
l3 = random.randrange(0,2)
l4 = random.randrange(0,2)
l5 = random.randrange(0,2)
l6 = random.randrange(0,2)
a = df_64["Name"].loc[(df_64.layer1 == l1)&(df_64.layer2 == l2)&(df_64.layer3 == l3)&(df_64.layer4 == l4)&(df_64.layer5 == l5)&(df_64.layer6 == l6)]
index = df_64.index[df_64["Name"] == a.values[0]][0]
# if i == 0:
# first_row = df_64["Name"].loc[index]
df_64_count.at[index, "Count"] += 1
max_index = df_64_count["Count"].idxmax()
max_row = df_64_count.loc[max_index]
# output_area.write(first_row)
for j in stqdm(range(0,1000)):
l1 = str(random.randrange(0,2))
l2 = str(random.randrange(0,2))
l3 = str(random.randrange(0,2))
l4 = str(random.randrange(0,2))
l5 = str(random.randrange(0,2))
l6 = str(random.randrange(0,2))
# a = df_64["Name"].loc[(df_64.layer1 == l1)&(df_64.layer2 == l2)&(df_64.layer3 == l3)&(df_64.layer4 == l4)&(df_64.layer5 == l5)&(df_64.layer6 == l6)]
# index = df_64.index[df_64["Name"] == a.values[0]][0]
a = l1+l2+l3+l4+l5+l6
output_area.write(a)
output_area.write(max_row["Name"])
index = df_64.index[df_64["Name"] == max_row["Name"]][0]
st.write('현재- ', max_row["Name"],":", df_64.at[index, "explain_short"])
index2 = df_est.index[df_est["step1"] == max_row["Name"]][0]
con = st.container()
con.caption("Result")
index1 = df_64.index[df_64["Name"] == df_est.at[index2, "step2"]][0]
st.write( "3개월후","-",df_est.at[index2, "step2"],":", df_64.at[index1, "explain_short"])
index1 = df_64.index[df_64["Name"] == df_est.at[index2, "step3"]][0]
st.write( "6개월후","-",df_est.at[index2, "step3"],":", df_64.at[index1, "explain_short"])
index1 = df_64.index[df_64["Name"] == df_est.at[index2, "step4"]][0]
st.write( "9개월후","-",df_est.at[index2, "step4"],":", df_64.at[index1, "explain_short"])
# total_q = [max_row["Name"],df_est.at[index2, "step2"],df_est.at[index2, "step3"],df_est.at[index2, "step4"]]
# st.write(total_q)
# if first_row in total_q:
# st.write(first_row)
# else:
# print("다시")
|
baemsu/juyok
|
app.py
|
app.py
|
py
| 3,094
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3200668360
|
import rodMassParam as P
import matplotlib.pyplot as plt
from control import TransferFunction as tf
import control as cnt
import numpy as np
# ----------- noise specification --------
# attenuate noise above omega_n by gamma_n
def add_spec_noise(gamma, omega, flag):
w = np.logspace(np.log10(omega), np.log10(omega) + 4)
if flag==True:
plt.subplot(211)
plt.plot(w,
(20 * np.log10(gamma)) * np.ones(len(w)),
color='g',
label='noise spec')
#----------- input disturbance specification --------
# reject disturbance above omega by gamma
def add_spec_disturbance(gamma, omega, flag):
w = np.logspace(np.log10(omega)-4, np.log10(omega))
if flag==True:
plt.subplot(211)
plt.plot(w,
20*np.log10(1.0/gamma)*np.ones(len(w)),
color='g',
label='disturbance spec')
#----------- general tracking specification --------
# track references below omega by gamma
def add_spec_tracking(gamma, omega, flag):
w = np.logspace(np.log10(omega) - 2, np.log10(omega))
if flag==True:
plt.subplot(211)
plt.plot(w,
20*np.log10(1/gamma)*np.ones(len(w)),
color='g',
label='tracking spec')
#----------- steady state tracking of step --------
# track step by gamma
def add_spec_tracking_step(gamma, flag):
omega = 0.01
w = np.logspace(np.log10(omega)-4, np.log10(omega))
if flag==True:
plt.subplot(211)
plt.plot(w,
20*np.log10(1.0/gamma),
color='g',
label='tracking spec')
#----------- steady state tracking of ramp --------
# track ramp by gamma
def add_spec_tracking_ramp(gamma, flag):
omega = 0.01
w = np.logspace(np.log10(omega)-4, np.log10(omega))
if flag==True:
plt.subplot(211)
plt.plot(w,
20*np.log10(1.0/gamma)-20*np.log10(w),
color='g',
label='tracking spec')
# proportional control: change cross over frequency
def add_control_proportional(C, kp):
proportional = tf([kp], [1])
return C * proportional
# integral control: increase steady state tracking and dist rejection
# ki: frequency at which integral action ends
def add_control_integral(C, ki):
integrator = tf([1, ki], [1, 0])
return C * integrator
# phase lag: add gain at low frequency
# z: frequency at which gain ends
# M: separation between pole and zero
def add_control_lag(C, z, M):
Lag = tf([1, z], [1, z/M])
return C * Lag
# low pass filter: decrease gain at high frequency (noise)
# p: lpf cutoff frequency
def add_control_lpf(C, p):
LPF = tf(p, [1, p])
return C * LPF
# phase lead: increase PM (stability)
# w_L: location of maximum frequency bump
# M: separation between zero and pole
def add_control_lead(C, w_L, M):
gain = (1.0+np.sqrt(M))/(1.0+1.0/np.sqrt(M))
Lead = tf([gain * 1.0, gain * w_L / np.sqrt(M)],
[1.0, w_L * np.sqrt(M)])
return C * Lead
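# A standard lead-network fact: with the zero at w_L/sqrt(M) and the pole at
# w_L*sqrt(M), the maximum added phase is phi_max = asin((M-1)/(M+1)), reached
# at w_L; e.g. M=10 buys about 55 degrees of phase margin.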
# Compute plant transfer functions
Plant = cnt.tf([1.0 / (P.m * P.ell**2)], [1, P.b/(P.m*P.ell**2), P.k1/(P.m*P.ell**2)])
C_pid = cnt.tf([(P.kd+P.kp*P.sigma), (P.kp+P.ki*P.sigma), P.ki], [P.sigma, 1, 0])
PLOT = True
# PLOT = False
# calculate bode plot and gain and phase margin
mag, phase, omega = cnt.bode(Plant, dB=True, omega=np.logspace(-3, 5), Plot=False)
gm, pm, Wcg, Wcp = cnt.margin(Plant*C_pid)
print(" pm: ", pm, " Wcp: ", Wcp, "gm: ", gm, " Wcg: ", Wcg)
if PLOT:
plt.figure(3), plt.clf()
plt.subplot(211), plt.grid(True)
plantMagPlot, = plt.semilogx(omega, 20*np.log10(mag), label='Plant')
plt.subplot(212), plt.grid(True)
plantPhasePlot, = plt.semilogx(omega, phase, label='Plant')
#########################################
# Define Design Specifications
add_spec_noise(gamma=10, omega=2000, flag=PLOT)
#add_spec_disturbance(gamma=0.1, omega=0.1, flag=PLOT)
add_spec_tracking(gamma=10, omega=0.02, flag=PLOT)
#add_spec_tracking_ramp(gamma=0.03, flag=PLOT)
#########################################
# Control Design
C = C_pid
C = add_control_lag(C, z=0.02, M=10)
C = add_control_lpf(C, p=10)
#C = add_control_lead(C, w_L=, M=)
#C = add_control_integral(C, ki=)
#C = add_control_proportional(C, kp=)
mag, phase, omega = cnt.bode(Plant*C, dB=True, omega=np.logspace(-5, 5), Plot=False)
gm, pm, Wcg, Wcp = cnt.margin(Plant*C)
print(" pm: ", pm, " Wcp: ", Wcp, "gm: ", gm, " Wcg: ", Wcg)
if PLOT:
plt.subplot(211),
plantMagPlot, = plt.semilogx(omega, 20*np.log10(mag), label='PC')
plt.subplot(212),
plantPhasePlot, = plt.semilogx(omega, phase, label='PC')
###########################################################
# add a prefilter to eliminate the overshoot
F = tf(1, 1)
F = add_control_lpf(F, p=1)
############################################
# Create Plots
############################################
# Closed loop transfer function from R to Y - no prefilter
CLOSED_R_to_Y = (Plant*C/(1.0+Plant*C))
# Closed loop transfer function from R to Y - with prefilter
CLOSED_R_to_Y_with_F = (F*Plant*C/(1.0+Plant*C))
# Closed loop transfer function from R to U
CLOSED_R_to_U = (C/(1.0+Plant*C))
if PLOT:
plt.figure(4), plt.clf()
plt.subplot(311), plt.grid(True)
    mag, phase, omega = cnt.bode(CLOSED_R_to_Y, dB=True, Plot=False)
    plt.semilogx(omega, 20*np.log10(mag), color='b')
    mag, phase, omega = cnt.bode(CLOSED_R_to_Y_with_F, dB=True, Plot=False)
    plt.semilogx(omega, 20*np.log10(mag), color='g')
plt.title('Close Loop Bode Plot')
plt.subplot(312), plt.grid(True)
T = np.linspace(0, 2, 100)
T, yout = cnt.step_response(CLOSED_R_to_Y, T)
plt.plot(T, yout, color='b')
plt.ylabel('Step Response')
plt.subplot(313), plt.grid(True)
T = np.linspace(0, 2, 100)
T, yout = cnt.step_response(CLOSED_R_to_U, T)
plt.plot(T, yout, color='b')
plt.ylabel('Control Effort')
# Keeps the program from closing until the user presses a button.
plt.pause(0.0001) # not sure why this is needed for both figures to display
print('Press key to close')
plt.waitforbuttonpress()
plt.close()
##############################################
# Convert Controller to State Space Equations
##############################################
C_num = np.asarray(C.num[0])
C_den = np.asarray(C.den[0])
F_num = np.asarray(F.num[0])
F_den = np.asarray(F.den[0])
Css=cnt.tf2ss(C)
Fss=cnt.tf2ss(F)
|
mebach/me431
|
homework_template_folders/homework_template_folders/practice_final/python/loopshape_rodMass.py
|
loopshape_rodMass.py
|
py
| 6,465
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37695958743
|
zvirata = ["pes", "kočka", "králík", "had"]
zvirata.append("andulka")
# Build a dictionary from the list: the key is the word's second letter, the value is the word
zvirata_dict = {}
for zvire in zvirata:
    zvirata_dict[zvire[1]] = zvire
# Sort the keys
keys = zvirata_dict.keys()
sort_letters = sorted(keys)
# Build a new list following the sorted keys
sort_value = []
for key in sort_letters:
    sort_value.append(zvirata_dict[key])
print(sort_value)
# Build a new dictionary following the sorted keys
# sort_value = {}
# for key in sort_letters:
#     sort_value[key] = zvirata_dict[key]
# Print only the dictionary's values as a list
# seznam_zvirat = list(sort_value.values())
# print(seznam_zvirat)
# Before properly reading the assignment (which requires dictionaries), I wrote
# the version below, which also worked, but then reworked it to match the assignment:
#for zvire in zvirata:
#    zvirata.sort(key=lambda zvire: zvire[1])
#print(f"Animals sorted by the second letter: {zvirata}")
# lambda is used when the key parameter must be a function (or another callable)
# that takes a single argument and returns the key used for sorting
|
Pavucinap/PyLadies
|
ulohy_05/povinna_uloha_6_zviratka.py
|
povinna_uloha_6_zviratka.py
|
py
| 1,197
|
python
|
cs
|
code
| 0
|
github-code
|
6
|
29834178126
|
'''
Write a program that reads the name and price of a product;
the program repeats this as long as the user wants.
At the end it shows the total number of products, the sum of the prices,
the IVA (VAT) amount on that total, and the total to pay.
'''
cantidadProductos = 0
precioTotal = 0.0
totalPagar = 0.0
iva = 0.0
confirmacion = 0
print('Bienvenido a la calculadora de precios de IBM: \nA continuacion Ingresa los nombres y precios de los productos:\n')
while(confirmacion == 0):
nombre = input('Ingresa el nombre del producto: \n')
precioProducto = float(input('Ingresa el precio del producto: \n'))
precioTotal += precioProducto
cantidadProductos += 1
confirmacion = int(input('Deseas agregar mas productos? 0 = SI 1 = NO\n'))
iva = precioTotal * 0.16
totalPagar = precioTotal + iva
print(f'Cantidad de productos ingresados: {cantidadProductos} \n Precio Total: {precioTotal} \n Total Iva: {iva} \n Total a pagar: {totalPagar}')
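# Worked example: a single product priced at 100.0 gives iva = 16.0 (16 %)
# and totalPagar = 116.0.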
|
Developer2022004/TercerSemestre
|
Parcial_uno/practicaCinco.py
|
practicaCinco.py
|
py
| 978
|
python
|
es
|
code
| 0
|
github-code
|
6
|
37016357191
|
#!/usr/bin/env python3
from __future__ import print_function
import ase.io
import sys
import numpy as np
if len(sys.argv) != 3:
sys.stderr.write("Usage: %s model.so model_params < input.xyz\n" % sys.argv[0])
sys.exit(1)
at = ase.io.read(sys.stdin, format="extxyz")
import fortranMCMDpy
FORTRAN_model = sys.argv[1]
FORTRAN_model_params = sys.argv[2]
f_MC_MD = fortranMCMDpy.fortran_MC_MD(FORTRAN_model)
params = np.array([ float(x) for x in FORTRAN_model_params.split() ])
f_MC_MD.init_model(params)
f0 = np.zeros((len(at),3))
e0 = f_MC_MD.eval_forces(at, f0)
print("e0 ", e0)
print("f0 ", f0)
print("")
pos_0 = at.get_positions()
for i_at in range(len(at)):
for i_cart in range(3):
pos_pert = pos_0.copy()
for i_dx in range(8):
dx = 10.0**(-i_dx)
pos_pert[i_at, i_cart] = pos_0[i_at, i_cart] + dx
at.set_positions(pos_pert)
ep = f_MC_MD.eval_energy(at)
pos_pert[i_at, i_cart] = pos_0[i_at, i_cart] - dx
at.set_positions(pos_pert)
em = f_MC_MD.eval_energy(at)
print(i_at, i_cart, dx, (ep-em)/(2.0*dx), f0[i_at,i_cart], (ep-em)/(2.0*dx)+ f0[i_at,i_cart])
print("")
|
libAtoms/pymatnest
|
test_fortran_model.py
|
test_fortran_model.py
|
py
| 1,204
|
python
|
en
|
code
| 26
|
github-code
|
6
|
17878868675
|
####
# Each team's file must define four tokens:
# team_name: a string
# strategy_name: a string
# strategy_description: a string
# move: A function that returns 'c' or 'b'
####
#Idea: run your own historical simulation before you play against anyone. This will allow you to determine how your opponent will play and give the percentage chances. Then I will play according to what the best play is against my opponent, based on the data from the simulation.
team_name = 'maggin'
strategy_name = 'Pattern Recognition'
strategy_description = 'Identify pattern that each player engages in.'
# This player always adapts then either colludes or betrays based on opponent's historical pattern.
def move(my_history, their_history, my_score, their_score):
if len(my_history)==0:
return 'b'
else:
# View last round
recent_round_them = their_history[-1]
recent_round_me = my_history[-1]
# Examine rounds before that one
for round in range(len(my_history)-1):
prior_round_them = their_history[round]
prior_round_me = my_history[round]
# If one matches
if (prior_round_me == recent_round_me) and \
(prior_round_them == recent_round_them):
return their_history[round]
        # No match found
        if my_history[-1]=='c' and their_history[-1]=='b':
            return 'b'
        # Assumed fallback so the function always returns a valid move: collude
        return 'c'
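# Usage sketch (hypothetical histories, as a tournament harness would pass them):
#   move([], [], 0, 0)               -> 'b' (always betray on the first round)
#   move(['c','b'], ['b','c'], 1, 1) -> 'c' (no earlier round matches the last one)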
|
rpyle/IPD2022
|
maggin.py
|
maggin.py
|
py
| 1,493
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73761204349
|
import numpy as np
from constants.constants import UNI, EXTENDED
from utils.progress_bar import progress_bar
def create(img:np.array, UNICODE=EXTENDED) -> str:
res = ""
for i,line in enumerate(img):
progress_bar(i/img.shape[0])
for pixel in line:
            res += UNICODE[int(sum(pixel)/768*len(UNICODE))]  # scale 0..765 intensity into the glyph table in use
res += "\n"
return res
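# Usage sketch (assumes img is an HxWx3 uint8 array, e.g.
#   np.array(Image.open("photo.jpg").convert("RGB")) from PIL):
# each pixel's summed intensity (0..765) is scaled into an index into the
# glyph table, so darker pixels map to earlier glyphs.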
|
carlospuenteg/Image-to-Unicode
|
create_txt.py
|
create_txt.py
|
py
| 363
|
python
|
en
|
code
| 9
|
github-code
|
6
|
4548632854
|
num = int(input())
if num % 2 != 0 and num >= 1 and num <= 100:
foo = (num / 2)+1
count = [int(i) for i in range(num) if i % 2 != 0][::-1]
rnum = num
#print(count)
for i in range(num):
print(" "*int(i),end='')
print(i+1,end="")
if int(foo) == i+1:
print("")
break
else:
print(" "*int(count[i]),end="")
print(int(rnum))
rnum -= 1
|
SheikhAnas23/DLithe_Python_Internship_Report
|
day6/assingnment 3/prob5.py
|
prob5.py
|
py
| 441
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19052278908
|
'''
Binary search: must start from sorted data; guarantees logarithmic running time.
Terms
    target: the value to search for
    data: a list sorted in ascending order
    start: index of the first element of data
    end: index of the last element of data
    mid: middle index between start and end
Binary search
    Searches data for target and returns its index.
    Returns None if target is not found.
'''
# target: the value to find, data: the list to search
def binary_search(target,data):
    data.sort() # sort first (ascending order)
    start = 0
    end = len(data) - 1
    while start <= end: # loop until the index range is exhausted
        mid = (start+end) // 2 # middle index
        if data[mid] == target:
            return mid # found: end the function
        elif data[mid] < target: # if target is greater than the middle value, move start past mid
            start = mid + 1
        else:
            end = mid - 1
    return None
li = [i ** 2 for i in range(11)]
print(li)
target = 9
idx = binary_search(target,li)
if idx is not None: # idx can be 0, so test against None explicitly
print(li[idx])
else:
print('찾는게없다.')
|
parkjunga/algorithm
|
binary_search.py
|
binary_search.py
|
py
| 1,134
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
1919214297
|
from lxml import etree
import requests
import pymysql
def getdel():
url = 'https://book.douban.com/top250'
data = requests.get(url).text
s=etree.HTML(data)
file=s.xpath('//*[@id="content"]/div/div[1]/div/table/tr/td[2]/div[1]/a/@title')
print(file)
return file
l1=getdel()
db = pymysql.connect(host="localhost", user="root", password="admin", database="test", charset="utf8")
cs = db.cursor()
for i in range(len(l1)):
    sql = "insert into py(jing) values(%s)"
    cs.execute(sql, (l1[i],))  # parameterized query avoids SQL injection
    db.commit()
print('数据插入完成')  # "Data insertion complete"
db.close()
|
cxzw/python--
|
16219111435/席子文爬虫作业/静态爬虫.py
|
静态爬虫.py
|
py
| 601
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1822549193
|
#class without chaining
##*********************************
##class MyHashMap:
## def __init__(self):
## self.size = 10
## self.arr = [None for i in range(self.size)]
## #print(self.arr)
##
## def put(self,key,val):
## hsh = self.gethash(key)
## self.arr[hsh] = val
##
## def get(self,key):
## print("Get value at {}:{}".format(key,self.arr[self.gethash(key)]))
## return self.arr[self.gethash(key)]
##
## def remove(self,key):
## self.arr[self.gethash(key)] = None
##
## def gethash(self,key):
## #print("\thash of {} is {}".format(key,key%self.size))
## return key%self.size
#class with chainging
class MyHashMap:
def __init__(self):
self.size = 100
self.arr = [[] for i in range(self.size)]
def gethash(self,key):
print("hash of {} is {}".format(key,key%self.size))
return key%self.size
def put(self,key,val):
print("\nPUT ",val)
hsh = self.gethash(key)
found = False
for i,v in enumerate(self.arr[hsh]):
print("i:{},v:{}".format(i,v))
if v:
if v[0] == key:
print(v)
                    self.arr[hsh][i] = (key,val)
found = True
break
if found == False:
self.arr[hsh].append((key,val))
print("\tCurrent Arr:",self.arr)
def get(self,key):
print("\nGET ",key)
hsh = self.gethash(key)
for v in self.arr[hsh]:
            if v[0] == key:
print("value of {} is {}".format(v[0],v[1]))
return v[1]
return -1
    def remove(self,key):
        hsh = self.gethash(key)
        # search the bucket for this key and delete the pair, keeping the chain intact
        for i,v in enumerate(self.arr[hsh]):
            if v[0] == key:
                print("{} to be deleted".format(self.arr[hsh][i]))
                del self.arr[hsh][i]
                break
M = MyHashMap()
M.put(1,1)
M.put(2,2)
M.put(11,3)
M.get(1)
M.get(3)
print(M.arr)
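# Hypothetical extra calls to exercise chaining: 101 % 100 == 1, so (101, 5)
# lands in the same bucket as key 1 and is appended to that bucket's list.
M.put(101, 5)
M.get(101)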
|
sushasru/LeetCodeCrunch
|
LeetCode_E_DesignHashMap.py
|
LeetCode_E_DesignHashMap.py
|
py
| 2,082
|
python
|
en
|
code
| 0
|
github-code
|
6
|
43371076004
|
import requests
from bs4 import BeautifulSoup
import os
def get_book(url):
response = requests.get(url)
response.encoding = "utf-8"
text = response.text
    soup = BeautifulSoup(text, "html.parser")  # explicit parser avoids bs4's warning
div_show = soup.select("div.dirShow")[0]
# print(div_show)
dirs = div_show.select("li a")
count = len(dirs)
    url_template = url.replace('0.html', '%s.html').replace('bookDir', 'book')
    for num in range(1, count + 1):
        url_after = url_template % (num)
        print(url_after)
response = requests.get(url_after)
response.encoding = 'utf-8'
text = response.text
        soup = BeautifulSoup(text, "html.parser")
titles = soup.select("table td")
print(titles[3].text)
articles = soup.select("div#Content")
print(articles[0].text)
        # Save the chapter to a file
is_exist = os.path.exists(r"D:\flaskDemo\static\books")
if not is_exist:
os.mkdir(r"D:\flaskDemo\static\books")
title = titles[3].text + ".txt"
with open(r"D:\flaskDemo\static\books\\" + title, "a", encoding="utf-8") as f:
f.write(articles[0].text)
if __name__== "__main__":
# 沉默的大多数
# url="http://t.icesmall.cn/bookDir/2/335/0.html"
# 麦田守望者
# http://t.icesmall.cn/bookDir/1/33/0.html
# 挪威的森林
# http://t.icesmall.cn/bookDir/3/423/0.html
# 输入世界名著网书籍目录地址
get_book("http://t.icesmall.cn/bookDir/3/423/0.html")
|
frebudd/python
|
book_spider.py
|
book_spider.py
|
py
| 1,460
|
python
|
en
|
code
| 2
|
github-code
|
6
|
38217350686
|
#!/usr/bin/env python3
import argparse
import sys
from typing import List, Union
import uuid
import capstone_gt
import gtirb
from gtirb_capstone.instructions import GtirbInstructionDecoder
def lookup_sym(node: gtirb.Block) -> Union[str, None]:
"""
Find a symbol name that describes the node.
"""
for sym in node.module.symbols:
if sym._payload == node:
return sym.name
def node_str(node: gtirb.Block) -> str:
"""
Generate a string that uniquely identifies the node
"""
if isinstance(node, gtirb.ProxyBlock):
return lookup_sym(node) or node.uuid
else:
return hex(node.address)
def has_undefined_branch(branches: List[gtirb.Edge]) -> bool:
"""
Determine if any of the branches are not resolved to a target.
"""
for branch in branches:
if isinstance(branch.target, gtirb.ProxyBlock) and not lookup_sym(
branch.target
):
return True
return False
def has_symbolic_branch(branches: List[gtirb.Edge]) -> bool:
"""
Determine if any of the branches are to a defined symbol.
"""
for branch in branches:
if lookup_sym(branch.target):
return True
return False
def is_skipped_section(node: gtirb.CodeBlock) -> bool:
"""
Determine if the node is part of an uninteresting section.
"""
skipped_sections = [
".plt",
".init",
".fini",
".MIPS.stubs",
]
for section in node.module.sections:
if section.name not in skipped_sections:
continue
for interval in section.byte_intervals:
start = interval.address
end = interval.address + interval.size
if start <= node.address and node.address < end:
return True
return False
def get_func_entry_name(node: gtirb.CodeBlock) -> Union[str, None]:
"""
If the node is the entry point to a function, return the function name.
Otherwise returns None
"""
for key, value in node.module.aux_data["functionNames"].data.items():
if node in node.module.aux_data["functionEntries"].data[key]:
return value.name
def belongs_to_skipped_func(node: gtirb.CodeBlock) -> bool:
"""
    Determine if a CFG node belongs to a function that should be skipped.
"""
skipped_funcs = [
"__do_global_ctors_aux",
"__do_global_dtors_aux",
"__libc_csu_fini",
"__libc_csu_init",
"_dl_relocate_static_pie",
"_start",
"deregister_tm_clones",
"frame_dummy",
"register_tm_clones",
]
for name in skipped_funcs:
for key, value in node.module.aux_data["functionNames"].data.items():
if value.name == name:
if node in node.module.aux_data["functionBlocks"].data[key]:
return True
return is_skipped_section(node)
def is_padding(node: gtirb.CodeBlock) -> bool:
"""
Determine if a CFG node is padding
"""
for key, padding_size in node.module.aux_data["padding"].data.items():
padding_addr = key.element_id.address + key.displacement
if padding_addr == node.address:
return True
return False
def check_unreachable(module: gtirb.Module) -> int:
"""
Check a GTIRB module for unexpected unreachable code
"""
error_count = 0
for node in module.cfg_nodes:
if (
not isinstance(node, gtirb.CodeBlock)
or belongs_to_skipped_func(node)
or is_padding(node)
):
continue
func = get_func_entry_name(node)
if len(list(node.incoming_edges)) == 0 and func != "main":
if func:
# In some cases in our examples, function call sites are
# optimized away, but the function is left in the binary.
# We warn for these - if this code isn't being run, we're not
# testing whether ddisasm disassembled it well, and we may want
# to consider reworking those examples.
print(
'WARNING: unreachable function "{}" at {}'.format(
func, node_str(node)
)
)
else:
# Unreachable code that is not a function entry is likely to
# be an error, such as jump table where not all possible
# targets were discovered.
print("ERROR: unreachable code at", node_str(node))
error_count += 1
return error_count
def check_unresolved_branch(module: gtirb.Module) -> int:
"""
Check a GTIRB module for unresolved branches
"""
error_count = 0
for node in module.cfg_nodes:
if (
not isinstance(node, gtirb.CodeBlock)
or belongs_to_skipped_func(node)
or is_padding(node)
):
continue
branches = []
for edge in node.outgoing_edges:
if edge.label.type not in (
gtirb.Edge.Type.Return,
gtirb.Edge.Type.Fallthrough,
):
branches.append(edge)
# Calls to PLT functions seem to have a branch to a ProxyBlock for
# that symbol and a branch to the original PLT function.
if has_undefined_branch(branches) and not has_symbolic_branch(
branches
):
print("ERROR: unresolved jump in", node_str(node))
error_count += 1
return error_count
def check_cfg_empty(module: gtirb.Module) -> int:
"""
Check if a GTIRB module has an empty CFG
"""
if len(list(module.cfg_nodes)) == 0:
print("ERROR: CFG has no nodes")
return 1
return 0
def check_main_is_code(module: gtirb.Module) -> int:
"""
Check a GTIRB module for a `main` symbol that is not a CodeBlock.
Returns the number of errors found.
"""
error_count = 0
for sym in module.symbols:
if sym.name == "main":
if not isinstance(sym.referent, gtirb.CodeBlock):
print("ERROR: main is not code")
error_count += 1
return error_count
def check_decode_mode_matches_arch(module: gtirb.Module) -> int:
"""
Ensure a GTIRB only uses DecodeMode values that match the architecture
Returns the number of errors found.
"""
error_count = 0
# if a new mode is added, we will raise a KeyError unless it is added
# to this dictionary.
mode_to_arch = {
gtirb.CodeBlock.DecodeMode.Thumb: gtirb.module.Module.ISA.ARM
}
for block in module.code_blocks:
if block.decode_mode == gtirb.CodeBlock.DecodeMode.Default:
# "Default" is correct on every arch
continue
if module.isa != mode_to_arch[block.decode_mode]:
print(f"ERROR: {module.isa} does not support {block.decode_mode}")
error_count += 1
return error_count
def check_outgoing_edges(module: gtirb.Module) -> int:
"""
Check outgoing edges for invalid configurations
"""
error_count = 0
for node in module.cfg_nodes:
fallthrough_count = 0
direct_call_count = 0
direct_jump_count = 0
for edge in node.outgoing_edges:
if edge.label.direct and edge.label.type == gtirb.Edge.Type.Call:
direct_call_count += 1
elif (
edge.label.direct and edge.label.type == gtirb.Edge.Type.Branch
):
direct_jump_count += 1
elif edge.label.type == gtirb.Edge.Type.Fallthrough:
fallthrough_count += 1
if fallthrough_count > 1:
print("ERROR: multiple fallthrough from ", node_str(node))
error_count += 1
if direct_call_count > 1:
print("ERROR: multiple direct call from ", node_str(node))
error_count += 1
if direct_jump_count > 1:
print("ERROR: multiple direct jump from ", node_str(node))
error_count += 1
return error_count
def is_rep_loop(inst: capstone_gt.CsInsn) -> bool:
"""
Check if an instruction is a rep/repe/repne loop
"""
return inst.prefix[0] in [
capstone_gt.x86.X86_PREFIX_REP,
capstone_gt.x86.X86_PREFIX_REPE,
capstone_gt.x86.X86_PREFIX_REPNE,
]
def is_direct(inst: capstone_gt.CsInsn) -> bool:
"""
Check if a call or jump instruction is direct
"""
assert any(
inst.group(grp)
for grp in (
capstone_gt.x86.X86_GRP_CALL,
capstone_gt.x86.X86_GRP_JUMP,
capstone_gt.x86.X86_GRP_BRANCH_RELATIVE,
)
)
target = inst.operands[0]
return target.type == capstone_gt.CS_OP_IMM
def is_pc_relative(inst: capstone_gt.CsInsn) -> bool:
"""
Check if a call or jump instruction is pc-relative
"""
assert any(
inst.group(grp)
for grp in (
capstone_gt.x86.X86_GRP_CALL,
capstone_gt.x86.X86_GRP_JUMP,
capstone_gt.x86.X86_GRP_BRANCH_RELATIVE,
)
)
target = inst.operands[0]
return (
target.type == capstone_gt.CS_OP_MEM
and inst.reg_name(target.mem.base) == "rip"
)
def check_edge_instruction_group(module: gtirb.Module) -> int:
"""
Check edges for valid instruction groups
"""
# TODO: support non-x86 checks
if module.isa not in [gtirb.Module.ISA.X64, gtirb.Module.ISA.IA32]:
return 0
err_count = 0
decoder = GtirbInstructionDecoder(module.isa)
# TODO: there is one more generic capstone group, X86_GRP_PRIVILEGE.
# does it belong in Syscall?
edge_type_groups = {
gtirb.Edge.Type.Branch: set(
(
capstone_gt.x86.X86_GRP_JUMP,
capstone_gt.x86.X86_GRP_BRANCH_RELATIVE,
)
),
gtirb.Edge.Type.Call: set((capstone_gt.x86.X86_GRP_CALL,)),
gtirb.Edge.Type.Return: set((capstone_gt.x86.X86_GRP_RET,)),
gtirb.Edge.Type.Syscall: set((capstone_gt.x86.X86_GRP_INT,)),
gtirb.Edge.Type.Sysret: set((capstone_gt.x86.X86_GRP_IRET,)),
}
for edge in module.ir.cfg:
if edge.label.type == gtirb.Edge.Type.Fallthrough:
# fallthrough edges do not map to a specified instruction group
continue
block = edge.source
# get the last instruction
for instruction in decoder.get_instructions(block):
last_inst = instruction
# ensure instruction can be an edge
# Instructions with rep prefix can have self-edge
if (
edge.label.type == gtirb.Edge.Type.Branch
and is_rep_loop(last_inst)
and edge.target == block
):
continue
valid_groups = edge_type_groups[edge.label.type]
if not any(last_inst.group(grp) for grp in valid_groups):
print(
"ERROR: invalid edge instruction group at 0x{:08x}: {}".format(
last_inst.address, last_inst.groups
)
)
err_count += 1
return err_count
def check_cfg_completeness(module: gtirb.Module) -> int:
"""
Check we have 1 call/branch edge from all direct or
pc-relative calls/jumps.
"""
# TODO: support non-x86 checks
if module.isa not in [gtirb.Module.ISA.X64, gtirb.Module.ISA.IA32]:
return 0
err_count = 0
decoder = GtirbInstructionDecoder(module.isa)
for block in module.code_blocks:
# get the last instruction
for instruction in decoder.get_instructions(block):
last_inst = instruction
if last_inst.group(capstone_gt.x86.X86_GRP_CALL):
call_edges = [
edge
for edge in block.outgoing_edges
if edge.label.type == gtirb.EdgeType.Call
]
if is_direct(last_inst) or is_pc_relative(last_inst):
# do not count if we are using the 'call next; next: pop'
# trick to get the PC value.
if (
is_direct(last_inst)
and module.isa == gtirb.Module.ISA.IA32
and last_inst.operands[0].imm
== last_inst.address + last_inst.size
):
continue
if len(call_edges) != 1:
print(
"ERROR: expected 1 call edge at "
f"0x{last_inst.address:08x} and got {len(call_edges)}"
)
err_count += 1
elif last_inst.group(capstone_gt.x86.X86_GRP_JUMP):
# The first block of plt sections looks like:
# pushq .got.plt+8(%rip)
# jmpq *.got.plt+16(%rip) <----
# And the first 3 entries of .got.plt (or .got) are:
# .quad link-time address of _DYNAMIC # set by linker
# .quad Obj_Entry # set by ld.so
# .quad _rtld_bind_start # set by ld.so
# Currently we don't generate an edge for that
# jump because .got.plt+16 has a 0 and no relocations.
if (
block.section.address == block.address
and block.section.name in [".plt", ".plt.sec", ".plt.got"]
):
continue
branch_edges = [
edge
for edge in block.outgoing_edges
if edge.label.type == gtirb.EdgeType.Branch
]
if is_direct(last_inst) or is_pc_relative(last_inst):
if len(branch_edges) != 1:
print(
"ERROR: expected 1 branch edge at "
f"0x{last_inst.address:08x} and got"
f" {len(branch_edges)}"
)
err_count += 1
return err_count
def check_dangling_auxdata(module: gtirb.Module) -> int:
"""
Check for dangling UUIDs in elfSymbolTabIdxInfo auxdata
"""
err_count = 0
for k, v in module.aux_data["elfSymbolTabIdxInfo"].data.items():
if not isinstance(k, gtirb.Symbol):
if isinstance(k, uuid.UUID):
print(
"ERROR: expected elfSymbolTabInfo key to be Symbol, but "
f"it is a dangling UUID: {k}, {v}"
)
else:
print(
"ERROR: expected elfSymbolTabInfo key to be Symbol, but "
f"it is {type(k)}: {k}, {v}"
)
err_count += 1
return err_count
CHECKS = {
"unreachable": check_unreachable,
"unresolved_branch": check_unresolved_branch,
"cfg_empty": check_cfg_empty,
"main_is_code": check_main_is_code,
"decode_mode_matches_arch": check_decode_mode_matches_arch,
"outgoing_edges": check_outgoing_edges,
"edge_instruction_group": check_edge_instruction_group,
"cfg_completeness": check_cfg_completeness,
"dangling_auxdata": check_dangling_auxdata,
}
class NoSuchCheckError(Exception):
"""Indicates an invalid GTIRB check was specified"""
pass
def run_checks(module: gtirb.Module, selected_checks: List[str]):
"""
Run specified checks
Raises NoSuchCheckError for unexpected names in selected_checks
"""
error_count = 0
for selected_check in selected_checks:
if selected_check not in CHECKS:
raise NoSuchCheckError(f"No such check: {selected_check}")
error_count += CHECKS[selected_check](module)
return error_count
def main():
parser = argparse.ArgumentParser()
parser.add_argument("path")
check_names = list(CHECKS.keys())
check_names.append("all")
parser.add_argument(
"--check",
choices=check_names,
default="all",
help="The name of the check to run",
)
args = parser.parse_args()
module = gtirb.IR.load_protobuf(args.path).modules[0]
checks = list(CHECKS.keys()) if args.check == "all" else [args.check]
error_count = run_checks(module, checks)
sys.exit(error_count)
if __name__ == "__main__":
main()
|
GrammaTech/ddisasm
|
tests/check_gtirb.py
|
check_gtirb.py
|
py
| 16,220
|
python
|
en
|
code
| 581
|
github-code
|
6
|
34508588550
|
# https://www.geeksforgeeks.org/find-sum-modulo-k-first-n-natural-number/
def find_sum(N, K):
    # Count how many full 1, 2, ..., K-1, 0 cycles occur.
    y = N // K
    # Number of leftover elements forming an incomplete cycle (Case 1).
    x = N % K
    # Total = (sum of one full cycle, K*(K-1)/2) * (number of cycles)
    #         + (sum of the incomplete tail, 1 + 2 + ... + x).
    return (K * (K - 1) // 2) * y + (x * (x + 1)) // 2
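# Worked example: N = 10, K = 3 -> y = 3 full cycles contributing 3 * (1 + 2 + 0) = 9,
# plus the partial tail 10 % 3 = 1 contributing 1, so find_sum(10, 3) == 10.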
if __name__ == "__main__":
N = 10
K = 2
print(find_sum(N, K))
|
ved93/deliberate-practice-challenges
|
code-everyday-challenge/n131_sum_of_modulo_k.py
|
n131_sum_of_modulo_k.py
|
py
| 661
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37065823559
|
class myclass:  # base class shared by all operations
    def inherit(self):
        self.A = int(input("enter the A value:"))
        self.B = int(input("enter the B value:"))

class Addition(myclass):
    def add(self):
        self.inherit()
        c = self.A + self.B
        print("Addition value is", c)

class Multiplication(myclass):
    def mul(self):
        self.inherit()
        c = self.A * self.B
        print("multiplication is:", c)

class Subtraction(myclass):
    def sub(self):
        self.inherit()
        c = self.A - self.B
        print("subtraction is:", c)
obj1 = Addition()
obj1.add()
obj = Multiplication()
obj.mul()
obj = Subtraction()
obj.sub()
|
duraipandiyan/inheritance_opps
|
hirarichrical inheritance.py
|
hirarichrical inheritance.py
|
py
| 599
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22047012801
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import RangeSlider
import pims
def gui(input_img):
    # Read in an image stack (usually an np.array-like sequence of frames).
    # Initialize the frame number to zero, since array indexing starts at zero.
frame = 0
img = input_img
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
plt.subplots_adjust(bottom=0.25)
im = axs[0].imshow(img[frame])
axs[1].hist(img.flatten(), bins='auto')
axs[1].set_title('Histogram of pixel intensities')
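    # Note: the histogram covers pixel intensities from the whole stack, while the image shows one frame.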
# Create the RangeSlider
slider_ax = plt.axes([0.20, 0.1, 0.60, 0.03])
slider = RangeSlider(slider_ax, "Threshold", img[frame].min(), img[frame].max())
# Create the Vertical lines on the histogram
lower_limit_line = axs[1].axvline(slider.val[0], color='k')
upper_limit_line = axs[1].axvline(slider.val[1], color='k')
def update(val):
# The val passed to a callback by the RangeSlider will
# be a tuple of (min, max)
# Update the image's colormap
im.norm.vmin = val[0]
im.norm.vmax = val[1]
# Update the position of the vertical lines
lower_limit_line.set_xdata([val[0], val[0]])
upper_limit_line.set_xdata([val[1], val[1]])
# Redraw the figure to ensure it updates
fig.canvas.draw_idle()
slider.on_changed(update)
plt.show()
# Test case
gui(np.array(pims.open("./example/Single_molecule_moving.tif")))
|
kzhang425/PyImgTracker
|
gui_plots.py
|
gui_plots.py
|
py
| 1,454
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6841379030
|
import itertools
from collections import OrderedDict
import nltk
from spellchecker import SpellChecker
from nltk.corpus import wordnet as wn
from ranker import Ranker
#import utils
"""
search engine for spell checker
"""
# DO NOT MODIFY CLASS NAME
class Searcher:
# DO NOT MODIFY THIS SIGNATURE
# You can change the internal implementation as you see fit. The model
# parameter allows you to pass in a precomputed model that is already in
# memory for the searcher to use such as LSI, LDA, Word2vec models.
# MAKE SURE YOU DON'T LOAD A MODEL INTO MEMORY HERE AS THIS IS RUN AT QUERY TIME.
def __init__(self, parser, indexer, model=None):
self._parser = parser
self._indexer = indexer
self._ranker = Ranker()
self._model = model
# DO NOT MODIFY THIS SIGNATURE
# You can change the internal implementation as you see fit.
def search(self, query, k=None):
"""
Executes a query over an existing index and returns the number of
relevant docs and an ordered list of search results (tweet ids).
Input:
query - string.
k - number of top results to return, default to everything.
Output:
A tuple containing the number of relevant search results, and
            a list of tweet_ids where the first element is the most relevant
and the last is the least relevant result.
"""
query_list = query.split(" ")
query_as_list = self._parser.text_operation(query_list)
        # query expansion via spell checker
        query_list_after_spell_check = self.q_spell_check(query_as_list)
        # remove stop words
        query_as_list = self._parser.parse_sentence(query_list_after_spell_check)
        # find the docs
        relevant_docs = self._relevant_docs_from_posting(query_as_list)  # all the relevant docs for the query
#ranked_doc_ids = Ranker.rank_relevant_docs(relevant_docs)
relevant_docs = OrderedDict(sorted(relevant_docs.items(), key=lambda item: item[1], reverse=True))
relevant_docs = dict(itertools.islice(relevant_docs.items(), 2000)) #max is 2000 docs
relevant_docs_sort = self._ranker.dot_prodact_and_cos(relevant_docs, self._indexer, len(query_as_list))
n_relevant = len(relevant_docs)
if k is not None:
            relevant_docs_sort = self._ranker.retrieve_top_k(relevant_docs_sort, k)
return n_relevant, relevant_docs_sort
# feel free to change the signature and/or implementation of this function
# or drop altogether.
def _relevant_docs_from_posting(self, query_as_list):
"""
This function loads the posting list and count the amount of relevant documents per term.
:param query_as_list: parsed query tokens
:return: dictionary of relevant documents mapping doc_id to document frequency.
"""
relevant_docs = {}
for word in query_as_list:
            posting_list = self._indexer.get_term_posting_list(word)  # get all the tweets containing this word
for doc in posting_list:
tf = self._indexer.get_term_inverted_idx(word)[2]
id = doc[0]
if id not in relevant_docs.keys():
relevant_docs[id] = [1, []]
# self._indexer.get_term_inverted_idx[word]
tfidf = doc[4] * tf
relevant_docs[id][1].append(tfidf)
else:
tfidf = doc[4] * tf
relevant_docs[id][1].append(tfidf)
relevant_docs[id][0] += 1
return relevant_docs
"""
this function expand the query by using spell checker
get query as list and add words by checker
"""
def q_spell_check(self, query):
spell = SpellChecker()
corr_q = []
corr_q.extend(query)
i = 0
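        # Replace a token with its spell-corrected form only when the correction
        # actually exists in the index; otherwise keep the original token.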
for word in query:
new_word = spell.correction(word)
if new_word != word:
if self._indexer._is_term_exist_in_idx(new_word):
corr_q[i] = new_word
i += 1
return corr_q
|
hallelhel/Search_Engine
|
searcher_4.py
|
searcher_4.py
|
py
| 4,158
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71749352509
|
import random
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers='0.0.0.0:9092')
num = random.randint(0, 10)
num_bytes = bytes(str(num), encoding='utf-8')
is_send = producer.send('test-topic', value=num_bytes, key=num_bytes)
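# send() is asynchronous and returns a FutureRecordMetadata; .get() below blocks until the broker acks.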
# Block for 'synchronous' sends
try:
record_metadata = is_send.get(timeout=2)
print(record_metadata)
except Exception as exc:
# Decide what to do if produce request failed...
#log.exception()
print('Exception: ', exc)
print(' finish ')
# Successful result returns assigned partition and offset
# print(record_metadata.partition)
# print(record_metadata.offset)
|
makseli/kafka-docker-python
|
producer.py
|
producer.py
|
py
| 639
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22812799305
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 10:30:26 2018
@author: Puneet Kumar
"""
x = input("enter the text : ")
#x = 'defgh'
k = x = input("enter the text : ")
#x = 'defgh'
k = ['q','w','e','r','t','y','u','i','o','p','a','s','d','f','g','h','j','k','l','z','x','c','v','b','n','m']
alpha = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
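# Simple substitution cipher: k is the key alphabet (QWERTY order), alpha is the
# plain alphabet; encryption maps alpha -> k, decryption maps back.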
'''
with open('substi.txt','r') as f:
x = f.read()
'''
b =[]
t = []
for i in range(len(x)):
t.append(k[alpha.index(x[i])])
#b.extend(k[int(ord(x[(i)]) - 97)])
print(t)
for i in range(len(t)):
b.append(alpha[k.index(t[i])])
#b.extend(k[int(ord(x[(i)]) - 97)])
print(b)
'''
str = ("".join(b))
print("The Encrypted Massage : ",str)
#print("Entrypted massage is: ",b)
with open('substi.txt','a') as f:
f.write(str)'''
|
pappukr4444/M.Tech-Labs
|
subtitution.py
|
subtitution.py
|
py
| 915
|
python
|
en
|
code
| 1
|
github-code
|
6
|
15774686000
|
import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
items = [rock, paper, scissors]
rand_no = random.randint(0 , 2)
user_no = int(input("What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors.\n"))
if(user_no >= 0 and user_no <= 2):
print(f"{items[user_no]}\n\nComputer chose:\n{items[rand_no]}\n")
if(user_no == 0 and rand_no == 2):
print("You win!")
elif(user_no == 2 and rand_no == 0):
print("You lose")
elif(rand_no > user_no):
print("You lose")
elif(user_no > rand_no):
print("You win!")
else:
print("It's a draw")
else:
print("You typed an invalid number, you lose!")
|
pranayprasad7001/rock-paper-scissors
|
rock-paper-scissors.py
|
rock-paper-scissors.py
|
py
| 991
|
python
|
en
|
code
| 0
|
github-code
|
6
|
34450081054
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.shortcuts import render
from django.views.generic.base import View
from django.http import JsonResponse
from participa.settings import SEFAZ_API_URL
from participa.auth_sefaz.views import ParticipaSefazRequest, BaseView
from participa.auth_sefaz.models import User
from participa.report.models import Report, MonitoredNFe
from rest_framework.renderers import JSONRenderer
import json
class ReportView(BaseView):
def get(self):
pass
def post(self, *args, **kwargs):
get_token = self.request.META['HTTP_AUTHORIZATION']
data = json.loads(str(self.request.body, "utf_8"))
user = User.objects.filter(cpf=data.get("cpfDestinatario", None)).first()
if user:
return_report = self.send_report(json.dumps(data), get_token)
if return_report:
report = Report(id_report=return_report.text, user=user)
report.save()
return self.success_recive()
else:
return self.error_recive()
class QRCodeMonitorView(BaseView):
def post(self, *args, **kwargs):
data = json.loads(str(self.request.body, "utf_8"))
user = User.objects.filter(cpf=data.get("cpf", None)).first()
qr_code_data = data.get("qrcode_data", None)
if user and qr_code_data:
monitor = MonitoredNFe(user=user, qr_code_data=qr_code_data)
monitor.save()
return self.success_recive()
else:
return self.error_recive()
class QRCodeMonitorListView(BaseView):
def post(self, *args, **kwargs):
data = json.loads(str(self.request.body, "utf_8"))
user = User.objects.filter(cpf=data.get("cpf", None)).first()
if user:
monitoreds = MonitoredNFe.objects.filter(user=user)
monitoreds_serialized = dict(monitoreds=list(monitoreds.values('pk', 'status', 'qr_code_data', 'created_at', 'updated_at')))
return JsonResponse(monitoreds_serialized)
else:
return self.error_recive()
|
vctrferreira/hackathon-sefaz
|
participa/report/views.py
|
views.py
|
py
| 2,135
|
python
|
en
|
code
| 1
|
github-code
|
6
|
17598457644
|
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from PIL import Image
def make_pages(page, item_list, page_size):
paginator = Paginator(item_list, page_size)
try:
images = paginator.page(int(page))
except PageNotAnInteger:
images = paginator.page(1)
except EmptyPage:
images = paginator.page(paginator.num_pages)
return images
def image_rotate(image_url, angle):
path = '.' + image_url
oi = Image.open(path)
oi = oi.rotate(angle)
oi.save(path)
return 'success'
|
mr-shubhamsinghal/gallery-app
|
gallery/utils.py
|
utils.py
|
py
| 510
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71798191227
|
class NegativeDigitError(ValueError):
"""Exception raised when digit is negative in power and square_root calculations"""
def __init__(self, digit):
self.digit = digit
self.message = f'Digit "{digit}" has to be positive for powering or square rooting'
super().__init__(self.message)
class NotIntFloatError(ValueError):
"""Exception raised when input data neither int nor float for any action in calculations"""
    def __init__(self, args):
        self.bad_values = args
        # repr() keys let unhashable inputs (e.g. lists) be reported instead of
        # raising TypeError while constructing this exception.
        self.types = {repr(arg): type(arg).__name__ for arg in args}
        self.message = f'Values "{self.types}" have to be int or float to proceed with the calculation'
        super().__init__(self.message)
class Calculator:
    def is_int_or_float(self, x, y):
        return isinstance(x, (int, float)) and isinstance(y, (int, float))
def add(self, x, y):
try:
if not self.is_int_or_float(x, y) :
raise NotIntFloatError((x, y))
return x + y
except NotIntFloatError as e :
return f'Error: {e}'
except Exception as e:
return f'Error: {e}'
def subtract(self, x, y):
try:
if not self.is_int_or_float(x, y) :
raise NotIntFloatError((x, y))
return x - y
except NotIntFloatError as e :
return f'Error: {e}'
except Exception as e:
return f'Error: {e}'
def multiply(self, x, y):
try:
if not self.is_int_or_float(x, y) :
raise NotIntFloatError((x, y))
return round(x * y, 2)
except NotIntFloatError as e :
return f'Error: {e}'
except Exception as e:
return f'Error: {e}'
def divide(self, x, y):
try:
if not self.is_int_or_float(x, y) :
raise NotIntFloatError((x, y))
return round(x / y, 5)
except NotIntFloatError as e :
return f'Error: {e}'
except ZeroDivisionError as e:
return f'Error: {e}'
except Exception as e:
return f'Error: {e}'
def power(self, x, y):
try:
if not self.is_int_or_float(x, y) :
raise NotIntFloatError((x, y))
if y < 0:
raise NegativeDigitError(y)
return x ** y
except NotIntFloatError as e :
return f'Error: {e}'
except NegativeDigitError as e:
return f'Error: {e}'
except Exception as e:
return f'Error: {e}'
def square_root(self, x):
try:
            if not isinstance(x, (int, float)):
                raise NotIntFloatError((x,))
if x < 0:
raise NegativeDigitError(x)
return round(x ** 0.5, 5)
except NotIntFloatError as e :
return f'Error: {e}'
except NegativeDigitError as e:
return f'Error: {e}'
except Exception as e:
return f'Error: {e}'
calc = Calculator()
print('Except add :' + calc.add('a', 3))
print('Normal add :' + str(calc.add(5, 3)))
# Note: originally this raised a plain Exception rather than NotIntFloatError, because a list
# is unhashable and the old {arg: type(arg)} dict blew up while constructing the error itself;
# the repr()-keyed dict above fixes that, so NotIntFloatError is reported as intended.
print('Except sub :' + calc.subtract(10, [10, 15]))
print('Normal sub :' + str(calc.subtract(10, 4)))
print('Except mul :' + calc.multiply(3, 'hello'))
print('Normal mul :' + str(calc.multiply(3, 0.1111)))
print('Except div :' + calc.divide(8, 0))
print('Except div :' + calc.divide(8, 'bbb'))
print('Normal div :' + str(calc.divide(10, 3)))
print('Except pow :' + calc.power(2, -3))
print('Except pow :' + calc.power(2, 'error'))
print('Normal pow :' + str(calc.power(2, 3)))
print('Except sqr :' + calc.square_root(-3))
print('Except sqr :' + calc.square_root((5,)))
print('Normal sqr :' + str(calc.square_root(8)))
|
withaim/ithillel
|
exceptions.py
|
exceptions.py
|
py
| 3,907
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18183301241
|
from django.shortcuts import render, redirect
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from .models import Post, Comment
from .forms import CommentForm, ContactForm
# Create your views here.
def home(request):
return render(request, 'home.html')
def about(request):
return render(request, 'about.html')
def market_index(request):
posts = Post.objects.all()
return render(request, 'market/index.html', {
'posts': posts
})
def market_detail(request, post_id):
post = Post.objects.get(id=post_id)
return render(request, 'market/detail.html', {
'post': post, 'comment_form': CommentForm()
})
def add_comment(request, post_id):
form = CommentForm(request.POST)
if form.is_valid():
new_comment = form.save(commit=False)
new_comment.post_id = post_id
new_comment.save()
return redirect('detail', post_id=post_id)
def add_contact(request, comment_id, post_id):
form = ContactForm(request.POST)
if form.is_valid():
new_contact = form.save(commit=False)
new_contact.comment_id = comment_id
new_contact.save()
return redirect('detail', post_id=post_id)
class PostCreate(CreateView):
model = Post
fields = ['item', 'picture', 'description', 'price', 'user']
class PostUpdate(UpdateView):
model = Post
fields = ['item', 'picture', 'description', 'price']
class PostDelete(DeleteView):
model = Post
success_url = '/market'
|
gollobc/Meridio-Full-Stack-App
|
meridio/main_app/views.py
|
views.py
|
py
| 1,500
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21390773853
|
def oddHelper(s, k):
"""1 <= k <= n-2"""
i = 1
N = min(k, len(s) - 1 - k)
while i <= N:
if s[k - i] != s[k + i]:
break
else:
i += 1
return (1 + 2 * (i - 1), s[k - i + 1 : k + i])
def evenHelper(s, k):
"""1 <= i <= n-2"""
if s[k] != s[k + 1]:
return (0, '')
i = 1
N = min(k, len(s) - 2 - k)
while i <= N:
if s[k - i] != s[k + 1 + i]:
break
else:
i += 1
return (2 + 2 * (i - 1), s[k - i + 1 : k + i + 1])
def longestPalindrome(s):
"""
:type s: str
:rtype: str
"""
if len(s) == 1:
return s
if len(s) == 2:
if s[0] != s[1]:
return s[1]
else:
return s
if s[0] == s[1]:
s_max = (2, s[0:2])
else:
s_max = (1, s[0])
for k in range(1, len(s) - 1):
tmp_odd = oddHelper(s, k)
tmp_even = evenHelper(s, k)
if s_max[0] <= tmp_odd[0]:
s_max = tmp_odd
if s_max[0] <= tmp_even[0]:
s_max = tmp_even
return s_max[1]
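# Example: longestPalindrome("babad") returns "aba" -- both "bab" and "aba" have
# length 3, and the <= comparison lets the later center win the tie.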
|
Howii/LeetCode
|
prob_0005-Longest_Palindromic_Substring.py
|
prob_0005-Longest_Palindromic_Substring.py
|
py
| 1,091
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4142566592
|
import ast
import networkx as nx
import numpy as np
from collections import defaultdict
import json
from tqdm import tqdm
# nodes = [0, 1, 2, 3, 4]
# graph = [[4, 3, 0.75], [4, 1, 0.81], [4, 2, 0.97], [4, 0, 0.52]]
# page_rank_probs = defaultdict(float)
# DG = nx.DiGraph()
# DG.add_nodes_from(nodes)
# DG.add_weighted_edges_from(graph)
# PAGE_RANK = nx.pagerank(DG, alpha=0.95)
# # for sub_graph in nx.weakly_connected_components(DG):
# # sub_graph_size = len(sub_graph)
# # PAGE_RANK = nx.pagerank(DG.subgraph(list(sub_graph)))
# #
# # normalized_PAGERANK = {k: v * (sub_graph_size) / 5 for k, v in PAGE_RANK.items()}
# # page_rank_probs.update(normalized_PAGERANK)
# # # print ('normalized_PAGERANK', normalized_PAGERANK)
# #
# print(page_rank_probs)
# print(PAGE_RANK)
#
# # def find_ngrams(input_list, n):
# # print ([input_list[i:] for i in range(n)])
# # return zip(*[input_list[i:] for i in range(n)])
# #
# # # for ng in find_ngrams(['I', 'live', 'in', 'kingston'], 2):
# # # print ('ng', ng)
# # print (range(5))
#
# # a = np.array([[1, 4, 2], [3, 5, 6]])
# # b = np.array([[1,1], [2,2], [3, 3], [4, 4], [5, 5], [6, 6]])
# # print (b[a[0]])
# a = np.array([1, 2, 4, 5, 56])
# print ( int(np.sum(a > 5)))
# def argsort(seq):
# # http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
# return sorted(range(len(seq)), key=seq.__getitem__)
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp((x - np.max(x)) * 1)
return e_x / e_x.sum()
def artificial_nli(cider_data, nli_data, nli_origin, thres):
d = json.load(open(cider_data, 'r'))
dn = json.load(open(nli_data, 'r'))
do = json.load(open(nli_origin, 'r'))['prob_graph']
prob_array = []
cnt = 0
for e in do.values():
if cnt >= 500:
break
for ee in e:
# print ('ee', ee, 'e', e)
if ee[2] > 0.5:
prob_array.append(ee[2])
cnt += 1
prob_array = np.array(prob_array)
prob_edges = []
node_nums = []
has_edges = 0
xxx = 0
for e, en in tqdm(zip(d, dn)):
tmp_edges = []
num = len(en)
new_e = softmax(np.array([ele[1] for ele in e[:num]]))
# print (new_e)
arg_new_e = np.argsort(new_e)
low = 1.0 / len(en) * (thres)
high = 1.0 / len(en) * (1.0 / thres)
small_num = int(np.sum(new_e < low))
# print ('small_num', small_num)
large_num = int(np.sum(new_e > high))
# print ('large_num', large_num)
if small_num > 0:
# print (np.random.choice(prob_array, size=(num - small_num,)))
# print (np.random.rand(num - small_num,) * 0.1)
sampl = np.random.choice(prob_array, size=(num - small_num,)) + (np.random.rand(num - small_num,) - 0.5) * 0.05
sampl = np.sort(sampl)
sampl = np.clip(sampl, 0.501, 0.999)
# sampl = np.sort(np.random.uniform(low=0.5, high=1.0, size=(num - small_num,)))
for i in range(num - small_num):
tmp_edges.append([arg_new_e[small_num-1], arg_new_e[small_num + i], sampl[i]])
if large_num > 0:
sampl = np.random.choice(prob_array, size=(large_num,)) + (np.random.rand(large_num,) - 0.5) * 0.1
sampl = np.sort(sampl)
sampl = np.clip(sampl, 0.501, 0.999)
for i in range(1, large_num+1):
if num - i > small_num:
tmp_edges.append([arg_new_e[small_num], arg_new_e[-i], sampl[-i]])
else:
break
if small_num > 0 or large_num > 0:
has_edges += 1
ext_edges = []
for i in range(len(tmp_edges)):
cur = tmp_edges[i][1]
for j in range(i+1, len(tmp_edges)):
if cur == tmp_edges[j][0]:
sampl = np.random.choice(prob_array, size=(1)) + (np.random.rand(1)) * 0.1
ext_edges.append([tmp_edges[i][0], tmp_edges[j][1], sampl[0]])
xxx += 1
tmp_edges.extend(ext_edges)
prob_edges.append(tmp_edges)
node_nums.append(num)
print('xxx', xxx)
json.dump({'edges': prob_edges, 'nodes': node_nums}, open('experiment/coco_nli_graph_pg1.json', 'w'))
print ('has_edges', has_edges)
return prob_edges
def minorchanges_nli(cider_data, nli_data, nli_origin, change=True):
d = json.load(open(cider_data, 'r')) # test and val data excluded
dn = json.load(open(nli_data, 'r'))
do = json.load(open(nli_origin, 'r'))
h_adj, l_adj, no_change, no_change_hi = 0, 0, 0, 0
for idx, (e, en) in tqdm(enumerate(zip(d, dn))):
num = len(en)
new_e = softmax(np.array([ele[1] for ele in e[:num]]))
# print (new_e)
if np.array_equal(new_e, np.array([0.2, 0.2, 0.2, 0.2, 0.2])):
# print ('skip')
continue
arg_new_e = np.argsort(new_e)
lo, hi = arg_new_e[0], arg_new_e[-1]
nli_e = np.array([ele[1] for ele in en])
arg_nli_e = np.argsort(nli_e)
nlo, nhi = arg_nli_e[0], arg_nli_e[-1]
if lo == nlo and hi == nhi:
no_change += 1
continue
if hi == nhi:
no_change_hi += 1
if change:
current_prob = do['prob_graph'][str(idx)]
current_edge = do['graph'][str(idx)]
forward_ix_list, backward_ix_list = [], []
for ix in range(len(current_prob)):
if current_prob[ix][0] == lo:
forward_ix_list.append(ix)
if current_prob[ix][0] == hi:
backward_ix_list.append(ix)
assert len(forward_ix_list) > 0 and len(backward_ix_list) > 0, 'has to find the index'
def samp(a, b):
return np.random.uniform(low=a, high=b, size=1)[0]
# sampl2 = np.random.uniform(low=0.01, high=0.05, size=1)[0]
for f_ix in forward_ix_list:
if current_prob[f_ix][2] < 0.5:
if current_prob[f_ix][1] == hi:
current_prob[f_ix][2] = samp(0.89, 0.99)
current_edge[f_ix][2] = 1.0
h_adj += 1
elif np.random.uniform() > 0.85:
current_prob[f_ix][2] = samp(0.51, 0.95)
current_edge[f_ix][2] = 1.0
h_adj += 1
for b_ix in backward_ix_list:
if current_prob[b_ix][2] > 0.5:
current_prob[b_ix][2] = samp(0.01, 0.05)
current_edge[b_ix][2] = 0.0
l_adj += 1
do['prob_graph'][str(idx)] = current_prob
do['graph'][str(idx)] = current_edge
print('no need to change', no_change)
print('h_adj', h_adj, 'l_adj', l_adj, 'nochangehi', no_change_hi)
if change:
json.dump(do, open('experiment/coco_nli_new.json', 'w'))
return
def show_lo_and_high(cider_data, nli_data, data_json):
d = json.load(open(cider_data, 'r')) # test and val data excluded
dn = json.load(open(nli_data, 'r'))
da = json.load(open(data_json, 'r'))['images']
cnt = 0
for idx, (e, en, enn) in tqdm(enumerate(zip(d, dn, da))):
num = len(en)
new_e = softmax(np.array([ele[1] for ele in e[:num]]))
if np.array_equal(new_e, np.array([0.2, 0.2, 0.2, 0.2, 0.2])):
continue
arg_new_e = np.argsort(new_e)
lo, hi = arg_new_e[0], arg_new_e[-1]
cnt += 1
if cnt > 100:
break
print ("*"*20)
print ('low:', enn['sentences'][lo]['raw'])
print ('high', enn['sentences'][hi]['raw'])
print ("*"*20)
return
minorchanges_nli('data/prob_cand_inst_v3', 'data/nli_dist_rl', 'experiment/coco_nli_relation.json')
# show_lo_and_high('data/prob_cand_inst_v3', 'data/nli_weights_v2', 'data/dataset_karparthy.json')
# print (np.array([1.0/5]*5))
|
Gitsamshi/Nli-image-caption
|
playground.py
|
playground.py
|
py
| 8,056
|
python
|
en
|
code
| 3
|
github-code
|
6
|
833697082
|
import numpy as np
import matplotlib.pyplot as plt
n = 6
maxI = 10000000
eps = 1e-6
def f(x):
return (0.5*np.reshape(x, (1, n))).dot(A).dot(np.reshape(x, (n, 1))) + b.dot(x)
def grad(x):
return np.reshape(A.dot(x), (1, n)) + b
def H(x):
return A
def Hinv(x):
return np.linalg.inv(A)
A = np.random.randn(n, n)*10
b = np.random.rand(n)
x = np.zeros((n,1))
x0 = x
i = 0
while np.linalg.norm(grad(x)) > eps:
x = x - Hinv(x).dot(np.reshape(grad(x), (n, 1)))
i += 1
if (i >= maxI):
print("Ошибка")
break
print("Безусловный минимум методом Ньютона")
print(x)
r = np.random.rand(1, 1)
c = np.reshape((x - x0), (1, n)).dot(x - x0)
while c - r*r <= 0:
r = np.random.rand(1, 1)
print("Число r")
print(r)
print("Проверим, что для безусловного минимума не выполнено условие (x - x0)*(x - x0) -r*r <= 0")
print("(x - x0)*(x - x0) -r*r = ", str(c - r*r))
M = A
col = [0. for i in range(0,n)]
col[0] = float(c)
col = np.array(col, dtype=np.float64)
M = np.column_stack((M, np.reshape(col, (n, 1))))
c1 = np.append(x-x0, 0)
M = np.vstack((M, c1))
B = np.append(-b, r*r)
Y = np.linalg.inv(M.astype('float')).dot(B)
x = Y[0:n]
print("Условный минимум")
print(x)
print("Проверка")
x = np.reshape(x, (n, 1))
print(np.reshape((x - x0), (1, n)).dot(x - x0) - r*r)
|
UIIf/Study
|
3course/Optimization/FirstLab/lab3.py
|
lab3.py
|
py
| 1,423
|
python
|
ru
|
code
| 0
|
github-code
|
6
|
42493202461
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 2 16:13:15 2019
@author: michal
"""
from IOtemp import readAuthors, readPublications
from MKAR_flow import MKAR_FlowTheory
from math import ceil
import networkx as nx
def checkN(workersList):
N = 0
for w in workersList:
N += w.time
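    # Publication-slot budget: 3 slots per 4 units of total reported worker time, rounded up.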
return ceil(N/4.)*3
#
#workersList = readAuthors("data/pracownicy.xls")
workersList = readAuthors("data/adamczykGroup.xls")
publicationsList = readPublications("data/publikacje_update.xls", workersList)
postFix = "All"
N = checkN(workersList)
pm = MKAR_FlowTheory(workersList, publicationsList)
pm.preprocessing()
publicationsIds = pm.getAllPublicationsFromMainGraph()
data = pm.solveFlowProblem(publicationsIds, N, True)
maxPoinst = data["maxPoints"]
maxSlots = data["maxSlots"]
flowG = data["flowGraph"]
flowDict = data["flowDict"]
print("Maksymalna liczba punktow: ", maxPoinst)
print("Maksymalna liczba wykorzystanych slotów: ", maxSlots)
uniqueWeights = set()
for p in pm.publicationList:
uniqueWeights.add( p.size)
def writeSlotsUsageLog( pubMatch, flowDict , logName ):
authorsIds = pubMatch.authorsDict.keys()
log = open(logName, "w")
log.write("Author\tSlots available\tSlots used\tSlots used[%]\tall publication no\n")
for a in authorsIds:
if a in flowDict:
slotsUsed = flowDict[a]["t"]/100.
pubNo = len(list(pubMatch.pubGraph.neighbors(a)))
else:
slotsUsed = 0
pubNo = 0
slotsAvailable = pm.authorsDict[a].slots/100.
if slotsAvailable > 0:
log.write(a+"\t"+str(slotsAvailable)+"\t"+str(slotsUsed)+"\t"+str(slotsUsed*100/slotsAvailable)+"\t"+str(pubNo)+"\n")
log.close()
writeSlotsUsageLog( pm, flowDict, "slotsUsage"+postFix+".csv" )
def writeUnusedPublicationsLog(pubMatch, flowDict, logName):
pubIds = pubMatch.publicationDict.keys()
log = open(logName, "w")
log.write("Tytul\tRok\tAutorzy\tPunkty\tSlots available\n")
for p in pubIds:
if p in flowDict:
slotsUsed = 0
for c in flowDict[p]:
slotsUsed += flowDict[p][c]
else:
slotsUsed = 0
pObj = pm.publicationDict[p]
if slotsUsed == 0:
authors = [ a.name for a in pObj.authors ]
authors = ", ".join(authors)
log.write(pObj.title.replace("\t", "") +"\t"+str(pObj.year)+"\t"+str(authors)+"\t"+str(pObj.points/100.)+"\t"+str(pObj.size/100.)+"\n")
log.close()
writeUnusedPublicationsLog(pm, flowDict, "unusedPublications"+postFix+".csv")
def writeUsedPublicationsLog(pubMatch, flowDict, logName):
pubIds = pubMatch.publicationDict.keys()
log = open(logName, "w")
log.write("Tytul\tRok\tAutorzy\tPunkty\tSlots\tFractional\n")
for p in pubIds:
authors = []
if p in flowDict:
slotsUsed = 0
for c in flowDict[p]:
slotsUsed += flowDict[p][c]
if flowDict[p][c] > 0:
authors.append(c+ "("+str(flowDict[p][c])+")")
else:
slotsUsed = 0
pObj = pm.publicationDict[p]
if slotsUsed > 0:
authorsNo = len(authors)
fractional = False
if authorsNo > 1 or slotsUsed < pObj.size:
fractional = True
authors = ", ".join(authors)
log.write(pObj.title.replace("\t", "") +"\t"+str(pObj.year)+"\t"+str(authors)+"\t"+str(pObj.points/100.)+"\t"+str(pObj.size/100.)+"\t"+str(fractional)+"\n")
log.close()
writeUsedPublicationsLog(pm, flowDict, "usedPublications"+postFix+".csv")
fractional = pm.extractFractionalGraph(flowDict)
pm.pubGraph = fractional
pm.printStatus()
#layout = nx.spring_layout(fractional)
#nx.draw_networkx(fractional, layout)
|
chemiczny/pubMatch
|
pubMatch/fordFulkersonAproximation.py
|
fordFulkersonAproximation.py
|
py
| 3,927
|
python
|
en
|
code
| 0
|
github-code
|
6
|
29437968143
|
tabby_cat = "\tI'm tabbed in."
#The variable tabby_cat stores the string with a tab escape
persian_cat = "I'm split\non a line."
#The variable persian_cat stores the string and creates a new line within the text
backslash = "I'm\\a\\cat."
#The variable backslash stores the string with escaped backslashes within
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
#The variable fat_cat stores the string with tab and newline escapes within the string
print(tabby_cat)
#Displays the value of the variable tabby_cat
print(persian_cat)
#Displays the value of the variable persian_cat
print(backslash)
#Displays the value of the variable backslash
print(fat_cat)
#Displays the value of the variable fat_cat
|
ERICMUKONGE/TRY4
|
ex10.py
|
ex10.py
|
py
| 719
|
python
|
en
|
code
| 1
|
github-code
|
6
|
15393475408
|
# -*- coding:utf-8 -*-
class Solution:
    # Returns [a, b], where a and b are the two numbers that appear exactly once
def FindNumsAppearOnce(self, array):
# write code here
resultEx = 0
for obj in array:
resultEx ^= obj
indexof1 = self.findFirstBit1(resultEx)
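        # Partition the array by indexof1, the lowest set bit of (a ^ b): the two
        # unique numbers differ at that bit, while every duplicate pair lands in
        # the same group and cancels out under XOR.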
num1,num2 = 0,0
for obj in array:
if self.isIndex1(obj,indexof1):
num1 ^= obj
else:
num2 ^= obj
return num1, num2
def findFirstBit1(self, num):
indexBit = 0
while not num & 1:
num >>= 1
indexBit += 1
return indexBit
def isIndex1(self, num,index):
num >>= index
return num & 1
|
shakesVan/Playground
|
Nowcoder/56.py
|
56.py
|
py
| 717
|
python
|
en
|
code
| 0
|
github-code
|
6
|
7176590579
|
import secrets
from eth_keys import (
keys,
)
from eth_utils import (
int_to_big_endian,
)
try:
import factory
except ImportError:
raise ImportError(
"The p2p.tools.factories module requires the `factory_boy` library."
)
def _mk_private_key_bytes() -> bytes:
return int_to_big_endian(secrets.randbits(256)).rjust(32, b"\x00")
class PrivateKeyFactory(factory.Factory):
class Meta:
model = keys.PrivateKey
private_key_bytes = factory.LazyFunction(_mk_private_key_bytes)
def _mk_public_key_bytes() -> bytes:
return PrivateKeyFactory().public_key.to_bytes()
class PublicKeyFactory(factory.Factory):
class Meta:
model = keys.PublicKey
public_key_bytes = factory.LazyFunction(_mk_public_key_bytes)
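# Assumed usage sketch: PrivateKeyFactory() yields a keys.PrivateKey built from 32
# random bytes, and PublicKeyFactory() wraps the public-key bytes of a fresh key.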
|
ethereum/py-evm
|
eth/tools/factories/keys.py
|
keys.py
|
py
| 772
|
python
|
en
|
code
| 2,109
|
github-code
|
6
|
39915090383
|
import numpy as np
import pandas as pd
import math
import requests, json, time
from datetime import datetime
class Processor:
def __init__(self, history_length):
self.history_length = history_length
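        # Note: history_length is stored but currently unused; fetchData below always pulls 2000 minutes.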
def fetchHistoricalDataForTicker(self, fsym, tsym, lim):
df_cols = ['time', 'open', 'high', 'low', 'close', 'volumefrom', 'volumeto']
curr_ts = str(int(time.time()))
limit = str(lim)
histURL = 'https://min-api.cryptocompare.com/data/histominute?fsym=' + fsym + '&tsym=' + tsym + '&limit=' + limit + '&toTs=' + curr_ts + '&aggregate=1' + '&e=Coinbase' #CCCAGG for aggregated
resp = requests.get(histURL)
resp_json = json.loads(resp.content.decode('utf-8'))
        # Build all rows first; DataFrame.append was deprecated and later removed from pandas.
        rows = []
        for i in range(0, lim):
            rows.append([resp_json['Data'][i][val] for val in df_cols])
        df = pd.DataFrame(rows, columns=df_cols)
        if df.empty:
            return
        df = df.rename(columns={"time": "ts"})
        df.index = pd.to_datetime(df.ts, unit='s')
        df = df.drop('ts', axis=1)
        return df.to_numpy()  # as_matrix() was removed from pandas
def fetchData(self):
data = self.fetchHistoricalDataForTicker('ETH', 'USD', 2000)
        # Arbitrary 1500/500 split
        train_data = data[:1500, :]
        test_data = data[1500:, :]
return {'train': train_data, 'test': test_data}
|
kwhuo68/rl-btc
|
processor.py
|
processor.py
|
py
| 1,365
|
python
|
en
|
code
| 3
|
github-code
|
6
|
31309035194
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#Library Imports
from __future__ import print_function, division
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout
from keras.layers.recurrent import LSTM, GRU, SimpleRNN
from keras.layers import Input
from keras.utils.data_utils import get_file
from keras.optimizers import Nadam
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.layers.normalization import BatchNormalization
from collections import Counter
import numpy as np
import random
import sys
import os
import getopt
import copy
import csv
import io
import time
import ipywidgets
import traitlets
from datetime import datetime
from math import log
import pandas as pd
from ATLSTM_layer import ATLSTM_layer
# In[ ]:
def load_data(eventlog, path, sep="|"):
return pd.read_csv('../../../dataset/'+path+'/%s' % eventlog, sep=sep, error_bad_lines=False).values
# # In[ ]:
def get_divisor(timeseqs):
return np.mean([item for sublist in timeseqs for item in sublist])
def create_model_folder(name, dirc):
i = 1
path_dir = "../../../results/output_files/models/"+dirc
if os.path.isdir(path_dir) == False:
try:
os.mkdir(path_dir)
except OSError:
print ("Creation of the directory %s failed" % path_dir)
else:
print ("Successfully created the directory %s " % path_dir)
for i in range(100):
new_name = name + "_v" + str(i)
path_name = path_dir + "/" + new_name
if os.path.isdir(path_name) == False:
try:
os.mkdir(path_name)
except OSError:
continue
else:
print ("Successfully created the directory %s " % path_name)
break
return new_name
# In[ ]:
def main(argv = None):
if argv is None:
argv = sys.argv
inputfile = ""
directory = ""
sep=""
num_add_feats = 0
try:
opts, args = getopt.getopt(argv, "hi:d:n:")
except getopt.GetoptError:
print(os.path.basename(__file__),
"-i <input_file> -d <directory> -s <separator> -n <num_add_feats>")
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(os.path.basename(__file__),
"-i <input_file> -d <directory> -s <separator> -n <num_add_feats>")
sys.exit()
elif opt == "-i":
inputfile = arg
elif opt == "-d":
directory = arg
elif opt == "-s":
sep = arg
elif opt == "-n":
num_add_feats = int(arg)
begin_time = datetime.now()
#helper variables
lines = [] #these are all the activity seq
# timeseqs = [] #time sequences (differences between two events)
# timeseqs2 = [] #time sequences (differences between the current and first)
lastcase = ''
line = ''
firstLine = True
lines = []
timeseqs = []
timeseqs2 = []
timeseqs3 = []
timeseqs4 = []
add_feats = []
times = []
times2 = []
times3 = []
times4 = []
add_feat = -1
numlines = 0
casestarttime = None
lasteventtime = None
ascii_offset = 161
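    # Each event type is encoded as a single character, offset into the printable Latin-1 range.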
spamreader = load_data(inputfile, directory, sep)
for row in spamreader:
t = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
if row[0]!=lastcase:
casestarttime = t
lasteventtime = t
lastcase = row[0]
if not firstLine:
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
timeseqs3.append(times3)
timeseqs4.append(times4)
add_feats.append(list(add_feat))
line = ''
times = []
times2 = []
times3 = []
times4 = []
add_feat = row[3:]
# add_feat = int(row[3])
numlines+=1
line+=chr(int(row[1])+ascii_offset)
timesincelastevent = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(lasteventtime))
timesincecasestart = datetime.fromtimestamp(time.mktime(t))-datetime.fromtimestamp(time.mktime(casestarttime))
midnight = datetime.fromtimestamp(time.mktime(t)).replace(hour=0, minute=0, second=0, microsecond=0)
timesincemidnight = datetime.fromtimestamp(time.mktime(t))-midnight
timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds
timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds
        timediff3 = timesincemidnight.seconds  # seconds since midnight, i.e. time of day
timediff4 = datetime.fromtimestamp(time.mktime(t)).weekday() #day of the week
times.append(timediff)
times2.append(timediff2)
times3.append(timediff3)
times4.append(timediff4)
# add_feats.append(add_feat)
lasteventtime = t
firstLine = False
lines.append(line)
timeseqs.append(times)
timeseqs2.append(times2)
timeseqs3.append(times3)
timeseqs4.append(times4)
add_feats.append(add_feat)
numlines+=1
divisor = get_divisor(timeseqs) #average time between events
print('divisor: {}'.format(divisor))
divisor2 = get_divisor(timeseqs2) #average time between current and first events
print('divisor2: {}'.format(divisor2))
elems_per_fold = int(round(numlines/3))
fold1 = lines[:elems_per_fold]
fold1_t = timeseqs[:elems_per_fold]
fold1_t2 = timeseqs2[:elems_per_fold]
fold1_t3 = timeseqs3[:elems_per_fold]
fold1_t4 = timeseqs4[:elems_per_fold]
fold1_ft = add_feats[:elems_per_fold]
with open('../../../results/output_files/folds/fold1.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in zip(fold1, fold1_t):
spamwriter.writerow([s +'#{}'.format(t) for s, t in zip(row, timeseq)])
fold2 = lines[elems_per_fold:2*elems_per_fold]
fold2_t = timeseqs[elems_per_fold:2*elems_per_fold]
fold2_t2 = timeseqs2[elems_per_fold:2*elems_per_fold]
fold2_t3 = timeseqs3[elems_per_fold:2*elems_per_fold]
fold2_t4 = timeseqs4[elems_per_fold:2*elems_per_fold]
fold2_ft = add_feats[elems_per_fold:2*elems_per_fold]
with open('../../../results/output_files/folds/fold2.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in zip(fold2, fold2_t):
spamwriter.writerow([s +'#{}'.format(t) for s, t in zip(row, timeseq)])
fold3 = lines[2*elems_per_fold:]
fold3_t = timeseqs[2*elems_per_fold:]
fold3_t2 = timeseqs2[2*elems_per_fold:]
fold3_t3 = timeseqs3[2*elems_per_fold:]
fold3_t4 = timeseqs4[2*elems_per_fold:]
fold3_ft = add_feats[2*elems_per_fold:]
with open('../../../results/output_files/folds/fold3.csv', 'w') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row, timeseq in zip(fold3, fold3_t):
spamwriter.writerow([s +'#{}'.format(t) for s, t in zip(row, timeseq)])
lines = fold1 + fold2
lines_t = fold1_t + fold2_t
lines_t2 = fold1_t2 + fold2_t2
lines_t3 = fold1_t3 + fold2_t3
lines_t4 = fold1_t4 + fold2_t4
lines_ft = fold1_ft + fold2_ft
step = 1
sentences = []
softness = 0
next_chars = []
lines = list(map(lambda x: x+'!',lines)) #put delimiter symbol
maxlen = max(list(map(lambda x: len(x),lines))) #find maximum line size
# next lines here to get all possible characters for events and annotate them with numbers
chars = list(map(lambda x: set(x),lines))
chars = list(set().union(*chars))
chars.sort()
target_chars = copy.copy(chars)
chars.remove('!')
print('total chars: {}, target chars: {}'.format(len(chars), len(target_chars)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
target_char_indices = dict((c, i) for i, c in enumerate(target_chars))
target_indices_char = dict((i, c) for i, c in enumerate(target_chars))
print(indices_char)
sentences_t = []
sentences_t2 = []
sentences_t3 = []
sentences_t4 = []
sentences_ft = []
next_chars_t = []
next_chars_t2 = []
next_chars_t3 = []
next_chars_t4 = []
next_chars_ft = []
for line, line_t, line_t2, line_t3, line_t4, line_ft in zip(lines, lines_t, lines_t2, lines_t3, lines_t4, lines_ft):
for i in range(0, len(line), step):
if i==0:
continue
#we add iteratively, first symbol of the line, then two first, three...
sentences.append(line[0: i])
sentences_t.append(line_t[0:i])
sentences_t2.append(line_t2[0:i])
sentences_t3.append(line_t3[0:i])
sentences_t4.append(line_t4[0:i])
sentences_ft.append(line_ft)
next_chars.append(line[i])
if i==len(line)-1: # special case to deal time of end character
next_chars_t.append(0)
next_chars_t2.append(0)
next_chars_t3.append(0)
next_chars_t4.append(0)
else:
next_chars_t.append(line_t[i])
next_chars_t2.append(line_t2[i])
next_chars_t3.append(line_t3[i])
next_chars_t4.append(line_t4[i])
next_chars_ft.append(line_ft)
print('nb sequences:', len(sentences))
print('Vectorization...')
num_features = len(chars)+5+num_add_feats+1
print('num features: {}'.format(num_features))
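    # Feature layout per timestep: one-hot event chars, then 5 time features
    # (position, delta-t, time-in-case, time-of-day, weekday), then any extra
    # case attributes; the final +1 slot appears reserved for the commented-out
    # target-time feature below.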
X = np.zeros((len(sentences), maxlen, num_features), dtype=np.float32)
y_a = np.zeros((len(sentences), len(target_chars)), dtype=np.float32)
y_t = np.zeros((len(sentences)), dtype=np.float32)
for i, sentence in enumerate(sentences):
leftpad = maxlen-len(sentence)
next_t = next_chars_t[i]
sentence_t = sentences_t[i]
sentence_t2 = sentences_t2[i]
sentence_t3 = sentences_t3[i]
sentence_t4 = sentences_t4[i]
# sentence_ft = sentences_ft[i][0]
# sentence_ft2 = sentences_ft[i][1]
# sentence_ft3 = sentences_ft[i][2]
for t, char in enumerate(sentence):
multiset_abstraction = Counter(sentence[:t+1])
for c in chars:
if c==char: #this will encode present events to the right places
X[i, t+leftpad, char_indices[c]] = 1
X[i, t+leftpad, len(chars)] = t+1
X[i, t+leftpad, len(chars)+1] = sentence_t[t]/divisor
X[i, t+leftpad, len(chars)+2] = sentence_t2[t]/divisor2
X[i, t+leftpad, len(chars)+3] = sentence_t3[t]/86400
X[i, t+leftpad, len(chars)+4] = sentence_t4[t]/7
# X[i, t+leftpad, len(chars)+5] = next_chars_t[t]
if num_add_feats > 0:
for f in range(num_add_feats):
X[i, t+leftpad, len(chars)+f+5] = sentences_ft[i][f]
# X[i, t+leftpad, len(chars)+6] = sentence_ft
# X[i, t+leftpad, len(chars)+7] = sentence_ft2
# X[i, t+leftpad, len(chars)+8] = sentence_ft3
for c in target_chars:
if c==next_chars[i]:
y_a[i, target_char_indices[c]] = 1-softness
else:
y_a[i, target_char_indices[c]] = softness/(len(target_chars)-1)
y_t[i] = next_t/divisor
np.set_printoptions(threshold=sys.maxsize)
# build the model:
print('Build model...')
print(X.shape)
main_input = Input(shape=(maxlen, num_features), name='main_input')
# train a 2-layer LSTM with one shared layer
l1 = ATLSTM_layer(128, return_sequences=True)(main_input) # the shared layer
l2_1 = ATLSTM_layer(128, return_sequences=False)(l1)
l2_2 = ATLSTM_layer(128, return_sequences=False)(l1)
    d1 = Dropout(.2)(l1)  # note: d1 is created but unused below, kept from the original experiment
    d2_1 = Dropout(.2)(l2_1)
    d2_2 = Dropout(.2)(l2_2)
act_output = Dense(len(target_chars), activation='softmax', kernel_initializer='glorot_uniform', name='act_output')(d2_1)
time_output = Dense(1, kernel_initializer='glorot_uniform', name='time_output')(d2_2)
model = Model(inputs=[main_input], outputs=[act_output, time_output])
model_folder = ""
if num_add_feats == 0:
model_folder = create_model_folder("model_nofeat",directory)
else:
model_folder = create_model_folder("model_"+num_add_feats+"_feat", directory)
opt = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004, clipvalue=3)
model.compile(loss={'act_output':'categorical_crossentropy', 'time_output':'mae'}, optimizer=opt)
early_stopping = EarlyStopping(monitor='val_loss', patience=42)
model_checkpoint = ModelCheckpoint("../../../results/output_files/models/"+directory+"/"+model_folder+'/model_{epoch:02d}-{val_loss:.2f}.h5',
monitor='val_loss',
verbose=0,
save_best_only=True,
save_weights_only=False,
mode='auto')
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
model.fit({'main_input': X}, {'act_output':y_a, 'time_output':y_t},
validation_split=0.2,
verbose=2,
callbacks=[early_stopping, model_checkpoint, lr_reducer],
batch_size=maxlen,
epochs=200
)
print(datetime.now() - begin_time)
if __name__ == "__main__":
main(sys.argv[1:])
|
RenatoMAlves/context-aware-time-prediction
|
code/Context-LSTM/train_addittional_feats_py3.py
|
train_addittional_feats_py3.py
|
py
| 13,972
|
python
|
en
|
code
| 0
|
github-code
|
6
|
75113992506
|
from timeit import default_timer as timer
target = 150
start = timer()
file = open('input.txt')
def permute(vals, target):
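    # Enumerates subsets of vals (taken in list order) that sum exactly to target;
    # equal container sizes still produce distinct subsets, as the puzzle requires.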
if len(vals) <=1:
if len(vals) == 1 and vals[0] == target:
return [vals]
return []
else:
ret = []
vals = [] + vals
while(len(vals) > 0):
val = vals[0]
vals.pop(0)
if val > target:
continue
if val == target:
ret.append([val])
continue
for perm in permute(vals, target - val):
ret.append([val] + perm)
return ret
containers = []
for line in file.readlines():
containers.append(int(line.strip()))
containers.sort(reverse=True)
# matches = []
# for perm in permute(containers, target):
# print perm
# total = 0
# for container in perm:
# total += container
# if target == total:
# print perm
# matches.append(perm)
result = len(permute(containers, target))
print("Completed in %fms" % ((timer() - start) * 1000))
print("%d is the result" % result)
|
kmckenna525/advent-of-code
|
2015/day17/part1.py
|
part1.py
|
py
| 932
|
python
|
en
|
code
| 2
|
github-code
|
6
|
38756128140
|
"""
Kubernetes server class implementation.
"""
from __future__ import absolute_import
import os
import logging
import uuid
from kubernetes import config
from kubernetes import client as k8sclient
from kubernetes.client.rest import ApiException
from retry import retry
from pytest_server_fixtures import CONFIG
from .common import (ServerClass,
merge_dicts,
ServerFixtureNotRunningException,
ServerFixtureNotTerminatedException)
log = logging.getLogger(__name__)
IN_CLUSTER = os.path.exists('/var/run/secrets/kubernetes.io/namespace')
fixture_namespace = CONFIG.k8s_namespace
if IN_CLUSTER:
config.load_incluster_config()
if not fixture_namespace:
with open('/var/run/secrets/kubernetes.io/namespace', 'r') as f:
            fixture_namespace = f.read().strip()
log.info("SERVER_FIXTURES_K8S_NAMESPACE is not set, using current namespace '%s'", fixture_namespace)
if CONFIG.k8s_local_test:
log.info("====== Running K8S Server Class in Test Mode =====")
config.load_kube_config()
fixture_namespace = 'default'
class NotRunningInKubernetesException(Exception):
"""Thrown when code is not running as a Pod inside a Kubernetes cluster."""
pass
class KubernetesServer(ServerClass):
"""Kubernetes server class."""
def __init__(self,
server_type,
cmd,
get_args,
env,
image,
labels={}):
super(KubernetesServer, self).__init__(cmd, get_args, env)
if not fixture_namespace:
raise NotRunningInKubernetesException()
self._image = image
self._labels = merge_dicts(labels, {
'server-fixtures': 'kubernetes-server-fixtures',
'server-fixtures/server-type': server_type,
'server-fixtures/session-id': CONFIG.session_id,
})
self._v1api = k8sclient.CoreV1Api()
def launch(self):
try:
log.debug('%s Launching pod' % self._log_prefix)
self._create_pod()
self._wait_until_running()
log.debug('%s Pod is running' % self._log_prefix)
except ApiException as e:
log.warning('%s Error while launching pod: %s', self._log_prefix, e)
raise
def run(self):
pass
def teardown(self):
self._delete_pod()
        # TODO: provide a flag to skip the wait to speed up the tests?
self._wait_until_teardown()
@property
def is_running(self):
try:
return self._get_pod_status().phase == 'Running'
except ApiException as e:
if e.status == 404:
# return false if pod does not exists
return False
raise
@property
def hostname(self):
if not self.is_running:
raise ServerFixtureNotRunningException()
return self._get_pod_status().pod_ip
@property
def namespace(self):
return fixture_namespace
@property
def labels(self):
return self._labels
def _get_pod_spec(self):
container = k8sclient.V1Container(
name='fixture',
image=self._image,
command=self._get_cmd(),
            env=[k8sclient.V1EnvVar(name=k, value=v) for k, v in self._env.items()],
)
return k8sclient.V1PodSpec(
containers=[container]
)
def _create_pod(self):
try:
pod = k8sclient.V1Pod()
pod.metadata = k8sclient.V1ObjectMeta(name=self.name, labels=self._labels)
pod.spec = self._get_pod_spec()
self._v1api.create_namespaced_pod(namespace=self.namespace, body=pod)
except ApiException as e:
log.error("%s Failed to create pod: %s", self._log_prefix, e.reason)
raise
def _delete_pod(self):
try:
body = k8sclient.V1DeleteOptions()
# delete the pod without waiting
body.grace_period_seconds = 1
self._v1api.delete_namespaced_pod(namespace=self.namespace, name=self.name, body=body)
except ApiException as e:
log.error("%s Failed to delete pod: %s", self._log_prefix, e.reason)
def _get_pod_status(self):
try:
resp = self._v1api.read_namespaced_pod_status(namespace=self.namespace, name=self.name)
return resp.status
except ApiException as e:
log.error("%s Failed to read pod status: %s", self._log_prefix, e.reason)
raise
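    # ~4 minutes of polling overall: 1s, 2s, 4s, 8s, then 10s between the remaining tries.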
@retry(ServerFixtureNotRunningException, tries=28, delay=1, backoff=2, max_delay=10)
def _wait_until_running(self):
log.debug("%s Waiting for pod status to become running", self._log_prefix)
if not self.is_running:
raise ServerFixtureNotRunningException()
@retry(ServerFixtureNotTerminatedException, tries=28, delay=1, backoff=2, max_delay=10)
def _wait_until_teardown(self):
try:
self._get_pod_status()
# waiting for pod to be deleted (expect ApiException with status 404)
raise ServerFixtureNotTerminatedException()
except ApiException as e:
if e.status == 404:
return
raise
@property
def _log_prefix(self):
return "[K8S %s:%s]" % (self.namespace, self.name)
|
man-group/pytest-plugins
|
pytest-server-fixtures/pytest_server_fixtures/serverclass/kubernetes.py
|
kubernetes.py
|
py
| 5,398
|
python
|
en
|
code
| 526
|
github-code
|
6
|
7722813982
|
from pacman.model.partitioned_graph.multi_cast_partitioned_edge import \
MultiCastPartitionedEdge
from spynnaker.pyNN.models.abstract_models.abstract_filterable_edge import \
AbstractFilterableEdge
class ProjectionPartitionedEdge(MultiCastPartitionedEdge,
AbstractFilterableEdge):
def __init__(self, presubvertex, postsubvertex, constraints):
MultiCastPartitionedEdge.__init__(
self, presubvertex, postsubvertex, constraints)
AbstractFilterableEdge.__init__(self)
self._synapse_sublist = None
self._weight_scales = None
@property
def weight_scales(self):
return self._weight_scales
# **YUCK** setters don't work properly with inheritance
def weight_scales_setter(self, value):
self._weight_scales = value
def get_synapse_sublist(self, graph_mapper):
"""
Gets the synapse list for this subedge
"""
pre_vertex_slice = \
graph_mapper.get_subvertex_slice(self._pre_subvertex)
post_vertex_slice = \
graph_mapper.get_subvertex_slice(self._post_subvertex)
if self._synapse_sublist is None:
associated_edge = \
graph_mapper.get_partitionable_edge_from_partitioned_edge(self)
self._synapse_sublist = \
associated_edge.synapse_list.create_atom_sublist(
pre_vertex_slice, post_vertex_slice)
return self._synapse_sublist
def get_n_rows(self, graph_mapper):
pre_vertex_slice = graph_mapper.get_subvertex_slice(
self._pre_subvertex)
return pre_vertex_slice.n_atoms
def free_sublist(self):
"""
Indicates that the list will not be needed again
"""
self._synapse_sublist = None
def filter_sub_edge(self, graph_mapper, common_report_folder):
"""determines if theres an actual connection in this subedge in temrs of
synaptic data
"""
pre_vertex_slice = graph_mapper.get_subvertex_slice(
self._pre_subvertex)
post_vertex_slice = graph_mapper.get_subvertex_slice(
self._post_subvertex)
edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(self)
return not edge.synapse_list.is_connected(pre_vertex_slice,
post_vertex_slice)
@property
def synapse_sublist(self):
return self._synapse_sublist
def is_multi_cast_partitioned_edge(self):
return True
|
ominux/sPyNNaker
|
spynnaker/pyNN/models/neural_projections/projection_partitioned_edge.py
|
projection_partitioned_edge.py
|
py
| 2,558
|
python
|
en
|
code
| null |
github-code
|
6
|
31672849616
|
'''
try:
    from PyQt5 import QtCore, QtWidgets, QtGui
    from PyQt5.QtWebEngineWidgets import QWebEngineView
    from PyQt5.QtCore import QUrl
except ImportError:
    print("PyQt5 is not installed")
'''
from PySide2 import QtCore, QtWidgets, QtGui
from PySide2.QtWebEngineWidgets import QWebEngineView
from PySide2.QtCore import QUrl
#except ImportError:
#print("PySide2 is not installed")
#from PySide2.QtWebKit import *
#from PySide2.QtWebKitWidgets import *
import sys
import socket
import random
#import discovery as discovery
def discover(port):
print("Discovering on port: {}".format(port))
host = '255.255.255.255'
data = bytes([0x70, 0x63, 0x00, 0x06, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF])
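    # Discovery probe (assumed layout): 'pc' magic bytes, a 16-bit payload length,
    # then the broadcast MAC ff:ff:ff:ff:ff:ff so every modem answers.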
timeout = 1
results = []
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.settimeout(timeout)
try:
s.sendto(data, (host, port))
except socket.error:
print(socket.error)
try:
while True:
data = s.recv(1024)
try:
modem = parse(data)
results.append(modem)
print("{0} - {1} - {2}".format(modem["mac"], modem["ip"], modem["product"]))
except Exception as e:
print("Parse exception: {0}".format(e))
except socket.timeout:
#print(socket.timeout)
pass
except socket.error:
print(socket.error)
finally:
return results
except Exception as e:
print("Exception: {0}".format(e))
#print("Exception{0}".format(e.args))
def parseMac(data):
#mac = line.subsring(2,8)
mac = ""
try:
for x in range(2,8):
n = hex(data[x])[2:4]
if len(n) == 1:
n = "" + "0" + n
mac += n.upper()
if x < 7:
mac += ":"
return mac
except Exception as e:
print("Parse Mac exception: {0}".format(e))
return mac
def parseIp(data):
ip = ""
try:
for x in range(9,13):
ip += str(data[x])
if x < 12:
ip += "."
return ip
except Exception as e:
print("Parse IP exception: {0}".format(e))
return ip
def rawToStr(data):
s = ""
for each in data:
s += chr(each)
return s
def parse(data):
microhard = "00:0F:92"
mac = parseMac(data)
modem = {}
if mac.count(microhard):
ip = parseIp(data)
chars = rawToStr(data)
line = chars[13:len(chars)].split("\0")
#print(line)
description = line[0]
address = line[1]
product = line[2]
software = line[3]
mode = line[4]
network = line[5]
modem["mac"]= mac
modem["ip"] = ip
modem["description"] = description
modem["address"] = address
modem["product"] = product
modem["software"] = software
modem["mode"] = mode
modem["network"] = network
modem["apn"] = ''
modem["domain"] = ''
cellModems = ["VIP4G", "IPn3G", "IPn4G", "Bullet", "Dragon"]
for each in cellModems:
if modem["product"].count(each):
apn = line[7]
domain = line[9]
modem["apn"] = apn
modem["domain"] = domain
return modem
else:
print("Not a microhard modem: {0}". format(mac))
class DiscoveryWorker(QtCore.QThread):
completedSignal = QtCore.Signal(dict, str)
def __init__(self, port):
QtCore.QThread.__init__(self)
self.port = port
def run(self):
print("Discovery worker... on port {}".format(self.port))
result = discover(self.port)
self.completedSignal.emit(result, self.objectName())
class DiscoverWidget(QtWidgets.QWidget):
cellPort = 20097
vipPort = 20077
    ipPort = 20087
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.threads = {}
self.cellButton = QtWidgets.QPushButton("Cell")
self.cellButton.clicked.connect(self.cell_button_action)
self.vipButton = QtWidgets.QPushButton("VIP")
self.vipButton.clicked.connect(self.vip_button_action)
self.ipButton = QtWidgets.QPushButton("IP")
self.ipButton.clicked.connect(self.ip_button_action)
self.allButton = QtWidgets.QPushButton("All")
self.allButton.clicked.connect(self.all_button_action)
self.clearButton = QtWidgets.QPushButton("Clear")
self.clearButton.clicked.connect(self.clear_button_action)
self.buttonsLayout = QtWidgets.QHBoxLayout()
self.buttonsLayout.addWidget(self.cellButton)
self.buttonsLayout.addWidget(self.vipButton)
self.buttonsLayout.addWidget(self.ipButton)
self.buttonsLayout.addWidget(self.allButton)
self.buttonsLayout.addWidget(self.clearButton)
self.headers = ["MAC", "IP Address", "Product", "Description", "Software", "APN", "Domain", "Mode", "Network", "Address"]
self.table = QtWidgets.QTableWidget(0, len(self.headers), self)
self.table.setHorizontalHeaderLabels(self.headers)
self.table.resizeColumnsToContents()
self.table.horizontalHeader().setStretchLastSection(True)
#self.table.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
#self.table.horizontalHeader().setStretchLastSection(True)
self.table.doubleClicked.connect(self.on_click)
#self.label = QtWidgets.QLabel("Discovery")
#self.label.setAlignment(QtCore.Qt.AlignCenter)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addLayout(self.buttonsLayout)
self.layout.addWidget(self.table)
self.setLayout(self.layout)
self.resize(800, 400)
self.setWindowTitle("Discover IP")
self.show()
def on_click(self):
for item in self.table.selectedItems():
#print(item.row(), item.column(), item.text())
if item.column() == 1:
url = "https://" + item.text()
#self.view = QWebEngineView()
#self.view.load(QUrl(url))
#self.view.show()
browser = "firefox --new-tab " + url
print(browser)
self.process = QtCore.QProcess()
self.process.start(browser)
def completeAction(self, modems, name):
print("Completed Action... thread name {}".format(name))
self.setTable(modems)
thread = self.threads.get(name)
#if thread.isFinished():
self.threads.pop(name)
#else handle active thread...wait?
print("Active workers {}".format(len(self.threads)))
def cell_button_action(self):
#QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
name = str(random.randint(0, 65534))
worker = DiscoveryWorker(self.cellPort)
worker.completedSignal.connect(self.completeAction)
worker.setObjectName(name)
worker.start()
self.threads.update({name: worker})
#print(worker.currentThread(), QtCore.QThread.currentThread(), worker.thread())
def vip_button_action(self):
#QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
name = str(random.randint(0, 65534))
worker = DiscoveryWorker(self.vipPort)
worker.completedSignal.connect(self.completeAction)
worker.setObjectName(name)
worker.start()
self.threads.update({name: worker})
def ip_button_action(self):
#QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
name = str(random.randint(0, 65534))
worker = DiscoveryWorker(self.ipPort)
worker.completedSignal.connect(self.completeAction)
worker.setObjectName(name)
worker.start()
self.threads.update({name: worker})
def all_button_action(self):
self.cell_button_action()
self.vip_button_action()
self.ip_button_action()
def setTable(self, modems):
self.table.setSortingEnabled(False)
index = 0
for modem in modems:
index += 1
self.table.setRowCount(index)
row = index - 1
keys = ["mac", "ip", "product", "description", "software", "apn", "domain", "mode", "network", "address"]
col = 0
for key in keys:
cell = modem[key]
self.item = QtWidgets.QTableWidgetItem(cell)
if key == "ip":
self.item.setForeground(QtGui.QColor(0, 0, 255))
self.table.setItem(row, col, self.item)
col += 1
self.table.resizeColumnsToContents()
self.table.setSortingEnabled(True)
#Not needed with threads
#QtWidgets.QApplication.restoreOverrideCursor()
def clear_button_action(self):
self.table.clear()
self.table.setRowCount(0)
self.table.setHorizontalHeaderLabels(self.headers)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
widget = DiscoverWidget()
sys.exit(app.exec_())
else:
print("Importing {0}".format(__name__))
|
ArturW/Discovery
|
pydiscoveryt.py
|
pydiscoveryt.py
|
py
| 10,341
|
python
|
en
|
code
| 0
|
github-code
|
6
|
73051866108
|
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow import keras
model = tf.keras.models.load_model('C:/AI/model.h5')
class_names = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
img_height = 180
img_width = 180
sunflower_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg"
sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)
img = keras.preprocessing.image.load_img(
sunflower_path, target_size=(img_height, img_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
plt.imshow(img)
plt.axis('off')
plt.show()
print(score)
print(class_names[np.argmax(score)])
print(100 * np.max(score))
|
dasfef/PyQt5
|
Ex20221202_3(h5 activate).py
|
Ex20221202_3(h5 activate).py
|
py
| 925
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16304234084
|
f = open("Data/URL.txt", "a")
#inp=input("Enter the url\n")
#inp+="\n"
import sys
print("Enter the data")
data = sys.stdin.read() # Use Ctrl d to stop the input
data=list(data)
indices = [i for i, x in enumerate(data) if x == "\n"]
j=0
inp=input("enter label\n")
for i in indices:
ted=i+j
data.insert(ted,"\t"+inp)
j+=1
f.write("".join(data))
|
SK9712/Detecting-Malicious-Url-Using-Character-level-CNN
|
dataset_creator.py
|
dataset_creator.py
|
py
| 350
|
python
|
en
|
code
| 8
|
github-code
|
6
|
22049207954
|
# macros.py contains the SavedFiles class, which is used
# to maintain a directory of files with saved macro inputs
import os
import player
import record
DEFAULT_LOG = "action_log.txt"
class SavedFiles:
DIRECTORY_NAME = "Macros" # name of directory to save files in
files = ['' for x in range(0, 1)] # list of file names
current_idx = 0 # current index of objects
current_size = 1 # current maximum number of files
def __init__(self):
try:
os.mkdir(self.DIRECTORY_NAME)
except FileExistsError:
for filename in os.listdir(self.DIRECTORY_NAME):
self.files[self.current_idx] = filename.replace(".txt", "")
self.current_idx += 1
if self.current_idx == self.current_size:
self.files.append('')
self.current_size += 1
self.current_idx = 0
def get_current_idx(self):
return self.current_idx
def set_current_idx(self, idx):
self.current_idx = idx
def get_current_size(self):
return self.current_size
def set_current_size(self, size):
self.current_size = size
# get_file_data returns a tuple containing the current file's information.
# * returns data of form (file number, file name)
# * get_file_data()[0] = file number
# * get_file_data()[1] = file name
def get_file_data(self):
return (self.current_idx + 1, self.files[self.current_idx])
# get_current_file returns the current file location
def get_current_file(self):
if self.files[self.current_idx] == '': return
return self.DIRECTORY_NAME + '\\' + self.files[self.current_idx] + ".txt"
# set_current_file sets the current file to have name filename
def set_current_file(self, filename):
try:
if self.files[self.current_idx] == '':
open(self.DIRECTORY_NAME + '/' + filename + ".txt", "x")
else:
os.rename(self.get_current_file(),
self.DIRECTORY_NAME + "/" + filename + ".txt")
self.files[self.current_idx] = filename
except FileExistsError:
pass
# to_prev_file sets the current file to the previous file
# * if the current file is the first one, we instead loop to the end
def to_prev_file(self):
if self.current_idx == 0:
self.current_idx = self.current_size - 1
else:
self.current_idx -= 1
# to_next_file sets the current file to the next file
# * if the current file is the last one, we instead loop to the front
def to_next_file(self):
if self.current_idx == self.current_size - 1:
self.current_idx = 0
else:
self.current_idx += 1
# to_idx sets the current file to the one specified by index idx
# * idx starts from 0, so recording #1 corresponds to idx 0
def to_idx(self, idx):
if 0 <= int(idx) and int(idx) < self.current_size:
self.current_idx = int(idx)
else:
raise ValueError("bad index")
# new_file produces a new file with name filename, and
# changes the current file as such:
# * if current file is empty, do not change
# * else, chooses first available empty file
# * if no empty files, creates new file
# returns new index
def new_file(self, filename="Macro"):
if self.files[self.current_idx] == '':
pass
else:
all_full = True
for i in range(0, len(self.files)):
if filename == self.files[i]:
self.to_idx(i)
self.set_current_file(filename)
return self.current_idx
for i in range(0, len(self.files)):
if self.files[i] == '':
all_full = False
self.to_idx(i)
break
if all_full:
self.files.append('')
self.current_size += 1
self.to_idx(self.current_size - 1)
self.set_current_file(filename)
return self.current_idx
# delete_file deletes the current file, if it exists
def delete_file(self):
if self.files[self.current_idx] != '':
os.remove(self.get_current_file())
self.files[self.current_idx] = ''
# play_recording plays the current file using
# the Player object, plyr
def play_recording(self, plyr):
if self.files[self.current_idx] != '':
plyr.play(self.get_current_file())
def log_to_macros(self, filename):
self.new_file(filename)
open(self.get_current_file(), 'w').close()
with open(DEFAULT_LOG, 'r') as infile, open(self.get_current_file(), 'a') as outfile:
for line in infile:
outfile.write(line)
return self.current_idx
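# --- Hedged usage sketch (not part of the original module) ---
# Exercises only the directory bookkeeping; the player/record imports above
# are assumed to be available in this repo but are not touched here.
if __name__ == "__main__":
    saved = SavedFiles()
    saved.new_file("demo_macro")      # creates Macros/demo_macro.txt
    print(saved.get_file_data())      # e.g. (1, 'demo_macro')
    saved.delete_file()               # removes the file again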
|
Jedi123jet/Ekko
|
macros.py
|
macros.py
|
py
| 5,066
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72531788029
|
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
# pylint: disable=too-many-arguments
import pytest
from models_library.api_schemas_webserver.projects import (
ProjectCreateNew,
ProjectGet,
ProjectListItem,
ProjectReplace,
TaskProjectGet,
)
from models_library.generics import Envelope
from models_library.rest_pagination import Page
from pydantic import parse_obj_as
from pytest_simcore.simcore_webserver_projects_rest_api import (
CREATE_FROM_SERVICE,
CREATE_FROM_TEMPLATE,
CREATE_FROM_TEMPLATE__TASK_RESULT,
GET_PROJECT,
LIST_PROJECTS,
NEW_PROJECT,
REPLACE_PROJECT,
REPLACE_PROJECT_ON_MODIFIED,
HttpApiCallCapture,
)
@pytest.mark.parametrize(
"api_call",
(NEW_PROJECT, CREATE_FROM_SERVICE, CREATE_FROM_TEMPLATE),
ids=lambda c: c.name,
)
def test_create_project_schemas(api_call: HttpApiCallCapture):
request_payload = ProjectCreateNew.parse_obj(api_call.request_payload)
assert request_payload
response_body = parse_obj_as(
Envelope[ProjectGet] | Envelope[TaskProjectGet], api_call.response_body
)
assert response_body
@pytest.mark.parametrize(
"api_call",
(LIST_PROJECTS,),
ids=lambda c: c.name,
)
def test_list_project_schemas(api_call: HttpApiCallCapture):
assert api_call.request_payload is None
response_body = parse_obj_as(Page[ProjectListItem], api_call.response_body)
assert response_body
@pytest.mark.parametrize(
"api_call",
(GET_PROJECT, CREATE_FROM_TEMPLATE__TASK_RESULT),
ids=lambda c: c.name,
)
def test_get_project_schemas(api_call: HttpApiCallCapture):
# NOTE: that response_body here is the exported values
# and therefore ProjectGet has to be implemented in such a way that
    # can also parse exported values! (e.g. Json does not allow that, or occasionally exclude_none)
response_body = parse_obj_as(Envelope[ProjectGet], api_call.response_body)
assert response_body
@pytest.mark.parametrize(
"api_call",
(REPLACE_PROJECT, REPLACE_PROJECT_ON_MODIFIED),
ids=lambda c: c.name,
)
def test_replace_project_schemas(api_call: HttpApiCallCapture):
request_payload = parse_obj_as(ProjectReplace, api_call.request_payload)
assert request_payload
response_body = parse_obj_as(Envelope[ProjectGet], api_call.response_body)
assert response_body
|
ITISFoundation/osparc-simcore
|
packages/models-library/tests/test_api_schemas_webserver_projects.py
|
test_api_schemas_webserver_projects.py
|
py
| 2,403
|
python
|
en
|
code
| 35
|
github-code
|
6
|
14950042406
|
def add_task(todo_list):
task = input("Enter the task: ")
todo_list.append(task)
print(f"Task '{task}' added to the to-do list.")
def view_tasks(todo_list):
if not todo_list:
print("No tasks in the to-do list.")
else:
print("To-Do List:")
for index, task in enumerate(todo_list, start=1):
print(f"{index}. {task}")
def mark_completed(todo_list):
view_tasks(todo_list)
if not todo_list:
return
    try:
        task_num = int(input("Enter the number of the task you want to mark as completed: "))
    except ValueError:
        print("Invalid task number.")
        return
if 1 <= task_num <= len(todo_list):
completed_task = todo_list.pop(task_num - 1)
print(f"Task '{completed_task}' marked as completed.")
else:
print("Invalid task number.")
def main():
print("Simple To-Do App")
todo_list = []
while True:
print("\nOptions:")
print("1. Add a task")
print("2. View tasks")
print("3. Mark task as completed")
print("4. Exit")
choice = input("Enter the number of your choice: ")
if choice == "1":
add_task(todo_list)
elif choice == "2":
view_tasks(todo_list)
elif choice == "3":
mark_completed(todo_list)
elif choice == "4":
print("Exiting the to-do app.")
break
else:
print("Invalid choice. Please try again.")
if __name__ == "__main__":
main()
|
SanjanaLakkimsetty/CodersCave
|
coderscave todo/todo_app.py
|
todo_app.py
|
py
| 1,497
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26102744693
|
#!/usr/bin/python
import os, sys
# copied from:
# https://github.com/rose/nand2tetris/blob/master/assembler.py
# these three dictionaries store the translations of the 3 parts
# of a c-instruction
comp = {
"0": "0101010",
"1": "0111111",
"-1": "0111010",
"D": "0001100",
"A": "0110000",
"!D": "0001101",
"!A": "0110001",
"-D": "0001111",
"-A": "0110011",
"D+1": "0011111",
"A+1": "0110111",
"D-1": "0001110",
"A-1": "0110010",
"D+A": "0000010",
"D-A": "0010011",
"A-D": "0000111",
"D&A": "0000000",
"D|A": "0010101",
"M": "1110000",
"!M": "1110001",
"-M": "1110011",
"M+1": "1110111",
"M-1": "1110010",
"D+M": "1000010",
"D-M": "1010011",
"M-D": "1000111",
"D&M": "1000000",
"D|M": "1010101"
}
dest = {
"null": "000",
"M": "001",
"D": "010",
"A": "100",
"MD": "011",
"AM": "101",
"AD": "110",
"AMD": "111"
}
jump = {
"null": "000",
"JGT": "001",
"JEQ": "010",
"JGE": "011",
"JLT": "100",
"JNE": "101",
"JLE": "110",
"JMP": "111"
}
# table of symbols used in assembly code, initialized to include
# standard ones
table = {
"SP": 0,
"LCL": 1,
"ARG": 2,
"THIS": 3,
"THAT": 4,
"SCREEN": 16384,
"KBD": 24576,
}
for i in range(0,16):
label = "R" + str(i)
table[label] = i
variableCursor = 16 # next available memory location for variables
root = sys.argv[1] # name of file to be translated
def strip(line):
# removes whitespace and comments; returns line without a closing \n
char = line[0]
if char == "\n" or char == "/":
return ""
elif char == " ":
return strip(line[1:])
else:
return char + strip(line[1:])
def normalize(line):
# normalizes c-instructions by adding null dest & jump fields
# if they're unspecified
line = line[:-1]
if not "=" in line:
line = "null=" + line
if not ";" in line:
line = line + ";null"
return line
def addVariable(label):
# allocates a memory location for new variables
global variableCursor
table[label] = variableCursor
variableCursor += 1
return table[label]
def aTranslate(line):
# translates a symbolic a-instruction into an int (if necessary)
# then translates that into a binary machine instruction
if line[1].isalpha():
label = line[1:-1]
aValue = table.get(label, -1)
if aValue == -1:
aValue = addVariable(label)
else:
aValue = int(line[1:])
bValue = bin(aValue)[2:].zfill(16)
return bValue
def cTranslate(line):
# splits a c-instruction into its components & translates them
line = normalize(line)
temp = line.split("=")
destCode = dest[temp[0]]#dest.get(temp[0], "destFAIL")
temp = temp[1].split(";")
compCode = comp[temp[0]]#comp.get(temp[0], "compFAIL")
jumpCode = jump[temp[1]] #jump.get(temp[1], "jumpFAIL")
return compCode, destCode, jumpCode
def translate(line):
# distinguishes a- and c-instructions, calls appropriate function
# to translate each
if line[0] == "@":
return aTranslate(line)
else:
codes = cTranslate(line)
return "111" + codes[0] + codes[1] + codes[2]
def firstPass():
# searches file for jump labels and enters them into the symbol table
# also strips out comments & empty lines
infile = open(root + ".asm")
outfile = open(root + ".tmp", "w")
lineNumber = 0
for line in infile:
sline = "".join(line.split())
if sline != "":
if sline[0] == "(":
label = sline[1:-1]
table[label] = lineNumber
sline = ""
else:
lineNumber += 1
outfile.write(sline + "\n")
infile.close()
outfile.close()
def assemble():
# takes file stripped of labels and translates it into .hack
infile = open(root + ".tmp")
outfile = open(root + ".hack", "w")
for line in infile:
tline = translate(line)
outfile.write(tline + "\n")
infile.close()
outfile.close()
os.remove(root + ".tmp")
# actual program is just calls to these two functions
firstPass()
assemble()
|
philzook58/nand2coq
|
verilog/assembly.py
|
assembly.py
|
py
| 4,058
|
python
|
en
|
code
| 49
|
github-code
|
6
|
21666324114
|
#https://leetcode.com/problems/3sum/
from collections import defaultdict
class Solution:
#Fast and extremely clever solution, need to study this further to understand how it works
def threeSum(self, nums: list[int]) -> list[list[int]]:
negative = defaultdict(int)
positive = defaultdict(int)
zeros = 0
for num in nums:
if num < 0:
negative[num] += 1
elif num > 0:
positive[num] += 1
else:
zeros += 1
result = []
if zeros:
for n in negative:
if -n in positive:
result.append((0, n, -n))
if zeros > 2:
result.append((0,0,0))
for set1, set2 in ((negative, positive), (positive, negative)): #This for-loop runs twice.
#In the first iteration of this for loop, set1 is negative defaultdict and set2 is positive defaultdict
#In the second iteration of this for loop, set1 is positive defaultdict and set2 is negative defaultdict
#print(f'set1 = {set1}')
#print(f'set2 = {set2}\n\n')
set1Items = list(set1.items()) #List of key-value pairs in set1. Each element in this list is a tuple (num, frequency)
#print(f'set1Items = {set1Items}')
for i, (j, k) in enumerate(set1Items): # i is the index of list set1Items and (j,k) is a tuple (num, frequency) of set1
#Only considering pair (j,k) at set1Items[i] while entering below for-loop
for j2, k2 in set1Items[i:]: #
if j != j2 or (j == j2 and k > 1): #If the two nums j and j2 are different we can proceed. Or we can proceed if they are equal but that num is present multiple times
if -j-j2 in set2: #We have two numbers j and j2 that could form a 3sum. If (-j-j2) is also present, then we have found a 3sum
result.append([j, j2, -j-j2])
return result
def main():
solution = Solution()
list1 = [-1,0,1,2,-1,-4]
print(solution.threeSum(list1)) #[[-1,-1,2],[-1,0,1]]
if __name__ == "__main__": #Entry point
main() #Calling main method
|
Adam-1776/Practice
|
DSA/3sum/solution.py
|
solution.py
|
py
| 2,243
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42085468466
|
import math
print("welcome")
# addition of two numbers
print("enter two numbers")
a = input("num1:")
b = input("num2:")
c = int(a) + int(b)
print(c)
# fibonacci series
n1, n2 = 0, 1
count = 0
n = int(input("enter range"))
if n <= 0:
print("Please enter a positive integer")
elif n == 1:
print("Fibonacci sequence: ", n)
else:
print("Fibonacci sequence:")
while count < n:
print(n1)
n3 = n1 + n2
n1 = n2
n2 = n3
count += 1
# ascii value of a character
a1 = input("enter a character: ")
print(ord(a1))
# area of triangle
print("enter sides of a triangle:")
s1 = int(input("side1:"))
s2 = int(input("side2:"))
s3 = int(input("side3:"))
s = (s1 + s2 + s3) / 2  # Heron's formula uses the semi-perimeter, not the perimeter
area = math.sqrt(s * (s - s1) * (s - s2) * (s - s3))
print("area =", area)
# sum of digits
num = int(input("enter any number:"))
sum = 0
while num != 0:
sum = sum + (num % 10)
num //= 10
print("sum =", sum)
# area of circle
r = int(input("enter radius of circle:"))
area = math.pi * r * r
print("are of circle =", area)
# Quotient and Remainder of Two Numbers
a = int(input("num1:"))
b = int(input("num2:"))
print("quotient of a and b is", a//b)
print(("remainder of a and b is", a%b))
|
Ramcharantejpaka/python_lab
|
main.py
|
main.py
|
py
| 1,279
|
python
|
en
|
code
| 0
|
github-code
|
6
|
12096156845
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import requests
from bs4 import BeautifulSoup, Tag
from workflow import Workflow
def main(wf):
if not wf.args:
return
word = wf.args[0].strip()
resp = requests.post("http://www.zdic.net/sousuo/", data={"q": word})
soup = BeautifulSoup(resp.content, "html.parser")
#soup = BeautifulSoup(open("./test/%s.html"% word, "rb").read(), "html.parser")
    # pinyin; used as the entry title
    title = None
pinyin = soup.find("span", attrs={"class": "dicpy"})
if pinyin:
title = pinyin.string
if not title and pinyin.find("a"):
title = pinyin.find("a").string
if not title:
title = word
    # fetch the definition text
explain = None
for py in soup.findAll("span", attrs={"class": "dicpy"}):
p = py.findParent()
if p.name == "p" and p.attrs.get("class", [""])[0].startswith("zdct"):
explain = p
break
texts = []
if explain:
for e in explain.nextSiblingGenerator():
if not isinstance(e, Tag): continue
if e.attrs.get("class", [""])[0] != explain.attrs['class'][0]:
break
texts.append(e.text)
wf.add_item(title, " ".join(texts), arg=resp.url, valid=True, largetext="\n".join(texts))
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
jinuljt/zdic.alfredworkflow
|
zdic.py
|
zdic.py
|
py
| 1,421
|
python
|
en
|
code
| 6
|
github-code
|
6
|
30357445001
|
from traits.api import HasTraits, Code
from traitsui.api import Item, Group, View
# The main demo class:
class CodeEditorDemo(HasTraits):
"""Defines the CodeEditor demo class."""
# Define a trait to view:
code_sample = Code('import sys\n\nsys.print("hello world!")')
# Display specification:
code_group = Group(
Item('code_sample', style='simple', label='Simple'),
Item('_'),
Item('code_sample', style='custom', label='Custom'),
Item('_'),
Item('code_sample', style='text', label='Text'),
Item('_'),
Item('code_sample', style='readonly', label='ReadOnly'),
)
# Demo view:
traits_view = View(
code_group, title='CodeEditor', width=600, height=600, buttons=['OK']
)
# Create the demo:
demo = CodeEditorDemo()
# Run the demo (if invoked from the command line):
if __name__ == "__main__":
demo.configure_traits()
|
enthought/traitsui
|
traitsui/examples/demo/Standard_Editors/CodeEditor_demo.py
|
CodeEditor_demo.py
|
py
| 922
|
python
|
en
|
code
| 290
|
github-code
|
6
|
2653030207
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class BiLSTM(nn.Module):
def __init__(self, args):
super(BiLSTM, self).__init__()
self.embedding_size = args.embedding_size
self.hidden_size = args.hidden_size
        self.vocab_size = args.vocab_size
self.label_size = args.num_classes
self.batch_size = args.K # batch size is per task here
self.use_gpu = args.use_gpu
self.embeddings = nn.Embedding(self.vocab_size, self.embedding_size)
self.lstm = nn.LSTM(input_size=self.embedding_size,
hidden_size=self.hidden_size,
bidirectional=True,
batch_first=True)
self.hidden = self.init_hidden(self.use_gpu)
self.classifier = nn.Linear(self.hidden_size*2, self.label_size)
if self.use_gpu:
self.embeddings = self.embeddings.cuda()
self.lstm = self.lstm.cuda()
self.classifier = self.classifier.cuda()
def init_hidden(self, use_gpu):
h_0 = Variable(torch.zeros(2, self.batch_size, self.hidden_size), requires_grad=True)
c_0 = Variable(torch.zeros(2, self.batch_size, self.hidden_size), requires_grad=True)
if use_gpu:
return h_0.cuda(), c_0.cuda()
else:
return h_0, c_0
def forward(self, word_ids, lengths):
# sort by length
lengths, perm_idx = lengths.sort(0, descending=True)
word_ids = word_ids[perm_idx]
# print('word ids', word_ids.size())
# word_ids = word_ids.permute(1, 0)
embs = self.embeddings(word_ids)#.view(word_ids.size(1), self.batch_size, -1) # maybe permute instead
# print('embs', embs.size())
# print('lengths', lengths.size())
packed = pack_padded_sequence(embs, lengths, batch_first=True)
# print('packed', packed.size())
# embs = embs.permute(1, 0, 2)
# print(packed.size())
output, self.hidden = self.lstm(packed, self.hidden)
output, _ = pad_packed_sequence(output, batch_first=True)
# unsort
_, unperm_idx = perm_idx.sort(0)
output = output[unperm_idx]
lengths = lengths[unperm_idx]
# get final output state
last_indices = (lengths - 1).view(-1, 1).expand(len(lengths), output.size(2)).unsqueeze(1) # 1 = time dimension
if self.use_gpu:
last_indices = last_indices.cuda()
last_output = output.gather(1, last_indices).squeeze(1)
# print(last_output.size())
logits = self.classifier(last_output)
return logits
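# --- Hedged usage sketch (not part of the original file) ---
# 'args' is assumed to carry the same fields the constructor reads above.
if __name__ == "__main__":
    import argparse
    args = argparse.Namespace(embedding_size=32, hidden_size=64,
                              vocab_size=100, num_classes=3, K=4,
                              use_gpu=False)
    model = BiLSTM(args)
    word_ids = torch.randint(0, 100, (4, 10))   # (batch, padded seq_len)
    lengths = torch.tensor([10, 8, 6, 5])       # true lengths per sequence
    print(model(word_ids, lengths).shape)       # -> torch.Size([4, 3])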
|
wbakst/meta-learned-embeddings
|
lstm.py
|
lstm.py
|
py
| 2,741
|
python
|
en
|
code
| 1
|
github-code
|
6
|
34834842463
|
import re
f = open("processos.txt")
linhas = f.readlines()
datas = {}
for i in linhas:
new_text = re.search(r'([0-9]+)::([0-9]{4})', i)
if new_text:
data = new_text.group(2)
processo = new_text.group(1)
if (data,processo) not in datas:
datas[(data,processo)] = 1
else:
            datas[(data, processo)] += 1
f.close()
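# Example (illustrative): a line like "1234::1997::..." matches the pattern,
# so group(1) is the process number "1234" and group(2) the year "1997".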
for (a,b) in datas:
print("No ano " + str(a) + ", o processo " + str(b) + " aconteceu " + str(datas[a,b]) + " vezes")
|
Miguelii/uminho.PLC-Project
|
PLC TP1/a.py
|
a.py
|
py
| 524
|
python
|
pt
|
code
| 0
|
github-code
|
6
|
71888961147
|
line3 = ''
import re
with open('txt\\论语-提取版.txt','r+',encoding="UTF-8") as f2,open('txt\\论语-原文.txt','w',encoding="UTF-8") as f3:
for line in f2.readlines() :
line3 = line3 + line
    line3 = re.sub(r'\(\d\)', '', line3)
# if line == '】':
# del line3[-4, -1]
# continue
f3.write(line3)
# f3=open('txt\\论语-原文.txt','r+',encoding="UTF-8")
# f3.truncate()
# f3.close()
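# Example (illustrative): the substitution strips single-digit footnote
# markers, e.g. "學而時習之(1)" -> "學而時習之"; a multi-digit marker such as
# "(12)" would need the pattern r'\(\d+\)' instead.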
|
fivespeedasher/Pieces
|
luanyu2.py
|
luanyu2.py
|
py
| 444
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36396608095
|
"""
Continuous Statistics Class
"""
from numbers import Number
from typing import Union, Tuple
from functools import wraps
import inspect
import numpy as np
from gval.statistics.base_statistics import BaseStatistics
import gval.statistics.continuous_stat_funcs as cs
class ContinuousStatistics(BaseStatistics):
"""
Class for Running Continuous Statistics on Agreement Maps
Attributes
----------
registered_functions : dict
Available statistical functions with names as keys and parameters as values
"""
def __init__(self):
        # Automatically populates all functions defined in continuous_stat_funcs.py
self.required_param = 1
self.optional_param = 0
self._func_names = [
fn
for fn in dir(cs)
if len(fn) > 5
and "__" not in fn
and "Number" not in fn
and "convert_output" not in fn
]
self._funcs = [getattr(cs, name) for name in self._func_names]
for name, func in zip(self._func_names, self._funcs):
setattr(self, name, func)
self._signature_validation = {
"names": {
"error": self.required_param,
"candidate_map": self.optional_param,
"benchmark_map": self.optional_param,
},
"required": [
self.required_param,
self.optional_param,
self.optional_param,
],
"param_types": [
"xarray.core.dataset.Dataset",
"xarray.core.dataarray.DataArray",
"Union[xarray.core.dataarray.DataArray, xarray.core.dataset.Dataset]",
"Union[xarray.core.dataset.Dataset, xarray.core.dataarray.DataArray]",
],
"return_type": [float, Number],
"no_of_args": [1, 2, 3],
}
self.registered_functions = {
name: {"params": [param for param in inspect.signature(func).parameters]}
for name, func in zip(self._func_names, self._funcs)
}
def available_functions(self) -> list:
"""
Lists all available functions
Returns
-------
List of available functions
"""
return list(self.registered_functions.keys())
def get_all_parameters(self):
"""
Get all the possible arguments
Returns
-------
List of all possible arguments for functions
"""
return list(self._signature_validation["names"].keys())
def register_function(self, name: str):
"""
Register decorator function in statistics class
Parameters
----------
name: str
Name of function to register in statistics class
Returns
-------
Decorator function
"""
def decorator(func):
self.function_signature_check(func)
if name not in self.registered_functions:
self.registered_functions[name] = {
"params": [
param
for param in inspect.signature(func).parameters
if param != "self"
]
}
setattr(self, name, func)
else:
raise KeyError("This function name already exists")
@wraps(func)
def wrapper(*args, **kwargs): # pragma: no cover
result = func(*args, **kwargs)
return result
return wrapper
return decorator
def register_function_class(self):
"""
Register decorator function for an entire class
Parameters
----------
vectorize_func: bool
Whether to vectorize the function
"""
def decorator(dec_self: object):
"""
Decorator for wrapper
Parameters
----------
dec_self: object
Class to register stat functions
"""
for name, func in inspect.getmembers(dec_self, inspect.isfunction):
if name not in self.registered_functions:
self.function_signature_check(func)
self.registered_functions[name] = {
"params": [
param
for param in inspect.signature(func).parameters
if param != "self"
]
}
setattr(self, name, func)
else:
raise KeyError("This function name already exists")
return decorator
def function_signature_check(self, func):
"""
Validates signature of registered function
Parameters
----------
func: function
Function to check the signature of
"""
signature = inspect.signature(func)
names = self._signature_validation["names"]
param_types = self._signature_validation["param_types"]
return_type = self._signature_validation["return_type"]
no_of_args = self._signature_validation["no_of_args"]
# Checks if param names, type, and return type are in valid list
# Considered no validation if either are empty
for key, val in signature.parameters.items():
if (key not in names and len(names) > 0) or (
not str(val).split(": ")[-1] in param_types and len(param_types) > 0
):
raise TypeError(
"Wrong parameters in function: \n"
f"Valid Names: {names} \n"
f"Valid Types: {param_types} \n"
)
if len(no_of_args) > 0 and len(signature.parameters) not in no_of_args:
raise TypeError(
"Wrong number of parameters: \n"
f"Valid number of parameters: {no_of_args}"
)
if signature.return_annotation not in return_type and len(return_type) > 0:
raise TypeError("Wrong return type \n" f"Valid return Type {return_type}")
def get_parameters(self, func_name: str) -> list:
"""
Get parameters of registered function
Parameters
----------
func_name: str
Returns
-------
List of parameter names for the associated function
"""
if func_name in self.registered_functions:
return self.registered_functions[func_name]["params"]
else:
raise KeyError("Statistic not found in registered functions")
def process_statistics(
self, func_names: Union[str, list], **kwargs
) -> Tuple[float, str]:
"""
Parameters
----------
func_names: Union[str, list]
Name of registered function to run
**kwargs: dict or keyword arguments
Dictionary or keyword arguments of to pass to metric functions.
Returns
-------
Tuple[float, str]
Tuple with metric values and metric names.
"""
func_names = (
list(self.registered_functions.keys())
if func_names == "all"
else func_names
)
func_list = [func_names] if isinstance(func_names, str) else func_names
return_stats, return_funcs = [], []
for name in func_list:
if name in self.registered_functions:
params = self.get_parameters(name)
required = self._signature_validation["required"]
func = getattr(self, name)
# Necessary for numba functions which cannot accept keyword arguments
func_args, skip_function, return_nan = [], False, False
for param, req in zip(params, required):
if param in kwargs and kwargs[param] is not None:
func_args.append(kwargs[param])
elif not self._signature_validation["names"][param]:
skip_function = True
break
else:
print(
f"Parameter {param} missing from kwargs of {name}, returning nan"
)
return_nan = True
break
if skip_function:
continue
stat_val = np.nan if return_nan else func(*func_args)
def check_value(stat_name: str, stat: Number):
if (np.isnan(stat) or np.isinf(stat)) and not return_nan:
print(
"Warning:",
f"Invalid value calculated for {stat_name}:",
stat,
)
if isinstance(stat_val, dict):
for st_name, val in stat_val.items():
check_value(st_name, val)
else:
check_value(name, stat_val)
return_stats.append(stat_val)
return_funcs.append(name)
else:
raise KeyError(f"Statistic, {name}, not found in registered functions")
return return_stats, return_funcs
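# --- Hedged usage sketch (not part of the original module) ---
# Assumes xarray is installed (the validator's type strings reference it).
# Registers a one-parameter metric named "mean_error" and runs it; the
# parameter must be called "error" per _signature_validation above.
if __name__ == "__main__":
    import xarray

    stats = ContinuousStatistics()

    @stats.register_function("mean_error")
    def mean_error(error: xarray.core.dataarray.DataArray) -> float:
        return float(error.mean())

    values, names = stats.process_statistics(
        "mean_error", error=xarray.DataArray([1.0, 2.0, 3.0])
    )
    print(names, values)  # -> ['mean_error'] [2.0]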
|
NOAA-OWP/gval
|
src/gval/statistics/continuous_statistics.py
|
continuous_statistics.py
|
py
| 9,420
|
python
|
en
|
code
| 14
|
github-code
|
6
|
6634407633
|
class Solution(object):
def rotatev1(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: None Do not return anything, modify nums in-place instead.
"""
        if nums is None or len(nums) == 0:
            return
        n = len(nums)
        k %= n
for i in range(k):
nums.insert(0, nums.pop())
def rotate(self, nums, k):
n = len(nums)
k %= n
nums.reverse()
def reverse(nums, start, end):
while start < end:
nums[start], nums[end] = nums[end], nums[start]
start+=1
end -=1
reverse(nums, 0, k-1)
reverse(nums, k, n-1)
#nums = nums[-k:] + nums[:-k]
sol = Solution()
arr = [1,2,3,4,5,6,7]
sol.rotate(arr, k = 3)
print(arr)
|
rh01/gofiles
|
lcode100-199/ex113/rotate.py
|
rotate.py
|
py
| 821
|
python
|
en
|
code
| 0
|
github-code
|
6
|
44855832216
|
"""
Question 4.3: List of Depths: Given a binary tree, design an algorithm which creates a linked list of all the nodes
at each depth (e.g., if you have a tree with depth 0, you'll have 0 linked lists).
"""
class TNode:
def __init__(self,value):
self.data = value
self.left = None
self.right = None
class Tree:
def __init__(self):
self.root = None
def addData(self,data,node):
if node == None:
self.root = TNode(data)
return
if data > node.data:
if node.right != None:
self.addData(data,node.right)
else:
node.right = TNode(data)
return
else:
if node.left != None:
self.addData(data,node.left)
else:
node.left = TNode(data)
return
def inorder(self,node):
if node.left == None and node.right == None:
print(node.data)
return
if node.left != None:
self.inorder(node.left)
print(node.data)
if node.right != None:
self.inorder(node.right)
def get_depth_lists(self):
        # level-order BFS that records each level explicitly, since the
        # levels of a BST are not always exactly 2**d nodes wide
        level_lists = []
        level = [self.root]
        while len(level) != 0:
            level_lists.append([node.data for node in level])
            next_level = []
            for node in level:
                if node.left != None:
                    next_level.append(node.left)
                if node.right != None:
                    next_level.append(node.right)
            level = next_level
        print(level_lists)
nums = [10,4,1,12,3,9,5,6,14,71,33,24,62,20,74,21,82,16,15,11,45,23]
t = Tree()
for num in nums:
t.addData(num,t.root)
t.get_depth_lists()
|
sandeepjoshi1910/Algorithms-and-Data-Structures
|
List_of_Depths.py
|
List_of_Depths.py
|
py
| 2,206
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3096449689
|
from django.shortcuts import render, render_to_response
from django.utils import timezone
from django.http import HttpResponse, Http404
#from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.paginator import EmptyPage, PageNotAnInteger
from flynsarmy_paginator.paginator import FlynsarmyPaginator as Paginator
from .models import foodall, gn
from django.db.models import Q
import random
import pymysql
ITEMS_PER_PAGE = 10
PAGE_GROUP = 10
#-- TemplateView
def home(request):
return render(request, 'foodle/home.html')
def search(request):
wisesaying = [
'고기 먹다 체하면 냉면으로 쑥 눌러줘라',
'기분이 저기압일 땐 반드시 고기 앞으로 가라',
'내가 아는 맛이 가장 맛있는 맛',
'치킨 뼈를 봤을 때 앙념을 먹었는지 후라이드를 먹었는지 모르게 하라',
'맛있게 먹으면 0칼로리',
'탕수육은 부먹도 찍먹도 아닌 쳐먹이 진리',
'일찍 일어나는 새가 많이 먹는다',
'먹었던 뼈도 다시보자',
'인생은 치킨의 연속이다',
'내가 먹겠다는 의지만 있으면 위가 늘어난다',
'B(birth)와 D(death)사이에는 C(chicken)이 있다',
'오늘 먹을 치킨을 내일로 미루지 말자',
'튀긴 음식은 신발을 튀겨도 맛있다',
'맛집이 있다면 지옥도 가겠다',
'현기증 난단 말이에요. 빨리 라면 끓여 주세요',
'물이 너무 많으면 라면을 더 넣어라',
]
if not hasattr(search, "searchwords"):
search.searchwords = ''
if not hasattr(search, "foodle_list"):
search.foodle_list = []
if not hasattr(search, "w_list"):
search.w_list = []
if 'search_words' in request.GET:
search.searchwords = request.GET['search_words']
if search.searchwords == '':
search.foodle_list = []
else:
#search.foodle_list = mysqlexport(search.searchwords)
search.w_list = wordslist(search.searchwords)
q = Q()
for wlist in search.w_list:
q = q & ((Q(title__contains=wlist) | Q(subtitle__contains=wlist)) | Q(ind__contains=wlist))
search.foodle_list = foodall.objects.filter(q).order_by('-data')
# Paging
paginator = Paginator(search.foodle_list, 10, adjacent_pages = 5)
page = request.GET.get('page')
try:
lists = paginator.page(page)
except PageNotAnInteger:
lists = paginator.page(1)
except EmptyPage:
lists = paginator.page(paginator.num_pages)
return render_to_response('foodle/search.html', {"lists": lists, "searchwords": search.searchwords, "wisesaying": wisesaying[random.randint(0, len(wisesaying)-1)]})
#search_list.searchwords=''
def mysqlexport(key):
conn=pymysql.connect(host='127.0.0.1',charset='utf8',user='root',passwd='root',db='food')
conn.query("set character_set_connection=utf8;")
conn.query("set character_set_server=utf8;")
conn.query("set character_set_client=utf8;")
conn.query("set character_set_results=utf8;")
conn.query("set character_set_database=utf8;")
curs = conn.cursor(pymysql.cursors.DictCursor)
key = searchword(key)
sql="SELECT * FROM gn where title like" + key + " or ind like " + key + " or subtitle like " + key + "ORDER BY data DESC"
print(sql)
curs.execute(sql)
rows=curs.fetchall()
return rows
def searchword(key):
sc=0
ec=0
cc=0
wnum=0
word=''
for a in key:
cc+=1
if a.count(' ')>0:
ec=cc-1
word+=" '%"+key[sc:ec]+"%' and ind like"
sc = cc
if cc==key.__len__():
ec=cc
word += " '%" + key[sc:ec] + "%'"
            sc = cc
return word
def wordslist(key):
words_list = []
temp = ''
cc = 0
for a in key:
cc += 1
if a==' ':
words_list.append(temp)
temp = ''
else:
temp += a
if(cc == len(key)):
words_list.append(temp)
return words_list
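# Example (illustrative): wordslist("hello world") -> ['hello', 'world'];
# each word then contributes one AND-ed Q filter in search() above.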
#mysqlexport('서울 프뤼엥 ')
|
seolakim/reve-web
|
foodle/views.py
|
views.py
|
py
| 4,218
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4457145906
|
# -*- coding: utf-8 -*-
# Scrapy settings for wikicrawler project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'wikicrawler'
SPIDER_MODULES = ['wikicrawler.spiders']
NEWSPIDER_MODULE = 'wikicrawler.spiders'
EXTENSIONS = {
'scrapy.contrib.closespider.CloseSpider': 0
}
CLOSESPIDER_PAGECOUNT = 10000
DEPTH_LIMIT = 4
CONCURRENT_REQUESTS_PER_IP = 1
DOWNLOAD_DELAY = 1.5
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'wikicrawler (+http://www.yourdomain.com)'
|
Ugon/mownit
|
document-search-engine/wikicrawler/wikicrawler/settings.py
|
settings.py
|
py
| 673
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23996964324
|
# Imports
import pandas as pd
from numpy.linalg import norm
from numpy import dot
from ML_Pipeline.preprocessing import preprocessing_input
# Define cosine simialrity function
def cos_sim(a,b):
"""
In our context:
a: Vector 'a' represents emebedding/vector rep. of query passed
b: The average vector of each abstract in our dataset
So, we need to find cosine dist. b/w then to see how similar they are.
"""
return dot(a,b)/(norm(a)*norm(b))
def top_n(n:int, query:str, model, abs_vectors, df:pd.DataFrame):
"""
Function to return top n similar results
n - to get top n
query - input query
model - trained model
abs_vectors - average vectors for all abstracts obtained from the model
df - original dataset
"""
try:
# n = int(input("Enter a integer value for n: "))
print("\nQuery:",query,"\n")
query = preprocessing_input(query, model)
# Converting cosine similarities of overall dataset with i/p querie(s) into List
query_cos_sim = []
for idx,abs_vec in enumerate(abs_vectors):
# Also appending there index
tup = (cos_sim(query, abs_vec), idx)
query_cos_sim.append(tup)
# Sort list in descending order based on cosine values
top_n_dist_values = sorted(query_cos_sim, reverse=True)[:n]
# index_of_similar_abstract
idxs = [i[-1] for i in top_n_dist_values]
# cosine values
cosine_vals = [i[0] for i in top_n_dist_values]
print(cosine_vals)
# returning dataframe (id, title,abstract ,publication date)
return df.iloc[idxs, [1,2,5,6]], cosine_vals
except Exception as e:
print(e)
return
|
avr2002/Medical-Embeddings-and-Clinical-Trial-Search-Engine
|
src/ML_Pipeline/top_n.py
|
top_n.py
|
py
| 1,894
|
python
|
en
|
code
| 0
|
github-code
|
6
|
33422541294
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
from torchvision.utils import save_image
from torch.utils.data import Dataset
from torchvision import datasets, models, transforms
from Regressor_and_loss import disparityregression
# input = (B,3,256,512)
class baseline_model(nn.Module):
def __init__(self,B,C,H,W,newmodel):
super(baseline_model,self).__init__()
self.B=B
self.C=C
self.H=H
self.W=W
self.device = "cuda"
self.max_disp = 192
self.cnn_Shared = newmodel
self.cnn_3dims = nn.Sequential(
nn.Conv3d(64, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
nn.BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv3d(64, 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
nn.BatchNorm3d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv3d(64, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
nn.BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv3d(128, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
nn.BatchNorm3d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv3d(128, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
nn.BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv3d(32, 32, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
nn.BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.ReLU(),
nn.Conv3d(32, 1, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1)),
)
    #y = x.flatten(start_dim=1, end_dim=2)  # https://stackoverflow.com/questions/65465646/convert-5d-tensor-to-4d-tensor-in-pytorch
# size = (B, 192, 256 , 512)
def concat_for_3D(self, left_feats, right_feats):
        cost = torch.zeros(self.B, self.C * 2, self.max_disp // 4, self.H // 4, self.W // 4).to(self.device)  # zeros keep the un-shifted disparity columns defined
for i in range(self.max_disp // 4):
if(i==0):
cost[:, :self.C, i, :, :] = left_feats
cost[:, self.C:, i, :, :] = right_feats
else:
cost[:, :self.C, i, :, i:] = left_feats[:,:,:,i:]
cost[:, self.C:, i, :, i:] = right_feats[:,:,:,:-i]
return cost
def forward(self,x_left,x_right):
im_left = self.cnn_Shared(x_left)
im_right = self.cnn_Shared(x_right)
cost_vol = self.concat_for_3D(im_left,im_right)
score_volume = self.cnn_3dims(cost_vol)
m = nn.Upsample(scale_factor=4, mode='trilinear')
score_volume = m(score_volume)
y = score_volume.flatten(start_dim=1, end_dim=2)
prob=F.softmax(y,1)
#https://github.com/jzhangbs/DSM/blob/master/model.py
prob = disparityregression(self.max_disp)(prob)
return prob
def create_mod():
model = models.resnet18(pretrained=True) # https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
model.conv1 = nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), bias=False)
#model.layer4[0].conv1 = nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
newmodel = torch.nn.Sequential(*(list(model.children())[0:8]))
newmodel[5][0].conv1 = nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
newmodel[5][0].downsample = nn.Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
newmodel[6][0].conv1 = nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
newmodel[6][0].downsample[0] = nn.Conv2d(128, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
newmodel = newmodel[:7]
newmodel.newconv1 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
newmodel.newbn1 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
#newmodel.ndownsample1 = nn.Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
newmodel.newconv2 = nn.Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
newmodel.newbn2 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
newmodel.newconv3 = nn.Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
#print(newmodel)
#newmodel.to(device)
return newmodel
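# --- Hedged smoke test (not part of the original file) ---
# Verifies the backbone output matches the (C=32, H/4, W/4) feature shape
# that concat_for_3D expects; downloading the pretrained resnet18 weights
# requires network access.
if __name__ == "__main__":
    feats = create_mod()(torch.randn(1, 3, 64, 128))
    print(feats.shape)  # expected: torch.Size([1, 32, 16, 32])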
|
Pahulmeet/Stereo_Depth_Estimation
|
model8.py
|
model8.py
|
py
| 4,908
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74221916988
|
#! /usr/bin/env python3
# utf-8
# importing Hashs
import hashlib
from hashlib import md5
# imports sys related
import os, sys
import subprocess
from os import system, name
from time import gmtime, strftime, sleep
from sys import argv
# EX
from zlib import *
import socket
import random
import glob
import base64
import getpass
from colorama import *
# path for decrypting
def dec():
print(RED +'''
{1} -- DES
{2} -- ALLEGED RC4
{3} -- RC4
{4} -- RC5
{5} -- DIAMOND
{6} -- IDEA
{7} -- LOKI91
{8} -- REDOC III
{9} -- SAPPHIR
''')
HEADER = '\033[95m'
IMPORTANT = '\33[35m'
NOTICE = '\033[33m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
RED = '\033[91m'
END = '\033[0m'
UNDERLINE = '\033[4m'
LOGGING = '\33[34m'
color_random=[HEADER,IMPORTANT,NOTICE,OKBLUE,OKGREEN,WARNING,RED,END,UNDERLINE,LOGGING]
random.shuffle(color_random)
mushlogo = color_random[0] + '''
__ _____ _______
/ | / / / / / ___// / / /
/ /|_/ / / / /\__ \/ /_/ /
/ / / / /_/ /___/ / __ /
/_/ /_/\____//____/_/ /_/
'''
mushprompt = "mush ~# "
def mush():
    print(mushlogo + RED + '''
 [*] The Quieter You Become, The More You Are Able To Hear!
 [!] Mush is a multiple hashing/encrypting tool. ENJOY hashing/encrypting things!
    ''' + END + '''
    {1} -- HASHING
    {2} -- ENCRYPTING
    {3} -- DECRYPTING
    {4} -- ABOUT \n
    ''')
    choice = input(mushprompt)
    if choice == "1":
        hash()
    elif choice == "2":
        enc()
    elif choice == "3":
        dec()
    elif choice == "4":
        about()
    else:
        print(RED + " WRONG ANSWER!!! \n")
        print()
        mush()
# path for hashing functions
def hash():
    pass

# path for encrypting functions (stub so menu option 2 does not crash)
def enc():
    pass

# about screen (stub so menu option 4 does not crash)
def about():
    pass
# decrypting using md5
def Dmd5():
string = input("Enter the word/sentence you want to decrypt?\n")
result = hashlib.md5(string.encode())
print("the Hexadecimal equivalent of hash is: ", end="")
print(result.hexdigest())
def cod():
zz = open("prompts/intro2.txt", "r")
zzz = zz.read()
print(zzz)
ol = input("$ ")
    if ol == "1":
        Emorse()
    elif ol == "2":
        Ebinary()
if __name__=='__main__':
try:
os.system('clear')
mush()
except KeyboardInterrupt:
print(" SOMETHIGN WRONG! ...\n")
time.sleep(0.25)
|
elfalehed/mush
|
mush.py
|
mush.py
|
py
| 2,562
|
python
|
en
|
code
| 7
|
github-code
|
6
|
14890429316
|
# coding:utf-8
import numpy as np
import math
type_ir = 5
def atan(a):
val = float(a)
return math.atan(val)*2/math.pi
def linear_state(single_state):
#print('line/single_state',single_state.shape[0])
#print('line/single_state',single_state.shape[1])
a = np.zeros((single_state.shape[0],1))
for i in range(single_state.shape[0]):
x = np.linspace(0,1,single_state[i,:].shape[0])
#fitting
b, c = np.polyfit(x, single_state[i,:], 1)
#fitting line
a[i,0] = atan(b)
return a[:,0]
#return a
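# Example (illustrative): a row rising steadily, e.g. [0, 1, 2, 3], fits a
# slope of 3 over x in [0, 1], so linear_state returns
# atan(3) = 2*arctan(3)/pi ~= 0.795 for that row.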
def check_thre(ir_sensor,thre):
ret = -1
ir_no = -1
for i in range(type_ir):
if ir_sensor[i]<thre:
ret = 1
ir_no = i
break
else:
ret = 0
return ir_no, ret
if __name__ == '__main__':
print('input a to test atan')
a = raw_input()
print(atan(a))
type_face = 5
type_ir = 5
state1 = np.zeros((type_face+type_ir,1))
state2 = np.zeros((type_face+type_ir,1))
state3 = np.zeros((type_face+type_ir,1))
state1[:,0]=np.array([2,3,1,2,3,4,2,3,1,12])
state2[:,0]=np.array([3,3,1,2,3,4,2,3,1,1])
state3[:,0]=np.array([4,3,1,2,3,4,2,3,1,-10])
state=np.hstack((state1,state2,state3))
print('state',state)
print(linear_state(state))
|
kkkazumi/kensyu
|
test_linear.py
|
test_linear.py
|
py
| 1,316
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36076113426
|
# instructions = open("input_day10.txt").read().strip().split("\n")
# print('there are', len(instructions), 'instructions')
# X = 1
# cycle = 0
# cycle_watchlist = [20, 60, 100, 140, 180, 220]
# signal_strengths = []
# for ins in instructions:
# print(cycle)
# if ins.split()[0] == 'noop':
# cycle += 1
# else:
# cycle += 1
# if cycle in cycle_watchlist:
# signal_strengths.append(cycle*X)
# cycle += 1
# if cycle in cycle_watchlist:
# signal_strengths.append(cycle*X)
# X += int(ins.split()[1])
# print('the signal strengths are', signal_strengths)
# answer_1 = sum(signal_strengths)
# print('the answer is', answer_1)
lines = iter([x.split() for x in open("input_day10.txt")])
cycle, busy, x, add, s1, buf = 0, 0, 1, 0, 0, ''
check_on_cycle = (20, 60, 100, 140, 180, 220)
def get_pixel(pos):
if pos in range (x,x+3): return '#'
return '.'
for i in range(240):
if not busy:
x += add
l = next(lines)
if l[0] == 'addx':
add = int(l[1])
busy += 1
else: add = 0
else: busy -= 1
if i+1 in check_on_cycle: s1 += x * (i+1)
imod = i % 40 +1
if imod == 1: buf+='\n'
buf+=get_pixel(imod)
print(f'task1: {s1}')
print(f'task2: {buf}')
|
Skaleras/AoC_22
|
day10.py
|
day10.py
|
py
| 1,348
|
python
|
en
|
code
| 0
|
github-code
|
6
|
40555963992
|
from django.urls import path
from Web.views import Index, Persons, Random, QuotesByPerson, QuotesByCategory, CategoryCreateView, PersonCreateView, QuoteCreateView, LogoutView
from Web.api_views import APIPersons, APICategories, APIQuotes, APIQuotesByPerson, APIQuotesByCategory, APIQuotesRandom
urlpatterns = [
path('', Index.as_view(), name='index'),
path('persons/', Persons.as_view(), name='persons'),
path('create/category', CategoryCreateView.as_view(), name='create_category'),
path('create/person', PersonCreateView.as_view(), name='create_person'),
path('create/quote', QuoteCreateView.as_view(), name='create_quote'),
path('qbp/<person_pk>/', QuotesByPerson.as_view(), name='quotes_by_person'),
path('qbc/<category_pk>/', QuotesByCategory.as_view(), name='quotes_by_category'),
path('random/', Random.as_view(), name='random'),
path('account/logout', LogoutView.as_view(), name='logout'),
path('api/persons/', APIPersons.as_view(), name="api_persons"),
path('api/categories/', APICategories.as_view(), name="api_categories"),
path('api/quotes/', APIQuotes.as_view(), name="api_quotes"),
path('api/qbp/<int:pk>/', APIQuotesByPerson.as_view(), name="api_quotes_by_person"),
path('api/qbc/<int:pk>/', APIQuotesByCategory.as_view(), name="api_quotes_by_category"),
path('api/quotes_random/', APIQuotesRandom.as_view(), name="api_quotes_random"),
]
|
mavenium/PyQuotes
|
Web/urls.py
|
urls.py
|
py
| 1,415
|
python
|
en
|
code
| 27
|
github-code
|
6
|
36668954335
|
from abc import ABC, abstractmethod
# enum and constants
from enum import Enum
from uuid import UUID
from time import time
import threading
class VehicleType(Enum):
# supported vehicle
CAR = 'car'
TRUCK = 'truck'
VAN = 'van'
MOTORBIKE = 'motorbike'
class ParkingSpotType(Enum):
# available spot type in parking lot
COMPACT = 'compact'
LARGE = 'large'
MOTORBIKE = 'motorbike'
ELECTRIC = 'electric'
class ParkingSpotStatus(Enum):
AVAILABLE = 'available'
UNAVAILABLE = 'unavailable'
class AccountStatus(Enum):
# account status of users
ACTIVE = 'active'
BLOCKED = 'blocked'
BANNED = 'banned'
class ParkingTicketStatus(Enum):
ACTIVE = 'active'
PAID = 'paid'
LOST = 'lost'
class ContactType:
PHONE = 'phone'
EMAIL = 'email'
# Informational class
class Address:
# to store address information of PLOT
def __init__(self, state, city, country, zip_code, street=None):
self.state = state
self.city = city
self.country = country
self.zip_code = zip_code
self.street = street
class Contact:
# TO store contact information of PLOT
def __init__(self, contact_type, name, value, sequence):
self.type = contact_type
self.name = name
self.value = value
self.sequence = sequence
# Parking spot
class ParkingSpot(ABC):
def __init__(
self, spot_number, status, base_charge, special_charge,
spot_type
):
self.spot_number = spot_number
self.status = status
self.base_charge = base_charge
self.special_charge_per_hour = special_charge
self.type = spot_type
self.allocated_vehicle = None
def set_spot_status(self, status):
self.status = status
def get_spot_status(self):
return self.status
def allocate_vehicle(self, vehicle):
self.allocated_vehicle = vehicle
self.set_spot_status(ParkingSpotStatus.UNAVAILABLE)
def remove_vehicle(self):
self.allocated_vehicle = None
self.set_spot_status(ParkingSpotStatus.AVAILABLE)
class CompactSpot(ParkingSpot):
def __init__(self, spot_number, base_charge, special_charge):
super().__init__(
spot_number, ParkingSpotStatus.AVAILABLE, base_charge,
special_charge, ParkingSpotType.COMPACT
)
class LargeSpot(ParkingSpot):
def __init__(self, spot_number, base_charge, special_charge):
super().__init__(
spot_number, ParkingSpotStatus.AVAILABLE, base_charge,
            special_charge, ParkingSpotType.LARGE
)
class MotorbikeSpot(ParkingSpot):
def __init__(self, spot_number, base_charge, special_charge):
super().__init__(
spot_number, ParkingSpotStatus.AVAILABLE, base_charge,
            special_charge, ParkingSpotType.MOTORBIKE
)
# Vehicle
class Vehicle(ABC):
def __init__(
self, vehicle_number, vehicle_type, ticket=None, color=None
):
self.number = vehicle_number
self.type = vehicle_type
self.ticket = ticket
self.color = color
def assign_ticket(self, ticket):
self.ticket = ticket
class Car(Vehicle):
def __init__(self, vehicle_number, ticket=None):
super().__init__(vehicle_number, VehicleType.CAR, ticket=ticket)
class Truck(Vehicle):
def __init__(self, vehicle_number, ticket=None):
super().__init__(vehicle_number, VehicleType.TRUCK, ticket=ticket)
class Motorbike(Vehicle):
def __init__(self, vehicle_number, ticket=None):
super().__init__(vehicle_number, VehicleType.MOTORBIKE, ticket=ticket)
class VehicleFactory:
@classmethod
def get_vehicle(cls, vehicle_number, vehicle_type):
if vehicle_type == VehicleType.CAR:
return Car(vehicle_number)
if vehicle_type == VehicleType.TRUCK:
return Truck(vehicle_number)
if vehicle_type == VehicleType.MOTORBIKE:
return Motorbike(vehicle_number)
else:
raise Exception("Unsupported vehicle type")
# Parking ticket
class Ticket:
def __init__(
self, gate_number,
payment_status=ParkingTicketStatus.ACTIVE,
):
self.ticket_number = str(int(time())) + '_' + str(gate_number)
self.payment_status = payment_status
def get_payment_status(self):
return self.payment_status
# Parking floors
class ParkingFloor:
def __init__(self, floor_number, spot_limits):
self.number = floor_number
self.spots = []
self.spot_sequence_mapping = {}
self.spot_limits = spot_limits
def add_spots(self, spot):
if spot.spot_number in self.spot_sequence_mapping:
raise Exception('This spot is already present')
current_len = len(self.spots)
if current_len == self.spot_limits:
raise Exception('Maximum limit reached')
self.spots.append(spot)
self.spot_sequence_mapping[spot.spot_number] = current_len
def remove_spot(self, spot):
if spot.spot_number not in self.spot_sequence_mapping:
raise Exception('Invalid spot number')
spot_index = self.spot_sequence_mapping.get(spot.spot_number)
del self.spot_sequence_mapping[spot.spot_number]
self.spots.pop(spot_index)
def get_total_spots(self):
return len(self.spots)
def get_available_spots_count(self, spot_types=[]):
count = 0
for spot in self.spots:
if spot_types and spot.type not in spot_types:
continue
if spot.get_spot_status() == ParkingSpotStatus.AVAILABLE:
count = count+1
return count
    def get_unavailable_spots_count(self, spot_types=()):
count = 0
for spot in self.spots:
if spot_types and spot.type not in spot_types:
continue
if spot.get_spot_status() == ParkingSpotStatus.UNAVAILABLE:
count = count + 1
return count
    def get_first_free_spot(self, spot_type_list=()):
        # Return the first spot of an acceptable type that is actually free.
        for spot in self.spots:
            if (spot.type in spot_type_list
                    and spot.get_spot_status() == ParkingSpotStatus.AVAILABLE):
                return spot
        return None
@property
def is_full(self):
for spot in self.spots:
if spot.status == ParkingSpotStatus.AVAILABLE:
return False
return True
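# Illustrative floor setup (a sketch; the numbers are made up):
#   floor = ParkingFloor(floor_number=0, spot_limits=100)
#   floor.add_spot(CompactSpot(spot_number=1, base_charge=4, special_charge=2))
#   floor.get_available_spots_count()                         # -> 1
#   floor.get_available_spots_count([ParkingSpotType.LARGE])  # -> 0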
class ParkingLot:
# singleton ParkingLot to ensure only one object of ParkingLot in the
# system
instance = None
class __OnlyOne:
def __init__(self, name, floor_limits):
self.name = name
self.addresses = []
self.contacts = []
self.floors = []
self.floor_sequence_mapping = {}
self.floor_limits = floor_limits
self.entrance_panels = [1, 2, 3] # for example
            self.exit_panels = [1, 2, 3]  # for example
self.lock = threading.Lock()
def add_plot_address(self, address):
self.addresses.append(address)
def add_contacts(self, contact):
self.contacts.append(contact)
def add_floor(self, floor):
if floor.number in self.floor_sequence_mapping:
raise Exception('This floor is already present')
curr_floor_size = len(self.floors)
if curr_floor_size == self.floor_limits:
raise Exception('Maximum limit reached')
self.floors.append(floor)
self.floor_sequence_mapping[floor.number] = curr_floor_size
def remove_floor(self):
pass
        def get_free_spot(self, spot_types=()):
for floor in self.floors:
free_spot = floor.get_first_free_spot(spot_type_list=spot_types)
if free_spot:
return free_spot
raise Exception("No available slots")
        def generate_ticket(self, vehicle_number, vehicle_type, gate_number=0):
            # gate_number (an assumed parameter, defaulting to 0) identifies
            # the issuing gate for the ticket id, which Ticket requires.
            # Hold the lock for the whole allocation so two gates cannot
            # grab the same spot, and so it is released even on error.
            with self.lock:
                if self.is_full:
                    raise Exception("Parking full")
                ticket = Ticket(gate_number)
                vehicle = VehicleFactory.get_vehicle(
                    vehicle_number, vehicle_type
                )
                vehicle.assign_ticket(ticket)
                first_free_spot = self.get_free_spot(
                    self._spot_types(vehicle_type)
                )
                first_free_spot.allocate_vehicle(vehicle)
                return ticket
        def _spot_types(self, vehicle_type):
            # Map a vehicle to the spot types it may occupy, using
            # ParkingSpotType members so the values match spot.type in
            # get_free_spot: motorbikes fit anywhere, cars need compact or
            # large spots, trucks need large spots.
            if vehicle_type == VehicleType.MOTORBIKE:
                return [
                    ParkingSpotType.MOTORBIKE, ParkingSpotType.COMPACT,
                    ParkingSpotType.LARGE
                ]
            if vehicle_type == VehicleType.CAR:
                return [ParkingSpotType.COMPACT, ParkingSpotType.LARGE]
            if vehicle_type == VehicleType.TRUCK:
                return [ParkingSpotType.LARGE]
            return []
@property
def is_full(self):
for floor in self.floors:
if not floor.is_full:
return False
return True
def __init__(self, name, floor_limit):
if not ParkingLot.instance:
ParkingLot.instance = ParkingLot.__OnlyOne(name, floor_limit)
else:
ParkingLot.instance.name = name
            ParkingLot.instance.floor_limits = floor_limit
def __getattr__(self, name):
return getattr(self.instance, name)
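# Because __init__ reuses the shared __OnlyOne object, every ParkingLot value
# is a facade over the same state. Illustrative check (names are made up):
#   lot_a = ParkingLot("Downtown", floor_limit=5)
#   lot_b = ParkingLot("Uptown", floor_limit=5)
#   lot_a.instance is lot_b.instance   # -> True (and both are named "Uptown")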
class UserDetails:
    # Stores a user's personal details.
    def __init__(self, name, addresses=None, contacts=None):
        self.name = name
        # None defaults instead of mutable ones, so instances never share
        # the same address/contact lists.
        self.addresses = addresses if addresses is not None else []
        self.contacts = contacts if contacts is not None else []
class Account:
    # Stores user account-related information.
def __init__(self, username, password, user_details, status):
self.username = username
self.password = password
self.user_info = user_details
self.status = status
self.parking_lot = ParkingLot(name="XXXX", floor_limit=10).instance
class Admin(Account):
def __init__(
self, username, password, user_details,
status=AccountStatus.ACTIVE
):
super().__init__(username, password, user_details, status)
def add_floor(self, floor_number):
floor = ParkingFloor(floor_number, 100)
self.parking_lot.add_floor(floor)
def add_spot(
self, floor, spot_number, base_charge, special_charge_per_hour,
spot_type
):
spot = ParkingSpot(
spot_number, ParkingSpotStatus.AVAILABLE, base_charge,
special_charge_per_hour, spot_type
)
floor.add_spot(spot)
class ParkingAttendant(Account):
def __init__(
self, username, password, user_details,
status=AccountStatus.ACTIVE
):
super().__init__(username, password, user_details, status)
    def generate_ticket(self, vehicle_number, vehicle_type, gate_number=0):
        # gate_number mirrors the assumed parameter on the lot's method.
        return self.parking_lot.generate_ticket(
            vehicle_number, vehicle_type, gate_number
        )
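# End-to-end sketch (hypothetical wiring; accounts and numbers are made up):
#   admin = Admin("admin", "secret", UserDetails("Alice"))
#   admin.add_floor(0)
#   admin.add_spot(
#       admin.parking_lot.floors[0], 1, 4, 2, ParkingSpotType.COMPACT
#   )
#   attendant = ParkingAttendant("gate1", "secret", UserDetails("Bob"))
#   ticket = attendant.generate_ticket("KA-01-1234", VehicleType.CAR)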
repo_name: manofsteel-ab/design-patterns | sub_path: oo_design/parking_lot.py | file_name: parking_lot.py | file_ext: py | file_size_in_byte: 11034 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6

seq_id: 35326364696
import turtle


def draw_flower(animal):
    # Draw one four-sided petal with the given turtle.
    animal.right(20)
    animal.forward(60)
    animal.right(20)
    animal.forward(60)
    animal.right(100)
    animal.forward(60)
    animal.right(20)
    animal.forward(60)


def draw_something(animal):
    # Draw a narrow triangle, then finish with a petal.
    animal.forward(100)
    animal.right(150)
    animal.forward(200)
    animal.right(150)
    draw_flower(animal)
    # draw_something(animal)


def draw():
    window = turtle.Screen()
    window.bgcolor("pink")

    gato = turtle.Turtle()
    perro = turtle.Turtle()
    pez = turtle.Turtle()

    # turtle.speed() clamps arguments above 10 to 0, which means "fastest",
    # so speed(1000) behaves like speed(0).
    gato.speed(1000)
    perro.speed(1000)

    perro.shape("arrow")
    gato.shape("arrow")
    perro.color("red")
    gato.color("blue")

    # Turn each turtle a little between repetitions so the petals fan out
    # into a flower shape.
    for i in range(1, 35):
        draw_flower(perro)
        draw_something(perro)
        perro.right(29)
        draw_flower(gato)
        draw_something(gato)
        gato.right(35)

    # Draw the stem with the third turtle.
    pez.right(180)
    pez.forward(60)
    pez.right(270)
    pez.forward(400)

    window.exitonclick()


draw()
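# A possible refactor (a sketch, not part of the original lesson): the two
# turtle loops differ only in their turn angle, so one parameterized helper
# could drive both:
#   def draw_fan(animal, turn, repetitions=34):
#       for _ in range(repetitions):
#           draw_flower(animal)
#           draw_something(animal)
#           animal.right(turn)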
repo_name: RodolfoFerro/muk | sub_path: Lesson03/flower.py | file_name: flower.py | file_ext: py | file_size_in_byte: 1016 | program_lang: python | lang: en | doc_type: code | stars: 0 | dataset: github-code | pt: 6