Dataset schema (29 columns):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2-616)
content_id: string (length 40)
detected_licenses: list (length 0-69)
license_type: string (2 classes)
repo_name: string (length 5-118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4-63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k-686M, nullable)
star_events_count: int64 (0-209k)
fork_events_count: int64 (0-110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2-10.3M)
extension: string (246 classes)
content: string (length 2-10.3M)
authors: list (length 1)
author_id: string (length 0-212)
d6f400c8e900cfa2adabecd58ef963c067396287
|
1abdbf49bf7c75ebf75f6d30e6c04747c84b927d
|
/models/pacient/profession_history.py
|
4619c59e2a40a8c1852547525dc9b78a032afd9a
|
[] |
no_license
|
JoseMOrellana/neuro_app_api
|
160577a0de1efa20934c1ee150d34abb591295ee
|
46797375afc66392452a08f28ee6ebee716d8c14
|
refs/heads/master
| 2022-11-20T01:18:46.880451
| 2020-07-24T13:33:07
| 2020-07-24T13:33:07
| 262,230,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 844
|
py
|
from db import Column, Integer, ForeignKey, String, Date
from models.abstract_models import BaseModel
from .profession_type import ProfessionTypeModel


class ProfessionHistoryModel(BaseModel):
    __tablename__ = "profesion"

    profession_type_id = Column("tipo_profesiones_id", Integer, ForeignKey(ProfessionTypeModel.id))
    pacient_id = Column("pacientes_id", Integer, ForeignKey('pacientes.id'))
    start = Column("inicio", Date)
    end = Column("fin", Date)

    def __init__(self, profession_type_id, pacient_id, start=None, end=None):
        self.profession_type_id = profession_type_id
        self.pacient_id = pacient_id
        self.start = start
        self.end = end

    @classmethod
    def find_by_pacient_id(cls, pacient_id: str) -> "list[ProfessionHistoryModel]":
        return cls.query.filter_by(pacient_id=pacient_id).all()
|
[
"jose.mom.1304@gmail.com"
] |
jose.mom.1304@gmail.com
|
a57e3e77d893b00ed27294b3bfd85f27a0e37ff0
|
704fda0d0e05f66f0c4f3c17cc4b39e2b0bc6220
|
/homework5/task1.py
|
abca4dc5606ab449987bfe8f42e4a8d55ed56536
|
[] |
no_license
|
humantom88/geekbrains-python-basics
|
71eae1be4f8b1d757db17200d64d2b14ea51d83f
|
ebde8e0a9b92386f2a5b994a880184adde8c7454
|
refs/heads/master
| 2021-05-17T10:48:56.788147
| 2020-04-23T09:51:10
| 2020-04-23T09:51:10
| 250,743,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
# 1. Programmatically create a file in text format and write the data
# entered by the user into it, line by line.
# An empty line signals the end of input.
print('Enter text: ')
with open('task1.txt', 'w') as f:
    is_input_over = False
    while not is_input_over:
        user_string = input()
        if len(user_string) == 0:
            is_input_over = True
        f.write(user_string + '\n')
|
[
"Belov.A.Andr@sberbank.ru"
] |
Belov.A.Andr@sberbank.ru
|
aca8286bd93b871eb0b572ac027266f46511234a
|
7405aff080f8f9f915c1989dc7eb5a16d0e3c54c
|
/data_viz.py
|
1a9f146a9a1a844f9ab1678c4b80909a02920f11
|
[] |
no_license
|
peter-haferl/reddit_hockey_sentiment_analysis
|
9dc3cbca3e7f05b0550fdcfda7bc30c733e7aa17
|
a341f8cdd623758fa3eedcb8dc9c4a3c3f9b3596
|
refs/heads/master
| 2022-03-30T18:43:45.113783
| 2020-01-30T21:32:30
| 2020-01-30T21:32:30
| 212,584,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,092
|
py
|
'''
Functions for visualizing data for EDA
'''
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from IPython.display import display, Markdown
from sklearn.preprocessing import MinMaxScaler
# game_data = ['num_comments', 'num_fights', 'attendance', 'len_game', 'corsi_for%',
# 'fenwick_for%', 'faceoff%', 'off_zone_start%', 'pdo', 'total_sog',
# 'total_pp_goals', 'total_goals', 'total_pp_opportunity', 'total_sh_goals',
# 'sog_diff', 'pp_goals_diff', 'goal_diff', 'pp_opportunity_diff', 'sh_goals_diff',
# 'sog_norm_diff', 'pp_goals_norm_diff', 'goal_norm_diff',
# 'pp_opportunity_norm_diff','sh_goals_norm_diff', 'ot/so', 'comeback?', 'win/loss']
game_data = ['num_comments', 'num_fights', 'attendance', 'len_game', 'corsi_for%',
'fenwick_for%', 'faceoff%', 'off_zone_start%', 'pdo', 'total_sog',
'total_pp_goals', 'total_goals', 'total_pp_opportunity', 'total_sh_goals',
'sog_diff', 'pp_goals_diff', 'goal_diff', 'pp_opportunity_diff', 'sh_goals_diff',
'ot/so', 'comeback?', 'win/loss']
def plot_team_effect_binary_data(data, statistic=None, game_venue='home'):
'''Plot Means of binary data stats ('win/loss', 'comeback?')
'''
if game_venue == 'home':
home_data = data.groupby(by='home_team').mean()[statistic]
plt.figure(figsize=(25,10))
sns.scatterplot(x='home_team', y=statistic, data=home_data.reset_index(drop=False),
s=400, marker='s', color='darkblue')
plt.hlines(y=home_data.mean(), xmin=-1, xmax=30.4, colors='red',
label='Population Mean', linewidth=2)
elif game_venue == 'away':
away_data = data.groupby(by='away_team').mean()[statistic]
plt.figure(figsize=(25,10))
sns.scatterplot(x='away_team', y=statistic, data=away_data.reset_index(drop=False),
s=400, marker='s', color='darkblue')
plt.hlines(y = away_data.mean(), xmin=-1, xmax=30.4, colors='red',
label='Population Mean', linewidth=2)
elif game_venue == 'all':
home_data = data.groupby(by='home_team').mean()[statistic]
away_data = data.groupby(by='away_team').mean()[statistic]
if statistic == 'win/loss':
away_data = away_data.map(lambda x: 1-x)
all_data = (home_data + away_data)/2
all_data.rename_axis('team', inplace=True)
plt.figure(figsize=(25,10))
sns.scatterplot(x='team', y=statistic, data=all_data.reset_index(drop=False),
s=400, marker='s', color='darkblue')
plt.hlines(y = all_data.mean(), xmin=-1, xmax=30.4, colors='red',
label='Population Mean', linewidth=2)
title_font = {'size':'22',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
axis_font = {'size':'20',
'weight': 'medium'}
tick_font = {'size': '16',
'weight': 'medium'}
statistic = statistic.replace('_', ' ').upper()
plt.title(f'Team Effect on {statistic} in {game_venue.title()} Games', **title_font)
plt.ylabel(f'{statistic}', **axis_font)
plt.xlabel('Team', **axis_font)
plt.xticks(**tick_font)
plt.yticks(**tick_font);
def plot_team_effect(data, statistic=None, game_venue='all', kind='boxplot'):
'''Plot distribution of game statistics in which a given team plays
statistic = string of game statistic (e.g total_goals, sog_diff). Default = None.
game_venue = 'home' or 'away' or 'all'. Selects venue to show game stats from (i.e.
'home' shows game stats for all home games of a given team)
kind = 'boxplot' or 'violinplot' or 'stripplot'. Selects which type of distribution
plot to display.
'''
if game_venue == 'all':
teams = sorted(list(data.home_team.unique()))
team_data = dict()
for team in teams:
one_team_data = data.loc[(data.home_team == team) | (data.away_team == team)].reset_index(drop=True)
team_data[team] = one_team_data[statistic]
team_data = pd.DataFrame(team_data)
if kind == 'boxplot':
plt.figure(figsize=(25,10))
sns.boxplot(data=team_data)
plt.hlines(y=data[statistic].median(), xmin=-1, xmax=30.4, colors='red',
label='Population Median', linewidth=2)
plt.legend(loc=(0,0.95), fontsize=16, edgecolor=None)
elif kind == 'violinplot':
plt.figure(figsize=(25,10))
sns.violinplot(data=team_data)
plt.hlines(y=data[statistic].median(), xmin=-1, xmax=30.4, colors='red',
label='Population Median', linewidth=2)
plt.legend(loc=(0,0.95), fontsize=16, edgecolor=None)
elif kind == 'stripplot':
plt.figure(figsize=(25,10))
sns.stripplot(data=team_data, jitter=0.35)
plt.hlines(y=data[statistic].median(), xmin=-1, xmax=30.4, colors='red',
label='Population Median', linewidth=2)
plt.legend(loc=(0,0.95), fontsize=16, edgecolor=None)
else:
raise KeyError('Invalid Plot Kind')
elif game_venue in ('home', 'away'):
if kind == 'boxplot':
plt.figure(figsize=(25,10))
sns.boxplot(x=f'{game_venue}_team', y=statistic,
data=data.sort_values(by=f'{game_venue}_team'))
plt.hlines(y=data[statistic].median(), xmin=-1, xmax=30.4, colors='red',
label='Population Median', linewidth=2)
plt.legend(loc=(0,0.95), fontsize=16, edgecolor=None)
elif kind == 'violinplot':
plt.figure(figsize=(25,10))
sns.violinplot(x=f'{game_venue}_team', y=statistic,
data=data.sort_values(by=f'{game_venue}_team'))
plt.hlines(y=data[statistic].median(), xmin=-1, xmax=30.4, colors='red',
label='Population Median', linewidth=2)
plt.legend(loc=(0,0.95), fontsize=16, edgecolor=None)
elif kind == 'stripplot':
plt.figure(figsize=(25,10))
sns.stripplot(x=f'{game_venue}_team', y=statistic, jitter=0.35,
data=data.sort_values(by=f'{game_venue}_team'))
plt.hlines(y=data[statistic].median(), xmin=-1, xmax=30.4, colors='red',
label='Population Median', linewidth=2)
plt.legend(loc=(0,0.95), fontsize=16, edgecolor=None)
else:
raise KeyError('Invalid Plot Kind')
else:
raise KeyError('Invalid game_venue')
title_font = {'size':'22',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
axis_font = {'size':'20',
'weight': 'medium'}
tick_font = {'size': '16',
'weight': 'medium'}
sns.despine(offset=0, trim=True)
statistic = statistic.replace('_', ' ').upper()
plt.title(f'Team Effect on {statistic} in {game_venue.title()} Games', **title_font)
plt.ylabel(f'{statistic}', **axis_font, labelpad=5)
plt.xlabel('Team', **axis_font, labelpad=5)
plt.xticks(**tick_font)
plt.yticks(**tick_font);
def plot_team_specific_stats(data, statistic=None, against=False):
'''Plot distribution of team-specific statistics
statistic = string of statistic that is available for both home and away (e.g corsi,
goals, sog, etc). Default = None.
against = Boolean. False will show distribution of statistic for a given team, True will
show distribution of statistic against a given team. Default = False.
'''
teams = list(data.home_team.unique())
team_data = dict()
for team in teams:
if against == False:
home_statistic = data.loc[(data.home_team == team)]['home_' + statistic]
away_statistic = data.loc[(data.away_team == team)]['away_' + statistic]
elif against == True:
home_statistic = data.loc[(data.home_team == team)]['away_' + statistic]
away_statistic = data.loc[(data.away_team == team)]['home_' + statistic]
statistic_data = pd.concat([home_statistic, away_statistic], ignore_index=True)
team_data[team] = statistic_data
team_data = pd.DataFrame(team_data)
plt.figure(figsize=(25,10))
sns.boxplot(data=team_data)
population_stat_data = (list(data['home_' + statistic]) +
(list(data['away_' + statistic])))
plt.hlines(y=np.median(population_stat_data), xmin=-1, xmax=30.4,
colors='red', label='Population Median', linewidth=2)
plt.legend(loc=(0,0.95), fontsize=16, edgecolor=None)
title_font = {'size':'22',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
axis_font = {'size':'20', 'weight': 'medium'}
tick_font = {'size': '16', 'weight': 'medium'}
sns.despine(offset=0, trim=True)
if against == False:
statistic = statistic + '_for'
elif against == True:
statistic = statistic + '_against'
statistic = statistic.replace('_', ' ').upper()
plt.title(f'Team Effect on {statistic} in Games', **title_font)
plt.ylabel(f'{statistic}', **axis_font, labelpad=5)
plt.xlabel('Team', **axis_font, labelpad=5)
plt.xticks(**tick_font)
plt.yticks(**tick_font);
def plot_numeric_features_dist(data, kind='boxplot', size=(12,8), normalize=True):
'''
Plot distributions of numeric features within a DataFrame
size = tuple with desired size of output figure (width, height)
normalize = Boolean (Default = True). If True, numerical data is normalized using
sklearn.preprocessing.MinMaxScaler(). If False, numerical data is not altered.
'''
columns = []
for column in data.columns:
if (data[column].dtype != 'O'):
columns.append(column)
if normalize == True:
scaler = MinMaxScaler()
scaled_data = scaler.fit_transform(data[columns])
data = pd.DataFrame(scaled_data, columns=columns)
if kind == 'boxplot':
plt.figure(figsize=(size))
sns.boxplot(data=data[columns])
elif kind == 'violinplot':
plt.figure(figsize=(size))
sns.violinplot(data=data[columns], scale='count', cut=0)
elif kind == 'stripplot':
plt.figure(figsize=(size))
sns.stripplot(data=data[columns], jitter=0.25)
title_font = {'size':'18',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
tick_font = {'size': '12',
'weight': 'medium'}
if normalize == True:
plt.title(f'Normalized Distribution of Numeric Features', **title_font)
elif normalize == False:
plt.title(f'Distribution of Numeric Features', **title_font)
formatted_x_ticks = [x.replace('_', ' ').replace(' polarity', '').title() for x in columns]
plt.yticks(**tick_font)
plt.xticks(ticks=plt.xticks()[0], labels=formatted_x_ticks, rotation=60, ha='right', **tick_font);
def plot_game_binary_data(data, size=(16, 5)):
'''
Plot distributions of binary/ordinal game data
'''
title_font = {'size':'16',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
axis_font = {'size':'14', 'weight': 'medium'}
tick_font = {'size': '16', 'weight': 'medium'}
fig, axs = plt.subplots(1, len(data.columns), figsize=size)
for i, column in enumerate(data.columns):
axs[i].hist(data[column])
column = column.upper()
axs[i].set_title(column, **title_font)
axs[0].set_ylabel('Frequency', **axis_font);
def plot_team_effect_comment_fandom(comment_data, size=(12,6)):
'''
Plots percentage of comments from a team fanbase that are in game threads that their respective team played in
comment_data = output of clean_data.extract_comment_data()
'''
nhl_abbreviations = ['CBJ', 'TBL', 'MTL', 'FLA', 'NSH', 'SJS', 'LAK', 'BOS', 'COL', 'OTT', 'DAL', 'CGY', 'MIN',
'NYI', 'NYR', 'BUF', 'WSH', 'PIT', 'WPG', 'VAN', 'PHI', 'CHI', 'CAR', 'ANA', 'EDM', 'STL',
'DET', 'TOR', 'NJD', 'ARI', 'VGK']
title_font = {'size':'20',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
axis_font = {'size':'16', 'weight': 'medium'}
tick_font = {'size': '14', 'weight': 'medium'}
plt.figure(figsize=size)
comment_data.groupby('flair').mean().loc[nhl_abbreviations]['fan'].plot(kind='bar')
plt.hlines(y=np.mean(comment_data.groupby('flair').mean().loc[nhl_abbreviations]['fan']), xmin=0, xmax=31, color='red')
plt.title(label='Percentage of Fan Comments by Team', **title_font)
plt.xlabel('Team', **axis_font)
plt.ylabel('Percentage of Fan Comments', **axis_font)
plt.xticks(**tick_font)
plt.yticks(**tick_font);
def plot_scoring_dist(comment_data, scoring=None, size=(10, 6)):
'''
Plot distributions of binary/ordinal game data
'''
perc_neutral = np.round(len(comment_data.loc[comment_data[scoring] == 0])/len(comment_data),
decimals=3)
title_font = {'size':'16',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
axis_font = {'size':'14', 'weight': 'medium'}
tick_font = {'size': '12', 'weight': 'medium'}
plt.figure(figsize=size)
comment_data[scoring].plot(kind='hist', bins=100)
plt.ylabel('Frequency', **axis_font)
scoring = scoring.replace('_', ' ').title()
plt.xlabel(f'{scoring}', **axis_font)
plt.xticks(**tick_font)
plt.yticks(**tick_font)
plt.show();
print(f'Percentage of neutral comments: {perc_neutral}')
def plot_nlp_heatmap(thread_data, size=(14,17)):
'''
Plot heatmap showing correlation between sentiment scores and game features
'''
nlp_data = ['fan%', 'vader_comment',
'socialsent_comment', 'vader_context',
'socialsent_context', 'positive_context', 'non-fan_context',
'no_ref_context']
corr_data = thread_data[game_data + nlp_data].corr().applymap(lambda x: np.round(x, 2))
corr_data = corr_data.drop(index=nlp_data)[nlp_data]
color_bar = {
'shrink': 0.5,
'pad': 0.01
}
title_font = {'size':'20',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
axis_font = {'size':'20',
'weight': 'medium'}
tick_font = {'size': '12',
'weight': 'medium'}
plt.figure(figsize=size)
sns.heatmap(corr_data, annot=True, linewidths=0.1,
square=False, cbar=True, cbar_kws=color_bar,
vmin=-1, vmax=1, cmap="RdBu_r");
plt.xticks(rotation=30, ha='right', **tick_font)
plt.yticks(rotation=0, ha='right', **tick_font)
plt.title('Correlation of Game Features with Sentiment Scores', **title_font);
def plot_game_feature_heatmap(total_data):
'''
Plot showing correlation between game features
'''
corr_data = total_data[game_data].corr().applymap(lambda x: np.round(x, 2))
mask = np.zeros_like(corr_data)
mask[np.triu_indices_from(mask)] = True
color_bar = {
'shrink': 0.5,
'pad': 0.01
}
title_font = {'size':'20',
'color':'black',
'weight':'medium',
'verticalalignment':'bottom'}
axis_font = {'size':'20',
'weight': 'medium'}
tick_font = {'size': '12',
'weight': 'medium'}
plt.figure(figsize=(17,17))
sns.heatmap(corr_data, annot=True, linewidths=0.1,
square=False, cbar=True, cbar_kws=color_bar,
vmin=-1, vmax=1, cmap="RdBu_r", mask=mask);
plt.xticks(rotation=90, ha='right', **tick_font)
plt.yticks(rotation=0, ha='right', **tick_font)
plt.title('Correlation of Game Features', **title_font);
|
[
"peter.haferl@gmail.com"
] |
peter.haferl@gmail.com
|
310e69fe2834a6bb85c8c684f7ddfd3ff3c07bea
|
ccd2851c8eec6c5b9175afc847bf300fe6a0d3d0
|
/ninfo_plugins_splunk_bro/bro_user_agents/bro_user_agents_plugin.py
|
b02bf1548eb075314bdd7e21ad2a9408af32e0e1
|
[] |
no_license
|
JustinAzoff/ninfo_plugins_splunk_bro
|
fdeb6eda7fd7af4aa8208481aef2547af5461d86
|
526f6f3541ac87165f8c5d0c97019247569d03ae
|
refs/heads/master
| 2020-09-12T20:33:00.326776
| 2014-02-18T14:03:49
| 2014-02-18T14:03:49
| 14,694,629
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
from ninfo.helpers.splunk import SplunkBase


class useragents(SplunkBase):
    """This plugin returns recently seen user-agents as detected by Bro for this IP"""
    name = "bro_user_agents"
    title = "Recent User Agents"
    description = "Recently seen HTTP User Agents"
    types = ['ip', 'ip6']
    TEMPLATE = "hoursago=48 source=*software.log* %s | dedup unparsed_version | fields unparsed_version"


plugin_class = useragents
|
[
"justin.azoff@gmail.com"
] |
justin.azoff@gmail.com
|
eecdec997a08d29cce86c2862764075dd2b29b75
|
591ee12e20e0ce1cdda1e33441d55ba6fce27566
|
/fermat.py
|
4c0d0614ad2bb64aa93255b13ec61f3a2b89c96f
|
[
"MIT"
] |
permissive
|
Seaoftrees08/project479_python
|
1fa226f316c22208255aa75a4af297347d7ec9e7
|
464c246644e67bccb78b9ca25c02f3f98437ee4a
|
refs/heads/master
| 2020-05-23T11:09:30.258286
| 2019-05-15T03:35:34
| 2019-05-15T03:35:34
| 186,574,503
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
import time
import sqrtlib

# main: 250,000 trials/sec
try:
    n = int(input(">> "))
    y = 1
    start = time.time()
    if n % 2 != 0:
        x = sqrtlib.sqrtInt(n) + 1
        while not sqrtlib.isSqr(x*x - n):
            x += 1
        y = sqrtlib.sqrtInt(x*x - n)
        print("result: " + str(x-y) + " " + str(x+y))
    else:
        print("result: 2 " + str(n >> 1))
    print("time: " + str(time.time() - start))
    print("Number of trials: " + str(y))
except ValueError:
    print("You can only input a number.")
|
[
"tasuzuya@gmail.com"
] |
tasuzuya@gmail.com
|
6eddd42a5519b03d2935288ced48109a65ebb949
|
e4868d19343a26e17896aa30246741d6e9b02797
|
/receive.py
|
93b67f5339b5b9ade23122941679b43734111d21
|
[] |
no_license
|
georgeredinger/WiFly_SPI_UDP
|
68fd35fa549e494d6562022ff388491332a027a3
|
9b9c180968fbc956e39e971158254d2de4548dc3
|
refs/heads/master
| 2020-06-26T03:20:58.175217
| 2013-05-23T14:22:46
| 2013-05-23T14:22:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
import select, socket

# port = 5005  # where do you expect to get a msg?
port = 55555  # where do you expect to get a msg?
bufferSize = 1024  # whatever you need

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('<broadcast>', port))
s.setblocking(0)

while True:
    result = select.select([s], [], [])
    msg = result[0][0].recv(bufferSize)
    print(msg)
|
[
"george@georgeredinger.com"
] |
george@georgeredinger.com
|
baf42a120fbd7ad0269031a40b68a46cc11a055a
|
c6391327d2876f02c5277d9d058a11086f7dcf3d
|
/server/src/objects/__init__.py
|
9161de3c0b3471758ff087fd16cfbe093a7d573c
|
[] |
no_license
|
strands-project/qrobot
|
680b8bb74587408aeb68500c997edd639a1bde67
|
59f7e96211ea7328ba334da261f0e62bfe8f1cc0
|
refs/heads/master
| 2021-03-29T18:23:12.280196
| 2016-07-01T13:50:23
| 2016-07-01T13:50:23
| 59,102,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
# from flask_classful import FlaskView
# from app.objects.models import Object
# from app.helpers import send_data, jsonify
# class ObjectsView(FlaskView):
# trailing_slash = False
# def index(self):
# return jsonify(Object.objects.exclude('pcd'))
# def pcd(self, id):
# obj = Object.get_by_id(id)
# if obj:
# return send_data(obj.pcd, 'application/pcd')
# return 'Not found', 404
|
[
"alexandrov88@gmail.com"
] |
alexandrov88@gmail.com
|
509fee19ac22160dda30942e1f3bc043af305a98
|
7584585becdb21bd13a64340fe484e9ff09b2661
|
/another_project.py
|
131ed5859423001ee7469c86014daacff81e8ea5
|
[] |
no_license
|
LahavNanovel/hello-world-usage
|
cc55fa4e5c79a6aeed18af5ca95b8c06c91431b0
|
f0bb8124e021d133c7454f673f868bbb5e8e27dd
|
refs/heads/master
| 2023-03-09T11:57:21.252352
| 2021-02-22T12:42:44
| 2021-02-22T12:42:44
| 341,181,686
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
from helloworld.main import print_hi
print_hi()
|
[
"lahav@nanovel.co.il"
] |
lahav@nanovel.co.il
|
5033788886ca0a031be15dcc3ead9afa0c97c340
|
22e37de74bbd859791dccb147284527ebd6674e3
|
/image_handler.py
|
05ccb9bc709d2e4bbcbc61b4e59867ebdab9528d
|
[] |
no_license
|
fnrcum/pjing_client
|
719fd997beafb65d4490032a414124dad093c19f
|
b0dc51d7dd8da969544844b3c19773a816aff600
|
refs/heads/master
| 2020-07-26T12:30:06.307678
| 2016-11-15T12:40:58
| 2016-11-15T12:40:58
| 73,721,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,959
|
py
|
import pygame
class ImageHandler:
def displayImage(self, screen, px, topleft, prior):
# ensure that the rect always has positive width, height
x, y = topleft
width = pygame.mouse.get_pos()[0] - topleft[0]
height = pygame.mouse.get_pos()[1] - topleft[1]
if width < 0:
x += width
width = abs(width)
if height < 0:
y += height
height = abs(height)
# eliminate redundant drawing cycles (when mouse isn't moving)
current = x, y, width, height
if not (width and height):
return current
if current == prior:
return current
# draw transparent box and blit it onto canvas
screen.blit(px, px.get_rect())
im = pygame.Surface((width, height))
im.fill((128, 128, 128))
pygame.draw.rect(im, (32, 32, 32), im.get_rect(), 1)
im.set_alpha(128)
screen.blit(im, (x, y))
pygame.display.flip()
# return current box extents
return x, y, width, height
def setup(self, path):
px = pygame.image.load(path)
rect = px.get_rect()
wg = rect[2]
hg = rect[3]
pygame.font.SysFont("Arial", 16)
screen = pygame.display.set_mode([wg, hg], pygame.FULLSCREEN)
screen.blit(px, px.get_rect())
pygame.display.flip()
return screen, px
def mainLoop(self, screen, px):
topleft = bottomright = prior = None
end = False
while not end:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP:
if not topleft:
topleft = event.pos
else:
bottomright = event.pos
end = True
if topleft:
prior = self.displayImage(screen, px, topleft, prior)
return topleft + bottomright
|
[
"nicolae.farcas@softvision.ro"
] |
nicolae.farcas@softvision.ro
|
7cad7992eeac85db66f3ce5104c3ea5578c661fc
|
892ff205c217f3ff0a0e3aef17aa037828b0383a
|
/IMDB Scraper/updateDB.py
|
e4b679138418d99bb44767cba1564febfab37519
|
[
"MIT"
] |
permissive
|
MTashreeqWaggie/Video-Store-Management-System
|
f73cce2a268344511735d73ddb8fdb9da98db8cb
|
72ccccd9541780d0a416b73a65ad0158be58bd32
|
refs/heads/main
| 2023-04-29T01:42:33.834367
| 2021-05-23T19:24:14
| 2021-05-23T19:24:14
| 283,135,666
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
from store.models import Product

Product.objects.update(poster="")

f = open("forDownloads.tsv", "r")
movies = f.read().splitlines()

for i in range(0, len(movies)):
    if i == 0:
        continue
    lst = movies[i].split("\t")
    id = lst[0]
    relativePath = "/store/static/images/" + id + ".jpg"  # dean to explain this part
    print(relativePath)
    p = Product.objects.get(ID=id)
    print(p)
    p.poster = relativePath
    p.save()
|
[
"TashreeqWaggiie@gmail.com"
] |
TashreeqWaggiie@gmail.com
|
4c795c026fff52696fabaad9bcac64c4f44ed866
|
2ad59884af3eef1d1e691ce3619627d2770f3788
|
/website/theses/migrations/0009_auto_20170805_0851.py
|
e3adc63aaab0bb7a4b77e7ca88d4ce4f3f723b39
|
[] |
no_license
|
ea-czech-republic/efektivnialtruismus.cz
|
ee9058021bc86cda2ab52c7988f9c85e9605865b
|
77e142e0689eab491493109c804c3bc9c0bf6d58
|
refs/heads/master
| 2022-03-22T21:47:53.383690
| 2022-02-22T13:02:02
| 2022-02-22T13:02:02
| 72,468,463
| 14
| 20
| null | 2022-01-24T22:35:25
| 2016-10-31T18:57:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-05 08:51
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('theses', '0008_auto_20170717_2010'),
]
operations = [
migrations.AddField(
model_name='thesisindexpage',
name='our_topics',
field=wagtail.core.fields.StreamField((('rawHtml', wagtail.core.blocks.RawHTMLBlock()), ('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock())), default=None),
preserve_default=False,
),
migrations.AddField(
model_name='thesisindexpage',
name='process',
field=wagtail.core.fields.StreamField((('rawHtml', wagtail.core.blocks.RawHTMLBlock()), ('heading', wagtail.core.blocks.CharBlock(classname='full title')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('embed', wagtail.embeds.blocks.EmbedBlock())), default=None),
preserve_default=False,
),
]
|
[
"hnykda@gmail.com"
] |
hnykda@gmail.com
|
b1480d429e722377d42d9397f4d3dd57384079cc
|
0add471d82edab23894421dc17429da39b92cc73
|
/heaviside/ast.py
|
db8d24717141b5929f91b3a0e6c2f4d5e1c6f583
|
[
"Apache-2.0"
] |
permissive
|
MHova/heaviside
|
fac09ae7608306665ee01a46baa2197dc81d649d
|
9ee9e69343c58124b8c7a119888a195794978cd6
|
refs/heads/master
| 2020-05-30T10:08:11.110234
| 2019-05-30T20:16:52
| 2019-05-30T20:16:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,872
|
py
|
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from collections import OrderedDict
from funcparserlib.parser import (some, a, skip)
from .lexer import Token
from .exceptions import CompileError
from .utils import isstr
#################################################
#### Load AWS Service Integration Definitions####
#################################################
import os
import json
cur_dir = os.path.dirname(__file__)
definitions = os.path.join(cur_dir, 'aws_services.json')
with open(definitions, 'r') as fh:
AWS_SERVICES = json.load(fh)
#################################################
#################################################
# AST Objects
class ASTNode(object):
def __init__(self, token=None):
if token is not None:
self.token = token
else:
self.token = None
@property
def lineno(self):
return self.token.start[0] if self.token else 0
@property
def pos(self):
return self.token.start[1] if self.token else 0
@property
def line(self):
return self.token.line if self.token else ''
def raise_error(self, msg):
raise CompileError(self.lineno,
self.pos,
self.line,
msg)
class ASTValue(ASTNode):
def __init__(self, value, token):
super(ASTValue, self).__init__(token)
self.value = value
def __repr__(self):
return "ASTValue({!r})".format(self.value)
class ASTCompOp(ASTNode):
def __init__(self, var, op, val):
# Use the first token of the expression
super(ASTCompOp, self).__init__(var.token)
self.var = var
self.op = op
self.val = val
def __repr__(self):
return "ASTCompOp({!r} {!r} {!r})".format(self.var, self.op, self.val)
class ASTCompNot(ASTNode):
def __init__(self, not_, comp):
super(ASTCompNot, self).__init__(not_.token)
self.comp = comp
def __repr__(self):
return "ASTCompNot({!r})".format(self.comp)
class ASTCompAndOr(ASTNode):
op = None
def __init__(self, comp, comps):
super(ASTCompAndOr, self).__init__(comp.token)
self.comps = [comp]
for c in comps:
self.comps.append(c)
def __repr__(self):
return "ASTComp{}({!r})".format(self.op, self.comps)
class ASTCompAnd(ASTCompAndOr):
op = 'And'
class ASTCompOr(ASTCompAndOr):
op = 'Or'
class ASTModKV(ASTValue):
def __init__(self, key, value):
super(ASTModKV, self).__init__(value, key.token)
def __repr__(self):
return "<ASTModKV {}:{}>".format(self.name, self.value)
class ASTModNext(ASTModKV):
name = 'Next'
class ASTModTimeout(ASTModKV):
name = 'Timeout'
class ASTModHeartbeat(ASTModKV):
name = 'Heartbeat'
class ASTModInput(ASTModKV):
name = 'Input'
class ASTModResult(ASTModKV):
name = 'Result'
class ASTModOutput(ASTModKV):
name = 'Output'
class ASTModData(ASTModKV):
name = 'Data'
class ASTModParameters(OrderedDict, ASTNode):
name = 'Parameters'
# NOTE: kpv stands for (key, path marker, value)
# where `path marker` is the token for the `$` that denotes that the
# value contains a JsonPath
def __init__(self, parameters, kpv, kpvs):
OrderedDict.__init__(self)
ASTNode.__init__(self, parameters.token)
self.add_parameter(kpv)
for kpv in kpvs:
self.add_parameter(kpv)
def add_parameter(self, kpv):
k,p,v = kpv
if p is not None:
k.value += '.$' # Parameters that use a JsonPath must have the key
# end with `.$`
self[k] = v
class ASTModRetry(ASTNode):
name = 'Retry'
def __init__(self, retry, errors, interval, max, backoff):
super(ASTModRetry, self).__init__(retry.token)
self.errors = errors
self.interval = interval
self.max = max
self.backoff = backoff
class ASTModCatch(ASTNode):
name = 'Catch'
def __init__(self, catch, errors, path, block):
super(ASTModCatch, self).__init__(catch.token)
self.errors = errors
self.path = path
self.block = block
class ASTModifiers(ASTNode): #??? Subclass dict as well?
def __init__(self, mod, mods):
super(ASTModifiers, self).__init__(mod.token)
self.mods = {}
self.add(mod)
for m in mods:
self.add(m)
def add(self, mod):
t = type(mod)
if t not in self.mods:
self.mods[t] = []
self.mods[t].append(mod)
def update(self, other):
for key in other.mods.keys():
if key not in self.mods:
self.mods[key] = []
self.mods[key].extend(other.mods[key])
def __repr__(self):
return "\n".join(repr(mod) for mod in self.mods.values())
class ASTState(ASTNode):
state_type = ''
valid_modifiers = []
multi_modifiers = [ASTModRetry, ASTModCatch]
def __init__(self, state, block):
super(ASTState, self).__init__(state.token)
self.next = None
self.end = False
if block:
comment, modifiers = block
else:
comment = None
modifiers = None
if comment:
tmp = comment.value.split('\n', 1)
if len(tmp) == 1:
self.name = tmp[0].strip()
self.comment = None
else:
name, comment = tmp
self.name = name.strip()
self.comment = '\n'.join([l.strip() for l in comment.split('\n')])
else:
self.name = 'Line{}'.format(self.lineno)
self.comment = None
def get(type_):
if modifiers is None:
return None
vals = modifiers.mods.get(type_)
if vals is None:
return None
del modifiers.mods[type_]
name = type_.name if hasattr(type_, 'name') else str(type_.__name__)
if type_ not in self.valid_modifiers:
vals[0].raise_error("{} state cannot contain a {} modifier".format(self.state_type, name))
if type_ not in self.multi_modifiers:
if len(vals) > 1:
vals[1].raise_error("{} state can only contain one {} modifier".format(self.state_type, name))
vals = vals[0]
return vals
self.next = get(ASTModNext)
self.timeout = get(ASTModTimeout)
self.heartbeat = get(ASTModHeartbeat)
self.input = get(ASTModInput)
self.result = get(ASTModResult)
self.output = get(ASTModOutput)
self.data = get(ASTModData)
self.parameters = get(ASTModParameters)
self.retry = get(ASTModRetry)
self.catch = get(ASTModCatch)
if modifiers is not None and len(modifiers.mods) > 0:
type_ = list(modifiers.mods.keys())[0]
modifiers.mods[type_][0].raise_error("Unknown state modifier '{}'".format(type_))
def __repr__(self):
return "<ASTState {}:{}>".format(self.state_type, self.name)
class ASTStatePass(ASTState):
state_type = 'Pass'
valid_modifiers = [ASTModInput, ASTModResult, ASTModOutput, ASTModData]
class ASTStateGoto(ASTStatePass):
"""Custom version of Pass that exposes / sets the 'next' modifier"""
valid_modifiers = [ASTModNext]
def __init__(self, state, label):
# Create the state_modifer block manually
block = (None,
ASTModifiers(
ASTModNext(label, label.value),
[]
)
)
super(ASTStateGoto, self).__init__(state, block)
class ASTStateSuccess(ASTState):
state_type = 'Succeed'
valid_modifiers = [ASTModInput, ASTModOutput]
class ASTStateFail(ASTState):
state_type = 'Fail'
def __init__(self, state, error, cause, block):
super(ASTStateFail, self).__init__(state, block)
self.error = error
self.cause = cause
class ASTStateTask(ASTState):
state_type = 'Task'
valid_modifiers = [ASTModTimeout,
ASTModHeartbeat,
ASTModInput,
ASTModResult,
ASTModOutput,
ASTModParameters,
ASTModRetry,
ASTModCatch]
valid_services = ['Arn',
'Lambda',
'Activity']
def __init__(self, service, function, name, block):
super(ASTStateTask, self).__init__(service, block)
if service.value not in self.valid_services and \
service.value not in AWS_SERVICES.keys():
service.raise_error('Invalid Task service')
if function is None:
if service.value in ('Lambda', 'Activity', 'Arn'):
if name is None:
service.raise_error('{} task requires a function name argument'.format(service.value))
function = name
name = None
else:
service.raise_error('{} task requires a function to call'.format(service.value))
else:
if service.value in ('Lambda', 'Activity', 'Arn'):
function.raise_error('Unexpected function name')
else:
try:
function.lookup = function.value # Save value for looking up when checking kwargs
function.value = AWS_SERVICES[service.value][function.value]['name']
except KeyError:
function.raise_error('Invalid Task function')
if name is not None:
name.raise_error('Unexpected argument')
if service.value == 'Arn' and not function.value.startswith('arn:aws:'):
function.raise_error("ARN must start with 'arn:aws:'")
if service.value in ('Lambda', 'Activity') and self.parameters is not None:
tuple(self.parameters.keys())[0].raise_error('Unexpected keyword argument')
if service.value not in ('Lambda', 'Activity', 'Arn'):
required = AWS_SERVICES[service.value][function.lookup]['required_keys']
required = copy.copy(required) # will be mutating to determine missing required arguments
optional = AWS_SERVICES[service.value][function.lookup]['optional_keys']
sync = AWS_SERVICES[service.value][function.lookup]['sync']
if self.parameters:
# self.parameters can be None either if no `parameters:` block is provided
# or if there is a syntax error in the `parameters:` block
for key in self.parameters.keys():
k = key.value
if k.endswith('.$'):
k = k[:-2] # remove the `.$`, which denotes that the key uses a JsonPath
if k == 'sync':
sync = self.parameters[key]
if type(sync) != bool:
key.raise_error("Synchronous value must be a boolean")
del self.parameters[key]
elif k in required:
required.remove(k)
elif k not in optional:
key.raise_error("Invalid keyword argument")
if sync == True:
function.value += '.sync'
if len(required) > 0:
missing = ", ".join(required)
function.raise_error("Missing required keyword arguments: {}".format(missing))
self.service = service
self.function = function
class ASTStateWait(ASTState):
state_type = 'Wait'
valid_modifiers = [ASTModInput, ASTModOutput]
def __init__(self, state, wait_type, wait_val, block):
super(ASTStateWait, self).__init__(state, block)
self.type = wait_type
self.val = wait_val
class ASTStateChoice(ASTState):
state_type = 'Choice'
valid_modifiers = [ASTModInput, ASTModOutput]
DEFAULT = None
def __init__(self, state, comment, transform):
super(ASTStateChoice, self).__init__(state, (comment, transform))
# Use an OrderedDict so that logic comparisons happen in the
# same order as in the source file
self.branches = OrderedDict()
# DP ???: Should ASTStateChoice subclasses override the state_type value?
class ASTStateWhile(ASTStateChoice):
def __init__(self, state, comp, block, transform):
comment, states = block
super(ASTStateWhile, self).__init__(state, comment, transform)
self.branches[comp] = states
class ASTStateIfElse(ASTStateChoice):
def __init__(self, state, comp, block, elif_, else_, transform):
comment, states = block
super(ASTStateIfElse, self).__init__(state, comment, transform)
self.branches[comp] = states
if elif_ is not None:
for comp_, states_ in elif_:
self.branches[comp_] = states_
if else_ is not None:
self.branches[ASTStateChoice.DEFAULT] = else_
class ASTStateSwitch(ASTStateChoice):
def __init__(self, state, var, comment, cases, default, transform):
super(ASTStateSwitch, self).__init__(state, comment, transform)
class EQToken(object):
def __init__(self):
self.value = '=='
eq = EQToken()
for case, val, states in cases:
comp = ASTCompOp(var, eq, val)
self.branches[comp] = states
if default is not None:
default, states = default
self.branches[ASTStateChoice.DEFAULT] = states
class ASTParallelBranch(ASTNode):
def __init__(self, parallel, states):
super(ASTParallelBranch, self).__init__(parallel.token)
self.states = states
class ASTStateParallel(ASTState):
state_type = 'Parallel'
valid_modifiers = [ASTModInput,
ASTModResult,
ASTModOutput,
ASTModRetry,
ASTModCatch]
def __init__(self, state, block, parallels, transform, error):
comment, states = block
if transform is not None and error is not None:
transform.update(error)
elif transform is None and error is not None:
transform = error
super(ASTStateParallel, self).__init__(state, (comment, transform))
self.branches = [ASTParallelBranch(state, states)]
if parallels is not None:
for parallel, states_ in parallels:
self.branches.append(ASTParallelBranch(parallel, states_))
class ASTModVersion(ASTModKV):
pass
class ASTStepFunction(ASTNode):
def __init__(self, comment, version, timeout, states):
super(ASTStepFunction, self).__init__() # ???: use the first states's token?
self.comment = comment
self.version = version
self.timeout = timeout
self.states = states
def __repr__(self):
return "\n".join(repr(state) for state in self.states)
##############################
# AST Modification Functions #
TERMINAL_STATES = (
ASTStateSuccess,
ASTStateFail
)
def link_branch(branch):
"""Helper method for link() that reassigns the results to the given branch"""
if hasattr(branch, 'states'):
branch.states = link(branch.states)
else:
branch.raise_error("Trying to link non-branch state")
return branch
def link(states, final=None):
"""AST Transform function that links together the states of a branch
Performs the following actions:
* Sets the next / end attributes for all encountered ASTState objects
* If the final state is a ASTStateChoice and there is no default state
one is created, as you cannot terminate on a Choice state
* Makes the ASTStateWhile into a full loop
* If there is a Catch modifier or the state is a Choice state the sub-states
for each are recursively linked and pulled up to the current level
* The branches for all Parallel states are recursively linked
Args:
states (list) : List of ASTState objects
final (String) : Name of the next state to link the final state to
or None to have the final state terminate
Returns:
list : List of ASTState objects with end/next set
Note: This is a different list than the input list
"""
linked = []
total = len(states)
for i in range(total):
state = states[i]
linked.append(state)
next_ = states[i+1].name if i+1 < total else final
if state.next is not None:
pass # State has already been linked
elif isinstance(state, TERMINAL_STATES):
pass
elif isinstance(state, ASTStateChoice):
if ASTStateChoice.DEFAULT not in state.branches:
next__ = next_ # prevent branches from using the new end state
if next__ is None:
# Choice cannot be terminal state, add a Success state to
# terminate on
next__ = ASTStateSuccess(state, None)
next__.name = state.name + "Default"
linked.append(next__)
next__ = next__.name
state.branches[ASTStateChoice.DEFAULT] = next__
# Point the last state of the loop to the conditional, completing the loop construct
if isinstance(state, ASTStateWhile):
key = list(state.branches.keys())[0]
state_ = state.branches[key][-1]
if not isinstance(state_, TERMINAL_STATES):
state_ = ASTStatePass(state, None)
state_.name = state.name + "Loop"
state_.next = state.name
state.branches[key].append(state_)
else:
state.end = next_ is None
state.next = next_
if state.catch is not None:
for catch in state.catch:
states_ = catch.block
linked_ = link(states_, final=next_)
catch.next = linked_[0].name
linked.extend(linked_)
# Different states use the branches variable in different ways
if isinstance(state, ASTStateChoice):
for key in state.branches:
states_ = state.branches[key]
if isstr(states_):
continue # already linked
linked_ = link(state.branches[key], final=next_)
# convert the branch from a list of states to the name of the next state
# this is done because the branch states are moved to the appropriate
# location for the step function
state.branches[key] = linked_[0].name
linked.extend(linked_)
elif isinstance(state, ASTStateParallel):
for branch in state.branches:
link_branch(branch)
return linked
MAX_NAME_LENGTH = 128
def check_names(branch):
"""Recursivly checks all state names to ensure they are valid
Checks performed:
* Name is not greater than 128 characters
* No duplicate state names
Args:
branch (list): List of ASTState objects
Raises:
CompileError : If any of the checks fail
"""
if not hasattr(branch, 'states'):
branch.raise_error("Trying to check names for non-branch state")
to_process = [branch.states]
while len(to_process) > 0:
states = to_process.pop(0)
names = set() # State names are unique to the branch
for state in states:
if len(state.name) > MAX_NAME_LENGTH:
state.raise_error("Name exceedes {} characters".format(MAX_NAME_LENGTH))
if state.name in names:
state.raise_error("Duplicate state name '{}'".format(state.name))
names.add(state.name)
if isinstance(state, ASTStateParallel):
for branch in state.branches:
to_process.append(branch.states)
def resolve_arns(branch, region = '', account_id = ''):
"""AST Transform that sets the `arn` attribute for ASTStateTasks
Args:
branch (list): List of ASTState objects
region (str): AWS Region where the Lambdas / Activities reside
account_id (str): AWS Account ID where the Lambdas / Activities reside
"""
if not hasattr(branch, 'states'):
branch.raise_error("Trying to resolve arns for non-branch state")
for state in branch.states:
if isinstance(state, ASTStateTask):
if state.service.value == 'Arn':
# ARN value already checked for 'arn:aws:' prefix in ASTStateTask constructor
state.arn = state.function.value
else:
# arn:partition:service:region:account:task_type:name
if state.service.value == 'Lambda':
service = 'lambda'
task_type = 'function'
else:
service = 'states'
task_type = state.service.value.lower()
if state.service.value not in ('Lambda', 'Activity'):
region = ''
account_id = ''
parts = ['arn', 'aws',
service,
region,
account_id,
task_type,
state.function.value]
state.arn = ":".join(parts)
elif isinstance(state, ASTStateParallel):
for branch in state.branches:
resolve_arns(branch, region, account_id)
def verify_goto_targets(branch):
"""Recursivly checks that all Goto states target valid state names
Valid state names are those states in the current branch. This means that
a Goto cannot target states in another parallel branch or from a parallel
branch to the main body of the Step Function
Args:
branch (list): List of ASTState objects
Raises:
CompileError : If a Goto state targets an invalid state
"""
if not hasattr(branch, 'states'):
branch.raise_error("Trying to check goto targets for non-branch state")
to_process = [branch.states]
while len(to_process) > 0:
states = to_process.pop(0)
names = set() # Need to know all of the valid state names for the branch
for state in states:
names.add(state.name)
if isinstance(state, ASTStateParallel):
for branch in state.branches:
to_process.append(branch.states)
for state in states:
if isinstance(state.next, ASTModNext):
if state.next.value not in names:
state.next.raise_error("Goto target '{}' doesn't exist".format(state.next.value))
class StateVisitor(object):
"""Generic base class for heaviside users to create a visitor that can modify
ASTStateTasks
"""
def dispatch(self, state):
"""Dispatch the given state to the approprate handler function
Args:
state (ASTState): State to dispatch
"""
if isinstance(state, ASTStateTask):
self.handle_task(state)
else:
raise ValueError('State type {} not supported'.format(type(state)))
def visit(self, branch):
"""Visit all states in all branches of the state machine and dispatch
them to be handled by the subclass
Args:
branch (list): List of ASTState objects
"""
if not hasattr(branch, 'states'):
raise ValueError("Trying to visit non-branch state: {}".format(branch))
for state in branch.states:
self.dispatch(state)
if isinstance(state, ASTStateParallel):
for branch in state.branches:
self.visit(branch)
def handle_task(self, state):
"""ASTStateTask handler function placeholder
Args:
state (ASTStateTask): State to handle
"""
pass
|
[
"Derek.Pryor@jhuapl.edu"
] |
Derek.Pryor@jhuapl.edu
|
7c9721087d190c93ec3d0570b7653a0bc525aae6
|
530596633f680ef842d594b58dd4b1016e2a86ff
|
/mapclient/controllers/proxy.py
|
a5bc06ca259b2097fbd227b3b33002a7c08048b8
|
[] |
no_license
|
PublicaMundi/MapClient
|
5c770a124efdfe092228fc7a51c5485ef702734b
|
a526738e5b5a5e01ae3cd9c7275730e2cd6668b3
|
refs/heads/master
| 2016-09-06T08:30:15.528118
| 2015-12-02T11:09:16
| 2015-12-02T11:09:16
| 26,762,602
| 0
| 1
| null | 2016-02-23T16:51:47
| 2014-11-17T15:21:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,501
|
py
|
import logging
from pylons import config, request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from pylons.decorators import jsonify
from mapclient.lib.base import BaseController, render
import urlparse
import requests
log = logging.getLogger(__name__)
MAX_FILE_SIZE = 1024 * 1024 # 1MB
CHUNK_SIZE = 512
class ProxyController(BaseController):
def _validateUrl(self, parts):
if not parts.scheme or not parts.netloc:
abort(409, detail = 'Invalid URL.')
if parts.port and not parts.port in [80, 8080]:
log.warn('Port {port} in url {url} is not allowed.'.format(port = parts.port, url = urlparse.urlunparse(parts)))
abort(409, detail = 'Invalid URL.')
if not parts.query:
log.warn('Missing query string in url {url}.'.format(url = urlparse.urlunparse(parts)))
abort(409, detail = 'Invalid URL.')
invalidQuery = False
query = urlparse.parse_qs(parts.query)
for prop in query:
if not prop in ['service', 'request', 'map']:
invalidQuery = True
log.warn('Query string parameter [{parameter}] is not supported.'.format(parameter = prop))
if prop == 'service' and len(query[prop]) != 1:
invalidQuery = True
log.warn('Query string parameter [{parameter}] should have a single value.'.format(parameter = prop))
if prop == 'service' and query[prop][0].lower() != 'wms':
invalidQuery = True
log.warn('Value {value} for query string parameter [{parameter}] is not supported.'.format(parameter = prop, value = query[prop]))
if prop == 'request' and len(query[prop]) != 1:
invalidQuery = True
log.warn('Query string parameter [{parameter}] should have a single value.'.format(parameter = prop))
if prop == 'request' and query[prop][0].lower() != 'getcapabilities':
invalidQuery = True
log.warn('Value {value} for query string parameter [{parameter}] is not supported.'.format(parameter = prop, value = query[prop]))
if invalidQuery:
abort(409, detail = 'Invalid URL.')
def _isUrlInWhiteList(self, parts):
prefix = urlparse.urlunparse((parts[0], parts[1], parts[2], None, None, None, ))
if 'mapclient.proxy.white-list' in config and prefix in config['mapclient.proxy.white-list'].split(','):
return True
return False
def proxy_resource(self):
size_limit = MAX_FILE_SIZE
if 'mapclient.proxy.limit.default' in config:
size_limit = config['mapclient.proxy.limit.default']
timeout = 3
if not 'url' in request.params:
abort(404, detail = 'Parameter url is required.')
url = request.params['url']
url = url.split('#')[0] # remove potential fragment
''' Chunked proxy for resources. To make sure that the file is not too
large, first, we try to get the content length from the headers.
If the headers do not contain a content length (if it is a chunked
response), we only transfer as long as the transferred data is less
than the maximum file size. '''
parts = urlparse.urlsplit(url)
allowed = self._isUrlInWhiteList(parts)
if not allowed:
self._validateUrl(parts)
log.warn('Proxy resource - {url}'.format(url = url))
if allowed and 'mapclient.proxy.limit.white-list' in config:
size_limit = config['mapclient.proxy.limit.white-list']
try:
method = request.environ["REQUEST_METHOD"]
if method == "POST":
length = int(request.environ["CONTENT_LENGTH"])
headers = {"Content-Type": request.environ["CONTENT_TYPE"]}
body = request.body
r = requests.post(url, data=body, headers=headers, stream=True, timeout = timeout)
else:
r = requests.get(url, stream=True, timeout = timeout)
cl = r.headers['content-length']
if cl and int(cl) > size_limit:
abort(409, '''Content is too large to be proxied. Allowed
file size: {allowed}, Content-Length: {actual}.'''.format(
allowed=size_limit, actual=cl))
response.content_type = r.headers['content-type']
response.charset = r.encoding
length = 0
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
response.body_file.write(chunk)
length += len(chunk)
if length >= size_limit:
abort(409, headers={'content-encoding': ''},
detail='Content is too large to be proxied.')
except requests.exceptions.HTTPError, error:
details = 'Could not proxy resource. Server responded with %s %s' % (
error.response.status_code, error.response.reason)
abort(409, detail=details)
except requests.exceptions.ConnectionError, error:
details = '''Could not proxy resource because a connection error occurred. %s''' % error
abort(502, detail=details)
except requests.exceptions.Timeout, error:
details = 'Could not proxy resource because the connection timed out.'
abort(504, detail=details)
|
[
"yannis.kouvaras@kupa.gr"
] |
yannis.kouvaras@kupa.gr
|
db24493564bfe520bdc8dd75ca9947696ec68c4b
|
8cf2d365516014d50e9149dea3fc96e4ef84e0d6
|
/old/sample_hw_code/onewiretemp.py
|
59dcaafcd5d21f0077027db1c2f850e7a4e32353
|
[
"MIT"
] |
permissive
|
mikeschmidt69/fermonitor
|
242e343a47cccb94b6e9ba5e4753b6fdf2752055
|
1c1785f25cffc7d14bfca11fdc5e5ef6c20d59ce
|
refs/heads/master
| 2021-06-14T07:14:40.087467
| 2021-03-17T17:57:47
| 2021-03-17T17:57:47
| 170,920,399
| 11
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,521
|
py
|
#
# Copyright (c) 2019 Michael Schmidt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import datetime
import glob
import time
import os
import threading
import logging
from setup_logger import logger
import configparser
logger = logging.getLogger('onewiretemp')
logger.setLevel(logging.INFO)
# Initialize the GPIO Pins
os.system('modprobe w1-gpio') # Turns on the GPIO module
os.system('modprobe w1-therm') # Turns on the Temperature module
# Finds the correct device file that holds the temperature data
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
MINIMUM_INVERVAL = 10 # minimum update interval in seconds
class OneWireTemp (threading.Thread):
# Constructor for class
def __init__(self, _configFile):
threading.Thread.__init__(self)
if os.path.isfile(_configFile) == False:
raise IOError("OneWireTemp configuration file is not valid: "+_configFile)
self.configFile = _configFile
self.interval = MINIMUM_INVERVAL # interval in seconds the sensor should be read
self.lastUpdateTime = datetime.datetime.now() - datetime.timedelta(seconds=self.interval) # last time the sensor was read
self.temp = -273.15 # last read temperature from tilt
self.validData = False
self.useOneWireTemp = True
self._readdata()
self.stopThread = True
# Starts background thread that updates the data from Tilt hydrometer (temp, specific gravity) on regular basis.
# This is not a realtime operation so thread sleeps for 5s after each iteration
def run(self):
logger.info("Starting OneWireTemp monitoring.")
self.stopThread = False
while self.stopThread != True:
self._readConf(self.configFile)
self._update() # calls method that updates data from sensor if update time interval has lapsed
time.sleep(MINIMUM_INVERVAL)
logger.info("OneWireTemp Monitoring Stopped.")
# Returns last read temperature reading from Tilt
def getTemp(self):
return self.temp
# returns True/False if data is valid. Data is not valid until read from the actual Tilt device
def isDataValid(self):
return self.validData
# returns time when date and specific gravity were updated from Tilt
def timeOfData(self):
return self.lastUpdateTime
# stops the background thread updating data coming from configured Tilt
def stop(self):
self.stopThread = True
def _readConf(self,configFile):
if os.path.isfile(configFile) == False:
logger.error("OneWireTemp configuration file is not valid: "+configFile)
else:
ini = configparser.ConfigParser()
logger.debug("Reading OneWireTemp config: " + configFile)
ini.read(configFile)
if 'OneWireTemp' in ini:
config = ini['OneWireTemp']
try:
if config["UpdateIntervalSeconds"] != "" and int(config["UpdateIntervalSeconds"]) >= MINIMUM_INVERVAL:
self.interval = int(config.get("UpdateIntervalSeconds"))
else:
self.interval = MINIMUM_INVERVAL
except KeyError:
self.interval = MINIMUM_INVERVAL
try:
if config["MessageLevel"] == "DEBUG":
logger.setLevel(logging.DEBUG)
elif config["MessageLevel"] == "WARNING":
logger.setLevel(logging.ERROR)
elif config["MessageLevel"] == "ERROR":
logger.setLevel(logging.WARNING)
elif config["MessageLevel"] == "INFO":
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.INFO)
except KeyError:
logger.setLevel(logging.INFO)
else:
logger.error("[OneWireTemp] section not found in ini file: " + configFile)
self.interval = MINIMUM_INVERVAL
logger.debug("Done reading OneWireTemp config.")
    # Internal method for checking if the update time interval has lapsed and new data should be read from the sensor
def _update(self):
updateTime = self.lastUpdateTime + datetime.timedelta(seconds=self.interval)
if datetime.datetime.now() > updateTime:
logger.debug("Update data")
self._readdata()
else:
logger.debug("Update interval not reached:"+datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S") +" / "+ updateTime.strftime("%d.%m.%Y %H:%M:%S"))
# A function that reads the sensors data
def _read_temp_raw(self):
lines = ""
try:
f = open(device_file, 'r') # Opens the temperature device file
lines = f.readlines() # Returns the text
f.close()
except:
if self.validData:
logger.warning("Error reading from OneWireTemp sensor.")
self.validData = False
lines = ""
return lines
# convert the value of the sensor into temperature
def _readdata(self):
lines = self._read_temp_raw() # read the temperature device file
# while the first line does not contain 'YES', wait for 0.2s
# and then read the device file again.
retries = 50
        while (not lines or lines[0].strip()[-3:] != 'YES') and retries > 0:
time.sleep(0.2)
lines = self._read_temp_raw()
retries = retries - 1
if retries <= 0:
self.validData = False
else:
            # Look for the position of the 't=' marker in the second line of the
            # device file
equals_pos = lines[1].find('t=')
            # If the 't=' is found, convert the rest of the line after the
            # 't=' into degrees Celsius
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
self.temp = float(temp_string) / 1000.0
if self.validData == False:
logger.info("OneWireTemp sensor reading active.")
self.validData = True
self.lastUpdateTime = datetime.datetime.now()
logger.info("Updated OneWireTemp: "+str(self.temp) + "C")
|
[
"47675124+mikeschmidt69@users.noreply.github.com"
] |
47675124+mikeschmidt69@users.noreply.github.com
|
13b8bba2ae72282e64c69a912f45a013a4f7650f
|
64b2a465c77f235bdbc35cce6286dc7c078e1438
|
/osgar/drivers/opencv.py
|
d7d8d7ef8e1e9019bae798e0697db6bd6848c99f
|
[
"MIT"
] |
permissive
|
tajgr/osgar
|
217d59bfe65e04dc793e73ac308c0227b3525081
|
52b45366bc070957cc97612697b58c926f0188a9
|
refs/heads/master
| 2020-04-09T20:10:45.516355
| 2019-08-07T11:47:50
| 2019-08-07T14:29:16
| 93,401,109
| 0
| 0
| null | 2017-06-05T12:23:01
| 2017-06-05T12:23:01
| null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
"""
Log video stream provided by OpenCV camera
"""
import cv2
from threading import Thread
from osgar.logger import LogWriter
from osgar.bus import BusShutdownException
class LogOpenCVCamera:
def __init__(self, config, bus):
self.input_thread = Thread(target=self.run_input, daemon=True)
self.bus = bus
port = config.get('port', 0)
self.cap = cv2.VideoCapture(port)
self.sleep = config.get('sleep')
def start(self):
self.input_thread.start()
def join(self, timeout=None):
self.input_thread.join(timeout=timeout)
def run_input(self):
while self.bus.is_alive():
# Capture frame-by-frame
ret, frame = self.cap.read()
if ret:
retval, data = cv2.imencode('*.jpeg', frame)
if len(data) > 0:
self.bus.publish('raw', data.tobytes())
if self.sleep is not None:
self.bus.sleep(self.sleep)
self.cap.release()
def request_stop(self):
self.bus.shutdown()
# vim: expandtab sw=4 ts=4
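# A minimal configuration sketch (values are assumptions, not osgar defaults):
# 'port' selects the OpenCV camera index and 'sleep' throttles publishing.
#
#   config = {'port': 0, 'sleep': 0.1}        # /dev/video0, at most ~10 FPS
#   camera = LogOpenCVCamera(config, bus)     # 'bus' is provided by the osgar runtime
#   camera.start()
#   ...
#   camera.request_stop()
#   camera.join()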
|
[
"md@robotika.cz"
] |
md@robotika.cz
|
4e6f7728e1ccc0ee08f9eab26f26539c32f245f1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02779/s326665097.py
|
af3b88d1cfb1a25941fa607ee97b983b715cf65f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
from collections import Counter
n = int(input())
a = list(map(int, input().split()))
c = Counter(a)
new = c.values()
for i in new:
if i != 1:
re = "NO"
break
else:
re = "YES"
print(re)
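# An equivalent one-liner (a sketch): the answer is "YES" exactly when all
# elements are distinct, which a set-size comparison also captures.
# print("YES" if len(set(a)) == len(a) else "NO")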
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4f0239fbe754bec4f62ac564b2eec8efa2ff6483
|
554999891c412bfb684e678db9237784b3e94e13
|
/model_image.py
|
4888a0a3c3104f09d24dda71346efcfa4bd3c29b
|
[] |
no_license
|
alherondelle/Large_Satellite_Image_AutoEncoder
|
9c5ee37d6ebbc5b0566916859a03b8b6c07e9123
|
5e1321c2419cbefc13d0c9c5e5678420b69d8205
|
refs/heads/master
| 2023-01-18T21:16:15.233479
| 2020-11-27T16:57:07
| 2020-11-27T16:57:07
| 304,578,907
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,189
|
py
|
# -*- coding: utf-8 -*-
import glob
import random
import cv2
import os
import torchvision.transforms as transforms
import torch
import random
import pickle
from tqdm import tqdm
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
import torch.nn as nn
import torchvision
from torch.autograd import Variable
from time import time
from torchvision.utils import save_image
import torch.nn.functional as F
import multiprocessing
import argparse
# Argument parser
# Training parameters
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=64, type=int, help='batch size')
parser.add_argument('--learning_rate', default=1e-3, type=float, help='learning rate')
parser.add_argument('--start_epoch', default=0, type=int, help='starting epoch')
parser.add_argument('--end_epoch', default=150, type=int, help='ending epoch')
parser.add_argument('--train_img', default='./METEOSAT_PCAtf/train', type=str, help ='Path to training dataset')
parser.add_argument('--test_img', default='./METEOSAT_PCAtf/test', type=str, help='Path to the testing dataset')
opt = parser.parse_args()
# Image difference data loader
class METEOSATDataset(Dataset):
def __init__(self, path_):
self.data=[]
for folder in os.listdir(path_):
list_of_file = os.listdir(os.path.join(path_, folder))
for files_ in list_of_file:
self.data.append(os.path.join(folder,files_))
self.path = path_
def __len__(self):
return len(self.data)
def __getitem__(self, index):
img_ = self.data[index]
image = np.load(os.path.join(self.path,img_))
image = np.moveaxis(image, 2, 0)
image = torch.from_numpy(image)
return image
# Data loader
data_loader = torch.utils.data.DataLoader(dataset=METEOSATDataset(opt.train_img),
batch_size=opt.batch_size,
shuffle=True, num_workers=0)
test_loader = torch.utils.data.DataLoader(dataset=METEOSATDataset(opt.test_img),
batch_size=1,
shuffle=True, num_workers=0)
# Model architecture
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
## encoder layers ##
        # conv layer (depth from 2 --> 16), 3x3 kernels
self.conv1 = nn.Conv2d(2, 16, 3, padding = 1)
        # conv layers (depth from 16 --> 8, then 8 --> 4), 3x3 kernels
self.conv2 = nn.Conv2d(16, 8, 3, padding=1)
self.conv3 = nn.Conv2d(8, 4, 3, padding=1)
# pooling layer to reduce x-y dims by two; kernel and stride of 2
self.pool = nn.MaxPool2d(2, 2)
## decoder layers ##
## a kernel of 2 and a stride of 2 will increase the spatial dims by 2
self.t_conv1 = nn.ConvTranspose2d(4, 8, 2, stride=2)
self.t_conv2 = nn.ConvTranspose2d(8, 16, 2, stride=2)
self.t_conv3 = nn.ConvTranspose2d(16, 2, 2, stride=2)
def forward(self, x):
## encode ##
# add hidden layers with relu activation function
# and maxpooling after
x = F.relu(self.conv1(x))
x = self.pool(x)
# add second hidden layer
x = F.relu(self.conv2(x))
x = self.pool(x)
# add third hidden layer
x = F.relu(self.conv3(x))
x = self.pool(x)# => compressed representation
## decode ##
# add transpose conv layers, with relu activation function
x = F.relu(self.t_conv1(x))
x = F.relu(self.t_conv2(x))
x = torch.tanh(self.t_conv3(x))
        # output layer (tanh scales values to the range -1 to 1)
return x
# Model initialization and weights loading
ae = Autoencoder().cuda()
if opt.start_epoch != 0:
ae.load_state_dict(torch.load("./conv_autoencoder_model_v2_%d.pth" % (opt.start_epoch)))
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(ae.parameters(), lr=opt.learning_rate, weight_decay=1e-5)
# Dataset info for metrics computing
iter_per_epoch = len(data_loader)
data_iter = iter(data_loader)
# Training
for epoch in range(opt.start_epoch, opt.end_epoch):
t0 = time()
for i, img in tqdm(enumerate(data_loader)):
img_ = Variable(img[:,:,:608, :608]).cuda()
# ===================forward=====================
output = ae(img_.float())
loss = criterion(output, img_.float())
# ===================backward====================
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ===================log========================
print('epoch [{}/{}], loss:{:.4f}, time:{:.4f}'
.format(epoch+1, opt.end_epoch, loss.item()*100, time() - t0))
if epoch % 5 == 0:
count = 0
test_loss = 0
for i, img in tqdm(enumerate(test_loader)):
count+= 1
img_ = Variable(img[:,:,:608, :608]).cuda()
output = ae(img_.float())
test_loss += torch.mean((img_.detach().cpu() - output.detach().cpu())**2)
print('TEST LOSS : ', test_loss.item()/count)
if epoch % 10 == 0:
torch.save(ae.state_dict(), './conv_autoencoder_model_v2_{}.pth'.format(epoch))
pic = output[0].cpu().detach()
real_pic = img_[0].cpu().detach()
save_image(pic, './image_model_v2_{}.png'.format(epoch))
save_image(real_pic, './image_real_model_v2_{}.png'.format(epoch))
# Saving trained model : Final
torch.save(ae.state_dict(), './conv_autoencoder_model_v2_{}.pth'.format(epoch))
# Stopping train phase & Separating encoder / decoder
ae.eval()
ae_encoder_keys = ["conv1.weight", "conv1.bias", "conv2.weight", "conv2.bias", "conv3.weight", "conv3.bias", 'pool.weight', 'pool.biais']
ae_decoder_keys = ['t_conv1.weight', 't_conv2.weight', 't_conv3.weight', 't_conv1.bias', 't_conv2.bias', 't_conv3.bias']
ae_encoder_param = {k:v for k,v in ae.state_dict().items() if k in ae_encoder_keys}
ae_decoder_param = {k:v for k,v in ae.state_dict().items() if k in ae_decoder_keys}
torch.save(ae_encoder_param, "./conv_encoder_image_v2_%d.pth" % (epoch))
torch.save(ae_decoder_param, "./conv_decoder_image_v2_%d.pth" % (epoch))
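# A reload sketch (the strict=False call and the reuse of `epoch` here are
# assumptions): the encoder-only state dict saved above can be loaded back
# into a fresh Autoencoder, leaving the decoder weights untrained.
# enc = Autoencoder().cuda()
# enc.load_state_dict(torch.load("./conv_encoder_image_v2_%d.pth" % (epoch)), strict=False)
# enc.eval()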
|
[
"agathe.lherondelle@mines-paristech.fr"
] |
agathe.lherondelle@mines-paristech.fr
|
bd48767328d1904968baef929946d37d9b971dcd
|
5e629210c351f369208155a11f395d47be9b837b
|
/conditionCompleteion/src/osd/objectService/unitTests/test_diskfile.py
|
9dd7b4b3b27b4b0cd4619e36e37b004d5c54ebc0
|
[] |
no_license
|
confi-surya/pythonicPracto
|
028f2a50bc595b90bee95b235ec9218da3e45fe5
|
c366afd9ab54b8cacda767189f1a13efb5f961b2
|
refs/heads/master
| 2020-03-23T12:56:55.843408
| 2018-07-19T11:51:59
| 2018-07-19T14:37:13
| 141,572,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,853
|
py
|
import cPickle as pickle
import os
import errno
import mock
import unittest
import email
import tempfile
import uuid
#import xattr
from shutil import rmtree
from time import time
from tempfile import mkdtemp
from hashlib import md5
from contextlib import closing, nested
from gzip import GzipFile
from eventlet import tpool
from osd.objectService.unitTests import FakeLogger, temptree
from osd.objectService.unitTests import mock as unit_mock
from osd.objectService import diskfile
from osd.common import utils
#from osd.common.utils import hash_path, mkdirs, normalize_timestamp
from osd.common.utils import mkdirs, normalize_timestamp
from osd.common.ring.ring import hash_path
from osd.common import ring
from osd.common.exceptions import DiskFileNotExist, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileDeleted, DiskFileNotOpen, \
DiskFileError, ReplicationLockTimeout, PathNotDir, DiskFileCollision, \
DiskFileExpired, SwiftException, DiskFileNoSpace
import osd
class TestDiskFileManager(unittest.TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,'test_object_server_disk_file_mgr')
mkdirs(os.path.join(self.testdir,"export", "fs1"))
mkdirs(os.path.join(self.testdir,"export", "fs1"))
self.filesystems = os.path.join(os.path.join(self.testdir,"export"))
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.conf = dict(filesystems=self.filesystems,mount_check='false',
keep_cache_size=2 * 1024)
self.df_mgr = diskfile.DiskFileManager(self.conf,FakeLogger())
def tearDown(self):
rmtree(self.tmpdir, ignore_errors=1)
def test_construct_filesystem_path(self):
res_path = self.df_mgr.construct_filesystem_path("abc")
self.assertEqual(os.path.join(self.filesystems,"abc"),res_path)
def test_get_filesystem_path(self):
res_path = self.df_mgr.get_filesystem_path("abc")
self.assertEqual(os.path.join(self.filesystems,"abc"),res_path)
class TestDiskFile(unittest.TestCase):
def setUp(self):
""" Setup the test"""
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,'test_object_server_disk_file')
self.filesystems = os.path.join(os.path.join(self.testdir,"export"))
self.filesystem = "fs1"
mkdirs(os.path.join(self.filesystems,self.filesystem))
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.conf = dict(filesystems=self.filesystems, mount_check='false',
keep_cache_size=2 * 1024, )
self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
def tearDown(self):
"""tear down the test"""
rmtree(self.tmpdir, ignore_errors=1)
tpool.execute = self._orig_tpool_exc
def _create_ondisk_file(self, df, data, timestamp, metadata=None):
""" create the data amd meta data file"""
if timestamp is None:
timestamp = time()
timestamp = normalize_timestamp(timestamp)
if not metadata:
metadata = {}
if 'X-Timestamp' not in metadata:
metadata['X-Timestamp'] = normalize_timestamp(timestamp)
if 'ETag' not in metadata:
etag = md5()
etag.update(data)
metadata['ETag'] = etag.hexdigest()
if 'name' not in metadata:
metadata['name'] = '/a/c/o'
if 'Content-Length' not in metadata:
metadata['Content-Length'] = str(len(data))
hash_name = df._name_hash
mkdirs(df._datadir)
mkdirs(df._metadir)
data_file = os.path.join(df._datadir, df._name_hash + ".data")
meta_file = os.path.join(df._metadir, df._name_hash + ".meta")
with open(data_file, 'wb') as f:
f.write(data)
with open(meta_file,'wb') as f:
f.write(pickle.dumps(metadata, diskfile.PICKLE_PROTOCOL))
f.write("EOF")
def _simple_get_diskfile(self, acc_dir="a1", cont_dir='d1', obj_dir='o1', account='a', container='c', obj='o'):
"""create the DiskFile object"""
return self.df_mgr.get_diskfile(self.filesystem, acc_dir, cont_dir,
obj_dir, account, container, obj)
def _create_test_file(self, data, timestamp=None, metadata=None, account='a', container='c', obj='o'):
""" creates the test file and opens it"""
if metadata is None:
metadata = {}
metadata.setdefault('name', '/%s/%s/%s' % (account, container, obj))
df = self._simple_get_diskfile(account=account, container=container,
obj=obj)
self._create_ondisk_file(df, data, timestamp, metadata)
df = self._simple_get_diskfile(account=account, container=container,
obj=obj)
df.open()
return df
def test_open_not_exist(self):
""" Test for DiskFileNotExist Exception"""
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
def test_open_expired(self):
"""Test for DiskFileExpired Exception.
although it will not be used in Hydra
"""
self.assertRaises(DiskFileExpired,
self._create_test_file,
'1234567890', metadata={'X-Delete-At': '0'})
def test_open_not_expired(self):
""" DiskFileExpired exception should not be raised"""
try:
self._create_test_file(
'1234567890', metadata={'X-Delete-At': str(2 * int(time()))})
except SwiftException as err:
self.fail("Unexpected swift exception raised: %r" % err)
def test_get_metadata(self):
"""get metadata """
df = self._create_test_file('1234567890', timestamp=42)
md = df.get_metadata()
self.assertEqual(md['X-Timestamp'], normalize_timestamp(42))
def test_read_metadata(self):
""" read metadata """
self._create_test_file('1234567890', timestamp=42)
df = self._simple_get_diskfile()
md = df.read_metadata()
self.assertEqual(md['X-Timestamp'], normalize_timestamp(42))
def test_get_metadata_not_opened(self):
""" get metadata when the metadata field is not populated"""
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotOpen, df.get_metadata)
def test_not_opened(self):
""" test DiskFileNotOpen exception"""
df = self._simple_get_diskfile()
try:
with df:
pass
except DiskFileNotOpen:
pass
else:
self.fail("Expected DiskFileNotOpen exception")
def _get_open_disk_file(self, invalid_type=None, obj_name='o', fsize=1024,
csize=8, mark_deleted=False, prealloc=False,
ts=None, mount_check=False, extra_metadata=None):
'''returns a DiskFile'''
df = self._simple_get_diskfile(obj=obj_name)
data = '0' * fsize
etag = md5()
if ts is not None:
timestamp = ts
else:
timestamp = normalize_timestamp(time())
with df.create() as writer:
upload_size = writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(upload_size),
}
metadata.update(extra_metadata or {})
writer.put(metadata)
if invalid_type == 'ETag':
etag = md5()
etag.update('1' + '0' * (fsize - 1))
etag = etag.hexdigest()
metadata['ETag'] = etag
diskfile.write_metadata(writer._fd_meta, metadata)
elif invalid_type == 'Content-Length':
metadata['Content-Length'] = fsize - 1
diskfile.write_metadata(writer._fd_meta, metadata)
elif invalid_type == 'Bad-Content-Length':
metadata['Content-Length'] = 'zero'
diskfile.write_metadata(writer._fd_meta, metadata)
elif invalid_type == 'Missing-Content-Length':
del metadata['Content-Length']
diskfile.write_metadata(writer._fd_meta, metadata)
elif invalid_type == 'Bad-X-Delete-At':
metadata['X-Delete-At'] = 'bad integer'
diskfile.write_metadata(writer._fd_meta, metadata)
if mark_deleted:
df.delete(timestamp)
data_file = [os.path.join(df._datadir, fname)
for fname in sorted(os.listdir(df._datadir),
reverse=True)
if fname.endswith('.data')]
meta_file = [os.path.join(df._metadir, fname)
for fname in sorted(os.listdir(df._metadir),
reverse=True)
if fname.endswith('.meta')]
''' if invalid_type == 'Corrupt-Xattrs':
# We have to go below read_metadata/write_metadata to get proper
# corruption.
meta_xattr = open(meta_file,'rb').read()
wrong_byte = 'X' if meta_xattr[0] != 'X' else 'Y'
xattr.setxattr(data_files[0], "user.osd.metadata",
wrong_byte + meta_xattr[1:])
elif invalid_type == 'Truncated-Xattrs':
meta_xattr = xattr.getxattr(data_files[0], "user.osd.metadata")
xattr.setxattr(data_files[0], "user.osd.metadata",
meta_xattr[:-1])
'''
if invalid_type == 'Missing-Name':
with open(meta_file,'r') as fd:
md = diskfile.read_metadata(fd)
del md['name']
fd = os.open(meta_file,os.O_WRONLY|os.O_TRUNC)
diskfile.write_metadata(fd, md)
elif invalid_type == 'Bad-Name':
with open(meta_file,'r') as fd:
md = diskfile.read_metadata(fd)
md['name'] = md['name'] + 'garbage'
fd = os.open(meta_file,os.O_WRONLY|os.O_TRUNC)
diskfile.write_metadata(fd, md)
self.conf['disk_chunk_size'] = csize
self.conf['mount_check'] = mount_check
self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
df = self._simple_get_diskfile(obj=obj_name)
df.open()
if invalid_type == 'Zero-Byte':
fp = open(df._data_file, 'w')
fp.close()
df.unit_test_len = fsize
return df
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_delete(self, write):
df = self._get_open_disk_file()
ts = time()
df.delete('data')
data_file_name = df._name_hash + ".data"
meta_file_name = df._name_hash + ".meta"
dl = os.listdir(df._datadir)
ml = os.listdir(df._metadir)
self.assertTrue(data_file_name not in set(dl))
self.assertTrue(meta_file_name in set(ml))
df.delete('meta')
dl = os.listdir(df._datadir)
ml = os.listdir(df._metadir)
self.assertTrue(meta_file_name not in set(ml))
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_open_deleted(self, write):
df = self._get_open_disk_file()
df.delete('data')
df.delete('meta')
data_file_name = df._name_hash + ".data"
meta_file_name = df._name_hash + ".meta"
dl = os.listdir(df._datadir)
ml = os.listdir(df._metadir)
self.assertTrue(data_file_name not in set(dl))
self.assertTrue(meta_file_name not in set(ml))
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
def test_listdir_enoent(self):
oserror = OSError()
oserror.errno = errno.ENOENT
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', side_effect=oserror):
self.assertEqual(self.df_mgr._listdir('path'), [])
self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
def test_listdir_other_oserror(self):
oserror = OSError()
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', side_effect=oserror):
self.assertEqual(self.df_mgr._listdir('path'), [])
self.df_mgr.logger.error.assert_called_once_with(
'ERROR: Skipping %r due to error with listdir attempt: %s',
'path', oserror)
def test_listdir(self):
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', return_value=['abc', 'def']):
self.assertEqual(self.df_mgr._listdir('path'), ['abc', 'def'])
self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
def test_diskfile_names(self):
df = self._simple_get_diskfile()
self.assertEqual(df.account, 'a')
self.assertEqual(df.container, 'c')
self.assertEqual(df.obj, 'o')
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_diskfile_content_length_not_open(self, write):
df = self._simple_get_diskfile()
exc = None
try:
df.content_length
except DiskFileNotOpen as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_content_length(self):
self._get_open_disk_file()
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(df.content_length, 1024)
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_diskfile_timestamp_not_open(self, write):
df = self._simple_get_diskfile()
exc = None
try:
df.timestamp
except DiskFileNotOpen as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_timestamp(self):
self._get_open_disk_file(ts='1383181759.12345')
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(df.timestamp, '1383181759.12345')
def test_write_metdata(self):
df = self._create_test_file('1234567890')
timestamp = normalize_timestamp(time())
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
metadata['name'] = '/a/c/o'
metadata['Content-Length'] = '10'
df.write_metadata(metadata)
metadata = df.read_metadata()
self.assertEquals(metadata['X-Object-Meta-test'],'data')
'''
def test_create_close_oserror(self):
df = self.df_mgr.get_diskfile(self.filesystem, '0', 'abc', '123',
'xyz')
with mock.patch("osd.obj.diskfile.os.close",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
try:
with df.create():
pass
except Exception as err:
self.fail("Unexpected exception raised: %r" % err)
else:
pass
'''
def test_disk_file_mkstemp_creates_dir(self):
tmpdir = os.path.join(self.filesystems, self.filesystem)
os.rmdir(tmpdir)
df = self._simple_get_diskfile()
with df.create():
self.assert_(os.path.exists(tmpdir))
def test_disk_file_create_tmp_file(self):
tmpdir = os.path.join(self.filesystems, self.filesystem)
os.rmdir(tmpdir)
df = self._simple_get_diskfile()
file_name = '_'.join([hash_path(df.account), hash_path(df.account, df.container),
df._name_hash])
with df.create():
self.assert_(os.path.exists(df._tmpdir))
self.assert_(os.path.exists(os.path.join(df._tmpdir, file_name + ".data")))
self.assert_(os.path.exists(os.path.join(df._tmpdir, file_name + ".meta")))
def test_disk_file_finalize_put(self):
tmpdir = os.path.join(self.filesystems, self.filesystem)
os.rmdir(tmpdir)
df = self._simple_get_diskfile()
metadata = {'X-metadata-value':'data'}
file_name = '_'.join([hash_path(df.account), hash_path(df.account, df.container),
df._name_hash])
with df.create() as writer:
self.assertTrue(os.path.exists(df._tmpdir))
self.assertTrue(os.path.exists(os.path.join(df._tmpdir, file_name + ".data")))
self.assertTrue(os.path.exists(os.path.join(df._tmpdir, file_name + ".meta")))
writer.put(metadata)
self.assertTrue(os.path.exists(os.path.join(df._datadir, df._name_hash + ".data")))
self.assertTrue(os.path.exists(os.path.join(df._metadir, df._name_hash + ".meta")))
self.assertTrue(os.path.exists(df._tmpdir))
self.assertFalse(os.path.exists(os.path.join(df._tmpdir, df._name_hash + ".data")))
self.assertFalse(os.path.exists(os.path.join(df._tmpdir, df._name_hash + ".meta")))
def test_disk_file_reader_iter(self):
df = self._create_test_file('1234567890')
reader = df.reader()
self.assertEqual(''.join(reader), '1234567890')
def test_disk_file_reader_iter_w_quarantine(self):
df = self._create_test_file('1234567890')
reader = df.reader()
reader._obj_size += 1
self.assertRaises(DiskFileQuarantined, ''.join, reader)
def test_disk_file_app_iter_corners(self):
df = self._create_test_file('1234567890')
quarantine_msgs = []
reader = df.reader()
self.assertEquals(''.join(reader.app_iter_range(0, None)),
'1234567890')
df = self._simple_get_diskfile()
with df.open():
reader = df.reader()
self.assertEqual(''.join(reader.app_iter_range(5, None)), '67890')
def test_disk_file_app_iter_range_w_none(self):
df = self._create_test_file('1234567890')
reader = df.reader()
self.assertEqual(''.join(reader.app_iter_range(None, None)),
'1234567890')
def test_disk_file_app_iter_partial_closes(self):
df = self._create_test_file('1234567890')
reader = df.reader()
it = reader.app_iter_range(0, 5)
self.assertEqual(''.join(it), '12345')
self.assertTrue(reader._fp is None)
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_disk_file_app_iter_ranges(self, write):
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
it = reader.app_iter_ranges([(0, 10), (10, 20), (20, 30)],
'plain/text',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
self.assertTrue('0123456789' in value)
self.assertTrue('1123456789' in value)
self.assertTrue('2123456789' in value)
"""
def test_disk_file_app_iter_ranges_w_quarantine(self):
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
reader._obj_size += 1
try:
it = reader.app_iter_ranges([(0, 30)],
'plain/text',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
except DiskFileQuarantined as e:
err = e
#self.assertEqual('DiskFileQuarantined',str(err))
self.assertTrue('0123456789' in value)
self.assertTrue('1123456789' in value)
self.assertTrue('2123456789' in value)
"""
def test_disk_file_app_iter_ranges_w_no_etag_quarantine(self):
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
it = reader.app_iter_ranges([(0, 10)],
'plain/text',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
self.assertTrue('0123456789' in value)
def test_disk_file_app_iter_ranges_edges(self):
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
it = reader.app_iter_ranges([(3, 10), (0, 2)], 'application/whatever',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
self.assertTrue('3456789' in value)
self.assertTrue('01' in value)
def test_disk_file_large_app_iter_ranges(self):
# This test case is to make sure that the disk file app_iter_ranges
        # method exercises all the paths being tested.
long_str = '01234567890' * 65536
target_strs = ['3456789', long_str[0:65590]]
df = self._create_test_file(long_str)
reader = df.reader()
it = reader.app_iter_ranges([(3, 10), (0, 65590)], 'plain/text',
'5e816ff8b8b8e9a5d355497e5d9e0301', 655360)
        # The produced string is actually missing the MIME headers, so we
        # need to add these headers to make it a real MIME message.
# The body of the message is produced by method app_iter_ranges
# off of DiskFile object.
header = ''.join(['Content-Type: multipart/byteranges;',
'boundary=',
'5e816ff8b8b8e9a5d355497e5d9e0301\r\n'])
value = header + ''.join(it)
parts = map(lambda p: p.get_payload(decode=True),
email.message_from_string(value).walk())[1:3]
self.assertEqual(parts, target_strs)
def test_disk_file_app_iter_ranges_empty(self):
        # This test case tests when an empty value is passed into app_iter_ranges.
        # When the ranges passed into the method are either an empty array or None,
        # this method will yield an empty string.
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
it = reader.app_iter_ranges([], 'application/whatever',
'\r\n--someheader\r\n', 100)
self.assertEqual(''.join(it), '')
df = self._simple_get_diskfile()
with df.open():
reader = df.reader()
it = reader.app_iter_ranges(None, 'app/something',
'\r\n--someheader\r\n', 150)
self.assertEqual(''.join(it), '')
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_keep_cache(self, write):
df = self._get_open_disk_file(fsize=65)
with mock.patch("osd.objectService.diskfile.drop_buffer_cache") as foo:
for _ in df.reader():
pass
self.assertTrue(foo.called)
df = self._get_open_disk_file(fsize=65)
with mock.patch("osd.objectService.diskfile.drop_buffer_cache") as bar:
for _ in df.reader(keep_cache=False):
pass
self.assertTrue(bar.called)
df = self._get_open_disk_file(fsize=65)
with mock.patch("osd.objectService.diskfile.drop_buffer_cache") as boo:
for _ in df.reader(keep_cache=True):
pass
self.assertFalse(boo.called)
df = self._get_open_disk_file(fsize=5 * 1024, csize=256)
with mock.patch("osd.objectService.diskfile.drop_buffer_cache") as goo:
for _ in df.reader(keep_cache=True):
pass
self.assertTrue(goo.called)
if __name__ =="__main__":
unittest.main()
|
[
"surya.singh@nectechnologies.in"
] |
surya.singh@nectechnologies.in
|
3dd8d95954d8afc63d0f4d3db7a8c16611722f2d
|
eb2a6898e7860a5df49f6cd1fb8d45e36b429520
|
/fxinterview/urls.py
|
cdb2cb6d0e83aac454ed1318e2c84f9a9263fb70
|
[] |
no_license
|
chenfeiyude/fxinterview
|
8abe0b077fa0535a38ce5f1bc547ae215f091773
|
3f35bbd7e800c06e8f756d817e4ea3600e3e6aa6
|
refs/heads/master
| 2021-07-13T01:04:35.097239
| 2020-08-14T13:59:10
| 2020-08-14T13:59:10
| 242,867,200
| 0
| 0
| null | 2021-03-25T23:25:28
| 2020-02-24T23:36:36
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
"""fxinterview URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
urlpatterns = [
# http://localhost:8000/admin/
url(r'^admin/', admin.site.urls),
url(r'^application/', include('job_applications.urls')),
url(r'^', include('main.urls')),
url(r'^', include('registration.urls')),
]
handler400 = 'main.views.error_400'
handler401 = 'main.views.error_401'
handler403 = 'main.views.error_403'
handler404 = 'main.views.error_404'
handler500 = 'main.views.error_500'
|
[
"chenfeiyu0402@gmail.com"
] |
chenfeiyu0402@gmail.com
|
67aa07a8f93c0131c8362599c6407214f99ced5f
|
b485f081e215a98e2f1de2fb0af0b057c10325d1
|
/indigo_prod/controllers/__init__.py
|
1b8d1182901c2ba830c5f3fbcaadd5627d72c48d
|
[] |
no_license
|
suningwz/gestion_rma-11
|
ae08eaf9b9b0785213eccd07e3046064afc4bc76
|
40089a1211376ed6211982ba1dbe0b9f52ceb233
|
refs/heads/master
| 2023-01-21T10:25:21.835163
| 2020-10-20T08:38:57
| 2020-10-20T08:38:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
# -*- coding: utf-8 -*-
from . import sales_report_export
from . import sales_report_by_invoice
from . import stock_pick_item
from . import sales_report_outstock_item
|
[
"odoo@ibasuite.com"
] |
odoo@ibasuite.com
|
85f48c4129634010b6d34d68fcf2d5be232d965c
|
8315a79787fe8ccab90d8da9e6e3c47b4fd9ea2d
|
/desafio49.py
|
0402720616afcac79a497a2a417c139079a400a6
|
[] |
no_license
|
lmdpadm/Python
|
371b51f4172ef2e7016e3c3ce1bbd13fc5ec5966
|
ab0f77c9bd35ea6933a4bf3c67ec5816f4195fc8
|
refs/heads/main
| 2023-08-07T13:13:39.785067
| 2021-09-23T00:34:28
| 2021-09-23T00:34:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
numero = int(input('Choose a number and we will show its multiplication table: '))
for c in range(1, 11):
print('{} x {} = {}'.format(numero, c, numero * c))
|
[
"noreply@github.com"
] |
lmdpadm.noreply@github.com
|
c1c766fca27a06c59babb1f5f5421cb0c6eb6bcf
|
73e8c699b64f94548adfb3a2b2761c9999859835
|
/witch_pay.py
|
3edc2a46c55bf4137fdadd132db59f11adf7ea4b
|
[] |
no_license
|
IamJenver/mytasksPython
|
075e4d4d799cb46ebebaaee464530867aff07262
|
7e4db635884e962a5ff3ba33bc446077972acdc1
|
refs/heads/main
| 2023-03-15T17:13:13.411716
| 2021-03-08T15:22:31
| 2021-03-08T15:22:31
| 325,975,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
# Everyone knows that the witcher can defeat any monster, but his services are not cheap;
# moreover, the witcher does not accept banknotes, he only accepts minted coins.
# In the witcher's world there are coins with denominations 1, 5, 10, 25.
price = int(input())
counter = 0
while price - 25 >= 0:
counter += 1
price = price - 25
while price - 10 >= 0:
counter += 1
price = price - 10
while price - 5 >= 0:
counter += 1
price = price - 5
while price - 1 >= 0:
counter += 1
price = price - 1
print(counter)
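# An equivalent standalone sketch using divmod (greedy change-making is optimal
# for the canonical 1/5/10/25 denominations):
# price = int(input())
# coins = 0
# for value in (25, 10, 5, 1):
#     used, price = divmod(price, value)
#     coins += used
# print(coins)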
|
[
"jenver@yandex.ru"
] |
jenver@yandex.ru
|
f5fa8e0a2925359dcd9fd458a986011562317b58
|
f824b00bebe8cd409ae3fe49be40ccc5df7d06a0
|
/WEB(BE)/firearm/apps.py
|
03f2827fb22ba338d93cc9acb1d1b53556cd13dd
|
[
"Apache-2.0"
] |
permissive
|
hspark15429/CLOUD_WEB_Blo-my_Byzantium
|
a7d2a9318fa9aeb21ca2fe97230eefd3325cc0c7
|
38d9da16c79714d41988008dd2a71ada6f2df3f9
|
refs/heads/master
| 2023-08-18T07:43:40.266548
| 2021-10-20T23:02:32
| 2021-10-20T23:02:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
from django.apps import AppConfig
class FirearmConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'firearm'
|
[
"noreply@github.com"
] |
hspark15429.noreply@github.com
|
eedd37c21608b7c9d5ea2db1adf565b669952301
|
51492dcfd3398c5201e6428317b9b52ec4fd3259
|
/easy/246.py
|
4b5243aa347dbb9d5f21e8094484de9478a97b2c
|
[] |
no_license
|
ntupenn/exercises
|
682f1a52c51278a929fa5adfdcb8e8404f6012dd
|
d0edb8b909f94c1e2dfaaa63f6a7c4e3e09236e4
|
refs/heads/master
| 2021-05-14T13:17:49.209577
| 2018-02-02T08:35:26
| 2018-02-02T08:35:26
| 116,435,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
'''
A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
Write a function to determine if a number is strobogrammatic. The number is represented as a string.
For example, the numbers "69", "88", and "818" are all strobogrammatic.
'''
def isStrobogrammatic(num):
if not num:
return True
start = 0
end = len(num) - 1
check = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'}
while start <= end:
if num[start] not in check or check[num[start]] != num[end]:
return False
start += 1
end -= 1
return True
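# Quick self-checks (a sketch) using the examples from the problem statement:
if __name__ == '__main__':
    assert isStrobogrammatic("69") and isStrobogrammatic("88") and isStrobogrammatic("818")
    assert not isStrobogrammatic("962")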
|
[
"xuanzhang1118@gmail.com"
] |
xuanzhang1118@gmail.com
|
d091376ea903c1328ac580659f780419ba14131f
|
5f834f8aa0603f4f7adc56fdcd5e227538931f81
|
/diab_logisReg.py
|
2ce02d3f7c7d13896c4c7c0870cb4b25f1af7a59
|
[] |
no_license
|
Kamal-prog-code/HealthCare
|
d9a613bcb315a04b14feead97bb4367034f91606
|
2d2fe464a5d25c1373634663dc1eaf07a9064a30
|
refs/heads/main
| 2023-01-20T22:17:55.157525
| 2020-12-05T20:50:03
| 2020-12-05T20:50:03
| 318,627,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.ml.feature import StandardScaler
from pyspark.ml.classification import LogisticRegression
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.classification import RandomForestClassifier
import pickle
import os
spark = SparkSession.builder.appName('HSP').getOrCreate()
df=spark.read.csv('hdfs://localhost:9000/user/BigDataProj/diab.csv',inferSchema=True,header=True)
from pyspark.sql.functions import col
from sklearn.linear_model import LogisticRegression
new_data = df.select(*(col(c).cast("float").alias(c) for c in df.columns))
from pyspark.sql.functions import col,count,isnan,when
from sklearn.preprocessing import StandardScaler
new_data.select([count(when(col(c).isNull(),c)).alias(c) for c in new_data.columns]).show()
cols=new_data.columns
cols.remove("Outcome")
assembler = VectorAssembler(inputCols=cols,outputCol="features")
data=assembler.transform(new_data)
# data.select("features",'Outcome').show(truncate=False)
train, test = df.randomSplit([0.7, 0.3])
x_col = new_data.columns
x_train = train.toPandas()[x_col[:-1]].values
y_train = train.toPandas()['Outcome'].values
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
cls = LogisticRegression()
cls.fit(x_train,y_train)
save_path = 'prediction/'
completeName = os.path.join(save_path, "dblogR.pkl")
pickle.dump(cls, open(completeName, 'wb'))
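# A prediction sketch (the sample row is a hypothetical Pima-style record and
# assumes the same 8 feature columns in the same order; note that the fitted
# StandardScaler `sc` is not pickled alongside dblogR.pkl, so it would need to
# be saved or refit before predicting in another process):
# model = pickle.load(open(completeName, 'rb'))
# sample = sc.transform([[6, 148, 72, 35, 0, 33.6, 0.627, 50]])
# print(model.predict(sample))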
|
[
"karrerajisyam123@gmail.com"
] |
karrerajisyam123@gmail.com
|
0014b564e562601e559c862a93e27d19ae0a5e54
|
0a06bd445cb3d9a870dca9234c016f8c2dfabf80
|
/tcr
|
6422914050f5b1a08648bedd450e11efd3c42f0e
|
[] |
no_license
|
abailly/greed-tcr
|
1f32c0e898fa3a8272686df3db4a58e63481a636
|
dcd1c94519f4f3b4ebfd78e1e3f3de12e0931df0
|
refs/heads/master
| 2021-04-10T15:44:32.899713
| 2020-03-21T09:26:46
| 2020-03-21T09:26:46
| 248,943,933
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,625
|
#!/usr/bin/env python3
# -*- mode: python -*-
import os
import sys
import argparse
import select
import shlex
import subprocess
import sys
import signal
import time
def run_tests():
return os.system("stack install --fast --test")
def tcr():
if run_tests() == 0:
os.system("git commit -a -m \"working\"")
else:
os.system("git reset --hard HEAD")
# When we receive ctrl-c, kill the watcher
process = None
done = False
def signal_handler(sig, frame):
    global done
    done = True
    if process:
        process.kill()
signal.signal(signal.SIGINT, signal_handler)
osname = os.uname().sysname
if osname == 'Darwin':
cmd = "fswatch . --exclude '\.git' --exclude '\.#.*' --exclude 'TAGS' --exclude '.*~' --exclude '/\..+' --exclude '\.newenv.*' --exclude '\.stack-work'"
else:
cmd = "inotifywait -m -r -e modify -e create -e move -e delete . --exclude '\.#.*|.*~|/\..+|\.newenv.*|\.stack-work|TAGS'"
tcr()
while not done:
# start the file watcher subprocess
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
poll = select.poll()
poll.register(process.stdout, select.POLLIN)
# wait for output or subprocess exit
while process.poll() is None:
if poll.poll(1000):
time.sleep(0.5) # debounce because we tend to save lots of files at once
# When we have new output, print it and run the tests
available = process.stdout.peek(1024)
if len(available) > 0:
print(process.stdout.read(len(available)).decode("utf-8"))
tcr()
if process.returncode != 0:
sys.exit(0)
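# Usage sketch: run this watcher from the repository root and leave it running;
# every file change re-runs "stack install --fast --test" and either commits
# the work or hard-resets it, i.e. the "test && commit || revert" (TCR) loop.
#   ./tcr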
|
[
"arnaud.oqube@gmail.com"
] |
arnaud.oqube@gmail.com
|
|
c666a6a03fd34d2025c018b75a0c275d8bbf4105
|
1b74a36d701985e37d4452a5daddbe617db237d8
|
/test.py
|
8e063c3f95d584cbf30b87796e9480ee6a8564f8
|
[] |
no_license
|
kayo289/syaberu_eakon
|
0d5a184946356cf7611f1ddcd90298211e1f0006
|
c72e7157d0d5b370ebf8e3f89379866c7b40d612
|
refs/heads/master
| 2023-05-30T05:47:45.616115
| 2021-06-17T11:50:45
| 2021-06-17T11:50:45
| 377,072,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,876
|
py
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
from datetime import datetime as dt
from matplotlib import pyplot as plt
from scipy import stats
from mlxtend.evaluate import cochrans_q
from mlxtend.evaluate import mcnemar_table
from mlxtend.evaluate import mcnemar
import itertools
plt.rcParams['font.family'] = 'Hiragino Sans'
##############################################
# Parameters
##############################################
maxtime = 20  # maximum questionnaire completion time (minutes)
##############################################
# Functions
##############################################
def plotHist(df, colname):  # draw a histogram for the given dataframe column
plt.figure()
plt.hist(df[colname], bins = 50)
plt.xlim(0, 1000)
plt.xlabel(colname)
plt.ylabel('頻度')
plt.savefig("plot/"+colname,
dpi = 400,
bbox_inches='tight')
plt.close()
def continuousParam(df, colname, displayname = ''):  # summarize a continuous variable
name = '## ' + colname
if displayname != '':
name = '{}: '.format(displayname)
array = df[colname].values
print(name,
'平均 (標準偏差), {:.1f} ({:.1f}); 中央値 (Q1–Q3), {:.1f} ({:.1f}–{:.1f}); 最小値–最大値, {:.1f}–{:.1f}; S-W test p-value, {:.3f} '\
.format(np.mean(array),
np.std(array),
np.median(array),
np.percentile(array, 25),
np.percentile(array, 75),
np.min(array),
np.max(array),
stats.shapiro(array)[1]))
return array
def FigureContinuousQuestionForMovie(df, colname, ylabel, indexlist = ['1', '2', '3','4'],
height = [11, 11, 12, 13], sig0 = [1, 3, 1, 2], sig1 = [2, 4, 3, 4],
                                     ylim0 = 0, ylim1 = 14, yticks = True):  # draw a figure for insertion into the paper
list_data_permovie = []
print('## ' + colname + ' ')
for i, j in enumerate(indexlist):
array = continuousParam(df, colname + j, i)
list_data_permovie.append(array)
plt.figure(figsize = (7, 3.5))
plt.boxplot(list_data_permovie) # boxplot
plt.ylim(ylim0, ylim1)
plt.xticks(range(1, 5), ['公益行動条件', '公益通知条件', '私益行動条件', '私益通知条件'])
# plt.hlines(height, sig0, sig1, linewidth = 0.7, color = 'black')
# if yticks:
# plt.yticks(range(0, 11, 2), range(0, 11, 2))
plt.ylabel(ylabel)
for i, j, k in zip(height, sig0, sig1):
plt.text((j + k)/2, i - 0.3, '*', fontsize = 15, horizontalalignment = 'center')
plt.plot([], [], ' ', label='$*: \it{p} < 0.05\,&\,\it{r} > 0.1$')
plt.legend(frameon=False, bbox_to_anchor=(1.02, 0.96), loc='lower right')
plt.savefig("plot/figure_"+ylabel,
dpi = 400,
bbox_inches='tight')
print("保存した")
##############################################
# Main
##############################################
df = pd.read_excel('data/sumdata.xlsx')
print(df.columns)
df = df.dropna()
# compute the time taken to answer
starttime = pd.Series([dt.strptime(str(i), '%Y%m%d%H%M%S') for i in df['開始時刻(自動で入力されます。変更しないでください)']]) # answer start time
df['回答時間(秒)'] = (df['タイムスタンプ'] - starttime).dt.total_seconds() # time taken = submission time minus start time
# exclude responses with missing answers
df_full = df.dropna()
# filter the data by completion time
df_crop = df_full[np.logical_and(30 <= df_full['回答時間(秒)'],
                                 df_full['回答時間(秒)'] <= 60*maxtime)] # drop responses under 30 s or over maxtime
plotHist(df_crop, '回答時間(秒)')
# summary of each variable
print('# 回答者全ての人数:', len(df.index))
print('# 回答に欠損のない回答者の人数:', len(df_full.index))
print('\n# 解析対象の回答者の人数: {} (回答者全体の{:.1f}%)'.format(len(df_crop.index),
100*len(df_crop.index)/len(df.index)))
# Cochran's Q test
q,p_value = cochrans_q(np.array(["はい"]*len(df_crop.index)),df_crop["目の前には「エアコンの指示を承認する」ボタンがあります。このあとボタンを押しますか?1"].to_numpy(), df_crop["目の前には「エアコンの指示を承認する」ボタンがあります。このあとボタンを押しますか?2"].to_numpy(), df_crop["目の前には「エアコンの指示を承認する」ボタンがあります。このあとボタンを押しますか?3"].to_numpy(), df_crop["目の前には「エアコンの指示を承認する」ボタンがあります。このあとボタンを押しますか?4"].to_numpy())
print('Q: %.3f' % q)
print('p-value: %.6f' % p_value)
# mcnemar
lis = [1,2,3,4]
for pair in itertools.combinations(lis, 2):
print(f"✅==動画{pair[0]}と{pair[1]}のmcnemar==")
q,p_value = mcnemar(mcnemar_table(np.array(["はい"]*len(df_crop.index)),df_crop[f"目の前には「エアコンの指示を承認する」ボタンがあります。このあとボタンを押しますか?{pair[0]}"].to_numpy(), df_crop[f"目の前には「エアコンの指示を承認する」ボタンがあります。このあとボタンを押しますか?{pair[1]}"].to_numpy()))
print('McNemar\'s Chi^2: %.3f' % q)
print('McNemar\'s p-value: %.6f' % p_value)
if p_value < 0.05:
print(f"# 動画{pair[0]}")
print(df_crop[f'目の前には「エアコンの指示を承認する」ボタンがあります。このあとボタンを押しますか?{pair[0]}'].value_counts(normalize=True) * 100)
print(f"# 動画{pair[1]}")
print(df_crop[f'目の前には「エアコンの指示を承認する」ボタンがあります。このあとボタンを押しますか?{pair[1]}'].value_counts(normalize=True) * 100)
t_value, p_value = stats.ttest_ind(df_crop[f'動画内に登場した喋る家電の好感度を教えてください{pair[0]}'].to_numpy(), df_crop[f'動画内に登場した喋る家電の好感度を教えてください{pair[1]}'].to_numpy(), equal_var=True)
print("👍==好感度について==")
print("t_value:", t_value)
print("p_value:", p_value)
if p_value < 0.008:
print(f"p = {p_value:.3f} のため、帰無仮説が棄却されました。AとBに差があります")
else:
print(f"{p_value:.3f} のため、帰無仮説が採択されました。AとBに差はありません")
t_value, p_value = stats.ttest_ind(df_crop[f'動画内に登場した喋る家電の嫌悪感を教えてください{pair[0]}'].to_numpy(), df_crop[f'動画内に登場した喋る家電の嫌悪感を教えてください{pair[1]}'].to_numpy(), equal_var=True)
print("👎==嫌悪感について==")
print("t_value:", t_value)
print("p_value:", p_value)
if p_value < 0.025:
print(f"p = {p_value:.3f} のため、帰無仮説が棄却されました。AとBに差があります")
else:
print(f"{p_value:.3f} のため、帰無仮説が採択されました。AとBに差はありません")
# # drawing figures for insertion into the paper
# FigureContinuousQuestionForMovie(df_crop, 'この後、動画内にある喋る空気清浄機の電源をつけたいですか?つけたくないですか?',
# ylabel = '受容度', indexlist = ['inverseアA', 'アB', 'inverseイA', 'イB'])
FigureContinuousQuestionForMovie(df_crop, '動画内に登場した喋る家電の好感度を教えてください', ylabel = '好感度')
FigureContinuousQuestionForMovie(df_crop, '動画内に登場した喋る家電の嫌悪感を教えてください', ylabel = '嫌悪感')
|
[
"kayokapi@icloud.com"
] |
kayokapi@icloud.com
|
2bc1c55aa465d41767f5a4a17e88f2902fa650a2
|
115b5356242176b8873ae7e43cd313e41cbd0ee6
|
/webstuff/webscraper/tidytext.py
|
057e7c3a5a8d77e0574b55b38fb0fe5b7a3b444a
|
[] |
no_license
|
squeakus/bitsandbytes
|
b71ec737431bc46b7d93969a7b84bc4514fd365b
|
218687d84db42c13bfd9296c476e54cf3d0b43d2
|
refs/heads/master
| 2023-08-26T19:37:15.190367
| 2023-07-18T21:41:58
| 2023-07-18T21:42:14
| 80,018,346
| 2
| 4
| null | 2022-06-22T04:08:35
| 2017-01-25T13:46:28
|
C
|
UTF-8
|
Python
| false
| false
| 976
|
py
|
from BeautifulSoup import BeautifulSoup
import re
page = open('out2.txt','r')
for idx,line in enumerate(page):
parts = line.split(';')
for part in parts:
#print part, '\n'
if part.startswith('var point = new GLatLng'):
print "\n", part.lstrip('var point = new GLatLng')
m = re.search('table(.+?)table', line)
if m:
found = m.group(1)
found = '<table' + found +'table>'
found = found.replace('\\','')
soup = BeautifulSoup(found)
info = soup.findAll('tr',{'class':'wind_row'})
name = soup.findAll('a')
print name[0].text
        for data in info:
            # extract the direction name from e.g. <img src="images/wind/nw.png" />
            # (rstrip/lstrip strip character sets, not substrings, so they would
            # mangle direction names built from those same letters)
            direction = re.search('images/wind/(\w+)', str(data.find('img')))
            if direction:
                print direction.group(1)
n = re.search('Wind:(.+?)km', str(data))
if n:
speed = n.group(1)
print speed
|
[
"jonathanbyrn@gmail.com"
] |
jonathanbyrn@gmail.com
|
7df32f5bb38ea86305dd99079c299814ae898f32
|
fe625bc907b35a6c11b0884371b05d1e96cbc499
|
/second/utils/config_tool.py
|
962e66b1f9bcd0b9f1f4f5bcffd3eb1b617616cb
|
[
"MIT"
] |
permissive
|
robin2002/second.pytorch-1
|
9425c2ccf5c522e70de8788f837f1406a09382ff
|
886f69a82445e914567b05e99dc9844d0091aee1
|
refs/heads/master
| 2020-09-26T12:45:24.095875
| 2019-11-21T07:35:05
| 2019-11-21T07:35:05
| 226,257,485
| 0
| 1
|
MIT
| 2019-12-06T06:03:43
| 2019-12-06T06:03:42
| null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
# This file contains some config modification functions.
# Some functions should only be used for the KITTI dataset.
from google.protobuf import text_format
from second.protos import pipeline_pb2, second_pb2
from pathlib import Path
import numpy as np
def change_detection_range(model_config, new_range):
assert len(new_range) == 4, "you must provide a list such as [-50, -50, 50, 50]"
old_pc_range = list(model_config.voxel_generator.point_cloud_range)
old_pc_range[:2] = new_range[:2]
old_pc_range[3:5] = new_range[2:]
model_config.voxel_generator.point_cloud_range[:] = old_pc_range
for anchor_generator in model_config.target_assigner.anchor_generators:
a_type = anchor_generator.WhichOneof('anchor_generator')
if a_type == "anchor_generator_range":
a_cfg = anchor_generator.anchor_generator_range
old_a_range = list(a_cfg.anchor_ranges)
old_a_range[:2] = new_range[:2]
old_a_range[3:5] = new_range[2:]
a_cfg.anchor_ranges[:] = old_a_range
elif a_type == "anchor_generator_stride":
a_cfg = anchor_generator.anchor_generator_stride
old_offset = list(a_cfg.offsets)
stride = list(a_cfg.strides)
old_offset[0] = new_range[0] + stride[0] / 2
old_offset[1] = new_range[1] + stride[1] / 2
a_cfg.offsets[:] = old_offset
else:
raise ValueError("unknown")
old_post_range = list(model_config.post_center_limit_range)
old_post_range[:2] = new_range[:2]
old_post_range[3:5] = new_range[2:]
model_config.post_center_limit_range[:] = old_post_range
def get_downsample_factor(model_config):
downsample_factor = np.prod(model_config.rpn.layer_strides)
if len(model_config.rpn.upsample_strides) > 0:
downsample_factor /= model_config.rpn.upsample_strides[-1]
downsample_factor *= model_config.middle_feature_extractor.downsample_factor
downsample_factor = int(downsample_factor)
assert downsample_factor > 0
return downsample_factor
if __name__ == "__main__":
config_path = "/home/yy/deeplearning/deeplearning/mypackages/second/configs/car.lite.1.config"
config = pipeline_pb2.TrainEvalPipelineConfig()
with open(config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, config)
change_detection_range(config, [-50, -50, 50, 50])
proto_str = text_format.MessageToString(config, indent=2)
print(proto_str)
|
[
"scrin@foxmail.com"
] |
scrin@foxmail.com
|
fa50dff68c05675c2c7debb98f9a5b3d75c46c85
|
24331f07b0848c0921142bb12c6d1fbb3cc240bb
|
/web/ndn_client.py
|
db6e092283b2f41ad36e55bb492e7c2241148791
|
[] |
no_license
|
shockjiang/rms
|
3c79a66a7e87705c079b737aa52b4897a9e392b2
|
19a6278ba7a7628b958d538bd15e29d4f81786e8
|
refs/heads/master
| 2020-03-26T01:18:01.276178
| 2015-06-09T01:35:10
| 2015-06-09T01:35:10
| 37,102,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,820
|
py
|
#! /usr/bin/env python
# -*- coding=utf-8 -*-
import Queue
import thread
import threading
import json
import urllib
import time
from binascii import hexlify, unhexlify
import pyndn
import common.security as security
import common.statuscode
from common.settings import get_host,log
import webconfig
class RMSAuthException(Exception):
pass
class QueueItem(object):
STATUS_OK = 0
STATUS_UNVERIFIED = 1
STATUS_BAD = 2
STATUS_TIME_OUT = 3
def __init__(self, name, status, content = None):
self.name = name
self.status = status
self.content = content
class rmsClientInterface(pyndn.Closure):
def __init__(self, recv_queue):
self.handle = pyndn.NDN()
self.recv_queue = recv_queue
def send(self, interest, timeout, retries = webconfig.RETRY_MAX):
self.retries = retries
print "timeout=%s" %(timeout)
self.doSend(interest, timeout)
def retry(self, interest):
if not self.retries:
self.recv_queue.put_nowait(QueueItem(interest, QueueItem.STATUS_TIME_OUT))
return pyndn.RESULT_OK
else:
self.retries -= 1
log.info('interest timed out, retrying...')
return pyndn.RESULT_REEXPRESS
def doSend(self, interest, timeout):
templ = pyndn.Interest()
templ.answerOriginKind = 0
        templ.childSelector = 1
templ.interestLifetime = timeout
self.handle.expressInterest(pyndn.Name(interest), self, templ)
def start(self, timeout = -1):
self.handle.run(timeout)
def stop(self):
self.handle.setRunTimeout(0)
def upcall(self, kind, upcallInfo):
if kind == pyndn.UPCALL_FINAL:
return pyndn.RESULT_OK
if kind == pyndn.UPCALL_INTEREST_TIMED_OUT:
return self.retry(upcallInfo.Interest.name)
# make sure we're getting sane responses
if not kind in [pyndn.UPCALL_CONTENT,
pyndn.UPCALL_CONTENT_UNVERIFIED,
pyndn.UPCALL_CONTENT_BAD]:
log.error("Received invalid kind type: %d" % kind)
return pyndn.RESULT_OK
response_name = upcallInfo.ContentObject.name
s = QueueItem.STATUS_OK
if kind == pyndn.UPCALL_CONTENT_UNVERIFIED:
s = QueueItem.STATUS_UNVERIFIED
if kind == pyndn.UPCALL_CONTENT_BAD:
s = QueueItem.STATUS_BAD
self.recv_queue.put_nowait(QueueItem(response_name, s, upcallInfo.ContentObject.content))
return pyndn.RESULT_OK
STATE_NOT_RUN = -1
STATE_NOT_AUTH = 0
STATE_IDLE = 1
STATE_WAIT_RECV = 2
class rmsClientBase(object):
"""Base class of RMS client application"""
def __init__(self, host, app, pemFile):
self.recv_queue = Queue.Queue()
self.result_queue = Queue.Queue()
self.name_prefix = "/{}/rms/{}".format(host, app)
self.session_id = ''
self.seq = 0
self.pemFile = pemFile
self.cipher = None
self.state = STATE_NOT_RUN
self.auth_cond = threading.Condition()
thread.start_new_thread(self._recv_thread, tuple())
thread.start_new_thread(self._ndn_thread, tuple())
#wait for thread to start
while self.state == STATE_NOT_RUN:
time.sleep(0)
def _ndn_thread(self):
self.ndn_interface = rmsClientInterface(self.recv_queue)
self.state = STATE_NOT_AUTH
self.ndn_interface.start()
def _encrypt(self, data):
return self.cipher.encrypt(data)
def _decrypt(self, data):
return self.cipher.decrypt(data)
def _decode_response(self, data):
space1 = data.find(' ')
if space1 == -1:
raise ValueError
space2 = data.find(' ', space1+1)
if space2 == -1:
raise ValueError
return (int(data[0:space1]), int(data[space1+1:space2]), data[space2+1:])
def Connect(self, timeout):
"""Shake hand and authorize with server (may block)"""
self.connect_timeout = timeout
auth = security.Auth()
if self.state != STATE_NOT_AUTH:
raise ValueError
log.debug('Connecting to %s' % self.name_prefix)
self.auth_cond.acquire()
self._auth_result = None
self.ndn_interface.send(self.name_prefix + '/auth/{}'.format(hex(auth.getDHKey())), timeout, 0)
self.auth_cond.wait(timeout)
self.auth_cond.release()
if not self._auth_result:
raise RMSAuthException('Authorization timed out')
try:
data = json.loads(self._auth_result)
auth.setOtherDHKey(long(data['randS'], 0))
auth.decryptPreMasterKey(unhexlify(data['preMaster']), self.pemFile)
self.cipher = security.AESCipher(auth.genMasterKey())
self.session_id = data['session']
self.state = STATE_IDLE
log.debug('Connected')
except Exception, e:
log.error(e)
raise RMSAuthException('Illegal authorization response received')
def ReConnect(self, timeout):
log.debug('Reconnecting to %s' % self.name_prefix)
self.state = STATE_NOT_AUTH
self.session_id = ''
self.seq = 0
self.cipher = None
self.Connect(timeout)
def _auth_timed_out(self):
log.error('Authorization timed out')
self.auth_cond.acquire()
self._auth_result = None
self.auth_cond.notify_all()
self.auth_cond.release()
def _auth_response(self, content):
self.auth_cond.acquire()
self._auth_result = content
self.auth_cond.notify_all()
self.auth_cond.release()
def IsConnected(self):
return self.state in [STATE_IDLE, STATE_WAIT_RECV]
def Send(self, data, timeout):
"""Send data to server (may block)"""
if self.state != STATE_IDLE:
raise Exception('Not idle')
data = self._encrypt(data)
self.seq += 1
self.state = STATE_WAIT_RECV
self.ndn_interface.send(self.name_prefix + '/{}/{}/'.format(self.session_id, self.seq) + data, timeout)
def _recv_thread(self):
while True:
try:
item = self.recv_queue.get()
except Exception, e:
log.error(e)
continue
if self.state == STATE_NOT_AUTH:
#handle auth response
if item.status == QueueItem.STATUS_TIME_OUT:
self._auth_timed_out()
else:
self._auth_response(item.content)
continue
if item.status == QueueItem.STATUS_TIME_OUT:
log.info("send timed out %s" % item.name)
self.state = STATE_IDLE
continue
if self.state != STATE_WAIT_RECV:
log.warn('content received in a wrong state %d'%self.state)
continue
if item.status in [QueueItem.STATUS_BAD, QueueItem.STATUS_UNVERIFIED]:
log.warn('got bad content')
self.state = STATE_IDLE
elif item.status == QueueItem.STATUS_OK:
log.debug('got content')
try:
(seq, status, content) = self._decode_response(item.content)
if int(status) == common.statuscode.STATUS_AUTH_ERROR:
log.warn("session expired")
self.ReConnect(self.connect_timeout or 10.0)
raise RMSAuthException #quit normal receiving procedure
seq = int(seq)
content = self._decrypt(content)
log.debug("content: %s" %(content))
except RMSAuthException:
pass
except Exception, e:
log.error("unable to decode content, %s" % e)
except:
pass
else:
if seq != self.seq:
log.warn("sequence number error, {} expected, but {} received".format(self.seq, seq))
else:
self.result_queue.put_nowait((status, content))
self.state = STATE_IDLE
else:
log.error('got unknown QueueItem')
def Recv(self, timeout = None):
"""Receive data from server (may block)"""
try:
item = self.result_queue.get(True, timeout)
return item
except:
return (None, None)
def DiscardCurrentResult(self):
if self.state != STATE_WAIT_RECV:
log.warn('not waiting for result')
else:
self.state = STATE_IDLE
def Stop(self):
self.ndn_interface.stop()
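# A usage sketch (the host/app/key-file values below are assumptions): a
# concrete client builds on rmsClientBase, connects, then exchanges encrypted
# request/response pairs over NDN.
#   client = rmsClientBase(get_host(), 'monitor', 'client_key.pem')
#   client.Connect(timeout=10.0)
#   client.Send('status', timeout=5.0)
#   status, content = client.Recv(timeout=5.0)
#   client.Stop()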
|
[
"shock.jiang@gmail.com"
] |
shock.jiang@gmail.com
|
bfb90c8755e3b83e9062e88376453a3cfeeee7ec
|
9c2edc273db48dcb6d31a937510476b7c0b0cc61
|
/pyopengl_sample/tutorial1.py
|
0fd92b98d815dd26d6457ba6f9ac33791867e7e0
|
[] |
no_license
|
miyamotok0105/python_sample
|
4d397ac8a3a723c0789c4c3e568f3319dd754501
|
77101c981bf4f725acd20c9f4c4891b29fbaea61
|
refs/heads/master
| 2022-12-19T22:53:44.949782
| 2020-05-05T05:09:22
| 2020-05-05T05:09:22
| 81,720,469
| 1
| 0
| null | 2022-11-22T02:22:55
| 2017-02-12T11:15:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
#!/usr/bin/python
import sys
from OpenGL.GL import *
from OpenGL.GLUT import *
def draw():
glClearColor(1.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glFlush()
glutSwapBuffers()
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(320, 240)
glutCreateWindow("PyOpenGL 1")
glutDisplayFunc(draw)
glutMainLoop()
|
[
"miyamotok0105@gmail.com"
] |
miyamotok0105@gmail.com
|
59ccd0e1e483b1dae9d770a68bd9587c99f73706
|
4c4aad4539175e5cb8c988a3a03605547b9e4869
|
/fitnessClub/login_view.py
|
e0ea23a1e750c9c7aff06eb32b09f04fc7844744
|
[] |
no_license
|
purplum/fitness-course-registration-system
|
4cdafe31ae9d21dbb2c5dee21d964a45c2303834
|
9ea00eca46ec19be20c9ca9689fbf948f0ae612f
|
refs/heads/master
| 2020-04-22T03:47:16.369027
| 2018-08-14T14:23:21
| 2018-08-14T14:23:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from back_end import Login
def login_view(request):
return render_to_response("login.html")
def log(request):
if request.method == 'POST':
user = request.POST
conn = Login.login(user['user_account'], user['user_password'],user['user_type'])
if conn == -1:
return render_to_response("error_page.html", {"error": "failed!"})
else:
response = HttpResponseRedirect('/home')
response.set_cookie('user_account',user['user_account'],3600)
response.set_cookie('user_password',user['user_password'],3600)
response.set_cookie('user_type',user['user_type'],3600)
return response
|
[
"shellystar11@outlook.com"
] |
shellystar11@outlook.com
|
a81b7c82c7f94ef1f25c412191abe2385f31780f
|
b3f7b53a6c0f9abb4b5947f490abc962855eedd8
|
/problem/urls.py
|
4b76531205caa265418618e9acdabdf64469b85d
|
[] |
no_license
|
17611165193/shiqing
|
e43dfd9640451e83fa4fc0d0c056a04746720766
|
e4f8949f9c8b8578d21106da647524d091827484
|
refs/heads/master
| 2022-12-12T18:12:26.312807
| 2018-09-18T06:44:20
| 2018-09-18T06:44:20
| 149,234,968
| 0
| 0
| null | 2022-12-08T02:48:14
| 2018-09-18T05:44:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
"""thing_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from problem import views
urlpatterns = [
url(r'^set_problem/', views.set_problem),
url(r'^recommend_list/', views.recommend_list),
url(r'^comment/', views.comment),
url(r'^comment_list/', views.comment_list),
url(r'^hot_list/', views.hot_list),
url(r'^collection/', views.collection),
url(r'^collection_list/', views.collection_list),
url(r'^follow_member/', views.follow_member),
url(r'^my_follow/', views.my_follow),
url(r'^private_letter/', views.private_letter),
url(r'^set_private_letter/', views.set_private_letter),
url(r'^message/', views.message),
url(r'^recent_browse/', views.recent_browse),
url(r'^fabulous/', views.fabulous),
url(r'^follow_list/', views.follow_list),
url(r'^set_answer/', views.set_answer),
url(r'^dynamic_message/', views.dynamic_message),
url(r'^details/', views.details),
url(r'^search/', views.search),
url(r'^delete_collection/', views.delete_collection),
]
|
[
"liuwei19990123@163.com"
] |
liuwei19990123@163.com
|
2587d997b84fcb7ae2a6db87804428a88199fe81
|
3914c6b0a7913181cda205a60354e4f0cf2ec63e
|
/src/Voluntariado/asgi.py
|
50108c6ee85e036c734e095588d4141441f9ce77
|
[] |
no_license
|
amado-developer/Voluntariado-API
|
74c8cf8452b2616a9ce5131fae473440d6745d08
|
ff7453ac53b0e1ae2988dc7a499731303e93755d
|
refs/heads/master
| 2023-01-13T16:08:03.895866
| 2020-11-12T18:13:05
| 2020-11-12T18:13:05
| 287,809,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
ASGI config for Voluntariado project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Voluntariado.settings')
application = get_asgi_application()
|
[
"amado_developer@hotmail.com"
] |
amado_developer@hotmail.com
|
1ab114a1176d26d3b841fc5fe2d183ecb240649b
|
e09f6fd52886e1d3be89b0552104d487ab78e0b7
|
/outliers/enron_outliers.py
|
18aec494f20e66f3d657bfd6a67b44e958e24889
|
[] |
no_license
|
tompxu/udacity-introtomachinelearning
|
a80f4d67f8836f77e508968f5d10af89c9f06d93
|
f47df7f90e0718cd9d27b539eb79f268d1f49a79
|
refs/heads/master
| 2020-03-16T20:02:30.806512
| 2018-05-10T19:19:57
| 2018-05-10T19:19:57
| 132,943,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#!/usr/bin/python
import pickle
import sys
import matplotlib.pyplot
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
### read in data dictionary, convert to numpy array
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "rb"), fix_imports=True )
features = ["salary", "bonus"]
data = featureFormat(data_dict, features)
### your code below
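### One possible sketch of the plot (an illustrative assumption, not an official
### solution): scatter salary against bonus for every record to spot outliers.
for point in data:
    salary = point[0]
    bonus = point[1]
    matplotlib.pyplot.scatter(salary, bonus)
matplotlib.pyplot.xlabel("salary")
matplotlib.pyplot.ylabel("bonus")
matplotlib.pyplot.show()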
|
[
"pengxu.tom@gmail.com"
] |
pengxu.tom@gmail.com
|
97c37aff4c9011d545041e6eae31ed60bd41ca37
|
11a278464db208eb7549b434a90d494b15d99af0
|
/utils/misc.py
|
5f66aeab52f0b0e7bd8b1f94f5d6c216b8352b06
|
[] |
no_license
|
kuafu1994/quantized_training
|
65b503c0a2d54f9f4ab3dc3e963b13bcd1b1d424
|
9164d24834c897886f7543f687f521f76c67ed92
|
refs/heads/master
| 2021-02-14T20:49:06.271151
| 2020-03-24T18:26:02
| 2020-03-24T18:26:02
| 244,833,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
import random
import numpy as np
import torch
torch_dtypes = {
'float': torch.float,
'float32': torch.float32,
'float64': torch.float64,
'double': torch.double,
'float16': torch.float16,
'half': torch.half,
'uint8': torch.uint8,
'int8': torch.int8,
'int16': torch.int16,
'short': torch.short,
'int32': torch.int32,
'int': torch.int,
'int64': torch.int64,
'long': torch.long
}
def onehot(indexes, N=None, ignore_index=None):
"""
Creates a one-hot representation of indexes with N possible entries.
If N is not specified, it is inferred from the maximum index appearing.
indexes is a long tensor of indexes.
Positions equal to ignore_index are left as all zeros in the one-hot representation.
"""
if N is None:
N = indexes.max() + 1
sz = list(indexes.size())
output = indexes.new().byte().resize_(*sz, N).zero_()
output.scatter_(-1, indexes.unsqueeze(-1), 1)
if ignore_index is not None and ignore_index >= 0:
output.masked_fill_(indexes.eq(ignore_index).unsqueeze(-1), 0)
return output
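# Illustrative usage sketch (not part of the original module; the index values
# are assumptions chosen only to show the expected result):
#
#   >>> idx = torch.tensor([0, 2, 1])
#   >>> onehot(idx, N=3)
#   tensor([[1, 0, 0],
#           [0, 0, 1],
#           [0, 1, 0]], dtype=torch.uint8)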
def set_global_seeds(i):
try:
import torch
except ImportError:
pass
else:
torch.manual_seed(i)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(i)
np.random.seed(i)
random.seed(i)
|
[
"1534472093@qq.com"
] |
1534472093@qq.com
|
b25f51bd9909f386f89f3058a2323e1d1b8c133f
|
6c2608bc87b522da77c792e20330989de17b3005
|
/Chap-7/ex179.py
|
43c10f227985eb4652d2196644fcc6bc8c504dfe
|
[] |
no_license
|
AleByron/AleByron-The-Python-Workbook-second-edition
|
8a0b408c1bbd90c82e6b837fc898ee10341ca8fa
|
491b2fd394aa04e29a4b2dbe9a615c547e239028
|
refs/heads/main
| 2023-01-13T21:01:17.757669
| 2020-11-11T01:29:28
| 2020-11-11T01:29:28
| 306,487,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
def square(n,g):
if abs(n-g**2)<10**-12:  # use abs(): the Newton iterate approaches sqrt(n) from above, so n-g**2 is not reliably positive
return g
else:
g = square(n,(g+(n/g))/2)
return g
def main():
n = 32
g = 1
print(square(n,g))
main()
|
[
"noreply@github.com"
] |
AleByron.noreply@github.com
|
0d918889f8d20d3a4695849eb65eab1ae2ad9c9d
|
edfd1db2b48d4d225bc58be32fbe372a43415112
|
/team-task/airflow2.0/dags/efirmant/lesson3.challenge2.py
|
ea755ca01b0b78617310f0d87c4b0b0748206373
|
[] |
no_license
|
rwidjojo/airflow-training
|
ed83cb9e97ca85ef06de1426f2f41014881a1f22
|
ac82040d8ddc3859df5576eee08d397e824016f1
|
refs/heads/main
| 2023-08-12T21:01:17.672059
| 2021-01-04T09:17:48
| 2021-01-04T09:17:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 915
|
py
|
import logging
from datetime import timedelta
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.hooks.postgres_hook import PostgresHook
owner = 'efirmant' # Replace with your short name
default_args = {
'owner': owner,
'depends_on_past': False,
'start_date': days_ago(2),
}
dag = DAG(
f'{owner}.lesson3.challenge2',
default_args=default_args,
description='Read data from postgresql',
schedule_interval=None,
)
def read_data():
db_conn = PostgresHook(postgres_conn_id='efirmant_posgres2')
result = db_conn.get_records('SELECT order_date, count(order_id) from efirmant_orders GROUP BY order_date')
for row in result:
logging.info(row)
read_task = PythonOperator(
task_id="read",
python_callable=read_data,
dag=dag
)
|
[
"nurcahyopujo@gmail.com"
] |
nurcahyopujo@gmail.com
|
4c3ccf66b8fd7068c014cb41c62600b23d532d8f
|
af1004cc4cb93eab7fa55e7fc287e53490831a0d
|
/FirstScript.py
|
1b18c9076359a6e43fe4623d023efa8cbc5ad39e
|
[] |
no_license
|
ljairaj/Python
|
11687230468a253570768fb0d275c9ca4b36541c
|
499c14e510cbd8826dc2aca50b6c5052501ad5b9
|
refs/heads/master
| 2020-03-15T15:17:48.489802
| 2018-05-05T02:55:51
| 2018-05-05T02:55:51
| 132,208,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,574
|
py
|
import sys
import random
import os
c = "LJR"
x, a, b = 'foo', 10, 1000.0001
# sys.stdout.write(x+"\n");
# print("\n\n\n\n");
# print(c);
# # Code to check prime number
# iVal1 = 2
# iMax = 30
# while (iVal1 <= iMax):
# # print("\nInput Value:"+str(iVal1))
# iVal2 = 2
# isPrime = 0
# while (iVal2 <= (iVal1 / 2)):
# # print("Checking Value:"+str(iVal2)+" Output:"+str(iVal1%iVal2))
# if (iVal1 % iVal2 == 0):
# isPrime = 1
# break
# iVal2 += 1
#
# if (isPrime == 1): print("The number " + str(iVal1) + " is not prime.")
# # else: print("The number "+str(iVal1)+" is not prime.")
#
# iVal1 += 1
# # Code to generate Fibonacci series.
# iVal1 = 0
# iVal2 = 1
# iSum = 1
# iMaxCnt = 10
#
# for iNum in range(0,20):
# print("Value:",iSum)
# iSum = iVal1 + iVal2
#
# if(iNum%2==0):
# iVal1 = iSum
# else:
# iVal2 = iSum
# super_villans = {'Fiddler' : 'Isaac Bowia',
# 'Captain Cold' : 'Leonard Smart',
# 'Weather Wizard' : 'Mark Mardon',
# 'Mirror Master':'San Scudder',
# 'Pied Piper':'Thomas Peterson'}
#
# fruit_list = ['apples','oranges','pineapples','watermelons','bananas']
#
# num_list = [[1,2,3],[30,40],[70,80,90,100]]
#
#
# i = 0
# while(i < len(num_list)):
# j = 0
# print("Value1:", num_list[i])
# while(j < len(num_list[i])):
# print("Value2:", num_list[i][j])
# j += 1
# i += 1
# Input_String = 'This message is to be delivered to [Jairaj,Amit,Mahesh] in regard to the recent projects [Proj1,Proj2].'
def createList(sInpStr, cLookup, temp_list):
iIndx = 0
while(iIndx<len(sInpStr)):
iIndx = sInpStr.find(cLookup,iIndx)
if(iIndx == -1):
break
temp_list.append(iIndx)
iIndx += len(cLookup)
def factorial(n):
if(n==1):
return 1
else:
return n*factorial(n-1)
# Text1 = 'This message is to be delivered to [Jairaj L,Amit Nanda,Mahesh Gopal] in regard to the recent project [Proj1,Proj2]. This will be communicated accordingly.'
Text1 = 'This message is to be delivered to [Jairaj,Amit,Mahesh] in regard to the recent project.'
opnBrk_loc_list = []
createList(Text1,"[",opnBrk_loc_list)
print("Output: ",opnBrk_loc_list)
cldBrk_loc_list = []
createList(Text1,"]",cldBrk_loc_list)
print("Output: ",cldBrk_loc_list)
if(len(opnBrk_loc_list) != len(cldBrk_loc_list)):
print("The string format is not correct.")
exit(1)
Raw_Text1 = ''
sText_val_list = []
sText_val_Map = {}
iStart = 0
for iCnt1 in range(0,len(opnBrk_loc_list)):
Raw_Text1 += (Text1[iStart:int(opnBrk_loc_list[iCnt1])]+"Temptxt"+str(iCnt1))
sText_val_list.append(Text1[int(opnBrk_loc_list[iCnt1]+1):int(cldBrk_loc_list[iCnt1])].split(','))
sText_val_Map["Temptxt"+str(iCnt1)]=Text1[int(opnBrk_loc_list[iCnt1]+1):int(cldBrk_loc_list[iCnt1])].split(',')
iStart = cldBrk_loc_list[iCnt1]+1
if(iCnt1 == len(opnBrk_loc_list)-1):
Raw_Text1 += Text1[iStart:len(Text1)]
print("String Val: ",Raw_Text1)
print("Output1: ",sText_val_list)
print("Output2: ",sText_val_Map)
# iTotalStr = 0
# for iCnt2 in range(0,len(sText_val_list)):
# if(iCnt2 == 0):
# iTotalStr = len(sText_val_list[iCnt2])
# else:
# iTotalStr *= len(sText_val_list[iCnt2])
#
# print("Total Size:",iTotalStr)
# for iLp1 in range(len(sText_val_list[0])):
# for iLp2 in range(len(sText_val_list[1])):
# print("Output: ",sText_val_list[0][iLp1]+" , "+sText_val_list[1][iLp2])
|
[
"jairajl@yahoo.com"
] |
jairajl@yahoo.com
|
7d972446eb1d879cf62519229177e8b1c0ec47c5
|
2733461324b1602373a5973846942952b77951d2
|
/Functions/Solution11.py
|
bb1ac36341c96635c80e29d53d876ac10e7d4588
|
[] |
no_license
|
Suyen-Shrestha/Python-Assignment
|
a14ececd2fcdd600004774cb8267b02b6665eb0e
|
438cecb9d9e3f42e788743c87ebe830381061360
|
refs/heads/master
| 2022-11-12T10:25:32.517165
| 2020-06-28T05:31:47
| 2020-06-28T05:31:47
| 275,513,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
adder = lambda x: x + 15
multiplier = lambda x,y: x*y
print(f'The result after addition of 15 to a number: {adder(5)}')
print(f'The result after multiplication of x and y numbers: {multiplier(7,8)}')
|
[
"suyencrestha@gmail.com"
] |
suyencrestha@gmail.com
|
736235745f10fb292372559ad9387d0047365fb5
|
b3f122622c041622d89af596c3a9862391331a9d
|
/cms/templatetags/cms_tags.py
|
ba2f8b88c64e7ff01707867eca8253f36f35d1e5
|
[] |
no_license
|
noxan/django-mini-cms
|
b6b229f4add63582ef6d976e766a4102e3fa02c7
|
c833e62571fd232ca5c6bc8278a5629c2886e9f1
|
refs/heads/master
| 2016-09-09T17:50:03.314904
| 2012-09-09T22:05:38
| 2012-09-09T22:05:38
| 4,614,741
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
from django import template
from django.core.urlresolvers import reverse
register = template.Library()
class BreadcrumbsListNode(template.Node):
def render(self, context):
page = context['object']
builder = []
builder.append('<ul class="breadcrumb">')
parent = page.parent
while parent is not None:
builder.append(u'<li><a href="%s">%s</a> <span class="divider">/</span></li>' % (reverse('cms:page', args=[parent.slug]), parent.headline))
parent = parent.parent
builder.append(u'<li class="active">%s</li>' % (page.headline))
builder.append(u'</ul>')
return u''.join(builder)
@register.tag
def render_breadcrumbs(parser, token):
return BreadcrumbsListNode()
|
[
"noxan@redmoonstudios.de"
] |
noxan@redmoonstudios.de
|
a56e63d9aeb1c1faec33a03654624c5820c1539b
|
17b7862ae66d7c4dacecd70e14fc80ccbb19c9f7
|
/System_Monitor.py
|
a3f2ee6c1024904513e737a8729b95ccc50e58e6
|
[] |
no_license
|
zongxj/pyscript
|
1a4e188ee18a36a2b8d3fecd5f06790538de32d3
|
af82d004b27c72fa0ed8170f4415021e1263c03d
|
refs/heads/master
| 2021-07-15T06:06:37.725891
| 2020-06-17T10:27:32
| 2020-06-17T10:27:32
| 127,864,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
# coding:utf-8
import psutil
import time
# Get the current time
now_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print(now_time+'\n')
###############################
# CPU information
###############################
# Get all CPU info
Cpu_Time_1 = psutil.cpu_times()
print(Cpu_Time_1)
# Show CPU info across all logical CPUs
Cpu_Time_2 = psutil.cpu_times(percpu=False)
print(Cpu_Time_2)
# Get the user CPU time
Cpu_Time_3 = psutil.cpu_times().user
print(Cpu_Time_3)
# Get the number of logical CPUs
Cpu_Count_1 = psutil.cpu_count()
print(Cpu_Count_1)
# Get the number of physical CPUs
Cpu_Count_2 = psutil.cpu_count(logical=False)
print(Cpu_Count_2)
###############################
# Memory information
###############################
# Get system memory usage
Mem = psutil.virtual_memory()
print(Mem)
# Get swap memory info
Mem = psutil.swap_memory()
print(Mem)
###############################
# Disk information
###############################
# Disk I/O counters
Disk = psutil.disk_io_counters()
print(Disk)
# Per-disk I/O counters
Disk = psutil.disk_io_counters(perdisk=True)
print(Disk)
# Get full disk partition info
Disk = psutil.disk_partitions()
print(Disk)
# Get partition usage info
Disk = psutil.disk_usage('/')
print(Disk)
###############################
# Network information
###############################
# Total network I/O counters
Net = psutil.net_io_counters()
print(Net)
# Per-interface network I/O counters
Net = psutil.net_io_counters(pernic=True)
print(Net)
# Network connection info  # netstat -anlt
Net = psutil.net_connections()
print(Net)
# Local NIC address info  # ifconfig
Net = psutil.net_if_addrs()
print(Net)
# Local NIC status info
Net = psutil.net_if_stats()
print(Net)
###############################
# System information
###############################
# Get currently logged-in users
user = psutil.users()
print(user)
# Get system boot time
boot_time = psutil.boot_time()
started_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(boot_time))
print(started_time)
# List all system processes
pid = psutil.pids()
print(pid)
|
[
"zongxj_cn@foxmail.com"
] |
zongxj_cn@foxmail.com
|
63cf4e7fc790f00047e1d1b4a59e089134a6a4ce
|
1113e8eec4ccbbcd00c6a9b5466c5239b6f0eb03
|
/cpos/foundation/_callable/core.py
|
d5bcbc5bd5bde2ddfb68fa7a980ecfe3e94c65cb
|
[] |
no_license
|
yizhong120110/CPOS
|
a05858c84e04ce4aa48b3bfb43ee49264ffc5270
|
68ddf3df6d2cd731e6634b09d27aff4c22debd8e
|
refs/heads/master
| 2021-09-01T17:59:53.802095
| 2017-12-28T05:43:06
| 2017-12-28T05:43:06
| 106,247,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,495
|
py
|
# -*- coding: utf-8 -*-
import textwrap
import sys
import os
from ..substrate.utils.logger import logger
from ..substrate.interfaces import Clarified
class Callable(object):
def __init__(self, py_code, fragment_name=''):
self.d = {'py_code':py_code,'fragment_name':fragment_name}
def run(self, np,np_local):
"""
# Register py_code in the namespace np
"""
try:
#logger.ods (str(self.d['py_code']) , lv = 'dev' , cat = 'foundation.callable')
exec(self.d['py_code'],np,np_local)
return True
except:
logger.oes("callable error:" , lv = 'error' , cat = 'foundation.callable')
return False
def get_name (self):
return self.d['fragment_name']
class DynamicRuntime(Clarified):
NAME = "DynamicRuntime"
# np_init_code and upd_np should be deprecated; they are kept only for compatibility with old code.
# Runtime and Callable should be used as the test cases below show.
def __init__ (self,np_init_code='',upd_np={}, np = {}, np_local = None):
self.np = np
self.np_local = np_local
self.np_init_code = np_init_code
self.prepare_environment()
self.np.update(upd_np)
def call(self,callable_object):
return callable_object.run(self.np,self.np_local)
def prepare_environment(self):
ca = Callable(textwrap.dedent(self.np_init_code.replace('\r', '')))
self.call(ca)
return True
def last_result (self):
# equal to the "_" variable in the Python console.
if '_' in (self.np.keys()):
return self.np['_']
return None
def var (self,var_name):
# Look up a named variable in the namespace (cf. last_result() for "_").
if var_name in (self.np.keys()):
return self.np[var_name]
return None
def statement_dynamic_call (statement = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if statement != '':
if not dr.call( Callable( statement ) ):
return None
return dr
def direct_dynamic_call (module_name = '',func_name = '',args = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if dr.var('_') is None:
dr = statement_dynamic_call('_ = None',dr)
if module_name != '':
statement = 'from %s import %s' % (module_name,'*' if func_name == '' else func_name)
dr = statement_dynamic_call(statement,dr)
if func_name != '' and func_name != '*':
statement = '_ = %s(%s) or _'%(func_name, args)
dr = statement_dynamic_call(statement,dr)
if not dr:
return None
return dr
def direct_dynamic_call_pyfile (pyfile='' , root='' ,func_name = '',args = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if dr.var('_') is None:
dr = statement_dynamic_call('_ = None',dr)
if pyfile != '':
root = os.path.abspath(root) + os.sep
pyfile = os.path.abspath(os.path.join(root, pyfile.strip('/\\')))
statement = open(pyfile,mode='rb').read()
dr = statement_dynamic_call(statement,dr)
if func_name != '':
statement = '_ = %s(%s) or _'%(func_name, args)
dr = statement_dynamic_call(statement,dr)
if not dr:
return None
return dr
scall = statement_dynamic_call
dcall = direct_dynamic_call
dcallpy = direct_dynamic_call_pyfile
#######################################################################
#TEST
a = 0
def __test_call ():
global a
a = 100
print ('__test_call')
return 0
def _test1 ():
# Using globals() would affect the current environment and break open(), so it is not used here
#dr = DynamicRuntime(np=globals())
dr = DynamicRuntime(np=locals())
dr = dcall('os',runtime = dr)
if dr:
dr = dcall(func_name = 'times',args = '',runtime = dr)
if dr:
dr = dcall(func_name = 'print',args = '_',runtime = dr)
if dr:
dr = dcall(func_name = 'times',args = '',runtime = dr)
if dr:
dr = dcall(func_name = 'print',args = '_',runtime = dr)
if dr:
dr = scall('print(\' Hello \')',runtime = dr)
if dr:
dr = scall('__test_call()',runtime = dr)
print(a)
def _test2 ():
b = 1
c = 1
dr = DynamicRuntime( np = locals())
scall('b = b + 1',dr)
print(dr)
print(b)
## note! we have to obtain the result manually. The 'b = b + 1' call will not touch the 'b' in this scope.
# why? ????
#refer to python doc [exec]:
#Note
#The default locals act as described for function locals() below:
# modifications to the default locals dictionary should not be attempted. Pass an explicit locals dictionary
# if you need to see effects of the code on locals after function exec() returns.
#
print (dr.var('b'))
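# A minimal standalone illustration of the note above (a sketch, not part of the
# original tests): exec() mutates the dictionary it is given, not the caller's
# local variables, so the result has to be read back from that dict.
def _test2b():
    ns = {'b': 1}
    exec('b = b + 1', ns)
    print(ns['b'])  # prints 2; the caller's own variables are never touched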
def _test3 ():
dr = scall('t3 = "this is t3" ')
print(dr.var('t3'))
dr = scall('t4 = t3 + " and t4" ',dr)
print(dr.var('t4'))
def _test4 ():
# If the line below raises an error, the local environment has been corrupted, which np=globals() would cause
#print("++++++++++==========",help(open))
dr = dcallpy(os.path.abspath( __file__ ),'_test4_print')
dr = dcallpy(func_name='_test4_print_2' ,args='1111' ,runtime=dr)
dr = dcallpy(func_name='_test4_print_3' ,args='1111,2222' ,runtime=dr)
def _test4_print():
print("===== my name is _test4_print")
def _test4_print_2(aaaa):
print("===== my name is _test4_print_2 %s"%(aaaa))
def _test4_print_3(aaaa,bbbbb):
print("===== my name is _test4_print_3 %s %s"%(aaaa,bbbbb))
def _test5 ():
dr = scall('')
dr.np['aaaaa'] = 'test is aaaaa'
dr = dcall(func_name = 'print',args = 'aaaaa',runtime = dr)
if __name__ == '__main__':
_test1()
print('==========================================================')
_test2()
print('==========================================================')
_test3()
print('==========================================================')
_test4()
print('==========================================================')
_test5()
|
[
"yizhong120110@gmail.com"
] |
yizhong120110@gmail.com
|
3b97278167640c790740fbd6e9a435d1e87ce6e0
|
baaa8c9486e02f4232f4926cf4e1a2eeee1199b4
|
/accounts/admin.py
|
2395fb1f93dfca90cba93acc7edf2da53b6c172c
|
[] |
no_license
|
bondarenkoav/helpdesk
|
b2be867605d484c34aaea4d8bea876c633947f14
|
866ea2dc6ee5182d6310d800b301270b38490fd2
|
refs/heads/master
| 2023-01-08T09:44:15.852016
| 2022-12-28T10:53:39
| 2022-12-28T10:53:39
| 93,615,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from accounts.models import Profile
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
verbose_name = u'Профиль'
verbose_name_plural = u'Профиль'
fk_name = 'user'
class CustomUserAdmin(UserAdmin):
inlines = (ProfileInline, )
def get_inline_instances(self, request, obj=None):
if not obj:
return list()
return super(CustomUserAdmin, self).get_inline_instances(request, obj)
# class ProfileInline(admin.StackedInline):
# model = Profile
# can_delete = False
# verbose_name = u'Профиль'
# verbose_name_plural = u'Профиль'
# fk_name = 'user'
#
#
# # @admin.register(User)
# class CustomUserAdmin(UserAdmin):
# inlines = (ProfileInline, )
# list_display = ('username', 'last_name', 'first_name', 'is_active',
# 'get_phone', 'get_birthday', 'get_groups', 'get_location')
# list_filter = ('is_active', 'groups')
# search_fields = ('username', 'first_name', 'last_name')
#
# list_select_related = True
#
# def get_groups(self, instance):
# list_groups = ''
# for group in instance.groups.all():
# if list_groups == '':
# list_groups = group.name
# else:
# list_groups = list_groups + ', ' + group.name
# return list_groups
# get_groups.short_description = u'Группы'
#
# def get_location(self, instance):
# return instance.profile.location
# get_location.short_description = u'Город'
#
# def get_birthday(self, instance):
# return instance.profile.birthday
# get_birthday.short_description = u'Дата рождения'
#
# def get_phone(self, instance):
# return instance.profile.phone
# get_phone.short_description = u'Номер'
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
|
[
"printex.orsk@gmail.com"
] |
printex.orsk@gmail.com
|
ac0b29e663018b4fc53d6e6982827e701c021ea5
|
5d0833882aba2278a587e7292e46b6367ff0b9ec
|
/leetcode/editor/cn/grpc_test/async_streaming/server.py
|
4a864b37b528baf4ceddbff78081083e82ffd395
|
[] |
no_license
|
GitZW/LeetCode
|
d52bcfd2273e9dfdd3e8f66530db398257ed73c8
|
cb2cd758f6c8a0a38f10a6884f70a2413b2204c5
|
refs/heads/master
| 2023-04-11T05:21:24.318139
| 2021-04-30T10:22:47
| 2021-04-30T10:22:47
| 263,698,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,423
|
py
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Iterable
import threading
import grpc
from google.protobuf.json_format import MessageToJson
import phone_pb2
import phone_pb2_grpc
def create_state_response(call_state: phone_pb2.CallState.State) -> phone_pb2.StreamCallResponse:
response = phone_pb2.StreamCallResponse()
response.call_state.state = call_state
return response
class Phone(phone_pb2_grpc.PhoneServicer):
def __init__(self):
self._id_counter = 0
self._lock = threading.RLock()
def _create_call_session(self) -> phone_pb2.CallInfo:
call_info = phone_pb2.CallInfo()
with self._lock:
call_info.session_id = str(self._id_counter)
self._id_counter += 1
call_info.media = "https://link.to.audio.resources"
logging.info("Created a call session [%s]", MessageToJson(call_info))
return call_info
def _clean_call_session(self, call_info: phone_pb2.CallInfo) -> None:
logging.info("Call session cleaned [%s]", MessageToJson(call_info))
def StreamCall(
self, request_iterator: Iterable[phone_pb2.StreamCallRequest],
context: grpc.ServicerContext
) -> Iterable[phone_pb2.StreamCallResponse]:
try:
request = next(request_iterator)
logging.info("Received a phone call request for number [%s]",
request.phone_number)
except StopIteration:
raise RuntimeError("Failed to receive call request")
# Simulate the acceptance of call request
time.sleep(1)
yield create_state_response(phone_pb2.CallState.NEW)
# Simulate the start of the call session
time.sleep(1)
call_info = self._create_call_session()
context.add_callback(lambda: self._clean_call_session(call_info))
response = phone_pb2.StreamCallResponse()
response.call_info.session_id = call_info.session_id
response.call_info.media = call_info.media
yield response
yield create_state_response(phone_pb2.CallState.ACTIVE)
# Simulate the end of the call
time.sleep(2)
yield create_state_response(phone_pb2.CallState.ENDED)
logging.info("Call finished [%s]", request.phone_number)
def serve(address: str) -> None:
server = grpc.server(ThreadPoolExecutor())
phone_pb2_grpc.add_PhoneServicer_to_server(Phone(), server)
server.add_insecure_port(address)
server.start()
logging.info("Server serving at %s", address)
server.wait_for_termination()
# wait_for_termination
# Block current thread until the server stops.
# This is an EXPERIMENTAL API.
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
serve("[::]:50051")
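# A minimal client sketch for this server (a hedged sketch, not part of the
# original gRPC example; the address and phone number literals are assumptions):
def example_client(address: str = "localhost:50051") -> None:
    with grpc.insecure_channel(address) as channel:
        stub = phone_pb2_grpc.PhoneStub(channel)
        request = phone_pb2.StreamCallRequest(phone_number="555-0100")
        for response in stub.StreamCall(iter([request])):
            logging.info("Client received [%s]", MessageToJson(response))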
|
[
"474268013@qq.com"
] |
474268013@qq.com
|
950b52d7c89a1d9123e575f28cbf268da0e15cca
|
5544c8f087646cc937aec6dd05f6ad9359347b13
|
/Python数据处理/Numpy学习/数组统计.py
|
68478e7aa4e7c04f6498f8043e13a7625692a185
|
[] |
no_license
|
liuyangNB/shiyanlouPy
|
b74b98baa70f83536a0584ee6907de74a4c77845
|
0ba28dfeadf2ff34062dd5257aa296edfaa4bd36
|
refs/heads/master
| 2021-04-02T01:32:25.183156
| 2020-03-18T12:48:30
| 2020-03-18T12:48:30
| 248,229,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
import numpy as np
#Compute the median of the array
a = np.array([[1,4,3],
[6,2,9],
[4,7,2]])
print("中位数:np.median(a,axis = 0)\n",np.median(a,axis=0))#axis = 0 works down the columns (projects onto the x axis); axis = 1 is the opposite
#Compute the mean of each row
print("平均数:np.mean(a,axis=1)\n",np.mean(a,axis=1))
#Weighted average down each column
print('加权平均:np.average(a,axis=0)\n',np.average(a,axis=0))
#Variance down each column
print("方差:np.var(a,axis=0)\n",np.var(a,axis=0))
#Standard deviation down each column
print("标准差:np.std(a,axis=0)\n",np.std(a,axis=0))
|
[
"liuyang.nb@outlook.com"
] |
liuyang.nb@outlook.com
|
8e291920bc9258758fe57e54877cada173a13eef
|
63bf6161532eefa72aa3be8b01cde601b08507dc
|
/python-mapping-example/fhir_model_generator/tests/model/slot_tests.py
|
ad3cec096349f05c2c4414e7b0a4ae6fc7aac7a8
|
[
"Apache-2.0"
] |
permissive
|
Healthedata1/mFHIR
|
4ef370b87e03e973918e5683977d32fe262655bc
|
1b4ea441cfa08b661416a3badedf7e90f2809163
|
refs/heads/master
| 2022-12-10T21:07:03.948406
| 2021-06-18T01:58:23
| 2021-06-18T01:58:23
| 129,964,251
| 9
| 5
| null | 2022-12-09T05:23:54
| 2018-04-17T20:57:15
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,767
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 on 2020-02-10.
# 2020, SMART Health IT.
import os
import io
import unittest
import json
from model import slot
from model.fhirdate import FHIRDate
class SlotTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or \
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'fhir-parser', 'downloads'))
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Slot", js["resourceType"])
return slot.Slot(js)
def testSlot1(self):
inst = self.instantiate_from("slot-example-busy.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot1(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot1(inst2)
def implSlot1(self, inst):
self.assertEqual(inst.comment, "Assessments should be performed before requesting appointments in this slot.")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:15:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:15:00Z")
self.assertEqual(inst.id, "1")
self.assertEqual(inst.identifier[0].system, "http://example.org/identifiers/slots")
self.assertEqual(inst.identifier[0].value, "123132")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertTrue(inst.overbooked)
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:00:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:00:00Z")
self.assertEqual(inst.status, "busy")
self.assertEqual(inst.text.status, "generated")
def testSlot2(self):
inst = self.instantiate_from("slot-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot2(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot2(inst2)
def implSlot2(self, inst):
self.assertEqual(inst.appointmentType.coding[0].code, "WALKIN")
self.assertEqual(inst.appointmentType.coding[0].display, "A previously unscheduled walk-in visit")
self.assertEqual(inst.appointmentType.coding[0].system, "http://terminology.hl7.org/CodeSystem/v2-0276")
self.assertEqual(inst.comment, "Assessments should be performed before requesting appointments in this slot.")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:30:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:30:00Z")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.serviceType[0].coding[0].code, "57")
self.assertEqual(inst.serviceType[0].coding[0].display, "Immunization")
self.assertEqual(inst.specialty[0].coding[0].code, "408480009")
self.assertEqual(inst.specialty[0].coding[0].display, "Clinical immunology")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:15:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:15:00Z")
self.assertEqual(inst.status, "free")
self.assertEqual(inst.text.status, "generated")
def testSlot3(self):
inst = self.instantiate_from("slot-example-unavailable.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot3(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot3(inst2)
def implSlot3(self, inst):
self.assertEqual(inst.comment, "Dr Careful is out of the office")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:45:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:45:00Z")
self.assertEqual(inst.id, "3")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:30:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:30:00Z")
self.assertEqual(inst.status, "busy-unavailable")
self.assertEqual(inst.text.status, "generated")
def testSlot4(self):
inst = self.instantiate_from("slot-example-tentative.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot4(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot4(inst2)
def implSlot4(self, inst):
self.assertEqual(inst.comment, "Dr Careful is out of the office")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T10:00:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T10:00:00Z")
self.assertEqual(inst.id, "2")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:45:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:45:00Z")
self.assertEqual(inst.status, "busy-tentative")
self.assertEqual(inst.text.status, "generated")
if __name__ == '__main__':
unittest.main()
|
[
"ehaas@healthedatainc.com"
] |
ehaas@healthedatainc.com
|
8efffa090b5cb44f5c8c49eb3255299505942f0c
|
7920395f149cfdd5731946ea53526bb64d463660
|
/lgbCategoricalSelector_v3.py
|
fd75c16fcf6b0d2ec929ab04768c995ca45f1446
|
[] |
no_license
|
mohsinkhn/Avito_kaggle_clean
|
f5229ea231e25cbf8d5b9cd53301ad6554298364
|
4a42493eb441d2956cc9e39aafdbfa02be7d2564
|
refs/heads/master
| 2020-03-20T13:11:05.583060
| 2018-06-24T16:08:51
| 2018-06-24T16:08:51
| 137,449,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,811
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 16 13:32:23 2018
@author: mohsin
"""
import gc
import os
from copy import copy
import pandas as pd
import numpy as np
np.random.seed(786)
from utils import *
from sklearn.model_selection import KFold, cross_val_predict
import logging
import matplotlib.pyplot as plt
import seaborn as sns
import lightgbm as lgb
from tqdm import tqdm
tqdm.pandas(tqdm)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#%%
def cv_oof_predictions(estimator, X, y, cvlist, est_kwargs, fit_params, predict_test=True, X_test=None, ):
preds = np.zeros(len(y)) #Initialize empty array to hold prediction
test_preds = []
for tr_index, val_index in cvlist:
gc.collect()
X_tr , X_val = X[tr_index], X[val_index]
y_tr , y_val = y[tr_index], y[val_index]
est = estimator.set_params(**est_kwargs)
#print(X_tr.shape, X_val.shape, y_tr.shape, y_val.shape)
est.fit(X_tr, y_tr, eval_set = [(X_tr, y_tr), (X_val, y_val)], eval_metric='rmse',
early_stopping_rounds=50, verbose=0, **fit_params) #Might need to change this depending on estimator
preds[val_index] = est.predict(X_val)
#break
if predict_test:
tpreds = est.predict(X_test)
test_preds.append(tpreds)
if len(test_preds) >0:
test_preds = np.mean(test_preds, axis=0)
return est, preds, test_preds
def eval_lgb_sets(X, y, cvlist, param_sets):
model = lgb.LGBMRegressor(n_estimators=10000)
y_preds_best = np.zeros(len(X))
best_rmse = 1.0
best_i = 0
for i, lgb_params in enumerate(param_sets):
est, y_preds, _ = cv_oof_predictions(model, X, y, cvlist, predict_test=False,
X_test=None, est_kwargs=lgb_params, fit_params={})
score = rmse(y, y_preds)
logger.info("Score for {}th lgb parameter set is {}".format(i, score))
if score < best_rmse:
best_rmse = score
y_preds_best = y_preds
best_i = i
logger.info("Best score is {} with set {}".format(best_rmse, best_i))
return best_rmse, y_preds_best
#%%
if __name__ == "__main__":
############ Models to try ################################################
LOGGER_FILE = "lgbCategoricalSelector_v3_100k.log"
CAT_COLS = ['user_id',
'region',
'city',
'parent_category_name',
'category_name',
'param_1',
'param_2',
'param_3',
'image_top_1',
'user_type']
BASE_FEATURES = ['user_id_lbenc_2', 'region_lbenc_2', 'city_lbenc_2', 'parent_category_name_lbenc_2',
'category_name_lbenc_2', 'param_1_lbenc_2', 'param_2_lbenc_2', 'param_3_lbenc_2',
'image_top_1_lbenc_2', 'user_type_lbenc_2', 'user_id_trenc_3', 'city_trenc_3',
'parent_category_name_trenc_3', 'param_1_trenc_3', 'param_2_trenc_3', 'param_3_trenc_3',
'image_top_1_trenc_3', 'region_parent_category_name_trenc_8', 'region_param_2_trenc_8',
'region_param_3_trenc_8', 'region_image_top_1_trenc_8', 'city_parent_category_name_trenc_8',
'city_category_name_trenc_8', 'city_param_1_trenc_8', 'city_param_3_trenc_8',
'parent_category_name_param_2_trenc_8', 'parent_category_name_param_3_trenc_8',
'parent_category_name_image_top_1_trenc_8', 'parent_category_name_user_type_trenc_8',
'category_name_param_1_trenc_8', 'category_name_image_top_1_trenc_8',
'param_1_image_top_1_trenc_8', 'param_2_image_top_1_trenc_8',
'user_type_region_category_name_param_1_trenc_8',
'user_type_city_category_name_param_1_trenc_8']
TARGET_ENC_BASE_THRESH = 3
LGB_PARAMS1 = {
"n_estimators":5000,
'learning_rate': 0.02,
"num_leaves":255,
"colsample_bytree": 0.8,
"subsample": 0.9,
"reg_alpha": 1,
"reg_lambda": 1,
"min_data_in_leaf": 100,
"max_bin": 255,
"verbose":0
}
LGB_PARAMS = [LGB_PARAMS1]
###################### Logger #########################################
handler = logging.FileHandler(LOGGER_FILE)
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
###################### Read data ##########################################
logger.info("Reading data")
train = pd.read_csv("../input/train.csv", parse_dates=['activation_date'], nrows=100000)
test = pd.read_csv("../input/test.csv", parse_dates=['activation_date'], nrows=100000)
test['deal_probability'] = -1
#City correction
for df in train, test:
df['city'] = df['region'].astype(str) + "_" + df["city"].astype(str)
df = df.fillna(-1)
y = train['deal_probability'].values
cvlist = list(KFold(5, random_state=123).split(y))
logger.info("Done. Read data with shape {} and {}".format(train.shape, test.shape))
del train, test
################### Greedy forward feature selection ######################
#
final_feats = []
final_score = []
for (b_thresh, tenc_thresh, comb_thresh, tenc_comb_thresh) in [
(2, 3, 8, 8),
]:
columns_to_try = [col+'_trencSD_'+str(tenc_thresh) for col in CAT_COLS]
features = BASE_FEATURES[:]
X = np.vstack([np.load("../utility/X_train_{}.npy".format(col), mmap_mode='r') for col in features]).T[:100000, :]
print("Shape for base dataset is ", X.shape)
best_rmse_lgb, y_preds_lgb = eval_lgb_sets(X, y, cvlist, LGB_PARAMS)
logger.info("Best score for base cols in {}".format(best_rmse_lgb))
best_rmse = best_rmse_lgb
y_preds_best = y_preds_lgb
for col in columns_to_try:
logger.info("#######################################")
logger.info("Adding column {} and checking".format(col))
try:
X_col = np.load("../utility/X_train_{}.npy".format(col)).reshape(-1,1)[:100000, :]
#print(X_col[:5])
X = np.hstack((X, X_col))
print(X.shape)
best_rmse_lgb, y_preds_lgb = eval_lgb_sets(X, y, cvlist, LGB_PARAMS)
if best_rmse_lgb < best_rmse:
best_rmse = best_rmse_lgb
y_preds_best = y_preds_lgb
features.append(col)
else:
X = X[:, :-1]
logger.info("{} DID NOT result in improvement".format(col))
logger.info("")
except:
logger.info("Skipping {}".format(col))
continue
logger.info("Current set of features are : {}".format(features))
final_feats.append(features)
final_score.append(rmse(y, y_preds_best))
#for i, feats, score in enumerate(zip(final_feats, final_score)):
logger.info("Score for combinations is {} with final features {}".format(final_score, final_feats))
logger.info("")
handler.close()
logger.removeHandler(handler)
|
[
"mohsinjackspiro@gmail.com"
] |
mohsinjackspiro@gmail.com
|
a58699037dba96bd3c8ee980bfc1ecf6a13aa983
|
32d47968bf1801661b785a1d72d2e21fb9686aaf
|
/lane_det/lane_line.py
|
16c62a5f1dfc86e3097199b63707d0132ec217d1
|
[] |
no_license
|
janardhan5455/lane-detection
|
b466f62e3770617d6ce3497c55e8f8a174d1db7d
|
483559b92b435759f6e74b6969fb69d1e15e8722
|
refs/heads/main
| 2023-03-26T18:25:35.445837
| 2021-03-28T05:03:57
| 2021-03-28T05:03:57
| 352,244,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
car_cascade = cv.CascadeClassifier('cars.xml')
def canny(img):
grey=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
blur=cv.GaussianBlur(grey,(5,5),0)
canny=cv.Canny(blur,50,150)
return canny
def lane(img):
height=img.shape[0]
polygons=np.array([[(200,height),(1000,height),(500,280)]])
mask=np.zeros_like(img)
cv.fillPoly(mask,polygons,255)
masked_img=cv.bitwise_and(img,mask)
return masked_img
def dis_lines(img,lines):
line_img=np.zeros_like(img)
if lines is not None:
for x1,y1,x2,y2 in lines:
cv.line(line_img,(x1,y1),(x2,y2),(255,0,0),10)
return line_img
def make_coord(img,line_para):
try:
slope,intercept=line_para
except:
slope,intercept=0.0001,0.0001
y1=img.shape[0]
y2=int(y1*(3/5))
x1=int((y1-intercept)/slope)
x2=int((y2-intercept)/slope)
return np.array([x1,y1,x2,y2])
def avg_lines(img,lines):
left_fit=[]
right_fit=[]
for line in lines:
x1,y1,x2,y2=line.reshape(4)
parameters=np.polyfit((x1,x2),(y1,y2),1)
# print(parameters)
slope=parameters[0]
intercept=parameters[1]
if slope <0:
left_fit.append((slope,intercept))
else:
right_fit.append((slope,intercept))
left_fit_avg=np.average(left_fit,axis=0)
right_fit_avg=np.average(right_fit,axis=0)
left_line=make_coord(img,left_fit_avg)
right_line=make_coord(img,right_fit_avg)
return np.array([left_line,right_line])
# img=cv.imread('lane_1.jpg')
# lane_img=np.copy(img)
# canny=canny(lane_img)
# croped_img=lane(canny)
# cv.imshow('canny',canny)
# cv.imshow('croped',croped_img)
# lines=cv.HoughLinesP(croped_img,2,np.pi/180,100,np.array([]),minLineLength=40,maxLineGap=5)
# avged_lines=avg_lines(lane_img,lines)
# line_img=dis_lines(lane_img,avged_lines)
# cv.imshow('line',line_img)
# combo_img=cv.addWeighted(lane_img,0.8,line_img,1,1)
# cv.imshow('mask',combo_img)
# cv.waitKey(0)
cap=cv.VideoCapture('solidWhiteRight.mp4')
while(True):
# print(1)
isTrue,frame=cap.read()
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
cars = car_cascade.detectMultiScale(gray, 1.05, 9)
for (x,y,w,h) in cars:
plate = frame[y:y + h, x:x + w]
cv.rectangle(frame,(x,y),(x +w, y +h) ,(51 ,51,255),2)
cv.rectangle(frame, (x, y - 40), (x + w, y), (51,51,255), -2)
cv.putText(frame, 'Car', (x, y - 10), cv.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
canny_img=canny(frame)
croped_img=lane(canny_img)
lines=cv.HoughLinesP(croped_img,2,np.pi/180,100,np.array([]),minLineLength=40,maxLineGap=5)
avged_lines=avg_lines(frame,lines)
line_img=dis_lines(frame,avged_lines)
combo_img=cv.addWeighted(frame,0.8,line_img,1,1)
# cv.imshow('car',plate)
cv.imshow('lane_det',combo_img)
if cv.waitKey(1) &0xff==ord('a'):
break
cap.release()
cv.destroyAllWindows()
|
[
"noreply@github.com"
] |
janardhan5455.noreply@github.com
|
5eb992d258e71c062dad176eb8359025dccfd6c0
|
b93ec4b9d0a72c94544c873891b15cdf4c48f04c
|
/online-store/backend/migrations/0001_initial_online_store.py
|
30fdc6a232c93d3c0d7a1387174cd657aa482496
|
[] |
no_license
|
TmLev/microservices
|
cef9a7a2f6d14cbe59988f03873e5a74a3b32353
|
ef31cba8656b66d8fdbdd6a947bcf00a3ad3f92a
|
refs/heads/master
| 2023-03-27T11:44:30.882769
| 2020-06-13T07:47:11
| 2020-06-13T07:47:11
| 245,378,240
| 1
| 1
| null | 2021-03-30T14:22:24
| 2020-03-06T09:25:18
|
Python
|
UTF-8
|
Python
| false
| false
| 217
|
py
|
# Generated by Django 3.0.4 on 2020-03-26 17:51
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
]
|
[
"leva.bugatti@gmail.com"
] |
leva.bugatti@gmail.com
|
ad04b02073fbbefec4122752a9ebc0a119a99129
|
532285432469eb3b572f5d8c34bcb517a3b70c0f
|
/students/tfbanks/Lesson02/gridprinter.py
|
d2398c22d620023cfe504a298c6a31c45517a672
|
[] |
no_license
|
toddlikesbeer/Self_Paced-Online
|
4e0d8099d4185e145e7aa2a95d7c8f269ee1c982
|
82ab4aa4900b7d3d3acabb1a4c3cf357ca7f5be7
|
refs/heads/master
| 2020-03-21T23:12:05.212294
| 2018-10-31T02:41:54
| 2018-10-31T02:41:54
| 139,172,267
| 0
| 0
| null | 2018-06-29T16:29:26
| 2018-06-29T16:29:26
| null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
# This is the result of manual processes moved into function form.
# I cleaned up during the process and deleted the old methods so the end result is just the function
def grid (x,y): #Defines a function to print grid to specified dimensions
p = '+ ' # This prints a + sign
d = '- ' # This prints a dash
b = '| ' # this prints a bar
print ((p + d * x) * y + p) #Prints the first line, consisting of p's and d's, creating the box based on the x dimension and repeated y times
for r in range (y):
for i in range(x):
print ((b + ' '*x) * y + b) #Prints the bar section to x dimension, repeated y times
print ((p + d * x) * y + p) #completes the "squares" and repeats y times
grid (3,4) #Prints a 4 x 4 matrix of 3x3 squares, + being the corners
|
[
"tfbanks@gmail.com"
] |
tfbanks@gmail.com
|
8764d59d46a5444c80f139b86086313daedfab35
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog/optimized_44595.py
|
4b502c0083653180de1a265b86c37f599c2c98b6
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,838
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((556.227, 579.162, 346.569), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((577.62, 567.506, 410.007), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((592.146, 544.952, 487.833), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((499.902, 472.089, 412.099), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((650.184, 533.797, 673.199), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((561.491, 577.074, 392.605), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((560.64, 577.701, 391.737), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((548.961, 593.488, 372.017), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((524.868, 604.785, 363.173), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((520.518, 631.661, 369.594), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((493.613, 634.177, 377.669), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((484.765, 629.11, 404.082), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((573.32, 581.782, 367.095), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((401.368, 681.202, 444.553), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((517.783, 644.929, 605.75), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((517.783, 644.929, 605.75), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((523.844, 630.733, 581.449), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((527.23, 614.474, 557.945), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((535.187, 601.124, 533.882), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((545.343, 599.826, 507.38), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((558.97, 603.517, 482.597), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((569.17, 602.239, 455.675), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((362.68, 662.516, 597.87), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((769.647, 538.552, 302.63), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((601.522, 590.816, 476.849), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((601.522, 590.816, 476.849), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((577.776, 574.478, 480.971), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((561.588, 550.307, 477.712), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((569.861, 522.146, 475.878), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((545.922, 522.059, 353.142), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((600.455, 513.149, 596.661), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((563.384, 545.507, 397.988), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((563.305, 545.338, 397.921), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((537.502, 535.552, 393.737), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((523.419, 556.314, 405.96), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((510.924, 572.397, 425.288), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((510.416, 600.039, 430.598), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((515.45, 624.916, 418.683), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((510.866, 649.168, 405.149), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((595.256, 639.773, 414.377), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((423.904, 655.559, 395.424), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((625.146, 604.894, 419.111), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((611.326, 582.726, 424.898), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((581.353, 535.47, 439.284), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((552.729, 488.233, 454.57), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((568.434, 467.24, 378.12), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((501.281, 441.109, 532.012), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((533.796, 590.685, 408.018), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((551.47, 584.314, 429.623), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((571.799, 570.297, 444.162), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((596.698, 560.943, 455.419), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((617.449, 542.988, 465.281), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((633.66, 525.076, 482.118), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((602.35, 559.943, 417.847), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((667.147, 487.057, 548.033), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
37a08b96698b20dd1fea9d7b61d6b4b83fbb7d5e
|
2672a2b664ed12f190b68deb51476b451a524561
|
/portal/config.py
|
e45d5065a743935fa64b17b3a1a2a8ea6266d98c
|
[] |
no_license
|
LCBRU/genvasc_portal_web
|
9a2a27b4a2ba0fb2db402efc96eea8b2ed0a86e6
|
11eb562a5e92fd05fd5a902b7e062a2813e7b3f7
|
refs/heads/master
| 2023-01-09T09:59:07.301366
| 2023-01-07T14:44:07
| 2023-01-07T14:44:07
| 132,786,398
| 0
| 0
| null | 2022-01-11T13:17:30
| 2018-05-09T16:45:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,527
|
py
|
import os
from dotenv import load_dotenv
# Load environment variables from '.env' file.
load_dotenv()
class BaseConfig(object):
REMEMBER_COOKIE_NAME = 'GENVASC Remember Me'
REMEMBER_COOKIE_DURATION = 1
MAIL_SERVER = os.environ['MAIL_SERVER']
MAIL_DEBUG = os.environ['MAIL_DEBUG']
SECURITY_EMAIL_SENDER = os.environ['LCBRUIT_EMAIL_ADDRESS']
SECRET_KEY = os.environ['GGPP_FLASK_SECRET_KEY']
DEBUG = os.environ['GGPP_FLASK_DEBUG'] == 'True'
SQLALCHEMY_DATABASE_URI = os.environ['SQLALCHEMY_DATABASE_URI']
SQLALCHEMY_TRACK_MODIFICATIONS = (
os.environ['GGPP_SQLALCHEMY_TRACK_MODIFICATIONS'] == 'True'
)
SQLALCHEMY_ECHO = os.environ['GGPP_SQLALCHEMY_ECHO'] == 'True'
SECURITY_PASSWORD_HASH = os.environ['GGPP_SECURITY_PASSWORD_HASH']
SECURITY_PASSWORD_SALT = os.environ['GGPP_SECURITY_PASSWORD_SALT']
SECURITY_TRACKABLE = os.environ['GGPP_SECURITY_TRACKABLE'] == 'True'
SMTP_SERVER = 'localhost'
APPLICATION_EMAIL_ADDRESS = os.environ['LCBRUIT_EMAIL_ADDRESS']
ERROR_EMAIL_SUBJECT = 'GENVASC Portal Error'
SECURITY_CHANGEABLE = True
SECURITY_RECOVERABLE = True
MAIL_DEFAULT_SENDER = os.environ["LCBRUIT_EMAIL_ADDRESS"]
# Admin user
ADMIN_EMAIL_ADDRESS = os.environ['ADMIN_EMAIL_ADDRESS']
ADMIN_FIRST_NAME = os.environ['ADMIN_FIRST_NAME']
ADMIN_LAST_NAME = os.environ['ADMIN_LAST_NAME']
ADMIN_PASSWORD = os.environ['ADMIN_PASSWORD']
# Celery Settings
broker_url=os.environ["BROKER_URL"]
result_backend=os.environ["CELERY_RESULT_BACKEND"]
CELERY_RATE_LIMIT=os.environ["CELERY_RATE_LIMIT"]
CELERY_REDIRECT_STDOUTS_LEVEL=os.environ["CELERY_REDIRECT_STDOUTS_LEVEL"]
CELERY_DEFAULT_QUEUE=os.environ["CELERY_DEFAULT_QUEUE"]
# Celery Schedules
PRACTICE_ETL_SCHEDULE_MINUTE=os.environ["PRACTICE_ETL_SCHEDULE_MINUTE"]
PRACTICE_ETL_SCHEDULE_HOUR=os.environ["PRACTICE_ETL_SCHEDULE_HOUR"]
# Databases
PRACTICE_DATABASE_URI=os.environ["PRACTICE_DATABASE_URI"]
RECRUIT_DATABASE_URI=os.environ["RECRUIT_DATABASE_URI"]
IMPORT_DATABASE_URI=os.environ["IMPORT_DATABASE_URI"]
class TestConfig(BaseConfig):
"""Configuration for automated testing"""
TESTING = True
SQLALCHEMY_DATABASE_URI="sqlite://"
PRACTICE_DATABASE_URI="sqlite://"
RECRUIT_DATABASE_URI="sqlite://"
WTF_CSRF_ENABLED = False
SMTP_SERVER = None
SQLALCHEMY_ECHO = False
broker_url=os.environ["BROKER_URL"] + '/test'
class TestConfigCRSF(TestConfig):
WTF_CSRF_ENABLED = True
|
[
"rabramley@gmail.com"
] |
rabramley@gmail.com
|
5277e41c0d4047c015a72bf855ef4d2d4ac53a48
|
958f24d5ac8fac8187d7a30f4abbd6df15297595
|
/Facturacion/Facturacion/asgi.py
|
2270532c585a7393766e4e3f54dbb5ec11964c30
|
[] |
no_license
|
daustariz18/Backend
|
0b0b421885d4a35b8f985e75f597c5e04b091a92
|
880c16a99db68e4cf7e312c04b9e8cb2b50cc2a2
|
refs/heads/master
| 2023-08-24T01:26:58.608075
| 2021-10-07T15:27:16
| 2021-10-07T15:27:16
| 414,657,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
ASGI config for Facturacion project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Facturacion.settings')
application = get_asgi_application()
|
[
"diusme@gmail.com"
] |
diusme@gmail.com
|
d9506c0977e5d9a1415e1c9fbc0b2a18b8bdbc3e
|
e1d3233af9a37e4f961acd571bd309e209e579ae
|
/manager/migrations/0003_auto_20190217_1939.py
|
a8a618c566cda2ea973cdf1181824c8c3ab532fe
|
[] |
no_license
|
AkashUR/DesktopManager
|
1e11b510b474e7c8d819a6f7617919a0af42ff83
|
0458b01781c083e1889a1db5f85685e1d50edcd1
|
refs/heads/master
| 2020-05-24T02:27:12.849967
| 2019-05-16T15:39:36
| 2019-05-16T15:39:36
| 187,054,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# Generated by Django 2.0.2 on 2019-02-17 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('manager', '0002_log_element'),
]
operations = [
migrations.AlterField(
model_name='log',
name='log_key',
field=models.IntegerField(default='1'),
),
]
|
[
"noreply@github.com"
] |
AkashUR.noreply@github.com
|
6ad33022849320f6b516dc75f13b791033e727d1
|
5f5d081a34c938d438d81217e0adcc7aef1a8491
|
/ImplementationV1.0/process_image.py
|
21b66ada5bb3bf837577d81aef29cb3d1a40ccbd
|
[] |
no_license
|
dilisharanasinghe/depslipts_code
|
e8f7992ad2ec2e99d78e37342de2611dfe19640a
|
d4bd9427f328adcc2ef44566cfb73d10cdfcd713
|
refs/heads/main
| 2023-04-04T07:07:22.900565
| 2020-10-26T13:02:33
| 2020-10-26T13:02:33
| 304,830,346
| 0
| 0
| null | 2020-10-26T13:02:34
| 2020-10-17T08:17:20
| null |
UTF-8
|
Python
| false
| false
| 8,167
|
py
|
import cv2
import numpy as np
class ProcessImage:
def __init__(self, image_file):
self.__image_file = image_file
self.__original_image = cv2.imread(self.__image_file)
self.__total_image_area = self.__original_image.shape[1]*self.__original_image.shape[0]
self.__resized_image = None
def __resize_image(self):
ratio_ = self.__original_image.shape[1] / float(self.__original_image.shape[0])
fixed_height = 500
width = int(float(fixed_height) / ratio_)
self.__total_image_area = fixed_height * width
self.__resized_image = cv2.resize(self.__original_image, (fixed_height, width))
def __get_contours(self):
if self.__resized_image is not None:
gray_image = cv2.cvtColor(self.__resized_image, cv2.COLOR_BGR2GRAY)
smoothed_image = cv2.GaussianBlur(gray_image, (5, 5), sigmaX=0)
edge_image = cv2.Canny(smoothed_image, 100, 200)
contours, hierarchy = cv2.findContours(edge_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
return contours, hierarchy
else:
return None, None
def __check_contours(self, contours):
max_area = 0
max_area_cnt = None
for cnt in contours:
area = cv2.contourArea(cnt)
# print(len(cnt), area)
if area > max_area:
max_area = area
max_area_cnt = cnt
# print(len(cnt), 'max area', area )
# print('Max contour area percentage {0}'.format(float(max_area / self.__total_image_area)))
if max_area > self.__total_image_area * 0.5:
return max_area_cnt
else:
return None
def __perspective_correction(self, contour):
# rect = cv2.minAreaRect(cnt)
# box = cv2.boxPoints(rect)
# box = np.int0(box)
# rectImage = cv2.drawContours(resizedImage, [box], 0, (0, 0, 255), 2)
epsilon = 0.1 * cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, epsilon, True)
# print(approx)
# print(drawContourList)
if len(approx) == 4:
# contourImage = cv2.drawContours(self.__resized_image, approx, -1, (0, 255, 0), 4)
# realPoints = approx * originalImage.shape[1]/float(fixed_height)
real_points = approx
real_points = np.reshape(real_points, (4, 2))
ordered_points_ = self.order_points(real_points)
# print(orderedPoints)
warped_image = self.four_point_transform(self.__resized_image, ordered_points_)
# cv2.imshow('contour image', contourImage)
# cv2.imshow('final image', warpedImage)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# cv2.imwrite('output.jpg', warpedImage)
else:
warped_image = self.__resized_image
return warped_image
@staticmethod
def order_points(pts):
        # initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype="float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
@staticmethod
def four_point_transform(image, pts):
# obtain a consistent order of the points and unpack them
# individually
rect = ProcessImage.order_points(pts)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
        # x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype="float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped
@staticmethod
def threshold_image(image):
"""
This function take one argument as
input. this function will convert
input image to binary image
:param image: image
:return: thresholded image
"""
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# gray_image = image
# converting it to binary image
# ret, threshold_img = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
# ret, threshold_img = cv2.threshold(gray_image, 150, 255, cv2.THRESH_BINARY)
threshold_img = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,
11, 6)
# saving image to view threshold image
# kernel = np.ones((2, 2), np.uint8)
# erosion = cv2.erode(img, kernel, iterations=1)
# morphologicalTransfromedImage = cv2.dilate(threshold_img, kernel, iterations=1)
# kernel = np.ones((3, 3), np.uint8)
# morphologicalTransfromedImage = cv2.morphologyEx(morphologicalTransfromedImage, cv2.MORPH_CLOSE, kernel)
# morphologicalTransfromedImage = cv2.morphologyEx(threshold_img, cv2.MORPH_OPEN, kernel, iterations=2)
# cv2.imwrite('thresholded.png', threshold_img)
# cv2.imshow('threshold image', threshold_img)
# cv2.imshow('MT image', morphologicalTransfromedImage)
# Maintain output window until
# user presses a key
# cv2.waitKey(0)
# Destroying present windows on screen
# cv2.destroyAllWindows()
return 255 - threshold_img
# return 255 - morphologicalTransfromedImage
def get_processed_image(self):
self.__resize_image()
contours, hierarchy = self.__get_contours()
if contours is not None:
max_area_contour = self.__check_contours(contours=contours)
if max_area_contour is not None:
warped_image = self.__perspective_correction(contour=max_area_contour)
else:
warped_image = self.__resized_image
thresholded_image = self.threshold_image(warped_image)
else:
thresholded_image = self.threshold_image(self.__resized_image)
return thresholded_image
if __name__ == '__main__':
process_image = ProcessImage('test_data/1602139426.jpg')
thresholded_image = process_image.get_processed_image()
cv2.imshow('thresholded image', thresholded_image)
cv2.waitKey(0)
    cv2.destroyAllWindows()
|
[
"dilisharanasinghe@gmail.com"
] |
dilisharanasinghe@gmail.com
|
eaac79b7de8e57f814c859d5578d8f629cec856d
|
767cf0d423216279ecae831fe323530f0b0b1d51
|
/neuralbj.py
|
0efdb6b0f24f86468550982bd3cefa325c8fc9a4
|
[] |
no_license
|
lowkeyluke/BlackjackAI
|
caa4df0992f659926675f4043ede8a60deba713a
|
e0d1edde2e655b017ed932ee6db1cc43c87e0fd2
|
refs/heads/master
| 2021-07-29T15:04:53.078149
| 2021-07-17T06:33:13
| 2021-07-17T06:33:13
| 106,197,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,149
|
py
|
"""Neural net without game:
Immutable data (non-updating).
Used solely for testing"""
import numpy as np
# activation function: sigmoid
def sigmoid(x, deriv=False):
if deriv:
return x*(1-x)
return 1/(1+np.exp(-x))
# input data, X. Data is replaceable
# [player total, dealer upcard]
X = np.array([[10,10],[20,5],[20,10],[5,5]])
# a bigger array may need to be scaled:
# X = X/np.amax(X, axis=0)
# output data, Y. Data is replaceable
# [stand/hit] (0/1)
Y = np.array([[1],[0],[0],[1]])
# Y does not need to be scaled, already between 0-1
layer3error = np.ones(Y.__len__()) # initialize array same size as Y, fill with 1's
for i in range(len(layer3error)):
    while abs(layer3error[i]) > .5: # while this entry's error is greater than .5, re-initialize & re-train
# initialize weights
weight1 = np.random.random((2, Y.__len__())) # (# of arrays, # of items in each array)
weight2 = np.random.random((Y.__len__(), 1)) # try (2*random) - 1
# TRAIN, use for loop 1000s of times.
number_of_training_iterations = 50000
for iteration in range(number_of_training_iterations):
layer1 = X # input
layer2 = sigmoid(np.dot(layer1, weight1)) # multiply input by weight. if > than threshold, activate.
layer3 = sigmoid(np.dot(layer2, weight2)) # layer2 * weight2
# backpropagate
layer3error = Y - layer3
layer3change = layer3error * sigmoid(layer3, deriv=True)
layer2error = layer3change.dot(weight2.T) # layer3change * weight2
layer2change = layer2error * sigmoid(layer2, deriv=True)
# update weights
weight2 += layer2.T.dot(layer3change) # layer2 * layer3change
weight1 += layer1.T.dot(layer2change) # layer1 * layer2change
if iteration%10000 == 0:
print("Error: ", layer3error)
# prediction
p = np.array([15,7])
layer1p = p
layer2p = sigmoid(np.dot(layer1p, weight1))
layer3p = sigmoid(np.dot(layer2p, weight2))
print("Given: ", p)
print("Prediction: ", layer3p)
|
[
"noreply@github.com"
] |
lowkeyluke.noreply@github.com
|
5dcb61257b2836cf5cfd57455cb6d8fd50373f9d
|
05d232f50ee12a8a837f077d7e70b13e404e9031
|
/Amazon_web_scrapping.py
|
5ae6d15441c66b963ef9ff70c804031d2c017048
|
[] |
no_license
|
Nidhig631/AmazonWebScrapping_Using_Python
|
6c020a96dad58997e4f107ee5fda608aa910d29c
|
4551196c50deede477e5f4beae8abf9a2288a88f
|
refs/heads/master
| 2023-07-18T18:52:41.028857
| 2021-09-03T12:49:46
| 2021-09-03T12:49:46
| 401,325,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,308
|
py
|
from bs4 import BeautifulSoup
import requests
import smtplib
import time
import datetime
import csv
import pandas as pd
# Connect to website
URL = 'https://www.amazon.in/Designer-Unicorn-Engineer-Because-Official/dp/B08JFBP1ZF/ref=pd_rhf_dp_s_pop_multi_srecs_sabr_4/262-0019943-4855247?pd_rd_w=0rikd&pf_rd_p=217ae98b-13ca-4bb5-a513-ddb3654ce36a&pf_rd_r=17NNRFYFRZSFJJCJJRE5&pd_rd_r=49606991-8cf9-4c3a-a949-3ddce94bd2fd&pd_rd_wg=ZSr8v&pd_rd_i=B08JFBP1ZF&psc=1'
# header from url:- httpbin.org/get
headers = {
    "User-Agent": "paste your User-Agent header value here (from httpbin.org/get)"}
# Extracting data from Website reading HTML
page = requests.get(URL, headers=headers)
soup1 = BeautifulSoup(page.content, "html.parser")
soup2 = BeautifulSoup(soup1.prettify(), "html.parser")
title = soup2.find(id='productTitle').get_text()
price = soup2.find(id='priceblock_ourprice').get_text()
rating = soup2.find(id='acrPopover').get_text()
# Printing data as per the requirements
title = title.strip()[0:]
print("Title = " + title)
price = price.strip()[1:]
print("Price = " + price)
rating = rating.strip()[0:]
print("Rating = " + rating)
today = datetime.date.today()
print(today)
header = ['Title', 'Price', 'Date']
data = [title, price, today]
with open('Amazon_Scrapper_Dataset.csv', 'w', newline='', encoding='UTF8') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerow(data)
# Now appending data to the csv
with open('Amazon_Scrapper_Dataset.csv', 'a+', newline='', encoding='UTF8') as f:
writer = csv.writer(f)
writer.writerow(data)
df = pd.read_csv('Amazon_Scrapper_Dataset.csv')  # read back the file written above
print(df)
def check_price():
# Connect to website
URL = 'https://www.amazon.in/Designer-Unicorn-Engineer-Because-Official/dp/B08JFBP1ZF/ref=pd_rhf_dp_s_pop_multi_srecs_sabr_4/262-0019943-4855247?pd_rd_w=0rikd&pf_rd_p=217ae98b-13ca-4bb5-a513-ddb3654ce36a&pf_rd_r=17NNRFYFRZSFJJCJJRE5&pd_rd_r=49606991-8cf9-4c3a-a949-3ddce94bd2fd&pd_rd_wg=ZSr8v&pd_rd_i=B08JFBP1ZF&psc=1'
# header from url:- httpbin.org/get
    headers = {
        "User-Agent": "paste your User-Agent header value here (from httpbin.org/get)"}
# Extracting data from Website reading HTML
page = requests.get(URL, headers=headers)
soup1 = BeautifulSoup(page.content, "html.parser")
soup2 = BeautifulSoup(soup1.prettify(), "html.parser")
title = soup2.find(id='productTitle').get_text()
price = soup2.find(id='priceblock_ourprice').get_text()
rating = soup2.find(id='acrPopover').get_text()
title = title.strip()[0:]
print("Title = " + title)
import datetime
today = datetime.date.today()
import csv
header = ['Title', 'Price', 'Date']
data = [title, price, today]
with open('Amazon_Scrapper_Dataset.csv', 'a+', newline='', encoding='UTF8') as f:
writer = csv.writer(f)
writer.writerow(data)
# if(price < 14):
# send_mail()
while(True):
check_price()
time.sleep(1)
# -- Optionally, use this code to send an email update if the price goes up or down
# def send_mail():
# server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
# server.ehlo()
# server.login('mail_id', 'email_pwd')
# subject = 'subject for the email'
# body = 'matter for the email'
# msg = f"Subject: {subject}\n\n{body}"
# server.sendmail('emailid', msg)
|
[
"nidhig631@gmail.com"
] |
nidhig631@gmail.com
|
42235200cd42835a18eff480fd8af3ec941c2501
|
2b115deaf8aec0f1478aac411a0973847a0b6148
|
/fetch.py
|
0737710c8fcb7355ea6e682830bde82f407adb54
|
[] |
no_license
|
Tiquiero/zhihu-python
|
40d462a0396d083a6ae3345e5e7a8dff9f254a23
|
933c1ebc45f2385423ccc37a245428c5a3557ea7
|
refs/heads/master
| 2020-03-14T02:32:56.816000
| 2018-04-28T10:49:28
| 2018-04-28T10:49:28
| 131,401,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,508
|
py
|
import os
import re
import requests
def download(folder,url):
if not os.path.exists(folder):
os.makedirs(folder)
req = requests.get(url)
if req.status_code == requests.codes.ok:
name = url.split('/')[-1]
f = open("./"+folder+'/'+name,'wb')
f.write(req.content)
f.close()
return True
else:
return False
def init(url):
ua = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
s = requests.Session()
s.headers.update(ua)
ret=s.get(url)
s.headers.update({"authorization":"Bearer Mi4xaHNqWEFRQUFBQUFBSUt6am9hdzFEUmNBQUFCaEFsVk5uV1dDV3dDVUJSRm9jYVVXV2FiREZfWHZHMDM0b081Vzd3|1519720350|295ebdcd1530503e6caf61a5942fc1dba85ca363"})
return s
def fetch_answer(s,qid,limit,offset):
params={
'sort_by':'default',
'include':'data[*].is_normal,is_collapsed,annotation_action,annotation_detail,collapse_reason,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,voteup_count,reshipment_settings,comment_permission,mark_infos,created_time,updated_time,review_info,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp,upvoted_followees;data[*].author.follower_count,badge[?(type=best_answerer)].topics',
'limit':limit,
'offset':offset
}
url ="https://www.zhihu.com/api/v4/questions/"+qid+"/answers"
return s.get(url,params=params)
def fetch_all_answers(url):
session = init(url)
q_id = url.split('/')[-1]
offset = 0
limit=20
answers=[]
is_end=False
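    # page through the answers API `limit` records at a time until the server reports the last page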
while not is_end:
ret=fetch_answer(session,q_id,limit,offset)
answers+=ret.json()['data']
is_end= ret.json()['paging']['is_end']
print("Offset: ",offset)
print("is_end: ",is_end)
offset+=limit
return answers
def grep_image_urls(text):
jpg = re.compile(r'https://[^\s]*?_r\.jpg')
jpeg = re.compile(r'https://[^\s]*?_r\.jpeg')
gif = re.compile(r'https://[^\s]*?_r\.gif')
png = re.compile(r'https://[^\s]*?_r\.png')
imgs=[]
imgs+=jpg.findall(text)
imgs+=jpeg.findall(text)
imgs+=gif.findall(text)
imgs+=png.findall(text)
imgs = list(set(imgs))
return imgs
url = "https://www.zhihu.com/question/37787176"
answers=fetch_all_answers(url)
folder = '37787176'
for ans in answers:
imgs = grep_image_urls(ans['content'])
for url in imgs:
download(folder,url)
|
[
"tiquiero@163.com"
] |
tiquiero@163.com
|
0fcbb9241779fdaf99e46611d4e1b9d0073cf95f
|
0bee19447c1a93a95cc59980e9a38ad3ea71ba49
|
/rnn_layers.py
|
8ef7e5f62c6619d7de7af83718e403242404911b
|
[] |
no_license
|
weilai0980/HeteroRNN
|
de4f078365bfa997b885bf64c9b6a654a1907f5a
|
82421a2e2e5459752f5a5af416123ece551ce9b4
|
refs/heads/master
| 2020-04-25T12:44:31.891307
| 2019-04-19T20:56:06
| 2019-04-19T20:56:06
| 172,787,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,391
|
py
|
import sys
import collections
import hashlib
import numbers
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops.rnn_cell_impl import *
# local
from utils_libs import *
#from mv_rnn_cell import *
#from ts_mv_rnn import *
# ---- residual and plain dense layers ----
def res_lstm(x, hidden_dim, n_layers, scope, dropout_keep_prob):
#dropout
#x = tf.nn.dropout(x, dropout_keep_prob)
with tf.variable_scope(scope):
#Deep lstm: residual or highway connections
lstm_cell = tf.nn.rnn_cell.LSTMCell(hidden_dim, \
initializer= tf.contrib.keras.initializers.glorot_normal())
hiddens, state = tf.nn.dynamic_rnn(cell = lstm_cell, inputs = x, dtype = tf.float32)
for i in range(1, n_layers):
with tf.variable_scope(scope+str(i)):
tmp_h = hiddens
lstm_cell = tf.nn.rnn_cell.LSTMCell(hidden_dim, \
initializer= tf.contrib.keras.initializers.glorot_normal())
hiddens, state = tf.nn.dynamic_rnn(cell = lstm_cell, inputs = hiddens, dtype = tf.float32)
hiddens = hiddens + tmp_h
return hiddens, state
def plain_lstm(x, dim_layers, scope, dropout_keep_prob):
#dropout
#x = tf.nn.dropout(x, dropout_keep_prob)
with tf.variable_scope(scope):
tmp_cell = tf.nn.rnn_cell.LSTMCell(dim_layers[0], \
initializer= tf.contrib.keras.initializers.glorot_normal())
# dropout
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(tmp_cell, state_keep_prob = dropout_keep_prob)
hiddens, state = tf.nn.dynamic_rnn(cell = lstm_cell, inputs = x, dtype = tf.float32)
for i in range(1,len(dim_layers)):
with tf.variable_scope(scope+str(i)):
tmp_cell = tf.nn.rnn_cell.LSTMCell(dim_layers[i], \
initializer= tf.contrib.keras.initializers.glorot_normal())
# dropout
lstm_cell = tf.nn.rnn_cell.DropoutWrapper(tmp_cell, state_keep_prob = dropout_keep_prob)
hiddens, state = tf.nn.dynamic_rnn(cell = lstm_cell, inputs = hiddens, dtype = tf.float32)
return hiddens, state
def res_dense(x, x_dim, hidden_dim, n_layers, scope, dropout_keep_prob):
#dropout
x = tf.nn.dropout(x, dropout_keep_prob)
with tf.variable_scope(scope):
        # initialization
w = tf.get_variable('w', [x_dim, hidden_dim], dtype = tf.float32,
initializer = tf.contrib.layers.variance_scaling_initializer())
#initializer = tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.zeros([hidden_dim]))
h = tf.nn.relu(tf.matmul(x, w) + b )
regularization = tf.nn.l2_loss(w)
#dropout
#h = tf.nn.dropout(h, dropout_keep_prob)
for i in range(1, n_layers):
with tf.variable_scope(scope+str(i)):
w = tf.get_variable('w', [hidden_dim, hidden_dim], \
initializer = tf.contrib.layers.variance_scaling_initializer())
#initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.zeros( hidden_dim ))
# residual connection
tmp_h = h
h = tf.nn.relu( tf.matmul(h, w) + b )
h = tmp_h + h
regularization += tf.nn.l2_loss(w)
return h, regularization
def plain_dense(x, x_dim, dim_layers, scope, dropout_keep_prob, max_norm_regul):
#dropout
x = tf.nn.dropout(x, dropout_keep_prob)
with tf.variable_scope(scope):
        # initialization
w = tf.get_variable('w', [x_dim, dim_layers[0]], dtype=tf.float32,\
initializer = tf.contrib.layers.variance_scaling_initializer())
#initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.zeros([dim_layers[0]]))
# max norm constraints
if max_norm_regul > 0:
clipped = tf.clip_by_norm(w, clip_norm = max_norm_regul, axes = 1)
clip_w = tf.assign(w, clipped)
h = tf.nn.relu( tf.matmul(x, clip_w) + b )
else:
h = tf.nn.relu( tf.matmul(x, w) + b )
#?
regularization = tf.nn.l2_loss(w)
#regularization = tf.reduce_sum(tf.abs(w))
# dropout
h = tf.nn.dropout(h, dropout_keep_prob)
for i in range(1, len(dim_layers)):
with tf.variable_scope(scope+str(i)):
w = tf.get_variable('w', [dim_layers[i-1], dim_layers[i]], dtype=tf.float32,\
initializer = tf.contrib.layers.variance_scaling_initializer())
#initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.zeros( dim_layers[i] ))
# max norm constraints
if max_norm_regul > 0:
clipped = tf.clip_by_norm(w, clip_norm = max_norm_regul, axes = 1)
clip_w = tf.assign(w, clipped)
h = tf.nn.relu( tf.matmul(h, clip_w) + b )
else:
h = tf.nn.relu( tf.matmul(h, w) + b )
#?
regularization += tf.nn.l2_loss(w)
#regularization += tf.reduce_sum(tf.abs(w))
return h, regularization
def multi_dense(x, x_dim, num_layers, scope, dropout_keep_prob, max_norm_regul, activation_type):
in_dim = x_dim
out_dim = int(in_dim/2)
h = x
regularization = 0.0
for i in range(num_layers):
with tf.variable_scope(scope + str(i)):
# dropout
h = tf.nn.dropout(h, dropout_keep_prob)
w = tf.get_variable('w',
[in_dim, out_dim],
dtype=tf.float32,\
initializer = tf.contrib.layers.xavier_initializer())
#tf.contrib.layers.variance_scaling_initializer())
#initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.zeros(out_dim))
# max norm constraints
if max_norm_regul > 0:
clipped = tf.clip_by_norm(w, clip_norm = max_norm_regul, axes = 1)
clip_w = tf.assign(w, clipped)
tmp_h = tf.matmul(h, clip_w) + b
regularization += tf.nn.l2_loss(clip_w)
#h = tf.nn.relu(tf.matmul(h, clip_w) + b)
else:
tmp_h = tf.matmul(h, w) + b
regularization += tf.nn.l2_loss(w)
# nonlinear activation
if activation_type == 'relu':
h = tf.nn.relu(tmp_h)
elif activation_type == 'leaky_relu':
# leaky relu
h = tf.maximum(tmp_h, 0.3*tmp_h)
else:
print("\n [ERROR] activation type, multi-dense \n")
#?
# regularization += tf.nn.l2_loss(w)
# regularization += tf.reduce_sum(tf.abs(w))
in_dim = out_dim
out_dim = int(out_dim/2)
return h, regularization, in_dim
def dense(x, x_dim, out_dim, scope, dropout_keep_prob, max_norm_regul, activation_type):
h = x
regularization = 0.0
with tf.variable_scope(scope):
# dropout on the input
h = tf.nn.dropout(h, dropout_keep_prob)
w = tf.get_variable('w',
[x_dim, out_dim],
dtype = tf.float32,\
initializer = tf.contrib.layers.xavier_initializer())
#variance_scaling_initializer())
#initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.zeros(out_dim))
# max norm constraints
if max_norm_regul > 0:
clipped = tf.clip_by_norm(w, clip_norm = max_norm_regul, axes = 1)
clip_w = tf.assign(w, clipped)
tmp_h = tf.matmul(h, clip_w) + b
regularization = tf.nn.l2_loss(clip_w)
else:
tmp_h = tf.matmul(h, w) + b
regularization = tf.nn.l2_loss(w)
# activation
if activation_type == 'relu':
h = tf.nn.relu(tmp_h)
elif activation_type == 'leaky_relu':
# leaky relu
h = tf.maximum(tmp_h, 0.3*tmp_h)
elif activation_type == '':
h = tmp_h
else:
print("\n [ERROR] activation type, dense \n")
'''
if bool_no_activation == True:
h = tmp_h
else:
h = tf.nn.relu(tmp_h)
'''
#?
#regularization = tf.nn.l2_loss(w)
return h, regularization
|
[
"tian.guo0980@gmail.com"
] |
tian.guo0980@gmail.com
|
3dd764efee547895b61b17074bef1e80ee82a562
|
9bb6795a12d6e042b962704dab9ec59d92d54e8f
|
/1_numpy/2_reshape.py
|
b5d60e8241460327f2b7b83d534050593e76005f
|
[] |
no_license
|
kimsoosoo0928/Perfect_Guide
|
c5177037512cb06814f0bbfcb70a22d14c9ec1fb
|
9b615d320957babb1a918fb38282062998a1e5c4
|
refs/heads/main
| 2023-07-18T12:29:03.353274
| 2021-08-29T00:31:28
| 2021-08-29T00:31:28
| 396,668,104
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
import numpy as np
array1 = np.arange(10)
print('array1 : \n', array1)
array2 = array1.reshape(2,5)
print('array2 : \n', array2)
array3 = array1.reshape(5,2)
print('array3 : \n', array3)
'''
array1 :
[0 1 2 3 4 5 6 7 8 9]
array2 :
[[0 1 2 3 4]
[5 6 7 8 9]]
array3 :
[[0 1]
[2 3]
[4 5]
[6 7]
[8 9]]
'''
array1 = np.arange(10)
print(array1)
array2 = array1.reshape(-1,5)
print('array2 shape : ', array2.shape)
array3 = array1.reshape(5,-1)
print('array3 shape : ', array3.shape)
'''
[0 1 2 3 4 5 6 7 8 9]
array2 shape : (2, 5)
array3 shape : (5, 2)
'''
array1 = np.arange(8)
array3d = array1.reshape((2,2,2))
print('array3d : \n', array3d.tolist())
array5 = array1.reshape((-1,1))
print('array5 : \n', array5.tolist())
print('array5 shape : \n', array5.shape)
array6 = array1.reshape((-1,1))
print('array6 : \n', array6.tolist())
print('array6 shape : \n', array6.shape)
'''
array3d :
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
array5 :
[[0], [1], [2], [3], [4], [5], [6], [7]]
array5 shape :
(8, 1)
array6 :
[[0], [1], [2], [3], [4], [5], [6], [7]]
array6 shape :
(8, 1)
'''
|
[
"kimsoosoo0928@gmail.com"
] |
kimsoosoo0928@gmail.com
|
482245b45b2db988d4939f10948afa5fc56c5af9
|
e1fd9dcbdb7c63a9d72772202fd9462faf728b9d
|
/Comprehensions - Exercise/06. Matrix of Palindromes.py
|
88c292f825dd455576f5caa37696b131d5eca2c3
|
[] |
no_license
|
AngelValAngelov/Python-Advanced-Exercises
|
be34c5903cce17100e9fa2f04bb2394b33fccb87
|
a206207a9a64051c962789b100a7a8945b1b6f80
|
refs/heads/main
| 2023-07-14T05:52:40.959381
| 2021-08-26T20:52:07
| 2021-08-26T20:52:07
| 373,634,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
rows, cols = [int(x) for x in input().split(" ")]
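# each row x prints cols three-letter palindromes: outer letters chr(97 + x), middle letter chr(97 + x + y)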
matrix = [print(' '.join([f"{chr(97 + x)}{chr(97 + y + x)}{chr(97 + x)}" for y in range(cols)])) for x in range(rows)]
|
[
"noreply@github.com"
] |
AngelValAngelov.noreply@github.com
|
69acf6cb42853141e98f121c77a9d61f1f1a30cf
|
2c926b4847a44c7f831d47ed0160751d3248e8f4
|
/venv/lib/python3.8/site-packages/hubspot/automation/actions/models/single_field_dependency.py
|
f18ca6bf64e6504458c415ed11f6e4ab7e527d5a
|
[] |
no_license
|
Women-in-Tech-Society/WITS_Site
|
c42cd2c9abe1b5515b80be82dc876a6c3842e42a
|
5dbf22f5ee5a36358f6f279af4c13d86d31653c5
|
refs/heads/main
| 2023-05-11T02:34:05.531902
| 2021-06-01T01:05:12
| 2021-06-01T01:05:12
| 278,658,100
| 0
| 5
| null | 2022-11-22T18:41:35
| 2020-07-10T14:43:28
|
Python
|
UTF-8
|
Python
| false
| false
| 6,688
|
py
|
# coding: utf-8
"""
Custom Workflow Actions
Create custom workflow actions # noqa: E501
The version of the OpenAPI document: v4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.automation.actions.configuration import Configuration
class SingleFieldDependency(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"dependency_type": "str",
"dependent_field_names": "list[str]",
"controlling_field_name": "str",
}
attribute_map = {
"dependency_type": "dependencyType",
"dependent_field_names": "dependentFieldNames",
"controlling_field_name": "controllingFieldName",
}
def __init__(
self,
dependency_type="SINGLE_FIELD",
dependent_field_names=None,
controlling_field_name=None,
local_vars_configuration=None,
): # noqa: E501
"""SingleFieldDependency - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._dependency_type = None
self._dependent_field_names = None
self._controlling_field_name = None
self.discriminator = None
self.dependency_type = dependency_type
self.dependent_field_names = dependent_field_names
self.controlling_field_name = controlling_field_name
@property
def dependency_type(self):
"""Gets the dependency_type of this SingleFieldDependency. # noqa: E501
:return: The dependency_type of this SingleFieldDependency. # noqa: E501
:rtype: str
"""
return self._dependency_type
@dependency_type.setter
def dependency_type(self, dependency_type):
"""Sets the dependency_type of this SingleFieldDependency.
:param dependency_type: The dependency_type of this SingleFieldDependency. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and dependency_type is None
): # noqa: E501
raise ValueError(
"Invalid value for `dependency_type`, must not be `None`"
) # noqa: E501
allowed_values = ["SINGLE_FIELD"] # noqa: E501
if (
self.local_vars_configuration.client_side_validation
and dependency_type not in allowed_values
): # noqa: E501
raise ValueError(
"Invalid value for `dependency_type` ({0}), must be one of {1}".format( # noqa: E501
dependency_type, allowed_values
)
)
self._dependency_type = dependency_type
@property
def dependent_field_names(self):
"""Gets the dependent_field_names of this SingleFieldDependency. # noqa: E501
:return: The dependent_field_names of this SingleFieldDependency. # noqa: E501
:rtype: list[str]
"""
return self._dependent_field_names
@dependent_field_names.setter
def dependent_field_names(self, dependent_field_names):
"""Sets the dependent_field_names of this SingleFieldDependency.
:param dependent_field_names: The dependent_field_names of this SingleFieldDependency. # noqa: E501
:type: list[str]
"""
if (
self.local_vars_configuration.client_side_validation
and dependent_field_names is None
): # noqa: E501
raise ValueError(
"Invalid value for `dependent_field_names`, must not be `None`"
) # noqa: E501
self._dependent_field_names = dependent_field_names
@property
def controlling_field_name(self):
"""Gets the controlling_field_name of this SingleFieldDependency. # noqa: E501
:return: The controlling_field_name of this SingleFieldDependency. # noqa: E501
:rtype: str
"""
return self._controlling_field_name
@controlling_field_name.setter
def controlling_field_name(self, controlling_field_name):
"""Sets the controlling_field_name of this SingleFieldDependency.
:param controlling_field_name: The controlling_field_name of this SingleFieldDependency. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and controlling_field_name is None
): # noqa: E501
raise ValueError(
"Invalid value for `controlling_field_name`, must not be `None`"
) # noqa: E501
self._controlling_field_name = controlling_field_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SingleFieldDependency):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SingleFieldDependency):
return True
return self.to_dict() != other.to_dict()
|
[
"mhuda@uwo.ca"
] |
mhuda@uwo.ca
|
21996a6ac30b58a0c8b012f9581fe26cfbfda0c6
|
2fcd914869a4ae3d80bf1e18c7f9247537dee5e2
|
/exercicio 2.py
|
b73766e5bb953d7b9af48240ef8cf010add5c388
|
[] |
no_license
|
vinicius48/exercicio-1
|
75ea541ebe2466fb00b95962986cf827d37c6951
|
ef1f9b3f81e554dbb80f0f6b1b2304fedea7afe2
|
refs/heads/main
| 2023-07-26T03:18:44.040604
| 2021-09-07T02:03:18
| 2021-09-07T02:03:18
| 401,657,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
matriz_a = [[1,2], [4,-1]]
matriz_oposta_a = [[0,0], [0,0]]
matriz_b = [[0,0], [0,0]]
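# matriz_oposta_a holds the additive inverse of matriz_a; matriz_b holds matriz_a scaled by 1/9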
for l in range(2):
for c in range(2):
matriz_oposta_a[l][c] = matriz_a[l][c] - matriz_a[l][c] * 2
matriz_b[l][c] = matriz_a[l][c] * (1/9)
print(matriz_b, '\n', matriz_oposta_a)
assert matriz_b != matriz_oposta_a
|
[
"noreply@github.com"
] |
vinicius48.noreply@github.com
|
707cbe0e861c502e1e391185502522c94bb8d7ad
|
5d3a26f35eb9c074c43b90db2d1ff78a6c9198c2
|
/Codes in book/PyLorenz.py
|
9ab67c463a9e63a3b11deecce9a9f4da5da7c7af
|
[] |
no_license
|
asukaminato0721/PyOpenGL-Practise
|
3585364a4f99c2e6252d7e71864b6c12431289ed
|
d065fd1b6f8a4944c1eb5225f934eca6bee856a2
|
refs/heads/master
| 2023-09-04T11:17:31.003716
| 2020-07-04T17:20:50
| 2020-07-04T17:20:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,361
|
py
|
# PyLorenz.py
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from random import *
from numpy import *
import sys
# Globals for window width and height
global width
global height
# Initial values of width and height
width = 500
height = 500
def init():
# White background
glClearColor(1.0, 1.0, 1.0, 0.0)
# Set the projection matrix... our "view"
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
# Set the plot window range
# This will coincide with the for... arange loops
gluOrtho2D(-30.0, 30.0, -30.0, 30.0)
# Set the matrix for the object we are drawing
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def plotlorenz():
glClear(GL_COLOR_BUFFER_BIT)
# Enlarge the points for better visibility
glPointSize(2.0)
    # The first plot (drawn in red below) is the original plot
# Note the values for x,y, and z
x = 0.50000
y = 0.50000
z = 0.50000
dt = 0.0005
glColor3f(1.0, 0.0, 0.0)
# the range is the horizontal width of the window
for n in arange(-30, 30, 0.0005):
# Lorenz’s equations
x = x + (-10*x + 10*y)*dt
y = y + (28*x - y - x*z)*dt
z = z + (-2.66667*z + x*y)*dt
glBegin(GL_POINTS)
glVertex2f(x, z-20)
glEnd()
glFlush()
# The second plot in red is the truncated plot
# Note the small difference in starting x,y,z values!
# x = 0.50000
# y = 0.50000
# z = 0.50000
# dt = 0.0005
# glColor3f(1.0, 0.0, 0.0)
# for n in arange(-30, 30, 0.0005):
# x = x + (-10*x + 10*y)*dt
# y = y + (28*x - y - x*z)*dt
# z = z + (-2.66667*z + x*y)*dt
# glBegin(GL_POINTS)
# glVertex2f(n, y)
# glEnd()
# glFlush()
def keyboard(key, x, y):
    # Allows us to quit by pressing 'Esc' or 'q'
    # (under Python 3, GLUT delivers the key as a bytes object)
    if key in (b'\x1b', chr(27)):
        sys.exit()
    if key in (b'q', "q"):
        sys.exit()
def main():
global width
global height
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE)
glutInitWindowPosition(100, 100)
glutInitWindowSize(width, height)
glutCreateWindow("Lorenz")
glutDisplayFunc(plotlorenz)
glutKeyboardFunc(keyboard)
init()
glutMainLoop()
main()
# End Program
|
[
"noreply@github.com"
] |
asukaminato0721.noreply@github.com
|
2a62194338b909a52a0de4280e79ec646882d92a
|
de2a6c2d179cb6d0c583b54e09d1bf29c0892af1
|
/railway/package.py
|
d0795ea2e8ef57b18e40b531c0999900e45a9330
|
[] |
no_license
|
yyww322/RailWay
|
af6bf4a960e191811d63aed894285cebd2ba61cb
|
00af82a3bf62341203956098ccac37972b9ab50f
|
refs/heads/master
| 2021-01-10T01:40:31.582879
| 2015-11-16T09:13:27
| 2015-11-16T09:13:27
| 46,093,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
# -*- coding: utf-8 -*-
# __author__ = 'CoolYan'
from distutils.core import setup
import py2exe
setup(console=['RailWay.py'])
|
[
"110161598@qq.com"
] |
110161598@qq.com
|
9ad7ba4a5f420ba40e95879c3c0394387d39de2f
|
7274ce2b75d49a90c57e9220756bc9beb532c9e1
|
/preprocess/transfer_vector/generate_vector/__init__.py
|
b9ea07c910bd9fb6029e1c6cbbd7fe574314dda0
|
[] |
no_license
|
chauncyzhu/textclassification
|
4eb03f2b11abd67680daab24f373971ce33d89cd
|
0b3960f748ba66278250132d8b16d189cabe4a3f
|
refs/heads/master
| 2021-01-19T08:59:13.889878
| 2018-03-10T05:38:19
| 2018-03-10T05:38:19
| 87,704,238
| 2
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 161
|
py
|
# coding=gbk
"""
Pass in the training set and the test set, both as DataFrames. For each word found in the training set, look up its bdc value; if a word in the test set has no corresponding entry, its value is 0.
"""
|
[
"chauncyzhu@163.com"
] |
chauncyzhu@163.com
|
eee3c44ee37f4d9e37e7c411d908b2b1856e371d
|
c13686e36daaf1ba70c4999d152e179c108726fc
|
/clean_data.py
|
09f16f5ad94316da62dbe0304018b1de304b0409
|
[] |
no_license
|
ravishrawal/nlp_sentiment_analysis
|
00b60a5bf09c64012edb0f18fda14bcae9f6cb9a
|
844729cf3f654bd8676c88475239f8d6b2b0c138
|
refs/heads/main
| 2023-07-05T02:10:49.372417
| 2021-08-04T13:49:42
| 2021-08-04T13:49:42
| 390,898,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,865
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 29 20:14:06 2021
@author: ravishrawal
"""
from utils import *
base_path = "/Users/ravishrawal/Desktop/Columbia MS/Summer B/QMSS NLP/final_project/"
test = 'the work-life balance is great! Love teh culture here. a+, 5 stars'
# convert to df & remove irrelevant columns
reviews_df = csv_to_df(base_path + "employee_reviews.txt")
# clean text & tokenize
# -- summary column --
reviews_df["summary"] = reviews_df.summary.apply(to_lowercase)
reviews_df["summary"] = reviews_df.summary.apply(rem_sp_char)
reviews_df["summary"] = reviews_df.summary.apply(rem_non_eng)
reviews_df["summary"] = reviews_df.summary.apply(rem_sw)
reviews_df["summary_stemmed"] = reviews_df.summary.apply(stem_str)
# -- pros column --
reviews_df["pros"] = reviews_df.pros.apply(to_lowercase)
reviews_df["pros"] = reviews_df.pros.apply(rem_sp_char)
reviews_df["pros"] = reviews_df.pros.apply(rem_non_eng)
reviews_df["pros"] = reviews_df.pros.apply(rem_sw)
reviews_df["pros_stemmed"] = reviews_df.pros.apply(stem_str)
# -- cons column --
reviews_df["cons"] = reviews_df.cons.apply(to_lowercase)
reviews_df["cons"] = reviews_df.cons.apply(rem_sp_char)
reviews_df["cons"] = reviews_df.cons.apply(rem_non_eng)
reviews_df["cons"] = reviews_df.cons.apply(rem_sw)
reviews_df["cons_stemmed"] = reviews_df.cons.apply(stem_str)
# -- advice_to_mgmt column --
reviews_df["advice_to_mgmt"] = reviews_df.advice_to_mgmt.apply(to_lowercase)
reviews_df["advice_to_mgmt"] = reviews_df.advice_to_mgmt.apply(rem_sp_char)
reviews_df["advice_to_mgmt"] = reviews_df.advice_to_mgmt.apply(rem_non_eng)
reviews_df["advice_to_mgmt"] = reviews_df.advice_to_mgmt.apply(rem_sw)
reviews_df["advice_to_mgmt_stemmed"] = reviews_df.advice_to_mgmt.apply(stem_str)
# save output
write_pickle(base_path, "reviews_data_cleaned_w_dates.pkl", reviews_df)
|
[
"noreply@github.com"
] |
ravishrawal.noreply@github.com
|
ea8f3fefee0c524940d86a4601d4086cee9338d2
|
75db7f09ce965d89611ffd53201611ef427f65a7
|
/buddysuite/tests/test_databasebuddy/test_db_ui.py
|
c15a2839664ca924dd030dd2102e212d389cf0f2
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
ctSkennerton/BuddySuite
|
d217e645002dffa85ff09025536382b943d9917f
|
d920eec0d5a2e30dd896b0474c3ede79e29e0c13
|
refs/heads/master
| 2021-01-17T17:30:08.119258
| 2017-02-24T23:01:35
| 2017-02-24T23:01:35
| 82,962,634
| 0
| 0
| null | 2017-02-23T19:20:57
| 2017-02-23T19:20:57
| null |
UTF-8
|
Python
| false
| false
| 46,031
|
py
|
import pytest
import os
import re
import sys
import argparse
from copy import deepcopy
import buddy_resources as br
import DatabaseBuddy as Db
def fmt(prog):
return br.CustomHelpFormatter(prog)
parser = argparse.ArgumentParser(prog="DbBuddy.py", formatter_class=fmt, add_help=False, usage=argparse.SUPPRESS,
description='''
\033[1mDatabaseBuddy\033[m
Go forth to the servers of sequence, and discover.
\033[1mUsage examples\033[m:
DbBuddy.py -ls (launch empty live session)
DbBuddy.py "<accn1,accn2,accn3,...>" -<cmd>
DbBuddy.py "<search term1, search term2,...>" -<cmd>
DbBuddy.py "<accn1,search term1>" -<cmd>
DbBuddy.py "/path/to/file_of_accns" -<cmd>
''')
br.db_modifiers["database"]["choices"] = Db.DATABASES
br.flags(parser, ("user_input", "Specify accession numbers or search terms, "
"either in a file or as a comma separated list"),
br.db_flags, br.db_modifiers, Db.VERSION)
# This is to allow py.test to work with its own flags
in_args = parser.parse_args([])
def mock_cmdloop(*args):
print(args)
return True
class MockUsage(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def increment(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
return "".join(self.args)
def mock_filter(_, line, mode):
print("'%s' filter mocked! %s" % (mode, line))
return
class OpenPermissionError(object):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def close():
raise PermissionError
def mock_fileexistserror(*args, **kwargs):
raise FileExistsError(args, kwargs)
def mock_keyboardinterrupt(*args, **kwargs):
raise KeyboardInterrupt(args, kwargs)
def mock_guesserror(*args, **kwargs):
raise br.GuessError("%s, %s" % (args, kwargs))
def mock_systemexit(*args, **kwargs):
sys.exit("%s, %s" % (args, kwargs))
# A few real accession numbers to test things out with
ACCNS = ["NP_001287575.1", "ADH10263.1", "XP_005165403.2", "A0A087WX72", "A0A096MTH0", "A0A0A9YFB0",
"XM_003978475", "ENSAMEG00000011912", "ENSCJAG00000008732", "ENSMEUG00000000523"]
# ###################### argparse_init() ###################### #
def test_argparse_init(capsys, monkeypatch, hf):
monkeypatch.setattr(sys, "argv", ['DatabaseBuddy.py', "Casp9"])
temp_in_args, dbbuddy = Db.argparse_init()
assert hf.string2hash(str(dbbuddy)) == "b61a8e0e0a97f33ec1e85c09391ada64"
monkeypatch.setattr(sys, "argv", ['DatabaseBuddy.py', "Casp9,Panx3", "Cx43"])
temp_in_args, dbbuddy = Db.argparse_init()
assert hf.string2hash(str(dbbuddy)) == "c717f3c1636ab03f0c5f5e86d5e909cb"
monkeypatch.setattr(sys, "argv", ['DatabaseBuddy.py', "-f"])
with pytest.raises(SystemExit):
Db.argparse_init()
out, err = capsys.readouterr()
assert "DbBuddy.py: error: unrecognized arguments: -f" in err
def test_liveshell_init(monkeypatch, capsys, hf):
# Default instantiate
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
assert type(liveshell.tmpdir) == br.TempDir
assert liveshell.terminal_default == "\033[m\033[40m\033[97m"
assert liveshell.prompt == '[95m[1mDbBuddy[m[40m[97m[1m>[m[40m[97m '
assert hf.string2hash(liveshell.doc_leader) == "e71aa4976437bdb0c22eeaacfaea6f9f"
assert hash(liveshell.dbbuddy) == hash(dbbuddy)
assert liveshell.crash_file == crash_file
assert os.path.split(liveshell.history_path)[-1] == "cmd_history"
assert not liveshell.undo
assert not liveshell.hash
assert liveshell.shell_execs == []
assert type(liveshell.usage) == br.Usage
out, err = capsys.readouterr()
assert "Your session is currently unpopulated. Use 'search' to retrieve records." in out
# Set cmd history path
tmp_dir = br.TempDir()
monkeypatch.setitem(Db.CONFIG, "data_dir", tmp_dir.path)
liveshell = Db.LiveShell(dbbuddy, crash_file)
assert liveshell.history_path == "%s%scmd_history" % (tmp_dir.path, os.sep)
# Permission error
os.chmod("%s%scmd_history" % (tmp_dir.path, os.sep), 0o333)
liveshell = Db.LiveShell(dbbuddy, crash_file)
# Windows does not actually change the permissions with os.chmod...
if os.name != "nt":
assert liveshell.history_path == "%s%scmd_history" % (liveshell.tmpdir.path, os.sep)
# Run initial search
monkeypatch.setattr(Db, "retrieve_summary", lambda _: True)
dbbuddy = Db.DbBuddy("Inx15")
Db.LiveShell(dbbuddy, crash_file)
assert not dbbuddy.records
Db.LiveShell(dbbuddy, crash_file)
dbbuddy = Db.DbBuddy("ENSAMEG00000011912")
assert len(dbbuddy.records) == 1
def test_liveshell_precmd(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
assert liveshell.precmd("foo bar line") == "foo bar line"
def test_liveshell_postcmd(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
assert liveshell.postcmd("STOP!", "foo bar line") == "STOP!"
assert liveshell.usage.stats['LiveShell'][Db.VERSION.short()]['foo'] == 1
def test_liveshell_dump_session(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy(_databases="uniprot")
dbbuddy.server_clients["uniprot"] = Db.UniProtRestClient(dbbuddy)
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
pre_dump = liveshell.crash_file.read()
liveshell.dump_session()
assert pre_dump == liveshell.crash_file.read()
liveshell.dbbuddy.search_terms.append("Blahh")
liveshell.dump_session()
assert pre_dump != liveshell.crash_file.read()
assert liveshell.dbbuddy.server_clients['uniprot'].lock
assert not liveshell.dbbuddy.server_clients['ensembl']
assert liveshell.undo
def test_liveshell_default(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
liveshell.default("Dunno...")
out, err = capsys.readouterr()
assert '*** Unknown syntax: Dunno...\n\n' in out
with pytest.raises(SystemExit):
liveshell.default("exit")
out, err = capsys.readouterr()
assert "Goodbye" in out
def test_liveshell_append_slash_if_dir():
tmp_dir = br.TempDir()
tmp_dir.subfile("test.txt")
assert Db.LiveShell._append_slash_if_dir(tmp_dir.path) == "%s%s" % (tmp_dir.path, os.sep)
assert Db.LiveShell._append_slash_if_dir(tmp_dir.subfiles[0]) == tmp_dir.subfiles[0]
def test_liveshell_get_headings(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db, "retrieve_summary", lambda _: True)
dbbuddy = Db.DbBuddy(",".join(ACCNS))
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
dbbuddy.records['XM_003978475'].summary = {'organism': 'velociraptor'}
assert liveshell.get_headings() == ['ACCN', 'DB', 'Type', 'record', 'organism']
def test_liveshell_filter(monkeypatch, hf, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
load_file = "%s/mock_resources/test_databasebuddy_clients/dbbuddy_save.db" % hf.resource_path
liveshell.do_load(load_file)
# 'keep' (default)
capsys.readouterr()
liveshell.filter("(organism) Mouse")
liveshell.dbbuddy.print()
out, err = capsys.readouterr()
assert hf.string2hash(out) == "9774790626857cd05298b4e9c5e09836"
# 'restore'
liveshell.filter("Phaethon", mode='restore')
liveshell.dbbuddy.print()
out, err = capsys.readouterr()
assert hf.string2hash(out) == "836e1b6810b2e349634face7b19d4999"
# 'remove'
liveshell.filter("Fragment", mode='remove')
liveshell.dbbuddy.print()
out, err = capsys.readouterr()
assert hf.string2hash(out) == "746d5e86ff1d3b23707977e0e41fd210"
# Wrong mode
with pytest.raises(ValueError) as err:
liveshell.filter("Fragment", mode='Foo')
assert "The 'mode' argument in filter() must be 'keep', 'remove', or 'restore', not Foo." in str(err)
# No search string given at all
monkeypatch.setattr("builtins.input", lambda _: False)
liveshell.filter(None)
out, err = capsys.readouterr()
assert "Error: you must specify a search string.\n" in out
# No search string given at first
monkeypatch.setattr("builtins.input", lambda _: "Casein")
liveshell.filter(None, mode="remove")
liveshell.dbbuddy.print()
out, err = capsys.readouterr()
assert hf.string2hash(out) == "fdcfcc6d32d726cba592e5c9d0bfdf44"
monkeypatch.setattr("builtins.input", lambda _: "Apoptosis")
liveshell.filter(None, mode="restore")
liveshell.dbbuddy.print()
out, err = capsys.readouterr()
assert hf.string2hash(out) == "a3249f5616e3ec863d911638e7f82ed8"
# Multiple terms
liveshell.filter('"Baculoviral" "Mitogen"', mode='remove')
liveshell.dbbuddy.print()
out, err = capsys.readouterr()
assert hf.string2hash(out) == "ef0ef9f16687530cadea9a465ff92634"
liveshell.filter("'partial' 'Q[0-9]'", mode='remove')
liveshell.dbbuddy.print()
out, err = capsys.readouterr()
assert hf.string2hash(out) == "4aa2b9aaf54bbcb874e17621da1a43c5"
# Wonkey quotes given as input
error_msg = "Error: It appears that you are trying to mix quote types (\" and ') while specifying " \
"multiple filters. Please pick one or the other.\n\n"
liveshell.filter("'Foo' \"Bar\"", mode='remove')
out, err = capsys.readouterr()
assert error_msg in out
liveshell.filter('"Foo" \'Bar\'', mode='remove')
out, err = capsys.readouterr()
assert error_msg in out
def test_liveshell_do_bash(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
capsys.readouterr()
tmp_file = br.TempFile()
liveshell.do_bash("echo 'hello from bash' > %s" % tmp_file.path)
assert "hello from bash" in tmp_file.read()
monkeypatch.setattr("builtins.input", lambda _: "echo 'Line from input' > %s" % tmp_file.path)
liveshell.do_bash(None)
assert "Line from input" in tmp_file.read()
liveshell.do_bash("cd /this/path/doesnt/exist")
out, err = capsys.readouterr()
assert "-sh: cd: /this/path/doesnt/exist: No such file or directory\n" in out
tmp_dir = br.TempDir()
tmp_dir.subfile("foo.txt")
cwd = os.getcwd()
liveshell.do_bash("cd %s" % tmp_dir.path)
out, err = capsys.readouterr()
assert tmp_dir.path in out
assert os.path.isfile("foo.txt")
os.chdir(cwd)
def test_liveshell_do_database(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
liveshell.do_database("'NcBi_nuc',\t \"ENSEMBl,uniprot")
assert sorted(dbbuddy.databases) == ['ensembl', 'ncbi_nuc', 'uniprot']
liveshell.do_database("ensembl,all")
assert sorted(dbbuddy.databases) == ["ensembl", "ncbi_nuc", "ncbi_prot", "uniprot"]
capsys.readouterr()
liveshell.do_database("Foo ensembl")
out, err = capsys.readouterr()
assert "Invalid database choice(s): foo." in out
assert dbbuddy.databases == ['ensembl']
liveshell.do_database("Foo")
out, err = capsys.readouterr()
assert "Database search list not changed." in out
assert dbbuddy.databases == ['ensembl']
monkeypatch.setattr("builtins.input", lambda _: "'ncbi_nuc', 'ensembl'")
liveshell.do_database(None)
assert sorted(dbbuddy.databases) == ['ensembl', 'ncbi_nuc']
def test_liveshell_do_delete(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
capsys.readouterr()
liveshell.do_delete(None)
out, err = capsys.readouterr()
assert "The live session is already empty.\n\n" in out
dbbuddy.records["Foo"] = "Bar"
liveshell.do_delete("Foo")
out, err = capsys.readouterr()
assert "Sorry, I don't understand what you want to delete." in out
# Delete failures
liveshell.do_delete("fail")
out, err = capsys.readouterr()
assert "Failures list is already empty.\n\n" in out
dbbuddy.failures["Foo"] = "Bar"
monkeypatch.setattr(br, "ask", lambda _, **kwargs: False)
liveshell.do_delete("fail")
out, err = capsys.readouterr()
assert "Aborted...\n" in out
assert len(dbbuddy.failures) == 1
monkeypatch.setattr(br, "ask", lambda _, **kwargs: True)
liveshell.do_delete("fail")
out, err = capsys.readouterr()
assert "List of failures removed.\n\n" in out
assert not dbbuddy.failures
# Delete searches
liveshell.do_delete("search")
out, err = capsys.readouterr()
assert "Search terms list is already empty.\n\n" in out
dbbuddy.search_terms.append("Bar")
monkeypatch.setattr(br, "ask", lambda _, **kwargs: False)
liveshell.do_delete("terms")
out, err = capsys.readouterr()
assert "Aborted...\n" in out
assert len(dbbuddy.search_terms) == 1
monkeypatch.setattr(br, "ask", lambda _, **kwargs: True)
liveshell.do_delete("st")
out, err = capsys.readouterr()
assert "Search terms removed.\n\n" in out
assert not dbbuddy.search_terms
# Delete trash bin
liveshell.do_delete("trash")
out, err = capsys.readouterr()
assert "Trash bin is already empty.\n" in out
dbbuddy.trash_bin["Foo"] = "Bar"
monkeypatch.setattr(br, "ask", lambda _, **kwargs: False)
liveshell.do_delete("trashbin")
out, err = capsys.readouterr()
assert "Aborted...\n" in out
assert len(dbbuddy.trash_bin) == 1
monkeypatch.setattr(br, "ask", lambda _, **kwargs: True)
liveshell.do_delete("tb")
out, err = capsys.readouterr()
assert "Trash bin emptied.\n\n" in out
assert not dbbuddy.trash_bin
# Delete records
del dbbuddy.records["Foo"]
dbbuddy.failures["Foo"] = "Bar"
liveshell.do_delete("records")
out, err = capsys.readouterr()
assert "Records list is already empty.\n" in out
dbbuddy.records["Foo"] = "Bar"
monkeypatch.setattr(br, "ask", lambda _, **kwargs: False)
liveshell.do_delete("main")
out, err = capsys.readouterr()
assert "Aborted...\n" in out
assert len(dbbuddy.records) == 1
monkeypatch.setattr(br, "ask", lambda _, **kwargs: True)
liveshell.do_delete("recs")
out, err = capsys.readouterr()
assert "All records removed from main list (trash bin is still intact).\n\n" in out
assert not dbbuddy.records
# Delete everything
dbbuddy.search_terms.append("Bar")
dbbuddy.trash_bin["Foo"] = "Bar"
dbbuddy.records["Foo"] = "Bar"
monkeypatch.setattr(br, "ask", lambda _, **kwargs: False)
liveshell.do_delete("")
out, err = capsys.readouterr()
assert "Aborted...\n" in out
assert len(dbbuddy.failures) == 1
assert len(dbbuddy.search_terms) == 1
assert len(dbbuddy.trash_bin) == 1
assert len(dbbuddy.records) == 1
monkeypatch.setattr(br, "ask", lambda _, **kwargs: True)
liveshell.do_delete("all")
out, err = capsys.readouterr()
assert "Live session cleared of all data.\n\n" in out
assert not dbbuddy.failures
assert not dbbuddy.search_terms
assert not dbbuddy.trash_bin
assert not dbbuddy.records
def test_liveshell_do_failures(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
liveshell.do_failures("Blahh")
out, err = capsys.readouterr()
assert "No failures to report\n\n" in out
dbbuddy.failures["Foo"] = Db.Failure("Bar", "Fake failure")
liveshell.do_failures()
out, err = capsys.readouterr()
assert "The following failures have occured\n" in out
assert "Bar\nFake failure" in out
def test_liveshell_do_fetch(monkeypatch, capsys):
def mock_big_record_no_dl(_dbbuddy):
_dbbuddy.records["NP_001287575.1"] = Db.Record("NP_001287575.1", _size=5000001)
def mock_big_record_fetch(_dbbuddy):
_dbbuddy.records["NP_001287575.1"].record = True
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
monkeypatch.setattr(Db, "retrieve_summary", lambda _: True)
dbbuddy = Db.DbBuddy("NP_001287575.1")
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
monkeypatch.setattr(Db, "retrieve_summary", mock_big_record_no_dl)
monkeypatch.setattr(br, "ask", lambda _, **kwargs: False)
liveshell.do_fetch("Foo")
assert dbbuddy.records["NP_001287575.1"].size == 5000001
out, err = capsys.readouterr()
assert "Aborted...\n\n" in out
monkeypatch.setattr(Db, "retrieve_sequences", mock_big_record_fetch)
monkeypatch.setattr(br, "ask", lambda _, **kwargs: True)
liveshell.do_fetch(None)
out, err = capsys.readouterr()
print(out)
assert "Retrieved 5.0 M residues of sequence data\n\n" in out
def test_liveshell_do_format(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
monkeypatch.setattr("builtins.input", lambda _: "Foo")
liveshell.do_format(None)
out, err = capsys.readouterr()
assert "'Foo'" in out
assert "is not a valid format" in out
assert dbbuddy.out_format == "summary"
for frmt in Db.FORMATS:
liveshell.do_format(frmt)
out, err = capsys.readouterr()
assert "Output format changed to" in out
assert frmt in out
assert dbbuddy.out_format == frmt
def test_liveshell_do_load(monkeypatch, capsys, hf):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
dbbuddy.server_clients["uniprot"] = Db.UniProtRestClient(dbbuddy)
dbbuddy.server_clients["uniprot"].http_errors_file.write("Hello!")
db_session = "%s/mock_resources/test_databasebuddy_clients/dbbuddy_save.db" % hf.resource_path
monkeypatch.setattr("builtins.input", lambda _: db_session)
liveshell.do_load(None)
out, err = capsys.readouterr()
assert "Session loaded from file.\n\n" in out
assert dbbuddy.server_clients["uniprot"].http_errors_file.read() == ""
headings = liveshell.get_headings()
for heading in ['ACCN', 'DB', 'Type', 'record', 'entry_name', 'length', 'organism-id', 'organism',
'protein_names', 'comments', 'gi_num', 'TaxId', 'status', 'name', 'biotype',
'object_type', 'strand', 'assembly_name', 'name']:
assert heading in headings
for heading in headings:
assert heading in ['ACCN', 'DB', 'Type', 'record', 'entry_name', 'length', 'organism-id', 'organism',
'protein_names', 'comments', 'gi_num', 'TaxId', 'status', 'name', 'biotype',
'object_type', 'strand', 'assembly_name', 'name']
monkeypatch.setattr("builtins.input", lambda _: "/no/file/here")
liveshell.do_load(None)
out, err = capsys.readouterr()
assert "Error: Unable to read the provided file. Are you sure it's a saved DbBuddy live session?\n\n" in out
def test_liveshell_do_keep(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
monkeypatch.setattr(Db.LiveShell, "filter", mock_filter)
liveshell.do_keep(None)
out, err = capsys.readouterr()
assert "'keep' filter mocked! None" in out
def test_liveshell_do_quit(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
dbbuddy.records["Foo"] = "Bar"
monkeypatch.setattr(br, "ask", lambda _, **kwargs: False)
liveshell.do_quit(None)
out, err = capsys.readouterr()
assert "Aborted...\n\n" in out
with pytest.raises(SystemExit):
monkeypatch.setattr(br, "ask", lambda _, **kwargs: True)
liveshell.do_quit(None)
out, err = capsys.readouterr()
assert "Goodbye" in out
def test_liveshell_do_trash(monkeypatch, capsys):
def mock_show(_, line, mode):
print("%s show mocked! %s" % (mode, line))
return
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
monkeypatch.setattr(Db.LiveShell, "do_show", mock_show)
liveshell.do_trash(None)
out, err = capsys.readouterr()
assert "trash_bin show mocked! None" in out
def test_liveshell_do_remove(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
monkeypatch.setattr(Db.LiveShell, "filter", mock_filter)
liveshell.do_remove(None)
out, err = capsys.readouterr()
assert "'remove' filter mocked! None" in out
def test_liveshell_do_restore(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
monkeypatch.setattr(Db.LiveShell, "filter", mock_filter)
liveshell.do_restore(None)
out, err = capsys.readouterr()
assert "'restore' filter mocked! None" in out
def test_liveshell_do_save(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
dbbuddy.records["foo"] = "bar"
# Standard, no problems
tmp_dir = br.TempDir()
monkeypatch.setattr("builtins.input", lambda _: "%s/save_dir/save_file1" % tmp_dir.path)
liveshell.do_save(None)
out, err = capsys.readouterr()
assert "Live session saved\n\n" in out
assert os.path.isfile("%s/save_dir/save_file1.db" % tmp_dir.path)
with open("%s/save_dir/save_file1.db" % tmp_dir.path, "rb") as ifile:
assert len(ifile.read()) in [279, 281] # Different versions of python give different file sizes
# File exists, abort
monkeypatch.setattr(br, "ask", lambda _, **kwargs: False)
liveshell.do_save("%s/save_dir/save_file1" % tmp_dir.path)
out, err = capsys.readouterr()
assert "Abort...\n\n" in out
# PermissionError
monkeypatch.setattr("builtins.open", OpenPermissionError)
liveshell.do_save("%s/save_dir/save_file2" % tmp_dir.path)
out, err = capsys.readouterr()
assert "Error: You do not have write privileges to create a file in the specified directory.\n\n" in out
assert not os.path.isfile("%s/save_dir/save_file2.db" % tmp_dir.path)
def makedirs_permissionerror(*args, **kwargs):
print("makedirs_permissionerror\nargs: %s\nkwargs: %s" % (args, kwargs))
raise PermissionError
monkeypatch.setattr(os, "makedirs", makedirs_permissionerror)
liveshell.do_save("%s/save_dir/deeper_dir/save_file2" % tmp_dir.path)
out, err = capsys.readouterr()
assert "Error: You do not have write privileges to create a directory in the specified path.\n\n" in out
assert not os.path.isfile("%s/save_dir/deeper_dir/save_file2.db" % tmp_dir.path)
def test_liveshell_do_search(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
dbbuddy.search_terms += ["Foo", "Bar"]
monkeypatch.setattr("builtins.input", lambda _: "Panx1, Panx2")
monkeypatch.setattr(Db, "retrieve_summary", lambda _: True)
liveshell.do_search(None)
assert dbbuddy.search_terms == ["Foo", "Bar", "Panx1", "Panx2"]
assert not dbbuddy.records
assert not dbbuddy.failures
mock_buddy = Db.DbBuddy("Cx43, Cx32")
mock_buddy.records["NewRec1"] = True
mock_buddy.failures["NewFailure1"] = True
monkeypatch.setattr(Db, "DbBuddy", lambda _: mock_buddy)
liveshell.do_search("Blahh")
assert dbbuddy.search_terms == ["Foo", "Bar", "Panx1", "Panx2", "Cx43", "Cx32"]
assert "NewRec1" in dbbuddy.records
assert "NewFailure1" in dbbuddy.failures
def test_liveshell_do_show(monkeypatch, capsys, hf):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
load_file = "%s/mock_resources/test_databasebuddy_clients/dbbuddy_save.db" % hf.resource_path
liveshell.do_load(load_file)
capsys.readouterr()
# Warning message if nothing to show
liveshell.do_show(None, "trash_bin")
out, err = capsys.readouterr()
assert "Nothing in 'trash bin' to show.\n\n" in out
# Specify columns and number of records
liveshell.do_show("ACCN organism 3")
out, err = capsys.readouterr()
assert hf.string2hash(out) == "43f5edc18717e2f7df08818d2ed32b78"
# Large group, say 'no' to display
monkeypatch.setattr(br, "ask", lambda *_, **kwargs: False)
liveshell.do_show(None)
out, err = capsys.readouterr()
assert "Include an integer value with 'show' to return a specific number of records.\n\n" in out
# Large group, show it anyway
monkeypatch.setattr(br, "ask", lambda *_, **kwargs: True)
liveshell.do_show(None)
out, err = capsys.readouterr()
assert hf.string2hash(out) == "edc78c2e17543392933c87d833d8a2ea"
# Try sequence format on LiveShell with only summary data
dbbuddy.out_format = "fasta"
liveshell.do_show(None)
out, err = capsys.readouterr()
assert "Warning: only summary data available; there is nothing to display in fasta format." in out
# Only some records have full sequence data (patch print to true)
dbbuddy.records["P00520"].record = True
dbbuddy.records["Q5R454"].record = True
monkeypatch.setattr(Db.DbBuddy, "print", lambda *_, **kwargs: True)
liveshell.do_show(None)
out, err = capsys.readouterr()
assert "Warning: 1405 records are only summary data, so will not be displayed in fasta format. " \
"Use 'fetch' to retrieve all sequence data." in err
# Raise errors
def mock_length_error(*args, **kwargs):
print("mock_length_error\nargs: %s\nkwargs: %s" % (args, kwargs))
raise ValueError("Sequences must all be the same length")
dbbuddy.out_format = 'nexus'
monkeypatch.setattr(Db.DbBuddy, "print", mock_length_error)
liveshell.do_show(None)
out, err = capsys.readouterr()
assert "Error: 'nexus' format does not support sequences of different length." in out
def mock_qual_score_error(*args, **kwargs):
print("mock_qual_score_error\nargs: %s\nkwargs: %s" % (args, kwargs))
raise ValueError("No suitable quality scores found in letter_annotations of SeqRecord")
dbbuddy.out_format = 'fastq'
monkeypatch.setattr(Db.DbBuddy, "print", mock_qual_score_error)
liveshell.do_show(None)
out, err = capsys.readouterr()
assert "Error: BioPython requires quality scores to output in 'fastq' format, and this data is not " \
"currently available to DatabaseBuddy." in out
def mock_valueerror(*args, **kwargs):
print("mock_valueerror\nargs: %s\nkwargs: %s" % (args, kwargs))
raise ValueError("Unknown ValueError")
monkeypatch.setattr(Db.DbBuddy, "print", mock_valueerror)
with pytest.raises(ValueError) as err:
liveshell.do_show(None)
assert "Unknown ValueError" in str(err)
def test_liveshell_do_sort(monkeypatch, capsys, hf):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
load_file = "%s/mock_resources/test_databasebuddy_clients/dbbuddy_save.db" % hf.resource_path
liveshell.do_load(load_file)
capsys.readouterr()
# Default sort on accession
start_accns = [x for x in dbbuddy.records]
liveshell.do_sort(None)
accns = [x for x in dbbuddy.records]
assert start_accns != accns
assert sorted(start_accns) == accns
# Default sort reversed
liveshell.do_sort("rev")
assert [x for x in dbbuddy.records] == sorted(start_accns, reverse=True)
liveshell.do_sort(None)
assert [x for x in dbbuddy.records] != sorted(start_accns, reverse=True)
liveshell.do_sort("reverse")
assert [x for x in dbbuddy.records] == sorted(start_accns, reverse=True)
# Sort on column
_type = [rec.type for accn, rec in dbbuddy.records.items()]
liveshell.do_sort("Type")
after_sort = [rec.type for accn, rec in dbbuddy.records.items()]
assert after_sort != _type
assert after_sort == sorted(_type)
database = [rec.database for accn, rec in dbbuddy.records.items()]
liveshell.do_sort("DB")
after_sort = [rec.database for accn, rec in dbbuddy.records.items()]
assert after_sort != database
assert after_sort == sorted(database)
organism = [rec.summary['organism'] for accn, rec in dbbuddy.records.items()]
liveshell.do_sort("organism")
after_sort = [rec.summary['organism'] for accn, rec in dbbuddy.records.items()]
assert after_sort != organism
assert after_sort == sorted(organism)
length = [rec.summary['length'] for accn, rec in dbbuddy.records.items()]
liveshell.do_sort("length")
after_sort = [rec.summary['length'] for accn, rec in dbbuddy.records.items()]
assert after_sort != length
assert after_sort == sorted(length)
protein_names = [rec.summary['protein_names'] for accn, rec in dbbuddy.records.items()
if 'protein_names' in rec.summary]
liveshell.do_sort("protein_names")
after_sort = [rec.summary['protein_names'] for accn, rec in dbbuddy.records.items()
if 'protein_names' in rec.summary]
assert after_sort != protein_names
assert after_sort == sorted(protein_names)
gi_num = [rec.summary['gi_num'] for accn, rec in dbbuddy.records.items() if 'gi_num' in rec.summary]
liveshell.do_sort("gi_num")
after_sort = [rec.summary['gi_num'] for accn, rec in dbbuddy.records.items() if 'gi_num' in rec.summary]
assert after_sort != gi_num
assert after_sort == sorted(gi_num)
# Sort on multi-column
dbbuddy.records["A0A0N8ESW5"].record = True
dbbuddy.records["XP_011997944.1"].record = True
liveshell.do_sort("record organism gi_num")
capsys.readouterr()
liveshell.do_show("10")
out, err = capsys.readouterr()
assert hf.string2hash(out) == "c05f7a103d2b50d767407817f43a1828"
def test_liveshell_do_status(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
capsys.readouterr()
liveshell.do_status(None)
out, err = capsys.readouterr()
assert '''\
############################
### DatabaseBuddy object ###
Databases: ncbi_nuc, ncbi_prot, uniprot, ensembl
Out format: summary
Searches: None
Full Recs: 0
Summary Recs: 0
ACCN only: 0
Trash bin: 0
Failures: 0
############################
''' in out
def test_liveshell_do_write(monkeypatch, capsys, hf):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
monkeypatch.setattr(Db.LiveShell, "dump_session", lambda _: True)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
load_file = "%s/mock_resources/test_databasebuddy_clients/dbbuddy_save.db" % hf.resource_path
liveshell.do_load(load_file)
capsys.readouterr()
tmp_dir = br.TempDir()
# write a summary
monkeypatch.setattr("builtins.input", lambda _: "%s/save1" % tmp_dir.path)
liveshell.do_write(None)
assert os.path.isfile("%s/save1" % tmp_dir.path)
with open("%s/save1" % tmp_dir.path, "r") as ifile:
assert len(ifile.read()) == 249980
out, err = capsys.readouterr()
assert re.search("1407 summary records.*written to.*save1", out)
# write ids/accns
dbbuddy.out_format = "ids"
monkeypatch.setattr(br, "ask", lambda _: True)
dbbuddy.records['O14727'].record = Db.Record('O14727', _record=True)
liveshell.do_write("%s/save2" % tmp_dir.path)
assert os.path.isfile("%s/save2" % tmp_dir.path)
with open("%s/save2" % tmp_dir.path, "r") as ifile:
assert len(ifile.read()) == 18661
out, err = capsys.readouterr()
assert re.search("1407 accessions.*written to.*save2", out)
# Abort summary
monkeypatch.setattr(br, "ask", lambda _: False)
liveshell.do_write("%s/save3" % tmp_dir.path)
assert not os.path.isfile("%s/save3" % tmp_dir.path)
out, err = capsys.readouterr()
assert "Abort..." in out
# Permission error
dbbuddy.out_format = "fasta"
monkeypatch.setattr("builtins.open", OpenPermissionError)
liveshell.do_write("%s/save4" % tmp_dir.path)
assert not os.path.isfile("%s/save4" % tmp_dir.path)
out, err = capsys.readouterr()
assert "Error: You do not have write privileges in the specified directory.\n\n" in out
# File exists
monkeypatch.setattr(br, "ask", lambda _: False)
liveshell.do_write("%s/save2" % tmp_dir.path)
out, err = capsys.readouterr()
assert "Abort..." in out
assert "written" not in out
# Not a directory
liveshell.do_write("%s/ghostdir/save5" % tmp_dir.path)
out, err = capsys.readouterr()
assert "The specified directory does not exist. Please create it before continuing" in out
assert "written" not in out
def test_liveshell_do_undo(monkeypatch, capsys, hf):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
liveshell.do_undo(None)
out, err = capsys.readouterr()
assert "There is currently no undo history (only a single undo is possible).\n\n" in out
load_file = "%s/mock_resources/test_databasebuddy_clients/dbbuddy_save.db" % hf.resource_path
liveshell.do_load(load_file)
assert not dbbuddy.trash_bin
liveshell.do_remove("P00520")
assert dbbuddy.trash_bin
liveshell.do_undo(None)
assert not dbbuddy.trash_bin
out, err = capsys.readouterr()
assert "Most recent state reloaded\n\n" in out
liveshell.do_undo(None)
out, err = capsys.readouterr()
assert "There is currently no undo history (only a single undo is possible).\n\n" in out
def test_liveshell_complete_bash(monkeypatch):
    # Note, this doesn't work on Windows
if os.name == "nt":
return
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
programs = liveshell.complete_bash("wh")
for program in ['wheel ', 'whereis ', 'whoami ', 'which ', 'who ']:
assert program in programs
def test_liveshell_complete_database(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
assert sorted(liveshell.complete_database("")) == ['ensembl', 'ncbi_nuc', 'ncbi_prot', 'uniprot']
def test_liveshell_complete_delete(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
assert liveshell.complete_delete("") == ["all", "failures", "search", "trash", "records"]
def test_liveshell_complete_format(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
assert liveshell.complete_format("f") == ['full-summary', 'fasta', 'fastq', 'fastq-sanger',
'fastq-solexa', 'fastq-illumina']
def test_liveshell_complete_keep_remove_resort_trash_show_sort(monkeypatch, hf):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
load_file = "%s/mock_resources/test_databasebuddy_clients/dbbuddy_save.db" % hf.resource_path
liveshell.do_load(load_file)
# Keep
assert liveshell.complete_keep("d") == ['(DB) ']
assert liveshell.complete_keep("len") == ['(length) ']
assert liveshell.complete_keep('ac') == ['(ACCN) ']
# Remove
assert liveshell.complete_remove("d") == ['(DB) ']
assert liveshell.complete_remove("len") == ['(length) ']
assert liveshell.complete_remove('ac') == ['(ACCN) ']
    # Restore
liveshell.do_remove("Human")
assert liveshell.complete_restore("d") == ['(DB) ']
assert liveshell.complete_restore("len") == ['(length) ']
assert liveshell.complete_restore('ac') == ['(ACCN) ']
# Trash
assert liveshell.complete_trash("d") == ['DB ']
assert liveshell.complete_trash("len") == ['length ']
assert liveshell.complete_trash('ac') == ['ACCN ']
# Show
assert liveshell.complete_show("d") == ['DB ']
assert liveshell.complete_show("len") == ['length ']
assert liveshell.complete_show('ac') == ['ACCN ']
# sort
assert liveshell.complete_sort("d") == ['DB ']
assert liveshell.complete_sort("len") == ['length ']
assert liveshell.complete_sort('ac') == ['ACCN ']
assert liveshell.complete_sort('re') == ['record ', 'reverse ']
def test_liveshell_complete_load_save_write(monkeypatch):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
tmpdir = br.TempDir()
os.chdir(tmpdir.path)
tmpdir.subfile("file.txt")
tmpdir.subdir("extra_dir")
# Load
assert liveshell.complete_load("load fi ", "load fi ", 5, 7) == ['file.txt']
assert liveshell.complete_load("load ", "load ", 5, 5) == ['extra_dir%s' % os.path.sep, 'file.txt']
assert not liveshell.complete_load("load ", "load ", 4, 5)
# Save
assert liveshell.complete_save("save fi ", "save fi ", 5, 7) == ['file.txt']
assert liveshell.complete_save("save ", "save ", 5, 5) == ['extra_dir%s' % os.path.sep, 'file.txt']
assert not liveshell.complete_save("save ", "save ", 4, 5)
    # Write
assert liveshell.complete_write("write fi ", "write fi ", 6, 8) == ['file.txt']
assert liveshell.complete_write("write ", "write ", 6, 6) == ['extra_dir%s' % os.path.sep, 'file.txt']
assert not liveshell.complete_write("write ", "write ", 4, 5)
def test_helps(monkeypatch, capsys):
monkeypatch.setattr(Db.LiveShell, "cmdloop", mock_cmdloop)
dbbuddy = Db.DbBuddy()
crash_file = br.TempFile(byte_mode=True)
liveshell = Db.LiveShell(dbbuddy, crash_file)
liveshell.help_bash()
out, err = capsys.readouterr()
assert "Run bash commands" in out
liveshell.help_database()
out, err = capsys.readouterr()
assert "Reset the database" in out
liveshell.help_delete()
out, err = capsys.readouterr()
assert "Remove records completely" in out
liveshell.help_failures()
out, err = capsys.readouterr()
assert "Print the status of" in out
liveshell.help_fetch()
out, err = capsys.readouterr()
assert "Retrieve full records for" in out
liveshell.help_format()
out, err = capsys.readouterr()
assert "Set the output format" in out
liveshell.help_keep()
out, err = capsys.readouterr()
assert "Further refine your results" in out
liveshell.help_quit()
out, err = capsys.readouterr()
assert "End the live session" in out
liveshell.help_load()
out, err = capsys.readouterr()
assert "Recover the contents of a " in out
liveshell.help_trash()
out, err = capsys.readouterr()
assert "Output the records held in" in out
liveshell.help_remove()
out, err = capsys.readouterr()
assert "Further refine your results" in out
liveshell.help_restore()
out, err = capsys.readouterr()
assert "Return a subset of filtered" in out
liveshell.help_save()
out, err = capsys.readouterr()
assert "Save your live session in DB" in out
liveshell.help_search()
out, err = capsys.readouterr()
assert "Search databases (currently set to" in out
liveshell.help_show()
out, err = capsys.readouterr()
assert "Output the records held in" in out
liveshell.help_sort()
out, err = capsys.readouterr()
assert "Alter the order that records" in out
liveshell.help_status()
out, err = capsys.readouterr()
assert "Display the current state of your Live" in out
liveshell.help_undo()
out, err = capsys.readouterr()
assert "Revert the most recent change to your live session." in out
liveshell.help_write()
out, err = capsys.readouterr()
assert "Send records to a file" in out
# ###################### main() ###################### #
def test_main(monkeypatch):
monkeypatch.setattr(sys, "argv", ["DatabaseBuddy", "Casp9,Panx3", "-ls"])
monkeypatch.setattr(Db, "command_line_ui", lambda *_: True)
assert Db.main()
monkeypatch.setattr(Db, "command_line_ui", mock_keyboardinterrupt)
assert not Db.main()
monkeypatch.setattr(Db, "command_line_ui", mock_guesserror)
assert not Db.main()
monkeypatch.setattr(Db, "command_line_ui", mock_systemexit)
assert not Db.main()
monkeypatch.setattr(Db, "command_line_ui", mock_fileexistserror)
monkeypatch.setattr(br, "send_traceback", lambda *_: True)
assert not Db.main()
# ###################### loose command line ui helpers ###################### #
@pytest.mark.loose
def test_exit(monkeypatch, capsys):
class MockExitUsage(object):
@staticmethod
def increment(*args):
print(args)
return True
@staticmethod
def save():
return True
monkeypatch.setattr(br, "Usage", MockExitUsage)
monkeypatch.setattr(Db, "LiveShell", lambda *_: True)
test_in_args = deepcopy(in_args)
with pytest.raises(SystemExit):
Db.command_line_ui(test_in_args, Db.DbBuddy())
out, err = capsys.readouterr()
assert "('DatabaseBuddy', '%s', 'LiveShell', 0)" % Db.VERSION.short() in out
@pytest.mark.loose
def test_error(monkeypatch, capsys):
monkeypatch.setattr(Db, "LiveShell", mock_systemexit)
test_in_args = deepcopy(in_args)
test_in_args.live_shell = True
assert Db.command_line_ui(test_in_args, Db.DbBuddy(), skip_exit=True) is None
test_in_args.live_shell = False
monkeypatch.setattr(Db, "LiveShell", mock_keyboardinterrupt)
assert Db.command_line_ui(test_in_args, Db.DbBuddy(), skip_exit=True) is None
out, err = capsys.readouterr()
assert "DbBuddy object" in out
monkeypatch.setattr(Db, "LiveShell", mock_fileexistserror)
monkeypatch.setattr(br.TempFile, "save", lambda *_: True)
monkeypatch.setattr(br, "send_traceback", lambda *_: True)
capsys.readouterr()
assert Db.command_line_ui(test_in_args, Db.DbBuddy(), skip_exit=True) is None
out, err = capsys.readouterr()
assert "can be loaded by launching DatabaseBuddy and using the 'load' command." in err
@pytest.mark.loose
def test_guess_db(capsys, hf):
test_in_args = deepcopy(in_args)
test_in_args.guess_database = True
with pytest.raises(SystemExit):
Db.command_line_ui(test_in_args, Db.DbBuddy(), skip_exit=True)
out, err = capsys.readouterr()
assert 'Nothing to return' in out
with pytest.raises(SystemExit):
Db.command_line_ui(test_in_args, Db.DbBuddy(",".join(ACCNS) + ",Casp9"), skip_exit=True)
out, err = capsys.readouterr()
assert hf.string2hash(out) == "4b3edb0272b02d8e18ce591304fdea1d"
|
[
"biologyguy@gmail.com"
] |
biologyguy@gmail.com
|
7b76d82117448e96980ae3f51aa746e12feeb50e
|
6a7a9d6201f2c4399aa8ebb5910a8d706b0a5111
|
/examples/Shape Example/shape_model/server.py
|
5f186ff1fd20885027bedf9aa904264d4e7fbb79
|
[
"Apache-2.0"
] |
permissive
|
josepic99/mesas
|
5024177751d1c97fbc2dabd5e3fe230b23d5fb97
|
8453fe36b8536795ce5b80975fc9dbc604da42df
|
refs/heads/master
| 2023-03-13T19:19:00.769987
| 2021-03-09T00:44:39
| 2021-03-09T00:44:39
| 345,835,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
import random
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from shape_model.model import Walker, ShapesModel
def agent_draw(agent):
portrayal = None
if agent is None:
# Actually this if part is unnecessary, but still keeping it for
# aesthetics
pass
elif isinstance(agent, Walker):
print("Uid: {0}, Heading: {1}".format(agent.unique_id, agent.heading))
        portrayal = {"Shape": "arrowHead",
                     "Filled": "true",
                     "Layer": 2,
                     "Color": "green",
"heading_x": agent.heading[0],
"heading_y": agent.heading[1],
"text": agent.unique_id,
"text_color": "white",
"scale": 0.8,
}
return portrayal
def launch_shape_model():
width = 15
height = 10
num_agents = 2
pixel_ratio = 50
grid = CanvasGrid(agent_draw, width, height,
width * pixel_ratio, height * pixel_ratio)
server = ModularServer(ShapesModel, [grid], "Shape Model Example",
num_agents, width, height)
server.max_steps = 0
server.port = 8888
server.launch()
if __name__ == "__main__":
random.seed(3)
launch_shape_model()
|
[
"jackiekazil@gmail.com"
] |
jackiekazil@gmail.com
|
527ee2718e614521dae7fd5f0ad151d7657b6168
|
b161ad0cc14ab5b146bc28f65afd8d536ba789bf
|
/test/sample/test_sample.py
|
43820370b9e253d70b13bc584d25c996a6d70845
|
[] |
no_license
|
jdechol/python-data-structures
|
2a48969a195930a0be1b472281e394348fcb9fab
|
3c64b7a9ff60b5a0b890c194af233a1af17b9a4d
|
refs/heads/master
| 2022-12-09T04:22:46.561654
| 2020-09-21T20:27:42
| 2020-09-21T20:27:42
| 295,358,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
from src.sample import sample
import random
def test_quicksort():
numbers = random.sample(range(1000), 1000)
actual = sample.quicksort(numbers)
numbers.sort()
assert actual == numbers
def test_is_palindrome():
assert sample.is_palindrome("abcba")
assert not sample.is_palindrome("ab")
assert sample.is_palindrome("abba")
assert sample.is_palindrome("a")
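

# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not the project's actual
# src/sample/sample.py. It shows one way the two functions exercised above
# could be implemented so that the assertions in this file pass; the
# underscore-prefixed names are hypothetical and added here only for clarity.
# ---------------------------------------------------------------------------
def _reference_quicksort(numbers):
    """Return a new, ascending-sorted copy of `numbers` (plain recursive quicksort)."""
    if len(numbers) <= 1:
        return list(numbers)
    pivot, rest = numbers[0], numbers[1:]
    return (_reference_quicksort([n for n in rest if n < pivot])
            + [pivot]
            + _reference_quicksort([n for n in rest if n >= pivot]))


def _reference_is_palindrome(text):
    """Return True when `text` reads the same forwards and backwards."""
    return text == text[::-1]


def test_reference_sketches():
    numbers = random.sample(range(100), 100)
    assert _reference_quicksort(numbers) == sorted(numbers)
    assert _reference_is_palindrome("abcba")
    assert not _reference_is_palindrome("ab")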
|
[
"jechols@mavenlink.com"
] |
jechols@mavenlink.com
|
460d5b12c08e5d071fb4b61103902d08761bbfd6
|
7239d389894613ef132edb1198a4f47cb2b65f92
|
/packages/python/plotly/plotly/graph_objs/histogram/_hoverlabel.py
|
fbe525403ca58725c86a720504b77e42a4dc7ec7
|
[
"MIT"
] |
permissive
|
fast3dd13sa/plotly.py
|
2169417b72481ff2937b5a9ce90d426cd1cccd80
|
e778c6b5e6ae9665d7a5e2ddb666f43806df3959
|
refs/heads/master
| 2022-04-26T01:11:46.345181
| 2020-04-27T19:49:56
| 2020-04-27T19:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,798
|
py
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram"
_path_str = "histogram.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
        two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.histogram.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for namelength
.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
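

# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the generated plotly
# module. It assumes plotly is installed and shows how this Hoverlabel class
# (or an equivalent dict) is typically attached to a histogram trace; the
# sample data below is made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import plotly.graph_objects as go

    # Style the hover labels explicitly; a plain dict with the same keys
    # would be coerced through this constructor as well.
    label = Hoverlabel(
        bgcolor="white", bordercolor="black", align="left", namelength=12
    )
    fig = go.Figure(go.Histogram(x=[1, 1, 2, 3, 3, 3], hoverlabel=label))
    fig.show()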
|
[
"noreply@github.com"
] |
fast3dd13sa.noreply@github.com
|
3c533179afdb9f6b45b2c62b10cff66119de15bd
|
1a6b22f2ec35a41b2a63766c157e028c16593203
|
/tests/rules/test_compare.py
|
ff830a3f609b021b31cfc7dcba2d47c1fef1025d
|
[
"MIT"
] |
permissive
|
WinterComes/arche
|
c4b442111276a824f9589c7ffd70e0cb99687bf1
|
6be3d7a4ec66f33f7af544aa7af4ea95c35bef2e
|
refs/heads/master
| 2020-08-19T02:06:22.518094
| 2019-10-07T14:37:45
| 2019-10-07T14:37:45
| 215,862,662
| 0
| 0
|
MIT
| 2019-10-17T18:44:42
| 2019-10-17T18:44:41
| null |
UTF-8
|
Python
| false
| false
| 3,833
|
py
|
import arche.rules.compare as compare
from arche.rules.result import Level
from conftest import *
import pytest
@pytest.mark.parametrize(
["source", "target", "fields", "normalize", "expected", "more_stats"],
[
(
{
"one": list(range(5)) + ["42"] * 5,
"two": list(range(10)),
"three": [np.nan] * 5 + list(range(5)),
},
{
"one": list(range(5, 10)) + [4] * 6,
"two": list(range(11)),
"three": [np.nan] * 10 + [1],
},
["one", "two", "three"],
False,
{
Level.INFO: [
("10 `non NaN ones` - 9 new, 1 same",),
("10 `non NaN twos` - 0 new, 10 same",),
("1 `twos` are missing", None, {"10 `twos` are missing": {10}}),
("5 `non NaN threes` - 4 new, 1 same",),
],
Level.ERROR: [
(
"5 `ones` are missing",
None,
{"5, 6, 7, 8, 9 `ones` are missing": set(range(5))},
)
],
},
{
"one": {
"same": pd.Series([4], index=[4], dtype="object"),
"new": pd.Series(
[0, 1, 2, 3] + ["42"] * 5, index=[0, 1, 2, 3, 5, 6, 7, 8, 9]
),
"missing": pd.Series(list(range(5, 10))),
},
"two": {
"same": pd.Series(list(range(10))),
"new": pd.Series(dtype=np.int64),
"missing": pd.Series([10], index=[10]),
},
"three": {
"same": pd.Series([1.0], index=[6]),
"new": pd.Series([0.0, 2.0, 3.0, 4.0], index=[5, 7, 8, 9]),
"missing": pd.Series(),
},
},
),
(
{
"four": [{i} for i in range(2)]
+ [{"K": {"k": i}} for i in range(2)]
+ ["l"] * 6
},
{
"four": [{i} for i in range(4)]
+ [{"k": {"k": i}} for i in range(4)]
+ ["L"] * 20
},
["four"],
True,
{
Level.INFO: [
("10 `non NaN fours` - 0 new, 10 same",),
(
"4 `fours` are missing",
None,
{
"{2}, {3}, {'k': {'k': 2}}, {'k': {'k': 3}} `fours` are missing": {
2,
3,
6,
7,
}
},
),
]
},
{
"four": {
"same": pd.Series(
[str({i}) for i in range(2)]
+ [str({"k": {"k": i}}) for i in range(2)]
+ ["l"] * 6
),
"new": pd.Series(dtype=object),
"missing": pd.Series(
["{2}", "{3}", "{'k': {'k': 2}}", "{'k': {'k': 3}}"],
index={2, 3, 6, 7},
),
}
},
),
],
)
def test_fields(source, target, fields, normalize, expected, more_stats):
assert_results_equal(
compare.fields(pd.DataFrame(source), pd.DataFrame(target), fields, normalize),
create_result("Fields Difference", expected, more_stats=more_stats),
check_index_type=False,
)
|
[
"noreply@github.com"
] |
WinterComes.noreply@github.com
|
71b4898b2f07742091ba582a11d90f4317314a77
|
3c5885d48435dec194144afffb8fd4930fcb2bcf
|
/escapeartist/server/callbacks.py
|
04aec18fa3ee814090402e7eaf7ff3b7a4f34603
|
[] |
no_license
|
Mnsk44/pentti-the-escape-artist
|
6912e39dfe3fef23d1e5e7b0c76b82341a46baa2
|
bb7998e03ead1468e7ab16a08dac63699b26d77e
|
refs/heads/main
| 2023-07-03T05:43:15.993580
| 2021-08-09T04:02:47
| 2021-08-09T04:02:47
| 393,693,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,091
|
py
|
"""
Callbacks for dash app
"""
import base64
from io import BytesIO
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
from character.bfspentti import BFSPentti
from character.randompentti import RandomPentti
from character.righthandpentti import RightHandPentti
from map.map import Map
from map.maptoimage import MapToImage
from server.app import app
from util.constants import VICTORY
@app.callback(
[Output("result-div-evil-text", "children"), Output("result-div-evil-png", "children")],
[Input("evil-btn", "n_clicks")],
[State("limit-input", "value"), State("map-dropdown", "value")],
prevent_initial_call=True
)
def evil_random_escape(click, limit, map_path):
map = Map(map_path)
pentti = RandomPentti(map)
pentti.escape_maze(limit)
if pentti._map[pentti.position()] == VICTORY:
result = f"Pentti escaped in {len(pentti._history)} steps"
else:
result = f"Pentti was exhausted after {limit} steps, Pentti did not escape..."
text = html.P(
"You have chosen to not give Pentti any instructions :( he wanders randomly in the maze... "
+ result
)
maptoimage = MapToImage()
maptoimage.convert([pentti._map])
# This is adapted from
# https://stackoverflow.com/questions/60712647/displaying-pil-images-in-dash-plotly
buffer = BytesIO()
maptoimage.save_last_png(buffer)
encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
im = html.Img(
className="image",
src="data:image/png;base64, " + encoded, style={"height":"35%", "width":"35%"}
)
return text, im
@app.callback(
[Output("result-div-neutral-text", "children"), Output("result-div-neutral-png", "children")],
[Input("neutral-btn", "n_clicks")],
[State("limit-input", "value"), State("map-dropdown", "value")],
prevent_initial_call=True
)
def right_hand_escape(click, limit, map_path):
map = Map(map_path)
pentti = RightHandPentti(map)
pentti.escape_maze(limit)
if pentti._map[pentti.position()] == VICTORY:
result = f"Pentti escaped in {len(pentti._history)} steps"
else:
result = f"Pentti was exhausted after {limit} steps, Pentti did not escape..."
text = html.P(
"You have chosen to instruct Pentti to keep his right hand touching a wall at all times... "
+ result
)
maptoimage = MapToImage()
maptoimage.convert([pentti._map])
# This is adapted from
# https://stackoverflow.com/questions/60712647/displaying-pil-images-in-dash-plotly
buffer = BytesIO()
maptoimage.save_last_png(buffer)
encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
im = html.Img(
className="image",
src="data:image/png;base64, " + encoded, style={"height":"35%", "width":"35%"}
)
return text, im
@app.callback(
[Output("result-div-good-text", "children"), Output("result-div-good-png", "children")],
[Input("good-btn", "n_clicks")],
[State("limit-input", "value"), State("map-dropdown", "value")],
prevent_initial_call=True
)
def bfs_escape(click, limit, map_path):
map = Map(map_path)
pentti = BFSPentti(map)
pentti.escape_maze(limit)
if pentti._count_path_length() < limit:
result = f"Pentti escaped in {pentti._count_path_length()} steps"
else:
result = f"Pentti was exhausted after {limit} steps, Pentti did not escape..."
text = html.P(
"You have chosen to give a map to Pentti! Pentti will look for a good route to escape! "
+ result
)
maptoimage = MapToImage()
maptoimage.convert([pentti._map])
# This is adapted from
# https://stackoverflow.com/questions/60712647/displaying-pil-images-in-dash-plotly
buffer = BytesIO()
maptoimage.save_last_png(buffer)
encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
im = html.Img(
className="image",
src="data:image/png;base64, " + encoded, style={"height":"35%", "width":"35%"}
)
return text, im
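

# ---------------------------------------------------------------------------
# Editor's note: optional refactoring sketch, not part of the original module.
# The three callbacks above repeat the same map-to-image conversion; a helper
# like the one below (relying only on the MapToImage API already used in this
# file) would let each callback end with
#     return text, _map_as_img(pentti._map)
# ---------------------------------------------------------------------------
def _map_as_img(escape_map):
    """Render an escape map as a base64-encoded PNG wrapped in an html.Img."""
    maptoimage = MapToImage()
    maptoimage.convert([escape_map])

    # Same PIL-to-dash trick referenced in the callbacks above:
    # https://stackoverflow.com/questions/60712647/displaying-pil-images-in-dash-plotly
    buffer = BytesIO()
    maptoimage.save_last_png(buffer)
    encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")

    return html.Img(
        className="image",
        src="data:image/png;base64, " + encoded,
        style={"height": "35%", "width": "35%"},
    )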
|
[
"teemuj.mannikko@gmail.com"
] |
teemuj.mannikko@gmail.com
|
ab6068f680ca8ac16e86b2e3a8d6e25bf09fbe95
|
2ac571cc2ef034254913523f5785c18a31656ad8
|
/feeds/serializers.py
|
9e5ae46fa42eab97d9a1ae1a08d43592cfc6c720
|
[] |
no_license
|
Shivam1904/HTTP_200
|
1f17a69c432ba23c5dc23e183611a1e5134d9b20
|
3df6be2dcd2300968f039aae9de833b7c0e9e270
|
refs/heads/master
| 2020-12-31T03:16:45.023011
| 2016-05-18T21:09:55
| 2016-05-18T21:09:55
| 45,245,014
| 1
| 0
| null | 2015-10-30T10:37:11
| 2015-10-30T10:37:11
| null |
UTF-8
|
Python
| false
| false
| 12,153
|
py
|
from django.forms import widgets
from rest_framework import serializers
from feeds.models import *
from django.contrib.auth.models import User
from django.http import request
class UserSerializer(serializers.ModelSerializer):
username = serializers.ReadOnlyField()
password = serializers.CharField(style={'input_type': 'password'}, default='password')
class Meta:
model = User
fields = ('id', 'username', 'email', 'password', 'first_name', 'last_name', 'last_login')
def create(self, validated_data):
"""
Create and return a new `Student` instance, given the validated data.
"""
user = User(email=validated_data['email'], username=validated_data['username'],
first_name=validated_data['first_name'], last_name=validated_data['last_name'])
user.set_password(validated_data['password'])
user.save()
return user
def update(self, instance, validated_data):
        print(validated_data)
instance.username = validated_data.get('username', instance.username)
instance.first_name = validated_data.get('first_name', instance.first_name)
instance.last_name = validated_data.get('last_name', instance.last_name)
instance.email = validated_data.get('email', instance.email)
instance.last_login = validated_data.get('last_login', instance.last_login)
instance.set_password(validated_data.get('password', instance.password))
instance.save()
return instance
class StudentSerializer(serializers.HyperlinkedModelSerializer):
'''
Serializer Class for Student Model
'''
user_details = UserSerializer(source='user')
relevent_count = serializers.ReadOnlyField(source='relevent')
academics_count = serializers.ReadOnlyField(source='academics')
administration_count = serializers.ReadOnlyField(source='administration')
misc_count = serializers.ReadOnlyField(source='misc')
tnp_count = serializers.ReadOnlyField(source='tnp')
events_count = serializers.ReadOnlyField(source='events')
class Meta:
model = Student
fields = ('user_details', 'id', 'univ_roll_no', 'ph_no', 'father_name', 'mother_name', 'address', 'course', 'relevent_count', 'academics_count', 'administration_count',
'tnp_count', 'events_count', 'misc_count', 'relevent_last_seen', 'administration_last_seen', 'academics_last_seen', 'misc_last_seen', 'tnp_last_seen', 'events_last_seen')
def create(self, validated_data):
"""
Create and return a new `Student` instance, given the validated data.
"""
return Student.objects.create(**validated_data)
def update(self, instance, validated_data):
validated_user_data = validated_data['user'].items()
user = User.objects.filter(username=validated_data['username'])
user.update(email=validated_user_data[0][1], first_name=validated_user_data[
2][1], last_name=validated_user_data[3][1])
user[0].set_password(validated_user_data[1][1])
instance.univ_roll_no = validated_data.get('univ_roll_no', instance.univ_roll_no)
instance.ph_no = validated_data.get('ph_no', instance.ph_no)
instance.father_name = validated_data.get('father_name', instance.father_name)
instance.mother_name = validated_data.get('mother_name', instance.mother_name)
instance.address = validated_data.get('address', instance.address)
instance.course = validated_data.get('course', instance.course)
instance.relevent_last_seen = validated_data.get('relevent_last_seen', instance.relevent_last_seen)
instance.academics_last_seen = validated_data.get('academics_last_seen', instance.academics_last_seen)
instance.administration_last_seen = validated_data.get(
'administration_last_seen', instance.administration_last_seen)
instance.misc_last_seen = validated_data.get('misc_last_seen', instance.misc_last_seen)
instance.tnp_last_seen = validated_data.get('tnp_last_seen', instance.tnp_last_seen)
instance.events_last_seen = validated_data.get('events_last_seen', instance.events_last_seen)
instance.save()
return instance
class FacultySerializer(serializers.HyperlinkedModelSerializer):
'''
Serializer Class for Faculty Model
'''
user_fields = UserSerializer(source='user')
relevent_count = serializers.ReadOnlyField(source='relevent')
academics_count = serializers.ReadOnlyField(source='academics')
administration_count = serializers.ReadOnlyField(source='administration')
misc_count = serializers.ReadOnlyField(source='misc')
tnp_count = serializers.ReadOnlyField(source='tnp')
events_count = serializers.ReadOnlyField(source='events')
notices = serializers.HyperlinkedRelatedField(many=True, view_name='notice-detail', read_only=True)
class Meta:
model = Faculty
fields = ('user_fields', 'notices', 'id', 'designation', 'department', 'ph_no', 'address', 'alternate_email', 'relevent_count', 'academics_count', 'administration_count',
'tnp_count', 'events_count', 'misc_count', 'relevent_last_seen', 'administration_last_seen', 'academics_last_seen', 'misc_last_seen', 'tnp_last_seen', 'events_last_seen')
def create(self, validated_data):
"""
Create and return a new `Faculty` instance, given the validated data.
"""
return Faculty.objects.create(**validated_data)
def update(self, instance, validated_data):
validated_user_data = validated_data['user'].items()
user = User.objects.filter(username=validated_data['username'])
user.update(email=validated_user_data[0][1], first_name=validated_user_data[
2][1], last_name=validated_user_data[3][1])
user[0].set_password(validated_user_data[1][1])
instance.designation = validated_data.get('designation', instance.designation)
instance.department = validated_data.get('department', instance.department)
instance.ph_no = validated_data.get('ph_no', instance.ph_no)
instance.address = validated_data.get('address', instance.address)
instance.alternate_email = validated_data.get('alternate_email', instance.alternate_email)
instance.relevent_last_seen = validated_data.get('relevent_last_seen', instance.relevent_last_seen)
instance.academics_last_seen = validated_data.get('academics_last_seen', instance.academics_last_seen)
instance.administration_last_seen = validated_data.get(
'administration_last_seen', instance.administration_last_seen)
instance.misc_last_seen = validated_data.get('misc_last_seen', instance.misc_last_seen)
instance.tnp_last_seen = validated_data.get('tnp_last_seen', instance.tnp_last_seen)
instance.events_last_seen = validated_data.get('events_last_seen', instance.events_last_seen)
instance.save()
return instance
class NoticeSerializer(serializers.ModelSerializer):
'''
Serializer Class for Notices Model
'''
owner = serializers.ReadOnlyField(source='owner.user.username')
bookmark_flag = serializers.SerializerMethodField('check_for_bookmark_flag')
bookmark_id = serializers.SerializerMethodField('check_for_bookmark_id')
def check_for_bookmark_flag(self, Notice):
if BookmarkedNotice.objects.filter(notice__id=Notice.id, user__id=self.context['request'].user.id).count() == 1:
return True
else:
return False
def check_for_bookmark_id(self, Notice):
if BookmarkedNotice.objects.filter(notice=Notice.id).count():
return BookmarkedNotice.objects.get(notice=Notice.id).id
else:
return None
class Meta:
model = Notice
fields = ('owner', 'bookmark_flag', 'bookmark_id', 'id', 'scheduled_time', 'title', 'description', 'ce', 'cs', 'it', 'ee', 'ece', 'eee', 'me', 'mt', 'ic', 'first_year',
'second_year', 'third_year', 'fourth_year', 'btech', 'mtech', 'mba', 'mca', 'other_course', 'file_attached', 'created_at', 'updated_at', 'category', 'subject')
def create(self, validated_data):
"""
Create and return a new `Notices` instance, given the validated data.
"""
return Notice.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.scheduled_time = validated_data.get('scheduled_time', instance.scheduled_time)
instance.title = validated_data.get('title', instance.title)
instance.description = validated_data.get('description', instance.description)
instance.file_attached = validated_data.get('file_attached', instance.file_attached)
instance.created_at = validated_data.get('created_at', instance.created_at)
instance.updated_at = validated_data.get('updated_at', instance.updated_at)
instance.category = validated_data.get('category', instance.category)
instance.subject = validated_data.get('subject', instance.subject)
instance.cs = validated_data.get('cs', instance.cs)
instance.ce = validated_data.get('ce', instance.ce)
instance.ee = validated_data.get('ee', instance.ee)
instance.ece = validated_data.get('ece', instance.ece)
instance.eee = validated_data.get('eee', instance.eee)
instance.it = validated_data.get('it', instance.it)
instance.ic = validated_data.get('ic', instance.ic)
instance.me = validated_data.get('me', instance.me)
instance.mt = validated_data.get('mt', instance.mt)
instance.first_year = validated_data.get('first_year', instance.first_year)
instance.second_year = validated_data.get('second_year', instance.second_year)
instance.third_year = validated_data.get('third_year', instance.third_year)
instance.fourth_year = validated_data.get('fourth_year', instance.fourth_year)
instance.btech = validated_data.get('btech', instance.btech)
instance.mtech = validated_data.get('mtech', instance.mtech)
instance.mba = validated_data.get('mba', instance.mba)
instance.mca = validated_data.get('mca', instance.mca)
instance.other_course = validated_data.get('other_course', instance.other_course)
instance.save()
return instance
class NoticeListSerializer(serializers.HyperlinkedModelSerializer):
'''
Serializer Class for Listing the notices only(not the details) that are available in Model
'''
owner = serializers.ReadOnlyField(source='owner.username')
attachment_flag = serializers.SerializerMethodField('check_for_attachment')
bookmark_flag = serializers.SerializerMethodField('check_for_bookmark_flag')
def check_for_bookmark_flag(self, Notice):
if BookmarkedNotice.objects.filter(notice=Notice.id).count() == 1:
return True
else:
return False
def check_for_attachment(self, Notice):
return Notice.file_attached != ''
class Meta:
model = Notice
fields = ('bookmark_flag', 'id', 'title', 'owner', 'details', 'attachment_flag', 'created_at', 'category')
class BookmarkSerializer(serializers.ModelSerializer):
'''
Serializer class for Bookmarks Model
'''
user = serializers.PrimaryKeyRelatedField(source='user.username', read_only=True)
notice_id = serializers.SlugRelatedField(source='notice', slug_field='pk', queryset=Notice.objects.all())
class Meta:
model = BookmarkedNotice
fields = ('id', 'user', 'notice_id')
def create(self, validated_data):
"""
Create and return a new `BookmarkedNotice` instance, given the validated data.
"""
return BookmarkedNotice.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.id = validated_data.get('id', instance.id)
instance.user = validated_data.get('user', instance.user)
# instance.notice = validated_data.get('notice',instance.notice)
instance.save()
return instance
|
[
"deshrajdry@gmail.com"
] |
deshrajdry@gmail.com
|
cdd399103ba43752763d8ad26be3a37a30b31469
|
c898fb981d3be036fd5a3de68641b07c3d10b171
|
/flask_jwt_router/testing/__init__.py
|
e2964b0155adcd52f7bfa5f095476bd6d8112dfb
|
[
"MIT"
] |
permissive
|
joegasewicz/flask-jwt-router
|
669226adf9c82a00d783319cbc0790c6190ad518
|
17ad0e184e5692d48ed7fefd35c0d8a5b435c189
|
refs/heads/master
| 2023-03-08T16:49:42.515425
| 2022-06-22T17:20:46
| 2022-06-22T17:20:46
| 191,003,928
| 40
| 9
|
MIT
| 2023-02-16T04:08:37
| 2019-06-09T12:30:31
|
Python
|
UTF-8
|
Python
| false
| false
| 357
|
py
|
"""
jwt_routes = JwtRoutes(app, strategies=[Google])
class BaseJwtRoutes:
pass
class JwtRoutes(BaseJwtRoutes):
pass
# Usage:
if E2E_TESTS == True:
class TestJwtRoutes(TestRouterMixin, BaseJwtRoutes):
pass
jwt_routes = TestJwtRoutes(app, strategies=[GoogleTest])
else:
jwt_routes = JwtRoutes(app, strategies=[Google])
"""
|
[
"noreply@github.com"
] |
joegasewicz.noreply@github.com
|
35b3b957199ad587d7397bee4b61229411c935e9
|
6015d4b07ac1a1b7c784b7c643ecc9e83d4d5972
|
/python_lesson1/Part7_Control_Flow_Lecture.py
|
7605da12076a07948a43e8ef113cff72b056aeec
|
[] |
no_license
|
smiledt/Full-Stack-Web-Developer-Bootcamp
|
38263bf779945aafb0b10dace169006f9fd2fa6d
|
5963711613a358162e85b260de9bf54828203337
|
refs/heads/master
| 2021-02-16T06:06:56.626763
| 2020-05-11T03:30:13
| 2020-05-11T03:30:13
| 244,974,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,910
|
py
|
#########################
#### CONTROL FLOW #######
#########################
# In this lecture we will cover Control Flow in Python, basically how to dictate
# how our code behaves in whatever manner we want. Let's start with basic
# comparison operators:
###########################
## COMPARISON OPERATORS ###
###########################
# Greater than
1 > 2
# Less than
1 < 2
# Greater than or Equal to
1 >= 1
# Less than or Equal to
1 <= 4
# Equality
1 == 1
1 == "1"
'hi' == 'bye'
# Inequality
1 != 2
###########################
### LOGICAL OPERATORS #####
###########################
# AND
(1 > 2) and (2 < 3)
# OR
(1 > 2) or (2 < 3)
# Multiple logical operators
(1 == 2) or (2 == 3) or (4 == 4)
##################################
### if,elif, else Statements #####
##################################
# Indentation is extremely important in Python and is basically Python's way of
# getting rid of the enclosing brackets like {} that we've seen in the past and
# that are common in other languages. This adds to Python's readability and is a
# huge part of the "Zen of Python". It is also a big reason why it's so popular
# for beginners. Any text editor or IDE should be able to auto-indent for you,
# but always double check this if you ever get errors in your code! Code blocks
# are then introduced by a colon (:).
# Now let's show some examples of if, elif, and else statements:
if 1 < 2:
print('Yep!')
if 1 < 2:
print('yep!')
# If Else - Make sure to line up the else with the if statement to "connect" them
if 1 < 2:
print('first')
else:
print('last')
###
###
if 1 > 2:
print('first')
else:
print('last')
# To add more conditions (like else if) you just use a single phrase "elif"
if 1 == 2:
print('first')
elif 3 == 3:
print('middle')
else:
print('Last')
################################################################################
####################-----------------------------###############################
####################-----------LOOPS-------------###############################
####################-----------------------------###############################
################################################################################
# Time to review loops with Python, such as For Loops and While loops
# Python is unique in that is discards parenthesis and brackets in favor of a
# whitespace system that defines blocks of code through indentation, this forces
# the user to write readable code, which is great for future you looking back at
# your older code later on!
#####################
### FOR LOOPS #######
#####################
# Use For Loops for any sequence of elements. If you try to use a for loop with
# a mapping like a dictionary, it will still work, but it won't loop with any
# order. Let's walk through some examples of how a for loop behaves with the
# various data structures we've learned about!
# For Loop with a list
# Perform an action with each element
seq = [1, 2, 3, 4, 5]
for item in seq:
print(item)
# Perform an action for every element but doesn't actually involve the elements
for item in seq:
print('Yep')
# You can call the loop variable whatever you want:
for jelly in seq:
print(jelly + jelly)
# For Loop with a Dictionary
ages = {"Sam": 3, "Frank": 4, "Dan": 29}
for key in ages:
print("This is the key")
print(key)
print("This is the value")
print(ages[key])
print("\n")
# A list of tuple pairs is a very common format for functions to return data in
# Because it is so common we can use tuple un-packing to deal with this, example:
mypairs = [(1, 10), (3, 30), (5, 50)]
# Normal
for tup in mypairs:
print(tup)
# Tuple un-packing
for item1, item2 in mypairs:
print(item1)
print(item2)
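# A short aside (not in the original lecture): tuple un-packing also works
# nicely with dictionaries, because the .items() method yields (key, value)
# pairs that you can un-pack directly.
for name, age in ages.items():
    print("{} is {}".format(name, age))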
#######################
### WHILE LOOPS #######
#######################
# While loops allow us to continually perform and action until a condition
# becomes true. For example:
i = 1
while i < 5:
print('i is: {}'.format(i))
i = i + 1
#####################
### OTHER TOPICS ####
#####################
# RANGE FUNCTION
# range() can quickly generate integers for you, based on a starting and ending point
# Note that it's lazy - it doesn't build the full list up front:
range(5)
list(range(5))
for i in range(5):
print(i)
# Start and ending
range(1, 10)
# Third argument for step-size
range(0, 10, 2)
# List Comprehension
# This technique allows you to quickly create lists with a single line of code.
# You can think of this as deconstructing a for loop with an append(). For Example:
# Starting with:
x = [1, 2, 3, 4]
# We could do this:
out = []
for item in x:
out.append(item**2)
print(out)
# Written in List Comprehension Form
[item**2 for item in x]
# List comprehension is a great tool, but remember it's not always appropriate
# for every situation; don't sacrifice readability for a list comprehension. Its
# speed is very comparable to that of a for loop.
|
[
"dereksmilees@gmail.com"
] |
dereksmilees@gmail.com
|
90adc3801f23ed865f8ce3373066f9a2a5ee43e3
|
e2bd8debf59f71e2c7fabea03cc108618944b2b0
|
/el_pagination/paginators.py
|
6da5231fca53a0b0e1e586150ed4c8803e1d1b0e
|
[] |
no_license
|
successar/Quizz
|
874c7c8656c33973d5d4f9563073b0434573a333
|
2244ff13568db92e3ff88156982ec44c83418199
|
refs/heads/master
| 2021-01-21T13:11:45.960397
| 2016-05-11T10:34:48
| 2016-05-11T10:34:48
| 53,747,315
| 1
| 1
| null | 2016-05-07T15:00:41
| 2016-03-12T18:36:34
|
Python
|
UTF-8
|
Python
| false
| false
| 4,359
|
py
|
"""Customized Django paginators."""
from __future__ import unicode_literals
from math import ceil
from django.core.paginator import (
EmptyPage,
Page,
PageNotAnInteger,
Paginator,
)
class CustomPage(Page):
"""Handle different number of items on the first page."""
def start_index(self):
"""Return the 1-based index of the first item on this page."""
paginator = self.paginator
# Special case, return zero if no items.
if paginator.count == 0:
return 0
elif self.number == 1:
return 1
return (
(self.number - 2) * paginator.per_page + paginator.first_page + 1)
def end_index(self):
"""Return the 1-based index of the last item on this page."""
paginator = self.paginator
# Special case for the last page because there can be orphans.
if self.number == paginator.num_pages:
return paginator.count
return (self.number - 1) * paginator.per_page + paginator.first_page
class BasePaginator(Paginator):
"""A base paginator class subclassed by the other real paginators.
Handle different number of items on the first page.
"""
def __init__(self, object_list, per_page, **kwargs):
if 'first_page' in kwargs:
self.first_page = kwargs.pop('first_page')
else:
self.first_page = per_page
super(BasePaginator, self).__init__(object_list, per_page, **kwargs)
def get_current_per_page(self, number):
return self.first_page if number == 1 else self.per_page
class DefaultPaginator(BasePaginator):
"""The default paginator used by this application."""
def page(self, number):
number = self.validate_number(number)
if number == 1:
bottom = 0
else:
bottom = ((number - 2) * self.per_page + self.first_page)
top = bottom + self.get_current_per_page(number)
if top + self.orphans >= self.count:
top = self.count
return CustomPage(self.object_list[bottom:top], number, self)
def _get_num_pages(self):
if self._num_pages is None:
if self.count == 0 and not self.allow_empty_first_page:
self._num_pages = 0
else:
hits = max(0, self.count - self.orphans - self.first_page)
try:
self._num_pages = int(ceil(hits / float(self.per_page))) + 1
except ZeroDivisionError:
self._num_pages = 0 # fallback to a safe value
return self._num_pages
num_pages = property(_get_num_pages)
class LazyPaginator(BasePaginator):
"""Implement lazy pagination."""
def validate_number(self, number):
try:
number = int(number)
except ValueError:
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
number = self.validate_number(number)
current_per_page = self.get_current_per_page(number)
if number == 1:
bottom = 0
else:
bottom = ((number - 2) * self.per_page + self.first_page)
top = bottom + current_per_page
# Retrieve more objects to check if there is a next page.
objects = list(self.object_list[bottom:top + self.orphans + 1])
objects_count = len(objects)
if objects_count > (current_per_page + self.orphans):
# If another page is found, increase the total number of pages.
self._num_pages = number + 1
# In any case, return only objects for this page.
objects = objects[:current_per_page]
elif (number != 1) and (objects_count <= self.orphans):
raise EmptyPage('That page contains no results')
else:
# This is the last page.
self._num_pages = number
return CustomPage(objects, number, self)
def _get_count(self):
raise NotImplementedError
count = property(_get_count)
def _get_num_pages(self):
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
raise NotImplementedError
page_range = property(_get_page_range)
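# A minimal usage sketch (illustrative only, not part of the original module):
# paginate 23 items with 3 items on the first page and 10 on later pages.
#
#     paginator = DefaultPaginator(list(range(23)), per_page=10, first_page=3)
#     page = paginator.page(2)
#     page.start_index(), page.end_index()  # -> (4, 13)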
|
[
"successar@gmail.com"
] |
successar@gmail.com
|
0e4438de1d5d50a311c87a31e465c1c6dfd3901c
|
84759a86f19a7a7eb8ccd618c3cf1a3ef3228b13
|
/examples/python/murach_python/book_apps/ch14/product_viewer/product_viewer.py
|
d2f7ab1e5d17fe51f51b963d8efcf0e37e4ea7c5
|
[] |
permissive
|
mattgraham93/mattgraham93.github.io
|
50a77c4dd40398a631e5c026f95c33268c8cc67b
|
2b65735f87427c770187c21c873e5277019c3a5f
|
refs/heads/main
| 2023-07-07T09:07:56.085470
| 2023-06-30T03:51:24
| 2023-06-30T03:51:24
| 154,541,498
| 8
| 0
|
Apache-2.0
| 2020-08-08T16:53:18
| 2018-10-24T17:30:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
from objects import Product
def show_products(products):
print("PRODUCTS")
for i in range(len(products)):
product = products[i]
print(str(i+1) + ". " + product.name)
print()
def show_product(product):
print("PRODUCT DATA")
print("Name: {:s}".format(product.name))
print("Price: {:.2f}".format(product.price))
print("Discount percent: {:d}%".format(product.discountPercent))
print("Discount amount: {:.2f}".format(product.getDiscountAmount()))
print("Discount price: {:.2f}".format(product.getDiscountPrice()))
print()
def main():
print("The Product Viewer program")
print()
# a tuple of Product objects
products = (Product("Stanley 13 Ounce Wood Hammer", 12.99, 62),
Product('National Hardware 3/4" Wire Nails', 5.06, 0),
Product("Economy Duct Tape, 60 yds, Silver", 7.24, 0))
show_products(products)
while True:
number = int(input("Enter product number: "))
print()
product = products[number-1]
show_product(product)
choice = input("View another product? (y/n): ")
print()
if choice != "y":
print("Bye!")
break
if __name__ == "__main__":
main()
|
[
"matt.graham@bellevuecollege.edu"
] |
matt.graham@bellevuecollege.edu
|
2e61a6b407a11d07039c7a7032357e40376f6e6b
|
71fb04f723b46a1bf45295be239bcec25e07f98c
|
/examples/layers/preprocessing/classification/random_crop_demo.py
|
0538205cbc1fc4928a66c42bb9231dcb97e275ea
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-cv
|
9bca4479474e853ec3a1c541b8be20fea2447a1a
|
e83f229f1b7b847cd712d5cd4810097d3e06d14e
|
refs/heads/master
| 2023-08-31T10:22:08.406394
| 2023-08-30T20:24:57
| 2023-08-30T20:24:57
| 265,079,853
| 818
| 287
|
NOASSERTION
| 2023-09-12T16:49:01
| 2020-05-18T22:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,152
|
py
|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""random_crop_demo.py.py shows how to use the RandomCrop
preprocessing layer. Operates on an image of elephant. In this script the image
is loaded, then are passed through the preprocessing layers.
Finally, they are shown using matplotlib.
"""
import demo_utils
from keras_cv.layers.preprocessing import RandomCrop
def main():
many_elephants = demo_utils.load_elephant_tensor(output_size=(300, 300))
layer = RandomCrop(100, 200)
augmented = layer(many_elephants)
demo_utils.gallery_show(augmented.numpy())
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
keras-team.noreply@github.com
|
a214d38572ca60d8058ab74e572ca6b436b58734
|
773ad191e5b35af3b6248b8f43a3d50da6cfab94
|
/toolbox/randbn.py
|
61e4fe1af04f84214859207f461567810a32a1ce
|
[
"BSD-2-Clause"
] |
permissive
|
lauralwatkins/toolbox
|
a792ab0a7123333a1c40fd1af8cec0dca17b6815
|
f4c2e7c62f83ec98872f62bc5df7d947cf387d56
|
refs/heads/master
| 2022-09-30T03:15:37.318914
| 2022-07-19T22:24:30
| 2022-07-19T22:24:30
| 37,667,498
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
#!/usr/bin/env python
# --------------------------------------------------------------------------- #
# TOOLBOX.RANDBN
# Laura L Watkins [lauralwatkins@gmail.com]
# --------------------------------------------------------------------------- #
from __future__ import division, print_function
import numpy as np
from scipy import integrate, interpolate
def randbn(fn_name, params=None, num=1, vmin=0., vmax=1., ncdf=20):
"""
Draws numbers randomly from an input distribution in a given range.
INPUTS
fn_name : function name [*]
OPTIONS
params : parameters of the distribution [*][default None]
num : number of random numbers to generate [default 1]
vmin : lower limit of number range [default 0]
vmax : upper limit of number range [default 1]
ncdf : number of points at which to sample the CDF [**][default 20]
NOTES
[*] The function 'fn_name' should calculate the values x of the required
function for a given parameter set p, that is fn_name(x,p).
[**] Sampling the CDF at more points will increase the computation time
as each point requires an integral, but it may be necessary for complex
functions.
"""
values = np.linspace(vmin, vmax, ncdf)
# normalised cumulative distribution function
if params is None: f = lambda x: fn_name(x)
else: f = lambda x: fn_name(x, params)
cdf = np.cumsum([integrate.quad(f, values[max(0,i-1)], values[i])[0] \
for i in range(ncdf)])
cdf /= cdf[-1]
# sample is drawn by calculating the value for randomly-generated CDFs
sample = interpolate.interp1d(cdf, values)(np.random.rand(num))
return sample
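# The block below is an added usage sketch, not part of the original module:
# it draws a few samples from an arbitrary quadratic density f(x) = x**2 on
# [0, 2]; the function, limits and sample count are illustrative choices only.
if __name__ == "__main__":
    samples = randbn(lambda x: x**2, num=5, vmin=0., vmax=2., ncdf=50)
    print(samples)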
|
[
"lauralwatkins@gmail.com"
] |
lauralwatkins@gmail.com
|
84fd92823d3da86ca06d1ea1bef72787f0cfac2b
|
68d0032b178a00adca59929917ef83df28efa71f
|
/process_data.py
|
5e8177eecf2c5950ac5cb35683876feba12fa9bc
|
[] |
no_license
|
AbhishekTaur/Elman_RNN
|
3243657406851eebd1487ba63b4446bce489cd01
|
6cc67b8130797d9869d82337590c66d6264520a3
|
refs/heads/master
| 2020-04-30T21:33:26.118837
| 2019-03-22T15:25:41
| 2019-03-22T15:26:10
| 177,096,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
import pandas as pd
data = pd.read_csv("sample/blackscholes-small-4-8-cleaned.csv")
keys = data.keys()
print("Key\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tValues")
for key in keys:
print(key, "\t\t\t\t\t\t\t\t\t\t\t\t\t", data[key].values[0])
|
[
"abhishektaur123@gmail.com"
] |
abhishektaur123@gmail.com
|
bd0535c661dbe01083d172e9b8c093b5dbd1bbfe
|
e4485d5a94a64b242be2e24b298d0e56f0b55027
|
/day13.py
|
1d2c5525247d5abf550c88038a75938876901e16
|
[] |
no_license
|
13thZygrite/AdventOfCode2017
|
179158320c18819cc7124c05cf0daaf00833b720
|
0766bf4502cd0d15631423cbe7f1140e6fed07fd
|
refs/heads/master
| 2021-08-31T12:37:46.302253
| 2017-12-21T09:33:02
| 2017-12-21T09:33:02
| 112,941,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 728
|
py
|
#!/usr/bin/env python
with open("input_day13") as f:
input = f.readlines()
split = [map(int, line.split(": ")) for line in input]
state = [[] for i in range(split[-1][0] + 1)]
for line in split:
state[line[0]] = [line[1], 1, 0] # Length, direction (1 down, -1 up), current pos
# Part 1
severity = 0
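# A scanner with range r returns to the top of its sweep every 2*(r - 1)
# picoseconds; (r - 2)*2 + 2 below is the same quantity, so a packet reaching
# depth d at time d is caught exactly when d % (2*(r - 1)) == 0.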
for line in split:
if line[0] % ((line[1] - 2)* 2 + 2) == 0:
severity += line[0]* line[1]
print "Part 1: ", severity
# Part 2
delay = 0
while (1):
severity = 0
failed = False
for line in split:
if (line[0] + delay) % ((line[1] - 2)* 2 + 2) == 0:
failed = True
if not failed:
print "Part 2: ", delay
break
delay += 1
|
[
"themathsmassage@gmail.com"
] |
themathsmassage@gmail.com
|
7fa50c182bf54b2fbf51441eefa0f324279633e7
|
1431b07074b96c7baa6a43a99717da2a658424af
|
/test/utils/Test_Zip_Folder.py
|
d6ecc3e784eaacfbffe1988284d8bf95e88f557b
|
[
"Apache-2.0"
] |
permissive
|
almeidam/pbx-gs-python-utils
|
054a7334070627bc27f682ed78c2986230d1cfab
|
3f8987dd2d1fc27d1d262385280d7303009f5453
|
refs/heads/master
| 2020-04-30T10:44:46.179729
| 2019-03-20T13:59:01
| 2019-03-20T13:59:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
import json
from unittest import TestCase
from utils.Dev import Dev
from utils.Files import Files
from utils.Misc import Misc
from utils.Zip_Folder import Zip_Folder
class Test_Zip_Folder(TestCase):
def test__using_with__no_params(self):
with Zip_Folder() as (zip_file):
assert zip_file is None
def test__using_with_params(self):
target_folder = Files.current_folder()
with Zip_Folder(target_folder) as (zip_file):
assert Files.exists(zip_file) is True
assert Files.exists(zip_file) is False
|
[
"dinis.cruz@owasp.org"
] |
dinis.cruz@owasp.org
|
9bf099e4570aab4e3c827aba4cfa379cb7ad7196
|
a86cb1d0cc2c01ccc5b7d03d25a1b98d4f8b66ca
|
/day_18/crawling_03.py
|
08ce38608a68158041385e8770f169492843e3ce
|
[] |
no_license
|
yongseongCho/python_201911
|
020efd812df909f6d1150c6a15a9a4fa6ee946b6
|
f4696fac81a101d13a95ca0ca602e6478b4d2f58
|
refs/heads/master
| 2020-09-12T12:44:46.364259
| 2019-12-19T13:17:08
| 2019-12-19T13:17:08
| 222,429,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,321
|
py
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup as bs
html = '''
<td class="title">
<div class="tit3">
<a href="/movie/bi/mi/basic.nhn?code=181710"
title="포드 V 페라리">포드 V 페라리</a>
</div>
</td>
'''
soup = bs(html, 'html.parser')
# The find method of a BeautifulSoup object
# is used when searching by combining a tag's
# name with its attribute information
# - the find method returns only a single result
# - it returns the first matching tag it finds
# Returns the object for the first td tag found
tag = soup.find(name='td')
print(f'tag -> {tag}')
# Returns the object for the first a tag found
tag = soup.find(name='a')
print(f'tag -> {tag}')
# Returns the first object found
# whose class attribute is title
tag = soup.find(attrs={'class':'title'})
print(f'tag -> {tag}')
# Returns the first object found
# whose class attribute is tit3
tag = soup.find(attrs={'class':'tit3'})
print(f'tag -> {tag}')
# Returns the first object found
# whose tag name is td and
# whose class attribute is tit3
# - if no such tag exists, None is returned
tag = soup.find(name='td',
attrs={'class':'tit3'})
print(f'tag -> {tag}')
|
[
"noreply@github.com"
] |
yongseongCho.noreply@github.com
|
5e991e4e3dc2696c8cfb6c76836a9bc9521137d2
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/Bert-text-classification_for_PyTorch/transformers/src/transformers/models/marian/modeling_flax_marian.py
|
9d8b44c5f9da84470baadab2b4eafaf3dfea6fd6
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 63,778
|
py
|
# coding=utf-8
# Copyright 2021 The Marian Team Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flax Marian model."""
import math
import random
from functools import partial
from typing import Callable, Optional, Tuple
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from jax import lax
from jax.random import PRNGKey
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_flax_outputs import (
FlaxBaseModelOutput,
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
FlaxSeq2SeqLMOutput,
FlaxSeq2SeqModelOutput,
)
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
append_call_sample_docstring,
append_replace_return_docstrings,
overwrite_call_docstring,
)
from ...utils import logging
from .configuration_marian import MarianConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "Helsinki-NLP/opus-mt-en-de"
_CONFIG_FOR_DOC = "MarianConfig"
_TOKENIZER_FOR_DOC = "MarianTokenizer"
MARIAN_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a Flax Linen
[flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`MarianConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
MARIAN_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
MARIAN_ENCODE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
MARIAN_DECODE_INPUTS_DOCSTRING = r"""
Args:
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
encoder_outputs (`tuple(tuple(jnp.ndarray)`):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
def create_sinusoidal_positions(n_pos, dim):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
sentinel = dim // 2 + dim % 2
out = np.zeros_like(position_enc)
out[:, 0:sentinel] = np.sin(position_enc[:, 0::2])
out[:, sentinel:] = np.cos(position_enc[:, 1::2])
return jnp.array(out)
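# Note (added for clarity): the table built above places all sine components in
# the first half of the embedding dimension and all cosine components in the
# second half, rather than interleaving sin/cos along the dimension.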
# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros_like(input_ids)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
return shifted_input_ids
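# Illustrative example (added, not part of the library): with pad_token_id=0 and
# decoder_start_token_id=2, an input of [[5, -100, 6]] becomes [[2, 5, 0]]: each
# row is shifted right with the start token prepended, and any remaining -100
# label placeholders are replaced by the pad token.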
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->Marian
class FlaxMarianAttention(nn.Module):
config: MarianConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {self.num_heads})."
)
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.causal:
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size = hidden_states.shape[0]
# get query proj
query_states = self.q_proj(hidden_states)
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self.k_proj(key_value_states)
value_states = self.v_proj(key_value_states)
else:
# self_attention
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = self._split_heads(query_states)
key_states = self._split_heads(key_states)
value_states = self._split_heads(value_states)
# handle cache prepare causal attention mask
if self.causal:
query_length, key_length = query_states.shape[1], key_states.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer with Bart->Marian
class FlaxMarianEncoderLayer(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = FlaxMarianAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.encoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype,
)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
self.fc1 = nn.Dense(
self.config.encoder_ffn_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.fc2 = nn.Dense(
self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
residual = hidden_states
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->Marian
class FlaxMarianEncoderLayerCollection(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
FlaxMarianEncoderLayer(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.encoder_layers)
]
self.layerdrop = self.config.encoder_layerdrop
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for encoder_layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = (hidden_states, all_hidden_states, all_attentions)
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayer with Bart->Marian
class FlaxMarianDecoderLayer(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = FlaxMarianAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
causal=True,
dtype=self.dtype,
)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.encoder_attn = FlaxMarianAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype,
)
self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.fc1 = nn.Dense(
self.config.encoder_ffn_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.fc2 = nn.Dense(
self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->Marian
class FlaxMarianDecoderLayerCollection(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
FlaxMarianDecoderLayer(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.decoder_layers)
]
self.layerdrop = self.config.decoder_layerdrop
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not deterministic and (dropout_probability < self.layerdrop):
layer_outputs = (None, None, None)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
init_cache=init_cache,
output_attentions=output_attentions,
deterministic=deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
class FlaxMarianEncoder(nn.Module):
config: MarianConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.max_source_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
self.embed_positions = create_sinusoidal_positions(self.config.max_position_embeddings, embed_dim)
self.layers = FlaxMarianEncoderLayerCollection(self.config, self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
positions = jnp.take(self.embed_positions, position_ids, axis=0)
# explicitly cast the positions here, since self.embed_positions is not registered as a parameter
positions = positions.astype(inputs_embeds.dtype)
hidden_states = inputs_embeds + positions
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return outputs
return FlaxBaseModelOutput(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class FlaxMarianDecoder(nn.Module):
config: MarianConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.max_target_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
self.embed_positions = create_sinusoidal_positions(self.config.max_position_embeddings, embed_dim)
self.layers = FlaxMarianDecoderLayerCollection(self.config, self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
# embed positions
positions = jnp.take(self.embed_positions, position_ids, axis=0)
        # explicitly cast the positions here, since self.embed_positions are not registered as parameters
positions = positions.astype(inputs_embeds.dtype)
hidden_states = inputs_embeds + positions
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return outputs
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
class FlaxMarianModule(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.shared = nn.Embed(
self.config.vocab_size,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.encoder = FlaxMarianEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
self.decoder = FlaxMarianDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
def _get_encoder_module(self):
return self.encoder
def _get_decoder_module(self):
return self.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return FlaxSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
class FlaxMarianPreTrainedModel(FlaxPreTrainedModel):
config_class = MarianConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
def __init__(
self,
config: MarianConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
**kwargs
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
# make sure initialization pass will work for FlaxMarianForSequenceClassificationModule
input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
attention_mask = jnp.ones_like(input_ids)
decoder_input_ids = input_ids
decoder_attention_mask = jnp.ones_like(input_ids)
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
return self.module.init(
rngs,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
)["params"]
def init_cache(self, batch_size, max_length, encoder_outputs):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
"""
# init input variables to retrieve cache
decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
)
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)
init_variables = self.module.init(
jax.random.PRNGKey(0),
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
init_cache=True,
method=_decoder_forward, # we only need to call the decoder to init the cache
)
return unfreeze(init_variables["cache"])
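    # Illustrative note (an assumption, consistent with the decode() example below): the cache
    # returned by init_cache is what flows back into decode()/generate() as `past_key_values`, e.g.
    #   past_key_values = model.init_cache(batch_size, max_length, encoder_outputs)
    #   outputs = model.decode(decoder_input_ids, encoder_outputs,
    #                          past_key_values=past_key_values, decoder_position_ids=position_ids)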
@add_start_docstrings(MARIAN_ENCODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=MarianConfig)
def encode(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=64, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(input_ids, attention_mask, position_ids, **kwargs)
return self.module.apply(
{"params": params or self.params},
input_ids=jnp.array(input_ids, dtype="i4"),
attention_mask=jnp.array(attention_mask, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
method=_encoder_forward,
)
@add_start_docstrings(MARIAN_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=MarianConfig)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=64, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> last_decoder_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
        # If past_key_values are passed, then the cache is already initialized and a private flag
        # init_cache has to be passed down to ensure the cache is used. It also has to be made sure
        # that the cache is marked as mutable so that it can be changed by the FlaxMarianAttention module.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past = outputs
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past = outputs
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
@add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING)
def __call__(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
decoder_input_ids: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
# prepare encoder inputs
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
# prepare decoder inputs
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
)
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
if decoder_position_ids is None:
batch_size, sequence_length = decoder_input_ids.shape
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
return self.module.apply(
{"params": params or self.params},
input_ids=jnp.array(input_ids, dtype="i4"),
attention_mask=jnp.array(attention_mask, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
)
@add_start_docstrings(
"The bare Marian Model transformer outputting raw hidden-states without any specific head on top.",
MARIAN_START_DOCSTRING,
)
class FlaxMarianModel(FlaxMarianPreTrainedModel):
config: MarianConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
module_class = FlaxMarianModule
append_call_sample_docstring(
FlaxMarianModel, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC
)
class FlaxMarianMTModule(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
def setup(self):
self.model = FlaxMarianModule(config=self.config, dtype=self.dtype)
self.lm_head = nn.Dense(
self.model.shared.num_embeddings,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
def _get_encoder_module(self):
return self.model.encoder
def _get_decoder_module(self):
return self.model.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
position_ids=position_ids,
decoder_position_ids=decoder_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = self.model.variables["params"]["shared"]["embedding"]
lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
lm_logits = self.lm_head(hidden_states)
lm_logits += self.final_logits_bias.astype(self.dtype)
if not return_dict:
output = (lm_logits,) + outputs[1:]
return output
return FlaxSeq2SeqLMOutput(
logits=lm_logits,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"The MARIAN Model with a language modeling head. Can be used for translation.", MARIAN_START_DOCSTRING
)
class FlaxMarianMTModel(FlaxMarianPreTrainedModel):
module_class = FlaxMarianMTModule
dtype: jnp.dtype = jnp.float32
@add_start_docstrings(MARIAN_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=MarianConfig)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=64, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
        # If past_key_values are passed, then the cache is already initialized and a private flag
        # init_cache has to be passed down to ensure the cache is used. It also has to be made sure
        # that the cache is marked as mutable so that it can be changed by the FlaxMarianAttention module.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
outputs = decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = module.model.variables["params"]["shared"]["embedding"]
lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
lm_logits = module.lm_head(hidden_states)
lm_logits += module.final_logits_bias.astype(self.dtype)
return lm_logits, outputs
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
if past_key_values is None:
lm_logits, decoder_outputs = outputs
else:
(lm_logits, decoder_outputs), past = outputs
if return_dict:
outputs = FlaxCausalLMOutputWithCrossAttentions(
logits=lm_logits,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
)
else:
outputs = (lm_logits,) + decoder_outputs[1:]
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
def _adapt_logits_for_beam_search(self, logits):
"""This function enforces the padding token never to be generated."""
logits = logits.at[:, :, self.config.pad_token_id].set(float("-inf"))
return logits
def prepare_inputs_for_generation(
self,
decoder_input_ids,
max_length,
attention_mask: Optional[jnp.DeviceArray] = None,
decoder_attention_mask: Optional[jnp.DeviceArray] = None,
encoder_outputs=None,
**kwargs
):
# initializing the cache
batch_size, seq_length = decoder_input_ids.shape
past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since the decoder uses a causal mask, those positions are masked anyways.
# Thus we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if decoder_attention_mask is not None:
position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"encoder_outputs": encoder_outputs,
"encoder_attention_mask": attention_mask,
"decoder_attention_mask": extended_attention_mask,
"decoder_position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
return model_kwargs
FLAX_MARIAN_MT_DOCSTRING = """
Returns:
Example:
```python
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> input_ids = tokenizer(text, max_length=64, return_tensors="jax").input_ids
>>> sequences = model.generate(input_ids, max_length=64, num_beams=2).sequences
>>> outputs = tokenizer.batch_decode(sequences, skip_special_tokens=True)
>>> # should give *Meine Freunde sind cool, aber sie essen zu viele Kohlenhydrate.*
```
"""
overwrite_call_docstring(
FlaxMarianMTModel,
MARIAN_INPUTS_DOCSTRING + FLAX_MARIAN_MT_DOCSTRING,
)
append_replace_return_docstrings(FlaxMarianMTModel, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
|
[
"dongwenbo6@huawei.com"
] |
dongwenbo6@huawei.com
|
81dba2963b823be8d1f748daa2d0969e48aeebcc
|
73dd708f0a69b573f49854fa9416773558df4733
|
/webApi_dotnet.py
|
0d98e6082cffccf45e414334ccc4900052439394
|
[] |
no_license
|
jobFrancoNet/Shopper
|
9c333cf724410430d2f268dba4a970e1f92d5785
|
eb984e1cf18707f24466604ad8e30492e849fbc9
|
refs/heads/master
| 2022-09-23T11:08:22.480513
| 2020-06-05T18:58:10
| 2020-06-05T18:58:10
| 265,583,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
import requests
indirizzo_webapi="http://localhost:57058/api/Values/1"
richiesta=requests.get(indirizzo_webapi)
jsondata=richiesta.json()
print(jsondata)
|
[
"aiutocorsi@hotmail.it"
] |
aiutocorsi@hotmail.it
|
d75ab97fb9184a24f45a05f01fc83903b2dc748e
|
6f8aec72f983715b1dcc1e067e980a440440423a
|
/bruteguard/patterns/singleton.py
|
a9f7c0f51adf9d2ce958d11132938a6d7c1b1ffb
|
[
"MIT"
] |
permissive
|
dcopm999/django-brute-guard
|
41cef7c1f98b275c0ef2176424c8ef1e75002fdb
|
e4c629d81f1cc732ddae2a43042e92ea423884b8
|
refs/heads/master
| 2023-08-02T06:16:54.219332
| 2021-09-30T05:45:10
| 2021-09-30T05:45:10
| 409,435,237
| 0
| 0
|
MIT
| 2021-09-30T05:45:10
| 2021-09-23T03:32:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
from typing import Dict
class SingletonMeta(type):
"""
    In Python, a Singleton can be implemented in several ways. Possible approaches
    include a base class, a decorator, and a metaclass. We use a metaclass here,
    since it is the best fit for this purpose.
"""
_instances: Dict[type, type] = {}
def __call__(cls, *args, **kwargs):
"""
        This implementation does not account for possible changes to the
        arguments passed to `__init__`.
"""
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class Singleton(object):
def __new__(cls):
if not hasattr(cls, "instance"):
cls.instance = super(Singleton, cls).__new__(cls)
return cls.instance
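# Minimal usage sketch (not part of the original module; the class name below is an
# illustrative assumption). Both helpers guarantee a single shared instance:
#
#   class AppConfig(metaclass=SingletonMeta):
#       pass
#
#   assert AppConfig() is AppConfig()   # same object via the metaclass
#   assert Singleton() is Singleton()   # same object via __new__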
|
[
"dcopm999@gmail.com"
] |
dcopm999@gmail.com
|
0dd03e74709472bf5290b512b954f095a6b05aee
|
cccbe12d485747620a8409e660da1750c4f32701
|
/director/migrations/versions/30d6f6636351_initial_migration.py
|
2a603bfa87a78d580219061ab85f910ef91e76fe
|
[
"BSD-3-Clause"
] |
permissive
|
keshabb/celery-director
|
1c958ac87c67e507ac220209c9236cf8f822c08f
|
95ece533ba2f4205932e1bb761f1acab977918c4
|
refs/heads/master
| 2021-01-05T10:09:25.908079
| 2020-02-07T18:16:30
| 2020-02-07T18:20:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
"""Initial migration
Revision ID: 30d6f6636351
Revises:
Create Date: 2020-02-07 18:34:41.680883
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import UUIDType
from director.models.utils import JSONBType
# revision identifiers, used by Alembic.
revision = '30d6f6636351'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('workflows',
sa.Column('id', UUIDType(binary=False), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('project', sa.String(), nullable=False),
sa.Column('status', sa.Enum('pending', 'progress', 'success', 'error', 'canceled', name='statustype'), nullable=False),
sa.Column('payload', JSONBType(), nullable=True),
sa.Column('periodic', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_workflows'))
)
op.create_index(op.f('ix_workflows_created_at'), 'workflows', ['created_at'], unique=False)
op.create_table('tasks',
sa.Column('id', UUIDType(binary=False), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('key', sa.String(), nullable=False),
sa.Column('status', sa.Enum('pending', 'progress', 'success', 'error', 'canceled', name='statustype'), nullable=False),
sa.Column('previous', JSONBType(), nullable=True),
sa.Column('workflow_id', UUIDType(binary=False), nullable=False),
sa.ForeignKeyConstraint(['workflow_id'], ['workflows.id'], name=op.f('fk_tasks_workflow_id_workflows')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_tasks'))
)
op.create_index(op.f('ix_tasks_created_at'), 'tasks', ['created_at'], unique=False)
def downgrade():
op.drop_index(op.f('ix_tasks_created_at'), table_name='tasks')
op.drop_table('tasks')
op.drop_index(op.f('ix_workflows_created_at'), table_name='workflows')
op.drop_table('workflows')
|
[
"ncrocfer@gmail.com"
] |
ncrocfer@gmail.com
|
7b896b4589bf69f36141e6756328de16f68c4a47
|
c98b6927dcea7fbab0d5d456a6530ade2d9c74a5
|
/refresh.py
|
5bedb43ab92f73e849c809c7aebc181dc7b4ced3
|
[] |
no_license
|
nrtdemo/AT
|
a8d41cf919165c8d25d3c5577d61b986bcd6a2f6
|
06a14c09c6dac3ea65ee5743c0055e34c3be3692
|
refs/heads/master
| 2021-04-27T06:10:15.442508
| 2018-04-25T14:41:59
| 2018-04-25T14:41:59
| 122,608,112
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cgitb
import cgi
from link.FortyFiftyGbE import FF
from link.PEbangkok import PE
form = cgi.FieldStorage()
cgitb.enable()
if __name__ == "__main__":
print "Content-type: application/json\n\n"
print
if 'link' in form:
Link = form['link'].value
if Link == "40G100G":
ff = FF()
ff.jsonlink()
if Link == "PE":
pe = PE()
pe.jsonlink()
else:
ff = FF()
ff.jsonlink()
|
[
"nrtdemo@hotmail.com"
] |
nrtdemo@hotmail.com
|
2a739b751d27912b4ec246d9d6c54a4b4576bb53
|
441ee516fa509a66eb6a6132ed0fbafeae1a06ae
|
/uploadf/models.py
|
ecd3c53d7d6062ba60639a19f3c1636e76875306
|
[] |
no_license
|
Shirhussain/FileUpload
|
3237627020ec322d4097e757b64f9f0c64feb4e7
|
19d2e848d7d05fd46838f9140c0a5658bbca281a
|
refs/heads/master
| 2022-08-26T13:26:23.859084
| 2020-05-27T22:02:36
| 2020-05-27T22:02:36
| 264,777,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
from django.db import models
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.CharField(max_length=50)
pdf = models.FileField(upload_to="mag/")
cover = models.ImageField(upload_to="mag/cover/", null=True, blank=True)
def __str__(self):
return self.title
def delete(self,*args, **kwargs):
self.pdf.delete()
self.cover.delete()
super().delete(*args, **kwargs)
|
[
"sh.danishyar@gmail.com"
] |
sh.danishyar@gmail.com
|
01a0395d3c8609fa3ce667d63ab94888725a0929
|
5539827c951cdeda292bc660d7d825aa64f0cb76
|
/GuliEdu/wsgi.py
|
336349ebaa5ca1f825e0fb4f8b394ed97ebcd997
|
[] |
no_license
|
dzapologize/GuliEdu
|
f5ed626003b49f6eb7f5590adb3487121cfda10e
|
4916486c4d548178ed50df906efbb08297477b08
|
refs/heads/master
| 2023-01-01T23:58:01.009774
| 2020-10-03T08:40:18
| 2020-10-03T08:40:18
| 299,006,299
| 0
| 0
| null | 2020-10-03T08:38:47
| 2020-09-27T10:23:15
|
Python
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for GuliEdu project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GuliEdu.settings')
application = get_wsgi_application()
|
[
"diaozheng@yunbangshou.com"
] |
diaozheng@yunbangshou.com
|
35ac429c2000eeba0d2ae7692c1779bd7d106feb
|
a61c6fece6fb5b8eef7ff9c68ec3315d9512ce1d
|
/History Maker/ultimate_history.py
|
02b8f4946f170fb949dbdd58d47543ec889e9221
|
[] |
no_license
|
byte127x/random-things
|
cbd2cabf13e2e780b23843aaac949d4ce68019ff
|
d05f834e227f8a956bc7e88fbaf19a9040a3815a
|
refs/heads/main
| 2023-07-15T15:12:09.380001
| 2021-08-21T00:54:24
| 2021-08-21T00:54:24
| 398,433,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
import random as rm
print("Welcome To The Domination Controller\nThis program makes radical algorithms to decide which country should fade from existence!\n")
countries = ['Austria', "Belgium", 'Bulgaria', 'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Ireland', 'Italy', 'Latvia', 'Lithuania', 'Luxembourg', 'Malta', 'The Netherlands', 'Poland', 'Portugal', 'Romania', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Andorra', 'Armenia', 'Belarus', 'Bosnia and Herzegovina', 'Faroe Isands', 'Georgia', 'Gibraltar', 'Iceland', 'Isle of Man', 'Liechtenstein', 'Macedonia', 'Moldova', 'Monaco', 'Montenegro', 'Norway', 'Russia', 'San Marino', 'Serbia', 'Switzerland', 'Turkey', 'Ukraine', 'United Kingdom']
input('Press Enter to Start!')
fIO = open('Data.txt', 'a')
while True:
revolt_time = (rm.randint(0, 8) == 6)
victim_state = rm.randint(0, len(countries)-1)
swallow_by_neighbor_time = (rm.randint(0, 10) == 9)
print('')
if len(countries) == 1:
print(f'{countries[0]} Won The Supreme Battle!')
fIO.write(f'{countries[0]} Won The Supreme Battle!')
break
elif revolt_time:
fIO.write("REVOLUTION TIME FOR ...")
print("REVOLUTION TIME FOR ...")
fIO.write(countries[victim_state] + '\n')
print(countries[victim_state] + '\n')
elif swallow_by_neighbor_time:
print(f"{countries[victim_state]} Turns its neighbors into puppets!\nThey still exist but are dominated by their puppetmaster\n")
fIO.write(f"{countries[victim_state]} Turns its neighbors into puppets!\nThey still exist but are dominated by their puppetmaster\n")
else:
fIO.write('Battle Time - Who Will Win???')
print('Battle Time - Who Will Win???')
countrybattle = rm.randint(0, len(countries)-1)
if countrybattle == victim_state:
fIO.write(f'It\'s The Same Country! ({countries[victim_state]}) Nothing Happens\n')
print(f'It\'s The Same Country! ({countries[victim_state]}) Nothing Happens\n')
else:
battlers = []
battlers.append(countries[victim_state])
battlers.append(countries[countrybattle])
print(f'It\'s {battlers[1]} vs. {battlers[0]} ')
fIO.write(f'It\'s {battlers[1]} vs. {battlers[0]} ')
if bool(rm.getrandbits(1)):
print(f'{battlers[0]} Wins and incorporates {battlers[1]}\n')
fIO.write(f'{battlers[0]} Wins and incorporates {battlers[1]}\n')
countries.remove(countries[countrybattle])
else:
print(f'{battlers[1]} Wins and incorporates {battlers[0]}\n')
fIO.write(f'{battlers[1]} Wins and incorporates {battlers[0]}\n')
countries.remove(countries[victim_state])
fIO.close()
input('>')
|
[
"noreply@github.com"
] |
byte127x.noreply@github.com
|
174ab03d35f1b83c388a52575470a997450147eb
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/centerface/dependency/centernet/src/lib/datasets/sample/multi_pose.py
|
5f4ff97eb3a893a418e55fcd8f149478108e8a22
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"MIT"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 9,604
|
py
|
"""
MIT License
Copyright (c) 2019 Xingyi Zhou
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import cv2
from dependency.centernet.src.lib.utils.image import color_aug
from dependency.centernet.src.lib.utils.image import get_affine_transform, affine_transform
from dependency.centernet.src.lib.utils.image import gaussian_radius, draw_umich_gaussian
from dependency.extd.utils.augmentations import anchor_crop_image_sampling
def get_border(border, size):
"""
Get border
"""
i = 1
while size - border // i <= border // i: # size > 2 * (border // i)
i *= 2
return border // i
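# Worked example (illustrative, not in the original source): get_border(128, 100) doubles i
# until the border fits, returning 128 // 4 == 32, so the crop center stays at least 32 px from the edges.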
def coco_box_to_bbox(box):
"""
(x1, y1, w, h) -> (x1, y1, x2, y2)
"""
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)
return bbox
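# Illustrative example (not in the original source): coco_box_to_bbox([10, 20, 30, 40])
# returns array([10., 20., 40., 60.], dtype=float32), i.e. the corner-format box.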
def preprocess_train(image, target, config):
"""
Preprocess training data
"""
data_rng = np.random.RandomState(123)
eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)
eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32).reshape((1, 1, 3))
std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32).reshape((1, 1, 3))
num_objs = len(target)
anns = []
for each in target:
ann = {}
ann['bbox'] = each[0:4]
ann['keypoints'] = each[4:]
anns.append(ann)
cv2.setNumThreads(0)
img, anns = anchor_crop_image_sampling(image, anns)
_, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
rot = 0
flipped = False
if config.rand_crop:
#s = s * np.random.choice(np.arange(0.8, 1.3, 0.05)) # for 768*768 or 800* 800
s = s * np.random.choice(np.arange(0.6, 1.0, 0.05)) # for 512 * 512
border = s * np.random.choice([0.1, 0.2, 0.25])
w_border = get_border(border, img.shape[1]) # w > 2 * w_border
h_border = get_border(border, img.shape[0]) # h > 2 * h_border
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = config.scale
cf = config.shift
c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
if np.random.random() < config.rotate:
rf = config.rotate
rot = np.clip(np.random.randn() * rf, -rf * 2, rf * 2)
if np.random.random() < config.flip: # opt.flip = 0.5
flipped = True
img = img[:, ::-1, :]
c[0] = width - c[0] - 1
trans_input = get_affine_transform(c, s, rot, [config.input_res, config.input_res])
inp = cv2.warpAffine(img, trans_input, (config.input_res, config.input_res), flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
if config.color_aug:
color_aug(data_rng, inp, eig_val, eig_vec)
inp = (inp - mean) / std
inp = inp.transpose(2, 0, 1)
output_res = config.output_res
num_joints = config.num_joints
max_objs = config.max_objs
trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
# map
hm = np.zeros((config.num_classes, output_res, output_res), dtype=np.float32)
hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
wh = np.zeros((output_res, output_res, 2), dtype=np.float32)
reg = np.zeros((output_res, output_res, 2), dtype=np.float32)
ind = np.zeros((output_res, output_res), dtype=np.float32) # as float32, need no data_type change later
reg_mask = np.zeros((max_objs), dtype=np.uint8)
wight_mask = np.zeros((output_res, output_res, 2), dtype=np.float32)
kps = np.zeros((output_res, output_res, num_joints * 2), dtype=np.float32)
kps_mask = np.zeros((output_res, output_res, num_joints * 2), dtype=np.float32)
#
hp_offset = np.zeros((max_objs * num_joints, 2), dtype=np.float32)
hp_ind = np.zeros((max_objs * num_joints), dtype=np.int64)
hp_mask = np.zeros((max_objs * num_joints), dtype=np.int64)
draw_gaussian = draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = coco_box_to_bbox(ann['bbox']) # [x,y,w,h]--[x1,y1,x2,y2]
cls_id = 0 #int(ann['category_id']) - 1
pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3) # (x,y,0/1)
if flipped:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
pts[:, 0] = width - pts[:, 0] - 1
for e in config.flip_idx: # flip_idx = [[0, 1], [3, 4]]
pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()
bbox[:2] = affine_transform(bbox[:2], trans_output) # [0, 1] -- (x1, y1)
bbox[2:] = affine_transform(bbox[2:], trans_output) # [2, 3] -- (x2, y2)
bbox = np.clip(bbox, 0, output_res - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if (h > 0 and w > 0) or (rot != 0):
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
ind[ct_int[1], ct_int[0]] = 1.0
wh[ct_int[1], ct_int[0], :] = np.log(1. * w / 4), np.log(1. * h / 4)
reg[ct_int[1], ct_int[0], :] = ct[0] - ct_int[0], ct[1] - ct_int[1]
reg_mask[k] = 1.0
wight_mask[ct_int[1], ct_int[0], 0] = 1
wight_mask[ct_int[1], ct_int[0], 1] = 1
# if w*h <= 20: # can get what we want sometime, but unstable
# wight_mask[k] = 15
if w*h <= 40:
wight_mask[ct_int[1], ct_int[0], 0] = 5
wight_mask[ct_int[1], ct_int[0], 1] = 5
if w*h <= 20:
wight_mask[ct_int[1], ct_int[0], 0] = 10
wight_mask[ct_int[1], ct_int[0], 1] = 10
if w*h <= 10:
wight_mask[ct_int[1], ct_int[0], 0] = 15
wight_mask[ct_int[1], ct_int[0], 1] = 15
if w*h <= 4:
wight_mask[ct_int[1], ct_int[0], 0] = 0.1
wight_mask[ct_int[1], ct_int[0], 1] = 0.1
num_kpts = pts[:, 2].sum()
if num_kpts == 0:
hm[cls_id, ct_int[1], ct_int[0]] = 0.9999
hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
hp_radius = max(0, int(hp_radius))
for j in range(num_joints):
if pts[j, 2] > 0:
pts[j, :2] = affine_transform(pts[j, :2], trans_output_rot)
if pts[j, 0] >= 0 and pts[j, 0] < output_res and pts[j, 1] >= 0 and pts[j, 1] < output_res:
kps[ct_int[1], ct_int[0], j * 2 : j * 2 + 2] = pts[j, :2] - ct_int
kps[ct_int[1], ct_int[0], j * 2 : j * 2 + 1] = kps[ct_int[1], ct_int[0], j * 2 : j * 2 + 1] / w
kps[ct_int[1], ct_int[0], j * 2 + 1: j * 2 + 2] = kps[ct_int[1], ct_int[0],
j * 2 + 1 : j * 2 + 2] / h
kps_mask[ct_int[1], ct_int[0], j * 2 : j * 2 + 2] = 1.0
pt_int = pts[j, :2].astype(np.int32)
hp_offset[k * num_joints + j] = pts[j, :2] - pt_int
hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0]
hp_mask[k * num_joints + j] = 1
draw_gaussian(hm_hp[j], pt_int, hp_radius)
kps_mask[ct_int[1], ct_int[0], j * 2 : j * 2 + 2] = \
0.0 if ann['bbox'][2] * ann['bbox'][3] <= 8.0 else 1.0
draw_gaussian(hm[cls_id], ct_int, radius)
gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1] +
pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id])
return inp, hm, reg_mask, ind, wh, wight_mask, reg, kps_mask, kps
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
f2c83f4d0fc2e94589608d3b0b8b4d950151f83d
|
470d603645840c014d7c7f6da0727cf9b1b7f618
|
/googleanalytics/utils/string.py
|
a6a7c42a72e0a31d672c5b9656b708e86d48053c
|
[
"ISC"
] |
permissive
|
ranksense/google-analytics
|
5f3982a625d3a4aced16503856e83a30a6d890be
|
a97f7750fa51320fb9f2917e56563ffff06ccdde
|
refs/heads/master
| 2022-12-15T15:54:17.519653
| 2020-09-19T17:05:59
| 2020-09-19T17:05:59
| 287,854,525
| 2
| 1
|
ISC
| 2020-08-16T01:46:25
| 2020-08-16T01:46:24
| null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
import textwrap
# Python 2 and 3 compatibility
try:
unicode = unicode
except NameError:
unicode = str
def format(string, **kwargs):
return textwrap.dedent(string).format(**kwargs).strip()
def affix(prefix, base, suffix, connector='_'):
if prefix:
prefix = prefix + connector
else:
prefix = ''
if suffix:
suffix = connector + suffix
else:
suffix = ''
return prefix + base + suffix
# a supercharged `join` function, analogous to `paste` in the R language
def paste(rows, *delimiters):
delimiter = delimiters[-1]
delimiters = delimiters[:-1]
if len(delimiters):
return paste([paste(row, *delimiters) for row in rows], delimiter)
else:
return delimiter.join(map(unicode, rows))
# a supercharged `split` function, the inverse of `paste`
def cut(s, *delimiters):
delimiter = delimiters[-1]
delimiters = delimiters[:-1]
if len(delimiters):
return [cut(ss, *delimiters) for ss in cut(s, delimiter)]
else:
return s.split(delimiter)
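# Illustrative round-trip (not part of the original module): with two delimiters, `paste`
# nests joins and `cut` nests splits, so the two functions invert each other:
#   paste([['a', 'b'], ['c', 'd']], '-', ',')  # -> 'a-b,c-d'
#   cut('a-b,c-d', '-', ',')                   # -> [['a', 'b'], ['c', 'd']]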
|
[
"stijn@debrouwere.org"
] |
stijn@debrouwere.org
|
c8fe58376e632a3abf6fabe21b845ea9bfca8392
|
493d5df9420ef94d9c5e82acb2d163e2a8c639b7
|
/memo_app/forms.py
|
f9a73ac5260ef3004567845ec9abe38b54032eea
|
[] |
no_license
|
reina0207/django
|
0e3d6422c137be52978526128112ebf319e0f462
|
c42744935043efdcc4b9f3f14641105d082d691a
|
refs/heads/master
| 2023-08-13T14:10:59.979651
| 2021-10-17T01:48:48
| 2021-10-17T01:48:48
| 417,983,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
from django import forms
from .models import Memo
class PostForm(forms.ModelForm):
class Meta:
model = Memo
fields = ['content']
widgets = {
'content':forms.Textarea
}
|
[
"you@example.com"
] |
you@example.com
|
34c0055b2c462a9631a4da5040691bbf2b848594
|
37b1734e625ef52d76bc32055c91943740a1ab64
|
/test.py
|
06ac45e8982057cf4d886cec21cfe7e70ed199ff
|
[] |
no_license
|
coloz/ocr-py
|
83458071572e5212a22f87b1ea4aef1d321aebcd
|
0037a83e877dc81cfe82f57df3274e1811690162
|
refs/heads/master
| 2023-05-02T21:30:36.839077
| 2021-05-17T14:21:30
| 2021-05-17T14:21:30
| 368,213,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
import cv2
if __name__ == '__main__':
    video = cv2.VideoCapture(0)  # assumed source: default camera; the original called cv2.imread() with no argument
ok, frame = video.read()
if ok:
cv2.imshow("tracker", frame)
while True:
ok, frame = video.read()
if ok:
cv2.imshow("tracker",frame)
k = cv2.waitKey(1) & 0xff
if k == 27:
break
|
[
"clz@clz.me"
] |
clz@clz.me
|
5cbebc094716ebcd2abe250c57520dee3117a1d0
|
d7d25574246fd8585396a02ebd2ca8450e49b082
|
/leetcode-py/leetcode1041.py
|
44b0f6df2298740f5cbbebe712ae04d38cac1548
|
[] |
no_license
|
cicihou/LearningProject
|
b6b1de2300e574835f253935d0c0ae693b194020
|
3a5649357e0f21cbbc5e238351300cd706d533b3
|
refs/heads/master
| 2022-12-04T06:18:14.856766
| 2022-11-29T08:54:16
| 2022-11-29T08:54:16
| 141,606,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
class Solution:
def isRobotBounded(self, instructions: str) -> bool:
'''
        A variant of LC 657.
        This problem is fairly tricky and is really closer to a math problem:
        in fact, it is enough to check whether the robot ends up facing north
        to know whether it has returned to the origin.
        For why instructions *= 4 is the minimum needed, see:
        https://leetcode.com/problems/robot-bounded-in-circle/discuss/850437/Python-O(n)-solution-explained
        https://leetcode.com/problems/robot-bounded-in-circle/discuss/290915/Python-Concise-%2B-Explanation
        note: the idea behind this direc check is worth remembering; my own instinct was to use two
        arrays for the left and right directions, but left and right only differ in their period.
'''
direc = [(0, 1), (-1, 0), (0, -1), (1, 0)]
d = 0
start = (0, 0)
instructions *= 4
for ch in instructions:
if ch == 'G':
nx, ny = direc[d]
start = start[0] + nx, start[1] + ny
else:
if ch == 'L':
d = (d + 1) % 4
if ch == 'R':
d = (d + 3) % 4
return start == (0, 0)
'''
        An even trickier mathematical solution: check whether the robot is facing north;
        if it is not, it is guaranteed to come back.
https://leetcode.com/problems/robot-bounded-in-circle/discuss/291221/Python-O(N)-time-O(1)-space-beats-100-detailed-explanations
'''
direc = [(0, 1), (-1, 0), (0, -1), (1, 0)]
d = 0
start = (0, 0)
for ch in instructions:
if ch == 'L':
d = (d+1) % 4
elif ch == 'R':
d = (d+3) % 4
else:
nx, ny = direc[d]
start = start[0] + nx, start[1] + ny
return start == (0, 0) or d != 0
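# Illustrative checks (not part of the original file), using the well-known LeetCode 1041 examples:
#   Solution().isRobotBounded("GGLLGG")  # True  - ends back at the origin
#   Solution().isRobotBounded("GG")      # False - keeps drifting north
#   Solution().isRobotBounded("GL")      # True  - traces a closed loop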
|
[
"houxi_zuel@163.com"
] |
houxi_zuel@163.com
|
fbf9ee05f41aa879f6e8efe0d638a2ad5f92c86f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/53/usersdata/94/21421/submittedfiles/matriz2.py
|
0d65cb52f0ea2e9536be7e87278cf3364bd3fd2d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def soma_diagonal_principal(a):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,i]
return soma
def soma_diagonal_secundaria(a):
soma=0
for i in range(0,a.shape[0],1):
        soma=soma+a[i,a.shape[0]-i-1]
return soma
def soma_linha(a):
    c=[]
for i in range (0,a.shape[0],1):
soma=0
for j in range (0,a.shape[1],1):
soma=soma+a[i,j]
        c.append(soma)
return c
def soma_coluna(a):
    d=[]
for j in range(0,a.shape[1],1):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,j]
        d.append(soma)
return d
def magico(a):
sdp=soma_diagonal_principal(a)
sds=soma_diagonal_secundaria(a)
sl=soma_linha(a)
sc=soma_coluna(a)
cont=0
for i in range(0,len(sl),1):
if sdp==sds==sl[i]==sc[i]:
cont=cont+1
if cont==len(sl):
return True
else:
return False
linhas=input('digite a quantidade de linhas')
colunas=input('digite a quantidade de colunas')
a=np.zeros((linhas,colunas))
for i in range (0,a.shape[0],1):
for j in range(0,a.shape[1],1):
a[i,j]=input('digite um elemento:')
if magico(a):
print ('S')
else:
print ('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
50d80d14dd6348c83c9f92ad9073c1f1fdbf0848
|
ab204a9b476c82c2543debd5455864a619acfb3b
|
/Travelling/ckdproject1/wsgi.py
|
b76560a5ed5fdfa463f50ca05f19d504c4da5308
|
[] |
no_license
|
chandan9074/Travelling-Web
|
ba05a77ed576a198ec53241b791d1f74ccaf3687
|
246f21a319b9afd7da45942cc936b2f32f7790d8
|
refs/heads/master
| 2022-12-02T07:23:32.672957
| 2020-08-02T07:36:32
| 2020-08-02T07:36:32
| 284,413,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for ckdproject1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ckdproject1.settings')
application = get_wsgi_application()
|
[
"chandan15-11611@diu.edu.bd"
] |
chandan15-11611@diu.edu.bd
|